/*
 * pci_iomap() - create an __iomem cookie for a PCI BAR (SH variant).
 *
 * Returns a mapping for BAR @bar clamped to @maxlen bytes (0 means
 * "the whole BAR"), or NULL if the BAR is unassigned or of an
 * unsupported type.
 */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	resource_size_t start = pci_resource_start(dev, bar);
	resource_size_t len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (unlikely(!len || !start))
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;

	/*
	 * Presently the IORESOURCE_MEM case is a bit special, most
	 * SH7751 style PCI controllers have PCI memory at a fixed
	 * location in the address space where no remapping is desired
	 * (typically at 0xfd000000, but is_pci_memaddr() will know
	 * best). With the IORESOURCE_MEM case more care has to be taken
	 * to inhibit page table mapping for legacy cores, but this is
	 * punted off to __ioremap().
	 * -- PFM.
	 */
	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);

	return (flags & IORESOURCE_MEM) ? ioremap(start, len) : NULL;
}
void _kc_pci_release_regions(struct pci_dev *dev) { int i; for (i = 0; i < 6; i++) { if (pci_resource_len(dev, i) == 0) continue; if (pci_resource_flags(dev, i) & IORESOURCE_IO) release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); } }
/*************************************************************************/ /*!
@Function       OSPCIRequestAddrRegion
@Description    Request a given region from an address range for
                subsequent use
@Input          hPVRPCI                 PCI device handle
@Input          ui32Index               Address range index
@Input          ui32Offset              Offset into the address range that
                                        forms the start of the region
@Input          ui32Length              Length of the region
@Return         PVRSRV_ERROR            Services error code
*/ /**************************************************************************/
PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
				    IMG_UINT32 ui32Index,
				    IMG_UINT32 ui32Offset,
				    IMG_UINT32 ui32Length)
{
	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
	resource_size_t start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
	resource_size_t end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index);

	/* The requested region must lie entirely within the address range. */
	if ((start + ui32Offset + ui32Length - 1) > end)
		return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH;

	if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO) {
		if (!request_region(start + ui32Offset, ui32Length, PVRSRV_MODNAME))
			return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
	} else {
		if (!request_mem_region(start + ui32Offset, ui32Length, PVRSRV_MODNAME))
			return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE;
	}

	return PVRSRV_OK;
}
/**
 * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
 * @dev: PCI device that owns the BAR
 * @bar: BAR number
 * @offset: map memory at the given offset in BAR
 * @maxlen: max length of the memory to map
 *
 * Map part of a memory BAR with write combining where available and
 * return an __iomem cookie usable with ioread*()/iowrite*(), which hide
 * the MMIO/PIO distinction.  Pass %0 as @maxlen to map from @offset to
 * the end of the BAR.  Returns NULL for I/O port BARs, unassigned BARs,
 * or an @offset at or beyond the BAR's length.
 */
void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
				 int bar,
				 unsigned long offset,
				 unsigned long maxlen)
{
	resource_size_t start = pci_resource_start(dev, bar);
	resource_size_t len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	/* Write combining makes no sense for I/O port space. */
	if (flags & IORESOURCE_IO)
		return NULL;
	if (len <= offset || !start)
		return NULL;

	start += offset;
	len -= offset;
	if (maxlen && len > maxlen)
		len = maxlen;

	if (flags & IORESOURCE_MEM)
		return ioremap_wc(start, len);

	/* Neither I/O nor memory -- nothing we can map. */
	return NULL;
}
static int hptiop_map_pci_bar(struct hptiop_hba *hba) { u32 mem_base_phy, length; void __iomem *mem_base_virt; struct pci_dev *pcidev = hba->pcidev; if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) { printk(KERN_ERR "scsi%d: pci resource invalid\n", hba->host->host_no); return -1; } mem_base_phy = pci_resource_start(pcidev, 0); length = pci_resource_len(pcidev, 0); mem_base_virt = ioremap(mem_base_phy, length); if (!mem_base_virt) { printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n", hba->host->host_no); return -1; } hba->iop = mem_base_virt; dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop); return 0; }
/*************************************************************************/ /*!
@Function       OSPCIReleaseAddrRegion
@Description    Release a given region, from an address range, that is
                no longer in use
@Input          hPVRPCI                 PCI device handle
@Input          ui32Index               Address range index
@Input          ui32Offset              Offset into the address range that
                                        forms the start of the region
@Input          ui32Length              Length of the region
@Return         PVRSRV_ERROR            Services error code
*/ /**************************************************************************/
PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
				    IMG_UINT32 ui32Index,
				    IMG_UINT32 ui32Offset,
				    IMG_UINT32 ui32Length)
{
	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
	resource_size_t start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
	resource_size_t end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index);

	/* The region must lie entirely within the address range. */
	if ((start + ui32Offset + ui32Length - 1) > end)
		return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH;

	if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO)
		release_region(start + ui32Offset, ui32Length);
	else
		release_mem_region(start + ui32Offset, ui32Length);

	return PVRSRV_OK;
}
/* Release a previously requested I/O or memory span of BAR @index. */
static void release_pci_io_addr(struct pci_dev *pdev, u32 index,
				resource_size_t start, resource_size_t length)
{
	const unsigned long flags = pci_resource_flags(pdev, index);

	if (flags & IORESOURCE_IO)
		release_region(start, length);
	else
		release_mem_region(start, length);
}
/*
 * Comedi auto-attach handler for Amplicon DIO200 PCI/PCIe boards.
 * @context_model indexes dio200_pci_boards[]; the steps below (board
 * lookup, private-data allocation, PCI enable, BAR mapping, PCIe
 * fix-ups) must run in this order.  Returns 0 or a negative errno.
 */
static int dio200_pci_auto_attach(struct comedi_device *dev,
				  unsigned long context_model)
{
	struct pci_dev *pci_dev = comedi_to_pci_dev(dev);
	const struct dio200_board *thisboard = NULL;
	struct dio200_private *devpriv;
	unsigned int bar;
	int ret;

	/* Resolve the board description from the probe context. */
	if (context_model < ARRAY_SIZE(dio200_pci_boards))
		thisboard = &dio200_pci_boards[context_model];
	if (!thisboard)
		return -EINVAL;
	dev->board_ptr = thisboard;
	dev->board_name = thisboard->name;

	dev_info(dev->class_dev, "%s: attach pci %s (%s)\n",
		 dev->driver->driver_name, pci_name(pci_dev), dev->board_name);

	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	ret = comedi_pci_enable(dev);
	if (ret)
		return ret;

	/* Sanity-check the main register BAR before touching it. */
	bar = thisboard->mainbar;
	if (pci_resource_len(pci_dev, bar) < thisboard->mainsize) {
		dev_err(dev->class_dev, "error! PCI region size too small!\n");
		return -EINVAL;
	}

	/* Memory BARs are ioremapped; I/O BARs are accessed by port. */
	if (pci_resource_flags(pci_dev, bar) & IORESOURCE_MEM) {
		devpriv->io.u.membase = pci_ioremap_bar(pci_dev, bar);
		if (!devpriv->io.u.membase) {
			dev_err(dev->class_dev,
				"error! cannot remap registers\n");
			return -ENOMEM;
		}
		devpriv->io.regtype = mmio_regtype;
	} else {
		devpriv->io.u.iobase = pci_resource_start(pci_dev, bar);
		devpriv->io.regtype = io_regtype;
	}

	/* PCIe models need additional board setup before common attach. */
	switch (context_model) {
	case pcie215_model:
	case pcie236_model:
	case pcie296_model:
		ret = dio200_pcie_board_setup(dev);
		if (ret < 0)
			return ret;
		break;
	default:
		break;
	}

	return amplc_dio200_common_attach(dev, pci_dev->irq, IRQF_SHARED);
}
/* Map memory BAR @bar of @pdev; WARN and return NULL for non-memory BARs. */
static void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		return ioremap_nocache(pci_resource_start(pdev, bar),
				       pci_resource_len(pdev, bar));

	WARN_ON(1);
	return NULL;
}
/*
 * Legacy CardBus serial attach: locate the PCI function, register it as
 * a serial port and hand back a device node.  The module use count is
 * bumped up-front and dropped again on every failure path.
 * Returns the new node, or NULL on failure.
 */
static dev_node_t *serial_attach(dev_locator_t *loc)
{
	u_int io;
	u_char irq;
	int line;
	struct serial_struct serial;
	struct pci_dev *pdev;
	dev_node_t *node;

	MOD_INC_USE_COUNT;

	/* Only PCI-located devices are handled here. */
	if (loc->bus != LOC_PCI)
		goto err_out;
	pdev = pci_find_slot(loc->b.pci.bus, loc->b.pci.devfn);
	if (!pdev)
		goto err_out;
	if (pci_enable_device(pdev))
		goto err_out;

	printk(KERN_INFO "serial_attach(bus %d, fn %d)\n",
	       pdev->bus->number, pdev->devfn);
	io = pci_resource_start(pdev, 0);
	irq = pdev->irq;
	/* The UART registers must live in I/O port space. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
		printk(KERN_NOTICE "serial_cb: PCI base address 0 is not IO\n");
		goto err_out;
	}
	device_setup(pdev, io);
	memset(&serial, 0, sizeof(serial));
	serial.port = io;
	serial.irq = irq;
	serial.flags = ASYNC_SKIP_TEST | ASYNC_SHARE_IRQ;

	/* Some devices seem to need extra time */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ/50);

	line = register_serial(&serial);
	if (line < 0) {
		printk(KERN_NOTICE "serial_cb: register_serial() at 0x%04x, "
		       "irq %d failed\n", serial.port, serial.irq);
		goto err_out;
	}

	node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
	if (!node)
		goto err_out_unregister;
	/* minor 0x40 + line follows the classic ttyS numbering scheme. */
	sprintf(node->dev_name, "ttyS%d", line);
	node->major = TTY_MAJOR;
	node->minor = 0x40 + line;
	node->next = NULL;
	return node;

err_out_unregister:
	unregister_serial(line);
err_out:
	MOD_DEC_USE_COUNT;
	return NULL;
}
/*
 * Probe the NEC VRC4173 companion chip: enable the device, validate
 * BAR 0 as an I/O resource, claim the regions and wire up the cascaded
 * interrupt controller.
 *
 * NOTE(review): the failure paths after pci_request_regions() return
 * without releasing the regions -- verify whether the caller cleans up.
 */
static int __devinit vrc4173_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	unsigned long start, flags;
	int err;

	err = pci_enable_device(dev);
	if (err < 0) {
		printk(KERN_ERR "vrc4173: Failed to enable PCI device, aborting\n");
		return err;
	}

	pci_set_master(dev);

	/* BAR 0 must be assigned and must be an I/O port range. */
	start = pci_resource_start(dev, 0);
	if (start == 0) {
		printk(KERN_ERR "vrc4173:No such PCI I/O resource, aborting\n");
		return -ENXIO;
	}

	flags = pci_resource_flags(dev, 0);
	if ((flags & IORESOURCE_IO) == 0) {
		printk(KERN_ERR "vrc4173: No such PCI I/O resource, aborting\n");
		return -ENXIO;
	}

	err = pci_request_regions(dev, "NEC VRC4173");
	if (err < 0) {
		printk(KERN_ERR "vrc4173: PCI resources are busy, aborting\n");
		return err;
	}

	/* Initialise the on-chip units relative to the I/O base. */
	set_vrc4173_io_offset(start);

	vrc4173_cmu_init();
	vrc4173_giu_init();

	err = vrc4173_icu_init(dev->irq);
	if (err < 0) {
		printk(KERN_ERR "vrc4173: Invalid IRQ %d, aborting\n", dev->irq);
		return err;
	}

	err = vr41xx_cascade_irq(dev->irq, vrc4173_get_irq_number);
	if (err < 0) {
		printk(KERN_ERR "vrc4173: IRQ resource %d is busy, aborting\n", dev->irq);
		return err;
	}

	printk(KERN_INFO "NEC VRC4173 at 0x%#08lx, IRQ is cascaded to %d\n",
	       start, dev->irq);

	return 0;
}
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Only memory BARs can be ioremap()ed; complain loudly about
	 * attempts to map an I/O port BAR.
	 */
	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		return ioremap_nocache(pci_resource_start(pdev, bar),
				       pci_resource_len(pdev, bar));

	WARN_ON(1);
	return NULL;
}
/*
 * Module init: locate the Cyclone IV PCI device, enable memory access,
 * bus mastering and 32-bit DMA, find a writable memory BAR large enough
 * to hold a struct PhysImg and register the character device.
 *
 * Fixes over the previous version:
 *  - PCI_COMMAND is a 16-bit register, so read it into a u16 (it was
 *    read into an int through a mismatched pointer), and actually write
 *    the modified command word back -- it was computed and then dropped.
 *  - dma_set_mask() returns 0 on success; the old test was inverted and
 *    rejected every device that *did* support 32-bit DMA.
 *  - The BAR filter's boolean logic contradicted its own comment: it
 *    skipped only empty non-I/O BARs instead of skipping I/O ports and
 *    empty BARs.
 *  - region.resource_num is reset to -1 up front so the "not found"
 *    check is valid regardless of prior module state.
 *  - sizeof yields size_t, so print it with %zu, not %d.
 */
int __init init_module(void)
{
	u16 cmd;	/* PCI command register is 16 bits wide */
	unsigned long flags;
	int i;

	inPos_device = pci_get_device(PCI_VENDOR_ID_ALTERA, PCI_DEVICE_ID_CYCLONE_IV, inPos_device);
	if (!inPos_device) {
		printk(KERN_ALERT "ERROR - Device register failed, no such device\n");
		return -ENODEV;
	}

	pci_read_config_word(inPos_device, PCI_COMMAND, &cmd);

	/* Set flags */
	/* NOTE(review): set_command_flag() is assumed to take the command
	 * word by pointer -- confirm its parameter type matches u16 *. */
	set_command_flag(&cmd, PCI_COMMAND_MEMORY);	/* Enable response in Memory space */
	set_command_flag(&cmd, PCI_COMMAND_MASTER);	/* Enable bus mastering */
	set_command_flag(&cmd, PCI_COMMAND_INVALIDATE);	/* Use memory write and invalidate */
	/* Apply the updated command word -- previously it was never written back. */
	pci_write_config_word(inPos_device, PCI_COMMAND, cmd);

	/* dma_set_mask() returns 0 on success, negative errno on failure. */
	if (dma_set_mask(&inPos_device->dev, 0xffffffff)) {
		printk(KERN_ALERT "DMA 32-bit not supported\n");
		return -ENOTSUPP;
	}

	/* Find a writable memory BAR big enough to hold a PhysImg. */
	region.resource_num = -1;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		region.size = pci_resource_len(inPos_device, i);
		region.phys_addr = pci_resource_start(inPos_device, i);
		flags = pci_resource_flags(inPos_device, i);
		if ((flags & IORESOURCE_IO) || !region.size)	/* skip I/O ports and empty BARs */
			continue;
		if ((region.size >= sizeof(struct PhysImg)) && !(flags & IORESOURCE_READONLY)) {
			region.resource_num = i;
			break;
		}
	}

	if (region.resource_num < 0) {
		printk(KERN_ALERT "ERROR - Device memory region with size >= %zu not found!\n",
		       sizeof(struct PhysImg));
		return -EINVAL;
	}

	region.phys_addr &= PCI_BASE_ADDRESS_MEM_MASK;
	region.size = ~(region.size & PCI_BASE_ADDRESS_MEM_MASK) + 1;

	dev_major = register_chrdev(IN_POS_MAJOR, name, &fops);
	if (dev_major < 0) {
		printk(KERN_ALERT "ERROR - Device register failed with code: %d\n", dev_major);
		return dev_major;
	}

	return 0;
}
int _kc_pci_request_regions(struct pci_dev *dev, char *res_name) { int i; for (i = 0; i < 6; i++) { if (pci_resource_len(dev, i) == 0) continue; if (pci_resource_flags(dev, i) & IORESOURCE_IO) { if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { pci_release_regions(dev); return -EBUSY; } } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { pci_release_regions(dev); return -EBUSY; } } } return 0; }
/*
 * Probe the decom PCI board: map BAR 0, register the character device
 * and initialise driver state.
 *
 * Fixes over the previous version: the ioremap_nocache() result was
 * never checked before use, and a failure in alloc_chrdev_region()
 * leaked both the mapping and the requested memory region.
 */
static int __devinit decom_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;
	dev_t devnum;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	decom_drv.mem_base_raw = pci_resource_start(pdev, 0);
	decom_drv.flags = pci_resource_flags(pdev, 0);
	decom_drv.len = pci_resource_len(pdev, 0);

	/* BAR 0 must be an assigned memory resource. */
	if (!decom_drv.mem_base_raw || ((decom_drv.flags & IORESOURCE_MEM) == 0)) {
		printk(KERN_ERR "%s: no I/O resource at PCI BAR #0\n", DRV_NAME);
		return -ENODEV;
	}

	if (!request_mem_region(decom_drv.mem_base_raw, decom_drv.len, DRV_NAME)) {
		printk(KERN_WARNING "%s: memory already in use\n", DRV_NAME);
		return -EBUSY;
	}

	decom_drv.mem_base = ioremap_nocache(decom_drv.mem_base_raw, decom_drv.len);
	if (!decom_drv.mem_base) {
		printk(KERN_ERR "%s: could not map PCI BAR #0\n", DRV_NAME);
		ret = -ENOMEM;
		goto err_release;
	}

	// Register this as a character device
	ret = alloc_chrdev_region(&devnum, DECOM_MINOR, 1, DRV_NAME);
	if (ret < 0) {
		printk(KERN_WARNING DRV_NAME " can't allocate major\n");
		goto err_unmap;
	}

	printk(KERN_DEBUG DRV_NAME " major: %d minor: %d dev: %d\n",
	       MAJOR(devnum), DECOM_MINOR, devnum);

	cdev_init(&decom_drv.decom_cdev, &decom_fops);
	decom_drv.decom_cdev.owner = THIS_MODULE;
	/* cdev_add failure is only warned about, matching prior behaviour. */
	ret = cdev_add(&decom_drv.decom_cdev, devnum, 1);
	if (ret < 0)
		printk(KERN_WARNING DRV_NAME " failed to register decom_pci device\n");

	decom_drv.timer_on = 0;
	decom_drv.use_count = 0;
	decom_wfifo.status = FIFO_DISABLED;

	decom_pci_start_sysfs();

	printk(KERN_NOTICE "%s: driver initialized\n", DRV_NAME);
	return 0;

err_unmap:
	iounmap(decom_drv.mem_base);
err_release:
	release_mem_region(decom_drv.mem_base_raw, decom_drv.len);
	return ret;
}
/*
 * pci_iomap() for a platform where BAR addresses are directly usable:
 * the resource start itself is returned as the __iomem cookie, with no
 * remapping.  @maxlen is accepted for API compatibility but unused.
 */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len || !start)
		return NULL;

	if (flags & (IORESOURCE_IO | IORESOURCE_MEM))
		return (void __iomem *) start;

	return NULL;
}
/* Verify BAR 0 is a memory BAR and claim all regions for DRIVER_NAME. */
static int request_bar(struct pci_dev *pdev)
{
	int err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing registers BAR, aborting.\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

	return err;
}
/*
 * Map memory BARs 0 and 2 of the adapter into io_handle[0] and
 * io_handle[1].  The unwinding on failure is keyed on the loop index:
 * Err_unreq releases the BAR currently being set up, and Err
 * additionally tears down BAR 0 if the second iteration (i > 0) failed.
 * Returns 0 or a negative errno.
 */
static int asd_map_memio(struct asd_ha_struct *asd_ha)
{
	int err, i;
	struct asd_ha_addrspace *io_handle;

	asd_ha->iospace = 0;
	/* i steps 0, 2: the two memory BARs map to handles 0 and 1. */
	for (i = 0; i < 3; i += 2) {
		io_handle = &asd_ha->io_handle[i==0?0:1];
		io_handle->start = pci_resource_start(asd_ha->pcidev, i);
		io_handle->len = pci_resource_len(asd_ha->pcidev, i);
		io_handle->flags = pci_resource_flags(asd_ha->pcidev, i);
		err = -ENODEV;
		if (!io_handle->start || !io_handle->len) {
			asd_printk("MBAR%d start or length for %s is 0.\n",
				   i==0?0:1, pci_name(asd_ha->pcidev));
			goto Err;
		}
		err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME);
		if (err) {
			asd_printk("couldn't reserve memory region for %s\n",
				   pci_name(asd_ha->pcidev));
			goto Err;
		}
		/* Cacheable BARs may use a cached mapping; others must not. */
		if (io_handle->flags & IORESOURCE_CACHEABLE)
			io_handle->addr = ioremap(io_handle->start,
						  io_handle->len);
		else
			io_handle->addr = ioremap_nocache(io_handle->start,
							  io_handle->len);
		if (!io_handle->addr) {
			asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1,
				   pci_name(asd_ha->pcidev));
			err = -ENOMEM;
			goto Err_unreq;
		}
	}

	return 0;

Err_unreq:
	/* Release the region of the BAR that failed to map. */
	pci_release_region(asd_ha->pcidev, i);
Err:
	/* If the second BAR failed, the first one is fully set up: undo it. */
	if (i > 0) {
		io_handle = &asd_ha->io_handle[0];
		iounmap(io_handle->addr);
		pci_release_region(asd_ha->pcidev, 0);
	}
	return err;
}
static void intel_dc21285_exit(struct pci_dev *dev, struct map_pci_info *map)
{
	u32 val;

	if (map->base)
		iounmap(map->base);

	/* Undo the PCI BAR2/PCI ROM BAR address alteration. */
	pci_resource_flags(dev, PCI_ROM_RESOURCE) &= ~IORESOURCE_ROM_ENABLE;
	pci_read_config_dword(dev, PCI_ROM_ADDRESS, &val);
	pci_write_config_dword(dev, PCI_ROM_ADDRESS,
			       val & ~PCI_ROM_ADDRESS_ENABLE);
}
/*
 * Map @length bytes at @offset within PCI BAR @index.  Memory BARs are
 * ioremap()ed; for anything else the raw base + offset is handed back
 * as a port-style cookie.
 */
void *os_map_pci_bar(
	void *osext,
	int index,
	HPT_U32 offset,
	HPT_U32 length
)
{
	PHBA hba = (PHBA)osext;
	unsigned long base = pci_resource_start(hba->pcidev, index);

	if (pci_resource_flags(hba->pcidev, index) & IORESOURCE_MEM)
		return ioremap(base + offset, length);

	return (char *)base + offset;
}
/* Get framebuffer memory from Hyper-V video pci space */
/*
 * Locates the Hyper-V video PCI device, carves screen_fb_size bytes off
 * the END of its memory BAR 0, maps it and fills in @info.  The err1/2/3
 * labels unwind in reverse order of acquisition.  Returns 0 or -ENODEV/
 * -ENOMEM.
 */
static int hvfb_getmem(struct fb_info *info)
{
	struct pci_dev *pdev;
	ulong fb_phys;
	void __iomem *fb_virt;

	pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
			      PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
	if (!pdev) {
		pr_err("Unable to find PCI Hyper-V video\n");
		return -ENODEV;
	}

	/* BAR 0 must be memory and large enough for the framebuffer. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) < screen_fb_size)
		goto err1;

	/* The framebuffer occupies the last screen_fb_size bytes of the BAR. */
	fb_phys = pci_resource_end(pdev, 0) - screen_fb_size + 1;
	if (!request_mem_region(fb_phys, screen_fb_size, KBUILD_MODNAME))
		goto err1;

	fb_virt = ioremap(fb_phys, screen_fb_size);
	if (!fb_virt)
		goto err2;

	/* Advertise the whole BAR as the aperture for conflict handling. */
	info->apertures = alloc_apertures(1);
	if (!info->apertures)
		goto err3;

	info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
	info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
	info->fix.smem_start = fb_phys;
	info->fix.smem_len = screen_fb_size;
	info->screen_base = fb_virt;
	info->screen_size = screen_fb_size;

	pci_dev_put(pdev);
	return 0;

err3:
	iounmap(fb_virt);
err2:
	release_mem_region(fb_phys, screen_fb_size);
err1:
	pci_dev_put(pdev);
	return -ENOMEM;
}
/*
 * Generic pci_iomap(): return an __iomem cookie for BAR @bar, mapping
 * at most @max bytes (0 means the whole BAR).  NULL for empty BARs or
 * BARs that are neither I/O nor memory.
 */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len)
		return NULL;
	if (max && len > max)
		len = max;

	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);

	return (flags & IORESOURCE_MEM) ? ioremap(start, len) : NULL;
}
/*
 * Probe the NEC VRC4173 companion chip (alternative revision of the
 * handler at the top of this file): enable the device, validate BAR 0
 * as an I/O range, claim regions and cascade the interrupt controller.
 *
 * NOTE(review): the second condition re-tests !start, which was already
 * checked just above; and the failure paths after pci_request_regions()
 * return without releasing the regions -- confirm intended.
 */
static int __devinit vrc4173_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long start, flags;
	int err;

	if ((err = pci_enable_device(pdev)) < 0) {
		printk(KERN_ERR "vrc4173: failed to enable device -- err=%d\n", err);
		return err;
	}
	pci_set_master(pdev);

	/* BAR 0 must be assigned... */
	start = pci_resource_start(pdev, 0);
	if (!start) {
		printk(KERN_ERR "vrc4173:No PCI I/O resources, aborting\n");
		return -ENODEV;
	}

	/* ...and must be an I/O port range. */
	if (!start || (((flags = pci_resource_flags(pdev, 0)) & IORESOURCE_IO) == 0)) {
		printk(KERN_ERR "vrc4173: No PCI I/O resources, aborting\n");
		return -ENODEV;
	}

	if ((err = pci_request_regions(pdev, "NEC VRC4173")) < 0) {
		printk(KERN_ERR "vrc4173: PCI resources are busy, aborting\n");
		return err;
	}

	/* Initialise the on-chip units relative to the I/O base. */
	set_vrc4173_io_offset(start);

	vrc4173_cmu_init();
	vrc4173_icu_init(pdev->irq);

	if ((err = vr41xx_cascade_irq(pdev->irq, vrc4173_get_irq_number)) < 0) {
		printk(KERN_ERR "vrc4173: IRQ resource %d is busy, aborting\n", pdev->irq);
		return err;
	}

	printk(KERN_INFO "NEC VRC4173 at 0x%#08lx, IRQ is cascaded to %d\n",
	       start, pdev->irq);

	return 0;
}
/*
 * Claim the span [start+offset, start+offset+length) within BAR @index
 * of @pdev, using the region type (I/O vs memory) the BAR reports.
 * Returns 0 on success, -EIO if the span is outside the BAR or busy.
 */
static int request_pci_io_addr(struct pci_dev *pdev, u32 index,
			       resource_size_t offset, resource_size_t length)
{
	resource_size_t start = pci_resource_start(pdev, index);
	resource_size_t end = pci_resource_end(pdev, index);
	struct resource *res;

	/* The requested span must fit entirely inside the BAR. */
	if ((start + offset + length - 1) > end)
		return -EIO;

	if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
		res = request_region(start + offset, length, DRV_NAME);
	else
		res = request_mem_region(start + offset, length, DRV_NAME);

	return res ? 0 : -EIO;
}
/*
 * pci_iomap() variant that honours IORESOURCE_CACHEABLE: cacheable
 * memory BARs get a plain ioremap(), everything else an uncached map.
 * @maxlen of 0 maps the whole BAR.
 */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len || !start)
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;

	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);

	if (flags & IORESOURCE_MEM)
		return (flags & IORESOURCE_CACHEABLE) ?
			ioremap(start, len) : ioremap_nocache(start, len);

	return NULL;
}
/* map memory regions in to kernel virtual address space */ static int avp_pci_map_regions(struct pci_dev *dev, struct wrs_avp_pci_dev *avp_dev) { unsigned long addr, length, flags; phys_addr_t phys_addr; unsigned i; void *ptr; for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { length = pci_resource_len(dev, i); addr = pci_resource_start(dev, i); if ((length == 0) && (addr == 0)) { continue; } if ((length == 0) || (addr == 0)) { AVP_ERR("BAR%u has invalid length %lu and address %lu\n", i, addr, length); return -EINVAL; } flags = pci_resource_flags(dev, i); if (flags & IORESOURCE_MEM) { /* map addresses into kernel address space */ ptr = ioremap(addr, length); if (ptr == NULL) { return -1; } avp_dev->addresses[i] = ptr; AVP_DBG("BAR%u 0x%016llx ioremap to 0x%p", i, (unsigned long long)addr, ptr); phys_addr = virt_to_phys(ptr); AVP_DBG("virt_to_phys(0x%p) = 0x%016llx\n", ptr, phys_addr); AVP_DBG("phys_to_virt(0x%016llx) = 0x%p\n", (unsigned long long)phys_addr, phys_to_virt(phys_addr)); } } return 0; }
/*
 * Comedi auto-attach handler for Amplicon DIO200 PCI/PCIe boards
 * (newer revision using dev->mmio/dev->iobase directly).
 * @context_model indexes dio200_pci_boards[].  Returns 0 or a negative
 * errno.
 */
static int dio200_pci_auto_attach(struct comedi_device *dev,
				  unsigned long context_model)
{
	struct pci_dev *pci_dev = comedi_to_pci_dev(dev);
	const struct dio200_board *board = NULL;
	unsigned int bar;
	int ret;

	/* Resolve the board description from the probe context. */
	if (context_model < ARRAY_SIZE(dio200_pci_boards))
		board = &dio200_pci_boards[context_model];
	if (!board)
		return -EINVAL;
	dev->board_ptr = board;
	dev->board_name = board->name;

	dev_info(dev->class_dev, "%s: attach pci %s (%s)\n",
		 dev->driver->driver_name, pci_name(pci_dev), dev->board_name);

	ret = comedi_pci_enable(dev);
	if (ret)
		return ret;

	/* Memory BARs are ioremapped; I/O BARs are accessed by port. */
	bar = board->mainbar;
	if (pci_resource_flags(pci_dev, bar) & IORESOURCE_MEM) {
		dev->mmio = pci_ioremap_bar(pci_dev, bar);
		if (!dev->mmio) {
			dev_err(dev->class_dev,
				"error! cannot remap registers\n");
			return -ENOMEM;
		}
	} else {
		dev->iobase = pci_resource_start(pci_dev, bar);
	}

	/* PCIe models need additional board setup before common attach. */
	if (board->is_pcie) {
		ret = dio200_pcie_board_setup(dev);
		if (ret < 0)
			return ret;
	}

	return amplc_dio200_common_attach(dev, pci_dev->irq, IRQF_SHARED);
}
static void usnic_vnic_release_resources(struct usnic_vnic *vnic) { int i; struct pci_dev *pdev; enum usnic_vnic_res_type res_type; pdev = usnic_vnic_get_pdev(vnic); for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1; res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) usnic_vnic_free_res_chunk(&vnic->chunks[res_type]); vnic_dev_unregister(vnic->vdev); for (i = 0; i < ARRAY_SIZE(vnic->bar); i++) { if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) continue; iounmap(vnic->bar[i].vaddr); } }
static int igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info) { int i, iom, iop, ret; unsigned long flags; static const char *bar_names[PCI_STD_RESOURCE_END + 1] = { "BAR0", "BAR1", "BAR2", "BAR3", "BAR4", "BAR5", }; iom = 0; iop = 0; for (i = 0; i < ARRAY_SIZE(bar_names); i++) { if (pci_resource_len(dev, i) != 0 && pci_resource_start(dev, i) != 0) { flags = pci_resource_flags(dev, i); if (flags & IORESOURCE_MEM) { ret = igbuio_pci_setup_iomem(dev, info, iom, i, bar_names[i]); if (ret != 0) return ret; iom++; } else if (flags & IORESOURCE_IO) { ret = igbuio_pci_setup_ioport(dev, info, iop, i, bar_names[i]); if (ret != 0) return ret; iop++; } } } return (iom != 0) ? ret : -ENOENT; }
/*
 * Dump a human-readable summary of all six standard BARs of @device to
 * the kernel log: start/end/size, basic type (IO/MEM), and one +/-
 * marker per IORESOURCE_* flag across three log lines per BAR.
 * A BAR whose flags are zero is reported as "not in use".
 */
static void print_bar_info(struct pci_dev* device)
{
	unsigned int flags = 0;
	unsigned int i = 0;

	for(i = 0; i < 6; i++)
	{
		flags = pci_resource_flags(device, i);

		if(!flags)
			printk(KERN_INFO ANB_DEVICE_PREFIX "Device BAR %d: not in use\n", i);
		else
		{
			/* Line 1: geometry, type, and the classic flag set. */
			printk(KERN_INFO ANB_DEVICE_PREFIX "Device BAR %d: %10d bytes (0x%08x ~ 0x%08x) Type: %3s P %c RO %c C %c RL%c SH%c\n",
				i,
				(unsigned int)pci_resource_len(device, i),
				(unsigned int)pci_resource_start(device, i),
				(unsigned int)pci_resource_end(device, i),
				((flags & IORESOURCE_IO) == IORESOURCE_IO) ? "IO" :
				((flags & IORESOURCE_MEM) == IORESOURCE_MEM) ? "MEM" : "---",
				((flags & IORESOURCE_PREFETCH) == IORESOURCE_PREFETCH) ? '+' : '-',
				((flags & IORESOURCE_READONLY) == IORESOURCE_READONLY) ? '+' : '-',
				((flags & IORESOURCE_CACHEABLE) == IORESOURCE_CACHEABLE) ? '+' : '-',
				((flags & IORESOURCE_RANGELENGTH) == IORESOURCE_RANGELENGTH) ? '+' : '-',
				((flags & IORESOURCE_SHADOWABLE) == IORESOURCE_SHADOWABLE) ? '+' : '-');
			/* Line 2: alignment, 64-bit, window and muxing flags. */
			printk(KERN_INFO ANB_DEVICE_PREFIX "                          ASz%c ASt%c M64%c W %c M %c\n",
				((flags & IORESOURCE_SIZEALIGN) == IORESOURCE_SIZEALIGN) ? '+' : '-',
				((flags & IORESOURCE_STARTALIGN) == IORESOURCE_STARTALIGN) ? '+' : '-',
				((flags & IORESOURCE_MEM_64) == IORESOURCE_MEM_64) ? '+' : '-',
				((flags & IORESOURCE_WINDOW) == IORESOURCE_WINDOW) ? '+' : '-',
				((flags & IORESOURCE_MUXED) == IORESOURCE_MUXED) ? '+' : '-');
			/* Line 3: state flags (exclusive/disabled/unset/auto/busy). */
			printk(KERN_INFO ANB_DEVICE_PREFIX "                          Ex %c Dis%c U %c A %c B %c\n",
				((flags & IORESOURCE_EXCLUSIVE) == IORESOURCE_EXCLUSIVE) ? '+' : '-',
				((flags & IORESOURCE_DISABLED) == IORESOURCE_DISABLED) ? '+' : '-',
				((flags & IORESOURCE_UNSET) == IORESOURCE_UNSET) ? '+' : '-',
				((flags & IORESOURCE_AUTO) == IORESOURCE_AUTO) ? '+' : '-',
				((flags & IORESOURCE_BUSY) == IORESOURCE_BUSY) ? '+' : '-');
		}
	}
}