/*
 * Store the basic register settings needed by the controller.
 */
static void configure_hc(struct uhci_hcd *uhci)
{
	/* Set the frame length to the default: 1 ms exactly */
	outb(USBSOF_DEFAULT, uhci->io_addr + USBSOF);

	/* Store the frame list base address */
	outl(uhci->frame_dma_handle, uhci->io_addr + USBFLBASEADD);

	/* Set the current frame number */
	outw(uhci->frame_number & UHCI_MAX_SOF_NUMBER,
			uhci->io_addr + USBFRNUM);

	/* Mark controller as not halted before we enable interrupts */
	uhci_to_hcd(uhci)->state = HC_STATE_SUSPENDED;
	mb();

	/* Enable PIRQ */
	pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
			USBLEGSUP_DEFAULT);
}
static int ahci_pci_reset_controller(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);

	ahci_reset_controller(host);

	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
		struct ahci_host_priv *hpriv = host->private_data;
		u16 tmp16;

		/* configure PCS */
		pci_read_config_word(pdev, 0x92, &tmp16);
		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
			tmp16 |= hpriv->port_map;
			pci_write_config_word(pdev, 0x92, tmp16);
		}
	}

	return 0;
}
static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
{
	static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = 2 * ap->port_no + adev->devno;
	int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
	u32 pio_timing_data;
	u16 pio_mode_data;

	pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
	pio_mode_data &= ~(0x7 << (4 * dn));
	pio_mode_data |= pio << (4 * dn);
	pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);

	pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
	pio_timing_data &= ~(0xFF << timing_shift);
	pio_timing_data |= (pio_timings[pio] << timing_shift);
	pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
}
unsigned long _BSP_clear_hostbridge_errors(int enableMCP, int quiet)
{
	unsigned long rval;
	unsigned short pcistat;
	int count;

	if (enableMCP)
		return -1;	/* exceptions not supported / MCP not wired */

	/* read error status for info return */
	pci_read_config_word(0, 0, 0, PCI_STATUS, &pcistat);
	rval = pcistat;

	count = 10;
	do {
		/* clear error reporting registers */

		/* clear PCI status register */
		pci_write_config_word(0, 0, 0, PCI_STATUS, PCI_ERR_BITS);

		/* read new status */
		pci_read_config_word(0, 0, 0, PCI_STATUS, &pcistat);
	} while (!PCI_STATUS_OK(pcistat) && count--);

	if (!PCI_STATUS_OK(rval) && !quiet) {
		printk("Cleared PCI errors: pci_stat was 0x%04x\n", rval);
	}
	if (!PCI_STATUS_OK(pcistat)) {
		printk("Unable to clear PCI errors: still 0x%04x after 10 attempts\n",
		       pcistat);
	}

	rval &= PCI_ERR_BITS;

	/* Some VME bridges (Tsi148) don't propagate VME bus errors to PCI status reg. */
	if (_BSP_clear_vmebridge_errors)
		rval |= _BSP_clear_vmebridge_errors(quiet) << 16;

	return rval;
}
/**
 * pci_enable_ats - enable the ATS capability
 * @dev: the PCI device
 * @ps: the IOMMU page shift
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_enable_ats(struct pci_dev *dev, int ps)
{
	int rc;
	u16 ctrl;

	BUG_ON(dev->ats && dev->ats->is_enabled);

	if (ps < PCI_ATS_MIN_STU)
		return -EINVAL;

	if (dev->is_physfn || dev->is_virtfn) {
		struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;

		mutex_lock(&pdev->sriov->lock);
		if (pdev->ats)
			rc = pdev->ats->stu == ps ? 0 : -EINVAL;
		else
			rc = ats_alloc_one(pdev, ps);

		if (!rc)
			pdev->ats->ref_cnt++;
		mutex_unlock(&pdev->sriov->lock);
		if (rc)
			return rc;
	}

	if (!dev->is_physfn) {
		rc = ats_alloc_one(dev, ps);
		if (rc)
			return rc;
	}

	ctrl = PCI_ATS_CTRL_ENABLE;
	if (!dev->is_virtfn)
		ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
	pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);

	dev->ats->is_enabled = 1;

	return 0;
}
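A minimal usage sketch of the function above, in the style of an IOMMU driver caller; the function name and the choice of PAGE_SHIFT as the smallest translation unit are illustrative assumptions, not taken from the source.

/* Hypothetical caller (not from the original source): enable ATS on a
 * device before attaching it to an IOMMU domain, using PAGE_SHIFT as the
 * smallest translation unit.  pci_enable_ats() fails if the requested STU
 * is below PCI_ATS_MIN_STU or the device lacks the ATS capability.
 */
static int example_enable_ats(struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_ats(pdev, PAGE_SHIFT);
	if (ret)
		return ret;

	/* ... attach the device to an IOMMU domain here ... */
	return 0;
}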
static inline int pcibios_enable_resources(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;

	if (!dev)
		return -EINVAL;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
		/* Only set up the desired resources. */
		if (!(mask & (1 << idx)))
			continue;

		r = &dev->resource[idx];
		if (!(r->flags & type_mask))
			continue;
		if ((idx == PCI_ROM_RESOURCE) &&
				(!(r->flags & IORESOURCE_ROM_ENABLE)))
			continue;
		if (!r->start && r->end) {
			printk(KERN_ERR
			       "PCI: Device %s not available because of resource collisions\n",
			       pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n",
		       pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}
/*
 * Based on settings done by AMI BIOS
 * (might be useful if drive is not registered in CMOS for any reason).
 */
static void slc90e66_tune_drive (ide_drive_t *drive, u8 pio)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct pci_dev *dev = hwif->pci_dev;
	int is_slave = (&hwif->drives[1] == drive);
	int master_port = hwif->channel ? 0x42 : 0x40;
	int slave_port = 0x44;
	unsigned long flags;
	u16 master_data;
	u8 slave_data;
			 /* ISP  RTC */
	u8 timings[][2] = { { 0, 0 },
			    { 0, 0 },
			    { 1, 0 },
			    { 2, 1 },
			    { 2, 3 }, };

	pio = ide_get_best_pio_mode(drive, pio, 5, NULL);
	spin_lock_irqsave(&ide_lock, flags);
	pci_read_config_word(dev, master_port, &master_data);
	if (is_slave) {
		master_data = master_data | 0x4000;
		if (pio > 1)
			/* enable PPE, IE and TIME */
			master_data = master_data | 0x0070;
		pci_read_config_byte(dev, slave_port, &slave_data);
		slave_data = slave_data & (hwif->channel ? 0x0f : 0xf0);
		slave_data = slave_data |
			(((timings[pio][0] << 2) | timings[pio][1]) <<
			 (hwif->channel ? 4 : 0));
	} else {
		master_data = master_data & 0xccf8;
		if (pio > 1)
			/* enable PPE, IE and TIME */
			master_data = master_data | 0x0007;
		master_data = master_data |
			(timings[pio][0] << 12) |
			(timings[pio][1] << 8);
	}
	pci_write_config_word(dev, master_port, master_data);
	if (is_slave)
		pci_write_config_byte(dev, slave_port, slave_data);
	spin_unlock_irqrestore(&ide_lock, flags);
}
static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
{
	int err;

	if (!atomic_read(&dev->enable_cnt) && is_enable_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG "pciback: %s: enable\n",
			       pci_name(dev));
		err = pci_enable_device(dev);
		if (err)
			return err;
	} else if (atomic_read(&dev->enable_cnt) && !is_enable_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG "pciback: %s: disable\n",
			       pci_name(dev));
		pci_disable_device(dev);
	}

	if (!dev->is_busmaster && is_master_cmd(value)) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG "pciback: %s: set bus master\n",
			       pci_name(dev));
		pci_set_master(dev);
	}

	if (value & PCI_COMMAND_INVALIDATE) {
		if (unlikely(verbose_request))
			printk(KERN_DEBUG
			       "pciback: %s: enable memory-write-invalidate\n",
			       pci_name(dev));
		err = pci_set_mwi(dev);
		if (err) {
			printk(KERN_WARNING
			       "pciback: %s: cannot enable memory-write-invalidate (%d)\n",
			       pci_name(dev), err);
			value &= ~PCI_COMMAND_INVALIDATE;
		}
	}

	return pci_write_config_word(dev, offset, value);
}
void FreeDriverData( struct EMU10kxData* dd, struct DriverBase* AHIsubBase )
{
  if( dd != NULL )
  {
    if( dd->card.pci_dev != NULL )
    {
      if( dd->emu10k1_initialized )
      {
        emu10k1_cleanup( &dd->card );
      }

      if( dd->pci_master_enabled )
      {
        UWORD cmd;

#ifdef __AMIGAOS4__
        cmd = ((struct PCIDevice * ) dd->card.pci_dev)->ReadConfigWord( PCI_COMMAND );
        cmd &= ~( PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER );
        ((struct PCIDevice * ) dd->card.pci_dev)->WriteConfigWord( PCI_COMMAND, cmd );
#else
        cmd = pci_read_config_word( PCI_COMMAND, dd->card.pci_dev );
        cmd &= ~( PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER );
        pci_write_config_word( PCI_COMMAND, cmd, dd->card.pci_dev );
#endif
      }
    }

    if( dd->interrupt_added )
    {
#ifdef __AMIGAOS4__
      RemIntServer( ((struct PCIDevice * ) dd->card.pci_dev)->MapInterrupt(),
                    &dd->interrupt );
#else
      pci_rem_intserver( &dd->interrupt, dd->card.pci_dev );
#endif
    }

    FreeVec( dd );
  }
}
/*
 * Make sure the controller is completely inactive, unable to
 * generate interrupts or do DMA.
 */
static void reset_hc(struct uhci_hcd *uhci)
{
	int port;

	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
	 */
	pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
			USBLEGSUP_RWC);

	/* Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(USBCMD_HCRESET, uhci->io_addr + USBCMD);
	mb();
	udelay(5);
	if (inw(uhci->io_addr + USBCMD) & USBCMD_HCRESET)
		dev_warn(uhci_dev(uhci), "HCRESET not completed yet!\n");

	/* Just to be safe, disable interrupt requests and
	 * make sure the controller is stopped.
	 */
	outw(0, uhci->io_addr + USBINTR);
	outw(0, uhci->io_addr + USBCMD);

	/* HCRESET doesn't affect the Suspend, Reset, and Resume Detect
	 * bits in the port status and control registers.
	 * We have to clear them by hand.
	 */
	for (port = 0; port < uhci->rh_numports; ++port)
		outw(0, uhci->io_addr + USBPORTSC1 + (port * 2));

	uhci->port_c_suspend = uhci->suspended_ports =
			uhci->resuming_ports = 0;
	uhci->rh_state = UHCI_RH_RESET;
	uhci->is_stopped = UHCI_IS_STOPPED;
	uhci_to_hcd(uhci)->state = HC_STATE_HALT;
	uhci_to_hcd(uhci)->poll_rh = 0;
}
static void aer_error_resume(struct pci_dev *dev)
{
	int pos;
	u32 status, mask;
	u16 reg16;

	pos = pci_pcie_cap(dev);
	pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &reg16);
	pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16);

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask;
	else
		status &= mask;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
static void __devexit ichxrom_remove_one (struct pci_dev *pdev)
{
	struct ichxrom_map_info *info = &ichxrom_map;
	u16 word;

	del_mtd_device(info->mtd);
	map_destroy(info->mtd);
	info->mtd = NULL;
	info->map.map_priv_1 = 0;

	iounmap((void *)(info->window_addr));
	info->window_addr = 0;

	/* Disable writes through the rom window */
	pci_read_config_word(pdev, BIOS_CNTL, &word);
	pci_write_config_word(pdev, BIOS_CNTL, word & ~1);

#if RESERVE_MEM_REGION
	release_mem_region(ICHX_FWH_REGION_START, ICHX_FWH_REGION_SIZE);
#endif
}
/**
 * aer_error_resume - clean up corresponding error status bits
 * @dev: pointer to Root Port's pci_dev data structure
 *
 * Invoked by Port Bus driver during nonfatal recovery.
 **/
static void aer_error_resume(struct pci_dev *dev)
{
	int pos;
	u32 status, mask;
	u16 reg16;

	/* Clean up Root device status */
	pos = pci_pcie_cap(dev);
	pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &reg16);
	pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16);

	/* Clean AER Root Error Status */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
	u16 reg16 = 0;
	int pos;

	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -EIO;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
	reg16 &= ~(PCI_EXP_DEVCTL_CERE |
		   PCI_EXP_DEVCTL_NFERE |
		   PCI_EXP_DEVCTL_FERE |
		   PCI_EXP_DEVCTL_URRE);
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);

	return 0;
}
static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
	unsigned int pio = adev->pio_mode - XFER_PIO_0;
	struct pci_dev *dev = to_pci_dev(ap->host->dev);
	u16 idetm_data;
	int control = 0;

	/*
	 *	See Intel Document 298600-004 for the timing programing rules
	 *	for PIIX/ICH. Note that the early PIIX does not have the slave
	 *	timing port at 0x44. The Radisys is a relative of the PIIX
	 *	but not the same so be careful.
	 */

	static const	 /* ISP  RTC */
	u8 timings[][2]	= { { 0, 0 },	/* Check me */
			    { 0, 0 },
			    { 1, 1 },
			    { 2, 2 },
			    { 3, 3 }, };

	if (pio > 0)
		control |= 1;	/* TIME1 enable */
	if (ata_pio_need_iordy(adev))
		control |= 2;	/* IE IORDY */

	pci_read_config_word(dev, 0x40, &idetm_data);

	/* Enable IE and TIME as appropriate. Clear the other
	   drive timing bits */
	idetm_data &= 0xCCCC;
	idetm_data |= (control << (4 * adev->devno));
	idetm_data |= (timings[pio][0] << 12) |
			(timings[pio][1] << 8);
	pci_write_config_word(dev, 0x40, idetm_data);

	/* Track which port is configured */
	ap->private_data = adev;
}
static int neo_dma_prepare_bus_direct(zion_params_t *params, int ch, int cmd,
				      unsigned long offset_addr)
{
	u16 tmp_16 = 0;
	unsigned long current_jiffies;
	void *chain = (void *)ZION_PCI_PARAM(params)->dma_params[ch].dma_chain;

	/* add check for DMA Run */
	pci_read_config_word(params->dev, NEO_PCI_DMA_COMMAND(ch), &tmp_16);
	current_jiffies = jiffies;
	while (tmp_16 & NEO_DMA_RUN)
	{
		interruptible_sleep_on_timeout(
			&(ZION_PCI_PARAM(params)->zion_pci_wait_queue),
			(100 * HZ / 1000));
		if (current_jiffies + 2 * HZ < jiffies)
		{
			PERROR("Timeout : DMA Busy (for 2 seconds)\n");
			return -ETIME;
		}
		pci_read_config_word(params->dev, NEO_PCI_DMA_COMMAND(ch), &tmp_16);
	}

	/* set source address */
	pci_write_config_dword(params->dev, NEO_PCI_DMA_BUFFER_RW_POINTER(ch),
			       (params->whole_sdram_addr) + offset_addr);

	/* set chain address */
	pci_write_config_dword(params->dev, NEO_PCI_DMA_CHAIN_ADDRESS(ch),
			       virt_to_bus(chain));

	/* Clear DONE Status */
	pci_write_config_word(params->dev, NEO_PCI_INTERRUPT_STATUS,
			      NEO_DMA_DONE(ch));

	/* Set Time out */
	ZION_PCI_PARAM(params)->dma_params[ch].condition = ZION_PCI_INT_DISPATCH_PENDING;
	init_timer(&(ZION_PCI_PARAM(params)->dma_params[ch].timer));
	ZION_PCI_PARAM(params)->dma_params[ch].timer.function = neo_dma_timeout;
	ZION_PCI_PARAM(params)->dma_params[ch].timer.data = (unsigned long)params;

	return 0;
}
static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
	unsigned int pio = adev->pio_mode - XFER_PIO_0;
	struct pci_dev *dev = to_pci_dev(ap->host_set->dev);
	unsigned int is_slave = (adev->devno != 0);
	unsigned int master_port = ap->hard_port_no ? 0x42 : 0x40;
	unsigned int slave_port = 0x44;
	u16 master_data;
	u8 slave_data;

	static const	 /* ISP  RTC */
	u8 timings[][2]	= { { 0, 0 },
			    { 0, 0 },
			    { 1, 0 },
			    { 2, 1 },
			    { 2, 3 }, };

	pci_read_config_word(dev, master_port, &master_data);
	if (is_slave) {
		master_data |= 0x4000;
		/* enable PPE, IE and TIME */
		master_data |= 0x0070;
		pci_read_config_byte(dev, slave_port, &slave_data);
		slave_data &= (ap->hard_port_no ? 0x0f : 0xf0);
		slave_data |=
			(timings[pio][0] << 2) |
			(timings[pio][1] << (ap->hard_port_no ? 4 : 0));
	} else {
		master_data &= 0xccf8;
		/* enable PPE, IE and TIME */
		master_data |= 0x0007;
		master_data |=
			(timings[pio][0] << 12) |
			(timings[pio][1] << 8);
	}
	pci_write_config_word(dev, master_port, master_data);
	if (is_slave)
		pci_write_config_byte(dev, slave_port, slave_data);
}
static void sis_ata16_program_timings(ide_drive_t *drive, const u8 mode)
{
	struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
	u16 t1 = 0;
	u8 drive_pci = 0x40 + drive->dn * 2;

	const u16 pio_timings[]   = { 0x000, 0x607, 0x404, 0x303, 0x301 };
	const u16 mwdma_timings[] = { 0x008, 0x302, 0x301 };

	pci_read_config_word(dev, drive_pci, &t1);

	/* clear active/recovery timings */
	t1 &= ~0x070f;
	if (mode >= XFER_MW_DMA_0) {
		if (chipset_family > ATA_16)
			t1 &= ~0x8000;	/* disable UDMA */
		t1 |= mwdma_timings[mode - XFER_MW_DMA_0];
	} else
		t1 |= pio_timings[mode - XFER_PIO_0];

	pci_write_config_word(dev, drive_pci, t1);
}
static void ns87415_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode)
{
	struct pci_dev *dev = to_pci_dev(ap->host->dev);
	int unit = 2 * ap->port_no + adev->devno;
	int timing = 0x44 + 2 * unit;
	unsigned long T = 1000000000 / 33333;
	struct ata_timing t;
	u16 clocking;
	u8 iordy;
	u8 status;

	ata_timing_compute(adev, adev->pio_mode, &t, T, 0);

	clocking = 17 - clamp_val(t.active, 2, 17);
	clocking |= (16 - clamp_val(t.recover, 1, 16)) << 4;
	clocking |= (clocking << 8);
	pci_write_config_word(dev, timing, clocking);

	pci_read_config_byte(dev, 0x42, &iordy);
	iordy &= ~(1 << (4 + unit));
	if (mode >= XFER_MW_DMA_0 || !ata_pio_need_iordy(adev))
		iordy |= (1 << (4 + unit));

	pci_read_config_byte(dev, 0x43, &status);
	while (status & 0x03) {
		udelay(1);
		pci_read_config_byte(dev, 0x43, &status);
	}
	pci_write_config_byte(dev, 0x42, iordy);
}
static irqreturn_t snd_bt87x_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	bt87x_t *chip = dev_id;
	unsigned int status;

	status = snd_bt87x_readl(chip, REG_INT_STAT);
	if (!(status & MY_INTERRUPTS))
		return IRQ_NONE;
	snd_bt87x_writel(chip, REG_INT_STAT, status & MY_INTERRUPTS);

	if (status & ERROR_INTERRUPTS) {
		if (status & (INT_FBUS | INT_FTRGT))
			snd_printk(KERN_WARNING "FIFO overrun, status %#08x\n",
				   status);
		if (status & INT_OCERR)
			snd_printk(KERN_ERR "internal RISC error, status %#08x\n",
				   status);
		if (status & (INT_PPERR | INT_RIPERR | INT_PABORT)) {
			u16 pci_status;
			pci_read_config_word(chip->pci, PCI_STATUS, &pci_status);
			pci_write_config_word(chip->pci, PCI_STATUS, pci_status &
					      (PCI_STATUS_PARITY |
					       PCI_STATUS_SIG_TARGET_ABORT |
					       PCI_STATUS_REC_TARGET_ABORT |
					       PCI_STATUS_REC_MASTER_ABORT |
					       PCI_STATUS_SIG_SYSTEM_ERROR |
					       PCI_STATUS_DETECTED_PARITY));
			snd_printk(KERN_ERR "Aieee - PCI error! status %#08x, PCI status %#04x\n",
				   status, pci_status);
		}
	}

	if ((status & INT_RISCI) && (chip->reg_control & CTL_ACAP_EN)) {
		int current_block, irq_block;

		/* assume that exactly one line has been recorded */
		chip->current_line = (chip->current_line + 1) % chip->lines;
		/* but check if some interrupts have been skipped */
		current_block = chip->current_line * 16 / chip->lines;
		irq_block = status >> INT_RISCS_SHIFT;
		if (current_block != irq_block)
			chip->current_line = (irq_block * chip->lines + 15) / 16;

		snd_pcm_period_elapsed(chip->substream);
	}
	return IRQ_HANDLED;
}
/**
 * pci_disable_ats - disable the ATS capability
 * @dev: the PCI device
 */
void pci_disable_ats(struct pci_dev *dev)
{
	struct pci_dev *pdev;
	u16 ctrl;

	if (WARN_ON(!dev->ats_enabled))
		return;

	if (atomic_read(&dev->ats_ref_cnt))
		return;		/* VFs still enabled */

	if (dev->is_virtfn) {
		pdev = pci_physfn(dev);
		atomic_dec(&pdev->ats_ref_cnt);
	}

	pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, &ctrl);
	ctrl &= ~PCI_ATS_CTRL_ENABLE;
	pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);

	dev->ats_enabled = 0;
}
static int __devinit ems_pci_init_one (struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	struct rtcan_device *master_dev = NULL;
	int err;

	RTCAN_DBG("%s: initializing device %04x:%04x\n",
		  RTCAN_DRV_NAME, pdev->vendor, pdev->device);

	if ((err = pci_enable_device (pdev)))
		goto failure;

	if ((err = pci_request_regions(pdev, RTCAN_DRV_NAME)))
		goto failure;

	if ((err = pci_write_config_word(pdev, 0x04, 2)))
		goto failure_cleanup;

	if ((err = rtcan_ems_pci_add_chan(pdev, EMS_PCI_MASTER, &master_dev)))
		goto failure_cleanup;
	if ((err = rtcan_ems_pci_add_chan(pdev, EMS_PCI_SLAVE, &master_dev)))
		goto failure_cleanup;

	pci_set_drvdata(pdev, master_dev);
	return 0;

failure_cleanup:
	if (master_dev)
		rtcan_ems_pci_del_chan(master_dev, 0);

	pci_release_regions(pdev);

failure:
	return err;
}
static int uhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	struct pci_dev *pdev = to_pci_dev(uhci_dev(uhci));
	int rc = 0;

	dev_dbg(uhci_dev(uhci), "%s\n", __func__);

	spin_lock_irq(&uhci->lock);
	if (!HCD_HW_ACCESSIBLE(hcd) || uhci->dead)
		goto done_okay;		/* Already suspended or dead */

	/* All PCI host controllers are required to disable IRQ generation
	 * at the source, so we must turn off PIRQ.
	 */
	pci_write_config_word(pdev, USBLEGSUP, 0);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);

	/* Enable platform-specific non-PME# wakeup */
	if (do_wakeup) {
		if (pdev->vendor == PCI_VENDOR_ID_INTEL)
			pci_write_config_byte(pdev, USBRES_INTEL,
					USBPORT1EN | USBPORT2EN);
	}

done_okay:
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	spin_unlock_irq(&uhci->lock);

	synchronize_irq(hcd->irq);

	/* Check for race with a wakeup request */
	if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) {
		uhci_pci_resume(hcd, false);
		rc = -EBUSY;
	}
	return rc;
}
static int ehci_pci_suspend(struct usb_hcd *hcd)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	unsigned long flags;
	int rc = 0;
	u16 pmc_enable = 0;

	if (time_before(jiffies, ehci->next_statechange))
		msleep(10);

	/* Root hub was already suspended. Disable irq emission and
	 * mark HW unaccessible, bail out if RH has been resumed. Use
	 * the spinlock to properly synchronize with possible pending
	 * RH suspend or resume activity.
	 *
	 * This is still racy as hcd->state is manipulated outside of
	 * any locks =P But that will be a different fix.
	 */
	spin_lock_irqsave (&ehci->lock, flags);
	if (hcd->state != HC_STATE_SUSPENDED) {
		rc = -EINVAL;
		goto bail;
	}
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
	(void)ehci_readl(ehci, &ehci->regs->intr_enable);

	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
 bail:
	spin_unlock_irqrestore (&ehci->lock, flags);

	// could save FLADJ in case of Vaux power loss
	// ... we'd only use it to handle clock skew

	//CharlesTu,for PM high memory
	pci_read_config_word(to_pci_dev(hcd->self.controller), 0x84, &pmc_enable);
	pmc_enable |= 0x103;
	pci_write_config_word(to_pci_dev(hcd->self.controller), 0x84, pmc_enable);

	return rc;
}
static void atiixp_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
	struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
	unsigned long flags;
	int timing_shift = ((drive->dn & 2) ? 16 : 0) + ((drive->dn & 1) ? 0 : 8);
	u32 pio_timing_data;
	u16 pio_mode_data;

	spin_lock_irqsave(&atiixp_lock, flags);

	pci_read_config_word(dev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
	pio_mode_data &= ~(0x07 << (drive->dn * 4));
	pio_mode_data |= (pio << (drive->dn * 4));
	pci_write_config_word(dev, ATIIXP_IDE_PIO_MODE, pio_mode_data);

	pci_read_config_dword(dev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
	pio_timing_data &= ~(0xff << timing_shift);
	pio_timing_data |= (pio_timing[pio].recover_width << timing_shift) |
		(pio_timing[pio].command_width << (timing_shift + 4));
	pci_write_config_dword(dev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);

	spin_unlock_irqrestore(&atiixp_lock, flags);
}
static void __init pci_fixup_isp1020(struct pci_dev *d)
{
	unsigned short command;

	d->resource[0].start |= ((unsigned long)(bus_to_nid[d->bus->number]) << 32);
	printk("PCI: Fixing isp1020 in [bus:slot.fn] %s\n", d->slot_name);

	/*
	 * Configure device to allow bus mastering, i/o and memory mapping.
	 * Older qlogicisp driver expects to have the IO space enable
	 * bit set. Things stop working if we program the controllers as not
	 * having PCI_COMMAND_MEMORY, so we have to fudge the mem_flags.
	 */
	pci_set_master(d);
	pci_read_config_word(d, PCI_COMMAND, &command);
	command |= PCI_COMMAND_MEMORY;
	command |= PCI_COMMAND_IO;
	pci_write_config_word(d, PCI_COMMAND, command);
	d->resource[1].flags |= 1;

	pci_enable_swapping(d);
}
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
	u16 reg16 = 0;
	int pos;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return -EIO;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!pos)
		return -EIO;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
	reg16 = reg16 |
		PCI_EXP_DEVCTL_CERE |
		PCI_EXP_DEVCTL_NFERE |
		PCI_EXP_DEVCTL_FERE |
		PCI_EXP_DEVCTL_URRE;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);

	return 0;
}
/*
 * Read SEEPROM. A zero is written to the flag register when the address is
 * written to the Control register. The hardware device will set the flag to a
 * one when 4B have been transferred to the Data register.
 */
int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
{
	int i = EEPROM_MAX_POLL;
	u16 val;

	if (addr >= EEPROMSIZE || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr);
	do {
		udelay(50);
		pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val);
	} while (!(val & F_VPD_OP_FLAG) && --i);

	if (!(val & F_VPD_OP_FLAG)) {
		CH_ERR("%s: reading EEPROM address 0x%x failed\n",
		       adapter->name, addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}
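For contrast, a hypothetical sketch of the write direction through the same VPD window; it reuses the register names from t1_seeprom_read above and assumes the usual VPD handshake (software sets the flag together with the address, hardware clears it when the write has completed). The function name is illustrative and not part of the original driver.

/* Hypothetical companion sketch (not from the original driver): write one
 * 32-bit word through the VPD window.  The data register is loaded first,
 * then the address is written with the flag bit set; the hardware is
 * assumed to clear the flag once the write has finished.
 */
static int example_seeprom_write(adapter_t *adapter, u32 addr, u32 data)
{
	int i = EEPROM_MAX_POLL;
	u16 val;

	if (addr >= EEPROMSIZE || (addr & 3))
		return -EINVAL;

	/* mirror the le32 handling of the read path above */
	pci_write_config_dword(adapter->pdev, A_PCICFG_VPD_DATA,
			       cpu_to_le32(data));
	pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR,
			      (u16)addr | F_VPD_OP_FLAG);
	do {
		udelay(50);
		pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val);
	} while ((val & F_VPD_OP_FLAG) && --i);

	if (val & F_VPD_OP_FLAG) {
		CH_ERR("%s: writing EEPROM address 0x%x failed\n",
		       adapter->name, addr);
		return -EIO;
	}
	return 0;
}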
void adjust_pci_device ( struct pci_device *pci ) {
	unsigned short new_command, pci_command = 0;
	unsigned char pci_latency;

	pci_read_config_word(pci, PCI_COMMAND, &pci_command);
	new_command = pci_command | PCI_COMMAND_MASTER |
		      PCI_COMMAND_MEM | PCI_COMMAND_IO;
	if (pci_command != new_command) {
		LOG("PCI BIOS has not enabled device " FMT_BUSDEVFN "! "
		    "Updating PCI command %04x->%04x\n",
		    PCI_BUS(pci->busdevfn), PCI_SLOT(pci->busdevfn),
		    PCI_FUNC (pci->busdevfn), pci_command, new_command);
		pci_write_config_word(pci, PCI_COMMAND, new_command);
	}

	pci_read_config_byte ( pci, PCI_LATENCY_TIMER, &pci_latency);
	if ( pci_latency < 32 ) {
		LOG("PCI device " FMT_BUSDEVFN " latency timer is unreasonably "
		    "low at %d. Setting to 32.\n",
		    PCI_BUS(pci->busdevfn), PCI_SLOT ( pci->busdevfn ),
		    PCI_FUNC ( pci->busdevfn ), pci_latency );
		pci_write_config_byte ( pci, PCI_LATENCY_TIMER, 32);
	}
}
/**
 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
int qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (2048). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 2048);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}