Example #1
static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
	int pos;

	pos = pci_find_aer_capability(dev);

	/* The device might not support AER */
	if (!pos)
		return AER_SUCCESS;

	if (info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
			&info->status);
		if (!(info->status & ERR_CORRECTABLE_ERROR_MASK))
			return AER_UNSUCCESS;
	} else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE ||
		info->severity == AER_NONFATAL) {

		/* Link is still healthy for IO reads */
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
			&info->status);
		if (!(info->status & ERR_UNCORRECTABLE_ERROR_MASK))
			return AER_UNSUCCESS;

		if (info->status & AER_LOG_TLP_MASKS) {
			info->flags |= AER_TLP_HEADER_VALID_FLAG;
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
		}
	}

	return AER_SUCCESS;
}
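The four header-log reads above can be collapsed into a loop; a minimal sketch, assuming the dw0..dw3 fields of info->tlp are laid out as contiguous u32s:
static void read_tlp_header_log(struct pci_dev *dev, int pos,
				struct aer_err_info *info)
{
	/* Assumption: dw0..dw3 form a contiguous u32 array. */
	u32 *dw = &info->tlp.dw0;
	int i;

	for (i = 0; i < 4; i++)
		pci_read_config_dword(dev, pos + PCI_ERR_HEADER_LOG + i * 4,
				      dw + i);
}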
Example #2
int pci_save_msi_state(struct pci_dev *dev)
{
	int pos, i = 0;
	u16 control;
	struct pci_cap_saved_state *save_state;
	u32 *cap;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos <= 0 || dev->no_msi)
		return 0;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return 0;

	save_state = kzalloc(sizeof(struct pci_cap_saved_state) + sizeof(u32) * 5,
		GFP_KERNEL);
	if (!save_state) {
		printk(KERN_ERR "Out of memory in pci_save_msi_state\n");
		return -ENOMEM;
	}
	cap = &save_state->data[0];

	pci_read_config_dword(dev, pos, &cap[i++]);
	control = cap[0] >> 16;
	pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &cap[i++]);
	if (control & PCI_MSI_FLAGS_64BIT) {
		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &cap[i++]);
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_64, &cap[i++]);
	} else
		pci_read_config_dword(dev, pos + PCI_MSI_DATA_32, &cap[i++]);
	if (control & PCI_MSI_FLAGS_MASKBIT)
		pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT, &cap[i++]);
	save_state->cap_nr = PCI_CAP_ID_MSI;
	pci_add_saved_cap(dev, save_state);
	return 0;
}
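The size of the save buffer follows directly from the control flags; msi_saved_dwords below is a hypothetical helper (not part of the listing above) that mirrors the reads performed by the function:
static int msi_saved_dwords(u16 control)
{
	int n = 2;			/* capability dword + address low */

	if (control & PCI_MSI_FLAGS_64BIT)
		n += 2;			/* address high + 64-bit data */
	else
		n += 1;			/* 32-bit data */
	if (control & PCI_MSI_FLAGS_MASKBIT)
		n += 1;			/* per-vector mask bits */
	return n;			/* at most 5, matching the kzalloc above */
}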
Example #3
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
	u32 addr;
	u16 new_size;
	u8 *vbt_virtual;
	u8 bpi;
	u8 number_desc = 0;
	struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
	struct gct_r10_timing_info ti;
	void *pGCT;
	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));

	/* Get the address of the platform config vbt, B0:D2:F0;0xFC */
	pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
	pci_dev_put(pci_gfx_root);

	dev_dbg(dev->dev, "drm platform config address is %x\n", addr);

	/* A platform config address of 0 means the firmware doesn't support a VBT */

	if (addr == 0) {
		vbt->size = 0;
		return;
	}

	/* get the virtual address of the vbt */
	vbt_virtual = ioremap(addr, sizeof(*vbt));
	if (vbt_virtual == NULL) {
		vbt->size = 0;
		return;
	}

	memcpy(vbt, vbt_virtual, sizeof(*vbt));
	iounmap(vbt_virtual); /* Free virtual address space */

	/* No matching signature; don't process the data */
	if (memcmp(vbt->signature, "$GCT", 4)) {
		vbt->size = 0;
		return;
	}

	dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);

	switch (vbt->revision) {
	case 0:
		vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
					vbt->size - sizeof(*vbt) + 4);
		pGCT = vbt->oaktrail_gct;
		bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
		dev_priv->gct_data.bpi = bpi;
		dev_priv->gct_data.pt =
			((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
		memcpy(&dev_priv->gct_data.DTD,
			&((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
				sizeof(struct oaktrail_timing_info));
		dev_priv->gct_data.Panel_Port_Control =
		  ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
			((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
		break;
	case 1:
		vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
					vbt->size - sizeof(*vbt) + 4);
		pGCT = vbt->oaktrail_gct;
		bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
		dev_priv->gct_data.bpi = bpi;
		dev_priv->gct_data.pt =
			((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
		memcpy(&dev_priv->gct_data.DTD,
			&((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
				sizeof(struct oaktrail_timing_info));
		dev_priv->gct_data.Panel_Port_Control =
		  ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
			((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
		break;
	case 0x10:
		/* Header definition changed from rev 01 (v2) to rev 10h,
		 * so some values have changed location. */
		new_size = vbt->checksum; /*checksum contains lo size byte*/
		/*LSB of oaktrail_gct contains hi size byte*/
		new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;

		vbt->checksum = vbt->size; /*size contains the checksum*/
		if (new_size > 0xff)
			vbt->size = 0xff; /*restrict size to 255*/
		else
			vbt->size = new_size;

		/* number of descriptors defined in the GCT */
		number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
		bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
		vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
				GCT_R10_DISPLAY_DESC_SIZE * number_desc);
		pGCT = vbt->oaktrail_gct;
		pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
		dev_priv->gct_data.bpi = bpi; /*save boot panel id*/

		/*copy the GCT display timings into a temp structure*/
		memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));

		/*now copy the temp struct into the dev_priv->gct_data*/
		dp_ti->pixel_clock = ti.pixel_clock;
		dp_ti->hactive_hi = ti.hactive_hi;
		dp_ti->hactive_lo = ti.hactive_lo;
		dp_ti->hblank_hi = ti.hblank_hi;
		dp_ti->hblank_lo = ti.hblank_lo;
		dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
		dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
		dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
		dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
		dp_ti->vactive_hi = ti.vactive_hi;
		dp_ti->vactive_lo = ti.vactive_lo;
		dp_ti->vblank_hi = ti.vblank_hi;
		dp_ti->vblank_lo = ti.vblank_lo;
		dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
		dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
		dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
		dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;

		/* Move the MIPI_Display_Descriptor data from GCT to dev priv */
		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
							*((u8 *)pGCT + 0x0d);
		dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
						(*((u8 *)pGCT + 0x0e)) << 8;
		break;
	default:
		dev_err(dev->dev, "Unknown revision of GCT!\n");
		vbt->size = 0;
	}
}
Example #4
static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
{
	struct pci_dev *dev = ctrl->pcie->port;
	return pci_read_config_dword(dev, pci_pcie_cap(dev) + reg, value);
}
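A write-side counterpart follows the same pattern; a sketch (pciehp_writel is assumed here to mirror the reader and is not part of the listing above):
static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
{
	struct pci_dev *dev = ctrl->pcie->port;
	return pci_write_config_dword(dev, pci_pcie_cap(dev) + reg, value);
}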
static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ata_port_info pi = sis_port_info;
	const struct ata_port_info *ppi[] = { &pi, &pi };
	struct ata_host *host;
	u32 genctl, val;
	u8 pmr;
	u8 port2_start = 0x20;
	int i, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* check and see if the SCRs are in IO space or PCI cfg space */
	pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
	if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
		pi.flags |= SIS_FLAG_CFGSCR;

	/* if hardware thinks SCRs are in IO space, but there are
	 * no IO resources assigned, change to PCI cfg space.
	 */
	if ((!(pi.flags & SIS_FLAG_CFGSCR)) &&
	    ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
	     (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
		genctl &= ~GENCTL_IOMAPPED_SCR;
		pci_write_config_dword(pdev, SIS_GENCTL, genctl);
		pi.flags |= SIS_FLAG_CFGSCR;
	}

	pci_read_config_byte(pdev, SIS_PMR, &pmr);
	switch (ent->device) {
	case 0x0180:
	case 0x0181:

		/* The PATA-handling is provided by pata_sis */
		switch (pmr & 0x30) {
		case 0x10:
			ppi[1] = &sis_info133_for_sata;
			break;

		case 0x30:
			ppi[0] = &sis_info133_for_sata;
			break;
		}
		if ((pmr & SIS_PMR_COMBINED) == 0) {
			dev_info(&pdev->dev,
				 "Detected SiS 180/181/964 chipset in SATA mode\n");
			port2_start = 64;
		} else {
			dev_info(&pdev->dev,
				 "Detected SiS 180/181 chipset in combined mode\n");
			port2_start = 0;
			pi.flags |= ATA_FLAG_SLAVE_POSS;
		}
		break;

	case 0x0182:
	case 0x0183:
		pci_read_config_dword(pdev, 0x6C, &val);
		if (val & (1L << 31)) {
			dev_info(&pdev->dev, "Detected SiS 182/965 chipset\n");
			pi.flags |= ATA_FLAG_SLAVE_POSS;
		} else {
			dev_info(&pdev->dev, "Detected SiS 182/965L chipset\n");
		}
		break;

	case 0x1182:
		dev_info(&pdev->dev,
			 "Detected SiS 1182/966/680 SATA controller\n");
		pi.flags |= ATA_FLAG_SLAVE_POSS;
		break;

	case 0x1183:
		dev_info(&pdev->dev,
			 "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
		ppi[0] = &sis_info133_for_sata;
		ppi[1] = &sis_info133_for_sata;
		break;
	}

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->flags & ATA_FLAG_SATA &&
		    ap->flags & ATA_FLAG_SLAVE_POSS) {
			rc = ata_slave_link_init(ap);
			if (rc)
				return rc;
		}
	}

	if (!(pi.flags & SIS_FLAG_CFGSCR)) {
		void __iomem *mmio;

		rc = pcim_iomap_regions(pdev, 1 << SIS_SCR_PCI_BAR, DRV_NAME);
		if (rc)
			return rc;
		mmio = host->iomap[SIS_SCR_PCI_BAR];

		host->ports[0]->ioaddr.scr_addr = mmio;
		host->ports[1]->ioaddr.scr_addr = mmio + port2_start;
	}

	pci_set_master(pdev);
	pci_intx(pdev, 1);
	return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
				 IRQF_SHARED, &sis_sht);
}
Example #7
static int ati_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	struct ati_page_map page_dir;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;
	int retval;
	u32 temp;
	int i;
	struct aper_size_info_lvl2 *current_size;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = ati_create_page_map(&page_dir);
	if (retval != 0)
		return retval;

	retval = ati_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		ati_free_page_map(&page_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);

	/* Write out the size register */
	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	if (is_r200()) {
		pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
		temp = (((temp & ~(0x0000000e)) | current_size->size_value)
			| 0x00000001);
		pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
		pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
	} else {
		pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
		temp = (((temp & ~(0x0000000e)) | current_size->size_value)
			| 0x00000001);
		pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
		pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
	}

	/*
	 * Get the address for the gart region.
	 * This is a bus address even on the alpha, b/c its
	 * used to program the agp master not the cpu
	 */
	addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
	agp_bridge->gart_bus_addr = addr;

	/* Calculate the agp offset */
	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
		writel(virt_to_phys(ati_generic_private.gatt_pages[i]->real) | 1,
			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr));	/* PCI Posting. */
	}

	for (i = 0; i < value->num_entries; i++) {
		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = GET_GATT(addr);
		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
	}

	return 0;
}
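Both branches above perform the same read-modify-write on the APSIZE register and differ only in the register offset; a sketch factoring that out (ati_set_apsize is a name invented for illustration):
static void ati_set_apsize(struct pci_dev *dev, int reg, u32 size_value)
{
	u32 temp;

	pci_read_config_dword(dev, reg, &temp);
	temp = ((temp & ~0x0000000e) | size_value) | 0x00000001;
	pci_write_config_dword(dev, reg, temp);
	pci_read_config_dword(dev, reg, &temp);	/* read back, as above */
}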
Example #8
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *mem;
	struct ath_softc *sc;
	struct ieee80211_hw *hw;
	u8 csz;
	u32 val;
	int ret = 0;
	char hw_name[64];

	if (pci_enable_device(pdev))
		return -EIO;

	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
		goto err_dma;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		printk(KERN_ERR "ath9k: 32-bit DMA consistent "
			"DMA enable failed\n");
		goto err_dma;
	}

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
	if (csz == 0) {
		/*
		 * Linux 2.4.18 (at least) writes the cache line size
		 * register as a 16-bit wide register which is wrong.
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = L1_CACHE_BYTES / sizeof(u32);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
	}
	/*
	 * The default setting of latency timer yields poor results,
	 * set it to the value used by other systems. It may be worth
	 * tweaking this setting more.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);

	pci_set_master(pdev);

	/*
	 * Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	ret = pci_request_region(pdev, 0, "ath9k");
	if (ret) {
		dev_err(&pdev->dev, "PCI memory region reserve error\n");
		ret = -ENODEV;
		goto err_region;
	}

	mem = pci_iomap(pdev, 0, 0);
	if (!mem) {
		printk(KERN_ERR "PCI memory map error\n") ;
		ret = -EIO;
		goto err_iomap;
	}

	hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
	if (!hw) {
		dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
		ret = -ENOMEM;
		goto err_alloc_hw;
	}

	SET_IEEE80211_DEV(hw, &pdev->dev);
	pci_set_drvdata(pdev, hw);

	sc = hw->priv;
	sc->hw = hw;
	sc->dev = &pdev->dev;
	sc->mem = mem;

	/* Will be cleared in ath9k_start() */
	sc->sc_flags |= SC_OP_INVALID;

	ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed\n");
		goto err_irq;
	}

	sc->irq = pdev->irq;

	ret = ath9k_init_device(id->device, sc, &ath_pci_bus_ops);
	if (ret) {
		dev_err(&pdev->dev, "Failed to initialize device\n");
		goto err_init;
	}

	ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
	wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
		   hw_name, (unsigned long)mem, pdev->irq);

	return 0;

err_init:
	free_irq(sc->irq, sc);
err_irq:
	ieee80211_free_hw(hw);
err_alloc_hw:
	pci_iounmap(pdev, mem);
err_iomap:
	pci_release_region(pdev, 0);
err_region:
	/* Nothing */
err_dma:
	pci_disable_device(pdev);
	return ret;
}
Example #9
/* called during probe() after chip reset completes */
static int ehci_pci_setup(struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
	u32			temp;
	int			retval;

	switch (pdev->vendor) {
	case PCI_VENDOR_ID_TOSHIBA_2:
		/* celleb's companion chip */
		if (pdev->device == 0x01b5) {
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
			ehci->big_endian_mmio = 1;
#else
			ehci_warn(ehci,
				  "unsupported big endian Toshiba quirk\n");
#endif
		}
		break;
	}

	ehci->caps = hcd->regs;
	ehci->regs = hcd->regs +
		HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));

	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

	/* ehci_init() causes memory for DMA transfers to be
	 * allocated.  Thus, any vendor-specific workarounds based on
	 * limiting the type of memory used for DMA transfers must
	 * happen before ehci_init() is called. */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_NVIDIA:
		/* NVidia reports that certain chips don't handle
		 * QH, ITD, or SITD addresses above 2GB.  (But TD,
		 * data buffer, and periodic schedule are normal.)
		 */
		switch (pdev->device) {
		case 0x003c:	/* MCP04 */
		case 0x005b:	/* CK804 */
		case 0x00d8:	/* CK8 */
		case 0x00e8:	/* CK8S */
			if (pci_set_consistent_dma_mask(pdev,
						DMA_31BIT_MASK) < 0)
				ehci_warn(ehci, "can't enable NVidia "
					"workaround for >2GB RAM\n");
			break;
		}
		break;
	}

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);

	retval = ehci_halt(ehci);
	if (retval)
		return retval;

	/* data structure init */
	retval = ehci_init(hcd);
	if (retval)
		return retval;

#ifdef CONFIG_BUFFALO_PLATFORM
	if (hcd->self.controller->bus == &pci_bus_type) {
#endif /* CONFIG_BUFFALO_PLATFORM */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_TDI:
		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
			ehci->is_tdi_rh_tt = 1;
			tdi_reset(ehci);
		}
		break;
	case PCI_VENDOR_ID_AMD:
		/* AMD8111 EHCI doesn't work, according to AMD errata */
		if (pdev->device == 0x7463) {
			ehci_info(ehci, "ignoring AMD8111 (errata)\n");
			retval = -EIO;
			goto done;
		}
		break;
	case PCI_VENDOR_ID_NVIDIA:
		switch (pdev->device) {
		/* Some NForce2 chips have problems with selective suspend;
		 * fixed in newer silicon.
		 */
		case 0x0068:
			pci_read_config_dword(pdev, PCI_REVISION_ID, &temp);
			if ((temp & 0xff) < 0xa4)
				ehci->no_selective_suspend = 1;
			break;
		}
		break;
	}

	if (ehci_is_TDI(ehci))
		ehci_reset(ehci);
#ifdef CONFIG_BUFFALO_PLATFORM
	}
#endif /* CONFIG_BUFFALO_PLATFORM */

	/* at least the Genesys GL880S needs fixup here */
	temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
	temp &= 0x0f;
	if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) {
		ehci_dbg(ehci, "bogus port configuration: "
			"cc=%d x pcc=%d < ports=%d\n",
			HCS_N_CC(ehci->hcs_params),
			HCS_N_PCC(ehci->hcs_params),
			HCS_N_PORTS(ehci->hcs_params));

#ifdef CONFIG_BUFFALO_PLATFORM
		if (hcd->self.controller->bus == &pci_bus_type) {
#endif /* CONFIG_BUFFALO_PLATFORM */
		switch (pdev->vendor) {
		case 0x17a0:		/* GENESYS */
			/* GL880S: should be PORTS=2 */
			temp |= (ehci->hcs_params & ~0xf);
			ehci->hcs_params = temp;
			break;
		case PCI_VENDOR_ID_NVIDIA:
			/* NF4: should be PCC=10 */
			break;
		}
#ifdef CONFIG_BUFFALO_PLATFORM
		}
#endif /* CONFIG_BUFFALO_PLATFORM */
	}

#ifdef CONFIG_BUFFALO_PLATFORM
	if (hcd->self.controller->bus == &pci_bus_type) {
#endif /* CONFIG_BUFFALO_PLATFORM */
	/* Serial Bus Release Number is at PCI 0x60 offset */
	pci_read_config_byte(pdev, 0x60, &ehci->sbrn);

	/* Workaround current PCI init glitch:  wakeup bits aren't
	 * being set from PCI PM capability.
	 */
	if (!device_can_wakeup(&pdev->dev)) {
		u16	port_wake;

		pci_read_config_word(pdev, 0x62, &port_wake);
		if (port_wake & 0x0001)
			device_init_wakeup(&pdev->dev, 1);
	}

#ifdef	CONFIG_USB_SUSPEND
	/* REVISIT: the controller works fine for wakeup iff the root hub
	 * itself is "globally" suspended, but usbcore currently doesn't
	 * understand such things.
	 *
	 * System suspend currently expects to be able to suspend the entire
	 * device tree, device-at-a-time.  If we failed selective suspend
	 * reports, system suspend would fail; so the root hub code must claim
	 * success.  That's lying to usbcore, and it matters for runtime
	 * PM scenarios with selective suspend and remote wakeup...
	 */
	if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev))
		ehci_warn(ehci, "selective suspend/wakeup unavailable\n");
#endif

	retval = ehci_pci_reinit(ehci, pdev);
#ifdef CONFIG_BUFFALO_PLATFORM
	}
#endif /* CONFIG_BUFFALO_PLATFORM */
done:
	return retval;
}
Example #10
int rtl8169_initialize(bd_t *bis)
{
	pci_dev_t devno;
	int card_number = 0;
	struct eth_device *dev;
	u32 iobase;
	int idx = 0;

	while (1) {
		unsigned int region;
		u16 device;
		int err;

		/* Find RTL8169 */
		if ((devno = pci_find_devices(supported, idx++)) < 0)
			break;

		pci_read_config_word(devno, PCI_DEVICE_ID, &device);
		switch (device) {
		case 0x8168:
			region = 2;
			break;

		default:
			region = 1;
			break;
		}

		pci_read_config_dword(devno, PCI_BASE_ADDRESS_0 + (region * 4), &iobase);
		iobase &= ~0xf;

		debug ("rtl8169: REALTEK RTL8169 @0x%x\n", iobase);

		dev = (struct eth_device *)malloc(sizeof *dev);
		if (!dev) {
			printf("Can not allocate memory of rtl8169\n");
			break;
		}

		memset(dev, 0, sizeof(*dev));
		sprintf (dev->name, "RTL8169#%d", card_number);

		dev->priv = (void *)(unsigned long)devno;
		dev->iobase = (int)pci_mem_to_phys(devno, iobase);

		dev->init = rtl_reset;
		dev->halt = rtl_halt;
		dev->send = rtl_send;
		dev->recv = rtl_recv;

		err = rtl_init(dev->iobase, dev->name, dev->enetaddr);
		if (err < 0) {
			printf(pr_fmt("failed to initialize card: %d\n"), err);
			free(dev);
			continue;
		}

		eth_register (dev);

		card_number++;
	}
	return card_number;
}
Example #11
static int bcma_host_pci_probe(struct pci_dev *dev,
			     const struct pci_device_id *id)
{
	struct bcma_bus *bus;
	int err = -ENOMEM;
	const char *name;
	u32 val;

	/* Alloc */
	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		goto out;

	/* Basic PCI configuration */
	err = pci_enable_device(dev);
	if (err)
		goto err_kfree_bus;

	name = dev_name(&dev->dev);
	if (dev->driver && dev->driver->name)
		name = dev->driver->name;
	err = pci_request_regions(dev, name);
	if (err)
		goto err_pci_disable;
	pci_set_master(dev);

	/* Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(dev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(dev, 0x40, val & 0xffff00ff);

	/* SSB needed additional powering up, do we have any AMBA PCI cards? */
	if (!pci_is_pcie(dev))
		pr_err("PCI card detected, report problems.\n");

	/* Map MMIO */
	err = -ENOMEM;
	bus->mmio = pci_iomap(dev, 0, ~0UL);
	if (!bus->mmio)
		goto err_pci_release_regions;

	/* Host specific */
	bus->host_pci = dev;
	bus->hosttype = BCMA_HOSTTYPE_PCI;
	bus->ops = &bcma_host_pci_ops;

	/* Register */
	err = bcma_bus_register(bus);
	if (err)
		goto err_pci_unmap_mmio;

	pci_set_drvdata(dev, bus);

out:
	return err;

err_pci_unmap_mmio:
	pci_iounmap(dev, bus->mmio);
err_pci_release_regions:
	pci_release_regions(dev);
err_pci_disable:
	pci_disable_device(dev);
err_kfree_bus:
	kfree(bus);
	return err;
}
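The RETRY_TIMEOUT quirk at config offset 0x40 also appears in the ath9k probe in Example #8; a sketch of the pattern factored into a helper (the helper name is invented):
static void quirk_disable_retry_timeout(struct pci_dev *dev)
{
	u32 val;

	/* Byte 1 of this dword is the RETRY_TIMEOUT register (0x41). */
	pci_read_config_dword(dev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(dev, 0x40, val & 0xffff00ff);
}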
Example #12
int mrfld_gtt_init(struct psb_gtt *pg, int resume)
{
	struct drm_device *dev = pg->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size, ci_stolen_size;
	unsigned long rar_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	uint32_t ci_pages, vram_pages;
	uint32_t tt_pages;
	uint32_t *ttm_gtt_map;

	int ret = 0;
	uint32_t pte;

	pg->initialized = 1;

	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
	/* FIXME: the video MMU has a HW bug when accessing 0x0D0000000,
	 * so make the GATT start at 0x0E0000000 */
	pg->mmu_gatt_start = PSB_MEM_TT_START;
	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
	    >> PAGE_SHIFT;

	pci_read_config_dword(dev->pdev, MRFLD_BGSM, &pg->pge_ctl);
	pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK;

	pci_read_config_dword(dev->pdev, MRFLD_MSAC, &gtt_pages);
	printk(KERN_INFO "01 gtt_pages = 0x%x \n", gtt_pages);
	gtt_pages &= _APERTURE_SIZE_MASK;
	gtt_pages >>= _APERTURE_SIZE_POS;

	printk(KERN_INFO "02 gtt_pages = 0x%x \n", gtt_pages);
	switch (gtt_pages) {
	case _1G_APERTURE:
		gtt_pages = _1G_APERTURE_SIZE >> PAGE_SHIFT;
		break;
	case _512M_APERTURE:
		gtt_pages = _512M_APERTURE_SIZE >> PAGE_SHIFT;
		break;
	case _256M_APERTURE:
		gtt_pages = _256M_APERTURE_SIZE >> PAGE_SHIFT;
		break;
	default:
		DRM_ERROR("%s, invalded aperture size.\n", __func__);
		gtt_pages = _1G_APERTURE_SIZE >> PAGE_SHIFT;
	}

	gtt_pages >>= PAGE_SHIFT;
	gtt_pages *= 4;

	printk(KERN_INFO "03 gtt_pages = 0x%x \n", gtt_pages);
	/* HW removed the PSB_BSM, SW/FW needs it. */
	pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE;

	/* CI is not included in the stolen size since the TOPAZ MMU bug */
	ci_stolen_size = dev_priv->ci_region_size;
	/* Don't add CI & RAR share buffer space
	 * managed by TTM to stolen_size */
	stolen_size = vram_stolen_size;

	rar_stolen_size = dev_priv->rar_region_size;

	printk(KERN_INFO "GMMADR(region 0) start: 0x%08x (%dM).\n",
	       pg->gatt_start, pg->gatt_pages / 256);
	printk(KERN_INFO "GTT (can map %dM RAM), and actual RAM base 0x%08x.\n",
	       gtt_pages * 4, pg->gtt_phys_start);
	printk(KERN_INFO "Stole memory information \n");
	printk(KERN_INFO "      base in RAM: 0x%x \n", pg->stolen_base);
	printk(KERN_INFO
	       "      size: %luK, calculated by (GTT RAM base) - (Stolen base).\n",
	       vram_stolen_size / 1024);

	if (ci_stolen_size > 0)
		printk(KERN_INFO
		       "CI stolen memory: RAM base = 0x%08x, size = %lu M\n",
		       dev_priv->ci_region_start, ci_stolen_size / 1024 / 1024);
	if (rar_stolen_size > 0)
		printk(KERN_INFO
		       "RAR stolen memory: RAM base = 0x%08x, size = %lu M\n",
		       dev_priv->rar_region_start,
		       rar_stolen_size / 1024 / 1024);

	if (resume && (gtt_pages != pg->gtt_pages) &&
	    (stolen_size != pg->stolen_size)) {
		DRM_ERROR("GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	pg->vram_stolen_size = vram_stolen_size;
	pg->ci_stolen_size = ci_stolen_size;
	pg->rar_stolen_size = rar_stolen_size;
	pg->gtt_map =
	    ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT);
	if (!pg->gtt_map) {
		DRM_ERROR("Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	pg->vram_addr = ioremap_wc(pg->stolen_base, stolen_size);
	if (!pg->vram_addr) {
		DRM_ERROR("Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	DRM_INFO("%s: vram kernel virtual address %p\n", __FUNCTION__,
		 pg->vram_addr);

	tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
	    (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;

	ttm_gtt_map = pg->gtt_map + tt_pages / 2;

	/*
	 * insert vram stolen pages.
	 */

	pfn_base = pg->stolen_base >> PAGE_SHIFT;
	vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
	printk(KERN_INFO
	       "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
	       num_pages, pfn_base, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, 0);
		iowrite32(pte, pg->gtt_map + i);
	}

	/*
	 * Init rest of gtt managed by IMG.
	 */
	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, 0);
	for (; i < tt_pages / 2 - 1; ++i)
		iowrite32(pte, pg->gtt_map + i);

	/*
	 * insert CI stolen pages
	 */

	pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT;
	ci_pages = num_pages = ci_stolen_size >> PAGE_SHIFT;
	printk(KERN_INFO
	       "Set up %d CI stolen pages starting at 0x%08x, GTT offset %dK\n",
	       num_pages, pfn_base, (ttm_gtt_map - pg->gtt_map) * 4);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, 0);
		iowrite32(pte, ttm_gtt_map + i);
	}

	/*
	 * insert RAR stolen pages
	 */
	if (rar_stolen_size != 0) {
		pfn_base = dev_priv->rar_region_start >> PAGE_SHIFT;
		num_pages = rar_stolen_size >> PAGE_SHIFT;
		printk(KERN_INFO
		       "Set up %d RAR stolen pages starting at 0x%08x, GTT offset %dK\n",
		       num_pages, pfn_base,
		       (ttm_gtt_map - pg->gtt_map + i) * 4);
		for (; i < num_pages + ci_pages; ++i) {
			pte = psb_gtt_mask_pte(pfn_base + i - ci_pages, 0);
			iowrite32(pte, ttm_gtt_map + i);
		}
	}

out_err:
	/* The original listing was truncated here; minimal tail assumed. */
	return ret;
}
Example #13
static int atomisp_pci_probe(struct pci_dev *dev,
				       const struct pci_device_id *id)
{
	const struct atomisp_platform_data *pdata;
	struct atomisp_device *isp;
	unsigned int start;
	void __iomem *base;
	int err;

	if (!dev) {
		pr_err("atomisp: error device ptr\n");
		return -EINVAL;
	}

	atomisp_pci_vendor = id->vendor;
	atomisp_pci_device = id->device;

	pdata = atomisp_get_platform_data();
	if (pdata == NULL) {
		dev_err(&dev->dev, "no platform data available\n");
		return -ENODEV;
	}

	err = pcim_enable_device(dev);
	if (err) {
		dev_err(&dev->dev, "Failed to enable CI ISP device (%d)\n",
			err);
		return err;
	}

	start = pci_resource_start(dev, ATOM_ISP_PCI_BAR);
	v4l2_dbg(1, dbg_level, &atomisp_dev, "start: 0x%x\n", start);

	err = pcim_iomap_regions(dev, 1 << ATOM_ISP_PCI_BAR, pci_name(dev));
	if (err) {
		dev_err(&dev->dev, "Failed to I/O memory remapping (%d)\n",
			err);
		return err;
	}

	base = pcim_iomap_table(dev)[ATOM_ISP_PCI_BAR];
	v4l2_dbg(1, dbg_level, &atomisp_dev, "base: %p\n", base);

	atomisp_io_base = base;

	v4l2_dbg(1, dbg_level, &atomisp_dev, "atomisp_io_base: %p\n",
			atomisp_io_base);

	isp = devm_kzalloc(&dev->dev, sizeof(struct atomisp_device), GFP_KERNEL);
	if (!isp) {
		dev_err(&dev->dev, "Failed to alloc CI ISP structure\n");
		return -ENOMEM;
	}
	isp->pdev = dev;
	isp->dev = &dev->dev;
	isp->sw_contex.power_state = ATOM_ISP_POWER_UP;
	isp->pci_root = pci_get_bus_and_slot(0, 0);
	if (!isp->pci_root) {
		dev_err(&dev->dev, "Unable to find PCI host\n");
		return -ENODEV;
	}
	isp->saved_regs.ispmmadr = start;

	mutex_init(&isp->mutex);
	mutex_init(&isp->streamoff_mutex);
	spin_lock_init(&isp->lock);
	init_completion(&isp->init_done);

	isp->media_dev.driver_version = ATOMISP_CSS_VERSION_20;

	switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) {
	case ATOMISP_PCI_DEVICE_SOC_MRFLD:
	case ATOMISP_PCI_DEVICE_SOC_BYT:
		isp->media_dev.hw_revision =
			(ATOMISP_HW_REVISION_ISP2400
			 << ATOMISP_HW_REVISION_SHIFT) |
#ifdef CONFIG_ISP2400
			ATOMISP_HW_STEPPING_A0;
#else
			ATOMISP_HW_STEPPING_B0;
#endif
		break;
	default:
		/* Medfield and Clovertrail. */
		isp->media_dev.hw_revision =
			(ATOMISP_HW_REVISION_ISP2300
			 << ATOMISP_HW_REVISION_SHIFT) |
			(dev->revision < 0x09 ?
			 ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0);
	}

	isp->max_isr_latency = ATOMISP_MAX_ISR_LATENCY;
	if ((pdata->spid->platform_family_id == INTEL_CLVTP_PHONE ||
	     pdata->spid->platform_family_id == INTEL_CLVT_TABLET) &&
	    isp->pdev->revision < 0x09) {
		/* Workaround for Cloverview(+) older than stepping B0 */
		isp->max_isr_latency = CSTATE_EXIT_LATENCY_C1;
	}

	/* Load isp firmware from user space */
	isp->firmware = load_firmware(&dev->dev);
	if (!isp->firmware) {
		err = -ENOENT;
		dev_err(&dev->dev, "Load firmwares failed\n");
		goto load_fw_fail;
	}

	isp->wdt_work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1);
	if (isp->wdt_work_queue == NULL) {
		dev_err(&dev->dev, "Failed to initialize wdt work queue\n");
		err = -ENOMEM;
		goto wdt_work_queue_fail;
	}
	INIT_WORK(&isp->wdt_work, atomisp_wdt_work);

	isp->delayed_init_workq =
		alloc_workqueue(isp->v4l2_dev.name, WQ_CPU_INTENSIVE, 1);
	if (isp->delayed_init_workq == NULL) {
		dev_err(&dev->dev, "Failed to initialize delayed init workq\n");
		err = -ENOMEM;
		goto delayed_init_work_queue_fail;
	}
	INIT_WORK(&isp->delayed_init_work, atomisp_delayed_init_work);

	pci_set_master(dev);
	pci_set_drvdata(dev, isp);

	err = pci_enable_msi(dev);
	if (err) {
		dev_err(&dev->dev, "Failed to enable msi (%d)\n", err);
		goto enable_msi_fail;
	}

	err = devm_request_threaded_irq(&dev->dev, dev->irq,
					atomisp_isr, atomisp_isr_thread,
					IRQF_SHARED, "isp_irq", isp);
	if (err) {
		dev_err(&dev->dev, "Failed to request irq (%d)\n", err);
		goto enable_msi_fail;
	}

	setup_timer(&isp->wdt, atomisp_wdt, (unsigned long)isp);

	atomisp_msi_irq_init(isp, dev);

	pm_qos_add_request(&isp->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	if (IS_ISP2400) {
		u32 reg32;
		/*
		 * for MRFLD, Software/firmware needs to write a 1 to bit 0 of
		 * the register at CSI_RECEIVER_SELECTION_REG to enable SH CSI
		 * backend write 0 will enable Arasan CSI backend, which has
		 * bugs(like sighting:4567697 and 4567699) and will be removed
		 * in B0
		 */
		atomisp_css2_hw_store_32(MRFLD_CSI_RECEIVER_SELECTION_REG, 1);
		pci_read_config_dword(dev, PCI_I_CONTROL, &reg32);
		reg32 |= MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING
			| MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING;
		pci_write_config_dword(dev, PCI_I_CONTROL, reg32);
	}

	err = atomisp_initialize_modules(isp);
	if (err < 0) {
		dev_err(&dev->dev, "atomisp_initialize_modules (%d)\n", err);
		goto enable_msi_fail;
	}

	err = atomisp_register_entities(isp);
	if (err < 0) {
		dev_err(&dev->dev, "atomisp_register_entities failed (%d)\n",
			err);
		goto enable_msi_fail;
	}
	atomisp_acc_init(isp);

	/* save the iunit context only once after all the values are init'ed. */
	atomisp_save_iunit_reg(isp);

	pm_runtime_put_noidle(&dev->dev);
	pm_runtime_allow(&dev->dev);

	err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED);
	if (err)
		dev_err(&dev->dev, "Failed to register reserved memory pool.\n");

	return 0;

enable_msi_fail:
	destroy_workqueue(isp->delayed_init_workq);
delayed_init_work_queue_fail:
	destroy_workqueue(isp->wdt_work_queue);
wdt_work_queue_fail:
	release_firmware(isp->firmware);
load_fw_fail:
	pci_dev_put(isp->pci_root);
	return err;
}
Example #14
static int mrfld_csi_lane_config(struct atomisp_device *isp)
{
	static const u8 mipi_lanes[MRFLD_PORT_CONFIG_NUM][MRFLD_PORT_NUM] = {
		{4, 1, 0},
		{3, 1, 0},
		{2, 1, 0},
		{1, 1, 0},
		{2, 1, 2},
		{3, 1, 1},
		{2, 1, 1},
		{1, 1, 1}
	};

	unsigned int i, j;
	u8 sensor_lanes[MRFLD_PORT_NUM] = {0};
	u32 data;

	for (i = 0; i < isp->input_cnt; i++) {
		struct camera_mipi_info *mipi_info;

		if (isp->inputs[i].type != RAW_CAMERA &&
			isp->inputs[i].type != SOC_CAMERA)
			continue;

		mipi_info = atomisp_to_sensor_mipi_info(isp->inputs[i].camera);
		if (!mipi_info)
			continue;

		switch (mipi_info->port) {
		case ATOMISP_CAMERA_PORT_PRIMARY:
			sensor_lanes[0] = mipi_info->num_lanes;
			break;
		case ATOMISP_CAMERA_PORT_SECONDARY:
			sensor_lanes[1] = mipi_info->num_lanes;
			break;
		case ATOMISP_CAMERA_PORT_THIRD:
			sensor_lanes[2] = mipi_info->num_lanes;
			break;
		default:
			v4l2_err(&atomisp_dev,
				"%s: invalid port: %d for the %dth sensor\n",
				__func__, mipi_info->port, i);
			break;
		}
	}

	for (i = 0; i < MRFLD_PORT_CONFIG_NUM; i++) {
		for (j = 0; j < MRFLD_PORT_NUM; j++)
			if (sensor_lanes[j]
				&& sensor_lanes[j] != mipi_lanes[i][j])
				break;

		if (j == MRFLD_PORT_NUM)
			break;	/* matched setting is found */
	}

	if (i == MRFLD_PORT_CONFIG_NUM) {
		v4l2_err(&atomisp_dev,
			"%s: could not find the CSI port setting for %d-%d-%d\n",
			__func__, sensor_lanes[0],
			sensor_lanes[1], sensor_lanes[2]);
		return -EINVAL;
	}

	pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, &data);
	v4l2_dbg(3, dbg_level, &atomisp_dev,
				"%s: original CSI_CONTROL is 0x%x\n",
				__func__, data);
	data &= ~MRFLD_PORT_CONFIG_MASK;
	data |= (i << MRFLD_PORT_CONFIGCODE_SHIFT)
		| (mipi_lanes[i][2] ? 0 : (1 << MRFLD_PORT3_ENABLE_SHIFT))
		| (((1 << mipi_lanes[i][0]) - 1) << MRFLD_PORT1_LANES_SHIFT)
		| (((1 << mipi_lanes[i][1]) - 1) << MRFLD_PORT2_LANES_SHIFT)
		| (((1 << mipi_lanes[i][2]) - 1) << MRFLD_PORT3_LANES_SHIFT);

	pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, data);

	v4l2_dbg(3, dbg_level, &atomisp_dev,
		"%s: the portconfig is %d-%d-%d, CSI_CONTROL is 0x%x\n",
		__func__, mipi_lanes[i][0], mipi_lanes[i][1],
		mipi_lanes[i][2], data);

	return 0;
}
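The ((1 << n) - 1) << shift idiom above turns a lane count into a contiguous enable mask; a standalone illustration (the shift value is made up for the example):
#include <stdio.h>

#define EXAMPLE_LANES_SHIFT 3	/* hypothetical shift, for illustration only */

int main(void)
{
	unsigned int lanes = 4;
	unsigned int mask = ((1u << lanes) - 1) << EXAMPLE_LANES_SHIFT;

	printf("enable mask for %u lanes: 0x%x\n", lanes, mask);	/* 0x78 */
	return 0;
}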
Example #15
static int atomisp_save_iunit_reg(struct atomisp_device *isp)
{
	struct pci_dev *dev = isp->pdev;

	dev_dbg(isp->dev, "%s\n", __func__);

	pci_read_config_word(dev, PCI_COMMAND, &isp->saved_regs.pcicmdsts);
	/* isp->saved_regs.ispmmadr is set from the atomisp_pci_probe() */
	pci_read_config_dword(dev, PCI_MSI_CAPID, &isp->saved_regs.msicap);
	pci_read_config_dword(dev, PCI_MSI_ADDR, &isp->saved_regs.msi_addr);
	pci_read_config_word(dev, PCI_MSI_DATA,  &isp->saved_regs.msi_data);
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &isp->saved_regs.intr);
	pci_read_config_dword(dev, PCI_INTERRUPT_CTRL,
			      &isp->saved_regs.interrupt_control);

	if (IS_ISP2400) {
		pci_read_config_dword(dev, MRFLD_PCI_PMCS,
				      &isp->saved_regs.pmcs);
		/* Ensure read/write combining is enabled. */
		pci_read_config_dword(dev, PCI_I_CONTROL,
				&isp->saved_regs.i_control);
		isp->saved_regs.i_control |=
				MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING |
				MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING;
		pci_read_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
				      &isp->saved_regs.csi_access_viol);
		pci_read_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL,
				      &isp->saved_regs.csi_rcomp_config);
		pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
				      &isp->saved_regs.csi_afe_dly);
		pci_read_config_dword(dev, MRFLD_PCI_CSI_CONTROL,
				      &isp->saved_regs.csi_control);
		pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
				      &isp->saved_regs.csi_afe_rcomp_config);
		pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
				      &isp->saved_regs.csi_afe_hs_control);
		pci_read_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
				      &isp->saved_regs.csi_deadline_control);
	} else {
		pci_read_config_dword(dev, MFLD_PCI_PMCS,
				      &isp->saved_regs.pmcs);

		/* Ensure clock gating for ISPCLK, PERF and NOA monitoring. */
		pci_read_config_dword(dev, MFLD_PCI_CG_DIS,
				      &isp->saved_regs.cg_dis);
		isp->saved_regs.cg_dis &= ~(MFLD_PCI_CG_DIS_DISABLED_ISPCLK |
				MFLD_PCI_CG_DIS_DISABLED_PERF_MON |
				MFLD_PCI_CG_DIS_DISABLED_NOA_MON);

		/* Ensure read/write combining is enabled. */
		pci_read_config_dword(dev, PCI_I_CONTROL,
				&isp->saved_regs.i_control);
		isp->saved_regs.i_control |=
				MFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING |
				MFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING;

		isp->saved_regs.csi_rcomp_config = intel_mid_msgbus_read32(
				MFLD_IUNITPHY_PORT, MFLD_CSI_RCOMP);
		isp->saved_regs.csi_afe_dly = intel_mid_msgbus_read32(
				MFLD_IUNITPHY_PORT, MFLD_CSI_AFE);

		/* Ensure mipi1 and mipi4 configurations are enabled */
		isp->saved_regs.csi_control = intel_mid_msgbus_read32(
				MFLD_IUNITPHY_PORT, MFLD_CSI_CONTROL);
		isp->saved_regs.csi_control &= ~(MFLD_CSI_CONTROL_DIS_MIPI4_IF |
				MFLD_CSI_CONTROL_DIS_MIPI1_IF);
		isp->saved_regs.csi_control |= MFLD_CSI_CONTROL_EN_MIPI4_LANE |
				MFLD_CSI_CONTROL_EN_MIPI1_LANE;
	}

	return 0;
}
int
oss_als3xx_attach (oss_device_t * osdev)
{
  unsigned char pci_irq_line, pci_revision;
  unsigned short pci_command, vendor, device;
  unsigned int pci_ioaddr;
  int err;
  als300_devc *devc;

  DDB (cmn_err (CE_WARN, "Entered ALS ALS300 probe routine\n"));


  pci_read_config_word (osdev, PCI_VENDOR_ID, &vendor);
  pci_read_config_byte (osdev, PCI_REVISION_ID, &pci_revision);
  pci_read_config_word (osdev, PCI_COMMAND, &pci_command);
  pci_read_config_word (osdev, PCI_DEVICE_ID, &device);
  pci_read_config_irq (osdev, PCI_INTERRUPT_LINE, &pci_irq_line);
  pci_read_config_dword (osdev, PCI_BASE_ADDRESS_0, &pci_ioaddr);

  if (vendor != ALS_VENDOR_ID || (device != ALS_300 && device != ALS_300P))
    return 0;

  DDB (cmn_err (CE_WARN, "rev %x I/O base %04x\n", pci_revision, pci_ioaddr));

  if (pci_ioaddr == 0)
    {
      cmn_err (CE_WARN, "I/O address not assigned by BIOS.\n");
      return 0;
    }

  if (pci_irq_line == 0)
    {
      cmn_err (CE_WARN, "IRQ not assigned by BIOS (%d).\n", pci_irq_line);
      return 0;
    }

  if ((devc = PMALLOC (osdev, sizeof (*devc))) == NULL)
    {
      cmn_err (CE_WARN, "Out of memory\n");
      return 0;
    }

  devc->osdev = osdev;
  osdev->devc = devc;
  devc->open_mode = 0;

  devc->base = MAP_PCI_IOADDR (devc->osdev, 0, pci_ioaddr);
  /* Remove I/O space marker in bit 0. */
  devc->base &= ~3;

  devc->irq = pci_irq_line;
  devc->mpu_base = als300_mpu_base;

  pci_command |= PCI_COMMAND_MASTER | PCI_COMMAND_IO;
  pci_write_config_word (osdev, PCI_COMMAND, pci_command);


  switch (device)
    {
    case ALS_300:
      devc->model = MDL_ALS300;
      devc->chip_name = "Avance Logic ALS300";
      devc->chip_rev = pci_revision;
      break;

    case ALS_300P:
      devc->model = MDL_ALS300PLUS;
      devc->chip_name = "Avance Logic ALS300+";
      devc->chip_rev = pci_revision;
      break;
    }

  MUTEX_INIT (devc->osdev, devc->mutex, MH_DRV);
  MUTEX_INIT (devc->osdev, devc->low_mutex, MH_DRV + 1);

  oss_register_device (osdev, devc->chip_name);

  if ((err = oss_register_interrupts (devc->osdev, 0, als300intr, NULL)) < 0)
    {
      cmn_err (CE_WARN, "Can't allocate IRQ%d, err=%d\n", pci_irq_line, err);
      return 0;
    }

  return init_als300 (devc);	/* Detected */
}
Example #17
static int __init init_l440gx(void)
{
	struct pci_dev *dev, *pm_dev;
	struct resource *pm_iobase;
	__u16 word;
	u32 iobase;		/* PM I/O base, read from config offset 0x40 */

	dev = pci_find_device(PCI_VENDOR_ID_INTEL,
		PCI_DEVICE_ID_INTEL_82371AB_0, NULL);

	pm_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
		PCI_DEVICE_ID_INTEL_82371AB_3, NULL);

	if (!dev || !pm_dev) {
		printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n");
		return -ENODEV;
	}

	l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);

	if (!l440gx_map.virt) {
		printk(KERN_WARNING "Failed to ioremap L440GX flash region\n");
		return -ENOMEM;
	}
	simple_map_init(&l440gx_map);
	printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.virt);

	/* Setup the pm iobase resource
	 * This code should move into some kind of generic bridge
	 * driver but for the moment I'm content with getting the
	 * allocation correct.
	 */
	pm_iobase = &pm_dev->resource[PIIXE_IOBASE_RESOURCE];
	if (!(pm_iobase->flags & IORESOURCE_IO)) {
		pm_iobase->name = "pm iobase";
		pm_iobase->start = 0;
		pm_iobase->end = 63;
		pm_iobase->flags = IORESOURCE_IO;

		/* Put the current value in the resource */
		pci_read_config_dword(pm_dev, 0x40, &iobase);
		iobase &= ~1;
		pm_iobase->start += iobase & ~1;
		pm_iobase->end += iobase & ~1;

		/* Allocate the resource region */
		if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) {
			printk(KERN_WARNING "Could not allocate pm iobase resource\n");
			iounmap(l440gx_map.virt);
			return -ENXIO;
		}
	}
	/* Set the iobase */
	iobase = pm_iobase->start;
	pci_write_config_dword(pm_dev, 0x40, iobase | 1);


	/* Set XBCS# */
	pci_read_config_word(dev, 0x4e, &word);
	word |= 0x4;
	pci_write_config_word(dev, 0x4e, word);

	/* Supply write voltage to the chip */
	l440gx_set_vpp(&l440gx_map, 1);

	/* Enable the gate on the WE line */
	outb(inb(TRIBUF_PORT) & ~1, TRIBUF_PORT);

       	printk(KERN_NOTICE "Enabled WE line to L440GX BIOS flash chip.\n");

	mymtd = do_map_probe("jedec_probe", &l440gx_map);
	if (!mymtd) {
		printk(KERN_NOTICE "JEDEC probe on BIOS chip failed. Using ROM\n");
		mymtd = do_map_probe("map_rom", &l440gx_map);
	}
	if (mymtd) {
		mymtd->owner = THIS_MODULE;

		add_mtd_device(mymtd);
		return 0;
	}

	iounmap(l440gx_map.virt);
	return -ENXIO;
}
static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	u8 cap_ptr;
	int j;

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
	if (!cap_ptr)
		return -ENODEV;

	j = ent - agp_amdk7_pci_table;
	dev_info(&pdev->dev, "AMD %s chipset\n",
		 amd_agp_device_ids[j].chipset_name);

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &amd_irongate_driver;
	bridge->dev_private_data = &amd_irongate_private;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
		struct pci_dev *gfxcard=NULL;

		cap_ptr = 0;
		while (!cap_ptr) {
			gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
			if (!gfxcard) {
				dev_info(&pdev->dev, "no AGP VGA controller\n");
				return -ENODEV;
			}
			cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
		}

		if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
			agp_bridge->flags |= AGP_ERRATA_1X;
			dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n");
		}
		pci_dev_put(gfxcard);
	}

	if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
		if (pdev->revision == 0x10 || pdev->revision == 0x11) {
			agp_bridge->flags = AGP_ERRATA_FASTWRITES;
			agp_bridge->flags |= AGP_ERRATA_SBA;
			agp_bridge->flags |= AGP_ERRATA_1X;
			dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n");
		}
	}

	
	pci_read_config_dword(pdev,
			bridge->capndx+PCI_AGP_STATUS,
			&bridge->mode);

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
Example #19
int mthca_reset(struct mthca_dev *mdev)
{
	int i;
	int err = 0;
	u32 *hca_header    = NULL;
	u32 *bridge_header = NULL;
	struct pci_dev *bridge = NULL;
	int bridge_pcix_cap = 0;
	int hca_pcie_cap = 0;
	int hca_pcix_cap = 0;

	u16 devctl;
	u16 linkctl;

#define MTHCA_RESET_OFFSET 0xf0010
#define MTHCA_RESET_VALUE  swab32(1)

	/*
	 * Reset the chip.  This is somewhat ugly because we have to
	 * save off the PCI header before reset and then restore it
	 * after the chip reboots.  We skip config space offsets 22
	 * and 23 since those have a special meaning.
	 *
	 * To make matters worse, for Tavor (PCI-X HCA) we have to
	 * find the associated bridge device and save off its PCI
	 * header as well.
	 */

	if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE)) {
		/* Look for the bridge -- its device ID will be 2 more
		   than HCA's device ID. */
		while ((bridge = pci_get_device(mdev->pdev->vendor,
						mdev->pdev->device + 2,
						bridge)) != NULL) {
			if (bridge->hdr_type    == PCI_HEADER_TYPE_BRIDGE &&
			    bridge->subordinate == mdev->pdev->bus) {
				mthca_dbg(mdev, "Found bridge: %s\n",
					  pci_name(bridge));
				break;
			}
		}

		if (!bridge) {
			/*
			 * Didn't find a bridge for a Tavor device --
			 * assume we're in no-bridge mode and hope for
			 * the best.
			 */
			mthca_warn(mdev, "No bridge found for %s\n",
				  pci_name(mdev->pdev));
		}

	}

	/* For Arbel do we need to save off the full 4K PCI Express header?? */
	hca_header = kmalloc(256, GFP_KERNEL);
	if (!hca_header) {
		err = -ENOMEM;
		mthca_err(mdev, "Couldn't allocate memory to save HCA "
			  "PCI header, aborting.\n");
		goto out;
	}

	for (i = 0; i < 64; ++i) {
		if (i == 22 || i == 23)
			continue;
		if (pci_read_config_dword(mdev->pdev, i * 4, hca_header + i)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't save HCA "
				  "PCI header, aborting.\n");
			goto out;
		}
	}

	hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
	hca_pcie_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP);

	if (bridge) {
		bridge_header = kmalloc(256, GFP_KERNEL);
		if (!bridge_header) {
			err = -ENOMEM;
			mthca_err(mdev, "Couldn't allocate memory to save HCA "
				  "bridge PCI header, aborting.\n");
			goto out;
		}

		for (i = 0; i < 64; ++i) {
			if (i == 22 || i == 23)
				continue;
			if (pci_read_config_dword(bridge, i * 4, bridge_header + i)) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't save HCA bridge "
					  "PCI header, aborting.\n");
				goto out;
			}
		}
		bridge_pcix_cap = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
		if (!bridge_pcix_cap) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't locate HCA bridge "
				  "PCI-X capability, aborting.\n");
			goto out;
		}
	}

	/* actually hit reset */
	{
		void __iomem *reset = ioremap(pci_resource_start(mdev->pdev, 0) +
					      MTHCA_RESET_OFFSET, 4);

		if (!reset) {
			err = -ENOMEM;
			mthca_err(mdev, "Couldn't map HCA reset register, "
				  "aborting.\n");
			goto out;
		}

		writel(MTHCA_RESET_VALUE, reset);
		iounmap(reset);
	}

	/* Docs say to wait one second before accessing device */
	msleep(1000);

	/* Now wait for PCI device to start responding again */
	{
		u32 v;
		int c = 0;

		for (c = 0; c < 100; ++c) {
			if (pci_read_config_dword(bridge ? bridge : mdev->pdev, 0, &v)) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't access HCA after reset, "
					  "aborting.\n");
				goto out;
			}

			if (v != 0xffffffff)
				goto good;

			msleep(100);
		}

		err = -ENODEV;
		mthca_err(mdev, "PCI device did not come back after reset, "
			  "aborting.\n");
		goto out;
	}

good:
	/* Now restore the PCI headers */
	if (bridge) {
		if (pci_write_config_dword(bridge, bridge_pcix_cap + 0x8,
				 bridge_header[(bridge_pcix_cap + 0x8) / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge Upstream "
				  "split transaction control, aborting.\n");
			goto out;
		}
		if (pci_write_config_dword(bridge, bridge_pcix_cap + 0xc,
				 bridge_header[(bridge_pcix_cap + 0xc) / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge Downstream "
				  "split transaction control, aborting.\n");
			goto out;
		}
		/*
		 * Bridge control register is at 0x3e, so we'll
		 * naturally restore it last in this loop.
		 */
		for (i = 0; i < 16; ++i) {
			if (i * 4 == PCI_COMMAND)
				continue;

			if (pci_write_config_dword(bridge, i * 4, bridge_header[i])) {
				err = -ENODEV;
				mthca_err(mdev, "Couldn't restore HCA bridge reg %x, "
					  "aborting.\n", i);
				goto out;
			}
		}

		if (pci_write_config_dword(bridge, PCI_COMMAND,
					   bridge_header[PCI_COMMAND / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA bridge COMMAND, "
				  "aborting.\n");
			goto out;
		}
	}

	if (hca_pcix_cap) {
		if (pci_write_config_dword(mdev->pdev, hca_pcix_cap,
				 hca_header[hca_pcix_cap / 4])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI-X "
				  "command register, aborting.\n");
			goto out;
		}
	}

	if (hca_pcie_cap) {
		devctl = hca_header[(hca_pcie_cap + PCI_EXP_DEVCTL) / 4];
		if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_DEVCTL,
					   devctl)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI Express "
				  "Device Control register, aborting.\n");
			goto out;
		}
		linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4];
		if (pci_write_config_word(mdev->pdev, hca_pcie_cap + PCI_EXP_LNKCTL,
					   linkctl)) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA PCI Express "
				  "Link control register, aborting.\n");
			goto out;
		}
	}

	for (i = 0; i < 16; ++i) {
		if (i * 4 == PCI_COMMAND)
			continue;

		if (pci_write_config_dword(mdev->pdev, i * 4, hca_header[i])) {
			err = -ENODEV;
			mthca_err(mdev, "Couldn't restore HCA reg %x, "
				  "aborting.\n", i);
			goto out;
		}
	}

	if (pci_write_config_dword(mdev->pdev, PCI_COMMAND,
				   hca_header[PCI_COMMAND / 4])) {
		err = -ENODEV;
		mthca_err(mdev, "Couldn't restore HCA COMMAND, "
			  "aborting.\n");
		goto out;
	}

out:
	if (bridge)
		pci_dev_put(bridge);
	kfree(bridge_header);
	kfree(hca_header);

	return err;
}
Example #20
static long sp5100_tco_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
    int new_options, retval = -EINVAL;
    int new_heartbeat;
    void __user *argp = (void __user *)arg;
    int __user *p = argp;
    static const struct watchdog_info ident = {
        .options =		WDIOF_SETTIMEOUT |
        WDIOF_KEEPALIVEPING |
        WDIOF_MAGICCLOSE,
        .firmware_version =	0,
        .identity =		TCO_MODULE_NAME,
    };

    switch (cmd) {
    case WDIOC_GETSUPPORT:
        return copy_to_user(argp, &ident,
                            sizeof(ident)) ? -EFAULT : 0;
    case WDIOC_GETSTATUS:
    case WDIOC_GETBOOTSTATUS:
        return put_user(0, p);
    case WDIOC_SETOPTIONS:
        if (get_user(new_options, p))
            return -EFAULT;
        if (new_options & WDIOS_DISABLECARD) {
            tco_timer_stop();
            retval = 0;
        }
        if (new_options & WDIOS_ENABLECARD) {
            tco_timer_start();
            tco_timer_keepalive();
            retval = 0;
        }
        return retval;
    case WDIOC_KEEPALIVE:
        tco_timer_keepalive();
        return 0;
    case WDIOC_SETTIMEOUT:
        if (get_user(new_heartbeat, p))
            return -EFAULT;
        if (tco_timer_set_heartbeat(new_heartbeat))
            return -EINVAL;
        tco_timer_keepalive();
    /* Fall through */
    case WDIOC_GETTIMEOUT:
        return put_user(heartbeat, p);
    default:
        return -ENOTTY;
    }
}

/*
 * Kernel Interfaces
 */

static const struct file_operations sp5100_tco_fops = {
    .owner =		THIS_MODULE,
    .llseek =		no_llseek,
    .write =		sp5100_tco_write,
    .unlocked_ioctl =	sp5100_tco_ioctl,
    .open =			sp5100_tco_open,
    .release =		sp5100_tco_release,
};

static struct miscdevice sp5100_tco_miscdev = {
    .minor =	WATCHDOG_MINOR,
    .name =		"watchdog",
    .fops =		&sp5100_tco_fops,
};

/*
 * Data for PCI driver interface
 *
 * This data only exists for exporting the supported
 * PCI ids via MODULE_DEVICE_TABLE.  We do not actually
 * register a pci_driver, because someone else might
 * want to register another driver on the same PCI id.
 */
static DEFINE_PCI_DEVICE_TABLE(sp5100_tco_pci_tbl) = {
    {   PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID,
        PCI_ANY_ID,
    },
    { 0, },			/* End of list */
};
MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl);

/*
 * Init & exit routines
 */

static unsigned char __devinit sp5100_tco_setupdevice(void)
{
    struct pci_dev *dev = NULL;
    u32 val;

    /* Match the PCI device */
    for_each_pci_dev(dev) {
        if (pci_match_id(sp5100_tco_pci_tbl, dev) != NULL) {
            sp5100_tco_pci = dev;
            break;
        }
    }

    if (!sp5100_tco_pci)
        return 0;

    /* Request the IO ports used by this driver */
    pm_iobase = SP5100_IO_PM_INDEX_REG;
    if (!request_region(pm_iobase, SP5100_PM_IOPORTS_SIZE, "SP5100 TCO")) {
        pr_err("I/O address 0x%04x already in use\n", pm_iobase);
        goto exit;
    }

    /* Find the watchdog base address. */
    outb(SP5100_PM_WATCHDOG_BASE3, SP5100_IO_PM_INDEX_REG);
    val = inb(SP5100_IO_PM_DATA_REG);
    outb(SP5100_PM_WATCHDOG_BASE2, SP5100_IO_PM_INDEX_REG);
    val = val << 8 | inb(SP5100_IO_PM_DATA_REG);
    outb(SP5100_PM_WATCHDOG_BASE1, SP5100_IO_PM_INDEX_REG);
    val = val << 8 | inb(SP5100_IO_PM_DATA_REG);
    outb(SP5100_PM_WATCHDOG_BASE0, SP5100_IO_PM_INDEX_REG);
    /* Low three bits of BASE0 are reserved. */
    val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8);

    if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
                                      "SP5100 TCO")) {
        pr_err("mmio address 0x%04x already in use\n", val);
        goto unreg_region;
    }
    tcobase_phys = val;

    tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE);
    if (!tcobase) {
        pr_err("failed to get tcobase address\n");
        goto unreg_mem_region;
    }

    /* Enable watchdog decode bit */
    pci_read_config_dword(sp5100_tco_pci,
                          SP5100_PCI_WATCHDOG_MISC_REG,
                          &val);

    val |= SP5100_PCI_WATCHDOG_DECODE_EN;

    pci_write_config_dword(sp5100_tco_pci,
                           SP5100_PCI_WATCHDOG_MISC_REG,
                           val);

    /* Enable Watchdog timer and set the resolution to 1 sec. */
    outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
    val = inb(SP5100_IO_PM_DATA_REG);
    val |= SP5100_PM_WATCHDOG_SECOND_RES;
    val &= ~SP5100_PM_WATCHDOG_DISABLE;
    outb(val, SP5100_IO_PM_DATA_REG);

    /* Set the watchdog action to reset the system. */
    val = readl(SP5100_WDT_CONTROL(tcobase));
    val &= ~SP5100_PM_WATCHDOG_ACTION_RESET;
    writel(val, SP5100_WDT_CONTROL(tcobase));

    /* Set a reasonable heartbeat before we stop the timer */
    tco_timer_set_heartbeat(heartbeat);

    /*
     * Stop the TCO before we change anything so we don't race with
     * a zeroed timer.
     */
    tco_timer_stop();

    /* Done */
    return 1;

unreg_mem_region:
    release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
unreg_region:
    release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
exit:
    return 0;
}
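
The four outb/inb pairs above show the SP5100's indirect register access: a register index is written to the PM index port, and that register's value is then read back through the PM data port, one byte at a time. A minimal helper capturing the pattern, with a hypothetical name not taken from the driver:

static u8 sp5100_pm_readb(u8 index)
{
	/* select the PM register, then read its value through the data port */
	outb(index, SP5100_IO_PM_INDEX_REG);
	return inb(SP5100_IO_PM_DATA_REG);
}

With it, the base-address assembly reads most significant byte first:

	val = sp5100_pm_readb(SP5100_PM_WATCHDOG_BASE3);
	val = val << 8 | sp5100_pm_readb(SP5100_PM_WATCHDOG_BASE2);
	val = val << 8 | sp5100_pm_readb(SP5100_PM_WATCHDOG_BASE1);
	val = val << 8 | (sp5100_pm_readb(SP5100_PM_WATCHDOG_BASE0) & 0xf8);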

static int __devinit sp5100_tco_init(struct platform_device *dev)
{
    int ret;
    u32 val;

    /* Check whether or not the hardware watchdog is there. If found, then
     * set it up.
     */
    if (!sp5100_tco_setupdevice())
        return -ENODEV;

    /* Check to see if last reboot was due to watchdog timeout */
    pr_info("Watchdog reboot %sdetected\n",
            readl(SP5100_WDT_CONTROL(tcobase)) & SP5100_PM_WATCHDOG_FIRED ?
            "" : "not ");

    /* Clear out the old status */
    val = readl(SP5100_WDT_CONTROL(tcobase));
    val &= ~SP5100_PM_WATCHDOG_FIRED;
    writel(val, SP5100_WDT_CONTROL(tcobase));

    /*
     * Check that the heartbeat value is within its range.
     * If not, reset to the default.
     */
    if (tco_timer_set_heartbeat(heartbeat)) {
        heartbeat = WATCHDOG_HEARTBEAT;
        tco_timer_set_heartbeat(heartbeat);
    }

    ret = misc_register(&sp5100_tco_miscdev);
    if (ret != 0) {
        pr_err("cannot register miscdev on minor=%d (err=%d)\n",
               WATCHDOG_MINOR, ret);
        goto exit;
    }

    clear_bit(0, &timer_alive);

    pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
            tcobase, heartbeat, nowayout);

    return 0;

exit:
    iounmap(tcobase);
    release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
    release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
    return ret;
}

static void __devexit sp5100_tco_cleanup(void)
{
    /* Stop the timer before we leave */
    if (!nowayout)
        tco_timer_stop();

    /* Deregister */
    misc_deregister(&sp5100_tco_miscdev);
    iounmap(tcobase);
    release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
    release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
}

static int __devexit sp5100_tco_remove(struct platform_device *dev)
{
    if (tcobase)
        sp5100_tco_cleanup();
    return 0;
}

static void sp5100_tco_shutdown(struct platform_device *dev)
{
    tco_timer_stop();
}

static struct platform_driver sp5100_tco_driver = {
    .probe		= sp5100_tco_init,
    .remove		= __devexit_p(sp5100_tco_remove),
    .shutdown	= sp5100_tco_shutdown,
    .driver		= {
        .owner	= THIS_MODULE,
        .name	= TCO_MODULE_NAME,
    },
};

static int __init sp5100_tco_init_module(void)
{
    int err;

    pr_info("SP5100 TCO WatchDog Timer Driver v%s\n", TCO_VERSION);

    err = platform_driver_register(&sp5100_tco_driver);
    if (err)
        return err;

    sp5100_tco_platform_device = platform_device_register_simple(
                                     TCO_MODULE_NAME, -1, NULL, 0);
    if (IS_ERR(sp5100_tco_platform_device)) {
        err = PTR_ERR(sp5100_tco_platform_device);
        goto unreg_platform_driver;
    }

    return 0;

unreg_platform_driver:
    platform_driver_unregister(&sp5100_tco_driver);
    return err;
}
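
The excerpt ends before the module exit path. Assuming the usual pairing with the init routine above, a sketch of the teardown would unregister in reverse order:

static void __exit sp5100_tco_cleanup_module(void)
{
	/* undo init: drop the device first, then the driver */
	platform_device_unregister(sp5100_tco_platform_device);
	platform_driver_unregister(&sp5100_tco_driver);
}

module_init(sp5100_tco_init_module);
module_exit(sp5100_tco_cleanup_module);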
Example #21
0
static void dump_cxl_config_space(struct pci_dev *dev)
{
	int vsec;
	u32 val;

	dev_info(&dev->dev, "dump_cxl_config_space\n");

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
	dev_info(&dev->dev, "BAR0: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
	dev_info(&dev->dev, "BAR1: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
	dev_info(&dev->dev, "BAR2: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
	dev_info(&dev->dev, "BAR3: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
	dev_info(&dev->dev, "BAR4: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
	dev_info(&dev->dev, "BAR5: %#.8x\n", val);

	dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
		p1_base(dev), p1_size(dev));
	dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
		p2_base(dev), p2_size(dev));
	dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
		pci_resource_start(dev, 4), pci_resource_len(dev, 4));

	if (!(vsec = find_cxl_vsec(dev)))
		return;

#define show_reg(name, what) \
	dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)

	pci_read_config_dword(dev, vsec + 0x0, &val);
	show_reg("Cap ID", (val >> 0) & 0xffff);
	show_reg("Cap Ver", (val >> 16) & 0xf);
	show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x4, &val);
	show_reg("VSEC ID", (val >> 0) & 0xffff);
	show_reg("VSEC Rev", (val >> 16) & 0xf);
	show_reg("VSEC Length",	(val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x8, &val);
	show_reg("Num AFUs", (val >> 0) & 0xff);
	show_reg("Status", (val >> 8) & 0xff);
	show_reg("Mode Control", (val >> 16) & 0xff);
	show_reg("Reserved", (val >> 24) & 0xff);
	pci_read_config_dword(dev, vsec + 0xc, &val);
	show_reg("PSL Rev", (val >> 0) & 0xffff);
	show_reg("CAIA Ver", (val >> 16) & 0xffff);
	pci_read_config_dword(dev, vsec + 0x10, &val);
	show_reg("Base Image Rev", (val >> 0) & 0xffff);
	show_reg("Reserved", (val >> 16) & 0x0fff);
	show_reg("Image Control", (val >> 28) & 0x3);
	show_reg("Reserved", (val >> 30) & 0x1);
	show_reg("Image Loaded", (val >> 31) & 0x1);

	pci_read_config_dword(dev, vsec + 0x14, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x18, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x1c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x20, &val);
	show_reg("AFU Descriptor Offset", val);
	pci_read_config_dword(dev, vsec + 0x24, &val);
	show_reg("AFU Descriptor Size", val);
	pci_read_config_dword(dev, vsec + 0x28, &val);
	show_reg("Problem State Offset", val);
	pci_read_config_dword(dev, vsec + 0x2c, &val);
	show_reg("Problem State Size", val);

	pci_read_config_dword(dev, vsec + 0x30, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x34, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x38, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x3c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x40, &val);
	show_reg("PSL Programming Port", val);
	pci_read_config_dword(dev, vsec + 0x44, &val);
	show_reg("PSL Programming Control", val);

	pci_read_config_dword(dev, vsec + 0x48, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x4c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x50, &val);
	show_reg("Flash Address Register", val);
	pci_read_config_dword(dev, vsec + 0x54, &val);
	show_reg("Flash Size Register", val);
	pci_read_config_dword(dev, vsec + 0x58, &val);
	show_reg("Flash Status/Control Register", val);
	pci_read_config_dword(dev, vsec + 0x5c, &val);	/* data port follows status/control */
	show_reg("Flash Data Port", val);

#undef show_reg
}
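
Each show_reg() call above extracts a field with an explicit shift and mask. On kernels that provide <linux/bitfield.h>, the same decode can be written with GENMASK()/FIELD_GET(); the mask names below are invented for illustration and are not part of the cxl driver:

#include <linux/bitfield.h>

#define CXL_VSEC_CAP_ID		GENMASK(15, 0)	/* hypothetical names */
#define CXL_VSEC_CAP_VER	GENMASK(19, 16)
#define CXL_VSEC_NEXT_PTR	GENMASK(31, 20)

	pci_read_config_dword(dev, vsec + 0x0, &val);
	show_reg("Cap ID", FIELD_GET(CXL_VSEC_CAP_ID, val));
	show_reg("Cap Ver", FIELD_GET(CXL_VSEC_CAP_VER, val));
	show_reg("Next Cap Ptr", FIELD_GET(CXL_VSEC_NEXT_PTR, val));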
Example #22
0
/*
 * Cache the initial value of both GPIO data registers
 */
static int rdc321x_gpio_probe(struct platform_device *pdev)
{
	int err;
	struct resource *r;
	struct rdc321x_gpio *rdc321x_gpio_dev;
	struct rdc321x_gpio_pdata *pdata;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data supplied\n");
		return -ENODEV;
	}

	rdc321x_gpio_dev = devm_kzalloc(&pdev->dev, sizeof(struct rdc321x_gpio),
					GFP_KERNEL);
	if (!rdc321x_gpio_dev)
		return -ENOMEM;

	r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg1");
	if (!r) {
		dev_err(&pdev->dev, "failed to get gpio-reg1 resource\n");
		return -ENODEV;
	}

	spin_lock_init(&rdc321x_gpio_dev->lock);
	rdc321x_gpio_dev->sb_pdev = pdata->sb_pdev;
	rdc321x_gpio_dev->reg1_ctrl_base = r->start;
	rdc321x_gpio_dev->reg1_data_base = r->start + 0x4;

	r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg2");
	if (!r) {
		dev_err(&pdev->dev, "failed to get gpio-reg2 resource\n");
		return -ENODEV;
	}

	rdc321x_gpio_dev->reg2_ctrl_base = r->start;
	rdc321x_gpio_dev->reg2_data_base = r->start + 0x4;

	rdc321x_gpio_dev->chip.label = "rdc321x-gpio";
	rdc321x_gpio_dev->chip.owner = THIS_MODULE;
	rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
	rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
	rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
	rdc321x_gpio_dev->chip.set = rdc_gpio_set_value;
	rdc321x_gpio_dev->chip.base = 0;
	rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios;

	platform_set_drvdata(pdev, rdc321x_gpio_dev);

	/* This might not be what others (BIOS, bootloader, etc.)
	   wrote to these registers before, but it's a good guess. Still
	   better than just using 0xffffffff. */
	err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
					rdc321x_gpio_dev->reg1_data_base,
					&rdc321x_gpio_dev->data_reg[0]);
	if (err)
		return err;

	err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev,
					rdc321x_gpio_dev->reg2_data_base,
					&rdc321x_gpio_dev->data_reg[1]);
	if (err)
		return err;

	dev_info(&pdev->dev, "registering %d GPIOs\n",
					rdc321x_gpio_dev->chip.ngpio);
	return devm_gpiochip_add_data(&pdev->dev, &rdc321x_gpio_dev->chip,
				      rdc321x_gpio_dev);
}
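
The two pci_read_config_dword() calls at the end seed data_reg[] so that later GPIO writes can read-modify-write the cached value instead of re-reading config space. A sketch of how a set-value callback might use that cache (shape assumed, not copied from the driver):

static void rdc_gpio_set_value_sketch(struct gpio_chip *chip,
				      unsigned int gpio, int value)
{
	struct rdc321x_gpio *gpch = gpiochip_get_data(chip);
	int reg = (gpio < 32) ? 0 : 1;	/* which cached data register */
	u32 base = reg ? gpch->reg2_data_base : gpch->reg1_data_base;
	unsigned long flags;

	spin_lock_irqsave(&gpch->lock, flags);
	if (value)
		gpch->data_reg[reg] |= BIT(gpio & 0x1f);
	else
		gpch->data_reg[reg] &= ~BIT(gpio & 0x1f);
	/* push the updated cache back through the southbridge's config space */
	pci_write_config_dword(gpch->sb_pdev, base, gpch->data_reg[reg]);
	spin_unlock_irqrestore(&gpch->lock, flags);
}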
Example #23
0
static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
{
	struct mem_ctl_info *mci;
	u8 dramcr;
	u32 eapr;
	u32 scrub_disabled;
	u32 sdram_refresh_rate;
	struct r82600_error_info discard;

	debugf0("%s()\n", __func__);
	pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
	pci_read_config_dword(pdev, R82600_EAP, &eapr);
	scrub_disabled = eapr & BIT(31);
	sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
	debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
		sdram_refresh_rate);
	debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
	mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0);

	if (mci == NULL)
		return -ENOMEM;

	debugf0("%s(): mci = %p\n", __func__, mci);
	mci->dev = &pdev->dev;
	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
	/* FIXME try to work out if the chip leads have been used for COM2
	 * instead on this board? [MA6?] MAYBE:
	 */

	/* On the R82600, the pins for memory bits 72:65 - i.e. the   *
	 * EC bits are shared with the pins for COM2 (!), so if COM2  *
	 * is enabled, we assume COM2 is wired up, and thus no EDAC   *
	 * is possible.                                               */
	mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;

	if (ecc_enabled(dramcr)) {
		if (scrub_disabled)
			debugf3("%s(): mci = %p - Scrubbing disabled! EAP: "
				"%#0x\n", __func__, mci, eapr);
	} else
		mci->edac_cap = EDAC_FLAG_NONE;

	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = R82600_REVISION;
	mci->ctl_name = "R82600";
	mci->dev_name = pci_name(pdev);
	mci->edac_check = r82600_check;
	mci->ctl_page_to_phys = NULL;
	r82600_init_csrows(mci, pdev, dramcr);
	r82600_get_error_info(mci, &discard);	/* clear counters */

	/* Here we assume that we will never see multiple instances of this
	 * type of memory controller.  The ID is therefore hardcoded to 0.
	 */
	if (edac_mc_add_mc(mci)) {
		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
		goto fail;
	}

	/* get this far and it's successful */

	if (disable_hardware_scrub) {
		debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n",
			__func__);
		pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31));
	}

	/* allocating generic PCI control info */
	r82600_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!r82600_pci) {
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n",
			__func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	debugf3("%s(): success\n", __func__);
	return 0;

fail:
	edac_mc_free(mci);
	return -ENODEV;
}
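
pci_write_bits32() above is the EDAC helper used to set BIT(31) (scrub disable) in R82600_EAP. Its effect, as relied on here, is a masked read-modify-write of a config dword; a minimal sketch of those semantics:

static void pci_write_bits32_sketch(struct pci_dev *pdev, int offset,
				    u32 value, u32 mask)
{
	u32 buf;

	pci_read_config_dword(pdev, offset, &buf);
	buf &= ~mask;			/* clear the selected bits... */
	buf |= value & mask;		/* ...and replace them with @value */
	pci_write_config_dword(pdev, offset, buf);
}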
Example #24
0
/*
========================================================================
Routine Description:
	Read a 32-bit value from the PCI config space.

Arguments:
	pDev			- PCI device
	Offset			- Register offset
	pValue			- Where the 32-bit value is returned

Return Value:
	Result of pci_read_config_dword()

Note:
========================================================================
*/
int RtmpOsPciConfigReadDWord(VOID *pDev, UINT32 Offset, UINT32 *pValue)
{
	return pci_read_config_dword((struct pci_dev *)pDev, Offset, pValue);
}
static int eeprom_write(struct et131x_adapter *etdev, u32 addr, u8 data)
{
	struct pci_dev *pdev = etdev->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int i2c_wack = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	/*
	 * For an EEPROM, an I2C single byte write is defined as a START
	 * condition followed by the device address, EEPROM address, one byte
	 * of data and a STOP condition.  The STOP condition will trigger the
	 * EEPROM's internally timed write cycle to the nonvolatile memory.
	 * All inputs are disabled during this write cycle and the EEPROM will
	 * not respond to any access until the internal write is complete.
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;

	/*
	 * 2. Write to the LBCIF Control Register:  bit 7=1, bit 6=1, bit 3=0,
	 *    and bits 1:0 both =0.  Bit 5 should be set according to the
	 *    type of EEPROM being accessed (1=two byte addressing, 0=one
	 *    byte addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	i2c_wack = 1;

	/* Prepare EEPROM address for Step 3 */

	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		/* Write the address to the LBCIF Address Register */
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;
		/*
		 * Write the data to the LBCIF Data Register (the I2C write
		 * will begin).
		 */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;
		/*
		 * Monitor bit 1:0 of the LBCIF Status Register.  When bits
		 * 1:0 are both equal to 1, the I2C write has completed and the
		 * internal write cycle of the EEPROM is about to start.
		 * (bits 1:0 = 01 is a legal state while waiting for both to
		 * become 1, but bits 1:0 = 10 is invalid and implies that
		 * something is broken).
		 */
		err = eeprom_wait_ready(pdev, &status);
		if (err < 0)
			return 0;

		/*
		 * Check bit 3 of the LBCIF Status Register.  If equal to 1,
		 * an error has occurred.  Don't break here if we are revision
		 * 1; this is so we do a blind write for the load bug.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR)
			&& etdev->pdev->revision == 0)
			break;

		/*
		 * Check bit 2 of the LBCIF Status Register.  If equal to 1 an
		 * ACK error has occurred on the address phase of the write.
		 * This could be due to an actual hardware failure or the
		 * EEPROM may still be in its internal write cycle from a
		 * previous write. This write operation was ignored and must be
		 * repeated later.
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			/*
			 * This could be due to an actual hardware failure
			 * or the EEPROM may still be in its internal write
			 * cycle from a previous write. This write operation
			 * was ignored and must be repeated later.
			 */
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	/*
	 * Set bit 6 of the LBCIF Control Register = 0.
	 */
	udelay(10);

	while (i2c_wack) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		/* Read until the internal ACK_ERROR goes away, meaning the
		 * write has completed
		 */
		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
					LBCIF_DATA_REGISTER, &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}
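
eeprom_wait_ready() is called throughout but not included in the excerpt. Based on the status-bit protocol described in the comments (bits 1:0 of the LBCIF Status Register both 1 when the interface is idle), a plausible polling loop might look like this; the register offset and poll count are invented for illustration:

#define LBCIF_STATUS_REGISTER	0xac	/* hypothetical offset */
#define MAX_NUM_REGISTER_POLLS	1000

static int eeprom_wait_ready_sketch(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		if (pci_read_config_dword(pdev, LBCIF_STATUS_REGISTER, &reg))
			return -EIO;
		/* bits 1:0 both set: I2C idle, safe to start a transfer */
		if ((reg & 0x3) == 0x3) {
			if (status)
				*status = reg;
			return 0;
		}
		udelay(10);
	}
	return -ETIMEDOUT;
}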
Example #26
0
static int wdt_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	unsigned char conf;
	int ret = -ENODEV;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		return -ENODEV;
	}

	/*
	 * Allocate a MMIO region which contains watchdog control register
	 * and counter, then configure the watchdog to use this region.
	 * This is possible only if PnP is properly enabled in BIOS.
	 * If not, the watchdog must be configured in BIOS manually.
	 */
	if (allocate_resource(&iomem_resource, &wdt_res, VIA_WDT_MMIO_LEN,
			      0xf0000000, 0xffffff00, 0xff, NULL, NULL)) {
		dev_err(&pdev->dev, "MMIO allocation failed\n");
		goto err_out_disable_device;
	}

	pci_write_config_dword(pdev, VIA_WDT_MMIO_BASE, wdt_res.start);
	pci_read_config_byte(pdev, VIA_WDT_CONF, &conf);
	conf |= VIA_WDT_CONF_ENABLE | VIA_WDT_CONF_MMIO;
	pci_write_config_byte(pdev, VIA_WDT_CONF, conf);

	pci_read_config_dword(pdev, VIA_WDT_MMIO_BASE, &mmio);
	if (mmio) {
		dev_info(&pdev->dev, "VIA Chipset watchdog MMIO: %x\n", mmio);
	} else {
		dev_err(&pdev->dev, "MMIO setting failed. Check BIOS.\n");
		goto err_out_resource;
	}

	if (!request_mem_region(mmio, VIA_WDT_MMIO_LEN, "via_wdt")) {
		dev_err(&pdev->dev, "MMIO region busy\n");
		goto err_out_resource;
	}

	wdt_mem = ioremap(mmio, VIA_WDT_MMIO_LEN);
	if (wdt_mem == NULL) {
		dev_err(&pdev->dev, "cannot remap VIA wdt MMIO registers\n");
		goto err_out_release;
	}

	if (timeout < 1 || timeout > WDT_TIMEOUT_MAX)
		timeout = WDT_TIMEOUT;

	wdt_dev.timeout = timeout;
	wdt_dev.parent = &pdev->dev;
	watchdog_set_nowayout(&wdt_dev, nowayout);
	if (readl(wdt_mem) & VIA_WDT_FIRED)
		wdt_dev.bootstatus |= WDIOF_CARDRESET;

	ret = watchdog_register_device(&wdt_dev);
	if (ret)
		goto err_out_iounmap;

	/* start triggering, in case of watchdog already enabled by BIOS */
	mod_timer(&timer, jiffies + WDT_HEARTBEAT);
	return 0;

err_out_iounmap:
	iounmap(wdt_mem);
err_out_release:
	release_mem_region(mmio, VIA_WDT_MMIO_LEN);
err_out_resource:
	release_resource(&wdt_res);
err_out_disable_device:
	pci_disable_device(pdev);
	return ret;
}
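
wdt_probe acquires its resources in a strict order (PCI enable, MMIO resource allocation, config-space setup, region request, ioremap, registration) and its error labels unwind them in reverse. The matching remove path is not in the excerpt; a sketch of the full unwind, assuming the globals used above:

static void wdt_remove(struct pci_dev *pdev)
{
	/* undo everything wdt_probe set up, in reverse order */
	watchdog_unregister_device(&wdt_dev);
	del_timer_sync(&timer);
	iounmap(wdt_mem);
	release_mem_region(mmio, VIA_WDT_MMIO_LEN);
	release_resource(&wdt_res);
	pci_disable_device(pdev);
}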
Example #27
0
static long ali_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	static const struct watchdog_info ident = {
		.options =		WDIOF_KEEPALIVEPING |
					WDIOF_SETTIMEOUT |
					WDIOF_MAGICCLOSE,
		.firmware_version =	0,
		.identity =		"ALi M1535 WatchDog Timer",
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;

	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);
	case WDIOC_SETOPTIONS:
	{
		int new_options, retval = -EINVAL;

		if (get_user(new_options, p))
			return -EFAULT;
		if (new_options & WDIOS_DISABLECARD) {
			ali_stop();
			retval = 0;
		}
		if (new_options & WDIOS_ENABLECARD) {
			ali_start();
			retval = 0;
		}
		return retval;
	}
	case WDIOC_KEEPALIVE:
		ali_keepalive();
		return 0;
	case WDIOC_SETTIMEOUT:
	{
		int new_timeout;
		if (get_user(new_timeout, p))
			return -EFAULT;
		if (ali_settimer(new_timeout))
			return -EINVAL;
		ali_keepalive();
		/* Fall through */
	}
	case WDIOC_GETTIMEOUT:
		return put_user(timeout, p);
	default:
		return -ENOTTY;
	}
}

/*
 *	ali_open	-	handle open of ali watchdog
 *	@inode: inode from VFS
 *	@file: file from VFS
 *
 *	Open the ALi watchdog device. Ensure only one person opens it
 *	at a time. Also start the watchdog running.
 */

static int ali_open(struct inode *inode, struct file *file)
{
	/* /dev/watchdog can only be opened once */
	if (test_and_set_bit(0, &ali_is_open))
		return -EBUSY;

	/* Activate */
	ali_start();
	return nonseekable_open(inode, file);
}

/*
 *	ali_release	-	close an ALi watchdog
 *	@inode: inode from VFS
 *	@file: file from VFS
 *
 *	Close the ALi watchdog device. Actual shutdown of the timer
 *	only occurs if the magic sequence has been set.
 */

static int ali_release(struct inode *inode, struct file *file)
{
	/*
	 *      Shut off the timer.
	 */
	if (ali_expect_release == 42)
		ali_stop();
	else {
		printk(KERN_CRIT PFX
				"Unexpected close, not stopping watchdog!\n");
		ali_keepalive();
	}
	clear_bit(0, &ali_is_open);
	ali_expect_release = 0;
	return 0;
}

/*
 *	ali_notify_sys	-	System down notifier
 *
 *	Notifier for system down
 */


static int ali_notify_sys(struct notifier_block *this,
					unsigned long code, void *unused)
{
	if (code == SYS_DOWN || code == SYS_HALT)
		ali_stop();		/* Turn the WDT off */
	return NOTIFY_DONE;
}

/*
 *	Data for PCI driver interface
 *
 *	This data only exists for exporting the supported
 *	PCI ids via MODULE_DEVICE_TABLE.  We do not actually
 *	register a pci_driver, because someone else might one day
 *	want to register another driver on the same PCI id.
 */

static DEFINE_PCI_DEVICE_TABLE(ali_pci_tbl) __used = {
	{ PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, ali_pci_tbl);

/*
 *	ali_find_watchdog	-	find a 1535 and 7101
 *
 *	Scans the PCI hardware for a 1535 series bridge and matching 7101
 *	watchdog device. This may be stricter than necessary, but it is
 *	better to be safe.
 */

static int __init ali_find_watchdog(void)
{
	struct pci_dev *pdev;
	u32 wdog;

	/* Check for a 1533/1535 series bridge */
	pdev = pci_get_device(PCI_VENDOR_ID_AL, 0x1535, NULL);
	if (pdev == NULL)
		pdev = pci_get_device(PCI_VENDOR_ID_AL, 0x1533, NULL);
	if (pdev == NULL)
		return -ENODEV;
	pci_dev_put(pdev);

	/* Check for the 7101 PMU */
	pdev = pci_get_device(PCI_VENDOR_ID_AL, 0x7101, NULL);
	if (pdev == NULL)
		return -ENODEV;

	if (pci_enable_device(pdev)) {
		pci_dev_put(pdev);
		return -EIO;
	}

	ali_pci = pdev;

	/*
	 *	Initialize the timer bits
	 */
	pci_read_config_dword(pdev, 0xCC, &wdog);

	/* Timer bits */
	wdog &= ~0x3F;
	/* Issued events */
	wdog &= ~((1 << 27)|(1 << 26)|(1 << 25)|(1 << 24));
	/* No monitor bits */
	wdog &= ~((1 << 16)|(1 << 13)|(1 << 12)|(1 << 11)|(1 << 10)|(1 << 9));

	pci_write_config_dword(pdev, 0xCC, wdog);

	return 0;
}
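
ali_settimer() is used by the ioctl and init paths but not shown. watchdog_init() below bounds the timeout at 18000 seconds, which suggests a counter that can tick in seconds, minutes, or five-minute units. A sketch consistent with those call sites (the bit encoding is an assumption, not taken from the 7101 datasheet):

static unsigned long ali_timeout_bits;	/* cached count plus scale bits */

static int ali_settimer_sketch(int t)
{
	if (t < 0)
		return -EINVAL;
	else if (t < 60)
		ali_timeout_bits = t | (1 << 6);		/* 1 s units */
	else if (t < 3600)
		ali_timeout_bits = (t / 60) | (1 << 7);		/* 1 min units */
	else if (t < 18000)
		ali_timeout_bits = (t / 300) | (1 << 6) | (1 << 7); /* 5 min units */
	else
		return -EINVAL;

	timeout = t;
	return 0;
}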

/*
 *	Kernel Interfaces
 */

static const struct file_operations ali_fops = {
	.owner		=	THIS_MODULE,
	.llseek		=	no_llseek,
	.write		=	ali_write,
	.unlocked_ioctl =	ali_ioctl,
	.open		=	ali_open,
	.release	=	ali_release,
};

static struct miscdevice ali_miscdev = {
	.minor =	WATCHDOG_MINOR,
	.name =		"watchdog",
	.fops =		&ali_fops,
};

static struct notifier_block ali_notifier = {
	.notifier_call =	ali_notify_sys,
};

/*
 *	watchdog_init	-	module initialiser
 *
 *	Scan for a suitable watchdog and if so initialize it. Return an error
 *	if we cannot, the error causes the module to unload
 */

static int __init watchdog_init(void)
{
	int ret;

	/* Check whether or not the hardware watchdog is there */
	if (ali_find_watchdog() != 0)
		return -ENODEV;

	/* Check that the timeout value is within its range;
	   if not reset to the default */
	if (timeout < 1 || timeout >= 18000) {
		timeout = WATCHDOG_TIMEOUT;
		printk(KERN_INFO PFX
		     "timeout value must be 0 < timeout < 18000, using %d\n",
							timeout);
	}

	/* Calculate the watchdog's timeout */
	ali_settimer(timeout);

	ret = register_reboot_notifier(&ali_notifier);
	if (ret != 0) {
		printk(KERN_ERR PFX
			"cannot register reboot notifier (err=%d)\n", ret);
		goto out;
	}

	ret = misc_register(&ali_miscdev);
	if (ret != 0) {
		printk(KERN_ERR PFX
			"cannot register miscdev on minor=%d (err=%d)\n",
						WATCHDOG_MINOR, ret);
		goto unreg_reboot;
	}

	printk(KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d)\n",
		timeout, nowayout);

out:
	return ret;
unreg_reboot:
	unregister_reboot_notifier(&ali_notifier);
	goto out;
}

/*
 *	watchdog_exit	-	module de-initialiser
 *
 *	Called while unloading a successfully installed watchdog module.
 */

static void __exit watchdog_exit(void)
{
	/* Stop the timer before we leave */
	ali_stop();

	/* Deregister */
	misc_deregister(&ali_miscdev);
	unregister_reboot_notifier(&ali_notifier);
	pci_dev_put(ali_pci);
}
static int __devinit mvs_64xx_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;

	if (mvi->pdev && mvi->pdev->revision == 0)
		mvi->flags |= MVF_PHY_PWR_FIX;
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		mvs_show_pcie_usage(mvi);
		tmp = mvs_64xx_chip_reset(mvi);
		if (tmp)
			return tmp;
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_PHY_DSBL;
		mw32(MVS_PHY_CTL, tmp);
	}

	/* make sure RST is set; the HBA reset should have done that for us */
	cctl = mr32(MVS_CTL) & 0xFFFF;
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(MVS_CTL, cctl | CCTL_RST);

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		/* set the PRD request size in the device control register */
		pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
		tmp &= ~PRD_REQ_MASK;
		tmp |= PRD_REQ_SIZE;
		pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);

		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
		tmp &= ~PCTL_PWR_OFF;
		tmp &= ~PCTL_PHY_DSBL;
		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
		tmp &= PCTL_PWR_OFF;
		tmp &= ~PCTL_PHY_DSBL;
		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_COM_ON;
		tmp &= ~PCTL_PHY_DSBL;
		tmp |= PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
		tmp &= ~PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
	}

	/* reset control */
	mw32(MVS_PCS, 0);
	/* init phys */
	mvs_64xx_phy_hacks(mvi);

	tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
	tmp &= 0x0000ffff;
	tmp |= 0x00fa0000;
	mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);

	/* enable auto port detection */
	mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);

	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(MVS_TX_LO, mvi->tx_dma);
	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
	mw32(MVS_RX_LO, mvi->rx_dma);
	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* program each phy's SAS address */
		mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
				cpu_to_be64(mvi->phy[i].dev_sas_addr));

		mvs_64xx_enable_xmt(mvi, i);

		mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET);
		msleep(500);
		mvs_64xx_detect_porttype(mvi, i);
	}
	if (mvi->flags & MVF_FLAG_SOC) {
		/* set select registers */
		writel(0x0E008000, regs + 0x000);
		writel(0x59000008, regs + 0x004);
		writel(0x20, regs + 0x008);
		writel(0x20, regs + 0x00c);
		writel(0x20, regs + 0x010);
		writel(0x20, regs + 0x014);
		writel(0x20, regs + 0x018);
		writel(0x20, regs + 0x01c);
	}
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy interrupt status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set the phy interrupt mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
			PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
			PHYEV_DEC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
	}

	/* little endian for open address and command table, etc. */
	cctl = mr32(MVS_CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl |= CCTL_ENDIAN_DATA;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(MVS_CTL, cctl);

	/* reset the command queue */
	tmp = mr32(MVS_PCS);
	tmp |= PCS_CMD_RST;
	tmp &= ~PCS_SELF_CLEAR;
	mw32(MVS_PCS, tmp);
	tmp = 0;
	if (MVS_CHIP_SLOT_SZ > 0x1ff)
		mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
	else
		mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);

	tmp = 0x10000 | interrupt_coalescing;
	mw32(MVS_INT_COAL_TMOUT, tmp);

	/* start the TX and RX rings */
	mw32(MVS_TX_CFG, 0);
	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
		PCS_CMD_EN | PCS_CMD_STOP_ERR);

	/* enable completion queue interrupts */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
		CINT_DMA_PCIE);

	mw32(MVS_INT_MASK, tmp);

	/* enable SRS interrupts */
	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);

	return 0;
}
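
The repeated (x >> 16) >> 16 writes above are the portable way to take the upper 32 bits of a dma_addr_t: the type may be only 32 bits wide, and a direct >> 32 on a 32-bit value is undefined behaviour. The kernel's lower_32_bits()/upper_32_bits() helpers encapsulate the same trick; a small wrapper for the lo/hi register pairs used here (name invented):

static inline void mvs_write_dma_pair(void __iomem *regs,
				      u32 lo_off, u32 hi_off, dma_addr_t addr)
{
	writel(lower_32_bits(addr), regs + lo_off);
	/* evaluates to 0 when dma_addr_t is 32 bits wide */
	writel(upper_32_bits(addr), regs + hi_off);
}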
Example #29
0
static void mid_get_fuse_settings(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
	uint32_t fuse_value = 0;
	uint32_t fuse_value_tmp = 0;

#define FB_REG06 0xD0810600
#define FB_MIPI_DISABLE  (1 << 11)
#define FB_REG09 0xD0810900
#define FB_SKU_MASK  0x7000
#define FB_SKU_SHIFT 12
#define FB_SKU_100 0
#define FB_SKU_100L 1
#define FB_SKU_83 2
	if (pci_root == NULL) {
		WARN_ON(1);
		return;
	}

	pci_write_config_dword(pci_root, 0xD0, FB_REG06);
	pci_read_config_dword(pci_root, 0xD4, &fuse_value);

	/* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
	if (IS_MRST(dev))
		dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;

	DRM_INFO("internal display is %s\n",
		 dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");

	/* Prevent runtime suspend at start */
	if (dev_priv->iLVDS_enable) {
		dev_priv->is_lvds_on = true;
		dev_priv->is_mipi_on = false;
	} else {
		dev_priv->is_mipi_on = true;
		dev_priv->is_lvds_on = false;
	}

	dev_priv->video_device_fuse = fuse_value;

	pci_write_config_dword(pci_root, 0xD0, FB_REG09);
	pci_read_config_dword(pci_root, 0xD4, &fuse_value);

	dev_dbg(dev->dev, "SKU value is 0x%x\n", fuse_value);
	fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;

	dev_priv->fuse_reg_value = fuse_value;

	switch (fuse_value_tmp) {
	case FB_SKU_100:
		dev_priv->core_freq = 200;
		break;
	case FB_SKU_100L:
		dev_priv->core_freq = 100;
		break;
	case FB_SKU_83:
		dev_priv->core_freq = 166;
		break;
	default:
		dev_warn(dev->dev, "Invalid SKU value, SKU value = 0x%08x\n",
								fuse_value_tmp);
		dev_priv->core_freq = 0;
	}
	dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
	pci_dev_put(pci_root);
}
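
Both fuse reads above go through the same two-step window: the target register is written to config offset 0xD0 on the bus 0, device 0 host bridge, and the data comes back through offset 0xD4. Factored out (helper name invented):

static u32 mid_fuse_read32(struct pci_dev *pci_root, u32 reg)
{
	u32 val;

	/* select the register through the address window, then read it */
	pci_write_config_dword(pci_root, 0xD0, reg);
	pci_read_config_dword(pci_root, 0xD4, &val);
	return val;
}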
Example #30
0
/* called during probe() after chip reset completes */
static int ehci_pci_setup(struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
	struct pci_dev		*p_smbus;
	u8			rev;
	u32			temp;
	int			retval;

	switch (pdev->vendor) {
	case PCI_VENDOR_ID_TOSHIBA_2:
		/* celleb's companion chip */
		if (pdev->device == 0x01b5) {
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
			ehci->big_endian_mmio = 1;
#else
			ehci_warn(ehci,
				  "unsupported big endian Toshiba quirk\n");
#endif
		}
		break;
	}

	ehci->caps = hcd->regs;
	ehci->regs = hcd->regs +
		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));

	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

	/* ehci_init() causes memory for DMA transfers to be
	 * allocated.  Thus, any vendor-specific workarounds based on
	 * limiting the type of memory used for DMA transfers must
	 * happen before ehci_init() is called. */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_NVIDIA:
		/* NVidia reports that certain chips don't handle
		 * QH, ITD, or SITD addresses above 2GB.  (But TD,
		 * data buffer, and periodic schedule are normal.)
		 */
		switch (pdev->device) {
		case 0x003c:	/* MCP04 */
		case 0x005b:	/* CK804 */
		case 0x00d8:	/* CK8 */
		case 0x00e8:	/* CK8S */
			if (pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(31)) < 0)
				ehci_warn(ehci, "can't enable NVidia "
					"workaround for >2GB RAM\n");
			break;
		}
		break;
	}

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);

	retval = ehci_halt(ehci);
	if (retval)
		return retval;

	if ((pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x7808) ||
	    (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x4396)) {
		/* EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
		 * read/write memory space which does not belong to it when
		 * there is NULL pointer with T-bit set to 1 in the frame list
		 * table. To avoid the issue, the frame list link pointer
		 * should always contain a valid pointer to an inactive qh.
		 */
		ehci->use_dummy_qh = 1;
		ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI "
				"dummy qh workaround\n");
	}

	/* data structure init */
	retval = ehci_init(hcd);
	if (retval)
		return retval;

	switch (pdev->vendor) {
	case PCI_VENDOR_ID_NEC:
		ehci->need_io_watchdog = 0;
		break;
	case PCI_VENDOR_ID_INTEL:
		ehci->need_io_watchdog = 0;
		ehci->fs_i_thresh = 1;
		if (pdev->device == 0x27cc) {
			ehci->broken_periodic = 1;
			ehci_info(ehci, "using broken periodic workaround\n");
		}
		if (pdev->device == 0x0806 || pdev->device == 0x0811
				|| pdev->device == 0x0829) {
			ehci_info(ehci, "disable lpm for langwell/penwell\n");
			ehci->has_lpm = 0;
		}
		if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB) {
			hcd->has_tt = 1;
			tdi_reset(ehci);
		}
		if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
			/* EHCI #1 or #2 on 6 Series/C200 Series chipset */
			if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
				ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
				hcd->broken_pci_sleep = 1;
				device_set_wakeup_capable(&pdev->dev, false);
			}
		}
		break;
	case PCI_VENDOR_ID_TDI:
		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
			hcd->has_tt = 1;
			tdi_reset(ehci);
		}
		break;
	case PCI_VENDOR_ID_AMD:
		/* AMD PLL quirk */
		if (usb_amd_find_chipset_info())
			ehci->amd_pll_fix = 1;
		/* AMD8111 EHCI doesn't work, according to AMD errata */
		if (pdev->device == 0x7463) {
			ehci_info(ehci, "ignoring AMD8111 (errata)\n");
			retval = -EIO;
			goto done;
		}
		break;
	case PCI_VENDOR_ID_NVIDIA:
		switch (pdev->device) {
		/* Some NForce2 chips have problems with selective suspend;
		 * fixed in newer silicon.
		 */
		case 0x0068:
			if (pdev->revision < 0xa4)
				ehci->no_selective_suspend = 1;
			break;

		/* MCP89 chips on the MacBookAir3,1 give EPROTO when
		 * fetching device descriptors unless LPM is disabled.
		 * There are also intermittent problems enumerating
		 * devices with PPCD enabled.
		 */
		case 0x0d9d:
			ehci_info(ehci, "disable lpm/ppcd for nvidia mcp89");
			ehci->has_lpm = 0;
			ehci->has_ppcd = 0;
			ehci->command &= ~CMD_PPCEE;
			break;
		}
		break;
	case PCI_VENDOR_ID_VIA:
		if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) {
			u8 tmp;

			/* The VT6212 defaults to a 1 usec EHCI sleep time which
			 * hogs the PCI bus *badly*. Setting bit 5 of 0x4B makes
			 * that sleep time use the conventional 10 usec.
			 */
			pci_read_config_byte(pdev, 0x4b, &tmp);
			if (tmp & 0x20)
				break;
			pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
		}
		break;
	case PCI_VENDOR_ID_ATI:
		/* AMD PLL quirk */
		if (usb_amd_find_chipset_info())
			ehci->amd_pll_fix = 1;
		/* SB600 and old version of SB700 have a bug in EHCI controller,
		 * which causes usb devices lose response in some cases.
		 */
		if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) {
			p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
						 PCI_DEVICE_ID_ATI_SBX00_SMBUS,
						 NULL);
			if (!p_smbus)
				break;
			rev = p_smbus->revision;
			if ((pdev->device == 0x4386) || (rev == 0x3a)
			    || (rev == 0x3b)) {
				u8 tmp;
				ehci_info(ehci, "applying AMD SB600/SB700 USB "
					"freeze workaround\n");
				pci_read_config_byte(pdev, 0x53, &tmp);
				pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
			}
			pci_dev_put(p_smbus);
		}
		break;
	case PCI_VENDOR_ID_NETMOS:
		/* MosChip frame-index-register bug */
		ehci_info(ehci, "applying MosChip frame-index workaround\n");
		ehci->frame_index_bug = 1;
		break;
	}

	/* optional debug port, normally in the first BAR */
	temp = pci_find_capability(pdev, 0x0a);
	if (temp) {
		pci_read_config_dword(pdev, temp, &temp);
		temp >>= 16;
		if ((temp & (3 << 13)) == (1 << 13)) {
			temp &= 0x1fff;
			ehci->debug = ehci_to_hcd(ehci)->regs + temp;
			temp = ehci_readl(ehci, &ehci->debug->control);
			ehci_info(ehci, "debug port %d%s\n",
				HCS_DEBUG_PORT(ehci->hcs_params),
				(temp & DBGP_ENABLED)
					? " IN USE"
					: "");
			if (!(temp & DBGP_ENABLED))
				ehci->debug = NULL;
		}
	}

	ehci_reset(ehci);

	/* at least the Genesys GL880S needs fixup here */
	temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
	temp &= 0x0f;
	if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) {
		ehci_dbg(ehci, "bogus port configuration: "
			"cc=%d x pcc=%d < ports=%d\n",
			HCS_N_CC(ehci->hcs_params),
			HCS_N_PCC(ehci->hcs_params),
			HCS_N_PORTS(ehci->hcs_params));

		switch (pdev->vendor) {
		case 0x17a0:		/* GENESYS */
			/* GL880S: should be PORTS=2 */
			temp |= (ehci->hcs_params & ~0xf);
			ehci->hcs_params = temp;
			break;
		case PCI_VENDOR_ID_NVIDIA:
			/* NF4: should be PCC=10 */
			break;
		}
	}

	/* Serial Bus Release Number is at PCI 0x60 offset */
	pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
	if (pdev->vendor == PCI_VENDOR_ID_STMICRO
	    && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST)
		ehci->sbrn = 0x20; /* ConneXT has no sbrn register */

	/* Keep this around for a while just in case some EHCI
	 * implementation uses legacy PCI PM support.  This test
	 * can be removed on 17 Dec 2009 if the dev_warn() hasn't
	 * been triggered by then.
	 */
	if (!device_can_wakeup(&pdev->dev)) {
		u16	port_wake;

		pci_read_config_word(pdev, 0x62, &port_wake);
		if (port_wake & 0x0001) {
			dev_warn(&pdev->dev, "Enabling legacy PCI PM\n");
			device_set_wakeup_capable(&pdev->dev, 1);
		}
	}

#ifdef	CONFIG_USB_SUSPEND
	/* REVISIT: the controller works fine for wakeup iff the root hub
	 * itself is "globally" suspended, but usbcore currently doesn't
	 * understand such things.
	 *
	 * System suspend currently expects to be able to suspend the entire
	 * device tree, device-at-a-time.  If we failed selective suspend
	 * reports, system suspend would fail; so the root hub code must claim
	 * success.  That's lying to usbcore, and it matters for runtime
	 * PM scenarios with selective suspend and remote wakeup...
	 */
	if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev))
		ehci_warn(ehci, "selective suspend/wakeup unavailable\n");
#endif

	ehci_port_power(ehci, 1);
	retval = ehci_pci_reinit(ehci, pdev);
done:
	return retval;
}
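
The debug-port probe near the end reuses one variable for the whole decode; spelled out, the capability dword found via pci_find_capability(pdev, 0x0a) carries a BAR selector and an offset in its upper half. A commented restatement of the same decode, under the layout the code above assumes:

	u32 cap;
	int pos = pci_find_capability(pdev, 0x0a);	/* Debug Port capability */

	if (pos) {
		pci_read_config_dword(pdev, pos, &cap);
		cap >>= 16;				/* fields live in the upper word */
		if ((cap & (3 << 13)) == (1 << 13)) {	/* registers sit in the first BAR */
			cap &= 0x1fff;			/* byte offset within that BAR */
			/* the debug registers are then at hcd->regs + cap */
		}
	}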