Example #1
/**
 * aer_service_init - register AER root service driver
 *
 * Invoked when AER root service driver is loaded.
 **/
static int __init aer_service_init(void)
{
	if (pcie_aer_disable)
		return -ENXIO;
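	/* pci_msi_enabled() is false when MSI support is compiled out or has
	 * been disabled globally (e.g. via pci=nomsi); skip AER in that case. */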
	if (!pci_msi_enabled())
		return -ENXIO;
	return pcie_port_service_register(&aerdriver);
}
Example #2
File: msi.c  Project: Moretti0/gg
void imx_msi_init(void)
{
	if (pci_msi_enabled()) {
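		/* Only wire up PCIe MSI handling when kernel MSI support is
		 * enabled: MXC_INT_PCIE_0 is demultiplexed by the chained MSI
		 * handler, while MXC_INT_PCIE_0B is set up as a simple IRQ. */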
		int irq = MXC_INT_PCIE_0B;
		irq_set_chained_handler(MXC_INT_PCIE_0, imx_msi_handler);

		irq_set_chip_and_handler(irq, &intd_irq_chip, handle_simple_irq);
		set_irq_flags(irq, IRQF_VALID);
	}
}
Example #3
/*
 * WDT timer: started when the guest OS starts its watchdog service and
 * re-armed on every kick/ping. On timeout it triggers a reboot or another
 * configured action for the guest OS.
 */
static void
wdt_expired_handler(void *arg, uint64_t nexp)
{
	struct pci_vdev *dev = (struct pci_vdev *)arg;

	DPRINTF("wdt timer out! stage=%d, reboot=%d\n",
		wdt_state.stage, wdt_state.reboot_enabled);

	if (wdt_state.stage == 1) {
		if (wdt_state.intr_enabled) {
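			/* Signal the guest via MSI when MSI is enabled on the
			 * virtual device; otherwise assert the legacy INTx line. */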

			if (pci_msi_enabled(dev))
				pci_generate_msi(dev, 0);
			else
				pci_lintr_assert(dev);

			wdt_state.intr_active = true;
		}

		wdt_state.stage = 2;
		start_wdt_timer();
	} else {
		if (wdt_state.reboot_enabled) {
			wdt_state.stage = 1;
			wdt_timeout = 1;

			/* watchdog timed out; request a full reset of the guest (UOS) */
			vm_set_suspend_mode(VM_SUSPEND_FULL_RESET);
			mevent_notify();
		} else {
			/* reboot is not required; simply re-arm the timer */
			wdt_state.stage = 1;
			start_wdt_timer();
		}
	}
}
Example #4
static int __devinit ddb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct ddb *dev;
	int stat = 0;
	int irq_flag = IRQF_SHARED;

	if (pci_enable_device(pdev) < 0)
		return -ENODEV;

	dev = vzalloc(sizeof(struct ddb));
	if (dev == NULL)
		return -ENOMEM;

	dev->has_dma = 1;
	dev->pdev = pdev;
	dev->dev = &pdev->dev;
	pci_set_drvdata(pdev, dev);

	dev->ids.vendor = id->vendor;
	dev->ids.device = id->device;
	dev->ids.subvendor = id->subvendor;
	dev->ids.subdevice = id->subdevice;

	dev->info = (struct ddb_info *) id->driver_data;
	pr_info("DDBridge driver detected: %s\n", dev->info->name);

	dev->regs_len = pci_resource_len(dev->pdev, 0);
	dev->regs = ioremap(pci_resource_start(dev->pdev, 0),
			    pci_resource_len(dev->pdev, 0));
	if (!dev->regs) {
		pr_err("DDBridge: not enough memory for register map\n");
		stat = -ENOMEM;
		goto fail;
	}
	if (ddbreadl(dev, 0) == 0xffffffff) {
		pr_err("DDBridge: cannot read registers\n");
		stat = -ENODEV;
		goto fail;
	}

	dev->ids.hwid = ddbreadl(dev, 0);
	dev->ids.regmapid = ddbreadl(dev, 4);

	pr_info("HW %08x REGMAP %08x\n",
		dev->ids.hwid, dev->ids.regmapid);

	ddbwritel(dev, 0x00000000, INTERRUPT_ENABLE);
	ddbwritel(dev, 0x00000000, MSI1_ENABLE);
	ddbwritel(dev, 0x00000000, MSI2_ENABLE);
	ddbwritel(dev, 0x00000000, MSI3_ENABLE);
	ddbwritel(dev, 0x00000000, MSI4_ENABLE);
	ddbwritel(dev, 0x00000000, MSI5_ENABLE);
	ddbwritel(dev, 0x00000000, MSI6_ENABLE);
	ddbwritel(dev, 0x00000000, MSI7_ENABLE);

#ifdef CONFIG_PCI_MSI
	if (msi && pci_msi_enabled()) {
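		/* Try two MSI vectors first; fall back to a single MSI, and
		 * finally to the shared legacy IRQ if MSI is unavailable. */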
		stat = pci_enable_msi_block(dev->pdev, 2);
		if (stat == 0) {
			dev->msi = 1;
			pr_info("DDBrige using 2 MSI interrupts\n");
		}
		if (stat == 1)
			stat = pci_enable_msi(dev->pdev);
		if (stat < 0) {
			pr_info(": MSI not available.\n");
		} else {
			irq_flag = 0;
			dev->msi++;
		}
	}
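	/* With two MSI vectors, register a dedicated handler per vector;
	 * otherwise fall back to the single handler requested below. */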
	if (dev->msi == 2) {
		stat = request_irq(dev->pdev->irq, irq_handler0,
				   irq_flag, "ddbridge", (void *) dev);
		if (stat < 0)
			goto fail0;
		stat = request_irq(dev->pdev->irq + 1, irq_handler1,
				   irq_flag, "ddbridge", (void *) dev);
		if (stat < 0) {
			free_irq(dev->pdev->irq, dev);
			goto fail0;
		}
	} else
#endif
	{
#ifdef DDB_TEST_THREADED
		stat = request_threaded_irq(dev->pdev->irq, irq_handler,
					    irq_thread,
					    irq_flag,
					    "ddbridge", (void *) dev);
#else
		stat = request_irq(dev->pdev->irq, irq_handler,
				   irq_flag, "ddbridge", (void *) dev);
#endif
		if (stat < 0)
			goto fail0;
	}
	ddbwritel(dev, 0, DMA_BASE_READ);
	if (dev->info->type != DDB_MOD)
		ddbwritel(dev, 0, DMA_BASE_WRITE);

	/*ddbwritel(dev, 0xffffffff, INTERRUPT_ACK);*/
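	/* Enable device interrupts; the enable masks differ depending on
	 * whether two MSI vectors or a single interrupt line are in use. */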
	if (dev->msi == 2) {
		ddbwritel(dev, 0x0fffff00, INTERRUPT_ENABLE);
		ddbwritel(dev, 0x0000000f, MSI1_ENABLE);
	} else {
		ddbwritel(dev, 0x0fffff0f, INTERRUPT_ENABLE);
		ddbwritel(dev, 0x00000000, MSI1_ENABLE);
	}
	if (ddb_i2c_init(dev) < 0)
		goto fail1;
	ddb_ports_init(dev);
	if (ddb_buffers_alloc(dev) < 0) {
		pr_info(": Could not allocate buffer memory\n");
		goto fail2;
	}
	if (ddb_ports_attach(dev) < 0)
		goto fail3;

	/* ignore if this fails */
	ddb_device_create(dev);

	if (dev->info->fan_num) {
		ddbwritel(dev, 1, GPIO_DIRECTION);
		ddbwritel(dev, 1, GPIO_OUTPUT);
	}
	if (dev->info->type == DDB_MOD)
		ddbridge_mod_init(dev);

	return 0;

fail3:
	ddb_ports_detach(dev);
	pr_err("fail3\n");
	ddb_ports_release(dev);
fail2:
	pr_err("fail2\n");
	ddb_buffers_free(dev);
	ddb_i2c_release(dev);
fail1:
	pr_err("fail1\n");
	ddbwritel(dev, 0, INTERRUPT_ENABLE);
	ddbwritel(dev, 0, MSI1_ENABLE);
	free_irq(dev->pdev->irq, dev);
	if (dev->msi == 2)
		free_irq(dev->pdev->irq + 1, dev);
fail0:
	pr_err("fail0\n");
	if (dev->msi)
		pci_disable_msi(dev->pdev);
fail:
	pr_err("fail\n");
	ddb_unmap(dev);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	return -1;
}
Example #5
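/*
 * pci_aer_available - native AER is usable only when AER has not been
 * disabled and MSI support is enabled.
 */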
bool pci_aer_available(void)
{
	return !pcie_aer_disable && pci_msi_enabled();
}
Example #6
static int
passthru_cfgwrite(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
		  int coff, int bytes, uint32_t val)
{
	int error, msix_table_entries, i;
	struct passthru_softc *sc;

	sc = pi->pi_arg;

	/*
	 * PCI BARs are emulated
	 */
	if (bar_access(coff))
		return (-1);

	/*
	 * MSI capability is emulated
	 */
	if (msicap_access(sc, coff)) {
		msicap_cfgwrite(pi, sc->psc_msi.capoff, coff, bytes, val);

		error = vm_setup_pptdev_msi(ctx, vcpu, sc->psc_sel.pc_bus,
			sc->psc_sel.pc_dev, sc->psc_sel.pc_func,
			pi->pi_msi.addr, pi->pi_msi.msg_data,
			pi->pi_msi.maxmsgnum);
		if (error != 0) {
			printf("vm_setup_pptdev_msi error %d\r\n", errno);
			exit(1);
		}
		return (0);
	}

	/*
	 * MSI-X capability is emulated
	 */
	if (msixcap_access(sc, coff)) {
		msixcap_cfgwrite(pi, sc->psc_msix.capoff, coff, bytes, val);
		if (pi->pi_msix.enabled) {
			msix_table_entries = pi->pi_msix.table_count;
			for (i = 0; i < msix_table_entries; i++) {
				error = vm_setup_pptdev_msix(ctx, vcpu,
				    sc->psc_sel.pc_bus, sc->psc_sel.pc_dev,
				    sc->psc_sel.pc_func, i,
				    pi->pi_msix.table[i].addr,
				    pi->pi_msix.table[i].msg_data,
				    pi->pi_msix.table[i].vector_control);

				if (error) {
					printf("vm_setup_pptdev_msix error "
					    "%d\r\n", errno);
					exit(1);
				}
			}
		}
		return (0);
	}

#ifdef LEGACY_SUPPORT
	/*
	 * If this device does not support MSI natively then we cannot let
	 * the guest disable legacy interrupts from the device. It is the
	 * legacy interrupt that is triggering the virtual MSI to the guest.
	 */
	if (sc->psc_msi.emulated && pci_msi_enabled(pi)) {
		if (coff == PCIR_COMMAND && bytes == 2)
			val &= ~PCIM_CMD_INTxDIS;
	}
#endif

	write_config(&sc->psc_sel, coff, bytes, val);

	return (0);
}