Example #1
static int mei_txe_pci_resume(struct device *device)
{
    struct pci_dev *pdev = to_pci_dev(device);
    struct mei_device *dev;
    int err;

    dev = pci_get_drvdata(pdev);
    if (!dev)
        return -ENODEV;

    pci_enable_msi(pdev);

    mei_clear_interrupts(dev);

    /* request and enable interrupt */
    if (pci_dev_msi_enabled(pdev))
        err = request_threaded_irq(pdev->irq,
                                   NULL,
                                   mei_txe_irq_thread_handler,
                                   IRQF_ONESHOT, KBUILD_MODNAME, dev);
    else
        err = request_threaded_irq(pdev->irq,
                                   mei_txe_irq_quick_handler,
                                   mei_txe_irq_thread_handler,
                                   IRQF_SHARED, KBUILD_MODNAME, dev);
    if (err) {
        dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
                pdev->irq);
        return err;
    }

    err = mei_restart(dev);

    return err;
}
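
All of these registration sites follow the same convention: when MSI is
enabled the vector belongs to this device alone, so the hard-IRQ (quick)
handler can be NULL and IRQF_ONESHOT keeps the interrupt masked until the
threaded handler finishes; with legacy INTx the line may be shared, so a
quick handler must claim or reject each interrupt and IRQF_SHARED is
required. A minimal sketch of that choice (my_setup_irq and the two
handler names are hypothetical):

/* Sketch: pick handlers and flags according to the MSI state. */
static int my_setup_irq(struct pci_dev *pdev, void *ctx)
{
	if (pci_dev_msi_enabled(pdev))
		/* MSI: the vector is not shared; a thread-only handler
		 * with IRQF_ONESHOT is sufficient.
		 */
		return request_threaded_irq(pdev->irq, NULL,
					    my_irq_thread_handler,
					    IRQF_ONESHOT, KBUILD_MODNAME, ctx);

	/* INTx: the line may be shared; the quick handler must return
	 * IRQ_NONE for interrupts raised by other devices.
	 */
	return request_threaded_irq(pdev->irq, my_irq_quick_handler,
				    my_irq_thread_handler,
				    IRQF_SHARED, KBUILD_MODNAME, ctx);
}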
Example #2
static void ndev_deinit_isr(struct amd_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	void __iomem *mmio = ndev->self_mmio;
	int i;

	pdev = ndev->ntb.pdev;

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	writel(ndev->db_mask, mmio + AMD_DBMASK_OFFSET);

	if (ndev->msix) {
		i = ndev->msix_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
		else
			pci_intx(pdev, 0);
	}
}
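
Note the teardown order here: all doorbell interrupts are masked first,
then each MSI-X vector is freed in reverse order before pci_disable_msix();
only in the single-vector fallback does pci_dev_msi_enabled() decide
between pci_disable_msi() and masking the legacy line with pci_intx(pdev, 0).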
Example #3
/* IRQ handler */
static irqreturn_t pciefd_irq_handler(int irq, void *arg)
{
	struct pciefd_can *priv = arg;
	struct pciefd_rx_dma *rx_dma = priv->rx_dma_vaddr;

	/* INTA mode only to sync with PCIe transaction */
	if (!pci_dev_msi_enabled(priv->board->pci_dev))
		(void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1);

	/* read IRQ status from the first 32-bits of the Rx DMA area */
	priv->irq_status = le32_to_cpu(rx_dma->irq_status);

	/* check if this (shared) IRQ is for this CAN */
	if (pciefd_irq_tag(priv->irq_status) != priv->irq_tag)
		return IRQ_NONE;

	/* handle rx messages (if any) */
	peak_canfd_handle_msgs_list(&priv->ucan,
				    rx_dma->msg,
				    pciefd_irq_rx_cnt(priv->irq_status));

	/* handle tx link interrupt (if any) */
	if (pciefd_irq_is_lnk(priv->irq_status)) {
		unsigned long flags;

		spin_lock_irqsave(&priv->tx_lock, flags);
		priv->tx_pages_free++;
		spin_unlock_irqrestore(&priv->tx_lock, flags);

		/* wake producer up (only if enough room in echo_skb array) */
		spin_lock_irqsave(&priv->ucan.echo_lock, flags);
		if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx])
			netif_wake_queue(priv->ucan.ndev);

		spin_unlock_irqrestore(&priv->ucan.echo_lock, flags);
	}

	/* re-enable Rx DMA transfer for this CAN */
	pciefd_can_ack_rx_dma(priv);

	return IRQ_HANDLED;
}
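
The dummy register read at the top of this handler is worth noting: under
INTA a read is a non-posted PCIe transaction, so it presumably forces the
device's earlier posted DMA writes (including irq_status in the Rx DMA
area) to be visible before the handler inspects them. Under MSI this is
unnecessary, since the MSI message is itself a posted write ordered after
the DMA data.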
Example #4
static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
				pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
	dev->dev_state = MEI_DEV_POWER_UP;
	mei_clear_interrupts(dev);
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}
Example #5
static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	unsigned int irqflags;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	/* request and enable interrupt */
	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
				pdev->irq);
		return err;
	}

	err = mei_restart(dev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;
}
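
Example #5 is the consolidated form of Example #4: a single
request_threaded_irq() call where only the flags vary (IRQF_ONESHOT under
MSI, IRQF_SHARED under INTx) and the quick handler is passed
unconditionally. For this to work the quick handler has to behave in both
modes, roughly like the hypothetical sketch below:

/* Hypothetical quick handler for the consolidated call above: under a
 * shared INTx line it must reject interrupts raised by other devices;
 * under MSI the ownership check trivially passes and it simply defers
 * to the threaded handler.
 */
static irqreturn_t my_irq_quick_handler(int irq, void *dev_id)
{
	struct my_device *dev = dev_id;	/* hypothetical driver context */

	if (!my_interrupt_source_is_set(dev))	/* hypothetical check */
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;	/* run the thread handler next */
}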
Example #6
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;


	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}


	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping  IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	 /* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	/*
	 * For non-wakeable HW the runtime PM framework
	 * can't be used at the PCI device level.
	 * Use domain runtime PM callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_me_set_pm_domain(dev);

	if (mei_pg_is_enabled(dev))
		pm_runtime_put_noidle(&pdev->dev);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
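
The error path of this probe is the usual kernel unwind ladder: each
failure jumps to a label that releases, in reverse order, exactly the
resources acquired so far. Reduced to its shape (hypothetical my_probe,
only two acquisition steps shown):

static int my_probe(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;		/* nothing to undo yet */

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err)
		goto disable_device;	/* undo pci_enable_device() */

	/* ... more steps, each later failure jumping further up ... */

	return 0;

disable_device:
	pci_disable_device(pdev);
	return err;
}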
Example #7
/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct mei_device *dev;
    struct mei_txe_hw *hw;
    int err;
    int i;

    /* enable pci dev */
    err = pci_enable_device(pdev);
    if (err) {
        dev_err(&pdev->dev, "failed to enable pci device.\n");
        goto end;
    }
    /* set PCI host mastering  */
    pci_set_master(pdev);
    /* pci request regions for mei driver */
    err = pci_request_regions(pdev, KBUILD_MODNAME);
    if (err) {
        dev_err(&pdev->dev, "failed to get pci regions.\n");
        goto disable_device;
    }

    err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
    if (err) {
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err) {
            dev_err(&pdev->dev, "No suitable DMA available.\n");
            goto release_regions;
        }
    }

    /* allocates and initializes the mei dev structure */
    dev = mei_txe_dev_init(pdev);
    if (!dev) {
        err = -ENOMEM;
        goto release_regions;
    }
    hw = to_txe_hw(dev);

    /* mapping  IO device memory */
    for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
        hw->mem_addr[i] = pci_iomap(pdev, i, 0);
        if (!hw->mem_addr[i]) {
            dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
            err = -ENOMEM;
            goto free_device;
        }
    }


    pci_enable_msi(pdev);

    /* clear spurious interrupts */
    mei_clear_interrupts(dev);

    /* request and enable interrupt  */
    if (pci_dev_msi_enabled(pdev))
        err = request_threaded_irq(pdev->irq,
                                   NULL,
                                   mei_txe_irq_thread_handler,
                                   IRQF_ONESHOT, KBUILD_MODNAME, dev);
    else
        err = request_threaded_irq(pdev->irq,
                                   mei_txe_irq_quick_handler,
                                   mei_txe_irq_thread_handler,
                                   IRQF_SHARED, KBUILD_MODNAME, dev);
    if (err) {
        dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
                pdev->irq);
        goto free_device;
    }

    if (mei_start(dev)) {
        dev_err(&pdev->dev, "init hw failure.\n");
        err = -ENODEV;
        goto release_irq;
    }

    pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
    pm_runtime_use_autosuspend(&pdev->dev);

    err = mei_register(dev, &pdev->dev);
    if (err)
        goto stop;

    pci_set_drvdata(pdev, dev);

    /*
     * For non-wakeable HW the runtime PM framework
     * can't be used at the PCI device level.
     * Use domain runtime PM callbacks instead.
     */
    if (!pci_dev_run_wake(pdev))
        mei_txe_set_pm_domain(dev);

    pm_runtime_put_noidle(&pdev->dev);

    return 0;

stop:
    mei_stop(dev);
release_irq:

    mei_cancel_work(dev);

    /* disable interrupts */
    mei_disable_interrupts(dev);

    free_irq(pdev->irq, dev);
    pci_disable_msi(pdev);

free_device:
    mei_txe_pci_iounmap(pdev, hw);

    kfree(dev);
release_regions:
    pci_release_regions(pdev);
disable_device:
    pci_disable_device(pdev);
end:
    dev_err(&pdev->dev, "initialization failed.\n");
    return err;
}
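
This variant uses the older pci_set_dma_mask() interface with a 36-bit
mask where Example #6 calls dma_set_mask() on &pdev->dev; both express the
same fallback from a wide DMA mask to a 32-bit one.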
Example #8
/**
 * ismt_access() - process an SMBus command
 * @adap: the i2c host adapter
 * @addr: address of the i2c/SMBus target
 * @flags: command options
 * @read_write: read from or write to device
 * @command: the i2c/SMBus command to issue
 * @size: SMBus transaction type
 * @data: read/write data buffer
 */
static int ismt_access(struct i2c_adapter *adap, u16 addr,
		       unsigned short flags, char read_write, u8 command,
		       int size, union i2c_smbus_data *data)
{
	int ret;
	unsigned long time_left;
	dma_addr_t dma_addr = 0; /* address of the data buffer */
	u8 dma_size = 0;
	enum dma_data_direction dma_direction = 0;
	struct ismt_desc *desc;
	struct ismt_priv *priv = i2c_get_adapdata(adap);
	struct device *dev = &priv->pci_dev->dev;

	desc = &priv->hw[priv->head];

	/* Initialize the DMA buffer */
	memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer));

	/* Initialize the descriptor */
	memset(desc, 0, sizeof(struct ismt_desc));
	desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);

	/* Initialize common control bits */
	if (likely(pci_dev_msi_enabled(priv->pci_dev)))
		desc->control = ISMT_DESC_INT | ISMT_DESC_FAIR;
	else
		desc->control = ISMT_DESC_FAIR;

	if ((flags & I2C_CLIENT_PEC) && (size != I2C_SMBUS_QUICK)
	    && (size != I2C_SMBUS_I2C_BLOCK_DATA))
		desc->control |= ISMT_DESC_PEC;

	switch (size) {
	case I2C_SMBUS_QUICK:
		dev_dbg(dev, "I2C_SMBUS_QUICK\n");
		break;

	case I2C_SMBUS_BYTE:
		if (read_write == I2C_SMBUS_WRITE) {
			/*
			 * Send Byte
			 * The command field contains the write data
			 */
			dev_dbg(dev, "I2C_SMBUS_BYTE:  WRITE\n");
			desc->control |= ISMT_DESC_CWRL;
			desc->wr_len_cmd = command;
		} else {
			/* Receive Byte */
			dev_dbg(dev, "I2C_SMBUS_BYTE:  READ\n");
			dma_size = 1;
			dma_direction = DMA_FROM_DEVICE;
			desc->rd_len = 1;
		}
		break;

	case I2C_SMBUS_BYTE_DATA:
		if (read_write == I2C_SMBUS_WRITE) {
			/*
			 * Write Byte
			 * Command plus 1 data byte
			 */
			dev_dbg(dev, "I2C_SMBUS_BYTE_DATA:  WRITE\n");
			desc->wr_len_cmd = 2;
			dma_size = 2;
			dma_direction = DMA_TO_DEVICE;
			priv->dma_buffer[0] = command;
			priv->dma_buffer[1] = data->byte;
		} else {
			/* Read Byte */
			dev_dbg(dev, "I2C_SMBUS_BYTE_DATA:  READ\n");
			desc->control |= ISMT_DESC_CWRL;
			desc->wr_len_cmd = command;
			desc->rd_len = 1;
			dma_size = 1;
			dma_direction = DMA_FROM_DEVICE;
		}
		break;

	case I2C_SMBUS_WORD_DATA:
		if (read_write == I2C_SMBUS_WRITE) {
			/* Write Word */
			dev_dbg(dev, "I2C_SMBUS_WORD_DATA:  WRITE\n");
			desc->wr_len_cmd = 3;
			dma_size = 3;
			dma_direction = DMA_TO_DEVICE;
			priv->dma_buffer[0] = command;
			priv->dma_buffer[1] = data->word & 0xff;
			priv->dma_buffer[2] = data->word >> 8;
		} else {
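
Example #8 shows a different use of pci_dev_msi_enabled(): rather than
selecting IRQ handlers, it gates a per-descriptor control bit
(ISMT_DESC_INT), presumably so the controller raises a completion
interrupt only when MSI is active.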
Example #9
/**
 * mei_txe_irq_thread_handler - txe interrupt thread
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 */
irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_txe_hw *hw = to_txe_hw(dev);
	struct mei_cl_cb complete_list;
	s32 slots;
	int rets = 0;

	dev_dbg(&dev->pdev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
		mei_txe_br_reg_read(hw, HHISR_REG),
		mei_txe_br_reg_read(hw, HISR_REG),
		mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG));


	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);

	if (pci_dev_msi_enabled(dev->pdev))
		mei_txe_check_and_ack_intrs(dev, true);

	/* show irq events */
	mei_txe_pending_interrupts(dev);

	hw->aliveness = mei_txe_aliveness_get(dev);
	hw->readiness = mei_txe_readiness_get(dev);

	/* Readiness:
	 * Detection of TXE driver going through reset
	 * or TXE driver resetting the HECI interface.
	 */
	if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) {
		dev_dbg(&dev->pdev->dev, "Readiness Interrupt was received...\n");

		/* Check if SeC is going through reset */
		if (mei_txe_readiness_is_sec_rdy(hw->readiness)) {
			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
		} else {
			dev->recvd_hw_ready = false;
			if (dev->dev_state != MEI_DEV_RESETTING) {

				dev_warn(&dev->pdev->dev, "FW not ready: resetting.\n");
				schedule_work(&dev->reset_work);
				goto end;

			}
		}
		wake_up(&dev->wait_hw_ready);
	}

	/* Check interrupt cause:
	 * Aliveness: Detection of SeC acknowledge of host request that
	 * it remain alive or host cancellation of that request.
	 */

	if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) {
		/* Clear the interrupt cause */
		dev_dbg(&dev->pdev->dev,
			"Aliveness Interrupt: Status: %d\n", hw->aliveness);
		dev->pg_event = MEI_PG_EVENT_RECEIVED;
		if (waitqueue_active(&hw->wait_aliveness_resp))
			wake_up(&hw->wait_aliveness_resp);
	}


	/* Output Doorbell:
	 * Detection of SeC having sent output to host
	 */
	slots = mei_count_full_read_slots(dev);
	if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
		/* Read from TXE */
		rets = mei_irq_read_handler(dev, &complete_list, &slots);
		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
			dev_err(&dev->pdev->dev,
				"mei_irq_read_handler ret = %d.\n", rets);

			schedule_work(&dev->reset_work);
			goto end;
		}
	}
	/* Input Ready: Detection if host can write to SeC */
	if (test_and_clear_bit(TXE_INTR_IN_READY_BIT, &hw->intr_cause)) {
		dev->hbuf_is_ready = true;
		hw->slots = dev->hbuf_depth;
	}

	if (hw->aliveness && dev->hbuf_is_ready) {
		/* get the real register value */
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
		rets = mei_irq_write_handler(dev, &complete_list);
		if (rets && rets != -EMSGSIZE)
			dev_err(&dev->pdev->dev, "mei_irq_write_handler ret = %d.\n",
				rets);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &complete_list);

end:
	dev_dbg(&dev->pdev->dev, "interrupt thread end ret = %d\n", rets);

	mutex_unlock(&dev->device_lock);

	mei_enable_interrupts(dev);
	return IRQ_HANDLED;
}
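
Here pci_dev_msi_enabled() changes how interrupt causes are acknowledged:
under MSI the sources are re-checked and acked inside the thread via
mei_txe_check_and_ack_intrs(dev, true), presumably because MSI delivery is
edge-like and causes raised while the thread runs would otherwise be
missed; interrupts are re-armed with mei_enable_interrupts() on exit.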
Example #10
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;


	if (!mei_me_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}


	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping  IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	 /* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = mei_register(dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
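
Example #10 appears to be an earlier revision of the probe in Example #6:
mei_me_quirk_probe() takes the raw PCI ID entry instead of a struct
mei_cfg, there is no runtime PM setup, and mei_register() takes no parent
device; the MSI/INTx registration pattern, however, is identical.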