Example #1
static void msix_handle_mask_update(PCIDevice *dev, int vector)
{
    if (!msix_is_masked(dev, vector) && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}
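The same unmask-and-flush logic appears inlined in the MSI-X table write handlers of Examples #8 and #10 below: once a vector's mask bit is cleared, any latched pending interrupt is re-delivered via msix_notify().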
Example #2
static void fake_irqfd(void *opaque, const uint8_t *buf, int size)
{
    EventfdEntry *entry = opaque;
    PCIDevice *pdev = entry->pdev;

    IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, entry->vector);
    msix_notify(pdev, entry->vector);
}
Example #3
static void nvme_isr_notify(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            pci_irq_pulse(&n->parent_obj);
        }
    }
}
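The fallback branch uses pci_irq_pulse(), which in QEMU amounts to asserting and immediately deasserting the legacy INTx line. A minimal sketch of that behaviour (illustrative helper name, not from the project):

/* Sketch: raise the INTx line and immediately lower it again,
 * producing an edge rather than a held level. */
static inline void irq_pulse_sketch(PCIDevice *dev)
{
    pci_irq_assert(dev);
    pci_irq_deassert(dev);
}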
Example #4
void isr_notify(NVMEState *n, NVMEIOCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->dev))) {
            msix_notify(&(n->dev), cq->vector);
        } else {
            qemu_irq_pulse(n->dev.irq[0]);
        }
    }
}
Example #5
static void pcie_aer_root_notify(PCIDevice *dev)
{
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_aer_root_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_aer_root_get_vector(dev));
    } else {
        pci_irq_assert(dev);
    }
}
Example #6
static void pcie_aer_root_notify(PCIDevice *dev)
{
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_aer_root_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_aer_root_get_vector(dev));
    } else {
        qemu_set_irq(dev->irq[dev->exp.aer_intx], 1);
    }
}
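Examples #5 and #6 are two revisions of the same pcie_aer_root_notify() function; they differ only in the last-resort path, where one uses the pci_irq_assert() helper and the other drives the AER INTx line directly through qemu_set_irq() and dev->exp.aer_intx.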
Example #7
static void fake_irqfd(void *opaque, const uint8_t *buf, int size)
{
    MSIVector *entry = opaque;
    PCIDevice *pdev = entry->pdev;
    IVShmemState *s = IVSHMEM(pdev);
    int vector = entry - s->msi_vectors;

    IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, vector);
    msix_notify(pdev, vector);
}
Example #8
static void msix_mmio_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    int vector = offset / MSIX_ENTRY_SIZE;
    pci_set_long(dev->msix_table_page + offset, val);
    if (!msix_is_masked(dev, vector) && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}
Example #9
static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}
Example #10
static void msix_mmio_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1);
    int vector = offset / MSIX_ENTRY_SIZE;
    int was_masked = msix_is_masked(dev, vector);
    memcpy(dev->msix_table_page + offset, &val, 4);
    if (kvm_enabled() && qemu_kvm_irqchip_in_kernel()) {
        kvm_msix_update(dev, vector, was_masked, msix_is_masked(dev, vector));
    }
    if (!msix_is_masked(dev, vector) && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}
Example #11
static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_nvme_irq_pin();
            assert(cq->cqid < 64);
            n->irq_status |= 1 << cq->cqid;
            nvme_irq_check(n);
        }
    } else {
        trace_nvme_irq_masked();
    }
}
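The pin-based branch latches a per-queue bit in irq_status and lets nvme_irq_check() recompute the INTx level. A hedged sketch of the matching deassert path (mirroring the pattern above, not necessarily the project's exact code):

/* Sketch: clear this CQ's bit and let nvme_irq_check() drop the
 * INTx line once no completion queue still has a bit set. */
static void nvme_irq_deassert_sketch(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled && !msix_enabled(&n->parent_obj)) {
        assert(cq->cqid < 64);
        n->irq_status &= ~(1 << cq->cqid);
        nvme_irq_check(n);
    }
}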
Example #12
static void ivshmem_vector_notify(void *opaque)
{
    MSIVector *entry = opaque;
    PCIDevice *pdev = entry->pdev;
    IVShmemState *s = IVSHMEM(pdev);
    int vector = entry - s->msi_vectors;
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];

    if (!event_notifier_test_and_clear(n)) {
        return;
    }

    IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, vector);
    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        msix_notify(pdev, vector);
    } else {
        ivshmem_IntrStatus_write(s, 1);
    }
}
Example #13
File: pcie.c Project: THanwa/qemu
static void hotplug_event_notify(PCIDevice *dev)
{
    bool prev = dev->exp.hpev_notified;

    hotplug_event_update_event_status(dev);

    if (prev == dev->exp.hpev_notified) {
        return;
    }

    /* Note: the logic above does not take into account whether interrupts
     * are masked. The result is that interrupt will be sent when it is
     * subsequently unmasked. This appears to be legal: Section 6.7.3.4:
     * The Port may optionally send an MSI when there are hot-plug events that
     * occur while interrupt generation is disabled, and interrupt generation is
     * subsequently enabled. */
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_cap_flags_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_cap_flags_get_vector(dev));
    } else {
        pci_set_irq(dev, dev->exp.hpev_notified);
    }
}
Example #14
void process_sq(NVMEState *n, uint16_t sq_id)
{
    target_phys_addr_t addr;
    uint16_t cq_id;
    NVMECmd sqe;
    NVMECQE cqe;
    NVMEStatusField *sf = (NVMEStatusField *) &cqe.status;

    if (n->sq[sq_id].dma_addr == 0 ||
        n->cq[n->sq[sq_id].cq_id].dma_addr == 0) {
        LOG_ERR("Required Submission/Completion Queue does not exist");
        n->sq[sq_id].head = n->sq[sq_id].tail = 0;
        goto exit;
    }
    cq_id = n->sq[sq_id].cq_id;
    if (is_cq_full(n, cq_id)) {
        return;
    }
    memset(&cqe, 0, sizeof(cqe));

    LOG_DBG("%s(): called", __func__);

    /* Process SQE */
    if (sq_id == ASQ_ID || n->sq[sq_id].phys_contig) {
        addr = n->sq[sq_id].dma_addr + n->sq[sq_id].head * sizeof(sqe);
    } else {
        /* PRP implementation */
        addr = find_discontig_queue_entry(n->page_size, n->sq[sq_id].head,
            sizeof(sqe), n->sq[sq_id].dma_addr);
    }
    nvme_dma_mem_read(addr, (uint8_t *)&sqe, sizeof(sqe));

    if (n->abort) {
        if (abort_command(n, sq_id, &sqe)) {
            incr_sq_head(&n->sq[sq_id]);
            return;
        }
    }

    incr_sq_head(&n->sq[sq_id]);

    if (sq_id == ASQ_ID) {
        nvme_admin_command(n, &sqe, &cqe);
    } else {
        /* TODO add support for IO commands with different sizes of Q elements */
        nvme_io_command(n, &sqe, &cqe);
    }

    /* Filling up the CQ entry */
    cqe.sq_id = sq_id;
    cqe.sq_head = n->sq[sq_id].head;
    cqe.command_id = sqe.cid;

    sf->p = n->cq[cq_id].phase_tag;
    sf->m = 0;
    sf->dnr = 0; /* TODO add support for dnr */

    /* write cqe to completion queue */
    if (cq_id == ACQ_ID || n->cq[cq_id].phys_contig) {
        addr = n->cq[cq_id].dma_addr + n->cq[cq_id].tail * sizeof(cqe);
    } else {
        /* PRP implementation */
        addr = find_discontig_queue_entry(n->page_size, n->cq[cq_id].tail,
            sizeof(cqe), n->cq[cq_id].dma_addr);
    }
    nvme_dma_mem_write(addr, (uint8_t *)&cqe, sizeof(cqe));

    incr_cq_tail(&n->cq[cq_id]);

    if (cq_id == ACQ_ID) {
        /*
         * 3.1.9 says: "This queue is always associated
         * with interrupt vector 0"
         */
        msix_notify(&(n->dev), 0);
        return;
    }

    if (n->cq[cq_id].irq_enabled) {
        msix_notify(&(n->dev), n->cq[cq_id].vector);
    } else {
        LOG_NORM("kw q: IRQ not enabled for CQ: %d", cq_id);
    }

exit:
    return;
}
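Taken together, the examples share one shape: deliver the interrupt through MSI-X when it is enabled, otherwise fall back to MSI or the legacy INTx pin. A condensed sketch of that recurring pattern (hypothetical function, not from any of the projects above):

/* Deliver via MSI-X if enabled; msix_notify() sends the vector's
 * message immediately when unmasked, and otherwise latches its
 * pending bit so a later unmask (Examples #1, #8-#10) re-delivers it.
 * Fall back to MSI, then to the level-triggered legacy interrupt. */
static void device_raise_irq_sketch(PCIDevice *dev, unsigned vector)
{
    if (msix_enabled(dev)) {
        msix_notify(dev, vector);
    } else if (msi_enabled(dev)) {
        msi_notify(dev, vector);
    } else {
        pci_irq_assert(dev);
    }
}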