static void msix_handle_mask_update(PCIDevice *dev, int vector)
{
    if (!msix_is_masked(dev, vector) && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}
static void msix_clear_all_vectors(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_clr_pending(dev, vector);
    }
}
static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}
/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}
static void msix_mmio_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    PCIDevice *dev = opaque;
    /* Dword-aligned offset of the write within the MSI-X table page. */
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    int vector = offset / MSIX_ENTRY_SIZE;

    pci_set_long(dev->msix_table_page + offset, val);
    /* Deliver a message that was latched pending while the vector was masked. */
    if (!msix_is_masked(dev, vector) && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}
/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        kvm_msix_vector_del(dev, vector);
    }
    msix_clr_pending(dev, vector);
}
static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        kvm_msix_free(dev);
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}
static void msix_handle_mask_update(PCIDevice *dev, int vector,
                                    bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}
static void msix_mmio_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1);
    int vector = offset / MSIX_ENTRY_SIZE;
    int was_masked = msix_is_masked(dev, vector);

    memcpy(dev->msix_table_page + offset, &val, 4);
    if (kvm_enabled() && qemu_kvm_irqchip_in_kernel()) {
        /* Propagate the table update (and any mask change) to the
         * in-kernel irqchip. */
        kvm_msix_update(dev, vector, was_masked, msix_is_masked(dev, vector));
    }
    if (!msix_is_masked(dev, vector) && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}
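/*
 * Hedged usage sketch (not part of the original source): how a device model
 * of this vintage typically drives the MSI-X helpers above.  "MyDeviceState",
 * the my_device_* functions and MY_EVENT_VECTOR are hypothetical names; the
 * msix_vector_use(), msix_vector_unuse(), msix_enabled() and msix_notify()
 * calls are the public helpers this code exports.
 */
typedef struct MyDeviceState {
    PCIDevice dev;                  /* PCI function that owns the MSI-X table */
} MyDeviceState;

#define MY_EVENT_VECTOR 0

static int my_device_claim_vectors(MyDeviceState *s)
{
    /* Claim the vector so reset/msix_free_irq_entries() know it is in use. */
    return msix_vector_use(&s->dev, MY_EVENT_VECTOR);
}

static void my_device_release_vectors(MyDeviceState *s)
{
    /* Drop the claim; this also clears any stale pending bit for the vector. */
    msix_vector_unuse(&s->dev, MY_EVENT_VECTOR);
}

static void my_device_raise_event(MyDeviceState *s)
{
    if (msix_enabled(&s->dev)) {
        /* If the vector is masked this only latches the pending bit; the
         * message is replayed by msix_handle_mask_update()/msix_mmio_writel()
         * once the guest unmasks the vector. */
        msix_notify(&s->dev, MY_EVENT_VECTOR);
    }
}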
/*********************************************************************
    Function     :    process_doorbell
    Description  :    Processing Doorbell and SQ commands
    Return Type  :    void
    Arguments    :    NVMEState * : Pointer to NVME device State
                      target_phys_addr_t : Address (offset address)
                      uint32_t : Value to be written
*********************************************************************/
static void process_doorbell(NVMEState *nvme_dev, target_phys_addr_t addr,
    uint32_t val)
{
    /* Used to get the SQ/CQ number to be written to */
    uint32_t queue_id;
    int64_t deadline;

    LOG_DBG("%s(): addr = 0x%08x, val = 0x%08x",
        __func__, (unsigned)addr, val);

    /* Check if it is CQ or SQ doorbell */
    queue_id = (addr - NVME_SQ0TDBL) / sizeof(uint32_t);

    if (queue_id % 2) {
        /* CQ */
        uint16_t new_head = val & 0xffff;

        queue_id = (addr - NVME_CQ0HDBL) / QUEUE_BASE_ADDRESS_WIDTH;
        if (adm_check_cqid(nvme_dev, queue_id)) {
            LOG_NORM("Wrong CQ ID: %d", queue_id);
            enqueue_async_event(nvme_dev, event_type_error,
                event_info_err_invalid_sq, NVME_LOG_ERROR_INFORMATION);
            return;
        }
        if (new_head >= nvme_dev->cq[queue_id].size) {
            LOG_NORM("Bad cq head value: %d", new_head);
            enqueue_async_event(nvme_dev, event_type_error,
                event_info_err_invalid_db, NVME_LOG_ERROR_INFORMATION);
            return;
        }

        if (is_cq_full(nvme_dev, queue_id)) {
            /* Queue was previously full; schedule a submission queue check
             * in case there are commands that couldn't be processed. */
            nvme_dev->sq_processing_timer_target =
                qemu_get_clock_ns(vm_clock) + 5000;
            qemu_mod_timer(nvme_dev->sq_processing_timer,
                nvme_dev->sq_processing_timer_target);
        }
        nvme_dev->cq[queue_id].head = new_head;

        /* Reset the P bit if head == tail for all Queues on
         * a specific interrupt vector */
        if (nvme_dev->cq[queue_id].irq_enabled &&
            !(nvme_irqcq_empty(nvme_dev, nvme_dev->cq[queue_id].vector))) {
            /* reset the P bit */
            LOG_DBG("Reset P bit for vec:%d", nvme_dev->cq[queue_id].vector);
            msix_clr_pending(&nvme_dev->dev, nvme_dev->cq[queue_id].vector);
        }

        if (nvme_dev->cq[queue_id].tail != nvme_dev->cq[queue_id].head) {
            /* More completion entries outstanding; submit the interrupt. */
            isr_notify(nvme_dev, &nvme_dev->cq[queue_id]);
        }
    } else {
        /* SQ */
        uint16_t new_tail = val & 0xffff;

        queue_id = (addr - NVME_SQ0TDBL) / QUEUE_BASE_ADDRESS_WIDTH;
        if (adm_check_sqid(nvme_dev, queue_id)) {
            LOG_NORM("Wrong SQ ID: %d", queue_id);
            enqueue_async_event(nvme_dev, event_type_error,
                event_info_err_invalid_sq, NVME_LOG_ERROR_INFORMATION);
            return;
        }
        if (new_tail >= nvme_dev->sq[queue_id].size) {
            LOG_NORM("Bad sq tail value: %d", new_tail);
            enqueue_async_event(nvme_dev, event_type_error,
                event_info_err_invalid_db, NVME_LOG_ERROR_INFORMATION);
            return;
        }
        nvme_dev->sq[queue_id].tail = new_tail;

        /* Check if the SQ processing routine is scheduled for
         * execution within 5 us. If it isn't, make it so. */
        deadline = qemu_get_clock_ns(vm_clock) + 5000;
        if (nvme_dev->sq_processing_timer_target == 0) {
            qemu_mod_timer(nvme_dev->sq_processing_timer, deadline);
            nvme_dev->sq_processing_timer_target = deadline;
        }
    }
    return;
}
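/*
 * Hedged sketch (an assumption, not the original implementation): one way the
 * isr_notify() helper called from process_doorbell() above could look.
 * "NVMECQ" is a hypothetical name for the completion-queue element type; the
 * fields used (irq_enabled, vector) match what process_doorbell() already
 * dereferences, and nvme_dev->dev is the embedded PCIDevice.
 */
static void isr_notify(NVMEState *nvme_dev, NVMECQ *cq)
{
    if (!cq->irq_enabled) {
        return;
    }
    if (msix_enabled(&nvme_dev->dev)) {
        /* MSI-X path: raise (or latch as pending, if masked) the CQ vector. */
        msix_notify(&nvme_dev->dev, cq->vector);
    } else {
        /* Legacy pin-based fallback: pulse the function's INTx line. */
        qemu_irq_pulse(nvme_dev->dev.irq[0]);
    }
}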