/*********************************************************************
    Function    : nvme_update_stats
    Description : Accounts a completed read/write command into the
                  per-namespace SMART counters and, on writes, raises
                  the spare-threshold async event once utilization
                  crosses NVME_SPARE_THRESH
    Return Type : void
    Arguments   : NVMEState * : Pointer to NVME device State
                  DiskInfo *  : Namespace being accessed
                  uint8_t     : NVMe opcode (read or write)
                  uint64_t    : Starting LBA of the command
                  uint64_t    : Number of logical blocks (0-based)
*********************************************************************/
static void nvme_update_stats(NVMEState *n, DiskInfo *disk, uint8_t opcode,
    uint64_t slba, uint64_t nlb)
{
    uint64_t prev_units;

    switch (opcode) {
    case NVME_CMD_WRITE: {
        uint64_t prev_nuse = disk->idtfy_ns.nuse;

        update_ns_util(disk, slba, nlb);

        /* Utilization changed: issue the SMART spare-threshold event the
         * first time the remaining percentage drops below the threshold. */
        if (prev_nuse != disk->idtfy_ns.nuse && !disk->thresh_warn_issued &&
                (100 - (uint32_t)((((double)disk->idtfy_ns.nuse) /
                    disk->idtfy_ns.nsze) * 100) < NVME_SPARE_THRESH)) {
            LOG_NORM("Device:%d nsid:%d, setting threshold warning",
                n->instance, disk->nsid);
            disk->thresh_warn_issued = 1;
            enqueue_async_event(n, event_type_smart,
                event_info_smart_spare_thresh, NVME_LOG_SMART_INFORMATION);
        }

        /* host_write_commands is a 128-bit counter held as two uint64 words;
         * carry into the high word when the low word wraps. */
        if (++disk->host_write_commands[0] == 0) {
            ++disk->host_write_commands[1];
        }

        /* nlb is 0-based, so nlb + 1 blocks were transferred.  Blocks are
         * batched in write_data_counter and folded into data_units_written
         * in groups of 1000, with the remainder kept for next time. */
        disk->write_data_counter += nlb + 1;
        prev_units = disk->data_units_written[0];
        disk->data_units_written[0] += (disk->write_data_counter / 1000);
        disk->write_data_counter %= 1000;
        if (prev_units > disk->data_units_written[0]) {
            /* low word wrapped: propagate the carry */
            ++disk->data_units_written[1];
        }
        break;
    }
    case NVME_CMD_READ:
        /* Same 128-bit carry scheme as the write path. */
        if (++disk->host_read_commands[0] == 0) {
            ++disk->host_read_commands[1];
        }

        disk->read_data_counter += nlb + 1;
        prev_units = disk->data_units_read[0];
        disk->data_units_read[0] += (disk->read_data_counter / 1000);
        disk->read_data_counter %= 1000;
        if (prev_units > disk->data_units_read[0]) {
            ++disk->data_units_read[1];
        }
        break;
    default:
        /* Other opcodes do not affect the SMART statistics. */
        break;
    }
}
/*********************************************************************
    Function    : nvme_doorbell_cq
    Description : Handles a guest write to a completion queue head
                  doorbell register
    Return Type : void
    Arguments   : NVMEState * : Pointer to NVME device State
                  target_phys_addr_t : Address (offset address)
                  uint32_t : Value to be written
*********************************************************************/
static void nvme_doorbell_cq(NVMEState *nvme_dev, target_phys_addr_t addr,
    uint32_t val)
{
    uint16_t new_head = val & 0xffff;
    uint32_t queue_id = (addr - NVME_CQ0HDBL) / QUEUE_BASE_ADDRESS_WIDTH;

    if (adm_check_cqid(nvme_dev, queue_id)) {
        LOG_NORM("Wrong CQ ID: %d", queue_id);
        enqueue_async_event(nvme_dev, event_type_error,
            event_info_err_invalid_sq, NVME_LOG_ERROR_INFORMATION);
        return;
    }
    if (new_head >= nvme_dev->cq[queue_id].size) {
        LOG_NORM("Bad cq head value: %d", new_head);
        enqueue_async_event(nvme_dev, event_type_error,
            event_info_err_invalid_db, NVME_LOG_ERROR_INFORMATION);
        return;
    }

    if (is_cq_full(nvme_dev, queue_id)) {
        /* The CQ was full before this head update, so command processing
         * may have stalled; schedule a submission queue check for any
         * commands that could not be processed while the CQ was full. */
        nvme_dev->sq_processing_timer_target =
            qemu_get_clock_ns(vm_clock) + 5000;
        qemu_mod_timer(nvme_dev->sq_processing_timer,
            nvme_dev->sq_processing_timer_target);
    }

    nvme_dev->cq[queue_id].head = new_head;

    /* Clear the MSI-X pending (P) bit for this CQ's interrupt vector.
     * NOTE(review): the original comment said "if head == tail for all
     * Queues on a specific interrupt vector", yet the code fires when
     * nvme_irqcq_empty() is false — confirm against nvme_irqcq_empty(). */
    if (nvme_dev->cq[queue_id].irq_enabled &&
            !(nvme_irqcq_empty(nvme_dev, nvme_dev->cq[queue_id].vector))) {
        LOG_DBG("Reset P bit for vec:%d", nvme_dev->cq[queue_id].vector);
        msix_clr_pending(&nvme_dev->dev, nvme_dev->cq[queue_id].vector);
    }

    /* Completion entries remain unconsumed: raise the interrupt again. */
    if (nvme_dev->cq[queue_id].tail != nvme_dev->cq[queue_id].head) {
        isr_notify(nvme_dev, &nvme_dev->cq[queue_id]);
    }
}

/*********************************************************************
    Function    : nvme_doorbell_sq
    Description : Handles a guest write to a submission queue tail
                  doorbell register
    Return Type : void
    Arguments   : NVMEState * : Pointer to NVME device State
                  target_phys_addr_t : Address (offset address)
                  uint32_t : Value to be written
*********************************************************************/
static void nvme_doorbell_sq(NVMEState *nvme_dev, target_phys_addr_t addr,
    uint32_t val)
{
    uint16_t new_tail = val & 0xffff;
    uint32_t queue_id = (addr - NVME_SQ0TDBL) / QUEUE_BASE_ADDRESS_WIDTH;
    int64_t deadline;

    if (adm_check_sqid(nvme_dev, queue_id)) {
        LOG_NORM("Wrong SQ ID: %d", queue_id);
        enqueue_async_event(nvme_dev, event_type_error,
            event_info_err_invalid_sq, NVME_LOG_ERROR_INFORMATION);
        return;
    }
    if (new_tail >= nvme_dev->sq[queue_id].size) {
        LOG_NORM("Bad sq tail value: %d", new_tail);
        enqueue_async_event(nvme_dev, event_type_error,
            event_info_err_invalid_db, NVME_LOG_ERROR_INFORMATION);
        return;
    }

    nvme_dev->sq[queue_id].tail = new_tail;

    /* Ensure the SQ processing routine is scheduled to run within the
     * next 5 us; a target of 0 means no run is currently pending. */
    deadline = qemu_get_clock_ns(vm_clock) + 5000;
    if (nvme_dev->sq_processing_timer_target == 0) {
        qemu_mod_timer(nvme_dev->sq_processing_timer, deadline);
        nvme_dev->sq_processing_timer_target = deadline;
    }
}

/*********************************************************************
    Function    : process_doorbell
    Description : Processing Doorbell and SQ commands
    Return Type : void
    Arguments   : NVMEState * : Pointer to NVME device State
                  target_phys_addr_t : Address (offset address)
                  uint32_t : Value to be written
*********************************************************************/
static void process_doorbell(NVMEState *nvme_dev, target_phys_addr_t addr,
    uint32_t val)
{
    LOG_DBG("%s(): addr = 0x%08x, val = 0x%08x", __func__,
        (unsigned)addr, val);

    /* Doorbell registers alternate: even 32-bit slots are SQ tail
     * doorbells, odd slots are CQ head doorbells. */
    if (((addr - NVME_SQ0TDBL) / sizeof(uint32_t)) % 2) {
        nvme_doorbell_cq(nvme_dev, addr, val);
    } else {
        nvme_doorbell_sq(nvme_dev, addr, val);
    }
}