Example #1
/*********************************************************************
    Function     :    nvme_command_set
    Description  :    All NVM command set processing

    Return Type  :    uint8_t

    Arguments    :    NVMEState * : Pointer to NVME device State
                      NVMECmd  *  : Pointer to SQ entries
                      NVMECQE *   : Pointer to CQ entries
*********************************************************************/
uint8_t nvme_command_set(NVMEState *n, NVMECmd *sqe, NVMECQE *cqe)
{
    NVMEStatusField *sf = (NVMEStatusField *)&cqe->status;

    /* As of NVMe spec rev 1.0b "All NVM cmds use the CMD.DW1 (NSID) field".
     * Thus all NVM cmd set cmds must check for illegal namespaces up front */
    if (sqe->nsid == 0 || sqe->nsid > n->idtfy_ctrl->nn) {
        LOG_NORM("%s(): Invalid nsid:%u", __func__, sqe->nsid);
        sf->sc = NVME_SC_INVALID_NAMESPACE;
        return FAIL;
    }

    if (sqe->opcode == NVME_CMD_READ || sqe->opcode == NVME_CMD_WRITE) {
        return nvme_io_command(n, sqe, cqe);
    } else if (sqe->opcode == NVME_CMD_DSM) {
        return nvme_dsm_command(n, sqe, cqe);
    } else if (sqe->opcode == NVME_CMD_FLUSH) {
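        /* Flush is a no-op here; report success immediately */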
        return NVME_SC_SUCCESS;
    } else {
        LOG_NORM("%s():Wrong IO opcode:\t\t0x%02x", __func__, sqe->opcode);
        sf->sc = NVME_SC_INVALID_OPCODE;
        return FAIL;
    }
}
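Both examples manipulate the completion status through the NVMEStatusField overlay cast onto cqe->status. That definition is not part of this excerpt; the sketch below is a hypothetical layout that follows the completion queue entry status word in NVMe rev 1.0b (phase tag P, status code SC, status code type SCT, more M, do not retry DNR). Bit-field ordering is compiler-dependent, so the real header may declare it differently.

#include <stdint.h>

/* Hypothetical overlay for the 16-bit CQE status word; shown for an
 * LSB-first bit-field layout */
typedef struct NVMEStatusField {
    uint16_t p    : 1;  /* phase tag: flips each pass through the CQ */
    uint16_t sc   : 8;  /* status code, e.g. NVME_SC_INVALID_NAMESPACE */
    uint16_t sct  : 3;  /* status code type */
    uint16_t rsvd : 2;  /* reserved */
    uint16_t m    : 1;  /* more status information available */
    uint16_t dnr  : 1;  /* do not retry */
} NVMEStatusField;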
Example #2
void process_sq(NVMEState *n, uint16_t sq_id)
{
    target_phys_addr_t addr;
    uint16_t cq_id;
    NVMECmd sqe;
    NVMECQE cqe;
    NVMEStatusField *sf = (NVMEStatusField *) &cqe.status;

    /* Both the SQ and its paired CQ must exist (nonzero DMA address) */
    if (n->sq[sq_id].dma_addr == 0 ||
        n->cq[n->sq[sq_id].cq_id].dma_addr == 0) {
        LOG_ERR("Required Submission/Completion Queue does not exist");
        n->sq[sq_id].head = n->sq[sq_id].tail = 0;
        goto exit;
    }
    cq_id = n->sq[sq_id].cq_id;
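    /* Stall this SQ until the host frees room in the paired CQ */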
    if (is_cq_full(n, cq_id)) {
        return;
    }
    memset(&cqe, 0, sizeof(cqe));

    LOG_DBG("%s(): called", __func__);

    /* Process SQE */
    if (sq_id == ASQ_ID || n->sq[sq_id].phys_contig) {
        addr = n->sq[sq_id].dma_addr + n->sq[sq_id].head * sizeof(sqe);
    } else {
        /* PRP implementation */
        addr = find_discontig_queue_entry(n->page_size, n->sq[sq_id].head,
            sizeof(sqe), n->sq[sq_id].dma_addr);
    }
    nvme_dma_mem_read(addr, (uint8_t *)&sqe, sizeof(sqe));

    /* A pending Abort admin command may target this SQE; if it does,
     * consume the entry without executing it */
    if (n->abort) {
        if (abort_command(n, sq_id, &sqe)) {
            incr_sq_head(&n->sq[sq_id]);
            return;
        }
    }

    incr_sq_head(&n->sq[sq_id]);

    if (sq_id == ASQ_ID) {
        nvme_admin_command(n, &sqe, &cqe);
    } else {
        /* TODO: add support for IO commands with different Q element sizes */
        nvme_io_command(n, &sqe, &cqe);
    }

    /* Filling up the CQ entry */
    cqe.sq_id = sq_id;
    cqe.sq_head = n->sq[sq_id].head;
    cqe.command_id = sqe.cid;

    /* The phase tag marks entries the host has not yet consumed */
    sf->p = n->cq[cq_id].phase_tag;
    sf->m = 0;
    sf->dnr = 0; /* TODO: add support for DNR */

    /* write cqe to completion queue */
    if (cq_id == ACQ_ID || n->cq[cq_id].phys_contig) {
        addr = n->cq[cq_id].dma_addr + n->cq[cq_id].tail * sizeof(cqe);
    } else {
        /* PRP implementation */
        addr = find_discontig_queue_entry(n->page_size, n->cq[cq_id].tail,
            sizeof(cqe), n->cq[cq_id].dma_addr);
    }
    nvme_dma_mem_write(addr, (uint8_t *)&cqe, sizeof(cqe));

    incr_cq_tail(&n->cq[cq_id]);

    if (cq_id == ACQ_ID) {
        /*
         * Spec section 3.1.9: "This queue is always associated
         * with interrupt vector 0"
         */
        msix_notify(&(n->dev), 0);
        return;
    }

    if (n->cq[cq_id].irq_enabled) {
        msix_notify(&(n->dev), n->cq[cq_id].vector);
    } else {
        LOG_NORM("kw q: IRQ not enabled for CQ: %d", cq_id);
    }

exit:
    return;
}
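process_sq() relies on a few circular-queue helpers that the excerpt does not show. The sketch below illustrates how incr_sq_head(), incr_cq_tail(), and is_cq_full() could plausibly behave; NVMEQueue and its size field are hypothetical stand-ins, and the real helpers are called with (NVMEState *, queue id) rather than a bare queue struct. The key detail is that the completion queue's phase tag flips each time the tail wraps, which is what lets the host tell freshly posted entries from ones it has already consumed.

#include <stdint.h>

/* Hypothetical queue bookkeeping; the real definitions live in the
 * device headers */
typedef struct NVMEQueue {
    uint16_t head;
    uint16_t tail;
    uint16_t size;       /* entries per queue */
    uint8_t  phase_tag;  /* completion queues only */
} NVMEQueue;

static void incr_sq_head(NVMEQueue *sq)
{
    /* Consume one submission entry, wrapping at the end of the ring */
    sq->head = (uint16_t)((sq->head + 1) % sq->size);
}

static void incr_cq_tail(NVMEQueue *cq)
{
    /* Post one completion entry; flip the phase tag on wrap-around */
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase_tag = !cq->phase_tag;
    }
}

static int is_cq_full(const NVMEQueue *cq)
{
    /* Full when advancing the tail would collide with the host's head */
    return ((cq->tail + 1) % cq->size) == cq->head;
}

find_discontig_queue_entry() is likewise only called here, not defined. A simplified sketch of what it might do, assuming the queue's pages are described by a flat PRP list at the queue's dma_addr and ignoring PRP-list chaining across list pages:

/* Simplified sketch: locate entry #idx of a discontiguous queue whose
 * pages are described by a flat PRP list at prp_list_addr */
static target_phys_addr_t find_discontig_queue_entry(uint32_t page_size,
    uint16_t idx, size_t entry_size, target_phys_addr_t prp_list_addr)
{
    uint32_t entries_per_page = page_size / entry_size;
    uint64_t page_addr;

    /* Fetch the PRP entry mapping the page that holds entry #idx */
    nvme_dma_mem_read(prp_list_addr +
        (idx / entries_per_page) * sizeof(uint64_t),
        (uint8_t *)&page_addr, sizeof(page_addr));
    return page_addr + (idx % entries_per_page) * entry_size;
}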