void
CQ::LogCE(uint16_t indexPtr)
{
    union CE ce = PeekCE(indexPtr);
    LOG_NRM("Logging Completion Element (CE)...");
    LOG_NRM(" CQ %d, CE %d, DWORD0: 0x%08X", GetQId(), indexPtr, ce.t.dw0);
    LOG_NRM(" CQ %d, CE %d, DWORD1: 0x%08X", GetQId(), indexPtr, ce.t.dw1);
    LOG_NRM(" CQ %d, CE %d, DWORD2: 0x%08X", GetQId(), indexPtr, ce.t.dw2);
    LOG_NRM(" CQ %d, CE %d, DWORD3: 0x%08X", GetQId(), indexPtr, ce.t.dw3);
}
void
CQ::Init(uint16_t qId, uint16_t entrySize, uint16_t numEntries,
    bool irqEnabled, uint16_t irqVec)
{
    Queue::Init(qId, entrySize, numEntries);
    mIrqEnabled = irqEnabled;
    mIrqVec = irqVec;

    LOG_NRM("Allocating contiguous CQ memory in dnvme");
    if (numEntries < 2)
        LOG_WARN("Number of elements breaches spec requirement");

    if (GetIsAdmin()) {
        if (gCtrlrConfig->IsStateEnabled()) {
            // At best this will cause tnvme to seg fault or a kernel crash.
            // The NVME spec states unpredictable outcomes will occur.
            LOG_DBG("Creating an ACQ while ctrlr is enabled is a shall not");
            throw exception();
        }

        // We are creating a contiguous ACQ. ACQ's have a constant well known
        // element size and no setup is required for this type of Q.
        int ret;
        struct nvme_create_admn_q q;
        q.elements = GetNumEntries();
        q.type = ADMIN_CQ;

        LOG_NRM("Init contig ACQ: (id, entrySize, numEntries) = (%d, %d, %d)",
            GetQId(), GetEntrySize(), GetNumEntries());

        if ((ret = ioctl(mFd, NVME_IOCTL_CREATE_ADMN_Q, &q)) < 0) {
            LOG_DBG("Q Creation failed by dnvme with error: 0x%02X", ret);
            throw exception();
        }
    } else {
        // We are creating a contiguous IOCQ.
        struct nvme_prep_cq q;
        q.cq_id = GetQId();
        q.elements = GetNumEntries();
        q.contig = true;
        CreateIOCQ(q);
    }

    // Contiguous Q's are created in dnvme and must be mapped back to user space
    mContigBuf = KernelAPI::mmap(GetQSize(), GetQId(), KernelAPI::MMR_CQ);
    if (mContigBuf == NULL) {
        LOG_DBG("Unable to mmap contig memory to user space");
        throw exception();
    }

    LOG_NRM(
        "Created CQ: (id, entrySize, numEntry, IRQEnable) = (%d, %d, %d, %s)",
        GetQId(), GetEntrySize(), GetNumEntries(),
        GetIrqEnabled() ? "T" : "F");
}
void
CQ::Init(uint16_t qId, uint16_t entrySize, uint16_t numEntries,
    const SharedMemBufferPtr memBuffer, bool irqEnabled, uint16_t irqVec)
{
    Queue::Init(qId, entrySize, numEntries);
    mIrqEnabled = irqEnabled;
    mIrqVec = irqVec;

    LOG_NRM("Allocating discontiguous CQ memory in tnvme");
    if (numEntries < 2)
        LOG_WARN("Number of elements breaches spec requirement");

    if (memBuffer == MemBuffer::NullMemBufferPtr) {
        LOG_DBG("Passing an uninitialized SharedMemBufferPtr");
        throw exception();
    } else if (GetIsAdmin()) {
        // There are no appropriate methods for an NVME device to report
        // ASQ/ACQ creation errors, and since the ASQ/ACQ may only be
        // contiguous, don't allow these problems to be injected; at best
        // they will only succeed in seg faulting the app or crashing the
        // kernel.
        LOG_DBG("Illegal memory alignment will corrupt");
        throw exception();
    } else if (memBuffer->GetBufSize() < GetQSize()) {
        LOG_DBG("Q buffer memory ambiguous to passed size params");
        LOG_DBG("Mem buffer size = %d, Q size = %d", memBuffer->GetBufSize(),
            GetQSize());
        throw exception();
    } else if (memBuffer->GetAlignment() != sysconf(_SC_PAGESIZE)) {
        // Nonconformance to page alignment will seg fault the app or crash
        // the kernel. This state is not testable since no errors can be
        // reported by hdw, thus disallow this attempt.
        LOG_DBG("Q content memory shall be page aligned");
        throw exception();
    }

    // Zero out the content memory so the P-bit correlates to a newly alloc'd Q.
    // Also assuming life time ownership of this object if it wasn't created
    // by the RsrcMngr.
    mDiscontigBuf = memBuffer;
    mDiscontigBuf->Zero();

    // We are creating a discontiguous IOCQ
    struct nvme_prep_cq q;
    q.cq_id = GetQId();
    q.elements = GetNumEntries();
    q.contig = false;
    CreateIOCQ(q);

    LOG_NRM(
        "Created CQ: (id, entrySize, numEntry, IRQEnable) = (%d, %d, %d, %s)",
        GetQId(), GetEntrySize(), GetNumEntries(),
        GetIrqEnabled() ? "T" : "F");
}
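/*
 * Usage sketch (illustrative, not part of this file): the two Init()
 * overloads above correspond to the two IOCQ memory models. The object
 * names, the Q parameters, and the buffer allocation helper below are
 * hypothetical; only the Init() signatures come from this file. CE's are
 * 16 bytes each per the NVME spec.
 *
 *     // Contiguous IOCQ: dnvme owns the Q memory and mmap's it back
 *     iocq->Init(1, 16, 256, true, 0);
 *
 *     // Discontiguous IOCQ: caller supplies page-aligned memory >= Q size
 *     SharedMemBufferPtr qMem = AllocPageAlignedBuf(16 * 256); // hypothetical helper
 *     iocq->Init(1, 16, 256, qMem, true, 0);
 */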
void
SQ::Send(SharedCmdPtr cmd, uint16_t &uniqueId)
{
    int rc;
    struct nvme_64b_send io;

    // Detect if doing something that looks suspicious/incorrect/illegal
    if (gCtrlrConfig->IsStateEnabled() == false)
        LOG_WARN("Sending cmds to a disabled DUT is suspicious");

    io.q_id = GetQId();
    io.bit_mask = (send_64b_bitmask)(cmd->GetPrpBitmask() |
        cmd->GetMetaBitmask());
    io.meta_buf_id = cmd->GetMetaBufferID();
    io.data_buf_size = cmd->GetPrpBufferSize();
    io.data_buf_ptr = cmd->GetROPrpBuffer();
    io.cmd_buf_ptr = cmd->GetCmd()->GetBuffer();
    io.data_dir = cmd->GetDataDir();

    LOG_NRM("Send cmd opcode 0x%02X, payload size 0x%04X, to SQ id 0x%02X",
        cmd->GetOpcode(), io.data_buf_size, io.q_id);
    if ((rc = ioctl(mFd, NVME_IOCTL_SEND_64B_CMD, &io)) < 0)
        throw FrmwkEx(HERE, "Error sending cmd, rc = %d", rc);

    // Allow tnvme to learn of the unique cmd ID which was assigned by dnvme
    uniqueId = io.unique_id;
}
void
SQ::Ring()
{
    int rc;
    uint16_t sqId = GetQId();

    LOG_NRM("Ring doorbell for SQ %d", sqId);
    if ((rc = ioctl(mFd, NVME_IOCTL_RING_SQ_DOORBELL, sqId)) < 0)
        throw FrmwkEx(HERE, "Error ringing doorbell, rc = %d", rc);
}
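/*
 * Usage sketch (illustrative, not part of this file): the typical submit
 * path pairs Send() with Ring(). Send() only copies the cmd into the SQ;
 * the DUT starts processing only after the doorbell write. `iosq` and
 * `cmd` are hypothetical, pre-built objects.
 *
 *     uint16_t uniqueId;
 *     iosq->Send(cmd, uniqueId);  // dnvme assigns uniqueId to track this cmd
 *     iosq->Ring();               // make the new SQ tail visible to the DUT
 */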
uint16_t
CQ::Reap(uint16_t &ceRemain, SharedMemBufferPtr memBuffer, uint32_t &isrCount,
    uint16_t ceDesire, bool zeroMem)
{
    int rc;
    struct nvme_reap reap;

    // The tough part of reaping all which can be reaped, indicated by
    // (ceDesire == 0), is that CE's can be arriving from hdw between the time
    // one calls ReapInquiry() and Reap(). In essence we can never know for
    // certain how many there are to be reaped, and thus how large to make a
    // buffer to reap CE's into. The solution is brute force: allocate room
    // for the max number of CE's the Q can hold.
    if (ceDesire == 0) {
        // Per NVME spec: 1 empty CE implies a full CQ, can't truly fill all
        ceDesire = (GetNumEntries() - 1);
    } else if (ceDesire > (GetNumEntries() - 1)) {
        // Per NVME spec: 1 empty CE implies a full CQ, can't truly fill all
        LOG_NRM("Requested num of CE's exceeds max can fit, resizing");
        ceDesire = (GetNumEntries() - 1);
    }

    // Allocate enough space to contain the CE's
    memBuffer->Init(GetEntrySize() * ceDesire);
    if (zeroMem)
        memBuffer->Zero();

    reap.q_id = GetQId();
    reap.elements = ceDesire;
    reap.size = memBuffer->GetBufSize();
    reap.buffer = memBuffer->GetBuffer();

    if ((rc = ioctl(mFd, NVME_IOCTL_REAP, &reap)) < 0) {
        LOG_ERR("Error while reaping CE's, rc = %d", rc);
        throw exception();
    }

    isrCount = reap.isr_count;
    ceRemain = reap.num_remaining;
    LOG_NRM("Reaped %d CE's, %d remain, from CQ %d, ISR count: %d",
        reap.num_reaped, reap.num_remaining, GetQId(), isrCount);
    return reap.num_reaped;
}
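/*
 * Usage sketch (illustrative, not part of this file): reap everything
 * outstanding by passing (ceDesire == 0); Reap() then sizes the buffer for
 * the worst case of (GetNumEntries() - 1) CE's. Names are hypothetical.
 *
 *     uint16_t ceRemain;
 *     uint32_t isrCount;
 *     SharedMemBufferPtr ceMem = SharedMemBufferPtr(new MemBuffer());
 *     uint16_t numReaped = iocq->Reap(ceRemain, ceMem, isrCount, 0, true);
 */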
void
CQ::CreateIOCQ(struct nvme_prep_cq &q)
{
    int ret;

    LOG_NRM("Init %s CQ: (id, entrySize, numEntries) = (%d, %d, %d)",
        q.contig ? "contig" : "discontig", GetQId(), GetEntrySize(),
        GetNumEntries());

    if ((ret = ioctl(mFd, NVME_IOCTL_PREPARE_CQ_CREATION, &q)) < 0) {
        LOG_DBG("Q Creation failed by dnvme with error: 0x%02X", ret);
        throw exception();
    }
}
struct nvme_gen_cq
CQ::GetQMetrics()
{
    int ret;
    struct nvme_gen_cq qMetrics;
    struct nvme_get_q_metrics getQMetrics;

    getQMetrics.q_id = GetQId();
    getQMetrics.type = METRICS_CQ;
    getQMetrics.nBytes = sizeof(qMetrics);
    getQMetrics.buffer = (uint8_t *)&qMetrics;

    if ((ret = ioctl(mFd, NVME_IOCTL_GET_Q_METRICS, &getQMetrics)) < 0) {
        LOG_DBG("Get Q metrics failed by dnvme with error: 0x%02X", ret);
        throw exception();
    }
    return qMetrics;
}
struct nvme_gen_sq
SQ::GetQMetrics()
{
    int ret;
    struct nvme_gen_sq qMetrics;
    struct nvme_get_q_metrics getQMetrics;

    getQMetrics.q_id = GetQId();
    getQMetrics.type = METRICS_SQ;
    getQMetrics.nBytes = sizeof(qMetrics);
    getQMetrics.buffer = (uint8_t *)&qMetrics;

    if ((ret = ioctl(mFd, NVME_IOCTL_GET_Q_METRICS, &getQMetrics)) < 0) {
        throw FrmwkEx(HERE,
            "Get Q metrics failed by dnvme with error: 0x%02X", ret);
    }
    return qMetrics;
}
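/*
 * Usage sketch (illustrative, not part of this file): dnvme fills the
 * nvme_gen_sq/nvme_gen_cq structs with the DUT's current Q state. The
 * field name below is an assumption about that dnvme interface.
 *
 *     struct nvme_gen_sq sqMetrics = iosq->GetQMetrics();
 *     LOG_NRM("SQ head_ptr = %d", sqMetrics.head_ptr);  // assumed field name
 */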
bool
CQ::ReapInquiryWaitAny(uint16_t ms, uint16_t &numCE, uint32_t &isrCount)
{
    struct timeval initial;

    if (gettimeofday(&initial, &TZ_NULL) != 0) {
        LOG_DBG("Cannot retrieve system time");
        throw exception();
    }

    while (CalcTimeout(ms, initial) == false) {
        if ((numCE = ReapInquiry(isrCount)) != 0)
            return true;
    }

    LOG_ERR("Timed out waiting %d ms for CE's in CQ %d", ms, GetQId());
    return false;
}
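/*
 * Usage sketch (illustrative, not part of this file): a bounded wait for
 * any CE to arrive before reaping; the 2000 ms timeout and object name are
 * hypothetical.
 *
 *     uint16_t numCE;
 *     uint32_t isrCount;
 *     if (iocq->ReapInquiryWaitAny(2000, numCE, isrCount) == false)
 *         throw exception();  // no CE arrived within 2000 ms
 */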
uint16_t
CQ::ReapInquiry(uint32_t &isrCount, bool reportOn0)
{
    int rc;
    struct nvme_reap_inquiry inq;

    inq.q_id = GetQId();
    if ((rc = ioctl(mFd, NVME_IOCTL_REAP_INQUIRY, &inq)) < 0) {
        LOG_ERR("Error during reap inquiry, rc = %d", rc);
        throw exception();
    }

    isrCount = inq.isr_count;
    if (inq.num_remaining || reportOn0) {
        LOG_NRM("%d CE's awaiting attention in CQ %d, ISR count: %d",
            inq.num_remaining, inq.q_id, isrCount);
    }
    return inq.num_remaining;
}
void
SQ::Init(uint16_t qId, uint16_t entrySize, uint32_t numEntries, uint16_t cqId)
{
    uint64_t work;

    mCqId = cqId;
    Queue::Init(qId, entrySize, numEntries);
    LOG_NRM("Create SQ: (id,cqid,entrySize,numEntries) = (%d,%d,%d,%d)",
        GetQId(), GetCqId(), GetEntrySize(), GetNumEntries());
    LOG_NRM("Allocating contiguous SQ memory in dnvme");

    if (numEntries < 2) {
        throw FrmwkEx(HERE, "Number of elements breaches spec requirement");
    } else if (gRegisters->Read(CTLSPC_CAP, work) == false) {
        throw FrmwkEx(HERE, "Unable to determine MQES");
    }

    // Detect if doing something that looks suspicious/incorrect/illegal
    work &= CAP_MQES;
    work += 1;      // convert to 1-based
    if (work < (uint64_t)numEntries) {
        LOG_WARN("Creating Q with %d entries, but DUT only allows %d",
            numEntries, (uint32_t)work);
    }

    if (GetIsAdmin()) {
        if (gCtrlrConfig->IsStateEnabled()) {
            // At best this will cause tnvme to seg fault or a kernel crash.
            // The NVME spec states unpredictable outcomes will occur.
            throw FrmwkEx(HERE,
                "Creating an ASQ while ctrlr is enabled is a shall not");
        }

        // We are creating a contiguous ASQ. ASQ's have a constant well known
        // element size and no setup is required for this type of Q.
        int ret;
        struct nvme_create_admn_q q;
        q.elements = GetNumEntries();
        q.type = ADMIN_SQ;

        LOG_NRM("Init contig ASQ: (id, cqid, entrySize, numEntries) = "
            "(%d, %d, %d, %d)",
            GetQId(), GetCqId(), GetEntrySize(), GetNumEntries());

        if ((ret = ioctl(mFd, NVME_IOCTL_CREATE_ADMN_Q, &q)) < 0) {
            throw FrmwkEx(HERE,
                "Q Creation failed by dnvme with error: 0x%02X", ret);
        }
    } else {
        // We are creating a contiguous IOSQ.
        struct nvme_prep_sq q;
        q.sq_id = GetQId();
        q.cq_id = GetCqId();
        q.elements = GetNumEntries();
        q.contig = true;
        CreateIOSQ(q);
    }

    // Contiguous Q's are created in dnvme and must be mapped back to user space
    mContigBuf = KernelAPI::mmap(GetQSize(), GetQId(), KernelAPI::MMR_SQ);
    if (mContigBuf == NULL)
        throw FrmwkEx(HERE, "Unable to mmap contig memory to user space");
}
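/*
 * Usage sketch (illustrative, not part of this file): an IOSQ must name the
 * IOCQ that will receive its completions, so create the IOCQ first. SE's
 * are 64 bytes and CE's 16 bytes per the NVME spec; all other values and
 * object names are hypothetical.
 *
 *     iocq->Init(1, 16, 256, true, 0);  // IOCQ id 1
 *     iosq->Init(1, 64, 256, 1);        // IOSQ id 1 bound to IOCQ id 1
 */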
void
SQ::LogSE(uint16_t indexPtr)
{
    union SE se = PeekSE(indexPtr);
    LOG_NRM("Logging Submission Element (SE)...");
    LOG_NRM("SQ %d, SE %d, DWORD0: 0x%08X", GetQId(), indexPtr, se.d.dw0);
    LOG_NRM("SQ %d, SE %d, DWORD1: 0x%08X", GetQId(), indexPtr, se.d.dw1);
    LOG_NRM("SQ %d, SE %d, DWORD2: 0x%08X", GetQId(), indexPtr, se.d.dw2);
    LOG_NRM("SQ %d, SE %d, DWORD3: 0x%08X", GetQId(), indexPtr, se.d.dw3);
    LOG_NRM("SQ %d, SE %d, DWORD4: 0x%08X", GetQId(), indexPtr, se.d.dw4);
    LOG_NRM("SQ %d, SE %d, DWORD5: 0x%08X", GetQId(), indexPtr, se.d.dw5);
    LOG_NRM("SQ %d, SE %d, DWORD6: 0x%08X", GetQId(), indexPtr, se.d.dw6);
    LOG_NRM("SQ %d, SE %d, DWORD7: 0x%08X", GetQId(), indexPtr, se.d.dw7);
    LOG_NRM("SQ %d, SE %d, DWORD8: 0x%08X", GetQId(), indexPtr, se.d.dw8);
    LOG_NRM("SQ %d, SE %d, DWORD9: 0x%08X", GetQId(), indexPtr, se.d.dw9);
    LOG_NRM("SQ %d, SE %d, DWORD10: 0x%08X", GetQId(), indexPtr, se.d.dw10);
    LOG_NRM("SQ %d, SE %d, DWORD11: 0x%08X", GetQId(), indexPtr, se.d.dw11);
    LOG_NRM("SQ %d, SE %d, DWORD12: 0x%08X", GetQId(), indexPtr, se.d.dw12);
    LOG_NRM("SQ %d, SE %d, DWORD13: 0x%08X", GetQId(), indexPtr, se.d.dw13);
    LOG_NRM("SQ %d, SE %d, DWORD14: 0x%08X", GetQId(), indexPtr, se.d.dw14);
    LOG_NRM("SQ %d, SE %d, DWORD15: 0x%08X", GetQId(), indexPtr, se.d.dw15);
}
void
SQ::Init(uint16_t qId, uint16_t entrySize, uint32_t numEntries,
    const SharedMemBufferPtr memBuffer, uint16_t cqId)
{
    uint64_t work;

    mCqId = cqId;
    Queue::Init(qId, entrySize, numEntries);
    LOG_NRM("Create SQ: (id,cqid,entrySize,numEntries) = (%d,%d,%d,%d)",
        GetQId(), GetCqId(), GetEntrySize(), GetNumEntries());
    LOG_NRM("Allocating discontiguous SQ memory in tnvme");

    if (numEntries < 2) {
        throw FrmwkEx(HERE, "Number of elements breaches spec requirement");
    } else if (gRegisters->Read(CTLSPC_CAP, work) == false) {
        throw FrmwkEx(HERE, "Unable to determine MQES");
    }

    // Detect if doing something that looks suspicious/incorrect/illegal
    work &= CAP_MQES;
    work += 1;      // convert to 1-based
    if (work < (uint64_t)numEntries) {
        LOG_WARN("Creating Q with %d entries, but DUT only allows %d",
            numEntries, (uint32_t)work);
    }

    if (memBuffer == MemBuffer::NullMemBufferPtr) {
        throw FrmwkEx(HERE, "Passing an uninitialized SharedMemBufferPtr");
    } else if (GetIsAdmin()) {
        // There are no appropriate methods for an NVME device to report
        // ASQ/ACQ creation errors, and since the ASQ/ACQ may only be
        // contiguous, don't allow these problems to be injected; at best
        // they will only succeed in seg faulting the app or crashing the
        // kernel.
        throw FrmwkEx(HERE, "Illegal memory alignment will corrupt");
    } else if (memBuffer->GetBufSize() < GetQSize()) {
        LOG_ERR("Q buffer memory ambiguous to passed size params");
        throw FrmwkEx(HERE, "Mem buffer size = %d, Q size = %d",
            memBuffer->GetBufSize(), GetQSize());
    } else if (memBuffer->GetAlignment() != sysconf(_SC_PAGESIZE)) {
        // Nonconformance to page alignment will seg fault the app or crash
        // the kernel. This state is not testable since no errors can be
        // reported by hdw, thus disallow this attempt.
        throw FrmwkEx(HERE, "Q content memory shall be page aligned");
    }

    // Zero out the content memory so the P-bit correlates to a newly alloc'd Q.
    // Also assuming life time ownership of this object if it wasn't created
    // by the RsrcMngr.
    mDiscontigBuf = memBuffer;
    mDiscontigBuf->Zero();

    // We are creating a discontiguous IOSQ
    struct nvme_prep_sq q;
    q.sq_id = GetQId();
    q.cq_id = GetCqId();
    q.elements = GetNumEntries();
    q.contig = false;
    CreateIOSQ(q);
}