void CQ::Init(uint16_t qId, uint16_t entrySize, uint16_t numEntries, bool irqEnabled, uint16_t irqVec) { Queue::Init(qId, entrySize, numEntries); mIrqEnabled = irqEnabled; mIrqVec = irqVec; LOG_NRM("Allocating contiguous CQ memory in dnvme"); if (numEntries < 2) LOG_WARN("Number elements breaches spec requirement"); if (GetIsAdmin()) { if (gCtrlrConfig->IsStateEnabled()) { // At best this will cause tnvme to seg fault or a kernel crash // The NVME spec states unpredictable outcomes will occur. LOG_DBG("Creating an ASQ while ctrlr is enabled is a shall not"); throw exception(); } // We are creating a contiguous ACQ. ACQ's have a constant well known // element size and no setup is required for this type of Q. int ret; struct nvme_create_admn_q q; q.elements = GetNumEntries(); q.type = ADMIN_CQ; LOG_NRM("Init contig ACQ: (id, entrySize, numEntries) = (%d, %d, %d)", GetQId(), GetEntrySize(), GetNumEntries()); if ((ret = ioctl(mFd, NVME_IOCTL_CREATE_ADMN_Q, &q)) < 0) { LOG_DBG("Q Creation failed by dnvme with error: 0x%02X", ret); throw exception(); } } else { // We are creating a contiguous IOCQ. struct nvme_prep_cq q; q.cq_id = GetQId(); q.elements = GetNumEntries(); q.contig = true; CreateIOCQ(q); } // Contiguous Q's are created in dnvme and must be mapped back to user space mContigBuf = KernelAPI::mmap(mFd, GetQSize(), GetQId(), KernelAPI::MMR_CQ); if (mContigBuf == NULL) { LOG_DBG("Unable to mmap contig memory to user space"); throw exception(); } LOG_NRM( "Created CQ: (id, entrySize, numEntry, IRQEnable) = (%d, %d, %d, %s)", GetQId(), GetEntrySize(), GetNumEntries(), GetIrqEnabled() ? "T" : "F"); }
// -------------------------------------------------------------------------- // // Function // Name: BackupStoreRefCountDatabase::GetRefCount(int64_t // ObjectID) // Purpose: Get the number of references to the specified object // out of the database // Created: 2009/06/01 // // -------------------------------------------------------------------------- BackupStoreRefCountDatabase::refcount_t BackupStoreRefCountDatabase::GetRefCount(int64_t ObjectID) const { IOStream::pos_type offset = GetOffset(ObjectID); if (GetSize() < offset + GetEntrySize()) { THROW_FILE_ERROR("Failed to read refcount database: " "attempted read of unknown refcount for object " << BOX_FORMAT_OBJECTID(ObjectID), mFilename, BackupStoreException, UnknownObjectRefCountRequested); } mapDatabaseFile->Seek(offset, SEEK_SET); refcount_t refcount; if (mapDatabaseFile->Read(&refcount, sizeof(refcount)) != sizeof(refcount)) { THROW_FILE_ERROR("Failed to read refcount database: " "short read at offset " << offset, mFilename, BackupStoreException, CouldNotLoadStoreInfo); } return ntohl(refcount); }
bool QuickIndexImpl::GrowIndexFile() { if (kMaxEntriesCount == header_->num_entries_capacity){ return false; } if (header_->num_entries_capacity == 0) { if (IsLargeEntry()) { header_->num_entries_capacity = kInitLargeEntriesCount; } else { header_->num_entries_capacity = kInitEntriesCount; } } else { header_->num_entries_capacity *= 2; if (header_->num_entries_capacity > kMaxEntriesCount) { header_->num_entries_capacity = kMaxEntriesCount; } } if (index_file_.SetLength(sizeof(QuickIndexHeader) + GetEntrySize() * header_->num_entries_capacity)) { header_ = (QuickIndexHeader*)index_file_.buffer(); //header_->empty_entry_addr = GetEmptyEntryAddr(); return true; } return false; }
// Append a human-readable decoding of every CE in this CQ to the named file,
// after first letting Queue::Dump() write the raw form.
// @param filename File to dump to (opened in append mode here)
// @param fileHdr  Header text forwarded to Queue::Dump()
// @throws exception if the file cannot be opened; rethrows anything thrown
//         while decoding, after closing the file (the original code leaked
//         the FILE handle if PeekCE()/DecodeStatus() threw)
void CQ::Dump(LogFilename filename, string fileHdr)
{
    FILE *fp;
    union CE ce;
    vector<string> desc;

    Queue::Dump(filename, fileHdr);

    // Reopen the file and append the same data in a different format
    if ((fp = fopen(filename.c_str(), "a")) == NULL) {
        LOG_DBG("Failed to open file: %s", filename.c_str());
        throw exception();
    }

    try {
        fprintf(fp, "\nFurther decoding details of the above raw dump follow:\n");
        for (uint32_t i = 0; i < GetNumEntries(); i++) {
            ce = PeekCE(i);
            fprintf(fp, "CE %d @ 0x%08X:\n", i, (i * GetEntrySize()));
            fprintf(fp, "  Cmd specific: 0x%08X\n", ce.n.cmdSpec);
            fprintf(fp, "  Reserved:     0x%08X\n", ce.n.reserved);
            fprintf(fp, "  SQ head ptr:  0x%04X\n", ce.n.SQHD);
            fprintf(fp, "  SQ ID:        0x%04X\n", ce.n.SQID);
            fprintf(fp, "  Cmd ID:       0x%08X\n", ce.n.CID);
            fprintf(fp, "  P:            0x%1X\n", ce.n.SF.t.P);
            // NOTE(review): desc is reused across iterations — assumes
            // DecodeStatus() repopulates (rather than appends to) it; confirm.
            ProcessCE::DecodeStatus(ce, desc);
            for (size_t j = 0; j < desc.size(); j++ )
                fprintf(fp, "  %s\n", desc[j].c_str());
        }
    } catch (...) {
        // Don't leak the FILE handle if decoding throws
        fclose(fp);
        throw;
    }
    fclose(fp);
}
void CQ::Init(uint16_t qId, uint16_t entrySize, uint16_t numEntries, const SharedMemBufferPtr memBuffer, bool irqEnabled, uint16_t irqVec) { Queue::Init(qId, entrySize, numEntries); mIrqEnabled = irqEnabled; mIrqVec = irqVec; LOG_NRM("Allocating discontiguous CQ memory in tnvme"); if (numEntries < 2) LOG_WARN("Number elements breaches spec requirement"); if (memBuffer == MemBuffer::NullMemBufferPtr) { LOG_DBG("Passing an uninitialized SharedMemBufferPtr"); throw exception(); } else if (GetIsAdmin()) { // There are no appropriate methods for an NVME device to report ASC/ACQ // creation errors, thus since ASC/ASQ may only be contiguous then don't // allow these problems to be injected, at best they will only succeed // to seg fault the app or crash the kernel. LOG_DBG("Illegal memory alignment will corrupt"); throw exception(); } else if (memBuffer->GetBufSize() < GetQSize()) { LOG_DBG("Q buffer memory ambiguous to passed size params"); LOG_DBG("Mem buffer size = %d, Q size = %d", memBuffer->GetBufSize(), GetQSize()); throw exception(); } else if (memBuffer->GetAlignment() != sysconf(_SC_PAGESIZE)) { // Nonconformance to page alignment will seg fault the app or crash // the kernel. This state is not testable since no errors can be // reported by hdw, thus disallow this attempt. LOG_DBG("Q content memory shall be page aligned"); throw exception(); } // Zero out the content memory so the P-bit correlates to a newly alloc'd Q. // Also assuming life time ownership of this object if it wasn't created // by the RsrcMngr. mDiscontigBuf = memBuffer; mDiscontigBuf->Zero(); // We are creating a discontiguous IOCQ struct nvme_prep_cq q; q.cq_id = GetQId(); q.elements = GetNumEntries(); q.contig = false; CreateIOCQ(q); LOG_NRM( "Created CQ: (id, entrySize, numEntry, IRQEnable) = (%d, %d, %d, %s)", GetQId(), GetEntrySize(), GetNumEntries(), GetIrqEnabled() ? "T" : "F"); }
void CQ::CreateIOCQ(struct nvme_prep_cq &q) { int ret; LOG_NRM("Init %s CQ: (id, entrySize, numEntries) = (%d, %d, %d)", q.contig ? "contig" : "discontig", GetQId(), GetEntrySize(), GetNumEntries()); if ((ret = ioctl(mFd, NVME_IOCTL_PREPARE_CQ_CREATION, &q)) < 0) { LOG_DBG("Q Creation failed by dnvme with error: 0x%02X", ret); throw exception(); } }
uint16_t CQ::Reap(uint16_t &ceRemain, SharedMemBufferPtr memBuffer, uint32_t &isrCount, uint16_t ceDesire, bool zeroMem) { int rc; struct nvme_reap reap; // The tough part of reaping all which can be reaped, indicated by // (ceDesire == 0), is that CE's can be arriving from hdw between the time // one calls ReapInquiry() and Reap(). In essence this indicates we really // can never know for certain how many there are to be reaped, and thus // never really knowing how large to make a buffer to reap CE's into. // The solution is to enforce brute force methods by allocating max CE's if (ceDesire == 0) { // Per NVME spec: 1 empty CE implies a full CQ, can't truly fill all ceDesire = (GetNumEntries() - 1); } else if (ceDesire > (GetNumEntries() - 1)) { // Per NVME spec: 1 empty CE implies a full CQ, can't truly fill all LOG_NRM("Requested num of CE's exceeds max can fit, resizing"); ceDesire = (GetNumEntries() - 1); } // Allocate enough space to contain the CE's memBuffer->Init(GetEntrySize()*ceDesire); if (zeroMem) memBuffer->Zero(); reap.q_id = GetQId(); reap.elements = ceDesire; reap.size = memBuffer->GetBufSize(); reap.buffer = memBuffer->GetBuffer(); if ((rc = ioctl(mFd, NVME_IOCTL_REAP, &reap)) < 0) { LOG_ERR("Error during reaping CE's, rc =%d", rc); throw exception(); } isrCount = reap.isr_count; ceRemain = reap.num_remaining; LOG_NRM("Reaped %d CE's, %d remain, from CQ %d, ISR count: %d", reap.num_reaped, reap.num_remaining, GetQId(), isrCount); return reap.num_reaped; }
// ------------------------------------------------------------------------- // Init a new node, which not exist // <level>: level (depth) in b-tree // <btree>: b-tree of this node void BNode::Init(int level, BTree* btree) { level_ = (char)level; btree_ = btree; dirty_ = true; int block_length = btree_->file()->block_length(); capacity_ = (block_length - GetHeaderSize()) / GetEntrySize(); // init <key_> key_ = new float[capacity_]; for (int i = 0; i < capacity_; i++) { key_[i] = FLOAT_MIN; } // init <son_> son_ = new int[capacity_]; memset(son_, -1, sizeof(int) * capacity_); char* buf = new char[block_length]; block_ = btree_->file()->AppendBlock(buf); delete[] buf; }
// Load an exist node from disk to init // <btree>: b-tree of this node // <block>: address of file of this node void BNode::InitFromFile(BTree* btree, int block) { btree_ = btree; block_ = block; int block_length = btree_->file()->block_length(); capacity_ = (block_length - GetHeaderSize()) / GetEntrySize(); // init <key_> key_ = new float[capacity_]; for (int i = 0; i < capacity_; i++) { key_[i] = FLOAT_MIN; } // init <son_> son_ = new int[capacity_]; memset(son_, -1, sizeof(int) * capacity_); char* buf = new char[block_length]; btree_->file()->ReadBlock(buf, block); ReadFromBuffer(buf); delete[] buf; }
// True when this index stores EntryLarge records (determined by whether the
// per-entry size matches sizeof(EntryLarge)).
bool QuickIndexImpl::IsLargeEntry() {
  return sizeof(EntryLarge) == GetEntrySize();
}
// Translate an entry address into a pointer inside the mapped index file.
// Addresses are 1-based (so 0 can serve as "no entry"); slot i lives at
// header + sizeof(QuickIndexHeader) + i * entry_size.
void * QuickIndexImpl::GetEntryByAddr( EntryAddr addr )
{
	const size_t slot = addr - 1;
	char* entries_base = (char*)header_ + sizeof(QuickIndexHeader);
	return entries_base + GetEntrySize() * slot;
}
// Create a contiguous SQ whose backing memory is allocated by the dnvme
// kernel driver and then mmap'd back into user space.
// @param qId        Queue ID to assign
// @param entrySize  Size (bytes) of a single SQ element
// @param numEntries Number of elements the Q will contain
// @param cqId       ID of the CQ which will service this SQ's completions
// @throws FrmwkEx on any spec violation or driver failure
void SQ::Init(uint16_t qId, uint16_t entrySize, uint32_t numEntries,
    uint16_t cqId)
{
    uint64_t work;

    mCqId = cqId;
    Queue::Init(qId, entrySize, numEntries);
    LOG_NRM("Create SQ: (id,cqid,entrySize,numEntries) = (%d,%d,%d,%d)",
        GetQId(), GetCqId(), GetEntrySize(), GetNumEntries());

    LOG_NRM("Allocating contiguous SQ memory in dnvme");
    if (numEntries < 2) {
        // NVME spec requires every queue to hold at least 2 elements
        throw FrmwkEx(HERE, "Number elements breaches spec requirement");
    } else if (gRegisters->Read(CTLSPC_CAP, work) == false) {
        // Need the controller CAP register to learn MQES (max Q entries)
        throw FrmwkEx(HERE, "Unable to determine MQES");
    }

    // Detect if doing something that looks suspicious/incorrect/illegal
    work &= CAP_MQES;
    work += 1;      // convert to 1-based
    if (work < (uint64_t)numEntries) {
        LOG_WARN("Creating Q with %d entries, but DUT only allows %d",
            numEntries, (uint32_t)work);
    }

    if (GetIsAdmin()) {
        if (gCtrlrConfig->IsStateEnabled()) {
            // At best this will cause tnvme to seg fault or a kernel crash
            // The NVME spec states unpredictable outcomes will occur.
            throw FrmwkEx(HERE,
                "Creating an ASQ while ctrlr is enabled is a shall not");
        }

        // We are creating a contiguous ASQ. ASQ's have a constant well known
        // element size and no setup is required for this type of Q.
        int ret;
        struct nvme_create_admn_q q;
        q.elements = GetNumEntries();
        q.type = ADMIN_SQ;

        LOG_NRM("Init contig ASQ: (id, cqid, entrySize, numEntries) = "
            "(%d, %d, %d, %d)", GetQId(), GetCqId(), GetEntrySize(),
            GetNumEntries());
        if ((ret = ioctl(mFd, NVME_IOCTL_CREATE_ADMN_Q, &q)) < 0) {
            throw FrmwkEx(HERE,
                "Q Creation failed by dnvme with error: 0x%02X", ret);
        }
    } else {
        // We are creating a contiguous IOSQ.
        struct nvme_prep_sq q;
        q.sq_id = GetQId();
        q.cq_id = GetCqId();
        q.elements = GetNumEntries();
        q.contig = true;
        CreateIOSQ(q);
    }

    // Contiguous Q's are created in dnvme and must be mapped back to user space
    mContigBuf = KernelAPI::mmap(GetQSize(), GetQId(), KernelAPI::MMR_SQ);
    if (mContigBuf == NULL)
        throw FrmwkEx(HERE, "Unable to mmap contig memory to user space");
}
// Create a discontiguous IOSQ backed by user-space memory supplied by the
// caller (tnvme) rather than allocated in the dnvme driver.
// @param qId        Queue ID to assign
// @param entrySize  Size (bytes) of a single SQ element
// @param numEntries Number of elements the Q will contain
// @param memBuffer  Page-aligned user-space buffer backing the Q; this
//                   object assumes lifetime ownership of it
// @param cqId       ID of the CQ which will service this SQ's completions
// @throws FrmwkEx on any spec violation or driver failure
void SQ::Init(uint16_t qId, uint16_t entrySize, uint32_t numEntries,
    const SharedMemBufferPtr memBuffer, uint16_t cqId)
{
    uint64_t work;

    mCqId = cqId;
    Queue::Init(qId, entrySize, numEntries);
    LOG_NRM("Create SQ: (id,cqid,entrySize,numEntries) = (%d,%d,%d,%d)",
        GetQId(), GetCqId(), GetEntrySize(), GetNumEntries());

    LOG_NRM("Allocating discontiguous SQ memory in tnvme");
    if (numEntries < 2) {
        // NVME spec requires every queue to hold at least 2 elements
        throw FrmwkEx(HERE, "Number elements breaches spec requirement");
    } else if (gRegisters->Read(CTLSPC_CAP, work) == false) {
        // Need the controller CAP register to learn MQES (max Q entries)
        throw FrmwkEx(HERE, "Unable to determine MQES");
    }

    // Detect if doing something that looks suspicious/incorrect/illegal
    work &= CAP_MQES;
    work += 1;      // convert to 1-based
    if (work < (uint64_t)numEntries) {
        LOG_WARN("Creating Q with %d entries, but DUT only allows %d",
            numEntries, (uint32_t)work);
    }

    if (memBuffer == MemBuffer::NullMemBufferPtr) {
        throw FrmwkEx(HERE, "Passing an uninitialized SharedMemBufferPtr");
    } else if (GetIsAdmin()) {
        // There are no appropriate methods for an NVME device to report ASC/ACQ
        // creation errors, thus since ASC/ASQ may only be contiguous then don't
        // allow these problems to be injected, at best they will only succeed
        // to seg fault the app or crash the kernel.
        throw FrmwkEx(HERE, "Illegal memory alignment will corrupt");
    } else if (memBuffer->GetBufSize() < GetQSize()) {
        LOG_ERR("Q buffer memory ambiguous to passed size params");
        throw FrmwkEx(HERE, "Mem buffer size = %d, Q size = %d",
            memBuffer->GetBufSize(), GetQSize());
    } else if (memBuffer->GetAlignment() != sysconf(_SC_PAGESIZE)) {
        // Nonconformance to page alignment will seg fault the app or crash
        // the kernel. This state is not testable since no errors can be
        // reported by hdw, thus disallow this attempt.
        throw FrmwkEx(HERE, "Q content memory shall be page aligned");
    }

    // Zero out the content memory so the P-bit correlates to a newly alloc'd Q.
    // Also assuming life time ownership of this object if it wasn't created
    // by the RsrcMngr.
    mDiscontigBuf = memBuffer;
    mDiscontigBuf->Zero();

    // We are creating a discontiguous IOSQ
    struct nvme_prep_sq q;
    q.sq_id = GetQId();
    q.cq_id = GetCqId();
    q.elements = GetNumEntries();
    q.contig = false;
    CreateIOSQ(q);
}