// Wraps the caller's work item in a DeferredWorkItem and places it on the deferred queue.
// Takes ownership of workItem: on any failure path the item is destroyed before throwing.
// Throws dptf_exception if enqueueing has been disabled (e.g. during shutdown).
void WorkItemQueueManager::enqueueDeferredWorkItem(WorkItem* workItem, UInt64 numMilliSecUntilExecution)
{
    EsifMutexHelper esifMutexHelper(&m_mutex);
    esifMutexHelper.lock();

    if (m_enqueueingEnabled == true)
    {
        // DeferredWorkItem takes ownership of workItem from here on.
        DeferredWorkItem* deferredWorkItem = new DeferredWorkItem(workItem, numMilliSecUntilExecution);
        try
        {
            m_deferredQueue->enqueue(deferredWorkItem);
        }
        catch (...)
        {
            // Original code caught std::exception *by value* (object slicing) and missed
            // non-std exceptions entirely, leaking deferredWorkItem. catch (...) guarantees
            // cleanup for every exception type before rethrowing.
            DELETE_MEMORY_TC(deferredWorkItem);
            throw;
        }
    }
    else
    {
        // No DeferredWorkItem wrapper was created; free the caller-supplied item directly.
        // Use DELETE_MEMORY_TC for consistency with the other enqueue paths.
        DELETE_MEMORY_TC(workItem);
        throw dptf_exception("Failed to enqueue work item. Enqueueing has been disabled.");
    }

    esifMutexHelper.unlock();
}
// Returns the IndexStruct pointer for the given index, creating a new entry when the
// index is exactly one past the end of the vector (append-on-demand). Returns nullptr
// for sentinel values (NoParticipant / NoDomain / Invalid) or indexes beyond append range.
IndexStructPtr IndexContainer::getIndexPtr(UIntN index)
{
    IndexStructPtr indexPtr = nullptr;

    EsifMutexHelper esifMutexHelper(&m_mutex);
    esifMutexHelper.lock();

    // FIX: the original read m_vectorIndexStructPtr.size() *before* acquiring the mutex,
    // racing with the push_back below when called concurrently. The size must be sampled
    // under the lock for the ==/</> comparisons to be consistent with the vector contents.
    UInt64 currentVectorSize = m_vectorIndexStructPtr.size();

    if ((index == Constants::Esif::NoParticipant) || (index == Constants::Esif::NoDomain)
        || (index == Constants::Invalid) || (index > currentVectorSize))
    {
        indexPtr = nullptr;
    }
    else if (index < currentVectorSize)
    {
        // Existing entry.
        indexPtr = m_vectorIndexStructPtr[index];
    }
    else if (index == currentVectorSize)
    {
        // Append a new entry for the next sequential index.
        IndexStructPtr indexStructPtr = new IndexStruct;
        indexStructPtr->index = index;
        m_vectorIndexStructPtr.push_back(indexStructPtr);
        indexPtr = m_vectorIndexStructPtr[index];
    }

    esifMutexHelper.unlock();

    return indexPtr;
}
// Deletes every queued work item that matches the given criteria and removes it from
// the queue. Returns the number of items removed. Thread-safe via the queue mutex.
UIntN ImmediateWorkItemQueue::removeIfMatches(const WorkItemMatchCriteria& matchCriteria)
{
    EsifMutexHelper esifMutexHelper(&m_mutex);
    esifMutexHelper.lock();

    UIntN removedCount = 0;

    for (auto pos = m_queue.begin(); pos != m_queue.end();)
    {
        if ((*pos)->matches(matchCriteria))
        {
            // Destroy the work item, then advance via the iterator erase() returns.
            DELETE_MEMORY_TC(*pos);
            pos = m_queue.erase(pos);
            ++removedCount;
        }
        else
        {
            ++pos;
        }
    }

    esifMutexHelper.unlock();

    return removedCount;
}
// Wraps the caller's work item with the given priority and places it on the immediate
// queue without waiting for execution. Takes ownership of workItem: on any failure path
// the item is destroyed before throwing. Throws dptf_exception if enqueueing is disabled.
void WorkItemQueueManager::enqueueImmediateWorkItemAndReturn(WorkItem* workItem, UIntN priority)
{
    EsifMutexHelper esifMutexHelper(&m_mutex);
    esifMutexHelper.lock();

    if (canEnqueueImmediateWorkItem(workItem))
    {
        // ImmediateWorkItem takes ownership of workItem from here on.
        ImmediateWorkItem* immediateWorkItem = new ImmediateWorkItem(workItem, priority);
        try
        {
            m_immediateQueue->enqueue(immediateWorkItem);
        }
        catch (...)
        {
            // Original code caught std::exception *by value* (object slicing) and missed
            // non-std exceptions, leaking immediateWorkItem. catch (...) guarantees the
            // cleanup runs for every exception type before rethrowing.
            DELETE_MEMORY_TC(immediateWorkItem);
            throw;
        }
    }
    else
    {
        DELETE_MEMORY_TC(workItem);
        throw dptf_exception("Failed to enqueue work item. Enqueueing has been disabled.");
    }

    esifMutexHelper.unlock();
}
// Returns the vector position of the given IndexStruct pointer, or Constants::Invalid
// when the pointer is null or not present. Linear scan; the vector only holds one entry
// per loaded participant, so the cost is negligible.
// FIXME: consider using a hash table. However, the number of items in the vector will be
// short as it will be the number of participants loaded. It may not be worth the
// conversion. Should run performance tests before changing.
UIntN IndexContainer::getIndex(IndexStructPtr indexStructPtr)
{
    UIntN foundIndex = Constants::Invalid;

    EsifMutexHelper esifMutexHelper(&m_mutex);
    esifMutexHelper.lock();

    if (indexStructPtr != nullptr)
    {
        const UInt64 entryCount = m_vectorIndexStructPtr.size();
        for (UIntN position = 0; position < entryCount; position++)
        {
            if (m_vectorIndexStructPtr[position] == indexStructPtr)
            {
                foundIndex = position;
                break;
            }
        }
    }

    esifMutexHelper.unlock();

    return foundIndex;
}
// Destructor: tears down all queue-related objects while holding the manager lock so
// no enqueue/dequeue can interleave with destruction.
WorkItemQueueManager::~WorkItemQueueManager(void)
{
    EsifMutexHelper lockHelper(&m_mutex);
    lockHelper.lock();

    deleteAllObjects();

    lockHelper.unlock();
}
// Returns the current number of items in the queue, sampled under the queue mutex.
UInt64 ImmediateWorkItemQueue::getCount(void) const
{
    EsifMutexHelper lockHelper(&m_mutex);
    lockHelper.lock();
    const UInt64 itemCount = m_queue.size();
    lockHelper.unlock();

    return itemCount;
}
// Returns the high-water mark of the queue size, sampled under the queue mutex.
UInt64 ImmediateWorkItemQueue::getMaxCount(void) const
{
    EsifMutexHelper lockHelper(&m_mutex);
    lockHelper.lock();
    const UInt64 highWaterMark = m_maxCount;
    lockHelper.unlock();

    return highWaterMark;
}
void WorkItemQueueManager::disableAndEmptyAllQueues(void) { // This has to be atomic while holding the lock. So, both items (disable and empty) are within the same function. EsifMutexHelper esifMutexHelper(&m_mutex); esifMutexHelper.lock(); m_enqueueingEnabled = false; m_immediateQueue->makeEmtpy(); m_deferredQueue->makeEmtpy(); esifMutexHelper.unlock(); }
// Builds an XML element reporting the deferred queue's current and maximum sizes.
// Caller owns the returned node.
XmlNode* DeferredWorkItemQueue::getXml(void) const
{
    EsifMutexHelper lockHelper(&m_mutex);
    lockHelper.lock();

    XmlNode* statisticsNode = XmlNode::createWrapperElement("deferred_queue_statistics");
    statisticsNode->addChild(
        XmlNode::createDataElement("current_count", StlOverride::to_string(m_queue.size())));
    statisticsNode->addChild(
        XmlNode::createDataElement("max_count", StlOverride::to_string(m_maxCount)));

    lockHelper.unlock();

    return statisticsNode;
}
// Builds an XML element reporting the immediate queue's current and maximum sizes.
std::shared_ptr<XmlNode> ImmediateWorkItemQueue::getXml(void) const
{
    EsifMutexHelper lockHelper(&m_mutex);
    lockHelper.lock();

    auto statisticsNode = XmlNode::createWrapperElement("immediate_queue_statistics");
    statisticsNode->addChild(
        XmlNode::createDataElement("current_count", StlOverride::to_string(m_queue.size())));
    statisticsNode->addChild(
        XmlNode::createDataElement("max_count", StlOverride::to_string(m_maxCount)));

    lockHelper.unlock();

    return statisticsNode;
}
void ImmediateWorkItemQueue::makeEmtpy(void) { EsifMutexHelper esifMutexHelper(&m_mutex); esifMutexHelper.lock(); while (m_queue.empty() == false) { ImmediateWorkItem* currentWorkItem = m_queue.front(); delete currentWorkItem; m_queue.pop_front(); } esifMutexHelper.unlock(); }
// Inserts a work item into the queue in priority order and updates the high-water mark.
// Rejects duplicate thermal-threshold-crossed events so an interrupt storm cannot
// flood the queue with identical entries.
void ImmediateWorkItemQueue::enqueue(ImmediateWorkItem* newWorkItem)
{
    EsifMutexHelper lockHelper(&m_mutex);
    lockHelper.lock();

    // Guard against interrupt storms: refuse to queue a temperature threshold crossed
    // event when the same event is already waiting.
    throwIfDuplicateThermalThresholdCrossedEvent(newWorkItem);

    insertSortedByPriority(newWorkItem);
    updateMaxCount();

    lockHelper.unlock();
}
// Removes and returns the first work item whose scheduled execution time has arrived,
// then re-arms the timer for the next pending item. Returns the item found by
// getFirstReadyWorkItemFromQueue (which handles the not-ready/empty cases).
DeferredWorkItem* DeferredWorkItemQueue::dequeue(void)
{
    EsifMutexHelper lockHelper(&m_mutex);
    lockHelper.lock();

    DeferredWorkItem* readyItem = getFirstReadyWorkItemFromQueue();
    setTimer();

    lockHelper.unlock();

    return readyItem;
}
// Destructor: frees every IndexStruct owned by the container under the lock.
IndexContainer::~IndexContainer(void)
{
    EsifMutexHelper lockHelper(&m_mutex);
    lockHelper.lock();

    for (IndexStructPtr entry : m_vectorIndexStructPtr)
    {
        delete entry;
    }
    m_vectorIndexStructPtr.clear();

    lockHelper.unlock();
}
void DeferredWorkItemQueue::makeEmtpy(void) { EsifMutexHelper esifMutexHelper(&m_mutex); esifMutexHelper.lock(); m_timer.cancelTimer(); while (m_queue.empty() == false) { DeferredWorkItem* currentWorkItem = m_queue.front(); delete currentWorkItem; m_queue.pop_front(); } esifMutexHelper.unlock(); }
// Inserts a work item sorted by its deferred processing time, updates the high-water
// mark, and re-arms the timer so it expires when the head of the queue becomes ready.
// FIXME: during round 2, need to add statistics logging.
void DeferredWorkItemQueue::enqueue(DeferredWorkItem* newWorkItem)
{
    EsifMutexHelper lockHelper(&m_mutex);
    lockHelper.lock();

    insertSortedByDeferredProcessingTime(newWorkItem);
    updateMaxCount();
    setTimer();

    lockHelper.unlock();
}
// Enqueues a work item on the immediate queue and blocks until it has executed.
// Takes ownership of workItem: it is destroyed on every path (executed directly,
// handed to an ImmediateWorkItem, or freed before throwing).
// Throws dptf_exception if enqueueing has been disabled.
void WorkItemQueueManager::enqueueImmediateWorkItemAndWait(WorkItem* workItem, UIntN priority)
{
    if (isWorkItemThread() == true)
    {
        // This is in place to prevent a deadlock. Keep in mind that we run a single thread
        // to process work items. There are conditions where a work item is running (on a
        // work item thread) and it submits another work item and waits for the return. In
        // that case we have an automatic deadlock without this special processing in place.
        // When this happens we just treat it like a function call and execute the work item
        // directly and return. Without this in place the work item would just sit in the
        // queue and never execute since the thread is being held by the currently running
        // work item.
        workItem->execute();
        delete workItem;
    }
    else
    {
        EsifSemaphore semaphore;

        EsifMutexHelper esifMutexHelper(&m_mutex);
        esifMutexHelper.lock();

        if (canEnqueueImmediateWorkItem(workItem))
        {
            // ImmediateWorkItem takes ownership of workItem from here on.
            ImmediateWorkItem* immediateWorkItem = new ImmediateWorkItem(workItem, priority);
            immediateWorkItem->signalAtCompletion(&semaphore);
            try
            {
                m_immediateQueue->enqueue(immediateWorkItem);
            }
            catch (...)
            {
                // Original code caught std::exception *by value* (object slicing) and missed
                // non-std exceptions, leaking immediateWorkItem. catch (...) guarantees the
                // cleanup runs for every exception type before rethrowing.
                DELETE_MEMORY_TC(immediateWorkItem);
                throw;
            }
        }
        else
        {
            // No wrapper was created; free the caller-supplied item directly. Use
            // DELETE_MEMORY_TC for consistency with the other enqueue paths.
            DELETE_MEMORY_TC(workItem);
            throw dptf_exception("Failed to enqueue work item. Enqueueing has been disabled.");
        }

        esifMutexHelper.unlock();

        // Block until the work item thread signals completion.
        semaphore.wait();
    }
}
// Removes and returns the head of the queue, or nullptr when the queue is empty.
// Ownership of the returned item transfers to the caller.
ImmediateWorkItem* ImmediateWorkItemQueue::dequeue(void)
{
    ImmediateWorkItem* headItem = nullptr;

    EsifMutexHelper lockHelper(&m_mutex);
    lockHelper.lock();

    if (!m_queue.empty())
    {
        headItem = m_queue.front();
        m_queue.pop_front();
    }

    lockHelper.unlock();

    return headItem;
}
// Builds an XML status report containing statistics from both queues and the work-item
// statistics collector. Caller owns the returned root node.
XmlNode* WorkItemQueueManager::getStatusAsXml(void)
{
    EsifMutexHelper lockHelper(&m_mutex);
    lockHelper.lock();

    XmlNode* rootNode = XmlNode::createRoot();
    rootNode->addChild(
        XmlNode::createComment("format_id=C5-61-4D-E9-30-80-4D-B5-98-1A-D1-D1-67-DD-4C-D7"));

    XmlNode* managerStatusNode = XmlNode::createWrapperElement("work_item_queue_manager_status");
    rootNode->addChild(managerStatusNode);

    managerStatusNode->addChild(m_immediateQueue->getXml());
    managerStatusNode->addChild(m_deferredQueue->getXml());
    managerStatusNode->addChild(m_workItemStatistics->getXml());

    lockHelper.unlock();

    return rootNode;
}
// Removes all matching work items from both queues (under the lock) and returns the
// total removed. Logging happens after the lock is released so the ESIF call cannot
// block other queue operations.
UIntN WorkItemQueueManager::removeIfMatches(const WorkItemMatchCriteria& matchCriteria)
{
    EsifMutexHelper lockHelper(&m_mutex);
    lockHelper.lock();

    const UIntN immediateRemovals = m_immediateQueue->removeIfMatches(matchCriteria);
    const UIntN deferredRemovals = m_deferredQueue->removeIfMatches(matchCriteria);

    lockHelper.unlock();

    const UIntN totalRemovals = immediateRemovals + deferredRemovals;
    if (totalRemovals > 0)
    {
        ManagerMessage message =
            ManagerMessage(m_dptfManager, FLF, "One or more work items have been removed from the queues.");
        message.addMessage("Immediate Queue removed", immediateRemovals);
        message.addMessage("Deferred Queue removed", deferredRemovals);
        m_dptfManager->getEsifServices()->writeMessageDebug(message);
    }

    return totalRemovals;
}