// Records execution feedback for the cached plan matching 'cq'.
//
// Takes ownership of 'feedback' (even on error paths). Returns BadValue if
// 'feedback' is null, or the cache-lookup error if no entry exists for the
// query's plan cache key. Once kMaxFeedback samples have accumulated, new
// feedback is used to decide whether the cached plan has degraded and, if so,
// the entry is evicted; otherwise the sample is appended to the entry.
Status PlanCache::feedback(const CanonicalQuery& cq, PlanCacheEntryFeedback* feedback) {
    if (nullptr == feedback) {
        return Status(ErrorCodes::BadValue, "feedback is NULL");
    }
    // Take ownership immediately so 'feedback' is freed on every return path.
    // std::auto_ptr is deprecated in C++11 and removed in C++17; std::unique_ptr
    // provides the same single-owner semantics safely.
    std::unique_ptr<PlanCacheEntryFeedback> autoFeedback(feedback);

    const PlanCacheKey& ck = cq.getPlanCacheKey();

    boost::lock_guard<boost::mutex> cacheLock(_cacheMutex);
    PlanCacheEntry* entry;
    Status cacheStatus = _cache.get(ck, &entry);
    if (!cacheStatus.isOK()) {
        return cacheStatus;
    }
    invariant(entry);

    if (entry->feedback.size() >= PlanCacheEntry::kMaxFeedback) {
        // If we have enough feedback, then use it to determine whether
        // we should get rid of the cached solution.
        if (hasCachedPlanPerformanceDegraded(entry, autoFeedback.get())) {
            LOG(1) << _ns << ": removing plan cache entry " << entry->toString()
                   << " - detected degradation in performance of cached solution.";
            _cache.remove(ck);
        }
    } else {
        // We don't have enough feedback yet---just store it and move on.
        // The entry takes ownership of the raw pointer.
        entry->feedback.push_back(autoFeedback.release());
    }

    return Status::OK();
}
// Records execution feedback for the cached plan matching 'cq'.
//
// Takes ownership of 'feedback' (even on error paths). Returns BadValue if
// 'feedback' is null, or the cache-lookup error if no entry exists for the
// query's plan cache key. Feedback is appended only while the entry holds
// fewer than internalQueryCacheFeedbacksStored samples; beyond that it is
// simply discarded.
Status PlanCache::feedback(const CanonicalQuery& cq, PlanCacheEntryFeedback* feedback) {
    // Prefer nullptr over NULL for pointer checks, consistent with the
    // C++11 features (std::unique_ptr, stdx::lock_guard) used below.
    if (nullptr == feedback) {
        return Status(ErrorCodes::BadValue, "feedback is NULL");
    }
    // Take ownership immediately so 'feedback' is freed on every return path.
    std::unique_ptr<PlanCacheEntryFeedback> autoFeedback(feedback);

    PlanCacheKey ck = computeKey(cq);

    stdx::lock_guard<stdx::mutex> cacheLock(_cacheMutex);
    PlanCacheEntry* entry;
    Status cacheStatus = _cache.get(ck, &entry);
    if (!cacheStatus.isOK()) {
        return cacheStatus;
    }
    invariant(entry);

    // We store up to a constant number of feedback entries.
    if (entry->feedback.size() < static_cast<size_t>(internalQueryCacheFeedbacksStored.load())) {
        // The entry takes ownership of the raw pointer.
        entry->feedback.push_back(autoFeedback.release());
    }

    return Status::OK();
}