static void testLeak(size_t alloc_size) { RuntimeOption::EvalGCTriggerPct = 0.50; RuntimeOption::EvalGCMinTrigger = 4 << 20; tl_heap->collect("testLeak"); tl_heap->setGCEnabled(true); clearSurpriseFlag(MemExceededFlag); tl_heap->setMemoryLimit(100 << 20); auto const target_alloc = int64_t{5} << 30; auto const vec_cap = (alloc_size - sizeof(ArrayData)) / sizeof(TypedValue); auto const vec = [vec_cap] { VecArrayInit vec{vec_cap}; for (int j = 0; j < vec_cap; ++j) { vec.append(make_tv<KindOfNull>()); } return vec.toArray(); }(); auto const start_alloc = tl_heap->getStatsRaw().mmAllocated(); for (int64_t i = 0; ; ++i) { auto vec_copy = vec; vec_copy.set(0, make_tv<KindOfInt64>(i)); vec_copy.detach(); if (tl_heap->getStatsRaw().mmAllocated() - start_alloc > target_alloc) { break; } if (UNLIKELY(checkSurpriseFlags())) handle_request_surprise(); } }
void IntervalTimer::RunCallbacks(IntervalTimer::SampleType type) {
  clearSurpriseFlag(IntervalTimerFlag);

  // Snapshot the pool so callbacks are free to add/remove timers while we
  // iterate without invalidating this loop.
  auto const snapshot = s_timer_pool->timers;
  for (auto timer : snapshot) {
    if (!s_timer_pool->timers.count(timer)) {
      // This timer has been removed from the pool by one of the callbacks.
      continue;
    }

    // Take the pending sample count under the signal lock, then drop the
    // lock before running user code.
    int pending = 0;
    {
      std::lock_guard<std::mutex> guard(timer->m_signalMutex);
      pending = timer->m_count;
      timer->m_count = 0;
    }
    if (pending == 0) continue;

    try {
      vm_call_user_func(timer->m_callback,
                        make_packed_array(sample_type_string(type), pending));
    } catch (Object& ex) {
      raise_error("Uncaught exception escaping IntervalTimer: %s",
                  ex.toString().data());
    }
  }
}
// Fetch-and-clear this thread's surprise flags and act on each one in
// priority order (timeouts, memory, GC, signals). If an exception was already
// pending on the thread, timeout/memory flags are re-set instead of being
// consumed, so they fire again on the next check. Returns the raw flags that
// were fetched. May throw (via throwException) and so may not return.
size_t check_request_surprise() {
  auto& info = TI();
  auto& p = info.m_reqInjectionData;
  // Atomically grab and reset all pending surprise bits for this thread.
  auto const flags = fetchAndClearSurpriseFlags();
  // Timeout flags are suppressed while a debugger is attached.
  auto const do_timedout = (flags & TimedOutFlag) && !p.getDebuggerAttached();
  auto const do_memExceeded = flags & MemExceededFlag;
  auto const do_signaled = flags & SignaledFlag;
  auto const do_cpuTimedOut =
    (flags & CPUTimedOutFlag) && !p.getDebuggerAttached();
  auto const do_GC = flags & PendingGCFlag;

  // Start with any pending exception that might be on the thread.
  auto pendingException = info.m_pendingException;
  info.m_pendingException = nullptr;

  if (do_timedout) {
    p.setCPUTimeout(0);  // Stop CPU timer so we won't time out twice.
    if (pendingException) {
      // Another exception is already in flight: re-arm the flag so the
      // timeout is handled on a later check instead of being lost.
      setSurpriseFlag(TimedOutFlag);
    } else {
      pendingException = generate_request_timeout_exception();
    }
  }
  // Don't bother with the CPU timeout if we're already handling a wall
  // timeout.
  if (do_cpuTimedOut && !do_timedout) {
    p.setTimeout(0);  // Stop wall timer so we won't time out twice.
    if (pendingException) {
      setSurpriseFlag(CPUTimedOutFlag);
    } else {
      pendingException = generate_request_cpu_timeout_exception();
    }
  }
  if (do_memExceeded) {
    if (pendingException) {
      setSurpriseFlag(MemExceededFlag);
    } else {
      pendingException = generate_memory_exceeded_exception();
    }
  }
  if (do_GC) {
    // Sticky flags survive fetchAndClear, so clear PendingGCFlag explicitly
    // if it is configured sticky.
    if (StickyFlags & PendingGCFlag) {
      clearSurpriseFlag(PendingGCFlag);
    }
    if (RuntimeOption::EvalEnableGC) {
      MM().collect("surprise");
    } else {
      // GC disabled: just run the heap verifier instead.
      MM().checkHeap("surprise");
    }
  }
  if (do_signaled) {
    // Dispatch any queued pcntl signals to their PHP handlers.
    HHVM_FN(pcntl_signal_dispatch)();
  }

  if (pendingException) {
    pendingException->throwException();
  }
  return flags;
}
void XenonRequestLocalData::requestShutdown() {
  TRACE(1, "XenonRequestLocalData::requestShutdown\n");

  m_inRequest = false;
  clearSurpriseFlag(XenonSignalFlag);

  // Any snapshots still buffered at shutdown were never logged; record them
  // as missed samples before releasing the storage.
  auto const missed = m_stackSnapshots.size();
  Xenon::getInstance().incrementMissedSampleCount(missed);
  m_stackSnapshots.reset();
}
// Xenon data is gathered for logging per request, "if we should" // meaning that if Xenon's Surprise flag has been turned on by someone, we // should log the stacks. If we are in XenonForceAlwaysOn, do not clear // the Surprise flag. The data is gathered in thread local storage. // If the sample is Enter, then do not record this function name because it // hasn't done anything. The sample belongs to the previous function. void Xenon::log(SampleType t, c_WaitableWaitHandle* wh) const { if (getSurpriseFlag(XenonSignalFlag)) { if (!RuntimeOption::XenonForceAlwaysOn) { clearSurpriseFlag(XenonSignalFlag); } logNoSurprise(t, nullptr, wh); } }
// Xenon data is gathered for logging per request, "if we should" // meaning that if Xenon's Surprise flag has been turned on by someone, we // should log the stacks. If we are in XenonForceAlwaysOn, do not clear // the Surprise flag. The data is gathered in thread local storage. // If the sample is Enter, then do not record this function name because it // hasn't done anything. The sample belongs to the previous function. void Xenon::log(SampleType t) const { if (getSurpriseFlag(XenonSignalFlag)) { if (!RuntimeOption::XenonForceAlwaysOn) { clearSurpriseFlag(XenonSignalFlag); } TRACE(1, "Xenon::log %s\n", (t == IOWaitSample) ? "IOWait" : "Normal"); s_xenonData->log(t); } }
void XenonRequestLocalData::requestInit() { TRACE(1, "XenonRequestLocalData::requestInit\n"); m_stackSnapshots = Array::Create(); if (RuntimeOption::XenonForceAlwaysOn) { setSurpriseFlag(XenonSignalFlag); } else { // Clear any Xenon flags that might still be on in this thread so that we do // not have a bias towards the first function. clearSurpriseFlag(XenonSignalFlag); } }
void EventHook::DoMemoryThresholdCallback() {
  clearSurpriseFlag(MemThresholdFlag);

  // No user callback registered: nothing more to do.
  if (g_context->m_memThresholdCallback.isNull()) return;

  VMRegAnchor _;
  try {
    vm_call_user_func(g_context->m_memThresholdCallback, empty_array());
  } catch (Object& ex) {
    raise_error("Uncaught exception escaping mem Threshold callback: %s",
                ex.toString().data());
  }
}
void XenonRequestLocalData::requestInit() { TRACE(1, "XenonRequestLocalData::requestInit\n"); assertx(!m_inRequest); assertx(m_stackSnapshots.get() == nullptr); if (RuntimeOption::XenonForceAlwaysOn) { setSurpriseFlag(XenonSignalFlag); } else { // Clear any Xenon flags that might still be on in this thread so that we do // not have a bias towards the first function. clearSurpriseFlag(XenonSignalFlag); } m_inRequest = true; }
// Clear the debugger-hook surprise flag for this thread.
void EventHook::DisableDebug() { clearSurpriseFlag(DebuggerHookFlag); }
void MemoryManager::resetCouldOOM(bool state) {
  // Forget any pending memory-exceeded surprise and record whether this
  // request is allowed to OOM again.
  m_couldOOM = state;
  clearSurpriseFlag(MemExceededFlag);
}
// Fetch-and-clear this thread's surprise flags and act on each one in
// priority order (timeouts, memory, memory-threshold callback, GC, signals).
// If an exception was already pending on the thread, timeout/memory flags are
// re-set instead of being consumed, so they fire again on the next check.
// Returns the raw flags that were fetched. May throw and so may not return.
size_t check_request_surprise() {
  auto& info = TI();
  auto& p = info.m_reqInjectionData;
  // Atomically grab and reset all pending surprise bits for this thread.
  auto const flags = fetchAndClearSurpriseFlags();
  // Timeout flags are suppressed while a debugger is attached.
  auto const do_timedout = (flags & TimedOutFlag) && !p.getDebuggerAttached();
  auto const do_memExceeded = flags & MemExceededFlag;
  auto const do_memThreshold = flags & MemThresholdFlag;
  auto const do_signaled = flags & SignaledFlag;
  auto const do_cpuTimedOut =
    (flags & CPUTimedOutFlag) && !p.getDebuggerAttached();
  auto const do_GC = flags & PendingGCFlag;

  // Start with any pending exception that might be on the thread.
  auto pendingException = info.m_pendingException;
  info.m_pendingException = nullptr;

  if (do_timedout) {
    p.setCPUTimeout(0);  // Stop CPU timer so we won't time out twice.
    if (pendingException) {
      // Another exception is already in flight: re-arm the flag so the
      // timeout is handled on a later check instead of being lost.
      setSurpriseFlag(TimedOutFlag);
    } else {
      pendingException = generate_request_timeout_exception();
    }
  }
  // Don't bother with the CPU timeout if we're already handling a wall
  // timeout.
  if (do_cpuTimedOut && !do_timedout) {
    p.setTimeout(0);  // Stop wall timer so we won't time out twice.
    if (pendingException) {
      setSurpriseFlag(CPUTimedOutFlag);
    } else {
      pendingException = generate_request_cpu_timeout_exception();
    }
  }
  if (do_memExceeded) {
    if (pendingException) {
      setSurpriseFlag(MemExceededFlag);
    } else {
      pendingException = generate_memory_exceeded_exception();
    }
  }
  if (do_memThreshold) {
    // Consume the (possibly sticky) threshold flag, then invoke the
    // user-registered callback, if any, without letting its exceptions
    // escape.
    clearSurpriseFlag(MemThresholdFlag);
    if (!g_context->m_memThresholdCallback.isNull()) {
      VMRegAnchor _;
      try {
        vm_call_user_func(g_context->m_memThresholdCallback, empty_array());
      } catch (Object& ex) {
        raise_error("Uncaught exception escaping mem Threshold callback: %s",
                    ex.toString().data());
      }
    }
  }
  if (do_GC) {
    VMRegAnchor _;
    if (RuntimeOption::EvalEnableGC) {
      MM().collect("surprise");
    } else {
      // GC disabled: just run the heap verifier instead.
      MM().checkHeap("surprise");
    }
  }
  if (do_signaled) {
    // Dispatch any queued pcntl signals to their PHP handlers.
    HHVM_FN(pcntl_signal_dispatch)();
  }

  if (pendingException) {
    pendingException->throwException();
  }
  return flags;
}
// Tear down Xenon's per-request state: drop any still-pending Xenon signal
// and release the buffered stack snapshots.
void XenonRequestLocalData::requestShutdown() {
  TRACE(1, "XenonRequestLocalData::requestShutdown\n");
  clearSurpriseFlag(XenonSignalFlag);
  m_stackSnapshots.reset();
}
// Fetch-and-clear this thread's surprise flags, restricted to `mask`, and act
// on each one in priority order. `wh` (may be null) is threaded into the
// generated timeout/OOM exceptions. If an exception was already pending on
// the thread, timeout/memory flags are re-set instead of being consumed, so
// they fire again on the next check. Returns the masked flags that were
// fetched. May throw (via throwException) and so may not return.
size_t handle_request_surprise(c_WaitableWaitHandle* wh, size_t mask) {
  auto& info = TI();
  auto& p = info.m_reqInjectionData;
  // Atomically grab and reset pending bits, keeping only those in mask.
  auto const flags = fetchAndClearSurpriseFlags() & mask;
  // Timeout flags are suppressed while a debugger is attached.
  auto const debugging = p.getDebuggerAttached();

  // Start with any pending exception that might be on the thread.
  auto pendingException = info.m_pendingException;
  info.m_pendingException = nullptr;

  if ((flags & TimedOutFlag) && !debugging) {
    p.setCPUTimeout(0);  // Stop CPU timer so we won't time out twice.
    if (pendingException) {
      // Another exception is already in flight: re-arm the flag so the
      // timeout is handled on a later check instead of being lost.
      setSurpriseFlag(TimedOutFlag);
    } else {
      pendingException = generate_request_timeout_exception(wh);
    }
  } else if ((flags & CPUTimedOutFlag) && !debugging) {
    // Don't bother with the CPU timeout if we're already handling a wall
    // timeout.
    p.setTimeout(0);  // Stop wall timer so we won't time out twice.
    if (pendingException) {
      setSurpriseFlag(CPUTimedOutFlag);
    } else {
      pendingException = generate_request_cpu_timeout_exception(wh);
    }
  }
  if (flags & MemExceededFlag) {
    if (pendingException) {
      setSurpriseFlag(MemExceededFlag);
    } else {
      pendingException = generate_memory_exceeded_exception(wh);
    }
  }
  if (flags & PendingGCFlag) {
    // Sticky flags survive fetchAndClear, so clear PendingGCFlag explicitly
    // if it is configured sticky.
    if (StickyFlags & PendingGCFlag) {
      clearSurpriseFlag(PendingGCFlag);
    }
    if (RuntimeOption::EvalEnableGC) {
      MM().collect("surprise");
    } else {
      // GC disabled: just run the heap verifier instead.
      MM().checkHeap("surprise");
    }
  }
  if (flags & SignaledFlag) {
    // Dispatch any queued pcntl signals to their PHP handlers.
    HHVM_FN(pcntl_signal_dispatch)();
  }
  if (flags & PendingPerfEventFlag) {
    if (StickyFlags & PendingPerfEventFlag) {
      clearSurpriseFlag(PendingPerfEventFlag);
    }
    // Drain buffered perf events into the memory-event recorder.
    perf_event_consume(record_perf_mem_event);
  }

  if (pendingException) {
    pendingException->throwException();
  }
  return flags;
}
// Clear the generic event-hook surprise flag for this thread.
void EventHook::Disable() { clearSurpriseFlag(EventHookFlag); }
// Clear the function-intercept surprise flag for this thread.
void EventHook::DisableIntercept() { clearSurpriseFlag(InterceptFlag); }
// Clear the async event-hook surprise flag for this thread.
void EventHook::DisableAsync() { clearSurpriseFlag(AsyncEventHookFlag); }