Example #1
size_t check_request_surprise() {
  auto& info = TI();
  auto& p = info.m_reqInjectionData;

  auto const flags = fetchAndClearSurpriseFlags();
  auto const do_timedout = (flags & TimedOutFlag) && !p.getDebuggerAttached();
  auto const do_memExceeded = flags & MemExceededFlag;
  auto const do_signaled = flags & SignaledFlag;
  auto const do_cpuTimedOut =
    (flags & CPUTimedOutFlag) && !p.getDebuggerAttached();
  auto const do_GC = flags & PendingGCFlag;

  // Start with any pending exception that might be on the thread.
  auto pendingException = info.m_pendingException;
  info.m_pendingException = nullptr;

  if (do_timedout) {
    p.setCPUTimeout(0);  // Stop CPU timer so we won't time out twice.
    if (pendingException) {
      setSurpriseFlag(TimedOutFlag);
    } else {
      pendingException = generate_request_timeout_exception();
    }
  }
  // Don't bother with the CPU timeout if we're already handling a wall timeout.
  if (do_cpuTimedOut && !do_timedout) {
    p.setTimeout(0);  // Stop wall timer so we won't time out twice.
    if (pendingException) {
      setSurpriseFlag(CPUTimedOutFlag);
    } else {
      pendingException = generate_request_cpu_timeout_exception();
    }
  }
  if (do_memExceeded) {
    if (pendingException) {
      setSurpriseFlag(MemExceededFlag);
    } else {
      pendingException = generate_memory_exceeded_exception();
    }
  }
  if (do_GC) {
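    // PendingGCFlag may be sticky (i.e. left set by fetchAndClearSurpriseFlags),
    // so clear it explicitly here before running the collection.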
    if (StickyFlags & PendingGCFlag) {
      clearSurpriseFlag(PendingGCFlag);
    }
    if (RuntimeOption::EvalEnableGC) {
      MM().collect("surprise");
    } else {
      MM().checkHeap("surprise");
    }
  }
  if (do_signaled) {
    HHVM_FN(pcntl_signal_dispatch)();
  }

  if (pendingException) {
    pendingException->throwException();
  }
  return flags;
}
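A pattern worth noting in the example above: the flags are fetched and cleared in one step, and when an exception is already pending, the newly observed flag is re-armed with setSurpriseFlag instead of being dropped, so it gets handled on a later check. Below is a minimal, self-contained sketch of that fetch-and-clear / re-arm idiom; the names and types are hypothetical and are not part of the HHVM API.

#include <atomic>
#include <cstdint>

enum SurpriseFlag : uint64_t { TimedOut = 1u << 0, MemExceeded = 1u << 1 };

std::atomic<uint64_t> g_flags{0};

void set_flag(SurpriseFlag f)  { g_flags.fetch_or(f); }
uint64_t fetch_and_clear()     { return g_flags.exchange(0); }

// Handle at most one condition per check; re-arm the others so they are
// picked up by the next call instead of being silently lost.
const char* check_surprise() {
  auto const flags = fetch_and_clear();
  const char* pending = nullptr;
  if (flags & TimedOut) pending = "request timed out";
  if (flags & MemExceeded) {
    if (pending) set_flag(MemExceeded);  // already reporting something else
    else pending = "memory limit exceeded";
  }
  return pending;  // caller raises/throws based on the result
}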
Example #2
void MemoryManager::refreshStatsHelperExceeded() {
  setSurpriseFlag(MemExceededFlag);
  m_couldOOM = false;
  if (RuntimeOption::LogNativeStackOnOOM) {
    log_native_stack("Exceeded memory limit");
  }
}
Example #3
void MemoryManager::eagerGCCheck() {
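  // Note: folly::Random::oneIn(n) returns true with probability 1/n, so the
  // GC flag is raised on roughly one call in every EvalEagerGCProbability.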
  if (RuntimeOption::EvalEagerGCProbability > 0 &&
      !g_context.isNull() &&
      folly::Random::oneIn(RuntimeOption::EvalEagerGCProbability)) {
    setSurpriseFlag(PendingGCFlag);
  }
}
Example #4
void XenonRequestLocalData::requestInit() {
  TRACE(1, "XenonRequestLocalData::requestInit\n");
  m_stackSnapshots = Array::Create();
  if (RuntimeOption::XenonForceAlwaysOn) {
    setSurpriseFlag(XenonSignalFlag);
  } else {
    // Clear any Xenon flags that might still be on in this thread so that we do
    // not have a bias towards the first function.
    clearSurpriseFlag(XenonSignalFlag);
  }
}
Example #5
void XenonRequestLocalData::requestInit() {
  TRACE(1, "XenonRequestLocalData::requestInit\n");

  assertx(!m_inRequest);
  assertx(m_stackSnapshots.get() == nullptr);
  if (RuntimeOption::XenonForceAlwaysOn) {
    setSurpriseFlag(XenonSignalFlag);
  } else {
    // Clear any Xenon flags that might still be on in this thread so that we do
    // not have a bias towards the first function.
    clearSurpriseFlag(XenonSignalFlag);
  }
  m_inRequest = true;
}
Example #6
/*
 * Get a new slab, then allocate nbytes from it and install it in our
 * slab list.  Return the newly allocated nbytes-sized block.
 */
NEVER_INLINE void* MemoryManager::newSlab(uint32_t nbytes) {
  if (UNLIKELY(m_stats.usage > m_stats.maxBytes)) {
    refreshStats();
  }
  storeTail(m_front, (char*)m_limit - (char*)m_front);
  if (debug && RuntimeOption::EvalCheckHeapOnAlloc && !g_context.isNull()) {
    setSurpriseFlag(PendingGCFlag); // defer heap check until safepoint
  }
  auto slab = m_heap.allocSlab(kSlabSize);
  assert((uintptr_t(slab.ptr) & kSmallSizeAlignMask) == 0);
  m_stats.borrow(slab.size);
  m_stats.alloc += slab.size;
  if (m_stats.alloc > m_stats.peakAlloc) {
    m_stats.peakAlloc = m_stats.alloc;
  }
  m_front = (void*)(uintptr_t(slab.ptr) + nbytes);
  m_limit = (void*)(uintptr_t(slab.ptr) + slab.size);
  FTRACE(3, "newSlab: adding slab at {} to limit {}\n", slab.ptr, m_limit);
  return slab.ptr;
}
Example #7
static void pcntl_signal_handler(int signo) {
  if (signo > 0 && signo < _NSIG && signalHandlersInited()) {
    s_signal_handlers->signaled[signo] = 1;
    setSurpriseFlag(SignaledFlag);
  }
}
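The handler above does only what is safe in signal context: it records which signal fired and raises the surprise flag; the PHP-level callbacks run later, when check_request_surprise calls pcntl_signal_dispatch at a safepoint. A generic sketch of that deferred-dispatch pattern follows; the names are hypothetical, NSIG comes from <csignal> on POSIX systems, and it assumes std::atomic<bool> is lock-free (as it is on mainstream platforms), so it is independent of HHVM.

#include <csignal>
#include <atomic>

static std::atomic<bool> g_signal_seen{false};
static volatile sig_atomic_t g_pending[NSIG];

// Async-signal-safe: only set flags here, never run arbitrary code.
extern "C" void on_signal(int signo) {
  if (signo > 0 && signo < NSIG) {
    g_pending[signo] = 1;
    g_signal_seen.store(true, std::memory_order_release);
  }
}

// Called later from normal execution, e.g. at a surprise-flag safepoint.
void dispatch_pending_signals() {
  if (!g_signal_seen.exchange(false)) return;
  for (int s = 1; s < NSIG; ++s) {
    if (g_pending[s]) {
      g_pending[s] = 0;
      // ... invoke the user-registered handler for signal s ...
    }
  }
}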
Example #8
size_t check_request_surprise() {
  auto& info = TI();
  auto& p = info.m_reqInjectionData;

  auto const flags = fetchAndClearSurpriseFlags();
  auto const do_timedout = (flags & TimedOutFlag) && !p.getDebuggerAttached();
  auto const do_memExceeded = flags & MemExceededFlag;
  auto const do_memThreshold = flags & MemThresholdFlag;
  auto const do_signaled = flags & SignaledFlag;
  auto const do_cpuTimedOut =
    (flags & CPUTimedOutFlag) && !p.getDebuggerAttached();
  auto const do_GC = flags & PendingGCFlag;

  // Start with any pending exception that might be on the thread.
  auto pendingException = info.m_pendingException;
  info.m_pendingException = nullptr;

  if (do_timedout) {
    p.setCPUTimeout(0);  // Stop CPU timer so we won't time out twice.
    if (pendingException) {
      setSurpriseFlag(TimedOutFlag);
    } else {
      pendingException = generate_request_timeout_exception();
    }
  }
  // Don't bother with the CPU timeout if we're already handling a wall timeout.
  if (do_cpuTimedOut && !do_timedout) {
    p.setTimeout(0);  // Stop wall timer so we won't time out twice.
    if (pendingException) {
      setSurpriseFlag(CPUTimedOutFlag);
    } else {
      pendingException = generate_request_cpu_timeout_exception();
    }
  }
  if (do_memExceeded) {
    if (pendingException) {
      setSurpriseFlag(MemExceededFlag);
    } else {
      pendingException = generate_memory_exceeded_exception();
    }
  }
  if (do_memThreshold) {
    clearSurpriseFlag(MemThresholdFlag);
    if (!g_context->m_memThresholdCallback.isNull()) {
      VMRegAnchor _;
      try {
        vm_call_user_func(g_context->m_memThresholdCallback, empty_array());
      } catch (Object& ex) {
        raise_error("Uncaught exception escaping mem Threshold callback: %s",
                    ex.toString().data());
      }
    }
  }
  if (do_GC) {
    VMRegAnchor _;
    if (RuntimeOption::EvalEnableGC) {
      MM().collect("surprise");
    } else {
      MM().checkHeap("surprise");
    }
  }
  if (do_signaled) {
    HHVM_FN(pcntl_signal_dispatch)();
  }

  if (pendingException) {
    pendingException->throwException();
  }
  return flags;
}
Example #9
template<bool live>
void MemoryManager::refreshStatsImpl(MemoryUsageStats& stats) {
#ifdef USE_JEMALLOC
  // Incrementally incorporate the difference between the previous and current
  // deltas into the memory usage statistic.  For reference, the total
  // malloced memory usage could be calculated as such, if delta0 were
  // recorded in resetStatsImpl():
  //
  //   int64 musage = delta - delta0;
  //
  // Note however, the slab allocator adds to m_stats.jemallocDebt
  // when it calls malloc(), so that this function can avoid
  // double-counting the malloced memory. Thus musage in the example
  // code may well substantially exceed m_stats.usage.
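  //
  // As a worked example (hypothetical numbers): if jemalloc's allocated
  // counter went from 100 to 180 bytes and its deallocated counter from 40
  // to 70, then jeDeltaAllocated = 180 - 70 = 110, mmDeltaAllocated =
  // 100 - 40 = 60, and usage grows by 110 - 60 = 50 bytes, before the slab
  // "debt" is subtracted to avoid double counting.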
  if (m_enableStatsSync) {
    uint64_t jeDeallocated = *m_deallocated;
    uint64_t jeAllocated = *m_allocated;

    // We can't currently handle wrapping so make sure this isn't happening.
    assert(jeAllocated >= 0 &&
           jeAllocated <= std::numeric_limits<int64_t>::max());
    assert(jeDeallocated >= 0 &&
           jeDeallocated <= std::numeric_limits<int64_t>::max());

    // This is the delta between the current and the previous jemalloc reading.
    int64_t jeMMDeltaAllocated =
      int64_t(jeAllocated) - int64_t(m_prevAllocated);

    FTRACE(1, "Before stats sync:\n");
    FTRACE(1, "je alloc:\ncurrent: {}\nprevious: {}\ndelta with MM: {}\n",
      jeAllocated, m_prevAllocated, jeAllocated - m_prevAllocated);
    FTRACE(1, "je dealloc:\ncurrent: {}\nprevious: {}\ndelta with MM: {}\n",
      jeDeallocated, m_prevDeallocated, jeDeallocated - m_prevDeallocated);
    FTRACE(1, "usage: {}\ntotal (je) alloc: {}\nje debt: {}\n",
      stats.usage, stats.totalAlloc, stats.jemallocDebt);

    if (!contiguous_heap) {
      // Since these deltas potentially include memory allocated from another
      // thread but deallocated on this one, it is possible for these numbers to
      // go negative.
      int64_t jeDeltaAllocated =
        int64_t(jeAllocated) - int64_t(jeDeallocated);
      int64_t mmDeltaAllocated =
        int64_t(m_prevAllocated) - int64_t(m_prevDeallocated);
      FTRACE(1, "je delta:\ncurrent: {}\nprevious: {}\n",
          jeDeltaAllocated, mmDeltaAllocated);

      // Subtract the old jemalloc adjustment (delta0) and add the current one
      // (delta) to arrive at the new combined usage number.
      stats.usage += jeDeltaAllocated - mmDeltaAllocated;
      // Remove the "debt" accrued from allocating the slabs so we don't double
      // count the slab-based allocations.
      stats.usage -= stats.jemallocDebt;
    }

    stats.jemallocDebt = 0;
    // We need to do the calculation instead of just setting it to jeAllocated
    // because of the MaskAlloc capability.
    stats.totalAlloc += jeMMDeltaAllocated;
    if (live) {
      m_prevAllocated = jeAllocated;
      m_prevDeallocated = jeDeallocated;
    }

    FTRACE(1, "After stats sync:\n");
    FTRACE(1, "usage: {}\ntotal (je) alloc: {}\n\n",
      stats.usage, stats.totalAlloc);
  }
#endif
  assert(stats.maxBytes > 0);
  if (live && stats.usage > stats.maxBytes && m_couldOOM) {
    refreshStatsHelperExceeded();
  }
  if (stats.usage > stats.peakUsage) {
    // Check whether the process's active memory limit has been exceeded, and
    // if so, stop the server.
    //
    // Only check whether the total memory limit was exceeded if this request
    // is at a new high water mark.  This check could be performed regardless
    // of this request's current memory usage (because other request threads
    // could be to blame for the increased memory usage), but doing so would
    // measurably increase computation for little benefit.
#ifdef USE_JEMALLOC
    // (*m_cactive) consistency is achieved via atomic operations.  The fact
    // that we do not use an atomic operation here means that we could get a
    // stale read, but in practice that poses no problems for how we are
    // using the value.
    if (live && s_statsEnabled && *m_cactive > m_cactiveLimit) {
      refreshStatsHelperStop();
    }
#endif
    if (live &&
        stats.usage > m_memThresholdCallbackPeakUsage &&
        stats.peakUsage <= m_memThresholdCallbackPeakUsage) {
      setSurpriseFlag(MemThresholdFlag);
    }

    stats.peakUsage = stats.usage;
  }
  if (live && m_statsIntervalActive) {
    if (stats.usage > stats.peakIntervalUsage) {
      stats.peakIntervalUsage = stats.usage;
    }
    if (stats.alloc > stats.peakIntervalAlloc) {
      stats.peakIntervalAlloc = stats.alloc;
    }
  }
}
Example #10
void EventHook::EnableIntercept() {
  setSurpriseFlag(InterceptFlag);
}
Example #11
void EventHook::EnableDebug() {
  setSurpriseFlag(DebuggerHookFlag);
}
Example #12
void EventHook::EnableAsync() {
  setSurpriseFlag(AsyncEventHookFlag);
}
Example #13
void EventHook::Enable() {
  setSurpriseFlag(EventHookFlag);
}
Example #14
void HHVM_FUNCTION(trigger_oom, bool oom) {
  if (oom) setSurpriseFlag(MemExceededFlag);
}
Example #15
size_t handle_request_surprise(c_WaitableWaitHandle* wh, size_t mask) {
  auto& info = TI();
  auto& p = info.m_reqInjectionData;

  auto const flags = fetchAndClearSurpriseFlags() & mask;
  auto const debugging = p.getDebuggerAttached();

  // Start with any pending exception that might be on the thread.
  auto pendingException = info.m_pendingException;
  info.m_pendingException = nullptr;

  if ((flags & TimedOutFlag) && !debugging) {
    p.setCPUTimeout(0);  // Stop CPU timer so we won't time out twice.
    if (pendingException) {
      setSurpriseFlag(TimedOutFlag);
    } else {
      pendingException = generate_request_timeout_exception(wh);
    }
  } else if ((flags & CPUTimedOutFlag) && !debugging) {
    // Don't bother with the CPU timeout if we're already handling a wall
    // timeout.
    p.setTimeout(0);  // Stop wall timer so we won't time out twice.
    if (pendingException) {
      setSurpriseFlag(CPUTimedOutFlag);
    } else {
      pendingException = generate_request_cpu_timeout_exception(wh);
    }
  }
  if (flags & MemExceededFlag) {
    if (pendingException) {
      setSurpriseFlag(MemExceededFlag);
    } else {
      pendingException = generate_memory_exceeded_exception(wh);
    }
  }
  if (flags & PendingGCFlag) {
    if (StickyFlags & PendingGCFlag) {
      clearSurpriseFlag(PendingGCFlag);
    }
    if (RuntimeOption::EvalEnableGC) {
      MM().collect("surprise");
    } else {
      MM().checkHeap("surprise");
    }
  }
  if (flags & SignaledFlag) {
    HHVM_FN(pcntl_signal_dispatch)();
  }

  if (flags & PendingPerfEventFlag) {
    if (StickyFlags & PendingPerfEventFlag) {
      clearSurpriseFlag(PendingPerfEventFlag);
    }
    perf_event_consume(record_perf_mem_event);
  }

  if (pendingException) {
    pendingException->throwException();
  }
  return flags;
}