// Returns a V8 context after a request has been handled. Executes any
// pending global context methods, leaves the V8 context/isolate, and then
// decides — based on the time since the last GC and the number of requests
// served — whether the context goes onto the dirty list (to be garbage
// collected by the GC thread) or back onto the free list.
void ApplicationV8::exitContext (V8Context* context) {
  V8GcThread* gc = dynamic_cast<V8GcThread*>(_gcThread);
  assert(gc != 0);

  // read the global GC stamp before making the scheduling decision below
  double lastGc = gc->getLastGcStamp();

  CONDITION_LOCKER(guard, _contextCondition);

  // run any global context methods queued while the context was busy
  context->handleGlobalContextMethods();

  context->_context->Exit();
  context->_isolate->Exit();

  delete context->_locker;
  // null the locker so no dangling pointer survives until the next enter
  // (matches what the newer variants of this method do)
  context->_locker = 0;

  ++context->_dirt;

  // whatever happens below, the context is no longer busy
  _busyContexts.erase(context);

  if (context->_lastGcStamp + _gcFrequency < lastGc) {
    // too long since this context was last collected
    LOGGER_TRACE("V8 context has reached GC timeout threshold and will be scheduled for GC");
    _dirtyContexts.push_back(context);
  }
  else if (context->_dirt >= _gcInterval) {
    // too many requests served since the last collection
    LOGGER_TRACE("V8 context has reached maximum number of requests and will be scheduled for GC");
    _dirtyContexts.push_back(context);
  }
  else {
    // context is still clean enough, make it available again
    _freeContexts.push_back(context);
  }

  // wake up the GC thread / any thread waiting for a free context
  guard.broadcast();

  LOGGER_TRACE("returned dirty V8 context");
}
// Picks a free (default-name) context that is worth garbage collecting,
// removes it from the free list and returns it. Returns nullptr when no
// context needs collection. Caller must hold the context condition lock.
ApplicationV8::V8Context* ApplicationV8::pickFreeContextForGc () {
  auto& candidates = _freeContexts[DEFAULT_NAME];

  if (candidates.empty()) {
    // nothing available at all
    return nullptr;
  }

  V8GcThread* gc = dynamic_cast<V8GcThread*>(_gcThread);
  TRI_ASSERT(gc != nullptr);

  // select the candidate with the oldest GC stamp, skipping contexts that
  // have neither executed anything nor accumulated dead objects
  int const n = (int) candidates.size();
  int best = -1;   // -1 means "no candidate found yet"

  for (int i = 0; i < n; ++i) {
    V8Context* candidate = candidates[i];

    if (candidate->_numExecutions == 0 && ! candidate->_hasDeadObjects) {
      // nothing to collect in this context
      continue;
    }

    // <= means that among equal stamps the later element wins,
    // exactly as in the original selection
    if (best == -1 || candidate->_lastGcStamp <= candidates[best]->_lastGcStamp) {
      best = i;
    }
  }

  if (best == -1) {
    // every free context is clean — nothing to do
    return nullptr;
  }

  V8Context* context = candidates[best];
  TRI_ASSERT(context != nullptr);

  if (context->_lastGcStamp + _gcFrequency >= gc->getLastGcStamp()) {
    // even the best candidate was collected recently enough — skip it
    return nullptr;
  }

  // unlink the picked context from the free list, keeping the relative
  // order of the remaining contexts intact
  candidates.erase(candidates.begin() + best);

  return context;
}
// Picks the free context with the oldest GC stamp, removes it from the
// free list and returns it. Returns 0 when there is no free context or
// the oldest one was collected recently enough. Caller must hold the
// context condition lock.
ApplicationV8::V8Context* ApplicationV8::pickContextForGc () {
  const size_t n = _freeContexts.size();

  if (n == 0) {
    // no free contexts to inspect
    return 0;
  }

  V8GcThread* gc = dynamic_cast<V8GcThread*>(_gcThread);
  assert(gc != 0);

  // find the context with the lowest (oldest) GC stamp; with <=, later
  // elements win ties, matching the original selection
  size_t best = 0;

  for (size_t i = 1; i < n; ++i) {
    if (_freeContexts[i]->_lastGcStamp <= _freeContexts[best]->_lastGcStamp) {
      best = i;
    }
  }

  V8Context* context = _freeContexts[best];
  assert(context != 0);

  // only collect if this context has not been collected since the last
  // global GC run
  if (context->_lastGcStamp + _gcFrequency >= gc->getLastGcStamp()) {
    return 0;
  }

  // unlink the picked context from the free list while preserving the
  // relative order of the remaining contexts
  _freeContexts.erase(_freeContexts.begin() + best);

  return context;
}
// Main loop of the dedicated GC thread. Waits for dirty contexts (or a
// timeout), then garbage-collects one context per iteration until the
// server is stopping. Sets _gcFinished when the loop terminates.
void ApplicationV8::collectGarbage () {
  V8GcThread* gc = dynamic_cast<V8GcThread*>(_gcThread);
  TRI_ASSERT(gc != nullptr);

  // this flag will be set to true if we timed out waiting for a GC signal
  // if set to true, the next cycle will use a reduced wait time so the GC
  // can be performed more early for all dirty contexts. The flag is set
  // to false again once all contexts have been cleaned up and there is nothing
  // more to do
  bool useReducedWait = false;

  // the time we'll wait for a signal (microseconds, derived from _gcFrequency seconds)
  uint64_t const regularWaitTime = (uint64_t) (_gcFrequency * 1000.0 * 1000.0);

  // the time we'll wait for a signal when the previous wait timed out
  // (1/10th of the regular wait time)
  uint64_t const reducedWaitTime = (uint64_t) (_gcFrequency * 1000.0 * 100.0);

  while (_stopping == 0) {
    V8Context* context = nullptr;

    {
      bool gotSignal = false;
      CONDITION_LOCKER(guard, _contextCondition);

      if (_dirtyContexts[DEFAULT_NAME].empty()) {
        uint64_t waitTime = useReducedWait ? reducedWaitTime : regularWaitTime;

        // we'll wait for a signal or a timeout
        gotSignal = guard.wait(waitTime);

        // use a reduced wait time in the next round because we seem to be idle
        // the reduced wait time will allow use to perfom GC for more contexts
        useReducedWait = ! gotSignal;
      }

      if (! _dirtyContexts[DEFAULT_NAME].empty()) {
        // a context was explicitly marked dirty — take the most recent one
        context = _dirtyContexts[DEFAULT_NAME].back();
        _dirtyContexts[DEFAULT_NAME].pop_back();
        useReducedWait = false;
      }
      else if (! gotSignal && ! _freeContexts[DEFAULT_NAME].empty()) {
        // we timed out waiting for a signal, so we have idle time that we can
        // spend on running the GC pro-actively
        // We'll pick one of the free contexts and clean it up
        context = pickFreeContextForGc();

        // if there is no context to clean up, probably they all have been
        // cleaned up already; increase the wait time so we don't cycle too
        // much in the GC loop and waste CPU unnecessary
        useReducedWait = (context != nullptr);
      }
    }

    // update last gc time (stamp is published even if no context was picked)
    double lastGc = TRI_microtime();
    gc->updateGcStamp(lastGc);

    if (context != nullptr) {
      LOG_TRACE("collecting V8 garbage");

      auto isolate = context->isolate;
      // the context is not entered by anyone else right now, so we must
      // acquire the locker ourselves before entering the isolate
      TRI_ASSERT(context->_locker == nullptr);
      context->_locker = new v8::Locker(isolate);
      isolate->Enter();
      {
        v8::HandleScope scope(isolate);

        auto localContext = v8::Local<v8::Context>::New(isolate, context->_context);

        localContext->Enter();

        v8::Context::Scope contextScope(localContext);

        TRI_ASSERT(context->_locker->IsLocked(isolate));
        TRI_ASSERT(v8::Locker::IsLocked(isolate));

        TRI_RunGarbageCollectionV8(isolate, 1.0);

        localContext->Exit();
      }
      isolate->Exit();
      delete context->_locker;
      context->_locker = nullptr;

      // update garbage collection statistics
      context->_hasDeadObjects = false;
      context->_numExecutions  = 0;
      context->_lastGcStamp    = lastGc;

      {
        // hand the freshly-collected context back to the free list and wake
        // up any thread waiting for a context
        CONDITION_LOCKER(guard, _contextCondition);

        _freeContexts[DEFAULT_NAME].push_back(context);
        guard.broadcast();
      }
    }
  }

  _gcFinished = true;
}
// Returns a V8 context after a request. Collects per-context statistics,
// handles cancelation cleanup and global context methods, and then either
// schedules the context for GC (standard contexts) or collects it directly
// (non-standard, named contexts).
// NOTE: statement order is significant throughout — the locker must stay
// alive across every isolate Enter/Exit pair below.
void ApplicationV8::exitContext (V8Context* context) {
  const string& name = context->_name;
  bool isStandard = (name == DEFAULT_NAME);

  V8GcThread* gc = dynamic_cast<V8GcThread*>(_gcThread);
  TRI_ASSERT(gc != nullptr);

  LOG_TRACE("leaving V8 context %d", (int) context->_id);

  double lastGc = gc->getLastGcStamp();

  CONDITION_LOCKER(guard, _contextCondition);

  auto isolate = context->isolate;

  // we must still hold the locker acquired in enterContext
  TRI_ASSERT(context->_locker->IsLocked(isolate));
  TRI_ASSERT(v8::Locker::IsLocked(isolate));

  // update data for later garbage collection
  TRI_GET_GLOBALS();
  context->_hasDeadObjects = v8g->_hasDeadObjects;
  ++context->_numExecutions;

  TRI_ASSERT(v8g->_vocbase != nullptr);
  // release last recently used vocbase
  TRI_ReleaseVocBase(static_cast<TRI_vocbase_t*>(v8g->_vocbase));

  // check for cancelation requests
  bool const canceled = v8g->_canceled;
  v8g->_canceled = false;

  // exit the context
  {
    v8::HandleScope scope(isolate);
    auto localContext = v8::Local<v8::Context>::New(isolate, context->_context);
    localContext->Exit();
  }
  isolate->Exit();

  // if the execution was canceled, we need to cleanup
  if (canceled) {
    // re-enter the isolate/context briefly to run the cancelation cleanup
    isolate->Enter();
    {
      v8::HandleScope scope(isolate);
      auto localContext = v8::Local<v8::Context>::New(isolate, context->_context);
      localContext->Enter();
      context->handleCancelationCleanup();
      localContext->Exit();
    }
    isolate->Exit();
  }

  // try to execute new global context methods
  if (isStandard) {
    bool runGlobal = false;

    {
      // only peek at the queue under the methods lock; execution happens
      // below without holding it
      MUTEX_LOCKER(context->_globalMethodsLock);
      runGlobal = ! context->_globalMethods.empty();
    }

    if (runGlobal) {
      isolate->Enter();
      {
        v8::HandleScope scope(isolate);
        auto localContext = v8::Local<v8::Context>::New(isolate, context->_context);
        localContext->Enter();

        TRI_ASSERT(context->_locker->IsLocked(isolate));
        TRI_ASSERT(v8::Locker::IsLocked(isolate));

        context->handleGlobalContextMethods();

        localContext->Exit();
      }
      isolate->Exit();
    }
  }

  // default is false
  bool performGarbageCollection = false;

  // postpone garbage collection for standard contexts: the GC thread will
  // pick dirty contexts up asynchronously
  if (isStandard) {
    if (context->_lastGcStamp + _gcFrequency < lastGc) {
      LOG_TRACE("V8 context has reached GC timeout threshold and will be scheduled for GC");
      performGarbageCollection = true;
    }
    else if (context->_numExecutions >= _gcInterval) {
      LOG_TRACE("V8 context has reached maximum number of requests and will be scheduled for GC");
      performGarbageCollection = true;
    }

    if (performGarbageCollection) {
      _dirtyContexts[name].push_back(context);
    }
    else {
      _freeContexts[name].push_back(context);
    }

    _busyContexts[name].erase(context);

    // give up the V8 lock; the GC thread (or the next request) will
    // re-acquire it
    delete context->_locker;
    context->_locker = nullptr;

    TRI_ASSERT(! v8::Locker::IsLocked(isolate));

    guard.broadcast();
  }

  // non-standard case: directly collect the garbage (these contexts are not
  // served by the GC thread); hard-coded threshold of 1000 executions
  else {
    if (context->_numExecutions >= 1000) {
      LOG_TRACE("V8 context has reached maximum number of requests and will be scheduled for GC");
      performGarbageCollection = true;
    }

    _busyContexts[name].erase(context);

    if (performGarbageCollection) {
      // drop the condition lock while running the (potentially long) GC,
      // then re-acquire it before touching the lists again
      guard.unlock();

      isolate->Enter();
      {
        v8::HandleScope scope(isolate);
        auto localContext = v8::Local<v8::Context>::New(isolate, context->_context);
        localContext->Enter();

        TRI_ASSERT(context->_locker->IsLocked(isolate));
        TRI_ASSERT(v8::Locker::IsLocked(isolate));

        TRI_RunGarbageCollectionV8(isolate, 1.0);

        localContext->Exit();
      }
      isolate->Exit();

      guard.lock();

      context->_numExecutions = 0;
    }

    delete context->_locker;
    context->_locker = nullptr;

    // NOTE(review): unlike the standard branch, no guard.broadcast() happens
    // here after pushing to _freeContexts — verify that no thread waits on
    // the condition for non-standard contexts, otherwise this can stall.
    _freeContexts[name].push_back(context);
  }

  // reset the context data. garbage collection should be able to run without it
  v8g->_query            = nullptr;
  v8g->_vocbase          = nullptr;
  v8g->_allowUseDatabase = false;

  LOG_TRACE("returned dirty V8 context");
}
// Main loop of the dedicated GC thread (older variant using the legacy V8
// LowMemoryNotification/IdleNotification API). Waits for dirty contexts or
// a timeout, then garbage-collects one context per iteration until the
// server is stopping.
void ApplicationV8::collectGarbage () {
  V8GcThread* gc = dynamic_cast<V8GcThread*>(_gcThread);
  assert(gc != 0);

  // this flag will be set to true if we timed out waiting for a GC signal
  // if set to true, the next cycle will use a reduced wait time so the GC
  // can be performed more early for all dirty contexts. The flag is set
  // to false again once all contexts have been cleaned up and there is nothing
  // more to do
  bool useReducedWait = false;

  // the time we'll wait for a signal (microseconds, derived from _gcFrequency seconds)
  const uint64_t regularWaitTime = (uint64_t) (_gcFrequency * 1000.0 * 1000.0);

  // the time we'll wait for a signal when the previous wait timed out
  // (1/10th of the regular wait time)
  const uint64_t reducedWaitTime = (uint64_t) (_gcFrequency * 1000.0 * 100.0);

  while (_stopping == 0) {
    V8Context* context = 0;

    {
      bool gotSignal = false;
      CONDITION_LOCKER(guard, _contextCondition);

      if (_dirtyContexts.empty()) {
        uint64_t waitTime = useReducedWait ? reducedWaitTime : regularWaitTime;

        // we'll wait for a signal or a timeout
        gotSignal = guard.wait(waitTime);

        // use a reduced wait time in the next round because we seem to be idle
        // the reduced wait time will allow use to perfom GC for more contexts
        useReducedWait = ! gotSignal;
      }

      if (! _dirtyContexts.empty()) {
        // a context was explicitly marked dirty — take the most recent one
        context = _dirtyContexts.back();
        _dirtyContexts.pop_back();
        useReducedWait = false;
      }
      else if (! gotSignal && ! _freeContexts.empty()) {
        // we timed out waiting for a signal, so we have idle time that we can
        // spend on running the GC pro-actively
        // We'll pick one of the free contexts and clean it up
        context = pickContextForGc();

        // if there is no context to clean up, probably they all have been
        // cleaned up already; increase the wait time so we don't cycle too
        // much in the GC loop and waste CPU unnecessary
        useReducedWait = (context != 0);
      }
    }

    // update last gc time (stamp is published even if no context was picked)
    double lastGc = TRI_microtime();
    gc->updateGcStamp(lastGc);

    if (context != 0) {
      LOGGER_TRACE("collecting V8 garbage");

      // acquire the V8 lock and enter the isolate/context for this collection
      context->_locker = new v8::Locker(context->_isolate);
      context->_isolate->Enter();
      context->_context->Enter();

      // legacy V8 GC idiom: signal memory pressure, then run idle
      // notifications until V8 reports there is no more work to do
      v8::V8::LowMemoryNotification();
      while (! v8::V8::IdleNotification()) {
      }

      context->_context->Exit();
      context->_isolate->Exit();
      delete context->_locker;

      // reset GC bookkeeping for this context
      context->_dirt = 0;
      context->_lastGcStamp = lastGc;

      {
        // hand the context back to the free list and wake up waiters
        CONDITION_LOCKER(guard, _contextCondition);

        _freeContexts.push_back(context);
        guard.broadcast();
      }
    }
  }
}
// Returns a V8 context after a request (variant with out-of-memory
// detection). Runs pending global context methods, then schedules the
// context for GC when it is stale, heavily used, or hit an OOM condition;
// otherwise puts it back on the free list.
void ApplicationV8::exitContext (V8Context* context) {
  V8GcThread* gc = dynamic_cast<V8GcThread*>(_gcThread);
  assert(gc != 0);

  LOG_TRACE("leaving V8 context %d", (int) context->_id);

  double lastGc = gc->getLastGcStamp();

  CONDITION_LOCKER(guard, _contextCondition);

  ++context->_dirt;

  // HasOutOfMemoryException must be called while there is still an isolate!
  // (i.e. before the Exit() calls below — do not reorder)
  bool const hasOutOfMemoryException = context->_context->HasOutOfMemoryException();

  // exit the context
  context->_context->Exit();
  context->_isolate->Exit();

  // try to execute new global context methods
  bool runGlobal = false;

  {
    // only peek at the queue under the methods lock; execution happens
    // below without holding it
    MUTEX_LOCKER(context->_globalMethodsLock);
    runGlobal = ! context->_globalMethods.empty();
  }

  if (runGlobal) {
    // re-enter briefly to run the queued global methods
    context->_isolate->Enter();
    context->_context->Enter();

    context->handleGlobalContextMethods();

    context->_context->Exit();
    context->_isolate->Exit();
  }

  // give up the V8 lock acquired in enterContext
  delete context->_locker;

  // decide whether this context must be garbage collected
  bool performGarbageCollection;

  if (context->_lastGcStamp + _gcFrequency < lastGc) {
    LOG_TRACE("V8 context has reached GC timeout threshold and will be scheduled for GC");
    performGarbageCollection = true;
  }
  else if (context->_dirt >= _gcInterval) {
    LOG_TRACE("V8 context has reached maximum number of requests and will be scheduled for GC");
    performGarbageCollection = true;
  }
  else if (hasOutOfMemoryException) {
    LOG_INFO("V8 context has encountered out of memory and will be scheduled for GC");
    performGarbageCollection = true;
  }
  else {
    performGarbageCollection = false;
  }

  if (performGarbageCollection) {
    _dirtyContexts.push_back(context);
    _busyContexts.erase(context);
  }
  else {
    _freeContexts.push_back(context);
    _busyContexts.erase(context);
  }

  // wake up the GC thread / any thread waiting for a free context
  guard.broadcast();

  LOG_TRACE("returned dirty V8 context");
}