// Returns a pointer to the core's internal VSAPI vtable for the requested
// major API version. An unsupported version is a programming error inside
// the core itself, so it aborts via vsFatal() rather than returning an
// error to the caller; the trailing return nullptr only silences compiler
// warnings and is never reached.
const VSAPI *getVSAPIInternal(int apiMajor) {
    if (apiMajor != VAPOURSYNTH_API_MAJOR) {
        vsFatal("Internally requested API version %d not supported", apiMajor);
        return nullptr;
    }
    return &vsapi;
}
// Queues a frame request for processing by the worker threads, deduplicating
// against requests already in flight for the same (clip, n, index) output.
// NOTE(review): every access here touches shared pool state (ticks, tasks,
// allContexts) without taking this->lock, so this appears to assume the
// caller already holds the pool lock — confirm against call sites.
void VSThreadPool::startInternal(const PFrameContext &context) {
    //technically this could be done by walking up the context chain and add a new notification to the correct one
    //unfortunately this would probably be quite slow for deep scripts so just hope the cache catches it

    if (context->n < 0)
        vsFatal("Negative frame request by: %s", context->clip->getName().c_str());

    // check to see if it's time to reevaluate cache sizes
    if (core->memory->isOverLimit()) {
        ticks = 0;
        notifyCaches(true);
    }

    // a normal tick for caches to adjust their sizes based on recent history
    // (only top-level requests — those without an upstream context — count)
    if (!context->upstreamContext && ++ticks == 500) {
        ticks = 0;
        notifyCaches(false);
    }

    // add it immediately if the task is to return a completed frame or report an error since it never has an existing context
    if (context->returnedFrame || context->hasError()) {
        tasks.push_back(context);
    } else {
        // this request will produce a frame for its requester, so record the
        // outstanding dependency on the upstream (requesting) context
        if (context->upstreamContext)
            ++context->upstreamContext->numFrameRequests;

        NodeOutputKey p(context->clip, context->n, context->index);

        if (allContexts.count(p)) {
            // an identical request is already in flight
            PFrameContext &ctx = allContexts[p];
            assert(context->clip == ctx->clip && context->n == ctx->n && context->index == ctx->index);
            if (ctx->returnedFrame) {
                // special case where the requested frame is encountered "by accident"
                context->returnedFrame = ctx->returnedFrame;
                tasks.push_back(context);
            } else {
                // add it to the list of contexts to notify when it's available
                // (intrusive singly-linked list threaded through notificationChain)
                context->notificationChain = ctx->notificationChain;
                ctx->notificationChain = context;
                // keep the earliest request order so scheduling priority is preserved
                ctx->reqOrder = std::min(ctx->reqOrder, context->reqOrder);
            }
        } else {
            // create a new context and append it to the tasks
            allContexts[p] = context;
            tasks.push_back(context);
        }
    }
    // make sure at least one worker is awake to pick up the new task
    wakeThread();
}
// Worker-thread entry point: repeatedly scans the task list for a runnable
// frame context, invokes the filter, and propagates the resulting frame or
// error to all linked (waiting) contexts. Exits when `stop` is set and no
// task was runnable. The pool lock is held except around the actual
// getFrameInternal() call.
void VSThreadPool::runTasks(VSThreadPool *owner, std::atomic<bool> &stop) {
#ifdef VS_TARGET_CPU_X86
    // sanity check: a filter or runtime must not have left dirty x87/MMX state
    if (!vs_isMMXStateOk())
        vsFatal("Bad MMX state detected after creating new thread");
#endif
#ifdef VS_TARGET_OS_WINDOWS
    if (!vs_isFPUStateOk())
        vsWarning("Bad FPU state detected after creating new thread");
    if (!vs_isSSEStateOk())
        vsFatal("Bad SSE state detected after creating new thread");
#endif

    std::unique_lock<std::mutex> lock(owner->lock);

    while (true) {
        bool ranTask = false;

        /////////////////////////////////////////////////////////////////////////////////////////////
        // Go through all tasks from the top (oldest) and process the first one possible
        for (std::list<PFrameContext>::iterator iter = owner->tasks.begin(); iter != owner->tasks.end(); ++iter) {
            FrameContext *mainContext = iter->get();
            FrameContext *leafContext = nullptr;

            /////////////////////////////////////////////////////////////////////////////////////////////
            // Handle the output tasks
            // (a context with frameDone set belongs to an external requester; just deliver)
            if (mainContext->frameDone && mainContext->returnedFrame) {
                PFrameContext mainContextRef(*iter);
                owner->tasks.erase(iter);
                owner->returnFrame(mainContextRef, mainContext->returnedFrame);
                ranTask = true;
                break;
            }

            if (mainContext->frameDone && mainContext->hasError()) {
                PFrameContext mainContextRef(*iter);
                owner->tasks.erase(iter);
                owner->returnFrame(mainContextRef, mainContext->getErrorMessage());
                ranTask = true;
                break;
            }

            // a context carrying a completed frame/error is a "leaf" notification;
            // the filter call is made on its upstream (requesting) context instead
            bool hasLeafContext = mainContext->returnedFrame || mainContext->hasError();
            if (hasLeafContext) {
                leafContext = mainContext;
                mainContext = mainContext->upstreamContext.get();
            }

            VSNode *clip = mainContext->clip;
            int filterMode = clip->filterMode;

            /////////////////////////////////////////////////////////////////////////////////////////////
            // This part handles the locking for the different filter modes

            bool parallelRequestsNeedsUnlock = false;
            if (filterMode == fmUnordered) {
                // already busy?
                if (!clip->serialMutex.try_lock())
                    continue;
            } else if (filterMode == fmSerial) {
                // already busy?
                if (!clip->serialMutex.try_lock())
                    continue;
                // no frame in progress?
                if (clip->serialFrame == -1) {
                    clip->serialFrame = mainContext->n;
                //
                } else if (clip->serialFrame != mainContext->n) {
                    // a different frame is mid-flight on this serial filter; back off
                    clip->serialMutex.unlock();
                    continue;
                }
                // continue processing the already started frame
            } else if (filterMode == fmParallel) {
                std::lock_guard<std::mutex> lock(clip->concurrentFramesMutex);
                // is the filter already processing another call for this frame? if so move along
                if (clip->concurrentFrames.count(mainContext->n)) {
                    continue;
                } else {
                    clip->concurrentFrames.insert(mainContext->n);
                }
            } else if (filterMode == fmParallelRequests) {
                std::lock_guard<std::mutex> lock(clip->concurrentFramesMutex);
                // is the filter already processing another call for this frame? if so move along
                if (clip->concurrentFrames.count(mainContext->n)) {
                    continue;
                } else {
                    // do we need the serial lock since all frames will be ready this time?
                    // check if we're in the arAllFramesReady state so we need additional locking
                    if (mainContext->numFrameRequests == 1) {
                        if (!clip->serialMutex.try_lock())
                            continue;
                        parallelRequestsNeedsUnlock = true;
                        clip->concurrentFrames.insert(mainContext->n);
                    }
                }
            }

            /////////////////////////////////////////////////////////////////////////////////////////////
            // Remove the context from the task list

            PFrameContext mainContextRef;
            PFrameContext leafContextRef;
            if (hasLeafContext) {
                leafContextRef = *iter;
                mainContextRef = leafContextRef->upstreamContext;
            } else {
                mainContextRef = *iter;
            }

            owner->tasks.erase(iter);

            /////////////////////////////////////////////////////////////////////////////////////////////
            // Figure out the activation reason

            VSActivationReason ar = arInitial;
            bool skipCall = false; // Used to avoid multiple error calls for the same frame request going into a filter
            if ((hasLeafContext && leafContext->hasError()) || mainContext->hasError()) {
                ar = arError;
                skipCall = mainContext->setError(leafContext->getErrorMessage());
                --mainContext->numFrameRequests;
            } else if (hasLeafContext && leafContext->returnedFrame) {
                // one requested input frame arrived; last one switches to arAllFramesReady
                if (--mainContext->numFrameRequests > 0)
                    ar = arFrameReady;
                else
                    ar = arAllFramesReady;

                mainContext->availableFrames.insert(std::make_pair(NodeOutputKey(leafContext->clip, leafContext->n, leafContext->index), leafContext->returnedFrame));
                mainContext->lastCompletedN = leafContext->n;
                mainContext->lastCompletedNode = leafContext->node;
            }

            assert(mainContext->numFrameRequests >= 0);

            bool hasExistingRequests = !!mainContext->numFrameRequests;

            /////////////////////////////////////////////////////////////////////////////////////////////
            // Do the actual processing
            // (pool lock is released so other workers can schedule while the filter runs)

            lock.unlock();

            VSFrameContext externalFrameCtx(mainContextRef);

            assert(ar == arError || !mainContext->hasError());
            PVideoFrame f;
            if (!skipCall)
                f = clip->getFrameInternal(mainContext->n, ar, externalFrameCtx);
            ranTask = true;

            bool frameProcessingDone = f || mainContext->hasError();
            if (mainContext->hasError() && f)
                vsFatal("A frame was returned by %s but an error was also set, this is not allowed", clip->name.c_str());

            /////////////////////////////////////////////////////////////////////////////////////////////
            // Unlock so the next job can run on the context

            if (filterMode == fmUnordered) {
                clip->serialMutex.unlock();
            } else if (filterMode == fmSerial) {
                if (frameProcessingDone)
                    clip->serialFrame = -1;
                clip->serialMutex.unlock();
            } else if (filterMode == fmParallel) {
                std::lock_guard<std::mutex> lock(clip->concurrentFramesMutex);
                clip->concurrentFrames.erase(mainContext->n);
            } else if (filterMode == fmParallelRequests) {
                std::lock_guard<std::mutex> lock(clip->concurrentFramesMutex);
                clip->concurrentFrames.erase(mainContext->n);
                if (parallelRequestsNeedsUnlock)
                    clip->serialMutex.unlock();
            }

            /////////////////////////////////////////////////////////////////////////////////////////////
            // Handle frames that were requested
            // (only forward new requests while the frame is still incomplete)

            bool requestedFrames = !externalFrameCtx.reqList.empty() && !frameProcessingDone;

            lock.lock();
            if (requestedFrames) {
                for (auto &reqIter : externalFrameCtx.reqList)
                    owner->startInternal(reqIter);
                externalFrameCtx.reqList.clear();
            }

            if (frameProcessingDone)
                owner->allContexts.erase(NodeOutputKey(mainContext->clip, mainContext->n, mainContext->index));

            /////////////////////////////////////////////////////////////////////////////////////////////
            // Propagate status to other linked contexts
            // CHANGES mainContextRef!!!

            if (mainContext->hasError() && !hasExistingRequests && !requestedFrames) {
                // walk the notification chain, spreading the error to every waiter
                PFrameContext n;
                do {
                    n = mainContextRef->notificationChain;
                    if (n) {
                        mainContextRef->notificationChain.reset();
                        n->setError(mainContextRef->getErrorMessage());
                    }
                    if (mainContextRef->upstreamContext) {
                        owner->startInternal(mainContextRef);
                    }
                    if (mainContextRef->frameDone) {
                        owner->returnFrame(mainContextRef, mainContextRef->getErrorMessage());
                    }
                } while ((mainContextRef = n));
            } else if (f) {
                if (hasExistingRequests || requestedFrames)
                    vsFatal("A frame was returned at the end of processing by %s but there are still outstanding requests", clip->name.c_str());
                // walk the notification chain, handing the finished frame to every waiter
                PFrameContext n;
                do {
                    n = mainContextRef->notificationChain;
                    if (n)
                        mainContextRef->notificationChain.reset();
                    if (mainContextRef->upstreamContext) {
                        mainContextRef->returnedFrame = f;
                        owner->startInternal(mainContextRef);
                    }
                    if (mainContextRef->frameDone)
                        owner->returnFrame(mainContextRef, f);
                } while ((mainContextRef = n));
            } else if (hasExistingRequests || requestedFrames) {
                // already scheduled, do nothing
            } else {
                vsFatal("No frame returned at the end of processing by %s", clip->name.c_str());
            }
            // restart the scan from the head of the task list
            break;
        }

        // idle/shutdown handling: park the thread when nothing was runnable,
        // or retire it when the pool has shrunk below the active thread count
        if (!ranTask || owner->activeThreadCount() > owner->threadCount()) {
            --owner->activeThreads;
            if (stop) {
                lock.unlock();
                break;
            }
            ++owner->idleThreads;
            owner->newWork.wait(lock);
            --owner->idleThreads;
            ++owner->activeThreads;
        }
    }
}