/*!
 * Start all jobs of \p phase that have been added to the pipeline (see \ref
 * addJobForPhase) as well as jobs from \em all previous phases.
 */
void Pipeline::startJobsOfPhase(Phase phase)
{
    switch (phase) {
    case PHASE_INVALID:
        throw Exception("Cannot start jobs of phase \"invalid\"");
    case PHASE_NONE: // pipelined
        startJobs(&mPipelinedJobs);
        break;
    case PHASE_BROADPHASE:
    case PHASE_MIDDLEPHASE:
    case PHASE_NARROWPHASE:
        // AB: every phase also flushes all earlier phases, in case some job
        // was accidentally added there (those should be completed already).
        startJobs(mBroadPhaseJobs);
        if (phase != PHASE_BROADPHASE) {
            startJobs(mMiddlePhaseJobs);
            if (phase == PHASE_NARROWPHASE) {
                startJobs(mNarrowPhaseJobs);
            }
        }
        break;
    }
}
/*
 * Entry point: expects exactly one argument (the path to the configuration
 * file), loads the job list from it, and starts the jobs.
 * Returns 1 on bad usage, otherwise whatever startJobs reports.
 */
int main(int argc, char *argv[])
{
    if (argc != 2) {
        fprintf(stderr, "Argumentos incorrectos, utilice de la siguiente manera:\n");
        fprintf(stderr, "consolactrl path/archivo/configuracion\n");
        return 1;
    }

    Job jobs[512];                                  /* fixed-capacity job table */
    const int jobCount = readConfigFile(argv[1], jobs);

    return startJobs(jobCount, jobs);
}
void JobScheduler::add(const std::shared_ptr<IndexerJob> &job) { assert(!(job->flags & ~IndexerJob::Type_Mask)); std::shared_ptr<Node> node(new Node({ job, 0, 0, 0, String() })); node->job = job; // error() << job->priority << job->sourceFile << mProcrastination; if (mPendingJobs.isEmpty() || job->priority > mPendingJobs.first()->job->priority) { mPendingJobs.prepend(node); } else { std::shared_ptr<Node> after = mPendingJobs.last(); while (job->priority > after->job->priority) { after = after->prev; assert(after); } mPendingJobs.insert(node, after); } assert(!mInactiveById.contains(job->id)); mInactiveById[job->id] = node; // error() << "procrash" << mProcrastination << job->sourceFile; if (!mProcrastination) startJobs(); }
/*!
 * Start pending indexer jobs, spawning one "rp" subprocess per job, until
 * either options.jobCount processes are active or mPendingJobs is exhausted.
 * Jobs whose source has a known header error are throttled via
 * options.headerErrorJobCount. Re-invoked from the process-finished callback
 * so freed slots are refilled.
 */
void JobScheduler::startJobs()
{
    // Locate the "rp" binary once: next to our own executable first, with a
    // resolve()d fallback, finally bare "rp" so $PATH lookup can find it.
    static Path rp;
    if (rp.isEmpty()) {
        rp = Rct::executablePath().parentDir() + "rp";
        if (!rp.isFile()) {
            rp = Rct::executablePath();
            rp.resolve();
            rp = rp.parentDir() + "rp";
            if (!rp.isFile()) // should be in $PATH
                rp = "rp";
        }
    }
    const auto &options = Server::instance()->options();
    std::shared_ptr<Node> node = mPendingJobs.first();
    // cont() advances to the next pending node AND removes the current one
    // from mPendingJobs; plain `node = node->next` (used below for the
    // header-error hold-off) skips without removing.
    auto cont = [&node, this]() {
        auto tmp = node->next;
        mPendingJobs.remove(node);
        node = tmp;
    };
    while (mActiveByProcess.size() < options.jobCount && node) {
        assert(node);
        assert(node->job);
        assert(!(node->job->flags & (IndexerJob::Running|IndexerJob::Complete|IndexerJob::Crashed|IndexerJob::Aborted)));
        std::shared_ptr<Project> project = Server::instance()->project(node->job->project);
        if (!project) {
            // Project is gone; drop the job entirely.
            cont();
            debug() << node->job->sourceFile << "doesn't have a project, discarding";
            continue;
        }
        uint32_t headerError = 0;
        if (!mHeaderErrors.isEmpty()) {
            headerError = hasHeaderError(node->job->source.fileId, project);
            if (headerError) {
                // error() << "We got a headerError" << Location::path(headerError) << "for" << node->job->source.sourceFile()
                //         << mHeaderErrorMaxJobs << mHeaderErrorJobIds;
                if (options.headerErrorJobCount <= mHeaderErrorJobIds.size()) {
                    // Too many header-error jobs in flight already: keep this
                    // node pending (skip, do not remove) and try the next one.
                    warning() << "Holding off on" << node->job->sourceFile << "it's got a header error from" << Location::path(headerError);
                    node = node->next;
                    continue;
                }
            }
        }
        const uint64_t jobId = node->job->id;
        Process *process = new Process;
        debug() << "Starting process for" << jobId << node->job->source.key() << node->job.get();
        // Forward our verbosity to rp: one "-v" per log level step.
        List<String> arguments;
        for (int i=logLevel().toInt(); i>0; --i)
            arguments << "-v";
        // Accumulate rp's stdout and surface any @CRASH@...@CRASH@ markers it
        // prints, stripping them from the buffered output.
        // NOTE(review): std::regex per chunk is slow in most stdlib
        // implementations; fine here only if output volume stays small.
        process->readyReadStdOut().connect([this](Process *proc) {
            std::shared_ptr<Node> node = mActiveByProcess[proc];
            assert(node);
            node->stdOut.append(proc->readAllStdOut());
            std::regex rx("@CRASH@([^@]*)@CRASH@");
            std::smatch match;
            while (std::regex_search(node->stdOut.ref(), match, rx)) {
                error() << match[1].str();
                node->stdOut.remove(match.position(), match.length());
            }
        });
        if (!process->start(rp, arguments)) {
            // Spawn failure: mark the job crashed and synthesize a failed
            // IndexDataMessage so bookkeeping completes normally.
            error() << "Couldn't start rp" << rp << process->errorString();
            delete process;
            node->job->flags |= IndexerJob::Crashed;
            debug() << "job crashed (didn't start)" << jobId << node->job->source.key() << node->job.get();
            std::shared_ptr<IndexDataMessage> msg(new IndexDataMessage(node->job));
            msg->setFlag(IndexDataMessage::ParseFailure);
            jobFinished(node->job, msg);
            rp.clear(); // in case rp was missing for a moment and we fell back to searching $PATH
            cont();
            continue;
        }
        if (headerError) {
            // Under the throttle limit: let it run, but demoted and tracked.
            // NOTE(review): "headerheader" below looks like a typo in the log
            // message; left untouched (runtime string).
            node->job->priority = IndexerJob::HeaderError;
            warning() << "Letting" << node->job->sourceFile << "go even with a headerheader error from" << Location::path(headerError);
            mHeaderErrorJobIds.insert(jobId);
        }
        // On exit: dump captured output, flag crashes (non-zero exit without a
        // Complete flag), release the slot and re-run startJobs() to refill it.
        process->finished().connect([this, jobId](Process *proc) {
            EventLoop::deleteLater(proc);
            auto node = mActiveByProcess.take(proc);
            assert(!node || node->process == proc);
            const String stdErr = proc->readAllStdErr();
            if ((node && !node->stdOut.isEmpty()) || !stdErr.isEmpty()) {
                error() << (node ? ("Output from " + node->job->sourceFile + ":") : String("Orphaned process:"))
                        << '\n' << stdErr << (node ? node->stdOut : String());
            }
            if (node) {
                assert(node->process == proc);
                node->process = 0;
                assert(!(node->job->flags & IndexerJob::Aborted));
                if (!(node->job->flags & IndexerJob::Complete) && proc->returnCode() != 0) {
                    auto nodeById = mActiveById.take(jobId);
                    assert(nodeById);
                    assert(nodeById == node);
                    // job failed, probably no IndexDataMessage coming
                    node->job->flags |= IndexerJob::Crashed;
                    debug() << "job crashed" << jobId << node->job->source.key() << node->job.get();
                    std::shared_ptr<IndexDataMessage> msg(new IndexDataMessage(node->job));
                    msg->setFlag(IndexDataMessage::ParseFailure);
                    jobFinished(node->job, msg);
                }
            }
            mHeaderErrorJobIds.remove(jobId);
            startJobs();
        });
        // Hand the job to the subprocess and move it to the active maps.
        node->process = process;
        assert(!(node->job->flags & ~IndexerJob::Type_Mask));
        node->job->flags |= IndexerJob::Running;
        process->write(node->job->encode());
        mActiveByProcess[process] = node;
        // error() << "STARTING JOB" << node->job->source.sourceFile();
        mInactiveById.remove(jobId);
        mActiveById[jobId] = node;
        cont();
    }
}