bool wait_on_child_and_print_progress(process& child_proc) {
  const size_t BUF_SIZE = 4096;
  char buf[BUF_SIZE];
  ssize_t bytes_read;
  bool success = true;

  while ((bytes_read = child_proc.read_from_child(buf, BUF_SIZE)) > 0) {
    logprogress_stream << std::string(buf, buf + bytes_read);

    if (cppipc::must_cancel()) {
      logprogress_stream << "Cancelled by user" << std::endl;
      child_proc.kill(false);
      success = false;
      break;
    }
  }

  logprogress_stream << std::endl;
  return success;
}
// The net_cls handles aren't treated as resources. Further, they have fixed
// values and hence don't have a notion of usage. We therefore return an
// empty 'ResourceStatistics' object.
Future<ResourceStatistics> CgroupsNetClsIsolatorProcess::usage(
    const ContainerID& containerId)
{
  if (!infos.contains(containerId)) {
    return Failure("Unknown container");
  }

  return ResourceStatistics();
}
// The net_cls handles are labels and hence there are no limitations associated
// with them. This function therefore always returns a pending future, since
// the limitation is never reached.
Future<ContainerLimitation> CgroupsNetClsIsolatorProcess::watch(
    const ContainerID& containerId)
{
  if (!infos.contains(containerId)) {
    return Failure("Unknown container");
  }

  return Future<ContainerLimitation>();
}
Future<bool> RegistrarProcess::apply(Owned<Operation> operation)
{
  if (recovered.isNone()) {
    return Failure("Attempted to apply the operation before recovering");
  }

  return recovered.get()->future()
    .then(defer(self(), &Self::_apply, operation));
}
void HealthCheckerProcess::reschedule()
{
  VLOG(1) << "Rescheduling health check in "
          << Seconds(static_cast<int64_t>(check.interval_seconds()));

  delay(Seconds(static_cast<int64_t>(check.interval_seconds())),
        self(),
        &Self::_healthCheck);
}
Future<Nothing> NetworkCniIsolatorProcess::_cleanup(
    const ContainerID& containerId,
    const list<Future<Nothing>>& detaches)
{
  CHECK(infos.contains(containerId));

  vector<string> messages;
  foreach (const Future<Nothing>& detach, detaches) {
    if (!detach.isReady()) {
      messages.push_back(
          detach.isFailed() ? detach.failure() : "discarded");
    }
  }

  if (!messages.empty()) {
    return Failure(strings::join("\n", messages));
  }

  const string containerDir =
    paths::getContainerDir(rootDir.get(), containerId.value());

  const string target =
    paths::getNamespacePath(rootDir.get(), containerId.value());

  if (os::exists(target)) {
    Try<Nothing> unmount = fs::unmount(target);
    if (unmount.isError()) {
      return Failure(
          "Failed to unmount the network namespace handle '" +
          target + "': " + unmount.error());
    }
  }

  Try<Nothing> rmdir = os::rmdir(containerDir);
  if (rmdir.isError()) {
    return Failure(
        "Failed to remove the container directory '" +
        containerDir + "': " + rmdir.error());
  }

  infos.erase(containerId);

  return Nothing();
}
void Mesos::stop()
{
  if (process != nullptr) {
    terminate(process);
    wait(process);
    delete process;
    process = nullptr;
  }
}
bool processSearch(process& currProcess)
{
  bool result = false;

  if ((currProcess.get_pname() != "") && (currProcess.get_pid() == 0)) {
    // pname set, pid not set: search with pname.
#ifdef DEBUG
    print_string("Using Name Search");
#endif
    int pid = 0; // Will be set in currProcess; can't be done by this function.
    result = processSearch(currProcess.get_pname(), &pid);
    currProcess.set_pid(pid);
  } else {
#ifdef DEBUG
    print_string("Using PID Search");
#endif
    result = processSearch(currProcess.get_pid());
  }

  return result;
}
Future<Nothing> Fetcher::fetch(
    const URI& uri,
    const string& directory) const
{
  if (!plugins.contains(uri.scheme())) {
    return Failure("Scheme '" + uri.scheme() + "' is not supported");
  }

  return plugins.at(uri.scheme())->fetch(uri, directory);
}
Future<Nothing> HealthCheckerProcess::__httpHealthCheck(
    const tuple<
        Future<Option<int>>,
        Future<string>,
        Future<string>>& t)
{
  Future<Option<int>> status = std::get<0>(t);
  if (!status.isReady()) {
    return Failure(
        "Failed to get the exit status of the " + string(HTTP_CHECK_COMMAND) +
        " process: " + (status.isFailed() ? status.failure() : "discarded"));
  }

  if (status->isNone()) {
    return Failure(
        "Failed to reap the " + string(HTTP_CHECK_COMMAND) + " process");
  }

  int statusCode = status->get();
  if (statusCode != 0) {
    Future<string> error = std::get<2>(t);
    if (!error.isReady()) {
      return Failure(
          string(HTTP_CHECK_COMMAND) + " returned " +
          WSTRINGIFY(statusCode) + "; reading stderr failed: " +
          (error.isFailed() ? error.failure() : "discarded"));
    }

    return Failure(
        string(HTTP_CHECK_COMMAND) + " returned " +
        WSTRINGIFY(statusCode) + ": " + error.get());
  }

  Future<string> output = std::get<1>(t);
  if (!output.isReady()) {
    return Failure(
        "Failed to read stdout from " + string(HTTP_CHECK_COMMAND) + ": " +
        (output.isFailed() ? output.failure() : "discarded"));
  }

  // Parse the output and get the HTTP response code.
  Try<int> code = numify<int>(output.get());
  if (code.isError()) {
    return Failure(
        "Unexpected output from " + string(HTTP_CHECK_COMMAND) + ": " +
        output.get());
  }

  if (code.get() < process::http::Status::OK ||
      code.get() >= process::http::Status::BAD_REQUEST) {
    return Failure(
        "Unexpected HTTP response code: " +
        process::http::Status::string(code.get()));
  }

  return Nothing();
}
TEST(ProcessTest, Defer3)
{
  ASSERT_TRUE(GTEST_IS_THREADSAFE);

  std::atomic_bool bool1(false);
  std::atomic_bool bool2(false);

  Deferred<void(bool)> set1 = defer([&bool1](bool b) { bool1.store(b); });
  set1(true);

  Deferred<void(bool)> set2 = defer([&bool2](bool b) { bool2.store(b); });
  set2(true);

  // Spin until the deferred invocations have actually executed.
  while (bool1.load() == false);
  while (bool2.load() == false);
}
Future<Nothing> CgroupsNetClsIsolatorProcess::isolate(
    const ContainerID& containerId,
    pid_t pid)
{
  if (!infos.contains(containerId)) {
    return Failure("Unknown container");
  }

  const Info& info = infos.at(containerId);

  Try<Nothing> assign = cgroups::assign(hierarchy, info.cgroup, pid);
  if (assign.isError()) {
    return Failure(
        "Failed to assign container '" + stringify(containerId) +
        "' to its own cgroup '" + path::join(hierarchy, info.cgroup) +
        "': " + assign.error());
  }

  return Nothing();
}
Future<HttpResponse> SlavesManager::deactivate(const HttpRequest& request)
{
  // Parse the query to get out the slave hostname and port.
  string hostname = "";
  uint16_t port = 0;

  map<string, vector<string> > pairs =
    strings::pairs(request.query, ",", "=");

  // Make sure there is at least a 'hostname=' and 'port='.
  if (pairs.count("hostname") == 0) {
    LOG(WARNING) << "Slaves manager expecting 'hostname' in query string"
                 << " when trying to deactivate a slave";
    return HttpNotFoundResponse();
  } else if (pairs.count("port") == 0) {
    LOG(WARNING) << "Slaves manager expecting 'port' in query string"
                 << " when trying to deactivate a slave";
    return HttpNotFoundResponse();
  }

  hostname = pairs["hostname"].front();

  // Check that 'port' is valid.
  try {
    port = lexical_cast<uint16_t>(pairs["port"].front());
  } catch (const bad_lexical_cast&) {
    LOG(WARNING) << "Slaves manager failed to parse 'port = "
                 << pairs["port"].front()
                 << "' when trying to deactivate a slave";
    return HttpNotFoundResponse();
  }

  LOG(INFO) << "Slaves manager received HTTP request to deactivate slave at "
            << hostname << ":" << port;

  if (deactivate(hostname, port)) {
    return HttpOKResponse();
  } else {
    return HttpInternalServerErrorResponse();
  }
}
TEST_F(SubprocessTest, Default)
{
  Try<Subprocess> s = subprocess("echo hello world");

  ASSERT_SOME(s);

  // Advance time until the internal reaper reaps the subprocess.
  Clock::pause();
  while (s.get().status().isPending()) {
    Clock::advance(MAX_REAP_INTERVAL());
    Clock::settle();
  }
  Clock::resume();

  AWAIT_ASSERT_READY(s.get().status());
  ASSERT_SOME(s.get().status().get());

  int status = s.get().status().get().get();
  EXPECT_TRUE(WIFEXITED(status));
  EXPECT_EQ(0, WEXITSTATUS(status));
}
TEST(LoopTest, DiscardIterate)
{
  Promise<int> promise;

  promise.future().onDiscard([&]() { promise.discard(); });

  Future<Nothing> future = loop(
      [&]() { return promise.future(); },
      [&](int i) -> ControlFlow<Nothing> { return Break(); });

  EXPECT_TRUE(future.isPending());

  future.discard();

  AWAIT_DISCARDED(future);

  EXPECT_TRUE(promise.future().hasDiscard());
}
TEST(LoopTest, Sync)
{
  std::atomic_int value = ATOMIC_VAR_INIT(1);

  Future<Nothing> future = loop(
      [&]() { return value.load(); },
      [](int i) -> ControlFlow<Nothing> {
        if (i != 0) {
          return Continue();
        }
        return Break();
      });

  EXPECT_TRUE(future.isPending());

  value.store(0);

  AWAIT_READY(future);
}
// Print individual statistics for a single process:
void individual_stats(process& curr_process)
{
  cout << "Process " << curr_process.get_pid() << endl;
  cout << "Burst Time: " << curr_process.get_burst() << " ms" << endl;
  cout << "Priority: " << curr_process.get_priority() << endl;
  cout << "Arrival Time: " << curr_process.get_arrival() << " ms" << endl;
  cout << "Initial Wait Time: " << curr_process.get_initial_wait() << " ms" << endl;
  cout << "Total Wait Time: " << curr_process.get_total_wait_time() << " ms" << endl;
  cout << "Status: " << curr_process.get_status() << endl;
  cout << "=============================================" << endl;
}
// Insert a process into the ready queue, keeping the queue ordered by
// remaining time (shortest remaining time first).
void putProcess(process p)
{
  if (!readyQueue.empty()) {
    for (i = 0; i < readyQueue.size(); i++) {
      if (readyQueue[i].getRem() > p.getRem()) {
        break;
      }
    }
    readyQueue.insert(readyQueue.begin() + i, p);
  } else {
    readyQueue.push_back(p);
  }
}
// Wait for a subprocess and test the status code for the following
// conditions of 'expected_status':
//   1. 'None': anything but '0'.
//   2. 'Some': the value of 'expected_status'.
// Returns Nothing if the resulting status code matches the
// expectation; otherwise a Failure with the output of the subprocess.
// TODO(jmlvanre): Turn this into a generally useful abstraction for
// gtest where we can have a more straightforward 'expected_status'.
Future<Nothing> await_subprocess(
    const Subprocess& subprocess,
    const Option<int>& expected_status = None())
{
  // Dup the pipe fd of the subprocess so we can read the output if needed.
  Try<int_fd> dup = os::dup(subprocess.out().get());
  if (dup.isError()) {
    return Failure(dup.error());
  }

  int_fd out = dup.get();

  // Once we get the status of the process.
  return subprocess.status()
    .then([=](const Option<int>& status) -> Future<Nothing> {
      // If the status is not set, fail out.
      if (status.isNone()) {
        return Failure("Subprocess status is none");
      }

      // If the status is not what we expect then fail out with the
      // output of the subprocess. The failure message will include
      // the assertion failures of the subprocess.
      if ((expected_status.isSome() && status.get() != expected_status.get()) ||
          (expected_status.isNone() && status.get() == 0)) {
        return io::read(out)
          .then([](const string& output) -> Future<Nothing> {
            return Failure(
                "\n[++++++++++] Subprocess output.\n" +
                output + "[++++++++++]\n");
          });
      }

      // If the subprocess ran successfully then return nothing.
      return Nothing();
    }).onAny([=]() { os::close(out); });
}
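// A minimal usage sketch of 'await_subprocess' (the test name and shell
// command here are illustrative, not taken from the original code). The
// subprocess is created with a PIPE for stdout so the helper can dup and
// read its output, and no 'expected_status' is passed, so any non-zero
// exit status satisfies the expectation.
TEST_F(SubprocessTest, AwaitSubprocessUsage)
{
  Try<Subprocess> s = subprocess(
      "echo failing; exit 1",
      Subprocess::FD(STDIN_FILENO),
      Subprocess::PIPE(),
      Subprocess::FD(STDERR_FILENO));

  ASSERT_SOME(s);

  // The helper fails the returned future (with the subprocess output)
  // only if the status does not match the expectation.
  AWAIT_READY(await_subprocess(s.get()));
}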
void stdin_filler::fill(process& p)
{
  if (fd == -1) {
    return; // Don't need to fill anything...
  }

  char buf[512];
  int numbytes;
  do {
    numbytes = read(fd, buf, 512);
    if (numbytes == -1) {
      perror("read");
      std::exit(1);
    }
    if (!writeall(p.in(), buf, numbytes)) {
      // It is easy to imagine this write failing, so if that happens
      // just kill the process, as it's not useful to us...
      if (settings.verbose) {
        std::cerr << "write failed to subprocess, killing...\n";
      }
      p.term();
      break;
    }
  } while (numbytes != 0);

  lseek(fd, 0, SEEK_SET); // Return to the beginning of the file.
}
v1::scheduler::Event evolve(const StatusUpdateMessage& message)
{
  v1::scheduler::Event event;
  event.set_type(v1::scheduler::Event::UPDATE);

  v1::scheduler::Event::Update* update = event.mutable_update();

  update->mutable_status()->CopyFrom(evolve(message.update().status()));

  if (message.update().has_slave_id()) {
    update->mutable_status()->mutable_agent_id()->CopyFrom(
        evolve(message.update().slave_id()));
  }

  if (message.update().has_executor_id()) {
    update->mutable_status()->mutable_executor_id()->CopyFrom(
        evolve(message.update().executor_id()));
  }

  update->mutable_status()->set_timestamp(message.update().timestamp());

  // If the update does not have a 'uuid', it does not need
  // acknowledging. However, prior to 0.23.0, the update uuid
  // was required and always set. In 0.24.0, we can rely on the
  // update uuid check here; until then we must still check for
  // this being sent from the driver (from == UPID()) or from
  // the master (pid == UPID()).
  // TODO(vinod): Get rid of this logic in 0.25.0 because master
  // and slave correctly set task status in 0.24.0.
  if (!message.update().has_uuid() || message.update().uuid() == "") {
    update->mutable_status()->clear_uuid();
  } else if (UPID(message.pid()) == UPID()) {
    update->mutable_status()->clear_uuid();
  } else {
    update->mutable_status()->set_uuid(message.update().uuid());
  }

  return event;
}
Future<Nothing> HealthCheckerProcess::healthCheck()
{
  VLOG(1) << "Health check starting in "
          << Seconds(static_cast<int64_t>(check.delay_seconds()))
          << ", grace period "
          << Seconds(static_cast<int64_t>(check.grace_period_seconds()));

  startTime = Clock::now();

  delay(Seconds(static_cast<int64_t>(check.delay_seconds())),
        self(),
        &Self::_healthCheck);

  return promise.future();
}
// Insert a process into the appropriate queue: processes whose time slice
// has not yet expired go into the ready queue, expired ones go into the
// expired queue. Both queues are kept in descending order of dynamic
// priority.
void putProcess(process p)
{
  // cout << p.pid << endl;
  if (p.getExp() == 0) {
    if (!readyQueue.empty()) {
      for (i = 0; i < readyQueue.size(); i++) {
        if (readyQueue[i].getDP() < p.getDP()) {
          break;
        }
      }
      readyQueue.insert(readyQueue.begin() + i, p);
    } else {
      readyQueue.push_back(p);
    }
  } else {
    i = 0;
    // p.setDP(p.getPrio() - 1);
    if (!expiredQueue.empty()) {
      for (i = 0; i < expiredQueue.size(); i++) {
        if (expiredQueue[i].getDP() < p.getDP()) {
          break;
        }
      }
      expiredQueue.insert(expiredQueue.begin() + i, p);
    } else {
      expiredQueue.push_back(p);
    }
  }
}
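// A minimal sketch of the accessors the two 'putProcess' variants above rely
// on, inferred from the calls they make; the real 'process' class in this
// scheduler is assumed to expose at least these fields and may well differ.
class process_sketch
{
public:
  int getRem() const { return remaining; }         // Remaining burst time.
  int getDP() const { return dynamic_priority; }   // Dynamic priority.
  int getPrio() const { return static_priority; }  // Static priority.
  int getExp() const { return expired; }           // Nonzero once the time slice expires.

private:
  int remaining = 0;
  int dynamic_priority = 0;
  int static_priority = 0;
  int expired = 0;
};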
Future<bool> RegistrarProcess::_apply(Owned<Operation> operation)
{
  if (error.isSome()) {
    return Failure(error.get());
  }

  CHECK_SOME(variable);

  operations.push_back(operation);

  Future<bool> future = operation->future();
  if (!updating) {
    update();
  }

  return future;
}
TEST_F(SubprocessTest, FdInput)
{
  string in = path::join(os::getcwd(), "stdin");

  ASSERT_SOME(os::write(in, "hello\n"));

  Try<int> inFd = os::open(in, O_RDONLY | O_CLOEXEC);
  ASSERT_SOME(inFd);

  Try<Subprocess> s = subprocess(
      "read word ; echo $word",
      Subprocess::FD(inFd.get()),
      Subprocess::PIPE(),
      Subprocess::FD(STDERR_FILENO));

  ASSERT_SOME(os::close(inFd.get()));

  ASSERT_SOME(s);
  ASSERT_SOME(s.get().out());
  AWAIT_EXPECT_EQ("hello\n", io::read(s.get().out().get()));

  // Advance time until the internal reaper reaps the subprocess.
  Clock::pause();
  while (s.get().status().isPending()) {
    Clock::advance(MAX_REAP_INTERVAL());
    Clock::settle();
  }
  Clock::resume();

  AWAIT_ASSERT_READY(s.get().status());
  ASSERT_SOME(s.get().status().get());

  int status = s.get().status().get().get();
  EXPECT_TRUE(WIFEXITED(status));
  EXPECT_EQ(0, WEXITSTATUS(status));
}
TEST(FutureTest, UndiscardableFuture)
{
  Promise<int> promise;

  Future<int> f = undiscardable(promise.future());

  f.discard();

  EXPECT_TRUE(f.hasDiscard());
  EXPECT_FALSE(promise.future().hasDiscard());

  promise.set(42);

  AWAIT_ASSERT_EQ(42, f);
}
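// A short sketch of where 'undiscardable' is useful (the 'cachedLookup' name
// and surrounding code are hypothetical, not from the test above): wrapping a
// shared future means that a caller discarding its derived future does not
// propagate the discard to the shared computation other callers depend on,
// matching the behavior exercised by the test.
Future<int> cachedLookup(Promise<int>& shared)
{
  return undiscardable(shared.future())
    .then([](int value) {
      return value * 2; // Per-caller continuation; safe to discard.
    });
}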
// Like the 'remote' test but uses http::connect.
TEST(ProcessTest, Http1)
{
  ASSERT_TRUE(GTEST_IS_THREADSAFE);

  RemoteProcess process;
  spawn(process);

  http::URL url = http::URL(
      "http",
      process.self().address.ip,
      process.self().address.port,
      process.self().id + "/handler");

  Future<http::Connection> connect = http::connect(url);
  AWAIT_READY(connect);

  http::Connection connection = connect.get();

  Future<UPID> pid;
  Future<string> body;
  EXPECT_CALL(process, handler(_, _))
    .WillOnce(DoAll(FutureArg<0>(&pid),
                    FutureArg<1>(&body)));

  http::Request request;
  request.method = "POST";
  request.url = url;
  request.headers["User-Agent"] = "libprocess/";
  request.body = "hello world";

  // Send the libprocess request. Note that we will not
  // receive a 202 due to the use of the `User-Agent`
  // header, therefore we need to explicitly disconnect!
  Future<http::Response> response = connection.send(request);

  AWAIT_READY(body);
  ASSERT_EQ("hello world", body.get());

  AWAIT_READY(pid);
  ASSERT_EQ(UPID(), pid.get());

  EXPECT_TRUE(response.isPending());

  AWAIT_READY(connection.disconnect());

  terminate(process);
  wait(process);
}
Future<vector<string>> StoreProcess::fetchDependencies(
    const string& imageId,
    bool cached)
{
  const string imagePath = paths::getImagePath(rootDir, imageId);

  Try<spec::ImageManifest> manifest = spec::getManifest(imagePath);
  if (manifest.isError()) {
    return Failure(
        "Failed to get dependencies for image id '" + imageId +
        "': " + manifest.error());
  }

  vector<Image::Appc> dependencies;
  foreach (const spec::ImageManifest::Dependency& dependency,
           manifest->dependencies()) {
    Image::Appc appc;
    appc.set_name(dependency.imagename());
    if (dependency.has_imageid()) {
      appc.set_id(dependency.imageid());
    }

    // TODO(jojy): Make Image::Appc use appc::spec::Label instead of
    // mesos::Label so that we can avoid this loop here.
    foreach (const spec::ImageManifest::Label& label, dependency.labels()) {
      mesos::Label appcLabel;
      appcLabel.set_key(label.name());
      appcLabel.set_value(label.value());

      appc.mutable_labels()->add_labels()->CopyFrom(appcLabel);
    }

    dependencies.emplace_back(appc);
  }

  if (dependencies.size() == 0) {
    return vector<string>();
  }

  // Do a depth first search.
  vector<Future<vector<string>>> futures;
  futures.reserve(dependencies.size());
  foreach (const Image::Appc& appc, dependencies) {
    futures.emplace_back(fetchImage(appc, cached));
  }
// In this hook, we check for the presence of a label, and if set
// we return a failure, effectively failing the container creation.
// Otherwise we add an environment variable to the executor and task.
// Additionally, this hook creates a file named "foo" in the container
// work directory (sandbox).
Future<Option<DockerTaskExecutorPrepareInfo>>
slavePreLaunchDockerTaskExecutorDecorator(
    const Option<TaskInfo>& taskInfo,
    const ExecutorInfo& executorInfo,
    const string& containerName,
    const string& containerWorkDirectory,
    const string& mappedSandboxDirectory,
    const Option<map<string, string>>& env) override
{
  LOG(INFO) << "Executing 'slavePreLaunchDockerTaskExecutorDecorator' hook";

  if (taskInfo.isSome()) {
    foreach (const Label& label, taskInfo->labels().labels()) {
      if (label.key() == testErrorLabelKey) {
        return Failure("Spotted error label");
      }
    }
  }
Future<ProvisionInfo> ProvisionerProcess::provision(
    const ContainerID& containerId,
    const Image& image)
{
  if (!stores.contains(image.type())) {
    return Failure(
        "Unsupported container image type: " + stringify(image.type()));
  }

  // Get and then provision image layers from the store.
  return stores.get(image.type()).get()->get(image, defaultBackend)
    .then(defer(
        self(),
        &Self::_provision,
        containerId,
        image,
        defaultBackend,
        lambda::_1));
}