// Verifies that a logger plugin which itself emits status logs (recursion)
// does not loop forever: recursive statuses are queued and only drained by
// explicit/scheduled relays.
TEST_F(LoggerTests, test_recursion) {
  // Stop the internal Glog facilities.
  google::ShutdownGoogleLogging();
  auto& rf = RegistryFactory::get();
  auto plugin = std::make_shared<RecursiveLoggerPlugin>();
  rf.registry("logger")->add("recurse", plugin);
  EXPECT_TRUE(rf.exists("logger", "recurse"));
  EXPECT_TRUE(rf.setActive("logger", "recurse").ok());

  // Route statuses to stderr so no osqueryd.INFO-style file is created.
  FLAGS_logtostderr = true;
  initStatusLogger("logger_test");
  initLogger("logger_test");
  LOG(WARNING) << "Log to the recursive logger";
  // One synchronous status should have reached the plugin.
  EXPECT_EQ(1U, plugin->statuses);

  // Disable synchronous status relaying; deliveries now happen off-thread.
  FLAGS_logger_status_sync = false;
  LOG(WARNING) << "recurse";
  if (isPlatform(PlatformType::TYPE_WINDOWS)) {
    // Poll for the async relay to finish on Windows.
    // NOTE(review): total wait budget is 100 * 10us = 1ms; if this is meant
    // to tolerate thread scheduling delays, milliseconds seems more
    // plausible than microseconds — confirm against flaky-test history.
    for (size_t i = 0; i < 100; i++) {
      std::this_thread::sleep_for(std::chrono::microseconds(10));
      if (plugin->statuses == 3U) {
        break;
      }
    }
  }
  // The "recurse" trigger plus its recursively-generated status: 3 total.
  EXPECT_EQ(3U, plugin->statuses);

  // Try again with the tool type as a daemon.
  auto tool_type = kToolType;
  kToolType = ToolType::DAEMON;
  LOG(WARNING) << "recurse";
  // The daemon calls the status relay within the scheduler.
  // So the count must NOT advance until relayStatusLogs is invoked below.
  EXPECT_EQ(3U, plugin->statuses);

  // All of recursive log lines will sink during the next call.
  relayStatusLogs(true);
  EXPECT_EQ(4U, plugin->statuses);
  relayStatusLogs(true);
  EXPECT_EQ(5U, plugin->statuses);

  // Restore the tool type and verify all queues fully drained.
  kToolType = tool_type;
  EXPECT_EQ(0U, queuedStatuses());
  EXPECT_EQ(0U, queuedSenders());

  // Make sure the test file does not create a filesystem log.
  // This will happen if the logtostderr is not set.
  EXPECT_FALSE(pathExists("logger_test.INFO"));
  FLAGS_logtostderr = false;
}
void SchedulerRunner::start() { // Start the counter at the second. auto i = osquery::getUnixTime(); for (; (timeout_ == 0) || (i <= timeout_); ++i) { Config::get().scheduledQueries( ([&i](const std::string& name, const ScheduledQuery& query) { if (query.splayed_interval > 0 && i % query.splayed_interval == 0) { TablePlugin::kCacheInterval = query.splayed_interval; TablePlugin::kCacheStep = i; launchQuery(name, query); } })); // Configuration decorators run on 60 second intervals only. if ((i % 60) == 0) { runDecorators(DECORATE_INTERVAL, i); } if (FLAGS_schedule_reload > 0 && (i % FLAGS_schedule_reload) == 0) { if (FLAGS_schedule_reload_sql) { SQLiteDBManager::resetPrimary(); } resetDatabase(); } // GLog is not re-entrant, so logs must be flushed in a dedicated thread. if ((i % 3) == 0) { relayStatusLogs(true); } // Put the thread into an interruptible sleep without a config instance. pauseMilli(interval_ * 1000); if (interrupted()) { break; } } }
// Verifies the filesystem logger writes one status line per glog status and
// that statuses keep flowing to glog's file even after a second logger
// plugin is made active alongside it.
TEST_F(FilesystemLoggerTests, test_log_status) {
  if (isPlatform(PlatformType::TYPE_WINDOWS)) {
    // Cannot test status deterministically on windows.
    return;
  }

  initStatusLogger("osqueryd");
  initLogger("osqueryd");
  LOG(WARNING) << "Filesystem logger test is generating a warning status (1/3)";

  // glog mirrors statuses into <logger_path>/osqueryd.INFO.
  auto status_path = fs::path(FLAGS_logger_path) / "osqueryd.INFO";
  EXPECT_TRUE(osquery::pathExists(status_path));

  std::string content;
  EXPECT_TRUE(readFile(status_path, content));
  // NOTE(review): 4 = glog file header lines + the warning above + the empty
  // trailing element from split() on the final newline — confirm counts if
  // the glog header format changes.
  auto lines = osquery::split(content, "\n").size();
  EXPECT_EQ(4U, lines);

  // Each subsequent status should append exactly one line.
  LOG(WARNING) << "Filesystem logger test is generating a warning status (2/3)";
  content.clear();
  readFile(status_path, content);
  lines = osquery::split(content, "\n").size();
  EXPECT_EQ(5U, lines);

  // Activate a second logger plugin in addition to "filesystem".
  auto& rf = RegistryFactory::get();
  auto filesystem_test = std::make_shared<FilesystemTestLoggerPlugin>();
  rf.registry("logger")->add("filesystem_test", filesystem_test);
  EXPECT_TRUE(rf.setActive("logger", "filesystem,filesystem_test").ok());

  // The glog sink still receives the status even with two active loggers.
  LOG(WARNING) << "Filesystem logger test is generating a warning status (3/3)";
  content.clear();
  readFile(status_path, content);
  lines = osquery::split(content, "\n").size();
  EXPECT_EQ(6U, lines);

  // A forced relay must not duplicate lines in the glog file.
  relayStatusLogs(true);
  content.clear();
  readFile(status_path, content);
  lines = osquery::split(content, "\n").size();
  EXPECT_EQ(6U, lines);
}
// Decides whether a watched worker/extension process is healthy enough to
// keep running. Returns false (caller should stop/restart the child) when
// the child exceeds sustained CPU or private-memory limits; returns true
// when the child is sane or is no longer our child (pid reuse).
bool WatcherRunner::isChildSane(const PlatformProcess& child) const {
  auto rows =
      SQL::selectAllFrom("processes", "pid", EQUALS, INTEGER(child.pid()));
  if (rows.size() == 0) {
    // Could not find worker process?
    return false;
  }

  // Get the performance state for the worker or extension.
  size_t sustained_latency = 0;
  // Compare CPU utilization since last check.
  size_t footprint = 0;
  pid_t parent = 0;
  // IV is the check interval in seconds, and utilization is set per-second.
  auto iv = std::max(getWorkerLimit(INTERVAL), (size_t)1);

  {
    // Lock the shared watcher state for the read-modify-write below.
    WatcherLocker locker;
    auto& state = Watcher::getState(child);
    UNSIGNED_BIGINT_LITERAL user_time = 0, system_time = 0;
    try {
      // Pull parent pid, per-second CPU times, and resident size from the
      // processes-table row; any missing/garbled column resets the latency
      // counter and leaves the defaults above in place.
      parent = AS_LITERAL(BIGINT_LITERAL, rows[0].at("parent"));
      user_time = AS_LITERAL(BIGINT_LITERAL, rows[0].at("user_time")) / iv;
      system_time = AS_LITERAL(BIGINT_LITERAL, rows[0].at("system_time")) / iv;
      footprint = AS_LITERAL(BIGINT_LITERAL, rows[0].at("resident_size"));
    } catch (const std::exception& e) {
      state.sustained_latency = 0;
    }

    // Check the difference of CPU time used since last check.
    // NOTE(review): operands are unsigned — if the sampled time ever dips
    // below the stored state (pid reuse, counter reset) the subtraction
    // wraps to a huge value and spuriously counts as exceeded; confirm this
    // is acceptable (the pid-reuse case is filtered later via `parent`).
    if (user_time - state.user_time > getWorkerLimit(UTILIZATION_LIMIT) ||
        system_time - state.system_time > getWorkerLimit(UTILIZATION_LIMIT)) {
      state.sustained_latency++;
    } else {
      state.sustained_latency = 0;
    }
    // Update the current CPU time.
    state.user_time = user_time;
    state.system_time = system_time;

    // Check if the sustained difference exceeded the acceptable latency limit.
    sustained_latency = state.sustained_latency;

    // Set the memory footprint as the amount of resident bytes allocated
    // since the process image was created (estimate).
    // A more-meaningful check would limit this to writable regions.
    if (state.initial_footprint == 0) {
      state.initial_footprint = footprint;
    }

    // Set the measured/limit-applied footprint to the post-launch allocations.
    if (footprint < state.initial_footprint) {
      footprint = 0;
    } else {
      footprint = footprint - state.initial_footprint;
    }
  }

  // Only make a decision about the child sanity if it is still the watcher's
  // child. It's possible for the child to die, and its pid reused.
  if (parent != PlatformProcess::getCurrentProcess()->pid()) {
    // The child's parent is not the watcher.
    Watcher::reset(child);
    // Do not stop or call the child insane, since it is not our child.
    return true;
  }

  // Sustained latency is counted in checks; scale by iv to compare seconds.
  if (sustained_latency > 0 &&
      sustained_latency * iv >= getWorkerLimit(LATENCY_LIMIT)) {
    LOG(WARNING) << "osqueryd worker (" << child.pid()
                 << ") system performance limits exceeded";
    return false;
  }
  // Check if the private memory exceeds a memory limit.
  if (footprint > 0 &&
      footprint > getWorkerLimit(MEMORY_LIMIT) * 1024 * 1024) {
    LOG(WARNING) << "osqueryd worker (" << child.pid()
                 << ") memory limits exceeded: " << footprint;
    return false;
  }

  // The worker is sane, no action needed.
  // Attempt to flush status logs to the well-behaved worker.
  if (use_worker_) {
    relayStatusLogs();
  }
  return true;
}