TEST_F(ConfigTests, test_watched_files) {
  ConfigDataInstance config;
  ASSERT_EQ(config.files().size(), 3);

  // From the deprecated "additional_monitoring" collection.
  EXPECT_EQ(config.files().at("downloads").size(), 5);

  // From the new, recommended top-level "file_paths" collection.
  EXPECT_EQ(config.files().at("downloads2").size(), 5);
  EXPECT_EQ(config.files().at("system_binaries").size(), 3);
}
Status Config::getMD5(std::string& hash_string) {
  // Request an accessor to our own config, outside of an update.
  ConfigDataInstance config;

  std::stringstream out;
  pt::write_json(out, config.data());

  hash_string = osquery::hashFromBuffer(
      HASH_TYPE_MD5, (void*)out.str().c_str(), out.str().length());

  return Status(0, "OK");
}
Status YARAEventSubscriber::Callback(const FileEventContextRef& ec,
                                     const void* user_data) {
  if (user_data == nullptr) {
    return Status(1, "No YARA category string provided");
  }

  Row r;
  r["action"] = ec->action;
  r["target_path"] = ec->path;
  r["category"] = *(std::string*)user_data;
  // Only FSEvents transactions update this (inotify is a no-op).
  r["transaction_id"] = INTEGER(ec->transaction_id);
  // These are default values, to be updated in YARACallback.
  r["count"] = INTEGER(0);
  r["matches"] = std::string("");

  ConfigDataInstance config;
  const auto& parser = config.getParser("yara");
  if (parser == nullptr) {
    return Status(1, "ConfigParser unknown.");
  }
  const auto& yaraParser =
      std::static_pointer_cast<YARAConfigParserPlugin>(parser);
  auto rules = yaraParser->rules();

  // Use the category as a lookup into the YARA file_paths. The value will be
  // a list of signature groups to scan with.
  auto category = r.at("category");
  const auto& yara_config = config.getParsedData("yara");
  const auto& yara_paths = yara_config.get_child("file_paths");
  auto sig_groups = yara_paths.find(category);
  if (sig_groups == yara_paths.not_found()) {
    // No signature groups are configured for this category; nothing to scan.
    return Status(0, "OK");
  }

  for (const auto& rule : sig_groups->second) {
    const std::string group = rule.second.data();
    int result = yr_rules_scan_file(rules[group],
                                    ec->path.c_str(),
                                    SCAN_FLAGS_FAST_MODE,
                                    YARACallback,
                                    (void*)&r,
                                    0);
    if (result != ERROR_SUCCESS) {
      return Status(1, "YARA error: " + std::to_string(result));
    }
  }

  if (ec->action != "" && r.at("matches").size() > 0) {
    add(r, ec->time);
  }

  return Status(0, "OK");
}
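The category-to-signature-group lookup above implies a particular shape for the parsed "yara" data. The following is only a minimal sketch of that shape, assuming the parser accepts "signatures" (group name to rule files) and "file_paths" (category to group names) keys as the get_child("file_paths") lookup suggests; the source name, group name, and rule file are hypothetical.

// Hedged illustration only: mirrors the nesting read by the callback above.
Config::update(
    {{"yara_source",
      "{\"yara\": {"
      "\"signatures\": {\"sig_group_1\": [\"baseline.sig\"]},"
      "\"file_paths\": {\"downloads\": [\"sig_group_1\"]}}}"}});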
Status FileEventSubscriber::init() { ConfigDataInstance config; for (const auto& element_kv : config.files()) { for (const auto& file : element_kv.second) { VLOG(1) << "Added listener to: " << file; auto mc = createSubscriptionContext(); mc->path = file; subscribe(&FileEventSubscriber::Callback, mc, (void*)(&element_kv.first)); } } return Status(0, "OK"); }
Status FileEventSubscriber::init() { ConfigDataInstance config; for (const auto& element_kv : config.files()) { for (const auto& file : element_kv.second) { VLOG(1) << "Added listener to: " << file; auto mc = createSubscriptionContext(); mc->recursive = 1; mc->path = file; mc->mask = IN_ATTRIB | IN_MODIFY | IN_DELETE | IN_CREATE; subscribe(&FileEventSubscriber::Callback, mc, (void*)(&element_kv.first)); } } return Status(0, "OK"); }
Status Config::getMD5(std::string& hash_string) {
  // Request an accessor to our own config, outside of an update.
  ConfigDataInstance config;

  std::stringstream out;
  try {
    pt::write_json(out, config.data(), false);
  } catch (const pt::json_parser::json_parser_error& e) {
    return Status(1, e.what());
  }

  hash_string = osquery::hashFromBuffer(
      HASH_TYPE_MD5, (void*)out.str().c_str(), out.str().length());

  return Status(0, "OK");
}
void SchedulerRunner::start() {
  time_t t = std::time(nullptr);
  struct tm* local = std::localtime(&t);
  unsigned long int i = local->tm_sec;
  for (; (timeout_ == 0) || (i <= timeout_); ++i) {
    {
      ConfigDataInstance config;
      for (const auto& query : config.schedule()) {
        if (i % query.second.splayed_interval == 0) {
          launchQuery(query.first, query.second);
        }
      }
    }
    // Put the thread into an interruptible sleep without a config instance.
    osquery::interruptableSleep(interval_ * 1000);
  }
}
void Config::recordQueryPerformance(const std::string& name,
                                    size_t delay,
                                    size_t size,
                                    const Row& r0,
                                    const Row& r1) {
  // Grab a lock on the schedule structure and check the name.
  ConfigDataInstance config;
  if (config.schedule().count(name) == 0) {
    // Unknown query schedule name.
    return;
  }

  // Grab access to the non-const schedule item.
  auto& query = getInstance().data_.schedule.at(name);
  auto diff = AS_LITERAL(BIGINT_LITERAL, r1.at("user_time")) -
              AS_LITERAL(BIGINT_LITERAL, r0.at("user_time"));
  if (diff > 0) {
    query.user_time += diff;
  }

  diff = AS_LITERAL(BIGINT_LITERAL, r1.at("system_time")) -
         AS_LITERAL(BIGINT_LITERAL, r0.at("system_time"));
  if (diff > 0) {
    query.system_time += diff;
  }

  diff = AS_LITERAL(BIGINT_LITERAL, r1.at("resident_size")) -
         AS_LITERAL(BIGINT_LITERAL, r0.at("resident_size"));
  if (diff > 0) {
    // Memory is stored as an average of RSS changes between query executions.
    query.average_memory = (query.average_memory * query.executions) + diff;
    query.average_memory = (query.average_memory / (query.executions + 1));
  }

  query.wall_time += delay;
  query.output_size += size;
  query.executions += 1;
}
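As a standalone illustration of the rolling-average step above, here is a minimal sketch using plain integers and hypothetical values; it mirrors the two average_memory assignments exactly.

#include <cstdint>
#include <iostream>

int main() {
  // Hypothetical running state for one scheduled query.
  uint64_t average_memory = 4096; // average RSS growth so far, in bytes
  uint64_t executions = 3;        // completed executions

  // A new execution grows RSS by 8192 bytes (diff > 0, so it is folded in).
  uint64_t diff = 8192;
  average_memory = (average_memory * executions) + diff;
  average_memory = average_memory / (executions + 1);
  executions += 1;

  // (4096 * 3 + 8192) / 4 = 5120
  std::cout << average_memory << std::endl; // prints 5120
  return 0;
}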
TEST_F(ConfigTests, test_config_parser) {
  // Register a config parser plugin.
  Registry::add<TestConfigParserPlugin>("config_parser", "test");
  Registry::get("config_parser", "test")->setUp();

  {
    // Access the parser's data without having updated the configuration.
    ConfigDataInstance config;
    const auto& test_data = config.getParsedData("test");
    // Expect the setUp method to have run and set blank defaults.
    // Accessing an invalid property tree key will abort.
    ASSERT_EQ(test_data.get_child("dictionary").count(""), 0);
  }

  // Update or load the config, expect the parser to be called.
  Config::update(
      {{"source1",
        "{\"dictionary\": {\"key1\": \"value1\"}, \"list\": [\"first\"]}"}});
  ASSERT_TRUE(TestConfigParserPlugin::update_called);

  {
    // Now access the parser's data AFTER updating the config (no longer blank).
    ConfigDataInstance config;
    const auto& test_data = config.getParsedData("test");
    // Expect a value that existed in the configuration.
    EXPECT_EQ(test_data.count("dictionary"), 1);
    EXPECT_EQ(test_data.get("dictionary.key1", ""), "value1");
    // Expect a value for every key the parser requested.
    // Every requested key will be present, even if the key's tree is empty.
    EXPECT_EQ(test_data.count("dictionary2"), 1);
    // Expect the parser-created data item.
    EXPECT_EQ(test_data.count("dictionary3"), 1);
    EXPECT_EQ(test_data.get("dictionary3.key2", ""), "value2");
  }

  // Update from a secondary source into a dictionary.
  // Expect that the keys in the top-level dictionary are merged.
  Config::update({{"source2", "{\"dictionary\": {\"key3\": \"value3\"}}"}});
  // Update from a third source into a list.
  // Expect that the items from each source in the top-level list are merged.
  Config::update({{"source3", "{\"list\": [\"second\"]}"}});

  {
    ConfigDataInstance config;
    const auto& test_data = config.getParsedData("test");
    EXPECT_EQ(test_data.count("dictionary"), 1);
    EXPECT_EQ(test_data.get("dictionary.key1", ""), "value1");
    EXPECT_EQ(test_data.get("dictionary.key3", ""), "value3");
    EXPECT_EQ(test_data.count("list"), 1);
    EXPECT_EQ(test_data.get_child("list").count(""), 2);
  }
}
TEST_F(ConfigTests, test_config_update) {
  std::string digest;
  // Get a snapshot of the digest before making config updates.
  auto status = Config::getMD5(digest);
  EXPECT_TRUE(status);

  // Request an update of the 'new_source1'. Set new1 = value.
  status =
      Config::update({{"new_source1", "{\"options\": {\"new1\": \"value\"}}"}});
  EXPECT_TRUE(status);

  // At the very least, the amalgamated config digest should have changed.
  std::string new_digest;
  Config::getMD5(new_digest);
  EXPECT_NE(digest, new_digest);

  // Access the option that was added in the update to source 'new_source1'.
  {
    ConfigDataInstance config;
    auto option = config.data().get<std::string>("options.new1", "");
    EXPECT_EQ(option, "value");
  }

  // Add a lexically larger source that emits the same option 'new1'.
  Config::update({{"new_source2", "{\"options\": {\"new1\": \"changed\"}}"}});
  {
    ConfigDataInstance config;
    auto option = config.data().get<std::string>("options.new1", "");
    // Expect the amalgamation to have overwritten 'new_source1'.
    EXPECT_EQ(option, "changed");
  }

  // Again add a source, but emit a different option; both 'new1' and 'new2'
  // should be present in the amalgamated/merged config.
  Config::update({{"new_source3", "{\"options\": {\"new2\": \"different\"}}"}});
  {
    ConfigDataInstance config;
    auto option = config.data().get<std::string>("options.new1", "");
    EXPECT_EQ(option, "changed");
    option = config.data().get<std::string>("options.new2", "");
    EXPECT_EQ(option, "different");
  }
}
TEST_F(ConfigTests, test_queries_execute) {
  ConfigDataInstance config;
  EXPECT_EQ(config.schedule().size(), 3);
}
Status ProcessEventSubscriber::Callback(
    const TypedKernelEventContextRef<osquery_process_event_t>& ec,
    const void* user_data) {
  Row r;

  r["overflows"] = "";
  r["cmdline_count"] = BIGINT(ec->event.actual_argc);
  r["cmdline_size"] = BIGINT(ec->event.arg_length);
  if (ec->event.argc != ec->event.actual_argc) {
    r["overflows"] = "cmdline";
  }

  r["envc"] = BIGINT(ec->event.envc);
  r["environment_count"] = BIGINT(ec->event.actual_envc);
  r["environment_size"] = BIGINT(ec->event.env_length);
  if (ec->event.envc != ec->event.actual_envc) {
    r["overflows"] +=
        std::string(((r["overflows"].size() > 0) ? ", " : "")) + "environment";
  }

  char* argv = &(ec->flexible_data.data()[ec->event.argv_offset]);
  std::string argv_accumulator("");
  while (ec->event.argc-- > 0) {
    argv_accumulator += argv;
    argv_accumulator += " ";
    argv += strlen(argv) + 1;
  }
  r["cmdline"] = std::move(argv_accumulator);

  {
    // A configuration can optionally restrict environment variable logging to
    // a whitelist. This is helpful for limiting logged data as well as
    // protecting against logging unsafe/private variables.
    bool use_whitelist = false;
    pt::ptree whitelist;

    // Check if an events whitelist exists.
    ConfigDataInstance config;
    if (config.data().count("events")) {
      // Only apply a whitelist search if the events and environment_variables
      // keys are included. Otherwise, optimize by adding all.
      if (config.data().get_child("events").count("environment_variables")) {
        use_whitelist = true;
        whitelist = config.data().get_child("events.environment_variables");
      }
    }

    char* envv = &(ec->flexible_data.data()[ec->event.envv_offset]);
    std::string envv_accumulator("");
    while (ec->event.envc-- > 0) {
      auto envv_string = std::string(envv);
      if (use_whitelist) {
        for (const auto& item : whitelist) {
          if (envv_string.find(item.second.data()) == 0) {
            envv_accumulator += std::move(envv_string) + ' ';
            break;
          }
        }
      } else {
        envv_accumulator += std::move(envv_string) + ' ';
      }
      envv += strlen(envv) + 1;
    }
    r["environment"] = std::move(envv_accumulator);
  }

  r["pid"] = BIGINT(ec->event.pid);
  r["parent"] = BIGINT(ec->event.ppid);
  r["uid"] = BIGINT(ec->event.uid);
  r["euid"] = BIGINT(ec->event.euid);
  r["gid"] = BIGINT(ec->event.gid);
  r["egid"] = BIGINT(ec->event.egid);
  r["owner_uid"] = BIGINT(ec->event.owner_uid);
  r["owner_gid"] = BIGINT(ec->event.owner_gid);
  r["create_time"] = BIGINT(ec->event.create_time);
  r["access_time"] = BIGINT(ec->event.access_time);
  r["modify_time"] = BIGINT(ec->event.modify_time);
  r["change_time"] = BIGINT(ec->event.change_time);
  r["mode"] = BIGINT(ec->event.mode);
  r["path"] = ec->event.path;
  r["uptime"] = BIGINT(ec->uptime);

  add(r, ec->time);
  return Status(0, "OK");
}
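The whitelist branch above reads events.environment_variables from the config and keeps only variables whose "NAME=value" string starts with a listed name. A minimal sketch of such a config, with a hypothetical source name and variable names, could be delivered through the same Config::update path the tests use.

// Hedged illustration: the key layout matches the
// get_child("events.environment_variables") read above.
Config::update(
    {{"whitelist_source",
      "{\"events\": {\"environment_variables\": [\"PATH\", \"LD_PRELOAD\"]}}"}});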
QueryData genYara(QueryContext& context) {
  QueryData results;
  Status status;

  auto paths = context.constraints["path"].getAll(EQUALS);
  auto patterns = context.constraints["pattern"].getAll(EQUALS);
  auto groups = context.constraints["sig_group"].getAll(EQUALS);
  auto sigfiles = context.constraints["sigfile"].getAll(EQUALS);

  // A path or pattern constraint selects what to scan; at least one of
  // sig_group or sigfile must be given, otherwise there is nothing to scan
  // with.
  if (groups.size() == 0 && sigfiles.size() == 0) {
    return results;
  }

  // XXX: Abstract this into a common "get rules for group" function.
  ConfigDataInstance config;
  const auto& parser = config.getParser("yara");
  if (parser == nullptr) {
    return results;
  }
  const auto& yaraParser =
      std::static_pointer_cast<YARAConfigParserPlugin>(parser);
  if (yaraParser == nullptr) {
    return results;
  }
  auto rules = yaraParser->rules();

  // Store resolved paths in a vector of pairs.
  // Each pair has the first element as the path to scan and the second
  // element as the pattern which generated it.
  std::vector<std::pair<std::string, std::string>> path_pairs;

  // Expand patterns and push onto path_pairs.
  for (const auto& pattern : patterns) {
    std::vector<std::string> expanded_patterns;
    auto pattern_status = resolveFilePattern(pattern, expanded_patterns);
    if (!pattern_status.ok()) {
      VLOG(1) << "Could not expand pattern properly: "
              << pattern_status.toString();
      return results;
    }

    for (const auto& resolved : expanded_patterns) {
      if (!isReadable(resolved)) {
        continue;
      }
      path_pairs.push_back(make_pair(resolved, pattern));
    }
  }

  // Collect all paths specified too.
  for (const auto& path_string : paths) {
    if (!isReadable(path_string)) {
      continue;
    }
    path_pairs.push_back(make_pair(path_string, ""));
  }

  // Compile all sigfiles into a map.
  std::map<std::string, YR_RULES*> compiled_rules;
  for (const auto& file : sigfiles) {
    YR_RULES* sigfile_rules = nullptr;
    std::string full_path;
    // Relative sigfile paths are rooted at the default signature directory.
    if (file[0] != '/') {
      full_path = std::string("/etc/osquery/yara/") + file;
    } else {
      full_path = file;
    }

    status = compileSingleFile(full_path, &sigfile_rules);
    if (!status.ok()) {
      VLOG(1) << "YARA error: " << status.toString();
    } else {
      compiled_rules[file] = sigfile_rules;
    }
  }

  // Scan every path pair.
  for (const auto& path_pair : path_pairs) {
    // Scan using signature groups.
    for (const auto& group : groups) {
      if (rules.count(group) == 0) {
        continue;
      }
      VLOG(1) << "Scanning with group: " << group;
      status = doYARAScan(rules[group],
                          path_pair.first.c_str(),
                          path_pair.second,
                          results,
                          group,
                          "");
      if (!status.ok()) {
        VLOG(1) << "YARA error: " << status.toString();
      }
    }

    // Scan using sigfiles.
    for (const auto& element : compiled_rules) {
      VLOG(1) << "Scanning with file: " << element.first;
      status = doYARAScan(element.second,
                          path_pair.first.c_str(),
                          path_pair.second,
                          results,
                          "",
                          element.first);
      if (!status.ok()) {
        VLOG(1) << "YARA error: " << status.toString();
      }
    }
  }

  // Clean up compiled rules.
  for (const auto& element : compiled_rules) {
    yr_rules_destroy(element.second);
  }

  return results;
}