// Codify several assumptions about how tables should be joined into tests.
TEST_F(SystemsTablesTests, test_abstract_joins) {
  // The first is an implicit inner join from processes to file information.
  std::string join_preamble =
      "select * from (select path from osquery_info join processes using "
      "(pid)) p";
  auto results = SQL(join_preamble + " join file using (path);");
  ASSERT_EQ(results.rows().size(), 1U);

  // The same holds for an explicit left join.
  // BUG FIX: the original concatenation was missing the leading space,
  // yielding "...) pleft join file ..." instead of the intended SQL.
  results = SQL(join_preamble + " left join file using (path);");
  ASSERT_EQ(results.rows().size(), 1U);

  // A secondary inner join against hash.
  results =
      SQL(join_preamble + " join file using (path) join hash using (path);");
  ASSERT_EQ(results.rows().size(), 1U);

  results = SQL(join_preamble +
                " left join file using (path) left join hash using (path);");
  ASSERT_EQ(results.rows().size(), 1U);

  // Check LIKE and = operands.
  results =
      SQL("select path from file where path = '/etc/' or path LIKE '/dev/%'");
  ASSERT_GT(results.rows().size(), 1U);
}
// Verify that loading a config creates file-event subscriptions and that a
// reconfigure with an empty config removes them again.
TEST_F(FileEventsTableTests, test_configure_subscriptions) {
  // Attach/create the publishers.
  attachEvents();

  // Load a configuration with file paths, verify subscriptions.
  Registry::add<FileEventsTestsConfigPlugin>("config", "file_events_tests");
  Registry::setActive("config", "file_events_tests");
  this->load();

  // Explicitly request a configure for subscribers.
  Registry::registry("event_subscriber")->configure();

  std::string q = "select * from osquery_events where name = 'file_events'";
  auto results = SQL(q);
  ASSERT_EQ(results.rows().size(), 1U);
  auto& row = results.rows()[0];
  // Expect the paths within "unrestricted_pack" to be created as
  // subscriptions.
  EXPECT_EQ(row.at("subscriptions"), "2");

  // The most important part, make sure a reconfigure removes the
  // subscriptions.
  Config::getInstance().update({{"data", "{}"}});
  results = SQL(q);
  auto& row2 = results.rows()[0];
  EXPECT_EQ(row2.at("subscriptions"), "0");
}
// The processes table must yield rows with populated pid/name columns, and
// an impossible pid constraint must match nothing.
TEST_F(SystemsTablesTests, test_processes) {
  auto sql = SQL("select pid, name from processes limit 1");
  ASSERT_EQ(sql.rows().size(), 1U);

  const auto& first = sql.rows()[0];
  EXPECT_FALSE(first.at("pid").empty());
  EXPECT_FALSE(first.at("name").empty());

  // Make sure an invalid pid within the query constraint returns no rows.
  sql = SQL("select pid, name from processes where pid = -1");
  EXPECT_EQ(sql.rows().size(), 0U);
}
bool cpiPython::SetConf(const char *conf, const char *var, const char *val) { if (!conf || !var || !val) { log2("PY: SetConf: wrong parameters\n"); return false; } // first let's check hub's internal config: if(!strcmp(conf, "config")) { string file(server->mDBConf.config_name); cConfigItemBase *ci = NULL; if(file == server->mDBConf.config_name) { ci = server->mC[var]; if (ci) { ci->ConvertFrom(val); log3("PY: SetConf set the value directly in mDBConf to: %s\n", val); return true; } } return false; /*if (SetConfig((char*)conf, (char*)var, (char*)val)) return true; return false;*/ } // let's try searching the database directly: if (!lib_begin || !lib_pack || !lib_unpack || !lib_packprint) return false; log3("PY: SetConf file != 'config', file == '%s'\n", conf); string query = string() + "delete from SetupList where file='" + conf + "' and var='" + var + "'"; w_Targs *a = lib_pack( "sl", query.c_str(), (long)1 ); log3("PY: SetConf calling SQL with params: %s\n", lib_packprint(a)); w_Targs *ret = SQL (-2, a); if (a) free(a); long res, rows, cols; char **list; log3("PY: SetConf SQL returned %s\n", lib_packprint(ret)); if (!lib_unpack( ret, "lllp", &res, &rows, &cols, (void**) &list )) { log3("PY: SetConf call to SQL function failed\n"); freee(ret); return false; } freee(ret->args[3].p); freee(ret); if (!res) { log2("requested config variable ( %s in %s ) does not exist\n", var, conf); }; query = string("") + "insert into SetupList (file, var, val) values ('" + conf + "', '" + var + "', '" + val + "')"; a = lib_pack( "sl", query.c_str(), (long)1 ); log3("PY: SetConf calling SQL with params: %s\n", lib_packprint(a)); ret = SQL (-2, a); freee(a); log3("PY: SetConf SQL returned %s\n", lib_packprint(ret)); if (!lib_unpack( ret, "lllp", &res, &rows, &cols, (void**) &list )) { log3("PY: SetConf call to SQL function failed\n"); freee(ret); return false; } freee(ret->args[3].p); freee(ret); if (!res) return false; return true; }
// Remove every row previously recorded for the given (build, project) pair
// so the tables can be re-populated from scratch. Always returns 0.
static int prune_old_entries(const char* build, const char* project) {
	// Rows in these tables are keyed directly by build and project.
	SQL("DELETE FROM files WHERE build=%Q AND project=%Q", build, project);
	SQL("DELETE FROM unresolved_dependencies WHERE build=%Q AND project=%Q", build, project);
	SQL("DELETE FROM mach_o_objects WHERE build=%Q AND project=%Q", build, project);
	// Symbols are garbage-collected by object serial: drop any symbol whose
	// owning mach_o_object no longer exists (presumably symbols carry no
	// build/project columns of their own -- confirm against the schema).
	SQL("DELETE FROM mach_o_symbols WHERE mach_o_object NOT IN (SELECT serial FROM mach_o_objects)");
	return 0;
}
// Evaluate the pack's discovery queries, caching the verdict for
// FLAGS_pack_refresh_interval seconds. The pack is "discovered" only if
// every discovery query succeeds and returns at least one row.
bool Pack::checkDiscovery() {
  stats_.total++;
  auto current = getUnixTime();
  // Serve the cached verdict while it is still fresh.
  if ((current - discovery_cache_.first) < FLAGS_pack_refresh_interval) {
    stats_.hits++;
    return discovery_cache_.second;
  }

  stats_.misses++;
  discovery_cache_.first = current;
  discovery_cache_.second = true;
  for (const auto& q : discovery_queries_) {
    auto sql = SQL(q);
    if (!sql.ok()) {
      LOG(WARNING) << "Discovery query failed (" << q
                   << "): " << sql.getMessageString();
      discovery_cache_.second = false;
      break;
    }
    // Idiom fix: empty() instead of size() == 0.
    if (sql.rows().empty()) {
      discovery_cache_.second = false;
      break;
    }
  }
  return discovery_cache_.second;
}
// Возвращаем читающий sql TSQLPtr __fastcall TdmConnect::GetReadSQL(const String &strQuery) { TSQLPtr SQL(new TIBSQL(0)); SQL->Database = Database; SQL->Transaction = trRead; SQL->SQL->Text = strQuery; return SQL; }
/* Hex-encode the disk id and record it in the 'disk' table.
 * Returns the SQL() status code. */
int insertDisk(Hash diskId)
{
   char hexId[SHA1_HEXED_SIZE];

   LogFS_HashPrint(hexId, &diskId);
   return SQL(NULL, NULL, "insert into disk values('%s');", hexId);
}
/* Register a peer: store its name together with the hex-encoded host id in
 * the 'peer' table. Returns the SQL() status code. */
int insertPeer(const char *hostName, Hash hostId)
{
   char hexHost[SHA1_HEXED_SIZE];

   LogFS_HashPrint(hexHost, &hostId);
   return SQL(NULL, NULL, "insert into peer values('%s','%s');",
              hostName, hexHost);
}
void launchQuery(const std::string& name, const ScheduledQuery& query) { // Execute the scheduled query and create a named query object. VLOG(1) << "Executing query: " << query.query; auto sql = (FLAGS_enable_monitor) ? monitor(name, query) : SQL(query.query); if (!sql.ok()) { LOG(ERROR) << "Error executing query (" << query.query << "): " << sql.getMessageString(); return; } // Fill in a host identifier fields based on configuration or availability. std::string ident; auto status = getHostIdentifier(ident); if (!status.ok() || ident.empty()) { ident = "<unknown>"; } // A query log item contains an optional set of differential results or // a copy of the most-recent execution alongside some query metadata. QueryLogItem item; item.name = name; item.identifier = ident; item.time = osquery::getUnixTime(); item.calendar_time = osquery::getAsciiTime(); if (query.options.count("snapshot") && query.options.at("snapshot")) { // This is a snapshot query, emit results with a differential or state. item.snapshot_results = std::move(sql.rows()); logSnapshotQuery(item); return; } // Create a database-backed set of query results. auto dbQuery = Query(name, query); DiffResults diff_results; // Add this execution's set of results to the database-tracked named query. // We can then ask for a differential from the last time this named query // was executed by exact matching each row. status = dbQuery.addNewResults(sql.rows(), diff_results); if (!status.ok()) { LOG(ERROR) << "Error adding new results to database: " << status.what(); return; } if (diff_results.added.size() == 0 && diff_results.removed.size() == 0) { // No diff results or events to emit. return; } VLOG(1) << "Found results for query (" << name << ") for host: " << ident; item.results = diff_results; status = logQueryLogItem(item); if (!status.ok()) { LOG(ERROR) << "Error logging the results of query (" << query.query << "): " << status.toString(); } }
/*
 * Prepare every statement in the prepared-statement table.
 * Aborts and reports on the first sqlite3_prepare_v2() failure.
 */
int
pkg_repo_binary_init_prstatements(sqlite3 *sqlite)
{
	sql_prstmt_index idx;

	for (idx = 0; idx < PRSTMT_LAST; idx++) {
		int rc = sqlite3_prepare_v2(sqlite, SQL(idx), -1,
		    &STMT(idx), NULL);
		if (rc != SQLITE_OK) {
			ERROR_SQLITE(sqlite, SQL(idx));
			return (EPKG_FATAL);
		}
	}

	return (EPKG_OK);
}
/* Fetch up to 'howMany' peers in random order, excluding ourselves
 * (hostId). Rows are delivered via selectPeersCallback into 'result'. */
int selectRandomPeers(char **result, Hash hostId, int howMany)
{
   char hexHost[SHA1_HEXED_SIZE];

   LogFS_HashPrint(hexHost, &hostId);
   return SQL(selectPeersCallback, result,
              "select hostId,hostName from peer where hostId!='%s' order by random() limit %d;",
              hexHost, howMany);
}
// Dialog initialization: run the base-class setup, then build the SQL-backed
// data and refresh the visible controls.
BOOL CCReCalQ::OnInitDialog()
{
	CDialog::OnInitDialog();

	// NOTE(review): SQL() presumably prepares the query/recordset consumed
	// by RefreshDataQ() below -- confirm the required ordering.
	SQL();
	RefreshDataQ();
	UpWindowsText();

	return TRUE;  // return TRUE unless you set the focus to a control
	              // EXCEPTION: OCX Property Pages should return FALSE
}
/* Pick n peers uniformly at random, delivering rows through
 * getHostPairCallback into 'hir'. A failing query is treated as a fatal
 * logic error (assert). Returns the SQL() status code. */
int selectNRandomPeers(HostIdRecord *hir, int n)
{
   const int rc = SQL(getHostPairCallback, hir,
                      "select hostId,hostName from peer order by random() limit %d;",
                      n);
   assert(rc == SQLITE_OK);
   return rc;
}
// Joining osquery's own process row (osquery_info) against the processes
// table must match exactly one row: this process.
TEST_F(SystemsTablesTests, test_process_info) {
  auto results = SQL("select * from osquery_info join processes using (pid)");
  ASSERT_EQ(results.rows().size(), 1U);

  // Make sure there is a valid UID and parent.
  EXPECT_EQ(results.rows()[0].count("uid"), 1U);
  EXPECT_NE(results.rows()[0].at("uid"), "-1");
  EXPECT_NE(results.rows()[0].at("parent"), "-1");
}
// New-document hook: instantiate the ADO recordsets this document uses, then
// run the initial query setup.
BOOL CLryEDBQryDoc::OnNewDocument()
{
	if (!CDocument::OnNewDocument())
		return FALSE;

	// Create the COM recordset instances before any query runs.
	Rs.CreateInstance("ADODB.Recordset");
	RsPrintData.CreateInstance("ADODB.Recordset");
	RsTitle.CreateInstance("ADODB.Recordset");
	// NOTE(review): SQL() presumably populates the recordsets created above;
	// confirm it must run after the CreateInstance calls.
	SQL();
	return TRUE;
}
/* Mark a view as invalid: record its (public, secret) hash pair in
 * invalidView and remove all membership rows for the public view.
 * Returns the status of the insert only. */
int invalidateView(Hash secretView)
{
   int r;
   /* The public view id is derived from the secret one. */
   Hash publicView = LogFS_HashApply(secretView);
   char sPublicView[SHA1_HEXED_SIZE];
   char sSecretView[SHA1_HEXED_SIZE];
   LogFS_HashPrint(sPublicView, &publicView);
   LogFS_HashPrint(sSecretView, &secretView);
   r = SQL(NULL, NULL, "insert into invalidView values('%s','%s');",
           sPublicView, sSecretView);
   /* NOTE(review): the result of the membership cleanup is ignored; only
    * the insert's status is reported to the caller -- confirm intended. */
   SQL(NULL, NULL, "delete from viewMember where publicview='%s';",
       sPublicView);
   return r;
}
// Joining interface_addresses with interface_details on the interface name
// must produce at least one row for the loopback address.
TEST_F(NetworkingTablesTests, test_address_details_join) {
  // Expect that we can join interface addresses with details
  const std::string stmt =
      "select * from interface_details id, interface_addresses ia "
      "on ia.interface = id.interface "
      "where ia.address = '127.0.0.1';";

  auto results = SQL(stmt);
  EXPECT_GT(results.rows().size(), 0U);
}
/* Resolve a host id to its peer name. 'result' is cleared first so that the
 * callback's output (if any row matches) is the only content. */
int getPeerForHost(char *result, Hash hostId)
{
   char hexHost[SHA1_HEXED_SIZE];

   LogFS_HashPrint(hexHost, &hostId);
   *result = 0;
   return SQL(getPeerForHostCallback, result,
              "select hostName from peer where hostId='%s';", hexHost);
}
/* Select up to 'howMany' random peers that are NOT already members of the
 * given public view. Rows are delivered through getHostPairCallback into
 * 'hir'. */
int selectHostsNotInView(HostIdRecord *hir, Hash publicView, int howMany)
{
   char sPublicView[SHA1_HEXED_SIZE];
   LogFS_HashPrint(sPublicView, &publicView);
   return SQL(getHostPairCallback, hir,
              /* The sub-select lists hostIds already in the view; the outer
               * query excludes them and randomizes the remainder. */
              "select hostId,hostName from peer where hostId not in "
              "(select peer.hostId as hostId from viewMember,peer where "
              "publicView='%s' and peer.hostId=viewMember.hostId) "
              "order by random() limit %d;", sPublicView, howMany);
}
/* Pick one random peer other than ourselves (hostId). 'result' is cleared
 * first so an empty query leaves an empty string. */
int getRandomPeer(char *result, Hash hostId)
{
   char hexHost[SHA1_HEXED_SIZE];

   LogFS_HashPrint(hexHost, &hostId);
   *result = 0;
   return SQL(getPeerForHostCallback, result,
              "select hostName from peer where hostId!='%s' order by random() limit 1;",
              hexHost);
}
std::vector<fs::path> getHomeDirectories() { auto sql = SQL("SELECT DISTINCT directory FROM users WHERE directory != '/var/empty';"); std::vector<fs::path> results; if (sql.ok()) { for (const auto& row: sql.rows()) { results.push_back(row.at("directory")); } } else { LOG(ERROR) << "Error executing query to return users: " << sql.getMessageString(); } return results; }
/* List every member of a public view except 'exclude', ordered by hostId for
 * a deterministic result. Rows are delivered through getHostPairCallback
 * into 'hir'. */
int selectHostsInView(HostIdRecord *hir, Hash publicView, Hash exclude)
{
   char sPublicView[SHA1_HEXED_SIZE];
   char sExclude[SHA1_HEXED_SIZE];
   LogFS_HashPrint(sPublicView, &publicView);
   LogFS_HashPrint(sExclude, &exclude);
   return SQL(getHostPairCallback, hir,
              "select peer.hostId,peer.hostName from viewMember,peer "
              " where publicView='%s' and viewMember.hostId!='%s' and peer.hostId=viewMember.hostId "
              " order by peer.hostId;", sPublicView, sExclude);
}
/*
 * Check whether a package checksum is already recorded.
 * Returns EPKG_OK when present, EPKG_END when absent, and EPKG_FATAL when
 * the prepared EXISTS statement fails to produce a row.
 */
int
pkgdb_repo_cksum_exists(sqlite3 *sqlite, const char *cksum)
{
	if (run_prepared_statement(EXISTS, cksum) != SQLITE_ROW) {
		ERROR_SQLITE(sqlite, SQL(EXISTS));
		return (EPKG_FATAL);
	}

	/* A positive count means the checksum is already known. */
	return (sqlite3_column_int(STMT(EXISTS), 0) > 0 ?
	    (EPKG_OK) : (EPKG_END));
}
// Read a configuration variable.
// For conf == "config" the value is taken from the hub's in-memory config
// (mDBConf) and returned as a strdup()'d copy the caller must free.
// Otherwise the SetupList database table is queried through the SQL plugin
// call. Returns NULL when the variable is unknown or the query fails.
const char *cpiPython::GetConf(const char *conf, const char *var)
{
	if (!conf || !var) { log2("PY: GetConf wrong parameters\n"); return NULL; }
	// first let's check hub's internal config:
	if(!strcmp(conf, "config")) {
		static string res, file(server->mDBConf.config_name);
		cConfigItemBase *ci = NULL;
		// NOTE(review): 'file' is initialized from config_name, so this
		// comparison is always true -- presumably legacy; confirm.
		if(file == server->mDBConf.config_name) {
			ci = server->mC[var];
			if (ci) {
				ci->ConvertTo(res);
				log3("PY: GetConf got result from mDBConf: %s\n", res.c_str());
				// Caller owns the returned copy.
				return strdup(res.c_str());
			}
		}
		return NULL;
		/* dead code retained from the original:
		char *s = (char*) calloc(1000, sizeof(char));
		int size = GetConfig((char*)conf, (char*)var, (char*)s, 999);
		if (size < 0) { log("PY: GetConf: error in script_api's GetConfig"); return NULL; }
		if (size > 999) {
			free(s);
			s = (char*) calloc(size+1, sizeof(char));
			GetConfig((char*)conf, (char*)var, s, 999);
			return s;
		}*/
	}
	// let's try searching the database directly:
	if (!lib_begin || !lib_pack || !lib_unpack || !lib_packprint) return NULL;
	log3("PY: GetConf file != 'config'... calling SQL\n");
	string query = string() + "select val from SetupList where file='" + conf + "' and var='" + var + "'";
	w_Targs *a = lib_pack( "sl", query.c_str(), (long)1 );
	log3("PY: GetConf calling SQL with params: %s\n", lib_packprint(a));
	w_Targs *ret = SQL (-2, a);
	freee(a);
	if (!ret) return NULL;
	long res, rows, cols;
	char **list;
	log3("PY: GetConf SQL returned %s\n", lib_packprint(ret));
	if (!lib_unpack( ret, "lllp", &res, &rows, &cols, (void**) &list )) {
		log3("PY: GetConf call to SQL function failed\n");
		freee(ret);
		return NULL;
	}
	freee(ret);
	// NOTE(review): this early return does not free 'list' -- possible
	// leak; confirm against lib_unpack's ownership rules.
	if (!res || !rows || !cols || !list || !list[0]) return NULL;
	log3("PY: GetConf returning value: %s\n", list[0]);
	// Hand the first cell to the caller and release only the pointer array.
	// NOTE(review): assumes list[0] is allocated separately from 'list'
	// itself -- confirm.
	const char * result = list[0];
	free(list);
	return result;
}
/* Commit the running transaction when enough work has accumulated, or give
 * other processes a chance to grab the database lock. Only meaningful in
 * blocking mode; re-entrant calls are ignored. */
void csync_db_maycommit()
{
	time_t now;

	if ( !db_blocking_mode || begin_commit_recursion ) return;

	begin_commit_recursion++;

	/* Nothing pending (or commits suppressed via negative counter). */
	if (tqueries_counter <= 0) {
		begin_commit_recursion--;
		return;
	}

	now = time(0);

	if ((now - last_wait_cycle) > 10) {
		/* NOTE(review): this label lacks the trailing space used by every
		 * other SQL("COMMIT ", ...) call -- presumably harmless if the
		 * first argument is only a debug label; confirm. */
		SQL("COMMIT", "COMMIT ");
		if (wait) {
			csync_debug(2, "Waiting %d secs so others can lock the database (%d - %d)...\n", wait, (int)now, (int)last_wait_cycle);
			sleep(wait);
		}
		/* NOTE(review): resetting to 0 (rather than 'now') makes the next
		 * call take this branch again as soon as the counter is positive
		 * -- confirm this is intended. */
		last_wait_cycle = 0;
		tqueries_counter = -10;
		begin_commit_recursion--;
		return;
	}

	/* Batch limit reached or transaction open too long: commit now. */
	if ((tqueries_counter > 1000) || ((now - transaction_begin) > 3)) {
		SQL("COMMIT ", "COMMIT ");
		tqueries_counter = 0;
		begin_commit_recursion--;
		return;
	}

	/* Otherwise re-arm the alarm so a commit happens within 10 seconds. */
	signal(SIGALRM, csync_db_alarmhandler);
	alarm(10);

	begin_commit_recursion--;
	return;
}
/* Flush any open transaction and close the database handle. Safe to call
 * when the database is already closed; re-entrant calls during a
 * begin/commit cycle are ignored. */
void csync_db_close()
{
	if (!db || begin_commit_recursion) return;

	begin_commit_recursion++;
	if (tqueries_counter > 0) {
		SQL("COMMIT ", "COMMIT ");
		/* Negative value presumably keeps the commit machinery quiescent
		 * after close -- confirm against csync_db_maycommit(). */
		tqueries_counter = -10;
	}
	db_close(db);
	begin_commit_recursion--;
	db = 0;
}
/* Ring-successor lookup: return the name of the peer whose hostId follows
 * 'hostId' in sorted order, wrapping to the smallest hostId when 'hostId' is
 * the largest. 'result' is cleared first so an empty query result leaves an
 * empty string. */
int getNextPeer(char *result, Hash hostId)
{
   char sHostId[SHA1_HEXED_SIZE];
   LogFS_HashPrint(sHostId, &hostId);
   *result = 0;
   return SQL(getPeerForHostCallback, result,
              /* min(hostId > '%s') is the direct successor; the union with
               * min(hostId) supplies the wrap-around candidate, and max()
               * selects the successor whenever one exists (it is non-NULL
               * and larger). */
              "select hostName from peer where hostId="
              "(select max(hostId) from "
              " (select min(hostId) as hostId from peer union "
              " select min(hostId) as hostId from peer where hostId>'%s'));",
              sHostId);
}
/* Store the (diskId, secretView, publicView) triple, where the public view
 * is derived from the secret one. Implements a manual upsert: try an
 * insert first and fall back to an update when a row for this disk already
 * exists. Returns the status of the last SQL() call. */
int insertSecretView(Hash diskId, Hash secretView)
{
   Hash publicView = LogFS_HashApply(secretView);
   char hexDisk[SHA1_HEXED_SIZE];
   char hexSecret[SHA1_HEXED_SIZE];
   char hexPublic[SHA1_HEXED_SIZE];
   int rc;

   LogFS_HashPrint(hexDisk, &diskId);
   LogFS_HashPrint(hexSecret, &secretView);
   LogFS_HashPrint(hexPublic, &publicView);

   rc = SQL(NULL, NULL, "insert into secretView values('%s','%s','%s');",
            hexDisk, hexSecret, hexPublic);
   if (rc != 0) {
      rc = SQL(NULL, NULL,
               "update secretView set secretView='%s',publicView='%s' "
               "where diskId='%s';", hexSecret, hexPublic, hexDisk);
   }
   return rc;
}
// Возвращаем пишущий sql TSQLPtr __fastcall TdmConnect::GetWriteSQL(const String &strQuery, TIBTransaction *T) { TSQLPtr SQL(new TIBSQL(0)); SQL->Database = Database; SQL->SQL->Text = strQuery; if(T) SQL->Transaction = T; else { SQL->Transaction = new TIBTransaction(SQL.get()); SQL->Transaction->DefaultDatabase = Database; SQL->Transaction->Params->Add("read_committed"); SQL->Transaction->Params->Add("rec_version"); SQL->Transaction->Params->Add("nowait"); } return SQL; }