void test_migrate_unqualified_names(void)
{
    DBHandle *db = setup(true);

    assert_int_equal(WriteDB(db, "foo", &dummy_event, sizeof(dummy_event)), true);
    assert_int_equal(WriteDB(db, "q.bar", &dummy_event, sizeof(dummy_event)), true);

    CloseDB(db);

    assert_int_equal(OpenDB(&db, dbid_bundles), true);

    /* Old entry migrated */
    assert_int_equal(HasKeyDB(db, "foo", strlen("foo") + 1), false);
    assert_int_equal(HasKeyDB(db, "default.foo", strlen("default.foo") + 1), true);

    Event read_value = { 0 };
    ReadDB(db, "default.foo", &read_value, sizeof(read_value));
    assert_memory_equal(&read_value, &dummy_event, sizeof(dummy_event));

    /* New entry preserved */
    assert_int_equal(HasKeyDB(db, "q.bar", strlen("q.bar") + 1), true);

    memset(&read_value, 0, sizeof(read_value));
    ReadDB(db, "q.bar", &read_value, sizeof(read_value));
    assert_memory_equal(&read_value, &dummy_event, sizeof(dummy_event));

    /* Version marker */
    assert_int_equal(HasKeyDB(db, "version", strlen("version") + 1), true);

    CloseDB(db);
}
void test_iter_delete_entry(void **state)
{
    /* Test that deleting entry under cursor does not interrupt iteration */
    CF_DB *db;
    assert_int_equal(OpenDB(&db, dbid_classes), true);

    assert_int_equal(WriteDB(db, "foobar", "abc", 3), true);
    assert_int_equal(WriteDB(db, "bazbaz", "def", 3), true);
    assert_int_equal(WriteDB(db, "booo", "ghi", 3), true);

    CF_DBC *cursor;
    assert_int_equal(NewDBCursor(db, &cursor), true);

    char *key;
    int ksize;
    void *value;
    int vsize;

    assert_int_equal(NextDB(db, cursor, &key, &ksize, &value, &vsize), true);
    assert_int_equal(DBCursorDeleteEntry(cursor), true);
    assert_int_equal(NextDB(db, cursor, &key, &ksize, &value, &vsize), true);
    assert_int_equal(NextDB(db, cursor, &key, &ksize, &value, &vsize), true);

    assert_int_equal(DeleteDBCursor(db, cursor), true);
    CloseDB(db);
}
static bool WriteLockData(CF_DB *dbp, const char *lock_id, LockData *lock_data)
{
#ifdef LMDB
    unsigned char digest2[EVP_MAX_MD_SIZE*2 + 1];

    if (!strcmp(lock_id, "CF_CRITICAL_SECTION") ||
        !strncmp(lock_id, "lock.track_license_bundle.track_license", 39))
    {
        /* These well-known lock IDs are stored verbatim; everything else is
           keyed by its MD5 hash. */
        strcpy((char *) digest2, lock_id);
    }
    else
    {
        GenerateMd5Hash(lock_id, digest2);
    }

    if (WriteDB(dbp, (const char *) digest2, lock_data, sizeof(LockData)))
#else
    if (WriteDB(dbp, lock_id, lock_data, sizeof(LockData)))
#endif
    {
        return true;
    }
    else
    {
        return false;
    }
}
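/* Companion sketch (not part of the original source): any reader of these
 * LMDB lock entries has to apply the same key transformation as WriteLockData
 * above, or the lookup will miss. The helper name ReadLockDataHashed and its
 * error handling are assumptions for illustration; GenerateMd5Hash, ReadDB
 * and the exemption list are taken from the function above. */
#ifdef LMDB
static bool ReadLockDataHashed(CF_DB *dbp, const char *lock_id, LockData *lock_data)
{
    unsigned char digest2[EVP_MAX_MD_SIZE*2 + 1];

    if (!strcmp(lock_id, "CF_CRITICAL_SECTION") ||
        !strncmp(lock_id, "lock.track_license_bundle.track_license", 39))
    {
        /* Stored verbatim, mirroring WriteLockData */
        strcpy((char *) digest2, lock_id);
    }
    else
    {
        /* Everything else is looked up under its MD5-hashed key */
        GenerateMd5Hash(lock_id, digest2);
    }

    return ReadDB(dbp, (const char *) digest2, lock_data, sizeof(LockData));
}
#endif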
static bool BundlesMigrationVersion0(DBHandle *db)
{
    bool errors = false;
    DBCursor *cursor;

    if (!NewDBCursor(db, &cursor))
    {
        return false;
    }

    char *key;
    void *value;
    int ksize, vsize;

    while (NextDB(cursor, &key, &ksize, &value, &vsize))
    {
        if (ksize == 0)
        {
            Log(LOG_LEVEL_INFO,
                "BundlesMigrationVersion0: Database structure error -- zero-length key.");
            continue;
        }

        if (strchr(key, '.')) // is qualified name?
        {
            continue;
        }

        char *fqname = StringConcatenate(3, "default", ".", key);

        if (!WriteDB(db, fqname, value, vsize))
        {
            Log(LOG_LEVEL_INFO, "Unable to write version 1 bundle entry for '%s'", key);
            errors = true;
            free(fqname);
            continue;
        }
        free(fqname);

        if (!DBCursorDeleteEntry(cursor))
        {
            Log(LOG_LEVEL_INFO, "Unable to delete version 0 bundle entry for '%s'", key);
            errors = true;
        }
    }

    if (DeleteDBCursor(cursor) == false)
    {
        Log(LOG_LEVEL_ERR, "BundlesMigrationVersion0: Unable to close cursor");
        errors = true;
    }

    if ((!errors) && (!WriteDB(db, "version", "1", sizeof("1"))))
    {
        errors = true;
    }

    return !errors;
}
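/* Sketch of how a per-version migration step like the one above is typically
 * driven: a NULL-terminated table of steps, applied in order, each bumping the
 * stored "version" marker on success. The type name, array name and driver
 * function here are assumptions for illustration, not necessarily the exact
 * ones used in this tree. */
typedef bool (*DBMigrationFunction)(DBHandle *db);

static const DBMigrationFunction BUNDLES_MIGRATION_PLAN[] =
{
    BundlesMigrationVersion0,   /* version 0 -> 1 */
    NULL
};

static bool RunMigrationPlan(DBHandle *db, const DBMigrationFunction *plan)
{
    for (int i = 0; plan[i] != NULL; i++)
    {
        if (!plan[i](db))
        {
            /* Leave the database at its current version; a later open can retry */
            return false;
        }
    }
    return true;
}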
static void UpdateAverages(EvalContext *ctx, char *timekey, Averages newvals)
{
    CF_DB *dbp;

    if (!OpenDB(&dbp, dbid_observations))
    {
        return;
    }

    Log(LOG_LEVEL_INFO, "Updated averages at '%s'", timekey);

    WriteDB(dbp, timekey, &newvals, sizeof(Averages));
    WriteDB(dbp, "DATABASE_AGE", &AGE, sizeof(double));

    CloseDB(dbp);
    HistoryUpdate(ctx, newvals);
}
static void UpdateAverages(char *timekey, Averages newvals)
{
    CF_DB *dbp;

    if (!OpenDB(&dbp, dbid_observations))
    {
        return;
    }

    CfOut(cf_inform, "", "Updated averages at %s\n", timekey);

    WriteDB(dbp, timekey, &newvals, sizeof(Averages));
    WriteDB(dbp, "DATABASE_AGE", &AGE, sizeof(double));

    CloseDB(dbp);
    HistoryUpdate(newvals);
}
void UpdateLastSawHost(const char *hostkey, const char *address,
                       bool incoming, time_t timestamp)
{
    DBHandle *db = NULL;
    if (!OpenDB(&db, dbid_lastseen))
    {
        Log(LOG_LEVEL_ERR, "Unable to open last seen db");
        return;
    }

    /* Update quality-of-connection entry */

    char quality_key[CF_BUFSIZE];
    snprintf(quality_key, CF_BUFSIZE, "q%c%s", incoming ? 'i' : 'o', hostkey);

    KeyHostSeen newq = { .lastseen = timestamp };

    KeyHostSeen q;
    if (ReadDB(db, quality_key, &q, sizeof(q)))
    {
        newq.Q = QAverage(q.Q, newq.lastseen - q.lastseen, 0.4);
    }
    else
    {
        /* FIXME: more meaningful default value? */
        newq.Q = QDefinite(0);
    }

    WriteDB(db, quality_key, &newq, sizeof(newq));

    /* Update forward mapping */

    char hostkey_key[CF_BUFSIZE];
    snprintf(hostkey_key, CF_BUFSIZE, "k%s", hostkey);

    WriteDB(db, hostkey_key, address, strlen(address) + 1);

    /* Update reverse mapping */

    char address_key[CF_BUFSIZE];
    snprintf(address_key, CF_BUFSIZE, "a%s", address);

    WriteDB(db, address_key, hostkey, strlen(hostkey) + 1);

    CloseDB(db);
}
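/* Read-side sketch for the three entry kinds written above: "qi<key>"/"qo<key>"
 * for connection quality, "k<key>" -> address, and "a<address>" -> key. The
 * helper below is illustrative only; its name and its fallback behaviour are
 * assumptions, while OpenDB/ReadDB/CloseDB and dbid_lastseen come from the
 * code above. */
static bool LookupLastSeenAddress(const char *hostkey, char *address_out, int size)
{
    DBHandle *db = NULL;
    if (!OpenDB(&db, dbid_lastseen))
    {
        return false;
    }

    char hostkey_key[CF_BUFSIZE];
    snprintf(hostkey_key, sizeof(hostkey_key), "k%s", hostkey);

    /* The value stored under "k<key>" is a NUL-terminated address string */
    bool found = ReadDB(db, hostkey_key, address_out, size);

    CloseDB(db);
    return found;
}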
static bool WriteLockData(CF_DB *dbp, const char *lock_id, LockData *lock_data)
{
    if (WriteDB(dbp, lock_id, lock_data, sizeof(LockData)))
    {
        return true;
    }
    else
    {
        return false;
    }
}
void test_migrate_single(const char *expected_old_key, const char *expected_quality_key)
{
    /* Test migration of single entry */

    DBHandle *db = setup(true);

    KeyHostSeen0 khs0 = {
        .q = 666777.0,
        .expect = 12345.0,
        .var = 6543210.0,
    };
    strcpy(khs0.address, "1.2.3.4");

    assert_int_equal(WriteDB(db, expected_old_key, &khs0, sizeof(khs0)), true);

    CloseDB(db);

    assert_int_equal(OpenDB(&db, dbid_lastseen), true);

    /* Old entry migrated */
    assert_int_equal(HasKeyDB(db, expected_old_key, strlen(expected_old_key) + 1), false);

    /* Version marker */
    assert_int_equal(HasKeyDB(db, "version", strlen("version") + 1), true);

    /* Incoming connection quality */
    KeyHostSeen khs;
    assert_int_equal(ReadDB(db, expected_quality_key, &khs, sizeof(khs)), true);
    assert_int_equal(khs.lastseen, 666777);
    assert_double_close(khs.Q.q, 12345.0);
    assert_double_close(khs.Q.expect, 12345.0);
    assert_double_close(khs.Q.var, 6543210.0);

    /* Address mapping */
    char address[CF_BUFSIZE];
    assert_int_equal(ReadDB(db, KEYHASH_KEY, address, sizeof(address)), true);
    assert_string_equal(address, "1.2.3.4");

    /* Reverse mapping */
    char keyhash[CF_BUFSIZE];
    assert_int_equal(ReadDB(db, "a1.2.3.4", keyhash, sizeof(keyhash)), true);
    assert_string_equal(keyhash, KEYHASH);

    CloseDB(db);
}

void test_migrate_incoming(void)
{
    test_migrate_single(KEYHASH_IN, QUALITY_IN);
}
static void NotePerformance(char *eventname, time_t t, double value)
{
    CF_DB *dbp;
    Event e, newe;
    double lastseen;
    int lsea = SECONDS_PER_WEEK;
    time_t now = time(NULL);

    CfDebug("PerformanceEvent(%s,%.1f s)\n", eventname, value);

    if (!OpenDB(&dbp, dbid_performance))
    {
        return;
    }

    if (ReadDB(dbp, eventname, &e, sizeof(e)))
    {
        lastseen = now - e.t;
        newe.t = t;

        newe.Q = QAverage(e.Q, value, 0.3);

        /* Have to kickstart variance computation, assume 1% to start */

        if (newe.Q.var <= 0.0009)
        {
            newe.Q.var = newe.Q.expect / 100.0;
        }
    }
    else
    {
        lastseen = 0.0;
        newe.t = t;
        newe.Q.q = value;
        newe.Q.dq = 0;
        newe.Q.expect = value;
        newe.Q.var = 0.001;
    }

    if (lastseen > (double) lsea)
    {
        CfDebug("Performance record %s expired\n", eventname);
        DeleteDB(dbp, eventname);
    }
    else
    {
        CfOut(cf_verbose, "", "Performance(%s): time=%.4lf secs, av=%.4lf +/- %.4lf\n",
              eventname, value, newe.Q.expect, sqrt(newe.Q.var));
        WriteDB(dbp, eventname, &newe, sizeof(newe));
    }

    CloseDB(dbp);
}
static void test_setup(void)
{
    snprintf(CFWORKDIR, CF_BUFSIZE, "/tmp/changes_migration_test.XXXXXX");
    mkdtemp(CFWORKDIR);

    char state_dir[PATH_MAX];
    snprintf(state_dir, sizeof(state_dir), "%s/state", CFWORKDIR);
    mkdir(state_dir, 0755);

    CF_DB *db;
    assert_true(OpenDB(&db, dbid_checksums));

    // Hand crafted from the old version of NewIndexKey().
    char checksum_key[NO_FILES][30] = {
        { 'M','D','5','\0','\0','\0','\0','\0',
          '/','e','t','c','/','h','o','s','t','s','\0' },
        { 'M','D','5','\0','\0','\0','\0','\0',
          '/','e','t','c','/','p','a','s','s','w','d','\0' },
        { 'M','D','5','\0','\0','\0','\0','\0',
          '/','f','i','l','e','1','\0' },
        { 'M','D','5','\0','\0','\0','\0','\0',
          '/','f','i','l','e','2','\0' },
    };

    for (int c = 0; c < NO_FILES; c++)
    {
        int ksize = CHANGES_HASH_FILE_NAME_OFFSET
                    + strlen(checksum_key[c] + CHANGES_HASH_FILE_NAME_OFFSET) + 1;
        int vsize = strlen(CHECKSUM_VALUE[c]) + 1;
        assert_true(WriteComplexKeyDB(db, checksum_key[c], ksize,
                                      CHECKSUM_VALUE[c], vsize));
    }

    CloseDB(db);

    assert_true(OpenDB(&db, dbid_filestats));

    char *filestat_key[NO_FILES] = {
        "/etc/hosts",
        "/etc/passwd",
        "/file1",
        "/file2",
    };

    /* Zero the struct first, then set the field we care about, so the uid is
       not wiped out before the records are written. */
    memset(&filestat_value, 0, sizeof(filestat_value));
    filestat_value.st_uid = 4321;

    for (int c = 0; c < NO_FILES; c++)
    {
        assert_true(WriteDB(db, filestat_key[c], &filestat_value, sizeof(filestat_value)));
    }

    CloseDB(db);
}
//************************************************************

void cOrderArray::on_pushButtonUp_clicked()
{
    try
    {
        int Row = tableWidgetFileSets4->currentRow();
        CastRow(tableWidgetFileSets4, tableWidgetFileSets4, Row + 1, Row - 1);
        WriteDB();
    }
    catch (const exception &e)
    {
        cout << e.what() << endl;
    }
}
static void Mon_SaveFilePosition(char *name, long fileptr)
{
    CF_DB *dbp;

    if (!OpenDB(&dbp, dbid_static))
    {
        return;
    }

    Log(LOG_LEVEL_VERBOSE, "Saving state for %s at %ld", name, fileptr);
    WriteDB(dbp, name, &fileptr, sizeof(long));
    CloseDB(dbp);
}
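/* Companion sketch (illustrative; the function name and the fall-back to 0 are
 * assumptions, not taken from the original source): restoring the position
 * saved by Mon_SaveFilePosition means reading the same key back from
 * dbid_static, defaulting to the start of the file when no entry exists yet. */
static long Mon_RestoreFilePosition(char *name)
{
    CF_DB *dbp;
    long fileptr = 0;

    if (!OpenDB(&dbp, dbid_static))
    {
        return 0;
    }

    if (!ReadDB(dbp, name, &fileptr, sizeof(long)))
    {
        fileptr = 0;   /* first run: start from the beginning of the file */
    }

    CloseDB(dbp);
    return fileptr;
}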
//******************************************

void cOrderArray::on_pushButtonRemove_clicked()
{
    try
    {
        int UseRow = tableWidgetFileSets3->rowCount();
        int Row = tableWidgetFileSets4->currentRow();
        CastRow(tableWidgetFileSets4, tableWidgetFileSets3, Row, UseRow);
        WriteDB();
    }
    catch (const exception &e)
    {
        cout << e.what() << endl;
    }
}
//****************************************************

void cOrderArray::on_pushButtonDown_clicked()
{
    try
    {
        int Row = tableWidgetFileSets4->currentRow();
        CastRow(tableWidgetFileSets4, tableWidgetFileSets4, Row, Row + 2);
        Row += 1;
        tableWidgetFileSets4->setCurrentCell(Row, 0);
        WriteDB();
    }
    catch (const exception &e)
    {
        cout << e.what() << endl;
    }
}
void test_ignore_wrong_sized(void)
{
    /* Test that malformed values are discarded */

    DBHandle *db = setup(true);

    const char *value = "+";
    assert_int_equal(WriteDB(db, "+++", value, 2), true);

    CloseDB(db);

    assert_int_equal(OpenDB(&db, dbid_lastseen), true);

    assert_int_equal(HasKeyDB(db, "+++", 4), false);
    assert_int_equal(HasKeyDB(db, "k++", 4), false);
    assert_int_equal(HasKeyDB(db, "qi++", 5), false);
    assert_int_equal(HasKeyDB(db, "qo++", 5), false);
    assert_int_equal(HasKeyDB(db, "a+", 3), false);

    CloseDB(db);
}
static void test_reverse_conflict(void **context)
{
    setup();

    UpdateLastSawHost("SHA-12345", "127.0.0.64", true, 555);

    DBHandle *db;
    OpenDB(&db, dbid_lastseen);
    assert_int_equal(WriteDB(db, "a127.0.0.64", "SHA-98765", strlen("SHA-98765") + 1), true);

    /* Check that resolution returns false */
    char result[CF_BUFSIZE];
    assert_int_equal(Address2Hostkey("127.0.0.64", result), false);

    /* Check that the conflicting entry is removed */
    assert_int_equal(HasKeyDB(db, "a127.0.0.64", strlen("a127.0.0.64") + 1), false);

    CloseDB(db);
}
static void test_up_to_date(void)
{
    /* Test that the upgrade is not performed if there is already a version
     * marker */

    DBHandle *db = setup(false);
    assert_int_equal(WriteDB(db, "foo", &dummy_event, sizeof(dummy_event)), true);
    CloseDB(db);

    /* Test that the manually inserted key still has an unqualified name the
       next time the DB is opened, which is an indicator of the DB not being
       upgraded */
    assert_int_equal(OpenDB(&db, dbid_bundles), true);

    Event read_value;
    assert_int_equal(ReadDB(db, "foo", &read_value, sizeof(read_value)), true);
    assert_int_equal(read_value.t, 1);

    CloseDB(db);
}
void NovaNamedEvent(const char *eventname, double value)
{
    Event ev_new, ev_old;
    time_t now = time(NULL);
    CF_DB *dbp;

    if (!OpenDB(&dbp, dbid_measure))
    {
        return;
    }

    ev_new.t = now;

    if (ReadDB(dbp, eventname, &ev_old, sizeof(ev_old)))
    {
        if (isnan(ev_old.Q.expect))
        {
            ev_old.Q.expect = value;
        }

        if (isnan(ev_old.Q.var))
        {
            ev_old.Q.var = 0;
        }

        ev_new.Q = QAverage(ev_old.Q, value, 0.7);
    }
    else
    {
        ev_new.Q = QDefinite(value);
    }

    Log(LOG_LEVEL_VERBOSE, "Wrote scalar named event \"%s\" = (%.2lf,%.2lf,%.2lf)",
        eventname, ev_new.Q.q, ev_new.Q.expect, sqrt(ev_new.Q.var));
    WriteDB(dbp, eventname, &ev_new, sizeof(ev_new));

    CloseDB(dbp);
}
void EvalContextHeapPersistentSave(const char *context, const char *ns,
                                   unsigned int ttl_minutes, ContextStatePolicy policy)
{
    CF_DB *dbp;
    CfState state;
    time_t now = time(NULL);
    char name[CF_BUFSIZE];

    if (!OpenDB(&dbp, dbid_state))
    {
        return;
    }

    snprintf(name, CF_BUFSIZE, "%s%c%s", ns, CF_NS, context);

    if (ReadDB(dbp, name, &state, sizeof(state)))
    {
        if (state.policy == CONTEXT_STATE_POLICY_PRESERVE)
        {
            if (now < state.expires)
            {
                CfOut(OUTPUT_LEVEL_VERBOSE, "",
                      " -> Persistent state %s is already in a preserved state -- %jd minutes to go\n",
                      name, (intmax_t) ((state.expires - now) / 60));
                CloseDB(dbp);
                return;
            }
        }
    }
    else
    {
        CfOut(OUTPUT_LEVEL_VERBOSE, "", " -> New persistent state %s\n", name);
    }

    state.expires = now + ttl_minutes * 60;
    state.policy = policy;

    WriteDB(dbp, name, &state, sizeof(state));
    CloseDB(dbp);
}
static void test_up_to_date(void)
{
    /* Test that the upgrade is not performed if there is already a version
     * marker */

    DBHandle *db = setup(false);

    const char *value = "+";
    assert_int_equal(WriteDB(db, "+++", value, 2), true);

    CloseDB(db);

    /* Test that a manually inserted key which matches the format of old-style
     * keys is still present the next time the database is opened, which is an
     * indicator of the database not being upgraded */
    assert_int_equal(OpenDB(&db, dbid_lastseen), true);

    char read_value[CF_BUFSIZE];
    assert_int_equal(ReadDB(db, "+++", &read_value, sizeof(read_value)), true);
    assert_string_equal(read_value, "+");

    CloseDB(db);
}
static void MonLogSymbolicValue(EvalContext *ctx, const char *handle, Item *stream,
                                Attributes a, const Promise *pp, PromiseResult *result)
{
    char value[CF_BUFSIZE], sdate[CF_MAXVARSIZE], filename[CF_BUFSIZE], *v;
    int count = 1, found = false, match_count = 0;
    Item *ip, *match = NULL, *matches = NULL;
    time_t now = time(NULL);
    FILE *fout;

    if (stream == NULL)
    {
        Log(LOG_LEVEL_VERBOSE, "No stream to measure");
        return;
    }

    Log(LOG_LEVEL_VERBOSE, "Locate and log sample ...");

    for (ip = stream; ip != NULL; ip = ip->next)
    {
        if (ip->name == NULL)
        {
            continue;
        }

        if (count == a.measure.select_line_number)
        {
            Log(LOG_LEVEL_VERBOSE, "Found line %d by number...", count);
            found = true;
            match_count = 1;
            match = ip;

            if (a.measure.extraction_regex)
            {
                Log(LOG_LEVEL_VERBOSE, "Now looking for a matching extractor \"%s\"",
                    a.measure.extraction_regex);
                strncpy(value, ExtractFirstReference(a.measure.extraction_regex, match->name),
                        CF_MAXVARSIZE - 1);
                Log(LOG_LEVEL_INFO, "Extracted value \"%s\" for promise \"%s\"", value, handle);
                AppendItem(&matches, value, NULL);
            }

            break;
        }

        if (a.measure.select_line_matching &&
            StringMatchFull(a.measure.select_line_matching, ip->name))
        {
            Log(LOG_LEVEL_VERBOSE, "Found line %d by pattern...", count);
            found = true;
            match = ip;
            match_count++;

            if (a.measure.extraction_regex)
            {
                Log(LOG_LEVEL_VERBOSE, "Now looking for a matching extractor \"%s\"",
                    a.measure.extraction_regex);
                strncpy(value, ExtractFirstReference(a.measure.extraction_regex, match->name),
                        CF_MAXVARSIZE - 1);
                Log(LOG_LEVEL_INFO, "Extracted value \"%s\" for promise \"%s\"", value, handle);
                AppendItem(&matches, value, NULL);
            }
        }

        count++;
    }

    if (!found)
    {
        cfPS(ctx, LOG_LEVEL_ERR, PROMISE_RESULT_FAIL, pp, a,
             "Promiser '%s' found no matching line.", pp->promiser);
        *result = PromiseResultUpdate(*result, PROMISE_RESULT_FAIL);
        return;
    }

    if (match_count > 1)
    {
        Log(LOG_LEVEL_INFO, "Warning: %d lines matched the line_selection \"%s\"- matching to last",
            match_count, a.measure.select_line_matching);
    }

    switch (a.measure.data_type)
    {
    case CF_DATA_TYPE_COUNTER:
        Log(LOG_LEVEL_VERBOSE, "Counted %d for %s", match_count, handle);
        snprintf(value, CF_MAXVARSIZE, "%d", match_count);
        break;

    case CF_DATA_TYPE_STRING_LIST:
        v = ItemList2CSV(matches);
        snprintf(value, CF_BUFSIZE, "%s", v);
        free(v);
        break;

    default:
        snprintf(value, CF_BUFSIZE, "%s", matches->name);
    }

    DeleteItemList(matches);

    if (a.measure.history_type && strcmp(a.measure.history_type, "log") == 0)
    {
        snprintf(filename, CF_BUFSIZE, "%s%cstate%c%s_measure.log", CFWORKDIR,
                 FILE_SEPARATOR, FILE_SEPARATOR, handle);

        if ((fout = fopen(filename, "a")) == NULL)
        {
            cfPS(ctx, LOG_LEVEL_ERR, PROMISE_RESULT_FAIL, pp, a,
                 "Unable to open the output log \"%s\"", filename);
            *result = PromiseResultUpdate(*result, PROMISE_RESULT_FAIL);
            PromiseRef(LOG_LEVEL_ERR, pp);
            return;
        }

        strncpy(sdate, ctime(&now), CF_MAXVARSIZE - 1);
        if (Chop(sdate, CF_EXPANDSIZE) == -1)
        {
            Log(LOG_LEVEL_ERR, "Chop was called on a string that seemed to have no terminator");
        }

        fprintf(fout, "%s,%ld,%s\n", sdate, (long) now, value);
        Log(LOG_LEVEL_VERBOSE, "Logging: %s,%s to %s", sdate, value, filename);

        fclose(fout);
    }
    else                        // scalar or static
    {
        CF_DB *dbp;
        char id[CF_MAXVARSIZE];

        if (!OpenDB(&dbp, dbid_static))
        {
            return;
        }

        snprintf(id, CF_MAXVARSIZE - 1, "%s:%d", handle, a.measure.data_type);
        WriteDB(dbp, id, value, strlen(value) + 1);
        CloseDB(dbp);
    }
}
static void test_class_persistence(void)
{
    EvalContext *ctx = EvalContextNew();

    // simulate old version
    {
        CF_DB *dbp;
        PersistentClassInfo i;

        assert_true(OpenDB(&dbp, dbid_state));

        i.expires = UINT_MAX;
        i.policy = CONTEXT_STATE_POLICY_RESET;
        WriteDB(dbp, "old", &i, sizeof(PersistentClassInfo));

        CloseDB(dbp);
    }

    // e.g. by monitoring
    EvalContextHeapPersistentSave(ctx, "class1", 3, CONTEXT_STATE_POLICY_PRESERVE, "a,b");

    // e.g. by a class promise in a bundle with a namespace
    {
        Policy *p = PolicyNew();
        Bundle *bp = PolicyAppendBundle(p, "ns1", "bundle1", "agent", NULL, NULL);
        EvalContextStackPushBundleFrame(ctx, bp, NULL, false);

        EvalContextHeapPersistentSave(ctx, "class2", 5, CONTEXT_STATE_POLICY_PRESERVE, "x");

        EvalContextStackPopFrame(ctx);
        PolicyDestroy(p);
    }

    EvalContextHeapPersistentLoadAll(ctx);

    {
        const Class *cls = EvalContextClassGet(ctx, "default", "old");
        assert_true(cls != NULL);
        assert_string_equal("old", cls->name);
        assert_true(cls->tags != NULL);
        assert_int_equal(1, StringSetSize(cls->tags));
        assert_true(StringSetContains(cls->tags, "source=persistent"));
    }

    {
        const Class *cls = EvalContextClassGet(ctx, "default", "class1");
        assert_true(cls != NULL);
        assert_string_equal("class1", cls->name);
        assert_true(cls->tags != NULL);
        assert_int_equal(3, StringSetSize(cls->tags));
        assert_true(StringSetContains(cls->tags, "source=persistent"));
        assert_true(StringSetContains(cls->tags, "a"));
        assert_true(StringSetContains(cls->tags, "b"));
    }

    {
        const Class *cls = EvalContextClassGet(ctx, "ns1", "class2");
        assert_true(cls != NULL);
        assert_string_equal("ns1", cls->ns);
        assert_string_equal("class2", cls->name);
        assert_true(cls->tags != NULL);
        assert_int_equal(2, StringSetSize(cls->tags));
        assert_true(StringSetContains(cls->tags, "source=persistent"));
        assert_true(StringSetContains(cls->tags, "x"));
    }

    EvalContextDestroy(ctx);
}
static void UpdateLastSawHost(char *rkey, char *ipaddress)
{
    CF_DB *dbp = NULL;
    KeyHostSeen q, newq;
    double lastseen, delta2;
    time_t now = time(NULL);
    char timebuf[26];

    if (!OpenDB(&dbp, dbid_lastseen))
    {
        CfOut(cf_inform, "", " !! Unable to open last seen db");
        return;
    }

    if (ReadDB(dbp, rkey, &q, sizeof(q)))
    {
        lastseen = (double) now - q.Q.q;

        if (q.Q.q <= 0)
        {
            lastseen = 300;
            q.Q = QDefinite(0.0);
        }

        newq.Q.q = (double) now;
        newq.Q.dq = newq.Q.q - q.Q.q;
        newq.Q.expect = GAverage(lastseen, q.Q.expect, 0.4);
        delta2 = (lastseen - q.Q.expect) * (lastseen - q.Q.expect);
        newq.Q.var = GAverage(delta2, q.Q.var, 0.4);
        strncpy(newq.address, ipaddress, CF_ADDRSIZE - 1);
    }
    else
    {
        lastseen = 0.0;
        newq.Q.q = (double) now;
        newq.Q.dq = 0;
        newq.Q.expect = 0.0;
        newq.Q.var = 0.0;
        strncpy(newq.address, ipaddress, CF_ADDRSIZE - 1);
    }

    if (strcmp(rkey + 1, PUBKEY_DIGEST) == 0)
    {
        Item *ip;
        int match = false;

        for (ip = IPADDRESSES; ip != NULL; ip = ip->next)
        {
            if (strcmp(VIPADDRESS, ip->name) == 0)
            {
                match = true;
                break;
            }

            if (strcmp(ipaddress, ip->name) == 0)
            {
                match = true;
                break;
            }
        }

        if (!match)
        {
            CfOut(cf_verbose, "",
                  " ! Not updating last seen, as this appears to be a host with a duplicate key");
            CloseDB(dbp);
            return;
        }
    }

    CfOut(cf_verbose, "", " -> Last saw %s (alias %s) at %s\n", rkey, ipaddress,
          cf_strtimestamp_local(now, timebuf));

    PurgeMultipleIPReferences(dbp, rkey, ipaddress);

    WriteDB(dbp, rkey, &newq, sizeof(newq));

    CloseDB(dbp);
}
void RecordClassUsage()
{
    DB *dbp;
    DB_ENV *dbenv = NULL;
    DBC *dbcp;
    DBT key, stored;
    char name[CF_BUFSIZE];
    struct Event e, entry, newe;
    double lsea = CF_WEEK * 52;   /* expire after a year */
    time_t now = time(NULL);
    struct Item *ip, *list = NULL;
    double lastseen, delta2;
    double vtrue = 1.0;           /* end with a rough probability */

    Debug("RecordClassUsage\n");

    for (ip = VHEAP; ip != NULL; ip = ip->next)
    {
        if (!IsItemIn(list, ip->name))
        {
            PrependItem(&list, ip->name, NULL);
        }
    }

    for (ip = VALLADDCLASSES; ip != NULL; ip = ip->next)
    {
        if (!IsItemIn(list, ip->name))
        {
            PrependItem(&list, ip->name, NULL);
        }
    }

    snprintf(name, CF_BUFSIZE-1, "%s/%s", CFWORKDIR, CF_CLASSUSAGE);

    if ((errno = db_create(&dbp, dbenv, 0)) != 0)
    {
        snprintf(OUTPUT, CF_BUFSIZE*2, "Couldn't open performance database %s\n", name);
        CfLog(cferror, OUTPUT, "db_open");
        return;
    }

#ifdef CF_OLD_DB
    if ((errno = (dbp->open)(dbp, name, NULL, DB_BTREE, DB_CREATE, 0644)) != 0)
#else
    if ((errno = (dbp->open)(dbp, NULL, name, NULL, DB_BTREE, DB_CREATE, 0644)) != 0)
#endif
    {
        snprintf(OUTPUT, CF_BUFSIZE*2, "Couldn't open performance database %s\n", name);
        CfLog(cferror, OUTPUT, "db_open");
        return;
    }

    /* First record the classes that are in use */

    for (ip = list; ip != NULL; ip = ip->next)
    {
        if (ReadDB(dbp, ip->name, &e, sizeof(e)))
        {
            lastseen = now - e.t;
            newe.t = now;
            newe.Q.q = vtrue;
            newe.Q.expect = GAverage(vtrue, e.Q.expect, 0.5);
            delta2 = (vtrue - e.Q.expect) * (vtrue - e.Q.expect);
            newe.Q.var = GAverage(delta2, e.Q.var, 0.5);
        }
        else
        {
            lastseen = 0.0;
            newe.t = now;
            newe.Q.q = 0.5 * vtrue;
            newe.Q.expect = 0.5 * vtrue;   /* With no data it's 50/50 what we can say */
            newe.Q.var = 0.000;
        }

        if (lastseen > lsea)
        {
            Verbose("Class usage record %s expired\n", ip->name);
            DeleteDB(dbp, ip->name);
        }
        else
        {
            Debug("Upgrading %s %f\n", ip->name, newe.Q.expect);
            WriteDB(dbp, ip->name, &newe, sizeof(newe));
        }
    }

    /* Then update with zero the ones we know about that are not active */

    /* Acquire a cursor for the database. */

    if ((errno = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0)
    {
        Debug("Error reading from class database: ");
        dbp->err(dbp, errno, "DB->cursor");
        return;
    }

    /* Initialize the key/data return pair. */

    memset(&key, 0, sizeof(key));
    memset(&stored, 0, sizeof(stored));
    memset(&entry, 0, sizeof(entry));

    while (dbcp->c_get(dbcp, &key, &stored, DB_NEXT) == 0)
    {
        double measure, av, var;
        time_t then;
        char tbuf[CF_BUFSIZE], eventname[CF_BUFSIZE];

        strcpy(eventname, (char *) key.data);

        if (stored.data != NULL)
        {
            memcpy(&entry, stored.data, sizeof(entry));

            then = entry.t;
            measure = entry.Q.q;
            av = entry.Q.expect;
            var = entry.Q.var;
            lastseen = now - then;

            snprintf(tbuf, CF_BUFSIZE-1, "%s", ctime(&then));
            tbuf[strlen(tbuf)-9] = '\0';   /* Chop off second and year */

            if (lastseen > lsea)
            {
                Verbose("Class usage record %s expired\n", eventname);
                DeleteDB(dbp, eventname);
            }
            else if (!IsItemIn(list, eventname))
            {
                newe.t = then;
                newe.Q.q = 0;
                newe.Q.expect = GAverage(0.0, av, 0.5);
                delta2 = av * av;
                newe.Q.var = GAverage(delta2, var, 0.5);
                Debug("Downgrading class %s from %lf to %lf\n",
                      eventname, entry.Q.expect, newe.Q.expect);
                WriteDB(dbp, eventname, &newe, sizeof(newe));
            }
        }
    }

    dbp->close(dbp, 0);
}
void LastSeen(char *hostname, enum roles role)
{
    DB *dbp, *dbpent;
    DB_ENV *dbenv = NULL, *dbenv2 = NULL;
    char name[CF_BUFSIZE], databuf[CF_BUFSIZE];
    time_t now = time(NULL);
    struct QPoint q, newq;
    double lastseen, delta2;
    int lsea = -1;

    if (strlen(hostname) == 0)
    {
        snprintf(OUTPUT, CF_BUFSIZE, "LastSeen registry for empty hostname with role %d", role);
        CfLog(cflogonly, OUTPUT, "");
        return;
    }

    Debug("LastSeen(%s) reg\n", hostname);

    /* Tidy old versions - temporary */

    snprintf(name, CF_BUFSIZE-1, "%s/%s", CFWORKDIR, CF_OLDLASTDB_FILE);
    unlink(name);

    if ((errno = db_create(&dbp, dbenv, 0)) != 0)
    {
        snprintf(OUTPUT, CF_BUFSIZE*2, "Couldn't init last-seen database %s\n", name);
        CfLog(cferror, OUTPUT, "db_open");
        return;
    }

    snprintf(name, CF_BUFSIZE-1, "%s/%s", CFWORKDIR, CF_LASTDB_FILE);

#ifdef CF_OLD_DB
    if ((errno = (dbp->open)(dbp, name, NULL, DB_BTREE, DB_CREATE, 0644)) != 0)
#else
    if ((errno = (dbp->open)(dbp, NULL, name, NULL, DB_BTREE, DB_CREATE, 0644)) != 0)
#endif
    {
        snprintf(OUTPUT, CF_BUFSIZE*2, "Couldn't open last-seen database %s\n", name);
        CfLog(cferror, OUTPUT, "db_open");
        return;
    }

    /* Now open special file for peer entropy record - INRIA intermittency */

    snprintf(name, CF_BUFSIZE-1, "%s/%s.%s", CFWORKDIR, CF_LASTDB_FILE, hostname);

    if ((errno = db_create(&dbpent, dbenv2, 0)) != 0)
    {
        snprintf(OUTPUT, CF_BUFSIZE*2, "Couldn't init last-seen database %s\n", name);
        CfLog(cferror, OUTPUT, "db_open");
        return;
    }

#ifdef CF_OLD_DB
    if ((errno = (dbpent->open)(dbpent, name, NULL, DB_BTREE, DB_CREATE, 0644)) != 0)
#else
    if ((errno = (dbpent->open)(dbpent, NULL, name, NULL, DB_BTREE, DB_CREATE, 0644)) != 0)
#endif
    {
        snprintf(OUTPUT, CF_BUFSIZE*2, "Couldn't open last-seen database %s\n", name);
        CfLog(cferror, OUTPUT, "db_open");
        return;
    }

#ifdef HAVE_PTHREAD_H
    if (pthread_mutex_lock(&MUTEX_GETADDR) != 0)
    {
        CfLog(cferror, "pthread_mutex_lock failed", "unlock");
        exit(1);
    }
#endif

    switch (role)
    {
    case cf_accept:
        snprintf(databuf, CF_BUFSIZE-1, "-%s", Hostname2IPString(hostname));
        break;
    case cf_connect:
        snprintf(databuf, CF_BUFSIZE-1, "+%s", Hostname2IPString(hostname));
        break;
    }

#ifdef HAVE_PTHREAD_H
    if (pthread_mutex_unlock(&MUTEX_GETADDR) != 0)
    {
        CfLog(cferror, "pthread_mutex_unlock failed", "unlock");
        exit(1);
    }
#endif

    if (GetMacroValue(CONTEXTID, "LastSeenExpireAfter"))
    {
        lsea = atoi(GetMacroValue(CONTEXTID, "LastSeenExpireAfter"));
        lsea *= CF_TICKS_PER_DAY;
    }

    if (lsea < 0)
    {
        lsea = CF_WEEK;
    }

    if (ReadDB(dbp, databuf, &q, sizeof(q)))
    {
        lastseen = (double) now - q.q;
        newq.q = (double) now;                    /* Last seen is now-then */
        newq.expect = GAverage(lastseen, q.expect, 0.3);
        delta2 = (lastseen - q.expect) * (lastseen - q.expect);
        newq.var = GAverage(delta2, q.var, 0.3);
    }
    else
    {
        lastseen = 0.0;
        newq.q = (double) now;
        newq.expect = 0.0;
        newq.var = 0.0;
    }

#ifdef HAVE_PTHREAD_H
    if (pthread_mutex_lock(&MUTEX_GETADDR) != 0)
    {
        CfLog(cferror, "pthread_mutex_lock failed", "unlock");
        exit(1);
    }
#endif

    if (lastseen > (double) lsea)
    {
        Verbose("Last seen %s expired\n", databuf);
        DeleteDB(dbp, databuf);
    }
    else
    {
        WriteDB(dbp, databuf, &newq, sizeof(newq));
        WriteDB(dbpent, GenTimeKey(now), &newq, sizeof(newq));
    }

#ifdef HAVE_PTHREAD_H
    if (pthread_mutex_unlock(&MUTEX_GETADDR) != 0)
    {
        CfLog(cferror, "pthread_mutex_unlock failed", "unlock");
        exit(1);
    }
#endif

    dbp->close(dbp, 0);
    dbpent->close(dbpent, 0);
}
void RecordPerformance(char *eventname, time_t t, double value)
{
    DB *dbp;
    DB_ENV *dbenv = NULL;
    char name[CF_BUFSIZE];
    struct Event e, newe;
    double lastseen, delta2;
    int lsea = CF_WEEK;
    time_t now = time(NULL);

    Debug("PerformanceEvent(%s,%.1f s)\n", eventname, value);

    snprintf(name, CF_BUFSIZE-1, "%s/%s", CFWORKDIR, CF_PERFORMANCE);

    if ((errno = db_create(&dbp, dbenv, 0)) != 0)
    {
        snprintf(OUTPUT, CF_BUFSIZE*2, "Couldn't open performance database %s\n", name);
        CfLog(cferror, OUTPUT, "db_open");
        return;
    }

#ifdef CF_OLD_DB
    if ((errno = (dbp->open)(dbp, name, NULL, DB_BTREE, DB_CREATE, 0644)) != 0)
#else
    if ((errno = (dbp->open)(dbp, NULL, name, NULL, DB_BTREE, DB_CREATE, 0644)) != 0)
#endif
    {
        snprintf(OUTPUT, CF_BUFSIZE*2, "Couldn't open performance database %s\n", name);
        CfLog(cferror, OUTPUT, "db_open");
        return;
    }

    if (ReadDB(dbp, eventname, &e, sizeof(e)))
    {
        lastseen = now - e.t;
        newe.t = t;
        newe.Q.q = value;
        newe.Q.expect = GAverage(value, e.Q.expect, 0.3);
        delta2 = (value - e.Q.expect) * (value - e.Q.expect);
        newe.Q.var = GAverage(delta2, e.Q.var, 0.3);

        /* Have to kickstart variance computation, assume 1% to start */

        if (newe.Q.var <= 0.0009)
        {
            newe.Q.var = newe.Q.expect / 100.0;
        }
    }
    else
    {
        lastseen = 0.0;
        newe.t = t;
        newe.Q.q = value;
        newe.Q.expect = value;
        newe.Q.var = 0.001;
    }

    if (lastseen > (double) lsea)
    {
        Verbose("Performance record %s expired\n", eventname);
        DeleteDB(dbp, eventname);
    }
    else
    {
        Verbose("Performance(%s): time=%.4f secs, av=%.4f +/- %.4f\n",
                eventname, value, newe.Q.expect, sqrt(newe.Q.var));
        WriteDB(dbp, eventname, &newe, sizeof(newe));
    }

    dbp->close(dbp, 0);
}
static bool LastseenMigrationVersion0(DBHandle *db)
{
    bool errors = false;
    DBCursor *cursor;

    if (!NewDBCursor(db, &cursor))
    {
        return false;
    }

    char *key;
    void *value;
    int ksize, vsize;

    while (NextDB(cursor, &key, &ksize, &value, &vsize))
    {
        if (ksize == 0)
        {
            Log(LOG_LEVEL_INFO,
                "LastseenMigrationVersion0: Database structure error -- zero-length key.");
            continue;
        }

        /* Only look for old [+-]kH -> IP<QPoint> entries */
        if ((key[0] != '+') && (key[0] != '-'))
        {
            /* Warn about completely unexpected keys */
            if ((key[0] != 'q') && (key[0] != 'k') && (key[0] != 'a'))
            {
                Log(LOG_LEVEL_INFO, "LastseenMigrationVersion0: Malformed key found '%s'", key);
            }

            continue;
        }

        bool incoming = key[0] == '-';
        const char *hostkey = key + 1;

        /* Properly align the data */
        const char *old_data_address = (const char *) value;
        QPoint0 old_data_q;
        memcpy(&old_data_q, (const char *) value + QPOINT0_OFFSET, sizeof(QPoint0));

        /* Only migrate sane data */
        if (vsize != QPOINT0_OFFSET + sizeof(QPoint0))
        {
            Log(LOG_LEVEL_INFO,
                "LastseenMigrationVersion0: invalid value size for key '%s', entry is deleted", key);
            DBCursorDeleteEntry(cursor);
            continue;
        }

        char hostkey_key[CF_BUFSIZE];
        snprintf(hostkey_key, CF_BUFSIZE, "k%s", hostkey);

        if (!WriteDB(db, hostkey_key, old_data_address, strlen(old_data_address) + 1))
        {
            Log(LOG_LEVEL_INFO, "Unable to write version 1 lastseen entry for '%s'", key);
            errors = true;
            continue;
        }

        char address_key[CF_BUFSIZE];
        snprintf(address_key, CF_BUFSIZE, "a%s", old_data_address);

        if (!WriteDB(db, address_key, hostkey, strlen(hostkey) + 1))
        {
            Log(LOG_LEVEL_INFO, "Unable to write version 1 reverse lastseen entry for '%s'", key);
            errors = true;
            continue;
        }

        char quality_key[CF_BUFSIZE];
        snprintf(quality_key, CF_BUFSIZE, "q%c%s", incoming ? 'i' : 'o', hostkey);

        /* Ignore malformed connection quality data */
        if ((!isfinite(old_data_q.q)) || (old_data_q.q < 0)
            || (!isfinite(old_data_q.expect)) || (!isfinite(old_data_q.var)))
        {
            Log(LOG_LEVEL_INFO, "Ignoring malformed connection quality data for '%s'", key);
            DBCursorDeleteEntry(cursor);
            continue;
        }

        KeyHostSeen data = {
            .lastseen = (time_t) old_data_q.q,
            .Q = {
                /* Previously .q wasn't stored in database, but was calculated
                   every time as a difference between previous timestamp and a
                   new timestamp. Given we don't have this information during
                   the database upgrade, just assume that last reading is an
                   average one. */
                .q = old_data_q.expect,
                .dq = 0,
                .expect = old_data_q.expect,
                .var = old_data_q.var,
            }
        };

        if (!WriteDB(db, quality_key, &data, sizeof(data)))
        {
            Log(LOG_LEVEL_INFO, "Unable to write version 1 connection quality key for '%s'", key);
            errors = true;
            continue;
        }

        if (!DBCursorDeleteEntry(cursor))
        {
            Log(LOG_LEVEL_INFO, "Unable to delete version 0 lastseen entry for '%s'", key);
            errors = true;
        }
    }

    if (DeleteDBCursor(cursor) == false)
    {
        Log(LOG_LEVEL_ERR, "LastseenMigrationVersion0: Unable to close cursor");
        errors = true;
    }

    if ((!errors) && (!WriteDB(db, "version", "1", sizeof("1"))))
    {
        errors = true;
    }

    return !errors;
}
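/* Reconstruction for orientation only (not taken from the original source):
 * under the version-0 schema the value unpacked above is assumed to be a
 * NUL-terminated address string in a fixed-size buffer followed by the three
 * QPoint0 doubles, so the address sits at offset 0 and QPOINT0_OFFSET marks
 * where the quality data begins. The 128-byte buffer size and the struct
 * names are illustrative assumptions. */
typedef struct
{
    double q;        /* in version 0 this held the last-seen timestamp */
    double expect;
    double var;
} QPoint0_sketch;

typedef struct
{
    char address[128];        /* read back as old_data_address above */
    QPoint0_sketch Q;         /* copied out via QPOINT0_OFFSET == offsetof(..., Q) */
} LastseenEntry0_sketch;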
void UpdateLastSeen()
// This function is temporarily or permanently deprecated
{
    double lsea = LASTSEENEXPIREAFTER;
    int intermittency = false, qsize, ksize;
    struct CfKeyHostSeen q, newq;
    double lastseen, delta2;
    void *stored;
    CF_DB *dbp = NULL, *dbpent = NULL;
    CF_DBC *dbcp;
    char name[CF_BUFSIZE], *key;
    struct Rlist *rp;
    struct CfKeyBinding *kp;
    time_t now = time(NULL);
    static time_t then;

    if (now < then + 300 && then > 0 && then <= now + 300)
    {
        // Rate limiter
        return;
    }

    then = now;

    CfOut(cf_verbose, "", " -> Writing last-seen observations");

    ThreadLock(cft_server_keyseen);

    if (SERVER_KEYSEEN == NULL)
    {
        ThreadUnlock(cft_server_keyseen);
        CfOut(cf_verbose, "", " -> Keyring is empty");
        return;
    }

    if (BooleanControl("control_agent", CFA_CONTROLBODY[cfa_intermittency].lval))
    {
        CfOut(cf_inform, "", " -> Recording intermittency");
        intermittency = true;
    }

    snprintf(name, CF_BUFSIZE-1, "%s/%s", CFWORKDIR, CF_LASTDB_FILE);
    MapName(name);

    if (!OpenDB(name, &dbp))
    {
        ThreadUnlock(cft_server_keyseen);
        return;
    }

    /* First scan for hosts that have moved address and purge their records
       so that the database always has a 1:1 relationship between keyhash
       and IP address */

    if (!NewDBCursor(dbp, &dbcp))
    {
        ThreadUnlock(cft_server_keyseen);
        CfOut(cf_inform, "", " !! Unable to scan class db");
        return;
    }

    while (NextDB(dbp, dbcp, &key, &ksize, &stored, &qsize))
    {
        memcpy(&q, stored, sizeof(q));

        lastseen = (double) now - q.Q.q;

        if (lastseen > lsea)
        {
            CfOut(cf_verbose, "",
                  " -> Last-seen record for %s expired after %.1lf > %.1lf hours\n",
                  key, lastseen / 3600, lsea / 3600);
            DeleteDB(dbp, key);
        }

        for (rp = SERVER_KEYSEEN; rp != NULL; rp = rp->next)
        {
            kp = (struct CfKeyBinding *) rp->item;

            if ((strcmp(q.address, kp->address) == 0) && (strcmp(key + 1, kp->name + 1) != 0))
            {
                CfOut(cf_verbose, "",
                      " ! Deleting %s's address (%s=%d) as this host %s seems to have moved elsewhere (%s=%d)",
                      key, kp->address, strlen(kp->address), kp->name, q.address, strlen(q.address));
                DeleteDB(dbp, key);
            }
        }
    }

    DeleteDBCursor(dbp, dbcp);

    /* Now perform updates with the latest data */

    for (rp = SERVER_KEYSEEN; rp != NULL; rp = rp->next)
    {
        kp = (struct CfKeyBinding *) rp->item;

        now = kp->timestamp;

        if (intermittency)
        {
            /* Open special file for peer entropy record - INRIA intermittency */

            snprintf(name, CF_BUFSIZE-1, "%s/lastseen/%s.%s", CFWORKDIR, CF_LASTDB_FILE, kp->name);
            MapName(name);

            if (!OpenDB(name, &dbpent))
            {
                continue;
            }
        }

        if (ReadDB(dbp, kp->name, &q, sizeof(q)))
        {
            lastseen = (double) now - q.Q.q;

            if (q.Q.q <= 0)
            {
                lastseen = 300;
                q.Q.expect = 0;
                q.Q.var = 0;
            }

            newq.Q.q = (double) now;
            newq.Q.expect = GAverage(lastseen, q.Q.expect, 0.4);
            delta2 = (lastseen - q.Q.expect) * (lastseen - q.Q.expect);
            newq.Q.var = GAverage(delta2, q.Q.var, 0.4);
            strncpy(newq.address, kp->address, CF_ADDRSIZE - 1);
        }
        else
        {
            lastseen = 0.0;
            newq.Q.q = (double) now;
            newq.Q.expect = 0.0;
            newq.Q.var = 0.0;
            strncpy(newq.address, kp->address, CF_ADDRSIZE - 1);
        }

        if (lastseen > lsea)
        {
            CfOut(cf_verbose, "",
                  " -> Last-seen record for %s expired after %.1lf > %.1lf hours\n",
                  kp->name, lastseen / 3600, lsea / 3600);
            DeleteDB(dbp, kp->name);
        }
        else
        {
            char timebuf[26];

            CfOut(cf_verbose, "",
                  " -> Last saw %s (alias %s) at %s (noexpiry %.1lf <= %.1lf)\n",
                  kp->name, kp->address, cf_strtimestamp_local(now, timebuf),
                  lastseen / 3600, lsea / 3600);

            WriteDB(dbp, kp->name, &newq, sizeof(newq));

            if (intermittency)
            {
                WriteDB(dbpent, GenTimeKey(now), &newq, sizeof(newq));
            }
        }

        if (intermittency && dbpent)
        {
            CloseDB(dbpent);
        }
    }

    CloseDB(dbp);

    ThreadUnlock(cft_server_keyseen);
}
void NoteClassUsage(AlphaList baselist, int purge)
{
    CF_DB *dbp;
    CF_DBC *dbcp;
    void *stored;
    char *key;
    int ksize, vsize;
    Event e, entry, newe;
    double lsea = SECONDS_PER_WEEK * 52;   /* expire after (about) a year */
    time_t now = time(NULL);
    Item *list = NULL;
    const Item *ip;
    double lastseen;
    double vtrue = 1.0;                    /* end with a rough probability */

    /* Only do this for the default policy, too much "downgrading" otherwise */
    if (MINUSF)
    {
        return;
    }

    AlphaListIterator it = AlphaListIteratorInit(&baselist);

    for (ip = AlphaListIteratorNext(&it); ip != NULL; ip = AlphaListIteratorNext(&it))
    {
        if ((IGNORECLASS(ip->name)))
        {
            CfDebug("Ignoring class %s (not packing)", ip->name);
            continue;
        }

        IdempPrependItem(&list, ip->name, NULL);
    }

    if (!OpenDB(&dbp, dbid_classes))
    {
        return;
    }

    /* First record the classes that are in use */

    for (ip = list; ip != NULL; ip = ip->next)
    {
        if (ReadDB(dbp, ip->name, &e, sizeof(e)))
        {
            CfDebug("FOUND %s with %lf\n", ip->name, e.Q.expect);
            lastseen = now - e.t;
            newe.t = now;

            newe.Q = QAverage(e.Q, vtrue, 0.7);
        }
        else
        {
            lastseen = 0.0;
            newe.t = now;
            /* With no data it's 50/50 what we can say */
            newe.Q = QDefinite(0.5 * vtrue);
        }

        if (lastseen > lsea)
        {
            CfDebug("Class usage record %s expired\n", ip->name);
            DeleteDB(dbp, ip->name);
        }
        else
        {
            WriteDB(dbp, ip->name, &newe, sizeof(newe));
        }
    }

    /* Then update with zero the ones we know about that are not active */

    if (purge)
    {
        /* Acquire a cursor for the database and downgrade classes that did
           not get defined this time */

        if (!NewDBCursor(dbp, &dbcp))
        {
            CfOut(cf_inform, "", " !! Unable to scan class db");
            CloseDB(dbp);
            DeleteItemList(list);
            return;
        }

        memset(&entry, 0, sizeof(entry));

        while (NextDB(dbp, dbcp, &key, &ksize, &stored, &vsize))
        {
            time_t then;
            char eventname[CF_BUFSIZE];

            memset(eventname, 0, CF_BUFSIZE);
            strncpy(eventname, (char *) key, ksize);

            if (stored != NULL)
            {
                memcpy(&entry, stored, sizeof(entry));

                then = entry.t;
                lastseen = now - then;

                if (lastseen > lsea)
                {
                    CfDebug("Class usage record %s expired\n", eventname);
                    DBCursorDeleteEntry(dbcp);
                }
                else if (!IsItemIn(list, eventname))
                {
                    newe.t = then;

                    newe.Q = QAverage(entry.Q, 0, 0.5);

                    if (newe.Q.expect <= 0.0001)
                    {
                        CfDebug("Deleting class %s as %lf is zero\n", eventname, newe.Q.expect);
                        DBCursorDeleteEntry(dbcp);
                    }
                    else
                    {
                        CfDebug("Downgrading class %s from %lf to %lf\n",
                                eventname, entry.Q.expect, newe.Q.expect);
                        DBCursorWriteEntry(dbcp, &newe, sizeof(newe));
                    }
                }
            }
        }

        DeleteDBCursor(dbp, dbcp);
    }

    CloseDB(dbp);
    DeleteItemList(list);
}