static void test_rend_cache_failure_intro_add(void *data) { (void)data; rend_cache_failure_t *fail_entry; rend_cache_failure_intro_t *entry; const char identity[DIGEST_LEN] = "foo1"; rend_cache_init(); // Adds non-existing entry cache_failure_intro_add((const uint8_t *) identity, "foo2", INTRO_POINT_FAILURE_TIMEOUT); fail_entry = strmap_get_lc(rend_cache_failure, "foo2"); tt_assert(fail_entry); tt_int_op(digestmap_size(fail_entry->intro_failures), OP_EQ, 1); entry = digestmap_get(fail_entry->intro_failures, identity); tt_assert(entry); // Adds existing entry cache_failure_intro_add((const uint8_t *) identity, "foo2", INTRO_POINT_FAILURE_TIMEOUT); fail_entry = strmap_get_lc(rend_cache_failure, "foo2"); tt_assert(fail_entry); tt_int_op(digestmap_size(fail_entry->intro_failures), OP_EQ, 1); entry = digestmap_get(fail_entry->intro_failures, identity); tt_assert(entry); done: rend_cache_free_all(); }
static void test_rend_cache_init(void *data) { (void)data; tt_assert_msg(!rend_cache, "rend_cache should be NULL when starting"); tt_assert_msg(!rend_cache_v2_dir, "rend_cache_v2_dir should be NULL " "when starting"); tt_assert_msg(!rend_cache_failure, "rend_cache_failure should be NULL when " "starting"); rend_cache_init(); tt_assert_msg(rend_cache, "rend_cache should not be NULL after initing"); tt_assert_msg(rend_cache_v2_dir, "rend_cache_v2_dir should not be NULL " "after initing"); tt_assert_msg(rend_cache_failure, "rend_cache_failure should not be NULL " "after initing"); tt_int_op(strmap_size(rend_cache), OP_EQ, 0); tt_int_op(digestmap_size(rend_cache_v2_dir), OP_EQ, 0); tt_int_op(strmap_size(rend_cache_failure), OP_EQ, 0); done: rend_cache_free_all(); }
static void test_rend_cache_intro_failure_note(void *data) { (void)data; rend_cache_failure_t *fail_entry; rend_cache_failure_intro_t *entry; const char key[DIGEST_LEN] = "foo1"; rend_cache_init(); // Test not found rend_cache_intro_failure_note(INTRO_POINT_FAILURE_TIMEOUT, (const uint8_t *) key, "foo2"); fail_entry = strmap_get_lc(rend_cache_failure, "foo2"); tt_assert(fail_entry); tt_int_op(digestmap_size(fail_entry->intro_failures), OP_EQ, 1); entry = digestmap_get(fail_entry->intro_failures, key); tt_assert(entry); tt_int_op(entry->failure_type, OP_EQ, INTRO_POINT_FAILURE_TIMEOUT); // Test found rend_cache_intro_failure_note(INTRO_POINT_FAILURE_UNREACHABLE, (const uint8_t *) key, "foo2"); tt_int_op(entry->failure_type, OP_EQ, INTRO_POINT_FAILURE_UNREACHABLE); done: rend_cache_free_all(); }
/** Unit test for rend_cache_failure_clean(): failure entries whose intro
 * points have all aged out (or that never had any intro points) must be
 * removed from rend_cache_failure, while entries that still hold at least
 * one fresh intro-point failure must be kept. */
static void
test_rend_cache_failure_clean(void *data)
{
  rend_cache_failure_t *failure;
  rend_cache_failure_intro_t *ip_one, *ip_two;
  const char key_one[DIGEST_LEN] = "ip1";
  const char key_two[DIGEST_LEN] = "ip2";
  (void)data;

  rend_cache_init();

  // Test with empty failure cache
  /* Cleaning an empty cache must be a harmless no-op. */
  rend_cache_failure_clean(time(NULL));
  tt_int_op(strmap_size(rend_cache_failure), OP_EQ, 0);

  // Test with one empty failure entry
  /* An entry with zero intro-point failures carries no information,
   * so the clean pass must drop it entirely. */
  failure = rend_cache_failure_entry_new();
  strmap_set_lc(rend_cache_failure, "foo1", failure);
  rend_cache_failure_clean(time(NULL));
  tt_int_op(strmap_size(rend_cache_failure), OP_EQ, 0);

  // Test with one new intro point
  /* A just-created intro-point failure is fresh and must survive. */
  failure = rend_cache_failure_entry_new();
  ip_one = rend_cache_failure_intro_entry_new(INTRO_POINT_FAILURE_TIMEOUT);
  digestmap_set(failure->intro_failures, key_one, ip_one);
  strmap_set_lc(rend_cache_failure, "foo1", failure);
  rend_cache_failure_clean(time(NULL));
  tt_int_op(strmap_size(rend_cache_failure), OP_EQ, 1);

  // Test with one old intro point
  /* Backdate the creation time 7 minutes; the intro point must age out,
   * which empties the entry, which in turn must be removed. */
  rend_cache_failure_purge();
  failure = rend_cache_failure_entry_new();
  ip_one = rend_cache_failure_intro_entry_new(INTRO_POINT_FAILURE_TIMEOUT);
  ip_one->created_ts = time(NULL) - 7*60;
  digestmap_set(failure->intro_failures, key_one, ip_one);
  strmap_set_lc(rend_cache_failure, "foo1", failure);
  rend_cache_failure_clean(time(NULL));
  tt_int_op(strmap_size(rend_cache_failure), OP_EQ, 0);

  // Test with one old intro point and one new one
  /* Mixed ages: only the 7-minute-old intro point is cleaned; the entry
   * itself stays because the 2-minute-old one is still fresh. */
  rend_cache_failure_purge();
  failure = rend_cache_failure_entry_new();
  ip_one = rend_cache_failure_intro_entry_new(INTRO_POINT_FAILURE_TIMEOUT);
  ip_one->created_ts = time(NULL) - 7*60;
  digestmap_set(failure->intro_failures, key_one, ip_one);
  ip_two = rend_cache_failure_intro_entry_new(INTRO_POINT_FAILURE_TIMEOUT);
  ip_two->created_ts = time(NULL) - 2*60;
  digestmap_set(failure->intro_failures, key_two, ip_two);
  strmap_set_lc(rend_cache_failure, "foo1", failure);
  rend_cache_failure_clean(time(NULL));
  tt_int_op(strmap_size(rend_cache_failure), OP_EQ, 1);
  tt_int_op(digestmap_size(failure->intro_failures), OP_EQ, 1);

 done:
  rend_cache_free_all();
}
/** Unit test for the cutoff-based rend_cache_clean_v2_descs_as_dir():
 * descriptors whose timestamp falls before the given cutoff are removed
 * from rend_cache_v2_dir; newer ones are kept.
 *
 * NOTE(review): a second definition of a function with this same name but
 * a two-argument signature appears later in this source — these look like
 * two revisions of the same test; confirm only one is actually compiled. */
static void
test_rend_cache_clean_v2_descs_as_dir(void *data)
{
  rend_cache_entry_t *e;
  time_t now, cutoff;
  rend_service_descriptor_t *desc;
  now = time(NULL);
  /* The eviction threshold used throughout the test: anything with a
   * timestamp older than this is expired. */
  cutoff = now - (REND_CACHE_MAX_AGE + REND_CACHE_MAX_SKEW);
  const char key[DIGEST_LEN] = "abcde";
  (void)data;

  rend_cache_init();

  // Test running with an empty cache
  rend_cache_clean_v2_descs_as_dir(cutoff);
  tt_int_op(digestmap_size(rend_cache_v2_dir), OP_EQ, 0);

  // Test with only one new entry
  /* Insert a descriptor stamped "now"; ownership passes to the cache. */
  e = tor_malloc_zero(sizeof(rend_cache_entry_t));
  e->last_served = now;
  desc = tor_malloc_zero(sizeof(rend_service_descriptor_t));
  desc->timestamp = now;
  desc->pk = pk_generate(0);
  e->parsed = desc;
  digestmap_set(rend_cache_v2_dir, key, e);

  /* Set the cutoff to minus 10 seconds. */
  rend_cache_clean_v2_descs_as_dir(cutoff - 10);
  tt_int_op(digestmap_size(rend_cache_v2_dir), OP_EQ, 1);

  // Test with one old entry
  /* Backdate the descriptor well past the cutoff; it must be evicted. */
  desc->timestamp = cutoff - 1000;
  rend_cache_clean_v2_descs_as_dir(cutoff);
  tt_int_op(digestmap_size(rend_cache_v2_dir), OP_EQ, 0);

 done:
  rend_cache_free_all();
}
static void test_rend_cache_failure_entry_new(void *data) { rend_cache_failure_t *failure; (void)data; failure = rend_cache_failure_entry_new(); tt_assert(failure); tt_int_op(digestmap_size(failure->intro_failures), OP_EQ, 0); done: rend_cache_failure_entry_free(failure); }
/** Scan the measured bandwidth cache and remove expired entries */ STATIC void dirserv_expire_measured_bw_cache(time_t now) { if (mbw_cache) { /* Iterate through the cache and check each entry */ DIGESTMAP_FOREACH_MODIFY(mbw_cache, k, mbw_cache_entry_t *, e) { if (now > e->as_of + MAX_MEASUREMENT_AGE) { tor_free(e); MAP_DEL_CURRENT(k); } } DIGESTMAP_FOREACH_END; /* Check if we cleared the whole thing and free if so */ if (digestmap_size(mbw_cache) == 0) { digestmap_free(mbw_cache, tor_free_); mbw_cache = 0; } } }
static void test_replaycache_scrub(void *arg) { replaycache_t *r = NULL; int result; (void)arg; r = replaycache_new(600, 300); tt_assert(r != NULL); /* Set up like in test_replaycache_hit() */ result = replaycache_add_and_test_internal(100, r, test_buffer, strlen(test_buffer), NULL); tt_int_op(result,OP_EQ, 0); result = replaycache_add_and_test_internal(200, r, test_buffer, strlen(test_buffer), NULL); tt_int_op(result,OP_EQ, 1); /* * Poke a few replaycache_scrub_if_needed_internal() error cases that * can't happen through replaycache_add_and_test_internal() */ /* Null cache */ replaycache_scrub_if_needed_internal(300, NULL); /* Assert we're still here */ tt_assert(1); /* Make sure we hit the aging-out case too */ replaycache_scrub_if_needed_internal(1500, r); /* Assert that we aged it */ tt_int_op(digestmap_size(r->digests_seen),OP_EQ, 0); done: if (r) replaycache_free(r); return; }
/** Unit test for the (now, force_remove) variant of
 * rend_cache_clean_v2_descs_as_dir(): entries are removed when either the
 * descriptor timestamp or the last-served time falls outside the allowed
 * age window, and the force_remove argument must not evict fresh entries.
 *
 * NOTE(review): an earlier definition of a function with this same name
 * but a one-argument signature appears above in this source — these look
 * like two revisions of the same test; confirm only one is compiled. */
static void
test_rend_cache_clean_v2_descs_as_dir(void *data)
{
  rend_cache_entry_t *e;
  time_t now;
  rend_service_descriptor_t *desc;
  now = time(NULL);
  const char key[DIGEST_LEN] = "abcde";
  (void)data;

  rend_cache_init();

  // Test running with an empty cache
  rend_cache_clean_v2_descs_as_dir(now, 0);
  tt_int_op(digestmap_size(rend_cache_v2_dir), OP_EQ, 0);

  // Test with only one new entry
  /* Fresh timestamp and fresh last_served: must be kept. */
  e = tor_malloc_zero(sizeof(rend_cache_entry_t));
  e->last_served = now;
  desc = tor_malloc_zero(sizeof(rend_service_descriptor_t));
  desc->timestamp = now;
  desc->pk = pk_generate(0);
  e->parsed = desc;
  digestmap_set(rend_cache_v2_dir, key, e);
  rend_cache_clean_v2_descs_as_dir(now, 0);
  tt_int_op(digestmap_size(rend_cache_v2_dir), OP_EQ, 1);

  // Test with one old entry
  /* Backdate the descriptor timestamp past the max age + skew window;
   * the entry must be evicted. */
  desc->timestamp = now - (REND_CACHE_MAX_AGE + REND_CACHE_MAX_SKEW + 1000);
  rend_cache_clean_v2_descs_as_dir(now, 0);
  tt_int_op(digestmap_size(rend_cache_v2_dir), OP_EQ, 0);

  // Test with one entry that has an old last served
  /* Fresh descriptor but stale last_served: must also be evicted. */
  e = tor_malloc_zero(sizeof(rend_cache_entry_t));
  e->last_served = now - (REND_CACHE_MAX_AGE + REND_CACHE_MAX_SKEW + 1000);
  desc = tor_malloc_zero(sizeof(rend_service_descriptor_t));
  desc->timestamp = now;
  desc->pk = pk_generate(0);
  e->parsed = desc;
  digestmap_set(rend_cache_v2_dir, key, e);
  rend_cache_clean_v2_descs_as_dir(now, 0);
  tt_int_op(digestmap_size(rend_cache_v2_dir), OP_EQ, 0);

  // Test a run through asking for a large force_remove
  /* A big force_remove request must still not evict a fresh entry. */
  e = tor_malloc_zero(sizeof(rend_cache_entry_t));
  e->last_served = now;
  desc = tor_malloc_zero(sizeof(rend_service_descriptor_t));
  desc->timestamp = now;
  desc->pk = pk_generate(0);
  e->parsed = desc;
  digestmap_set(rend_cache_v2_dir, key, e);
  rend_cache_clean_v2_descs_as_dir(now, 20000);
  tt_int_op(digestmap_size(rend_cache_v2_dir), OP_EQ, 1);

 done:
  rend_cache_free_all();
}
/* Update the current SR state as needed for the upcoming voting round at
 * <b>valid_after</b>.  Handles protocol-phase transitions, generates a
 * commit at boot if the commit phase has none, advances the state's
 * valid-after time, and bumps the per-phase round counters. */
void
sr_state_update(time_t valid_after)
{
  sr_phase_t next_phase;

  /* Module invariant: the state must have been initialized. */
  if (BUG(!sr_state))
    return;

  /* Don't call this function twice in the same voting period. */
  if (valid_after <= sr_state->valid_after) {
    log_info(LD_DIR, "SR: Asked to update state twice. Ignoring.");
    return;
  }

  /* Get phase of upcoming round. */
  next_phase = get_sr_protocol_phase(valid_after);

  /* If we are transitioning to a new protocol phase, prepare the stage. */
  if (is_phase_transition(next_phase)) {
    if (next_phase == SR_PHASE_COMMIT) {
      /* Going into commit phase means we are starting a new protocol run. */
      new_protocol_run(valid_after);
    }
    /* Set the new phase for this round */
    sr_state->phase = next_phase;
  } else if (sr_state->phase == SR_PHASE_COMMIT &&
             digestmap_size(sr_state->commits) == 0) {
    /* We are _NOT_ in a transition phase so if we are in the commit phase
     * and have no commit, generate one. Chances are that we are booting up
     * so let's have a commit in our state for the next voting period. */
    sr_commit_t *our_commit =
      sr_generate_our_commit(valid_after, get_my_v3_authority_cert());
    if (our_commit) {
      /* Add our commitment to our state. In case we are unable to create one
       * (highly unlikely), we won't vote for this protocol run since our
       * commitment won't be in our state. */
      sr_state_add_commit(our_commit);
    }
  }

  sr_state_set_valid_after(valid_after);

  /* Count the current round */
  if (sr_state->phase == SR_PHASE_COMMIT) {
    /* invariant check: we've not entered reveal phase yet */
    if (BUG(sr_state->n_reveal_rounds != 0))
      return;
    sr_state->n_commit_rounds++;
  } else {
    sr_state->n_reveal_rounds++;
  }

  { /* Debugging. */
    char tbuf[ISO_TIME_LEN + 1];
    format_iso_time(tbuf, valid_after);
    log_info(LD_DIR, "SR: State prepared for upcoming voting period (%s). "
             "Upcoming phase is %s (counters: %d commit & %d reveal rounds).",
             tbuf, get_phase_str(sr_state->phase),
             sr_state->n_commit_rounds, sr_state->n_reveal_rounds);
  }
}
/** Return the number of entries currently held in the measured bandwidth
 * cache, or 0 when the cache has not been created yet. */
int
dirserv_get_measured_bw_cache_size(void)
{
  return mbw_cache ? digestmap_size(mbw_cache) : 0;
}