/** Test destroy cell queue with no interference from other queues. */ static void test_cmux_destroy_cell_queue(void *arg) { circuitmux_t *cmux = NULL; channel_t *ch = NULL; circuit_t *circ = NULL; cell_queue_t *cq = NULL; packed_cell_t *pc = NULL; tor_libevent_cfg cfg; memset(&cfg, 0, sizeof(cfg)); tor_libevent_initialize(&cfg); scheduler_init(); #ifdef ENABLE_MEMPOOLS init_cell_pool(); #endif /* ENABLE_MEMPOOLS */ (void) arg; cmux = circuitmux_alloc(); tt_assert(cmux); ch = new_fake_channel(); ch->has_queued_writes = has_queued_writes; ch->wide_circ_ids = 1; circ = circuitmux_get_first_active_circuit(cmux, &cq); tt_assert(!circ); tt_assert(!cq); circuitmux_append_destroy_cell(ch, cmux, 100, 10); circuitmux_append_destroy_cell(ch, cmux, 190, 6); circuitmux_append_destroy_cell(ch, cmux, 30, 1); tt_int_op(circuitmux_num_cells(cmux), OP_EQ, 3); circ = circuitmux_get_first_active_circuit(cmux, &cq); tt_assert(!circ); tt_assert(cq); tt_int_op(cq->n, OP_EQ, 3); pc = cell_queue_pop(cq); tt_assert(pc); tt_mem_op(pc->body, OP_EQ, "\x00\x00\x00\x64\x04\x0a\x00\x00\x00", 9); packed_cell_free(pc); pc = NULL; tt_int_op(circuitmux_num_cells(cmux), OP_EQ, 2); done: circuitmux_free(cmux); channel_free(ch); packed_cell_free(pc); #ifdef ENABLE_MEMPOOLS free_cell_pool(); #endif /* ENABLE_MEMPOOLS */ }
/** Bring up a Tor node inside the scallion/Shadow simulation.
 * Follows upstream Tor's startup sequence (logging, tor_init, keys,
 * cell pool, bandwidth buckets, cached directory data, cpuworkers),
 * except that scallion drives the once-a-second and token-bucket-refill
 * callbacks itself rather than registering libevent periodic timers.
 * Returns 0 on success, -1 on any initialization failure. */
gint scalliontor_start(ScallionTor* stor, gint argc, gchar *argv[]) {
  time_t now = time(NULL);

  update_approx_time(now);
  tor_threads_init();
  init_logging();

  /* tor_init() loses our logging, so set it before AND after.
   * (NOTE: the post-init call is currently disabled.) */
  scalliontor_setLogging();
  if (tor_init(argc, argv) < 0) {
    return -1;
  }
  // scalliontor_setLogging();

  /* Load the private keys if we don't already have an identity key,
   * and set up the TLS context. */
  gpointer id_key;
#ifdef SCALLION_NEWIDKEYNAME
  id_key = client_identitykey;
#else
  id_key = identitykey;
#endif
  if (id_key == NULL && init_keys() < 0) {
    log_err(LD_BUG,"Error initializing keys; exiting");
    return -1;
  }

  /* Set up the packed_cell_t memory pool. */
  init_cell_pool();

  /* Set up our bandwidth buckets and remember their starting levels. */
  connection_bucket_init();
  stats_prev_global_read_bucket = global_read_bucket;
  stats_prev_global_write_bucket = global_write_bucket;

  /* Initialize the bootstrap status events to know we're starting up. */
  control_event_bootstrap(BOOTSTRAP_STATUS_STARTING, 0);

  if (trusted_dirs_reload_certs()) {
    log_warn(LD_DIR,
             "Couldn't load all cached v3 certificates. Starting anyway.");
  }
#ifndef SCALLION_NOV2DIR
  if (router_reload_v2_networkstatus()) {
    return -1;
  }
#endif
  if (router_reload_consensus_networkstatus()) {
    return -1;
  }

  /* Load the routers file, or assign the defaults. */
  if (router_reload_router_list()) {
    return -1;
  }

  /* Load the networkstatuses. (This launches a download for new routers
   * as appropriate.) */
  directory_info_has_arrived(now, 1);

  /* !note that scallion intercepts the cpuworker functionality (rob) */
  if (server_mode(get_options())) {
    /* Launch cpuworkers. Must happen *after* the onion key was read. */
    cpu_init();
  }

  /* Upstream Tor registers a libevent once-a-second periodic timer here;
   * scallion invokes the callback directly instead. */
  if (!second_timer) {
    _scalliontor_secondCallback(stor);
  }

#ifdef SCALLION_DOREFILLCALLBACKS
#ifndef USE_BUFFEREVENTS
  /* Likewise, scallion schedules token-bucket refills itself rather than
   * through upstream's refill_timer. */
  if (!refill_timer) {
    stor->refillmsecs = get_options()->TokenBucketRefillInterval;
    _scalliontor_refillCallback(stor);
  }
#endif
#endif

  /* Run the startup events. */
  scalliontor_notify(stor);

  return 0;
}