static uint64_t
perftime(void)
{
  struct timeval now, out;
  tor_gettimeofday(&now);
  timersub(&now, &tv_start, &out);
  return ((uint64_t)out.tv_sec)*1000000000 + out.tv_usec*1000;
}
/** Return a fairly recent view of the current time. */
void
tor_gettimeofday_cached(struct timeval *tv)
{
  if (cached_time_hires.tv_sec == 0) {
    tor_gettimeofday(&cached_time_hires);
  }
  *tv = cached_time_hires;
}
/** This is a wrapper function around the actual function that processes the
 * <b>cell</b> that just arrived on <b>conn</b>. Increment <b>*time</b>
 * by the number of microseconds used by the call to <b>*func(cell, conn)</b>.
 */
static void
command_time_process_cell(cell_t *cell, or_connection_t *conn, int *time,
                          void (*func)(cell_t *, or_connection_t *))
{
  struct timeval start, end;
  long time_passed;

  tor_gettimeofday(&start);

  (*func)(cell, conn);

  tor_gettimeofday(&end);
  time_passed = tv_udiff(&start, &end);

  if (time_passed > 10000) { /* more than 10ms */
    log_debug(LD_OR,"That call just took %ld ms.",time_passed/1000);
  }
  if (time_passed < 0) {
    log_info(LD_GENERAL,"That call took us back in time!");
    time_passed = 0;
  }
  *time += time_passed;
}
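/* A minimal, self-contained sketch of the timing pattern used above: wrap
 * a call in two gettimeofday() samples and convert the difference to
 * microseconds via timersub(). The names below (time_call_usec, work) are
 * illustrative, not part of the Tor codebase. */
#include <sys/time.h>
#include <stdint.h>

static int64_t
time_call_usec(void (*work)(void))
{
  struct timeval start, end, diff;
  gettimeofday(&start, NULL);
  work();
  gettimeofday(&end, NULL);
  timersub(&end, &start, &diff);
  /* tv_sec carries whole seconds; tv_usec the sub-second remainder. */
  return ((int64_t)diff.tv_sec) * 1000000 + diff.tv_usec;
}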
/** Note that a directory request (either direct or tunneled, as given by
 * <b>type</b>) for a v3 network status with unique ID <b>dirreq_id</b> of
 * size <b>response_size</b> has started. */
void
geoip_start_dirreq(uint64_t dirreq_id, size_t response_size,
                   dirreq_type_t type)
{
  dirreq_map_entry_t *ent;
  if (!get_options()->DirReqStatistics)
    return;
  ent = tor_malloc_zero(sizeof(dirreq_map_entry_t));
  ent->dirreq_id = dirreq_id;
  tor_gettimeofday(&ent->request_time);
  ent->response_size = response_size;
  ent->type = type;
  dirreq_map_put_(ent, type, dirreq_id);
}
/** Initialize the common elements in a circuit_t, and add it to the global
 * list. */
static void
init_circuit_base(circuit_t *circ)
{
  tor_gettimeofday(&circ->timestamp_created);

  circ->package_window = circuit_initial_package_window();
  circ->deliver_window = CIRCWINDOW_START;

  /* Initialize the cell_ewma_t structure */
  circ->n_cell_ewma.last_adjusted_tick = cell_ewma_get_tick();
  circ->n_cell_ewma.cell_count = 0.0;
  circ->n_cell_ewma.heap_index = -1;
  circ->n_cell_ewma.is_for_p_conn = 0;

  circuit_add(circ);
}
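/* Sketch (an assumption about the technique, not Tor's scheduler code) of
 * how a cell_ewma-style count can decay across ticks: each elapsed tick
 * multiplies the stored count by a per-tick decay factor, so recently seen
 * cells dominate the total. All names here are illustrative. */
#include <math.h>

static double
ewma_scaled_count(double cell_count, unsigned last_adjusted_tick,
                  unsigned cur_tick, double decay_per_tick)
{
  /* pow() applies the decay once for every elapsed tick. */
  return cell_count *
         pow(decay_per_tick, (double)(cur_tick - last_adjusted_tick));
}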
/** Helper: Write the standard prefix for log lines to a
 * <b>buf_len</b> character buffer in <b>buf</b>. */
static INLINE size_t
_log_prefix(char *buf, size_t buf_len, int severity)
{
  time_t t;
  struct timeval now;
  struct tm tm;
  size_t n;
  int r;

  tor_gettimeofday(&now);
  t = (time_t)now.tv_sec;

  n = strftime(buf, buf_len, "%b %d %H:%M:%S", tor_localtime_r(&t, &tm));
  r = tor_snprintf(buf+n, buf_len-n, ".%.3i [%s] ",
                   (int)now.tv_usec / 1000, sev_to_string(severity));

  if (r<0)
    return buf_len-1;
  else
    return n+r;
}
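/* A standalone sketch of the same timestamp format ("%b %d %H:%M:%S" plus
 * a millisecond field) using only the standard library; format_stamp is an
 * illustrative name, not a Tor function. */
#include <stdio.h>
#include <time.h>
#include <sys/time.h>

static size_t
format_stamp(char *buf, size_t buf_len)
{
  struct timeval now;
  struct tm tm;
  gettimeofday(&now, NULL);
  time_t t = now.tv_sec;
  size_t n = strftime(buf, buf_len, "%b %d %H:%M:%S",
                      localtime_r(&t, &tm));
  /* %.3i zero-pads the millisecond field, e.g. ".007". */
  int r = snprintf(buf + n, buf_len - n, ".%.3i",
                   (int)(now.tv_usec / 1000));
  return (r < 0) ? n : n + (size_t)r;
}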
static void
timer_cb(tor_timer_t *t, void *arg, const monotime_t *now_mono)
{
  struct timeval now;

  tor_gettimeofday(&now);
  tor_timer_t **t_ptr = arg;
  tor_assert(*t_ptr == t);
  int idx = (int) (t_ptr - timers);
  ++fired[idx];
  timersub(&now, &fire_at[idx], &difference[idx]);
  diffs_mono_usec[idx] =
    monotime_diff_usec(&started_at, now_mono) - delay_usec[idx];
  ++n_fired;

  // printf("%d / %d\n",n_fired, N_TIMERS);
  if (n_fired == n_active_timers) {
    tor_libevent_exit_loop_after_callback(tor_libevent_get_base());
  }
}
/** Change the state of the directory request (either direct or tunneled,
 * as given by <b>type</b>) with <b>dirreq_id</b> to <b>new_state</b> and
 * possibly mark it as completed. If no entry can be found for the given
 * key parts (e.g., if this is a directory request that we are not
 * measuring, or one that was started in the previous measurement period),
 * or if the state cannot be advanced to <b>new_state</b>, do nothing. */
void
geoip_change_dirreq_state(uint64_t dirreq_id, dirreq_type_t type,
                          dirreq_state_t new_state)
{
  dirreq_map_entry_t *ent;
  if (!get_options()->DirReqStatistics)
    return;
  ent = dirreq_map_get_(type, dirreq_id);
  if (!ent)
    return;
  if (new_state == DIRREQ_IS_FOR_NETWORK_STATUS)
    return;
  /* A request may only advance one state at a time. */
  if (new_state - 1 != ent->state)
    return;
  ent->state = new_state;
  if ((type == DIRREQ_DIRECT &&
       new_state == DIRREQ_FLUSHING_DIR_CONN_FINISHED) ||
      (type == DIRREQ_TUNNELED &&
       new_state == DIRREQ_CHANNEL_BUFFER_FLUSHED)) {
    tor_gettimeofday(&ent->completion_time);
    ent->completed = 1;
  }
}
/** Return a newly allocated comma-separated string containing statistics
 * on network status downloads. The string contains the number of completed
 * requests, timeouts, and still running requests as well as the download
 * times by deciles and quartiles. Return NULL if we have not observed
 * requests for long enough. */
static char *
geoip_get_dirreq_history(geoip_client_action_t action, dirreq_type_t type)
{
  char *result = NULL;
  smartlist_t *dirreq_completed = NULL;
  uint32_t complete = 0, timeouts = 0, running = 0;
  int bufsize = 1024, written;
  dirreq_map_entry_t **ptr, **next, *ent;
  struct timeval now;

  tor_gettimeofday(&now);
  if (action != GEOIP_CLIENT_NETWORKSTATUS &&
      action != GEOIP_CLIENT_NETWORKSTATUS_V2)
    return NULL;
  dirreq_completed = smartlist_new();
  for (ptr = HT_START(dirreqmap, &dirreq_map); ptr; ptr = next) {
    ent = *ptr;
    if (ent->action != action || ent->type != type) {
      next = HT_NEXT(dirreqmap, &dirreq_map, ptr);
      continue;
    } else {
      if (ent->completed) {
        smartlist_add(dirreq_completed, ent);
        complete++;
        next = HT_NEXT_RMV(dirreqmap, &dirreq_map, ptr);
      } else {
        if (tv_mdiff(&ent->request_time, &now) / 1000 > DIRREQ_TIMEOUT)
          timeouts++;
        else
          running++;
        next = HT_NEXT_RMV(dirreqmap, &dirreq_map, ptr);
        tor_free(ent);
      }
    }
  }
#define DIR_REQ_GRANULARITY 4
  complete = round_uint32_to_next_multiple_of(complete,
                                              DIR_REQ_GRANULARITY);
  timeouts = round_uint32_to_next_multiple_of(timeouts,
                                              DIR_REQ_GRANULARITY);
  running = round_uint32_to_next_multiple_of(running,
                                             DIR_REQ_GRANULARITY);
  result = tor_malloc_zero(bufsize);
  written = tor_snprintf(result, bufsize, "complete=%u,timeout=%u,"
                         "running=%u", complete, timeouts, running);
  if (written < 0) {
    tor_free(result);
    goto done;
  }
#define MIN_DIR_REQ_RESPONSES 16
  if (complete >= MIN_DIR_REQ_RESPONSES) {
    uint32_t *dltimes;
    /* We may have rounded 'completed' up. Here we want to use the
     * real value. */
    complete = smartlist_len(dirreq_completed);
    dltimes = tor_malloc_zero(sizeof(uint32_t) * complete);
    SMARTLIST_FOREACH_BEGIN(dirreq_completed, dirreq_map_entry_t *, ent) {
      uint32_t bytes_per_second;
      uint32_t time_diff = (uint32_t) tv_mdiff(&ent->request_time,
                                               &ent->completion_time);
      if (time_diff == 0)
        time_diff = 1; /* Avoid DIV/0; "instant" answers are impossible
                        * by law of nature or something, but a millisecond
                        * is a bit greater than "instantly" */
      bytes_per_second = (uint32_t)(1000 * ent->response_size / time_diff);
      dltimes[ent_sl_idx] = bytes_per_second;
    } SMARTLIST_FOREACH_END(ent);
/** Try to tell a cpuworker to perform the public key operations necessary to
 * respond to <b>onionskin</b> for the circuit <b>circ</b>.
 *
 * If <b>cpuworker</b> is defined, assert that it's idle, and use it. Else,
 * look for an idle cpuworker and use it. If none is idle, queue the task on
 * the pending onion list and return. Return 0 if we successfully assign the
 * task, or -1 on failure. */
int
assign_onionskin_to_cpuworker(connection_t *cpuworker,
                              or_circuit_t *circ,
                              create_cell_t *onionskin)
{
  cpuworker_request_t req;
  time_t now = approx_time();
  static time_t last_culled_cpuworkers = 0;
  int should_time;

  /* Checking for wedged cpuworkers requires a linear search over all
   * connections, so let's do it only once a minute.
   */
#define CULL_CPUWORKERS_INTERVAL 60
  if (last_culled_cpuworkers + CULL_CPUWORKERS_INTERVAL <= now) {
    cull_wedged_cpuworkers();
    spawn_enough_cpuworkers();
    last_culled_cpuworkers = now;
  }

  if (1) {
    if (num_cpuworkers_busy == num_cpuworkers) {
      log_debug(LD_OR,"No idle cpuworkers. Queuing.");
      if (onion_pending_add(circ, onionskin) < 0) {
        tor_free(onionskin);
        return -1;
      }
      return 0;
    }

    if (!cpuworker)
      cpuworker = connection_get_by_type_state(CONN_TYPE_CPUWORKER,
                                               CPUWORKER_STATE_IDLE);

    tor_assert(cpuworker);

    if (!circ->p_chan) {
      log_info(LD_OR,"circ->p_chan gone. Failing circ.");
      tor_free(onionskin);
      return -1;
    }

    if (connection_or_digest_is_known_relay(circ->p_chan->identity_digest))
      rep_hist_note_circuit_handshake_assigned(onionskin->handshake_type);

    should_time = should_time_request(onionskin->handshake_type);
    memset(&req, 0, sizeof(req));
    req.magic = CPUWORKER_REQUEST_MAGIC;
    tag_pack(req.tag, circ->p_chan->global_identifier,
             circ->p_circ_id);
    req.timed = should_time;

    cpuworker->state = CPUWORKER_STATE_BUSY_ONION;
    /* touch the lastwritten timestamp, since that's how we check to
     * see how long it's been since we asked the question, and sometimes
     * we check before the first call to connection_handle_write(). */
    cpuworker->timestamp_lastwritten = now;
    num_cpuworkers_busy++;

    req.task = CPUWORKER_TASK_ONION;
    memcpy(&req.create_cell, onionskin, sizeof(create_cell_t));

    tor_free(onionskin);

    if (should_time)
      tor_gettimeofday(&req.started_at);

    connection_write_to_buf((void*)&req, sizeof(req), cpuworker);
    memwipe(&req, 0, sizeof(req));
  }
  return 0;
}
/** Implement a cpuworker. 'data' is an fdarray as returned by socketpair.
 * Reads and writes from fdarray[1]. Reads requests, writes answers.
 *
 *   Request format:
 *          cpuworker_request_t.
 *   Response format:
 *          cpuworker_reply_t
 */
static void
cpuworker_main(void *data)
{
  /* For talking to the parent thread/process */
  tor_socket_t *fdarray = data;
  tor_socket_t fd;

  /* variables for onion processing */
  server_onion_keys_t onion_keys;
  cpuworker_request_t req;
  cpuworker_reply_t rpl;

  fd = fdarray[1]; /* this side is ours */
#ifndef TOR_IS_MULTITHREADED
  tor_close_socket(fdarray[0]); /* this is the side of the socketpair the
                                 * parent uses */
  tor_free_all(1); /* so the child doesn't hold the parent's fd's open */
  handle_signals(0); /* ignore interrupts from the keyboard, etc */
#endif
  tor_free(data);

  setup_server_onion_keys(&onion_keys);

  for (;;) {
    if (read_all(fd, (void *)&req, sizeof(req), 1) != sizeof(req)) {
      log_info(LD_OR, "read request failed. Exiting.");
      goto end;
    }
    tor_assert(req.magic == CPUWORKER_REQUEST_MAGIC);

    memset(&rpl, 0, sizeof(rpl));

    if (req.task == CPUWORKER_TASK_ONION) {
      const create_cell_t *cc = &req.create_cell;
      created_cell_t *cell_out = &rpl.created_cell;
      struct timeval tv_start = {0,0}, tv_end;
      int n;
      rpl.timed = req.timed;
      rpl.started_at = req.started_at;
      rpl.handshake_type = cc->handshake_type;
      if (req.timed)
        tor_gettimeofday(&tv_start);
      n = onion_skin_server_handshake(cc->handshake_type,
                                      cc->onionskin, cc->handshake_len,
                                      &onion_keys,
                                      cell_out->reply,
                                      rpl.keys, CPATH_KEY_MATERIAL_LEN,
                                      rpl.rend_auth_material);
      if (n < 0) {
        /* failure */
        log_debug(LD_OR,"onion_skin_server_handshake failed.");
        memset(&rpl, 0, sizeof(rpl));
        memcpy(rpl.tag, req.tag, TAG_LEN);
        rpl.success = 0;
      } else {
        /* success */
        log_debug(LD_OR,"onion_skin_server_handshake succeeded.");
        memcpy(rpl.tag, req.tag, TAG_LEN);
        cell_out->handshake_len = n;
        switch (cc->cell_type) {
        case CELL_CREATE:
          cell_out->cell_type = CELL_CREATED; break;
        case CELL_CREATE2:
          cell_out->cell_type = CELL_CREATED2; break;
        case CELL_CREATE_FAST:
          cell_out->cell_type = CELL_CREATED_FAST; break;
        default:
          tor_assert(0);
          goto end;
        }
        rpl.success = 1;
      }
      rpl.magic = CPUWORKER_REPLY_MAGIC;
      if (req.timed) {
        struct timeval tv_diff;
        int64_t usec;
        tor_gettimeofday(&tv_end);
        timersub(&tv_end, &tv_start, &tv_diff);
        usec = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
        if (usec < 0 || usec > MAX_BELIEVABLE_ONIONSKIN_DELAY)
          rpl.n_usec = MAX_BELIEVABLE_ONIONSKIN_DELAY;
        else
          rpl.n_usec = (uint32_t) usec;
      }
      if (write_all(fd, (void*)&rpl, sizeof(rpl), 1) != sizeof(rpl)) {
        log_err(LD_BUG,"writing response buf failed. Exiting.");
        goto end;
      }
      log_debug(LD_OR,"finished writing response.");
    } else if (req.task == CPUWORKER_TASK_SHUTDOWN) {
      log_info(LD_OR,"Clean shutdown: exiting");
      goto end;
    }
    memwipe(&req, 0, sizeof(req));
    memwipe(&rpl, 0, sizeof(rpl));
  }
 end:
  memwipe(&req, 0, sizeof(req));
  memwipe(&rpl, 0, sizeof(rpl));
  release_server_onion_keys(&onion_keys);
  tor_close_socket(fd);
  crypto_thread_cleanup();
  spawn_exit();
}
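/* A minimal sketch (illustrative names, not Tor's API) of the
 * fixed-size-record protocol above: loop on read() until an entire struct
 * has arrived, since a single read() may return fewer bytes than asked. */
#include <unistd.h>
#include <stddef.h>

static ssize_t
read_exactly(int fd, void *buf, size_t count)
{
  size_t got = 0;
  while (got < count) {
    ssize_t r = read(fd, (char *)buf + got, count - got);
    if (r <= 0)
      return -1; /* error or EOF before a full record arrived */
    got += (size_t)r;
  }
  return (ssize_t)got;
}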
/** Called when we get data from a cpuworker. If the answer is not complete,
 * wait for a complete answer. If the answer is complete,
 * process it as appropriate.
 */
int
connection_cpu_process_inbuf(connection_t *conn)
{
  uint64_t chan_id;
  circid_t circ_id;
  channel_t *p_chan = NULL;
  circuit_t *circ;

  tor_assert(conn);
  tor_assert(conn->type == CONN_TYPE_CPUWORKER);

  if (!connection_get_inbuf_len(conn))
    return 0;

  if (conn->state == CPUWORKER_STATE_BUSY_ONION) {
    cpuworker_reply_t rpl;
    if (connection_get_inbuf_len(conn) < sizeof(cpuworker_reply_t))
      return 0; /* not yet */
    tor_assert(connection_get_inbuf_len(conn) == sizeof(cpuworker_reply_t));

    connection_fetch_from_buf((void*)&rpl,sizeof(cpuworker_reply_t),conn);

    tor_assert(rpl.magic == CPUWORKER_REPLY_MAGIC);

    if (rpl.timed && rpl.success &&
        rpl.handshake_type <= MAX_ONION_HANDSHAKE_TYPE) {
      /* Time how long this request took. The handshake_type check should
         be needless, but let's leave it in to be safe. */
      struct timeval tv_end, tv_diff;
      int64_t usec_roundtrip;
      tor_gettimeofday(&tv_end);
      timersub(&tv_end, &rpl.started_at, &tv_diff);
      usec_roundtrip = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
      if (usec_roundtrip >= 0 &&
          usec_roundtrip < MAX_BELIEVABLE_ONIONSKIN_DELAY) {
        ++onionskins_n_processed[rpl.handshake_type];
        onionskins_usec_internal[rpl.handshake_type] += rpl.n_usec;
        onionskins_usec_roundtrip[rpl.handshake_type] += usec_roundtrip;
        if (onionskins_n_processed[rpl.handshake_type] >= 500000) {
          /* Scale down every 500000 handshakes. On a busy server, that's
           * less impressive than it sounds. */
          onionskins_n_processed[rpl.handshake_type] /= 2;
          onionskins_usec_internal[rpl.handshake_type] /= 2;
          onionskins_usec_roundtrip[rpl.handshake_type] /= 2;
        }
      }
    }
    /* parse out the circ it was talking about */
    tag_unpack(rpl.tag, &chan_id, &circ_id);
    circ = NULL;
    log_debug(LD_OR,
              "Unpacking cpuworker reply, chan_id is " U64_FORMAT
              ", circ_id is %u",
              U64_PRINTF_ARG(chan_id), (unsigned)circ_id);
    p_chan = channel_find_by_global_id(chan_id);

    if (p_chan)
      circ = circuit_get_by_circid_channel(circ_id, p_chan);

    if (rpl.success == 0) {
      log_debug(LD_OR,
                "decoding onionskin failed. "
                "(Old key or bad software.) Closing.");
      if (circ)
        circuit_mark_for_close(circ, END_CIRC_REASON_TORPROTOCOL);
      goto done_processing;
    }
    if (!circ) {
      /* This happens because somebody sends us a destroy cell and the
       * circuit goes away, while the cpuworker is working. This is also
       * why our tag doesn't include a pointer to the circ, because we'd
       * never know if it's still valid.
       */
      log_debug(LD_OR,"processed onion for a circ that's gone. Dropping.");
      goto done_processing;
    }
    tor_assert(! CIRCUIT_IS_ORIGIN(circ));
    if (onionskin_answer(TO_OR_CIRCUIT(circ), &rpl.created_cell,
                         (const char*)rpl.keys,
                         rpl.rend_auth_material) < 0) {
      log_warn(LD_OR,"onionskin_answer failed. Closing.");
      circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL);
      goto done_processing;
    }
    log_debug(LD_OR,"onionskin_answer succeeded. Yay.");
  } else {
    tor_assert(0); /* don't ask me to do handshakes yet */
  }

 done_processing:
  conn->state = CPUWORKER_STATE_IDLE;
  num_cpuworkers_busy--;
  if (conn->timestamp_created < last_rotation_time) {
    connection_mark_for_close(conn);
    num_cpuworkers--;
    spawn_enough_cpuworkers();
  } else {
    process_pending_task(conn);
  }
  return 0;
}
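/* Sketch of the scaling trick above (illustrative names): keep a running
 * count and a running sum, and halve both once the count hits a threshold.
 * The ratio sum/count -- the average -- is preserved up to rounding, while
 * the counters stay bounded and recent samples carry more weight. */
#include <stdint.h>

static uint32_t stat_count;
static uint64_t stat_usec_sum;

static void
stat_note(uint32_t usec)
{
  ++stat_count;
  stat_usec_sum += usec;
  if (stat_count >= 500000) {
    /* Halving numerator and denominator leaves the mean unchanged. */
    stat_count /= 2;
    stat_usec_sum /= 2;
  }
}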
static void
reset_perftime(void)
{
  tor_gettimeofday(&tv_start);
}
void
test_circuitstats_hoplen(void *arg)
{
  /* Plan:
   *  0. Test no other opened circs (relaxed timeout)
   *  1. Check >3 hop circ building w/o timeout
   *  2. Check >3 hop circs w/ timeouts..
   */
  struct timeval circ_start_time;
  origin_circuit_t *threehop = NULL;
  origin_circuit_t *fourhop = NULL;
  (void)arg;
  MOCK(circuit_mark_for_close_, mock_circuit_mark_for_close);

  circuit_build_times_init(get_circuit_build_times_mutable());

  // Let's set a close_ms to 2X the initial timeout, so we can
  // test relaxed functionality (which uses the close_ms timeout)
  get_circuit_build_times_mutable()->close_ms *= 2;

  tor_gettimeofday(&circ_start_time);
  circ_start_time.tv_sec -= 119; // make us hit "relaxed" cutoff

  // Test 1: Build a fourhop circuit that should get marked
  // as relaxed and eventually counted by circuit_expire_building
  // (but not before)
  fourhop = subtest_fourhop_circuit(circ_start_time, 0);
  tt_int_op(fourhop->relaxed_timeout, OP_EQ, 0);
  tt_int_op(marked_for_close, OP_EQ, 0);
  circuit_expire_building();
  tt_int_op(marked_for_close, OP_EQ, 0);
  tt_int_op(fourhop->relaxed_timeout, OP_EQ, 1);
  TO_CIRCUIT(fourhop)->timestamp_began.tv_sec -= 119;
  circuit_expire_building();
  tt_int_op(get_circuit_build_times()->total_build_times, OP_EQ, 1);
  tt_int_op(marked_for_close, OP_EQ, 1);

  circuit_free_(TO_CIRCUIT(fourhop));
  circuit_build_times_reset(get_circuit_build_times_mutable());

  // Test 2: Add a threehop circuit for non-relaxed timeouts
  threehop = add_opened_threehop();

  /* This circuit should not timeout */
  tor_gettimeofday(&circ_start_time);
  circ_start_time.tv_sec -= 59;
  fourhop = subtest_fourhop_circuit(circ_start_time, 0);
  circuit_expire_building();
  tt_int_op(get_circuit_build_times()->total_build_times, OP_EQ, 1);
  tt_int_op(TO_CIRCUIT(fourhop)->purpose, OP_NE,
            CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT);

  circuit_free_((circuit_t *)fourhop);
  circuit_build_times_reset(get_circuit_build_times_mutable());

  /* Test 3: This circuit should now time out and get marked as a
   * measurement circuit, but still get counted (and counted only once) */
  circ_start_time.tv_sec -= 2;
  fourhop = subtest_fourhop_circuit(circ_start_time, 0);
  tt_int_op(TO_CIRCUIT(fourhop)->purpose, OP_EQ,
            CIRCUIT_PURPOSE_C_MEASURE_TIMEOUT);
  tt_int_op(get_circuit_build_times()->total_build_times, OP_EQ, 1);
  circuit_expire_building();
  tt_int_op(get_circuit_build_times()->total_build_times, OP_EQ, 1);

 done:
  UNMOCK(circuit_mark_for_close_);
  circuit_free_(TO_CIRCUIT(threehop));
  circuit_free_(TO_CIRCUIT(fourhop));
  circuit_build_times_free_timeouts(get_circuit_build_times_mutable());
}
int
main(int argc, char **argv)
{
  (void)argc;
  (void)argv;
  tor_libevent_cfg cfg;
  memset(&cfg, 0, sizeof(cfg));
  tor_libevent_initialize(&cfg);
  timers_initialize();
  int i;
  int ret;
  struct timeval now;
  tor_gettimeofday(&now);
  monotime_get(&started_at);
  for (i = 0; i < N_TIMERS; ++i) {
    struct timeval delay;
    delay.tv_sec = crypto_rand_int_range(0,MAX_DURATION);
    delay.tv_usec = crypto_rand_int_range(0,1000000);
    delay_usec[i] = delay.tv_sec * 1000000 + delay.tv_usec;
    timeradd(&now, &delay, &fire_at[i]);
    timers[i] = timer_new(timer_cb, &timers[i]);
    timer_schedule(timers[i], &delay);
    ++n_active_timers;
  }
  /* Disable some; we'll make sure they don't trigger. */
  for (i = 0; i < N_DISABLE; ++i) {
    int idx = crypto_rand_int_range(0, N_TIMERS);
    if (is_disabled[idx])
      continue;
    is_disabled[idx] = 1;
    timer_disable(timers[idx]);
    --n_active_timers;
  }

  tor_libevent_run_event_loop(tor_libevent_get_base(), 0);

  int64_t total_difference = 0;
  uint64_t total_square_difference = 0;
  tor_assert(n_fired == n_active_timers);
  for (i = 0; i < N_TIMERS; ++i) {
    if (is_disabled[i]) {
      tor_assert(fired[i] == 0);
      continue;
    }
    tor_assert(fired[i] == 1);
    //int64_t diff = difference[i].tv_usec + difference[i].tv_sec * 1000000;
    int64_t diff = diffs_mono_usec[i];
    total_difference += diff;
    total_square_difference += diff*diff;
  }
  const int64_t mean_diff = total_difference / n_active_timers;
  printf("mean difference: "I64_FORMAT" usec\n",
         I64_PRINTF_ARG(mean_diff));
  const double mean_sq = ((double)total_square_difference)/n_active_timers;
  const double sq_mean = mean_diff * mean_diff;
  const double stddev = sqrt(mean_sq - sq_mean);
  printf("standard deviation: %lf usec\n", stddev);

#define MAX_DIFF_USEC (500*1000)
#define MAX_STDDEV_USEC (500*1000)
#define ODD_DIFF_USEC (2000)
#define ODD_STDDEV_USEC (2000)

  if (mean_diff < 0 || mean_diff > MAX_DIFF_USEC ||
      stddev > MAX_STDDEV_USEC) {
    printf("Either your system is under ridiculous load, or the "
           "timer backend is broken.\n");
    ret = 1;
  } else if (mean_diff > ODD_DIFF_USEC || stddev > ODD_STDDEV_USEC) {
    printf("Either your system is a bit slow or the "
           "timer backend is odd.\n");
    ret = 0;
  } else {
    printf("Looks good enough.\n");
    ret = 0;
  }
  timer_free_(NULL);
  for (i = 0; i < N_TIMERS; ++i) {
    timer_free(timers[i]);
  }
  timers_shutdown();
  return ret;
}
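/* The standard-deviation computation above relies on the identity
 * Var(X) = E[X^2] - (E[X])^2. A minimal standalone version of the same
 * calculation (stddev_usec is an illustrative name): */
#include <math.h>
#include <stdint.h>

static double
stddev_usec(const int64_t *samples, int n)
{
  double sum = 0.0, sum_sq = 0.0;
  for (int i = 0; i < n; ++i) {
    sum += (double)samples[i];
    sum_sq += (double)samples[i] * (double)samples[i];
  }
  double mean = sum / n;
  /* Population variance; floating-point rounding can push it slightly
   * negative, so clamp before taking the square root. */
  double var = sum_sq / n - mean * mean;
  return sqrt(var > 0 ? var : 0);
}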
void
scalliontor_readCPUWorkerCallback(int sockd, short ev_types, void * arg)
{
  vtor_cpuworker_tp cpuw = arg;

enter:
  SCALLION_CPUWORKER_ASSERT(cpuw);

  if (cpuw->state == CPUW_NONE) {
    cpuw->state = CPUW_V2_READ;
  }

  switch (cpuw->state) {
    case CPUW_V2_READ: {
      size_t req_size = sizeof(cpuworker_request_t);
      char recvbuf[req_size];

      /* read until we have a full request */
      while (cpuw->num_partial_bytes < req_size) {
        memset(recvbuf, 0, req_size);
        size_t bytes_needed = req_size - cpuw->num_partial_bytes;

        int ioResult = recv(cpuw->fd, recvbuf, bytes_needed, 0);
        // int ioResult = recv(cpuw->fd, (&(cpuw->req))+cpuw->offset,
        //                     bytesNeeded-cpuw->offset, 0);

        ioResult = scalliontor_checkIOResult(cpuw->fd, ioResult);
        if (ioResult < 0) goto end; // error, kill ourself
        else if (ioResult == 0) goto ret; // EAGAIN
        else g_assert(ioResult > 0); // yay

        /* we read some bytes */
        size_t bytes_read = (size_t)ioResult;
        g_assert(bytes_read <= bytes_needed);

        /* copy these bytes into our request buffer */
        gpointer req_loc = (gpointer) &(cpuw->req);
        gpointer req_w_loc = &req_loc[cpuw->num_partial_bytes];

        SCALLION_CPUWORKER_ASSERT(cpuw);
        memcpy(req_w_loc, recvbuf, bytes_read);
        SCALLION_CPUWORKER_ASSERT(cpuw);

        cpuw->num_partial_bytes += bytes_read;
        g_assert(cpuw->num_partial_bytes <= req_size);
      }

      /* we got what we needed, assert this */
      if (cpuw->num_partial_bytes == req_size) {
        /* got full request, process it */
        cpuw->state = CPUW_V2_PROCESS;
        cpuw->num_partial_bytes = 0;
        goto enter;
      } else {
        log_err(LD_BUG, "read tag failed. Exiting.");
        goto end;
      }
    }

    case CPUW_V2_PROCESS: {
      tor_assert(cpuw->req.magic == CPUWORKER_REQUEST_MAGIC);

      SCALLION_CPUWORKER_ASSERT(cpuw);
      memset(&(cpuw->rpl), 0, sizeof(cpuworker_reply_t));
      SCALLION_CPUWORKER_ASSERT(cpuw);

      if (cpuw->req.task == CPUWORKER_TASK_ONION) {
        const create_cell_t *cc = &cpuw->req.create_cell;
        created_cell_t *cell_out = &cpuw->rpl.created_cell;
        int n = 0;
#ifdef SCALLION_USEV2CPUWORKERTIMING
        struct timeval tv_start, tv_end;
        cpuw->rpl.timed = cpuw->req.timed;
        cpuw->rpl.started_at = cpuw->req.started_at;
        cpuw->rpl.handshake_type = cc->handshake_type;
        if (cpuw->req.timed)
          tor_gettimeofday(&tv_start);
#endif
        n = onion_skin_server_handshake(cc->handshake_type,
                                        cc->onionskin, cc->handshake_len,
                                        &cpuw->onion_keys,
                                        cell_out->reply,
                                        cpuw->rpl.keys,
                                        CPATH_KEY_MATERIAL_LEN,
                                        cpuw->rpl.rend_auth_material);
        if (n < 0) {
          /* failure */
          log_debug(LD_OR, "onion_skin_server_handshake failed.");
          memset(&cpuw->rpl, 0, sizeof(cpuworker_reply_t));
          memcpy(cpuw->rpl.tag, cpuw->req.tag, TAG_LEN);
          cpuw->rpl.success = 0;
        } else {
          /* success */
          log_debug(LD_OR, "onion_skin_server_handshake succeeded.");
          memcpy(cpuw->rpl.tag, cpuw->req.tag, TAG_LEN);
          cell_out->handshake_len = n;
          switch (cc->cell_type) {
            case CELL_CREATE:
              cell_out->cell_type = CELL_CREATED;
              break;
            case CELL_CREATE2:
              cell_out->cell_type = CELL_CREATED2;
              break;
            case CELL_CREATE_FAST:
              cell_out->cell_type = CELL_CREATED_FAST;
              break;
            default:
              tor_assert(0);
              goto end;
          }
          cpuw->rpl.success = 1;
        }
        cpuw->rpl.magic = CPUWORKER_REPLY_MAGIC;
#ifdef SCALLION_USEV2CPUWORKERTIMING
        if (cpuw->req.timed) {
          struct timeval tv_diff;
          tor_gettimeofday(&tv_end);
          timersub(&tv_end, &tv_start, &tv_diff);
          int64_t usec = (int64_t)(((int64_t)tv_diff.tv_sec)*1000000 +
                                   tv_diff.tv_usec);
/** If any onionskin takes longer than this, we clip them to this
 * time. (microseconds) */
#define MAX_BELIEVABLE_ONIONSKIN_DELAY (2*1000*1000)
          if (usec < 0 || usec > MAX_BELIEVABLE_ONIONSKIN_DELAY)
            cpuw->rpl.n_usec = MAX_BELIEVABLE_ONIONSKIN_DELAY;
          else
            cpuw->rpl.n_usec = (uint32_t) usec;
        }
#endif
        /* write response after processing request */
        SCALLION_CPUWORKER_ASSERT(cpuw);
        cpuw->state = CPUW_V2_WRITE;
      } else if (cpuw->req.task == CPUWORKER_TASK_SHUTDOWN) {
        log_info(LD_OR, "Clean shutdown: exiting");
        cpuw->state = CPUW_NONE;
        goto end;
      } else {
        /* don't know the task; ignore it and start over reading the next */
        cpuw->state = CPUW_V2_RESET;
      }

      goto enter;
    }

    case CPUW_V2_WRITE: {
      size_t rpl_size = sizeof(cpuworker_reply_t);
      char sendbuf[rpl_size];
      memset(sendbuf, 0, rpl_size);

      /* copy reply into send buffer */
      SCALLION_CPUWORKER_ASSERT(cpuw);
      memcpy(sendbuf, (gpointer) &(cpuw->rpl), rpl_size);
      SCALLION_CPUWORKER_ASSERT(cpuw);

      /* write until we wrote it all */
      while (cpuw->num_partial_bytes < rpl_size) {
        size_t bytes_needed = rpl_size - cpuw->num_partial_bytes;
        gpointer rpl_loc = (gpointer) sendbuf;
        gpointer rpl_r_loc = &rpl_loc[cpuw->num_partial_bytes];

        int ioResult = send(cpuw->fd, rpl_r_loc, bytes_needed, 0);

        ioResult = scalliontor_checkIOResult(cpuw->fd, ioResult);
        if (ioResult < 0) goto end; // error, kill ourself
        else if (ioResult == 0) goto ret; // EAGAIN
        else g_assert(ioResult > 0); // yay

        /* we wrote some bytes */
        size_t bytes_written = (size_t)ioResult;
        g_assert(bytes_written <= bytes_needed);

        cpuw->num_partial_bytes += bytes_written;
        g_assert(cpuw->num_partial_bytes <= rpl_size);
      }

      /* we sent what we needed, assert this */
      if (cpuw->num_partial_bytes == rpl_size) {
        /* sent full reply, start over */
        log_debug(LD_OR, "finished writing response.");
        cpuw->state = CPUW_V2_RESET;
        cpuw->num_partial_bytes = 0;
        goto enter;
      } else {
        log_err(LD_BUG, "writing response buf failed. Exiting.");
        goto end;
      }
    }

    case CPUW_V2_RESET: {
      memwipe(&cpuw->req, 0, sizeof(cpuworker_request_t));
      memwipe(&cpuw->rpl, 0, sizeof(cpuworker_reply_t));
      cpuw->state = CPUW_V2_READ;
      cpuw->num_partial_bytes = 0;
      goto enter;
    }
  }

ret:
  return;

end:
  if (cpuw != NULL) {
    memwipe(&cpuw->req, 0, sizeof(cpuw->req));
    memwipe(&cpuw->rpl, 0, sizeof(cpuw->rpl));
    release_server_onion_keys(&cpuw->onion_keys);
    tor_close_socket(cpuw->fd);
    event_del(&(cpuw->read_event));
    memset(cpuw, 0, sizeof(vtor_cpuworker_t));
    free(cpuw);
  }
}
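/* Sketch of the resumable-I/O idea behind the state machine above
 * (illustrative types and names, not Shadow's API): on EAGAIN the callback
 * returns, keeping the byte offset in the state struct so the next
 * invocation continues where the last one stopped. */
#include <errno.h>
#include <unistd.h>
#include <stddef.h>

struct partial_read {
  char buf[256];     /* record being assembled */
  size_t got;        /* bytes received so far */
};

/* Returns 1 when the full record is present, 0 to wait for more data,
 * -1 on error or EOF. */
static int
resume_read(int fd, struct partial_read *pr, size_t record_size)
{
  while (pr->got < record_size) {
    ssize_t r = read(fd, pr->buf + pr->got, record_size - pr->got);
    if (r > 0) {
      pr->got += (size_t)r;
    } else if (r < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
      return 0; /* try again when the fd is readable */
    } else {
      return -1;
    }
  }
  pr->got = 0; /* reset for the next record */
  return 1;
}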