/** Return a pointer to the microdescriptor cache, creating (but not loading) * it if necessary. */ static microdesc_cache_t * get_microdesc_cache_noload(void) { if (PREDICT_UNLIKELY(the_microdesc_cache==NULL)) { microdesc_cache_t *cache = tor_malloc_zero(sizeof(*cache)); HT_INIT(microdesc_map, &cache->map); cache->cache_fname = get_datadir_fname("cached-microdescs"); cache->journal_fname = get_datadir_fname("cached-microdescs.new"); the_microdesc_cache = cache; } return the_microdesc_cache; }
/**
 * Return the Pareto start-of-curve parameter Xm.
 *
 * Because we are not a true Pareto curve, we compute this as the
 * weighted average of the N most frequent build time bins. N is either
 * 1 if we don't have enough circuit build time data collected, or
 * determined by the consensus parameter cbtnummodes (default 3).
 */
static build_time_t
circuit_build_times_get_xm(circuit_build_times_t *cbt)
{
  build_time_t i, nbins;
  build_time_t *nth_max_bin;
  int32_t bin_counts=0;
  build_time_t ret = 0;
  /* Histogram of observed build times; caller owns and must free it. */
  uint32_t *histogram = circuit_build_times_create_histogram(cbt, &nbins);
  int n=0;
  int num_modes = circuit_build_times_default_num_xm_modes();
  tor_assert(nbins > 0);
  tor_assert(num_modes > 0);
  // Only use one mode if < 1000 buildtimes. Not enough data
  // for multiple.
  if (cbt->total_build_times < CBT_NCIRCUITS_TO_OBSERVE)
    num_modes = 1;
  /* nth_max_bin[k] holds the index of the (k+1)-th most frequent bin;
   * zero-initialized, so all slots start by pointing at bin 0. */
  nth_max_bin = (build_time_t*)tor_malloc_zero(num_modes*sizeof(build_time_t));
  /* Determine the N most common build times */
  for (i = 0; i < nbins; i++) {
    if (histogram[i] >= histogram[nth_max_bin[0]]) {
      nth_max_bin[0] = i;
    }
    /* Slot n accepts bin i only if i beats the current slot-n holder AND is
     * strictly less frequent than the slot above (or that slot is empty),
     * so the slots stay ordered by frequency. */
    for (n = 1; n < num_modes; n++) {
      if (histogram[i] >= histogram[nth_max_bin[n]] &&
          (!histogram[nth_max_bin[n-1]] ||
           histogram[i] < histogram[nth_max_bin[n-1]])) {
        nth_max_bin[n] = i;
      }
    }
  }
  /* Weighted average of the chosen modes, weighted by bin frequency. */
  for (n = 0; n < num_modes; n++) {
    bin_counts += histogram[nth_max_bin[n]];
    ret += CBT_BIN_TO_MS(nth_max_bin[n])*histogram[nth_max_bin[n]];
    log_info(LD_CIRC, "Xm mode #%d: %u %u", n, CBT_BIN_TO_MS(nth_max_bin[n]),
             histogram[nth_max_bin[n]]);
  }
  /* The following assert is safe, because we don't get called when we
   * haven't observed at least CBT_MIN_MIN_CIRCUITS_TO_OBSERVE circuits. */
  tor_assert(bin_counts > 0);
  ret /= bin_counts;
  tor_free(histogram);
  tor_free(nth_max_bin);
  return ret;
}
/** Returns a newly allocated <b>process_unix_t</b>. */
process_unix_t *
process_unix_new(void)
{
  process_unix_t *proc = tor_malloc_zero(sizeof(process_unix_t));

  /* No pipes are attached yet: mark every stdio fd as invalid. */
  proc->stdin_handle.fd = -1;
  proc->stdout_handle.fd = -1;
  proc->stderr_handle.fd = -1;

  return proc;
}
/** Add <b>circ</b> to the end of ol_list and return 0, except
 * if ol_list is too long, in which case do nothing and return -1.
 */
int
onion_pending_add(or_circuit_t *circ, char *onionskin)
{
  onion_queue_t *tmp;
  time_t now = time(NULL);
  tmp = tor_malloc_zero(sizeof(onion_queue_t));
  tmp->circ = circ;
  /* NOTE(review): the queue entry stores the onionskin pointer; on the -1
   * (overload) path below only <b>tmp</b> itself is freed, so the caller
   * presumably retains ownership of <b>onionskin</b> on failure — confirm
   * against callers. */
  tmp->onionskin = onionskin;
  tmp->when_added = now;
  /* Empty queue: this entry becomes both head and tail. */
  if (!ol_tail) {
    tor_assert(!ol_list);
    tor_assert(!ol_length);
    ol_list = tmp;
    ol_tail = tmp;
    ol_length++;
    return 0;
  }
  tor_assert(ol_list);
  tor_assert(!ol_tail->next);
  /* Queue full: warn (rate-limited) and reject this request. */
  if (ol_length >= get_options()->MaxOnionsPending) {
#define WARN_TOO_MANY_CIRC_CREATIONS_INTERVAL (60)
    static ratelim_t last_warned =
      RATELIM_INIT(WARN_TOO_MANY_CIRC_CREATIONS_INTERVAL);
    char *m;
    if ((m = rate_limit_log(&last_warned, approx_time()))) {
      log_warn(LD_GENERAL,
               "Your computer is too slow to handle this many circuit "
               "creation requests! Please consider using the "
               "MaxAdvertisedBandwidth config option or choosing a more "
               "restricted exit policy.%s",m);
      tor_free(m);
    }
    tor_free(tmp);
    return -1;
  }
  /* Append at the tail. */
  ol_length++;
  ol_tail->next = tmp;
  ol_tail = tmp;
  /* Cull entries that have waited too long, from the head of the queue.
   * onion_pending_remove() advances ol_list, so this loop terminates.
   * Note: <b>circ</b> is reused here as the cursor for the culled circuit. */
  while ((int)(now - ol_list->when_added) >= ONIONQUEUE_WAIT_CUTOFF) {
    /* cull elderly requests. */
    circ = ol_list->circ;
    onion_pending_remove(ol_list->circ);
    log_info(LD_CIRC,
             "Circuit create request is too old; canceling due to overload.");
    circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_RESOURCELIMIT);
  }
  return 0;
}
/** Create an <b>edge_connection_t</b> instance that is considered a * valid exit connection by asserts in dns_resolve_impl. */ static edge_connection_t * create_valid_exitconn(void) { edge_connection_t *exitconn = tor_malloc_zero(sizeof(edge_connection_t)); TO_CONN(exitconn)->type = CONN_TYPE_EXIT; TO_CONN(exitconn)->magic = EDGE_CONNECTION_MAGIC; TO_CONN(exitconn)->purpose = EXIT_PURPOSE_RESOLVE; TO_CONN(exitconn)->state = EXIT_CONN_STATE_RESOLVING; exitconn->base_.s = TOR_INVALID_SOCKET; return exitconn; }
/** Benchmark relay-cell crypt operations (relay_crypt) in both directions,
 * printing nanoseconds per cell and per payload byte. */
static void
bench_cell_ops(void)
{
  const int iters = 1<<16;
  int i;

  /* benchmarks for cell ops at relay. */
  or_circuit_t *or_circ = tor_malloc_zero(sizeof(or_circuit_t));
  cell_t *cell = tor_malloc(sizeof(cell_t));
  int outbound;
  uint64_t start, end;

  /* Random payload so the cipher has realistic input. */
  crypto_rand((char*)cell->payload, sizeof(cell->payload));

  /* Mock-up or_circuit_t */
  or_circ->_base.magic = OR_CIRCUIT_MAGIC;
  or_circ->_base.purpose = CIRCUIT_PURPOSE_OR;

  /* Initialize crypto: one cipher + digest pair per direction. */
  or_circ->p_crypto = crypto_cipher_new();
  crypto_cipher_generate_key(or_circ->p_crypto);
  crypto_cipher_encrypt_init_cipher(or_circ->p_crypto);
  or_circ->n_crypto = crypto_cipher_new();
  crypto_cipher_generate_key(or_circ->n_crypto);
  crypto_cipher_encrypt_init_cipher(or_circ->n_crypto);
  or_circ->p_digest = crypto_digest_new();
  or_circ->n_digest = crypto_digest_new();

  reset_perftime();

  /* Time relay_crypt() for inbound (0) then outbound (1) cells. */
  for (outbound = 0; outbound <= 1; ++outbound) {
    cell_direction_t d = outbound ? CELL_DIRECTION_OUT : CELL_DIRECTION_IN;
    start = perftime();
    for (i = 0; i < iters; ++i) {
      char recognized = 0;
      crypt_path_t *layer_hint = NULL;
      relay_crypt(TO_CIRCUIT(or_circ), cell, d, &layer_hint, &recognized);
    }
    end = perftime();
    printf("%sbound cells: %.2f ns per cell. (%.2f ns per byte of payload)\n",
           outbound?"Out":" In",
           NANOCOUNT(start,end,iters),
           NANOCOUNT(start,end,iters*CELL_PAYLOAD_SIZE));
  }

  /* Release everything we allocated above. */
  crypto_digest_free(or_circ->p_digest);
  crypto_digest_free(or_circ->n_digest);
  crypto_cipher_free(or_circ->p_crypto);
  crypto_cipher_free(or_circ->n_crypto);
  tor_free(or_circ);
  tor_free(cell);
}
/** Create a sockaddr_in structure from IP address string <b>ip_str</b>. * * If <b>out</b> is not NULL, write the result * to the memory address in <b>out</b>. Otherwise, allocate the memory * for result. On success, return pointer to result. Otherwise, return * NULL. */ static struct sockaddr_in * sockaddr_in_from_string(const char *ip_str, struct sockaddr_in *out) { // [FIXME: add some error checking?] if (!out) out = tor_malloc_zero(sizeof(struct sockaddr_in)); out->sin_family = AF_INET; out->sin_port = 0; tor_inet_pton(AF_INET,ip_str,&(out->sin_addr)); return out; }
/** Returns a transport_t struct for a transport proxy supporting the
 * protocol <b>name</b> listening at <b>addr</b>:<b>port</b> using SOCKS
 * version <b>socks_ver</b>. */
static transport_t *
transport_new(const tor_addr_t *addr, uint16_t port,
              const char *name, int socks_ver)
{
  transport_t *transport = tor_malloc_zero(sizeof(transport_t));

  transport->name = tor_strdup(name);
  transport->socks_version = socks_ver;
  tor_addr_copy(&transport->addr, addr);
  transport->port = port;

  return transport;
}
/** Unit test: rend_cache_clean_v2_descs_as_dir() must leave fresh entries
 * alone and remove entries whose descriptor timestamp is before the cutoff. */
static void
test_rend_cache_clean_v2_descs_as_dir(void *data)
{
  rend_cache_entry_t *e;
  time_t now, cutoff;
  rend_service_descriptor_t *desc;
  now = time(NULL);
  cutoff = now - (REND_CACHE_MAX_AGE + REND_CACHE_MAX_SKEW);
  /* Digest-length key; "abcde" is zero-padded to DIGEST_LEN bytes. */
  const char key[DIGEST_LEN] = "abcde";

  (void)data;

  rend_cache_init();

  // Test running with an empty cache
  rend_cache_clean_v2_descs_as_dir(cutoff);
  tt_int_op(digestmap_size(rend_cache_v2_dir), OP_EQ, 0);

  // Test with only one new entry
  e = tor_malloc_zero(sizeof(rend_cache_entry_t));
  e->last_served = now;
  desc = tor_malloc_zero(sizeof(rend_service_descriptor_t));
  desc->timestamp = now;
  desc->pk = pk_generate(0);
  e->parsed = desc;
  digestmap_set(rend_cache_v2_dir, key, e);

  /* Set the cutoff to minus 10 seconds. */
  rend_cache_clean_v2_descs_as_dir(cutoff - 10);
  /* Fresh entry must survive the (earlier) cutoff. */
  tt_int_op(digestmap_size(rend_cache_v2_dir), OP_EQ, 1);

  // Test with one old entry
  /* Backdate the same entry so the normal cutoff removes it. */
  desc->timestamp = cutoff - 1000;
  rend_cache_clean_v2_descs_as_dir(cutoff);
  tt_int_op(digestmap_size(rend_cache_v2_dir), OP_EQ, 0);

 done:
  rend_cache_free_all();
}
/** Return a new HS token of type <b>type</b> containing <b>token</b>. */
static hs_token_t *
hs_token_new(hs_token_type_t type, size_t token_len, const uint8_t *token)
{
  hs_token_t *ht;

  tor_assert(token);

  ht = tor_malloc_zero(sizeof(hs_token_t));
  ht->type = type;
  ht->token_len = token_len;
  /* Keep our own copy of the token bytes. */
  ht->token = tor_memdup(token, token_len);

  return ht;
}
/** Unit test: writing a finalizing zlib stream that lands exactly at a buffer
 * chunk boundary must still produce output that decompresses cleanly. */
static void
test_buffers_zlib_fin_at_chunk_end(void *arg)
{
  char *msg = NULL;
  char *contents = NULL;
  char *expanded = NULL;
  buf_t *buf = NULL;
  tor_zlib_state_t *zlib_state = NULL;
  size_t out_len, in_len;
  size_t sz, headerjunk;
  (void) arg;

  buf = buf_new_with_capacity(128); /* will round up */
  sz = buf_get_default_chunk_size(buf);
  msg = tor_malloc_zero(sz);

  write_to_buf(msg, 1, buf);
  tt_assert(buf->head);

  /* Fill up the chunk so the zlib stuff won't fit in one chunk. */
  tt_uint_op(buf->head->memlen, OP_LT, sz);
  /* Leave 7 bytes free in the first chunk; headerjunk is the number of
   * padding bytes written before the compressed stream begins. */
  headerjunk = buf->head->memlen - 7;
  write_to_buf(msg, headerjunk-1, buf);
  tt_uint_op(buf->head->datalen, OP_EQ, headerjunk);
  tt_uint_op(buf_datalen(buf), OP_EQ, headerjunk);

  /* Write an empty string, with finalization on. */
  zlib_state = tor_zlib_new(1, ZLIB_METHOD, HIGH_COMPRESSION);
  tt_int_op(write_to_buf_zlib(buf, zlib_state, "", 0, 1), OP_EQ, 0);

  /* Pull everything back out and decompress the part after the padding. */
  in_len = buf_datalen(buf);
  contents = tor_malloc(in_len);

  tt_int_op(fetch_from_buf(contents, in_len, buf), OP_EQ, 0);

  /* The compressed stream must have spilled past the padding. */
  tt_uint_op(in_len, OP_GT, headerjunk);

  tt_int_op(0, OP_EQ, tor_gzip_uncompress(&expanded, &out_len,
                                          contents + headerjunk,
                                          in_len - headerjunk,
                                          ZLIB_METHOD, 1,
                                          LOG_WARN));

  /* An empty input stream decompresses to zero bytes. */
  tt_int_op(out_len, OP_EQ, 0);
  tt_assert(expanded);

 done:
  buf_free(buf);
  tor_zlib_free(zlib_state);
  tor_free(contents);
  tor_free(expanded);
  tor_free(msg);
}
/** Add a log handler named <b>name</b> to send all messages in <b>severity</b> * to <b>fd</b>. Copies <b>severity</b>. Helper: does no locking. */ static void add_stream_log_impl(const log_severity_list_t *severity, const char *name, int fd) { logfile_t *lf; lf = tor_malloc_zero(sizeof(logfile_t)); lf->fd = fd; lf->filename = tor_strdup(name); lf->severities = tor_memdup(severity, sizeof(log_severity_list_t)); lf->next = logfiles; logfiles = lf; _log_global_min_severity = get_min_log_level(); }
/** Unit test: drive handle_proxy_line() through both invalid and valid
 * pluggable-transport protocol sequences and check the resulting
 * conf_state transitions. */
static void
test_pt_protocol(void *arg)
{
  char line[200];

  managed_proxy_t *mp = tor_malloc_zero(sizeof(managed_proxy_t));
  (void)arg;
  mp->conf_state = PT_PROTO_LAUNCHED;
  mp->transports = smartlist_new();
  mp->argv = tor_calloc(2, sizeof(char *));
  mp->argv[0] = tor_strdup("<testcase>");

  /* various wrong protocol runs: */

  /* A first VERSION line is accepted... */
  strlcpy(line,"VERSION 1",sizeof(line));
  handle_proxy_line(line, mp);
  tt_assert(mp->conf_state == PT_PROTO_ACCEPTING_METHODS);

  /* ...but a duplicate VERSION line breaks the proxy. */
  strlcpy(line,"VERSION 1",sizeof(line));
  handle_proxy_line(line, mp);
  tt_assert(mp->conf_state == PT_PROTO_BROKEN);

  reset_mp(mp);

  /* CMETHOD before VERSION is also a protocol violation. */
  strlcpy(line,"CMETHOD trebuchet socks5 127.0.0.1:1999",sizeof(line));
  handle_proxy_line(line, mp);
  tt_assert(mp->conf_state == PT_PROTO_BROKEN);

  reset_mp(mp);

  /* correct protocol run: */
  strlcpy(line,"VERSION 1",sizeof(line));
  handle_proxy_line(line, mp);
  tt_assert(mp->conf_state == PT_PROTO_ACCEPTING_METHODS);

  strlcpy(line,"CMETHOD trebuchet socks5 127.0.0.1:1999",sizeof(line));
  handle_proxy_line(line, mp);
  tt_assert(mp->conf_state == PT_PROTO_ACCEPTING_METHODS);

  strlcpy(line,"CMETHODS DONE",sizeof(line));
  handle_proxy_line(line, mp);
  tt_assert(mp->conf_state == PT_PROTO_CONFIGURED);

 done:
  reset_mp(mp);
  smartlist_free(mp->transports);
  tor_free(mp->argv[0]);
  tor_free(mp->argv);
  tor_free(mp);
}
/** Allocate and return a new cached_dir_t containing the string <b>s</b>,
 * published at <b>published</b>.  Takes ownership of <b>s</b>. */
cached_dir_t *
new_cached_dir(char *s, time_t published)
{
  cached_dir_t *result = tor_malloc_zero(sizeof(cached_dir_t));

  result->refcnt = 1;
  result->dir = s;
  result->dir_len = strlen(s);
  result->published = published;

  /* Precompute a compressed copy alongside the plain text. */
  if (tor_compress(&(result->dir_compressed), &(result->dir_compressed_len),
                   result->dir, result->dir_len, ZLIB_METHOD)) {
    log_warn(LD_BUG, "Error compressing directory");
  }

  return result;
}
/* Allocate a new commit object and initializing it with <b>rsa_identity</b> * that MUST be provided. The digest algorithm is set to the default one * that is supported. The rest is uninitialized. This never returns NULL. */ static sr_commit_t * commit_new(const char *rsa_identity) { sr_commit_t *commit; tor_assert(rsa_identity); commit = tor_malloc_zero(sizeof(*commit)); commit->alg = SR_DIGEST_ALG; memcpy(commit->rsa_identity, rsa_identity, sizeof(commit->rsa_identity)); base16_encode(commit->rsa_identity_hex, sizeof(commit->rsa_identity_hex), commit->rsa_identity, sizeof(commit->rsa_identity)); return commit; }
/** Note that an either direct or tunneled (see <b>type</b>) directory
 * request for a v3 network status with unique ID <b>dirreq_id</b> of size
 * <b>response_size</b> has started. */
void
geoip_start_dirreq(uint64_t dirreq_id, size_t response_size,
                   dirreq_type_t type)
{
  dirreq_map_entry_t *entry;

  /* Nothing to record unless dirreq statistics are enabled. */
  if (!get_options()->DirReqStatistics)
    return;

  entry = tor_malloc_zero(sizeof(dirreq_map_entry_t));
  entry->dirreq_id = dirreq_id;
  entry->response_size = response_size;
  entry->type = type;
  tor_gettimeofday(&entry->request_time);
  dirreq_map_put_(entry, type, dirreq_id);
}
/** Allocate and initialize the EWMA-policy data for <b>cmux</b>. */
static circuitmux_policy_data_t *
ewma_alloc_cmux_data(circuitmux_t *cmux)
{
  ewma_policy_data_t *data;

  tor_assert(cmux);

  data = tor_malloc_zero(sizeof(*data));
  data->base_.magic = EWMA_POL_DATA_MAGIC;
  data->active_circuit_pqueue = smartlist_new();
  data->active_circuit_pqueue_last_recalibrated = cell_ewma_get_tick();

  return TO_CMUX_POL_DATA(data);
}
static rend_data_t * mock_rend_data(const char *onion_address) { rend_data_t *rend_query = tor_malloc_zero(sizeof(rend_data_t)); strlcpy(rend_query->onion_address, onion_address, sizeof(rend_query->onion_address)); rend_query->auth_type = REND_NO_AUTH; rend_query->hsdirs_fp = smartlist_new(); smartlist_add(rend_query->hsdirs_fp, tor_memdup("aaaaaaaaaaaaaaaaaaaaaaaa", DIGEST_LEN)); return rend_query; }
/** Set up a new list of geoip countries with no countries (yet) set in it, * except for the unknown country. */ static void init_geoip_countries(void) { geoip_country_t *geoip_unresolved; geoip_countries = smartlist_new(); /* Add a geoip_country_t for requests that could not be resolved to a * country as first element (index 0) to geoip_countries. */ geoip_unresolved = tor_malloc_zero(sizeof(geoip_country_t)); strlcpy(geoip_unresolved->countrycode, "??", sizeof(geoip_unresolved->countrycode)); smartlist_add(geoip_countries, geoip_unresolved); country_idxplus1_by_lc_code = strmap_new(); strmap_set_lc(country_idxplus1_by_lc_code, "??", (void*)(1)); }
/** Queue one benchmark work item on <b>tp</b>: an RSA job with probability
 * 1/opt_ratio_rsa (always when the ratio is 0), otherwise an ECDH job. */
static workqueue_entry_t *
add_work(threadpool_t *tp)
{
  const int pick_rsa = opt_ratio_rsa == 0 ||
    tor_weak_random_range(&weak_rng, opt_ratio_rsa) == 0;

  if (pick_rsa) {
    rsa_work_t *rw = tor_malloc_zero(sizeof(*rw));
    rw->serial = n_sent++;
    crypto_rand((char*)rw->msg, 20);
    rw->msglen = 20;
    ++rsa_sent;
    return threadpool_queue_work(tp, workqueue_do_rsa, handle_reply, rw);
  }

  ecdh_work_t *ew = tor_malloc_zero(sizeof(*ew));
  ew->serial = n_sent++;
  /* Not strictly right, but this is just for benchmarks. */
  crypto_rand((char*)ew->u.pk.public_key, 32);
  ++ecdh_sent;
  return threadpool_queue_work(tp, workqueue_do_ecdh, handle_reply, ew);
}
/* Return a heap allocated copy of the SRV <b>orig</b>, or NULL if
 * <b>orig</b> is NULL. */
STATIC sr_srv_t *
srv_dup(const sr_srv_t *orig)
{
  sr_srv_t *copy;

  if (orig == NULL)
    return NULL;

  copy = tor_malloc_zero(sizeof(sr_srv_t));
  copy->num_reveals = orig->num_reveals;
  memcpy(copy->value, orig->value, sizeof(copy->value));
  return copy;
}
origin_circuit_t * build_unopened_fourhop(struct timeval circ_start_time) { origin_circuit_t *or_circ = origin_circuit_new(); extend_info_t *fakehop = tor_malloc_zero(sizeof(extend_info_t)); memset(fakehop, 0, sizeof(extend_info_t)); TO_CIRCUIT(or_circ)->purpose = CIRCUIT_PURPOSE_C_GENERAL; TO_CIRCUIT(or_circ)->timestamp_began = circ_start_time; TO_CIRCUIT(or_circ)->timestamp_created = circ_start_time; or_circ->build_state = tor_malloc_zero(sizeof(cpath_build_state_t)); or_circ->build_state->desired_path_len = 4; onion_append_hop(&or_circ->cpath, fakehop); onion_append_hop(&or_circ->cpath, fakehop); onion_append_hop(&or_circ->cpath, fakehop); onion_append_hop(&or_circ->cpath, fakehop); tor_free(fakehop); return or_circ; }
/** Create new cross-certification object to certify <b>ed_key</b> as the
 * master ed25519 identity key for the RSA identity key <b>rsa_key</b>.
 * Allocates and stores the encoded certificate in *<b>cert</b>, and returns
 * the number of bytes stored. Returns negative on error.*/
ssize_t
tor_make_rsa_ed25519_crosscert(const ed25519_public_key_t *ed_key,
                               const crypto_pk_t *rsa_key,
                               time_t expires,
                               uint8_t **cert)
{
  // It is later than 1985, since otherwise there would be no C89
  // compilers. (Try to diagnose #22466.)
  tor_assert_nonfatal(expires >= 15 * 365 * 86400);

  uint8_t *res;

  rsa_ed_crosscert_t *cc = rsa_ed_crosscert_new();
  memcpy(cc->ed_key, ed_key->pubkey, ED25519_PUBKEY_LEN);
  /* Expiration is stored in hours, rounded up. */
  cc->expiration = (uint32_t) CEIL_DIV(expires, 3600);
  cc->sig_len = crypto_pk_keysize(rsa_key);
  rsa_ed_crosscert_setlen_sig(cc, crypto_pk_keysize(rsa_key));

  /* First encoding pass: sized with a full-length placeholder signature so
   * the buffer is large enough for any real signature. */
  ssize_t alloc_sz = rsa_ed_crosscert_encoded_len(cc);
  tor_assert(alloc_sz > 0);
  res = tor_malloc_zero(alloc_sz);
  ssize_t sz = rsa_ed_crosscert_encode(res, alloc_sz, cc);
  tor_assert(sz > 0 && sz <= alloc_sz);

  /* Digest the domain-separation prefix plus the signed portion of the
   * encoding: the 32-byte ed key followed by the 4-byte expiration. */
  crypto_digest_t *d = crypto_digest256_new(DIGEST_SHA256);
  crypto_digest_add_bytes(d, RSA_ED_CROSSCERT_PREFIX,
                          strlen(RSA_ED_CROSSCERT_PREFIX));

  const int signed_part_len = 32 + 4;
  crypto_digest_add_bytes(d, (char*)res, signed_part_len);

  uint8_t digest[DIGEST256_LEN];
  crypto_digest_get_digest(d, (char*)digest, sizeof(digest));
  crypto_digest_free(d);

  /* Sign the digest with the RSA identity key. */
  int siglen = crypto_pk_private_sign(rsa_key,
                                  (char*)rsa_ed_crosscert_getarray_sig(cc),
                                  rsa_ed_crosscert_getlen_sig(cc),
                                  (char*)digest, sizeof(digest));
  tor_assert(siglen > 0 &&
             siglen <= (int)crypto_pk_keysize(rsa_key));
  tor_assert(siglen <= UINT8_MAX);
  cc->sig_len = siglen;
  rsa_ed_crosscert_setlen_sig(cc, siglen);

  /* Second encoding pass, now with the real (possibly shorter) signature.
   * The buffer from the first pass is guaranteed large enough. */
  sz = rsa_ed_crosscert_encode(res, alloc_sz, cc);
  rsa_ed_crosscert_free(cc);
  /* Ownership of <b>res</b> transfers to the caller via *cert. */
  *cert = res;
  return sz;
}
/** Note that we've seen a client connect from the IP <b>addr</b>
 * at time <b>now</b>. Ignored by all but bridges and directories if
 * configured accordingly. */
void
geoip_note_client_seen(geoip_client_action_t action,
                       const tor_addr_t *addr, time_t now)
{
  const or_options_t *options = get_options();
  clientmap_entry_t lookup, *ent;
  if (action == GEOIP_CLIENT_CONNECT) {
    /* Only remember statistics as entry guard or as bridge. */
    if (!options->EntryStatistics &&
        (!(options->BridgeRelay && options->BridgeRecordUsageByCountry)))
      return;
  } else {
    /* Networkstatus actions are only counted by non-bridge directories
     * that have DirReqStatistics enabled. */
    if (options->BridgeRelay || options->BridgeAuthoritativeDir ||
        !options->DirReqStatistics)
      return;
  }

  /* Find or create the (addr, action) history entry. */
  tor_addr_copy(&lookup.addr, addr);
  lookup.action = (int)action;
  ent = HT_FIND(clientmap, &client_history, &lookup);
  if (! ent) {
    ent = tor_malloc_zero(sizeof(clientmap_entry_t));
    tor_addr_copy(&ent->addr, addr);
    ent->action = (int)action;
    HT_INSERT(clientmap, &client_history, ent);
  }
  /* last_seen_in_minutes is a narrow bitfield; clamp out-of-range times
   * to 0 rather than storing a truncated value. */
  if (now / 60 <= (int)MAX_LAST_SEEN_IN_MINUTES && now >= 0)
    ent->last_seen_in_minutes = (unsigned)(now/60);
  else
    ent->last_seen_in_minutes = 0;

  if (action == GEOIP_CLIENT_NETWORKSTATUS ||
      action == GEOIP_CLIENT_NETWORKSTATUS_V2) {
    int country_idx = geoip_get_country_by_addr(addr);
    if (country_idx < 0)
      country_idx = 0; /** unresolved requests are stored at index 0. */
    if (country_idx >= 0 && country_idx < smartlist_len(geoip_countries)) {
      geoip_country_t *country = smartlist_get(geoip_countries, country_idx);
      /* Bump the per-country counter for the matching consensus flavor. */
      if (action == GEOIP_CLIENT_NETWORKSTATUS)
        ++country->n_v3_ns_requests;
      else
        ++country->n_v2_ns_requests;
    }

    /* Periodically determine share of requests that we should see */
    if (last_time_determined_shares + REQUEST_SHARE_INTERVAL < now)
      geoip_determine_shares(now);
  }
}
/**
 * Return a new anonymous memory mapping that holds <b>sz</b> bytes.
 *
 * Memory mappings are unlike the results from malloc() in that they are
 * handled separately by the operating system, and as such can have different
 * kernel-level flags set on them.
 *
 * The "flags" argument may be zero or more of ANONMAP_PRIVATE and
 * ANONMAP_NOINHERIT.
 *
 * Memory returned from this function must be released with
 * tor_munmap_anonymous().
 *
 * If <b>inherit_result_out</b> is non-NULL, set it to one of
 * INHERIT_RES_KEEP, INHERIT_RES_DROP, or INHERIT_RES_ZERO, depending on the
 * properties of the returned memory.
 *
 * [Note: OS people use the word "anonymous" here to mean that the memory
 * isn't associated with any file. This has *nothing* to do with the kind of
 * anonymity that Tor is trying to provide.]
 */
void *
tor_mmap_anonymous(size_t sz, unsigned flags,
                   inherit_res_t *inherit_result_out)
{
  void *ptr;
  inherit_res_t itmp=0;
  /* Point at a local dummy if the caller doesn't care, so the code below
   * can write through inherit_result_out unconditionally. */
  if (inherit_result_out == NULL) {
    inherit_result_out = &itmp;
  }
  *inherit_result_out = INHERIT_RES_KEEP;

#if defined(_WIN32)
  /* Windows: anonymous mapping via a pagefile-backed file mapping. */
  HANDLE mapping = CreateFileMapping(INVALID_HANDLE_VALUE,
                                     NULL, /*attributes*/
                                     PAGE_READWRITE,
                                     HIGH_SIZE_T_BYTES(sz),
                                     sz & 0xffffffff,
                                     NULL /* name */);
  raw_assert(mapping != NULL);
  ptr = MapViewOfFile(mapping, FILE_MAP_WRITE,
                      0, 0, /* Offset */
                      0 /* Extend to end of mapping */);
  raw_assert(ptr);
  CloseHandle(mapping); /* mapped view holds a reference */
#elif defined(HAVE_SYS_MMAN_H)
  /* POSIX: plain anonymous private mmap. */
  ptr = mmap(NULL, sz,
             PROT_READ|PROT_WRITE,
             MAP_ANON|MAP_PRIVATE,
             -1, 0);
  raw_assert(ptr != MAP_FAILED);
  raw_assert(ptr != NULL);
#else
  /* Fallback: heap memory; no special kernel flags are possible here. */
  ptr = tor_malloc_zero(sz);
#endif

  if (flags & ANONMAP_PRIVATE) {
    /* Lock the pages in RAM and exclude them from core dumps. */
    int lock_result = lock_mem(ptr, sz);
    raw_assert(lock_result == 0);
    int nodump_result = nodump_mem(ptr, sz);
    raw_assert(nodump_result == 0);
  }

  if (flags & ANONMAP_NOINHERIT) {
    /* Keep the mapping out of child processes; the helper reports how
     * well it managed via inherit_result_out. */
    int noinherit_result = noinherit_mem(ptr, sz, inherit_result_out);
    raw_assert(noinherit_result == 0);
  }

  return ptr;
}
/** Append a testing-cell-stats record with the given fields to
 * <b>circ</b>'s testing_cell_stats list, creating the list on demand. */
static void
add_testing_cell_stats_entry(circuit_t *circ, uint8_t command,
                             unsigned int waiting_time,
                             unsigned int removed, unsigned int exitward)
{
  testing_cell_stats_entry_t *entry =
    tor_malloc_zero(sizeof(testing_cell_stats_entry_t));

  entry->command = command;
  entry->waiting_time = waiting_time;
  entry->removed = removed;
  entry->exitward = exitward;

  if (circ->testing_cell_stats == NULL)
    circ->testing_cell_stats = smartlist_new();
  smartlist_add(circ->testing_cell_stats, entry);
}
/** Allocate and start a new worker thread to use state object <b>state</b>,
 * and send responses to <b>replyqueue</b>.  Returns the new thread, or NULL
 * if the thread could not be launched. */
static workerthread_t *
workerthread_new(void *state, threadpool_t *pool, replyqueue_t *replyqueue)
{
  workerthread_t *thr = tor_malloc_zero(sizeof(workerthread_t));
  thr->state = state;
  thr->reply_queue = replyqueue;
  thr->in_pool = pool;

  if (spawn_func(worker_thread_main, thr) < 0) {
    log_err(LD_GENERAL, "Can't launch worker thread.");
    /* Fix a memory leak: the original returned NULL here without freeing
     * the workerthread_t it had just allocated. */
    tor_free(thr);
    return NULL;
  }

  return thr;
}
/* Allocate a new disk state, initialize it and return it. */
static sr_disk_state_t *
disk_state_new(time_t now)
{
  sr_disk_state_t *state = tor_malloc_zero(sizeof(*state));

  state->magic_ = SR_DISK_STATE_MAGIC;
  state->Version = SR_PROTO_VERSION;
  state->TorVersion = tor_strdup(get_version());
  state->ValidAfter = now;
  state->ValidUntil = get_state_valid_until_time(now);

  /* Init config format. */
  config_init(&state_format, state);
  return state;
}
/* Allocate an sr_state_t object and returns it. If no <b>fname</b>, the
 * default file name is used. This function does NOT initialize the state
 * timestamp, phase or shared random value. NULL is never returned. */
static sr_state_t *
state_new(const char *fname, time_t now)
{
  sr_state_t *state = tor_malloc_zero(sizeof(*state));

  /* Fall back to the default file name when none was given. */
  state->fname = tor_strdup(fname ? fname : default_fname);
  state->version = SR_PROTO_VERSION;
  state->commits = digestmap_new();
  state->phase = get_sr_protocol_phase(now);
  state->valid_until = get_state_valid_until_time(now);

  return state;
}
static void test_util_format_base64_decode_nopad(void *ignored) { (void)ignored; int res; int i; char *src; uint8_t *dst; src = tor_malloc_zero(256); dst = tor_malloc_zero(1000); for (i=0;i<256;i++) { src[i] = (char)i; } res = base64_decode_nopad(dst, 1, src, SIZE_T_CEILING); tt_int_op(res, OP_EQ, -1); res = base64_decode_nopad(dst, 1, src, 5); tt_int_op(res, OP_EQ, -1); const char *s = "SGVsbG8gd29ybGQ"; res = base64_decode_nopad(dst, 1000, s, strlen(s)); tt_int_op(res, OP_EQ, 11); tt_mem_op(dst, OP_EQ, "Hello world", 11); s = "T3BhIG11bmRv"; res = base64_decode_nopad(dst, 9, s, strlen(s)); tt_int_op(res, OP_EQ, 9); tt_mem_op(dst, OP_EQ, "Opa mundo", 9); done: tor_free(src); tor_free(dst); }