/* auto_setup: called by dynamo_auto_start for non-early follow children.
 * This routine itself would be dynamo_auto_start except that we want
 * our own go-native path separate from load_dynamo (we could still have
 * this by dynamo_auto_start and jump to an asm routine for go-native,
 * but keeping the entry in asm is more flexible).
 * Assumptions: The saved priv_mcontext_t for the start of the app is on
 * the stack, followed by a pointer to a region of memory to free
 * (which can be NULL) and its size.  If we decide not to take over
 * this process, this routine returns; otherwise it does not return.
 */
void
auto_setup(ptr_uint_t appstack)
{
    dcontext_t *dcontext;
    priv_mcontext_t *mcontext;
    byte *pappstack;
    byte *addr;

    pappstack = (byte *)appstack;

    /* Our parameter points at a priv_mcontext_t struct, beyond which are
     * two other fields:
     *   pappstack --> +0  priv_mcontext_t struct
     *                 +x  addr of memory to free (can be NULL)
     *                 +y  sizeof memory to free
     */

    automatic_startup = true;
    /* we should control all threads */
    control_all_threads = true;
    dynamorio_app_init();
    if (INTERNAL_OPTION(nullcalls)) {
        dynamorio_app_exit();
        return;
    }

    /* For apps injected using follow_children, this is where control should be
     * allowed to go native for hotp_only & thin_client.
     */
    if (RUNNING_WITHOUT_CODE_CACHE())
        return;

    /* useful to debug fork-following */
    DOLOG(4, LOG_TOP, { SYSLOG_INTERNAL_INFO("dynamo auto start"); });

    /* FIXME: remainder of auto_setup still to be filled in */
}
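/* Illustrative sketch only, never called: one way the two fields beyond the
 * saved priv_mcontext_t could be pulled off pappstack, assuming exactly the
 * layout documented in the comment above.  The helper name and the
 * mem_to_free/mem_size out-parameters are hypothetical, not part of this
 * code base.
 */
static void
read_auto_setup_extras_sketch(byte *pappstack, byte **mem_to_free, size_t *mem_size)
{
    byte *p = pappstack + sizeof(priv_mcontext_t); /* +0: skip the saved context */
    *mem_to_free = *(byte **)p;                    /* +x: addr of memory to free */
    p += sizeof(byte *);
    *mem_size = *(size_t *)p;                      /* +y: sizeof memory to free */
}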
/* thread-shared initialization that should be repeated after a reset */
void
fragment_reset_init(void)
{
    if (RUNNING_WITHOUT_CODE_CACHE())
        return;

    mutex_lock(&shared_cache_flush_lock);
    /* ASSUMPTION: a reset frees all deletions that use flushtimes, so we can
     * reset the global flushtime here
     */
    flushtime_global = 0;
    mutex_unlock(&shared_cache_flush_lock);

    /* FIXME: to be filled in */
}
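/* Illustrative sketch only: the producing side that the reset above pairs
 * with.  Assuming every shared-cache flush bumps flushtime_global under the
 * same shared_cache_flush_lock, zeroing the counter at reset time is safe
 * once all flush-time-stamped deletions have been freed.  The helper name is
 * hypothetical.
 */
static void
increment_global_flushtime_sketch(void)
{
    mutex_lock(&shared_cache_flush_lock);
    flushtime_global++; /* stamp for deletions pending at this flush */
    mutex_unlock(&shared_cache_flush_lock);
}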
/* thread-shared initialization */
void
fragment_init(void)
{
    if (RUNNING_WITHOUT_CODE_CACHE())
        return;

    /* make sure fields are at same place */
    ASSERT(offsetof(fragment_t, flags) == offsetof(future_fragment_t, flags));
    ASSERT(offsetof(fragment_t, tag) == offsetof(future_fragment_t, tag));

    /* ensure we can read this w/o a lock: no cache line crossing, please */
    ASSERT(ALIGNED(&flushtime_global, 4));

    // if (SHARED_FRAGMENTS_ENABLED()) {
    //     /* tables are persistent across resets, only on heap for selfprot (case 7957) */
    //     if (DENTRE_OPTION(shared_bbs)) {
    //         shared_bb = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, fragment_table_t,
    //                                     ACCT_FRAG_TABLE, PROTECTED);
    //     }
    //     if (DENTRE_OPTION(shared_traces)) {
    //         shared_trace = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, fragment_table_t,
    //                                        ACCT_FRAG_TABLE, PROTECTED);
    //     }
    //     shared_future = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, fragment_table_t,
    //                                     ACCT_FRAG_TABLE, PROTECTED);
    // }
    //
    // if (USE_SHARED_PT())
    //     shared_pt = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, per_thread_t, ACCT_OTHER, PROTECTED);
    //
    // if (SHARED_IBT_TABLES_ENABLED()) {
    //     dead_lists =
    //         HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, dead_table_lists_t, ACCT_OTHER, PROTECTED);
    //     memset(dead_lists, 0, sizeof(*dead_lists));
    // }

    fragment_reset_init();

#if defined(INTERNAL) || defined(CLIENT_INTERFACE)
    /* FIXME: to be filled in */
#endif
}
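/* Illustrative sketch only: the layout property the offsetof ASSERTs above
 * encode.  A future fragment is a placeholder that generic hashtable code may
 * inspect through a fragment_t-shaped lens, so its tag and flags fields must
 * sit at identical offsets in both structs.  These mini structs are made up
 * for illustration; the real fragment_t carries many more cache-resident
 * fields.
 */
typedef struct example_future {
    app_pc tag; /* lookup key; same offset in both structs */
    uint flags; /* FRAG_* bits; same offset in both structs */
} example_future_t;

typedef struct example_fragment {
    app_pc tag;     /* must line up with example_future_t.tag */
    uint flags;     /* must line up with example_future_t.flags */
    byte *start_pc; /* ...cache-resident fields follow... */
} example_fragment_t;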
void
fragment_thread_init(dcontext_t *dcontext)
{
    /* we allocate per_thread_t in the global heap solely for self-protection,
     * even when turned off, since even with a lot of threads this isn't a lot of
     * pressure on the global heap
     */
    per_thread_t *pt;

    /* don't initialize un-needed data for hotp_only & thin_client.
     * FIXME: could set htable initial sizes to 0 for all configurations, instead.
     * per_thread_t is pretty big, so we avoid it, though it costs us checks for
     * hotp_only in the islinking-related routines.
     */
    if (RUNNING_WITHOUT_CODE_CACHE())
        return;

    pt = (per_thread_t *)global_heap_alloc(sizeof(per_thread_t) HEAPACCT(ACCT_OTHER));
    dcontext->fragment_field = (void *)pt;
    fragment_thread_reset_init(dcontext);
}
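/* Illustrative sketch only, under the assumption that a matching teardown
 * path exists: the per_thread_t allocated above must go back to the global
 * heap with the same HEAPACCT tag, mirroring the hotp_only/thin_client
 * early-out so we never free what was never allocated.  The _sketch name is
 * hypothetical.
 */
static void
fragment_thread_exit_sketch(dcontext_t *dcontext)
{
    per_thread_t *pt = (per_thread_t *)dcontext->fragment_field;
    if (RUNNING_WITHOUT_CODE_CACHE())
        return; /* nothing was allocated at init time */
    global_heap_free(pt, sizeof(per_thread_t) HEAPACCT(ACCT_OTHER));
    dcontext->fragment_field = NULL;
}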