// Tears down a dying arena: drains all pending work, releases the market
// reference, destroys per-arena state, and finally frees the backing storage.
// Precondition: no threads may still be active in this arena.
void arena::free_arena () {
    __TBB_ASSERT( !my_num_threads_active, "There are threads in the dying arena" );
    // Poison the guard so stale references to this arena are caught in debug builds.
    poison_value( my_guard );
    intptr_t drained = 0;
    // Drain every slot's mailbox; slot 0 has no mailbox, hence i starts at 1.
    for ( unsigned i = 1; i <= my_num_slots; ++i )
        drained += mailbox(i).drain();
#if __TBB_TASK_PRIORITY && TBB_USE_ASSERT
    // With priorities enabled there is one task stream per priority level;
    // all of them must be empty by now.
    for ( intptr_t i = 0; i < num_priority_levels; ++i )
        __TBB_ASSERT(my_task_stream[i].empty() && my_task_stream[i].drain()==0, "Not all enqueued tasks were executed");
#elif !__TBB_TASK_PRIORITY
    // Single task stream in the no-priority configuration.
    __TBB_ASSERT(my_task_stream.empty() && my_task_stream.drain()==0, "Not all enqueued tasks were executed");
#endif /* !__TBB_TASK_PRIORITY */
#if __TBB_COUNT_TASK_NODES
    // Account for the drained (never-executed) tasks in the global node counter.
    my_market->update_task_node_count( -drained );
#endif /* __TBB_COUNT_TASK_NODES */
    // Drop this arena's reference on the market; may trigger market destruction.
    my_market->release();
#if __TBB_TASK_GROUP_CONTEXT
    __TBB_ASSERT( my_master_default_ctx, "Master thread never entered the arena?" );
    // The default context was constructed with placement new into NFS memory,
    // so destroy and free it manually.
    my_master_default_ctx->~task_group_context();
    NFS_Free(my_master_default_ctx);
#endif /* __TBB_TASK_GROUP_CONTEXT */
#if __TBB_STATISTICS
    for( unsigned i = 0; i < my_num_slots; ++i )
        NFS_Free( my_slots[i].my_counters );
#endif /* __TBB_STATISTICS */
    // The allocation base is the mailbox area past the last slot (see
    // allocate_arena, which placement-constructed the arena after the
    // mailboxes). Capture it before destroying *this.
    void* storage  = &mailbox(my_num_slots);
    __TBB_ASSERT( my_num_threads_active == 0, NULL );
    __TBB_ASSERT( my_pool_state == SNAPSHOT_EMPTY || !my_max_num_workers, NULL );
    this->~arena();
#if TBB_USE_ASSERT > 1
    // NOTE(review): my_max_num_workers is read here after this->~arena() ran.
    // Presumably the (trivial?) destructor leaves the field intact — confirm
    // against arena_base's destructor before relying on this.
    memset( storage, 0, allocation_size(my_max_num_workers) );
#endif /* TBB_USE_ASSERT */
    NFS_Free( storage );
}
// Allocates and constructs a new OSRAdapter blob in the code cache and
// registers it with the profiling interfaces (VTune, Forte, JVMTI).
// Returns NULL if the code-cache allocation fails.
OSRAdapter* OSRAdapter::new_osr_adapter(CodeBuffer* cb, OopMapSet *oop_maps, int frame_size, int returning_fp_entry_offset) {
    unsigned int size = allocation_size(cb, sizeof(OSRAdapter));
    OSRAdapter* osr_adapter = NULL;
    {
        // Code-cache allocation must be done under CodeCache_lock; the
        // no-safepoint-check flag avoids safepoint polling while holding it.
        MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        osr_adapter = new (size) OSRAdapter(cb, oop_maps, size, frame_size, returning_fp_entry_offset);
    }
    // We do not need to hold the CodeCache lock during name formatting.
    if (osr_adapter != NULL) {
        char blob_id[256];
        jio_snprintf(blob_id, sizeof(blob_id), "OSRAdapter@" PTR_FORMAT, osr_adapter->instructions_begin());
        // Register the new code range with external profilers.
        VTune::register_stub(blob_id, osr_adapter->instructions_begin(), osr_adapter->instructions_end());
        Forte::register_stub(blob_id, osr_adapter->instructions_begin(), osr_adapter->instructions_end());
        // notify JVMTI profiler about this OSR
        if (JvmtiExport::should_post_dynamic_code_generated()) {
            JvmtiExport::post_dynamic_code_generated("OSRAdapter", osr_adapter->instructions_begin(), osr_adapter->instructions_end());
        }
    }
    return osr_adapter;
}
// Allocates zero-initialized NFS storage for an arena sized for
// max_num_workers and placement-constructs the arena object past the
// mailbox area at the front of the allocation.
arena& arena::allocate_arena( market& m, unsigned max_num_workers ) {
    // Compile-time layout invariants the slot/mailbox arithmetic relies on.
    __TBB_ASSERT( sizeof(base_type) + sizeof(arena_slot) == sizeof(arena), "All arena data fields must go to arena_base" );
    __TBB_ASSERT( sizeof(base_type) % NFS_GetLineSize() == 0, "arena slots area misaligned: wrong padding" );
    __TBB_ASSERT( sizeof(mail_outbox) == NFS_MaxLineSize, "Mailbox padding is wrong" );

    const size_t total_bytes = allocation_size(max_num_workers);
    unsigned char* base = (unsigned char*)NFS_Allocate( total_bytes, 1, NULL );
    // Zero the whole region so that every slot starts out marked empty.
    memset( base, 0, total_bytes );

    // The arena object itself lives immediately after the reserved mailboxes.
    const size_t mailbox_bytes = num_slots_to_reserve(max_num_workers) * sizeof(mail_outbox);
    return *new( base + mailbox_bytes ) arena(m, max_num_workers);
}
// Initializes this instance: runs the base OopDesc initialization for the
// given klass, then caches the object's computed size.
// statics_size / vtable_length feed the project sizing helper; their exact
// units (bytes vs. words) are not visible here — see allocation_size.
void initialize(OopDesc *klass, jint statics_size, jint vtable_length) {
    OopDesc::initialize(klass);
    _object_size = allocation_size(statics_size, vtable_length);
}
/*! * This function performs the garbage collection for the Scheme interpreter. * It also contains code to track how many objects were collected on each run, * and also it can optionally be set to do GC when the total memory used grows * beyond a certain limit. */ void collect_garbage() { Environment *global_env; PtrStack *eval_stack; int i; EvaluationContext *eval; #ifdef GC_STATS int vals_before, procs_before, envs_before; int vals_after, procs_after, envs_after; vals_before = allocated_values.size; procs_before = allocated_lambdas.size; envs_before = allocated_environments.size; #endif #ifndef ALWAYS_GC /* Don't perform garbage collection if we still have room to grow. */ if (allocation_size() < max_allocation_size) return; #endif /*==========================================================* * TODO: Implement mark-and-sweep garbage collection here! * * * * Mark all objects referenceable from either the global * * environment, or from the evaluation stack. Then sweep * * through all allocated objects, freeing unmarked objects. * * * * Reminder to self: DECLARE THESE FUNCTIONS @ TOP * *==========================================================*/ global_env = get_global_environment(); eval_stack = get_eval_stack(); /* ... TODO ... */ // Mark everything mark_environment(global_env); for (i = 0; i < (*eval_stack).size; i++) { eval = (EvaluationContext *) pv_get_elem(eval_stack, i); mark_eval_stack(eval); } // Sweep everything sweep_values(); sweep_lambdas(); sweep_environments(); #ifndef ALWAYS_GC /* If we are still above the maximum allocation size, increase it. 
*/ if (allocation_size() > max_allocation_size) { max_allocation_size *= 2; printf("Increasing maximum allocation size to %ld bytes.\n", max_allocation_size); } #endif #ifdef GC_STATS vals_after = allocated_values.size; procs_after = allocated_lambdas.size; envs_after = allocated_environments.size; printf("GC Results:\n"); printf("\tBefore: \t%d vals \t%d lambdas \t%d envs\n", vals_before, procs_before, envs_before); printf("\tAfter: \t%d vals \t%d lambdas \t%d envs\n", vals_after, procs_after, envs_after); printf("\tChange: \t%d vals \t%d lambdas \t%d envs\n", vals_after - vals_before, procs_after - procs_before, envs_after - envs_before); #endif }
// Returns the object size size_t object_size() { return allocation_size(_entry_count); }
// Returns the object size size_t object_size() { return allocation_size(_length); }
// Returns the size of the object size_t object_size() { return allocation_size(); }