/** Launches a thread that periodically checks in with the wifidog auth server to perform heartbeat function. @param arg NULL @todo This thread loops infinitely, need a watchdog to verify that it is still running? */ void thread_ping(void *arg) { pthread_cond_t cond = PTHREAD_COND_INITIALIZER; pthread_mutex_t cond_mutex = PTHREAD_MUTEX_INITIALIZER; struct timespec timeout; while (1) { /* Make sure we check the servers at the very begining */ debug(LOG_DEBUG, "Running ping()"); ping(); debug(LOG_DEBUG, "Running update_counters"); update_counters(); /* Sleep for config.checkinterval seconds... */ timeout.tv_sec = time(NULL) + config_get_config()->checkinterval; timeout.tv_nsec = 0; /* Mutex must be locked for pthread_cond_timedwait... */ pthread_mutex_lock(&cond_mutex); /* Thread safe "sleep" */ pthread_cond_timedwait(&cond, &cond_mutex, &timeout); /* No longer needs to be locked */ pthread_mutex_unlock(&cond_mutex); } }
void DefNewGeneration::gc_epilogue(bool full) { // Check if the heap is approaching full after a collection has // been done. Generally the young generation is empty at // a minimum at the end of a collection. If it is not, then // the heap is approaching full. GenCollectedHeap* gch = GenCollectedHeap::heap(); clear_should_allocate_from_space(); if (collection_attempt_is_safe()) { gch->clear_incremental_collection_will_fail(); } else { gch->set_incremental_collection_will_fail(); if (full) { // we seem to be running out of space set_should_allocate_from_space(); } } if (ZapUnusedHeapArea) { eden()->check_mangled_unused_area_complete(); from()->check_mangled_unused_area_complete(); to()->check_mangled_unused_area_complete(); } // update the generation and space performance counters update_counters(); gch->collector_policy()->counters()->update_counters(); }
/**
 * Decide which prediction reset group to signal for the current frame.
 *
 * First lets update_counters() advance the per-group counters; if it
 * reports a group behind schedule, that group is used immediately.
 * Otherwise the group with the highest count is chosen, but only if its
 * count exceeds PRED_RESET_MIN; with no clear winner, group 0 ("no
 * reset") is signalled.
 *
 * Fixes vs. previous version: removed the dead avg_frame computation
 * (its result was never read) and initialized max_group_id_c so it has
 * a defined value even when no counter is positive (also silences
 * -Wmaybe-uninitialized).
 */
static void update_pred_resets(SingleChannelElement *sce)
{
    int i, max_group_id_c = 0, max_frame = 0;
    IndividualChannelStream *ics = &sce->ics;

    /* Update the counters and immediately reset any frame behind schedule */
    if ((ics->predictor_reset_group = update_counters(&sce->ics, 1)))
        return;

    /* Group 0 means "no reset", so scan groups 1..30 for the largest count */
    for (i = 1; i < 31; i++) {
        if (ics->predictor_reset_count[i] > max_frame) {
            max_group_id_c = i;
            max_frame = ics->predictor_reset_count[i];
        }
    }

    ics->predictor_reset_group = (max_frame > PRED_RESET_MIN) ? max_group_id_c : 0;
}
// Grow the committed part of the generation by `bytes`.  On success the
// card table and the shared block-offset table are expanded first so
// they cover the new region, and only then is the space's end moved
// (set_end() relies on the tables above).  Returns whether the
// underlying virtual-space expansion succeeded.
bool CardGeneration::grow_by(size_t bytes) {
  assert_correct_size_change_locking();
  const bool expanded = _virtual_space.expand_by(bytes);
  if (!expanded) {
    return false;
  }

  const size_t new_word_size = heap_word_size(_virtual_space.committed_size());
  MemRegion covered(space()->bottom(), new_word_size);

  // Expand card table
  GenCollectedHeap::heap()->barrier_set()->resize_covered_region(covered);
  // Expand shared block offset array
  _bts->resize(new_word_size);

  // Fix for bug #4668531
  if (ZapUnusedHeapArea) {
    // Mangle the newly committed (still unused) tail of the space.
    MemRegion mangle_region(space()->end(), (HeapWord*)_virtual_space.high());
    SpaceMangler::mangle_region(mangle_region);
  }

  // Expand space -- also expands space's BOT
  // (which uses (part of) shared array above)
  space()->set_end((HeapWord*)_virtual_space.high());

  // update the space and generation capacity counters
  update_counters();

  const size_t new_mem_size = _virtual_space.committed_size();
  const size_t old_mem_size = new_mem_size - bytes;
  log_trace(gc, heap)("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
  return true;
}
// Post-collection hook for the tenured generation: refreshes the
// generation and space performance counters and, when heap zapping is
// enabled, verifies that the unused part of the space is still fully
// mangled.  `full` is unused here.
void TenuredGeneration::gc_epilogue(bool full) {
  // update the generation and space performance counters
  update_counters();

  if (ZapUnusedHeapArea) {
    _the_space->check_mangled_unused_area_complete();
  }
}
static Eina_Bool _ut_cb_check (void *data) { Instance *inst; int days, hours, minutes; char u_date_time[256] = "up: N/A"; char load_avg[256] = "la: N/A"; char users[256] = "users: N/A"; time_t uptime; if (!(inst = data)) return EINA_FALSE; #ifdef UPTIME_DEBUG syslog (LOG_EMERG, "counter = %d update = %d\n", inst->counter, inst->ci->check_interval); #endif inst->counter += inst->ci->update_interval; if (inst->counter >= inst->ci->check_interval) { inst->counter = 0; (void) update_counters (inst); } uptime = inst->uptime + inst->counter; if (uptime != -1) { days = uptime / (24 * 3600); uptime %= 24 * 3600; hours = uptime / 3600; uptime %= 3600; minutes = uptime / 60; uptime %= 60; snprintf (u_date_time, sizeof (u_date_time), D_ ("up: %d days, %d:%02d:%02ld"), days, hours, minutes, uptime); } if (inst->la[0] != -1) { snprintf (load_avg, sizeof (load_avg), D_ ("la: %.2f %.2f %.2f"), inst->la[0], inst->la[1], inst->la[2]); } if (inst->nusers != -1) snprintf (users, sizeof (users), D_ ("users: %d"), inst->nusers); edje_object_part_text_set (inst->ut->ut_obj, "uptime", u_date_time); edje_object_part_text_set (inst->ut->ut_obj, "load_average", load_avg); edje_object_part_text_set (inst->ut->ut_obj, "nusers", users); return EINA_TRUE; }
// Sample the CMS statistics into the adaptive-policy performance
// counters.  All counter updates are skipped when UsePerfData is off.
void CMSGCAdaptivePolicyCounters::update_counters(CMSGCStats* gc_stats) {
  if (!UsePerfData) {
    return;
  }
  // Generic counters first, then the promotion-statistics counters.
  update_counters();
  update_promoted((size_t) gc_stats->avg_promoted()->last_sample());
  update_avg_promoted_avg(gc_stats);
  update_avg_promoted_dev(gc_stats);
  update_avg_promoted_padded_avg(gc_stats);
}
// Constructor for the DefNew (young) generation: sets up card-table
// coverage for the reserved region, allocates eden and the two survivor
// spaces, computes their maximum sizes (assuming the entire reservation
// is committed), creates the performance counters, and finally lays out
// the initial space boundaries.
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level),
    _objs_with_preserved_marks(NULL),
    _preserved_marks_of_objs(NULL),
    _promo_failure_scan_stack(NULL),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  // Make the card table cover the whole reserved range of this generation.
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  // has_soft_ended_eden() selects the ConcEdenSpace variant; otherwise a
  // plain EdenSpace is used.
  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
    _eden_space = new ConcEdenSpace(this);
  } else {
    _eden_space = new EdenSpace(this);
  }
  _from_space = new ContiguousSpace();
  _to_space = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  // Clear and mangle the spaces, then publish the initial counter values.
  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  // Size threshold (in words) derived from PretenureSizeThreshold.
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}
// Constructor for the DefNew (young) generation (newer API variant):
// sets up card-table coverage for the reserved region, allocates eden
// and the two survivor spaces, computes their maximum sizes (assuming
// the entire reservation is committed), creates the performance
// counters, lays out the initial space boundaries, and allocates the
// STW GC timer.
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false)
{
  // Make the card table cover the whole reserved range of this generation.
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->barrier_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) {
    vm_exit_during_initialization("Could not allocate a new gen space");
  }

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx alignment = gch->collector_policy()->space_alignment();
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, alignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters
  GenCollectorPolicy* gcp = gch->gen_policy();

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      gcp->min_young_size(), gcp->max_young_size(), &_virtual_space);

  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  // Clear and mangle the spaces, then publish the initial counter values.
  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  // Size threshold (in words) derived from PretenureSizeThreshold.
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}
/* Entry point: initialize the perf-data block and runtime settings from
 * the command line, then refresh the counters once per second until the
 * keep_running flag is cleared elsewhere (presumably a signal handler —
 * not visible here). */
int
main(int argc, char **argv)
{
    PERF_DATA_BLOCK data;
    RuntimeSettings rt;

    initialize(&data, &rt, argc, argv);

    /* check flag, refresh, sleep, repeat — identical to the classic
     * while/sleep loop, just expressed as a for statement */
    for (; keep_running; sleep(1))
        update_counters(&data, rt);

    return 0;
}
void DefNewGeneration::gc_epilogue(bool full) { if (full_promotion_would_succeed()) { GenCollectedHeap::heap()->clear_incremental_collection_will_fail(); } else { GenCollectedHeap::heap()->set_incremental_collection_will_fail(); } // update the generation and space performance counters update_counters(); if (Universe::jvmpi_slow_allocation()) { // If JVMPI alloc event has been disabled, turn off slow allocation now; // otherwise, fill the new generation. if (!Universe::jvmpi_alloc_event_enabled()) { Universe::set_jvmpi_alloc_event_enabled(Universe::_jvmpi_disabled); } else { fill_newgen(); } } }
// Constructor for the DefNew (young) generation (oldest API variant):
// sets up card-table coverage for the reserved region, allocates eden
// and the two survivor spaces, computes maximum sizes assuming full
// commit of the reservation, creates the performance counters, and lays
// out the initial space boundaries.
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   int level,
                                   const char* policy)
  : Generation(rs, initial_size, level)
{
  // Make the card table cover the whole reserved range of this generation.
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  Universe::heap()->barrier_set()->resize_covered_region(cmr);

  _eden_space = new EdenSpace(this);
  _from_space = new ContiguousSpace();
  _to_space = new ContiguousSpace();

  if (_eden_space == NULL || _from_space == NULL || _to_space == NULL)
    vm_exit_during_initialization("Could not allocate a new gen space");

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  // Alignment comes from the car size (train-collector era code).
  uintx alignment = CarSpace::car_size();
  uintx size = _virtual_space.reserved_size();
  uintx max_survivor_size = compute_survivor_size(size, alignment);
  uintx max_eden_size = size - (2*max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters(PERF_GC, "new", 0, 3, &_virtual_space);
  _gc_counters = new CollectorCounters(PERF_GC, policy, 0);

  // Space counters live under the generation's counter name space.
  const char* ns = _gen_counters->name_space();
  _eden_counters = new CSpaceCounters(ns, "eden", 0, max_eden_size, _eden_space);
  _from_counters = new CSpaceCounters(ns, "s0", 1, max_survivor_size, _from_space);
  _to_counters = new CSpaceCounters(ns, "s1", 2, max_survivor_size, _to_space);

  // Lay out eden/from/to and publish the initial counter values.
  compute_space_boundaries(0);
  update_counters();
  _next_gen = NULL;
  _tenuring_threshold = MaxTenuringThreshold;
  // Size threshold (in words) derived from PretenureSizeThreshold.
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
}
/* Gadcon "init" hook: creates a new uptime-gadget instance bound to the
 * given gadget container, hooks up mouse handling, primes the counters
 * once, and starts the periodic update timer.
 *
 * @param gc    the gadget container to add to
 * @param name  gadget class name
 * @param id    config item id for this instance
 * @param style gadget style
 * @return the newly created gadcon client
 */
static E_Gadcon_Client *
_gc_init (E_Gadcon * gc, const char *name, const char *id, const char *style)
{
  Evas_Object *o;
  E_Gadcon_Client *gcc;
  Instance *inst;
  Uptime *ut;

  inst = E_NEW (Instance, 1);
  inst->ci = _ut_config_item_get (id);

#ifdef UPTIME_DEBUG
  syslog (LOG_EMERG, "ii id= %s, inst->ci->update_interval = %d, ci = %d\n",
          inst->ci->id, inst->ci->update_interval, inst->ci->check_interval);
#endif

  /* Create the gadget's edje object and tie it and the instance together. */
  ut = _ut_new (gc->evas);
  ut->inst = inst;
  inst->ut = ut;

  o = ut->ut_obj;
  gcc = e_gadcon_client_new (gc, name, id, style, o);
  gcc->data = inst;

  inst->gcc = gcc;
  inst->ut_obj = o;

  evas_object_event_callback_add (o, EVAS_CALLBACK_MOUSE_DOWN,
                                  _ut_cb_mouse_down, inst);
  ut_config->instances = eina_list_append (ut_config->instances, inst);

  /* Prime the readings immediately so the gadget shows data right away;
   * reset the counter afterwards so the first timed refresh waits a full
   * check interval. */
  (void) update_counters (inst);
  inst->counter = 0;

  if (!inst->monitor)
    inst->monitor = ecore_timer_add (inst->ci->update_interval,
                                     _ut_cb_check, inst);
  return gcc;
}
// Grow the committed part of the generation by `bytes`.
//
// On success the card table and the shared block-offset table are
// resized first so that they cover the new region, and only then is the
// space's end moved (set_end() relies on the tables above).  Returns
// whether the underlying virtual-space expansion succeeded.
bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size = heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      // Mangle the newly committed (still unused) tail of the space.
      MemRegion mangle_region(_the_space->end(),
                              (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                             SIZE_FORMAT "K to " SIZE_FORMAT "K",
                             name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}
// Current memory address: latch the internal counters to the present
// moment via update_counters(), then combine the row's start address
// with the character column.  (Unlike the MC6845 variant, no address
// masking is applied here.)
UINT16 crtc_ega_device::get_ma()
{
	update_counters();

	const UINT16 address = m_line_address + m_character_counter;
	return address;
}
// Current memory address: latch the internal counters to the present
// moment via update_counters(), then combine the row's start address
// with the character column, wrapping the result to 14 bits (0x3fff).
UINT16 mc6845_device::get_ma()
{
	update_counters();

	UINT16 address = m_line_address + m_character_counter;
	return address & 0x3fff;
}