// Steal-phase compaction task: attach a region-draining stack to this GC
// worker, drain it, then work-steal region indices from other workers and
// fill/update each stolen region until all workers agree to terminate.
void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
  assert(Universe::heap()->is_gc_active(), "called outside gc");

  NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  // If not all threads are active, get a draining stack
  // from the list.  Else, just use this threads draining stack.
  uint which_stack_index;
  bool use_all_workers = manager->all_workers_active();
  if (use_all_workers) {
    which_stack_index = which;
    assert(manager->active_workers() == ParallelGCThreads,
           err_msg("all_workers_active has been incorrectly set: "
                   " active %d ParallelGCThreads %d",
                   manager->active_workers(), ParallelGCThreads));
  } else {
    which_stack_index = ParCompactionManager::pop_recycled_stack_index();
  }

  cm->set_region_stack_index(which_stack_index);
  cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
  if (TraceDynamicGCThreads) {
    // BUGFIX: the region-stack pointer used to be printed with "0x%x", which
    // truncates the value (and is undefined behavior via varargs) on 64-bit
    // platforms; print it with PTR_FORMAT and an intptr_t cast instead.
    gclog_or_tty->print_cr("StealRegionCompactionTask::do_it "
                           "region_stack_index %d region_stack = " PTR_FORMAT " "
                           " empty (%d) use all workers %d",
                           which_stack_index,
                           (intptr_t)ParCompactionManager::region_list(which_stack_index),
                           cm->region_stack()->is_empty(),
                           use_all_workers);
  }

  // Has to drain stacks first because there may be regions on
  // preloaded onto the stack and this thread may never have
  // done a draining task.  Are the draining tasks needed?
  cm->drain_region_stacks();

  size_t region_index = 0;
  int random_seed = 17;

  // Steal regions from other workers' stacks; after each stolen region is
  // filled, re-drain our own stack.  When stealing fails, offer termination
  // and keep retrying until the terminator says everyone is done.
  while(true) {
    if (ParCompactionManager::steal(which, &random_seed, region_index)) {
      PSParallelCompact::fill_and_update_region(cm, region_index);
      cm->drain_region_stacks();
    } else {
      if (terminator()->offer_termination()) {
        break;
      }
      // Go around again.
    }
  }
  return;
}
int main() { GaussianEliminator terminator(4); unsigned short int r1[] = {0, 1, 2}; unsigned short int r2[] = {1, 3}; unsigned short int r3[] = {0, 1}; unsigned short int r4[] = {3}; Equation eq; std::copy ( r1, r1 + 3, std::back_inserter ( eq ) ); terminator.addEquation(eq, true); eq.clear(); std::copy ( r2, r2 + 2, std::back_inserter ( eq ) ); terminator.addEquation(eq, true); eq.clear(); std::copy ( r3, r3 + 2, std::back_inserter ( eq ) ); terminator.addEquation(eq, false); eq.clear(); std::copy ( r4, r4 + 1, std::back_inserter ( eq ) ); terminator.addEquation(eq, true); std::vector <bool> sol = terminator.getSolution(); for (std::vector <bool>::iterator it = sol.begin(); it < sol.end(); it ++) std::cout << *it << std::endl; return 0; }
void Function::dump(std::ostream& out) {
  // Print every basic block as a ".L<index>:" label followed by its
  // non-terminator statements, then render the terminator compactly:
  // jumps/branches into the block that follows textually are elided or
  // flipped so no redundant "goto next" is emitted.
  auto it = begin();
  while (it != end()) {
    auto blk = *it;
    ++it;
    auto fallthrough = (it != end()) ? *it : nullptr;

    out << ".L" << blk->index << ":";

    for (auto stmt : *blk) {
      if (stmt->asTerminator())
        break;
      out << '\t';
      stmt->dump(out);
    }

    auto term = blk->terminator();
    assert(term);
    auto jump = term->asJump();
    auto cjump = term->asCJump();

    if (jump && jump->target() == fallthrough) {
      // Unconditional jump straight to the next block: nothing to print.
      out << std::endl;
    } else if (cjump && cjump->iffalse() == fallthrough) {
      out << "\tif (";
      cjump->expr()->dump(out);
      out << ") goto .L" << cjump->iftrue()->index << ';' << std::endl;
    } else if (cjump && cjump->iftrue() == fallthrough) {
      out << "\tiffalse (";
      cjump->expr()->dump(out);
      out << ") goto .L" << cjump->iffalse()->index << ';' << std::endl;
    } else {
      out << '\t';
      term->dump(out);
    }
  }
}
// Work-stealing closure for the parallel marking phase: once this GC worker
// has no local marking work it loops here, stealing object-array chunks and
// then plain oops from other workers' queues, until every worker has offered
// termination.
void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
  assert(Universe::heap()->is_gc_active(), "called outside gc");

  NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);

  oop obj = NULL;
  ObjArrayTask task;
  int random_seed = 17;  // seed for the steal victim-selection RNG
  do {
    // First drain stolen object-array chunks: each task carries an array
    // and a start index; following its contents may push more marking work,
    // which follow_marking_stacks() then processes.
    while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
      objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
      k->oop_follow_contents(cm, task.obj(), task.index());
      cm->follow_marking_stacks();
    }
    // Then steal individual marked objects and follow their contents.
    while (ParCompactionManager::steal(which, &random_seed, obj)) {
      obj->follow_contents(cm);
      cm->follow_marking_stacks();
    }
    // Retry both steal loops until all workers agree there is no work left.
  } while (!terminator()->offer_termination());
}
/*
** Main game loop: repeatedly redraws/updates the game via do_reach() and
** polls terminator() until it reports that the game is over.
**
** `refresh` appears to be a redraw-request code updated through the
** pointer handed to do_reach()/terminator() (presumably):
**   2 -> `nb` holds a match count: store it in base->matches, redraw,
**        then clear the request code;
**   1 -> `nb` holds a line number: store it in base->line and redraw;
**   0 -> plain redraw.
** NOTE(review): unlike the `refresh == 2` branch, the `refresh == 1`
** branch never resets `refresh` to 0 here -- confirm do_reach() clears
** it itself, otherwise this path redraws twice per iteration.
*/
void aff_game(t_prog_base *base)
{
  int nb;
  int refresh;

  refresh = 0;
  nb = 0;
  while (42)
    {
      if (refresh == 2)
        {
          base->matches = nb;
          do_reach(base, &refresh, nb);
          refresh = 0;
        }
      if (refresh == 1)
        {
          base->line = nb;
          do_reach(base, &refresh, nb);
        }
      if (refresh == 0)
        do_reach(base, &refresh, nb);
      /* terminator() decides game over and may update nb/refresh. */
      if (terminator(base, &nb, &refresh))
        return ;
    }
}
// Install `i` as this block's terminator instruction.
//
// If the block already ends in a terminator, the existing last instruction
// is destroyed and its slot overwritten with a clone of `i`; otherwise `i`
// itself is appended as the new last instruction.
//
// NOTE(review): the two paths differ in ownership -- the replacement path
// stores i->clone() (caller keeps `i`), while the append path stores `i`
// directly (block takes it).  Confirm callers rely on this asymmetry.
void BasicBlock::setTerminator(Instruction* i) {
  if(terminator() != 0) {
    // Reclaim the current terminator, then overwrite the back slot with a
    // copy of the new instruction.
    delete back();
    back() = i->clone();
  } else {
    // No terminator yet: append the instruction itself.
    push_back(i);
  }
}
bool connection::get_message(std::string& msg) {
    // Look for one complete CRLF-terminated line at the front of the
    // receive buffer; on success hand back the line (without "\r\n"),
    // drop it from the buffer and report true.  Otherwise leave `msg`
    // untouched and report false.
    const std::string data(recv_buffer.bptr(), recv_buffer.size());
    const std::string::size_type eol = data.find("\r\n");

    if (eol == std::string::npos)
        return false;

    msg.assign(data, 0, eol);           // everything before the terminator
    recv_buffer.discard(0, eol + 2);    // consume line plus "\r\n"
    return true;
}
bool connection::get_message(std::string& msg)
{
    // Try to get an entire message from the buffer, discard the data
    // collected if it is possible.
    //
    // A "message" is a single CRLF-terminated line at the front of
    // recv_buffer.  On success `msg` receives the line without its "\r\n",
    // the line plus terminator is removed from the buffer, and true is
    // returned; otherwise `msg` is left untouched and false is returned.
    std::string data(recv_buffer.bptr(), recv_buffer.size());
    std::string::size_type terminator(data.find("\r\n"));
    if(terminator != std::string::npos) {
        msg.clear();
        msg.append(data, 0, terminator);         // copy up to the "\r\n"
        recv_buffer.discard(0, terminator + 2);  // drop line + 2-byte CRLF
    }
    return terminator != std::string::npos;
}
void PSRefProcTaskExecutor::execute(ProcessTask& task) {
  // Fan the reference-processing task out: one proxy task per active GC
  // worker, plus one steal task per worker when the task keeps referents
  // alive (so work can be balanced), then run the queue to completion.
  GCTaskManager* const gc_manager = ParallelScavengeHeap::gc_task_manager();
  const uint workers = gc_manager->active_workers();

  GCTaskQueue* queue = GCTaskQueue::create();
  for (uint worker = 0; worker < workers; worker++) {
    queue->enqueue(new PSRefProcTaskProxy(task, worker));
  }

  ParallelTaskTerminator terminator(workers,
      (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth());
  if (task.marks_oops_alive() && workers > 1) {
    for (uint worker = 0; worker < workers; worker++) {
      queue->enqueue(new StealTask(&terminator));
    }
  }

  gc_manager->execute_and_wait(queue);
}
// Run a reference-processing task across all GC worker threads: one proxy
// per thread, plus steal tasks for load balancing when the task marks
// referents alive, then execute the queue and wait for completion.
void PSRefProcTaskExecutor::execute(ProcessTask& task)
{
  GCTaskQueue* q = GCTaskQueue::create();
  for(uint i=0; i<ParallelGCThreads; i++) {
    q->enqueue(new PSRefProcTaskProxy(task, i));
  }
  // The terminator watches whichever promotion-manager queue set is in use
  // (depth-first vs breadth-first scavenge order) so steal tasks know when
  // all queues are drained.
  ParallelTaskTerminator terminator(
                 ParallelScavengeHeap::gc_task_manager()->workers(),
                 UseDepthFirstScavengeOrder ?
                    (TaskQueueSetSuper*) PSPromotionManager::stack_array_depth()
                  : (TaskQueueSetSuper*) PSPromotionManager::stack_array_breadth());
  // Steal tasks only help when marking produces follow-up work and there is
  // more than one thread to steal between.
  if (task.marks_oops_alive() && ParallelGCThreads > 1) {
    for (uint j=0; j<ParallelGCThreads; j++) {
      q->enqueue(new StealTask(&terminator));
    }
  }
  ParallelScavengeHeap::gc_task_manager()->execute_and_wait(q);
}
static int read_file(char *filename, char **ret_buf, int (* terminator)(char *buf, int pos) ) { #define GROW_AMOUNT 200 size_t f_len=0; size_t f_pos=0; char *tmp_buf; char *f_buf = NULL; // This one needs to initialize NULL! FILE *f; if (!filename) { *ret_buf = f_buf; return SIEVE2_ERROR_FAIL; } f = fopen(filename, "r"); if (!f) { printf("Could not open file '%s'\n", filename); return 1; } while(!feof(f) && !terminator(f_buf, f_pos)) { if( f_pos + 1 >= f_len ) { tmp_buf = realloc(f_buf, sizeof(char) * (f_len+=GROW_AMOUNT)); if( tmp_buf != NULL ) f_buf = tmp_buf; else return 1; } f_buf[f_pos] = fgetc(f); f_pos++; } if(f_pos) f_buf[f_pos] = '\0'; fclose(f); *ret_buf = f_buf; return SIEVE2_OK; }
void RefProcTaskExecutor::execute(ProcessTask& task) {
  // Distribute the reference-processing task: one RefProcTaskProxy per GC
  // worker thread, plus marking steal tasks for balance when the task keeps
  // referents alive, then run the queue to completion.
  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
  uint total_workers  = heap->gc_task_manager()->workers();
  uint active_workers = heap->gc_task_manager()->active_workers();

  RegionTaskQueueSet* queue_set = ParCompactionManager::region_array();
  ParallelTaskTerminator terminator(active_workers, queue_set);

  GCTaskQueue* queue = GCTaskQueue::create();
  for (uint worker = 0; worker < total_workers; worker++) {
    queue->enqueue(new RefProcTaskProxy(task, worker));
  }
  if (task.marks_oops_alive() && total_workers > 1) {
    for (uint worker = 0; worker < active_workers; worker++) {
      queue->enqueue(new StealMarkingTask(&terminator));
    }
  }

  PSParallelCompact::gc_task_manager()->execute_and_wait(queue);
}
// Scavenge-phase work stealing: after draining this worker's own promotion
// stacks, repeatedly steal pushed locations from other workers' depth-first
// queues and process them, until every worker has offered termination.
void StealTask::do_it(GCTaskManager* manager, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  PSPromotionManager* pm =
    PSPromotionManager::gc_thread_promotion_manager(which);
  pm->drain_stacks(true);
  guarantee(pm->stacks_empty(),
            "stacks should be empty at this point");

  int random_seed = 17;  // seed for steal victim selection
  while(true) {
    StarTask p;
    if (PSPromotionManager::steal_depth(which, &random_seed, p)) {
      TASKQUEUE_STATS_ONLY(pm->record_steal(p));
      // Process the stolen location, then drain any work it produced.
      pm->process_popped_location_depth(p);
      pm->drain_stacks_depth(true);
    } else {
      // Nothing to steal: offer termination; break once all workers agree.
      if (terminator()->offer_termination()) {
        break;
      }
    }
  }
  guarantee(pm->stacks_empty(), "stacks should be empty at this point");
}
/* Read from FD a "hunk" of data whose end is decided by the TERMINATOR
   callback, using MSG_PEEK-style lookahead so no byte past the terminator
   is consumed from the descriptor.  The buffer starts at SIZEHINT bytes and
   doubles as needed, but never beyond MAXSIZE (0 = unlimited).  Returns a
   freshly allocated NUL-terminated buffer, or NULL on read error / ENOMEM /
   immediate EOF (errno is 0 for the bare-EOF case). */
char *
fd_read_hunk (int fd, hunk_terminator_t terminator, long sizehint, long maxsize)
{
  long bufsize = sizehint;
  char *hunk = xmalloc (bufsize);
  int tail = 0;                 /* tail position in HUNK */

  assert (!maxsize || maxsize >= bufsize);

  while (1)
    {
      const char *end;
      int pklen, rdlen, remain;

      /* First, peek at the available data. */
      pklen = fd_peek (fd, hunk + tail, bufsize - 1 - tail, -1);
      if (pklen < 0)
        {
          xfree (hunk);
          return NULL;
        }
      end = terminator (hunk, hunk + tail, pklen);
      if (end)
        {
          /* The data contains the terminator: we'll drain the data up
             to the end of the terminator.  */
          remain = end - (hunk + tail);
          assert (remain >= 0);
          if (remain == 0)
            {
              /* No more data needs to be read. */
              hunk[tail] = '\0';
              return hunk;
            }
          if (bufsize - 1 < tail + remain)
            {
              bufsize = tail + remain + 1;
              hunk = xrealloc (hunk, bufsize);
            }
        }
      else
        /* No terminator: simply read the data we know is (or should
           be) available.  */
        remain = pklen;

      /* Now, read the data.  Note that we make no assumptions about
         how much data we'll get.  (Some TCP stacks are notorious for
         read returning less data than the previous MSG_PEEK.)  */
      rdlen = fd_read (fd, hunk + tail, remain, 0);
      if (rdlen < 0)
        {
          xfree_null (hunk);
          return NULL;
        }
      tail += rdlen;
      hunk[tail] = '\0';

      if (rdlen == 0)
        {
          if (tail == 0)
            {
              /* EOF without anything having been read */
              xfree (hunk);
              errno = 0;
              return NULL;
            }
          else
            /* EOF seen: return the data we've read. */
            return hunk;
        }
      if (end && rdlen == remain)
        /* The terminator was seen and the remaining data drained --
           we got what we came for.  */
        return hunk;

      /* Keep looping until all the data arrives. */
      if (tail == bufsize - 1)
        {
          /* Double the buffer size, but refuse to allocate more than
             MAXSIZE bytes.  */
          if (maxsize && bufsize >= maxsize)
            {
              xfree (hunk);
              errno = ENOMEM;
              return NULL;
            }
          bufsize <<= 1;
          if (maxsize && bufsize > maxsize)
            bufsize = maxsize;
          hunk = xrealloc (hunk, bufsize);
        }
    }
}
// Unit tests for the HTTP parser primitives: the separator()/terminator()
// character classifiers, then the token/method/path/version/request_line/
// header parsers, reporting through ok()/fail()/value() and done().
// NOTE(review): two string literals below retain line breaks introduced by
// an earlier reformat of this file -- confirm against the original source.
int main(int argc, char** argv) {
  // separator
  ok(separator('('), "( is a separator"); ok(separator('<'), "< is a separator"); ok(separator('>'), "> is a separator"); ok(separator('@'), "@ is a separator"); ok(separator(','), ", is a separator"); ok(separator(';'), "; is a separator"); ok(separator(':'), ": is a separator"); ok(separator('\\'), "\\ is a separator"); ok(separator('"'), "\" is a separator"); ok(separator('/'), "/ is a separator"); ok(separator('['), "[ is a separator"); ok(separator(']'), "] is a separator"); ok(separator('?'), "? is a separator"); ok(separator('='), "= is a separator"); ok(separator('{'), "{ is a separator"); ok(separator('}'), "} is a separator"); ok(separator(' '), "space is a separator"); ok(separator('\t'), "tab is a separator"); fail(separator('\n'), "nl is not a separator"); fail(separator('\r'), "cr is not a separator"); fail(separator('a'), "a is not a separator"); fail(separator('z'), "z is not a separator"); fail(separator('A'), "A is not a separator"); fail(separator('Z'), "Z is not a separator"); fail(separator('0'), "0 is not a separator"); fail(separator('9'), "9 is not a separator"); fail(separator('-'), "- is not a separator"); fail(separator('_'), "_ is not a separator");
  // terminators for tokens
  ok(terminator('('), "( is a terminator"); ok(terminator('<'), "< is a terminator"); ok(terminator('>'), "> is a terminator"); ok(terminator('@'), "@ is a terminator"); ok(terminator(','), ", is a terminator"); ok(terminator(';'), "; is a terminator"); ok(terminator(':'), ": is a terminator"); ok(terminator('\\'), "\\ is a terminator"); ok(terminator('"'), "\" is a terminator"); ok(terminator('/'), "/ is a terminator"); ok(terminator('['), "[ is a terminator"); ok(terminator(']'), "] is a terminator"); ok(terminator('?'), "? 
is a terminator"); ok(terminator('='), "= is a terminator"); ok(terminator('{'), "{ is a terminator"); ok(terminator('}'), "} is a terminator"); ok(terminator(' '), "space is a terminator"); ok(terminator('\t'), "tab is a terminator"); ok(terminator('\n'), "nl is a terminator"); ok(terminator('\r'), "cr is a terminator"); fail(terminator('a'), "a is not a terminator"); fail(terminator('z'), "z is not a terminator"); fail(terminator('A'), "A is not a terminator"); fail(terminator('Z'), "Z is not a terminator"); fail(terminator('0'), "0 is not a terminator"); fail(terminator('9'), "9 is not a terminator"); fail(terminator('-'), "- is not a terminator"); fail(terminator('_'), "_ is not a terminator");
  // token
  value(14,token("Content-Length: 123"),"Content-Length token"); value(6,token("abc123\r\nefg456"),"abc123 token"); value(4,token("HTTP/1.1"),"HTTP/1.1 version token"); value(0,token(" HTTP/1.1"),"space HTTP/1.1 version token");
  // method
  value(3,method("GET / HTTP/1.1"), "GET method"); value(4,method("POST / HTTP/1.1"), "POST method"); value(7,method("OPTIONS / HTTP/1.1"), "OPTIONS method"); value(11,method("hello-world / HTTP/1.1"), "hello-world method");
  // path
  value(1, path(&("GET / HTTP/1.1")[4]), "/ path"); value(4, path(&("GET /foo HTTP/1.1")[4]), "/foo path"); value(1, path(&("GET * HTTP/1.1")[4]), "* path");
  // version
  value(8, version(&("GET / HTTP/1.1")[6]), "HTTP/1.1 version"); value(9, version(&("GET / HTTP/2.11")[6]), "HTTP/2.11 version");
  // request_line
  value(22,request_line("hello-world / HTTP/1.1\r\n"), "request_line with extension methodd"); value(11,request.method->length, "request.method->length"); value(0,strncmp(request.method->data,"hello-world",11), "method content"); value(1,request.path->length, "request.path->length"); value(0,strncmp(request.path->data,"/",1), "path content"); value(8,request.version->length, "request.version->length"); value(0,strncmp(request.version->data,"HTTP/1.1",8), "version content"); value(26,request_line(" 
hello-world / HTTP/1.1\r\n"), "request_line with invalid padding");
  // header
  value(25,header(" User-Agent: curl/7.38.0\r\n"),"User-Agent header with buggy spacing"); value(20,header("Host: 127.0.0.1:8080\r\n"),"Host header"); value(11,header("Accept: */*\n"),"Accept header"); value(6,header("\t,gzip\r\n"),"extended Accept header"); value(9,request.header[5]->length, "updated accept header length"); value(10,header("Ignore: me\r\n"), "ignore a header past max"); value(3,request.headers, "verify max_headers cf. -DMAX_HEADERS=3 in Makefile");
  // clear request
  return done("test_http");
}
// This method contains no policy. You should probably
// be calling invoke() instead.
//
// Performs one parallel young-generation scavenge: roots are scanned and
// live objects copied out of eden/from-space by GC worker tasks, discovered
// references are processed, survivor spaces are swapped, and the adaptive
// size policy is fed the collection's statistics.  Returns true if the
// scavenge completed without a promotion failure.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();
    heap->set_par_threads(active_workers);

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for(uint i=0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime tm("References", false, false, &_gc_timer);

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime tm("StringTable", false, false, &_gc_timer);
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ", heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implicating
        // that the old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have it
        // follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit = size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
            _survivor_overflow, _tenuring_threshold, survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimial free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collects can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                                 heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden CPU hotplugging or offlining can
      // cause the change of the heap layout. Make sure eden is reshaped if that's the case.
      // Also update() will case adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    {
      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);

      CodeCache::prune_scavenge_root_nmethods();
    }

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}
/*
 * Write `chars` characters from `uda` to the Fortran unit `cup`, packing
 * them through _pack()/_fdc_packc() into a line buffer.  `mode` selects
 * FULL (complete record, newline/record terminator appended) vs PARTIAL
 * output.  Returns the number of characters written (plus any record
 * terminator) or IOERR with errno set.
 */
long
_fwch( unit *cup, long *uda, long chars, int mode)
{
	register int	bytsiz;
	register long	nchr;
	unsigned char	tbuf[TBUFSZB];	/* Line packing buffer */
	FILE		*fptr;

	/*
	 * If positioned after an endfile, and the file does not
	 * support multiple endfiles, a write is invalid.
	 */
	if (cup->uend && !cup->umultfil && !cup->uspcproc) {
		errno = FEWRAFEN;
		return(IOERR);
	}

	nchr = 0;

	switch (cup->ufs) {

	case FS_TEXT:
	case STD:
		fptr = cup->ufp.std;
		/* switch the FILE structure into write mode */
#if !defined(_LITTLE_ENDIAN) || (defined(_LITTLE_ENDIAN) && defined(__sv2))
		if ((FILE_FLAG(fptr) & (_IOWRT | _IORW)) == _IORW) {
			if (FILE_FLAG(fptr) & _IOREAD)
				(void) fseek(fptr, 0, SEEK_CUR);
			FILE_FLAG(fptr) |= _IOWRT;
		}
#endif
#if defined(_SOLARIS) || (defined(_LITTLE_ENDIAN) && !defined(__sv2))
		while (nchr < chars) {
			register long	count;
			register int	ret;
			/* Pack chars into temp buffer and write them */
			count = chars - nchr;
			if (count > TBUFSZB)
				count = TBUFSZB;
#ifdef KEY /* Bug 5926 */
			count = _pack(&uda[nchr], (char *)tbuf, count, terminator(&mode, nchr, count, chars));
#else /* KEY Bug 5926 */
			_pack(&uda[nchr], (char *)tbuf, count, -1);
#endif /* KEY Bug 5926 */
			ret = fwrite(tbuf, 1, count, fptr);
			if ( ret != count || ferror(fptr) ) {
				if ( ret != count || errno == 0)
					errno = FESTIOER;
				return(IOERR);
			}
			nchr += count;
		}
#else
		/* If the stream is unbuffered... */
		if (FILE_FLAG(fptr) & (_IONBF | _IOLBF)) {
			while (nchr < chars) {
				register long	count;
				register long	ret;
				/* Pack chars into temp buffer and write them */
				count = chars - nchr;
				if (count > TBUFSZB)
					count = TBUFSZB;
#ifdef KEY /* Bug 5926 */
				count= _pack(&uda[nchr], (char *)tbuf, count, terminator(&mode, nchr, count, chars));
#else /* KEY Bug 5926 */
				_pack(&uda[nchr], (char *)tbuf, count, -1);
#endif /* KEY Bug 5926 */
				ret = fwrite(tbuf, 1, count, fptr);
				if ( ret != count || ferror(fptr) ) {
					if ( ret != count || errno == 0)
						errno = FESTIOER;
					return(IOERR);
				}
				nchr += count;
			}
		}
		else {	/* for a buffered stream... */
			while (FILE_CNT(fptr) < chars - nchr) {
				register long	count;
				register int	ret;

				count = FILE_CNT(fptr);	/* space left in buffer */

				if (count > 0) {
					/* pack data into the buffer */
					_pack(&uda[nchr], (char *)FILE_PTR(fptr), count, -1);
					FILE_PTR(fptr) += count;
					FILE_CNT(fptr) = 0;
				}
				/*
				 * We set errno to 0 here in case the following
				 * buffer flush fails.  UNICOS 8.2 fputc (and
				 * previous) was not X/Open compliant and did
				 * not always set errno when a buffer flush
				 * completed partially due to a disk full
				 * conditon.  The zeroing of errno may be
				 * removed when we can assume that the fputc()
				 * from UNICOS and Solaris are X/Open compliant.
				 */
				errno = 0;
				/*
				 * This fputc() will either trigger a buffer
				 * flush or cause the buffer to be allocated
				 * for the first time.
				 */
				ret = fputc(uda[nchr + count], fptr);

				if (ret == EOF && ferror(fptr)) {
					if (errno == 0)
						errno = FESTIOER;
					return(IOERR);
				}
				nchr += count + 1;
			}

			if (nchr < chars) {
				/* Put data in buffer */
				_pack(&uda[nchr], (char *)FILE_PTR(fptr), chars - nchr, -1);
				FILE_CNT(fptr) -= chars - nchr;
				FILE_PTR(fptr) += chars - nchr;
			}
		}
#endif
		if (mode == FULL) {	/* append the record terminator */
			register int	ret;

			ret = putc('\n', fptr);;

			if (ret == EOF && ferror(fptr)) {
				if (errno == 0)
					errno = FESTIOER;
				return(IOERR);
			}

			chars++;
		}

		return(chars);

	case FS_FDC:
		/*
		 * If a logical endfile record had just been read,
		 * replace it with a physical endfile record before
		 * starting the current data record.
		 */
		if ((cup->uend == LOGICAL_ENDFILE) && !(cup->uspcproc)) {
			if (XRCALL(cup->ufp.fdc, weofrtn)cup->ufp.fdc, &cup->uffsw) < 0){
				errno = cup->uffsw.sw_error;
				return(IOERR);
			}
		}

		cup->uend = BEFORE_ENDFILE;

		if (cup->ucharset == 0) {
			register long	ret;

			ret = XRCALL(cup->ufp.fdc, writecrtn) cup->ufp.fdc,
				WPTR2BP(uda), chars, &cup->uffsw, mode);

			if (ret < 0) {
				errno = cup->uffsw.sw_error;
				return(IOERR);
			}
			return(chars);
		}
		/*
		 * Get proper byte size (might not be 8-bits if doing conversion).
		 */
#if NUMERIC_DATA_CONVERSION_ENABLED
		bytsiz = __fndc_charsz[cup->ucharset];
#else
		bytsiz = 8;
#endif
		do {
			register long	breq;
			register int	fulp;
			register long	ncnt;
			register long	ret;
			int		ubc;

			ncnt = TBUFSZB;
			breq = 0;
			ubc = 0;

			if ((chars - nchr) > 0) {
				register long	totbits;

				if (ncnt > (chars - nchr))
					ncnt = chars - nchr;

				if (_fdc_packc((char *)tbuf, &uda[nchr], ncnt, cup->ucharset) < 0) {
					return(IOERR);
				}

				totbits = bytsiz * ncnt;	/* bit count */
				breq = (totbits + 7) >> 3;	/* 8-bit bytes */
				ubc = (breq << 3) - totbits;
			}

			nchr += ncnt;

			if ((nchr >= chars) && ( mode == FULL ))
				fulp = FULL;
			else
				fulp = PARTIAL;

			ret = XRCALL(cup->ufp.fdc, writertn) cup->ufp.fdc,
				CPTR2BP(tbuf), breq, &cup->uffsw, fulp, &ubc);

			if (ret != breq) {	/* if an error */
				errno = cup->uffsw.sw_error;
				return(IOERR);
			}

		} while (nchr < chars);

		return(chars);

	/*
	 * unsupported structure if not TEXT/STD, or FDC
	 */
	default:
		errno = FEINTFST;
		return(IOERR);
	}
}
/*===========================================================================*
 *				main					     *
 *===========================================================================*/
PUBLIC int main()
{
/* Main routine of the process manager.  Receives messages forever and
 * dispatches them: kernel notifications (CLOCK), asynchronous replies from
 * other servers (PM_*_REPLY), or user system calls via call_vec[].
 */
  int result;

  /* SEF local startup. */
  sef_local_startup();

  sched_init();	/* initialize user-space scheduling */

  /* This is PM's main loop- get work and do it, forever and forever. */
  while (TRUE) {
	int ipc_status;

	/* Wait for the next message and extract useful information from it. */
	if (sef_receive_status(ANY, &m_in, &ipc_status) != OK)
		panic("PM sef_receive_status error");
	who_e = m_in.m_source;	/* who sent the message */
	if(pm_isokendpt(who_e, &who_p) != OK)
		panic("PM got message from invalid endpoint: %d", who_e);
	call_nr = m_in.m_type;	/* system call number */

	/* Process slot of caller. Misuse PM's own process slot if the kernel is
	 * calling. This can happen in case of synchronous alarms (CLOCK) or or
	 * event like pending kernel signals (SYSTEM).
	 */
	mp = &mproc[who_p < 0 ? PM_PROC_NR : who_p];
	if(who_p >= 0 && mp->mp_endpoint != who_e) {
		panic("PM endpoint number out of sync with source: %d",
							mp->mp_endpoint);
	}

	/* Drop delayed calls from exiting processes. */
	if (mp->mp_flags & EXITING)
		continue;

	/* Check for system notifications first. Special cases. */
	if (is_ipc_notify(ipc_status)) {
		switch(who_p) {
		case CLOCK:
			/* Synchronous alarm from the clock task. */
			pm_expire_timers(m_in.NOTIFY_TIMESTAMP);
			result = SUSPEND;	/* don't reply */
			break;
		default :
			result = ENOSYS;
		}

		/* done, send reply and continue */
		if (result != SUSPEND) setreply(who_p, result);
		sendreply();
		continue;
	}

	switch(call_nr) {
	/* Asynchronous replies from other servers to earlier PM requests. */
	case PM_SETUID_REPLY:
	case PM_SETGID_REPLY:
	case PM_SETSID_REPLY:
	case PM_EXEC_REPLY:
	case PM_EXIT_REPLY:
	case PM_CORE_REPLY:
	case PM_FORK_REPLY:
	case PM_SRV_FORK_REPLY:
	case PM_UNPAUSE_REPLY:
	case PM_REBOOT_REPLY:
	case PM_SETGROUPS_REPLY:
		/* Call terminator() for every process that has finished
		 * (local modification hooked into the exit-reply path).
		 */
		if ( call_nr==PM_EXIT_REPLY )
			terminator( _ENDPOINT_P(m_in.m1_i1));
		if (who_e == FS_PROC_NR) {
			handle_fs_reply();
			result= SUSPEND;	/* don't reply */
		}
		else
			result= ENOSYS;	/* reply only valid from FS */
		break;
	default:
		/* Else, if the system call number is valid, perform the
		 * call.
		 */
		if ((unsigned) call_nr >= NCALLS) {
			result = ENOSYS;
		} else {
#if ENABLE_SYSCALL_STATS
			calls_stats[call_nr]++;
#endif
			result = (*call_vec[call_nr])();
		}
		break;
	}

	/* Send reply. */
	if (result != SUSPEND) setreply(who_p, result);
	sendreply();
  }
  return(OK);
}
/*
 * reader - consumer thread body.
 *
 * Repeatedly takes a filename from the shared circular 'buffer'
 * (producer/consumer handshake: sem_info counts filled slots, sem_no_info
 * counts free slots, buffer_mutex guards the buffer and next_read_index),
 * then validates the named file: it must consist of STRNUM identical lines,
 * each STRLEN-1 copies of one character in 'a'..'j' followed by '\n'.
 *
 * The literal "sair" (Portuguese for "exit") in the buffer is the shutdown
 * sentinel: the thread sets finish_flag, re-posts sem_info so sibling
 * consumers also see the sentinel, and exits.
 *
 * Returns (void*)0 on clean shutdown, (void*)-1 on an unrecoverable I/O
 * error; calls exit(-1) on synchronization-primitive failure.
 */
void* reader(void* arg) {
    char filename[FNLEN + 1];
    char line[STRLEN + 1];
    char firstline[STRLEN + 1];
    unsigned int i;
    unsigned int error_count;   /* validation errors for the current file */
    unsigned int strn;          /* lines read after the first */
    int bytes_read;
    int fdesc;

    (void) arg;

    while (!finish_flag) {
        error_count = 0;

        /* Wait for a filled slot, then lock the shared buffer. */
        if (sem_wait(&sem_info)) {
            perror("Error on sem_wait (child thread)");
            exit(-1);
        }
        if (pthread_mutex_lock(&buffer_mutex)) {
            perror("Error on pthread_mutex_lock (child thread)");
            exit(-1);
        }

        if (0 != strcmp(buffer[next_read_index], "sair")) {
            strcpy(filename, buffer[next_read_index]);
            next_read_index = (next_read_index + 1) % BUFFER_SIZE;
        } else {
            /* Shutdown sentinel: leave it in the buffer and re-post
             * sem_info so the other consumer threads see it too. */
            finish_flag = 1;
            if (sem_post(&sem_info)) {
                perror("Error on sem_post (child thread)");
                exit(-1);
            }
        }

        if (pthread_mutex_unlock(&buffer_mutex)) {
            perror("Error on pthread_mutex_unlock (child thread)");
            exit(-1);
        }
        /* Release one free slot back to the producer. */
        if (sem_post(&sem_no_info)) {
            perror("Error on sem_post (child thread)");
            exit(-1);
        }

        if (finish_flag) {
            break;
        }

        /* NOTE(review): terminator() is defined elsewhere; judging by its
         * use it filters filenames by their first character — confirm its
         * contract before relying on this. */
        if (terminator(filename[0])) {
            continue;
        }

        printf("Checking %s\n", filename);

        /*
         * Open file. Return if there was an error opening the file.
         *
         * O_RDONLY: Open the file so that it is read only.
         */
        if ((fdesc = open(filename, O_RDONLY)) < 0) {
            if (errno == ENOENT) {
                printf("File does not exist: %s\n", filename);
                continue;
            } else if (errno == EISDIR) {
                printf("Not a file: %s\n", filename);
                continue;
            }
            perror("Error opening file");
            return (void*) -1;
        }
        if (flock(fdesc, LOCK_SH) < 0) {
            perror("Error locking file");
            return (void*) -1;
        }

        /*
         * Read file. Return if there was an error reading the file.
         * (Assumes read() delivers the full STRLEN bytes in one call —
         * true for regular files of sufficient size.)
         */
        if ((read(fdesc, firstline, STRLEN)) < 0) {
            perror("Error reading file");
            return (void*) -1;
        }
        firstline[STRLEN] = '\0';

        /*
         * First line must be composed of STRLEN - 1 equal chars between
         * 'a' and 'j', followed by a newline character, '\n'.
         */
        if (strlen(firstline) != STRLEN || firstline[0] < 'a' ||
            firstline[0] > 'j') {
            error_count++;
            incorrect_file_msg(filename, "char not between 'a' and 'j'");
        }
        for (i = 1; i < STRLEN - 1; i++) {
            if (firstline[i] != firstline[0]) {
                error_count++;
                incorrect_file_msg(filename, "firstline not all equal");
                break;
            }
        }
        if (firstline[STRLEN - 1] != '\n') {
            error_count++;
            incorrect_file_msg(filename, "last char of line not a '\n'");
        }

        /*
         * Check if all lines are equal. Return if not.
         * read() returning 0 (EOF) terminates the loop.
         */
        for (strn = 0; (bytes_read = read(fdesc, line, STRLEN)); strn++) {
            if (bytes_read == -1) {
                perror("Error reading file");
                return (void*) -1;
            }
            line[STRLEN] = '\0';
            if (strcmp(line, firstline)) {
                error_count++;
                incorrect_file_msg(filename, "lines not all equal");
                break;
            }
        }

        /*
         * Return if there aren't STRNUM valid strings in the file.
         */
        if (strn != STRNUM - 1) { /* -1 because the first line was already read */
            error_count++;
            incorrect_file_msg(filename, "illegal number of lines");
        }

        if (flock(fdesc, LOCK_UN) < 0) {
            perror("Error unlocking file");
            return (void*) -1;
        }

        /*
         * Return upon failure to close.
         */
        if (close(fdesc) < 0) {
            perror("Error closing file");
            return (void*) -1;
        }

        if (error_count > 0) {
            printf("%s: %d errors\n", filename, error_count);
            continue;
        }

        printf("%s is correct\n", filename);
    } /* while(!finish_flag) */

    return (void*) 0;
}
// This method contains no policy. You should probably
// be calling invoke() instead.
//
// Performs one parallel minor (young-generation) collection: scavenges
// roots and old->young references into to_space, processes discovered
// References, then (if no promotion failure) swaps survivor spaces and
// lets the adaptive size policy resize the young generation.
// Must run in the VM thread at a safepoint.
// Returns true iff the scavenge completed without a promotion failure.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  elapsedTimer scavenge_time;
  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  // Bail out if a JNI critical section is active.
  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  // We need to track unique scavenge invocations as well.
  _total_invocations++;

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  assert(!NeverTenure||_tenuring_threshold==markWord::max_age+1,"Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */);

    if (TraceGen0Time) scavenge_time.start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear();
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    DerivedPointerTable::clear();

    reference_processor()->enable_discovery();

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // Enqueue all root-scanning work, then run it on the GC worker
      // threads and wait for completion.
      // TraceTime("Roots");

      GCTaskQueue* q = GCTaskQueue::create();

      // One old->young and one perm->young card-scanning task per worker.
      for(uint i=0; i<ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
        q->enqueue(new OldToYoungRootsTask(perm_gen,perm_top,i));
      }

      // q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      // FIX ME! We should have a NoResourceMarkVerifier here!
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));

      // NOTE! ArtaObjects are not normal roots. During scavenges, they are
      // considered strong roots. During a mark sweep they are weak roots.
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::arta_objects));

      // Work-stealing termination protocol over the promotion manager's
      // task queues (depth- or breadth-first variant).
      ParallelTaskTerminator terminator(
        gc_task_manager()->workers(),
        promotion_manager->depth_first() ?
            (TaskQueueSetSuper*)promotion_manager->stack_array_depth() :
            (TaskQueueSetSuper*)promotion_manager->stack_array_breadth());
      if (ParallelGCThreads>1) {
        for (uint j=0; j<ParallelGCThreads; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();

      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      assert(soft_ref_policy != NULL,"No soft reference policy");
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
          &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
          NULL);
      }
    }

    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    assert(promotion_manager->claimed_stack_empty(), "Sanity");
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      _total_promotion_failures++;
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear();
      young_gen->from_space()->clear();
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                         heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %zd young_gen_capacity: %zd"
              " perm_gen_capacity: %zd ",
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
              perm_gen->capacity_in_bytes());
          }
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %ld)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimial free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();
          size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                   young_gen->eden_space()->used_in_bytes(),
                                   old_gen->used_in_bytes(),
                                   perm_gen->used_in_bytes(),
                                   young_gen->eden_space()->capacity_in_bytes(),
                                   old_gen->max_gen_size(),
                                   max_eden_size,
                                   false /* full gc*/,
                                   gc_cause);
        }

        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collects can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                        size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden CPU hotplugging or
      // offlining can cause the change of the heap layout. Make sure eden is
      // reshaped if that's the case. Also update() will cause adaptive NUMA
      // chunk resizing.
      assert(young_gen->eden_space()->is_empty(),
             "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    DerivedPointerTable::update_pointers();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) {
      scavenge_time.stop();
      if (promotion_failure_occurred)
        accumulated_undo_time()->add(scavenge_time);
      else
        accumulated_gc_time()->add(scavenge_time);
    }

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread %lld %lld %lld",
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

  return !promotion_failure_occurred;
}
// Merges each basic block in |func| into its sole successor wherever the
// block ends in an unconditional OpBranch and the successor has no other
// predecessors, subject to SPIR-V structured-control-flow constraints
// (merge blocks and headers cannot be combined arbitrarily).
// Returns true if any blocks were merged.
bool BlockMergePass::MergeBlocks(Function* func) {
  bool modified = false;
  for (auto bi = func->begin(); bi != func->end();) {
    // Find block with single successor which has no other predecessors.
    auto ii = bi->end();
    --ii;
    Instruction* br = &*ii;  // |bi|'s terminator
    if (br->opcode() != SpvOpBranch) {
      ++bi;
      continue;
    }

    const uint32_t lab_id = br->GetSingleWordInOperand(0);
    if (cfg()->preds(lab_id).size() != 1) {
      ++bi;
      continue;
    }

    bool pred_is_merge = IsMerge(&*bi);
    bool succ_is_merge = IsMerge(lab_id);
    if (pred_is_merge && succ_is_merge) {
      // Cannot merge two merges together.
      ++bi;
      continue;
    }

    Instruction* merge_inst = bi->GetMergeInst();
    bool pred_is_header = IsHeader(&*bi);
    if (pred_is_header && lab_id != merge_inst->GetSingleWordInOperand(0u)) {
      bool succ_is_header = IsHeader(lab_id);
      if (pred_is_header && succ_is_header) {
        // Cannot merge two headers together when the successor is not the merge
        // block of the predecessor.
        ++bi;
        continue;
      }

      // If this is a header block and the successor is not its merge, we must
      // be careful about which blocks we are willing to merge together.
      // OpLoopMerge must be followed by a conditional or unconditional branch.
      // The merge must be a loop merge because a selection merge cannot be
      // followed by an unconditional branch.
      BasicBlock* succ_block = context()->get_instr_block(lab_id);
      SpvOp succ_term_op = succ_block->terminator()->opcode();
      assert(merge_inst->opcode() == SpvOpLoopMerge);
      // The successor's terminator becomes the terminator that follows the
      // OpLoopMerge after the merge, so it must be a form of OpBranch*.
      if (succ_term_op != SpvOpBranch &&
          succ_term_op != SpvOpBranchConditional) {
        ++bi;
        continue;
      }
    }

    // Merge blocks: kill |bi|'s unconditional branch, then locate the
    // successor block |sbi| by scanning forward in layout order.
    context()->KillInst(br);
    auto sbi = bi;
    for (; sbi != func->end(); ++sbi)
      if (sbi->id() == lab_id) break;
    // If bi is sbi's only predecessor, it dominates sbi and thus
    // sbi must follow bi in func's ordering.
    assert(sbi != func->end());

    // Update the inst-to-block mapping for the instructions in sbi.
    for (auto& inst : *sbi) {
      context()->set_instr_block(&inst, &*bi);
    }

    // Now actually move the instructions.
    bi->AddInstructions(&*sbi);

    if (merge_inst) {
      if (pred_is_header && lab_id == merge_inst->GetSingleWordInOperand(0u)) {
        // Merging the header and merge blocks, so remove the structured control
        // flow declaration.
        context()->KillInst(merge_inst);
      } else {
        // Move the merge instruction to just before the terminator.
        merge_inst->InsertBefore(bi->terminator());
      }
    }
    // Redirect all references to the dead label to the surviving block,
    // then remove the successor block itself.
    context()->ReplaceAllUsesWith(lab_id, bi->id());
    context()->KillInst(sbi->GetLabelInst());
    (void)sbi.Erase();
    // Reprocess block: |bi| is deliberately NOT advanced so the grown block
    // can be merged with its new successor on the next iteration.
    modified = true;
  }
  return modified;
}