int main(int argc, char *argv[]) { log_sweep(rpm_filter, NULL, 1, FS/2, 1000, log(10)); struct lp2_filter f2 = { 0 }; lp2_set_fc(&f2, FILTER_CRITICALDAMPED, 16.666 / FS); log_sweep( lp2_filter, &f2, 1, FS/2, 1000, log(10) ); /* struct lp1_filter f1 = { 0 }; struct lp2_filter f2 = { 0 }; lp1_set_fc(&f1, 100.0 / FS); log_sweep( lp1_filter, &f1, 1, FS/2, 1000, log(10) ); lp2_set_fc(&f2, FILTER_CRITICALDAMPED, 100.0 / FS); log_sweep( lp2_filter, &f2, 1, FS/2, 1000, log(10) ); lp2_set_fc(&f2, FILTER_CHEBYSHEV_0_5, 100.0 / FS); log_sweep( lp2_filter, &f2, 1, FS/2, 1000, log(10) ); */ return EXIT_SUCCESS; }
// Code cache unloading: when compilers notice the code cache is getting full, // they will call a vm op that comes here. This code attempts to speculatively // unload the oldest half of the nmethods (based on the compile job id) by // saving the old code in a list in the CodeCache. Then // execution resumes. If a method so marked is not called by the second sweeper // stack traversal after the current one, the nmethod will be marked non-entrant and // got rid of by normal sweeping. If the method is called, the methodOop's // _code field is restored and the methodOop/nmethod // go back to their normal state. void NMethodSweeper::handle_full_code_cache(bool is_full) { // Only the first one to notice can advise us to start early cleaning if (!is_full){ jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 ); if (old != 0) { return; } } if (is_full) { // Since code cache is full, immediately stop new compiles bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation); if (!did_set) { // only the first to notice can start the cleaning, // others will go back and block return; } set_was_full(true); // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up jlong now = os::javaTimeMillis(); jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000; jlong curr_interval = now - _last_was_full; if (curr_interval < max_interval) { _rescan = true; log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'", curr_interval/1000); return; } } VM_HandleFullCodeCache op(is_full); VMThread::execute(&op); // rescan again as soon as possible _rescan = true; }
// Executed at a safepoint: walks all Java thread stacks via
// mark_activation_closure (presumably marking active nmethods so the sweeper
// leaves them alone -- semantics live in the closure, defined elsewhere),
// decides whether the upcoming sweep pass should run at all (_do_sweep), and
// with UseCodeCacheFlushing may restart the compiler once pressure subsides.
void NMethodSweeper::scan_stacks() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  if (!MethodFlushing) return;  // sweeping disabled entirely
  _do_sweep = true;

  // No need to synchronize access, since this is always executed at a
  // safepoint.  If we aren't in the middle of scan and a rescan
  // hasn't been requested then just return.  If UseCodeCacheFlushing is on and
  // code cache flushing is in progress, don't skip sweeping to help make
  // progress clearing space in the code cache.
  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing &&
      !CompileBroker::should_compile_new_jobs())) {
    _do_sweep = false;
    return;
  }

  // Make sure CompiledIC_lock in unlocked, since we might update some
  // inline caches.  If it is, we just bail-out and try later.
  if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;

  // Check for restart: the cached _current must still be a valid blob
  // (NULL is also accepted by this check and means "start a new traversal").
  assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
  if (_current == NULL) {
    // Begin a fresh traversal of the whole code cache.
    _seen        = 0;
    _invocations = NmethodSweepFraction;
    _current     = CodeCache::first_nmethod();
    _traversals += 1;
    if (PrintMethodFlushing) {
      tty->print_cr("### Sweep: stack traversal %d", _traversals);
    }
    Threads::nmethods_do(&mark_activation_closure);

    // reset the flags since we started a scan from the beginning.
    _rescan = false;
    _locked_seen = 0;
    _not_entrant_seen_on_stack = 0;
  }

  if (UseCodeCacheFlushing) {
    if (!CodeCache::needs_flushing()) {
      // Pressure is off: clear the early-cleaning advice flag.
      // scan_stacks() runs during a safepoint, no race with setters.
      _advise_to_sweep = 0;
    }

    if (was_full()) {
      // There was some progress, so attempt to restart the compiler -- but
      // only if the cache no longer needs flushing AND we have been out of
      // the full state for more than MinCodeCacheFlushingInterval seconds.
      jlong now           = os::javaTimeMillis();
      jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
      jlong curr_interval = now - _last_was_full;
      if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
        CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
        set_was_full(false);

        // Update the _last_was_full time so we can tell how fast the
        // code cache is filling up.
        _last_was_full = os::javaTimeMillis();

        log_sweep("restart_compiler");
      }
    }
  }
}
// Sweeps one fraction of the code cache per invocation, resuming from
// _current; after NmethodSweepFraction invocations the whole cache has been
// visited.  Runs outside a safepoint; the CodeCache_lock is held while
// walking but dropped around each process_nmethod() call.
void NMethodSweeper::sweep_code_cache() {
#ifdef ASSERT
  // Only read when PrintMethodFlushing is set, matching the guarded
  // assignment here -- so never used uninitialized.
  jlong sweep_start;
  if (PrintMethodFlushing) {
    sweep_start = os::javaTimeMillis();
  }
#endif
  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
  }

  // We want to visit all nmethods after NmethodSweepFraction
  // invocations so divide the remaining number of nmethods by the
  // remaining number of invocations.  This is only an estimate since
  // the number of nmethods changes during the sweep so the final
  // stage must iterate until it there are no more nmethods.
  int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;

  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    // The last invocation iterates until there are no more nmethods.
    for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {

      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod.  Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      nmethod* next = CodeCache::next_nmethod(_current);

      // Now ready to process nmethod and give up CodeCache_lock.
      {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        process_nmethod(_current);
      }
      _seen++;
      _current = next;
    }
  }

  // Unless this is the final invocation, _current should mark the resume
  // point for the next call.
  assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");

  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
    // we've completed a scan without making progress but there were
    // nmethods we were unable to process either because they were
    // locked or were still on stack.  We don't have to aggresively
    // clean them up so just stop scanning.  We could scan once more
    // but that complicates the control logic and it's unlikely to
    // matter much.
    if (PrintMethodFlushing) {
      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
    }
  }

#ifdef ASSERT
  if(PrintMethodFlushing) {
    jlong sweep_end = os::javaTimeMillis();
    tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
  }
#endif

  if (_invocations == 1) {
    log_sweep("finished");
  }
}