Example #1
void Test_configure_stdout() {
  ResourceMark rm;
  LogHandle(logging) log;
  LogOutput* stdoutput = LogOutput::Stdout;

  // Save current stdout config and clear it
  char* saved_config = os::strdup_check_oom(stdoutput->config_string());
  LogConfiguration::parse_log_arguments("stdout", "all=off", NULL, NULL, log.error_stream());

  // Enable 'logging=info', verifying it has been set
  LogConfiguration::configure_stdout(LogLevel::Info, true, LOG_TAGS(logging));
  assert_str_eq("logging=info,", stdoutput->config_string());
  assert(log_is_enabled(Info, logging), "logging was not properly enabled");

  // Enable 'gc=debug' (no wildcard), verifying no other tags are enabled
  LogConfiguration::configure_stdout(LogLevel::Debug, true, LOG_TAGS(gc));
  // No '+' character means only single tags are enabled, and no combinations
  assert_char_not_in('+', stdoutput->config_string());
  assert(log_is_enabled(Debug, gc), "logging was not properly enabled");

  // Enable 'gc*=trace' (with wildcard), verifying at least one tag combination is enabled (gc+...)
  LogConfiguration::configure_stdout(LogLevel::Trace, false, LOG_TAGS(gc));
  assert_char_in('+', stdoutput->config_string());
  assert(log_is_enabled(Trace, gc), "logging was not properly enabled");

  // Disable 'gc*' and 'logging', verifying all logging is properly disabled
  LogConfiguration::configure_stdout(LogLevel::Off, false, LOG_TAGS(gc));
  LogConfiguration::configure_stdout(LogLevel::Off, true, LOG_TAGS(logging));
  assert_str_eq("all=off", stdoutput->config_string());

  // Restore saved configuration
  LogConfiguration::parse_log_arguments("stdout", saved_config, NULL, NULL, log.error_stream());
  os::free(saved_config);
}
Example #2
// Unpacks one or more frames into user-supplied buffers.
// Updates the end index, and returns the number of unpacked frames.
// Always start with the existing vfst.method and bci.
// Do not call vfst.next to advance over the last returned value.
// In other words, do not leave any stale data in the vfst.
//
// Parameters:
//   mode             Restricts which frames are decoded.
//   BaseFrameStream  stream of frames
//   max_nframes      Maximum number of frames to be filled.
//   start_index      Start index to the user-supplied buffers.
//   frames_array     Buffer to store Class or StackFrame in, starting at start_index.
//                    frames array is a Class<?>[] array when only getting caller
//                    reference, and a StackFrameInfo[] array (or derivative)
//                    otherwise. It should never be null.
//   end_index        End index to the user-supplied buffers with unpacked frames.
//
// Returns the number of frames whose information was transferred into the buffers.
//
int StackWalk::fill_in_frames(jlong mode, BaseFrameStream& stream,
                              int max_nframes, int start_index,
                              objArrayHandle  frames_array,
                              int& end_index, TRAPS) {
    log_debug(stackwalk)("fill_in_frames limit=%d start=%d frames length=%d",
                         max_nframes, start_index, frames_array->length());
    assert(max_nframes > 0, "invalid max_nframes");
    assert(start_index + max_nframes <= frames_array->length(), "oob");

    int frames_decoded = 0;
    for (; !stream.at_end(); stream.next()) {
        Method* method = stream.method();

        if (method == NULL) continue;

        // skip hidden frames for default StackWalker option (i.e. SHOW_HIDDEN_FRAMES
        // not set) and when StackWalker::getCallerClass is called
        if (!ShowHiddenFrames && (skip_hidden_frames(mode) || get_caller_class(mode))) {
            if (method->is_hidden()) {
                if (log_is_enabled(Debug, stackwalk)) {
                    ResourceMark rm(THREAD);
                    outputStream* st = Log(stackwalk)::debug_stream();
                    st->print("  hidden method: ");
                    method->print_short_name(st);
                    st->cr();
                }
                continue;
            }
        }

        int index = end_index++;
        if (log_is_enabled(Debug, stackwalk)) {
            ResourceMark rm(THREAD);
            outputStream* st = Log(stackwalk)::debug_stream();
            st->print("  %d: frame method: ", index);
            method->print_short_name(st);
            st->print_cr(" bci=%d", stream.bci());
        }

        if (!need_method_info(mode) && get_caller_class(mode) &&
                index == start_index && method->caller_sensitive()) {
            ResourceMark rm(THREAD);
            THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(),
                        err_msg("StackWalker::getCallerClass called from @CallerSensitive %s method",
                                method->name_and_sig_as_C_string()));
        }
        // fill in StackFrameInfo and initialize MemberName
        stream.fill_frame(index, frames_array, method, CHECK_0);
        if (++frames_decoded >= max_nframes)  break;
    }
    return frames_decoded;
}
Example #3
File: log.c Project: saper/em400
// -----------------------------------------------------------------------
int log_enable()
{
	if (log_is_enabled()) {
		return E_OK;
	}

	// Open log file
	log_f = fopen(log_file, "a");
	if (!log_f) {
		return E_FILE_OPEN;
	}

	log_log_timestamp(L_EM4H, 0, "Opened log");

	// start up flusher thread
	log_flusher_stop = 0;
	if (pthread_create(&log_flusher_th, NULL, log_flusher, NULL) != 0) {
		fclose(log_f);
		return E_THREAD;
	}

	atom_store_release(&log_enabled, 1);

	return E_OK;
}
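
The flusher thread handed to pthread_create() above is not shown on this page. A minimal sketch of what it might look like, reusing the log_f, log_mutex and log_flusher_stop variables that appear in log_enable()/log_disable(); the loop body and the flush interval are assumptions, not the em400 sources (requires <pthread.h>, <stdio.h> and <unistd.h>):

// Hypothetical flusher body: periodically flush the log file until
// log_disable() sets log_flusher_stop under log_mutex.
static void * log_flusher(void *arg)
{
	(void) arg;
	while (1) {
		pthread_mutex_lock(&log_mutex);
		int stop = log_flusher_stop;
		pthread_mutex_unlock(&log_mutex);
		if (stop) {
			break;
		}
		fflush(log_f);      // push buffered log lines to disk
		usleep(100 * 1000); // assumed ~100 ms flush interval
	}
	return NULL;
}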
Example #4
void VM_Version_init() {
    VM_Version::initialize();

    if (log_is_enabled(Info, os, cpu)) {
        char buf[1024];
        ResourceMark rm;
        outputStream* log = Log(os, cpu)::info_stream();
        os::print_cpu_info(log, buf, sizeof(buf));
    }
}
Example #5
File: cpu.c Project: jakubfi/em400
// -----------------------------------------------------------------------
int ectl_log_state_get()
{
	int state = log_is_enabled();
	LOG(L_ECTL, "ECTL log state get: %i", state);
	if (state) {
		return ECTL_ON;
	} else {
		return ECTL_OFF;
	}
}
Example #6
uint ageTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters) {
  size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
  uint result;

  if (AlwaysTenure || NeverTenure) {
    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
           "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is " UINTX_FORMAT, MaxTenuringThreshold);
    result = MaxTenuringThreshold;
  } else {
    size_t total = 0;
    uint age = 1;
    assert(sizes[0] == 0, "no objects with age zero should be recorded");
    while (age < table_size) {
      total += sizes[age];
      // Check whether including objects of age 'age' pushed the total past the
      // desired size; if so, 'age' is the new threshold.
      if (total > desired_survivor_size) break;
      age++;
    }
    result = age < MaxTenuringThreshold ? age : MaxTenuringThreshold;
  }

  log_debug(gc, age)("Desired survivor size " SIZE_FORMAT " bytes, new threshold " UINTX_FORMAT " (max threshold " UINTX_FORMAT ")",
                     desired_survivor_size*oopSize, (uintx) result, MaxTenuringThreshold);

  if (log_is_enabled(Trace, gc, age) || UsePerfData) {
    size_t total = 0;
    uint age = 1;
    while (age < table_size) {
      total += sizes[age];
      if (sizes[age] > 0) {
        log_trace(gc, age)("- age %3u: " SIZE_FORMAT_W(10) " bytes, " SIZE_FORMAT_W(10) " total",
                            age, sizes[age]*oopSize, total*oopSize);
      }
      if (UsePerfData) {
        _perf_sizes[age]->set_value(sizes[age]*oopSize);
      }
      age++;
    }
    if (UsePerfData) {
      gc_counters->tenuring_threshold()->set_value(result);
      gc_counters->desired_survivor_size()->set_value(
        desired_survivor_size*oopSize);
    }
  }

  return result;
}
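
To make the arithmetic above concrete, here is a self-contained toy version of the threshold search (illustrative only, not HotSpot code; the 1 MB survivor space, the 50% target ratio and the per-age byte counts are invented, and the final clamp against MaxTenuringThreshold is omitted):

#include <stdio.h>

int main() {
  // 1 MB survivor space at a 50% target ratio -> 512 KB desired survivor size.
  const size_t desired_survivor_size = (size_t)((1024.0 * 1024.0 * 50) / 100);
  // Bytes recorded per age; index 0 is unused, as in the real table.
  const size_t sizes[] = { 0, 300 * 1024, 200 * 1024, 100 * 1024 };
  const unsigned table_size = sizeof(sizes) / sizeof(sizes[0]);

  size_t total = 0;
  unsigned age = 1;
  while (age < table_size) {
    total += sizes[age];
    if (total > desired_survivor_size) break; // 300K+200K=500K fits; +100K does not
    age++;
  }
  printf("new tenuring threshold = %u\n", age); // prints 3
  return 0;
}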
Example #7
GCTraceCPUTime::GCTraceCPUTime() :
  _active(log_is_enabled(Info, gc, cpu)),
  _starting_user_time(0.0),
  _starting_system_time(0.0),
  _starting_real_time(0.0)
{
  if (_active) {
    bool valid = os::getTimesSecs(&_starting_real_time,
                                  &_starting_user_time,
                                  &_starting_system_time);
    if (!valid) {
      log_warning(gc, cpu)("TraceCPUTime: os::getTimesSecs() returned invalid result");
      _active = false;
    }
  }
}
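
The matching destructor is not part of this example. Since GCTraceCPUTime is an RAII timer, its other half presumably re-reads the clocks and logs the deltas; a sketch along those lines, assuming the same os::getTimesSecs() call and gc+cpu tags used above (the exact message format is a guess):

GCTraceCPUTime::~GCTraceCPUTime() {
  if (_active) {
    double real_time, user_time, system_time;
    bool valid = os::getTimesSecs(&real_time, &user_time, &system_time);
    if (valid) {
      // Report the CPU time consumed between construction and destruction.
      log_info(gc, cpu)("User=%3.2fs Sys=%3.2fs Real=%3.2fs",
                        user_time - _starting_user_time,
                        system_time - _starting_system_time,
                        real_time - _starting_real_time);
    } else {
      log_warning(gc, cpu)("TraceCPUTime: os::getTimesSecs() returned invalid result");
    }
  }
}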
Example #8
void VM_Operation::evaluate() {
  ResourceMark rm;
  outputStream* debugstream;
  bool enabled = log_is_enabled(Debug, vmoperation);
  if (enabled) {
    debugstream = LogHandle(vmoperation)::debug_stream();
    debugstream->print("begin ");
    print_on_error(debugstream);
    debugstream->cr();
  }
  doit();
  if (enabled) {
    debugstream->print("end ");
    print_on_error(debugstream);
    debugstream->cr();
  }
}
Example #9
File: log.c Project: saper/em400
// -----------------------------------------------------------------------
void log_disable()
{
	if (!log_is_enabled()) {
		return;
	}

	log_log_timestamp(L_EM4H, 0, "Closing log");

	atom_store_release(&log_enabled, 0);

	// stop flusher thread
	pthread_mutex_lock(&log_mutex);
	log_flusher_stop = 1;
	pthread_mutex_unlock(&log_mutex);
	pthread_join(log_flusher_th, NULL);

	if (log_f) {
		fclose(log_f);
	}
}
Example #10
void G1HeapTransition::print() {
  Data after(_g1_heap);

  size_t eden_capacity_length_after_gc = _g1_heap->g1_policy()->young_list_target_length() - after._survivor_length;
  size_t survivor_capacity_length_after_gc = _g1_heap->g1_policy()->max_survivor_regions();

  DetailedUsage usage;
  if (log_is_enabled(Trace, gc, heap)) {
    DetailedUsageClosure blk;
    _g1_heap->heap_region_iterate(&blk);
    usage = blk._usage;
    assert(usage._eden_region_count == 0, "Expected no eden regions, but got " SIZE_FORMAT, usage._eden_region_count);
    assert(usage._survivor_region_count == after._survivor_length, "Expected survivors to be " SIZE_FORMAT " but was " SIZE_FORMAT,
        after._survivor_length, usage._survivor_region_count);
    assert(usage._old_region_count == after._old_length, "Expected old to be " SIZE_FORMAT " but was " SIZE_FORMAT,
        after._old_length, usage._old_region_count);
    assert(usage._humongous_region_count == after._humongous_length, "Expected humongous to be " SIZE_FORMAT " but was " SIZE_FORMAT,
        after._humongous_length, usage._humongous_region_count);
  }

  log_info(gc, heap)("Eden regions: " SIZE_FORMAT "->" SIZE_FORMAT "("  SIZE_FORMAT ")",
                     _before._eden_length, after._eden_length, eden_capacity_length_after_gc);
  log_trace(gc, heap)(" Used: 0K, Waste: 0K");

  log_info(gc, heap)("Survivor regions: " SIZE_FORMAT "->" SIZE_FORMAT "("  SIZE_FORMAT ")",
                     _before._survivor_length, after._survivor_length, survivor_capacity_length_after_gc);
  log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
      usage._survivor_used / K, ((after._survivor_length * HeapRegion::GrainBytes) - usage._survivor_used) / K);

  log_info(gc, heap)("Old regions: " SIZE_FORMAT "->" SIZE_FORMAT,
                     _before._old_length, after._old_length);
  log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
      usage._old_used / K, ((after._old_length * HeapRegion::GrainBytes) - usage._old_used) / K);

  log_info(gc, heap)("Humongous regions: " SIZE_FORMAT "->" SIZE_FORMAT,
                     _before._humongous_length, after._humongous_length);
  log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
      usage._humongous_used / K, ((after._humongous_length * HeapRegion::GrainBytes) - usage._humongous_used) / K);

  MetaspaceAux::print_metaspace_change(_before._metaspace_used_bytes);
}
Example #11
void ClassLoadingService::notify_class_unloaded(InstanceKlass* k) {
  DTRACE_CLASSLOAD_PROBE(unloaded, k, false);
  // Classes that can be unloaded must be non-shared
  _classes_unloaded_count->inc();

  if (UsePerfData) {
    // add the class size
    size_t size = compute_class_size(k);
    _classbytes_unloaded->inc(size);

    // Compute method size & subtract from running total.
    // We are called during phase 1 of mark sweep, so it's
    // still ok to iterate through Method*s here.
    Array<Method*>* methods = k->methods();
    for (int i = 0; i < methods->length(); i++) {
      _class_methods_size->inc(-methods->at(i)->size());
    }
  }

  if (log_is_enabled(Info, classunload)) {
    ResourceMark rm;
    log_info(classunload)("unloading class %s " INTPTR_FORMAT , k->external_name(), p2i(k));
  }
}
Example #12
void JvmtiTrace::initialize() {
  if (_initialized) {
    return;
  }
  SafeResourceMark rm;

  const char *very_end;
  const char *curr;
  if (TraceJVMTI != NULL) {
    curr = TraceJVMTI;
  } else {
    curr = "";  // hack in fixed tracing here
  }

  // Enable UL for JVMTI tracing
  if (strlen(curr) > 0) {
    if (!log_is_enabled(Trace, jvmti)) {
      log_warning(arguments)("-XX:+TraceJVMTI specified, "
         "but no log output configured for the 'jvmti' tag on Trace level. "
         "Defaulting to -Xlog:jvmti=trace");
      LogConfiguration::configure_stdout(LogLevel::Trace, true, LOG_TAGS(jvmti));
    }
  }

  very_end = curr + strlen(curr);
  while (curr < very_end) {
    const char *curr_end = strchr(curr, ',');
    if (curr_end == NULL) {
      curr_end = very_end;
    }
    const char *op_pos = strchr(curr, '+');
    const char *minus_pos = strchr(curr, '-');
    if (minus_pos != NULL && (minus_pos < op_pos || op_pos == NULL)) {
      op_pos = minus_pos;
    }
    char op;
    const char *flags = op_pos + 1;
    const char *flags_end = curr_end;
    if (op_pos == NULL || op_pos > curr_end) {
      flags = "ies";
      flags_end = flags + strlen(flags);
      op_pos = curr_end;
      op = '+';
    } else {
      op = *op_pos;
    }
    jbyte bits = 0;
    for (; flags < flags_end; ++flags) {
      switch (*flags) {
      case 'i':
        bits |= SHOW_IN;
        break;
      case 'I':
        bits |= SHOW_IN_DETAIL;
        break;
      case 'e':
        bits |= SHOW_ERROR;
        break;
      case 'o':
        bits |= SHOW_OUT;
        break;
      case 'O':
        bits |= SHOW_OUT_DETAIL;
        break;
      case 't':
        bits |= SHOW_EVENT_TRIGGER;
        break;
      case 's':
        bits |= SHOW_EVENT_SENT;
        break;
      default:
        log_warning(jvmti)("Invalid trace flag '%c'", *flags);
        break;
      }
    }
    const int FUNC = 1;
    const int EXCLUDE  = 2;
    const int ALL_FUNC = 4;
    const int EVENT = 8;
    const int ALL_EVENT = 16;
    int domain = 0;
    size_t len = op_pos - curr;
    if (op_pos == curr) {
      domain = ALL_FUNC | FUNC | ALL_EVENT | EVENT | EXCLUDE;
    } else if (len==3 && strncmp(curr, "all", 3)==0) {
      domain = ALL_FUNC | FUNC | ALL_EVENT | EVENT;
    } else if (len==7 && strncmp(curr, "allfunc", 7)==0) {
      domain = ALL_FUNC | FUNC;
    } else if (len==4 && strncmp(curr, "func", 4)==0) {
      domain = ALL_FUNC | FUNC | EXCLUDE;
    } else if (len==8 && strncmp(curr, "allevent", 8)==0) {
      domain = ALL_EVENT | EVENT;
    } else if (len==5 && strncmp(curr, "event", 5)==0) {
      domain = ALL_EVENT | EVENT;
    } else if (len==2 && strncmp(curr, "ec", 2)==0) {
      _trace_event_controller = true;
      log_trace(jvmti)("Tracing the event controller");
    } else {
      domain = FUNC | EVENT;  // go searching
    }

    int exclude_index = 0;
    if (domain & FUNC) {
      if (domain & ALL_FUNC) {
        if (domain & EXCLUDE) {
          log_trace(jvmti)("Tracing all significant functions");
        } else {
          log_trace(jvmti)("Tracing all functions");
        }
      }
      for (int i = 0; i <= _max_function_index; ++i) {
        if (domain & EXCLUDE && i == _exclude_functions[exclude_index]) {
          ++exclude_index;
        } else {
          bool do_op = false;
          if (domain & ALL_FUNC) {
            do_op = true;
          } else {
            const char *fname = function_name(i);
            if (fname != NULL) {
              size_t fnlen = strlen(fname);
              if (len==fnlen && strncmp(curr, fname, fnlen)==0) {
                log_trace(jvmti)("Tracing the function: %s", fname);
                do_op = true;
              }
            }
          }
          if (do_op) {
            if (op == '+') {
              _trace_flags[i] |= bits;
            } else {
              _trace_flags[i] &= ~bits;
            }
            _on = true;
          }
        }
      }
    }
    if (domain & EVENT) {
      if (domain & ALL_EVENT) {
        log_trace(jvmti)("Tracing all events");
      }
      for (int i = 0; i <= _max_event_index; ++i) {
        bool do_op = false;
        if (domain & ALL_EVENT) {
          do_op = true;
        } else {
          const char *ename = event_name(i);
          if (ename != NULL) {
            size_t evtlen = strlen(ename);
            if (len==evtlen && strncmp(curr, ename, evtlen)==0) {
              log_trace(jvmti)("Tracing the event: %s", ename);
              do_op = true;
            }
          }
        }
        if (do_op) {
          if (op == '+') {
            _event_trace_flags[i] |= bits;
          } else {
            _event_trace_flags[i] &= ~bits;
          }
          _on = true;
        }
      }
    }
    if (!_on && (domain & (FUNC|EVENT))) {
      log_warning(jvmti)("Trace domain not found");
    }
    curr = curr_end + 1;
  }
  _initialized = true;
}
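
Working backwards from the parser, a TraceJVMTI value is a comma-separated list of items of the form <domain><op><flags>, where <op> is '+' to set trace bits or '-' to clear them, and each flag letter maps to one SHOW_* bit: i/I (entry, entry with detail), e (error), o/O (exit, exit with detail), t (event trigger), s (event sent). An item without an op defaults to "+ies". Some strings the code above would accept, inferred from the parser rather than from any JVMTI documentation:

// -XX:TraceJVMTI=all               entry/error/sent tracing for all functions and events
// -XX:TraceJVMTI=func+I            detailed entry tracing for all significant functions
// -XX:TraceJVMTI=allevent-e        stop tracing errors for all events
// -XX:TraceJVMTI=GetStackTrace+io  entry and exit tracing for one function
// -XX:TraceJVMTI=ec                trace the event controller itself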
Example #13
TEST_F(LogConfigurationTest, configure_stdout) {
  // Start out with all logging disabled
  LogConfiguration::disable_logging();

  // Enable 'logging=info', verifying it has been set
  LogConfiguration::configure_stdout(LogLevel::Info, true, LOG_TAGS(logging));
  EXPECT_TRUE(log_is_enabled(Info, logging));
  EXPECT_FALSE(log_is_enabled(Debug, logging));
  EXPECT_FALSE(log_is_enabled(Info, gc));
  LogTagSet* logging_ts = &LogTagSetMapping<LOG_TAGS(logging)>::tagset();
  EXPECT_EQ(LogLevel::Info, logging_ts->level_for(&StdoutLog));

  // Enable 'gc=debug' (no wildcard), verifying no other tags are enabled
  LogConfiguration::configure_stdout(LogLevel::Debug, true, LOG_TAGS(gc));
  EXPECT_TRUE(log_is_enabled(Debug, gc));
  EXPECT_TRUE(log_is_enabled(Info, logging));
  EXPECT_FALSE(log_is_enabled(Debug, gc, heap));
  for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) {
    if (ts->contains(PREFIX_LOG_TAG(gc))) {
      if (ts->ntags() == 1) {
        EXPECT_EQ(LogLevel::Debug, ts->level_for(&StdoutLog));
      } else {
        EXPECT_EQ(LogLevel::Off, ts->level_for(&StdoutLog));
      }
    }
  }

  // Enable 'gc*=trace' (with wildcard), verifying that all tag combinations with gc are enabled (gc+...)
  LogConfiguration::configure_stdout(LogLevel::Trace, false, LOG_TAGS(gc));
  EXPECT_TRUE(log_is_enabled(Trace, gc));
  EXPECT_TRUE(log_is_enabled(Trace, gc, heap));
  for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) {
    if (ts->contains(PREFIX_LOG_TAG(gc))) {
      EXPECT_EQ(LogLevel::Trace, ts->level_for(&StdoutLog));
    } else if (ts == logging_ts) {
      // Previous setting for 'logging' should remain
      EXPECT_EQ(LogLevel::Info, ts->level_for(&StdoutLog));
    } else {
      EXPECT_EQ(LogLevel::Off, ts->level_for(&StdoutLog));
    }
  }

  // Disable 'gc*' and 'logging', verifying all logging is properly disabled
  LogConfiguration::configure_stdout(LogLevel::Off, true, LOG_TAGS(logging));
  EXPECT_FALSE(log_is_enabled(Error, logging));
  LogConfiguration::configure_stdout(LogLevel::Off, false, LOG_TAGS(gc));
  EXPECT_FALSE(log_is_enabled(Error, gc));
  EXPECT_FALSE(log_is_enabled(Error, gc, heap));
  for (LogTagSet* ts = LogTagSet::first(); ts != NULL; ts = ts->next()) {
    EXPECT_EQ(LogLevel::Off, ts->level_for(&StdoutLog));
  }
}
Example #14
// Roll all threads forward to a safepoint and suspend them all
void SafepointSynchronize::begin() {

  Thread* myThread = Thread::current();
  assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint");

  if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
    _safepoint_begin_time = os::javaTimeNanos();
    _ts_of_current_safepoint = tty->time_stamp().seconds();
  }

#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    // In the future we should investigate whether CMS can use the
    // more-general mechanism below.  DLD (01/05).
    ConcurrentMarkSweepThread::synchronize(false);
  } else if (UseG1GC) {
    SuspendibleThreadSet::synchronize();
  }
#endif // INCLUDE_ALL_GCS

  // By getting the Threads_lock, we ensure that no threads are about to start or
  // exit. It is released again in SafepointSynchronize::end().
  Threads_lock->lock();

  assert( _state == _not_synchronized, "trying to safepoint synchronize with wrong state");

  int nof_threads = Threads::number_of_threads();

  log_debug(safepoint)("Safepoint synchronization initiated. (%d)", nof_threads);

  RuntimeService::record_safepoint_begin();

  MutexLocker mu(Safepoint_lock);

  // Reset the count of active JNI critical threads
  _current_jni_active_count = 0;

  // Set number of threads to wait for, before we initiate the callbacks
  _waiting_to_block = nof_threads;
  TryingToBlock     = 0 ;
  int still_running = nof_threads;

  // Save the starting time, so that it can be compared to see if this has taken
  // too long to complete.
  jlong safepoint_limit_time = 0;
  timeout_error_printed = false;

  // PrintSafepointStatisticsTimeout can be specified separately. When
  // specified, PrintSafepointStatistics will be set to true in
  // deferred_initialize_stat method. The initialization has to be done
  // early enough to avoid any races. See bug 6880029 for details.
  if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
    deferred_initialize_stat();
  }

  // Begin the process of bringing the system to a safepoint.
  // Java threads can be in several different states and are
  // stopped by different mechanisms:
  //
  //  1. Running interpreted
  //     The interpreter dispatch table is changed to force it to
  //     check for a safepoint condition between bytecodes.
  //  2. Running in native code
  //     When returning from the native code, a Java thread must check
  //     the safepoint _state to see if we must block.  If the
  //     VM thread sees a Java thread in native, it does
  //     not wait for this thread to block.  The order of the memory
  //     writes and reads of both the safepoint state and the Java
  //     threads state is critical.  In order to guarantee that the
  //     memory writes are serialized with respect to each other,
  //     the VM thread issues a memory barrier instruction
  //     (on MP systems).  In order to avoid the overhead of issuing
  //     a memory barrier for each Java thread making native calls, each Java
  //     thread performs a write to a single memory page after changing
  //     the thread state.  The VM thread performs a sequence of
  //     mprotect OS calls which forces all previous writes from all
  //     Java threads to be serialized.  This is done in the
  //     os::serialize_thread_states() call.  This has proven to be
  //     much more efficient than executing a membar instruction
  //     on every call to native code.
  //  3. Running compiled Code
  //     Compiled code reads a global (Safepoint Polling) page that
  //     is set to fault if we are trying to get to a safepoint.
  //  4. Blocked
  //     A thread which is blocked will not be allowed to return from the
  //     block condition until the safepoint operation is complete.
  //  5. In VM or Transitioning between states
  //     If a Java thread is currently running in the VM or transitioning
  //     between states, the safepointing code will wait for the thread to
  //     block itself when it attempts transitions to a new state.
  //
  _state            = _synchronizing;
  OrderAccess::fence();

  // Flush all thread states to memory
  if (!UseMembar) {
    os::serialize_thread_states();
  }

  // Make interpreter safepoint aware
  Interpreter::notice_safepoints();

  if (DeferPollingPageLoopCount < 0) {
    // Make polling safepoint aware
    guarantee (PageArmed == 0, "invariant") ;
    PageArmed = 1 ;
    os::make_polling_page_unreadable();
  }

  // Consider using active_processor_count() ... but that call is expensive.
  int ncpus = os::processor_count() ;

#ifdef ASSERT
  for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
    assert(cur->safepoint_state()->is_running(), "Illegal initial state");
    // Clear the visited flag to ensure that the critical counts are collected properly.
    cur->set_visited_for_critical_count(false);
  }
#endif // ASSERT

  if (SafepointTimeout)
    safepoint_limit_time = os::javaTimeNanos() + (jlong)SafepointTimeoutDelay * MICROUNITS;

  // Iterate through all threads until it has been determined how to stop them all at a safepoint
  unsigned int iterations = 0;
  int steps = 0 ;
  while(still_running > 0) {
    for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
      assert(!cur->is_ConcurrentGC_thread(), "A concurrent GC thread is unexpectedly being suspended");
      ThreadSafepointState *cur_state = cur->safepoint_state();
      if (cur_state->is_running()) {
        cur_state->examine_state_of_thread();
        if (!cur_state->is_running()) {
           still_running--;
           // consider adjusting steps downward:
           //   steps = 0
           //   steps -= NNN
           //   steps >>= 1
           //   steps = MIN(steps, 2000-100)
           //   if (iterations != 0) steps -= NNN
        }
        if (log_is_enabled(Trace, safepoint)) {
          ResourceMark rm;
          cur_state->print_on(LogHandle(safepoint)::trace_stream());
        }
      }
    }

    if (PrintSafepointStatistics && iterations == 0) {
      begin_statistics(nof_threads, still_running);
    }

    if (still_running > 0) {
      // Check whether this is taking too long
      if (SafepointTimeout && safepoint_limit_time < os::javaTimeNanos()) {
        print_safepoint_timeout(_spinning_timeout);
      }

      // Spin to avoid context switching.
      // There's a tension between allowing the mutators to run (and rendezvous)
      // vs spinning.  As the VM thread spins, wasting cycles, it consumes CPU that
      // a mutator might otherwise use profitably to reach a safepoint.  Excessive
      // spinning by the VM thread on a saturated system can increase rendezvous latency.
      // Blocking or yielding incur their own penalties in the form of context switching
      // and the resultant loss of $ residency.
      //
      // Further complicating matters is that yield() does not work as naively expected
      // on many platforms -- yield() does not guarantee that any other ready threads
      // will run.  As such we revert to naked_short_sleep() after some number of iterations.
      // naked_short_sleep() is implemented as a short unconditional sleep.
      // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping
      // can actually increase the time it takes the VM thread to detect that a system-wide
      // stop-the-world safepoint has been reached.  In a pathological scenario such as that
      // described in CR6415670 the VMthread may sleep just before the mutator(s) become safe.
      // In that case the mutators will be stalled waiting for the safepoint to complete and
      // the VMthread will be sleeping, waiting for the mutators to rendezvous.  The VMthread
      // will eventually wake up and detect that all mutators are safe, at which point
      // we'll again make progress.
      //
      // Beware too that the VMThread typically runs at elevated priority.
      // Its default priority is higher than the default mutator priority.
      // Obviously, this complicates spinning.
      //
      // Note too that on Windows XP SwitchThreadTo() has quite different behavior than Sleep(0).
      // Sleep(0) will _not_ yield to lower priority threads, while SwitchThreadTo() will.
      //
      // See the comments in synchronizer.cpp for additional remarks on spinning.
      //
      // In the future we might:
      // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
      //    This is tricky as the path used by a thread exiting the JVM (say on
      //    a JNI call-out) simply stores into its state field.  The burden
      //    is placed on the VM thread, which must poll (spin).
      // 2. Find something useful to do while spinning.  If the safepoint is GC-related
      //    we might aggressively scan the stacks of threads that are already safe.
      // 3. Use Solaris schedctl to examine the state of the still-running mutators.
      //    If all the mutators are ONPROC there's no reason to sleep or yield.
      // 4. YieldTo() any still-running mutators that are ready but OFFPROC.
      // 5. Check system saturation.  If the system is not fully saturated then
      //    simply spin and avoid sleep/yield.
      // 6. As still-running mutators rendezvous they could unpark the sleeping
      //    VMthread.  This works well for still-running mutators that become
      //    safe.  The VMthread must still poll for mutators that call-out.
      // 7. Drive the policy on time-since-begin instead of iterations.
      // 8. Consider making the spin duration a function of the # of CPUs:
      //    Spin = (((ncpus-1) * M) + K) + F(still_running)
      //    Alternately, instead of counting iterations of the outer loop
      //    we could count the # of threads visited in the inner loop, above.
      // 9. On windows consider using the return value from SwitchThreadTo()
      //    to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions.

      if (int(iterations) == DeferPollingPageLoopCount) {
         guarantee (PageArmed == 0, "invariant") ;
         PageArmed = 1 ;
         os::make_polling_page_unreadable();
      }

      // Instead of (ncpus > 1) consider either (still_running < (ncpus + EPSILON)) or
      // ((still_running + _waiting_to_block - TryingToBlock)) < ncpus)
      ++steps ;
      if (ncpus > 1 && steps < SafepointSpinBeforeYield) {
        SpinPause() ;     // MP-Polite spin
      } else
      if (steps < DeferThrSuspendLoopCount) {
        os::naked_yield() ;
      } else {
        os::naked_short_sleep(1);
      }

      iterations ++ ;
    }
    assert(iterations < (uint)max_jint, "We have been iterating in the safepoint loop too long");
  }
  assert(still_running == 0, "sanity check");

  if (PrintSafepointStatistics) {
    update_statistics_on_spin_end();
  }

  // wait until all threads are stopped
  while (_waiting_to_block > 0) {
    log_debug(safepoint)("Waiting for %d thread(s) to block", _waiting_to_block);
    if (!SafepointTimeout || timeout_error_printed) {
      Safepoint_lock->wait(true);  // true, means with no safepoint checks
    } else {
      // Compute remaining time
      jlong remaining_time = safepoint_limit_time - os::javaTimeNanos();

      // If there is no remaining time, then there is an error
      if (remaining_time < 0 || Safepoint_lock->wait(true, remaining_time / MICROUNITS)) {
        print_safepoint_timeout(_blocking_timeout);
      }
    }
  }
  assert(_waiting_to_block == 0, "sanity check");

#ifndef PRODUCT
  if (SafepointTimeout) {
    jlong current_time = os::javaTimeNanos();
    if (safepoint_limit_time < current_time) {
      tty->print_cr("# SafepointSynchronize: Finished after "
                    INT64_FORMAT_W(6) " ms",
                    ((current_time - safepoint_limit_time) / MICROUNITS +
                     (jlong)SafepointTimeoutDelay));
    }
  }
#endif

  assert((_safepoint_counter & 0x1) == 0, "must be even");
  assert(Threads_lock->owned_by_self(), "must hold Threads_lock");
  _safepoint_counter ++;

  // Record state
  _state = _synchronized;

  OrderAccess::fence();

#ifdef ASSERT
  for (JavaThread *cur = Threads::first(); cur != NULL; cur = cur->next()) {
    // make sure all the threads were visited
    assert(cur->was_visited_for_critical_count(), "missed a thread");
  }
#endif // ASSERT

  // Update the count of active JNI critical regions
  GC_locker::set_jni_lock_count(_current_jni_active_count);

  if (log_is_enabled(Debug, safepoint)) {
    VM_Operation *op = VMThread::vm_operation();
    log_debug(safepoint)("Entering safepoint region: %s",
                         (op != NULL) ? op->name() : "no vm operation");
  }

  RuntimeService::record_safepoint_synchronized();
  if (PrintSafepointStatistics) {
    update_statistics_on_sync_end(os::javaTimeNanos());
  }

  // Call stuff that needs to be run when a safepoint is just about to be completed
  do_cleanup_tasks();

  if (PrintSafepointStatistics) {
    // Record how much time was spent on the above cleanup tasks
    update_statistics_on_cleanup_end(os::javaTimeNanos());
  }
}
Example #15
static bool enabled() {
  return UseParallelGC &&
         UseAdaptiveSizePolicy &&
         log_is_enabled(Debug, gc, ergo);
}
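
A hypothetical call site for a combined guard like this, in the spirit of the other examples on this page (the function name and log message below are invented for illustration):

void report_ergo_decision(size_t new_eden_bytes) {
  // Skip all formatting work unless ParallelGC adaptive-size ergonomics
  // logging is actually active (-Xlog:gc+ergo=debug).
  if (!enabled()) {
    return;
  }
  log_debug(gc, ergo)("Resizing eden to " SIZE_FORMAT "B", new_eden_bytes);
}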
Example #16
oop StackWalk::fetchFirstBatch(BaseFrameStream& stream, Handle stackStream,
                               jlong mode, int skip_frames, int frame_count,
                               int start_index, objArrayHandle frames_array, TRAPS) {
    methodHandle m_doStackWalk(THREAD, Universe::do_stack_walk_method());

    {
        Klass* stackWalker_klass = SystemDictionary::StackWalker_klass();
        Klass* abstractStackWalker_klass = SystemDictionary::AbstractStackWalker_klass();
        while (!stream.at_end()) {
            InstanceKlass* ik = stream.method()->method_holder();
            if (ik != stackWalker_klass &&
                    ik != abstractStackWalker_klass && ik->super() != abstractStackWalker_klass)  {
                break;
            }

            if (log_is_enabled(Debug, stackwalk)) {
                ResourceMark rm(THREAD);
                outputStream* st = Log(stackwalk)::debug_stream();
                st->print("  skip ");
                stream.method()->print_short_name(st);
                st->cr();
            }
            stream.next();
        }

        // The StackWalker implementation frames have been traversed individually;
        // resume the stack walk from the stack frame at depth == skip_frames.
        for (int n=0; n < skip_frames && !stream.at_end(); stream.next(), n++) {
            if (log_is_enabled(Debug, stackwalk)) {
                ResourceMark rm(THREAD);
                outputStream* st = Log(stackwalk)::debug_stream();
                st->print("  skip ");
                stream.method()->print_short_name(st);
                st->cr();
            }
        }
    }

    int end_index = start_index;
    int numFrames = 0;
    if (!stream.at_end()) {
        numFrames = fill_in_frames(mode, stream, frame_count, start_index,
                                   frames_array, end_index, CHECK_NULL);
        if (numFrames < 1) {
            THROW_MSG_(vmSymbols::java_lang_InternalError(), "stack walk: decode failed", NULL);
        }
    }

    // JVM_CallStackWalk walks the stack and fills in stack frames, then calls to
    // Java method java.lang.StackStreamFactory.AbstractStackWalker::doStackWalk
    // which calls the implementation to consume the stack frames.
    // When JVM_CallStackWalk returns, it invalidates the stack stream.
    JavaValue result(T_OBJECT);
    JavaCallArguments args(stackStream);
    args.push_long(stream.address_value());
    args.push_int(skip_frames);
    args.push_int(frame_count);
    args.push_int(start_index);
    args.push_int(end_index);

    // Link the thread and vframe stream into the callee-visible object
    stream.setup_magic_on_entry(frames_array);

    JavaCalls::call(&result, m_doStackWalk, &args, THREAD);

    // Do this before anything else happens, to disable any lingering stream objects
    bool ok = stream.cleanup_magic_on_exit(frames_array);

    // Throw pending exception if we must
    (void) (CHECK_NULL);

    if (!ok) {
        THROW_MSG_(vmSymbols::java_lang_InternalError(), "doStackWalk: corrupted buffers on exit", NULL);
    }

    // Return normally
    return (oop)result.get_jobject();
}
Example #17
bool AdaptiveSizePolicy::print() const {
  assert(UseAdaptiveSizePolicy, "UseAdaptiveSizePolicy needs to be enabled.");

  if (!log_is_enabled(Debug, gc, ergo)) {
    return false;
  }

  // Print goal for which action is needed.
  char* action = NULL;
  bool change_for_pause = false;
  if ((change_old_gen_for_maj_pauses() ==
         decrease_old_gen_for_maj_pauses_true) ||
      (change_young_gen_for_min_pauses() ==
         decrease_young_gen_for_min_pauses_true)) {
    action = (char*) " *** pause time goal ***";
    change_for_pause = true;
  } else if ((change_old_gen_for_throughput() ==
               increase_old_gen_for_throughput_true) ||
            (change_young_gen_for_throughput() ==
               increase_young_gen_for_througput_true)) {
    action = (char*) " *** throughput goal ***";
  } else if (decrease_for_footprint()) {
    action = (char*) " *** reduced footprint ***";
  } else {
    // No actions were taken.  This can legitimately be the
    // situation if not enough data has been gathered to make
    // decisions.
    return false;
  }

  // Pauses
  // Currently the size of the old gen is only adjusted to
  // change the major pause times.
  char* young_gen_action = NULL;
  char* tenured_gen_action = NULL;

  char* shrink_msg = (char*) "(attempted to shrink)";
  char* grow_msg = (char*) "(attempted to grow)";
  char* no_change_msg = (char*) "(no change)";
  if (change_young_gen_for_min_pauses() ==
      decrease_young_gen_for_min_pauses_true) {
    young_gen_action = shrink_msg;
  } else if (change_for_pause) {
    young_gen_action = no_change_msg;
  }

  if (change_old_gen_for_maj_pauses() == decrease_old_gen_for_maj_pauses_true) {
    tenured_gen_action = shrink_msg;
  } else if (change_for_pause) {
    tenured_gen_action = no_change_msg;
  }

  // Throughput
  if (change_old_gen_for_throughput() == increase_old_gen_for_throughput_true) {
    assert(change_young_gen_for_throughput() ==
           increase_young_gen_for_througput_true,
           "Both generations should be growing");
    young_gen_action = grow_msg;
    tenured_gen_action = grow_msg;
  } else if (change_young_gen_for_throughput() ==
             increase_young_gen_for_througput_true) {
    // Only the young generation may grow at start up (before
    // enough full collections have been done to grow the old generation).
    young_gen_action = grow_msg;
    tenured_gen_action = no_change_msg;
  }

  // Minimum footprint
  if (decrease_for_footprint() != 0) {
    young_gen_action = shrink_msg;
    tenured_gen_action = shrink_msg;
  }

  log_debug(gc, ergo)("UseAdaptiveSizePolicy actions to meet %s", action);
  log_debug(gc, ergo)("                       GC overhead (%%)");
  log_debug(gc, ergo)("    Young generation:     %7.2f\t  %s",
                      100.0 * avg_minor_gc_cost()->average(), young_gen_action);
  log_debug(gc, ergo)("    Tenured generation:   %7.2f\t  %s",
                      100.0 * avg_major_gc_cost()->average(), tenured_gen_action);
  return true;
}