Example #1

void CompiledIC::verify() {
  // make sure code pattern is actually a call imm32 instruction
  _ic_call->verify();
  if (os::is_MP()) {
    _ic_call->verify_alignment();
  }
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
          || is_optimized() || is_megamorphic(), "sanity check");
}
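As a reader aid for the assert above: an inline cache is always in exactly one of the states the check names. The following is a minimal standalone model of that state machine, not HotSpot code; every name in it is invented for illustration.

#include <cassert>

// Toy model (not HotSpot code): the states an inline cache can be in,
// mirroring the sanity check in CompiledIC::verify() above.
enum class ICState { clean, monomorphic_compiled, monomorphic_interpreted, megamorphic };

struct ToyIC {
  ICState state = ICState::clean;
  bool is_clean() const               { return state == ICState::clean; }
  bool is_call_to_compiled() const    { return state == ICState::monomorphic_compiled; }
  bool is_call_to_interpreted() const { return state == ICState::monomorphic_interpreted; }
  bool is_megamorphic() const         { return state == ICState::megamorphic; }

  void verify() const {
    // Same shape as the assert in the snippet above: every inline cache
    // must be in exactly one of the known states.
    assert(is_clean() || is_call_to_compiled() ||
           is_call_to_interpreted() || is_megamorphic());
  }
};

int main() {
  ToyIC ic;
  ic.verify();                      // clean
  ic.state = ICState::megamorphic;
  ic.verify();                      // still a legal state
}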
Example #2
void c_seq::remove(wisdom_IOStream& io)
{
	SEQ_INIT();
	if (!is_clean())
		return;
	c_wlock lock(&m_lock);
	uint32 del_count = 0;
	uint64_t ms = get_ms();
	uint32 last = m_seq_head.g_cleantime();
	uint32 count = m_seq_head.g_count();
	uint32 ibegin = seq_begin_index();

	leveldb::WriteOptions op;
	leveldb::WriteBatch bh;
	
	while (true)
	{
		string value;
		_zmsg_head* head = NULL;
		if (c_base::get_value(__tos(m_key << "@" << ibegin), value, &head) != 0)
		{
			break;
		}

		if (head->type != T_SEQ_VALUE)
		{
			LOG4_ERROR("SEQ KEY:" << __tos(m_key << "@" << ibegin) << " error.");
			break;
		}

		// expiry delay: per-sequence if configured, otherwise the server-wide default
		uint32 idelay = m_seq_head.g_delay() == 0 ? c_server::get_instance()->seq_valid_time() : m_seq_head.g_delay();
		// stop at the first record that has not expired yet
		if (head->g_effective() + idelay > time(0))
		{
			break;
		}

		seq_del(ibegin, bh);
		del_count++;
		
		ibegin++;
	}
	
	m_seq_head.s_cleantime(time(0));
	seq_update(bh);
	if (!c_base::write(op, bh))
	{
		m_seq_head.s_count(count);
		ZERROR_RESULT("seq write error.");
	}

	io->push(ZRESULT_OK);

	LOG4_INFO("SEQ " << m_key << " clean record:" << del_count 
		<< " last min " << ((time(0) - last) / 60)
		<< " cost " << (uint32)(get_ms() - ms) << " ms");
}
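The expiry test above compares a record's effective time plus a delay (the per-sequence value if set, otherwise a server-wide default) against the current time. A minimal standalone sketch of that check, with hypothetical stand-ins for the accessors, since c_seq's internals are not part of this listing:

#include <cstdint>
#include <ctime>

// Hypothetical stand-ins for m_seq_head.g_delay() and
// c_server::get_instance()->seq_valid_time().
uint32_t per_seq_delay  = 0;     // 0 means "use the server default"
uint32_t server_default = 3600;  // assumed default validity, in seconds

// Mirrors the check in c_seq::remove(): a record has expired once
// effective_time + delay is no longer in the future.
bool is_expired(uint32_t effective_time) {
  uint32_t delay = (per_seq_delay == 0) ? server_default : per_seq_delay;
  return effective_time + delay <= (uint32_t)time(nullptr);
}

int main() {
  uint32_t now = (uint32_t)time(nullptr);
  return is_expired(now - 2 * server_default) ? 0 : 1;  // old record: expired
}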
Example #3

void CompiledIC::set_to_monomorphic(const CompiledICInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "should only go to monomorphic from clean state");

  Thread *thread = Thread::current();
  if (info._to_interpreter) {
    COMPILER2_ONLY(debug_only(CodeBlob* cb = CodeCache::find_blob(info.entry()));)
    // ...

Example #4

void CompiledStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}
Example #5
void c_seq::index_(wisdom_IOStream& io)
{
	SEQ_INIT();
	if (is_clean())
	{
		wisdom_IOStream os = new c_ostream_array;
		remove(os);
	}

	c_rlock lock(&m_lock);
	io->push(ZRESULT_OK);
	io->push(itostr(m_seq_head.g_index()));
}
Example #6
void CompiledStaticCall::verify() {
  // Verify call
  NativeCall::verify();
  if (os::is_MP()) {
    verify_alignment();
  }

  // Verify stub
  address stub = find_stub();
  assert(stub != NULL, "no stub found for static call");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  // Verify state
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
Example #7
void CompiledDirectStaticCall::verify() {
  // Verify call.
  _call->verify();
  if (os::is_MP()) {
    _call->verify_alignment();
  }

  // Verify stub.
  address stub = find_stub(/*is_aot*/ false);
  assert(stub != NULL, "no stub found for static call");
  // Creation also verifies the object.
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  // Verify state.
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
Example #8
void c_seq::seek(uint32 keyId, int index, wisdom_IOStream& io)
{
	SEQ_INIT();

	if (is_clean())
	{
		wisdom_IOStream os = new c_ostream_array;
		remove(os);
	}

	c_rlock lock(&m_lock);
	io->push(ZRESULT_OK);

	if (keyId == 0)
	{
		keyId = m_seq_head.g_index();
	}

	// a positive index walks forward from keyId, a negative one walks backward
	bool down_up = index > 0;
	index = abs(index);

	for (int i = 0; i < index; i++)
	{
		string value;
		if (!seq_get(keyId, value))
		{
			break;
		}
		// the record payload follows the _zmsg_head header
		_zmsg_head* h = (_zmsg_head*)value.c_str();
		char* ptr = (char*)value.c_str() + sizeof(_zmsg_head);
		int len = (int)(value.length() - sizeof(_zmsg_head));
		c_time t(h->g_effective());
		io->push(__tos(keyId << "," << t.time_stamp() << ","));
		io->push(ptr, len);

		if (down_up)
			keyId++;
		else
			keyId--;
	}
}
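seek() pages |index| records starting at keyId, walking forward for a positive index and backward for a negative one, and stops at the first missing record. The same walk over a plain in-memory map, as a sketch; the store and its contents are invented here because seq_get() is not shown in this listing:

#include <cstdint>
#include <cstdlib>
#include <map>
#include <string>
#include <vector>

// Hypothetical store standing in for seq_get(); keys are sequence ids.
std::map<uint32_t, std::string> store;

// Mirrors the walk in c_seq::seek(): a positive count pages forward,
// a negative count pages backward, stopping at the first missing id.
std::vector<std::string> seek_sketch(uint32_t keyId, int count) {
  bool forward = count > 0;
  std::vector<std::string> out;
  for (int i = 0; i < std::abs(count); i++) {
    auto it = store.find(keyId);
    if (it == store.end()) break;   // ran off either end of the sequence
    out.push_back(it->second);
    forward ? keyId++ : keyId--;
  }
  return out;
}

int main() {
  store = {{10, "a"}, {11, "b"}, {12, "c"}};
  auto fwd  = seek_sketch(10, 3);   // "a", "b", "c"
  auto back = seek_sketch(12, -2);  // "c", "b"
  return (fwd.size() == 3 && back.size() == 2) ? 0 : 1;
}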
Example #9

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    if (TraceICs) {
      ResourceMark rm;
      tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                    p2i(instruction_address()),
                    p2i(info.entry()));
    }
    // Call to compiled code
    assert (CodeCache::contains(info.entry()), "wrong entry point");
    set_destination_mt_safe(info.entry());
  }
}
Example #10
void c_seq::query(uint32 datetime, wisdom_IOStream& io)
{
	SEQ_INIT();
	if (is_clean())
	{
		wisdom_IOStream os = new c_ostream_array;
		remove(os);
	}

	c_rlock lock(&m_lock);

	uint32 begin_index = seq_begin_index();
	uint32 end_index = m_seq_head.g_index();

	uint32 ibegin = 0;
	uint32 iend = 0;
	map<int, uint32> mfind;
	int diff = 0;
	while (true)
	{
		if (diff > 0)
		{
			// continue the search in the lower half
			iend = ibegin + (iend - ibegin) / 2;
		}
		else if (diff < 0)
		{
			// continue the search in the upper half
			ibegin = ibegin + (iend - ibegin) / 2;
		}
		else
		{
			// first pass: search the full index range
			ibegin = begin_index;
			iend = end_index;
		}
		
		uint32 keyId = dichotomy(datetime, ibegin, iend, diff, io);
		if (keyId == 0)
			break;

		// exact match found
		if (diff == 0)
		{
			io->push(ZRESULT_OK);
			io->push(itostr(keyId));
			return;
		}

		
		mfind.insert(make_pair(diff, keyId));
		// search interval exhausted without an exact match
		if (keyId == ibegin || keyId == iend)
		{
			break;
		}
	}

	// prefer the smallest positive diff; if every diff is negative,
	// fall back to the largest one (the last entry in the map)
	for (map<int, uint32>::iterator pos = mfind.begin(); pos != mfind.end(); ++pos)
	{
		if (pos->first > 0 || pos == --mfind.end())
		{
			io->push(ZRESULT_OK);
			io->push(itostr(pos->second));
			return;
		}
	}

	io->push(ZRESULT_ERROR);
}
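The halving of [ibegin, iend] above is a binary search over record timestamps, delegated to a dichotomy() helper that this listing does not show. Below is a self-contained sketch of the same idea over a sorted array; the sign convention for diff and the tie-breaking rule are assumptions inferred from the loop above:

#include <cstdint>
#include <map>
#include <vector>

// Standalone sketch of the search in c_seq::query(): repeatedly halve the
// candidate range, remember near misses keyed by their diff, and prefer an
// exact hit, then the closest record on the positive-diff side.
// Assumes `times` is non-empty and sorted ascending.
uint32_t query_sketch(const std::vector<uint32_t>& times, uint32_t datetime) {
  uint32_t lo = 0, hi = (uint32_t)times.size() - 1;
  std::map<int, uint32_t> near;                 // diff -> index, like mfind
  while (true) {
    uint32_t mid = lo + (hi - lo) / 2;
    int diff = (times[mid] == datetime) ? 0 : (times[mid] < datetime ? -1 : 1);
    if (diff == 0) return mid;                  // exact match
    near.emplace(diff, mid);
    if (mid == lo || mid == hi) break;          // range exhausted
    (diff > 0 ? hi : lo) = mid;
  }
  auto pos = near.upper_bound(0);               // smallest positive diff
  return (pos != near.end()) ? pos->second : near.rbegin()->second;
}

int main() {
  std::vector<uint32_t> t = {100, 200, 300, 400};
  return (query_sketch(t, 300) == 2 && query_sketch(t, 250) == 2) ? 0 : 1;
}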
Example #11

void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe

  Thread *thread = Thread::current();
  if (info.to_interpreter()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method (thread, (Method*)info.cached_metadata());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
           p2i(instruction_address()),
           method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert (cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
        p2i(instruction_address()),
        ((Klass*)info.cached_metadata())->print_value_string(),
        (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}