Example #1
//-----------------------------profile_receiver_type---------------------------
void Parse::profile_receiver_type(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData here");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) {
    increment_md_counter_at(md, data, CounterData::count_offset());
    return;
  }
  ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();

  Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));

  // Using an adr_type of TypePtr::BOTTOM to work around anti-dep problems.
  // A better solution might be to use TypeRawPtr::BOTTOM with RC_NARROW_MEM.
  make_runtime_call(RC_LEAF, OptoRuntime::profile_receiver_type_Type(),
                    CAST_FROM_FN_PTR(address,
                                     OptoRuntime::profile_receiver_type_C),
                    "profile_receiver_type_C",
                    TypePtr::BOTTOM,
                    method_data, receiver);
}
Example #2
//------------------------------profile_virtual_call---------------------------
void Parse::profile_virtual_call(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_VirtualCallData(), "need VirtualCallData at call site");
  ciVirtualCallData* call_data = (ciVirtualCallData*)data->as_VirtualCallData();

  Node* method_data = method_data_addressing(md, call_data, in_ByteSize(0));

  // The following construction of the CallLeafNode is almost identical to
  // make_slow_call().  However, with make_slow_call(), the merge mem 
  // characteristics were causing incorrect anti-deps to be added.

  CallRuntimeNode* call =
      new CallLeafNode(OptoRuntime::profile_virtual_call_Type(),
                       CAST_FROM_FN_PTR(address, OptoRuntime::profile_virtual_call_C),
                       "profile_virtual_call_C");

  set_predefined_input_for_runtime_call(call);
  call->set_req( TypeFunc::Parms+0, method_data );
  call->set_req( TypeFunc::Parms+1, receiver );

  Node* c = _gvn.transform(call);

  set_predefined_output_for_runtime_call(c);
}
Example #3
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Need to get the CodeEmitInfo before loading the items, as the items
  // may become invalid through item_free.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    // if a patching_info was generated above then debug information for the state before
    // the call is going to be emitted.  The LIRGenerator calls above may have left some values
    // in registers and that's been recorded in the CodeEmitInfo.  In that case the items
    // for those values can't simply be freed if they are registers because the values
    // might be destroyed by store_stack_parameter.  So in the case of patching, delay the
    // freeing of the items that already were in registers
    size->load_item();
    store_stack_parameter (size->result(),
                           in_ByteSize(STACK_BIAS +
                                       frame::memory_parameter_word_sp_offset * wordSize +
                                       i * sizeof(jint)));
  }

  // This instruction can be deoptimized in the slow path: use
  // O0 as result register.
  const LIR_Opr reg = result_register_for(x->type());
  CodeEmitInfo* info = state_for(x, x->state());

  jobject2reg_with_patching(reg, x->klass(), patching_info);
  LIR_Opr rank = FrameMap::O1_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::as_pointer_opr(O2);
  int offset_from_sp = (frame::memory_parameter_word_sp_offset * wordSize) + STACK_BIAS;
  __ add(FrameMap::SP_opr,
         LIR_OprFact::intptrConst(offset_from_sp),
         varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(reg);
  args->append(rank);
  args->append(varargs);
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
Example #4
//-------------------------------set_md_flag_at--------------------------------
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, in_ByteSize(0));

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* flags = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type);
  Node* incr = _gvn.transform(new OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type);
}
Example #5
ByteSize FrameMap::sp_offset_for_slot(const int index) const {
  if (index < argcount()) {
    int offset = _argument_locations->at(index);
    assert(offset != -1, "not a memory argument");
    assert(offset >= framesize() * 4, "argument inside of frame");
    return in_ByteSize(offset);
  }
  ByteSize offset = sp_offset_for_spill(index - argcount());
  assert(in_bytes(offset) < framesize() * 4, "spill outside of frame");
  return offset;
}
Example #6
ByteSize ciMethodData::offset_of_slot(ciProfileData* data, ByteSize slot_offset_in_data) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(slot_offset_in_data);

  return in_ByteSize(offset);
}
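For illustration, here is a self-contained sketch of the arithmetic offset_of_slot() performs: the data array's offset within the MethodData, plus the cell offset of the ProfileData, plus the slot offset inside the ProfileData. All constants below are assumed values for the example, not HotSpot's real layout.
#include <cstdio>

// Stand-ins for the three pieces offset_of_slot() adds together.
// All values are assumptions; the real numbers come from
// MethodData::data_offset(), dp_to_di(data->dp()), and the slot offset.
static const int data_offset_in_method_data  = 64;  // assumed header size
static const int cell_offset_of_profile_data = 48;  // assumed position in the data array
static const int slot_offset_in_data         = 8;   // assumed counter/flag offset

int main() {
  // Same composition as offset_of_slot(): header + cell + slot.
  int offset = data_offset_in_method_data +
               cell_offset_of_profile_data +
               slot_offset_in_data;
  printf("byte offset of the slot within MethodData*: %d\n", offset);
  return 0;
}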
Example #7
bool FrameMap::locations_for_slot  (int index, Location::Type loc_type,
                                     Location* loc, Location* second) const {
  ByteSize offset_from_sp = sp_offset_for_slot(index);
  if (!location_for_sp_offset(offset_from_sp, loc_type, loc)) {
    return false;
  }
  if (second != NULL) {
    // two word item
    offset_from_sp = offset_from_sp + in_ByteSize(4);
    return location_for_sp_offset(offset_from_sp, loc_type, second);
  }
  return true;
}
Example #8
VMReg FrameMap::regname(LIR_Opr opr) const {
  if (opr->is_single_cpu()) {
    assert(!opr->is_virtual(), "should not see virtual registers here");
    return opr->as_register()->as_VMReg();
  } else if (opr->is_single_stack()) {
    return sp_offset2vmreg(sp_offset_for_slot(opr->single_stack_ix()));
  } else if (opr->is_address()) {
    LIR_Address* addr = opr->as_address_ptr();
    assert(addr->base() == stack_pointer(), "sp based addressing only");
    return sp_offset2vmreg(in_ByteSize(addr->index()->as_jint()));
  }
  ShouldNotReachHere();
  return VMRegImpl::Bad();
}
Example #9
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers.  This is handled transparently in other
    // places by the CodeEmitInfo cloning logic but is handled
    // specially here because a stub isn't being used.
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr reg = result_register_for(x->type());
  jobject2reg_with_patching(reg, x->klass(), patching_info);

  LIR_Opr rank = FrameMap::rbx_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::rcx_opr;
  __ move(FrameMap::rsp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(reg);
  args->append(rank);
  args->append(varargs);
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
Example #10
void LIRGenerator::trace_block_entry(BlockBegin* block) {
  store_stack_parameter(LIR_OprFact::intConst(block->block_id()), in_ByteSize(0));
  LIR_OprList* args = new LIR_OprList();
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
}
Example #11
ByteSize FrameMap::sp_offset_for_monitor_object(int index) const {
  check_monitor_index(index);
  return sp_offset_for_monitor_base(index) + in_ByteSize(BasicObjectLock::obj_offset_in_bytes());
}
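The ByteSize addition above relies on HotSpot's ByteSize utility, which wraps a byte count in its own type so raw integers and offsets cannot be mixed silently. A minimal standalone sketch of that idiom follows; the class, helper names, and offsets are assumptions standing in for the real utility and for the obj field offset of BasicObjectLock.
#include <cstdio>

// Simplified stand-in for HotSpot's ByteSize: a byte count wrapped in a
// distinct type. This is an assumption-level sketch, not the real definition.
class ByteSizeSketch {
  int _bytes;
 public:
  explicit ByteSizeSketch(int bytes) : _bytes(bytes) {}
  int bytes() const { return _bytes; }
  ByteSizeSketch operator+(ByteSizeSketch other) const {
    return ByteSizeSketch(_bytes + other._bytes);
  }
};

static ByteSizeSketch in_ByteSize_sketch(int bytes) { return ByteSizeSketch(bytes); }
static int            in_bytes_sketch(ByteSizeSketch x) { return x.bytes(); }

int main() {
  // Mirrors sp_offset_for_monitor_object(): monitor base offset plus the
  // obj field offset inside the monitor slot (both values assumed).
  ByteSizeSketch monitor_base = in_ByteSize_sketch(96);
  ByteSizeSketch obj_offset   = monitor_base + in_ByteSize_sketch(8);
  printf("assumed sp offset of the monitor object: %d bytes\n",
         in_bytes_sketch(obj_offset));
  return 0;
}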
Example #12
ByteSize FrameMap::sp_offset_for_monitor_base(const int index) const {
  int end_of_spills = round_to(first_available_sp_in_frame + _reserved_argument_area_size, sizeof(double)) +
    _num_spills * spill_slot_size_in_bytes;
  int offset = (int) round_to(end_of_spills, HeapWordSize) + index * sizeof(BasicObjectLock);
  return in_ByteSize(offset);
}
Example #13
ByteSize FrameMap::sp_offset_for_spill(const int index) const {
  assert(index >= 0 && index < _num_spills, "out of range");
  int offset = round_to(first_available_sp_in_frame + _reserved_argument_area_size, sizeof(double)) +
    index * spill_slot_size_in_bytes;
  return in_ByteSize(offset);
}
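Examples #12 and #13 describe the same frame layout: spill slots follow the reserved argument area rounded up to a double, and monitors follow the end of the spill area rounded up to a heap word. Below is a standalone sketch of that arithmetic; every constant and the round_to_sketch helper are assumptions for illustration, not values from any real platform.
#include <cstdio>

// Assumed stand-ins for the constants used by sp_offset_for_spill() and
// sp_offset_for_monitor_base(); the real values are platform dependent.
static const int first_available_sp_in_frame = 0;   // assumed
static const int reserved_argument_area_size = 20;  // assumed, in bytes
static const int spill_slot_size_in_bytes    = 8;   // assumed
static const int heap_word_size              = 8;   // assumed HeapWordSize
static const int basic_object_lock_size      = 16;  // assumed sizeof(BasicObjectLock)

// Same shape as a round_to() helper: round x up to a multiple of a
// power-of-two alignment.
static int round_to_sketch(int x, int alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  // Spill slots start after the reserved argument area, rounded to a double.
  int spill_base = round_to_sketch(first_available_sp_in_frame +
                                   reserved_argument_area_size,
                                   (int) sizeof(double));
  printf("spill slot 3 sp offset: %d bytes\n",
         spill_base + 3 * spill_slot_size_in_bytes);

  // Monitors start after all spill slots, rounded to a heap word.
  int num_spills    = 5;  // assumed
  int end_of_spills = spill_base + num_spills * spill_slot_size_in_bytes;
  int monitor_base  = round_to_sketch(end_of_spills, heap_word_size);
  printf("monitor 1 base sp offset: %d bytes\n",
         monitor_base + 1 * basic_object_lock_size);
  return 0;
}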
Example #14
 // Compiler/Interpreter offset
 static ByteSize component_mirror_offset() { return in_ByteSize(offset_of(ArrayKlass, _component_mirror)); }