void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool unaligned) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      assert(!tmp1->is_valid() && !tmp2->is_valid(), "unnecessary definition of temp operands");
      reg2reg(src,  dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      assert(!tmp1->is_valid() && !tmp2->is_valid(), "unnecessary definition of temp operands");
      reg2stack(src, dest, type);
    } else if (dest->is_address()) {
      reg2mem(src, dest, tmp1, tmp2, tmp3, type, patch_code, info, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      assert(!tmp1->is_valid() && !tmp2->is_valid(), "unnecessary definition of temp operands");
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      assert(!tmp2->is_valid(), "unnecessary definition of temp operands");
      stack2stack(src, dest, tmp1, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      assert(!tmp3->is_valid(), "unnecessary definition of temp operands");
      const2reg(src, dest, patch_code, info, tmp1, tmp2); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      assert(!tmp3->is_valid(), "unnecessary definition of temp operands");
      const2stack(src, dest, tmp1, tmp2);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, tmp1, tmp2, tmp3, type, info);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    assert(!tmp2->is_valid(), "unnecessary definition of temp operand");
    mem2reg(src, dest, tmp1, type, patch_code, info, unaligned);

  } else {
    ShouldNotReachHere();
  }
}
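
move_op dispatches purely on the kinds of its source and destination operands: register, stack slot, constant, or memory address. A minimal, self-contained sketch of the same dispatch pattern, using a hypothetical Operand type rather than the real LIR_Opr class, might look like this:

#include <cassert>
#include <cstdio>

enum class OperandKind { Register, Stack, Constant, Address };

struct Operand {
  OperandKind kind;
  bool is_register() const { return kind == OperandKind::Register; }
  bool is_stack()    const { return kind == OperandKind::Stack; }
  bool is_constant() const { return kind == OperandKind::Constant; }
  bool is_address()  const { return kind == OperandKind::Address; }
};

// Dispatch a move to the matching emitter, mirroring move_op's structure.
void move(const Operand& src, const Operand& dst) {
  if (src.is_register()) {
    if (dst.is_register())     std::puts("reg2reg");
    else if (dst.is_stack())   std::puts("reg2stack");
    else if (dst.is_address()) std::puts("reg2mem");
    else                       assert(false && "unreachable");
  } else if (src.is_stack()) {
    if (dst.is_register())     std::puts("stack2reg");
    else if (dst.is_stack())   std::puts("stack2stack");
    else                       assert(false && "unreachable");
  } else if (src.is_constant()) {
    if (dst.is_register())     std::puts("const2reg");
    else if (dst.is_stack())   std::puts("const2stack");
    else if (dst.is_address()) std::puts("const2mem");
    else                       assert(false && "unreachable");
  } else if (src.is_address()) {
    std::puts("mem2reg");  // a memory source is only ever loaded into a register
  } else {
    assert(false && "unreachable");
  }
}
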
 // Version that _does_ generate a load of the previous value from addr.
 // addr (the address of the field to be read) must be a LIR_Address
 // pre_val (a temporary register) must be a register;
 G1PreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) :
   _addr(addr), _pre_val(pre_val), _do_load(true),
   _patch_code(patch_code), _info(info)
 {
   assert(_pre_val->is_register(), "should be temporary register");
   assert(_addr->is_address(), "should be the address of the field");
 }
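
This constructor only records the operands; the stub itself implements G1's snapshot-at-the-beginning pre-barrier, which loads the field's previous value and logs it for the concurrent marker. A minimal sketch of that idea follows, where marking_is_active and enqueue_for_marking are stand-in helpers, not HotSpot functions:

struct Object {};

static bool marking_is_active() { return true; }   // stand-in for the real marking flag
static void enqueue_for_marking(Object*) {}        // stand-in for the SATB queue

// Sketch of the pre-barrier for the _do_load == true variant: load the
// previous value of the field and record it before the field is overwritten.
void pre_barrier_sketch(Object** field_addr) {
  if (!marking_is_active()) return;
  Object* pre_val = *field_addr;     // the load this stub variant generates
  if (pre_val != nullptr) {
    enqueue_for_marking(pre_val);    // keep the old value live for concurrent marking
  }
}
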
Example #3
FrameMap::FrameMap(ciMethod* method, int monitors, int reserved_argument_area_size) {
  assert(_init_done, "should already be completed");

  _framesize = -1;
  _num_spills = -1;

  assert(monitors >= 0, "not set");
  _num_monitors = monitors;
  assert(reserved_argument_area_size >= 0, "not set");
  _reserved_argument_area_size = MAX2(4, reserved_argument_area_size) * BytesPerWord;

  _argcount = method->arg_size();
  _argument_locations = new intArray(_argcount, -1);
  _incoming_arguments = java_calling_convention(signature_type_array_for(method), false);
  _oop_map_arg_count = _incoming_arguments->reserved_stack_slots();

  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_address()) {
      LIR_Address* address = opr->as_address_ptr();
      _argument_locations->at_put(java_index, address->disp() - STACK_BIAS);
      _incoming_arguments->args()->at_put(i, LIR_OprFact::stack(java_index, as_BasicType(as_ValueType(address->type()))));
    }
    java_index += type2size[opr->type()];
  }

}
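
The loop above strides java_index by type2size[opr->type()], so a long or double argument occupies two Java local slots while everything else takes one. A small, self-contained illustration of that striding, where slot_size is a stand-in for HotSpot's type2size table:

#include <cstdio>

enum BasicTypeSketch { BT_INT, BT_LONG, BT_DOUBLE, BT_OBJECT };
static const int slot_size[] = { 1, 2, 2, 1 };  // slots occupied per type

int main() {
  BasicTypeSketch sig[] = { BT_INT, BT_LONG, BT_OBJECT };
  int java_index = 0;
  for (BasicTypeSketch t : sig) {
    std::printf("arg at java_index %d\n", java_index);  // prints 0, 1, 3
    java_index += slot_size[t];
  }
  return 0;
}
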
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
    if (src->is_register()) {
        if (dest->is_register()) {
            assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
            reg2reg(src,  dest);
        } else if (dest->is_stack()) {
            assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
            reg2stack(src, dest, type, pop_fpu_stack);
        } else if (dest->is_address()) {
            reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
        } else {
            ShouldNotReachHere();
        }

    } else if (src->is_stack()) {
        assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
        if (dest->is_register()) {
            stack2reg(src, dest, type);
        } else if (dest->is_stack()) {
            stack2stack(src, dest, type);
        } else {
            ShouldNotReachHere();
        }

    } else if (src->is_constant()) {
        if (dest->is_register()) {
            const2reg(src, dest, patch_code, info); // patching is possible
        } else if (dest->is_stack()) {
            assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
            const2stack(src, dest);
        } else if (dest->is_address()) {
            assert(patch_code == lir_patch_none, "no patching allowed here");
            const2mem(src, dest, type, info, wide);
        } else {
            ShouldNotReachHere();
        }

    } else if (src->is_address()) {
        mem2reg(src, dest, type, patch_code, info, wide, unaligned);

    } else {
        ShouldNotReachHere();
    }
}
void LIR_OopMapGenerator::process_move(LIR_Op* op) {
  LIR_Op1* op1 = op->as_Op1();
  LIR_Opr src = op1->in_opr();
  LIR_Opr dst = op1->result_opr();
  
  assert(!src->is_stack() || !dst->is_stack(), "No memory-memory moves allowed");
  if ((src->is_stack() && frame_map()->is_spill_pos(src)) ||
      (dst->is_stack() && frame_map()->is_spill_pos(dst))) {
    // Oops in the spill area are handled by another mechanism (see
    // CodeEmitInfo::record_spilled_oops)
    return;
  }
  if (dst->is_oop()) {
    assert((src->is_oop() &&
            (src->is_stack() || src->is_register() || src->is_constant())
           ) ||
           src->is_address(), "Wrong tracking of oops/non-oops in LIR");
    assert(!src->is_stack() || is_marked(src->single_stack_ix()),
           "Error in tracking of oop stores to stack");
    if (dst->is_stack()) {
      mark(dst->single_stack_ix());
    } else if (dst->is_register()) {
      if (LIRCacheLocals) {
        if (local_mapping()->is_cache_reg(dst)) {
          mark(dst);
        }
      } else {
        assert(local_mapping() == NULL, "expected no local mapping");
      }
    }
  } else {
    // !dst->is_oop()
    // Note that dst may be an address
    assert(!src->is_single_stack() || !is_marked(src->single_stack_ix()), "Error in tracking of oop stores to stack");
    assert(!src->is_double_stack() || !is_marked(src->double_stack_ix()), "Error in tracking of oop stores to stack");
    assert(!src->is_double_stack() || !is_marked(1 + src->double_stack_ix()), "Error in tracking of oop stores to stack");
    if (dst->is_stack()) {
      if (dst->is_single_stack()) {
        clear_all(dst->single_stack_ix());
      } else {
        clear_all(dst->double_stack_ix());
        clear_all(1 + dst->double_stack_ix());
      }
    } else if (dst->is_register()) {
      if (LIRCacheLocals) {
        if (local_mapping()->is_cache_reg(dst)) {
          clear_all(dst);
        }
      } else {
        assert(local_mapping() == NULL, "expected no local mapping");
      }
    }
  }
}
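
The marking logic above reduces to: storing an oop into a stack slot marks that slot, and storing a non-oop clears one slot for single-word values or two adjacent slots for double-word values. A minimal sketch of that bookkeeping, where OopSlotMap is a hypothetical stand-in for the C1 oop-map generator's real data structure:

#include <bitset>

struct OopSlotMap {
  std::bitset<64> live;                       // one bit per stack slot
  void mark(int ix)            { live.set(ix); }                         // oop stored here
  void clear_single(int ix)    { live.reset(ix); }                       // non-oop, one slot
  void clear_double(int ix)    { live.reset(ix); live.reset(ix + 1); }   // non-oop, two slots
  bool is_marked(int ix) const { return live.test(ix); }
};
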
Example #6
VMReg FrameMap::regname(LIR_Opr opr) const {
  if (opr->is_single_cpu()) {
    assert(!opr->is_virtual(), "should not see virtual registers here");
    return opr->as_register()->as_VMReg();
  } else if (opr->is_single_stack()) {
    return sp_offset2vmreg(sp_offset_for_slot(opr->single_stack_ix()));
  } else if (opr->is_address()) {
    LIR_Address* addr = opr->as_address_ptr();
    assert(addr->base() == stack_pointer(), "sp based addressing only");
    return sp_offset2vmreg(in_ByteSize(addr->index()->as_jint()));
  }
  ShouldNotReachHere();
  return VMRegImpl::Bad();
}
Example #7
CallingConvention* FrameMap::c_calling_convention(const BasicTypeArray* signature) {
  // compute the size of the arguments first.  The signature array
  // that java_calling_convention takes includes a T_VOID after double
  // work items but our signatures do not.
  int i;
  int sizeargs = 0;
  for (i = 0; i < signature->length(); i++) {
    sizeargs += type2size[signature->at(i)];
  }

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  int sig_index = 0;
  for (i = 0; i < sizeargs; i++, sig_index++) {
    sig_bt[i] = signature->at(sig_index);
    if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
      sig_bt[i + 1] = T_VOID;
      i++;
    }
  }

  intptr_t out_preserve = SharedRuntime::c_calling_convention(sig_bt, regs, NULL, sizeargs);
  LIR_OprList* args = new LIR_OprList(signature->length());
  for (i = 0; i < sizeargs;) {
    BasicType t = sig_bt[i];
    assert(t != T_VOID, "should be skipping these");

    // C calls are always outgoing
    bool outgoing = true;
    LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
    // they might be of different types if for instance floating point
    // values are passed in cpu registers, but the sizes must match.
    assert(type2size[opr->type()] == type2size[t], "type mismatch");
    args->append(opr);
    if (opr->is_address()) {
      LIR_Address* addr = opr->as_address_ptr();
      out_preserve = MAX2(out_preserve, (intptr_t)(addr->disp() - STACK_BIAS) / 4);
    }
    i += type2size[t];
  }
  assert(args->length() == signature->length(), "size mismatch");
  out_preserve += SharedRuntime::out_preserve_stack_slots();
  update_reserved_argument_area_size(out_preserve * BytesPerWord);
  return new CallingConvention(args, out_preserve);
}
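
As the comment above notes, both calling-convention helpers first expand the compact signature into the T_VOID-padded form that SharedRuntime expects, where every two-slot type (long, double) is followed by a filler entry. A standalone sketch of that expansion, using stand-in type constants rather than HotSpot's BasicType values:

#include <vector>

enum BT { BT_INT, BT_LONG, BT_DOUBLE, BT_OBJECT, BT_VOID };
static int slots(BT t) { return (t == BT_LONG || t == BT_DOUBLE) ? 2 : 1; }

std::vector<BT> expand_signature(const std::vector<BT>& sig) {
  std::vector<BT> out;
  for (BT t : sig) {
    out.push_back(t);
    if (slots(t) == 2) out.push_back(BT_VOID);  // pad the second slot
  }
  return out;  // e.g. {INT, LONG, OBJECT} -> {INT, LONG, VOID, OBJECT}
}
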
Example #8
CallingConvention* FrameMap::java_calling_convention(const BasicTypeArray* signature, bool outgoing) {
  // compute the size of the arguments first.  The signature array
  // that java_calling_convention takes includes a T_VOID after double
  // work items but our signatures do not.
  int i;
  int sizeargs = 0;
  for (i = 0; i < signature->length(); i++) {
    sizeargs += type2size[signature->at(i)];
  }

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  int sig_index = 0;
  for (i = 0; i < sizeargs; i++, sig_index++) {
    sig_bt[i] = signature->at(sig_index);
    if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
      sig_bt[i + 1] = T_VOID;
      i++;
    }
  }

  intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, outgoing);
  LIR_OprList* args = new LIR_OprList(signature->length());
  for (i = 0; i < sizeargs;) {
    BasicType t = sig_bt[i];
    assert(t != T_VOID, "should be skipping these");
    LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
    args->append(opr);
    if (opr->is_address()) {
      LIR_Address* addr = opr->as_address_ptr();
      assert(addr->disp() == (int)addr->disp(), "out of range value");
      out_preserve = MAX2(out_preserve, (intptr_t)(addr->disp() - STACK_BIAS) / 4);
    }
    i += type2size[t];
  }
  assert(args->length() == signature->length(), "size mismatch");
  out_preserve += SharedRuntime::out_preserve_stack_slots();

  if (outgoing) {
    // update the space reserved for arguments.
    update_reserved_argument_area_size(out_preserve * BytesPerWord);
  }
  return new CallingConvention(args, out_preserve);
}