// Emit code that initializes a freshly allocated object's header:
// mark word, klass reference, and (for arrays) the length field.
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
    assert_different_registers(obj, klass, len);
    if (UseBiasedLocking && !len->is_valid()) {
        assert_different_registers(obj, klass, len, t1, t2);
        // Biased locking: load the klass's prototype mark word (which carries
        // the bias pattern) and install it as the object's mark word.
        movptr(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
        movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
    } else {
        // Store the default prototype mark word as an immediate.
        // This assumes that all prototype bits fit in an int32_t.
        movptr(Address(obj, oopDesc::mark_offset_in_bytes()), (int32_t)(intptr_t)markOopDesc::prototype());
    }
    }
#ifdef _LP64
    if (UseCompressedOops) { // Take care not to kill klass
        // Compress the klass oop in a temp and store the 32-bit narrow value.
        movptr(t1, klass);
        encode_heap_oop_not_null(t1);
        movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
    } else
#endif
    {
        movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
    }

    if (len->is_valid()) {
        // Arrays: store the length field.
        movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
    }
#ifdef _LP64
    else if (UseCompressedOops) {
        // Plain objects with compressed oops: zero the klass gap that pads
        // the header out to the next word.
        xorptr(t1, t1);
        store_klass_gap(obj, t1);
    }
#endif
}
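
For orientation, here is a minimal sketch (not HotSpot source) of the header layout this code lays down on a 64-bit VM: the mark word sits at offset 0, the klass reference follows it, and with compressed oops the klass shrinks to 32 bits, leaving a 4-byte slot that holds the array length or is zeroed as the klass gap. Field names and offsets below are illustrative assumptions, not HotSpot declarations.

#include <cstdint>

// Hypothetical illustration of the 64-bit, compressed-oops object header
// that initialize_header() fills in; names and offsets are assumptions
// made for illustration only.
struct HeaderSketch {
  uint64_t mark;        // offset 0:  mark word (prototype, possibly biased)
  uint32_t narrowKlass; // offset 8:  compressed klass reference
  uint32_t gapOrLength; // offset 12: array length, or the zeroed klass gap
};

static_assert(sizeof(HeaderSketch) == 16, "header occupies two 64-bit words");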
inline void MacroAssembler::store_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1, Register tmp) {
  if (UseCompressedOops) {
    // Compress the oop (into tmp if one was supplied, otherwise in place)
    // and store it as a 32-bit word.
    Register compressedOop = encode_heap_oop_not_null((tmp != noreg) ? tmp : d, d);
    stw(compressedOop, offs, s1);
  } else {
    // Uncompressed oops are stored as full 64-bit words.
    std(d, offs, s1);
  }
}
Example #3
// Encode a known non-null heap oop and store the compressed value.
inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
Example #4
inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                volatile narrowOop* p, oop v) {
  // The heap oop is stored compressed (not pointer sized), so use the
  // narrowOop release_store to publish the encoded value with release semantics.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}
Example #5
inline narrowOop oopDesc::encode_heap_oop(oop v) {
  // NULL encodes to the canonical zero narrowOop; everything else goes
  // through the non-null encoder.
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}
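
The snippets above all funnel through encode_heap_oop_not_null. As a rough stand-alone sketch of the arithmetic it performs, assume a fixed heap base and a 3-bit alignment shift; in HotSpot these come from Universe::narrow_oop_base() and Universe::narrow_oop_shift(), and the names below are illustrative, not the real API.

#include <cassert>
#include <cstdint>

// Illustrative stand-ins for HotSpot's narrowOop type and encoding parameters.
using narrowOopSketch = uint32_t;
static const uintptr_t kHeapBase = 0x0000000800000000ULL; // assumed heap base
static const int       kShift    = 3;                     // log2(8-byte alignment)

// Sketch of the non-null encoding: take the offset from the heap base,
// then shift right by the alignment so the result fits in 32 bits.
inline narrowOopSketch encode_sketch(uintptr_t obj_addr) {
  assert(obj_addr != 0 && obj_addr >= kHeapBase);
  uintptr_t delta  = obj_addr - kHeapBase;
  uintptr_t result = delta >> kShift;
  assert((result >> 32) == 0);  // must fit in a narrow oop
  return (narrowOopSketch)result;
}

// Decoding reverses the steps: shift left and add the base back.
inline uintptr_t decode_sketch(narrowOopSketch v) {
  return kHeapBase + ((uintptr_t)v << kShift);
}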