Example #1
void NativeMovConstReg::set_data(int64_t src) {
  verify();
  uint41_t new_X;   // updated X-unit instruction bits (slot 2)
  uint41_t new_L;   // updated 41-bit immediate half (slot 1)
  IPF_Bundle *bundle = (IPF_Bundle *)addr_at(0);
  X2::set_imm((uint64_t)src, bundle->get_slot2(), new_X, new_L);
  bundle->set_slot1( new_L );
  bundle->set_slot2( new_X );

  ICache::invalidate_range((address)bundle, sizeof(*bundle));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* nm = CodeCache::find_blob(instruction_address());
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)src;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
    }
  }
}
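
// For reference, a minimal sketch of the bundle layout the code above
// assumes. This BundleSketch type is hypothetical (the real IPF_Bundle
// class belongs to the HotSpot IA-64 port): an Itanium bundle is 128 bits,
// holding a 5-bit template and three 41-bit instruction slots; movl uses
// the MLX template, splitting its 64-bit immediate between slot 1 (the L
// half) and slot 2 (the X instruction).
#include <cstdint>

struct BundleSketch {
  uint64_t half0;   // bits 0..63:   template, slot 0, low 18 bits of slot 1
  uint64_t half1;   // bits 64..127: high 23 bits of slot 1, all of slot 2

  static const uint64_t SLOT_MASK = (1ULL << 41) - 1;

  unsigned get_template() const { return (unsigned)(half0 & 0x1f); }
  uint64_t get_slot0()    const { return (half0 >> 5) & SLOT_MASK; }
  uint64_t get_slot1()    const { return ((half0 >> 46) | (half1 << 18)) & SLOT_MASK; }
  uint64_t get_slot2()    const { return (half1 >> 23) & SLOT_MASK; }
};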
Example #2
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  uint41_t new_X;   // updated X-unit instruction bits (slot 2)
  uint41_t new_L;   // updated 41-bit immediate half (slot 1)

  // Not even remotely MT safe
  IPF_Bundle *bundle = (IPF_Bundle *)(nativeInstruction_at(verified_entry)->addr_at(0));

  M37 nopfill(1, 0, PR0);   // M-unit nop to fill slot 0 of the MLX bundle
  X3 branch(0, (uint) Assembler::sptk, (uint) Assembler::few, (uint) Assembler::keep, 0, PR0);   // long branch (brl) in the X slot

  branch.set_target((uint64_t)(dest - verified_entry), branch.bits(), new_X, new_L);

  bundle->set(IPF_Bundle::MLX, nopfill.bits(), new_L, new_X);   // nop.m in slot 0, brl in slots 1-2

  ICache::invalidate_range((address)bundle, sizeof(*bundle));
}
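
// Sketch of a typical call site, modeled on how other HotSpot ports use
// patch_verified_entry: when an nmethod is made not-entrant, its verified
// entry point is redirected to the handle-wrong-method stub. The wrapper
// function name here is hypothetical; 'nm' and the stub accessor come from
// the shared HotSpot runtime, not this file.
static void sketch_make_not_entrant(nmethod* nm) {
  NativeJump::patch_verified_entry(nm->entry_point(),
                                   nm->verified_entry_point(),
                                   SharedRuntime::get_handle_wrong_method_stub());
}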
// Similar to replace_mt_safe, but just changes the destination.  The
// important thing is that free-running threads are able to execute this
// call instruction at all times.  Thus, the displacement field must be
// instruction-word-aligned.
//
// Used in the runtime linkage of calls; see class CompiledIC.
void NativeCall::set_destination_mt_safe(address dest) {

  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching"); 
  // Get the address of the bundle containing the call
  IPF_Bundle *bundle = (IPF_Bundle*)addr_at(0);

  // Generate the bits for a "chk.a.nc GR0, .+0", which always branches to self
  M22 check(4 | Assembler::keep, GR0, 0, PR0);

  // Loop until the change is accomplished
  while (true) {
    uint41_t new_X, new_L;

    // verify that this is a movl
    guarantee( Assembler::is_movl( bundle->get_template(), bundle->get_slot2() ), "not a movl instruction");
  
    // Save the old bundle, and make an image that is updated
    IPF_Bundle old_bundle = *bundle;
    IPF_Bundle mid_bundle = old_bundle;
    IPF_Bundle new_bundle = old_bundle;
  
    // Change the middle bundle so that the slot 0 instruction branches to self
    mid_bundle.set_slot0( check.bits() );
  
    // Update the new image
    X2::set_imm((uint64_t)dest, new_bundle.get_slot2(), new_X, new_L);
    new_bundle.set_slot1( new_L );
    new_bundle.set_slot2( new_X );
  
    // Now the synchronous work begins: fetch the low-order half
    uint64_t old_half0 = old_bundle.get_half0();
  
    // Exchange the low-order half; if it was concurrently changed, retry
    uint64_t cur_half0 = (uint64_t)atomic::compare_and_exchange_long(
        (jlong)mid_bundle.get_half0(), (jlong*)bundle->addr_half0(), (jlong)old_half0);

    if (cur_half0 == old_half0) {
  
      // Force a memory barrier
      atomic::membar();
  
      // Write the upper half with the changed bits
      bundle->set_half1(new_bundle.get_half1());
  
      // Force a memory barrier
      atomic::membar();
  
      // Write the lower half
      bundle->set_half0(new_bundle.get_half0());
  
      // Final memory barrier
      atomic::membar();
  
      break;
    }
  }

  ICache::invalidate_range((address)bundle, sizeof(*bundle));
}
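
// The ordering above is the essential trick: park concurrent executors on a
// branch-to-self in the half that is fetched first, rewrite the other half,
// then release them by writing the final low half. Below is a portable
// sketch of that protocol with hypothetical names, written against
// std::atomic for clarity; the real code must use the VM's atomic layer,
// re-verify the movl on each retry, and flush the I-cache afterwards.
#include <atomic>
#include <cstdint>

void publish_two_halves(std::atomic<uint64_t>* half0,
                        std::atomic<uint64_t>* half1,
                        uint64_t guard,        // low half with branch-to-self in slot 0
                        uint64_t new_half0,    // final low half
                        uint64_t new_half1) {  // final high half
  for (;;) {
    uint64_t old0 = half0->load(std::memory_order_relaxed);
    // Install the guard; if the low half changed under us, another patcher
    // won the race, so reread and retry.
    if (half0->compare_exchange_strong(old0, guard, std::memory_order_seq_cst)) {
      half1->store(new_half1, std::memory_order_seq_cst);   // publish the high half
      half0->store(new_half0, std::memory_order_seq_cst);   // release the spinners
      return;
    }
  }
}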