Code example #1
// --- pre_write_barrier_compiled
void C1_MacroAssembler::pre_write_barrier_compiled(RInOuts, Register Rtmp0, Register Rtmp1,
                                                   RKeepIns, Register Rbase, int off,  Register Rindex, int scale, Register Rval,
                                                   CodeEmitInfo *info) {
  Label retry, safe_to_store;
  if (UseSBA) bind(retry); // Come here to retry barrier following an SBA escape
  // Perform regular pre write barrier
  pre_write_barrier(RInOuts::a, Rtmp0, Rtmp1, RKeepIns::a, Rbase, off, Rindex, scale, Rval, safe_to_store);
  if( UseSBA ) {
    // SBA escape will update Rbase. Rbase may already have been added to the
    // oop map for patching. Force Rbase into oop map.
    // NB. this is the last use of the oop map!
    info->oop_map()->add((VOopReg::VR)Rbase);
    // Full SBA escape - Rtmp0 holds FID of Rbase
    // Place args to sba_escape below return address in outgoing stack frame
    assert0 (-frame::runtime_stub_frame_size + frame::xPAD == -16);
    st8(RSP, -16, Rval);
    assert0 (-frame::runtime_stub_frame_size + frame::xRAX == -24);
    st8(RSP, -24, Rbase);
    call(StubRoutines::x86::sba_escape_handler());  // Do call
    assert(info, "oop map expected");
    add_oopmap(rel_pc(), info->oop_map()); // Add oop map on return address of call
    add_dbg(rel_pc(), info->debug_scope());
    jmp (retry);
  }
bind(safe_to_store);
}
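For orientation, the control flow this emitter produces can be sketched in plain C++: rerun the pre-write barrier after every SBA escape (the escape handler may update the base oop), and only then fall through to the store. The barrier and escape handler below are trivial stand-ins so the sketch compiles; they are not the VM's functions.

// Plain-C++ sketch of the retry/escape loop above. SBA is Azul's stack-based
// allocation; the "barrier" and "escape handler" here are toy stand-ins.
#include <cstdio>

enum BarrierResult { SAFE_TO_STORE, NEEDS_SBA_ESCAPE };

// Stand-in: pretend odd base values still live in a stack frame and must escape.
static BarrierResult pre_write_barrier_sim(unsigned long base) {
  return (base & 1) ? NEEDS_SBA_ESCAPE : SAFE_TO_STORE;
}

// Stand-in: "escaping" relocates the object, so the caller sees an updated base.
static unsigned long sba_escape_sim(unsigned long base) {
  return base + 1;
}

static unsigned long barrier_then_store(unsigned long base) {
  // retry: run the pre-write barrier again after each SBA escape,
  // because the escape handler may have changed the base oop.
  while (pre_write_barrier_sim(base) == NEEDS_SBA_ESCAPE) {
    base = sba_escape_sim(base);
  }
  // safe_to_store: the caller performs the actual reference store here.
  return base;
}

int main() {
  std::printf("base after barrier: %lu\n", barrier_then_store(41));
  return 0;
}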
Code example #2
// --- ref_store_with_check
// Store oop taking care of SBA escape and barriers. Returns relpc of where NPE
// info should be added.
int C1_MacroAssembler::ref_store_with_check(RInOuts, Register Rbase, Register Rtmp0, Register Rtmp1,
                                            RKeepIns, Register Rindex, int off, int scale, Register Rval,
                                            CodeEmitInfo *info) {
Label strip;

#ifdef ASSERT
  verify_oop(Rval, MacroAssembler::OopVerify_Store);
  if (RefPoisoning) move8(Rtmp1,Rval); // Save Rval
#endif

  null_chk( Rval,strip  ); // NULLs are always safe to store

  pre_write_barrier_compiled(RInOuts::a, Rtmp0, Rtmp1,
                             RKeepIns::a, Rbase, off, Rindex, scale, Rval,
                             info);
#ifdef ASSERT
  if (RefPoisoning) {
    mov8  (Rtmp1,Rval);   // Save Rval again as it will have been squashed by the barrier
    if( Rbase == Rval ) { // if base can look like value then don't poison base
      assert0( MultiMapMetaData );
      Rval = Rtmp1;
      Rtmp1 = Rbase;
    }
    always_poison(Rval); // Poison ref
  }
#endif // ASSERT
bind(strip);
  verify_not_null_oop(Rbase, MacroAssembler::OopVerify_StoreBase);
  cvta2va(Rbase);
  int npe_relpc = rel_pc();
#ifdef ASSERT
  // check the value to be squashed is an oop, npe on this rather than the store
  if (should_verify_oop(MacroAssembler::OopVerify_OverWrittenField))
    npe_relpc = verify_oop (Rbase, off, Rindex, scale, MacroAssembler::OopVerify_OverWrittenField);
#endif
  if (Rindex == noreg) st8  (Rbase, off, Rval);
  else                 st8  (Rbase, off, Rindex, scale, Rval);
#ifdef ASSERT
  if (RefPoisoning) mov8(Rval,Rtmp1); // Restore unpoisoned Rval
#endif
  return npe_relpc;
}
Code example #3
  //------------------------------------------------------------------------------------------------------------------------
  // Return point for a Java call if there's an exception thrown in Java code.
  // The exception is caught and transformed into a pending exception stored in
  // JavaThread that can be tested from within the VM.
  //
  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");

    address start = __ pc();

    // verify that thread corresponds
//  __ verify_thread();

    // set pending exception
//  __ verify_oop(GR8_exception, "generate_catch_exception");

    const Register pending_exception_addr   = GR2_SCRATCH;
    const Register exception_file_addr      = GR3_SCRATCH;
    const Register exception_line_addr      = GR31_SCRATCH;
    const Register exception_file           = GR30_SCRATCH;
    const Register exception_line           = GR29_SCRATCH;
    const Register call_stub_return_address = GR28_SCRATCH;

    const BranchRegister call_stub_return_address_br = BR6_SCRATCH;

    __ add(pending_exception_addr, thread_(pending_exception));
    __ mova(exception_file, (address)__FILE__);
    __ add(exception_file_addr, thread_(exception_file));
    __ mova(exception_line, (address)__LINE__);

    __ st8(pending_exception_addr, GR8_exception);
    __ st8(exception_file_addr, exception_file);
    __ add(exception_line_addr, thread_(exception_line));

    __ st8(exception_line_addr, exception_line);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");

    __ mova(call_stub_return_address, StubRoutines::_call_stub_return_address);
    __ mov(call_stub_return_address_br, call_stub_return_address);
    __ br(call_stub_return_address_br);

    __ flush_bundle();

    return start;
  }
Code example #4
File: unicode.c  Project: duper/blackbag
char *
unicode2asc(char *dst, u_char *bp, u_char *ep, size_t *l) {
	char *cp = dst, *tp = &dst[*l] - 1;

	while(&bp[1] < ep && cp < tp) {
		u_int16_t code = lld16(&bp, ep);
		if(isprint(code) || code == '\n' || code == '\r')
			st8((u_char)code, (u_char**)&cp, (u_char*)tp);
		else /*oink*/
			cp += fmt_sfmt(cp, (size_t)(tp-cp), "&%s;", _ent(code));
	}

	*cp++ = 0;

	*l = (size_t)(cp - dst);
	return(dst);
}
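For comparison, here is a minimal self-contained version of the same loop in standard C++. It uses std::string in place of the project's lld16/st8/fmt_sfmt helpers and emits a numeric character reference instead of _ent()'s named entity, so treat it as a sketch of the idea rather than a drop-in replacement.

// Walk a UTF-16LE buffer; copy printable ASCII through, escape everything else.
#include <cctype>
#include <cstdint>
#include <cstdio>
#include <string>

std::string utf16le_to_ascii(const uint8_t *bp, const uint8_t *ep) {
  std::string out;
  while (bp + 1 < ep) {
    uint16_t code = static_cast<uint16_t>(bp[0] | (bp[1] << 8)); // little-endian load
    bp += 2;
    if ((code < 0x80 && std::isprint(code)) || code == '\n' || code == '\r') {
      out.push_back(static_cast<char>(code));        // printable: copy through
    } else {
      char buf[16];
      std::snprintf(buf, sizeof buf, "&#%u;", static_cast<unsigned>(code)); // numeric entity
      out += buf;
    }
  }
  return out;
}

int main() {
  const uint8_t sample[] = { 'H', 0, 'i', 0, 0x3C, 0x04 }; // "Hi" followed by U+043C
  std::printf("%s\n", utf16le_to_ascii(sample, sample + sizeof sample).c_str());
  return 0;
}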
Code example #5
File: inst6809.c  Project: gordonjcp/miragetools
void stb()
{
  st8(rb);
}
Code example #6
File: inst6809.c  Project: gordonjcp/miragetools
void sta()
{
  st8(ra);
}
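Both handlers suggest that st8() stores an 8-bit register at the effective address computed for the current instruction. Below is a hypothetical sketch of such a helper; the names (memory, ea, cc_n, cc_z) are assumptions for illustration, not the miragetools API.

// Hypothetical st8() for a 6809-style emulator: write the register byte to the
// effective address and update the N/Z condition-code bits.
#include <cstdint>

static uint8_t  memory[0x10000];  // 64 KiB address space
static uint16_t ea;               // effective address from the addressing-mode decode
static bool     cc_n, cc_z;       // condition-code flags

static void st8_sketch(uint8_t reg) {
  memory[ea] = reg;               // store the register byte
  cc_n = (reg & 0x80) != 0;       // negative flag from bit 7
  cc_z = (reg == 0);              // zero flag
}

int main() {
  ea = 0x2000;
  st8_sketch(0x80);               // e.g. STA with A = 0x80
  return (memory[0x2000] == 0x80 && cc_n && !cc_z) ? 0 : 1;
}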
Code example #7
void InterpreterRuntime::SignatureHandlerGenerator::pass_prev(int slot_offset) {
  Argument      jni_arg(_prev_jni_offset);
  Argument::Sig sig = _prev_sig;

  if (sig == Argument::no_sig) {
    return;
  }

  slot_offset += BytesPerWord;

  if (Argument::is_integral(sig)) {
    // Integral argument

    // Load either the output register or a very-local temp from the java stack
    // Bump java stack offset address if requested.
    const Register tmp = jni_arg.is_register() ? jni_arg.as_register() : GR2_SCRATCH;

    if (slot_offset == 0) {
      __ ld8(tmp, GR_I0);
    } else {
      __ ld8(tmp, GR_I0, -slot_offset);
    }

    if (Argument::is_4byte(sig)) {
      __ sxt4(tmp, tmp);
    }

    if (Argument::is_obj(sig)) {
      // Object, box if not null
      const PredicateRegister box = PR15_SCRATCH;

      __ cmp(box, PR0, 0, tmp, Assembler::notEqual);
      __ add(box, tmp, GR_I0, slot_offset);
    }

    if (!jni_arg.is_register()) {
      // Store into native memory parameter list
      __ add(GR3_SCRATCH, SP, jni_arg.jni_offset_in_frame());
      __ st8(GR3_SCRATCH, tmp);
    }

  } else {
    // Floating point argument
    const FloatRegister tmp = jni_arg.is_register() ? as_FloatRegister(FR_I0->encoding() + _prev_float_reg_offset) : FR6;

    if (jni_arg.is_register()) {
      if (Argument::is_4byte(sig)) {
        // Single precision float
        if (slot_offset == 0) {
          __ ldfs(tmp, GR_I0);
        } else {
          __ ldfs(tmp, GR_I0, -slot_offset);
        }
      } else {
        // Double precision float
        if (slot_offset == 0) {
          __ ldfd(tmp, GR_I0);
        } else {
          __ ldfd(tmp, GR_I0, -slot_offset);
        }
      }
    } else {
      if (slot_offset == 0) {
        __ ld8(GR2_SCRATCH, GR_I0);
      } else {
        __ ld8(GR2_SCRATCH, GR_I0, -slot_offset);
      }
      __ add(GR3_SCRATCH, SP, jni_arg.jni_offset_in_frame());
      __ st8(GR3_SCRATCH, GR2_SCRATCH);
    }
  }
}
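The "box if not null" step above follows the usual JNI convention in interpreter signature handlers: a non-null oop is passed as the address of its stack slot (a handle), a null oop as a NULL handle. A minimal sketch of that rule, with illustrative names only:

// box_oop_slot: given the address of a Java-stack slot holding an oop,
// return the slot address for a non-null oop or NULL for a null oop.
#include <cstdint>
#include <cstdio>

static intptr_t *box_oop_slot(intptr_t *slot) {
  return (*slot != 0) ? slot : nullptr;
}

int main() {
  intptr_t with_oop = 0x1234, with_null = 0;
  std::printf("%p %p\n",
              static_cast<void*>(box_oop_slot(&with_oop)),
              static_cast<void*>(box_oop_slot(&with_null)));
  return 0;
}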
Code example #8
  // Support for uint StubRoutine::ia64::partial_subtype_check( Klass sub, Klass super );
  // Arguments :
  //      ret  : GR_RET, returned
  //      sub  : I0, argument
  //      super: I1, argument
  //
  address generate_partial_subtype_check() {
    StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
    address start = __ pc();

    Label loop, missed, hit;

    const Register subklass   = GR_I0; // subklass
    const Register superklass = GR_I1; // superklass

    const Register length     = GR_L0; // cache array length
    const Register index      = GR_L1; // index into cache array
    const Register value      = GR_L2; // current value from cache array
    const Register save_PFS   = GR_L3;

    const PredicateRegister miss = PR6_SCRATCH;

    // Allocate a small frame for a leaf routine
    __ alloc(save_PFS, 8, 4, 0, 0);

    // Set up the input and local registers

    int source_offset = Klass::secondary_supers_offset_in_bytes();
    int target_offset = Klass::secondary_super_cache_offset_in_bytes();

    int length_offset = arrayOopDesc::length_offset_in_bytes();
    int base_offset   = arrayOopDesc::base_offset_in_bytes(T_OBJECT);

    // Compare super with sub directly, since super is not in its own SSA.
    // The compiler used to emit this test, but we fold it in here,
    // to allow platform-specific tweaking on sparc.
    __ cmp(PR6_SCRATCH, PR0, subklass, superklass, Assembler::equal);
    __ br(PR6_SCRATCH, hit, Assembler::sptk);

    __ add(subklass, sizeof(oopDesc) + source_offset, subklass);

    __ ld8(value, subklass, target_offset - source_offset);

    // Point to the length
    __ add(value, length_offset, value);

    // Load the length, set the pointer to the base, and clear the index
    __ ld2(length, value, base_offset - length_offset);
    __ clr(index);

    // Load the next pointer (which can run 1 past the end)
    // Exit the loop if the count is reached
    __ bind(loop);

    __ ld8(GR_RET, value, BytesPerWord);
    __ cmp(miss, PR0, index, length, Assembler::equal);
    __ br(miss, missed, Assembler::spnt);

    // Increment the loop counter
    // Exit if this is a match
    __ cmp(miss, PR0, GR_RET, superklass, Assembler::notEqual);
                                  // Check for match
    __ add(index, 1, index);      // Bump index
    __ br(miss, loop, Assembler::sptk);

    // Got a hit; return success (zero result); set cache.
    // Cache load doesn't happen here; for speed it is directly emitted by the compiler.
    __ st8(subklass, superklass); // Save result to cache

    // Got a hit, return zero result
    __ bind(hit);

    __ mov(AR_PFS, save_PFS);
    __ clr(GR_RET);               // Set zero result
    __ ret();                     // Result in GR_RET is ok

    // Got a miss, return non-zero result
    __ bind(missed);

    __ mov(AR_PFS, save_PFS);
    __ mov(GR_RET, 1);            // Set non-zero result
    __ ret();                     // Result in GR_RET is ok

    return start;
  }
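Restated as plain C++, the stub scans the subklass's secondary-supers array for the superklass, records a hit in the secondary-super cache, and returns zero on success. The field names below are illustrative, not the exact Klass layout:

// What partial_subtype_check computes, as a C++ sketch with made-up field names.
struct KlassSketch {
  KlassSketch **secondary_supers;      // array of secondary supertypes
  int           secondary_count;       // its length
  KlassSketch  *secondary_super_cache; // last successful lookup
};

static unsigned partial_subtype_check_sketch(KlassSketch *sub, KlassSketch *super) {
  if (sub == super) return 0;                  // the direct test folded in above
  for (int i = 0; i < sub->secondary_count; i++) {
    if (sub->secondary_supers[i] == super) {
      sub->secondary_super_cache = super;      // remember the hit
      return 0;                                // zero: sub is a subtype of super
    }
  }
  return 1;                                    // non-zero: miss
}

int main() {
  KlassSketch object_k = { nullptr, 0, nullptr };
  KlassSketch *supers[] = { &object_k };
  KlassSketch sub_k = { supers, 1, nullptr };
  return partial_subtype_check_sketch(&sub_k, &object_k); // 0 == subtype
}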
Code example #9
  //------------------------------------------------------------------------------------------------------------------------
  // Call stubs are used to call Java from C
  //
  // GR_I0 - call wrapper address     : address
  // GR_I1 - result                   : intptr_t*
  // GR_I2 - result type              : BasicType
  // GR_I3 - method                   : methodOop
  // GR_I4 - interpreter entry point  : address
  // GR_I5 - parameter block          : intptr_t*
  // GR_I6 - parameter count in words : int
  // GR_I7 - thread                   : Thread*
  //
  address generate_call_stub(address& return_address) {
    StubCodeMark mark(this, "StubRoutines", "call_stub");

    const Register result     = GR_I1;
    const Register type       = GR_I2;
    const Register method     = GR_I3;
    const Register entry_ptr  = GR_I4;
    const Register parms      = GR_I5;
    const Register parm_count = GR_I6;
    const Register thread     = GR_I7;

    const Register parm_size = GR31_SCRATCH;
    const Register entry     = GR30_SCRATCH;
    const Register arg       = GR29_SCRATCH;

    const Register out_tos   = GR49; // Equivalent of GR_Otos
    const Register out_parms = GR50; // Equivalent of GR_Olocals (unused)

    const BranchRegister    entry_br = BR6_SCRATCH;
    const PredicateRegister no_args  = PR6_SCRATCH;

    address start = __ emit_fd();

    // Must allocate 8 output registers in case we go thru an i2c
    // and the callee needs 8 input registers
    __ alloc(GR_Lsave_PFS, 8, 9, 8, 0);                     // save AR_PFS
    __ sxt4(parm_count, parm_count);                        // # of parms
    __ mov(GR_Lsave_SP, SP);                                // save caller's SP
    __ mov(GR_entry_frame_GR5, GR5_poll_page_addr);
    __ mov(GR_entry_frame_GR6, GR6_caller_BSP);
    __ mov(GR_entry_frame_GR7, GR7_reg_stack_limit);

    // We can not tolerate an eager RSE cpu. Itanium-1 & 2 do not support
    // this feature but we turn it off anyway
    const Register RSC   = GR2_SCRATCH;
    __ mov(RSC, AR_RSC);
    __ and3(RSC, -4, RSC);      // Turn off two low bits
    __ mov(AR_RSC, RSC);        //  enforced lazy mode

    __ shladd(parm_size, parm_count, Interpreter::logStackElementSize(), GR0); // size of stack space for the parms
    __ mov(GR_Lsave_RP, RP);                                // save return address

    __ add(parm_size, parm_size, 15);                       // round up to multiple of 16 bytes.  we use
                                                            // caller's 16-byte scratch area for params,
                                                            // so no need to add 16 to the current frame size.
    __ mov(GR_Lsave_LC, AR_LC);                             // save AR_LC
    __ add(out_parms, SP, Interpreter::stackElementSize());      // caller's SP+8 is 1st parm addr == target method locals addr

    __ and3(parm_size, parm_size, -16);
    __ cmp4(PR0, no_args, 0, parm_count, Assembler::less);  // any parms?

    __ mov(GR_entry_frame_GR4, GR4_thread);                 // save GR4_thread: it's a preserved register
    __ sub(SP, SP, parm_size);                              // allocate the space for args + scratch
    __ mov(entry_br, entry_ptr);

    __ mov(GR27_method, method);                            // load method
    __ mov(GR4_thread, thread);                             // load thread
    if (TaggedStackInterpreter) __ shl(parm_count, parm_count, 1);  // 2x tags
    __ sub(parm_count, parm_count, 1);                      // cloop counts down to zero

    // Initialize the register and memory stack limits for stack checking in compiled code
    __ add(GR7_reg_stack_limit, thread_(register_stack_limit));
    __ mov(GR6_caller_BSP, AR_BSP);                         // load register SP
    __ movl(GR5_poll_page_addr, (intptr_t) os::get_polling_page() );
    __ ld8(GR7_reg_stack_limit, GR7_reg_stack_limit);       // load register stack limit

    Label exit;

    __ mov(AR_LC, parm_count);
    __ mov(out_tos, out_parms);                             // out_tos = &out_parms[0]
    __ br(no_args, exit, Assembler::dpnt);

    // Reverse argument list and set up sender tos

    Label copy_word;
    __ bind(copy_word);

    __ ld8(arg, parms, BytesPerWord);                       // load *parms++
    __ st8(out_tos, arg, -BytesPerWord);                    // store *out_tos--
    __ cloop(copy_word, Assembler::sptk, Assembler::few);

    // Bias stack for tags.
    if (TaggedStackInterpreter) __ st8(out_tos, GR0, -BytesPerWord);
    __ bind(exit);

    __ mov(GR_entry_frame_TOS, out_tos);                    // so entry_frame_argument_at can find TOS

    // call interpreter frame manager

    // Remember the senderSP so we interpreter can pop c2i arguments off of the stack
    // when called via a c2i.

    __ mov(GR28_sender_SP, SP);

    __ call(entry_br);

    return_address = __ pc();

    // Store result depending on type.  Everything that is not
    // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.

    const PredicateRegister is_obj = PR6_SCRATCH;
    const PredicateRegister is_flt = PR7_SCRATCH;
    const PredicateRegister is_dbl = PR8_SCRATCH;
    const PredicateRegister is_lng = PR9_SCRATCH;

    __ cmp4(is_obj, PR0,    T_OBJECT, type, Assembler::equal);
    __ cmp4(is_flt, PR0,    T_FLOAT,  type, Assembler::equal);
    __ st4( result, GR_RET);

    __ st8( is_obj, result, GR_RET);
    __ stfs(is_flt, result, FR_RET);
    __ cmp4(is_dbl, PR0,    T_DOUBLE, type, Assembler::equal);

    __ stfd(is_dbl, result, FR_RET);
    __ cmp4(is_lng, PR0,    T_LONG,   type, Assembler::equal);
    __ mov(RP, GR_Lsave_RP);

    __ st8( is_lng, result, GR_RET);
    __ mov(GR4_thread, GR_entry_frame_GR4);

    __ mov(GR6_caller_BSP, GR_entry_frame_GR6);
    __ mov(GR7_reg_stack_limit, GR_entry_frame_GR7);
    __ mov(GR5_poll_page_addr, GR_entry_frame_GR5);
    __ mov(AR_PFS, GR_Lsave_PFS);

    __ mov(AR_LC, GR_Lsave_LC);
    __ mov(SP, GR_Lsave_SP);
    __ ret();

    return start;
  }
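For reference, the C++ side of the VM reaches a stub with this argument list through a function-pointer typedef along these lines; the real declaration lives in stubRoutines.hpp, and the types below are stand-ins so the sketch stays self-contained.

// One C-level entry point, eight arguments, matching GR_I0..GR_I7 above.
#include <cstdint>

typedef unsigned char *address;   // stand-in for HotSpot's address typedef

typedef void (*CallStubSketch)(address   call_wrapper,    // GR_I0
                               intptr_t *result,          // GR_I1
                               int       result_type,     // GR_I2 (BasicType)
                               void     *method,          // GR_I3 (methodOop)
                               address   entry_point,     // GR_I4
                               intptr_t *parameters,      // GR_I5
                               int       parameter_words, // GR_I6
                               void     *thread);         // GR_I7

// The call wrapper then enters Java roughly like:
//   CallStubSketch stub = (CallStubSketch) StubRoutines::call_stub();
//   stub(wrapper, &result_slot, type, method, entry_point, parms, parm_count, thread);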
Code example #10
  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for runtime calls returning with a pending exception.
  // The pending exception check happened in the runtime or native call stub.
  // The pending exception in Thread is converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  //
  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");

    address start = __ pc();

    // Upon entry, GR_Lsave_RP has the return address returning into Java
    // compiled code; i.e. the return address becomes the throwing pc.

    const Register pending_exception_addr = GR31_SCRATCH;
    const Register handler                = GR30_SCRATCH;

    const PredicateRegister is_not_null   = PR15_SCRATCH;
    const BranchRegister    handler_br    = BR6_SCRATCH;

    // Allocate abi scratch, since the compiler didn't allocate a memory frame.
    // pop_dummy_thin_frame will restore the caller's SP.
    __ sub(SP, SP, 16);

#ifdef ASSERT
    // Get pending exception oop.
    __ add(pending_exception_addr, thread_(pending_exception));
    __ ld8(GR8_exception, pending_exception_addr);

    // Make sure that this code is only executed if there is a pending exception.
    {
      Label not_null;
      __ cmp(is_not_null, PR0, 0, GR8_exception, Assembler::notEqual);
      __ br(is_not_null, not_null);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(not_null);
    }

//  __ verify_oop(GR8_exception, "generate_forward_exception");
#endif

    // Find exception handler
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), GR_Lsave_RP);

    __ mov(handler, GR_RET);

    // Load pending exception oop.
    __ add(pending_exception_addr, thread_(pending_exception));
    __ ld8(GR8_exception, pending_exception_addr);

    // The exception pc is the return address in the caller.
    __ mov(GR9_issuing_pc, GR_Lsave_RP);

    // Uses GR2, BR6
    __ pop_dummy_thin_frame();
    // Now in caller of native/stub register frame

#ifdef ASSERT
    // make sure exception is set
    {
      Label not_null;
      __ cmp(is_not_null, PR0, 0, GR8_exception, Assembler::notEqual);
      __ br(is_not_null, not_null);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(not_null);
    }
#endif
    // clear pending exception
    __ st8(pending_exception_addr, GR0);

    // jump to exception handler
    __ mov(handler_br, handler);
    __ br(handler_br);

    __ flush_bundle();

    return start;
  }