Code example #1
inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ldub(d, addrlit.low10() + offset, d);
}
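
A minimal usage sketch (not from the source): loading a VM bool flag through this helper, assuming the usual `#define __ masm->` shorthand and borrowing the global UseTLAB flag purely for illustration.

AddressLiteral flag_addr((address)&UseTLAB);  // hypothetical: any bool's address works
__ load_bool_contents(flag_addr, G1, 0);      // G1 := *(ubyte*)&UseTLAB (0 or 1)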
Code example #2
File: c1_Runtime1_sparc.cpp  Project: pombreda/graal
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  OopMapSet* oop_maps = NULL;
  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // stub code & info for the different stubs
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register G5_klass = G5; // Incoming
        Register O0_obj   = O0; // Outgoing

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register G1_obj_size = G1;
          Register G3_t1 = G3;
          Register G4_t2 = G4;
          assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2);

          // Push a frame since we may do dtrace notification for the
          // allocation which requires calling out and we don't want
          // to stomp the real return address.
          __ save_frame(0);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldub(G5_klass, in_bytes(InstanceKlass::init_state_offset()), G3_t1);
            __ cmp_and_br_short(G3_t1, InstanceKlass::fully_initialized, Assembler::notEqual, Assembler::pn, slow_path);
          }
#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
            // make sure it's an instance (LH > 0)
            __ cmp_and_br_short(G1_obj_size, 0, Assembler::lessEqual, Assembler::pn, not_ok);
            __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size);
            __ br(Assembler::zero, false, Assembler::pn, ok);
            __ delayed()->nop();
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT
          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass

          __ bind(retry_tlab);

          // get the instance size
          __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);

          __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path);

          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(try_eden);
          // get the instance size
          __ ld(G5_klass, in_bytes(Klass::layout_helper_offset()), G1_obj_size);
          __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path);
          __ incr_allocated_bytes(G1_obj_size, G3_t1, G4_t2);

          __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2);
          __ verify_oop(O0_obj);
          __ mov(O0, I0);
          __ ret();
          __ delayed()->restore();

          __ bind(slow_path);

          // pop this frame so generate_stub_call can push its own
          __ restore();
        }

        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass);
        // I0->O0: new instance
      }

      break;

    case counter_overflow_id:
      // G4 contains bci, G5 contains method
      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4, G5);
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register G5_klass = G5; // Incoming
        Register G4_length = G4; // Incoming
        Register O0_obj   = O0; // Outgoing

        Address klass_lh(G5_klass, Klass::layout_helper_offset());
        assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
        assert(Klass::_lh_header_size_mask == 0xFF, "bytewise");
        // Use this offset to pick out an individual byte of the layout_helper:
        const int klass_lh_header_size_offset = ((BytesPerInt - 1)  // 3 - 2 selects byte {0,1,0,0}
                                                 - Klass::_lh_header_size_shift / BitsPerByte);

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register G3_t1 = G3;
          __ ld(klass_lh, G3_t1);
          __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmp_and_brx_short(G3_t1, tag, Assembler::equal, Assembler::pt, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register G1_arr_size = G1;
          Register G3_t1 = G3;
          Register O1_t2 = O1;
          assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2);

          // check that array length is small enough for fast path
          __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1);
          __ cmp_and_br_short(G4_length, G3_t1, Assembler::greaterUnsigned, Assembler::pn, slow_path);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass

          __ bind(retry_tlab);

          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ ld(klass_lh, G3_t1);
          __ sll(G4_length, G3_t1, G1_arr_size);
          __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
          __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
          __ add(G1_arr_size, G3_t1, G1_arr_size);
          __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);  // align up
          __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);

          __ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path);  // preserves G1_arr_size

          __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
          __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
          __ sub(G1_arr_size, G3_t1, O1_t2);  // body length
          __ add(O0_obj, G3_t1, G3_t1);       // body start
          __ initialize_body(G3_t1, O1_t2);
          __ verify_oop(O0_obj);
          __ retl();
          __ delayed()->nop();

          __ bind(try_eden);
          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ ld(klass_lh, G3_t1);
          __ sll(G4_length, G3_t1, G1_arr_size);
          __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1);
          __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1);
          __ add(G1_arr_size, G3_t1, G1_arr_size);
          __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size);
          __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size);

          __ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path);  // preserves G1_arr_size
          __ incr_allocated_bytes(G1_arr_size, G3_t1, O1_t2);

          __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2);
          __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset);
          __ sub(G1_arr_size, G3_t1, O1_t2);  // body length
          __ add(O0_obj, G3_t1, G3_t1);       // body start
          __ initialize_body(G3_t1, O1_t2);
          __ verify_oop(O0_obj);
          __ retl();
          __ delayed()->nop();

          __ bind(slow_path);
        }

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length);
        } else {
          oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length);
        }
        // I0 -> O0: new array
      }
      break;

    case new_multi_array_id:
      { // O0: klass
        // O1: rank
        // O2: address of 1st dimension
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2);
        // I0 -> O0: new multi array
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = O1;
        __ load_klass(O0, t);
        __ ld(t, in_bytes(Klass::access_flags_offset()), t);
        __ set(JVM_ACC_HAS_FINALIZER, G3);
        __ andcc(G3, t, G0);
        __ br(Assembler::notZero, false, Assembler::pt, register_finalizer);
        __ delayed()->nop();

        // do a leaf return
        __ retl();
        __ delayed()->nop();

        __ bind(register_finalizer);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments); // arguments will be discarded
        // G4: index
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        // O0: exception
        // I7: address of call to this method

        __ set_info("unwind_exception", dont_gc_arguments);
        __ mov(Oexception, Oexception->after_save());
        __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save());

        __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                        G2_thread, Oissuing_pc->after_save());
        __ verify_not_null_oop(Oexception->after_save());

        // Restore SP from L7 if the exception PC is a method handle call site.
        __ mov(O0, G5);  // Save the target address.
        __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0);
        __ tst(L0);  // Condition codes are preserved over the restore.
        __ restore();

        __ jmp(G5, 0);
        __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP);  // Restore SP if required.
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        // G4: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
        // Arguments :
        //
        //      ret  : G3
        //      sub  : G3, argument, destroyed
        //      super: G1, argument, not changed
        //      raddr: O7, blown by call
        Label miss;

        __ save_frame(0);               // Blow no registers!

        __ check_klass_subtype_slow_path(G3, G1, L0, L1, L2, L4, NULL, &miss);

        __ mov(1, G3);
        __ ret();                       // Result in G3 is 'true'
        __ delayed()->restore();        // free copy or add can go here

        __ bind(miss);
        __ mov(0, G3);
        __ ret();                       // Result in G3 is 'false'
        __ delayed()->restore();        // free copy or add can go here
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // G4: object
        // G5: lock address
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), G4, G5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // G4: lock address
        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // make a frame and preserve the caller's caller-save registers
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), G4);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ ret();
        __ delayed()->restore();
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ jump_to(dest, O0);
        __ delayed()->restore();
      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object
        __ set_info("dtrace_object_alloc", dont_gc_arguments);
        // we can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm);

        __ save_thread(L7_thread_cache);
        __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
                relocInfo::runtime_call_type);
        __ delayed()->mov(I0, O0);
        __ restore_thread(L7_thread_cache);

        restore_live_registers(sasm);
        __ ret();
        __ delayed()->restore();
      }
      break;

#if INCLUDE_ALL_GCS
    case g1_pre_barrier_slow_id:
      { // G4: previous value of memory
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        Register pre_val = G4;
        Register tmp  = G1_scratch;
        Register tmp2 = G3_scratch;

        Label refill, restart;
        bool with_frame = false; // I don't know if we can do with-frame.
        int satb_q_index_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   PtrQueue::byte_offset_of_index());
        int satb_q_buf_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   PtrQueue::byte_offset_of_buf());

        __ bind(restart);
        // Load the index into the SATB buffer. PtrQueue::_index is a
        // size_t so ld_ptr is appropriate
        __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);

        // index == 0?
        __ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pn, refill);

        __ ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
        __ sub(tmp, oopSize, tmp);

        __ st_ptr(pre_val, tmp2, tmp);  // [_buf + index] := pre_val
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);

        __ bind(refill);
        __ save_frame(0);

        __ mov(pre_val, L0);
        __ mov(tmp,     L1);
        __ mov(tmp2,    L2);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         SATBMarkQueueSet::handle_zero_index_for_thread),
                                         G2_thread);

        __ mov(L0, pre_val);
        __ mov(L1, tmp);
        __ mov(L2, tmp2);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;

    case g1_post_barrier_slow_id:
      {
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ save_frame(0);
          __ set((int)id, O1);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
          __ should_not_reach_here();
          break;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        Register addr = G4;
        Register cardtable = G5;
        Register tmp  = G1_scratch;
        Register tmp2 = G3_scratch;
        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;

        Label not_already_dirty, restart, refill;

#ifdef _LP64
        __ srlx(addr, CardTableModRefBS::card_shift, addr);
#else
        __ srl(addr, CardTableModRefBS::card_shift, addr);
#endif

        AddressLiteral rs(byte_map_base);
        __ set(rs, cardtable);         // cardtable := <card table base>
        __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]

        assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
        __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);

        // We didn't take the branch, so we're already dirty: return.
        // Use return-from-leaf
        __ retl();
        __ delayed()->nop();

        // Not dirty.
        __ bind(not_already_dirty);

        // Get cardtable + tmp into a reg by itself
        __ add(addr, cardtable, tmp2);

        // First, dirty it.
        __ stb(G0, tmp2, 0);  // [cardPtr] := 0  (i.e., dirty).

        Register tmp3 = cardtable;
        Register tmp4 = tmp;

        // these registers are now dead
        addr = cardtable = tmp = noreg;

        int dirty_card_q_index_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   PtrQueue::byte_offset_of_index());
        int dirty_card_q_buf_byte_offset =
          in_bytes(JavaThread::dirty_card_queue_offset() +
                   PtrQueue::byte_offset_of_buf());

        __ bind(restart);

        // Get the index into the update buffer. PtrQueue::_index is
        // a size_t so ld_ptr is appropriate here.
        __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);

        // index == 0?
        __ cmp_and_brx_short(tmp3, G0, Assembler::equal,  Assembler::pn, refill);

        __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
        __ sub(tmp3, oopSize, tmp3);

        __ st_ptr(tmp2, tmp4, tmp3);  // [_buf + index] := <address_of_card>
        // Use return-from-leaf
        __ retl();
        __ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);

        __ bind(refill);
        __ save_frame(0);

        __ mov(tmp2, L0);
        __ mov(tmp3, L1);
        __ mov(tmp4, L2);

        __ call_VM_leaf(L7_thread_cache,
                        CAST_FROM_FN_PTR(address,
                                         DirtyCardQueueSet::handle_zero_index_for_thread),
                                         G2_thread);

        __ mov(L0, tmp2);
        __ mov(L1, tmp3);
        __ mov(L2, tmp4);

        __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
        __ delayed()->restore();
      }
      break;
#endif // INCLUDE_ALL_GCS

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        restore_live_registers(sasm);

        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ jump_to(dest, O0);
        __ delayed()->restore();
      }
      break;

    default:
      { __ set_info("unimplemented entry", dont_gc_arguments);
        __ save_frame(0);
        __ set((int)id, O1);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), O1);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}
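
The shift-and-mask sequence repeated under retry_tlab and try_eden implements the commented formula "(length << (layout_helper & 0x1F)) + header_size" and then aligns up. A hedged C++ restatement of that arithmetic (a sketch, not code from the file; it relies on SPARC's sll honoring only the low 5 bits of a register shift count, which is why the raw layout_helper can serve directly as the shift amount):

jint   lh       = klass->layout_helper();
size_t arr_size = ((size_t)length << (lh & 0x1F))                  // body bytes
                + ((lh >> Klass::_lh_header_size_shift)
                   & Klass::_lh_header_size_mask);                 // header bytes
arr_size = (arr_size + MinObjAlignmentInBytesMask)
         & ~MinObjAlignmentInBytesMask;                            // align up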
Code example #3
void sparc_cpu_do_interrupt(CPUState *cs)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    int intno = cs->exception_index;
    trap_state *tsptr;

    /* Compute PSR before exposing state.  */
    if (env->cc_op != CC_OP_FLAGS) {
        cpu_get_psr(env);
    }

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x180) {
            name = "Unknown";
        } else if (intno >= 0x100) {
            name = "Trap Instruction";
        } else if (intno >= 0xc0) {
            name = "Window Fill";
        } else if (intno >= 0x80) {
            name = "Window Spill";
        } else {
            name = excp_names[intno];
            if (!name) {
                name = "Unknown";
            }
        }

        qemu_log("%6d: %s (v=%04x)\n", count, name, intno);
        log_cpu_state(cs, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log("       code=");
            ptr = (uint8_t *)env->pc;
            for (i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->tl >= env->maxtl) {
        cpu_abort(cs, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
                  " Error state", cs->exception_index, env->tl, env->maxtl);
        return;
    }
#endif
    if (env->tl < env->maxtl - 1) {
        env->tl++;
    } else {
        env->pstate |= PS_RED;
        if (env->tl < env->maxtl) {
            env->tl++;
        }
    }
    tsptr = cpu_tsptr(env);

    tsptr->tstate = (cpu_get_ccr(env) << 32) |
        ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
        cpu_get_cwp64(env);
    tsptr->tpc = env->pc;
    tsptr->tnpc = env->npc;
    tsptr->tt = intno;

    switch (intno) {
    case TT_IVEC:
        cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_IG);
        break;
    case TT_TFAULT:
    case TT_DFAULT:
    case TT_TMISS ... TT_TMISS + 3:
    case TT_DMISS ... TT_DMISS + 3:
    case TT_DPROT ... TT_DPROT + 3:
        cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_MG);
        break;
    default:
        cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_AG);
        break;
    }

    if (intno == TT_CLRWIN) {
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
    } else if ((intno & 0x1c0) == TT_SPILL) {
        cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
    } else if ((intno & 0x1c0) == TT_FILL) {
        cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
    }
    env->tbr &= ~0x7fffULL;
    env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    cs->exception_index = -1;
}
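
A hedged sketch of the bit layouts the packing above assumes (standard SPARC v9 encodings; the local variable names are illustrative, not QEMU API):

/* TSTATE := CCR<<32 | ASI<<24 | PSTATE<<8 | CWP */
uint64_t tstate  = ((uint64_t)ccr << 32) | ((uint64_t)(asi & 0xff) << 24)
                 | ((uint64_t)(pstate & 0xf3f) << 8) | (cwp & 0x1f);
/* Trap vector: 32-byte table entries, hence tt << 5; bit 14 is set when the
 * trap was taken at TL > 0, i.e. env->tl > 1 after the increment above. */
uint64_t trap_pc = (tbr & ~0x7fffULL) | ((tl > 1) ? (1 << 14) : 0) | (tt << 5);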
Code example #4
void G1UnsafeGetObjSATBBarrierStub::emit_code(LIR_Assembler* ce) {
    // At this point we know that offset == referent_offset.
    //
    // So we might have to emit:
    //   if (src == null) goto continuation.
    //
    // and we definitely have to emit:
    //   if (klass(src).reference_type == REF_NONE) goto continuation
    //   if (!marking_active) goto continuation
    //   if (pre_val == null) goto continuation
    //   call pre_barrier(pre_val)
    //   goto continuation
    //
    __ bind(_entry);

    assert(src()->is_register(), "sanity");
    Register src_reg = src()->as_register();

    if (gen_src_check()) {
        // The original src operand was not a constant.
        // Generate src == null?
        if (__ is_in_wdisp16_range(_continuation)) {
            __ br_null(src_reg, /*annul*/false, Assembler::pt, _continuation);
        } else {
            __ cmp(src_reg, G0);
            __ brx(Assembler::equal, false, Assembler::pt, _continuation);
        }
        __ delayed()->nop();
    }

    // Generate src->_klass->_reference_type() == REF_NONE?
    assert(tmp()->is_register(), "sanity");
    Register tmp_reg = tmp()->as_register();

    __ load_klass(src_reg, tmp_reg);

    Address ref_type_adr(tmp_reg, instanceKlass::reference_type_offset());
    __ ldub(ref_type_adr, tmp_reg);

    // _reference_type field is of type ReferenceType (enum)
    assert(REF_NONE == 0, "check this code");
    __ cmp_zero_and_br(Assembler::equal, tmp_reg, _continuation, /*annul*/false, Assembler::pt);
    __ delayed()->nop();

    // Is marking active?
    assert(thread()->is_register(), "precondition");
    Register thread_reg = thread()->as_pointer_register();

    Address in_progress(thread_reg, in_bytes(JavaThread::satb_mark_queue_offset() +
                        PtrQueue::byte_offset_of_active()));

    if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
        __ ld(in_progress, tmp_reg);
    } else {
        assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
        __ ldsb(in_progress, tmp_reg);
    }

    __ cmp_zero_and_br(Assembler::equal, tmp_reg, _continuation, /*annul*/false, Assembler::pt);
    __ delayed()->nop();

    // val == null?
    assert(val()->is_register(), "Precondition.");
    Register val_reg = val()->as_register();

    if (__ is_in_wdisp16_range(_continuation)) {
        __ br_null(val_reg, /*annul*/false, Assembler::pt, _continuation);
    } else {
        __ cmp(val_reg, G0);
        __ brx(Assembler::equal, false, Assembler::pt, _continuation);
    }
    __ delayed()->nop();

    __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id));
    __ delayed()->mov(val_reg, G4);
    __ br(Assembler::always, false, Assembler::pt, _continuation);
    __ delayed()->nop();
}
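
An editorial note on the repeated is_in_wdisp16_range() checks above, with the same code re-annotated (the displacement widths are from the SPARC v9 branch encodings, not from this file):

if (__ is_in_wdisp16_range(_continuation)) {
    __ br_null(val_reg, /*annul*/false, Assembler::pt, _continuation);  // BPr: 16-bit word displacement
} else {
    __ cmp(val_reg, G0);                                                // BPcc: 19-bit word displacement,
    __ brx(Assembler::equal, false, Assembler::pt, _continuation);      // reaches farther
}
__ delayed()->nop();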
Code example #5
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
  const char *name;
  switch (type) {
    case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
    case T_BYTE:    name = "jni_fast_GetByteField";    break;
    case T_CHAR:    name = "jni_fast_GetCharField";    break;
    case T_SHORT:   name = "jni_fast_GetShortField";   break;
    case T_INT:     name = "jni_fast_GetIntField";     break;
    default:        ShouldNotReachHere();
  }
  ResourceMark rm;
  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize);
  CodeBuffer cbuf(blob);
  MacroAssembler* masm = new MacroAssembler(&cbuf);
  address fast_entry = __ pc();

  Label label1, label2;

  AddressLiteral cnt_addrlit(SafepointSynchronize::safepoint_counter_addr());
  __ sethi (cnt_addrlit, O3);
  Address cnt_addr(O3, cnt_addrlit.low10());
  __ ld (cnt_addr, G4);
  __ andcc (G4, 1, G0);
  __ br (Assembler::notZero, false, Assembler::pn, label1);
  __ delayed()->srl (O2, 2, O4);
  __ ld_ptr (O1, 0, O5);

  assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
  speculative_load_pclist[count] = __ pc();
  switch (type) {
    case T_BOOLEAN: __ ldub (O5, O4, G3);  break;
    case T_BYTE:    __ ldsb (O5, O4, G3);  break;
    case T_CHAR:    __ lduh (O5, O4, G3);  break;
    case T_SHORT:   __ ldsh (O5, O4, G3);  break;
    case T_INT:     __ ld (O5, O4, G3);    break;
    default:        ShouldNotReachHere();
  }

  __ ld (cnt_addr, O5);
  __ cmp (O5, G4);
  __ br (Assembler::notEqual, false, Assembler::pn, label2);
  __ delayed()->mov (O7, G1);
  __ retl ();
  __ delayed()->mov (G3, O0);

  slowcase_entry_pclist[count++] = __ pc();
  __ bind (label1);
  __ mov (O7, G1);

  address slow_case_addr;
  switch (type) {
    case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
    case T_BYTE:    slow_case_addr = jni_GetByteField_addr();    break;
    case T_CHAR:    slow_case_addr = jni_GetCharField_addr();    break;
    case T_SHORT:   slow_case_addr = jni_GetShortField_addr();   break;
    case T_INT:     slow_case_addr = jni_GetIntField_addr();     break;
    default:        ShouldNotReachHere();
  }
  __ bind (label2);
  __ call (slow_case_addr, relocInfo::none);
  __ delayed()->mov (G1, O7);

  __ flush ();

  return fast_entry;
}
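
In effect, the fast path above implements the following protocol (a sketch under the assumption that the jfieldID encodes the field's byte offset shifted left by two, which the srl-by-2 in the first delay slot suggests; jni_GetByteField_slow is a hypothetical stand-in for the slow-case entry):

extern "C" jbyte jni_GetByteField_slow(void **handle, intptr_t fieldID);  // placeholder

static jbyte fast_get_byte_sketch(volatile int *safepoint_counter,
                                  void **handle, intptr_t fieldID) {
  int cnt = *safepoint_counter;
  if (cnt & 1)                                  // odd counter: safepoint in progress
    return jni_GetByteField_slow(handle, fieldID);
  jbyte v = *(jbyte *)((char *)*handle + (fieldID >> 2));  // speculative load
  if (*safepoint_counter != cnt)                // counter moved: redo via the slow case
    return jni_GetByteField_slow(handle, fieldID);
  return v;
}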
Code example #6
inline void MacroAssembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
Code example #7
inline void MacroAssembler::ldub(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(),         d); }
  else               {                          ldub(a.base(), a.disp() + offset, d); }
}
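
Examples #6 and #7 give three interchangeable call forms; a short usage sketch (register and displacement choices are arbitrary):

__ ldub(O5, O4, G3);            // base register + index register
__ ldub(O5, 16, G3);            // base register + immediate displacement
__ ldub(Address(O5, 16), G3);   // Address form; example #7 forwards to one of the above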
Code example #8
File: int32_helper.c  Project: aliguori/qemu
void do_interrupt(CPUState *env)
{
    int cwp, intno = env->exception_index;

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x100) {
            name = "Unknown";
        } else if (intno >= 0x80) {
            name = "Trap Instruction";
        } else {
            name = excp_names[intno];
            if (!name) {
                name = "Unknown";
            }
        }

        qemu_log("%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
                count, name, intno,
                env->pc,
                env->npc, env->regwptr[6]);
        log_cpu_state(env, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log("       code=");
            ptr = (uint8_t *)env->pc;
            for (i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->psret == 0) {
        cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
                  env->exception_index);
        return;
    }
#endif
    env->psret = 0;
    cwp = cpu_cwp_dec(env, env->cwp - 1);
    cpu_set_cwp(env, cwp);
    env->regwptr[9] = env->pc;
    env->regwptr[10] = env->npc;
    env->psrps = env->psrs;
    env->psrs = 1;
    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = -1;

#if !defined(CONFIG_USER_ONLY)
    /* IRQ acknowledgment */
    if ((intno & ~15) == TT_EXTINT && env->qemu_irq_ack != NULL) {
        env->qemu_irq_ack(env->irq_manager, intno);
    }
#endif
}
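
Two notes on the code above, as comments (the register mapping is inferred from regwptr[6] being logged as SP, i.e. %o6):

/* Window layout in QEMU's SPARC target: regwptr[0..7] = %o0..%o7 and
 * regwptr[8..15] = %l0..%l7 of the current window, so the stores above set
 * %l1 := PC and %l2 := nPC as the V8 trap model requires.  V8 trap-table
 * entries are 4 instructions (16 bytes), hence intno << 4 when the new PC
 * is formed from TBR. */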
Code example #9
File: int32_helper.c  Project: 32bitmicro/riscv-qemu
void sparc_cpu_do_interrupt(CPUState *cs)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    int cwp, intno = cs->exception_index;

    /* Compute PSR before exposing state.  */
    if (env->cc_op != CC_OP_FLAGS) {
        cpu_get_psr(env);
    }

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x100) {
            name = "Unknown";
        } else if (intno >= 0x80) {
            name = "Trap Instruction";
        } else {
            name = excp_names[intno];
            if (!name) {
                name = "Unknown";
            }
        }

        qemu_log("%6d: %s (v=%02x)\n", count, name, intno);
        log_cpu_state(cs, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log("       code=");
            ptr = (uint8_t *)env->pc;
            for (i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    if (env->psret == 0) {
        if (cs->exception_index == 0x80 &&
            env->def->features & CPU_FEATURE_TA0_SHUTDOWN) {
            qemu_system_shutdown_request();
        } else {
            cpu_abort(cs, "Trap 0x%02x while interrupts disabled, Error state",
                      cs->exception_index);
        }
        return;
    }
#endif
    env->psret = 0;
    cwp = cpu_cwp_dec(env, env->cwp - 1);
    cpu_set_cwp(env, cwp);
    env->regwptr[9] = env->pc;
    env->regwptr[10] = env->npc;
    env->psrps = env->psrs;
    env->psrs = 1;
    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    cs->exception_index = -1;

#if !defined(CONFIG_USER_ONLY)
    /* IRQ acknowledgment */
    if ((intno & ~15) == TT_EXTINT && env->qemu_irq_ack != NULL) {
        env->qemu_irq_ack(env, env->irq_manager, intno);
    }
#endif
}