Example #1
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
    ce->emit_call_c(a);
    CHECK_BAILOUT();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // Pass the array index in Z_R1_scratch which is not managed by linear scan.
  if (_index->is_cpu_register()) {
    __ lgr_if_needed(Z_R1_scratch, _index->as_register());
  } else {
    __ load_const_optimized(Z_R1_scratch, _index->as_jint());
  }

  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  ce->emit_call_c(Runtime1::entry_for (stub_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}
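Every stub in these examples leans on two helper macros: __ abbreviates the MacroAssembler being emitted into, and CHECK_BAILOUT() stops code emission as soon as the compilation has bailed out (for example on code-space overflow). Their real definitions live in the C1 sources; the sketch below only shows their approximate shape, assuming both are routed through the stub's LIR_Assembler:

// Sketch only: approximate expansions, not the actual HotSpot definitions.
#define __ ce->masm()->                  // emit through the stub's assembler
#define CHECK_BAILOUT() \
  { if (ce->compilation()->bailed_out()) return; }  // abandon emission on bailout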
void Compilation::emit_lir() {
  CHECK_BAILOUT();

  LIRGenerator gen(this, method());
  {
    PhaseTraceTime timeit(_t_lirGeneration);
    hir()->iterate_linear_scan_order(&gen);
  }

  CHECK_BAILOUT();

  {
    PhaseTraceTime timeit(_t_linearScan);

    LinearScan* allocator = new LinearScan(hir(), &gen, frame_map());
    set_allocator(allocator);
    // Assign physical registers to LIR operands using a linear scan algorithm.
    allocator->do_linear_scan();
    CHECK_BAILOUT();

    _max_spills = allocator->max_spills();
  }

  if (BailoutAfterLIR) {
    if (PrintLIR && !bailed_out()) {
      print_LIR(hir()->code());
    }
    bailout("Bailing out because of -XX:+BailoutAfterLIR");
  }
}
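A usage note on the final block: PrintLIR and BailoutAfterLIR appear to be develop flags, so on a debug build the combination -XX:+PrintLIR -XX:+BailoutAfterLIR prints the register-allocated LIR and then deliberately abandons the compilation, letting you inspect the allocator's output without ever emitting machine code.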
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
    if (VerifyOops) {
        OopMapStream s(info->oop_map());
        while (!s.is_done()) {
            OopMapValue v = s.current();
            if (v.is_oop()) {
                VMReg r = v.reg();
                if (!r->is_stack()) {
                    stringStream st;
                    st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
                    _masm->_verify_oop(r->as_Register(), os::strdup(st.as_string(), mtCompiler), __FILE__, __LINE__);
#else
                    _masm->verify_oop(r->as_Register());
#endif
                } else {
                    _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
                }
            }
            check_codespace();
            CHECK_BAILOUT();

            s.next();
        }
    }
#endif
}
Example #4
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ load_const_optimized(Z_R1_scratch, _trap_request); // Pass trap request in Z_R1_scratch.
  ce->emit_call_c(Runtime1::entry_for (Runtime1::deoptimize_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}
Example #5
void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}
Example #6
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::throw_div0_exception_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}
void LIR_Assembler::check_no_unbound_labels() {
    CHECK_BAILOUT();

    for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
        if (!_branch_target_blocks.at(i)->label()->is_bound()) {
            tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
            assert(false, "unbound label");
        }
    }
}
Example #8
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  address a = Runtime1::entry_for (_stub_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}
Example #9
// Note: pass object in Z_R1_scratch
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_obj->is_valid()) {
    __ z_lgr(Z_R1_scratch, _obj->as_register()); // _obj contains the optional argument to the stub
  }
  address a = Runtime1::entry_for (_stub);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}
void Compilation::emit_code_epilog(LIR_Assembler* assembler) {
  CHECK_BAILOUT();

  // generate code for slow cases
  assembler->emit_slow_case_stubs();
  CHECK_BAILOUT();

  // generate exception adapters
  assembler->emit_exception_entries(exception_info_list());
  CHECK_BAILOUT();

  // generate code for exception handler
  assembler->emit_exception_handler();
  CHECK_BAILOUT();
  assembler->emit_deopt_handler();
  CHECK_BAILOUT();

  // done
  masm()->flush();
}
void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }
}
Example #12
void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
  __ lgr_if_needed(Z_R13, _length->as_register());
  address a = Runtime1::entry_for (Runtime1::new_object_array_id);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == Z_R2, "callee returns result in Z_R2");
  __ z_brul(_continuation);
}
Example #13
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
  __ z_ltgr(new_val_reg, new_val_reg); // Is new_val null? (Load-and-test sets the condition code.)
  __ branch_optimized(Assembler::bcondZero, _continuation);
  __ z_lgr(Z_R1_scratch, addr()->as_pointer_register()); // Pass the store address in Z_R1_scratch.
  ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_post_barrier_slow_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
    verify_oop_map(op->info());

    if (os::is_MP()) {
        // must align call sites, otherwise they can't be updated atomically on MP hardware
        align_call(op->code());
    }

    // emit the static call stub stuff out of line
    emit_static_call_stub();
    CHECK_BAILOUT();

    switch (op->code()) {
    case lir_static_call:
    case lir_dynamic_call:
        call(op, relocInfo::static_call_type);
        break;
    case lir_optvirtual_call:
        call(op, relocInfo::opt_virtual_call_type);
        break;
    case lir_icvirtual_call:
        ic_call(op);
        break;
    case lir_virtual_call:
        vtable_call(op);
        break;
    default:
        fatal("unexpected op code: %s", op->name());
        break;
    }

    // JSR 292
    // Record if this method has MethodHandle invokes.
    if (op->is_method_handle_invoke()) {
        compilation()->set_has_method_handle_invokes(true);
    }

#if defined(X86) && defined(TIERED)
    // C2 leaves the FPU stack dirty; clean it.
    if (UseSSE < 2) {
        int i;
        for ( i = 1; i <= 7 ; i++ ) {
            ffree(i);
        }
        if (!op->result_opr()->is_float_kind()) {
            ffree(0);
        }
    }
#endif // X86 && TIERED
}
void LIR_Assembler::emit_code(BlockList* hir) {
    if (PrintLIR) {
        print_LIR(hir);
    }

    int n = hir->length();
    for (int i = 0; i < n; i++) {
        emit_block(hir->at(i));
        CHECK_BAILOUT();
    }

    flush_debug_info(code_offset());

    DEBUG_ONLY(check_no_unbound_labels());
}
void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    stringStream st;
    s->print_name(&st);
    st.print(" slow case");
    _masm->block_comment(st.as_string());
#endif
    s->emit_code(this);
  }
}
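emit_stubs() just walks the list and dispatches to each stub's virtual emit_code(), so every slow case above follows the same skeleton: bind _entry, marshal arguments into fixed scratch registers, call into Runtime1, record call info, and either end in should_not_reach_here() (throwing stubs) or branch back to _continuation (resuming stubs). A hypothetical minimal stub written to that shape (the class name is invented and the reuse of predicate_failed_trap_id is illustrative, not HotSpot code):

// Hypothetical stub following the pattern of the examples above.
class AlwaysDeoptStub: public CodeStub {
 private:
  CodeEmitInfo* _info;  // debug info recorded at the runtime call

 public:
  AlwaysDeoptStub(CodeEmitInfo* info) : _info(info) {}

  virtual void emit_code(LIR_Assembler* ce) {
    __ bind(_entry);                         // main code branches here on the slow path
    ce->emit_call_c(Runtime1::entry_for (Runtime1::predicate_failed_trap_id));
    CHECK_BAILOUT();                         // the call may have bailed out the compile
    ce->add_call_info_here(_info);           // record oop map / debug info at the call
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());  // deopt never returns into the stub
  }
  virtual void visit(LIR_OpVisitState* visitor) { visitor->do_slow_case(_info); }
#ifndef PRODUCT
  virtual void print_name(outputStream* out) const { out->print("AlwaysDeoptStub"); }
#endif
};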
Example #17
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata* m = _method->as_constant_ptr()->as_metadata();
  bool success = __ set_metadata_constant(m, Z_R1_scratch);
  if (!success) {
    ce->compilation()->bailout("const section overflow");
    return;
  }
  ce->store_parameter(/*_method->as_register()*/ Z_R1_scratch, 1);
  ce->store_parameter(_bci, 0);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::counter_overflow_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}
Example #18
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ lgr_if_needed(Z_R1_scratch, _obj_reg->as_register());
  __ lgr_if_needed(Z_R13, _lock_reg->as_register()); // See LIRGenerator::syncTempOpr().
  ce->emit_call_c(Runtime1::entry_for (enter_id));
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}
Example #19
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for (Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  ce->emit_call_c(a);
  CHECK_BAILOUT();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}
Example #20
void Compilation::compile_method() {
  // setup compilation
  initialize();

  if (!method()->can_be_compiled()) {
    // Prevent race condition 6328518.
    // This can happen if the method is obsolete or breakpointed.
    bailout("Bailing out because method is not compilable");
    return;
  }

  if (_env->jvmti_can_hotswap_or_post_breakpoint()) {
    // We can assert evol_method because method->can_be_compiled is true.
    dependency_recorder()->assert_evol_method(method());
  }

  if (method()->break_at_execute()) {
    BREAKPOINT;
  }

#ifndef PRODUCT
  if (PrintCFGToFile) {
    CFGPrinter::print_compilation(this);
  }
#endif

  // compile method
  int frame_size = compile_java_method();

  // bailout if method couldn't be compiled
  // Note: make sure we mark the method as not compilable!
  CHECK_BAILOUT();

  if (InstallMethods) {
    // install code
    PhaseTraceTime timeit(_t_codeinstall);
    install_code(frame_size);
  }

  if (log() != NULL) // Print code cache state into compiler log
    log()->code_cache_state();

  totalInstructionNodes += Instruction::number_of_instructions();
}
void LIR_Assembler::emit_lir_list(LIR_List* list) {
    peephole(list);

    int n = list->length();
    for (int i = 0; i < n; i++) {
        LIR_Op* op = list->at(i);

        check_codespace();
        CHECK_BAILOUT();

#ifndef PRODUCT
        if (CommentedAssembly) {
            // Don't print out every op since that's too verbose. Print
            // branches since they include block and stub names. Also print
            // patching moves since they generate funny-looking code.
            if (op->code() == lir_branch ||
                    (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
                stringStream st;
                op->print_on(&st);
                _masm->block_comment(st.as_string());
            }
        }
        if (PrintLIRWithAssembly) {
            // print out the LIR operation followed by the resulting assembly
            list->at(i)->print();
            tty->cr();
        }
#endif /* PRODUCT */

        op->emit_code(this);

        if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
            process_debug_info(op);
        }

#ifndef PRODUCT
        if (PrintLIRWithAssembly) {
            _masm->code()->decode();
        }
#endif /* PRODUCT */
    }
}
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    // Don't print out every op since that's too verbose. Print
    // branches since they include block and stub names. Also print
    // patching moves since they generate funny-looking code.
    if (op->code() == lir_branch ||
        (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
      stringStream st;
      op->print_on(&st);
      _masm->block_comment(st.as_string());
    }
    int start_relpc = _masm->rel_pc();
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(C1OUT);
      C1OUT->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      address code = (address)_masm->blob();
      Disassembler::decode(code+start_relpc, code+_masm->rel_pc(), C1OUT);
    }
#endif /* PRODUCT */
  }
#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    C1OUT->flush();
  }
#endif
}
Example #23
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // Move address of the BasicObjectLock into Z_R1_scratch.
  if (_compute_lock) {
    // Lock_reg was destroyed by fast unlocking attempt => recompute it.
    ce->monitor_address(_monitor_ix, FrameMap::as_opr(Z_R1_scratch));
  } else {
    __ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register());
  }
  // Note: non-blocking leaf routine => no call info needed.
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  ce->emit_call_c(Runtime1::entry_for (exit_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}
Example #24
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(_entry);
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

  __ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
  __ branch_optimized(Assembler::bcondZero, _continuation);
  ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_pre_barrier_slow_id));
  CHECK_BAILOUT();
  __ branch_optimized(Assembler::bcondAlways, _continuation);
}
Example #25
void FpuStackAllocator::allocate() {
  int num_blocks = allocator()->block_count();
  for (int i = 0; i < num_blocks; i++) {
    // Set up to process block
    BlockBegin* block = allocator()->block_at(i);
    intArray* fpu_stack_state = block->fpu_stack_state();

#ifndef PRODUCT
    if (TraceFPUStack) {
      tty->cr();
      tty->print_cr("------- Begin of new Block %d -------", block->block_id());
    }
#endif

    assert(fpu_stack_state != NULL ||
           block->end()->as_Base() != NULL ||
           block->is_set(BlockBegin::exception_entry_flag),
           "FPU stack state must be present due to linear-scan order for FPU stack allocation");
    // note: exception handler entries always start with an empty fpu stack
    //       because stack merging would be too complicated

    if (fpu_stack_state != NULL) {
      sim()->read_state(fpu_stack_state);
    } else {
      sim()->clear();
    }

#ifndef PRODUCT
    if (TraceFPUStack) {
      tty->print("Reading FPU state for block %d:", block->block_id());
      sim()->print();
      tty->cr();
    }
#endif

    allocate_block(block);
    CHECK_BAILOUT();
  }
}
void Compilation::emit_code_epilog(LIR_Assembler* assembler) {
  CHECK_BAILOUT();

  CodeOffsets* code_offsets = assembler->offsets();

  // generate code for slow cases
  assembler->emit_slow_case_stubs();
  CHECK_BAILOUT();

  // generate exception adapters
  assembler->emit_exception_entries(exception_info_list());
  CHECK_BAILOUT();

  // Generate code for exception handler.
  code_offsets->set_value(CodeOffsets::Exceptions, assembler->emit_exception_handler());
  CHECK_BAILOUT();

  // Generate code for deopt handler.
  code_offsets->set_value(CodeOffsets::Deopt, assembler->emit_deopt_handler());
  CHECK_BAILOUT();

  // Emit the MethodHandle deopt handler code (if required).
  if (has_method_handle_invokes()) {
    // We can use the same code as for the normal deopt handler, we
    // just need a different entry point address.
    code_offsets->set_value(CodeOffsets::DeoptMH, assembler->emit_deopt_handler());
    CHECK_BAILOUT();
  }

  // Emit the handler to remove the activation from the stack and
  // dispatch to the caller.
  offsets()->set_value(CodeOffsets::UnwindHandler, assembler->emit_unwind_handler());

  // done
  masm()->flush();
}
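The offsets recorded into CodeOffsets above are resolved back into absolute entry points when the finished method is installed; schematically (a simplified sketch under assumed accessors, not the literal installation code):

// Sketch (assumption): how the recorded offsets are consumed later.
address insts        = nm->code_begin();  // start of the emitted instructions
address exception_pt = insts + code_offsets->value(CodeOffsets::Exceptions);
address deopt_pt     = insts + code_offsets->value(CodeOffsets::Deopt);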
Example #27
void Compilation::build_hir() {
  CHECK_BAILOUT();

  // setup ir
  CompileLog* log = this->log();
  if (log != NULL) {
    log->begin_head("parse method='%d' ",
                    log->identify(_method));
    log->stamp();
    log->end_head();
  }
  _hir = new IR(this, method(), osr_bci());
  if (log)  log->done("parse");
  if (!_hir->is_valid()) {
    bailout("invalid parsing");
    return;
  }

#ifndef PRODUCT
  if (PrintCFGToFile) {
    CFGPrinter::print_cfg(_hir, "After Generation of HIR", true, false);
  }
#endif

#ifndef PRODUCT
  if (PrintCFG || PrintCFG0) { tty->print_cr("CFG after parsing"); _hir->print(true); }
  if (PrintIR  || PrintIR0 ) { tty->print_cr("IR after parsing"); _hir->print(false); }
#endif

  _hir->verify();

  if (UseC1Optimizations) {
    NEEDS_CLEANUP
    // optimization
    PhaseTraceTime timeit(_t_optimize_blocks);

    _hir->optimize_blocks();
  }

  _hir->verify();

  _hir->split_critical_edges();

#ifndef PRODUCT
  if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after optimizations"); _hir->print(true); }
  if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after optimizations"); _hir->print(false); }
#endif

  _hir->verify();

  // compute block ordering for code generation
  // the control flow must not be changed from here on
  _hir->compute_code();

  if (UseGlobalValueNumbering) {
    // No resource mark here! LoopInvariantCodeMotion can allocate ValueStack objects.
    int instructions = Instruction::number_of_instructions();
    GlobalValueNumbering gvn(_hir);
    assert(instructions == Instruction::number_of_instructions(),
           "shouldn't have created an instructions");
  }

  _hir->verify();

#ifndef PRODUCT
  if (PrintCFGToFile) {
    CFGPrinter::print_cfg(_hir, "Before RangeCheckElimination", true, false);
  }
#endif

  if (RangeCheckElimination) {
    if (_hir->osr_entry() == NULL) {
      PhaseTraceTime timeit(_t_rangeCheckElimination);
      RangeCheckElimination::eliminate(_hir);
    }
  }

#ifndef PRODUCT
  if (PrintCFGToFile) {
    CFGPrinter::print_cfg(_hir, "After RangeCheckElimination", true, false);
  }
#endif

  if (UseC1Optimizations) {
    // loop invariant code motion reorders instructions, and range
    // check elimination adds new instructions, so do null check
    // elimination afterwards.
    NEEDS_CLEANUP
    // optimization
    PhaseTraceTime timeit(_t_optimize_null_checks);

    _hir->eliminate_null_checks();
  }

  _hir->verify();

  // compute use counts after global value numbering
  _hir->compute_use_counts();

#ifndef PRODUCT
  if (PrintCFG || PrintCFG2) { tty->print_cr("CFG before code generation"); _hir->code()->print(true); }
  if (PrintIR  || PrintIR2 ) { tty->print_cr("IR before code generation"); _hir->code()->print(false, true); }
#endif

  _hir->verify();
}
void Compilation::build_hir() {
  CHECK_BAILOUT();

  // setup ir
  _hir = new IR(this, method(), osr_bci());
  if (!_hir->is_valid()) {
    bailout("invalid parsing");
    return;
  }

#ifndef PRODUCT
  if (PrintCFGToFile) {
    CFGPrinter::print_cfg(_hir, "After Generation of HIR", true, false);
  }
#endif

#ifndef PRODUCT
  if (PrintCFG || PrintCFG0) { tty->print_cr("CFG after parsing"); _hir->print(true); }
  if (PrintIR  || PrintIR0 ) { tty->print_cr("IR after parsing"); _hir->print(false); }
#endif

  _hir->verify();

  if (UseC1Optimizations) {
    NEEDS_CLEANUP
    // optimization
    PhaseTraceTime timeit(_t_optimizeIR);

    _hir->optimize();
  }

  _hir->verify();

  _hir->split_critical_edges();

#ifndef PRODUCT
  if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after optimizations"); _hir->print(true); }
  if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after optimizations"); _hir->print(false); }
#endif

  _hir->verify();

  // compute block ordering for code generation
  // the control flow must not be changed from here on
  _hir->compute_code();

  if (UseGlobalValueNumbering) {
    ResourceMark rm;
    int instructions = Instruction::number_of_instructions();
    GlobalValueNumbering gvn(_hir);
    assert(instructions == Instruction::number_of_instructions(),
           "shouldn't have created an instructions");
  }

  // compute use counts after global value numbering
  _hir->compute_use_counts();

#ifndef PRODUCT
  if (PrintCFG || PrintCFG2) { tty->print_cr("CFG before code generation"); _hir->code()->print(true); }
  if (PrintIR  || PrintIR2 ) { tty->print_cr("IR before code generation"); _hir->code()->print(false, true); }
#endif

  _hir->verify();
}