// Emit code for a LIR instruction with a single input operand.
// Dispatches on the opcode and forwards the operands to the
// platform-specific back-end helpers.
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
  case lir_move: {
    const bool is_volatile = op->move_kind() == lir_move_volatile;
    if (is_volatile) {
      // Volatile moves go through the dedicated helper and cannot be patched.
      assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
      volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
    } else {
      const bool unaligned = op->move_kind() == lir_move_unaligned;
      const bool wide      = op->move_kind() == lir_move_wide;
      move_op(op->in_opr(), op->result_opr(), op->type(),
              op->patch_code(), op->info(), op->pop_fpu_stack(), unaligned, wide);
    }
    break;
  }

  case lir_roundfp: {
    LIR_OpRoundFP* rfp = op->as_OpRoundFP();
    roundfp_op(rfp->in_opr(), rfp->tmp(), rfp->result_opr(), rfp->pop_fpu_stack());
    break;
  }

  case lir_return:
    return_op(op->in_opr());
    break;

  case lir_safepoint:
    // If the last recorded debug-info pc is exactly here, pad with a nop so
    // the safepoint poll gets a pc offset of its own.
    if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
      _masm->nop();
    }
    safepoint_poll(op->in_opr(), op->info());
    break;

  // x87 FPU stack manipulation: the operand encodes the stack slot index.
  case lir_fxch:
    fxch(op->in_opr()->as_jint());
    break;

  case lir_fld:
    fld(op->in_opr()->as_jint());
    break;

  case lir_ffree:
    ffree(op->in_opr()->as_jint());
    break;

  case lir_branch:
    // Unconditional branches are handled elsewhere; nothing to emit here.
    break;

  case lir_push:
    push(op->in_opr());
    break;

  case lir_pop:
    pop(op->in_opr());
    break;

  case lir_neg:
    negate(op->in_opr(), op->result_opr());
    break;

  case lir_leal:
    leal(op->in_opr(), op->result_opr());
    break;

  case lir_null_check:
    if (GenerateCompilerNullChecks) {
      ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());
      if (op->in_opr()->is_single_cpu()) {
        _masm->null_check(op->in_opr()->as_register(), stub->entry());
      } else {
        Unimplemented();
      }
    }
    break;

  case lir_monaddr:
    // The constant operand selects which monitor slot's address to compute.
    monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
    break;

#ifdef SPARC
  case lir_pack64:
    pack64(op->in_opr(), op->result_opr());
    break;

  case lir_unpack64:
    unpack64(op->in_opr(), op->result_opr());
    break;
#endif

  case lir_unwind:
    unwind_op(op->in_opr());
    break;

  default:
    Unimplemented();
    break;
  }
}
// Emit code for a LIR instruction with a single input operand,
// dispatching on the opcode to the platform back-end helpers.
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move: {
      if (op->move_kind() == lir_move_volatile) {
        // Volatile moves use the dedicated helper and cannot be patched.
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        const bool unaligned = op->move_kind() == lir_move_unaligned;
        move_op(op->in_opr(), op->result_opr(),
                op->tmp1_opr(), op->tmp2_opr(), op->tmp3_opr(),
                op->type(), op->patch_code(), op->info(), unaligned);
      }
      break;
    }

    // Software prefetch hints (read / write intent).
    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_branch:
      // Branches are handled elsewhere; nothing to emit here.
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_bit_test:
      bit_test(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      // Explicit null checks can be disabled globally.
      if (GenerateCompilerNullChecks) {
        null_check(op->in_opr(), op->info());
      }
      break;

    case lir_klassTable_oop_load:
      klassTable_oop_load(op->in_opr(), op->result_opr(), op->tmp1_opr());
      break;

    case lir_monaddr:
      // The constant operand selects which monitor slot's address to compute.
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}