/*
 * mono_arch_nullify_class_init_trampoline:
 *
 * Overwrite the class-init call sequence ending at CODE with nops, so the
 * (already-run) class initializer is never invoked again from this call site.
 * REGS is unused on ia64. The call sequence was emitted by emit_call and
 * starts at an MLX/MLXS bundle (the movl loading the target).
 */
void
mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs)
{
	guint8 *callsite_begin;
	/* CODE points just past the call; bundles are 16 bytes (= 2 guint64 slots) */
	guint64 *callsite = (guint64*)(gpointer)(code - 16);
	guint64 instructions [3];
	guint64 buf [16];
	Ia64CodegenState gen;

	/* Scan backwards, one bundle at a time, for the MLX/MLXS bundle that
	 * begins the emit_call sequence. */
	while ((ia64_bundle_template (callsite) != IA64_TEMPLATE_MLX) && (ia64_bundle_template (callsite) != IA64_TEMPLATE_MLXS))
		callsite -= 2;
	callsite_begin = (guint8*)callsite;

	/* Replace the code generated by emit_call with a set of nops */

	/* The first bundle might have other instructions in it: keep its first
	 * slot and nop out the movl occupying the L+X slots. */
	instructions [0] = ia64_bundle_ins1 (callsite);
	instructions [1] = IA64_NOP_X;
	instructions [2] = IA64_NOP_X;

	ia64_codegen_init (gen, (guint8*)buf);
	ia64_emit_bundle_template (&gen, ia64_bundle_template (callsite), instructions [0], instructions [1], instructions [2]);
	ia64_codegen_close (gen);

	/* This might not be safe, but not all itanium processors support st16 */
	/* NOTE(review): two separate 8-byte stores are not atomic; assumes no
	 * other thread executes this bundle mid-update — confirm upstream locking. */
	callsite [0] = buf [0];
	callsite [1] = buf [1];

	callsite += 2;

	/* The other bundles can be fully replaced with nops */
	ia64_codegen_init (gen, (guint8*)buf);
	ia64_emit_bundle_template (&gen, IA64_TEMPLATE_MII, IA64_NOP_M, IA64_NOP_I, IA64_NOP_I);
	ia64_codegen_close (gen);

	while ((guint8*)callsite < code) {
		callsite [0] = buf [0];
		callsite [1] = buf [1];
		callsite += 2;
	}

	/* Make the patched range visible to the instruction stream */
	mono_arch_flush_icache (callsite_begin, code - callsite_begin);
}
/*
 * mono_arch_patch_callsite:
 *
 * Patch the direct call sequence ending at CODE (emitted by emit_call) so it
 * calls ADDR instead. ADDR is an ia64 function descriptor; only its code
 * address is patched into the movl, since direct calls keep the current gp.
 * Indirect calls through a function pointer are left untouched.
 */
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *code, guint8 *addr)
{
	guint8 *callsite_begin;
	/* CODE points just past the call; bundles are 16 bytes (= 2 guint64 slots) */
	guint64 *callsite = (guint64*)(gpointer)(code - 16);
	guint64 *next_bundle;
	guint64 ins, instructions [3];
	guint64 buf [16];
	Ia64CodegenState gen;
	/* First word of the function descriptor = code address */
	gpointer func = ((gpointer*)(gpointer)addr)[0];

	/* Scan backwards for the MLX/MLXS bundle holding the movl of the target */
	while ((ia64_bundle_template (callsite) != IA64_TEMPLATE_MLX) && (ia64_bundle_template (callsite) != IA64_TEMPLATE_MLXS))
		callsite -= 2;
	callsite_begin = (guint8*)callsite;

	next_bundle = callsite + 2;
	ins = ia64_bundle_ins1 (next_bundle);
	if (ia64_ins_opcode (ins) == 5) {
		/* ld8_inc_imm -> indirect call through a function pointer */
		g_assert (ia64_ins_r1 (ins) == GP_SCRATCH_REG2);
		g_assert (ia64_ins_r3 (ins) == GP_SCRATCH_REG);

		return;
	}

	/* Patch the code generated by emit_call */

	instructions [0] = ia64_bundle_ins1 (callsite);
	instructions [1] = ia64_bundle_ins2 (callsite);
	instructions [2] = ia64_bundle_ins3 (callsite);

	/* Re-encode the movl with the new target; a movl occupies slots 1+2 */
	ia64_codegen_init (gen, (guint8*)buf);
	ia64_movl (gen, GP_SCRATCH_REG, func);
	instructions [1] = gen.instructions [0];
	instructions [2] = gen.instructions [1];

	ia64_codegen_init (gen, (guint8*)buf);
	ia64_emit_bundle_template (&gen, ia64_bundle_template (callsite), instructions [0], instructions [1], instructions [2]);
	ia64_codegen_close (gen);

	/* This might not be safe, but not all itanium processors support st16 */
	/* NOTE(review): two separate 8-byte stores are not atomic — confirm
	 * callers serialize patching. */
	callsite [0] = buf [0];
	callsite [1] = buf [1];

	mono_arch_flush_icache (callsite_begin, code - callsite_begin);
}
/*
 * mono_arch_get_throw_exception_by_name:
 *
 * Not used on ia64; returns a stub which just executes a break instruction
 * (break.i 1001) so any accidental call traps immediately.
 *
 * Fix: the overrun assertion used a stale bound of 256 while only 64 bytes
 * are reserved from the code manager; the assert now matches the reservation
 * so a buffer overrun would actually be caught.
 */
gpointer
mono_arch_get_throw_exception_by_name (void)
{
	guint8* start;
	Ia64CodegenState code;

	start = mono_global_codeman_reserve (64);

	/* Not used on ia64 */
	ia64_codegen_init (code, start);
	ia64_break_i (code, 1001);
	ia64_codegen_close (code);

	/* Must not exceed the 64 bytes reserved above */
	g_assert ((code.buf - start) <= 64);

	mono_arch_flush_icache (start, code.buf - start);

	return start;
}
/*
 * mono_arch_create_specific_trampoline:
 *
 * Create a small trampoline in DOMAIN's code area which loads ARG1 into
 * GP_SCRATCH_REG and then branches to the generic trampoline for TRAMP_TYPE.
 * If CODE_LEN is non-NULL, the generated code size is returned in it.
 */
gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *buf, *tramp;
	gint64 disp;
	Ia64CodegenState code;

	tramp = mono_get_trampoline_code (tramp_type);

	buf = mono_domain_code_reserve (domain, TRAMPOLINE_SIZE);

	/* FIXME: Optimize this */
	ia64_codegen_init (code, buf);
	ia64_movl (code, GP_SCRATCH_REG, arg1);

	/* Close the current bundle so code.buf is the address of the branch */
	ia64_begin_bundle (code);
	/* IP-relative displacement is in bundles (16 bytes), hence >> 4 */
	disp = (tramp - code.buf) >> 4;
	if (ia64_is_imm21 (disp)) {
		/* Target is close enough for a direct branch */
		ia64_br_cond (code, disp);
	}
	else {
		/* Too far: branch indirectly through b6 */
		ia64_movl (code, GP_SCRATCH_REG2, tramp);
		ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
		ia64_br_cond_reg (code, IA64_B6);
	}

	ia64_codegen_close (code);

	g_assert (code.buf - buf <= TRAMPOLINE_SIZE);

	mono_arch_flush_icache (buf, code.buf - buf);

	if (code_len)
		*code_len = code.buf - buf;

	return buf;
}
/*
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * this argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 *
 * On ia64 the returned value is a function descriptor (code address + gp).
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *buf;
	gpointer func_addr, func_gp;
	Ia64CodegenState code;
	int this_reg = 0;
	gpointer *desc;
	MonoDomain *domain = mono_domain_get ();

	/* FIXME: Optimize this */

	/* ADDR is a function descriptor: [0] = code address, [1] = gp */
	func_addr = ((gpointer*)addr) [0];
	func_gp = ((gpointer*)addr) [1];

	buf = mono_domain_code_reserve (domain, 256);

	/* Since the this reg is a stacked register, its a bit hard to access it */
	ia64_codegen_init (code, buf);
	/* Open a register frame so the callee's stacked args are addressable */
	ia64_alloc (code, 40, 8, 1, 0, 0);
	/* Skip the MonoObject header: this += sizeof (MonoObject) */
	ia64_adds_imm (code, 32 + this_reg, sizeof (MonoObject), 32 + this_reg);
	/* Restore ar.pfs so the tail branch below looks like a plain jump */
	ia64_mov_to_ar_i (code, IA64_PFS, 40);
	ia64_movl (code, GP_SCRATCH_REG, func_addr);
	ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG);
	ia64_br_cond_reg (code, IA64_B6);
	ia64_codegen_close (code);

	g_assert (code.buf - buf < 256);

	mono_arch_flush_icache (buf, code.buf - buf);

	/* FIXME: */
	/* Build a new function descriptor for the trampoline, reusing the
	 * target's gp. NOTE(review): this small allocation is never freed —
	 * presumably intentional (lives as long as the domain); confirm. */
	desc = g_malloc0 (sizeof (gpointer) * 2);
	desc [0] = buf;
	desc [1] = func_gp;

	return desc;
}
/**
 * mono_arch_get_throw_corlib_exception:
 *
 * Returns a function pointer which can be used to raise
 * corlib exceptions. The returned function has the following
 * signature: void (*func) (guint32 ex_token_index, guint32 offset);
 * Here, offset is the offset which needs to be subtracted from the caller IP
 * to get the IP of the throw. Passing the offset has the advantage that it
 * needs no relocations in the caller.
 */
gpointer
mono_arch_get_throw_corlib_exception (MonoTrampInfo **info, gboolean aot)
{
	static guint8* res;
	static gboolean inited = FALSE;
	guint8 *start;
	gpointer ptr;
	int i, in0, local0, out0, nout;
	Ia64CodegenState code;
	unw_dyn_info_t *di;
	unw_dyn_region_info_t *r_pro;

	g_assert (!aot);
	if (info)
		*info = NULL;

	if (inited)
		return res;

	start = mono_global_codeman_reserve (1024);

	/* Register stack frame: 2 inputs (token index, offset), 4 locals, 3 outs */
	in0 = 32;
	local0 = in0 + 2;
	out0 = local0 + 4;
	nout = 3;

	ia64_codegen_init (code, start);
	ia64_alloc (code, local0 + 0, local0 - in0, out0 - local0, nout, 0);
	ia64_mov_from_br (code, local0 + 1, IA64_RP);

	/* Dynamic unwind info: record where ar.pfs and rp were saved.
	 * NOTE(review): the when= slot numbers are tied to the exact number of
	 * instruction slots emitted above — keep them in sync if the prolog
	 * changes. */
	r_pro = g_malloc0 (_U_dyn_region_info_size (2));
	r_pro->op_count = 2;
	r_pro->insn_count = 6;
	i = 0;
	_U_dyn_op_save_reg (&r_pro->op[i++], _U_QP_TRUE, /* when=*/ 2,
						/* reg=*/ UNW_IA64_AR_PFS, /* dst=*/ UNW_IA64_GR + local0 + 0);
	_U_dyn_op_save_reg (&r_pro->op[i++], _U_QP_TRUE, /* when=*/ 5,
						/* reg=*/ UNW_IA64_RP, /* dst=*/ UNW_IA64_GR + local0 + 1);
	g_assert ((unsigned) i <= r_pro->op_count);

	/* Call exception_from_token */
	ia64_movl (code, out0 + 0, mono_defaults.exception_class->image);
	ia64_mov (code, out0 + 1, in0 + 0);
	/* Full token = MONO_TOKEN_TYPE_DEF | token index */
	ia64_movl (code, GP_SCRATCH_REG, MONO_TOKEN_TYPE_DEF);
	ia64_add (code, out0 + 1, in0 + 0, GP_SCRATCH_REG);
	/* Indirect call through the function descriptor: load code addr and gp */
	ptr = mono_exception_from_token;
	ia64_movl (code, GP_SCRATCH_REG, ptr);
	ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
	ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
	ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
	ia64_br_call_reg (code, IA64_B0, IA64_B6);
	/* Save the returned exception object */
	ia64_mov (code, local0 + 3, IA64_R8);

	/* Compute throw ip: saved rp minus the offset argument */
	ia64_mov (code, local0 + 2, local0 + 1);
	ia64_sub (code, local0 + 2, local0 + 2, in0 + 1);

	/* Trick the unwind library into using throw_ip as the IP in the caller frame */
	ia64_mov (code, local0 + 1, local0 + 2);

	/* Set args */
	ia64_mov (code, out0 + 0, local0 + 3);
	ia64_mov (code, out0 + 1, IA64_R0);

	/* Call throw_exception (does not return) */
	ptr = throw_exception;
	ia64_movl (code, GP_SCRATCH_REG, ptr);
	ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
	ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
	ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
	ia64_br_call_reg (code, IA64_B0, IA64_B6);

	/* Not reached */
	ia64_break_i (code, 1002);
	ia64_codegen_close (code);

	g_assert ((code.buf - start) <= 1024);

	/* Register the trampoline with the unwinder */
	di = g_malloc0 (sizeof (unw_dyn_info_t));
	di->start_ip = (unw_word_t) start;
	di->end_ip = (unw_word_t) code.buf;
	di->gp = 0;
	di->format = UNW_INFO_FORMAT_DYNAMIC;
	di->u.pi.name_ptr = (unw_word_t)"throw_corlib_exception_trampoline";
	di->u.pi.regions = r_pro;

	_U_dyn_register (di);

	mono_arch_flush_icache (start, code.buf - start);

	res = ia64_create_ftnptr (start);
	inited = TRUE;

	return res;
}
/*
 * get_throw_trampoline:
 *
 * Generate a trampoline with signature void (*func) (MonoException *exc)
 * which calls throw_exception with the exception object and a RETHROW flag.
 * Registers dynamic unwind info for the generated code and returns an ia64
 * function descriptor.
 */
static gpointer
get_throw_trampoline (gboolean rethrow)
{
	guint8* start;
	Ia64CodegenState code;
	gpointer ptr = throw_exception;
	int i, in0, local0, out0;
	unw_dyn_info_t *di;
	unw_dyn_region_info_t *r_pro;

	start = mono_global_codeman_reserve (256);

	/* Register stack frame: 1 input (the exception), 2 locals, 3 outs */
	in0 = 32;
	local0 = in0 + 1;
	out0 = local0 + 2;

	ia64_codegen_init (code, start);
	ia64_alloc (code, local0 + 0, local0 - in0, out0 - local0, 3, 0);
	ia64_mov_from_br (code, local0 + 1, IA64_B0);

	/* FIXME: This depends on the current instruction emitter */
	/* Dynamic unwind info: the when= slot numbers below are coupled to the
	 * exact instruction slots emitted above. */
	r_pro = g_malloc0 (_U_dyn_region_info_size (2));
	r_pro->op_count = 2;
	r_pro->insn_count = 6;
	i = 0;
	_U_dyn_op_save_reg (&r_pro->op[i++], _U_QP_TRUE, /* when=*/ 2,
						/* reg=*/ UNW_IA64_AR_PFS, /* dst=*/ UNW_IA64_GR + local0 + 0);
	_U_dyn_op_save_reg (&r_pro->op[i++], _U_QP_TRUE, /* when=*/ 5,
						/* reg=*/ UNW_IA64_RP, /* dst=*/ UNW_IA64_GR + local0 + 1);
	g_assert ((unsigned) i <= r_pro->op_count);

	/* Set args: out0 = exception object, out1 = rethrow flag */
	ia64_mov (code, out0 + 0, in0 + 0);
	ia64_adds_imm (code, out0 + 1, rethrow, IA64_R0);

	/* Call throw_exception through its function descriptor (code addr + gp) */
	ia64_movl (code, GP_SCRATCH_REG, ptr);
	ia64_ld8_inc_imm (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 8);
	ia64_mov_to_br (code, IA64_B6, GP_SCRATCH_REG2);
	ia64_ld8 (code, IA64_GP, GP_SCRATCH_REG);
	ia64_br_call_reg (code, IA64_B0, IA64_B6);

	/* Not reached */
	ia64_break_i (code, 1000);
	ia64_codegen_close (code);

	g_assert ((code.buf - start) <= 256);

	mono_arch_flush_icache (start, code.buf - start);

	/* Register the trampoline with the unwinder */
	di = g_malloc0 (sizeof (unw_dyn_info_t));
	di->start_ip = (unw_word_t) start;
	di->end_ip = (unw_word_t) code.buf;
	di->gp = 0;
	di->format = UNW_INFO_FORMAT_DYNAMIC;
	di->u.pi.name_ptr = (unw_word_t)"throw_trampoline";
	di->u.pi.regions = r_pro;

	_U_dyn_register (di);

	return ia64_create_ftnptr (start);
}
static gpointer get_real_call_filter (void) { static gpointer filter; static gboolean inited = FALSE; guint8 *start; Ia64CodegenState code; int in0, local0, out0, nout; unw_dyn_info_t *di; unw_dyn_region_info_t *r_pro, *r_body, *r_epilog; if (inited) return filter; start = mono_global_codeman_reserve (1024); /* int call_filter (guint64 fp, guint64 ip) */ /* * We have to create a register+stack frame similar to the frame which * contains the filter. * - setting fp * - setting up a register stack frame * These cannot be set up in this function, because the fp register is a * stacked register which is different in each method. Also, the register * stack frame is different in each method. So we pass the FP value in a a * non-stacked register and the code generated by the OP_START_HANDLER * opcode will copy it to the appropriate register after setting up the * register stack frame. * The stacked registers are not need to be set since variables used in * handler regions are never allocated to registers. 
*/ in0 = 32; local0 = in0 + 2; out0 = local0 + 4; nout = 0; ia64_codegen_init (code, start); ia64_codegen_set_one_ins_per_bundle (code, TRUE); ia64_unw_save_reg (code, UNW_IA64_AR_PFS, UNW_IA64_GR + local0 + 0); ia64_alloc (code, local0 + 0, local0 - in0, out0 - local0, nout, 0); ia64_unw_save_reg (code, UNW_IA64_RP, UNW_IA64_GR + local0 + 1); ia64_mov_from_br (code, local0 + 1, IA64_B0); ia64_begin_bundle (code); r_pro = mono_ia64_create_unwind_region (&code); /* Frame pointer */ ia64_mov (code, IA64_R15, in0 + 0); /* Target ip */ ia64_mov_to_br (code, IA64_B6, in0 + 1); /* Call the filter */ ia64_br_call_reg (code, IA64_B0, IA64_B6); /* R8 contains the result of the filter */ /* FIXME: Add unwind info for this */ ia64_begin_bundle (code); r_body = mono_ia64_create_unwind_region (&code); r_pro->next = r_body; ia64_mov_to_ar_i (code, IA64_PFS, local0 + 0); ia64_mov_ret_to_br (code, IA64_B0, local0 + 1); ia64_br_ret_reg (code, IA64_B0); ia64_begin_bundle (code); r_epilog = mono_ia64_create_unwind_region (&code); r_body->next = r_epilog; ia64_codegen_set_one_ins_per_bundle (code, FALSE); ia64_codegen_close (code); g_assert ((code.buf - start) <= 256); mono_arch_flush_icache (start, code.buf - start); di = g_malloc0 (sizeof (unw_dyn_info_t)); di->start_ip = (unw_word_t) start; di->end_ip = (unw_word_t) code.buf; di->gp = 0; di->format = UNW_INFO_FORMAT_DYNAMIC; di->u.pi.name_ptr = (unw_word_t)"throw_trampoline"; di->u.pi.regions = r_body; _U_dyn_register (di); filter = ia64_create_ftnptr (start); inited = TRUE; return filter; }
/*
 * mono_arch_create_generic_trampoline:
 *
 * Generate the shared trampoline for TRAMP_TYPE. The generated code saves
 * the callee-saved/argument state, calls the C trampoline function for
 * TRAMP_TYPE, checks for thread interruption, then either returns (class
 * init) or tail-branches to the compiled method returned by the C function.
 *
 * Fix: saved_regs_offset was only assigned on the non-JUMP path but read
 * unconditionally when computing arg1, so jump trampolines passed a pointer
 * derived from an uninitialized value (undefined behavior). It is now always
 * initialized to the scratch-area offset; the non-JUMP layout is unchanged.
 */
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	guint8 *buf, *tramp;
	int i, offset, saved_regs_offset, saved_fpregs_offset, last_offset, framesize;
	int in0, local0, out0, l0, l1, l2, l3, l4, l5, l6, l7, l8, o0, o1, o2, o3;
	gboolean has_caller;
	Ia64CodegenState code;
	unw_dyn_info_t *di;
	unw_dyn_region_info_t *r_pro;

	g_assert (!aot);
	*info = NULL;

	/*
	 * Since jump trampolines are not patched, this trampoline is executed every
	 * time a call is made to a jump trampoline. So we try to keep things faster
	 * in that case.
	 */
	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	buf = mono_global_codeman_reserve (2048);

	ia64_codegen_init (code, buf);

	/* Stacked Registers */
	in0 = 32;
	local0 = in0 + 8;
	out0 = local0 + 16;
	l0 = 40;
	l1 = 41;
	l2 = 42;
	l3 = 43;
	l4 = 44;
	l5 = 45; /* saved ar.pfs */
	l6 = 46; /* arg */
	l7 = 47; /* code */
	l8 = 48; /* saved sp */
	o0 = out0 + 0; /* regs */
	o1 = out0 + 1; /* code */
	o2 = out0 + 2; /* arg */
	o3 = out0 + 3; /* tramp */

	framesize = (128 * 8) + 1024;
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);

	/*
	 * Allocate a new register+memory stack frame.
	 * 8 input registers (the max used by the ABI)
	 * 16 locals
	 * 4 output (number of parameters passed to trampoline)
	 */
	ia64_unw_save_reg (code, UNW_IA64_AR_PFS, UNW_IA64_GR + l5);
	ia64_alloc (code, l5, local0 - in0, out0 - local0, 4, 0);
	ia64_unw_save_reg (code, UNW_IA64_SP, UNW_IA64_GR + l8);
	ia64_mov (code, l8, IA64_SP);
	ia64_adds_imm (code, IA64_SP, (-framesize), IA64_SP);

	offset = 16; /* scratch area */
	/* Always initialized: jump trampolines do not save the registers, but
	 * the arg1 computation below still needs a defined offset (it then
	 * points into the scratch area, which the C function ignores). */
	saved_regs_offset = offset;

	/* Save the argument received from the specific trampoline */
	ia64_mov (code, l6, GP_SCRATCH_REG);

	/* Save the calling address */
	ia64_unw_save_reg (code, UNW_IA64_RP, UNW_IA64_GR + local0 + 7);
	ia64_mov_from_br (code, l7, IA64_B0);

	/* Create unwind info for the prolog */
	ia64_begin_bundle (code);
	r_pro = mono_ia64_create_unwind_region (&code);

	/* Save registers */
	/* Not needed for jump trampolines */
	if (tramp_type != MONO_TRAMPOLINE_JUMP) {
		offset += 128 * 8;
		/*
		 * Only the registers which are needed for computing vtable slots need
		 * to be saved.
		 */
		last_offset = -1;
		for (i = 0; i < 64; ++i)
			if ((1 << i) & MONO_ARCH_CALLEE_REGS) {
				/* Reload the base pointer only when there is a gap */
				if (last_offset != i * 8)
					ia64_adds_imm (code, l1, saved_regs_offset + (i * 8), IA64_SP);
				ia64_st8_spill_inc_imm_hint (code, l1, i, 8, 0);
				last_offset = (i + 1) * 8;
			}
	}

	/* Save fp registers */
	saved_fpregs_offset = offset;
	offset += 8 * 8;
	ia64_adds_imm (code, l1, saved_fpregs_offset, IA64_SP);
	for (i = 0; i < 8; ++i)
		ia64_stfd_inc_imm_hint (code, l1, i + 8, 8, 0);

	g_assert (offset < framesize);

	/* Arg1 is the pointer to the saved registers */
	ia64_adds_imm (code, o0, saved_regs_offset, IA64_SP);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		ia64_mov (code, o1, l7);
	else
		ia64_mov (code, o1, 0);

	/* Arg3 is the method/vtable ptr */
	ia64_mov (code, o2, l6);

	/* Arg4 is the trampoline address */
	/* FIXME: */
	ia64_mov (code, o3, 0);

	tramp = (guint8*)mono_get_trampoline_func (tramp_type);

	/* Call the trampoline using an indirect call (descriptor: code + gp) */
	ia64_movl (code, l0, tramp);
	ia64_ld8_inc_imm (code, l1, l0, 8);
	ia64_mov_to_br (code, IA64_B6, l1);
	ia64_ld8 (code, IA64_GP, l0);
	ia64_br_call_reg (code, 0, IA64_B6);

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	ia64_mov (code, l2, IA64_R8);

	tramp = (guint8*)mono_thread_force_interruption_checkpoint;
	ia64_movl (code, l0, tramp);
	ia64_ld8_inc_imm (code, l1, l0, 8);
	ia64_mov_to_br (code, IA64_B6, l1);
	ia64_ld8 (code, IA64_GP, l0);
	ia64_br_call_reg (code, 0, IA64_B6);

	ia64_mov (code, IA64_R8, l2);

	/* Restore fp regs */
	ia64_adds_imm (code, l1, saved_fpregs_offset, IA64_SP);
	for (i = 0; i < 8; ++i)
		ia64_ldfd_inc_imm (code, i + 8, l1, 8);

	/* FIXME: Handle NATs in fp regs / scratch regs */

	if (tramp_type != MONO_TRAMPOLINE_CLASS_INIT) {
		/* Load method address from function descriptor */
		ia64_ld8 (code, l0, IA64_R8);
		ia64_mov_to_br (code, IA64_B6, l0);
	}

	/* Clean up register/memory stack frame */
	ia64_adds_imm (code, IA64_SP, framesize, IA64_SP);
	ia64_mov_to_ar_i (code, IA64_PFS, l5);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		/* Class init: return to the caller */
		ia64_mov_ret_to_br (code, IA64_B0, l7);
		ia64_br_ret_reg (code, IA64_B0);
	}
	else {
		/* Call the compiled method */
		ia64_mov_to_br (code, IA64_B0, l7);
		ia64_br_cond_reg (code, IA64_B6);
	}

	ia64_codegen_close (code);

	g_assert ((code.buf - buf) <= 2048);

	/* FIXME: emit unwind info for epilog */
	di = g_malloc0 (sizeof (unw_dyn_info_t));
	di->start_ip = (unw_word_t) buf;
	di->end_ip = (unw_word_t) code.buf;
	di->gp = 0;
	di->format = UNW_INFO_FORMAT_DYNAMIC;
	di->u.pi.name_ptr = (unw_word_t)"ia64_generic_trampoline";
	di->u.pi.regions = r_pro;

	_U_dyn_register (di);

	mono_arch_flush_icache (buf, code.buf - buf);

	return buf;
}
int main () { Ia64CodegenState code; guint8 *buf = g_malloc0 (40960); ia64_codegen_init (code, buf); ia64_add (code, 1, 2, 3); ia64_add1 (code, 1, 2, 3); ia64_sub (code, 1, 2, 3); ia64_sub1 (code, 1, 2, 3); ia64_addp4 (code, 1, 2, 3); ia64_and (code, 1, 2, 3); ia64_andcm (code, 1, 2, 3); ia64_or (code, 1, 2, 3); ia64_xor (code, 1, 2, 3); ia64_shladd (code, 1, 2, 3, 4); ia64_shladdp4 (code, 1, 2, 3, 4); ia64_sub_imm (code, 1, 0x7f, 2); ia64_sub_imm (code, 1, -1, 2); ia64_and_imm (code, 1, -128, 2); ia64_andcm_imm (code, 1, -128, 2); ia64_or_imm (code, 1, -128, 2); ia64_xor_imm (code, 1, -128, 2); ia64_adds_imm (code, 1, 8191, 2); ia64_adds_imm (code, 1, -8192, 2); ia64_adds_imm (code, 1, 1234, 2); ia64_adds_imm (code, 1, -1234, 2); ia64_addp4_imm (code, 1, -1234, 2); ia64_addl_imm (code, 1, 1234, 2); ia64_addl_imm (code, 1, -1234, 2); ia64_addl_imm (code, 1, 2097151, 2); ia64_addl_imm (code, 1, -2097152, 2); ia64_cmp_lt (code, 1, 2, 1, 2); ia64_cmp_ltu (code, 1, 2, 1, 2); ia64_cmp_eq (code, 1, 2, 1, 2); ia64_cmp_lt_unc (code, 1, 2, 1, 2); ia64_cmp_ltu_unc (code, 1, 2, 1, 2); ia64_cmp_eq_unc (code, 1, 2, 1, 2); ia64_cmp_eq_and (code, 1, 2, 1, 2); ia64_cmp_eq_or (code, 1, 2, 1, 2); ia64_cmp_eq_or_andcm (code, 1, 2, 1, 2); ia64_cmp_ne_and (code, 1, 2, 1, 2); ia64_cmp_ne_or (code, 1, 2, 1, 2); ia64_cmp_ne_or_andcm (code, 1, 2, 1, 2); ia64_cmp4_lt (code, 1, 2, 1, 2); ia64_cmp4_ltu (code, 1, 2, 1, 2); ia64_cmp4_eq (code, 1, 2, 1, 2); ia64_cmp4_lt_unc (code, 1, 2, 1, 2); ia64_cmp4_ltu_unc (code, 1, 2, 1, 2); ia64_cmp4_eq_unc (code, 1, 2, 1, 2); ia64_cmp4_eq_and (code, 1, 2, 1, 2); ia64_cmp4_eq_or (code, 1, 2, 1, 2); ia64_cmp4_eq_or_andcm (code, 1, 2, 1, 2); ia64_cmp4_ne_and (code, 1, 2, 1, 2); ia64_cmp4_ne_or (code, 1, 2, 1, 2); ia64_cmp4_ne_or_andcm (code, 1, 2, 1, 2); ia64_cmp_gt_and (code, 1, 2, 0, 2); ia64_cmp_gt_or (code, 1, 2, 0, 2); ia64_cmp_gt_or_andcm (code, 1, 2, 0, 2); ia64_cmp_le_and (code, 1, 2, 0, 2); ia64_cmp_le_or (code, 1, 2, 0, 2); ia64_cmp_le_or_andcm 
(code, 1, 2, 0, 2); ia64_cmp_ge_and (code, 1, 2, 0, 2); ia64_cmp_ge_or (code, 1, 2, 0, 2); ia64_cmp_ge_or_andcm (code, 1, 2, 0, 2); ia64_cmp_lt_and (code, 1, 2, 0, 2); ia64_cmp_lt_or (code, 1, 2, 0, 2); ia64_cmp_lt_or_andcm (code, 1, 2, 0, 2); ia64_cmp4_gt_and (code, 1, 2, 0, 2); ia64_cmp4_gt_or (code, 1, 2, 0, 2); ia64_cmp4_gt_or_andcm (code, 1, 2, 0, 2); ia64_cmp4_le_and (code, 1, 2, 0, 2); ia64_cmp4_le_or (code, 1, 2, 0, 2); ia64_cmp4_le_or_andcm (code, 1, 2, 0, 2); ia64_cmp4_ge_and (code, 1, 2, 0, 2); ia64_cmp4_ge_or (code, 1, 2, 0, 2); ia64_cmp4_ge_or_andcm (code, 1, 2, 0, 2); ia64_cmp4_lt_and (code, 1, 2, 0, 2); ia64_cmp4_lt_or (code, 1, 2, 0, 2); ia64_cmp4_lt_or_andcm (code, 1, 2, 0, 2); ia64_cmp_lt_imm (code, 1, 2, 127, 2); ia64_cmp_lt_imm (code, 1, 2, -128, 2); ia64_cmp_lt_imm (code, 1, 2, -128, 2); ia64_cmp_ltu_imm (code, 1, 2, -128, 2); ia64_cmp_eq_imm (code, 1, 2, -128, 2); ia64_cmp_lt_unc_imm (code, 1, 2, -128, 2); ia64_cmp_ltu_unc_imm (code, 1, 2, -128, 2); ia64_cmp_eq_unc_imm (code, 1, 2, -128, 2); ia64_cmp_eq_and_imm (code, 1, 2, -128, 2); ia64_cmp_eq_or_imm (code, 1, 2, -128, 2); ia64_cmp_eq_unc_imm (code, 1, 2, -128, 2); ia64_cmp_ne_and_imm (code, 1, 2, -128, 2); ia64_cmp_ne_or_imm (code, 1, 2, -128, 2); ia64_cmp_ne_or_andcm_imm (code, 1, 2, -128, 2); ia64_cmp4_lt_imm (code, 1, 2, -128, 2); ia64_cmp4_ltu_imm (code, 1, 2, -128, 2); ia64_cmp4_eq_imm (code, 1, 2, -128, 2); ia64_cmp4_lt_unc_imm (code, 1, 2, -128, 2); ia64_cmp4_ltu_unc_imm (code, 1, 2, -128, 2); ia64_cmp4_eq_unc_imm (code, 1, 2, -128, 2); ia64_cmp4_eq_and_imm (code, 1, 2, -128, 2); ia64_cmp4_eq_or_imm (code, 1, 2, -128, 2); ia64_cmp4_eq_unc_imm (code, 1, 2, -128, 2); ia64_cmp4_ne_and_imm (code, 1, 2, -128, 2); ia64_cmp4_ne_or_imm (code, 1, 2, -128, 2); ia64_cmp4_ne_or_andcm_imm (code, 1, 2, -128, 2); ia64_padd1 (code, 1, 2, 3); ia64_padd2 (code, 1, 2, 3); ia64_padd4 (code, 1, 2, 3); ia64_padd1_sss (code, 1, 2, 3); ia64_padd2_sss (code, 1, 2, 3); ia64_padd1_uuu (code, 1, 2, 3); 
ia64_padd2_uuu (code, 1, 2, 3); ia64_padd1_uus (code, 1, 2, 3); ia64_padd2_uus (code, 1, 2, 3); ia64_psub1 (code, 1, 2, 3); ia64_psub2 (code, 1, 2, 3); ia64_psub4 (code, 1, 2, 3); ia64_psub1_sss (code, 1, 2, 3); ia64_psub2_sss (code, 1, 2, 3); ia64_psub1_uuu (code, 1, 2, 3); ia64_psub2_uuu (code, 1, 2, 3); ia64_psub1_uus (code, 1, 2, 3); ia64_psub2_uus (code, 1, 2, 3); ia64_pavg1 (code, 1, 2, 3); ia64_pavg2 (code, 1, 2, 3); ia64_pavg1_raz (code, 1, 2, 3); ia64_pavg2_raz (code, 1, 2, 3); ia64_pavgsub1 (code, 1, 2, 3); ia64_pavgsub2 (code, 1, 2, 3); ia64_pcmp1_eq (code, 1, 2, 3); ia64_pcmp2_eq (code, 1, 2, 3); ia64_pcmp4_eq (code, 1, 2, 3); ia64_pcmp1_gt (code, 1, 2, 3); ia64_pcmp2_gt (code, 1, 2, 3); ia64_pcmp4_gt (code, 1, 2, 3); ia64_pshladd2 (code, 1, 2, 3, 4); ia64_pshradd2 (code, 1, 2, 3, 4); ia64_pmpyshr2 (code, 1, 2, 3, 0); ia64_pmpyshr2_u (code, 1, 2, 3, 0); ia64_pmpyshr2 (code, 1, 2, 3, 7); ia64_pmpyshr2_u (code, 1, 2, 3, 7); ia64_pmpyshr2 (code, 1, 2, 3, 15); ia64_pmpyshr2_u (code, 1, 2, 3, 15); ia64_pmpyshr2 (code, 1, 2, 3, 16); ia64_pmpyshr2_u (code, 1, 2, 3, 16); ia64_pmpy2_r (code, 1, 2, 3); ia64_pmpy2_l (code, 1, 2, 3); ia64_mix1_r (code, 1, 2, 3); ia64_mix2_r (code, 1, 2, 3); ia64_mix4_r (code, 1, 2, 3); ia64_mix1_l (code, 1, 2, 3); ia64_mix2_l (code, 1, 2, 3); ia64_mix4_l (code, 1, 2, 3); ia64_pack2_uss (code, 1, 2, 3); ia64_pack2_sss (code, 1, 2, 3); ia64_pack4_sss (code, 1, 2, 3); ia64_unpack1_h (code, 1, 2, 3); ia64_unpack2_h (code, 1, 2, 3); ia64_unpack4_h (code, 1, 2, 3); ia64_unpack1_l (code, 1, 2, 3); ia64_unpack2_l (code, 1, 2, 3); ia64_unpack4_l (code, 1, 2, 3); ia64_pmin1_u (code, 1, 2, 3); ia64_pmax1_u (code, 1, 2, 3); ia64_pmin2 (code, 1, 2, 3); ia64_pmax2 (code, 1, 2, 3); ia64_psad1 (code, 1, 2, 3); ia64_mux1 (code, 1, 2, IA64_MUX1_BRCST); ia64_mux1 (code, 1, 2, IA64_MUX1_MIX); ia64_mux1 (code, 1, 2, IA64_MUX1_SHUF); ia64_mux1 (code, 1, 2, IA64_MUX1_ALT); ia64_mux1 (code, 1, 2, IA64_MUX1_REV); ia64_mux2 (code, 1, 2, 0x8d); ia64_pshr2 
(code, 1, 2, 3); ia64_pshr4 (code, 1, 2, 3); ia64_shr (code, 1, 2, 3); ia64_pshr2_u (code, 1, 2, 3); ia64_pshr4_u (code, 1, 2, 3); ia64_shr_u (code, 1, 2, 3); ia64_pshr2_imm (code, 1, 2, 20); ia64_pshr4_imm (code, 1, 2, 20); ia64_pshr2_u_imm (code, 1, 2, 20); ia64_pshr4_u_imm (code, 1, 2, 20); ia64_pshl2 (code, 1, 2, 3); ia64_pshl4 (code, 1, 2, 3); ia64_shl (code, 1, 2, 3); ia64_pshl2_imm (code, 1, 2, 20); ia64_pshl4_imm (code, 1, 2, 20); ia64_popcnt (code, 1, 2); ia64_shrp (code, 1, 2, 3, 62); ia64_extr_u (code, 1, 2, 62, 61); ia64_extr (code, 1, 2, 62, 61); ia64_dep_z (code, 1, 2, 62, 61); ia64_dep_z_imm (code, 1, 127, 62, 61); ia64_dep_z_imm (code, 1, -128, 62, 61); ia64_dep_imm (code, 1, 0, 2, 62, 61); ia64_dep_imm (code, 1, -1, 2, 62, 61); ia64_dep (code, 1, 2, 3, 10, 15); ia64_tbit_z (code, 1, 2, 3, 0); ia64_tbit_z (code, 1, 2, 3, 63); ia64_tbit_z_unc (code, 1, 2, 3, 63); ia64_tbit_z_and (code, 1, 2, 3, 63); ia64_tbit_nz_and (code, 1, 2, 3, 63); ia64_tbit_z_or (code, 1, 2, 3, 63); ia64_tbit_nz_or (code, 1, 2, 3, 63); ia64_tbit_z_or_andcm (code, 1, 2, 3, 63); ia64_tbit_nz_or_andcm (code, 1, 2, 3, 63); ia64_tnat_z (code, 1, 2, 3); ia64_tnat_z_unc (code, 1, 2, 3); ia64_tnat_z_and (code, 1, 2, 3); ia64_tnat_nz_and (code, 1, 2, 3); ia64_tnat_z_or (code, 1, 2, 3); ia64_tnat_nz_or (code, 1, 2, 3); ia64_tnat_z_or_andcm (code, 1, 2, 3); ia64_tnat_nz_or_andcm (code, 1, 2, 3); ia64_nop_i (code, 0x1234); ia64_hint_i (code, 0x1234); ia64_break_i (code, 0x1234); ia64_chk_s_i (code, 1, 0); ia64_chk_s_i (code, 1, -1); ia64_chk_s_i (code, 1, 1); ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0); ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_SPTK, 0); ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, 0); ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, IA64_BR_IH_IMP); ia64_mov_ret_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0); ia64_mov_from_br (code, 1, 1); ia64_mov_to_pred (code, 1, 0xfe); ia64_mov_to_pred_rot_imm (code, 
0xff0000); ia64_mov_from_ip (code, 1); ia64_mov_from_pred (code, 1); ia64_mov_to_ar_i (code, 1, 1); ia64_mov_to_ar_imm_i (code, 1, 127); ia64_mov_from_ar_i (code, 1, 1); ia64_zxt1 (code, 1, 2); ia64_zxt2 (code, 1, 2); ia64_zxt4 (code, 1, 2); ia64_sxt1 (code, 1, 2); ia64_sxt2 (code, 1, 2); ia64_sxt4 (code, 1, 2); ia64_czx1_l (code, 1, 2); ia64_czx2_l (code, 1, 2); ia64_czx1_r (code, 1, 2); ia64_czx2_r (code, 1, 2); ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NONE); ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NT1); ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NTA); ia64_ld1_hint (code, 1, 2, 0); ia64_ld2_hint (code, 1, 2, 0); ia64_ld4_hint (code, 1, 2, 0); ia64_ld8_hint (code, 1, 2, 0); ia64_ld1_s_hint (code, 1, 2, 0); ia64_ld2_s_hint (code, 1, 2, 0); ia64_ld4_s_hint (code, 1, 2, 0); ia64_ld8_s_hint (code, 1, 2, 0); ia64_ld1_a_hint (code, 1, 2, 0); ia64_ld2_a_hint (code, 1, 2, 0); ia64_ld4_a_hint (code, 1, 2, 0); ia64_ld8_a_hint (code, 1, 2, 0); ia64_ld1_sa_hint (code, 1, 2, 0); ia64_ld2_sa_hint (code, 1, 2, 0); ia64_ld4_sa_hint (code, 1, 2, 0); ia64_ld8_sa_hint (code, 1, 2, 0); ia64_ld1_bias_hint (code, 1, 2, 0); ia64_ld2_bias_hint (code, 1, 2, 0); ia64_ld4_bias_hint (code, 1, 2, 0); ia64_ld8_bias_hint (code, 1, 2, 0); ia64_ld1_inc_hint (code, 1, 2, 3, IA64_LD_HINT_NONE); ia64_ld1_inc_imm_hint (code, 1, 2, 255, IA64_LD_HINT_NONE); ia64_ld1_inc_imm_hint (code, 1, 2, -256, IA64_LD_HINT_NONE); ia64_st1_hint (code, 1, 2, IA64_ST_HINT_NTA); ia64_st1_hint (code, 1, 2, IA64_ST_HINT_NONE); ia64_st2_hint (code, 1, 2, IA64_ST_HINT_NONE); ia64_st4_hint (code, 1, 2, IA64_ST_HINT_NONE); ia64_st8_hint (code, 1, 2, IA64_ST_HINT_NONE); ia64_st1_rel_hint (code, 1, 2, IA64_ST_HINT_NONE); ia64_st2_rel_hint (code, 1, 2, IA64_ST_HINT_NONE); ia64_st4_rel_hint (code, 1, 2, IA64_ST_HINT_NONE); ia64_st8_rel_hint (code, 1, 2, IA64_ST_HINT_NONE); ia64_st8_spill_hint (code, 1, 2, IA64_ST_HINT_NONE); ia64_st16_hint (code, 1, 2, IA64_ST_HINT_NONE); ia64_st16_rel_hint (code, 1, 2, IA64_ST_HINT_NONE); 
ia64_st1_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); ia64_st2_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); ia64_st4_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); ia64_st8_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); ia64_st1_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); ia64_st2_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); ia64_st4_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); ia64_st8_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); ia64_st8_spill_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE); ia64_ldfs_hint (code, 1, 2, 0); ia64_ldfd_hint (code, 1, 2, 0); ia64_ldf8_hint (code, 1, 2, 0); ia64_ldfe_hint (code, 1, 2, 0); ia64_ldfs_s_hint (code, 1, 2, 0); ia64_ldfd_s_hint (code, 1, 2, 0); ia64_ldf8_s_hint (code, 1, 2, 0); ia64_ldfe_s_hint (code, 1, 2, 0); ia64_ldfs_a_hint (code, 1, 2, 0); ia64_ldfd_a_hint (code, 1, 2, 0); ia64_ldf8_a_hint (code, 1, 2, 0); ia64_ldfe_a_hint (code, 1, 2, 0); ia64_ldfs_sa_hint (code, 1, 2, 0); ia64_ldfd_sa_hint (code, 1, 2, 0); ia64_ldf8_sa_hint (code, 1, 2, 0); ia64_ldfe_sa_hint (code, 1, 2, 0); ia64_ldfs_c_clr_hint (code, 1, 2, 0); ia64_ldfd_c_clr_hint (code, 1, 2, 0); ia64_ldf8_c_clr_hint (code, 1, 2, 0); ia64_ldfe_c_clr_hint (code, 1, 2, 0); ia64_ldfs_c_nc_hint (code, 1, 2, 0); ia64_ldfd_c_nc_hint (code, 1, 2, 0); ia64_ldf8_c_nc_hint (code, 1, 2, 0); ia64_ldfe_c_nc_hint (code, 1, 2, 0); ia64_ldf_fill_hint (code, 1, 2, 0); ia64_ldfs_inc_hint (code, 1, 2, 3, 0); ia64_ldfd_inc_hint (code, 1, 2, 3, 0); ia64_ldf8_inc_hint (code, 1, 2, 3, 0); ia64_ldfe_inc_hint (code, 1, 2, 3, 0); ia64_ldfs_s_inc_hint (code, 1, 2, 3, 0); ia64_ldfd_s_inc_hint (code, 1, 2, 3, 0); ia64_ldf8_s_inc_hint (code, 1, 2, 3, 0); ia64_ldfe_s_inc_hint (code, 1, 2, 3, 0); ia64_ldfs_a_inc_hint (code, 1, 2, 3, 0); ia64_ldfd_a_inc_hint (code, 1, 2, 3, 0); ia64_ldf8_a_inc_hint (code, 1, 2, 3, 0); ia64_ldfe_a_inc_hint (code, 1, 2, 3, 0); ia64_ldfs_sa_inc_hint (code, 1, 2, 3, 0); ia64_ldfd_sa_inc_hint (code, 1, 2, 
3, 0); ia64_ldf8_sa_inc_hint (code, 1, 2, 3, 0); ia64_ldfe_sa_inc_hint (code, 1, 2, 3, 0); ia64_ldfs_c_clr_inc_hint (code, 1, 2, 3, 0); ia64_ldfd_c_clr_inc_hint (code, 1, 2, 3, 0); ia64_ldf8_c_clr_inc_hint (code, 1, 2, 3, 0); ia64_ldfe_c_clr_inc_hint (code, 1, 2, 3, 0); ia64_ldfs_c_nc_inc_hint (code, 1, 2, 3, 0); ia64_ldfd_c_nc_inc_hint (code, 1, 2, 3, 0); ia64_ldf8_c_nc_inc_hint (code, 1, 2, 3, 0); ia64_ldfe_c_nc_inc_hint (code, 1, 2, 3, 0); ia64_ldf_fill_inc_hint (code, 1, 2, 3, 0); ia64_ldfs_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfd_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldf8_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfe_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfs_s_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfd_s_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldf8_s_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfe_s_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfs_a_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfd_a_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldf8_a_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfe_a_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfs_sa_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfd_sa_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldf8_sa_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfe_sa_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfs_c_clr_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfd_c_clr_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldf8_c_clr_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfe_c_clr_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfs_c_nc_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfd_c_nc_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldf8_c_nc_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfe_c_nc_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldf_fill_inc_imm_hint (code, 1, 2, 255, 0); ia64_stfs_hint (code, 1, 2, 0); ia64_stfd_hint (code, 1, 2, 0); ia64_stf8_hint (code, 1, 2, 0); ia64_stfe_hint (code, 1, 2, 0); ia64_stf_spill_hint (code, 1, 2, 0); ia64_stfs_inc_imm_hint (code, 1, 2, 255, 0); ia64_stfd_inc_imm_hint (code, 1, 2, 255, 0); ia64_stf8_inc_imm_hint (code, 1, 2, 255, 0); 
ia64_stfe_inc_imm_hint (code, 1, 2, 255, 0); ia64_stf_spill_inc_imm_hint (code, 1, 2, 255, 0); ia64_ldfps_hint (code, 1, 2, 3, 0); ia64_ldfpd_hint (code, 1, 2, 3, 0); ia64_ldfp8_hint (code, 1, 2, 3, 0); ia64_ldfps_s_hint (code, 1, 2, 3, 0); ia64_ldfpd_s_hint (code, 1, 2, 3, 0); ia64_ldfp8_s_hint (code, 1, 2, 3, 0); ia64_ldfps_a_hint (code, 1, 2, 3, 0); ia64_ldfpd_a_hint (code, 1, 2, 3, 0); ia64_ldfp8_a_hint (code, 1, 2, 3, 0); ia64_ldfps_sa_hint (code, 1, 2, 3, 0); ia64_ldfpd_sa_hint (code, 1, 2, 3, 0); ia64_ldfp8_sa_hint (code, 1, 2, 3, 0); ia64_ldfps_c_clr_hint (code, 1, 2, 3, 0); ia64_ldfpd_c_clr_hint (code, 1, 2, 3, 0); ia64_ldfp8_c_clr_hint (code, 1, 2, 3, 0); ia64_ldfps_c_nc_hint (code, 1, 2, 3, 0); ia64_ldfpd_c_nc_hint (code, 1, 2, 3, 0); ia64_ldfp8_c_nc_hint (code, 1, 2, 3, 0); ia64_ldfps_inc_hint (code, 1, 2, 3, 0); ia64_ldfpd_inc_hint (code, 1, 2, 3, 0); ia64_ldfp8_inc_hint (code, 1, 2, 3, 0); ia64_ldfps_s_inc_hint (code, 1, 2, 3, 0); ia64_ldfpd_s_inc_hint (code, 1, 2, 3, 0); ia64_ldfp8_s_inc_hint (code, 1, 2, 3, 0); ia64_ldfps_a_inc_hint (code, 1, 2, 3, 0); ia64_ldfpd_a_inc_hint (code, 1, 2, 3, 0); ia64_ldfp8_a_inc_hint (code, 1, 2, 3, 0); ia64_ldfps_sa_inc_hint (code, 1, 2, 3, 0); ia64_ldfpd_sa_inc_hint (code, 1, 2, 3, 0); ia64_ldfp8_sa_inc_hint (code, 1, 2, 3, 0); ia64_ldfps_c_clr_inc_hint (code, 1, 2, 3, 0); ia64_ldfpd_c_clr_inc_hint (code, 1, 2, 3, 0); ia64_ldfp8_c_clr_inc_hint (code, 1, 2, 3, 0); ia64_ldfps_c_nc_inc_hint (code, 1, 2, 3, 0); ia64_ldfpd_c_nc_inc_hint (code, 1, 2, 3, 0); ia64_ldfp8_c_nc_inc_hint (code, 1, 2, 3, 0); ia64_lfetch_hint (code, 1, 0); ia64_lfetch_excl_hint (code, 1, 0); ia64_lfetch_fault_hint (code, 1, 0); ia64_lfetch_fault_excl_hint (code, 1, 0); ia64_lfetch_hint (code, 1, IA64_LFHINT_NT1); ia64_lfetch_hint (code, 1, IA64_LFHINT_NT2); ia64_lfetch_hint (code, 1, IA64_LFHINT_NTA); ia64_lfetch_inc_hint (code, 1, 2, 0); ia64_lfetch_excl_inc_hint (code, 1, 2, 0); ia64_lfetch_fault_inc_hint (code, 1, 2, 0); 
ia64_lfetch_fault_excl_inc_hint (code, 1, 2, 0); ia64_lfetch_inc_imm_hint (code, 1, 255, 0); ia64_lfetch_excl_inc_imm_hint (code, 1, 255, 0); ia64_lfetch_fault_inc_imm_hint (code, 1, 255, 0); ia64_lfetch_fault_excl_inc_imm_hint (code, 1, 255, 0); ia64_cmpxchg1_acq_hint (code, 1, 2, 3, 0); ia64_cmpxchg2_acq_hint (code, 1, 2, 3, 0); ia64_cmpxchg4_acq_hint (code, 1, 2, 3, 0); ia64_cmpxchg8_acq_hint (code, 1, 2, 3, 0); ia64_cmpxchg1_rel_hint (code, 1, 2, 3, 0); ia64_cmpxchg2_rel_hint (code, 1, 2, 3, 0); ia64_cmpxchg4_rel_hint (code, 1, 2, 3, 0); ia64_cmpxchg8_rel_hint (code, 1, 2, 3, 0); ia64_cmpxchg16_acq_hint (code, 1, 2, 3, 0); ia64_cmpxchg16_rel_hint (code, 1, 2, 3, 0); ia64_xchg1_hint (code, 1, 2, 3, 0); ia64_xchg2_hint (code, 1, 2, 3, 0); ia64_xchg4_hint (code, 1, 2, 3, 0); ia64_xchg8_hint (code, 1, 2, 3, 0); ia64_fetchadd4_acq_hint (code, 1, 2, -16, 0); ia64_fetchadd4_acq_hint (code, 1, 2, -8, 0); ia64_fetchadd4_acq_hint (code, 1, 2, -4, 0); ia64_fetchadd4_acq_hint (code, 1, 2, -1, 0); ia64_fetchadd4_acq_hint (code, 1, 2, 1, 0); ia64_fetchadd4_acq_hint (code, 1, 2, 4, 0); ia64_fetchadd4_acq_hint (code, 1, 2, 8, 0); ia64_fetchadd4_acq_hint (code, 1, 2, 16, 0); ia64_fetchadd4_acq_hint (code, 1, 2, 16, 0); ia64_fetchadd8_acq_hint (code, 1, 2, 16, 0); ia64_fetchadd4_rel_hint (code, 1, 2, 16, 0); ia64_fetchadd8_rel_hint (code, 1, 2, 16, 0); ia64_setf_sig (code, 1, 2); ia64_setf_exp (code, 1, 2); ia64_setf_s (code, 1, 2); ia64_setf_d (code, 1, 2); ia64_getf_sig (code, 1, 2); ia64_getf_exp (code, 1, 2); ia64_getf_s (code, 1, 2); ia64_getf_d (code, 1, 2); ia64_chk_s_m (code, 1, 0); ia64_chk_s_m (code, 1, 1); ia64_chk_s_m (code, 1, -1); ia64_chk_s_float_m (code, 1, 0); ia64_chk_a_nc (code, 1, 0); ia64_chk_a_nc (code, 1, 1); ia64_chk_a_nc (code, 1, -1); ia64_chk_a_nc (code, 1, 0); ia64_chk_a_clr (code, 1, 0); ia64_chk_a_nc_float (code, 1, 0); ia64_chk_a_clr_float (code, 1, 0); ia64_invala (code); ia64_fwb (code); ia64_mf (code); ia64_mf_a (code); ia64_srlz_d (code); 
ia64_stlz_i (code); ia64_sync_i (code); ia64_flushrs (code); ia64_loadrs (code); ia64_invala_e (code, 1); ia64_invala_e_float (code, 1); ia64_fc (code, 1); ia64_fc_i (code, 1); ia64_mov_to_ar_m (code, 1, 1); ia64_mov_to_ar_imm_m (code, 1, 127); ia64_mov_from_ar_m (code, 1, 1); ia64_mov_to_cr (code, 1, 2); ia64_mov_from_cr (code, 1, 2); ia64_alloc (code, 1, 3, 4, 5, 0); ia64_alloc (code, 1, 3, 4, 5, 8); ia64_mov_to_psr_l (code, 1); ia64_mov_to_psr_um (code, 1); ia64_mov_from_psr (code, 1); ia64_mov_from_psr_um (code, 1); ia64_break_m (code, 0x1234); ia64_nop_m (code, 0x1234); ia64_hint_m (code, 0x1234); ia64_br_cond_hint (code, 0, 0, 0, 0); ia64_br_wexit_hint (code, 0, 0, 0, 0); ia64_br_wtop_hint (code, 0, 0, 0, 0); ia64_br_cloop_hint (code, 0, 0, 0, 0); ia64_br_cexit_hint (code, 0, 0, 0, 0); ia64_br_ctop_hint (code, 0, 0, 0, 0); ia64_br_call_hint (code, 1, 0, 0, 0, 0); ia64_br_cond_reg_hint (code, 1, 0, 0, 0); ia64_br_ia_reg_hint (code, 1, 0, 0, 0); ia64_br_ret_reg_hint (code, 1, 0, 0, 0); ia64_br_call_reg_hint (code, 1, 2, 0, 0, 0); ia64_cover (code); ia64_clrrrb (code); ia64_clrrrb_pr (code); ia64_rfi (code); ia64_bsw_0 (code); ia64_bsw_1 (code); ia64_epc (code); ia64_break_b (code, 0x1234); ia64_nop_b (code, 0x1234); ia64_hint_b (code, 0x1234); ia64_break_x (code, 0x2123456789ABCDEFULL); ia64_movl (code, 1, 0x123456789ABCDEF0LL); ia64_brl_cond_hint (code, 0, 0, 0, 0); ia64_brl_cond_hint (code, -1, 0, 0, 0); ia64_brl_call_hint (code, 1, 0, 0, 0, 0); ia64_brl_call_hint (code, 1, -1, 0, 0, 0); ia64_nop_x (code, 0x2123456789ABCDEFULL); ia64_hint_x (code, 0x2123456789ABCDEFULL); ia64_movl_pred (code, 1, 1, 0x123456789ABCDEF0LL); /* FLOATING-POINT */ ia64_fma_sf_pred (code, 1, 1, 2, 3, 4, 2); ia64_fma_s_sf_pred (code, 1, 1, 2, 3, 4, 2); ia64_fma_d_sf_pred (code, 1, 1, 2, 3, 4, 2); ia64_fpma_sf_pred (code, 1, 1, 2, 3, 4, 2); ia64_fms_sf_pred (code, 1, 1, 2, 3, 4, 2); ia64_fms_s_sf_pred (code, 1, 1, 2, 3, 4, 2); ia64_fms_d_sf_pred (code, 1, 1, 2, 3, 4, 2); 
ia64_fpms_sf_pred (code, 1, 1, 2, 3, 4, 2); ia64_fnma_sf_pred (code, 1, 1, 2, 3, 4, 2); ia64_fnma_s_sf_pred (code, 1, 1, 2, 3, 4, 2); ia64_fnma_d_sf_pred (code, 1, 1, 2, 3, 4, 2); ia64_fpnma_sf_pred (code, 1, 1, 2, 3, 4, 2); ia64_xma_l_pred (code, 1, 1, 2, 3, 4); ia64_xma_h_pred (code, 1, 1, 2, 3, 4); ia64_xma_hu_pred (code, 1, 1, 2, 3, 4); ia64_fselect_pred (code, 1, 1, 2, 3, 4); ia64_fcmp_eq_sf_pred (code, 1, 1, 2, 3, 4, 0); ia64_fcmp_lt_sf_pred (code, 1, 1, 2, 3, 4, 0); ia64_fcmp_le_sf_pred (code, 1, 1, 2, 3, 4, 0); ia64_fcmp_unord_sf_pred (code, 1, 1, 2, 3, 4, 0); ia64_fcmp_eq_unc_sf_pred (code, 1, 1, 2, 3, 4, 0); ia64_fcmp_lt_unc_sf_pred (code, 1, 1, 2, 3, 4, 0); ia64_fcmp_le_unc_sf_pred (code, 1, 1, 2, 3, 4, 0); ia64_fcmp_unord_unc_sf_pred (code, 1, 1, 2, 3, 4, 0); ia64_fclass_m_pred (code, 1, 1, 2, 3, 0x1ff); ia64_fclass_m_unc_pred (code, 1, 1, 2, 3, 0x1ff); ia64_frcpa_sf_pred (code, 1, 1, 2, 3, 4, 0); ia64_fprcpa_sf_pred (code, 1, 1, 2, 3, 4, 0); ia64_frsqrta_sf_pred (code, 1, 1, 2, 4, 0); ia64_fprsqrta_sf_pred (code, 1, 1, 2, 4, 0); ia64_fmin_sf_pred (code, 1, 2, 3, 4, 0); ia64_fman_sf_pred (code, 1, 2, 3, 4, 0); ia64_famin_sf_pred (code, 1, 2, 3, 4, 0); ia64_famax_sf_pred (code, 1, 2, 3, 4, 0); ia64_fpmin_sf_pred (code, 1, 2, 3, 4, 0); ia64_fpman_sf_pred (code, 1, 2, 3, 4, 0); ia64_fpamin_sf_pred (code, 1, 2, 3, 4, 0); ia64_fpamax_sf_pred (code, 1, 2, 3, 4, 0); ia64_fpcmp_eq_sf_pred (code, 1, 2, 3, 4, 0); ia64_fpcmp_lt_sf_pred (code, 1, 2, 3, 4, 0); ia64_fpcmp_le_sf_pred (code, 1, 2, 3, 4, 0); ia64_fpcmp_unord_sf_pred (code, 1, 2, 3, 4, 0); ia64_fpcmp_neq_sf_pred (code, 1, 2, 3, 4, 0); ia64_fpcmp_nlt_sf_pred (code, 1, 2, 3, 4, 0); ia64_fpcmp_nle_sf_pred (code, 1, 2, 3, 4, 0); ia64_fpcmp_ord_sf_pred (code, 1, 2, 3, 4, 0); ia64_fmerge_s_pred (code, 1, 2, 3, 4); ia64_fmerge_ns_pred (code, 1, 2, 3, 4); ia64_fmerge_se_pred (code, 1, 2, 3, 4); ia64_fmix_lr_pred (code, 1, 2, 3, 4); ia64_fmix_r_pred (code, 1, 2, 3, 4); ia64_fmix_l_pred (code, 1, 2, 3, 4); 
ia64_fsxt_r_pred (code, 1, 2, 3, 4); ia64_fsxt_l_pred (code, 1, 2, 3, 4); ia64_fpack_pred (code, 1, 2, 3, 4); ia64_fswap_pred (code, 1, 2, 3, 4); ia64_fswap_nl_pred (code, 1, 2, 3, 4); ia64_fswap_nr_pred (code, 1, 2, 3, 4); ia64_fand_pred (code, 1, 2, 3, 4); ia64_fandcm_pred (code, 1, 2, 3, 4); ia64_for_pred (code, 1, 2, 3, 4); ia64_fxor_pred (code, 1, 2, 3, 4); ia64_fpmerge_s_pred (code, 1, 2, 3, 4); ia64_fpmerge_ns_pred (code, 1, 2, 3, 4); ia64_fpmerge_se_pred (code, 1, 2, 3, 4); ia64_fcvt_fx_sf_pred ((code), 1, 2, 3, 0); ia64_fcvt_fxu_sf_pred ((code), 1, 2, 3, 0); ia64_fcvt_fx_trunc_sf_pred ((code), 1, 2, 3, 0); ia64_fcvt_fxu_trunc_sf_pred ((code), 1, 2, 3, 0); ia64_fpcvt_fx_sf_pred ((code), 1, 2, 3, 0); ia64_fpcvt_fxu_sf_pred ((code), 1, 2, 3, 0); ia64_fpcvt_fx_trunc_sf_pred ((code), 1, 2, 3, 0); ia64_fpcvt_fxu_trunc_sf_pred ((code), 1, 2, 3, 0); ia64_fcvt_xf_pred ((code), 1, 2, 3); ia64_fsetc_sf_pred ((code), 1, 0x33, 0x33, 3); ia64_fclrf_sf_pred ((code), 1, 3); ia64_fchkf_sf_pred ((code), 1, -1, 3); ia64_break_f_pred ((code), 1, 0x1234); ia64_movl (code, 31, -123456); ia64_codegen_close (code); #if 0 /* disassembly */ { guint8 *buf = code.buf; int template;