Example no. 1
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
	set_irq_regs(old_regs);
}
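
The IRQ_DEBUG block above rate-limits its warning with DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5): at most five messages per five-second window. A rough user-space sketch of those semantics, assuming only the (interval, burst) windowing behavior (this is not the kernel implementation):

#include <stdio.h>
#include <time.h>

struct ratelimit { time_t begin; int interval; int burst; int printed; };

static int ratelimit_ok(struct ratelimit *rs)
{
	time_t now = time(NULL);

	if (now - rs->begin >= rs->interval) {	/* window rolled over: reset */
		rs->begin = now;
		rs->printed = 0;
	}
	return rs->printed++ < rs->burst;	/* allow up to `burst` per window */
}

int main(void)
{
	struct ratelimit rs = { .interval = 5, .burst = 5 };

	for (int i = 0; i < 8; i++)
		if (ratelimit_ok(&rs))
			printf("message %d\n", i);	/* only the first 5 print */
	return 0;
}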
Example no. 2
/*
 * This function emulates interrupt processing when a CPU is about to be
 * brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt-style processing.
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Call the normal interrupt-handling path, as it
			 * would have been invoked from a real interrupt
			 * handler, passing NULL for pt_regs. This could
			 * probably share code with ia64_handle_irq().
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
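
The core pattern here, shared with ia64_handle_irq(), is the drain loop: read the next pending vector from the IVR, handle and acknowledge it, and stop when the controller returns the spurious vector (IA64_SPURIOUS_INT_VECTOR is 0xf). A minimal user-space mock of that loop, where mock_get_ivr and MOCK_SPURIOUS are illustrative stand-ins, not kernel interfaces:

#include <stdio.h>

#define MOCK_SPURIOUS	0xf	/* stand-in for IA64_SPURIOUS_INT_VECTOR */

static int pending[] = { 0x30, 0x41, 0x52 };
static unsigned next;

static int mock_get_ivr(void)	/* stand-in for ia64_get_ivr() */
{
	return next < sizeof(pending) / sizeof(pending[0])
		? pending[next++] : MOCK_SPURIOUS;
}

int main(void)
{
	for (int v = mock_get_ivr(); v != MOCK_SPURIOUS; v = mock_get_ivr())
		printf("handle vector 0x%x, then EOI\n", v);
	return 0;
}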
Example no. 3
static u_int
ia64_ih_clock(struct thread *td, u_int xiv, struct trapframe *tf)
{
	struct eventtimer *et;
	uint64_t itc, load;
	uint32_t mode;

	PCPU_INC(md.stats.pcs_nclks);
	intrcnt[INTRCNT_CLOCK]++;

	itc = ia64_get_itc();
	PCPU_SET(md.clock, itc);

	mode = PCPU_GET(md.clock_mode);
	if (mode == CLOCK_ET_PERIODIC) {
		load = PCPU_GET(md.clock_load);
		ia64_set_itm(itc + load);
	} else
		ia64_set_itv((1 << 16) | xiv);

	ia64_set_eoi(0);
	ia64_srlz_d();

	et = &ia64_clock_et;
	if (et->et_active)
		et->et_event_cb(et, et->et_arg);
	return (1);
}
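
In periodic mode the handler above rearms the match register relative to the ITC value read at interrupt time, so interrupt latency shifts later ticks rather than being absorbed. A small sketch of that arithmetic, with a fixed illustrative latency:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t itc = 1000, load = 100, latency = 3;

	for (int i = 0; i < 3; i++) {
		uint64_t itm = itc + load;	/* ia64_set_itm(itc + load) */
		itc = itm + latency;		/* handler runs a bit after the match */
		printf("tick %d serviced at %" PRIu64 "\n", i, itc);
	}
	return 0;
}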
Example no. 4
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
	xen_kexec_image_t *image = arg;
	int ii;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Mask CMC and Performance Monitor interrupts */
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* Mask ITV and Local Redirect Registers */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);

	/* terminate possible nested in-service interrupts */
	for (ii = 0; ii < 16; ii++)
		ia64_eoi();

	/* unmask TPR and clear any pending interrupts */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
	ia64_srlz_d();
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();
	platform_kernel_launch_event();
	relocate_new_kernel(image->indirection_page, image->start_address,
			    __pa(ia64_boot_param), image->reboot_code_buffer);
	BUG();
}
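
All of the masking writes above store 1 << 16: in these IA-64 interruption control registers (ITV, PMV, CMCV, LRR0/1) bits 7:0 hold the vector and bit 16 is the mask ("m") bit, so writing 1 << 16 clears the vector field and disables delivery. A helper capturing the idiom (the macro and function names are illustrative, not kernel interfaces):

#define IA64_INTR_MASK_BIT	(1UL << 16)	/* architectural "m" bit */

/* Return a register image with delivery disabled, preserving the vector. */
static unsigned long ia64_masked_vector(unsigned long vector)
{
	return IA64_INTR_MASK_BIT | (vector & 0xff);
}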
Example no. 5
void
switch_to_physical_rid(VCPU *vcpu)
{
    u64 psr;
    u64 rr;

    switch (vcpu->arch.arch_vmx.mmu_mode) {
    case VMX_MMU_PHY_DT:
        rr = vcpu->arch.metaphysical_rid_dt;
        break;
    case VMX_MMU_PHY_D:
        rr = vcpu->arch.metaphysical_rid_d;
        break;
    default:
        panic_domain(NULL, "bad mmu mode value");
    }
    
    psr = ia64_clear_ic();
    ia64_set_rr(VRN0<<VRN_SHIFT, rr);
    ia64_dv_serialize_data();
    ia64_set_rr(VRN4<<VRN_SHIFT, rr);
    ia64_srlz_d();
    
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
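
switch_to_physical_rid() brackets the two non-atomic region-register writes in a save/modify/restore sequence: capture the PSR while clearing interruption collection, perform the update, then restore the PSR and serialize. A user-space analogue of the same idiom, substituting POSIX signal masking for PSR.ic (purely illustrative):

#include <signal.h>
#include <stdio.h>

static void update_under_mask(void (*update)(void))
{
	sigset_t all, old;

	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, &old);	/* analogue of ia64_clear_ic() */
	update();				/* analogue of the rr writes */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* analogue of ia64_set_psr(psr) */
}

static void do_update(void)
{
	puts("state updated with the hazard masked");
}

int main(void)
{
	update_under_mask(do_update);
	return 0;
}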
Example no. 6
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
Example no. 7
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
	struct kimage *image = arg;
	relocate_new_kernel_t rnk;
	void *pal_addr = efi_get_pal_addr();
	unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
	unsigned long vector;
	int ii;
	u64 fp, gp;
	ia64_fptr_t *init_handler = (ia64_fptr_t *)ia64_os_init_on_kdump;

	BUG_ON(!image);
	if (image->type == KEXEC_TYPE_CRASH) {
		crash_save_this_cpu();
		current->thread.ksp = (__u64)info->sw - 16;

		/* Register noop init handler */
		fp = ia64_tpa(init_handler->fp);
		gp = ia64_tpa(ia64_getreg(_IA64_REG_GP));
		ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, fp, gp, 0, fp, gp, 0);
	} else {
		/* Unregister init handlers of current kernel */
		ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 0, 0, 0, 0, 0, 0);
	}

	/* Unregister mca handler - No more recovery on current kernel */
	ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 0, 0, 0, 0, 0, 0);

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Mask CMC and Performance Monitor interrupts */
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* Mask ITV and Local Redirect Registers */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);

	/* terminate possible nested in-service interrupts */
	for (ii = 0; ii < 16; ii++)
		ia64_eoi();

	/* unmask TPR and clear any pending interrupts */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
	ia64_srlz_d();
	vector = ia64_get_ivr();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	platform_kernel_launch_event();
	rnk = (relocate_new_kernel_t)&code_addr;
	(*rnk)(image->head, image->start, ia64_boot_param,
		     GRANULEROUNDDOWN((unsigned long) pal_addr));
	BUG();
}
Example no. 8
void
pcpu_initclock(void)
{

	PCPU_SET(clockadj, 0);
	PCPU_SET(clock, ia64_get_itc());
	ia64_set_itm(PCPU_GET(clock) + ia64_clock_reload);
	ia64_set_itv(CLOCK_VECTOR);	/* highest priority class */
	ia64_srlz_d();
}
Example no. 9
void
switch_to_virtual_rid(VCPU *vcpu)
{
    u64 psr;

    psr = ia64_clear_ic();
    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
    ia64_dv_serialize_data();
    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}
Example no. 10
void
vmx_load_all_rr(VCPU *vcpu)
{
	unsigned long rr0, rr4;

	switch (vcpu->arch.arch_vmx.mmu_mode) {
	case VMX_MMU_VIRTUAL:
		rr0 = vcpu->arch.metaphysical_saved_rr0;
		rr4 = vcpu->arch.metaphysical_saved_rr4;
		break;
	case VMX_MMU_PHY_DT:
		rr0 = vcpu->arch.metaphysical_rid_dt;
		rr4 = vcpu->arch.metaphysical_rid_dt;
		break;
	case VMX_MMU_PHY_D:
		rr0 = vcpu->arch.metaphysical_rid_d;
		rr4 = vcpu->arch.metaphysical_rid_d;
		break;
	default:
		panic_domain(NULL, "bad mmu mode value");
	}

	ia64_set_rr((VRN0 << VRN_SHIFT), rr0);
	ia64_dv_serialize_data();
	ia64_set_rr((VRN4 << VRN_SHIFT), rr4);
	ia64_dv_serialize_data();
	ia64_set_rr((VRN1 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN2 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN3 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN5 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN6 << VRN_SHIFT), vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
	ia64_dv_serialize_data();
	vmx_switch_rr7_vcpu(vcpu, vrrtomrr(vcpu, VMX(vcpu, vrr[VRN7])));
	ia64_set_pta(VMX(vcpu, mpta));
	vmx_ia64_set_dcr(vcpu);

	ia64_srlz_d();
}
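
Every rr write above indexes a region register as VRNn << VRN_SHIFT: the top three bits of an IA-64 virtual address (bits 63:61) select one of eight regions, so VRN_SHIFT is 61 and region n starts at n << 61. A quick check of that layout:

#include <inttypes.h>
#include <stdio.h>

#define VRN_SHIFT	61	/* va bits 63:61 select the region */

int main(void)
{
	for (uint64_t vrn = 0; vrn < 8; vrn++)
		printf("rr[%" PRIu64 "] covers va base 0x%016" PRIx64 "\n",
		       vrn, vrn << VRN_SHIFT);
	return 0;
}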
Example no. 11
/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
    struct kimage *image = arg;
    relocate_new_kernel_t rnk;
    void *pal_addr = efi_get_pal_addr();
    unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
    int ii;

    BUG_ON(!image);
    if (image->type == KEXEC_TYPE_CRASH) {
        crash_save_this_cpu();
        current->thread.ksp = (__u64)info->sw - 16;
    }

    /* Interrupts aren't acceptable while we reboot */
    local_irq_disable();

    /* Mask CMC and Performance Monitor interrupts */
    ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
    ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

    /* Mask ITV and Local Redirect Registers */
    ia64_set_itv(1 << 16);
    ia64_set_lrr0(1 << 16);
    ia64_set_lrr1(1 << 16);

    /* terminate possible nested in-service interrupts */
    for (ii = 0; ii < 16; ii++)
        ia64_eoi();

    /* unmask TPR and clear any pending interrupts */
    ia64_setreg(_IA64_REG_CR_TPR, 0);
    ia64_srlz_d();
    while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
        ia64_eoi();
    platform_kernel_launch_event();
    rnk = (relocate_new_kernel_t)&code_addr;
    (*rnk)(image->head, image->start, ia64_boot_param,
             GRANULEROUNDDOWN((unsigned long) pal_addr));
    BUG();
}
Example no. 12
/*
 * This is where the IVT branches when an external interrupt arrives.
 * It dispatches to the correct hardware IRQ handler via a function
 * pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here.  This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static unsigned char count;
			static long last_time;

			if (time_after(jiffies, last_time + 5 * HZ))
				count = 0;
			if (++count < 5) {
				last_time = jiffies;
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
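
The comment above gives the reasoning for the TPR write: external vectors fall into 16 priority classes of 16 vectors each (class = vector >> 4, held in TPR.mic), and only a strictly higher class can preempt, so each class contributes at most one nesting level. A sketch of that predicate (helper names are illustrative):

#include <stdio.h>

/* An external interrupt is deliverable only if its priority class is
 * strictly higher than the class currently held in TPR.mic. */
static int deliverable(unsigned vector, unsigned tpr_mic)
{
	return (vector >> 4) > tpr_mic;
}

int main(void)
{
	unsigned in_service = 0x40;	/* handling vector 0x40, class 4 */
	unsigned mic = in_service >> 4;	/* what the TPR write establishes */

	printf("0x50 can nest: %d\n", deliverable(0x50, mic));	/* 1: class 5 */
	printf("0x4f can nest: %d\n", deliverable(0x4f, mic));	/* 0: class 4 */
	return 0;
}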
Example no. 13
int
main ()
{
	Ia64CodegenState code;

	guint8 *buf = g_malloc0 (40960);

	ia64_codegen_init (code, buf);

	ia64_add (code, 1, 2, 3);
	ia64_add1 (code, 1, 2, 3);
	ia64_sub (code, 1, 2, 3);
	ia64_sub1 (code, 1, 2, 3);
	ia64_addp4 (code, 1, 2, 3);
	ia64_and (code, 1, 2, 3);
	ia64_andcm (code, 1, 2, 3);
	ia64_or (code, 1, 2, 3);
	ia64_xor (code, 1, 2, 3);
	ia64_shladd (code, 1, 2, 3, 4);
	ia64_shladdp4 (code, 1, 2, 3, 4);
	ia64_sub_imm (code, 1, 0x7f, 2);
	ia64_sub_imm (code, 1, -1, 2);
	ia64_and_imm (code, 1, -128, 2);
	ia64_andcm_imm (code, 1, -128, 2);
	ia64_or_imm (code, 1, -128, 2);
	ia64_xor_imm (code, 1, -128, 2);
	ia64_adds_imm (code, 1, 8191, 2);
	ia64_adds_imm (code, 1, -8192, 2);
	ia64_adds_imm (code, 1, 1234, 2);
	ia64_adds_imm (code, 1, -1234, 2);
	ia64_addp4_imm (code, 1, -1234, 2);
	ia64_addl_imm (code, 1, 1234, 2);
	ia64_addl_imm (code, 1, -1234, 2);
	ia64_addl_imm (code, 1, 2097151, 2);
	ia64_addl_imm (code, 1, -2097152, 2);

	ia64_cmp_lt (code, 1, 2, 1, 2);
	ia64_cmp_ltu (code, 1, 2, 1, 2);
	ia64_cmp_eq (code, 1, 2, 1, 2);
	ia64_cmp_lt_unc (code, 1, 2, 1, 2);
	ia64_cmp_ltu_unc (code, 1, 2, 1, 2);
	ia64_cmp_eq_unc (code, 1, 2, 1, 2);
	ia64_cmp_eq_and (code, 1, 2, 1, 2);
	ia64_cmp_eq_or (code, 1, 2, 1, 2);
	ia64_cmp_eq_or_andcm (code, 1, 2, 1, 2);
	ia64_cmp_ne_and (code, 1, 2, 1, 2);
	ia64_cmp_ne_or (code, 1, 2, 1, 2);
	ia64_cmp_ne_or_andcm (code, 1, 2, 1, 2);

	ia64_cmp4_lt (code, 1, 2, 1, 2);
	ia64_cmp4_ltu (code, 1, 2, 1, 2);
	ia64_cmp4_eq (code, 1, 2, 1, 2);
	ia64_cmp4_lt_unc (code, 1, 2, 1, 2);
	ia64_cmp4_ltu_unc (code, 1, 2, 1, 2);
	ia64_cmp4_eq_unc (code, 1, 2, 1, 2);
	ia64_cmp4_eq_and (code, 1, 2, 1, 2);
	ia64_cmp4_eq_or (code, 1, 2, 1, 2);
	ia64_cmp4_eq_or_andcm (code, 1, 2, 1, 2);
	ia64_cmp4_ne_and (code, 1, 2, 1, 2);
	ia64_cmp4_ne_or (code, 1, 2, 1, 2);
	ia64_cmp4_ne_or_andcm (code, 1, 2, 1, 2);

	ia64_cmp_gt_and (code, 1, 2, 0, 2);
	ia64_cmp_gt_or (code, 1, 2, 0, 2);
	ia64_cmp_gt_or_andcm (code, 1, 2, 0, 2);
	ia64_cmp_le_and (code, 1, 2, 0, 2);
	ia64_cmp_le_or (code, 1, 2, 0, 2);
	ia64_cmp_le_or_andcm (code, 1, 2, 0, 2);
	ia64_cmp_ge_and (code, 1, 2, 0, 2);
	ia64_cmp_ge_or (code, 1, 2, 0, 2);
	ia64_cmp_ge_or_andcm (code, 1, 2, 0, 2);
	ia64_cmp_lt_and (code, 1, 2, 0, 2);
	ia64_cmp_lt_or (code, 1, 2, 0, 2);
	ia64_cmp_lt_or_andcm (code, 1, 2, 0, 2);

	ia64_cmp4_gt_and (code, 1, 2, 0, 2);
	ia64_cmp4_gt_or (code, 1, 2, 0, 2);
	ia64_cmp4_gt_or_andcm (code, 1, 2, 0, 2);
	ia64_cmp4_le_and (code, 1, 2, 0, 2);
	ia64_cmp4_le_or (code, 1, 2, 0, 2);
	ia64_cmp4_le_or_andcm (code, 1, 2, 0, 2);
	ia64_cmp4_ge_and (code, 1, 2, 0, 2);
	ia64_cmp4_ge_or (code, 1, 2, 0, 2);
	ia64_cmp4_ge_or_andcm (code, 1, 2, 0, 2);
	ia64_cmp4_lt_and (code, 1, 2, 0, 2);
	ia64_cmp4_lt_or (code, 1, 2, 0, 2);
	ia64_cmp4_lt_or_andcm (code, 1, 2, 0, 2);

	ia64_cmp_lt_imm (code, 1, 2, 127, 2);
	ia64_cmp_lt_imm (code, 1, 2, -128, 2);

	ia64_cmp_lt_imm (code, 1, 2, -128, 2);
	ia64_cmp_ltu_imm (code, 1, 2, -128, 2);
	ia64_cmp_eq_imm (code, 1, 2, -128, 2);
	ia64_cmp_lt_unc_imm (code, 1, 2, -128, 2);
	ia64_cmp_ltu_unc_imm (code, 1, 2, -128, 2);
	ia64_cmp_eq_unc_imm (code, 1, 2, -128, 2);
	ia64_cmp_eq_and_imm (code, 1, 2, -128, 2);
	ia64_cmp_eq_or_imm (code, 1, 2, -128, 2);
	ia64_cmp_eq_or_andcm_imm (code, 1, 2, -128, 2);
	ia64_cmp_ne_and_imm (code, 1, 2, -128, 2);
	ia64_cmp_ne_or_imm (code, 1, 2, -128, 2);
	ia64_cmp_ne_or_andcm_imm (code, 1, 2, -128, 2);

	ia64_cmp4_lt_imm (code, 1, 2, -128, 2);
	ia64_cmp4_ltu_imm (code, 1, 2, -128, 2);
	ia64_cmp4_eq_imm (code, 1, 2, -128, 2);
	ia64_cmp4_lt_unc_imm (code, 1, 2, -128, 2);
	ia64_cmp4_ltu_unc_imm (code, 1, 2, -128, 2);
	ia64_cmp4_eq_unc_imm (code, 1, 2, -128, 2);
	ia64_cmp4_eq_and_imm (code, 1, 2, -128, 2);
	ia64_cmp4_eq_or_imm (code, 1, 2, -128, 2);
	ia64_cmp4_eq_or_andcm_imm (code, 1, 2, -128, 2);
	ia64_cmp4_ne_and_imm (code, 1, 2, -128, 2);
	ia64_cmp4_ne_or_imm (code, 1, 2, -128, 2);
	ia64_cmp4_ne_or_andcm_imm (code, 1, 2, -128, 2);

	ia64_padd1 (code, 1, 2, 3);
	ia64_padd2 (code, 1, 2, 3);
	ia64_padd4 (code, 1, 2, 3);
	ia64_padd1_sss (code, 1, 2, 3);
	ia64_padd2_sss (code, 1, 2, 3);
	ia64_padd1_uuu (code, 1, 2, 3);
	ia64_padd2_uuu (code, 1, 2, 3);
	ia64_padd1_uus (code, 1, 2, 3);
	ia64_padd2_uus (code, 1, 2, 3);

	ia64_psub1 (code, 1, 2, 3);
	ia64_psub2 (code, 1, 2, 3);
	ia64_psub4 (code, 1, 2, 3);
	ia64_psub1_sss (code, 1, 2, 3);
	ia64_psub2_sss (code, 1, 2, 3);
	ia64_psub1_uuu (code, 1, 2, 3);
	ia64_psub2_uuu (code, 1, 2, 3);
	ia64_psub1_uus (code, 1, 2, 3);
	ia64_psub2_uus (code, 1, 2, 3);

	ia64_pavg1 (code, 1, 2, 3);
	ia64_pavg2 (code, 1, 2, 3);
	ia64_pavg1_raz (code, 1, 2, 3);
	ia64_pavg2_raz (code, 1, 2, 3);
	ia64_pavgsub1 (code, 1, 2, 3);
	ia64_pavgsub2 (code, 1, 2, 3);
	ia64_pcmp1_eq (code, 1, 2, 3);
	ia64_pcmp2_eq (code, 1, 2, 3);
	ia64_pcmp4_eq (code, 1, 2, 3);
	ia64_pcmp1_gt (code, 1, 2, 3);
	ia64_pcmp2_gt (code, 1, 2, 3);
	ia64_pcmp4_gt (code, 1, 2, 3);
	
	ia64_pshladd2 (code, 1, 2, 3, 4);
	ia64_pshradd2 (code, 1, 2, 3, 4);

	ia64_pmpyshr2 (code, 1, 2, 3, 0);
	ia64_pmpyshr2_u (code, 1, 2, 3, 0);
	ia64_pmpyshr2 (code, 1, 2, 3, 7);
	ia64_pmpyshr2_u (code, 1, 2, 3, 7);
	ia64_pmpyshr2 (code, 1, 2, 3, 15);
	ia64_pmpyshr2_u (code, 1, 2, 3, 15);
	ia64_pmpyshr2 (code, 1, 2, 3, 16);
	ia64_pmpyshr2_u (code, 1, 2, 3, 16);

	ia64_pmpy2_r (code, 1, 2, 3);
	ia64_pmpy2_l (code, 1, 2, 3);
	ia64_mix1_r (code, 1, 2, 3);
	ia64_mix2_r (code, 1, 2, 3);
	ia64_mix4_r (code, 1, 2, 3);
	ia64_mix1_l (code, 1, 2, 3);
	ia64_mix2_l (code, 1, 2, 3);
	ia64_mix4_l (code, 1, 2, 3);
	ia64_pack2_uss (code, 1, 2, 3);
	ia64_pack2_sss (code, 1, 2, 3);
	ia64_pack4_sss (code, 1, 2, 3);
	ia64_unpack1_h (code, 1, 2, 3);
	ia64_unpack2_h (code, 1, 2, 3);
	ia64_unpack4_h (code, 1, 2, 3);
	ia64_unpack1_l (code, 1, 2, 3);
	ia64_unpack2_l (code, 1, 2, 3);
	ia64_unpack4_l (code, 1, 2, 3);
	ia64_pmin1_u (code, 1, 2, 3);
	ia64_pmax1_u (code, 1, 2, 3);
	ia64_pmin2 (code, 1, 2, 3);
	ia64_pmax2 (code, 1, 2, 3);
	ia64_psad1 (code, 1, 2, 3);

	ia64_mux1 (code, 1, 2, IA64_MUX1_BRCST);
	ia64_mux1 (code, 1, 2, IA64_MUX1_MIX);
	ia64_mux1 (code, 1, 2, IA64_MUX1_SHUF);
	ia64_mux1 (code, 1, 2, IA64_MUX1_ALT);
	ia64_mux1 (code, 1, 2, IA64_MUX1_REV);

	ia64_mux2 (code, 1, 2, 0x8d);

	ia64_pshr2 (code, 1, 2, 3);
	ia64_pshr4 (code, 1, 2, 3);
	ia64_shr (code, 1, 2, 3);
	ia64_pshr2_u (code, 1, 2, 3);
	ia64_pshr4_u (code, 1, 2, 3);
	ia64_shr_u (code, 1, 2, 3);

	ia64_pshr2_imm (code, 1, 2, 20);
	ia64_pshr4_imm (code, 1, 2, 20);
	ia64_pshr2_u_imm (code, 1, 2, 20);
	ia64_pshr4_u_imm (code, 1, 2, 20);

	ia64_pshl2 (code, 1, 2, 3);
	ia64_pshl4 (code, 1, 2, 3);
	ia64_shl (code, 1, 2, 3);

	ia64_pshl2_imm (code, 1, 2, 20);
	ia64_pshl4_imm (code, 1, 2, 20);

	ia64_popcnt (code, 1, 2);

	ia64_shrp (code, 1, 2, 3, 62);

	ia64_extr_u (code, 1, 2, 62, 61);
	ia64_extr (code, 1, 2, 62, 61);

	ia64_dep_z (code, 1, 2, 62, 61);

	ia64_dep_z_imm (code, 1, 127, 62, 61);
	ia64_dep_z_imm (code, 1, -128, 62, 61);
	ia64_dep_imm (code, 1, 0, 2, 62, 61);
	ia64_dep_imm (code, 1, -1, 2, 62, 61);
	ia64_dep (code, 1, 2, 3, 10, 15);

	ia64_tbit_z (code, 1, 2, 3, 0);

	ia64_tbit_z (code, 1, 2, 3, 63);
	ia64_tbit_z_unc (code, 1, 2, 3, 63);
	ia64_tbit_z_and (code, 1, 2, 3, 63);
	ia64_tbit_nz_and (code, 1, 2, 3, 63);
	ia64_tbit_z_or (code, 1, 2, 3, 63);
	ia64_tbit_nz_or (code, 1, 2, 3, 63);
	ia64_tbit_z_or_andcm (code, 1, 2, 3, 63);
	ia64_tbit_nz_or_andcm (code, 1, 2, 3, 63);

	ia64_tnat_z (code, 1, 2, 3);
	ia64_tnat_z_unc (code, 1, 2, 3);
	ia64_tnat_z_and (code, 1, 2, 3);
	ia64_tnat_nz_and (code, 1, 2, 3);
	ia64_tnat_z_or (code, 1, 2, 3);
	ia64_tnat_nz_or (code, 1, 2, 3);
	ia64_tnat_z_or_andcm (code, 1, 2, 3);
	ia64_tnat_nz_or_andcm (code, 1, 2, 3);

	ia64_nop_i (code, 0x1234);
	ia64_hint_i (code, 0x1234);

	ia64_break_i (code, 0x1234);

	ia64_chk_s_i (code, 1, 0);
	ia64_chk_s_i (code, 1, -1);
	ia64_chk_s_i (code, 1, 1);

	ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0);
	ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_SPTK, 0);
	ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, 0);
	ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, IA64_BR_IH_IMP);
	ia64_mov_ret_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0);

	ia64_mov_from_br (code, 1, 1);

	ia64_mov_to_pred (code, 1, 0xfe);

	ia64_mov_to_pred_rot_imm (code, 0xff0000);

	ia64_mov_from_ip (code, 1);
	ia64_mov_from_pred (code, 1);

	ia64_mov_to_ar_i (code, 1, 1);

	ia64_mov_to_ar_imm_i (code, 1, 127);

	ia64_mov_from_ar_i (code, 1, 1);

	ia64_zxt1 (code, 1, 2);
	ia64_zxt2 (code, 1, 2);
	ia64_zxt4 (code, 1, 2);
	ia64_sxt1 (code, 1, 2);
	ia64_sxt2 (code, 1, 2);
	ia64_sxt4 (code, 1, 2);

	ia64_czx1_l (code, 1, 2);
	ia64_czx2_l (code, 1, 2);
	ia64_czx1_r (code, 1, 2);
	ia64_czx2_r (code, 1, 2);

	ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NONE);
	ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NT1);
	ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NTA);

	ia64_ld1_hint (code, 1, 2, 0);
	ia64_ld2_hint (code, 1, 2, 0);
	ia64_ld4_hint (code, 1, 2, 0);
	ia64_ld8_hint (code, 1, 2, 0);

	ia64_ld1_s_hint (code, 1, 2, 0);
	ia64_ld2_s_hint (code, 1, 2, 0);
	ia64_ld4_s_hint (code, 1, 2, 0);
	ia64_ld8_s_hint (code, 1, 2, 0);

	ia64_ld1_a_hint (code, 1, 2, 0);
	ia64_ld2_a_hint (code, 1, 2, 0);
	ia64_ld4_a_hint (code, 1, 2, 0);
	ia64_ld8_a_hint (code, 1, 2, 0);

	ia64_ld1_sa_hint (code, 1, 2, 0);
	ia64_ld2_sa_hint (code, 1, 2, 0);
	ia64_ld4_sa_hint (code, 1, 2, 0);
	ia64_ld8_sa_hint (code, 1, 2, 0);

	ia64_ld1_bias_hint (code, 1, 2, 0);
	ia64_ld2_bias_hint (code, 1, 2, 0);
	ia64_ld4_bias_hint (code, 1, 2, 0);
	ia64_ld8_bias_hint (code, 1, 2, 0);

	ia64_ld1_inc_hint (code, 1, 2, 3, IA64_LD_HINT_NONE);

	ia64_ld1_inc_imm_hint (code, 1, 2, 255, IA64_LD_HINT_NONE);
	ia64_ld1_inc_imm_hint (code, 1, 2, -256, IA64_LD_HINT_NONE);

	ia64_st1_hint (code, 1, 2, IA64_ST_HINT_NTA);

	ia64_st1_hint (code, 1, 2, IA64_ST_HINT_NONE);
	ia64_st2_hint (code, 1, 2, IA64_ST_HINT_NONE);
	ia64_st4_hint (code, 1, 2, IA64_ST_HINT_NONE);
	ia64_st8_hint (code, 1, 2, IA64_ST_HINT_NONE);

	ia64_st1_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
	ia64_st2_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
	ia64_st4_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
	ia64_st8_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);

	ia64_st8_spill_hint (code, 1, 2, IA64_ST_HINT_NONE);

	ia64_st16_hint (code, 1, 2, IA64_ST_HINT_NONE);
	ia64_st16_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);

	ia64_st1_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
	ia64_st2_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
	ia64_st4_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
	ia64_st8_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);

	ia64_st1_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
	ia64_st2_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
	ia64_st4_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
	ia64_st8_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);

	ia64_st8_spill_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);

	ia64_ldfs_hint (code, 1, 2, 0);
	ia64_ldfd_hint (code, 1, 2, 0);
	ia64_ldf8_hint (code, 1, 2, 0);
	ia64_ldfe_hint (code, 1, 2, 0);

	ia64_ldfs_s_hint (code, 1, 2, 0);
	ia64_ldfd_s_hint (code, 1, 2, 0);
	ia64_ldf8_s_hint (code, 1, 2, 0);
	ia64_ldfe_s_hint (code, 1, 2, 0);

	ia64_ldfs_a_hint (code, 1, 2, 0);
	ia64_ldfd_a_hint (code, 1, 2, 0);
	ia64_ldf8_a_hint (code, 1, 2, 0);
	ia64_ldfe_a_hint (code, 1, 2, 0);

	ia64_ldfs_sa_hint (code, 1, 2, 0);
	ia64_ldfd_sa_hint (code, 1, 2, 0);
	ia64_ldf8_sa_hint (code, 1, 2, 0);
	ia64_ldfe_sa_hint (code, 1, 2, 0);

	ia64_ldfs_c_clr_hint (code, 1, 2, 0);
	ia64_ldfd_c_clr_hint (code, 1, 2, 0);
	ia64_ldf8_c_clr_hint (code, 1, 2, 0);
	ia64_ldfe_c_clr_hint (code, 1, 2, 0);

	ia64_ldfs_c_nc_hint (code, 1, 2, 0);
	ia64_ldfd_c_nc_hint (code, 1, 2, 0);
	ia64_ldf8_c_nc_hint (code, 1, 2, 0);
	ia64_ldfe_c_nc_hint (code, 1, 2, 0);

	ia64_ldf_fill_hint (code, 1, 2, 0);

	ia64_ldfs_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfd_inc_hint (code, 1, 2, 3, 0);
	ia64_ldf8_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfe_inc_hint (code, 1, 2, 3, 0);

	ia64_ldfs_s_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfd_s_inc_hint (code, 1, 2, 3, 0);
	ia64_ldf8_s_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfe_s_inc_hint (code, 1, 2, 3, 0);

	ia64_ldfs_a_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfd_a_inc_hint (code, 1, 2, 3, 0);
	ia64_ldf8_a_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfe_a_inc_hint (code, 1, 2, 3, 0);

	ia64_ldfs_sa_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfd_sa_inc_hint (code, 1, 2, 3, 0);
	ia64_ldf8_sa_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfe_sa_inc_hint (code, 1, 2, 3, 0);

	ia64_ldfs_c_clr_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfd_c_clr_inc_hint (code, 1, 2, 3, 0);
	ia64_ldf8_c_clr_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfe_c_clr_inc_hint (code, 1, 2, 3, 0);

	ia64_ldfs_c_nc_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfd_c_nc_inc_hint (code, 1, 2, 3, 0);
	ia64_ldf8_c_nc_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfe_c_nc_inc_hint (code, 1, 2, 3, 0);

	ia64_ldf_fill_inc_hint (code, 1, 2, 3, 0);

	ia64_ldfs_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldfd_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldf8_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldfe_inc_imm_hint (code, 1, 2, 255, 0);

	ia64_ldfs_s_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldfd_s_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldf8_s_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldfe_s_inc_imm_hint (code, 1, 2, 255, 0);

	ia64_ldfs_a_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldfd_a_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldf8_a_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldfe_a_inc_imm_hint (code, 1, 2, 255, 0);

	ia64_ldfs_sa_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldfd_sa_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldf8_sa_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldfe_sa_inc_imm_hint (code, 1, 2, 255, 0);

	ia64_ldfs_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldfd_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldf8_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldfe_c_clr_inc_imm_hint (code, 1, 2, 255, 0);

	ia64_ldfs_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldfd_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldf8_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_ldfe_c_nc_inc_imm_hint (code, 1, 2, 255, 0);

	ia64_ldf_fill_inc_imm_hint (code, 1, 2, 255, 0);

	ia64_stfs_hint (code, 1, 2, 0);
	ia64_stfd_hint (code, 1, 2, 0);
	ia64_stf8_hint (code, 1, 2, 0);
	ia64_stfe_hint (code, 1, 2, 0);

	ia64_stf_spill_hint (code, 1, 2, 0);

	ia64_stfs_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_stfd_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_stf8_inc_imm_hint (code, 1, 2, 255, 0);
	ia64_stfe_inc_imm_hint (code, 1, 2, 255, 0);

	ia64_stf_spill_inc_imm_hint (code, 1, 2, 255, 0);

	ia64_ldfps_hint (code, 1, 2, 3, 0);
	ia64_ldfpd_hint (code, 1, 2, 3, 0);
	ia64_ldfp8_hint (code, 1, 2, 3, 0);

	ia64_ldfps_s_hint (code, 1, 2, 3, 0);
	ia64_ldfpd_s_hint (code, 1, 2, 3, 0);
	ia64_ldfp8_s_hint (code, 1, 2, 3, 0);

	ia64_ldfps_a_hint (code, 1, 2, 3, 0);
	ia64_ldfpd_a_hint (code, 1, 2, 3, 0);
	ia64_ldfp8_a_hint (code, 1, 2, 3, 0);

	ia64_ldfps_sa_hint (code, 1, 2, 3, 0);
	ia64_ldfpd_sa_hint (code, 1, 2, 3, 0);
	ia64_ldfp8_sa_hint (code, 1, 2, 3, 0);

	ia64_ldfps_c_clr_hint (code, 1, 2, 3, 0);
	ia64_ldfpd_c_clr_hint (code, 1, 2, 3, 0);
	ia64_ldfp8_c_clr_hint (code, 1, 2, 3, 0);

	ia64_ldfps_c_nc_hint (code, 1, 2, 3, 0);
	ia64_ldfpd_c_nc_hint (code, 1, 2, 3, 0);
	ia64_ldfp8_c_nc_hint (code, 1, 2, 3, 0);

	ia64_ldfps_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfpd_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfp8_inc_hint (code, 1, 2, 3, 0);

	ia64_ldfps_s_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfpd_s_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfp8_s_inc_hint (code, 1, 2, 3, 0);

	ia64_ldfps_a_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfpd_a_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfp8_a_inc_hint (code, 1, 2, 3, 0);

	ia64_ldfps_sa_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfpd_sa_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfp8_sa_inc_hint (code, 1, 2, 3, 0);

	ia64_ldfps_c_clr_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfpd_c_clr_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfp8_c_clr_inc_hint (code, 1, 2, 3, 0);

	ia64_ldfps_c_nc_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfpd_c_nc_inc_hint (code, 1, 2, 3, 0);
	ia64_ldfp8_c_nc_inc_hint (code, 1, 2, 3, 0);

	ia64_lfetch_hint (code, 1, 0);
	ia64_lfetch_excl_hint (code, 1, 0);
	ia64_lfetch_fault_hint (code, 1, 0);
	ia64_lfetch_fault_excl_hint (code, 1, 0);

	ia64_lfetch_hint (code, 1, IA64_LFHINT_NT1);
	ia64_lfetch_hint (code, 1, IA64_LFHINT_NT2);
	ia64_lfetch_hint (code, 1, IA64_LFHINT_NTA);

	ia64_lfetch_inc_hint (code, 1, 2, 0);
	ia64_lfetch_excl_inc_hint (code, 1, 2, 0);
	ia64_lfetch_fault_inc_hint (code, 1, 2, 0);
	ia64_lfetch_fault_excl_inc_hint (code, 1, 2, 0);

	ia64_lfetch_inc_imm_hint (code, 1, 255, 0);
	ia64_lfetch_excl_inc_imm_hint (code, 1, 255, 0);
	ia64_lfetch_fault_inc_imm_hint (code, 1, 255, 0);
	ia64_lfetch_fault_excl_inc_imm_hint (code, 1, 255, 0);

	ia64_cmpxchg1_acq_hint (code, 1, 2, 3, 0);
	ia64_cmpxchg2_acq_hint (code, 1, 2, 3, 0);
	ia64_cmpxchg4_acq_hint (code, 1, 2, 3, 0);
	ia64_cmpxchg8_acq_hint (code, 1, 2, 3, 0);
	ia64_cmpxchg1_rel_hint (code, 1, 2, 3, 0);
	ia64_cmpxchg2_rel_hint (code, 1, 2, 3, 0);
	ia64_cmpxchg4_rel_hint (code, 1, 2, 3, 0);
	ia64_cmpxchg8_rel_hint (code, 1, 2, 3, 0);
	ia64_cmpxchg16_acq_hint (code, 1, 2, 3, 0);
	ia64_cmpxchg16_rel_hint (code, 1, 2, 3, 0);
	ia64_xchg1_hint (code, 1, 2, 3, 0);
	ia64_xchg2_hint (code, 1, 2, 3, 0);
	ia64_xchg4_hint (code, 1, 2, 3, 0);
	ia64_xchg8_hint (code, 1, 2, 3, 0);

	ia64_fetchadd4_acq_hint (code, 1, 2, -16, 0);
	ia64_fetchadd4_acq_hint (code, 1, 2, -8, 0);
	ia64_fetchadd4_acq_hint (code, 1, 2, -4, 0);
	ia64_fetchadd4_acq_hint (code, 1, 2, -1, 0);
	ia64_fetchadd4_acq_hint (code, 1, 2, 1, 0);
	ia64_fetchadd4_acq_hint (code, 1, 2, 4, 0);
	ia64_fetchadd4_acq_hint (code, 1, 2, 8, 0);
	ia64_fetchadd4_acq_hint (code, 1, 2, 16, 0);

	ia64_fetchadd4_acq_hint (code, 1, 2, 16, 0);
	ia64_fetchadd8_acq_hint (code, 1, 2, 16, 0);
	ia64_fetchadd4_rel_hint (code, 1, 2, 16, 0);
	ia64_fetchadd8_rel_hint (code, 1, 2, 16, 0);

	ia64_setf_sig (code, 1, 2);
	ia64_setf_exp (code, 1, 2);
	ia64_setf_s (code, 1, 2);
	ia64_setf_d (code, 1, 2);

	ia64_getf_sig (code, 1, 2);
	ia64_getf_exp (code, 1, 2);
	ia64_getf_s (code, 1, 2);
	ia64_getf_d (code, 1, 2);

	ia64_chk_s_m (code, 1, 0);
	ia64_chk_s_m (code, 1, 1);
	ia64_chk_s_m (code, 1, -1);

	ia64_chk_s_float_m (code, 1, 0);

	ia64_chk_a_nc (code, 1, 0);
	ia64_chk_a_nc (code, 1, 1);
	ia64_chk_a_nc (code, 1, -1);

	ia64_chk_a_nc (code, 1, 0);
	ia64_chk_a_clr (code, 1, 0);

	ia64_chk_a_nc_float (code, 1, 0);
	ia64_chk_a_clr_float (code, 1, 0);

	ia64_invala (code);
	ia64_fwb (code);
	ia64_mf (code);
	ia64_mf_a (code);
	ia64_srlz_d (code);
	ia64_srlz_i (code);
	ia64_sync_i (code);

	ia64_flushrs (code);
	ia64_loadrs (code);

	ia64_invala_e (code, 1);
	ia64_invala_e_float (code, 1);

	ia64_fc (code, 1);
	ia64_fc_i (code, 1);

	ia64_mov_to_ar_m (code, 1, 1);

	ia64_mov_to_ar_imm_m (code, 1, 127);

	ia64_mov_from_ar_m (code, 1, 1);

	ia64_mov_to_cr (code, 1, 2);

	ia64_mov_from_cr (code, 1, 2);

	ia64_alloc (code, 1, 3, 4, 5, 0);
	ia64_alloc (code, 1, 3, 4, 5, 8);

	ia64_mov_to_psr_l (code, 1);
	ia64_mov_to_psr_um (code, 1);

	ia64_mov_from_psr (code, 1);
	ia64_mov_from_psr_um (code, 1);

	ia64_break_m (code, 0x1234);
	ia64_nop_m (code, 0x1234);
	ia64_hint_m (code, 0x1234);

	ia64_br_cond_hint (code, 0, 0, 0, 0);
	ia64_br_wexit_hint (code, 0, 0, 0, 0);
	ia64_br_wtop_hint (code, 0, 0, 0, 0);

	ia64_br_cloop_hint (code, 0, 0, 0, 0);
	ia64_br_cexit_hint (code, 0, 0, 0, 0);
	ia64_br_ctop_hint (code, 0, 0, 0, 0);

	ia64_br_call_hint (code, 1, 0, 0, 0, 0);

	ia64_br_cond_reg_hint (code, 1, 0, 0, 0);
	ia64_br_ia_reg_hint (code, 1, 0, 0, 0);
	ia64_br_ret_reg_hint (code, 1, 0, 0, 0);

	ia64_br_call_reg_hint (code, 1, 2, 0, 0, 0);

	ia64_cover (code);
	ia64_clrrrb (code);
	ia64_clrrrb_pr (code);
	ia64_rfi (code);
	ia64_bsw_0 (code);
	ia64_bsw_1 (code);
	ia64_epc (code);

	ia64_break_b (code, 0x1234);
	ia64_nop_b (code, 0x1234);
	ia64_hint_b (code, 0x1234);

	ia64_break_x (code, 0x2123456789ABCDEFULL);

	ia64_movl (code, 1, 0x123456789ABCDEF0LL);

	ia64_brl_cond_hint (code, 0, 0, 0, 0);
	ia64_brl_cond_hint (code, -1, 0, 0, 0);

	ia64_brl_call_hint (code, 1, 0, 0, 0, 0);
	ia64_brl_call_hint (code, 1, -1, 0, 0, 0);

	ia64_nop_x (code, 0x2123456789ABCDEFULL);
	ia64_hint_x (code, 0x2123456789ABCDEFULL);

	ia64_movl_pred (code, 1, 1, 0x123456789ABCDEF0LL);

	/* FLOATING-POINT */
	ia64_fma_sf_pred (code, 1, 1, 2, 3, 4, 2);
	ia64_fma_s_sf_pred (code, 1, 1, 2, 3, 4, 2);
	ia64_fma_d_sf_pred (code, 1, 1, 2, 3, 4, 2);
	ia64_fpma_sf_pred (code, 1, 1, 2, 3, 4, 2);
	ia64_fms_sf_pred (code, 1, 1, 2, 3, 4, 2);
	ia64_fms_s_sf_pred (code, 1, 1, 2, 3, 4, 2);
	ia64_fms_d_sf_pred (code, 1, 1, 2, 3, 4, 2);
	ia64_fpms_sf_pred (code, 1, 1, 2, 3, 4, 2);
	ia64_fnma_sf_pred (code, 1, 1, 2, 3, 4, 2);
	ia64_fnma_s_sf_pred (code, 1, 1, 2, 3, 4, 2);
	ia64_fnma_d_sf_pred (code, 1, 1, 2, 3, 4, 2);
	ia64_fpnma_sf_pred (code, 1, 1, 2, 3, 4, 2);

	ia64_xma_l_pred (code, 1, 1, 2, 3, 4);
	ia64_xma_h_pred (code, 1, 1, 2, 3, 4);
	ia64_xma_hu_pred (code, 1, 1, 2, 3, 4);

	ia64_fselect_pred (code, 1, 1, 2, 3, 4);

	ia64_fcmp_eq_sf_pred (code, 1, 1, 2, 3, 4, 0);
	ia64_fcmp_lt_sf_pred (code, 1, 1, 2, 3, 4, 0);
	ia64_fcmp_le_sf_pred (code, 1, 1, 2, 3, 4, 0);
	ia64_fcmp_unord_sf_pred (code, 1, 1, 2, 3, 4, 0);
	ia64_fcmp_eq_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
	ia64_fcmp_lt_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
	ia64_fcmp_le_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
	ia64_fcmp_unord_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);

	ia64_fclass_m_pred (code, 1, 1, 2, 3, 0x1ff);
	ia64_fclass_m_unc_pred (code, 1, 1, 2, 3, 0x1ff);

	ia64_frcpa_sf_pred (code, 1, 1, 2, 3, 4, 0);
	ia64_fprcpa_sf_pred (code, 1, 1, 2, 3, 4, 0);

	ia64_frsqrta_sf_pred (code, 1, 1, 2, 4, 0);
	ia64_fprsqrta_sf_pred (code, 1, 1, 2, 4, 0);

	ia64_fmin_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_fman_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_famin_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_famax_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_fpmin_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_fpman_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_fpamin_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_fpamax_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_fpcmp_eq_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_fpcmp_lt_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_fpcmp_le_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_fpcmp_unord_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_fpcmp_neq_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_fpcmp_nlt_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_fpcmp_nle_sf_pred (code, 1, 2, 3, 4, 0);
	ia64_fpcmp_ord_sf_pred (code, 1, 2, 3, 4, 0);

	ia64_fmerge_s_pred (code, 1, 2, 3, 4);
	ia64_fmerge_ns_pred (code, 1, 2, 3, 4);
	ia64_fmerge_se_pred (code, 1, 2, 3, 4);
	ia64_fmix_lr_pred (code, 1, 2, 3, 4);
	ia64_fmix_r_pred (code, 1, 2, 3, 4);
	ia64_fmix_l_pred (code, 1, 2, 3, 4);
	ia64_fsxt_r_pred (code, 1, 2, 3, 4);
	ia64_fsxt_l_pred (code, 1, 2, 3, 4);
	ia64_fpack_pred (code, 1, 2, 3, 4);
	ia64_fswap_pred (code, 1, 2, 3, 4);
	ia64_fswap_nl_pred (code, 1, 2, 3, 4);
	ia64_fswap_nr_pred (code, 1, 2, 3, 4);
	ia64_fand_pred (code, 1, 2, 3, 4);
	ia64_fandcm_pred (code, 1, 2, 3, 4);
	ia64_for_pred (code, 1, 2, 3, 4);
	ia64_fxor_pred (code, 1, 2, 3, 4);
	ia64_fpmerge_s_pred (code, 1, 2, 3, 4);
	ia64_fpmerge_ns_pred (code, 1, 2, 3, 4);
	ia64_fpmerge_se_pred (code, 1, 2, 3, 4);
	
	ia64_fcvt_fx_sf_pred ((code), 1, 2, 3, 0);
	ia64_fcvt_fxu_sf_pred ((code), 1, 2, 3, 0);
	ia64_fcvt_fx_trunc_sf_pred ((code), 1, 2, 3, 0);
	ia64_fcvt_fxu_trunc_sf_pred ((code), 1, 2, 3, 0);
	ia64_fpcvt_fx_sf_pred ((code), 1, 2, 3, 0);
	ia64_fpcvt_fxu_sf_pred ((code), 1, 2, 3, 0);
	ia64_fpcvt_fx_trunc_sf_pred ((code), 1, 2, 3, 0);
	ia64_fpcvt_fxu_trunc_sf_pred ((code), 1, 2, 3, 0);

	ia64_fcvt_xf_pred ((code), 1, 2, 3);

	ia64_fsetc_sf_pred ((code), 1, 0x33, 0x33, 3);

	ia64_fclrf_sf_pred ((code), 1, 3);

	ia64_fchkf_sf_pred ((code), 1, -1, 3);

	ia64_break_f_pred ((code), 1, 0x1234);

	ia64_movl (code, 31, -123456);

	ia64_codegen_close (code);

#if 0
	/* disassembly */
	{
		guint8 *buf = code.buf;
		int template;