Example #1
/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
{
	const char *fault = NULL;  /* happy compiler */
	char fault_buf[64];
	unsigned long sp = kbt->it.sp;
	struct pt_regs *p;

	if (!in_kernel_stack(kbt, sp))
		return NULL;
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
	if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
		fault = "syscall";
	else {
		if (kbt->verbose) {     /* else we aren't going to use it */
			snprintf(fault_buf, sizeof(fault_buf),
				 "interrupt %ld", p->faultnum);
			fault = fault_buf;
		}
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    __kernel_text_address(p->pc) &&
	    in_kernel_stack(kbt, p->sp) &&
	    p->sp >= sp) {
		if (kbt->verbose)
			pr_err("  <%s while in kernel mode>\n", fault);
	} else if (EX1_PL(p->ex1) == USER_PL &&
	    p->pc < PAGE_OFFSET &&
	    p->sp < PAGE_OFFSET) {
		if (kbt->verbose)
			pr_err("  <%s while in user mode>\n", fault);
	} else {
		if (kbt->verbose)
			pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
			       p->pc, p->sp, p->ex1);
		return NULL;
	}
	if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
		return p;
	return NULL;
}
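
The frame test above is essentially a bounds check: the candidate pt_regs area at sp + C_ABI_SAVE_AREA_SIZE must lie entirely within the current kernel stack before any of its fields are trusted. Below is a minimal stand-alone sketch of that check; FAKE_ABI_SAVE_AREA_SIZE, FAKE_PTREGS_SIZE and range_on_stack() are invented stand-ins for the real ABI constants and in_kernel_stack(), not the kernel's own definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for C_ABI_SAVE_AREA_SIZE and PTREGS_SIZE. */
#define FAKE_ABI_SAVE_AREA_SIZE 16u
#define FAKE_PTREGS_SIZE        256u

/* Does [addr, addr + len) lie entirely inside [base, base + size)? */
static bool range_on_stack(uintptr_t base, size_t size,
			   uintptr_t addr, size_t len)
{
	return addr >= base && len <= size && addr - base <= size - len;
}

int main(void)
{
	uintptr_t stack_base = 0x10000, sp = 0x11e00;
	size_t stack_size = 0x2000;

	/* Same shape as the two in_kernel_stack() checks above: the ABI
	 * save area plus the whole pt_regs frame must fit on the stack. */
	bool ok = range_on_stack(stack_base, stack_size, sp,
				 FAKE_ABI_SAVE_AREA_SIZE + FAKE_PTREGS_SIZE);
	printf("candidate frame at %#lx: %s\n",
	       (unsigned long)sp, ok ? "plausible" : "rejected");
	return 0;
}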
Example #2
/*
 * If the current sp is on a page different than what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help the debug,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long ksp0 = get_current_ksp0();
	unsigned long ksp0_base = ksp0 - THREAD_SIZE;
	unsigned long sp = stack_pointer;

	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
		pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	} else if (sp < ksp0_base + sizeof(struct thread_info)) {
		pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	}
}
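
As a rough illustration of the arithmetic above, here is a self-contained sketch that classifies a stack pointer against an assumed stack top and size. FAKE_THREAD_SIZE, FAKE_THREAD_INFO and classify_sp() are made-up stand-ins for THREAD_SIZE, sizeof(struct thread_info) and the kernel check; the KERNEL_PL qualifier on the underrun case is omitted.

#include <stdio.h>

/* Illustrative values only; the kernel derives these per task. */
#define FAKE_THREAD_SIZE 0x2000UL
#define FAKE_THREAD_INFO 64UL	/* stand-in for sizeof(struct thread_info) */

/* Classify sp against the recorded stack top, like validate_stack(). */
static const char *classify_sp(unsigned long ksp0, unsigned long sp)
{
	unsigned long ksp0_base = ksp0 - FAKE_THREAD_SIZE;

	if (sp >= ksp0)
		return "underrun (sp above the recorded stack top)";
	if (sp < ksp0_base + FAKE_THREAD_INFO)
		return "overrun (sp has run into the thread_info area)";
	return "ok";
}

int main(void)
{
	unsigned long ksp0 = 0x12000;

	printf("%s\n", classify_sp(ksp0, 0x11f80));	/* ok */
	printf("%s\n", classify_sp(ksp0, 0x12010));	/* underrun */
	printf("%s\n", classify_sp(ksp0, 0x10020));	/* overrun */
	return 0;
}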
Example #3
/*
 * This routine handles page faults.  It determines the address, and the
 * problem, and then passes it to handle_page_fault() for normal DTLB and
 * ITLB issues, and for DMA or SN processor faults when we are in user
 * space.  For the latter, if we're in kernel mode, we just save the
 * interrupt away appropriately and return immediately.  We can't do
 * page faults for user code while in kernel mode.
 */
void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
{
	int is_page_fault;

	/* This case should have been handled by do_page_fault_ics(). */
	BUG_ON(write & ~1);

#if CHIP_HAS_TILE_DMA()
	/*
	 * If it's a DMA fault, suspend the transfer while we're
	 * handling the miss; we'll restart after it's handled.  If we
	 * don't suspend, it's possible that this process could swap
	 * out and back in, and restart the engine since the DMA is
	 * still 'running'.
	 */
	if (fault_num == INT_DMATLB_MISS ||
	    fault_num == INT_DMATLB_ACCESS ||
	    fault_num == INT_DMATLB_MISS_DWNCL ||
	    fault_num == INT_DMATLB_ACCESS_DWNCL) {
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}
#endif

	/* Validate fault num and decide if this is a first-time page fault. */
	switch (fault_num) {
	case INT_ITLB_MISS:
	case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
#endif
		is_page_fault = 1;
		break;

	case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
#endif
		is_page_fault = 0;
		break;

	default:
		panic("Bad fault number %d in do_page_fault", fault_num);
	}

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	if (EX1_PL(regs->ex1) != USER_PL) {
		struct async_tlb *async;
		switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
		case INT_DMATLB_MISS:
		case INT_DMATLB_ACCESS:
		case INT_DMATLB_MISS_DWNCL:
		case INT_DMATLB_ACCESS_DWNCL:
			async = &current->thread.dma_async_tlb;
			break;
#endif
#if CHIP_HAS_SN_PROC()
		case INT_SNITLB_MISS:
		case INT_SNITLB_MISS_DWNCL:
			async = &current->thread.sn_async_tlb;
			break;
#endif
		default:
			async = NULL;
		}
		if (async) {
			/*
			 * No vmalloc check required, so we can allow
			 * interrupts immediately at this point.
			 */
			local_irq_enable();

			set_thread_flag(TIF_ASYNC_TLB);
			if (async->fault_num != 0) {
				panic("Second async fault %d;"
				      " old fault was %d (%#lx/%ld)",
				      fault_num, async->fault_num,
				      address, write);
			}
			BUG_ON(fault_num == 0);
			async->fault_num = fault_num;
			async->is_fault = is_page_fault;
			async->is_write = write;
			async->address = address;
			return;
		}
	}
#endif

	handle_page_fault(regs, fault_num, is_page_fault, address, write);
}
Example #4
/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 */
static int handle_page_fault(struct pt_regs *regs,
			     int fault_num,
			     int is_page_fault,
			     unsigned long address,
			     int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long stack_offset;
	int fault;
	int si_code;
	int is_kernel_mode;
	pgd_t *pgd;
	unsigned int flags;

	/* on TILE, protection faults are always writes */
	if (!is_page_fault)
		write = 1;

	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);

	tsk = validate_current();

	/*
	 * Check to see if we might be overwriting the stack, and bail
	 * out if so.  The page fault code is a relatively likely
	 * place to get trapped in an infinite regress, and once we
	 * overwrite the whole stack, it becomes very hard to recover.
	 */
	stack_offset = stack_pointer & (THREAD_SIZE-1);
	if (stack_offset < THREAD_SIZE / 8) {
		pr_alert("Potential stack overrun: sp %#lx\n",
		       stack_pointer);
		show_regs(regs);
		pr_alert("Killing current process %d/%s\n",
		       tsk->pid, tsk->comm);
		do_group_exit(SIGKILL);
	}

	/*
	 * Early on, we need to check for migrating PTE entries;
	 * see homecache.c.  If we find a migrating PTE, we wait until
	 * the backing page claims to be done migrating, then we proceed.
	 * For kernel PTEs, we rewrite the PTE and return and retry.
	 * Otherwise, we treat the fault like a normal "no PTE" fault,
	 * rather than trying to patch up the existing PTE.
	 */
	pgd = get_current_pgd();
	if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
				 is_kernel_mode, write))
		return 1;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection fault.
	 */
	if (unlikely(address >= TASK_SIZE &&
		     !is_arch_mappable_range(address, 0))) {
		if (is_kernel_mode && is_page_fault &&
		    vmalloc_fault(pgd, address) >= 0)
			return 1;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		mm = NULL;  /* happy compiler */
		vma = NULL;
		goto bad_area_nosemaphore;
	}

	/*
	 * If we're trying to touch user-space addresses, we must
	 * be either at PL0, or else with interrupts enabled in the
	 * kernel, so either way we can re-enable interrupts here
	 * unless we are doing atomic access to user space with
	 * interrupts disabled.
	 */
	if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
		local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm) {
		vma = NULL;  /* happy compiler */
		goto bad_area_nosemaphore;
	}

	if (!is_kernel_mode)
		flags |= FAULT_FLAG_USER;

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (is_kernel_mode &&
		    !search_exception_tables(regs->pc)) {
			vma = NULL;  /* happy compiler */
			goto bad_area_nosemaphore;
		}
retry:
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (regs->sp < PAGE_OFFSET) {
		/*
		 * accessing the stack below sp is always a bug.
		 */
		if (address < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (fault_num == INT_ITLB_MISS) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (write) {
#ifdef TEST_VERIFY_AREA
		if (!is_page_fault && regs->cs == KERNEL_CS)
			pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!is_page_fault || !(vma->vm_flags & VM_READ))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) here:
			 * __lock_page_or_retry() already released it
			 * before returning VM_FAULT_RETRY.
			 */
			goto retry;
		}
	}

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	/*
	 * If this was an asynchronous fault,
	 * restart the appropriate engine.
	 */
	switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		break;
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
		__insn_mtspr(SPR_SNCTL,
			     __insn_mfspr(SPR_SNCTL) &
			     ~SPR_SNCTL__FRZPROC_MASK);
		break;
#endif
	}
#endif

	up_read(&mm->mmap_sem);
	return 1;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (!is_kernel_mode) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		force_sig_info_fault("segfault", SIGSEGV, si_code, address,
				     fault_num, tsk, regs);
		return 0;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return 0;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

	/* FIXME: no lookup_address() yet */
#ifdef SUPPORT_LOOKUP_ADDRESS
	if (fault_num == INT_ITLB_MISS) {
		pte_t *pte = lookup_address(address);

		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
			pr_crit("kernel tried to execute"
			       " non-executable page - exploit attempt?"
			       " (uid: %d)\n", current->uid);
	}
#endif
	if (address < PAGE_SIZE)
		pr_alert("Unable to handle kernel NULL pointer dereference\n");
	else
		pr_alert("Unable to handle kernel paging request\n");
	pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
		 address, regs->pc);

	show_regs(regs);

	if (unlikely(tsk->pid < 2)) {
		panic("Kernel page fault running %s!",
		      is_idle_task(tsk) ? "the idle task" : "init");
	}

	/*
	 * More FIXME: we should probably copy the i386 here and
	 * implement a generic die() routine.  Not today.
	 */
#ifdef SUPPORT_DIE
	die("Oops", regs);
#endif
	bust_spinlocks(0);

	do_group_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_kernel_mode)
		goto no_context;
	pagefault_out_of_memory();
	return 0;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (is_kernel_mode)
		goto no_context;

	force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
			     fault_num, tsk, regs);
	return 0;
}
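
The mmap_sem handling above is the standard trylock-first pattern: take the lock opportunistically, and only fall back to a blocking acquisition once it is known that blocking cannot deadlock (here, when the fault came from user mode or the faulting PC has an exception-table fixup). Below is a generic sketch of the same shape; the pthread rwlock stands in for mmap_sem, and safe_to_block() and lock_for_fault() are hypothetical names standing in for search_exception_tables() and the in-function logic.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Build with: cc -pthread sketch.c */

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for search_exception_tables(regs->pc): may we block here? */
static bool safe_to_block(void)
{
	return true;
}

/* Mirrors the shape of the mmap_sem logic in handle_page_fault(). */
static bool lock_for_fault(void)
{
	if (pthread_rwlock_tryrdlock(&map_lock) != 0) {
		if (!safe_to_block())
			return false;	/* like bad_area_nosemaphore */
		pthread_rwlock_rdlock(&map_lock);
	}
	return true;
}

int main(void)
{
	if (lock_for_fault()) {
		puts("lock held; would look up the VMA here");
		pthread_rwlock_unlock(&map_lock);
	} else {
		puts("refused to block; treat as a bad area");
	}
	return 0;
}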
Example #5
static void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
			   int align_ctl)
{
	struct thread_info *info = current_thread_info();
	struct unaligned_jit_fragment frag;
	struct unaligned_jit_fragment *jit_code_area;
	tilegx_bundle_bits bundle_2 = 0;
	/* If bundle_2_enable is false, bundle_2 is a fnop/nop bundle. */
	bool     bundle_2_enable = true;
	uint64_t ra = -1, rb = -1, rd = -1, clob1 = -1, clob2 = -1, clob3 = -1;
	/*
	 * Indicates whether the registers used by the unaligned access
	 * instruction collide with registers used elsewhere in the
	 * same bundle.
	 */
	bool     alias = false;
	bool     load_n_store = true;
	bool     load_store_signed = false;
	unsigned int  load_store_size = 8;
	bool     y1_br = false;  /* True for a branch at Y1 in this bundle. */
	int      y1_br_reg = 0;
	/* True for a link operation, i.e. jalr or lnk at Y1. */
	bool     y1_lr = false;
	int      y1_lr_reg = 0;
	bool     x1_add = false; /* True for a load/store ADD at X1. */
	int      x1_add_imm8 = 0;
	bool     unexpected = false;
	int      n = 0, k;

	jit_code_area =
		(struct unaligned_jit_fragment *)(info->unalign_jit_base);

	memset((void *)&frag, 0, sizeof(frag));

	/* 0: X mode, Otherwise: Y mode. */
	if (bundle & TILEGX_BUNDLE_MODE_MASK) {
		unsigned int mod, opcode;

		if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
		    get_RRROpcodeExtension_Y1(bundle) ==
		    UNARY_RRR_1_OPCODE_Y1) {

			opcode = get_UnaryOpcodeExtension_Y1(bundle);

			/*
			 * Test "jalr", "jalrp", "jr", "jrp" instruction at Y1
			 * pipeline.
			 */
			switch (opcode) {
			case JALR_UNARY_OPCODE_Y1:
			case JALRP_UNARY_OPCODE_Y1:
				y1_lr = true;
				y1_lr_reg = 55; /* Link register. */
				/* FALLTHROUGH */
			case JR_UNARY_OPCODE_Y1:
			case JRP_UNARY_OPCODE_Y1:
				y1_br = true;
				y1_br_reg = get_SrcA_Y1(bundle);
				break;
			case LNK_UNARY_OPCODE_Y1:
				/* "lnk" at Y1 pipeline. */
				y1_lr = true;
				y1_lr_reg = get_Dest_Y1(bundle);
				break;
			}
		}

		opcode = get_Opcode_Y2(bundle);
		mod = get_Mode(bundle);

		/*
		 *  bundle_2 is bundle after making Y2 as a dummy operation
		 *  - ld zero, sp
		 */
		bundle_2 = (bundle & (~GX_INSN_Y2_MASK)) | jit_y2_dummy();

		/* Make Y1 as fnop if Y1 is a branch or lnk operation. */
		if (y1_br || y1_lr) {
			bundle_2 &= ~(GX_INSN_Y1_MASK);
			bundle_2 |= jit_y1_fnop();
		}

		if (is_y0_y1_nop(bundle_2))
			bundle_2_enable = false;

		if (mod == MODE_OPCODE_YC2) {
			/* Store. */
			load_n_store = false;
			load_store_size = 1 << opcode;
			load_store_signed = false;
			find_regs(bundle, 0, &ra, &rb, &clob1, &clob2,
				  &clob3, &alias);
			if (load_store_size > 8)
				unexpected = true;
		} else {
			/* Load. */
			load_n_store = true;
			if (mod == MODE_OPCODE_YB2) {
				switch (opcode) {
				case LD_OPCODE_Y2:
					load_store_signed = false;
					load_store_size = 8;
					break;
				case LD4S_OPCODE_Y2:
					load_store_signed = true;
					load_store_size = 4;
					break;
				case LD4U_OPCODE_Y2:
					load_store_signed = false;
					load_store_size = 4;
					break;
				default:
					unexpected = true;
				}
			} else if (mod == MODE_OPCODE_YA2) {
				if (opcode == LD2S_OPCODE_Y2) {
					load_store_signed = true;
					load_store_size = 2;
				} else if (opcode == LD2U_OPCODE_Y2) {
					load_store_signed = false;
					load_store_size = 2;
				} else
					unexpected = true;
			} else
				unexpected = true;
			find_regs(bundle, &rd, &ra, &rb, &clob1, &clob2,
				  &clob3, &alias);
		}
	} else {
		unsigned int opcode;

		/* bundle_2 is bundle after making X1 as "fnop". */
		bundle_2 = (bundle & (~GX_INSN_X1_MASK)) | jit_x1_fnop();

		if (is_x0_x1_nop(bundle_2))
			bundle_2_enable = false;

		if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
			opcode = get_UnaryOpcodeExtension_X1(bundle);

			if (get_RRROpcodeExtension_X1(bundle) ==
			    UNARY_RRR_0_OPCODE_X1) {
				load_n_store = true;
				find_regs(bundle, &rd, &ra, &rb, &clob1,
					  &clob2, &clob3, &alias);

				switch (opcode) {
				case LD_UNARY_OPCODE_X1:
					load_store_signed = false;
					load_store_size = 8;
					break;
				case LD4S_UNARY_OPCODE_X1:
					load_store_signed = true;
					/* FALLTHROUGH */
				case LD4U_UNARY_OPCODE_X1:
					load_store_size = 4;
					break;

				case LD2S_UNARY_OPCODE_X1:
					load_store_signed = true;
					/* FALLTHROUGH */
				case LD2U_UNARY_OPCODE_X1:
					load_store_size = 2;
					break;
				default:
					unexpected = true;
				}
			} else {
				load_n_store = false;
				load_store_signed = false;
				find_regs(bundle, 0, &ra, &rb,
					  &clob1, &clob2, &clob3,
					  &alias);

				opcode = get_RRROpcodeExtension_X1(bundle);
				switch (opcode)	{
				case ST_RRR_0_OPCODE_X1:
					load_store_size = 8;
					break;
				case ST4_RRR_0_OPCODE_X1:
					load_store_size = 4;
					break;
				case ST2_RRR_0_OPCODE_X1:
					load_store_size = 2;
					break;
				default:
					unexpected = true;
				}
			}
		} else if (get_Opcode_X1(bundle) == IMM8_OPCODE_X1) {
			load_n_store = true;
			opcode = get_Imm8OpcodeExtension_X1(bundle);
			switch (opcode)	{
			case LD_ADD_IMM8_OPCODE_X1:
				load_store_size = 8;
				break;

			case LD4S_ADD_IMM8_OPCODE_X1:
				load_store_signed = true;
				/* FALLTHROUGH */
			case LD4U_ADD_IMM8_OPCODE_X1:
				load_store_size = 4;
				break;

			case LD2S_ADD_IMM8_OPCODE_X1:
				load_store_signed = true;
				/* FALLTHROUGH */
			case LD2U_ADD_IMM8_OPCODE_X1:
				load_store_size = 2;
				break;

			case ST_ADD_IMM8_OPCODE_X1:
				load_n_store = false;
				load_store_size = 8;
				break;
			case ST4_ADD_IMM8_OPCODE_X1:
				load_n_store = false;
				load_store_size = 4;
				break;
			case ST2_ADD_IMM8_OPCODE_X1:
				load_n_store = false;
				load_store_size = 2;
				break;
			default:
				unexpected = true;
			}

			if (!unexpected) {
				x1_add = true;
				if (load_n_store)
					x1_add_imm8 = get_Imm8_X1(bundle);
				else
					x1_add_imm8 = get_Dest_Imm8_X1(bundle);
			}

			find_regs(bundle, load_n_store ? (&rd) : NULL,
				  &ra, &rb, &clob1, &clob2, &clob3, &alias);
		} else
			unexpected = true;
	}

	/* Sanity-check the register numbers extracted from the fault bundle. */
	if (check_regs(rd, ra, rb, clob1, clob2, clob3))
		unexpected = true;

	/* Give warning if register ra has an aligned address. */
	if (!unexpected)
		WARN_ON(!((load_store_size - 1) & (regs->regs[ra])));

	/*
	 * The fault came from kernel space; here we only need to take care
	 * of the unaligned "get_user/put_user" macros defined in "uaccess.h".
	 * Basically, we handle bundles of the form
	 * {ld/2u/4s rd, ra; movei rx, 0} or {st/2/4 ra, rb; movei rx, 0}
	 * (refer to "arch/tile/include/asm/uaccess.h" for details).
	 * For either load or store, the access is performed byte by byte via
	 * get_user() or put_user(). If the macro returns a non-zero value,
	 * that value is written to rx; otherwise rx is set to zero. Finally,
	 * pc is advanced to the next bundle and we return.
	 */

	if (EX1_PL(regs->ex1) != USER_PL) {

		unsigned long rx = 0;
		unsigned long x = 0, ret = 0;

		if (y1_br || y1_lr || x1_add ||
		    (load_store_signed !=
		     (load_n_store && load_store_size == 4))) {
			/* Branch, link, add form, or bad sign-ext: bail. */
			unexpected = true;
		} else if (!unexpected) {
			if (bundle & TILEGX_BUNDLE_MODE_MASK) {
				/*
				 * Fault bundle is Y mode.
				 * Check if the Y1 and Y0 is the form of
				 * { movei rx, 0; nop/fnop }, if yes,
				 * find the rx.
				 */

				if ((get_Opcode_Y1(bundle) == ADDI_OPCODE_Y1)
				    && (get_SrcA_Y1(bundle) == TREG_ZERO) &&
				    (get_Imm8_Y1(bundle) == 0) &&
				    is_bundle_y0_nop(bundle)) {
					rx = get_Dest_Y1(bundle);
				} else if ((get_Opcode_Y0(bundle) ==
					    ADDI_OPCODE_Y0) &&
					   (get_SrcA_Y0(bundle) == TREG_ZERO) &&
					   (get_Imm8_Y0(bundle) == 0) &&
					   is_bundle_y1_nop(bundle)) {
					rx = get_Dest_Y0(bundle);
				} else {
					unexpected = true;
				}
			} else {
				/*
				 * Fault bundle is X mode.
				 * Check if the X0 is 'movei rx, 0',
				 * if yes, find the rx.
				 */

				if ((get_Opcode_X0(bundle) == IMM8_OPCODE_X0)
				    && (get_Imm8OpcodeExtension_X0(bundle) ==
					ADDI_IMM8_OPCODE_X0) &&
				    (get_SrcA_X0(bundle) == TREG_ZERO) &&
				    (get_Imm8_X0(bundle) == 0)) {
					rx = get_Dest_X0(bundle);
				} else {
					unexpected = true;
				}
			}

			/* rx should be less than 56. */
			if (!unexpected && (rx >= 56))
				unexpected = true;
		}

		if (!search_exception_tables(regs->pc)) {
			/* No fixup in the exception tables for the pc. */
			unexpected = true;
		}

		if (unexpected) {
			/* Unexpected unalign kernel fault. */
			struct task_struct *tsk = validate_current();

			bust_spinlocks(1);

			show_regs(regs);

			if (unlikely(tsk->pid < 2)) {
				panic("Kernel unalign fault running %s!",
				      tsk->pid ? "init" : "the idle task");
			}
#ifdef SUPPORT_DIE
			die("Oops", regs);
#endif
			bust_spinlocks(0);

			do_group_exit(SIGKILL);

		} else {
			unsigned long i, b = 0;
			unsigned char *ptr =
				(unsigned char *)regs->regs[ra];
			if (load_n_store) {
				/* handle get_user(x, ptr) */
				for (i = 0; i < load_store_size; i++) {
					ret = get_user(b, ptr++);
					if (!ret) {
						/* Success! update x. */
#ifdef __LITTLE_ENDIAN
						x |= (b << (8 * i));
#else
						x <<= 8;
						x |= b;
#endif /* __LITTLE_ENDIAN */
					} else {
						x = 0;
						break;
					}
				}

				/* Sign-extend 4-byte loads. */
				if (load_store_size == 4)
					x = (long)(int)x;

				/* Set register rd. */
				regs->regs[rd] = x;

				/* Set register rx. */
				regs->regs[rx] = ret;

				/* Bump pc. */
				regs->pc += 8;

			} else {
				/* Handle put_user(x, ptr) */
				x = regs->regs[rb];
#ifdef __LITTLE_ENDIAN
				b = x;
#else
				/*
				 * Swap x in order to store x from low
				 * to high memory same as the
				 * little-endian case.
				 */
				switch (load_store_size) {
				case 8:
					b = swab64(x);
					break;
				case 4:
					b = swab32(x);
					break;
				case 2:
					b = swab16(x);
					break;
				}
#endif /* __LITTLE_ENDIAN */
				for (i = 0; i < load_store_size; i++) {
					ret = put_user(b, ptr++);
					if (ret)
						break;
					/* Success! shift 1 byte. */
					b >>= 8;
				}
				/* Set register rx. */
				regs->regs[rx] = ret;

				/* Bump pc. */
				regs->pc += 8;
			}
		}

		unaligned_fixup_count++;

		if (unaligned_printk) {
			pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n",
				current->comm, current->pid, regs->regs[ra]);
		}

		/* Done! Return to the exception handler. */
		return;
	}
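
The little-endian load path above rebuilds the value one byte at a time, OR-ing byte i in at bit position 8*i, and then sign-extends 4-byte loads exactly as the x = (long)(int)x line does. Here is a stand-alone sketch of that reassembly, reading from an ordinary buffer instead of user memory via get_user(); load_unaligned_le() is an invented helper for illustration only.

#include <stdint.h>
#include <stdio.h>

/*
 * Rebuild a value byte by byte from an unaligned little-endian buffer,
 * the way the __LITTLE_ENDIAN branch above does with get_user().
 */
static uint64_t load_unaligned_le(const unsigned char *ptr, unsigned int size)
{
	uint64_t x = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		x |= (uint64_t)ptr[i] << (8 * i);

	/* Mimic the 4-byte sign extension (x = (long)(int)x) above. */
	if (size == 4)
		x = (uint64_t)(int64_t)(int32_t)x;
	return x;
}

int main(void)
{
	/* Bytes of 0x80000001 laid out little-endian at an odd offset. */
	const unsigned char buf[8] = { 0xff, 0x01, 0x00, 0x00, 0x80 };

	/* Prints 0xffffffff80000001: value recovered and sign-extended. */
	printf("%#llx\n", (unsigned long long)load_unaligned_le(buf + 1, 4));
	return 0;
}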