Example 1
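/*
 * Top-level S-mode trap entry: reads scause and the faulting address
 * (sbadaddr), then uses the interrupt bit of scause to dispatch to the
 * asynchronous (interrupt) or synchronous (exception) handler.
 */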
void do_handle_exception(arch_regs_t *regs)
{
	unsigned long baddr = csr_read(sbadaddr);
	unsigned long scause = csr_read(scause);

	if (scause & SCAUSE_INTERRUPT_MASK) {
		do_handle_async(regs, scause & SCAUSE_EXC_MASK, baddr);
	} else {
		do_handle_sync(regs, scause & SCAUSE_EXC_MASK, baddr);
	}
}
Example 2
static inline bool supports_extension(char ext)
{
#ifdef CONFIG_CPU
	struct udevice *dev;
	char desc[32];

	uclass_find_first_device(UCLASS_CPU, &dev);
	if (!dev) {
		debug("unable to find the RISC-V cpu device\n");
		return false;
	}
	if (!cpu_get_desc(dev, desc, sizeof(desc))) {
		/* skip the first 4 characters (rv32|rv64) */
		if (strchr(desc + 4, ext))
			return true;
	}

	return false;
#else  /* !CONFIG_CPU */
#ifdef CONFIG_RISCV_MMODE
	return csr_read(misa) & (1 << (ext - 'a'));
#else  /* !CONFIG_RISCV_MMODE */
#warning "There is no way to determine the available extensions in S-mode."
#warning "Please convert your board to use the RISC-V CPU driver."
	return false;
#endif /* CONFIG_RISCV_MMODE */
#endif /* CONFIG_CPU */
}
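A minimal usage sketch, not from the original sources: the helper names below are hypothetical and only illustrate how the single-letter extension query from supports_extension() might be consumed.

/* Hypothetical call site (sketch): pick a lock implementation based on
 * whether the 'a' (atomics) extension is reported for this CPU. */
static void setup_locks(void)
{
	if (supports_extension('a'))
		setup_amo_locks();	/* hypothetical helper */
	else
		setup_sw_locks();	/* hypothetical helper */
}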
Example 3
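/*
 * Variant of the trap entry that only reads CSR_SCAUSE: the interrupt
 * bit selects IRQ vs. trap handling and is masked off before the cause
 * is passed down.
 */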
void do_handle_exception(arch_regs_t *regs)
{
	unsigned long scause = csr_read(CSR_SCAUSE);

	if (scause & SCAUSE_INTERRUPT_MASK) {
		do_handle_irq(regs, scause & ~SCAUSE_INTERRUPT_MASK);
	} else {
		do_handle_trap(regs, scause & ~SCAUSE_INTERRUPT_MASK);
	}
}
Example 4
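/*
 * Common error path: reports which CPU and VCPU hit the error, dumps the
 * general-purpose registers and the exception state (scause, stval), and
 * optionally panics.
 */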
void do_error(struct vmm_vcpu *vcpu, arch_regs_t *regs,
	      unsigned long scause, const char *msg, int err, bool panic)
{
	u32 cpu = vmm_smp_processor_id();

	vmm_printf("%s: CPU%d: VCPU=%s %s (error %d)\n",
		   __func__, cpu, (vcpu) ? vcpu->name : "(NULL)", msg, err);
	cpu_vcpu_dump_general_regs(NULL, regs);
	cpu_vcpu_dump_exception_regs(NULL, scause, csr_read(CSR_STVAL));
	if (panic) {
		vmm_panic("%s: please reboot ...\n", __func__);
	}
}
Example 5
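/*
 * VCPU context switch: save the outgoing VCPU's register file (and, for
 * a normal guest VCPU, its backup supervisor CSRs), then restore the
 * incoming VCPU's state and switch to its guest's stage2 page table.
 */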
void arch_vcpu_switch(struct vmm_vcpu *tvcpu,
		      struct vmm_vcpu *vcpu,
		      arch_regs_t *regs)
{
	struct riscv_priv *priv;

	if (tvcpu) {
		memcpy(riscv_regs(tvcpu), regs, sizeof(*regs));
		if (tvcpu->is_normal) {
			priv = riscv_priv(tvcpu);
			priv->hideleg = csr_read(CSR_HIDELEG);
			priv->hedeleg = csr_read(CSR_HEDELEG);
			priv->bsstatus = csr_read(CSR_BSSTATUS);
			priv->bsie = csr_read(CSR_BSIE);
			priv->bstvec = csr_read(CSR_BSTVEC);
			priv->bsscratch = csr_read(CSR_BSSCRATCH);
			priv->bsepc = csr_read(CSR_BSEPC);
			priv->bscause = csr_read(CSR_BSCAUSE);
			priv->bstval = csr_read(CSR_BSTVAL);
			priv->bsip = csr_read(CSR_BSIP);
			priv->bsatp = csr_read(CSR_BSATP);
		}
	}

	memcpy(regs, riscv_regs(vcpu), sizeof(*regs));
	if (vcpu->is_normal) {
		priv = riscv_priv(vcpu);
		csr_write(CSR_HIDELEG, priv->hideleg);
		csr_write(CSR_HEDELEG, priv->hedeleg);
		csr_write(CSR_BSSTATUS, priv->bsstatus);
		csr_write(CSR_BSIE, priv->bsie);
		csr_write(CSR_BSTVEC, priv->bstvec);
		csr_write(CSR_BSSCRATCH, priv->bsscratch);
		csr_write(CSR_BSEPC, priv->bsepc);
		csr_write(CSR_BSCAUSE, priv->bscause);
		csr_write(CSR_BSTVAL, priv->bstval);
		csr_write(CSR_BSIP, priv->bsip);
		csr_write(CSR_BSATP, priv->bsatp);
		cpu_mmu_stage2_change_pgtbl(vcpu->guest->id,
					    riscv_guest_priv(vcpu->guest)->pgtbl);
	}
}
Example 6
/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault, code = SEGV_MAPERR;

	cause = regs->scause;
	addr = regs->sbadaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
		goto vmalloc_fault;

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->sstatus & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(in_atomic() || !mm))
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (unlikely(!vma))
		goto bad_area;
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;
	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

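	/* Map the trap cause to the VMA permission this access requires. */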
	switch (cause) {
	case EXC_INST_ACCESS:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_LOAD_ACCESS:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_STORE_ACCESS:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, addr, flags);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(tsk))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY);
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	up_read(&mm->mmap_sem);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, call the OOM killer, and return the userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	do_trap(regs, SIGBUS, BUS_ADRERR, addr, tsk);
	return;

vmalloc_fault:
	{
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
		int index;

		if (user_mode(regs))
			goto bad_area;

		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk->active_mm->pgd" here.
		 * We might be inside an interrupt in the middle
		 * of a task switch.
		 */
		index = pgd_index(addr);
		pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, addr);
		pud_k = pud_offset(pgd_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		/* Since the vmalloc area is global, it is unnecessary
		   to copy individual PTEs */
		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
}
Example 7
int watchdog_disable(void)
{
	/* Clear the enable bit and leave the rest of the register untouched.
	 * CSR_WDT_CTRL is a placeholder name; the actual watchdog control
	 * register is platform specific. */
	csr_write(CSR_WDT_CTRL, csr_read(CSR_WDT_CTRL) & ~WDT_ENABLE);
	return 0;
}
Example 8
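/*
 * Guest trap handler: a store page fault that did not come from a guest
 * (HSTATUS_SPV clear) and whose sepc equals preempt_orphan_pc is a
 * deliberate self-trap used by the scheduler to preempt an orphan
 * context. Everything else is routed to the current VCPU's illegal
 * instruction or page fault handling, halting the VCPU on failure.
 */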
void do_handle_trap(arch_regs_t *regs, unsigned long cause)
{
	int rc = VMM_OK;
	bool panic = TRUE;
	const char *msg = "trap handling failed";
	struct vmm_vcpu *vcpu;

	if ((cause == CAUSE_STORE_PAGE_FAULT) &&
	    !(regs->hstatus & HSTATUS_SPV) &&
	    (regs->sepc == preempt_orphan_pc)) {
		regs->sepc += 4;
		vmm_scheduler_preempt_orphan(regs);
		return;
	}

	vmm_scheduler_irq_enter(regs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();
	if (!vcpu || !vcpu->is_normal) {
		rc = VMM_EFAIL;
		msg = "unexpected trap";
		goto done;
	}

	switch (cause) {
	case CAUSE_ILLEGAL_INSTRUCTION:
		msg = "illegal instruction fault failed";
		if (regs->hstatus & HSTATUS_SPV) {
			rc = cpu_vcpu_illegal_insn_fault(vcpu, regs,
							 csr_read(stval));
			panic = FALSE;
		} else {
			rc = VMM_EINVALID;
		}
		break;
	case CAUSE_FETCH_PAGE_FAULT:
	case CAUSE_LOAD_PAGE_FAULT:
	case CAUSE_STORE_PAGE_FAULT:
		msg = "page fault failed";
		if ((regs->hstatus & HSTATUS_SPV) &&
		    (regs->hstatus & HSTATUS_STL)) {
			rc = cpu_vcpu_page_fault(vcpu, regs,
						 cause, csr_read(stval));
			panic = FALSE;
		} else {
			rc = VMM_EINVALID;
		}
		break;
	default:
		rc = VMM_EFAIL;
		break;
	}

	if (rc) {
		vmm_manager_vcpu_halt(vcpu);
	}

done:
	if (rc) {
		do_error(vcpu, regs, cause, msg, rc, panic);
	}

	vmm_scheduler_irq_exit(regs);
}