Example no. 1
0
/*
 * In Linux, cacheflush is currently implemented
 * as a whole cache flush (arguments are ignored);
 * we emulate this broken behavior.
 */
int
linux_sys_cacheflush(struct lwp *l, const struct linux_sys_cacheflush_args *uap, register_t *retval)
{
	mips_icache_sync_all();
	mips_dcache_wbinv_all();
	return 0;
}
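For context, a hedged sketch of the user-side call this emulates: on Linux/MIPS, cacheflush(2) is declared in <sys/cachectl.h> and takes an address range plus a cache selector (ICACHE, DCACHE, or BCACHE). Under the emulation above, every combination of arguments degenerates into a full flush of both caches.

/*
 * User-space sketch (Linux/MIPS convention; not part of the kernel
 * source above).  The emulated syscall ignores all three arguments,
 * but portable callers should still pass the real range.
 */
#include <stddef.h>
#include <sys/cachectl.h>	/* ICACHE, DCACHE, BCACHE */

static void
flush_generated_code(void *buf, size_t len)
{
	cacheflush(buf, (int)len, BCACHE);	/* whole caches flushed anyway */
}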
Example no. 2
0
/*
 * Initialize the hardware exception vectors, and the jump table used to
 * call locore cache and TLB management functions, based on the kind
 * of CPU the kernel is running on.
 */
void
mips_vector_init(void)
{
	/*
	 * Make sure that the Wait region logic has not been
	 * changed.
	 */
	if (MipsWaitEnd - MipsWaitStart != 16)
		panic("startup: MIPS wait region not correct");
	/*
	 * Copy down exception vector code.
	 */
	if (MipsTLBMissEnd - MipsTLBMiss > 0x80)
		panic("startup: UTLB code too large");

	if (MipsCacheEnd - MipsCache > 0x80)
		panic("startup: Cache error code too large");

	bcopy(MipsTLBMiss, (void *)MIPS_UTLB_MISS_EXC_VEC,
	      MipsTLBMissEnd - MipsTLBMiss);

	/*
	 * XXXRW: Why don't we install the XTLB handler for all 64-bit
	 * architectures?
	 */
#if defined(__mips_n64) || defined(CPU_RMI) || defined(CPU_NLM) || defined(CPU_BERI) || defined(CPU_CHERI)
/* Fake, but sufficient, for 32-bit kernels with 64-bit hardware addresses. */
	bcopy(MipsTLBMiss, (void *)MIPS_XTLB_MISS_EXC_VEC,
	      MipsTLBMissEnd - MipsTLBMiss);
#endif

	bcopy(MipsException, (void *)MIPS_GEN_EXC_VEC,
	      MipsExceptionEnd - MipsException);

	bcopy(MipsCache, (void *)MIPS_CACHE_ERR_EXC_VEC,
	      MipsCacheEnd - MipsCache);

#ifdef CPU_CHERI
	bcopy(CHERICCallVector, (void *)CHERI_CCALL_EXC_VEC,
	      CHERICCallVectorEnd - CHERICCallVector);
#endif

	/*
	 * Clear out the I and D caches.
	 */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();

	/* 
	 * Mask all interrupts. Each interrupt will be enabled
	 * when a handler is installed for it.
	 */
	set_intr_mask(0);

	/* Clear BEV in SR so we start handling our own exceptions */
	mips_wr_status(mips_rd_status() & ~MIPS_SR_BEV);
}
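The repeated size checks above reflect the MIPS convention that exception vectors are spaced 0x80 bytes apart, so each handler must fit within its slot. A hypothetical helper (install_vector is not in the source) capturing the check-and-copy pattern:

/*
 * Hypothetical refactoring sketch: copy an exception handler into its
 * vector slot, panicking if it would spill into the next vector.
 */
static void
install_vector(void *start, void *end, vaddr_t vec, const char *name)
{
	const size_t len = (uintptr_t)end - (uintptr_t)start;

	if (len > 0x80)
		panic("startup: %s code too large", name);
	bcopy(start, (void *)vec, len);
}

Usage would mirror the calls above, e.g. install_vector(MipsTLBMiss, MipsTLBMissEnd, MIPS_UTLB_MISS_EXC_VEC, "UTLB").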
Example no. 3
0
void
kloader_hpcmips_jump(kloader_bootfunc_t func, vaddr_t sp,
    struct kloader_bootinfo *info, struct kloader_page_tag *tag)
{

	mips_icache_sync_all();

	(*func)(info, tag);	/* the 2nd-stage bootloader doesn't use the stack */
	/* NOTREACHED */
}
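The icache sync here is the second half of the standard MIPS sequence for entering freshly written code. A general sketch of the full pattern (dst, src, and len are placeholders; in kloader the copy and D-cache write-back happen earlier):

/*
 * General pattern for jumping into code that was just written through
 * the D-cache: write it back to memory, drop stale I-cache lines, then
 * branch to it.
 */
static void
enter_fresh_code(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);		/* stage the new code */
	mips_dcache_wbinv_all();	/* push it out to memory */
	mips_icache_sync_all();		/* invalidate stale I-cache lines */
	((void (*)(void))dst)();	/* enter it; never returns here */
}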
Example no. 4
0
/*
 * Initialize the hardware exception vectors, and the jump table used to
 * call locore cache and TLB management functions, based on the kind
 * of CPU the kernel is running on.
 */
void
mips_vector_init(void)
{
	/*
	 * Make sure that the Wait region logic has not been
	 * changed.
	 */
	if (MipsWaitEnd - MipsWaitStart != 16)
		panic("startup: MIPS wait region not correct");
	/*
	 * Copy down exception vector code.
	 */
	if (MipsTLBMissEnd - MipsTLBMiss > 0x80)
		panic("startup: UTLB code too large");

	if (MipsCacheEnd - MipsCache > 0x80)
		panic("startup: Cache error code too large");

	bcopy(MipsTLBMiss, (void *)MIPS_UTLB_MISS_EXC_VEC,
	      MipsTLBMissEnd - MipsTLBMiss);

#ifdef __mips_n64
	bcopy(MipsTLBMiss, (void *)MIPS_XTLB_MISS_EXC_VEC,
	      MipsTLBMissEnd - MipsTLBMiss);
#endif

	bcopy(MipsException, (void *)MIPS_GEN_EXC_VEC,
	      MipsExceptionEnd - MipsException);

	bcopy(MipsCache, (void *)MIPS_CACHE_ERR_EXC_VEC,
	      MipsCacheEnd - MipsCache);

	/*
	 * Clear out the I and D caches.
	 */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();

	/* 
	 * Mask all interrupts. Each interrupt will be enabled
	 * when a handler is installed for it.
	 */
	set_intr_mask(0);

	/* Clear BEV in SR so we start handling our own exceptions */
	mips_wr_status(mips_rd_status() & ~MIPS_SR_BEV);
}
Example no. 5
0
/*
 * Initialize the hardware exception vectors, and the jump table used to
 * call locore cache and TLB management functions, based on the kind
 * of CPU the kernel is running on.
 */
void
mips_vector_init(void)
{
    /*
     * Copy down exception vector code.
     */
    if (MipsTLBMissEnd - MipsTLBMiss > 0x80)
        panic("startup: UTLB code too large");

    if (MipsCacheEnd - MipsCache > 0x80)
        panic("startup: Cache error code too large");

    bcopy(MipsTLBMiss, (void *)MIPS_UTLB_MISS_EXC_VEC,
          MipsTLBMissEnd - MipsTLBMiss);

#if defined(CPU_CNMIPS) || defined(CPU_RMI) || defined(CPU_NLM)
    /* Fake, but sufficient, for 32-bit kernels with 64-bit hardware addresses. */
    bcopy(MipsTLBMiss, (void *)MIPS3_XTLB_MISS_EXC_VEC,
          MipsTLBMissEnd - MipsTLBMiss);
#endif

    bcopy(MipsException, (void *)MIPS3_GEN_EXC_VEC,
          MipsExceptionEnd - MipsException);

    bcopy(MipsCache, (void *)MIPS3_CACHE_ERR_EXC_VEC,
          MipsCacheEnd - MipsCache);

    /*
     * Clear out the I and D caches.
     */
    mips_icache_sync_all();
    mips_dcache_wbinv_all();

    /*
     * Mask all interrupts. Each interrupt will be enabled
     * when a handler is installed for it.
     */
    set_intr_mask(0);

    /* Clear BEV in SR so we start handling our own exceptions */
    mips_wr_status(mips_rd_status() & ~MIPS_SR_BEV);
}
Example no. 6
0
static void
mips_init(void)
{
    int i, j, cfe_mem_idx, tmp;
    uint64_t maxmem;

#ifdef CFE_ENV
    cfe_env_init();
#endif

    TUNABLE_INT_FETCH("boothowto", &boothowto);

    if (boothowto & RB_VERBOSE)
        bootverbose++;

#ifdef MAXMEM
    tmp = MAXMEM;
#else
    tmp = 0;
#endif
    TUNABLE_INT_FETCH("hw.physmem", &tmp);
    maxmem = (uint64_t)tmp * 1024;

    /*
     * XXX
     * If we used vm_paddr_t consistently in pmap, etc., we could
     * use 64-bit page numbers on !n64 systems, too, like i386
     * does with PAE.
     */
#if !defined(__mips_n64)
    if (maxmem == 0 || maxmem > 0xffffffff)
        maxmem = 0xffffffff;
#endif

#ifdef CFE
    /*
     * Query DRAM memory map from CFE.
     */
    physmem = 0;
    cfe_mem_idx = 0;
    for (i = 0; i < 10; i += 2) {
        int result;
        uint64_t addr, len, type;

        result = cfe_enummem(cfe_mem_idx++, 0, &addr, &len, &type);
        if (result < 0) {
            phys_avail[i] = phys_avail[i + 1] = 0;
            break;
        }

        KASSERT(type == CFE_MI_AVAILABLE,
                ("CFE DRAM region is not available?"));

        if (bootverbose)
            printf("cfe_enummem: 0x%016jx/%ju.\n", addr, len);

        if (maxmem != 0) {
            if (addr >= maxmem) {
                printf("Ignoring %ju bytes of memory at 0x%jx "
                       "that is above maxmem %dMB\n",
                       len, addr,
                       (int)(maxmem / (1024 * 1024)));
                continue;
            }

            if (addr + len > maxmem) {
                printf("Ignoring %ju bytes of memory "
                       "that is above maxmem %dMB\n",
                       (addr + len) - maxmem,
                       (int)(maxmem / (1024 * 1024)));
                len = maxmem - addr;
            }
        }

        phys_avail[i] = addr;
        if (i == 0 && addr == 0) {
            /*
             * If this is the first physical memory segment probed
             * from CFE, omit the region at the start of physical
             * memory where the kernel has been loaded.
             */
            phys_avail[i] += MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
        }
        phys_avail[i + 1] = addr + len;
        physmem += len;
    }

    realmem = btoc(physmem);
#endif

    for (j = 0; j < i; j++)
        dump_avail[j] = phys_avail[j];

    physmem = realmem;

    init_param1();
    init_param2(physmem);
    mips_cpu_init();

    /*
     * Sibyte has a L1 data cache coherent with DMA. This includes
     * on-chip network interfaces as well as PCI/HyperTransport bus
     * masters.
     */
    cpuinfo.cache_coherent_dma = TRUE;

    /*
     * XXX
     * The kernel is running in 32-bit mode but the CFE is running in
     * 64-bit mode. So the SR_KX bit in the status register is turned
     * on by the CFE every time we call into it - for e.g. CFE_CONSOLE.
     *
     * This means that if we get a TLB miss for any address above 0xc0000000
     * and the SR_KX bit is set then we will end up in the XTLB exception
     * vector.
     *
     * For now work around this by copying the TLB exception handling
     * code to the XTLB exception vector.
     */
    {
        bcopy(MipsTLBMiss, (void *)MIPS3_XTLB_MISS_EXC_VEC,
              MipsTLBMissEnd - MipsTLBMiss);

        mips_icache_sync_all();
        mips_dcache_wbinv_all();
    }

    pmap_bootstrap();
    mips_proc0_init();
    mutex_init();

    kdb_init();
#ifdef KDB
    if (boothowto & RB_KDB)
        kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
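phys_avail[] follows the usual FreeBSD convention of (start, end) physical address pairs terminated by a zeroed pair, which is what the CFE enumeration loop above fills in. An illustrative walk of the array:

/* Illustrative only: walk phys_avail[] as filled in above. */
static void
print_phys_segments(void)
{
	for (int k = 0; phys_avail[k + 1] != 0; k += 2)
		printf("segment %d: %#jx-%#jx (%ju bytes)\n", k / 2,
		    (uintmax_t)phys_avail[k], (uintmax_t)phys_avail[k + 1],
		    (uintmax_t)(phys_avail[k + 1] - phys_avail[k]));
}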
Example no. 7
0
void
cpu_reboot(int howto, char *bootstr)
{
	static int waittime = -1;

	/* Take a snapshot before clobbering any registers. */
	savectx(curpcb);

	/* If "always halt" was specified as a boot flag, obey. */
	if (boothowto & RB_HALT)
		howto |= RB_HALT;

	boothowto = howto;

	/* If system is cold, just halt. */
	if (cold) {
		boothowto |= RB_HALT;
		goto haltsys;
	}

	if ((boothowto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;

		/*
		 * Synchronize the disks....
		 */
		vfs_shutdown();

		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}

	/* Disable interrupts. */
	splhigh();

	if (boothowto & RB_DUMP)
		dumpsys();

 haltsys:
	/* Run any shutdown hooks. */
	doshutdownhooks();

	pmf_system_shutdown(boothowto);

#if 0
	if ((boothowto & RB_POWERDOWN) == RB_POWERDOWN)
		if (board && board->ab_poweroff)
			board->ab_poweroff();
#endif

	/*
	 * Firmware may autoboot (depending on settings), and we cannot pass
	 * flags to it (at least I haven't figured out how to yet), so
	 * we "pseudo-halt" now.
	 */
	if (boothowto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cnpollc(1);	/* For proper keyboard command handling */
		cngetc();
		cnpollc(0);
	}

	printf("reseting board...\n\n");
	mips_icache_sync_all();
	mips_dcache_wbinv_all();
	atheros_reset();
	__asm volatile("jr	%0" :: "r"(MIPS_RESET_EXC_VEC));
	printf("Oops, back from reset\n\nSpinning...");
	for (;;)
		/* spin forever */ ;	/* XXX */
	/*NOTREACHED*/
}
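The closing jr re-enters the firmware at the CPU reset vector. A hedged sketch of the idiom in isolation, assuming MIPS_RESET_EXC_VEC names the standard KSEG1 reset address (0xBFC00000, the uncached alias of physical 0x1FC00000, where a MIPS CPU begins execution after reset):

/*
 * Sketch: soft reset by branching to the reset vector.  Peripherals
 * keep whatever state they were left in, which is why this is only a
 * "pseudo" reset.
 */
static void
soft_reset(void)
{
	mips_icache_sync_all();
	mips_dcache_wbinv_all();
	__asm volatile("jr	%0" :: "r"(MIPS_RESET_EXC_VEC));
	for (;;)
		continue;	/* NOTREACHED */
}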
Example no. 8
0
/*
 * Trap is called from locore to handle most types of processor traps.
 */
void
trap(unsigned int status, unsigned int cause, vaddr_t vaddr, vaddr_t opc,
	struct trapframe *frame) 
{
	int type;
	struct lwp *l = curlwp;
	struct proc *p = curproc;
	vm_prot_t ftype;
	ksiginfo_t ksi;
	struct frame *fp;
	extern void fswintrberr(void);
	KSI_INIT_TRAP(&ksi);

	uvmexp.traps++;

	if ((type = TRAPTYPE(cause)) >= LENGTH(trap_type))
		panic("trap: unknown trap type %d", type);

	if (USERMODE(status)) {
		type |= T_USER;
		LWP_CACHE_CREDS(l, p);
	}

	/* Enable interrupts just as they were before the trap. */
	_splset(status & AVR32_STATUS_IMx);

	switch (type) {
	default:
	dopanic:
		(void)splhigh();
		printf("trap: %s in %s mode\n",
			trap_type[TRAPTYPE(cause)],
			USERMODE(status) ? "user" : "kernel");
		printf("status=0x%x, cause=0x%x, epc=%#lx, vaddr=%#lx\n",
			status, cause, opc, vaddr);
		if (curlwp != NULL) {
			fp = (struct frame *)l->l_md.md_regs;
			printf("pid=%d cmd=%s usp=0x%x ",
			    p->p_pid, p->p_comm, (int)fp->f_regs[_R_SP]);
		} else
			printf("curlwp == NULL ");
		printf("ksp=%p\n", &status);
#if defined(DDB)
		kdb_trap(type, (mips_reg_t *) frame);
		/* XXX force halt XXX */
#elif defined(KGDB)
		{
			struct frame *f = (struct frame *)&ddb_regs;
			extern mips_reg_t kgdb_cause, kgdb_vaddr;
			kgdb_cause = cause;
			kgdb_vaddr = vaddr;

			/*
			 * init global ddb_regs, used in db_interface.c routines
			 * shared between ddb and gdb. Send ddb_regs to gdb so
			 * that db_machdep.h macros will work with it, and
			 * allow gdb to alter the PC.
			 */
			db_set_ddb_regs(type, (mips_reg_t *) frame);
			PC_BREAK_ADVANCE(f);
			if (kgdb_trap(type, &ddb_regs)) {
				((mips_reg_t *)frame)[21] = f->f_regs[_R_PC];
				return;
			}
		}
#else
		panic("trap: dopanic: notyet");
#endif
		/*NOTREACHED*/
	case T_TLB_MOD:
		panic("trap: T_TLB_MOD: notyet");
#if notyet
		if (KERNLAND(vaddr)) {
			pt_entry_t *pte;
			unsigned entry;
			paddr_t pa;

			pte = kvtopte(vaddr);
			entry = pte->pt_entry;
			if (!avr32_pte_v(entry) /*|| (entry & mips_pg_m_bit())*/) {
				panic("ktlbmod: invalid pte");
			}
			if (entry & avr32_pte_ropage_bit()) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernelfault;
			}
			entry |= mips_pg_m_bit();	/* XXXAVR32 Do it on tlbarlo/ tlbarhi? */
			pte->pt_entry = entry;
			vaddr &= ~PGOFSET;
			MachTLBUpdate(vaddr, entry);
			pa = avr32_tlbpfn_to_paddr(entry);
			if (!IS_VM_PHYSADDR(pa)) {
				printf("ktlbmod: va %#lx pa %#llx\n",
				    vaddr, (long long)pa);
				panic("ktlbmod: unmanaged page");
			}
			pmap_set_modified(pa);
			return; /* KERN */
		}
		/*FALLTHROUGH*/
#endif
	case T_TLB_MOD+T_USER: 
		panic("trap: T_TLB_MOD+T_USER: notyet");
#if notyet
	    {
		pt_entry_t *pte;
		unsigned entry;
		paddr_t pa;
		pmap_t pmap;

		pmap  = p->p_vmspace->vm_map.pmap;
		if (!(pte = pmap_segmap(pmap, vaddr)))
			panic("utlbmod: invalid segmap");
		pte += (vaddr >> PGSHIFT) & (NPTEPG - 1);

		entry = pte->pt_entry;
		if (!avr32_pte_v(entry))
			panic("utlbmod: invalid pte");

		if (entry & avr32_pte_ropage_bit()) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			goto pagefault;
		}
		/* entry |= mips_pg_m_bit();  XXXAVR32 Do it on tlbarlo/ tlbarhi? */
		pte->pt_entry = entry;
		vaddr = (vaddr & ~PGOFSET) |
			(pmap->pm_asid << AVR32_TLB_PID_SHIFT);
		MachTLBUpdate(vaddr, entry);
		pa = avr32_tlbpfn_to_paddr(entry);
		if (!IS_VM_PHYSADDR(pa)) {
			printf("utlbmod: va %#lx pa %#llx\n",
			    vaddr, (long long)pa);
			panic("utlbmod: unmanaged page");
		}
		pmap_set_modified(pa);
		if (type & T_USER)
			userret(l);
		return; /* GEN */
	    }
#endif
	case T_TLB_LD_MISS:
		panic("trap: T_TLB_LD_MISS: notyet");
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_LD_MISS) ? VM_PROT_READ : VM_PROT_WRITE;
		if (KERNLAND(vaddr))
			goto kernelfault;
		panic("trap: T_TLB_ST_MISS: notyet");
#if notyet
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if (l == NULL  || l->l_addr->u_pcb.pcb_onfault == NULL)
			goto dopanic;
		/* check for fuswintr() or suswintr() getting a page fault */
		if (l->l_addr->u_pcb.pcb_onfault == (void *)fswintrberr) {
			frame->tf_regs[TF_EPC] = (int)fswintrberr;
			return; /* KERN */
		}
		goto pagefault;
#endif
	case T_TLB_LD_MISS+T_USER:
		panic("trap: T_TLB_LD_MISS+T_USER: notyet");
#if notyet
		ftype = VM_PROT_READ;
		goto pagefault;
#endif
	case T_TLB_ST_MISS+T_USER:
		panic("trap: T_TLB_ST_MISS+T_USER: notyet");
#if notyet
		ftype = VM_PROT_WRITE;
#endif
	pagefault: ;
	    {
		vaddr_t va;
		struct vmspace *vm;
		struct vm_map *map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page(vaddr);

		if ((l->l_flag & LW_SA) && (~l->l_pflag & LP_SA_NOBLOCK)) {
			l->l_savp->savp_faultaddr = (vaddr_t)vaddr;
			l->l_pflag |= LP_SA_PAGEFAULT;
		}

		if (p->p_emul->e_fault)
			rv = (*p->p_emul->e_fault)(p, va, ftype);
		else
			rv = uvm_fault(map, va, ftype);
				
#ifdef VMFAULT_TRACE
		printf(
	    "uvm_fault(%p (pmap %p), %lx (0x%x), %d) -> %d at pc %p\n",
		    map, vm->vm_map.pmap, va, vaddr, ftype, rv, (void*)opc);
#endif
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((void *)va >= vm->vm_maxsaddr) {
			if (rv == 0){
				uvm_grow(p, va);
			}
			else if (rv == EACCES)
				rv = EFAULT;
		}
		l->l_pflag &= ~LP_SA_PAGEFAULT;
		if (rv == 0) {
			if (type & T_USER) {
				userret(l);
			}
			return; /* GEN */
		}
		if ((type & T_USER) == 0)
			goto copyfault;
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
			       p->p_pid, p->p_comm,
			       l->l_cred ?
			       kauth_cred_geteuid(l->l_cred) : (uid_t) -1);
			ksi.ksi_signo = SIGKILL;
			ksi.ksi_code = 0;
		} else {
			if (rv == EACCES) {
				ksi.ksi_signo = SIGBUS;
				ksi.ksi_code = BUS_OBJERR;
			} else {
				ksi.ksi_signo = SIGSEGV;
				ksi.ksi_code = SEGV_MAPERR;
			}
		}
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_addr = (void *)vaddr;
		break; /* SIGNAL */
	    }
	kernelfault: ;
	    {
		vaddr_t va;
		int rv;

		va = trunc_page(vaddr);
		rv = uvm_fault(kernel_map, va, ftype);
		if (rv == 0)
			return; /* KERN */
		/*FALLTHROUGH*/
	    }
	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to CPU */
	copyfault:
		panic("trap: copyfault: notyet");
#if notyet
		if (l == NULL || l->l_addr->u_pcb.pcb_onfault == NULL)
			goto dopanic;
		frame->tf_regs[TF_EPC] = (intptr_t)l->l_addr->u_pcb.pcb_onfault;
		return; /* KERN */
#endif
#if notyet
	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to CPU */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to CPU */
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_signo = SIGSEGV; /* XXX */
		ksi.ksi_addr = (void *)vaddr;
		ksi.ksi_code = SEGV_MAPERR; /* XXX */
		break; /* SIGNAL */

	case T_BREAK:
		panic("trap: T_BREAK: notyet");
#if defined(DDB)
		kdb_trap(type, (avr32_reg_t *) frame);
		return;	/* KERN */
#elif defined(KGDB)
		{
			struct frame *f = (struct frame *)&ddb_regs;
			extern avr32_reg_t kgdb_cause, kgdb_vaddr;
			kgdb_cause = cause;
			kgdb_vaddr = vaddr;

			/*
			 * init global ddb_regs, used in db_interface.c routines
			 * shared between ddb and gdb. Send ddb_regs to gdb so
			 * that db_machdep.h macros will work with it, and
			 * allow gdb to alter the PC.
			 */
			db_set_ddb_regs(type, (avr32_reg_t *) frame);
			PC_BREAK_ADVANCE(f);
			if (!kgdb_trap(type, &ddb_regs))
				printf("kgdb: ignored %s\n",
				       trap_type[TRAPTYPE(cause)]);
			else
				((avr32_reg_t *)frame)[21] = f->f_regs[_R_PC];

			return;
		}
#else
		goto dopanic;
#endif
	case T_BREAK+T_USER:
	    {
		vaddr_t va;
		uint32_t instr;
		int rv;

		/* compute address of break instruction */
		va = (DELAYBRANCH(cause)) ? opc + sizeof(int) : opc;

		/* read break instruction */
		instr = fuiword((void *)va);

		if (l->l_md.md_ss_addr != va || instr != MIPS_BREAK_SSTEP) {
			ksi.ksi_trap = type & ~T_USER;
			ksi.ksi_signo = SIGTRAP;
			ksi.ksi_addr = (void *)va;
			ksi.ksi_code = TRAP_TRACE;
			break;
		}
		/*
		 * Restore original instruction and clear BP
		 */
		rv = suiword((void *)va, l->l_md.md_ss_instr);
		if (rv < 0) {
			vaddr_t sa, ea;
			sa = trunc_page(va);
			ea = round_page(va + sizeof(int) - 1);
			rv = uvm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_ALL, false);
			if (rv == 0) {
				rv = suiword((void *)va, l->l_md.md_ss_instr);
				(void)uvm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, false);
			}
		}
		mips_icache_sync_all();		/* XXXJRT -- necessary? */
		mips_dcache_wbinv_all();	/* XXXJRT -- necessary? */

		if (rv < 0)
			printf("Warning: can't restore instruction at 0x%lx: 0x%x\n",
				l->l_md.md_ss_addr, l->l_md.md_ss_instr);
		l->l_md.md_ss_addr = 0;
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_addr = (void *)va;
		ksi.ksi_code = TRAP_BRKPT;
		break; /* SIGNAL */
	    }
	case T_RES_INST+T_USER:
	case T_COP_UNUSABLE+T_USER:
#if !defined(SOFTFLOAT) && !defined(NOFPU)
		if ((cause & MIPS_CR_COP_ERR) == 0x10000000) {
			struct frame *f;

			f = (struct frame *)l->l_md.md_regs;
			savefpregs(fpcurlwp);	  	/* yield FPA */
			loadfpregs(l);          	/* load FPA */
			fpcurlwp = l;
			l->l_md.md_flags |= MDP_FPUSED;
			f->f_regs[_R_SR] |= MIPS_SR_COP_1_BIT;
		} else
#endif
		{
			MachEmulateInst(status, cause, opc, l->l_md.md_regs);
		}
		userret(l);
		return; /* GEN */
	case T_FPE+T_USER:
		panic ("trap: T_FPE+T_USER: notyet");
#if defined(SOFTFLOAT)
		MachEmulateInst(status, cause, opc, l->l_md.md_regs);
#elif !defined(NOFPU)
		MachFPTrap(status, cause, opc, l->l_md.md_regs);
#endif
		userret(l);
		return; /* GEN */
	case T_OVFLOW+T_USER:
	case T_TRAP+T_USER:
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_signo = SIGFPE;
		fp = (struct frame *)l->l_md.md_regs;
		ksi.ksi_addr = (void *)fp->f_regs[_R_PC];
		ksi.ksi_code = FPE_FLTOVF; /* XXX */
		break; /* SIGNAL */
#endif
	}
	panic("trap: post-switch: notyet");
#if notyet
	fp = (struct frame *)l->l_md.md_regs;
	fp->f_regs[_R_CAUSE] = cause;
	fp->f_regs[_R_BADVADDR] = vaddr;
	(*p->p_emul->e_trapsignal)(l, &ksi);
	if ((type & T_USER) == 0)
		panic("trapsignal");
	userret(l);
#endif
	return;
}
Example no. 9
0
void
cpu_hatch(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = ci->ci_tlb_info;

	/*
	 * Invalidate all the TLB entries (even wired ones) and then reserve
	 * space for the wired TLB entries.
	 */
	mips3_cp0_wired_write(0);
	tlb_invalidate_all();
	mips3_cp0_wired_write(ti->ti_wired);

	/*
	 * Set up the HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
	 */
	cpu_hwrena_setup();

	/*
	 * If we are using register zero relative addressing to access cpu_info
	 * in the exception vectors, enter that mapping into TLB now.
	 */
	if (ci->ci_tlb_slot >= 0) {
		const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
		    | mips3_paddr_to_tlbpfn((vaddr_t)ci);
		const struct tlbmask tlbmask = {
			.tlb_hi = -PAGE_SIZE | KERNEL_PID,
#if (PGSHIFT & 1)
			.tlb_lo0 = tlb_lo,
			.tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
#else
			.tlb_lo0 = 0,
			.tlb_lo1 = tlb_lo,
#endif
			.tlb_mask = -1,
		};

		tlb_invalidate_addr(tlbmask.tlb_hi, KERNEL_PID);
		tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
	}

	/*
	 * Flush the icache just to be sure.
	 */
	mips_icache_sync_all();

	/*
	 * Let this CPU do its own initialization (for things that have to be
	 * done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_init)(ci);

	// Show this CPU as present.
	atomic_or_ulong(&ci->ci_flags, CPUF_PRESENT);

	/*
	 * Announce we are hatched
	 */
	kcpuset_atomic_set(cpus_hatched, cpu_index(ci));

	/*
	 * Now wait to be set free!
	 */
	while (! kcpuset_isset(cpus_running, cpu_index(ci))) {
		/* spin, spin, spin */
	}

	/*
	 * initialize the MIPS count/compare clock
	 */
	mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
	KASSERT(ci->ci_cycles_per_hz != 0);
	ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
	ci->ci_data.cpu_cc_skew = 0;

	/*
	 * Let this CPU do its own post-running initialization
	 * (for things that have to be done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_run)(ci);

	/*
	 * Now turn on interrupts (and verify they are on).
	 */
	spl0();
	KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
	KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

	kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci));
	kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci));

	/*
	 * And do a tail call to idle_loop
	 */
	idle_loop(NULL);
}

void
cpu_boot_secondary_processors(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (CPU_IS_PRIMARY(ci))
			continue;
		KASSERT(ci->ci_data.cpu_idlelwp);

		/*
		 * Skip this CPU if it didn't successfully hatch.
		 */
		if (!kcpuset_isset(cpus_hatched, cpu_index(ci)))
			continue;

		ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
		kcpuset_set(cpus_running, cpu_index(ci));
		// Spin until the cpu calls idle_loop
		for (u_int i = 0; i < 100; i++) {
			if (kcpuset_isset(cpus_running, cpu_index(ci)))
				break;
			delay(1000);
		}
	}
}
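Taken together, cpu_hatch() and cpu_boot_secondary_processors() form a two-sided handshake over the cpus_hatched and cpus_running sets; condensed here for clarity (illustrative, not additional source):

/*
 * Condensed view of the hatch handshake above:
 *
 *   secondary CPU (cpu_hatch)              primary CPU (boot)
 *   -------------------------              ------------------
 *   set up TLB, caches, HWRENA
 *   kcpuset_atomic_set(cpus_hatched)  -->  sees the hatched bit
 *   spin on cpus_running bit          <--  kcpuset_set(cpus_running)
 *   start count/compare clock
 *   idle_loop(NULL)                        moves on to the next CPU
 */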
Example no. 10
0
void
cpu_hatch(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = ci->ci_tlb_info;

	/*
	 * Invalidate all the TLB entries (even wired ones) and then reserve
	 * space for the wired TLB entries.
	 */
	mips3_cp0_wired_write(0);
	tlb_invalidate_all();
	mips3_cp0_wired_write(ti->ti_wired);

	/*
	 * Set up the HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
	 */
	cpu_hwrena_setup();

	/*
	 * If we are using register zero relative addressing to access cpu_info
	 * in the exception vectors, enter that mapping into TLB now.
	 */
	if (ci->ci_tlb_slot >= 0) {
		const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
		    | mips3_paddr_to_tlbpfn((vaddr_t)ci);

		tlb_enter(ci->ci_tlb_slot, -PAGE_SIZE, tlb_lo);
	}

	/*
	 * Flush the icache just to be sure.
	 */
	mips_icache_sync_all();

	/*
	 * Let this CPU do its own initialization (for things that have to be
	 * done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_init)(ci);

	/*
	 * Announce we are hatched
	 */
	CPUSET_ADD(cpus_hatched, cpu_index(ci));

	/*
	 * Now wait to be set free!
	 */
	while (! CPUSET_HAS_P(cpus_running, cpu_index(ci))) {
		/* spin, spin, spin */
	}

	/*
	 * initialize the MIPS count/compare clock
	 */
	mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
	KASSERT(ci->ci_cycles_per_hz != 0);
	ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
	ci->ci_data.cpu_cc_skew = 0;

	/*
	 * Let this CPU do its own post-running initialization
	 * (for things that have to be done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_run)(ci);

	/*
	 * Now turn on interrupts.
	 */
	spl0();

	/*
	 * And do a tail call to idle_loop
	 */
	idle_loop(NULL);
}
Example no. 11
0
void
cpu_reboot(int howto, char *bootstr)
{
	static int waittime = -1;
	const struct alchemy_board *board;

	/* Take a snapshot before clobbering any registers. */
	if (curproc)
		savectx((struct user *)curpcb);

	board = board_info();
	KASSERT(board != NULL);

	/* If "always halt" was specified as a boot flag, obey. */
	if (boothowto & RB_HALT)
		howto |= RB_HALT;

	boothowto = howto;

	/* If system is cold, just halt. */
	if (cold) {
		boothowto |= RB_HALT;
		goto haltsys;
	}

	if ((boothowto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;

		/*
		 * Synchronize the disks....
		 */
		vfs_shutdown();

		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}

	/* Disable interrupts. */
	splhigh();

	if (boothowto & RB_DUMP)
		dumpsys();

 haltsys:
	/* Run any shutdown hooks. */
	doshutdownhooks();

	if ((boothowto & RB_POWERDOWN) == RB_POWERDOWN)
		if (board && board->ab_poweroff)
			board->ab_poweroff();

	/*
	 * YAMON may autoboot (depending on settings), and we cannot pass
	 * flags to it (at least I haven't figured out how to yet), so
	 * we "pseudo-halt" now.
	 */
	if (boothowto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cnpollc(1);	/* For proper keyboard command handling */
		cngetc();
		cnpollc(0);
	}

	printf("reseting board...\n\n");

	/*
	 * Try to use board-specific reset logic, which might involve a better
	 * hardware reset.
	 */
	if (board->ab_reboot)
		board->ab_reboot();

#if 1
	/* XXX
	 * For some reason we are leaving the ethernet MAC in a state where
	 * YAMON isn't happy with it.  So just call the reset vector (grr,
	 * Alchemy YAMON doesn't have a "reset" command).
	 */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();
	__asm volatile("jr	%0" :: "r"(MIPS_RESET_EXC_VEC));
#else
	printf("%s\n\n", ((howto & RB_HALT) != 0) ? "halted." : "rebooting...");
	yamon_exit(boothowto);
	printf("Oops, back from yamon_exit()\n\nSpinning...");
#endif
	for (;;)
		/* spin forever */ ;	/* XXX */
	/*NOTREACHED*/
}