Example #1
void
__mp_lock(struct __mp_lock *mpl)
{
	register_t sr;
	struct cpu_info *ci = curcpu();

	/*
	 * Please notice that mpl_count gets incremented twice for the
	 * first lock. This is on purpose. The way we release the lock
	 * in mp_unlock is to decrement the mpl_count and then check if
	 * the lock should be released. Since mpl_count is what we're
	 * spinning on, decrementing it in mpl_unlock to 0 means that
	 * we can't clear mpl_cpu, because we're no longer holding the
	 * lock. In theory mpl_cpu doesn't need to be cleared, but it's
	 * safer to clear it and besides, setting mpl_count to 2 on the
	 * first lock makes most of this code much simpler.
	 */
	while (1) {
		sr = disableintr();
		if (__cpu_cas(&mpl->mpl_count, 0, 1) == 0) {
			mips_sync();
			mpl->mpl_cpu = ci;
		}

		if (mpl->mpl_cpu == ci) {
			mpl->mpl_count++;
			setsr(sr);
			break;
		}
		setsr(sr);
		
		__mp_lock_spin(mpl);
	}
}
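A minimal usage sketch (not taken from any of the examples below) of the recursion the comment above describes: the first __mp_lock() leaves mpl_count at 2, every nested acquisition adds one, and __mp_unlock() (shown in Example #8) only actually releases the lock once the count drops back to 1.

void
example_nested_acquire(struct __mp_lock *mpl)
{
	__mp_lock(mpl);		/* first acquisition: mpl_count becomes 2 */
	__mp_lock(mpl);		/* nested acquisition: mpl_count becomes 3 */
	__mp_unlock(mpl);	/* back to 2, lock still held */
	__mp_unlock(mpl);	/* count reaches 1, mpl_cpu cleared, lock released */
}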
Example #2
void fbcon_vga_planes_putc(struct vc_data *conp, struct display *p, int c, int yy, int xx)
{
	int fg = attr_fgcol(p,c);
	int bg = attr_bgcol(p,c);

	int y;
	u8 *cdat = p->fontdata + (c & p->charmask) * fontheight(p);
	char *where = p->screen_base + xx + yy * p->line_length * fontheight(p);

	setmode(2);
	setop(0);
	setsr(0xf);
	setcolor(fg);
	selectmask();

	setmask(0xff);
	writeb(bg, where);
	rmb();
	readb(where); /* fill latches */
	setmode(3);
	wmb();
	for (y = 0; y < fontheight(p); y++, where += p->line_length) 
		writeb(cdat[y], where);
	wmb();
}
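The setmode()/setop()/setsr()/setcolor()/selectmask()/setmask()/rmw() helpers used throughout the fbcon examples program the VGA graphics controller through its index/data ports. The sketch below shows what such helpers typically look like; the port numbers follow the standard VGA programming model, but the register indices (in particular which register setsr() targets) are assumptions for illustration, not the driver's own definitions.

#include <asm/io.h>			/* outb(), readb(), writeb() */

#define GRA_I	0x3ce			/* VGA graphics controller index port */
#define GRA_D	0x3cf			/* VGA graphics controller data port */

static inline void setmode(int mode)	/* graphics mode register, GC index 5 */
{
	outb(5, GRA_I);
	outb(mode, GRA_D);
}

static inline void setop(int op)	/* data rotate/function select, GC index 3 */
{
	outb(3, GRA_I);
	outb(op, GRA_D);
}

static inline void setsr(int sr)	/* assumed: enable set/reset, GC index 1 */
{
	outb(1, GRA_I);
	outb(sr, GRA_D);
}

static inline void setcolor(int color)	/* set/reset value (the colour), GC index 0 */
{
	outb(0, GRA_I);
	outb(color, GRA_D);
}

static inline void selectmask(void)	/* point the index at the bit mask, GC index 8 */
{
	outb(8, GRA_I);
}

static inline void setmask(int mask)	/* bit mask value, after selectmask() */
{
	outb(mask, GRA_D);
}

static inline void rmw(char *p)		/* read to load the latches, write anything back */
{
	readb(p);
	writeb(1, p);
}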
Example #3
void fbcon_ega_planes_putc(struct vc_data *conp, struct display *p, int c, int yy, int xx)
{
	int fg = attr_fgcol(p,c);
	int bg = attr_bgcol(p,c);

	int y;
	u8 *cdat = p->fontdata + (c & p->charmask) * fontheight(p);
	char *where = p->screen_base + xx + yy * p->line_length * fontheight(p);

	setmode(0);
	setop(0);
	setsr(0xf);
	setcolor(bg);
	selectmask();

	setmask(0xff);
	for (y = 0; y < fontheight(p); y++, where += p->line_length) 
		rmw(where);

	where -= p->line_length * y;
	setcolor(fg);
	selectmask();
	for (y = 0; y < fontheight(p); y++, where += p->line_length) 
		if (cdat[y]) {
			setmask(cdat[y]);
			rmw(where);
		}
}
Example #4
void fbcon_vga_planes_clear(struct vc_data *conp, struct display *p, int sy, int sx,
		   int height, int width)
{
	int line_ofs = p->line_length - width;
	char *where;
	int x;
	
	setmode(0);
	setop(0);
	setsr(0xf);
	setcolor(attr_bgcol_ec(p, conp));
	selectmask();

	setmask(0xff);

	sy *= fontheight(p);
	height *= fontheight(p);

	where = p->screen_base + sx + sy * p->line_length;
	while (height--) {
		for (x = 0; x < width; x++) {
			writeb(0, where);
			where++;
		}
		where += line_ofs;
	}
}
Example #5
void
dosoftint()
{
	struct cpu_info *ci = curcpu();
	int sir, q, mask;
#ifdef MULTIPROCESSOR
	register_t sr;

	/* Enable interrupts */
	sr = getsr();
	ENABLEIPI();
	__mp_lock(&kernel_lock);
#endif

	while ((sir = ci->ci_softpending) != 0) {
		atomic_clearbits_int(&ci->ci_softpending, sir);

		for (q = SI_NQUEUES - 1; q >= 0; q--) {
			mask = SINTMASK(q);
			if (sir & mask)
				softintr_dispatch(q);
		}
	}

#ifdef MULTIPROCESSOR
	__mp_unlock(&kernel_lock);
	setsr(sr);
#endif
}
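dosoftint() drains ci_softpending from the highest-priority queue down, taking the kernel lock on MP systems before dispatching. Assuming SINTMASK(q) expands to a single bit per queue, scheduling work for a queue amounts to an atomic bit set on the current CPU, roughly as in this hypothetical helper (not part of the examples):

static inline void
example_set_softpending(int q)
{
	struct cpu_info *ci = curcpu();

	/* Mark queue q pending so the loop in dosoftint() will call
	 * softintr_dispatch(q) on the next soft interrupt. */
	atomic_setbits_int(&ci->ci_softpending, SINTMASK(q));
}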
Example #6
void fbcon_vga_planes_bmove(struct display *p, int sy, int sx, int dy, int dx,
		   int height, int width)
{
	char *src;
	char *dest;
	int line_ofs;
	int x;

	setmode(1);
	setop(0);
	setsr(0xf);

	sy *= fontheight(p);
	dy *= fontheight(p);
	height *= fontheight(p);

	if (dy < sy || (dy == sy && dx < sx)) {
		line_ofs = p->line_length - width;
		dest = p->screen_base + dx + dy * p->line_length;
		src = p->screen_base + sx + sy * p->line_length;
		while (height--) {
			for (x = 0; x < width; x++) {
				readb(src);
				writeb(0, dest);
				dest++;
				src++;
			}
			src += line_ofs;
			dest += line_ofs;
		}
	} else {
		line_ofs = p->line_length - width;
		dest = p->screen_base + dx + width + (dy + height - 1) * p->line_length;
		src = p->screen_base + sx + width + (sy + height - 1) * p->line_length;
		while (height--) {
			for (x = 0; x < width; x++) {
				dest--;
				src--;
				readb(src);
				writeb(0, dest);
			}
			src -= line_ofs;
			dest -= line_ofs;
		}
	}
}
Example #7
void fbcon_vga_planes_revc(struct display *p, int xx, int yy)
{
	char *where = p->screen_base + xx + yy * p->line_length * fontheight(p);
	int y;
	
	setmode(0);
	setop(0x18);
	setsr(0xf);
	setcolor(0xf);
	selectmask();

	setmask(0xff);
	for (y = 0; y < fontheight(p); y++) {
		rmw(where);
		where += p->line_length;
	}
}
Example #8
void
__mp_unlock(struct __mp_lock *mpl)
{
	register_t sr;

#ifdef MP_LOCKDEBUG
	if (mpl->mpl_cpu != curcpu()) {
		db_printf("__mp_unlock(%p): not held lock\n", mpl);
		Debugger();
	}
#endif

	sr = disableintr();
	if (--mpl->mpl_count == 1) {
		mpl->mpl_cpu = NULL;
		mips_sync();
		mpl->mpl_count = 0;
	}

	setsr(sr);
}
Example #9
int
__mp_release_all(struct __mp_lock *mpl)
{
	int rv = mpl->mpl_count - 1;
	register_t sr;

#ifdef MP_LOCKDEBUG
	if (mpl->mpl_cpu != curcpu()) {
		db_printf("__mp_release_all(%p): not held lock\n", mpl);
		Debugger();
	}
#endif

	sr = disableintr();
	mpl->mpl_cpu = NULL;
	mips_sync();
	mpl->mpl_count = 0;
	setsr(sr);

	return (rv);
}
Example #10
/* 6.96 in my test */
void fbcon_vga_planes_putcs(struct vc_data *conp, struct display *p, const unsigned short *s,
		   int count, int yy, int xx)
{
	int fg = attr_fgcol(p,*s);
	int bg = attr_bgcol(p,*s);

	char *where;
	int n;

	setmode(2);
	setop(0);
	setsr(0xf);
	setcolor(fg);
	selectmask();

	setmask(0xff);
	where = p->screen_base + xx + yy * p->line_length * fontheight(p);
	writeb(bg, where);
	rmb();
	readb(where); /* fill latches */
	setmode(3);	
	wmb();
	for (n = 0; n < count; n++) {
		int y;
		int c = *s++ & p->charmask;
		u8 *cdat = p->fontdata + (c & p->charmask) * fontheight(p);

		for (y = 0; y < fontheight(p); y++, cdat++) {
			writeb (*cdat, where);
			where += p->line_length;
		}
		where += 1 - p->line_length * fontheight(p);
	}
	
	wmb();
}
Example #11
void vga16_putc(unsigned char *cdat, int yy, int xx, int fg, int bg)
{
        int y;
        char *where = fb_mem + xx/8 + yy * fb_line_length;

        setmode(2);
        setop(0);
        setsr(0xf);
        setcolor(fg);
        selectmask();

        setmask(0xff);
        *where = bg;
        rmb();
        *(volatile char*)where; /* fill latches */
        setmode(3);
        wmb();
        for (y = 0; y < 16; y++, where += fb_line_length)
                *where = cdat[y];
        wmb();
}
Example #12
caddr_t
mips_init(int argc, void *argv, caddr_t boot_esym)
{
	char *cp;
	int i;
	u_int cputype;
	vaddr_t xtlb_handler;
	extern char start[], edata[], end[];
	extern char exception[], e_exception[];
	extern char *hw_vendor;

#ifdef MULTIPROCESSOR
	/*
	 * Set curcpu address on primary processor.
	 */
	setcurcpu(&cpu_info_primary);
#endif

	/*
	 * Make sure we can access the extended address space.
	 * Note that r10k and later do not allow XUSEG accesses
	 * from kernel mode unless SR_UX is set.
	 */
	setsr(getsr() | SR_KX | SR_UX);

	/*
	 * Clear the compiled BSS segment in OpenBSD code.
	 */
	bzero(edata, end - edata);

	/*
	 * Reserve space for the symbol table, if it exists.
	 */
	ssym = (char *)*(u_int64_t *)end;

	/* Attempt to locate ELF header and symbol table after kernel. */
	if (end[0] == ELFMAG0 && end[1] == ELFMAG1 &&
	    end[2] == ELFMAG2 && end[3] == ELFMAG3 ) {

		/* ELF header exists directly after kernel. */
		ssym = end;
		esym = boot_esym;
		ekern = esym;

	} else if (((long)ssym - (long)end) >= 0 &&
	    ((long)ssym - (long)end) <= 0x1000 &&
	    ssym[0] == ELFMAG0 && ssym[1] == ELFMAG1 &&
	    ssym[2] == ELFMAG2 && ssym[3] == ELFMAG3 ) {

		/* Pointers exist directly after kernel. */
		esym = (char *)*((u_int64_t *)end + 1);
		ekern = esym;

	} else {

		/* Pointers aren't setup either... */
		ssym = NULL;
		esym = NULL;
		ekern = end;
	}

	/*
	 * Initialize the system type and set up memory layout.
	 * Note that some systems have a more complex memory setup.
	 */
	bios_ident();

	/*
	 * Read and store ARCBios variables for future reference.
	 */
	cp = Bios_GetEnvironmentVariable("ConsoleOut");
	if (cp != NULL && *cp != '\0')
		strlcpy(bios_console, cp, sizeof(bios_console));
	cp = Bios_GetEnvironmentVariable("gfx");
	if (cp != NULL && *cp != '\0')
		strlcpy(bios_graphics, cp, sizeof(bios_graphics));
	cp = Bios_GetEnvironmentVariable("keybd");
	if (cp != NULL && *cp != '\0')
		strlcpy(bios_keyboard, cp, sizeof(bios_keyboard));

	/*
	 * Determine system type and set up configuration record data.
	 */
	hw_vendor = "SGI";
	switch (sys_config.system_type) {
#ifdef TGT_O2
	case SGI_O2:
		bios_printf("Found SGI-IP32, setting up.\n");
		strlcpy(cpu_model, "IP32", sizeof(cpu_model));
		ip32_setup();
		break;
#endif
#ifdef TGT_ORIGIN
	case SGI_IP27:
		bios_printf("Found SGI-IP27, setting up.\n");
		strlcpy(cpu_model, "IP27", sizeof(cpu_model));
		ip27_setup();

		break;

	case SGI_IP35:
		bios_printf("Found SGI-IP35, setting up.\n");
		/* IP27 is intentional, we use the same kernel */
		strlcpy(cpu_model, "IP27", sizeof(cpu_model));
		ip27_setup();

		break;
#endif
#ifdef TGT_OCTANE
	case SGI_OCTANE:
		bios_printf("Found SGI-IP30, setting up.\n");
		strlcpy(cpu_model, "IP30", sizeof(cpu_model));
		ip30_setup();
		break;
#endif
	default:
		bios_printf("Kernel doesn't support this system type!\n");
		bios_printf("Halting system.\n");
		Bios_Halt();
		while(1);
	}

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
	boothowto = RB_AUTOBOOT;
	dobootopts(argc, argv);

	/*
	 * Figure out where we supposedly booted from.
	 */
	cp = Bios_GetEnvironmentVariable("OSLoadPartition");
	if (cp == NULL)
		cp = "unknown";
	if (strlcpy(osloadpartition, cp, sizeof osloadpartition) >=
	    sizeof osloadpartition)
		bios_printf("Value of `OSLoadPartition' is too large.\n"
		 "The kernel might not be able to find out its root device.\n");

	/*
	 * Read platform-specific environment variables.
	 */
	switch (sys_config.system_type) {
#ifdef TGT_O2
	case SGI_O2:
		/* Get Ethernet address from ARCBIOS. */
		cp = Bios_GetEnvironmentVariable("eaddr");
		if (cp != NULL && strlen(cp) > 0)
			strlcpy(bios_enaddr, cp, sizeof bios_enaddr);
		break;
#endif
	default:
		break;
	}

	/*
	 * Set pagesize to enable use of page macros and functions.
	 * Commit available memory to UVM system.
	 */
	uvmexp.pagesize = PAGE_SIZE;
	uvm_setpagesize();

	for (i = 0; i < MAXMEMSEGS && mem_layout[i].mem_last_page != 0; i++) {
		uint64_t fp, lp;
		uint64_t firstkernpage, lastkernpage;
		unsigned int freelist;
		paddr_t firstkernpa, lastkernpa;

		if (IS_XKPHYS((vaddr_t)start))
			firstkernpa = XKPHYS_TO_PHYS((vaddr_t)start);
		else
			firstkernpa = CKSEG0_TO_PHYS((vaddr_t)start);
		if (IS_XKPHYS((vaddr_t)ekern))
			lastkernpa = XKPHYS_TO_PHYS((vaddr_t)ekern);
		else
			lastkernpa = CKSEG0_TO_PHYS((vaddr_t)ekern);

		firstkernpage = atop(trunc_page(firstkernpa));
		lastkernpage = atop(round_page(lastkernpa));

		fp = mem_layout[i].mem_first_page;
		lp = mem_layout[i].mem_last_page;
		freelist = mem_layout[i].mem_freelist;

		/* Account for kernel and kernel symbol table. */
		if (fp >= firstkernpage && lp < lastkernpage)
			continue;	/* In kernel. */

		if (lp < firstkernpage || fp > lastkernpage) {
			uvm_page_physload(fp, lp, fp, lp, freelist);
			continue;	/* Outside kernel. */
		}

		if (fp >= firstkernpage)
			fp = lastkernpage;
		else if (lp < lastkernpage)
			lp = firstkernpage;
		else { /* Need to split! */
			uint64_t xp = firstkernpage;
			uvm_page_physload(fp, xp, fp, xp, freelist);
			fp = lastkernpage;
		}
		if (lp > fp) {
			uvm_page_physload(fp, lp, fp, lp, freelist);
		}
	}

	/*
	 * Configure cache.
	 */
	switch (bootcpu_hwinfo.type) {
#ifdef CPU_R10000
	case MIPS_R10000:
	case MIPS_R12000:
	case MIPS_R14000:
		cputype = MIPS_R10000;
		break;
#endif
#ifdef CPU_R5000
	case MIPS_R5000:
	case MIPS_RM52X0:
		cputype = MIPS_R5000;
		break;
#endif
#ifdef CPU_RM7000
	case MIPS_RM7000:
	case MIPS_RM9000:
		cputype = MIPS_R5000;
		break;
#endif
	default:
		/*
		 * If we can't identify the cpu type, it must be
		 * r10k-compatible on Octane and Origin families, and
		 * it is likely to be r5k-compatible on O2.
		 */
		switch (sys_config.system_type) {
		case SGI_O2:
			cputype = MIPS_R5000;
			break;
		default:
		case SGI_OCTANE:
		case SGI_IP27:
		case SGI_IP35:
			cputype = MIPS_R10000;
			break;
		}
		break;
	}
	switch (cputype) {
	default:
#if defined(CPU_R5000) || defined(CPU_RM7000)
	case MIPS_R5000:
		Mips5k_ConfigCache(curcpu());
		sys_config._SyncCache = Mips5k_SyncCache;
		sys_config._InvalidateICache = Mips5k_InvalidateICache;
		sys_config._SyncDCachePage = Mips5k_SyncDCachePage;
		sys_config._HitSyncDCache = Mips5k_HitSyncDCache;
		sys_config._IOSyncDCache = Mips5k_IOSyncDCache;
		sys_config._HitInvalidateDCache = Mips5k_HitInvalidateDCache;
		break;
#endif
#ifdef CPU_R10000
	case MIPS_R10000:
		Mips10k_ConfigCache(curcpu());
		sys_config._SyncCache = Mips10k_SyncCache;
		sys_config._InvalidateICache = Mips10k_InvalidateICache;
		sys_config._SyncDCachePage = Mips10k_SyncDCachePage;
		sys_config._HitSyncDCache = Mips10k_HitSyncDCache;
		sys_config._IOSyncDCache = Mips10k_IOSyncDCache;
		sys_config._HitInvalidateDCache = Mips10k_HitInvalidateDCache;
		break;
#endif
	}

	/*
	 * Last chance to call the BIOS. Wiping the TLB means the BIOS' data
	 * areas are demapped on most systems.
	 */
	delay(20*1000);		/* Let any UART FIFO drain... */

	tlb_set_page_mask(TLB_PAGE_MASK);
	tlb_set_wired(0);
	tlb_flush(bootcpu_hwinfo.tlbsize);
	tlb_set_wired(UPAGES / 2);

	/*
	 * Get a console, very early but after initial mapping setup.
	 */
	consinit();
	printf("Initial setup done, switching console.\n");

	/*
	 * Init message buffer.
	 */
	msgbufbase = (caddr_t)pmap_steal_memory(MSGBUFSIZE, NULL, NULL);
	initmsgbuf(msgbufbase, MSGBUFSIZE);

	/*
	 * Allocate U page(s) for proc[0], pm_tlbpid 1.
	 */
	proc0.p_addr = proc0paddr = curcpu()->ci_curprocpaddr =
	    (struct user *)pmap_steal_memory(USPACE, NULL, NULL);
	proc0.p_md.md_regs = (struct trap_frame *)&proc0paddr->u_pcb.pcb_regs;
	tlb_set_pid(1);

	/*
	 * Bootstrap VM system.
	 */
	pmap_bootstrap();

	/*
	 * Copy down exception vector code.
	 */
	bcopy(exception, (char *)CACHE_ERR_EXC_VEC, e_exception - exception);
	bcopy(exception, (char *)GEN_EXC_VEC, e_exception - exception);

	/*
	 * Build proper TLB refill handler trampolines.
	 */
	switch (cputype) {
#if defined(CPU_R5000) || defined(CPU_RM7000)
	case MIPS_R5000:
	    {
		/*
		 * R5000 processors need a specific chip bug workaround
		 * in their tlb handlers.  Theoretically only revision 1
		 * of the processor need it, but there is evidence
		 * later versions also need it.
		 *
		 * This is also necessary on RM52x0 and most RM7k/RM9k,
		 * and is a documented errata for these chips.
		 */
		extern void xtlb_miss_err_r5k;
		xtlb_handler = (vaddr_t)&xtlb_miss_err_r5k;
	    }
		break;
#endif
	default:
	    {
		extern void xtlb_miss;
		xtlb_handler = (vaddr_t)&xtlb_miss;
	    }
		break;
	}

	build_trampoline(TLB_MISS_EXC_VEC, xtlb_handler);
	build_trampoline(XTLB_MISS_EXC_VEC, xtlb_handler);

	/*
	 * Turn off bootstrap exception vectors.
	 */
	setsr(getsr() & ~SR_BOOT_EXC_VEC);
	proc0.p_md.md_regs->sr = getsr();

	/*
	 * Clear out the I and D caches.
	 */
	Mips_SyncCache(curcpu());

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/*
	 * Return new stack pointer.
	 */
	return ((caddr_t)proc0paddr + USPACE - 64);
}
Example #13
/*
 * Interrupt dispatcher.
 */
uint32_t
obio_iointr(uint32_t hwpend, struct trap_frame *frame)
{
	struct cpu_info *ci = curcpu();
	int cpuid = cpu_number();
	uint64_t imr, isr, mask;
	int ipl;
	int bit;
	struct intrhand *ih;
	int rc;
	uint64_t sum0 = CIU_IP2_SUM0(cpuid);
	uint64_t en0 = CIU_IP2_EN0(cpuid);

	isr = bus_space_read_8(&obio_tag, obio_h, sum0);
	imr = bus_space_read_8(&obio_tag, obio_h, en0);
	bit = 63;

	isr &= imr;
	if (isr == 0)
		return 0;	/* not for us */

	/*
	 * Mask all pending interrupts.
	 */
	bus_space_write_8(&obio_tag, obio_h, en0, imr & ~isr);

	/*
	 * If interrupts are spl-masked, mask them and wait for splx()
	 * to reenable them when necessary.
	 */
	if ((mask = isr & obio_imask[cpuid][frame->ipl]) != 0) {
		isr &= ~mask;
		imr &= ~mask;
	}

	/*
	 * Now process allowed interrupts.
	 */
	if (isr != 0) {
		int lvl, bitno;
		uint64_t tmpisr;

		__asm__ (".set noreorder\n");
		ipl = ci->ci_ipl;
		__asm__ ("sync\n\t.set reorder\n");

		/* Service higher level interrupts first */
		for (lvl = NIPLS - 1; lvl != IPL_NONE; lvl--) {
			tmpisr = isr & (obio_imask[cpuid][lvl] ^ obio_imask[cpuid][lvl - 1]);
			if (tmpisr == 0)
				continue;
			for (bitno = bit, mask = 1UL << bitno; mask != 0;
			    bitno--, mask >>= 1) {
				if ((tmpisr & mask) == 0)
					continue;

				rc = 0;
				for (ih = (struct intrhand *)obio_intrhand[bitno];
					ih != NULL;
				    ih = ih->ih_next) {
#ifdef MULTIPROCESSOR
					u_int32_t sr;
#endif
					splraise(ih->ih_level);
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						sr = getsr();
						ENABLEIPI();
						if (ipl < IPL_SCHED)
							__mp_lock(&kernel_lock);
					}
#endif
					if ((*ih->ih_fun)(ih->ih_arg) != 0) {
						rc = 1;
						atomic_add_uint64(&ih->ih_count.ec_count, 1);
					}
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						if (ipl < IPL_SCHED)
							__mp_unlock(&kernel_lock);
						setsr(sr);
					}
#endif
					__asm__ (".set noreorder\n");
					ci->ci_ipl = ipl;
					__asm__ ("sync\n\t.set reorder\n");
				}
				if (rc == 0)
					printf("spurious crime interrupt %d\n", bitno);

				isr ^= mask;
				if ((tmpisr ^= mask) == 0)
					break;
			}
		}

		/*
		 * Reenable interrupts which have been serviced.
		 */
		bus_space_write_8(&obio_tag, obio_h, en0, imr);
	}
Example #14
vaddr_t
mips_init(uint64_t argc, uint64_t argv, uint64_t envp, uint64_t cv,
    char *boot_esym)
{
	uint32_t prid;
	u_long memlo, memhi, cpuspeed;
	vaddr_t xtlb_handler;
	const char *envvar;
	int i;

	extern char start[], edata[], end[];
	extern char exception[], e_exception[];
	extern char *hw_vendor, *hw_prod;
	extern void xtlb_miss;

	/*
	 * Make sure we can access the extended address space.
	 * This is not necessary on real hardware, but some emulators
	 * are not aware of this.
	 */
	setsr(getsr() | SR_KX | SR_UX);

	/*
	 * Clear the compiled BSS segment in OpenBSD code.
	 * PMON is supposed to have done this, though.
	 */

	bzero(edata, end - edata);

	/*
	 * Set up early console output.
	 */

	prid = cp0_get_prid();
	pmon_init((int32_t)argc, (int32_t)argv, (int32_t)envp, (int32_t)cv,
	    prid);
	cn_tab = &pmoncons;

	/*
	 * Reserve space for the symbol table, if it exists.
	 */

	/* Attempt to locate ELF header and symbol table after kernel. */
	if (end[0] == ELFMAG0 && end[1] == ELFMAG1 &&
	    end[2] == ELFMAG2 && end[3] == ELFMAG3) {
		/* ELF header exists directly after kernel. */
		ssym = end;
		esym = boot_esym;
		ekern = esym;
	} else {
		ssym = (char *)(vaddr_t)*(int32_t *)end;
		if (((long)ssym - (long)end) >= 0 &&
		    ((long)ssym - (long)end) <= 0x1000 &&
		    ssym[0] == ELFMAG0 && ssym[1] == ELFMAG1 &&
		    ssym[2] == ELFMAG2 && ssym[3] == ELFMAG3) {
			/* Pointers exist directly after kernel. */
			esym = (char *)(vaddr_t)*((int32_t *)end + 1);
			ekern = esym;
		} else {
			/* Pointers aren't setup either... */
			ssym = NULL;
			esym = NULL;
			ekern = end;
		}
	}

	/*
	 * While the kernel supports other processor types than Loongson,
	 * we are currently not expecting to run on a system with a
	 * different processor.  Just to be on the safe side, refuse to
	 * run on non-Loongson2 processors for now.
	 */

	switch ((prid >> 8) & 0xff) {
	case MIPS_LOONGSON2:
		switch (prid & 0xff) {
#ifdef CPU_LOONGSON2
#ifdef CPU_LOONGSON2C
		case 0x00:
			loongson_ver = 0x2c;
			break;
#endif
		case 0x02:
			loongson_ver = 0x2e;
			break;
		case 0x03:
			loongson_ver = 0x2f;
			break;
#endif
#ifdef CPU_LOONGSON3
		case 0x05:
			loongson_ver = 0x3a;
			break;
#endif
		default:
			break;
		}
	}
	if (loongson_ver == 0) {
		pmon_printf("This kernel doesn't support processor type 0x%x"
		    ", version %d.%d.\n",
		    (prid >> 8) & 0xff, (prid >> 4) & 0x0f, prid & 0x0f);
		goto unsupported;
	}
Example #15
/*
 *  Interrupt handler for targets using the internal count register
 *  as interval clock. Normally the system is run with the clock
 *  interrupt always enabled. Masking is done here and if the clock
 *  can not be run the tick is just counted and handled later when
 *  the clock is logically unmasked again.
 */
uint32_t
cp0_int5(uint32_t mask, struct trapframe *tf)
{
	u_int32_t clkdiff;
	struct cpu_info *ci = curcpu();

	/*
	 * If we got an interrupt before we got ready to process it,
	 * retrigger it as far as possible. cpu_initclocks() will
	 * take care of retriggering it correctly.
	 */
	if (ci->ci_clock_started == 0) {
		cp0_set_compare(cp0_get_count() - 1);

		return CR_INT_5;
	}

	/*
	 * Count how many ticks have passed since the last clock interrupt...
	 */
	clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
	while (clkdiff >= ci->ci_cpu_counter_interval) {
		ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
		clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
		ci->ci_pendingticks++;
	}
	ci->ci_pendingticks++;
	ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;

	/*
	 * Set up next tick, and check if it has just been hit; in this
	 * case count it and schedule one tick ahead.
	 */
	cp0_set_compare(ci->ci_cpu_counter_last);
	clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
	if ((int)clkdiff >= 0) {
		ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
		ci->ci_pendingticks++;
		cp0_set_compare(ci->ci_cpu_counter_last);
	}

	/*
	 * Process clock interrupt unless it is currently masked.
	 */
	if (tf->ipl < IPL_CLOCK) {
#ifdef MULTIPROCESSOR
		register_t sr;

		sr = getsr();
		ENABLEIPI();
#endif
		while (ci->ci_pendingticks) {
			cp0_clock_count.ec_count++;
			hardclock(tf);
			ci->ci_pendingticks--;
		}
#ifdef MULTIPROCESSOR
		setsr(sr);
#endif
	}

	return CR_INT_5;	/* Clock is always on 5 */
}
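The clkdiff arithmetic above relies on unsigned 32-bit wraparound: cp0_get_count() minus ci_cpu_counter_last yields the number of elapsed cycles even when the count register has rolled over in between. A standalone illustration with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t last = 0xfffffff0U;	/* counter value at the previous tick */
	uint32_t now  = 0x00000010U;	/* counter has since wrapped past zero */
	uint32_t clkdiff = now - last;	/* modulo 2^32: 0x20 cycles elapsed */

	printf("elapsed: 0x%x\n", clkdiff);
	return 0;
}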
Example #16
/*
 * Interrupt dispatcher.
 */
uint32_t
INTR_FUNCTIONNAME(uint32_t hwpend, struct trap_frame *frame)
{
	struct cpu_info *ci = curcpu();
	uint64_t imr, isr, mask;
	int ipl;
	int bit;
	struct intrhand *ih;
	int rc, ret;
	INTR_LOCAL_DECLS

	INTR_GETMASKS;

	isr &= imr;
	if (isr == 0)
		return 0;	/* not for us */

	/*
	 * Mask all pending interrupts.
	 */
	INTR_MASKPENDING;

	/*
	 * If interrupts are spl-masked, mask them and wait for splx()
	 * to reenable them when necessary.
	 */
	if ((mask = isr & INTR_IMASK(frame->ipl)) != 0) {
		isr &= ~mask;
		imr &= ~mask;
	}

	/*
	 * Now process allowed interrupts.
	 */
	if (isr != 0) {
		int lvl, bitno;
		uint64_t tmpisr;

		__asm__ (".set noreorder\n");
		ipl = ci->ci_ipl;
		__asm__ ("sync\n\t.set reorder\n");

		/* Service higher level interrupts first */
		for (lvl = NIPLS - 1; lvl != IPL_NONE; lvl--) {
			tmpisr = isr & (INTR_IMASK(lvl) ^ INTR_IMASK(lvl - 1));
			if (tmpisr == 0)
				continue;
			for (bitno = bit, mask = 1UL << bitno; mask != 0;
			    bitno--, mask >>= 1) {
				if ((tmpisr & mask) == 0)
					continue;

				rc = 0;
				for (ih = INTR_HANDLER(bitno); ih != NULL;
				    ih = ih->ih_next) {
#ifdef MULTIPROCESSOR
					u_int32_t sr;
#endif
#if defined(INTR_HANDLER_SKIP)
					if (INTR_HANDLER_SKIP(ih) != 0)
						continue;
#endif
					splraise(ih->ih_level);
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						sr = getsr();
						ENABLEIPI();
						if (ipl < IPL_SCHED)
							__mp_lock(&kernel_lock);
					}
#endif
					ret = (*ih->ih_fun)(ih->ih_arg);
					if (ret != 0) {
						rc = 1;
						atomic_add_uint64(&ih->ih_count.ec_count, 1);
					}
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						if (ipl < IPL_SCHED)
							__mp_unlock(&kernel_lock);
						setsr(sr);
					}
#endif
					__asm__ (".set noreorder\n");
					ci->ci_ipl = ipl;
					__asm__ ("sync\n\t.set reorder\n");
					if (ret == 1)
						break;
				}
				if (rc == 0)
					INTR_SPURIOUS(bitno);

				isr ^= mask;
				if ((tmpisr ^= mask) == 0)
					break;
			}
		}

		/*
		 * Reenable interrupts which have been serviced.
		 */
		INTR_MASKRESTORE;
	}
Example #17
caddr_t
mips_init(int argc, void *argv, caddr_t boot_esym)
{
	char *cp;
	int i;
	caddr_t sd;
	u_int cputype;
	vaddr_t tlb_handler, xtlb_handler;
	extern char start[], edata[], end[];
	extern char exception[], e_exception[];
	extern char *hw_vendor, *hw_prod;
	extern void tlb_miss;
	extern void tlb_miss_err_r5k;
	extern void xtlb_miss;
	extern void xtlb_miss_err_r5k;

	/*
	 * Make sure we can access the extended address space.
	 * Note that r10k and later do not allow XUSEG accesses
	 * from kernel mode unless SR_UX is set.
	 */
	setsr(getsr() | SR_KX | SR_UX);

#ifdef notyet
	/*
	 * Make sure KSEG0 cacheability match what we intend to use.
	 *
	 * XXX This does not work as expected on IP30. Does ARCBios
	 * XXX depend on this?
	 */
	cp0_setcfg((cp0_getcfg() & ~0x07) | CCA_CACHED);
#endif

	/*
	 * Clear the compiled BSS segment in OpenBSD code.
	 */
	bzero(edata, end - edata);

	/*
	 * Reserve space for the symbol table, if it exists.
	 */
	ssym = (char *)*(u_int64_t *)end;

	/* Attempt to locate ELF header and symbol table after kernel. */
	if (end[0] == ELFMAG0 && end[1] == ELFMAG1 &&
	    end[2] == ELFMAG2 && end[3] == ELFMAG3 ) {

		/* ELF header exists directly after kernel. */
		ssym = end;
		esym = boot_esym;
		ekern = esym;

	} else if (((long)ssym - (long)end) >= 0 &&
	    ((long)ssym - (long)end) <= 0x1000 &&
	    ssym[0] == ELFMAG0 && ssym[1] == ELFMAG1 &&
	    ssym[2] == ELFMAG2 && ssym[3] == ELFMAG3 ) {

		/* Pointers exist directly after kernel. */
		esym = (char *)*((u_int64_t *)end + 1);
		ekern = esym;

	} else {

		/* Pointers aren't setup either... */
		ssym = NULL;
		esym = NULL;
		ekern = end;
	}

	/*
	 * Initialize the system type and set up memory layout.
	 * Note that some systems have a more complex memory setup.
	 */
	bios_ident();

	/*
	 * Determine system type and set up configuration record data.
	 */
	hw_vendor = "SGI";
	switch (sys_config.system_type) {
#if defined(TGT_O2)
	case SGI_O2:
		bios_printf("Found SGI-IP32, setting up.\n");
		hw_prod = "O2";
		strlcpy(cpu_model, "IP32", sizeof(cpu_model));
		ip32_setup();

		sys_config.cpu[0].clock = 180000000;  /* Reasonable default */
		cp = Bios_GetEnvironmentVariable("cpufreq");
		if (cp && atoi(cp, 10, NULL) > 100)
			sys_config.cpu[0].clock = atoi(cp, 10, NULL) * 1000000;

		break;
#endif

#if defined(TGT_ORIGIN200) || defined(TGT_ORIGIN2000)
	case SGI_O200:
		bios_printf("Found SGI-IP27, setting up.\n");
		hw_prod = "Origin 200";
		strlcpy(cpu_model, "IP27", sizeof(cpu_model));
		ip27_setup();

		break;

	case SGI_O300:
		bios_printf("Found SGI-IP35, setting up.\n");
		hw_prod = "Origin 300";
		/* IP27 is intentional, we use the same kernel */
		strlcpy(cpu_model, "IP27", sizeof(cpu_model));
		ip27_setup();

		break;
#endif

#if defined(TGT_OCTANE)
	case SGI_OCTANE:
		bios_printf("Found SGI-IP30, setting up.\n");
		hw_prod = "Octane";
		strlcpy(cpu_model, "IP30", sizeof(cpu_model));
		ip30_setup();

		sys_config.cpu[0].clock = 175000000;  /* Reasonable default */
		cp = Bios_GetEnvironmentVariable("cpufreq");
		if (cp && atoi(cp, 10, NULL) > 100)
			sys_config.cpu[0].clock = atoi(cp, 10, NULL) * 1000000;

		break;
#endif

	default:
		bios_printf("Kernel doesn't support this system type!\n");
		bios_printf("Halting system.\n");
		Bios_Halt();
		while(1);
	}

	/*
	 * Read and store console type.
	 */
	cp = Bios_GetEnvironmentVariable("ConsoleOut");
	if (cp != NULL && *cp != '\0')
		strlcpy(bios_console, cp, sizeof bios_console);

	/* Disable serial console if ARCS is telling us to use video. */
	if (strncmp(bios_console, "video", 5) == 0)
		comconsaddr = 0;

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
	boothowto = RB_AUTOBOOT;

	dobootopts(argc, argv);

	/*
	 * Figure out where we supposedly booted from.
	 */
	cp = Bios_GetEnvironmentVariable("OSLoadPartition");
	if (cp == NULL)
		cp = "unknown";
	if (strlcpy(osloadpartition, cp, sizeof osloadpartition) >=
	    sizeof osloadpartition)
		bios_printf("Value of `OSLoadPartition' is too large.\n"
		 "The kernel might not be able to find out its root device.\n");

	/*
	 * Read platform-specific environment variables.
	 */
	switch (sys_config.system_type) {
#if defined(TGT_O2)
	case SGI_O2:
		/* Get Ethernet address from ARCBIOS. */
		cp = Bios_GetEnvironmentVariable("eaddr");
		if (cp != NULL && strlen(cp) > 0)
			strlcpy(bios_enaddr, cp, sizeof bios_enaddr);
		break;
#endif
	default:
		break;
	}

	/*
	 * Set pagesize to enable use of page macros and functions.
	 * Commit available memory to UVM system.
	 */
	uvmexp.pagesize = PAGE_SIZE;
	uvm_setpagesize();

	for (i = 0; i < MAXMEMSEGS && mem_layout[i].mem_first_page != 0; i++) {
		u_int32_t fp, lp;
		u_int32_t firstkernpage, lastkernpage;
		unsigned int freelist;
		paddr_t firstkernpa, lastkernpa;

		if (IS_XKPHYS((vaddr_t)start))
			firstkernpa = XKPHYS_TO_PHYS((vaddr_t)start);
		else
			firstkernpa = KSEG0_TO_PHYS((vaddr_t)start);
		if (IS_XKPHYS((vaddr_t)ekern))
			lastkernpa = XKPHYS_TO_PHYS((vaddr_t)ekern);
		else
			lastkernpa = KSEG0_TO_PHYS((vaddr_t)ekern);

		firstkernpage = atop(trunc_page(firstkernpa));
		lastkernpage = atop(round_page(lastkernpa));

		fp = mem_layout[i].mem_first_page;
		lp = mem_layout[i].mem_last_page;
		freelist = mem_layout[i].mem_freelist;

		/* Account for kernel and kernel symbol table. */
		if (fp >= firstkernpage && lp < lastkernpage)
			continue;	/* In kernel. */

		if (lp < firstkernpage || fp > lastkernpage) {
			uvm_page_physload(fp, lp, fp, lp, freelist);
			continue;	/* Outside kernel. */
		}

		if (fp >= firstkernpage)
			fp = lastkernpage;
		else if (lp < lastkernpage)
			lp = firstkernpage;
		else { /* Need to split! */
			u_int32_t xp = firstkernpage;
			uvm_page_physload(fp, xp, fp, xp, freelist);
			fp = lastkernpage;
		}
		if (lp > fp)
			uvm_page_physload(fp, lp, fp, lp, freelist);
	}


	switch (sys_config.system_type) {
#if defined(TGT_O2) || defined(TGT_OCTANE)
	case SGI_O2:
	case SGI_OCTANE:
		sys_config.cpu[0].type = (cp0_get_prid() >> 8) & 0xff;
		sys_config.cpu[0].vers_maj = (cp0_get_prid() >> 4) & 0x0f;
		sys_config.cpu[0].vers_min = cp0_get_prid() & 0x0f;
		sys_config.cpu[0].fptype = (cp1_get_prid() >> 8) & 0xff;
		sys_config.cpu[0].fpvers_maj = (cp1_get_prid() >> 4) & 0x0f;
		sys_config.cpu[0].fpvers_min = cp1_get_prid() & 0x0f;

		/*
		 * Configure TLB.
		 */
		switch(sys_config.cpu[0].type) {
		case MIPS_RM7000:
			/* Rev A (version >= 2) CPU's have 64 TLB entries. */
			if (sys_config.cpu[0].vers_maj < 2) {
				sys_config.cpu[0].tlbsize = 48;
			} else {
				sys_config.cpu[0].tlbsize = 64;
			}
			break;

		case MIPS_R10000:
		case MIPS_R12000:
		case MIPS_R14000:
			sys_config.cpu[0].tlbsize = 64;
			break;

		default:
			sys_config.cpu[0].tlbsize = 48;
			break;
		}
		break;
#endif
	default:
		break;
	}

	/*
	 * Configure cache.
	 */
	switch(sys_config.cpu[0].type) {
	case MIPS_R10000:
	case MIPS_R12000:
	case MIPS_R14000:
		cputype = MIPS_R10000;
		break;
	case MIPS_R5000:
	case MIPS_RM7000:
	case MIPS_RM52X0:
	case MIPS_RM9000:
		cputype = MIPS_R5000;
		break;
	default:
		/*
		 * If we can't identify the cpu type, it must be
		 * r10k-compatible on Octane and Origin families, and
		 * it is likely to be r5k-compatible on O2.
		 */
		switch (sys_config.system_type) {
		case SGI_O2:
			cputype = MIPS_R5000;
			break;
		default:
		case SGI_OCTANE:
		case SGI_O200:
		case SGI_O300:
			cputype = MIPS_R10000;
			break;
		}
		break;
	}
	switch (cputype) {
	case MIPS_R10000:
		Mips10k_ConfigCache();
		sys_config._SyncCache = Mips10k_SyncCache;
		sys_config._InvalidateICache = Mips10k_InvalidateICache;
		sys_config._InvalidateICachePage = Mips10k_InvalidateICachePage;
		sys_config._SyncDCachePage = Mips10k_SyncDCachePage;
		sys_config._HitSyncDCache = Mips10k_HitSyncDCache;
		sys_config._IOSyncDCache = Mips10k_IOSyncDCache;
		sys_config._HitInvalidateDCache = Mips10k_HitInvalidateDCache;
		break;
	default:
	case MIPS_R5000:
		Mips5k_ConfigCache();
		sys_config._SyncCache = Mips5k_SyncCache;
		sys_config._InvalidateICache = Mips5k_InvalidateICache;
		sys_config._InvalidateICachePage = Mips5k_InvalidateICachePage;
		sys_config._SyncDCachePage = Mips5k_SyncDCachePage;
		sys_config._HitSyncDCache = Mips5k_HitSyncDCache;
		sys_config._IOSyncDCache = Mips5k_IOSyncDCache;
		sys_config._HitInvalidateDCache = Mips5k_HitInvalidateDCache;
		break;
	}

	/*
	 * Last chance to call the BIOS. Wiping the TLB means the BIOS' data
	 * areas are demapped on most systems.
	 */
	delay(20*1000);		/* Let any UART FIFO drain... */

	sys_config.cpu[0].tlbwired = UPAGES / 2;
	tlb_set_wired(0);
	tlb_flush(sys_config.cpu[0].tlbsize);
	tlb_set_wired(sys_config.cpu[0].tlbwired);

	/*
	 * Get a console, very early but after initial mapping setup.
	 */
	consinit();
	printf("Initial setup done, switching console.\n");

	/*
	 * Init message buffer.
	 */
	msgbufbase = (caddr_t)pmap_steal_memory(MSGBUFSIZE, NULL,NULL);
	initmsgbuf(msgbufbase, MSGBUFSIZE);

	/*
	 * Allocate U page(s) for proc[0], pm_tlbpid 1.
	 */
	proc0.p_addr = proc0paddr = curprocpaddr =
	    (struct user *)pmap_steal_memory(USPACE, NULL, NULL);
	proc0.p_md.md_regs = (struct trap_frame *)&proc0paddr->u_pcb.pcb_regs;
	tlb_set_pid(1);

	/*
	 * Allocate system data structures.
	 */
	i = (vsize_t)allocsys(NULL);
	sd = (caddr_t)pmap_steal_memory(i, NULL, NULL);
	allocsys(sd);

	/*
	 * Bootstrap VM system.
	 */
	pmap_bootstrap();

	/*
	 * Copy down exception vector code.
	 */
	bcopy(exception, (char *)CACHE_ERR_EXC_VEC, e_exception - exception);
	bcopy(exception, (char *)GEN_EXC_VEC, e_exception - exception);

	/*
	 * Build proper TLB refill handler trampolines.
	 */
	switch (cputype) {
	case MIPS_R5000:
		/*
		 * R5000 processors need a specific chip bug workaround
		 * in their tlb handlers.  Theoretically only revision 1
		 * of the processor need it, but there is evidence
		 * later versions also need it.
		 *
		 * This is also necessary on RM52x0; we test on the `rounded'
		 * cputype value instead of sys_config.cpu[0].type; this
		 * causes RM7k and RM9k to be included, just to be on the
		 * safe side.
		 */
		tlb_handler = (vaddr_t)&tlb_miss_err_r5k;
		xtlb_handler = (vaddr_t)&xtlb_miss_err_r5k;
		break;
	default:
		tlb_handler = (vaddr_t)&tlb_miss;
		xtlb_handler = (vaddr_t)&xtlb_miss;
		break;
	}

	build_trampoline(TLB_MISS_EXC_VEC, tlb_handler);
	build_trampoline(XTLB_MISS_EXC_VEC, xtlb_handler);

	/*
	 * Turn off bootstrap exception vectors.
	 */
	setsr(getsr() & ~SR_BOOT_EXC_VEC);
	proc0.p_md.md_regs->sr = getsr();

	/*
	 * Clear out the I and D caches.
	 */
	Mips_SyncCache();

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/*
	 * Return new stack pointer.
	 */
	return ((caddr_t)proc0paddr + USPACE - 64);
}