Example #1
/*
 * Console initialization: called early on from main,
 * before vm init or cpu_startup.  This system is able
 * to use the console for output immediately (via PROM)
 * but can not use it for input until after this point.
 */
void 
consinit(void)
{

	/*
	 * Switch from the PROM console (output only)
	 * to our own console driver.
	 */
	cninit();

#if NKSYMS || defined(DDB) || defined(LKM)
	{
		extern int nsym;
		extern char *ssym, *esym;

		ksyms_init(nsym, ssym, esym);
	}
#endif	/* NKSYMS || DDB || LKM */

	/*
	 * Now that the console can do input as well as
	 * output, consider stopping for a debugger.
	 */
	if (boothowto & RB_KDB) {
#ifdef KGDB
		/* XXX - Ask on console for kgdb_dev? */
		/* Note: this will just return if kgdb_dev==NODEV */
		kgdb_connect(1);
#else	/* KGDB */
		/* Either DDB or no debugger (just PROM). */
		Debugger();
#endif	/* KGDB */
	}
}
Example #2
void
consinit(void)
{

	/*
	 * Initialize the console before we print anything out.
	 */
	physaccess((void*)virtual_avail,
	    (void*)0x58000000, PAGE_SIZE, PG_RW|PG_CI);
	zs_cnattach((void*)virtual_avail);
	virtual_avail += PAGE_SIZE;

#ifdef KGDB
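	/*
	 * XXX: hardwire the kgdb device; kgdb shares the zs channel
	 * attached as console above.
	 */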
	kgdb_dev = 1;
	kgdb_attach((void*)zscngetc, (void*)zscnputc, (void *)0);

	if (boothowto & RB_KDB) {
		kgdb_connect(1);
		zscons.cn_putc = zs_kgdb_cnputc;
		zscons.cn_getc = zs_kgdb_cngetc;
	}
#endif
#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif
	sic_enable_int(39, 2, 1, 7, 0); /* NMI */
}
Example #3
/*
 * Decide what to do on panic.
 */
void
kgdb_panic(void)
{

	if (kgdb_active == 0 && kgdb_debug_panic && kgdb_dev != NODEV)
		kgdb_connect(1);
}
Example #4
/*
 * Do general device autoconfiguration,
 * then choose root device (etc.)
 * Called by sys/kern/subr_autoconf.c: configure()
 */
void 
cpu_configure(void)
{

	/*
	 * Consider stopping for a debugger before
	 * autoconfiguration.
	 */
	if (boothowto & RB_KDB) {
#ifdef KGDB
		/* XXX - Ask on console for kgdb_dev? */
		/* Note: this will just return if kgdb_dev==NODEV */
		kgdb_connect(1);
#else	/* KGDB */
		/* Either DDB or no debugger (just PROM). */
		Debugger();
#endif	/* KGDB */
	}

	/* General device autoconfiguration. */
	if (config_rootfound("mainbus", NULL) == NULL)
		panic("%s: mainbus not found", __func__);

	/*
	 * Now that device autoconfiguration is finished,
	 * we can safely enable interrupts.
	 */
	printf("enabling interrupts\n");
	(void)spl0();
}
Example #5
/*
 * kgdb_panic:
 *
 *	Decide what to do on panic.  (This is called by panic(),
 *	like Debugger().)
 */
void
kgdb_panic(void)
{
	if (kgdb_dev >= 0 && kgdb_debug_panic) {
		printf("entering kgdb\n");
		kgdb_connect(kgdb_active == 0);
	}
}
Example #6
void
initppc(vaddr_t startkernel, vaddr_t endkernel)
{
	paddr_t			addr, memsize;

	/* Initialize cache info for memcpy, memset, etc. */
	cpu_probe_cache();

	memset(physmemr, 0, sizeof(physmemr)); 		/* [1].size = 0 */
	memset(availmemr, 0, sizeof(availmemr)); 	/* [1].size = 0 */

	memsize = (PHYSMEM * 1024 * 1024) & ~PGOFSET;	/* PHYSMEM is in MB */

	physmemr[0].start 	= 0;
	physmemr[0].size 	= memsize;

	availmemr[0].start 	= startkernel;
	availmemr[0].size 	= memsize - availmemr[0].start;

	/* Map kernel memory linearly. */
	for (addr = 0; addr < endkernel; addr += TLB_PG_SIZE)
		ppc4xx_tlb_reserve(addr, addr, TLB_PG_SIZE, TLB_EX);

	/* Give design-specific code a hint for reserved mappings. */
	virtex_machdep_init(roundup(memsize, TLB_PG_SIZE), TLB_PG_SIZE,
	    physmemr, availmemr);

	ibm4xx_init(startkernel, endkernel, pic_ext_intr);

#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif
#ifdef IPKDB
	/*
	 * Now trap to IPKDB
	 */
	ipkdb_init();
	if (boothowto & RB_KDB)
		ipkdb_connect(0);
#endif
#ifdef KGDB
	/*
	 * Now trap to KGDB
	 */
	kgdb_connect(1);
#endif /* KGDB */

	/*
	 * Look for the ibm4xx modules in the right place.
	 */
	module_machine = module_machine_ibm4xx;
}
Example #7
void
dreamcast_startup(void)
{
	extern char edata[], end[];
	paddr_t kernend;

	/* Clear bss */
	memset(edata, 0, end - edata);

	/* Initialize CPU ops. */
	sh_cpu_init(CPU_ARCH_SH4, CPU_PRODUCT_7750);

	/* Console */
	consinit();

	/* Load memory to UVM */
	physmem = atop(IOM_RAM_SIZE);
	kernend = atop(round_page(SH3_P1SEG_TO_PHYS(end)));
	uvm_page_physload(
		kernend, atop(IOM_RAM_BEGIN + IOM_RAM_SIZE),
		kernend, atop(IOM_RAM_BEGIN + IOM_RAM_SIZE),
		VM_FREELIST_DEFAULT);

	/* Initialize proc0 u-area */
	sh_proc0_init();

	/* Initialize pmap and start to address translation */
	pmap_bootstrap();

	/* Debugger. */
#ifdef DDB
	ddb_init(0, NULL, NULL);
#endif
#if defined(KGDB) && (NSCIF > 0)
	if (scif_kgdb_init() == 0) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif /* KGDB && NSCIF > 0 */

	/* Jump to main */
	__asm__ __volatile__(
		"jmp	@%0;"
		"mov	%1, sp"
		:: "r"(main),"r"(proc0.p_md.md_pcb->pcb_sf.sf_r7_bank));
	/* NOTREACHED */
	while (1)
		;
}
Example #8
int
sscomrxintr(void *arg)
{
	struct sscom_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_char *put, *end;
	u_int cc;

	if (SSCOM_ISALIVE(sc) == 0)
		return 0;

	SSCOM_LOCK(sc);

	end = sc->sc_ebuf;
	put = sc->sc_rbput;
	cc = sc->sc_rbavail;

	do {
		u_char	msts, delta;
		u_char  uerstat;
		uint32_t ufstat;

		ufstat = bus_space_read_4(iot, ioh, SSCOM_UFSTAT);

		/* XXX: break interrupt with no character? */

		if ( (ufstat & (UFSTAT_RXCOUNT|UFSTAT_RXFULL)) &&
		    !ISSET(sc->sc_rx_flags, RX_IBUF_OVERFLOWED)) {

			while (cc > 0) {
				int cn_trapped = 0;

				/* get status and received character.
				   read status register first */
				uerstat = sscom_geterr(iot, ioh);
				put[0] = sscom_getc(iot, ioh);

				if (ISSET(uerstat, UERSTAT_BREAK)) {
					/*
					 * cn_check_magic() reports a trapped
					 * character through the cn_trapped
					 * local declared above; don't shadow
					 * it with a differently-named copy.
					 */
					cn_check_magic(sc->sc_tty->t_dev,
					    CNC_BREAK, sscom_cnm_state);
					if (cn_trapped)
						continue;
#if defined(KGDB)
					if (ISSET(sc->sc_hwflags,
					    SSCOM_HW_KGDB)) {
						kgdb_connect(1);
						continue;
					}
#endif
				}

				put[1] = uerstat;
				cn_check_magic(sc->sc_tty->t_dev,
					       put[0], sscom_cnm_state);
				if (!cn_trapped) {
					put += 2;
					if (put >= end)
						put = sc->sc_rbuf;
					cc--;
				}

				ufstat = bus_space_read_4(iot, ioh, SSCOM_UFSTAT);
				if ( (ufstat & (UFSTAT_RXFULL|UFSTAT_RXCOUNT)) == 0 )
					break;
			}

			/*
			 * Current string of incoming characters ended because
			 * no more data was available or we ran out of space.
			 * Schedule a receive event if any data was received.
			 * If we're out of space, turn off receive interrupts.
			 */
			sc->sc_rbput = put;
			sc->sc_rbavail = cc;
			if (!ISSET(sc->sc_rx_flags, RX_TTY_OVERFLOWED))
				sc->sc_rx_ready = 1;

			/*
			 * See if we are in danger of overflowing a buffer. If
			 * so, use hardware flow control to ease the pressure.
			 */
			if (!ISSET(sc->sc_rx_flags, RX_IBUF_BLOCKED) &&
			    cc < sc->sc_r_hiwat) {
				SET(sc->sc_rx_flags, RX_IBUF_BLOCKED);
				sscom_hwiflow(sc);
			}

			/*
			 * If we're out of space, disable receive interrupts
			 * until the queue has drained a bit.
			 */
			if (!cc) {
				SET(sc->sc_rx_flags, RX_IBUF_OVERFLOWED);
				sscom_disable_rxint(sc);
				sc->sc_ucon &= ~UCON_ERRINT;
				bus_space_write_4(iot, ioh, SSCOM_UCON, sc->sc_ucon);
			}
		}


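		/* Check for any change in modem status. */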
		msts = sc->sc_read_modem_status(sc);
		delta = msts ^ sc->sc_msts;
		sc->sc_msts = msts;

#ifdef notyet
		/*
		 * Pulse-per-second (PPS) signals on edge of DCD?
		 * Process these even if line discipline is ignoring DCD.
		 */
		if (delta & sc->sc_ppsmask) {
			struct timeval tv;
			if ((msts & sc->sc_ppsmask) == sc->sc_ppsassert) {
				/* XXX nanotime() */
				microtime(&tv);
				TIMEVAL_TO_TIMESPEC(&tv, 
				    &sc->ppsinfo.assert_timestamp);
				if (sc->ppsparam.mode & PPS_OFFSETASSERT) {
					timespecadd(&sc->ppsinfo.assert_timestamp,
					    &sc->ppsparam.assert_offset,
						    &sc->ppsinfo.assert_timestamp);
				}

#ifdef PPS_SYNC
				if (sc->ppsparam.mode & PPS_HARDPPSONASSERT)
					hardpps(&tv, tv.tv_usec);
#endif
				sc->ppsinfo.assert_sequence++;
				sc->ppsinfo.current_mode = sc->ppsparam.mode;

			} else if ((msts & sc->sc_ppsmask) == sc->sc_ppsclear) {
				/* XXX nanotime() */
				microtime(&tv);
				TIMEVAL_TO_TIMESPEC(&tv, 
				    &sc->ppsinfo.clear_timestamp);
				if (sc->ppsparam.mode & PPS_OFFSETCLEAR) {
					timespecadd(&sc->ppsinfo.clear_timestamp,
					    &sc->ppsparam.clear_offset,
					    &sc->ppsinfo.clear_timestamp);
				}

#ifdef PPS_SYNC
				if (sc->ppsparam.mode & PPS_HARDPPSONCLEAR)
					hardpps(&tv, tv.tv_usec);
#endif
				sc->ppsinfo.clear_sequence++;
				sc->ppsinfo.current_mode = sc->ppsparam.mode;
			}
		}
#endif

		/*
		 * Process normal status changes
		 */
		if (ISSET(delta, sc->sc_msr_mask)) {
			SET(sc->sc_msr_delta, delta);

			/*
			 * Stop output immediately if we lose the output
			 * flow control signal or carrier detect.
			 */
			if (ISSET(~msts, sc->sc_msr_mask)) {
				sc->sc_tbc = 0;
				sc->sc_heldtbc = 0;
#ifdef SSCOM_DEBUG
				if (sscom_debug)
					sscomstatus(sc, "sscomintr  ");
#endif
			}

			sc->sc_st_check = 1;
		}

		/* 
		 * Done handling any receive interrupts. 
		 */

		/*
		 * If we've delayed a parameter change, do it
		 * now, and restart output.
		 */
		if ((ufstat & UFSTAT_TXCOUNT) == 0) {
			/* XXX: we should check transmitter empty also */

			if (sc->sc_heldchange) {
				sscom_loadchannelregs(sc);
				sc->sc_heldchange = 0;
				sc->sc_tbc = sc->sc_heldtbc;
				sc->sc_heldtbc = 0;
			}
		}


	} while (0);

	SSCOM_UNLOCK(sc);

	/* Wake up the poller. */
	softint_schedule(sc->sc_si);

#ifdef RND_COM
	/* XXX: `iir' and `rsr' are com(4) register names, not defined here. */
	rnd_add_uint32(&sc->rnd_source, iir | rsr);
#endif

	return 1;
}
Example #9
/*
 * u_int initarm(...)
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 */
u_int
initarm(void *arg)
{
	/*
	 * When we enter here, we are using a temporary first level
	 * translation table with section entries in it to cover the TIPB
	 * peripherals and SDRAM.  The temporary first level translation table
	 * is at the end of SDRAM.
	 */

	/* Heads up ... Setup the CPU / MMU / TLB functions. */
	if (set_cpufuncs())
		panic("cpu not recognized!");

	init_clocks();

	/* The console is going to try to map things.  Give pmap a devmap. */
	pmap_devmap_register(devmap);
	consinit();
#ifdef KGDB
	kgdb_port_init();
#endif

#ifdef VERBOSE_INIT_ARM
	/* Talk to the user */
	printf("\nNetBSD/evbarm (OSK5912) booting ...\n");
#endif

#ifdef BOOT_ARGS
	char mi_bootargs[] = BOOT_ARGS;
	parse_mi_bootargs(mi_bootargs);
#endif

#ifdef VERBOSE_INIT_ARM
	printf("initarm: Configuring system ...\n");
#endif

	/*
	 * Set up the variables that define the availability of physical
	 * memory.
	 */
	physical_start = KERNEL_BASE_PHYS;
	physical_end = physical_start + MEMSIZE_BYTES;
	physmem = MEMSIZE_BYTES / PAGE_SIZE;

	/* Fake bootconfig structure for the benefit of pmap.c. */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = physical_start;
	bootconfig.dram[0].pages = physmem;

	/*
	 * Our kernel is at the beginning of memory, so set our free space to
	 * all the memory after the kernel.
	 */
	physical_freestart = KERN_VTOPHYS(round_page((vaddr_t) _end));
	physical_freeend = physical_end;
	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

	/*
	 * This is going to do all the hard work of setting up the first and
	 * and second level page tables.  Pages of memory will be allocated
	 * and mapped for other structures that are required for system
	 * operation.  When it returns, physical_freestart and free_pages will
	 * have been updated to reflect the allocations that were made.  In
	 * addition, kernel_l1pt, kernel_pt_table[], systempage, irqstack,
	 * abtstack, undstack, kernelstack, msgbufphys will be set to point to
	 * the memory that was allocated for them.
	 */
	setup_real_page_tables();

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init.
	 */
	proc0paddr = (struct user *)kernelstack.pv_va;
	lwp0.l_addr = proc0paddr;

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE, undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();        /* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	/* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef KGDB
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

#ifdef DDB
	db_machine_init();

	/* Firmware doesn't load symbols. */
	ddb_init(0, NULL, NULL);

	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return (kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
Example #10
/*
 * Do all the stuff that locore normally does before calling main().
 */
void
mach_init(int32_t memsize32, u_int bim, int32_t bip32)
{
	intptr_t memsize = (int32_t)memsize32;
	char *kernend;
	char *bip = (char *)(intptr_t)(int32_t)bip32;
	u_long first, last;
	extern char edata[], end[];
	const char *bi_msg;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	char *ssym = 0;
	struct btinfo_symtab *bi_syms;
#endif
	struct btinfo_howto *bi_howto;

	/*
	 * Clear the BSS segment (if needed).
	 */
	if (memcmp(((Elf_Ehdr *)end)->e_ident, ELFMAG, SELFMAG) == 0 &&
	    ((Elf_Ehdr *)end)->e_ident[EI_CLASS] == ELFCLASS) {
		esym = end;
#if NKSYMS || defined(DDB) || defined(MODULAR)
		esym += ((Elf_Ehdr *)end)->e_entry;
#endif
		kernend = (char *)mips_round_page(esym);
		/*
		 * We don't have to clear BSS here
		 * since our bootloader already does it.
		 */
#if 0
		memset(edata, 0, end - edata);
#endif
	} else {
		kernend = (void *)mips_round_page(end);
		/*
		 * No symbol table, so assume we are loaded by
		 * the firmware directly with "bfd" command.
		 * The firmware loader doesn't clear BSS of
		 * a loaded kernel, so do it here.
		 */
		memset(edata, 0, kernend - edata);

	}

	/*
	 * Copy exception-dispatch code down to exception vector.
	 * Initialize locore-function vector.
	 * Clear out the I and D caches.
	 */
	mips_vector_init(NULL, false);

	/* Check for valid bootinfo passed from bootstrap */
	if (bim == BOOTINFO_MAGIC) {
		struct btinfo_magic *bi_magic;

		bootinfo = bip;
		bi_magic = lookup_bootinfo(BTINFO_MAGIC);
		if (bi_magic == NULL) {
			bi_msg = "missing bootinfo structure";
			bim = (uintptr_t)bip;
		} else if (bi_magic->magic != BOOTINFO_MAGIC) {
			bi_msg = "invalid bootinfo structure";
			bim = bi_magic->magic;
		} else
			bi_msg = NULL;
	} else {
		bi_msg = "invalid bootinfo (standalone boot?)";
	}

#if NKSYMS || defined(DDB) || defined(MODULAR)
	bi_syms = lookup_bootinfo(BTINFO_SYMTAB);

	/* Load symbol table if present */
	if (bi_syms != NULL) {
		ssym = (void *)(intptr_t)bi_syms->ssym;
		esym = (void *)(intptr_t)bi_syms->esym;
		kernend = (void *)mips_round_page(esym);
	}
#endif

	bi_howto = lookup_bootinfo(BTINFO_HOWTO);
	if (bi_howto != NULL)
		boothowto = bi_howto->bi_howto;

	cobalt_id = read_board_id();
	if (cobalt_id >= COBALT_MODELS || cobalt_model[cobalt_id] == NULL)
		cpu_setmodel("Cobalt unknown model (board ID %u)",
		    cobalt_id);
	else
		cpu_setmodel("%s", cobalt_model[cobalt_id]);

	switch (cobalt_id) {
	case COBALT_ID_QUBE2700:
	case COBALT_ID_RAQ:
		cpuspeed = 150; /* MHz */
		break;
	case COBALT_ID_QUBE2:
	case COBALT_ID_RAQ2:
		cpuspeed = 250; /* MHz */
		break;
	default:
		/* assume the fastest, so that delay(9) works */
		cpuspeed = 250;
		break;
	}
	curcpu()->ci_cpu_freq = cpuspeed * 1000 * 1000;
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + (1000000 / 2)) / 1000000);
	/* all models have Rm5200, which is CPU_MIPS_DOUBLE_COUNT */
	curcpu()->ci_cycles_per_hz /= 2;
	curcpu()->ci_divisor_delay /= 2;

	physmem = btoc(memsize - MIPS_KSEG0_START);

	consinit();

	KASSERT(&lwp0 == curlwp);
	if (bi_msg != NULL)
		printf("%s: magic=%#x bip=%p\n", bi_msg, bim, bip);

	uvm_setpagesize();

	/*
	 * The boot command is passed in the top 512 bytes,
	 * so don't clobber that.
	 */
	mem_clusters[0].start = 0;
	mem_clusters[0].size = ctob(physmem) - 512;
	mem_cluster_cnt = 1;

	memcpy(bootstring, (char *)(memsize - 512), 512);
	memset((char *)(memsize - 512), 0, 512);
	bootstring[511] = '\0';

	decode_bootstring();

#if NKSYMS || defined(DDB) || defined(MODULAR)
	/* init symbols if present */
	if ((bi_syms != NULL) && (esym != NULL))
		ksyms_addsyms_elf(esym - ssym, ssym, esym);
#endif
	KASSERT(&lwp0 == curlwp);
#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif
#ifdef KGDB
	if (boothowto & RB_KDB)
		kgdb_connect(0);
#endif

	/*
	 * Load the rest of the available pages into the VM system.
	 */
	first = round_page(MIPS_KSEG0_TO_PHYS(kernend));
	last = mem_clusters[0].start + mem_clusters[0].size;
	uvm_page_physload(atop(first), atop(last), atop(first), atop(last),
	    VM_FREELIST_DEFAULT);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	mips_init_msgbuf();

	pmap_bootstrap();

	/*
	 * Allocate space for proc0's USPACE.
	 */
	mips_init_lwp0_uarea();
}
Example #11
static int
internal_command(struct wskbd_softc *sc, u_int *type, keysym_t ksym,
	keysym_t ksym2)
{
#if NWSDISPLAY > 0 && defined(WSDISPLAY_SCROLLSUPPORT)
	u_int state = 0;
#endif
	switch (ksym) {
	case KS_Cmd_VolumeToggle:
		if (*type == WSCONS_EVENT_KEY_DOWN)
			pmf_event_inject(NULL, PMFE_AUDIO_VOLUME_TOGGLE);
		break;
	case KS_Cmd_VolumeUp:
		if (*type == WSCONS_EVENT_KEY_DOWN)
			pmf_event_inject(NULL, PMFE_AUDIO_VOLUME_UP);
		break;
	case KS_Cmd_VolumeDown:
		if (*type == WSCONS_EVENT_KEY_DOWN)
			pmf_event_inject(NULL, PMFE_AUDIO_VOLUME_DOWN);
		break;
#if NWSDISPLAY > 0 && defined(WSDISPLAY_SCROLLSUPPORT)
	case KS_Cmd_ScrollFastUp:
	case KS_Cmd_ScrollFastDown:
		if (*type == WSCONS_EVENT_KEY_DOWN) {
			GETMODSTATE(sc->id->t_modifiers, state);
			if ((sc->sc_scroll_data.mode == WSKBD_SCROLL_MODE_HOLD
			   	&& MOD_ONESET(sc->id, MOD_HOLDSCREEN))
			|| (sc->sc_scroll_data.mode == WSKBD_SCROLL_MODE_NORMAL
				&& sc->sc_scroll_data.modifier == state)) {
					update_modifier(sc->id, *type, 0, MOD_COMMAND);
					wsdisplay_scroll(sc->sc_base.me_dispdv,
						(ksym == KS_Cmd_ScrollFastUp) ?
						WSDISPLAY_SCROLL_BACKWARD :
						WSDISPLAY_SCROLL_FORWARD);
					return (1);
			} else {
				return (0);
			}
		}
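		/* FALLTHROUGH */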

	case KS_Cmd_ScrollSlowUp:
	case KS_Cmd_ScrollSlowDown:
		if (*type == WSCONS_EVENT_KEY_DOWN) {
			GETMODSTATE(sc->id->t_modifiers, state);
			if ((sc->sc_scroll_data.mode == WSKBD_SCROLL_MODE_HOLD
			   	&& MOD_ONESET(sc->id, MOD_HOLDSCREEN))
			|| (sc->sc_scroll_data.mode == WSKBD_SCROLL_MODE_NORMAL
				&& sc->sc_scroll_data.modifier == state)) {
					update_modifier(sc->id, *type, 0, MOD_COMMAND);
					wsdisplay_scroll(sc->sc_base.me_dispdv,
					   	(ksym == KS_Cmd_ScrollSlowUp) ?
						WSDISPLAY_SCROLL_BACKWARD | WSDISPLAY_SCROLL_LOW:
						WSDISPLAY_SCROLL_FORWARD | WSDISPLAY_SCROLL_LOW);
					return (1);
			} else {
				return (0);
			}
		}
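		/* FALLTHROUGH */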
#endif

	case KS_Cmd:
		update_modifier(sc->id, *type, 0, MOD_COMMAND);
		ksym = ksym2;
		break;

	case KS_Cmd1:
		update_modifier(sc->id, *type, 0, MOD_COMMAND1);
		break;

	case KS_Cmd2:
		update_modifier(sc->id, *type, 0, MOD_COMMAND2);
		break;
	}

	if (*type != WSCONS_EVENT_KEY_DOWN ||
	    (! MOD_ONESET(sc->id, MOD_COMMAND) &&
	     ! MOD_ALLSET(sc->id, MOD_COMMAND1 | MOD_COMMAND2)))
		return (0);

#if defined(DDB) || defined(KGDB)
	if (ksym == KS_Cmd_Debugger) {
		if (sc->sc_isconsole) {
#ifdef DDB
			console_debugger();
#endif
#ifdef KGDB
			kgdb_connect(1);
#endif
		}
		/* discard this key (ddb discarded command modifiers) */
		*type = WSCONS_EVENT_KEY_UP;
		return (1);
	}
#endif

#if NWSDISPLAY > 0
	if (sc->sc_base.me_dispdv == NULL)
		return (0);

	switch (ksym) {
	case KS_Cmd_Screen0:
	case KS_Cmd_Screen1:
	case KS_Cmd_Screen2:
	case KS_Cmd_Screen3:
	case KS_Cmd_Screen4:
	case KS_Cmd_Screen5:
	case KS_Cmd_Screen6:
	case KS_Cmd_Screen7:
	case KS_Cmd_Screen8:
	case KS_Cmd_Screen9:
		wsdisplay_switch(sc->sc_base.me_dispdv, ksym - KS_Cmd_Screen0, 0);
		return (1);
	case KS_Cmd_ResetEmul:
		wsdisplay_reset(sc->sc_base.me_dispdv, WSDISPLAY_RESETEMUL);
		return (1);
	case KS_Cmd_ResetClose:
		wsdisplay_reset(sc->sc_base.me_dispdv, WSDISPLAY_RESETCLOSE);
		return (1);
	case KS_Cmd_BacklightOn:
	case KS_Cmd_BacklightOff:
	case KS_Cmd_BacklightToggle:
		change_displayparam(sc, WSDISPLAYIO_PARAM_BACKLIGHT,
				    ksym == KS_Cmd_BacklightOff ? -1 : 1,
				    ksym == KS_Cmd_BacklightToggle ? 1 : 0);
		return (1);
	case KS_Cmd_BrightnessUp:
	case KS_Cmd_BrightnessDown:
	case KS_Cmd_BrightnessRotate:
		change_displayparam(sc, WSDISPLAYIO_PARAM_BRIGHTNESS,
				    ksym == KS_Cmd_BrightnessDown ? -1 : 1,
				    ksym == KS_Cmd_BrightnessRotate ? 1 : 0);
		return (1);
	case KS_Cmd_ContrastUp:
	case KS_Cmd_ContrastDown:
	case KS_Cmd_ContrastRotate:
		change_displayparam(sc, WSDISPLAYIO_PARAM_CONTRAST,
				    ksym == KS_Cmd_ContrastDown ? -1 : 1,
				    ksym == KS_Cmd_ContrastRotate ? 1 : 0);
		return (1);
	}
#endif

	return (0);
}
Example #12
vaddr_t
initarm_common(vaddr_t kvm_base, vsize_t kvm_size,
	const struct boot_physmem *bp, size_t nbp)
{
	struct bootmem_info * const bmi = &bootmem_info;

#ifdef VERBOSE_INIT_ARM
	printf("nfreeblocks = %u, free_pages = %d (%#x)\n",
	    bmi->bmi_nfreeblocks, bmi->bmi_freepages,
	    bmi->bmi_freepages);
#endif

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init.
	 */
	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

#ifdef VERBOSE_INIT_ARM
	printf("vectors");
#endif
	arm32_vector_init(systempage.pv_va, ARM_VEC_ALL);
#ifdef VERBOSE_INIT_ARM
	printf(" %#"PRIxVADDR"\n", vector_page);
#endif

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif
	set_stackptr(PSR_FIQ32_MODE,
	    fiqstack.pv_va + FIQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */

#ifdef VERBOSE_INIT_ARM
	printf("pmap_physload ");
#endif
	KASSERT(bp != NULL || nbp == 0);
	KASSERT(bp == NULL || nbp != 0);

	for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) {
		pv_addr_t * const pv = &bmi->bmi_freeblocks[i];
		paddr_t start = atop(pv->pv_pa);
		const paddr_t end = start + atop(pv->pv_size);

		while (start < end) {
			int vm_freelist = VM_FREELIST_DEFAULT;
			paddr_t segend = end;
			/*
			 * This assumes the bp list is sorted in ascending
			 * order.
			 */
			for (size_t j = 0; j < nbp; j++) {
				paddr_t bp_start = bp[j].bp_start;
				paddr_t bp_end = bp_start + bp[j].bp_pages;
				if (start < bp_start) {
					if (segend > bp_start) {
						segend = bp_start;
					}
					break;
				}
				if (start < bp_end) {
					if (segend > bp_end) {
						segend = bp_end;
					}
					vm_freelist = bp[j].bp_freelist;
					break;
				}
			}
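			/* [start, segend) now lies within one bp entry, if any. */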
	
			uvm_page_physload(start, segend, start, segend,
			    vm_freelist);
			start = segend;
		}
	}

	/* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap(kvm_base, kvm_base + kvm_size);
   
#ifdef __HAVE_MEMORY_DISK__
	md_root_setconf(memory_disk, sizeof memory_disk);
#endif

#ifdef BOOTHOWTO
	boothowto |= BOOTHOWTO;
#endif

#ifdef KGDB
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

#ifdef DDB
	db_machine_init();
	ddb_init(0, NULL, NULL);

	if (boothowto & RB_KDB)
		Debugger();
#endif

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

	/* We return the new stack pointer address */
	return kernelstack.pv_va + USPACE_SVC_STACK_TOP;
}
Example #13
u_int
initarm(void *arg)
{
	int loop;
	int loop1;
	u_int l1pagetable;
	extern int etext __asm("_etext");
	extern int end __asm("_end");
	int progress_counter = 0;

#ifdef DO_MEMORY_DISK
	vm_offset_t md_root_start;
#define MD_ROOT_SIZE (MEMORY_DISK_ROOT_SIZE * DEV_BSIZE)
#endif
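
/* Boot-progress LEDs: low 3 bits of GPIO port C, driven via __LED()/LEDSTEP(). */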

#define gpio8(reg) (*(volatile uint8_t *)(ioreg_vaddr(S3C2800_GPIO_BASE) + (reg)))

#define LEDSTEP()  __LED(progress_counter++)

#define pdatc gpio8(GPIO_PDATC)
#define __LED(x)  (pdatc = (pdatc & ~0x07) | (~(x) & 0x07))

	LEDSTEP();
	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	if (set_cpufuncs())
		panic("CPU not recognized!");

	LEDSTEP();


	/* Disable all peripheral interrupts */
	ioreg32(S3C2800_INTCTL_BASE + INTCTL_INTMSK) = 0;

	consinit();
#ifdef VERBOSE_INIT_ARM
	printf("consinit done\n");
#endif

#ifdef KGDB
	LEDSTEP();
	kgdb_port_init();
#endif
	LEDSTEP();

#ifdef VERBOSE_INIT_ARM
	/* Talk to the user */
	printf("\nNetBSD/evbarm (SMDK2800) booting ...\n");
#endif

	/*
	 * Ok we have the following memory map
	 *
	 * Physical Address Range     Description
	 * -----------------------    ----------------------------------
	 * 0x00000000 - 0x00ffffff    Intel flash Memory   (16MB)
	 * 0x02000000 - 0x020fffff    AMD flash Memory   (1MB)
	 * or, depending on the DIP switch setting:
	 * 0x00000000 - 0x000fffff    AMD flash Memory   (1MB)
	 * 0x02000000 - 0x02ffffff    Intel flash Memory   (16MB)
	 *
	 * 0x08000000 - 0x09ffffff    SDRAM (32MB)
	 * 0x20000000 - 0x3fffffff    PCI space
	 *
	 * The initarm() has the responsibility for creating the kernel
	 * page tables.
	 * It must also set up various memory pointers that are used
	 * by pmap etc.
	 */

	/* Fake bootconfig structure for the benefit of pmap.c */
	/* XXX must make the memory description h/w independent */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = SDRAM_START;
	bootconfig.dram[0].pages = SDRAM_SIZE / PAGE_SIZE;

	/*
	 * Set up the variables that define the availability of
	 * physical memory.  For now, we're going to set
	 * physical_freestart to 0x08200000 (where the kernel
	 * was loaded), and allocate the memory we need downwards.
	 * If we get too close to the bottom of SDRAM, we
	 * will panic.  We will update physical_freestart and
	 * physical_freeend later to reflect what pmap_bootstrap()
	 * wants to see.
	 *
	 * XXX pmap_bootstrap() needs an enema.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);

#ifdef DO_MEMORY_DISK
#ifdef MEMORY_DISK_ROOT_ROM
	md_root_start = MEMORY_DISK_ROOT_ADDR;
	boothowto |= RB_RDONLY;
#else
	/* Reserve physmem for ram disk */
	md_root_start = ((physical_end - MD_ROOT_SIZE) & ~(L1_S_SIZE-1));
	printf("Reserve %ld bytes for memory disk\n",  
	    physical_end - md_root_start);
	/* copy fs contents */
	memcpy((void *)md_root_start, (void *)MEMORY_DISK_ROOT_ADDR,
	    MD_ROOT_SIZE);
	physical_end = md_root_start;
#endif
#endif

	physical_freestart = 0x08000000UL;	/* XXX */
	physical_freeend = 0x08200000UL;

	physmem = (physical_end - physical_start) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	/* Tell the user about the memory */
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	    physical_start, physical_end - 1);
#endif

	/*
	 * XXX
	 * Okay, the kernel starts 2MB in from the bottom of physical
	 * memory.  We are going to allocate our bootstrap pages downwards
	 * from there.
	 *
	 * We need to allocate some fixed page tables to get the kernel
	 * going.  We allocate one page directory and a number of page
	 * tables and store the physical addresses in the kernel_pt_table
	 * array.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	    physical_freestart, free_pages, free_pages);
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)				\
	alloc_pages((var).pv_pa, (np));			\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define alloc_pages(var, np)				\
	physical_freeend -= ((np) * PAGE_SIZE);		\
	if (physical_freeend < physical_freestart)	\
		panic("initarm: out of memory");	\
	(var) = physical_freeend;			\
	free_pages -= (np);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));
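
	/*
	 * Note: both macros allocate downwards from physical_freeend and
	 * zero the pages through their physical address, which assumes
	 * the current 1:1 mapping of SDRAM is still active at this point.
	 */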

	loop1 = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}

	/* This should never happen, but verify it anyway. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va);
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	LEDSTEP();

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel static code/data */
	{
		size_t textsize = (uintptr_t)&etext - KERNEL_TEXT_BASE;
		size_t totalsize = (uintptr_t)&end - KERNEL_TEXT_BASE;
		u_int logical;

		textsize = (textsize + PGOFSET) & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;

		logical = 0x00200000;	/* offset of kernel in RAM */

		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
	}

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE,
	    PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE,
	    PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE,
	    PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the vector page. */
#if 1
	/* MULTI-ICE requires that page 0 is NC/NB so that it can download the
	 * cache-clean code there.  */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);
#else
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
#endif

#ifdef MEMORY_DISK_DYNAMIC
	/* map MD root image */
	pmap_map_chunk(l1pagetable, SMDK2800_MEMORY_DISK_VADDR, md_root_start,
	    MD_ROOT_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);

	md_root_setconf((void *)md_root_start, MD_ROOT_SIZE);
#endif /* MEMORY_DISK_DYNAMIC */
	/*
	 * map integrated peripherals at same address in l1pagetable
	 * so that we can continue to use console.
	 */
	pmap_devmap_bootstrap(l1pagetable, smdk2800_devmap);

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/*
	 * Update the physical_freestart/physical_freeend/free_pages
	 * variables.
	 */
	{
		physical_freestart = physical_start +
		    (((((uintptr_t)&end) + PGOFSET) & ~PGOFSET) - KERNEL_BASE);
		physical_freeend = physical_end;
		free_pages =
		    (physical_freeend - physical_freestart) / PAGE_SIZE;
	}

	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	    physical_freestart, free_pages, free_pages);
	printf("switching to new L1 page table  @%#lx...", kernel_l1pt.pv_pa);
#endif
	LEDSTEP();
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_setttb(kernel_l1pt.pv_pa, true);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init
	 */
	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

#ifdef VERBOSE_INIT_ARM
	printf("done!\n");
#endif

#if 0
	/*
	 * The IFPGA registers have just moved.
	 * Detach the diagnostic serial port and reattach at the new address.
	 */
	plcomcndetach();
	/*
	 * XXX this should only be done in main() but it useful to
	 * have output earlier ...
	 */
	consinit();
#endif

	LEDSTEP();
#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	LEDSTEP();

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	LEDSTEP();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	LEDSTEP();
	/* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

	LEDSTEP();

	/* Setup the IRQ system */
#ifdef VERBOSE_INIT_ARM
	printf("irq ");
#endif
	/* XXX irq_init(); */

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef BOOTHOWTO_INIT
	boothowto |= BOOTHOWTO_INIT;
#endif
	{
		uint8_t  gpio = ~gpio8(GPIO_PDATF);
		
		if (gpio & (1<<5)) /* SW3 */
			boothowto ^= RB_SINGLE;
		if (gpio & (1<<7)) /* SW7 */
			boothowto ^= RB_KDB;
#ifdef VERBOSE_INIT_ARM
		printf( "sw: %x boothowto: %x\n", gpio, boothowto );
#endif
	}

#ifdef KGDB
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return (kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
Example #14
static void
bootpath_build(void)
{
	const char *cp;
	char *pp;
	struct bootpath *bp;
	int fl;

	/*
	 * Grab boot path from PROM and split into `bootpath' components.
	 */
	memset(bootpath, 0, sizeof(bootpath));
	bp = bootpath;
	cp = prom_getbootpath();
	switch (prom_version()) {
	case PROM_OLDMON:
	case PROM_OBP_V0:
		/*
		 * Build fake bootpath.
		 */
		if (cp != NULL)
			bootpath_fake(bp, cp);
		break;
	case PROM_OBP_V2:
	case PROM_OBP_V3:
	case PROM_OPENFIRM:
		while (cp != NULL && *cp == '/') {
			/* Step over '/' */
			++cp;
			/* Extract name */
			pp = bp->name;
			while (*cp != '@' && *cp != '/' && *cp != '\0')
				*pp++ = *cp++;
			*pp = '\0';
#if defined(SUN4M)
			/*
			 * JS1/OF does not have iommu node in the device
			 * tree, so bootpath will start with the sbus entry.
			 * Add entry for iommu to match attachment. See also
			 * mainbus_attach and iommu_attach.
			 */
			if (CPU_ISSUN4M && bp == bootpath
			    && strcmp(bp->name, "sbus") == 0) {
				printf("bootpath_build: inserting iommu entry\n");
				strcpy(bootpath[0].name, "iommu");
				bootpath[0].val[0] = 0;
				bootpath[0].val[1] = 0x10000000;
				bootpath[0].val[2] = 0;
				++nbootpath;

				strcpy(bootpath[1].name, "sbus");
				if (*cp == '/') {
					/* complete sbus entry */
					bootpath[1].val[0] = 0;
					bootpath[1].val[1] = 0x10001000;
					bootpath[1].val[2] = 0;
					++nbootpath;
					bp = &bootpath[2];
					continue;
				} else
					bp = &bootpath[1];
			}
#endif /* SUN4M */
			if (*cp == '@') {
				cp = str2hex(++cp, &bp->val[0]);
				if (*cp == ',')
					cp = str2hex(++cp, &bp->val[1]);
				if (*cp == ':') {
					/* XXX - we handle just one char */
					/*       skip remainder of paths */
					/*       like "ledma@f,400010:tpe" */
					bp->val[2] = *++cp - 'a';
					while (*++cp != '/' && *cp != '\0')
						/*void*/;
				}
			} else {
				bp->val[0] = -1; /* no #'s: assume unit 0, no
							sbus offset/address */
			}
			++bp;
			++nbootpath;
		}
		bp->name[0] = 0;
		break;
	}

	bootpath_print(bootpath);

	/* Setup pointer to boot flags */
	cp = prom_getbootargs();
	if (cp == NULL)
		return;

	/* Scan forward to the first flag argument */
	while (*cp != '-')
		if (*cp++ == '\0')
			return;

	for (;*++cp;) {
		fl = 0;
		BOOT_FLAG(*cp, fl);
		if (!fl) {
			printf("unknown option `%c'\n", *cp);
			continue;
		}
		boothowto |= fl;

		/* specialties */
		if (*cp == 'd') {
#if defined(KGDB)
			kgdb_debug_panic = 1;
			kgdb_connect(1);
#elif defined(DDB)
			Debugger();
#else
			printf("kernel has no debugger\n");
#endif
		}
	}
}
Example #15
void
init_x86_64(paddr_t first_avail)
{
	extern void consinit(void);
	extern struct extent *iomem_ex;
	struct region_descriptor region;
	struct mem_segment_descriptor *ldt_segp;
	int x, first16q, ist;
	u_int64_t seg_start, seg_end;
	u_int64_t seg_start1, seg_end1;

	cpu_init_msrs(&cpu_info_primary);

	proc0.p_addr = proc0paddr;
	cpu_info_primary.ci_curpcb = &proc0.p_addr->u_pcb;

	x86_bus_space_init();

	consinit();	/* XXX SHOULD NOT BE DONE HERE */

	/*
	 * Initialize PAGE_SIZE-dependent variables.
	 */
	uvm_setpagesize();

#if 0
	uvmexp.ncolors = 2;
#endif
 
	/*
	 * Boot arguments are in a single page specified by /boot.
	 *
	 * We require the "new" vector form, as well as memory ranges
	 * to be given in bytes rather than KB.
	 *
	 * locore copies the data into bootinfo[] for us.
	 */
	if ((bootapiver & (BAPIV_VECTOR | BAPIV_BMEMMAP)) ==
	    (BAPIV_VECTOR | BAPIV_BMEMMAP)) {
		if (bootinfo_size >= sizeof(bootinfo))
			panic("boot args too big");

		getbootinfo(bootinfo, bootinfo_size);
	} else
		panic("invalid /boot");

	avail_start = PAGE_SIZE; /* BIOS leaves data in low memory */
				 /* and VM system doesn't work with phys 0 */
#ifdef MULTIPROCESSOR
	if (avail_start < MP_TRAMPOLINE + PAGE_SIZE)
		avail_start = MP_TRAMPOLINE + PAGE_SIZE;
#endif

	/*
	 * Call pmap initialization to make new kernel address space.
	 * We must do this before loading pages into the VM system.
	 */
	pmap_bootstrap(VM_MIN_KERNEL_ADDRESS,
	    IOM_END + trunc_page(KBTOB(biosextmem)));

	if (avail_start != PAGE_SIZE)
		pmap_prealloc_lowmem_ptps();

	if (mem_cluster_cnt == 0) {
		/*
		 * Allocate the physical addresses used by RAM from the iomem
		 * extent map.  This is done before the addresses are
		 * page rounded just to make sure we get them all.
		 */
		if (extent_alloc_region(iomem_ex, 0, KBTOB(biosbasemem),
		    EX_NOWAIT)) {
			/* XXX What should we do? */
			printf("WARNING: CAN'T ALLOCATE BASE MEMORY FROM "
			    "IOMEM EXTENT MAP!\n");
		}
		mem_clusters[0].start = 0;
		mem_clusters[0].size = trunc_page(KBTOB(biosbasemem));
		physmem += atop(mem_clusters[0].size);
		if (extent_alloc_region(iomem_ex, IOM_END, KBTOB(biosextmem),
		    EX_NOWAIT)) {
			/* XXX What should we do? */
			printf("WARNING: CAN'T ALLOCATE EXTENDED MEMORY FROM "
			    "IOMEM EXTENT MAP!\n");
		}
#if 0
#if NISADMA > 0
		/*
		 * Some motherboards/BIOSes remap the 384K of RAM that would
		 * normally be covered by the ISA hole to the end of memory
		 * so that it can be used.  However, on a 16M system, this
		 * would cause bounce buffers to be allocated and used.
		 * This is not desirable behaviour, as more than 384K of
		 * bounce buffers might be allocated.  As a work-around,
		 * we round memory down to the nearest 1M boundary if
		 * we're using any isadma devices and the remapped memory
		 * is what puts us over 16M.
		 */
		if (biosextmem > (15*1024) && biosextmem < (16*1024)) {
			char pbuf[9];

			format_bytes(pbuf, sizeof(pbuf),
			    biosextmem - (15*1024));
			printf("Warning: ignoring %s of remapped memory\n",
			    pbuf);
			biosextmem = (15*1024);
		}
#endif
#endif
		mem_clusters[1].start = IOM_END;
		mem_clusters[1].size = trunc_page(KBTOB(biosextmem));
		physmem += atop(mem_clusters[1].size);

		mem_cluster_cnt = 2;

		avail_end = IOM_END + trunc_page(KBTOB(biosextmem));
	}

	/*
	 * If we have 16M of RAM or less, just put it all on
	 * the default free list.  Otherwise, put the first
	 * 16M of RAM on a lower priority free list (so that
	 * all of the ISA DMA'able memory won't be eaten up
	 * first-off).
	 */
	if (avail_end <= (16 * 1024 * 1024))
		first16q = VM_FREELIST_DEFAULT;
	else
		first16q = VM_FREELIST_FIRST16;

	/* Make sure the end of the space used by the kernel is rounded. */
	first_avail = round_page(first_avail);
	kern_end = KERNBASE + first_avail;

	/*
	 * Now, load the memory clusters (which have already been
	 * rounded and truncated) into the VM system.
	 *
	 * NOTE: WE ASSUME THAT MEMORY STARTS AT 0 AND THAT THE KERNEL
	 * IS LOADED AT IOM_END (1M).
	 */
	for (x = 0; x < mem_cluster_cnt; x++) {
		seg_start = mem_clusters[x].start;
		seg_end = mem_clusters[x].start + mem_clusters[x].size;
		seg_start1 = 0;
		seg_end1 = 0;

		if (seg_start > 0xffffffffULL) {
			printf("skipping %lld bytes of memory above 4GB\n",
			    seg_end - seg_start);
			continue;
		}
		if (seg_end > 0x100000000ULL) {
			printf("skipping %lld bytes of memory above 4GB\n",
			    seg_end - 0x100000000ULL);
			seg_end = 0x100000000ULL;
		}

		/*
		 * Skip memory before our available starting point.
		 */
		if (seg_end <= avail_start)
			continue;

		if (avail_start >= seg_start && avail_start < seg_end) {
			if (seg_start != 0)
				panic("init_x86_64: memory doesn't start at 0");
			seg_start = avail_start;
			if (seg_start == seg_end)
				continue;
		}

		/*
		 * If this segment contains the kernel, split it
		 * in two, around the kernel.
		 */
		if (seg_start <= IOM_END && first_avail <= seg_end) {
			seg_start1 = first_avail;
			seg_end1 = seg_end;
			seg_end = IOM_END;
		}

		/* First hunk */
		if (seg_start != seg_end) {
			if (seg_start <= (16 * 1024 * 1024) &&
			    first16q != VM_FREELIST_DEFAULT) {
				u_int64_t tmp;

				if (seg_end > (16 * 1024 * 1024))
					tmp = (16 * 1024 * 1024);
				else
					tmp = seg_end;
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start,
				    (unsigned long long)tmp,
				    atop(seg_start), atop(tmp));
#endif
				uvm_page_physload(atop(seg_start),
				    atop(tmp), atop(seg_start),
				    atop(tmp), first16q);
				seg_start = tmp;
			}

			if (seg_start != seg_end) {
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start,
				    (unsigned long long)seg_end,
				    atop(seg_start), atop(seg_end));
#endif
				uvm_page_physload(atop(seg_start),
				    atop(seg_end), atop(seg_start),
				    atop(seg_end), VM_FREELIST_DEFAULT);
			}
		}

		/* Second hunk */
		if (seg_start1 != seg_end1) {
			if (seg_start1 <= (16 * 1024 * 1024) &&
			    first16q != VM_FREELIST_DEFAULT) {
				u_int64_t tmp;

				if (seg_end1 > (16 * 1024 * 1024))
					tmp = (16 * 1024 * 1024);
				else
					tmp = seg_end1;
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start1,
				    (unsigned long long)tmp,
				    atop(seg_start1), atop(tmp));
#endif
				uvm_page_physload(atop(seg_start1),
				    atop(tmp), atop(seg_start1),
				    atop(tmp), first16q);
				seg_start1 = tmp;
			}

			if (seg_start1 != seg_end1) {
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    (unsigned long long)seg_start1,
				    (unsigned long long)seg_end1,
				    atop(seg_start1), atop(seg_end1));
#endif
				uvm_page_physload(atop(seg_start1),
				    atop(seg_end1), atop(seg_start1),
				    atop(seg_end1), VM_FREELIST_DEFAULT);
			}
		}
	}

	/*
	 * Steal memory for the message buffer (at end of core).
	 */
	{
		struct vm_physseg *vps = NULL;
		psize_t sz = round_page(MSGBUFSIZE);
		psize_t reqsz = sz;

		for (x = 0; x < vm_nphysseg; x++) {
			vps = &vm_physmem[x];
			if (ptoa(vps->avail_end) == avail_end)
				break;
		}
		if (x == vm_nphysseg)
			panic("init_x86_64: can't find end of memory");

		/* Shrink so it'll fit in the last segment. */
		if ((vps->avail_end - vps->avail_start) < atop(sz))
			sz = ptoa(vps->avail_end - vps->avail_start);

		vps->avail_end -= atop(sz);
		vps->end -= atop(sz);
		msgbuf_paddr = ptoa(vps->avail_end);

		/* Remove the last segment if it now has no pages. */
		if (vps->start == vps->end) {
			for (vm_nphysseg--; x < vm_nphysseg; x++)
				vm_physmem[x] = vm_physmem[x + 1];
		}

		/* Now find where the new avail_end is. */
		for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
			if (vm_physmem[x].avail_end > avail_end)
				avail_end = vm_physmem[x].avail_end;
		avail_end = ptoa(avail_end);

		/* Warn if the message buffer had to be shrunk. */
		if (sz != reqsz)
			printf("WARNING: %ld bytes not available for msgbuf "
			    "in last cluster (%ld used)\n", reqsz, sz);
	}

	/*
	 * XXXfvdl todo: acpi wakeup code.
	 */

	pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 32 * 1024 * 1024);

	pmap_kenter_pa(idt_vaddr, idt_paddr, VM_PROT_READ|VM_PROT_WRITE);
	pmap_kenter_pa(idt_vaddr + PAGE_SIZE, idt_paddr + PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE);

	pmap_kenter_pa(lo32_vaddr, lo32_paddr, VM_PROT_READ|VM_PROT_WRITE);

	idt = (struct gate_descriptor *)idt_vaddr;
	gdtstore = (char *)(idt + NIDT);
	ldtstore = gdtstore + DYNSEL_START;

	/* make gdt gates and memory segments */
	set_mem_segment(GDT_ADDR_MEM(gdtstore, GCODE_SEL), 0, 0xfffff, SDT_MEMERA,
	    SEL_KPL, 1, 0, 1);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GDATA_SEL), 0, 0xfffff, SDT_MEMRWA,
	    SEL_KPL, 1, 0, 1);

	set_sys_segment(GDT_ADDR_SYS(gdtstore, GLDT_SEL), ldtstore, LDT_SIZE - 1,
	    SDT_SYSLDT, SEL_KPL, 0);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUCODE_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMERA, SEL_UPL, 1, 0, 1);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUDATA_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMRWA, SEL_UPL, 1, 0, 1);

	/* make ldt gates and memory segments */
	setgate((struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
	    &IDTVEC(oosyscall), 0, SDT_SYS386CGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));

	*(struct mem_segment_descriptor *)(ldtstore + LUCODE_SEL) =
	    *GDT_ADDR_MEM(gdtstore, GUCODE_SEL);
	*(struct mem_segment_descriptor *)(ldtstore + LUDATA_SEL) =
	    *GDT_ADDR_MEM(gdtstore, GUDATA_SEL);

	/*
	 * 32 bit GDT entries.
	 */

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUCODE32_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMERA, SEL_UPL, 1, 1, 0);

	set_mem_segment(GDT_ADDR_MEM(gdtstore, GUDATA32_SEL), 0,
	    atop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMRWA, SEL_UPL, 1, 1, 0);

	/*
	 * 32 bit LDT entries.
	 */
	ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUCODE32_SEL);
	set_mem_segment(ldt_segp, 0, atop(VM_MAXUSER_ADDRESS32) - 1,
	    SDT_MEMERA, SEL_UPL, 1, 1, 0);
	ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUDATA32_SEL);
	set_mem_segment(ldt_segp, 0, atop(VM_MAXUSER_ADDRESS32) - 1,
	    SDT_MEMRWA, SEL_UPL, 1, 1, 0);

	/*
	 * Other entries.
	 */
	memcpy((struct gate_descriptor *)(ldtstore + LSOL26CALLS_SEL),
	    (struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
	    sizeof (struct gate_descriptor));
	memcpy((struct gate_descriptor *)(ldtstore + LBSDICALLS_SEL),
	    (struct gate_descriptor *)(ldtstore + LSYS5CALLS_SEL),
	    sizeof (struct gate_descriptor));

	/* exceptions */
	for (x = 0; x < 32; x++) {
		ist = (x == 8) ? 1 : 0;
		setgate(&idt[x], IDTVEC(exceptions)[x], ist, SDT_SYS386IGT,
		    (x == 3 || x == 4) ? SEL_UPL : SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
		idt_allocmap[x] = 1;
	}

	/* new-style interrupt gate for syscalls */
	setgate(&idt[128], &IDTVEC(osyscall), 0, SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	idt_allocmap[128] = 1;

	setregion(&region, gdtstore, DYNSEL_START - 1);
	lgdt(&region);

	cpu_init_idt();

#ifdef DDB
	db_machine_init();
	ddb_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif
#ifdef KGDB
	kgdb_port_init();
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

	intr_default_setup();

	softintr_init();
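	/* Enable interrupts in hardware; spl stays at IPL_IPI for now. */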
	splraise(IPL_IPI);
	enable_intr();

	/* Make sure maxproc is sane */
	if (maxproc > cpu_maxproc())
		maxproc = cpu_maxproc();
}
Example #16
STATIC void
gtmpsc_intr_rx(struct gtmpsc_softc *sc)
{
	gtmpsc_pollrx_t *vrxp;
	uint32_t csr;
	int kick, ix;

	kick = 0;

	/* already handled in gtmpsc_common_getc() */
	if (sc->sc_rcvdrx == sc->sc_rcvrx)
		return;

	ix = sc->sc_rcvdrx;
	vrxp = &sc->sc_poll_sdmapage->rx[ix];
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
	    ix * sizeof(gtmpsc_pollrx_t),
	    sizeof(sdma_desc_t),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	csr = vrxp->rxdesc.sdma_csr;
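	/* Consume descriptors until we reach one still owned by the SDMA engine. */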
	while (!(csr & SDMA_CSR_RX_OWN)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
		    ix * sizeof(gtmpsc_pollrx_t) + sizeof(sdma_desc_t),
		    sizeof(vrxp->rxbuf),
		    BUS_DMASYNC_POSTREAD);
		vrxp->rxdesc.sdma_cnt &= SDMA_RX_CNT_BCNT_MASK;
		if (vrxp->rxdesc.sdma_csr & SDMA_CSR_RX_BR) {
			int cn_trapped = 0;

			cn_check_magic(sc->sc_tty->t_dev,
			    CNC_BREAK, gtmpsc_cnm_state);
			if (cn_trapped)
				continue;
#if defined(KGDB) && !defined(DDB)
			if (ISSET(sc->sc_flags, GTMPSC_KGDB)) {
				kgdb_connect(1);
				continue;
			}
#endif
		}

		sc->sc_rcvcnt += vrxp->rxdesc.sdma_cnt;
		kick = 1;

		ix = (ix + 1) % GTMPSC_NRXDESC;
		vrxp = &sc->sc_poll_sdmapage->rx[ix];
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
		    ix * sizeof(gtmpsc_pollrx_t),
		    sizeof(sdma_desc_t),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		csr = vrxp->rxdesc.sdma_csr;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdma_map,
	    ix * sizeof(gtmpsc_pollrx_t),
	    sizeof(sdma_desc_t),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (kick) {
		sc->sc_rcvdrx = ix;
		sc->sc_rx_ready = 1;
		softint_schedule(sc->sc_si);
	}
}
Example #17
/* ARGSUSED */
STATIC void
gtmpscattach(device_t parent, device_t self, void *aux)
{
	struct gtmpsc_softc *sc = device_private(self);
	struct marvell_attach_args *mva = aux;
	bus_dma_segment_t segs;
	struct tty *tp;
	int rsegs, err, unit;
	void *kva;

	aprint_naive("\n");
	aprint_normal(": Multi-Protocol Serial Controller\n");

	if (mva->mva_unit != MVA_UNIT_DEFAULT)
		unit = mva->mva_unit;
	else
		unit = (mva->mva_offset == GTMPSC_BASE(0)) ? 0 : 1;

#ifdef MPSC_CONSOLE
	if (cn_tab == &gtmpsc_consdev &&
	    cn_tab->cn_dev == makedev(0, unit)) {
		gtmpsc_cn_softc.sc_dev = self;
		memcpy(sc, &gtmpsc_cn_softc, sizeof(struct gtmpsc_softc));
		sc->sc_flags = GTMPSC_CONSOLE;
	} else
#endif
	{
		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
		    mva->mva_offset, mva->mva_size, &sc->sc_mpsch)) {
			aprint_error_dev(self, "Cannot map MPSC registers\n");
			return;
		}
		if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
		    GTSDMA_BASE(unit), GTSDMA_SIZE, &sc->sc_sdmah)) {
			aprint_error_dev(self, "Cannot map SDMA registers\n");
			return;
		}
		sc->sc_dev = self;
		sc->sc_unit = unit;
		sc->sc_iot = mva->mva_iot;
		sc->sc_dmat = mva->mva_dmat;

		err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
		    &segs, 1, &rsegs, BUS_DMA_NOWAIT);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamem_alloc error 0x%x\n", err);
			goto fail0;
		}
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE, &kva,
		    BUS_DMA_NOWAIT);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamem_map error 0x%x\n", err);
			goto fail1;
		}
		memset(kva, 0, PAGE_SIZE);	/* paranoid/superfluous */
		sc->sc_poll_sdmapage = kva;

		err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_polltx_t), 1,
		   sizeof(gtmpsc_polltx_t), 0, BUS_DMA_NOWAIT,
		   &sc->sc_txdma_map);
		if (err != 0) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamap_create error 0x%x\n", err);
			goto fail2;
		}
		err = bus_dmamap_load(sc->sc_dmat, sc->sc_txdma_map,
		    sc->sc_poll_sdmapage->tx, sizeof(gtmpsc_polltx_t),
		    NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
		if (err != 0) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamap_load tx error 0x%x\n", err);
			goto fail3;
		}
		err = bus_dmamap_create(sc->sc_dmat, sizeof(gtmpsc_pollrx_t), 1,
		   sizeof(gtmpsc_pollrx_t), 0, BUS_DMA_NOWAIT,
		   &sc->sc_rxdma_map);
		if (err != 0) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamap_create rx error 0x%x\n", err);
			goto fail4;
		}
		err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxdma_map,
		    sc->sc_poll_sdmapage->rx, sizeof(gtmpsc_pollrx_t),
		    NULL, BUS_DMA_NOWAIT | BUS_DMA_READ | BUS_DMA_WRITE);
		if (err != 0) {
			aprint_error_dev(sc->sc_dev,
			    "bus_dmamap_load rx error 0x%x\n", err);
			goto fail5;
		}

		sc->sc_brg = unit;		/* XXXXX */
		sc->sc_baudrate = GT_MPSC_DEFAULT_BAUD_RATE;
	}
	aprint_normal_dev(self, "with SDMA offset 0x%04x-0x%04x\n",
	    GTSDMA_BASE(unit), GTSDMA_BASE(unit) + GTSDMA_SIZE - 1);

	sc->sc_rx_ready = 0;
	sc->sc_tx_busy = 0;
	sc->sc_tx_done = 0;
	sc->sc_tx_stopped = 0;
	sc->sc_heldchange = 0;

	gtmpsc_txdesc_init(sc);
	gtmpsc_rxdesc_init(sc);

	sc->sc_tty = tp = tty_alloc();
	tp->t_oproc = gtmpscstart;
	tp->t_param = gtmpscparam;
	tty_attach(tp);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_HIGH);

	/*
	 * clear any pending SDMA interrupts for this unit
	 */
	(void) gt_sdma_icause(device_parent(sc->sc_dev),
	    SDMA_INTR_RXBUF(sc->sc_unit) |
	    SDMA_INTR_RXERR(sc->sc_unit) |
	    SDMA_INTR_TXBUF(sc->sc_unit) |
	    SDMA_INTR_TXEND(sc->sc_unit));

	sc->sc_si = softint_establish(SOFTINT_SERIAL, gtmpsc_softintr, sc);
	if (sc->sc_si == NULL)
		panic("mpscattach: cannot softint_establish IPL_SOFTSERIAL");

	shutdownhook_establish(gtmpsc_shutdownhook, sc);

	gtmpscinit_stop(sc);
	gtmpscinit_start(sc);

	if (sc->sc_flags & GTMPSC_CONSOLE) {
		int maj;

		/* locate the major number */
		maj = cdevsw_lookup_major(&gtmpsc_cdevsw);

		tp->t_dev = cn_tab->cn_dev =
		    makedev(maj, device_unit(sc->sc_dev));

		aprint_normal_dev(self, "console\n");
	}

#ifdef KGDB
	/*
	 * Allow kgdb to "take over" this port.  If this is
	 * the kgdb device, it has exclusive use.
	 */
	if (sc->sc_unit == gtmpsckgdbport) {
#ifdef MPSC_CONSOLE
		if (sc->sc_unit == MPSC_CONSOLE) {
			aprint_error_dev(self,
			    "(kgdb): cannot share with console\n");
			return;
		}
#endif

		sc->sc_flags |= GTMPSC_KGDB;
		aprint_normal_dev(self, "kgdb\n");

		gtmpsc_txflush(sc);

		kgdb_attach(gtmpsc_kgdb_getc, gtmpsc_kgdb_putc, NULL);
		kgdb_dev = 123;	/* unneeded, only to satisfy some tests */
		gtmpsc_kgdb_attached = 1;
		kgdb_connect(1);
	}
#endif /* KGDB */

	return;
fail5:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxdma_map);
fail4:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_txdma_map);
fail3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_txdma_map);
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, PAGE_SIZE);
fail1:
	bus_dmamem_free(sc->sc_dmat, &segs, 1);
fail0:
	return;
}
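kgdb needs nothing more from the port than the polled byte-in/byte-out pair passed to kgdb_attach(). A minimal sketch of that contract follows, assuming hypothetical polled I/O routines mydev_pollc_in()/mydev_pollc_out() in place of the driver's own getc/putc.

#include <sys/kgdb.h>

extern int  mydev_pollc_in(void *);		/* hypothetical: block for one byte */
extern void mydev_pollc_out(void *, int);	/* hypothetical: transmit one byte */

static int
mykgdb_getc(void *arg)
{
	return mydev_pollc_in(arg);
}

static void
mykgdb_putc(void *arg, int c)
{
	mydev_pollc_out(arg, c);
}

static void
mykgdb_init(void *ioarg)
{
	kgdb_attach(mykgdb_getc, mykgdb_putc, ioarg);
	kgdb_dev = 123;		/* any value other than NODEV marks kgdb present */
	kgdb_connect(1);	/* wait here for the remote gdb to attach */
}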
Example #18
/*
 * Halt or reboot the machine after syncing/dumping according to howto.
 */
void
cpu_reboot(int howto, char *what)
{
	static int syncing;
	static char str[256];
	char *ap = str, *ap1 = ap;

	boothowto = howto;
	if (!cold && !(howto & RB_NOSYNC) && !syncing) {
		syncing = 1;
		vfs_shutdown();		/* sync */
		resettodr();		/* set wall clock */
	}

	splhigh();

	if (!cold && (howto & RB_DUMP))
		dumpsys();

	doshutdownhooks();

	pmf_system_shutdown(boothowto);

	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
		/* Power off here if we know how...*/
	}

	if (howto & RB_HALT) {
		printf("halted\n\n");

		goto reboot;	/* XXX for now... */

#ifdef DDB
		printf("dropping to debugger\n");
		while (1)
			Debugger();
#endif
#ifdef KGDB
		printf("dropping to kgdb\n");
		while (1)
			kgdb_connect(1);
#endif
	}

	printf("rebooting\n\n");
	if (what && *what) {
		if (strlen(what) > sizeof str - 5)
			printf("boot string too large, ignored\n");
		else {
			strcpy(str, what);
			ap1 = ap = str + strlen(str);
			*ap++ = ' ';
		}
	}
	*ap++ = '-';
	if (howto & RB_SINGLE)
		*ap++ = 's';
	if (howto & RB_KDB)
		*ap++ = 'd';
	*ap++ = 0;
	if (ap[-2] == '-')
		*ap1 = 0;

	/* flush cache for msgbuf */
	__syncicache((void *)msgbuf_paddr, round_page(MSGBUFSIZE));

 reboot:
	ppc4xx_reset();

	printf("ppc4xx_reset() failed!\n");
#ifdef DDB
	while (1)
		Debugger();
#endif
#ifdef KGDB
	while (1)
		kgdb_connect(1);
#else
	while (1)
		/* nothing */;
#endif
}
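The boot-string assembly above is easy to get wrong at the edges (a lone "-" must be erased again). Here is a stand-alone version of the same logic, for illustration only; build_bootstr() is not part of the source above and assumes len >= 5.

#include <string.h>
#include <sys/reboot.h>

static void
build_bootstr(char *str, size_t len, const char *what, int howto)
{
	char *ap, *ap1;

	str[0] = '\0';
	ap1 = ap = str;
	if (what != NULL && *what != '\0' && strlen(what) <= len - 5) {
		strcpy(str, what);
		ap1 = ap = str + strlen(str);
		*ap++ = ' ';	/* ap1 remembers where " -..." starts */
	}
	*ap++ = '-';
	if (howto & RB_SINGLE)
		*ap++ = 's';
	if (howto & RB_KDB)
		*ap++ = 'd';
	*ap++ = '\0';
	if (ap[-2] == '-')
		*ap1 = '\0';	/* no flag letters: erase the dangling " -" */
}

For example, build_bootstr(buf, sizeof buf, "netbsd", RB_SINGLE) yields "netbsd -s", while howto == 0 yields just "netbsd".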
Example #19
/*
 * u_int initarm(...)
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 */
u_int
initarm(void *arg)
{
	extern vaddr_t xscale_cache_clean_addr;
	int loop;
	int loop1;
	u_int l1pagetable;
#ifdef DIAGNOSTIC
	extern vsize_t xscale_minidata_clean_size; /* used in KASSERT */
#endif

	/* Register devmap for devices we mapped in start */
	pmap_devmap_register(viper_devmap);

	/* start 32.768 kHz OSC */
	ioreg_write(VIPER_CLKMAN_VBASE + 0x08, 2);
	/* Get ready for splfoo() */
	pxa2x0_intr_bootstrap(VIPER_INTCTL_VBASE);

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	if (set_cpufuncs())
		panic("cpu not recognized!");

#if 0
	/* Calibrate the delay loop. */
#endif

	/* setup GPIO for BTUART, in case bootloader doesn't take care of it */
	pxa2x0_gpio_bootstrap(VIPER_GPIO_VBASE);
	pxa2x0_gpio_config(viper_gpioconf);

	/* turn on clock to UART block.
	   XXX: this should not be done here. */
	ioreg_write(VIPER_CLKMAN_VBASE+CLKMAN_CKEN, CKEN_FFUART|CKEN_BTUART |
	    ioreg_read(VIPER_CLKMAN_VBASE+CLKMAN_CKEN));

	consinit();
#ifdef KGDB
	kgdb_port_init();
#endif
	/* Talk to the user */
	printf("\nNetBSD/evbarm (viper) booting ...\n");

#if 0
	/*
	 * Examine the boot args string for options we need to know about
	 * now.
	 */
	process_kernel_args((char *)nwbootinfo.bt_args);
#endif

	printf("initarm: Configuring system ...\n");

	/* Fake bootconfig structure for the benefit of pmap.c */
	/* XXX must make the memory description h/w independent */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = MEMSTART;
	bootconfig.dram[0].pages = MEMSIZE / PAGE_SIZE;

	/*
	 * Set up the variables that define the availability of
	 * physical memory.  For now, we're going to set
	 * physical_freeend to 0xa0200000 (where the kernel
	 * was loaded), and allocate the memory we need downwards.
	 * If we get too close to the page tables that RedBoot
	 * set up, we will panic.  We will update physical_freestart
	 * and physical_freeend later to reflect what pmap_bootstrap()
	 * wants to see.
	 *
	 * XXX pmap_bootstrap() needs an enema.
	 * (now that would be truly hardcore XXX)
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);

	physical_freestart = 0xa0009000UL;
	physical_freeend = 0xa0200000UL;

	physmem = (physical_end - physical_start) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	/* Tell the user about the memory */
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	    physical_start, physical_end - 1);
#endif

	/*
	 * Okay, the kernel starts 2MB in from the bottom of physical
	 * memory.  We are going to allocate our bootstrap pages downwards
	 * from there.
	 *
	 * We need to allocate some fixed page tables to get the kernel
	 * going.  We allocate one page directory and a number of page
	 * tables and store the physical addresses in the kernel_pt_table
	 * array.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	       physical_freestart, free_pages, free_pages);
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)				\
	alloc_pages((var).pv_pa, (np));			\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define alloc_pages(var, np)				\
	physical_freeend -= ((np) * PAGE_SIZE);		\
	if (physical_freeend < physical_freestart)	\
		panic("initarm: out of memory");	\
	(var) = physical_freeend;			\
	free_pages -= (np);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	loop1 = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}

	/* This should never happen, but confirm it anyway. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

	/* Allocate enough pages for cleaning the Mini-Data cache. */
	KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
	valloc_pages(minidataclean, 1);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va); 
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va); 
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va); 
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va); 
#endif

	/*
	 * XXX Defer this to later so that we can reclaim the memory
	 * XXX used by the RedBoot page tables.
	 */
	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel static code/data */
	{
		extern char etext[], _end[];
		size_t textsize = (uintptr_t) etext - KERNEL_TEXT_BASE;
		size_t totalsize = (uintptr_t) _end - KERNEL_TEXT_BASE;
		u_int logical;

		textsize = (textsize + PGOFSET) & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;
		
		logical = 0x00200000;	/* offset of kernel in RAM */

		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
	    minidataclean.pv_pa);

	/* Map the vector page. */
#if 1
	/* MULTI-ICE requires that page 0 is NC/NB so that it can download the
	 * cache-clean code there.  */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#else
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#endif

	/*
	 * map integrated peripherals at same address in l1pagetable
	 * so that we can continue to use console.
	 */
	pmap_devmap_bootstrap(l1pagetable, viper_devmap);

	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/*
	 * Update the physical_freestart/physical_freeend/free_pages
	 * variables.
	 */
	{
		extern char _end[];

		physical_freestart = physical_start +
		    (((((uintptr_t) _end) + PGOFSET) & ~PGOFSET) -
		     KERNEL_BASE);
		physical_freeend = physical_end;
		free_pages =
		    (physical_freeend - physical_freestart) / PAGE_SIZE;
	}

	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	       physical_freestart, free_pages, free_pages);
	printf("switching to new L1 page table  @%#lx...", kernel_l1pt.pv_pa);
#endif

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init
	 */
	proc0paddr = (struct user *)kernelstack.pv_va;
	lwp0.l_addr = proc0paddr;

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	printf("init subsystems: stacks ");

	set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE, undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
	printf("vectors ");
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
	printf("undefined ");
	undefined_init();

	/* Load memory into UVM. */
	printf("page ");
	uvm_setpagesize();        /* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	/* Boot strap pmap telling it where the kernel page table is */
	printf("pmap ");
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

#ifdef __HAVE_MEMORY_DISK__
	md_root_setconf(memory_disk, sizeof memory_disk);
#endif

#ifdef KGDB
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

#ifdef DDB
	db_machine_init();

	/* Firmware doesn't load symbols. */
	ddb_init(0, NULL, NULL);

	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return (kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
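The alloc_pages()/valloc_pages() macros above implement a one-way bump allocator that carves bootstrap pages downward from physical_freeend. Below is a user-space analogue of the same arithmetic, purely for illustration; the addresses mirror the viper constants, and the kernel version also zeroes each carved page.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096u
#define KERNEL_BASE	0xc0000000u		/* illustrative */

static uintptr_t physical_start     = 0xa0000000u;
static uintptr_t physical_freestart = 0xa0009000u;
static uintptr_t physical_freeend   = 0xa0200000u;

/* Carve np pages downward and return their physical address. */
static uintptr_t
alloc_pages_down(unsigned int np)
{
	physical_freeend -= (uintptr_t)np * PAGE_SIZE;
	if (physical_freeend < physical_freestart) {
		fprintf(stderr, "initarm: out of memory\n");
		exit(1);
	}
	return physical_freeend;	/* the kernel version memset()s this range */
}

int
main(void)
{
	/* e.g. the 16KB L1 page directory: four pages */
	uintptr_t pa = alloc_pages_down(4);
	uintptr_t va = KERNEL_BASE + (pa - physical_start);

	printf("pa %#lx -> va %#lx\n", (unsigned long)pa, (unsigned long)va);
	return 0;
}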
Example #20
void
machine_startup(int argc, char *argv[], struct bootinfo *bi)
{
	extern char edata[], end[];
	vaddr_t kernend;
	size_t symbolsize;
	int i;
	char *p;
	/*
	 * This routine's stack is never polluted, since the stack pointer
	 * stays below the kernel text segment; on exit, the stack pointer
	 * is switched to proc0's.
	 */
	struct kloader_bootinfo kbi;

	/* Symbol table size */
	symbolsize = 0;
	if (memcmp(&end, ELFMAG, SELFMAG) == 0) {
		Elf_Ehdr *eh = (void *)end;
		Elf_Shdr *sh = (void *)(end + eh->e_shoff);
		for (i = 0; i < eh->e_shnum; i++, sh++)
			if (sh->sh_offset > 0 &&
			    (sh->sh_offset + sh->sh_size) > symbolsize)
				symbolsize = sh->sh_offset + sh->sh_size;
	}

	/* Clear BSS */
	memset(edata, 0, end - edata);

	/* Setup bootinfo */
	bootinfo = &kbi.bootinfo;
	memcpy(bootinfo, bi, sizeof(struct bootinfo));
	if (bootinfo->magic == BOOTINFO_MAGIC) {
		platid.dw.dw0 = bootinfo->platid_cpu;
		platid.dw.dw1 = bootinfo->platid_machine;
	}

	/* CPU initialize */
	if (platid_match(&platid, &platid_mask_CPU_SH_3))
		sh_cpu_init(CPU_ARCH_SH3, CPU_PRODUCT_7709A);
	else if (platid_match(&platid, &platid_mask_CPU_SH_4))
		sh_cpu_init(CPU_ARCH_SH4, CPU_PRODUCT_7750);

	/* Start to determine heap area */
	kernend = (vaddr_t)sh3_round_page(end + symbolsize);

	/* Setup bootstrap options */
	makebootdev("wd0"); /* default boot device */
	boothowto = 0;
	for (i = 1; i < argc; i++) { /* skip 1st arg (kernel name). */
		char *cp = argv[i];
		switch (*cp) {
		case 'b':
			/* boot device: -b=sd0 etc. */
			p = cp + 2;
#ifdef NFS
			if (strcmp(p, "nfs") == 0)
				mountroot = nfs_mountroot;
			else
				makebootdev(p);
#else /* NFS */
			makebootdev(p);
#endif /* NFS */
			break;
		default:
			BOOT_FLAG(*cp, boothowto);
			break;
		}
	}

#ifdef MFS
	/*
	 * Check to see if a mini-root was loaded into memory. It resides
	 * at the start of the next page just after the end of BSS.
	 */
	if (boothowto & RB_MINIROOT) {
		size_t fssz;
		fssz = sh3_round_page(mfs_initminiroot((void *)kernend));
#ifdef MEMORY_DISK_DYNAMIC
		md_root_setconf((caddr_t)kernend, fssz);
#endif
		kernend += fssz;
	}
#endif /* MFS */

	/* Console */
	consinit();
#ifdef HPC_DEBUG_LCD
	dbg_lcd_test();
#endif
	/* copy boot parameter for kloader */
	kloader_bootinfo_set(&kbi, argc, argv, bi, TRUE);

	/* Find memory cluster. and load to UVM */
	physmem = mem_cluster_init(SH3_P1SEG_TO_PHYS(kernend));
	_DPRINTF("total memory = %dMbyte\n", (int)(sh3_ptob(physmem) >> 20));
	mem_cluster_load();

	/* Initialize proc0 u-area */
	sh_proc0_init();

	/* Initialize pmap and start to address translation */
	pmap_bootstrap();

	/* Debugger. */
#ifdef DDB
	if (symbolsize) {
		ddb_init(symbolsize, &end, end + symbolsize);
		_DPRINTF("symbol size = %d byte\n", symbolsize);
	}
	if (boothowto & RB_KDB)
		Debugger();
#endif /* DDB */
#ifdef KGDB
	if (boothowto & RB_KDB) {
		if (kgdb_dev == NODEV) {
			printf("no kgdb console.\n");
		} else {
			kgdb_debug_init = 1;
			kgdb_connect(1);
		}
	}
#endif /* KGDB */

	/* Jump to main */
	__asm__ __volatile__(
		"jmp	@%0;"
		"mov	%1, sp"
		:: "r"(main),"r"(proc0.p_md.md_pcb->pcb_sf.sf_r7_bank));
	/* NOTREACHED */
	while (1)
		;
}
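The symbol-size scan at the top of machine_startup() just finds the farthest (sh_offset + sh_size) over all section headers, which bounds everything the boot loader left in memory behind the image. Here is a hedged user-space analogue using the standard <elf.h> types (Elf32, matching the 32-bit SH target) instead of the kernel's aliases.

#include <elf.h>
#include <stddef.h>
#include <string.h>

/* Return the extent of an in-memory ELF image, or 0 if it is not ELF. */
static size_t
elf_image_extent(const unsigned char *image)
{
	const Elf32_Ehdr *eh = (const Elf32_Ehdr *)image;
	const Elf32_Shdr *sh;
	size_t extent = 0;
	int i;

	if (memcmp(image, ELFMAG, SELFMAG) != 0)
		return 0;	/* not an ELF image: no symbols were loaded */

	sh = (const Elf32_Shdr *)(image + eh->e_shoff);
	for (i = 0; i < eh->e_shnum; i++, sh++)
		if (sh->sh_offset > 0 &&
		    sh->sh_offset + sh->sh_size > extent)
			extent = sh->sh_offset + sh->sh_size;
	return extent;
}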