static void
mips_init(void)
{
	int i;

	for (i = 0; i < 10; i++) {
		phys_avail[i] = 0;
	}

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS((vm_offset_t)&end);
	phys_avail[1] = ctob(realmem);

	physmem = realmem;

	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
#ifdef DDB
	kdb_init();
#endif
}
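
The phys_avail[] array above follows the FreeBSD convention: start/end byte addresses stored in pairs, with a zeroed pair terminating the list. Below is a minimal user-space sketch of how such a list is walked later by VM startup; PAGE_SIZE, the region values, and count_avail_pages() are all illustrative, not kernel API.

/*
 * Sketch only: walk a phys_avail-style list of start/end byte pairs.
 * A zeroed pair terminates the list.  Values are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096

static uint64_t
count_avail_pages(const uint64_t *avail, int max_pairs)
{
	uint64_t pages = 0;
	int i;

	for (i = 0; i < max_pairs * 2; i += 2) {
		if (avail[i] == 0 && avail[i + 1] == 0)
			break;		/* a zeroed pair ends the list */
		pages += (avail[i + 1] - avail[i]) / PAGE_SIZE;
	}
	return (pages);
}

int
main(void)
{
	/* one region: 1 MB (kernel end) .. 32 MB; the rest stays zeroed */
	uint64_t avail[10] = { 0x100000, 32 << 20 };

	printf("%llu pages available\n",
	    (unsigned long long)count_avail_pages(avail, 5));
	return (0);
}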
Example #2
static void
mips_init(void)
{
	int i;

	printf("entry: mips_init()\n");

	bootverbose = 1;
	realmem = btoc(32 << 20);

	for (i = 0; i < 10; i++) {
		phys_avail[i] = 0;
	}

	/* phys_avail regions are in bytes */
	dump_avail[0] = phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	dump_avail[1] = phys_avail[1] = ctob(realmem);

	physmem = realmem;

	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
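
The btoc()/ctob() pair above converts bytes to pages ("clicks") and back. A stand-alone sketch of that arithmetic, assuming a 4 KB page size (the real macros are machine-dependent and may differ):

/* Sketch only: btoc()/ctob() as shifts, assuming 4 KB pages. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define btoc(x)	(((unsigned long)(x) + ((1UL << PAGE_SHIFT) - 1)) >> PAGE_SHIFT)
#define ctob(x)	((unsigned long)(x) << PAGE_SHIFT)

int
main(void)
{
	unsigned long realmem = btoc(32 << 20);	/* 32 MB -> 8192 pages */

	printf("realmem = %lu pages, ctob(realmem) = %lu bytes\n",
	    realmem, ctob(realmem));
	return (0);
}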
Example #3
static void
mips_init(void)
{
	int i;
#ifdef FDT
	struct mem_region mr[FDT_MEM_REGIONS];
	uint64_t val;
	int mr_cnt;
	int j;
#endif

	for (i = 0; i < 10; i++) {
		phys_avail[i] = 0;
	}

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	dump_avail[0] = phys_avail[0];
	dump_avail[1] = phys_avail[1];

	physmem = realmem;

#ifdef FDT
	if (fdt_get_mem_regions(mr, &mr_cnt, &val) == 0) {
		physmem = btoc(val);

		KASSERT((phys_avail[0] >= mr[0].mr_start) &&
			(phys_avail[0] < (mr[0].mr_start + mr[0].mr_size)),
			("First region is not within FDT memory range"));

		/* Limit size of the first region */
		phys_avail[1] = (mr[0].mr_start + MIN(mr[0].mr_size, ctob(realmem)));
		dump_avail[1] = phys_avail[1];

		/* Add the rest of regions */
		for (i = 1, j = 2; i < mr_cnt; i++, j+=2) {
			phys_avail[j] = mr[i].mr_start;
			phys_avail[j+1] = (mr[i].mr_start + mr[i].mr_size);
			dump_avail[j] = phys_avail[j];
			dump_avail[j+1] = phys_avail[j+1];
		}
	}
#endif

	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
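
The FDT branch above flattens mem_region (start, size) records into phys_avail/dump_avail start/end pairs. Below is a self-contained sketch of that packing loop; the struct layout and region values are made up for illustration.

/* Sketch only: flatten (start, size) records into start/end pairs. */
#include <stdint.h>
#include <stdio.h>

struct mem_region {
	uint64_t mr_start;
	uint64_t mr_size;
};

int
main(void)
{
	struct mem_region mr[] = {		/* invented regions */
		{ 0x00100000, 0x0ff00000 },	/* 255 MB @ 1 MB */
		{ 0x20000000, 0x10000000 },	/* 256 MB @ 512 MB */
	};
	uint64_t avail[8] = { 0 };
	int i, j;

	for (i = 0, j = 0; i < 2; i++, j += 2) {
		avail[j] = mr[i].mr_start;
		avail[j + 1] = mr[i].mr_start + mr[i].mr_size;
	}
	for (j = 0; j < 4; j += 2)
		printf("region: 0x%llx - 0x%llx\n",
		    (unsigned long long)avail[j],
		    (unsigned long long)avail[j + 1]);
	return (0);
}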
Example #4
__dead void
landisk_startup(int howto, char *_esym)
{
	u_int32_t ramsize;

	/* Start to determine heap area */
	esym = _esym;
	kernend = (vaddr_t)round_page((vaddr_t)esym);

	boothowto = howto;

	ramsize = getramsize();

	/* Initialize CPU ops. */
	sh_cpu_init(CPU_ARCH_SH4, CPU_PRODUCT_7751R);	

	/* Initialize early console */
	consinit();

	/* Load memory to UVM */
	if (ramsize == 0 || ramsize > 512 * 1024 * 1024)
		ramsize = IOM_RAM_SIZE;
	physmem = atop(ramsize);
	kernend = atop(round_page(SH3_P1SEG_TO_PHYS(kernend)));
	uvm_page_physload(atop(IOM_RAM_BEGIN),
	    atop(IOM_RAM_BEGIN + ramsize), kernend,
	    atop(IOM_RAM_BEGIN + ramsize), 0);
	cpu_init_kcore_hdr();	/* need to be done before pmap_bootstrap */

	/* Initialize proc0 u-area */
	sh_proc0_init();

	/* Initialize pmap and start to address translation */
	pmap_bootstrap();

#if defined(DDB)
	db_machine_init();
	ddb_init();
	if (boothowto & RB_KDB) {
		Debugger();
	}
#endif

	/* Jump to main */
	__asm volatile(
		"jmp	@%0\n\t"
		" mov	%1, sp"
		:: "r" (main), "r" (proc0.p_md.md_pcb->pcb_sf.sf_r7_bank));
	/* NOTREACHED */
	for (;;) ;
}
Example #5
void
dreamcast_startup()
{
	extern char edata[], end[];
	paddr_t kernend;

	/* Clear bss */
	memset(edata, 0, end - edata);

	/* Initialize CPU ops. */
	sh_cpu_init(CPU_ARCH_SH4, CPU_PRODUCT_7750);

	/* Console */
	consinit();

	/* Load memory to UVM */
	physmem = atop(IOM_RAM_SIZE);
	kernend = atop(round_page(SH3_P1SEG_TO_PHYS(end)));
	uvm_page_physload(
		kernend, atop(IOM_RAM_BEGIN + IOM_RAM_SIZE),
		kernend, atop(IOM_RAM_BEGIN + IOM_RAM_SIZE),
		VM_FREELIST_DEFAULT);

	/* Initialize proc0 u-area */
	sh_proc0_init();

	/* Initialize pmap and start to address translation */
	pmap_bootstrap();

	/* Debugger. */
#ifdef DDB
	ddb_init(0, NULL, NULL);
#endif
#if defined(KGDB) && (NSCIF > 0)
	if (scif_kgdb_init() == 0) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif /* KGDB && NSCIF > 0 */

	/* Jump to main */
	__asm__ __volatile__(
		"jmp	@%0;"
		"mov	%1, sp"
		:: "r"(main),"r"(proc0.p_md.md_pcb->pcb_sf.sf_r7_bank));
	/* NOTREACHED */
	while (1)
		;
}
Example #6
/*
 * This function is called from _bootstrap() to initialize
 * pre-vm-system virtual memory.  All this really does is to
 * set virtual_avail to the first page following preloaded
 * data (i.e. the kernel and its symbol table) and special
 * things that may be needed very early (lwp0 upages).
 * Once that is done, pmap_bootstrap() is called to do the
 * usual preparations for our use of the MMU.
 */
static void
_vm_init(void)
{
	vaddr_t nextva;

	/*
	 * First preserve our symbol table, which might have been
	 * loaded after our BSS area by the boot loader.  However,
	 * if DDB is not part of this kernel, ignore the symbols.
	 */
	esym = end + 4;
#if defined(DDB)
	/* This will advance esym past the symbols. */
	_save_symtab();
#endif

	/*
	 * Steal some special-purpose, already mapped pages.
	 * Note: msgbuf is setup in machdep.c:cpu_startup()
	 */
	nextva = m68k_round_page(esym);

	/*
	 * Setup the u-area pages (stack, etc.) for lwp0.
	 * This is done very early (here) to make sure the
	 * fault handler works in case we hit an early bug.
	 * (The fault handler may reference lwp0 stuff.)
	 */
	uvm_lwp_setuarea(&lwp0, nextva);
	memset((void *)nextva, 0, USPACE);

	nextva += USPACE;

	/*
	 * Now that lwp0 exists, make it the "current" one.
	 */
	curlwp = &lwp0;
	curpcb = lwp_getpcb(&lwp0);

	/* This does most of the real work. */
	pmap_bootstrap(nextva);
}
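
The u-area setup above is a bump allocator: round the first free address up to a page boundary, zero USPACE bytes for lwp0, and advance. A user-space sketch of the same steps, assuming 4 KB pages and an illustrative three-page USPACE:

/* Sketch only: early u-area carving; sizes and the heap are invented. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096UL
#define USPACE		(3 * PAGE_SIZE)
#define round_page(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static char heap[8 * 4096];

int
main(void)
{
	uintptr_t nextva = (uintptr_t)heap + 100;	/* e.g. just past esym */

	nextva = round_page(nextva);			/* page-align */
	memset((void *)nextva, 0, USPACE);		/* zero lwp0's u-area */
	nextva += USPACE;

	printf("next free va after u-area: %#lx\n", (unsigned long)nextva);
	return (0);
}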
Example #7
void
hppa_init()
{
	extern int kernel_text, end;
	struct pdc_hwtlb pdc_hwtlb PDC_ALIGNMENT;
	struct pdc_coproc pdc_coproc PDC_ALIGNMENT;
	vm_offset_t v, vstart, vend;
	register int pdcerr;
	int usehpt;

	/* init PDC iface, so we can call em easy */
	pdc_init();

	/* calculate cpu speed */
	cpu_hzticks = (PAGE0->mem_10msec * 100) / hz;
	delay_init();

	/*
	 * get cache parameters from the PDC
	 */
	if ((pdcerr = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_DFLT,
			       &pdc_cache)) < 0) {
#ifdef DIAGNOSTIC
                printf("Warning: PDC_CACHE call Ret'd %d\n", pdcerr);
#endif
	}

	dcache_line_mask = pdc_cache.dc_conf.cc_line * 16 - 1;
	dcache_size = pdc_cache.dc_size;
	dcache_stride = pdc_cache.dc_stride;
	icache_stride = pdc_cache.ic_stride;

	/*
	 * purge TLBs and flush caches
	 */
	if (pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL) < 0)
		printf("WARNING: BTLB purge failed\n");
	ptlball();
	fcacheall();

	/* calculate HPT size */
	hpt_hashsize = PAGE0->imm_max_mem / NBPG;
	mtctl(hpt_hashsize - 1, CR_HPTMASK);

	/*
	 * If we want to use the HW TLB support, ensure that it exists.
	 */
	if (pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) &&
	    !pdc_hwtlb.min_size && !pdc_hwtlb.max_size) {
		printf("WARNING: no HW tlb walker\n");
		usehpt = 0;
	} else {
		usehpt = 1;
#ifdef DEBUG
		printf("hwtlb: %u-%u, %u/",
		       pdc_hwtlb.min_size, pdc_hwtlb.max_size, hpt_hashsize);
#endif
		if (hpt_hashsize > pdc_hwtlb.max_size)
			hpt_hashsize = pdc_hwtlb.max_size;
		else if (hpt_hashsize < pdc_hwtlb.min_size)
			hpt_hashsize = pdc_hwtlb.min_size;
#ifdef DEBUG
		printf("%u (0x%x)\n", hpt_hashsize,
		       hpt_hashsize * sizeof(struct hpt_entry));
#endif
	}
	
	totalphysmem = PAGE0->imm_max_mem / NBPG;
	resvmem = ((vm_offset_t)&kernel_text) / NBPG;

	vstart = hppa_round_page(&end);
	vend = VM_MAX_KERNEL_ADDRESS;

	/* we hope this won't fail */
	hppa_ex = extent_create("mem", 0x0, 0xffffffff, M_DEVBUF,
				(caddr_t)mem_ex_storage,
				sizeof(mem_ex_storage),
				EX_NOCOALESCE|EX_NOWAIT);
	if (extent_alloc_region(hppa_ex, 0, (vm_offset_t)PAGE0->imm_max_mem,
				EX_NOWAIT))
		panic("cannot reserve main memory");

	/*
	 * Allocate space for system data structures.  We are given
	 * a starting virtual address and we return a final virtual
	 * address; along the way we set each data structure pointer.
	 *
	 * We call allocsys() with 0 to find out how much space we want,
	 * allocate that much and fill it with zeroes, and then call
	 * allocsys() again with the correct base virtual address.
	 */

	v = vstart;
#define	valloc(name, type, num)	\
	    (name) = (type *)v; v = (vm_offset_t)((name)+(num))

#ifdef REAL_CLISTS
	valloc(cfree, struct cblock, nclist);
#endif
	valloc(callout, struct callout, ncallout);
	nswapmap = maxproc * 2;
	valloc(swapmap, struct map, nswapmap);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
#ifdef SYSVSEM 
	valloc(sema, struct semid_ds, seminfo.semmni);
	valloc(sem, struct sem, seminfo.semmns); 
	/* This is pretty disgusting! */
	valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
#endif
#ifdef SYSVMSG
	valloc(msgpool, char, msginfo.msgmax);
	valloc(msgmaps, struct msgmap, msginfo.msgseg);
	valloc(msghdrs, struct msg, msginfo.msgtql);
	valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif

#ifndef BUFCACHEPERCENT
#define BUFCACHEPERCENT 10
#endif /* BUFCACHEPERCENT */

	if (bufpages == 0)
		bufpages = totalphysmem / BUFCACHEPERCENT / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages;
		if (nbuf < 16)
			nbuf = 16;
	}

	/* Restrict to at most 70% filled kvm */
	if (nbuf * MAXBSIZE >
	    (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) * 7 / 10)
		nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
		    MAXBSIZE * 7 / 10;

	/* More buffer pages than fits into the buffers is senseless.  */
	if (bufpages > nbuf * MAXBSIZE / CLBYTES)
		bufpages = nbuf * MAXBSIZE / CLBYTES;

	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) & ~1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}

	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);
#undef valloc
	bzero ((void *)vstart, (v - vstart));
	vstart = v;

	pmap_bootstrap(&vstart, &vend);
	physmem = totalphysmem - btoc(vstart);

	/* alloc msgbuf */
	if (!(msgbufp = (void *)pmap_steal_memory(sizeof(struct msgbuf),
						  NULL, NULL)))
		panic("cannot allocate msgbuf");
	msgbufmapped = 1;

#ifdef DEBUG
	printf("mem: %x+%x, %x\n", physmem, resvmem, totalphysmem);
#endif
	/* Turn on the HW TLB assist */
	if (usehpt) {
		if ((pdcerr = pdc_call((iodcio_t)pdc, 0, PDC_TLB,
				       PDC_TLB_CONFIG, &pdc_hwtlb, hpt_table,
				       sizeof(struct hpt_entry) * hpt_hashsize,
				       PDC_TLB_WORD3)) < 0) {
			printf("Warning: HW TLB init failed (%d), disabled\n",
			       pdcerr);
			usehpt = 0;
		} else
			printf("HW TLB(%d entries at 0x%x) initialized (%d)\n",
			       hpt_hashsize, hpt_table, pdcerr);
	}

	/*
	 * Locate any coprocessors and enable them by setting up the CCR.
	 * SFUs are ignored (since we don't have any).  Also, initialize
	 * the floating point registers here.
	 */
	if ((pdcerr = pdc_call((iodcio_t)pdc, 0, PDC_COPROC, PDC_COPROC_DFLT,
			       &pdc_coproc)) < 0)
		printf("WARNING: PDC_COPROC call Ret'd %d\n", pdcerr);
	else {
#ifdef DEBUG
		printf("pdc_coproc: %x, %x\n", pdc_coproc.ccr_enable,
		       pdc_coproc.ccr_present);
#endif
	}
	copr_sfu_config = pdc_coproc.ccr_enable;
	mtctl(copr_sfu_config & CCR_MASK, CR_CCR);
/*
        fprinit(&fpcopr_version);
	fpcopr_version = (fpcopr_version & 0x003ff800) >> 11;
        mtctl(CR_CCR, 0);
*/
	/*
	 * Clear the FAULT light (so we know when we get a real one)
	 * PDC_COPROC apparently turns it on (for whatever reason).
	 */
	pdcerr = PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0;
	(void) (*pdc)(PDC_CHASSIS, PDC_CHASSIS_DISP, pdcerr);

#ifdef DDB
	ddb_init();
#endif
#ifdef DEBUG
	printf("hppa_init: leaving\n");
#endif
	kernelmapped++;
}
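
The valloc() macro above lays out typed arrays in one contiguous range by advancing a cursor, which is why the same lines can both size and place the allocations. A user-space sketch of the idiom with illustrative types and counts:

/* Sketch only: the valloc() cursor idiom; types and counts invented. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct callout { int c_time; void *c_arg; };

int
main(void)
{
	static char area[4096];
	uintptr_t v = (uintptr_t)area;
	struct callout *callout;
	int *semu;

#define	valloc(name, type, num)	\
	(name) = (type *)v; v = (uintptr_t)((name) + (num))

	valloc(callout, struct callout, 16);	/* 16 callouts */
	valloc(semu, int, 32);			/* 32 ints */
#undef valloc

	memset(area, 0, v - (uintptr_t)area);	/* zero everything placed */
	printf("laid out %lu bytes\n", (unsigned long)(v - (uintptr_t)area));
	return (0);
}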
Example #8
void *
initarm(struct arm_boot_params *abp)
{
#define	next_chunk2(a,b)	(((a) + (b)) &~ ((b)-1))
#define	next_page(a)		next_chunk2(a,PAGE_SIZE)
	struct pv_addr  kernel_l1pt;
	struct pv_addr  dpcpu;
	int loop, i;
	u_int l1pagetable;
	vm_offset_t freemempos;
	vm_offset_t freemem_pt;
	vm_offset_t afterkern;
	vm_offset_t freemem_after;
	vm_offset_t lastaddr;
	uint32_t memsize;

	/* kernel text starts where we were loaded at boot */
#define	KERNEL_TEXT_OFF		(abp->abp_physaddr  - PHYSADDR)
#define	KERNEL_TEXT_BASE	(KERNBASE + KERNEL_TEXT_OFF)
#define	KERNEL_TEXT_PHYS	(PHYSADDR + KERNEL_TEXT_OFF)

	lastaddr = parse_boot_param(abp);
	arm_physmem_kernaddr = abp->abp_physaddr;
	set_cpufuncs();		/* NB: sets cputype */
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);

	if (envmode == 1)
		kern_envp = static_env;
	/* Do basic tuning, hz etc */
      	init_param1();
		
	/*
	 * We allocate memory downwards from where we were loaded
	 * by RedBoot; first the L1 page table, then NUM_KERNEL_PTS
	 * entries in the L2 page table.  Past that we re-align the
	 * allocation boundary so later data structures (stacks, etc)
	 * can be mapped with different attributes (write-back vs
	 * write-through).  Note this leaves a gap for expansion
	 * (or might be repurposed).
	 */
	freemempos = abp->abp_physaddr;

	/* macros to simplify initial memory allocation */
#define alloc_pages(var, np) do {					\
	freemempos -= (np * PAGE_SIZE);					\
	(var) = freemempos;						\
	/* NB: this works because locore maps PA=VA */			\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));			\
} while (0)
#define	valloc_pages(var, np) do {					\
	alloc_pages((var).pv_pa, (np));					\
	(var).pv_va = (var).pv_pa + (KERNVIRTADDR - abp->abp_physaddr);	\
} while (0)

	/* force L1 page table alignment */
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos -= PAGE_SIZE;
	/* allocate contiguous L1 page table */
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	/* now allocate L2 page tables; they are linked to L1 below */
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[loop],
			    L2_TABLE_SIZE / PAGE_SIZE);
		} else {
			kernel_pt_table[loop].pv_pa = freemempos +
			    (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
			    L2_TABLE_SIZE_REAL;
			kernel_pt_table[loop].pv_va =
			    kernel_pt_table[loop].pv_pa +
				(KERNVIRTADDR - abp->abp_physaddr);
		}
	}
	freemem_pt = freemempos;		/* base of allocated pt's */

	/*
	 * Re-align allocation boundary so we can map the area
	 * write-back instead of write-through for the stacks and
	 * related structures allocated below.
	 */
	freemempos = PHYSADDR + 0x100000;
	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, KSTACK_PAGES);
	alloc_pages(minidataclean.pv_pa, 1);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now construct the L1 page table.  First map the L2
	 * page tables into the L1 so we can replace L1 mappings
	 * later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
	pmap_link_l2pt(l1pagetable, IXP425_IO_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO]);
	pmap_link_l2pt(l1pagetable, IXP425_MCU_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO + 1]);
	pmap_link_l2pt(l1pagetable, IXP425_PCI_MEM_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO + 2]);
	pmap_link_l2pt(l1pagetable, KERNBASE,
	    &kernel_pt_table[KERNEL_PT_BEFOREKERN]);
	pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR, 0x100000,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, PHYSADDR + 0x100000,
	    0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE, KERNEL_TEXT_PHYS,
	    next_chunk2(((uint32_t)lastaddr) - KERNEL_TEXT_BASE, L1_S_SIZE),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	freemem_after = next_page((int)lastaddr);
	afterkern = round_page(next_chunk2((vm_offset_t)lastaddr, L1_S_SIZE));
	for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
		pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000,
		    &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
	}
	pmap_map_entry(l1pagetable, afterkern, minidataclean.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, afterkern,
	    minidataclean.pv_pa);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	if (cpu_is_ixp43x())
		arm_devmap_bootstrap(l1pagetable, ixp435_devmap);
	else
		arm_devmap_bootstrap(l1pagetable, ixp425_devmap);
	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();
	cpu_setup();

	/* ready to setup the console (XXX move earlier if possible) */
	cninit();
	/*
	 * Fetch the RAM size from the MCU registers.  The
	 * expansion bus was mapped above so we can now read 'em.
	 */
	if (cpu_is_ixp43x())
		memsize = ixp435_ddram_size();
	else
		memsize = ixp425_sdram_size();

	undefined_init();

	init_proc0(kernelstack.pv_va);

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	pmap_curmaxkvaddr = afterkern + PAGE_SIZE;
	vm_max_kernel_address = 0xe0000000;
	pmap_bootstrap(pmap_curmaxkvaddr, &kernel_l1pt);
	msgbufp = (void*)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Add the physical ram we have available.
	 *
	 * Exclude the kernel, and all the things we allocated which immediately
	 * follow the kernel, from the VM allocation pool but not from crash
	 * dumps.  virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_hardware_region(PHYSADDR, memsize);
	arm_physmem_exclude_region(freemem_pt, KERNPHYSADDR -
	    freemem_pt, EXFLAG_NOALLOC);
	arm_physmem_exclude_region(freemempos, KERNPHYSADDR - 0x100000 -
	    freemempos, EXFLAG_NOALLOC);
	arm_physmem_exclude_region(abp->abp_physaddr, 
	    virtual_avail - KERNVIRTADDR, EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	kdb_init();

	/* use static kernel environment if so configured */
	if (envmode == 1)
		kern_envp = static_env;

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
#undef next_page
#undef next_chunk2
}
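
initarm() above allocates page-table memory downward from the load address, backing off page by page until the L1 table would land naturally aligned. This sketch reproduces only that downward bump-allocator step, with illustrative sizes (16 KB L1 table, 4 KB pages) and a fake RAM window:

/* Sketch only: downward allocation with L1-table alignment back-off. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096UL
#define L1_TABLE_SIZE	16384UL

static char ram[16 * 4096];

int
main(void)
{
	/* start at the (page-aligned) top of our fake RAM window */
	uintptr_t freemempos = ((uintptr_t)ram + sizeof(ram)) & ~(PAGE_SIZE - 1);
	uintptr_t l1pt;

	/* back off page by page until the L1 table would be aligned */
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos -= PAGE_SIZE;

	freemempos -= L1_TABLE_SIZE;		/* the alloc_pages() step */
	l1pt = freemempos;
	memset((void *)l1pt, 0, L1_TABLE_SIZE);	/* tables start out zeroed */

	printf("L1 table at %#lx\n", (unsigned long)l1pt);
	return (0);
}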
Example #9
void
initarm(struct arm64_bootparams *abp)
{
	struct efi_map_header *efihdr;
	struct pcpu *pcpup;
	vm_offset_t lastaddr;
	caddr_t kmdp;
	vm_paddr_t mem_len;
	int i;

	/* Set the module data location */
	preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);

#ifdef FDT
	try_load_dtb(kmdp);
#endif

	/* Find the address to start allocating from */
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

	/* Load the physical memory ranges */
	physmap_idx = 0;
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	add_efi_map_entries(efihdr, physmap, &physmap_idx);

	/* Print the memory map */
	mem_len = 0;
	for (i = 0; i < physmap_idx; i += 2) {
		dump_avail[i] = physmap[i];
		dump_avail[i + 1] = physmap[i + 1];
		mem_len += physmap[i + 1] - physmap[i];
	}
	dump_avail[i] = 0;
	dump_avail[i + 1] = 0;

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();

	/* Bootstrap enough of pmap  to enter the kernel proper */
	pmap_bootstrap(abp->kern_l1pt, KERNBASE - abp->kern_delta,
	    lastaddr - KERNBASE);

	arm_devmap_bootstrap(0, NULL);

	cninit();

	init_proc0(abp->kern_stack);
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);

	dbg_monitor_init();
	kdb_init();

	early_boot = 0;
}
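
The loop above copies physmap start/end pairs into dump_avail and terminates the list with a zeroed pair. A stand-alone sketch of that construction, and of how the sentinel later stops a walk; the addresses are invented.

/* Sketch only: build a zero-terminated dump_avail list from physmap. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t physmap[] = {
		0x80000000ULL, 0xbff00000ULL,	/* 1st bank (invented) */
		0xc0000000ULL, 0xfff00000ULL,	/* 2nd bank (invented) */
	};
	uint64_t dump_avail[6];
	int physmap_idx = 4, i;

	for (i = 0; i < physmap_idx; i += 2) {
		dump_avail[i] = physmap[i];
		dump_avail[i + 1] = physmap[i + 1];
	}
	dump_avail[i] = 0;		/* zeroed pair terminates the list */
	dump_avail[i + 1] = 0;

	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2)
		printf("0x%llx - 0x%llx\n",
		    (unsigned long long)dump_avail[i],
		    (unsigned long long)dump_avail[i + 1]);
	return (0);
}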
Example #10
u_int
at91bus_setup(BootConfig *mem)
{
	int loop;
	int loop1;
	u_int l1pagetable;

	consinit();

#ifdef	VERBOSE_INIT_ARM
	printf("\nNetBSD/AT91 booting ...\n");
#endif

	// setup the CPU / MMU / TLB functions:
	if (set_cpufuncs())
		panic("%s: cpu not recognized", __FUNCTION__);

#ifdef	VERBOSE_INIT_ARM
	printf("%s: configuring system...\n", __FUNCTION__);
#endif

	/*
	 * Setup the variables that define the availability of
	 * physical memory.
	 */
	physical_start = mem->dram[0].address;
	physical_end = mem->dram[0].address + mem->dram[0].pages * PAGE_SIZE;

	physical_freestart = mem->dram[0].address + 0x9000ULL;
	physical_freeend = KERNEL_BASE_PHYS;
	physmem = (physical_end - physical_start) / PAGE_SIZE;

#ifdef	VERBOSE_INIT_ARM
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	       physical_start, physical_end - 1);
#endif

	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef	VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	       physical_freestart, free_pages, free_pages);
#endif
	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)				\
	alloc_pages((var).pv_pa, (np));			\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define alloc_pages(var, np)				\
	physical_freeend -= ((np) * PAGE_SIZE);		\
	if (physical_freeend < physical_freestart)	\
		panic("initarm: out of memory");	\
	(var) = physical_freeend;			\
	free_pages -= (np);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	loop1 = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}

	/* This should never be able to happen but better confirm that. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system vectors page
	 */
	valloc_pages(systempage, 1);
	systempage.pv_va = 0x00000000;

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va); 
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va); 
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va); 
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va); 
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables.  Save physical_freeend for when we give what's left
	 * of memory below 2Mbyte to UVM.
	 */

	physical_freeend_low = physical_freeend;

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000, &kernel_pt_table[KERNEL_PT_SYS]);
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel static code/data */
	{
		extern char etext[], _end[];
		size_t textsize = (uintptr_t) etext - KERNEL_TEXT_BASE;
		size_t totalsize = (uintptr_t) _end - KERNEL_TEXT_BASE;
		u_int logical;

		textsize = (textsize + PGOFSET) & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;

		logical = KERNEL_BASE_PHYS - mem->dram[0].address;	/* offset of kernel in RAM */
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_LOW, systempage.pv_pa,
		       VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the statically mapped devices. */
	pmap_devmap_bootstrap(l1pagetable, at91_devmap());

	/*
	 * Update the physical_freestart/physical_freeend/free_pages
	 * variables.
	 */
	{
		extern char _end[];

		physical_freestart = physical_start +
		    (((((uintptr_t) _end) + PGOFSET) & ~PGOFSET) -
		     KERNEL_BASE);
		physical_freeend = physical_end;
		free_pages =
		    (physical_freeend - physical_freestart) / PAGE_SIZE;
	}

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	       physical_freestart, free_pages, free_pages);
	printf("switching to new L1 page table  @%#lx...", kernel_l1pt.pv_pa);
#endif
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_setttb(kernel_l1pt.pv_pa, true);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init
	 */
	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

#ifdef VERBOSE_INIT_ARM
	printf("done!\n");
#endif

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	/* @@@@ check this out: @@@ */
	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);
	uvm_page_physload(atop(physical_start), atop(physical_freeend_low),
	    atop(physical_start), atop(physical_freeend_low),
	    VM_FREELIST_DEFAULT);

	/* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

	/* Setup the IRQ system */
#ifdef VERBOSE_INIT_ARM
	printf("irq ");
#endif
	at91_intr_init();

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef BOOTHOWTO
	boothowto = BOOTHOWTO;
#endif
	boothowto = AB_VERBOSE | AB_DEBUG; // @@@@

#ifdef IPKDB
	/* Initialise ipkdb */
	ipkdb_init();
	if (boothowto & RB_KDB)
		ipkdb_connect(0);
#endif

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif
#if 0
	printf("test data abort...\n");
	*((volatile uint32_t*)(0x1234567F)) = 0xdeadbeef;
#endif

#ifdef VERBOSE_INIT_ARM
  	printf("%s: returning new stack pointer 0x%lX\n", __FUNCTION__, (kernelstack.pv_va + USPACE_SVC_STACK_TOP));
#endif

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
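
When mapping the kernel image above, the text and data sizes are rounded up to page boundaries and mapped as two consecutive chunks, so the text can later get different attributes. A sketch of just that arithmetic, with illustrative addresses:

/* Sketch only: text/data split and page rounding; addresses invented. */
#include <stdio.h>

#define PGOFSET	4095UL

int
main(void)
{
	unsigned long kernel_base = 0xc0000000UL;
	unsigned long etext = kernel_base + 0x1234567UL;	/* invented */
	unsigned long kend = kernel_base + 0x1a00123UL;		/* invented */
	unsigned long textsize = etext - kernel_base;
	unsigned long totalsize = kend - kernel_base;
	unsigned long logical = 0;

	textsize = (textsize + PGOFSET) & ~PGOFSET;
	totalsize = (totalsize + PGOFSET) & ~PGOFSET;

	logical += textsize;			/* text chunk mapped first */
	logical += totalsize - textsize;	/* then data/bss chunk */
	printf("mapped %#lx bytes of kernel\n", logical);
	return (0);
}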
Example #11
void
platform_start(__register_t a0, __register_t a1,
    __register_t a2 __unused, __register_t a3 __unused)
{
	uint64_t platform_counter_freq;
	vm_offset_t kernend;
	int argc = a0;
	char **argv = (char **)a1;
	int i, mem;

	/* clear the BSS and SBSS segments */
	kernend = (vm_offset_t)&end;
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	mips_postboot_fixup();

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	/*
	 * Looking for mem=XXM argument
	 */
	mem = 0; /* Just something to start with */
	for (i=0; i < argc; i++) {
		if (strncmp(argv[i], "mem=", 4) == 0) {
			mem = strtol(argv[i] + 4, NULL, 0);
			break;
		}
	}

	bootverbose = 1;
	if (mem > 0)
		realmem = btoc(mem << 20);
	else
		realmem = btoc(32 << 20);

	for (i = 0; i < 10; i++) {
		phys_avail[i] = 0;
	}

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	dump_avail[0] = phys_avail[0];
	dump_avail[1] = phys_avail[1];

	physmem = realmem;

	/*
	 * ns8250 uart code uses DELAY so the ticker should be initialized
	 * before cninit().  And tick_init_params() refers to hz, so
	 * init_param1() should be called first.
	 */
	init_param1();
	/* TODO: parse argc,argv */
	platform_counter_freq = 330000000UL;
	mips_timer_init_params(platform_counter_freq, 1);
	cninit();
	/* Panic here, after cninit */ 
	if (mem == 0)
		panic("No mem=XX parameter in arguments");

	printf("cmd line: ");
	for (i=0; i < argc; i++)
		printf("%s ", argv[i]);
	printf("\n");

	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
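
The argument scan above looks for a mem=XX option and parses the number with strtol(). A self-contained sketch of the same loop over a fake argv:

/* Sketch only: scan boot arguments for mem=XX; argv is invented. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	char *argv[] = { "kernel", "console=uart", "mem=64", NULL };
	int argc = 3, i, mem = 0;

	for (i = 0; i < argc; i++) {
		if (strncmp(argv[i], "mem=", 4) == 0) {
			mem = strtol(argv[i] + 4, NULL, 0);	/* MB */
			break;
		}
	}
	printf("mem = %d MB\n", mem);
	return (0);
}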
Example #12
void
platform_start(__register_t a0 __unused, __register_t a1 __unused, 
    __register_t a2 __unused, __register_t a3 __unused)
{
	uint64_t platform_counter_freq;
	int argc = 0, i;
	char **argv = NULL, **envp = NULL;
	vm_offset_t kernend;

	/* 
	 * clear the BSS and SBSS segments, this should be first call in
	 * the function
	 */
	kernend = (vm_offset_t)&end;
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	mips_postboot_fixup();

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	/*
	 * Until there are some more sensible abstractions for
	 * uboot/redboot environment handling, we have to make this a
	 * compile-time hack.  The existing code handles the uboot
	 * environment very incorrectly, so we just skip initialising
	 * the relevant pointers.
	 */
#ifndef	AR71XX_ENV_UBOOT
	argc = a0;
	argv = (char**)a1;
	envp = (char**)a2;
#endif
	/* 
	 * Protect ourselves from garbage in registers 
	 */
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i += 2) {
			if (strcmp(envp[i], "memsize") == 0)
				realmem = btoc(strtoul(envp[i+1], NULL, 16));
		}
	}

	/*
	 * Just a wild guess.  RedBoot let us down and didn't report
	 * the memory size.
	 */
	if (realmem == 0)
		realmem = btoc(32*1024*1024);

	/*
	 * Allow build-time override in case Redboot lies
	 * or in other situations (e.g. where there's u-boot)
	 * where there isn't (yet) a convenient method of
	 * being told how much RAM is available.
	 *
	 * This happens on at least the Ubiquiti LS-SR71A
	 * board, where redboot says there's 16mb of RAM
	 * but in fact there's 32mb.
	 */
#if	defined(AR71XX_REALMEM)
		realmem = btoc(AR71XX_REALMEM);
#endif

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	dump_avail[0] = phys_avail[0];
	dump_avail[1] = phys_avail[1] - phys_avail[0];

	physmem = realmem;

	/*
	 * ns8250 uart code uses DELAY so the ticker should be initialized
	 * before cninit().  And tick_init_params() refers to hz, so
	 * init_param1() should be called first.
	 */
	init_param1();

	/* Detect the system type - this is needed for subsequent chipset-specific calls */
	ar71xx_detect_sys_type();
	ar71xx_detect_sys_frequency();

	platform_counter_freq = ar71xx_cpu_freq();
	mips_timer_init_params(platform_counter_freq, 1);
	cninit();
	init_static_kenv(boot1_env, sizeof(boot1_env));

	printf("CPU platform: %s\n", ar71xx_get_system_type());
	printf("CPU Frequency=%d MHz\n", u_ar71xx_cpu_freq / 1000000);
	printf("CPU DDR Frequency=%d MHz\n", u_ar71xx_ddr_freq / 1000000);
	printf("CPU AHB Frequency=%d MHz\n", u_ar71xx_ahb_freq / 1000000);
	printf("platform frequency: %lld\n", platform_counter_freq);
	printf("CPU reference clock: %d MHz\n", u_ar71xx_refclk / 1000000);
	printf("arguments: \n");
	printf("  a0 = %08x\n", a0);
	printf("  a1 = %08x\n", a1);
	printf("  a2 = %08x\n", a2);
	printf("  a3 = %08x\n", a3);

	/*
	 * XXX this code is very redboot specific.
	 */
	printf("Cmd line:");
	if (MIPS_IS_VALID_PTR(argv)) {
		for (i = 0; i < argc; i++) {
			printf(" %s", argv[i]);
			parse_argv(argv[i]);
		}
	}
	else
		printf ("argv is invalid");
	printf("\n");

	printf("Environment:\n");
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i+=2) {
			printf("  %s = %s\n", envp[i], envp[i+1]);
			setenv(envp[i], envp[i+1]);
		}
	}
	else 
		printf ("envp is invalid\n");

	/* Redboot if_arge MAC address is in the environment */
	ar71xx_redboot_get_macaddr();

	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();

	/*
	 * Reset USB devices 
	 */
	ar71xx_init_usb_peripheral();

	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
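
RedBoot-style environments above are flat string arrays that alternate name and value and end with NULL, so the walk steps by two. A user-space sketch of that walk, including the memsize handling; the environment contents are invented.

/* Sketch only: walk a name/value envp array; contents invented. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	char *envp[] = { "memsize", "0x2000000", "bootdev", "eth0", NULL };
	unsigned long memsize = 0;
	int i;

	for (i = 0; envp[i]; i += 2) {
		printf("  %s = %s\n", envp[i], envp[i + 1]);
		if (strcmp(envp[i], "memsize") == 0)
			memsize = strtoul(envp[i + 1], NULL, 16);
	}
	printf("memsize = %lu bytes\n", memsize);
	return (0);
}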
Example #13
/*
 * Do all the stuff that locore normally does before calling main().
 * Process arguments passed to us by the prom monitor.
 * Return the first page address following the system.
 */
void
mach_init(int x_boothowto, int x_bootdev, int x_bootname, int x_maxmem)
{
	u_long first, last;
	char *kernend;
	struct btinfo_magic *bi_magic;
	struct btinfo_bootarg *bi_arg;
	struct btinfo_systype *bi_systype;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	struct btinfo_symtab *bi_sym;
	int nsym = 0;
	char *ssym, *esym;

	ssym = esym = NULL;	/* XXX: gcc */
#endif
	bi_arg = NULL;

	bootinfo = (void *)BOOTINFO_ADDR;	/* XXX */
	bi_magic = lookup_bootinfo(BTINFO_MAGIC);
	if (bi_magic && bi_magic->magic == BOOTINFO_MAGIC) {
		bi_arg = lookup_bootinfo(BTINFO_BOOTARG);
		if (bi_arg) {
			x_boothowto = bi_arg->howto;
			x_bootdev = bi_arg->bootdev;
			x_maxmem = bi_arg->maxmem;
		}
#if NKSYMS || defined(DDB) || defined(MODULAR)
		bi_sym = lookup_bootinfo(BTINFO_SYMTAB);
		if (bi_sym) {
			nsym = bi_sym->nsym;
			ssym = (void *)bi_sym->ssym;
			esym = (void *)bi_sym->esym;
		}
#endif

		bi_systype = lookup_bootinfo(BTINFO_SYSTYPE);
		if (bi_systype)
			systype = bi_systype->type;
	} else {
		/*
		 * Running kernel is loaded by non-native loader;
		 * clear the BSS segment here.
		 */
		memset(edata, 0, end - edata);
	}

	if (systype == 0) 
		systype = NEWS3400;	/* XXX compatibility for old boot */

#ifdef news5000
	if (systype == NEWS5000) {
		int i;
		char *bootspec = (char *)x_bootdev;

		if (bi_arg == NULL)
			panic("news5000 requires BTINFO_BOOTARG to boot");

		_sip = (void *)bi_arg->sip;
		x_maxmem = _sip->apbsi_memsize;
		x_maxmem -= 0x00100000;	/* reserve 1MB for ROM monitor */
		if (strncmp(bootspec, "scsi", 4) == 0) {
			x_bootdev = (5 << 28) | 0;	 /* magic, sd */
			bootspec += 4;
			if (*bootspec != '(' /*)*/)
				goto bootspec_end;
			i = strtoul(bootspec + 1, &bootspec, 10);
			x_bootdev |= (i << 24);		/* bus */
			if (*bootspec != ',')
				goto bootspec_end;
			i = strtoul(bootspec + 1, &bootspec, 10);
			x_bootdev |= (i / 10) << 20;	/* controller */
			x_bootdev |= (i % 10) << 16;	/* unit */
			if (*bootspec != ',')
				goto bootspec_end;
			i = strtoul(bootspec + 1, &bootspec, 10);
			x_bootdev |= (i << 8);		/* partition */
		}
 bootspec_end:
		consinit();
	}
#endif

	/*
	 * Save parameters into kernel work area.
	 */
	*(int *)(MIPS_PHYS_TO_KSEG1(MACH_MAXMEMSIZE_ADDR)) = x_maxmem;
	*(int *)(MIPS_PHYS_TO_KSEG1(MACH_BOOTDEV_ADDR)) = x_bootdev;
	*(int *)(MIPS_PHYS_TO_KSEG1(MACH_BOOTSW_ADDR)) = x_boothowto;

	kernend = (char *)mips_round_page(end);
#if NKSYMS || defined(DDB) || defined(MODULAR)
	if (nsym)
		kernend = (char *)mips_round_page(esym);
#endif

	/*
	 * Set the VM page size.
	 */
	uvm_setpagesize();

	boothowto = x_boothowto;
	bootdev = x_bootdev;
	physmem = btoc(x_maxmem);

	/*
	 * Now that we know how much memory we have, initialize the
	 * mem cluster array.
	 */
	mem_clusters[0].start = 0;		/* XXX is this correct? */
	mem_clusters[0].size  = ctob(physmem);
	mem_cluster_cnt = 1;

	/*
	 * Copy exception-dispatch code down to exception vector.
	 * Initialize locore-function vector.
	 * Clear out the I and D caches.
	 */
	mips_vector_init(NULL, false);

	/*
	 * We know the CPU type now.  Initialize our DMA tags (might
	 * need this early).
	 */
	newsmips_bus_dma_init();

#if NKSYMS || defined(DDB) || defined(MODULAR)
	if (nsym)
		ksyms_addsyms_elf(esym - ssym, ssym, esym);
#endif

#ifdef KADB
	boothowto |= RB_KDB;
#endif

	/*
	 * Check to see if a mini-root was loaded into memory. It resides
	 * at the start of the next page just after the end of BSS.
	 */
	if (boothowto & RB_MINIROOT)
		kernend += round_page(mfs_initminiroot(kernend));

	/*
	 * Load the rest of the available pages into the VM system.
	 */
	first = round_page(MIPS_KSEG0_TO_PHYS(kernend));
	last = mem_clusters[0].start + mem_clusters[0].size;
	uvm_page_physload(atop(first), atop(last), atop(first), atop(last),
	    VM_FREELIST_DEFAULT);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	mips_init_msgbuf();

	/*
	 * Initialize the virtual memory system.
	 */
	pmap_bootstrap();

	/*
	 * Allocate uarea page for lwp0 and set it.
	 */
	mips_init_lwp0_uarea();

	/*
	 * Determine what model of computer we are running on.
	 */
	switch (systype) {
#ifdef news3400
	case NEWS3400:
		news3400_init();
		strcpy(cpu_model, idrom.id_machine);
		if (strcmp(cpu_model, "news3400") == 0 ||
		    strcmp(cpu_model, "news3200") == 0 ||
		    strcmp(cpu_model, "news3700") == 0) {
			/*
			 * Set up interrupt handling and I/O addresses.
			 */
			hardware_intr = news3400_intr;
			cpuspeed = 10;
		} else {
			printf("kernel not configured for machine %s\n",
			    cpu_model);
		}
		break;
#endif

#ifdef news5000
	case NEWS5000:
		news5000_init();
		strcpy(cpu_model, idrom.id_machine);
		if (strcmp(cpu_model, "news5000") == 0 ||
		    strcmp(cpu_model, "news5900") == 0) {
			/*
			 * Set up interrupt handling and I/O addresses.
			 */
			hardware_intr = news5000_intr;
			cpuspeed = 50;	/* ??? XXX */
		} else {
			printf("kernel not configured for machine %s\n",
			    cpu_model);
		}
		break;
#endif

	default:
		printf("kernel not configured for systype %d\n", systype);
		break;
	}
}
Example #14
u_int
initarm(void *arg)
{
	int loop;
	int loop1;
	u_int l1pagetable;
	extern char _end[];

	/*
	 * Turn the led off, then turn it yellow.
	 * 0x80 - red; 0x04 - fan; 0x02 - green.
	 */
	ISA_PUTBYTE(0x338, 0x04);
	ISA_PUTBYTE(0x338, 0x86);

	/*
	 * Set up a diagnostic console so we can see what's going
	 * on.
	 */
	cn_tab = &kcomcons;

	/* Talk to the user */
	printf("\nNetBSD/netwinder booting ...\n");

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	if (set_cpufuncs())
		panic("CPU not recognized!");

	/*
	 * We are currently running with the MMU enabled and the
	 * entire address space mapped VA==PA, except for the
	 * first 64MB of RAM is also double-mapped at 0xf0000000.
	 * There is an L1 page table at 0x00008000.
	 *
	 * We also have the 21285's PCI I/O space mapped where
	 * we expect it.
	 */

	printf("initarm: Configuring system ...\n");

	/*
	 * Copy out the boot info passed by the firmware.  Note that
	 * early versions of NeTTrom fill this in with bogus values,
	 * so we need to sanity check it.
	 */
	memcpy(&nwbootinfo, (void *)(KERNEL_BASE + 0x100),
	    sizeof(nwbootinfo));
#ifdef VERBOSE_INIT_ARM
	printf("NeTTrom boot info:\n");
	printf("\tpage size = 0x%08lx\n", nwbootinfo.bi_pagesize);
	printf("\tnpages = %ld (0x%08lx)\n", nwbootinfo.bi_nrpages,
	    nwbootinfo.bi_nrpages);
	printf("\trootdev = 0x%08lx\n", nwbootinfo.bi_rootdev);
	printf("\tcmdline = %s\n", nwbootinfo.bi_cmdline);
#endif
	if (nwbootinfo.bi_nrpages != 0x02000 &&
	    nwbootinfo.bi_nrpages != 0x04000 &&
	    nwbootinfo.bi_nrpages != 0x08000 &&
	    nwbootinfo.bi_nrpages != 0x10000) {
		nwbootinfo.bi_pagesize = 0xdeadbeef;
		nwbootinfo.bi_nrpages = 0x01000;	/* 16MB */
		nwbootinfo.bi_rootdev = 0;
	}

	/* Fake bootconfig structure for the benefit of pmap.c */
	/* XXX must make the memory description h/w independent */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = 0;
	bootconfig.dram[0].pages = nwbootinfo.bi_nrpages;

	/*
	 * Set up the variables that define the availability of
	 * physical memory.
	 *
	 * Since the NetWinder NeTTrom doesn't load ELF symbols
	 * for us, we can safely assume that everything after end[]
	 * is free.  We start there and allocate upwards.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);

	physical_freestart = ((((vaddr_t) _end) + PGOFSET) & ~PGOFSET) -
	    KERNEL_BASE;
	physical_freeend = physical_end;
	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	       physical_freestart, free_pages, free_pages);
#endif

	physmem = (physical_end - physical_start) / PAGE_SIZE;

	/* Tell the user about the memory */
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	    physical_start, physical_end - 1);

	/*
	 * Okay, we need to allocate some fixed page tables to get the
	 * kernel going.  We allocate one page directory and a number
	 * of page tables and store the physical addresses in the
	 * kernel_pt_table array.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter,
	 * and the page tables on 4K boundaries otherwise.  Since we
	 * allocate at least 3 L2 page tables, we are guaranteed to
	 * encounter at least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)			\
	alloc_pages((var).pv_pa, (np));		\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define alloc_pages(var, np)			\
	(var) = physical_freestart;		\
	physical_freestart += ((np) * PAGE_SIZE);\
	free_pages -= (np);			\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	loop1 = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if ((physical_freestart & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}

	/* This should never be able to happen but better confirm that. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va); 
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va); 
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va); 
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va); 
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
	pmap_link_l2pt(l1pagetable, KERNEL_BASE,
	    &kernel_pt_table[KERNEL_PT_KERNEL]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel static code/data */
	{
		/*
		 * The kernel starts in the first 1MB of RAM, and we'd
		 * like to use a section mapping for text, so we'll just
		 * map from KERNEL_BASE to etext[] to _end[].
		 */

		extern char etext[];
		size_t textsize = (uintptr_t) etext - KERNEL_BASE;
		size_t totalsize = (uintptr_t) _end - KERNEL_BASE;
		u_int logical;

		textsize = (textsize + PGOFSET) & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;

		logical = 0;		/* offset into RAM */

		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/*
	 * Map devices we can map w/ section mappings.
	 */
	loop = 0;
	while (l1_sec_table[loop].size) {
		vsize_t sz;

#ifdef VERBOSE_INIT_ARM
		printf("%08lx -> %08lx @ %08lx\n", l1_sec_table[loop].pa,
		    l1_sec_table[loop].pa + l1_sec_table[loop].size - 1,
		    l1_sec_table[loop].va);
#endif
		for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_S_SIZE)
			pmap_map_section(l1pagetable,
			    l1_sec_table[loop].va + sz,
			    l1_sec_table[loop].pa + sz,
			    l1_sec_table[loop].prot,
			    l1_sec_table[loop].cache);
		++loop;
	}

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	       physical_freestart, free_pages, free_pages);
	printf("switching to new L1 page table  @%#lx...", kernel_l1pt.pv_pa);
#endif

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_setttb(kernel_l1pt.pv_pa, true);
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init
	 */
	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

#ifdef VERBOSE_INIT_ARM
	printf("done!\n");
#endif

	/*
	 * XXX this should only be done in main() but it is useful to
	 * have output earlier ...
	 */
	consinit();

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	printf("init subsystems: stacks ");

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
	printf("vectors ");
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
	printf("undefined ");
	undefined_init();

	/* Load memory into UVM. */
	printf("page ");
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */

	/* XXX Always one RAM block -- nuke the loop. */
	for (loop = 0; loop < bootconfig.dramblocks; loop++) {
		paddr_t start = (paddr_t)bootconfig.dram[loop].address;
		paddr_t end = start + (bootconfig.dram[loop].pages * PAGE_SIZE);
#if NISADMA > 0
		paddr_t istart, isize;
		extern struct arm32_dma_range *footbridge_isa_dma_ranges;
		extern int footbridge_isa_dma_nranges;
#endif

		if (start < physical_freestart)
			start = physical_freestart;
		if (end > physical_freeend)
			end = physical_freeend;

#if 0
		printf("%d: %lx -> %lx\n", loop, start, end - 1);
#endif

#if NISADMA > 0
		if (arm32_dma_range_intersect(footbridge_isa_dma_ranges,
					      footbridge_isa_dma_nranges,
					      start, end - start,
					      &istart, &isize)) {
			/*
			 * Place the pages that intersect with the
			 * ISA DMA range onto the ISA DMA free list.
			 */
#if 0
			printf("    ISADMA 0x%lx -> 0x%lx\n", istart,
			    istart + isize - 1);
#endif
			uvm_page_physload(atop(istart),
			    atop(istart + isize), atop(istart),
			    atop(istart + isize), VM_FREELIST_ISADMA);

			/*
			 * Load the pieces that come before the
			 * intersection onto the default free list.
			 */
			if (start < istart) {
#if 0
				printf("    BEFORE 0x%lx -> 0x%lx\n",
				    start, istart - 1);
#endif
				uvm_page_physload(atop(start),
				    atop(istart), atop(start),
				    atop(istart), VM_FREELIST_DEFAULT);
			}

			/*
			 * Load the pieces that come after the
			 * intersection onto the default free list.
			 */
			if ((istart + isize) < end) {
#if 0
				printf("     AFTER 0x%lx -> 0x%lx\n",
				    (istart + isize), end - 1);
#endif
				uvm_page_physload(atop(istart + isize),
				    atop(end), atop(istart + isize),
				    atop(end), VM_FREELIST_DEFAULT);
			}
		} else {
			uvm_page_physload(atop(start), atop(end),
			    atop(start), atop(end), VM_FREELIST_DEFAULT);
		}
#else /* NISADMA > 0 */
		uvm_page_physload(atop(start), atop(end),
		    atop(start), atop(end), VM_FREELIST_DEFAULT);
#endif /* NISADMA > 0 */
	}

	/* Boot strap pmap telling it where the kernel page table is */
	printf("pmap ");
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

	/* Now that pmap is inited, we can set cpu_reset_address */
	cpu_reset_address_paddr = vtophys((vaddr_t)netwinder_reset);

	/* Setup the IRQ system */
	printf("irq ");
	footbridge_intr_init();
	printf("done.\n");

	/*
	 * Warn the user if the bootinfo was bogus.  We already
	 * faked up some safe values.
	 */
	if (nwbootinfo.bi_pagesize == 0xdeadbeef)
		printf("WARNING: NeTTrom boot info corrupt\n");

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* Turn the led green */
	ISA_PUTBYTE(0x338, 0x06);

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
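
The physload loop above splits each free RAM block into up to three pieces around the ISA DMA window: the intersection goes on the ISA DMA free list and the remainders on the default list. A sketch of only the splitting arithmetic, with made-up ranges (printed as half-open intervals):

/* Sketch only: split a free block around a DMA window; ranges invented. */
#include <stdio.h>

int
main(void)
{
	unsigned long start = 0x0000000UL, end = 0x2000000UL; /* 32 MB block */
	unsigned long istart = 0x100000UL, isize = 0xf00000UL; /* DMA window */

	if (start < istart)			/* piece before the window */
		printf("default: [0x%lx, 0x%lx)\n", start, istart);
	printf("isadma:  [0x%lx, 0x%lx)\n", istart, istart + isize);
	if (istart + isize < end)		/* piece after the window */
		printf("default: [0x%lx, 0x%lx)\n", istart + isize, end);
	return (0);
}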
Example #15
/*
 * locore.s code calls bootstrap() just before calling main().
 *
 * What we try to do is as follows:
 * - Initialize PROM and the console
 * - Read in part of the information provided by the bootloader and find out
 *   kernel load and end addresses
 * - Initialize ksyms
 * - Find out number of active CPUs
 * - Finalize the bootstrap by calling pmap_bootstrap() 
 *
 * We will try to run out of the prom until we get out of pmap_bootstrap().
 */
void
bootstrap(void *o0, void *bootargs, void *bootsize, void *o3, void *ofw)
{
	void *bi;
	long bmagic;
	char buf[32];

#if NKSYMS || defined(DDB) || defined(MODULAR)
	struct btinfo_symtab *bi_sym;
#endif
	struct btinfo_count *bi_count;
	struct btinfo_kernend *bi_kend;
	struct btinfo_tlb *bi_tlb;
	struct btinfo_boothowto *bi_howto;

	extern void *romtba;
	extern void* get_romtba(void);
	extern void  OF_val2sym32(void *);
	extern void OF_sym2val32(void *);
	extern struct consdev consdev_prom;

	/* Save OpenFirmware entry point */
	romp   = ofw;
	romtba = get_romtba();

	prom_init();
	console_instance = promops.po_stdout;
	console_node = OF_instance_to_package(promops.po_stdout);

	/* Initialize the PROM console so printf will not panic */
	cn_tab = &consdev_prom;
	(*cn_tab->cn_init)(cn_tab);

	DPRINTF(ACDB_BOOTARGS,
		("sparc64_init(%p, %p, %p, %p, %p)\n", o0, bootargs, bootsize,
			o3, ofw));

	/* Extract bootinfo pointer */
	if ((long)bootsize >= (4 * sizeof(uint64_t))) {
		/* Loaded by 64-bit bootloader */
		bi = (void*)(u_long)(((uint64_t*)bootargs)[3]);
		bmagic = (long)(((uint64_t*)bootargs)[0]);
	} else if ((long)bootsize >= (4 * sizeof(uint32_t))) {
		/* Loaded by 32-bit bootloader */
		bi = (void*)(u_long)(((uint32_t*)bootargs)[3]);
		bmagic = (long)(((uint32_t*)bootargs)[0]);
	} else {
		printf("Bad bootinfo size.\n");
die_old_boot_loader:
		printf("This kernel requires NetBSD boot loader version 1.9 "
		       "or newer\n");
		panic("sparc64_init.");
	}

	DPRINTF(ACDB_BOOTARGS,
		("sparc64_init: bmagic=%lx, bi=%p\n", bmagic, bi));

	/* Read in the information provided by NetBSD boot loader */
	if (SPARC_MACHINE_OPENFIRMWARE != bmagic) {
		printf("No bootinfo information.\n");
		goto die_old_boot_loader;
	}

	bootinfo = (void*)(u_long)((uint64_t*)bi)[1];
	LOOKUP_BOOTINFO(bi_kend, BTINFO_KERNEND);

	if (bi_kend->addr == (vaddr_t)0) {
		panic("Kernel end address is not found in bootinfo.\n");
	}

#if NKSYMS || defined(DDB) || defined(MODULAR)
	LOOKUP_BOOTINFO(bi_sym, BTINFO_SYMTAB);
	ksyms_addsyms_elf(bi_sym->nsym, (int *)(u_long)bi_sym->ssym,
			(int *)(u_long)bi_sym->esym);
#ifdef DDB
#ifdef __arch64__
	/* This can only be installed on a 64-bit system, because otherwise our stack is screwed */
	OF_set_symbol_lookup(OF_sym2val, OF_val2sym);
#else
	OF_set_symbol_lookup(OF_sym2val32, OF_val2sym32);
#endif
#endif
#endif
	if (OF_getprop(findroot(), "compatible", buf, sizeof(buf)) > 0) {
		if (strcmp(buf, "sun4us") == 0)
			setcputyp(CPU_SUN4US);
		else if (strcmp(buf, "sun4v") == 0)
			setcputyp(CPU_SUN4V);
	}

	bi_howto = lookup_bootinfo(BTINFO_BOOTHOWTO);
	if (bi_howto)
		boothowto = bi_howto->boothowto;

	LOOKUP_BOOTINFO(bi_count, BTINFO_DTLB_SLOTS);
	kernel_dtlb_slots = bi_count->count;
	kernel_itlb_slots = kernel_dtlb_slots-1;
	bi_count = lookup_bootinfo(BTINFO_ITLB_SLOTS);
	if (bi_count)
		kernel_itlb_slots = bi_count->count;
	LOOKUP_BOOTINFO(bi_tlb, BTINFO_DTLB);
	kernel_tlbs = &bi_tlb->tlb[0];

	get_ncpus();
	pmap_bootstrap(KERNBASE, bi_kend->addr);
}
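
The LOOKUP_BOOTINFO()/lookup_bootinfo() calls above search a list of tagged records handed over by the boot loader. A hedged sketch of such a walker; the record layout and the zero-offset termination used here are illustrative, not the exact sparc64 structures:

struct sketch_btinfo {
	unsigned int type;	/* record tag, e.g. BTINFO_SYMTAB */
	unsigned int next;	/* byte offset to the next record; 0 ends the list */
};

static void *
sketch_lookup_bootinfo(void *base, unsigned int type)
{
	struct sketch_btinfo *bt = base;

	while (bt != NULL) {
		if (bt->type == type)
			return bt;	/* caller casts to the full record type */
		if (bt->next == 0)
			break;
		bt = (struct sketch_btinfo *)((char *)bt + bt->next);
	}
	return NULL;
}
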
Example #16
/*
 * u_int initarm(...)
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 */
u_int
initarm(void *arg0, void *arg1, void *arg2)
{
	extern vaddr_t xscale_cache_clean_addr;
	extern cpu_kcore_hdr_t cpu_kcore_hdr;
	int loop;
	int loop1;
	u_int l1pagetable;
	pv_addr_t kernel_l1pt;
	paddr_t memstart;
	psize_t memsize;
	extern u_int32_t esym;	/* &_end if no symbols are loaded */

#ifdef DIAGNOSTIC
	extern vsize_t xscale_minidata_clean_size; /* used in KASSERT */
#endif

	/* setup a serial console for very early boot */
	consinit();

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	if (set_cpufuncs())
		panic("cpu not recognized!");

	/*
	 * Examine the boot args string for options we need to know about
	 * now.
	 */
	/* XXX should really be done after setting up the console, but we
	 * XXX need to parse the console selection flags right now. */
	process_kernel_args((char *)0xa0200000 - MAX_BOOT_STRING - 1);

	/* Calibrate the delay loop. */
#if 1
	i80321_calibrate_delay();
#endif

	/* Talk to the user */
	printf("\nOpenBSD/armish booting ...\n");

	/*
	 * Reset the secondary PCI bus.  RedBoot doesn't stop devices
	 * on the PCI bus before handing us control, so we have to
	 * do this.
	 *
	 * XXX This is arguably a bug in RedBoot, and doing this reset
	 * XXX could be problematic in the future if we encounter an
	 * XXX application where the PPB in the i80312 is used as a
	 * XXX PPB.
	 */
//#define VERBOSE_INIT_ARM

	/*
	 * Fetch the SDRAM start/size from the i80312 SDRAM configuration
	 * registers.
	 */
	i80321_sdram_bounds(&obio_bs_tag, VERDE_PMMR_BASE + VERDE_MCU_BASE,
	    &memstart, &memsize);

#define DEBUG
#ifdef DEBUG
	printf("initarm: Configuring system ...\n");
#endif

	/* Fake bootconfig structure for the benefit of pmap.c */
	/* XXX must make the memory description h/w independent */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = memstart;
	bootconfig.dram[0].pages = memsize / PAGE_SIZE;

	/*
	 * Set up the variables that define the availability of
	 * physical memory.  For now, we're going to set
	 * physical_freestart to 0xa0200000 (where the kernel
	 * was loaded), and allocate the memory we need downwards.
	 * If we get too close to the page tables that RedBoot
	 * set up, we will panic.  We will update physical_freestart
	 * and physical_freeend later to reflect what pmap_bootstrap()
	 * wants to see.
	 *
	 * XXX pmap_bootstrap() needs an enema.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);

	physical_freestart = 0xa0009000UL;
	physical_freeend = 0xa0200000UL;

	physmem = (physical_end - physical_start) / PAGE_SIZE;

#ifdef DEBUG
	/* Tell the user about the memory */
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	    physical_start, physical_end - 1);
#endif

	/*
	 * Okay, the kernel starts 2MB in from the bottom of physical
	 * memory.  We are going to allocate our bootstrap pages downwards
	 * from there.
	 *
	 * We need to allocate some fixed page tables to get the kernel
	 * going.  We allocate one page directory and a number of page
	 * tables and store the physical addresses in the kernel_pt_table
	 * array.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	       physical_freestart, free_pages, free_pages);
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)				\
	alloc_pages((var).pv_pa, (np));			\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define alloc_pages(var, np)				\
	physical_freeend -= ((np) * PAGE_SIZE);		\
	if (physical_freeend < physical_freestart)	\
		panic("initarm: out of memory");	\
	(var) = physical_freeend;			\
	free_pages -= (np);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	loop1 = 0;
	kernel_l1pt.pv_pa = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}

	/* This should never happen, but better to confirm it. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

	/* Allocate enough pages for cleaning the Mini-Data cache. */
	KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
	valloc_pages(minidataclean, 1);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va); 
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va); 
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va); 
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va); 
#endif

	/*
	 * XXX Defer this to later so that we can reclaim the memory
	 * XXX used by the RedBoot page tables.
	 */
	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

#ifdef HIGH_VECT
	/* Map the L2 page tables into the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
#else
	/* Map the L2 page tables into the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
#endif
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
#if 0
	pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE,
	    &kernel_pt_table[KERNEL_PT_IOPXS]);
#endif

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel static code/data
	 * and the symbol table. */
	{
		extern char etext[];
#ifdef VERBOSE_INIT_ARM
		extern char _end[];
#endif
		size_t textsize = (u_int32_t) etext - KERNEL_TEXT_BASE;
		size_t totalsize = esym - KERNEL_TEXT_BASE;
		u_int logical;

#ifdef VERBOSE_INIT_ARM
		printf("kernelsize text %x total %x end %xesym %x\n",
		    textsize, totalsize, _end, esym);
#endif

		textsize = round_page(textsize);
		totalsize = round_page(totalsize);
		
		logical = 0x00200000;	/* offset of kernel in RAM */

		/* Update dump information */
		cpu_kcore_hdr.kernelbase = KERNEL_BASE;
		cpu_kcore_hdr.kerneloffs = logical;
		cpu_kcore_hdr.staticsize = totalsize;

		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    PROT_READ | PROT_WRITE, PTE_CACHE);
		pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    PROT_READ | PROT_WRITE, PTE_CACHE);
	}

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, PROT_READ | PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    PROT_READ | PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
	    minidataclean.pv_pa);

	/* Map the vector page. */
#ifdef HIGH_VECT
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    PROT_READ | PROT_WRITE, PTE_CACHE);
#else
        pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    PROT_READ | PROT_WRITE, PTE_CACHE);
#endif

	pmap_devmap_bootstrap(l1pagetable, iq80321_devmap);

	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/*
	 * Update the physical_freestart/physical_freeend/free_pages
	 * variables.
	 */
	{
		physical_freestart = physical_start - KERNEL_BASE +
		    round_page(esym);
		physical_freeend = physical_end;
		free_pages =
		    (physical_freeend - physical_freestart) / PAGE_SIZE;
	}
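	/*
	 * Worked example (addresses assumed for illustration): with
	 * physical_start = 0xa0000000, KERNEL_BASE = 0xc0000000 and
	 * esym = 0xc0400000, physical_freestart becomes 0xa0400000,
	 * the first physical page past the kernel image and symbols.
	 */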
#ifdef VERBOSE_INIT_ARM
	printf("physical_freestart %x end %x\n", physical_freestart,
	    physical_freeend);
#endif

	/* be a client to all domains */
	cpu_domains(0x55555555);
	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	       physical_freestart, free_pages, free_pages);
	printf("switching to new L1 page table  @%#lx...", kernel_l1pt.pv_pa);
#endif


	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init
	 */
	proc0paddr = (struct user *)kernelstack.pv_va;
	proc0.p_addr = proc0paddr;

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

#ifdef HIGH_VECT
	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
#else
	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);
#endif

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend), 0);

	/* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap((pd_entry_t *)kernel_l1pt.pv_va, KERNEL_VM_BASE,
	    KERNEL_VM_BASE + KERNEL_VM_SIZE);

	/* Update dump information */
	cpu_kcore_hdr.pmap_kernel_l1 = (u_int32_t)pmap_kernel()->pm_l1;
	cpu_kcore_hdr.pmap_kernel_l2 = (u_int32_t)&(pmap_kernel()->pm_l2);

	/* Setup the IRQ system */
#ifdef VERBOSE_INIT_ARM
	printf("irq ");
#endif
	i80321intc_intr_init();

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef DDB
	db_machine_init();

	/* Firmware doesn't load symbols. */
	ddb_init();

	if (boothowto & RB_KDB)
		Debugger();
#endif
    
	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
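
The valloc_pages() macro in the example above relies on the kernel being direct-mapped at a fixed offset, so a physical allocation can be converted to its virtual address arithmetically. With KERNEL_BASE at 0xc0000000 and RAM at 0xa0000000 (values assumed for illustration), a page allocated at physical 0xa01fc000 is reachable at virtual 0xc01fc000. A sketch of that invariant:

struct sketch_pv { unsigned long pv_pa, pv_va; };

/* assumed: the kernel VA window direct-maps RAM at a fixed offset */
static void
sketch_pv_fixup(struct sketch_pv *pv, unsigned long kernel_base,
    unsigned long physical_start)
{
	pv->pv_va = kernel_base + (pv->pv_pa - physical_start);
}
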
Example #17
/*
 * Do all the stuff that locore normally does before calling main().
 */
void
mach_init(int32_t memsize32, u_int bim, int32_t bip32)
{
	intptr_t memsize = (int32_t)memsize32;
	char *kernend;
	char *bip = (char *)(intptr_t)(int32_t)bip32;
	u_long first, last;
	extern char edata[], end[];
	const char *bi_msg;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	char *ssym = 0;
	struct btinfo_symtab *bi_syms;
#endif
	struct btinfo_howto *bi_howto;

	/*
	 * Clear the BSS segment (if needed).
	 */
	if (memcmp(((Elf_Ehdr *)end)->e_ident, ELFMAG, SELFMAG) == 0 &&
	    ((Elf_Ehdr *)end)->e_ident[EI_CLASS] == ELFCLASS) {
		esym = end;
#if NKSYMS || defined(DDB) || defined(MODULAR)
		esym += ((Elf_Ehdr *)end)->e_entry;
#endif
		kernend = (char *)mips_round_page(esym);
		/*
		 * We don't have to clear BSS here
		 * since our bootloader already does it.
		 */
#if 0
		memset(edata, 0, end - edata);
#endif
	} else {
		kernend = (void *)mips_round_page(end);
		/*
		 * No symbol table, so assume we are loaded by
		 * the firmware directly with "bfd" command.
		 * The firmware loader doesn't clear BSS of
		 * a loaded kernel, so do it here.
		 */
		memset(edata, 0, kernend - edata);

	}

	/*
	 * Copy exception-dispatch code down to exception vector.
	 * Initialize locore-function vector.
	 * Clear out the I and D caches.
	 */
	mips_vector_init(NULL, false);

	/* Check for valid bootinfo passed from bootstrap */
	if (bim == BOOTINFO_MAGIC) {
		struct btinfo_magic *bi_magic;

		bootinfo = bip;
		bi_magic = lookup_bootinfo(BTINFO_MAGIC);
		if (bi_magic == NULL) {
			bi_msg = "missing bootinfo structure";
			bim = (uintptr_t)bip;
		} else if (bi_magic->magic != BOOTINFO_MAGIC) {
			bi_msg = "invalid bootinfo structure";
			bim = bi_magic->magic;
		} else
			bi_msg = NULL;
	} else {
		bi_msg = "invalid bootinfo (standalone boot?)";
	}

#if NKSYMS || defined(DDB) || defined(MODULAR)
	bi_syms = lookup_bootinfo(BTINFO_SYMTAB);

	/* Load symbol table if present */
	if (bi_syms != NULL) {
		ssym = (void *)(intptr_t)bi_syms->ssym;
		esym = (void *)(intptr_t)bi_syms->esym;
		kernend = (void *)mips_round_page(esym);
	}
#endif

	bi_howto = lookup_bootinfo(BTINFO_HOWTO);
	if (bi_howto != NULL)
		boothowto = bi_howto->bi_howto;

	cobalt_id = read_board_id();
	if (cobalt_id >= COBALT_MODELS || cobalt_model[cobalt_id] == NULL)
		cpu_setmodel("Cobalt unknown model (board ID %u)",
		    cobalt_id);
	else
		cpu_setmodel("%s", cobalt_model[cobalt_id]);

	switch (cobalt_id) {
	case COBALT_ID_QUBE2700:
	case COBALT_ID_RAQ:
		cpuspeed = 150; /* MHz */
		break;
	case COBALT_ID_QUBE2:
	case COBALT_ID_RAQ2:
		cpuspeed = 250; /* MHz */
		break;
	default:
		/* assume the fastest, so that delay(9) works */
		cpuspeed = 250;
		break;
	}
	curcpu()->ci_cpu_freq = cpuspeed * 1000 * 1000;
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + (1000000 / 2)) / 1000000);
	/* all models have Rm5200, which is CPU_MIPS_DOUBLE_COUNT */
	curcpu()->ci_cycles_per_hz /= 2;
	curcpu()->ci_divisor_delay /= 2;
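	/*
	 * Worked example (hz = 100 assumed): on a 250MHz Qube2,
	 * ci_cycles_per_hz = (250000000 + 50) / 100 = 2500000 and
	 * ci_divisor_delay = 250; halved for the Rm5200's
	 * double-count timer they become 1250000 and 125.
	 */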

	physmem = btoc(memsize - MIPS_KSEG0_START);

	consinit();

	KASSERT(&lwp0 == curlwp);
	if (bi_msg != NULL)
		printf("%s: magic=%#x bip=%p\n", bi_msg, bim, bip);

	uvm_setpagesize();

	/*
	 * The boot command is passed in the top 512 bytes,
	 * so don't clobber that.
	 */
	mem_clusters[0].start = 0;
	mem_clusters[0].size = ctob(physmem) - 512;
	mem_cluster_cnt = 1;

	memcpy(bootstring, (char *)(memsize - 512), 512);
	memset((char *)(memsize - 512), 0, 512);
	bootstring[511] = '\0';

	decode_bootstring();

#if NKSYMS || defined(DDB) || defined(MODULAR)
	/* init symbols if present */
	if ((bi_syms != NULL) && (esym != NULL))
		ksyms_addsyms_elf(esym - ssym, ssym, esym);
#endif
	KASSERT(&lwp0 == curlwp);
#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif
#ifdef KGDB
	if (boothowto & RB_KDB)
		kgdb_connect(0);
#endif

	/*
	 * Load the rest of the available pages into the VM system.
	 */
	first = round_page(MIPS_KSEG0_TO_PHYS(kernend));
	last = mem_clusters[0].start + mem_clusters[0].size;
	uvm_page_physload(atop(first), atop(last), atop(first), atop(last),
	    VM_FREELIST_DEFAULT);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	mips_init_msgbuf();

	pmap_bootstrap();

	/*
	 * Allocate space for proc0's USPACE.
	 */
	mips_init_lwp0_uarea();
}
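
The Elf_Ehdr probe at the top of the example above checks whether the boot loader left an ELF header (and with it a symbol table) directly after the kernel image. A self-contained sketch of the same test using the standard <elf.h> names; the in-kernel Elf_Ehdr/ELFCLASS typedefs resolve per-ABI, so a 32-bit header is assumed here:

#include <elf.h>
#include <string.h>

static int
sketch_has_symtab(const void *end_of_kernel)
{
	const Elf32_Ehdr *eh = end_of_kernel;

	/* magic bytes "\177ELF" followed by the expected word size */
	return memcmp(eh->e_ident, ELFMAG, SELFMAG) == 0 &&
	    eh->e_ident[EI_CLASS] == ELFCLASS32;
}
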
Example #18
void
hp700_init(int argc, char *argv[], char *envp[])
{
	int hpmc_br_instr;
	int *p = (int *) i_hpmach_chk;
	register struct mapping *mp;
	int i;
	vm_offset_t addr;
	int pdcerr;
	vm_offset_t first_page;

	struct pdc_coproc pdc_coproc;
	struct pdc_cache pdc_cache;
	struct pdc_model pdc_model;
	struct pdc_iodc_read pdc_iodc;
	extern int crashdump(void);
#ifdef	BTLB
	struct pdc_btlb pdc_btlb;
#endif
#ifdef	HPT
	struct pdc_hwtlb pdc_hwtlb;
	extern struct hpt_entry *hpt_table;
	extern int usehpt;
#endif	

	first_page = move_bootstrap();

	if (argc >= 1 && argc <= 4) {
		char *btstring = boot_string;
		char *src = (argc == 1 ? envp[5] : argv[2]);

		i = 0;
		while (*src != '\0' && i++ <= BOOT_LINE_LENGTH)
			*btstring++ = *src++;
		*btstring = '\0';
	}

	pdc = PAGE0->mem_pdc;

	delay_init();
	pdc_console_init();

	printf("%s", version);

	/*
	 * Determine what the boot program is using as its console
	 * so that we can use the same device.
	 */
	pdcerr = (*pdc)(PDC_IODC, PDC_IODC_READ, &pdc_iodc,
			PAGE0->mem_cons.pz_hpa, PDC_IODC_INDEX_DATA,
			&cons_iodc, sizeof(cons_iodc));
	if (pdcerr == 0)
		bcopy((char *)&PAGE0->mem_cons.pz_dp, (char *)&cons_dp,
		      sizeof(struct device_path));
	else
		printf("Warning: can't id console boot device (PDC Ret'd %d)\n",
		       pdcerr);

        /*
         * Read boot device from PROM
         */
	pdcerr = (*PAGE0->mem_pdc)(PDC_IODC, PDC_IODC_READ, &pdc_iodc,
	                           PAGE0->mem_boot.pz_hpa, PDC_IODC_INDEX_DATA,
	                           &boot_iodc, sizeof(boot_iodc));
	if (pdcerr == 0)
		bcopy((char *)&PAGE0->mem_boot.pz_dp, (char *)&boot_dp,
		      sizeof(struct device_path));
	else
		printf("Warning: can't id boot device (PDC Ret'd %d)\n",
		       pdcerr);
	
	/*
	 * Setup the transfer of control addr to point to the crash dump
	 * initialization code.
	 */
	PAGE0->ivec_toc = crashdump;

	/*
	 * get cache parameters from the PDC
	 */
	(*PAGE0->mem_pdc)(PDC_CACHE, PDC_CACHE_DFLT, &pdc_cache);

	dcache_line_size = pdc_cache.dc_conf.cc_line * 16;
	dcache_line_mask = dcache_line_size - 1;
	dcache_block_size = dcache_line_size * pdc_cache.dc_conf.cc_block;

	dcache_size = pdc_cache.dc_size;
	dcache_base = pdc_cache.dc_base;
	dcache_stride = pdc_cache.dc_stride;
	dcache_count = pdc_cache.dc_count;
	dcache_loop = pdc_cache.dc_loop;

	icache_line_size = pdc_cache.ic_conf.cc_line * 16;
	icache_line_mask = icache_line_size - 1;
	icache_block_size = icache_line_size * pdc_cache.ic_conf.cc_block;

	icache_base = pdc_cache.ic_base;
	icache_stride = pdc_cache.ic_stride;
	icache_count = pdc_cache.ic_count;
	icache_loop = pdc_cache.ic_loop;

	/*
	 * purge TLBs and flush caches
	 */
	ptlball(&pdc_cache);

#ifdef	BTLB
        /*
         * get block tlb information for clearing
         */
	pdcerr = (*pdc)(PDC_BLOCK_TLB, PDC_BTLB_DEFAULT, &pdc_btlb);
	
        if (pdcerr != 0)
                printf("Warning: PDC_BTLB call Ret'd %d\n", pdcerr);

	switch (pdc_btlb.finfo.num_c) {
	/* S-Chip specific */
	case 0: 
		cputype = CPU_PCXS;
		for (i = 0; i < pdc_btlb.finfo.num_i; i++)
			purge_block_itlb(i);
		for (i = 0; i < pdc_btlb.finfo.num_d; i++)
			purge_block_dtlb(i);
		break;
	/* L-Chip specific */
	case 8:
		cputype = CPU_PCXL;
		for (i = 0; i < pdc_btlb.finfo.num_c; i++)
			purge_L_block_ctlb(i);
		break;
	/* T-Chip specific */
	case 16:
		cputype = CPU_PCXT;
		for (i = 0; i < pdc_btlb.finfo.num_c; i++)
			purge_block_ctlb(i);
		break;
	default:
		panic("unrecognized block-TLB, cannot purge block TLB(s)");
		/* NOTREACHED */
	}
#endif

	fcacheall();

	/*
	 * get the cpu type
	 */
	(*PAGE0->mem_pdc)(PDC_MODEL, PDC_MODEL_INFO, &pdc_model);

	machtype = pdc_model.hvers >> 4;

	cpuinfo(&pdc_cache);

	if (dcache_line_size != CACHE_LINE_SIZE)
		printf("WARNING: data cache line size = %d bytes, %s\n",
		       dcache_line_size, "THIS IS *VERY* BAD!");

	/*
	 * Get the instruction to do branch to PDC_HPMC from PDC.  If
	 * successful, then insert the instruction at the beginning
	 * of the HPMC handler.
	 */
	if ((*PAGE0->mem_pdc)(PDC_INSTR, PDC_INSTR_DFLT, &hpmc_br_instr) == 0)
		p[0] = hpmc_br_instr;
	else
		p[0] = 0;

	/* 
	 * Now compute the checksum of the hpmc interrupt vector entry
	 */
	p[5] = -(p[0] + p[1] + p[2] + p[3] + p[4] + p[6] + p[7]);
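	/*
	 * The intent (as understood here): the words of the HPMC vector
	 * entry must sum to zero for the firmware to accept the entry
	 * as valid, and storing the negated sum of the other seven
	 * words in p[5] establishes exactly that.
	 */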

	/*
	 * setup page size for Mach
	 */
	page_size = HP700_PGBYTES;
	vm_set_page_size();

	/*
	 * configure the devices including memory. Passes back size of 
	 * physical memory in mem_size.
	 */
	busconf();

	/* 
	 * Zero out BSS of kernel before doing anything else. The location
	 * pointed to by &edata is included in the data section.
	 */
	bzero((char*)((vm_offset_t) &edata + 4), (vm_offset_t) &end - 
	      (vm_offset_t) &edata - 4);

        /*
         * Locate any coprocessors and enable them by setting up the CCR.
         * SFU's are ignored (since we don't have any).  Also, initialize
         * the floating point registers here.
         */
        if ((pdcerr = (*pdc)(PDC_COPROC, PDC_COPROC_DFLT, &pdc_coproc)) < 0)
                printf("Warning: PDC_COPROC call Ret'd %d\n", pdcerr);
        copr_sfu_config = pdc_coproc.ccr_enable;
        mtctl(CR_CCR, copr_sfu_config & CCR_MASK);
        fprinit(&fpcopr_version);
	fpcopr_version = (fpcopr_version & 0x003ff800) >> 11;
        mtctl(CR_CCR, 0);

        /*
         * Clear the FAULT light (so we know when we get a real one)
         * PDC_COPROC apparently turns it on (for whatever reason).
         */
        pdcerr = PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0;
        (void) (*pdc)(PDC_CHASSIS, PDC_CHASSIS_DISP, pdcerr);

#ifdef TIMEX
	/*
	 * Enable the quad-store instruction.
	 */
	pdcerr = (*pdc)(PDC_MODEL, PDC_MODEL_ENSPEC,
			&pdc_model, pdc_model.pot_key);
	if (pdcerr < 0)
		printf("Warning: PDC enable FP quad-store Ret'd %d\n", pdcerr);
#endif


	/*
	 * Initialize the Event Trace Analysis Package
	 * Static Phase: 1 of 2
	 */
	etap_init_phase1();

	/*
	 * on the hp700 the value in &etext is a pointer to the last word
	 * in the text section. Similarly, &edata and &end are pointers to
	 * the last words in their sections. We want to change this so that
	 * these pointers point past the sections that they terminate.
	 */
	text_start = trunc_page((vm_offset_t) &start_text);
	text_end = round_page((vm_offset_t) &etext + 4);

	/*
	 * before we go to all the work to initialize the VM see if we really 
	 * linked the image past the end of the PDC/IODC area.
	 */
	if (text_start < 0x10800)
		panic("kernel text mapped over PDC and IODC memory");

	/*
	 * find ranges of physical memory that aren't allocated to the kernel
	 */

	avail_start = round_page(first_page);
	first_avail = avail_start;
	avail_end = trunc_page(mem_size);
	
	/*
	 * bootstrap the rest of the virtual memory system
	 */
#ifdef MAXMEMBYTES
	if ((avail_end - avail_start) > MAXMEMBYTES) {
		mem_size  = trunc_page(MAXMEMBYTES);
		avail_end = mem_size;
	}
#endif

#ifdef HPT
	/*
	 * If we want to use the HW TLB support, ensure that it exists.
	 */
	if (usehpt &&
	    !((*pdc)(PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) == 0 &&
	      (pdc_hwtlb.min_size || pdc_hwtlb.max_size)))
		usehpt = 0;
#endif

	pmap_bootstrap(&avail_start, &avail_end);

	/*
	 * set limits on virtual memory and kernel equivalenced memory
	 */
	virtual_avail = avail_end;
	virtual_end = trunc_page(VM_MAX_KERNEL_ADDRESS);

	/*
	 * pmap_bootstrap allocated memory for data structures that must
	 * be equivalently mapped.
	 */
	equiv_end = (long) round_page((vm_offset_t) &end);
	io_end = 0xF0000000;	/* XXX */

	/*
	 * Do block mapping. We are mapping from 0, up through the first
	 * power of 2 address above the end of the equiv region. This 
	 * means some memory gets block mapped that should not be, but
	 * so be it (we make the text writable also :-)). We do this to
	 * conserve block entries since we hope to use them for other
	 * purposes (someday).
	 */
	addr = avail_start;
	if (addr != 1 << log2(addr))
		addr = 1 << log2(addr);

#ifdef	BTLB
	if(pdc_btlb.finfo.num_c)
		printf("%d BTLB entries found.  Block mapping up to 0x%x (0x%x)\n",
		       pdc_btlb.finfo.num_c, addr, avail_start);

	/*
	 * XXX L-CHIP vs T-CHIP vs S-CHIP difference in Block TLB insertion.
	 */
	switch (pdc_btlb.finfo.num_c) {
	/* S-CHIP */
	case 0:
		pmap_block_map(0, addr, VM_PROT_ALL, 0, BLK_ICACHE);
		pmap_block_map(0, addr, VM_PROT_READ|VM_PROT_WRITE,
			       0, BLK_DCACHE);
		break;
	/* L-CHIP */
	case 8:
		pmap_block_map(0, addr, VM_PROT_ALL, 0, BLK_LCOMBINED);
		break;
	/* T-CHIP */
	case 16:
		pmap_block_map(0, addr, VM_PROT_ALL, 0, BLK_COMBINED);
		break;
	default:
		panic("unrecognized block-TLB, cannot map kernel");
		/* NOTREACHED */
	}
#endif

#ifdef	HPT
	/*
	 * Turn on the HW TLB assist.
	 */
	if (usehpt) {
		pdcerr = (*pdc)(PDC_TLB, PDC_TLB_CONFIG,
				&pdc_hwtlb, hpt_table,
				sizeof(struct hpt_entry) * HP700_HASHSIZE,
				PDC_TLB_WORD3);
		if (pdcerr) {
			printf("Warning: HW TLB init failed (%d), disabled\n",
			       pdcerr);
			usehpt = 0;
		} else
			printf("HW TLB initialized (%d entries at 0x%x)\n",
			       HP700_HASHSIZE, hpt_table);
	}
#endif

	/*
	 * map the PDC and IODC area for kernel read/write
	 * XXX - should this be read only?
	 */
	(void) pmap_map(0, 0, text_start, VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * map the kernel text area.
	 */
#if KGDB
	(void) pmap_map(text_start, text_start, text_end, 
			VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_WRITE);
#else
	(void) pmap_map(text_start, text_start, text_end, 
			VM_PROT_READ | VM_PROT_EXECUTE);
#endif

	/*
	 * map the data section of the kernel
	 */
	(void) pmap_map(text_end, text_end, avail_start,
			VM_PROT_READ | VM_PROT_WRITE);

#ifndef IO_HACK
	/*
	 * map the I/O pages
	 */
	(void) pmap_map(trunc_page(io_size), trunc_page(io_size),
			0, VM_PROT_READ | VM_PROT_WRITE);
#endif

#if 0
	/*
	 * map the breakpoint page
	 */
	(void) pmap_map(break_page, break_page, break_page+HP700_PAGE_SIZE,
			VM_PROT_READ | VM_PROT_EXECUTE);
#endif

	/*
	 * map the interrupt stack red zone.
	 */
	addr = trunc_page((vm_offset_t) &intstack_top);
	(void) pmap_map(addr, addr, addr + PAGE_SIZE, VM_PROT_READ);

	vm_on = 1;
}
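
The block-mapping setup above rounds avail_start to a power of two before handing it to pmap_block_map(). The hp700 log2() helper is not shown in this excerpt; assuming it returns the ceiling of log2, the idiom is equivalent to the classic bit-smearing round-up, sketched here for 32-bit values:

/* round x up to the next power of two (for 32-bit x > 1); a hedged
 * stand-in for the 1 << log2(addr) idiom used above */
static unsigned long
sketch_roundup_pow2(unsigned long x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return x + 1;
}
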
Example #19
caddr_t
mips_init(int argc, void *argv, caddr_t boot_esym)
{
	char *cp;
	int i;
	caddr_t sd;
	u_int cputype;
	vaddr_t tlb_handler, xtlb_handler;
	extern char start[], edata[], end[];
	extern char exception[], e_exception[];
	extern char *hw_vendor, *hw_prod;
	extern void tlb_miss;
	extern void tlb_miss_err_r5k;
	extern void xtlb_miss;
	extern void xtlb_miss_err_r5k;

	/*
	 * Make sure we can access the extended address space.
	 * Note that r10k and later do not allow XUSEG accesses
	 * from kernel mode unless SR_UX is set.
	 */
	setsr(getsr() | SR_KX | SR_UX);

#ifdef notyet
	/*
	 * Make sure KSEG0 cacheability matches what we intend to use.
	 *
	 * XXX This does not work as expected on IP30. Does ARCBios
	 * XXX depend on this?
	 */
	cp0_setcfg((cp0_getcfg() & ~0x07) | CCA_CACHED);
#endif

	/*
	 * Clear the compiled BSS segment in OpenBSD code.
	 */
	bzero(edata, end - edata);

	/*
	 * Reserve space for the symbol table, if it exists.
	 */
	ssym = (char *)*(u_int64_t *)end;

	/* Attempt to locate ELF header and symbol table after kernel. */
	if (end[0] == ELFMAG0 && end[1] == ELFMAG1 &&
	    end[2] == ELFMAG2 && end[3] == ELFMAG3 ) {

		/* ELF header exists directly after kernel. */
		ssym = end;
		esym = boot_esym;
		ekern = esym;

	} else if (((long)ssym - (long)end) >= 0 &&
	    ((long)ssym - (long)end) <= 0x1000 &&
	    ssym[0] == ELFMAG0 && ssym[1] == ELFMAG1 &&
	    ssym[2] == ELFMAG2 && ssym[3] == ELFMAG3 ) {

		/* Pointers exist directly after kernel. */
		esym = (char *)*((u_int64_t *)end + 1);
		ekern = esym;

	} else {

		/* Pointers aren't setup either... */
		ssym = NULL;
		esym = NULL;
		ekern = end;
	}

	/*
	 * Initialize the system type and set up memory layout.
	 * Note that some systems have a more complex memory setup.
	 */
	bios_ident();

	/*
	 * Determine system type and set up configuration record data.
	 */
	hw_vendor = "SGI";
	switch (sys_config.system_type) {
#if defined(TGT_O2)
	case SGI_O2:
		bios_printf("Found SGI-IP32, setting up.\n");
		hw_prod = "O2";
		strlcpy(cpu_model, "IP32", sizeof(cpu_model));
		ip32_setup();

		sys_config.cpu[0].clock = 180000000;  /* Reasonable default */
		cp = Bios_GetEnvironmentVariable("cpufreq");
		if (cp && atoi(cp, 10, NULL) > 100)
			sys_config.cpu[0].clock = atoi(cp, 10, NULL) * 1000000;

		break;
#endif

#if defined(TGT_ORIGIN200) || defined(TGT_ORIGIN2000)
	case SGI_O200:
		bios_printf("Found SGI-IP27, setting up.\n");
		hw_prod = "Origin 200";
		strlcpy(cpu_model, "IP27", sizeof(cpu_model));
		ip27_setup();

		break;

	case SGI_O300:
		bios_printf("Found SGI-IP35, setting up.\n");
		hw_prod = "Origin 300";
		/* IP27 is intentional, we use the same kernel */
		strlcpy(cpu_model, "IP27", sizeof(cpu_model));
		ip27_setup();

		break;
#endif

#if defined(TGT_OCTANE)
	case SGI_OCTANE:
		bios_printf("Found SGI-IP30, setting up.\n");
		hw_prod = "Octane";
		strlcpy(cpu_model, "IP30", sizeof(cpu_model));
		ip30_setup();

		sys_config.cpu[0].clock = 175000000;  /* Reasonable default */
		cp = Bios_GetEnvironmentVariable("cpufreq");
		if (cp && atoi(cp, 10, NULL) > 100)
			sys_config.cpu[0].clock = atoi(cp, 10, NULL) * 1000000;

		break;
#endif

	default:
		bios_printf("Kernel doesn't support this system type!\n");
		bios_printf("Halting system.\n");
		Bios_Halt();
		while(1);
	}

	/*
	 * Read and store console type.
	 */
	cp = Bios_GetEnvironmentVariable("ConsoleOut");
	if (cp != NULL && *cp != '\0')
		strlcpy(bios_console, cp, sizeof bios_console);

	/* Disable serial console if ARCS is telling us to use video. */
	if (strncmp(bios_console, "video", 5) == 0)
		comconsaddr = 0;

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
	boothowto = RB_AUTOBOOT;

	dobootopts(argc, argv);

	/*
	 * Figure out where we supposedly booted from.
	 */
	cp = Bios_GetEnvironmentVariable("OSLoadPartition");
	if (cp == NULL)
		cp = "unknown";
	if (strlcpy(osloadpartition, cp, sizeof osloadpartition) >=
	    sizeof osloadpartition)
		bios_printf("Value of `OSLoadPartition' is too large.\n"
		 "The kernel might not be able to find out its root device.\n");

	/*
	 * Read platform-specific environment variables.
	 */
	switch (sys_config.system_type) {
#if defined(TGT_O2)
	case SGI_O2:
		/* Get Ethernet address from ARCBIOS. */
		cp = Bios_GetEnvironmentVariable("eaddr");
		if (cp != NULL && strlen(cp) > 0)
			strlcpy(bios_enaddr, cp, sizeof bios_enaddr);
		break;
#endif
	default:
		break;
	}

	/*
	 * Set pagesize to enable use of page macros and functions.
	 * Commit available memory to UVM system.
	 */
	uvmexp.pagesize = PAGE_SIZE;
	uvm_setpagesize();

	for (i = 0; i < MAXMEMSEGS && mem_layout[i].mem_first_page != 0; i++) {
		u_int32_t fp, lp;
		u_int32_t firstkernpage, lastkernpage;
		unsigned int freelist;
		paddr_t firstkernpa, lastkernpa;

		if (IS_XKPHYS((vaddr_t)start))
			firstkernpa = XKPHYS_TO_PHYS((vaddr_t)start);
		else
			firstkernpa = KSEG0_TO_PHYS((vaddr_t)start);
		if (IS_XKPHYS((vaddr_t)ekern))
			lastkernpa = XKPHYS_TO_PHYS((vaddr_t)ekern);
		else
			lastkernpa = KSEG0_TO_PHYS((vaddr_t)ekern);

		firstkernpage = atop(trunc_page(firstkernpa));
		lastkernpage = atop(round_page(lastkernpa));

		fp = mem_layout[i].mem_first_page;
		lp = mem_layout[i].mem_last_page;
		freelist = mem_layout[i].mem_freelist;

		/* Account for kernel and kernel symbol table. */
		if (fp >= firstkernpage && lp < lastkernpage)
			continue;	/* In kernel. */

		if (lp < firstkernpage || fp > lastkernpage) {
			uvm_page_physload(fp, lp, fp, lp, freelist);
			continue;	/* Outside kernel. */
		}

		if (fp >= firstkernpage)
			fp = lastkernpage;
		else if (lp < lastkernpage)
			lp = firstkernpage;
		else { /* Need to split! */
			u_int32_t xp = firstkernpage;
			uvm_page_physload(fp, xp, fp, xp, freelist);
			fp = lastkernpage;
		}
		if (lp > fp)
			uvm_page_physload(fp, lp, fp, lp, freelist);
	}
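	/*
	 * Worked example of the split case above (page numbers assumed
	 * for illustration): if a region spans pages [0, 0x8000) and the
	 * kernel occupies [0x100, 0x400), the loop loads [0, 0x100) and
	 * [0x400, 0x8000) into UVM, skipping the kernel pages in between.
	 */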


	switch (sys_config.system_type) {
#if defined(TGT_O2) || defined(TGT_OCTANE)
	case SGI_O2:
	case SGI_OCTANE:
		sys_config.cpu[0].type = (cp0_get_prid() >> 8) & 0xff;
		sys_config.cpu[0].vers_maj = (cp0_get_prid() >> 4) & 0x0f;
		sys_config.cpu[0].vers_min = cp0_get_prid() & 0x0f;
		sys_config.cpu[0].fptype = (cp1_get_prid() >> 8) & 0xff;
		sys_config.cpu[0].fpvers_maj = (cp1_get_prid() >> 4) & 0x0f;
		sys_config.cpu[0].fpvers_min = cp1_get_prid() & 0x0f;

		/*
		 * Configure TLB.
		 */
		switch(sys_config.cpu[0].type) {
		case MIPS_RM7000:
			/* Rev A (version >= 2) CPU's have 64 TLB entries. */
			if (sys_config.cpu[0].vers_maj < 2) {
				sys_config.cpu[0].tlbsize = 48;
			} else {
				sys_config.cpu[0].tlbsize = 64;
			}
			break;

		case MIPS_R10000:
		case MIPS_R12000:
		case MIPS_R14000:
			sys_config.cpu[0].tlbsize = 64;
			break;

		default:
			sys_config.cpu[0].tlbsize = 48;
			break;
		}
		break;
#endif
	default:
		break;
	}

	/*
	 * Configure cache.
	 */
	switch(sys_config.cpu[0].type) {
	case MIPS_R10000:
	case MIPS_R12000:
	case MIPS_R14000:
		cputype = MIPS_R10000;
		break;
	case MIPS_R5000:
	case MIPS_RM7000:
	case MIPS_RM52X0:
	case MIPS_RM9000:
		cputype = MIPS_R5000;
		break;
	default:
		/*
		 * If we can't identify the cpu type, it must be
		 * r10k-compatible on Octane and Origin families, and
		 * it is likely to be r5k-compatible on O2.
		 */
		switch (sys_config.system_type) {
		case SGI_O2:
			cputype = MIPS_R5000;
			break;
		default:
		case SGI_OCTANE:
		case SGI_O200:
		case SGI_O300:
			cputype = MIPS_R10000;
			break;
		}
		break;
	}
	switch (cputype) {
	case MIPS_R10000:
		Mips10k_ConfigCache();
		sys_config._SyncCache = Mips10k_SyncCache;
		sys_config._InvalidateICache = Mips10k_InvalidateICache;
		sys_config._InvalidateICachePage = Mips10k_InvalidateICachePage;
		sys_config._SyncDCachePage = Mips10k_SyncDCachePage;
		sys_config._HitSyncDCache = Mips10k_HitSyncDCache;
		sys_config._IOSyncDCache = Mips10k_IOSyncDCache;
		sys_config._HitInvalidateDCache = Mips10k_HitInvalidateDCache;
		break;
	default:
	case MIPS_R5000:
		Mips5k_ConfigCache();
		sys_config._SyncCache = Mips5k_SyncCache;
		sys_config._InvalidateICache = Mips5k_InvalidateICache;
		sys_config._InvalidateICachePage = Mips5k_InvalidateICachePage;
		sys_config._SyncDCachePage = Mips5k_SyncDCachePage;
		sys_config._HitSyncDCache = Mips5k_HitSyncDCache;
		sys_config._IOSyncDCache = Mips5k_IOSyncDCache;
		sys_config._HitInvalidateDCache = Mips5k_HitInvalidateDCache;
		break;
	}

	/*
	 * Last chance to call the BIOS. Wiping the TLB means the BIOS' data
	 * areas are demapped on most systems.
	 */
	delay(20*1000);		/* Let any UART FIFO drain... */

	sys_config.cpu[0].tlbwired = UPAGES / 2;
	tlb_set_wired(0);
	tlb_flush(sys_config.cpu[0].tlbsize);
	tlb_set_wired(sys_config.cpu[0].tlbwired);

	/*
	 * Get a console, very early but after initial mapping setup.
	 */
	consinit();
	printf("Initial setup done, switching console.\n");

	/*
	 * Init message buffer.
	 */
	msgbufbase = (caddr_t)pmap_steal_memory(MSGBUFSIZE, NULL, NULL);
	initmsgbuf(msgbufbase, MSGBUFSIZE);

	/*
	 * Allocate U page(s) for proc[0], pm_tlbpid 1.
	 */
	proc0.p_addr = proc0paddr = curprocpaddr =
	    (struct user *)pmap_steal_memory(USPACE, NULL, NULL);
	proc0.p_md.md_regs = (struct trap_frame *)&proc0paddr->u_pcb.pcb_regs;
	tlb_set_pid(1);

	/*
	 * Allocate system data structures.
	 */
	i = (vsize_t)allocsys(NULL);
	sd = (caddr_t)pmap_steal_memory(i, NULL, NULL);
	allocsys(sd);

	/*
	 * Bootstrap VM system.
	 */
	pmap_bootstrap();

	/*
	 * Copy down exception vector code.
	 */
	bcopy(exception, (char *)CACHE_ERR_EXC_VEC, e_exception - exception);
	bcopy(exception, (char *)GEN_EXC_VEC, e_exception - exception);

	/*
	 * Build proper TLB refill handler trampolines.
	 */
	switch (cputype) {
	case MIPS_R5000:
		/*
		 * R5000 processors need a specific chip bug workaround
		 * in their tlb handlers.  Theoretically only revision 1
		 * of the processor needs it, but there is evidence that
		 * later versions also need it.
		 *
		 * This is also necessary on RM52x0; we test on the `rounded'
		 * cputype value instead of sys_config.cpu[0].type; this
		 * causes RM7k and RM9k to be included, just to be on the
		 * safe side.
		 */
		tlb_handler = (vaddr_t)&tlb_miss_err_r5k;
		xtlb_handler = (vaddr_t)&xtlb_miss_err_r5k;
		break;
	default:
		tlb_handler = (vaddr_t)&tlb_miss;
		xtlb_handler = (vaddr_t)&xtlb_miss;
		break;
	}

	build_trampoline(TLB_MISS_EXC_VEC, tlb_handler);
	build_trampoline(XTLB_MISS_EXC_VEC, xtlb_handler);

	/*
	 * Turn off bootstrap exception vectors.
	 */
	setsr(getsr() & ~SR_BOOT_EXC_VEC);
	proc0.p_md.md_regs->sr = getsr();

	/*
	 * Clear out the I and D caches.
	 */
	Mips_SyncCache();

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/*
	 * Return new stack pointer.
	 */
	return ((caddr_t)proc0paddr + USPACE - 64);
}
Example #20
/*
 * Do all the stuff that locore normally does before calling main().
 */
void
mach_init(long fwhandle, long magic, long bootdata, long reserved)
{
	void *kernend, *p0;
	u_long first, last;
	extern char edata[], end[];
	int i;
	uint32_t config;

	/* XXX this code must run on the target CPU */
	config = mips3_cp0_config_read();
	config &= ~MIPS3_CONFIG_K0_MASK;
	config |= 0x05;				/* XXX.  cacheable coherent */
	mips3_cp0_config_write(config);

	/* Zero BSS.  XXXCGD: uh, is this really necessary still?  */
	memset(edata, 0, end - edata);

	/*
	 * Copy the bootinfo structure from the boot loader.
	 * this has to be done before mips_vector_init is
	 * called because we may need CFE's TLB handler
	 */

	if (magic == BOOTINFO_MAGIC)
		memcpy(&bootinfo, (struct bootinfo_v1 *)bootdata,
		    sizeof bootinfo);
	else if (reserved == CFE_EPTSEAL) {
		magic = BOOTINFO_MAGIC;
		bzero(&bootinfo, sizeof bootinfo);
		bootinfo.version = BOOTINFO_VERSION;
		bootinfo.fwhandle = fwhandle;
		bootinfo.fwentry = bootdata;
		bootinfo.ssym = (vaddr_t)end;
		bootinfo.esym = (vaddr_t)end;
	}

	kernend = (void *)mips_round_page(end);
#if NKSYMS || defined(DDB) || defined(LKM)
	if (magic == BOOTINFO_MAGIC) {
		ksym_start = (void *)bootinfo.ssym;
		ksym_end   = (void *)bootinfo.esym;
		kernend = (void *)mips_round_page((vaddr_t)ksym_end);
	}
#endif

	consinit();

	uvm_setpagesize();

	/*
	 * Copy exception-dispatch code down to exception vector.
	 * Initialize locore-function vector.
	 * Clear out the I and D caches.
	 */
	mips_vector_init();

#ifdef DEBUG
	printf("fwhandle=%08X magic=%08X bootdata=%08X reserved=%08X\n",
	    (u_int)fwhandle, (u_int)magic, (u_int)bootdata, (u_int)reserved);
#endif

	strcpy(cpu_model, "sb1250");

	if (magic == BOOTINFO_MAGIC) {
		int idx;
		int added;
		uint64_t start, len, type;

		cfe_init(bootinfo.fwhandle, bootinfo.fwentry);
		cfe_present = 1;

		idx = 0;
		physmem = 0;
		mem_cluster_cnt = 0;
		while (cfe_enummem(idx, 0, &start, &len, &type) == 0) {
			added = 0;
			printf("Memory Block #%d start %08"PRIx64"X len %08"PRIx64"X: %s: ",
			    idx, start, len, (type == CFE_MI_AVAILABLE) ?
			    "Available" : "Reserved");
			if ((type == CFE_MI_AVAILABLE) &&
			    (mem_cluster_cnt < VM_PHYSSEG_MAX)) {
				/*
				 * XXX Ignore memory above 256MB for now, it
				 * XXX needs special handling.
				 */
				if (start < (256*1024*1024)) {
				    physmem += btoc(((int) len));
				    mem_clusters[mem_cluster_cnt].start =
					(long) start;
				    mem_clusters[mem_cluster_cnt].size =
					(long) len;
				    mem_cluster_cnt++;
				    added = 1;
				}
			}
			if (added)
				printf("added to map\n");
			else
				printf("not added to map\n");
			idx++;
		}

	} else {
		/*
		 * Handle the case of not being called from the firmware.
		 */
		/* XXX hardwire to 32MB; should be kernel config option */
		physmem = 32 * 1024 * 1024 / 4096;
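		/* i.e. 8192 pages, given the 4KB page size hardcoded above */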
		mem_clusters[0].start = 0;
		mem_clusters[0].size = ctob(physmem);
		mem_cluster_cnt = 1;
	}


	for (i = 0; i < sizeof(bootinfo.boot_flags); i++) {
		switch (bootinfo.boot_flags[i]) {
		case '\0':
			break;
		case ' ':
			continue;
		case '-':
			while (bootinfo.boot_flags[i] != ' ' &&
			    bootinfo.boot_flags[i] != '\0') {
				switch (bootinfo.boot_flags[i]) {
				case 'a':
					boothowto |= RB_ASKNAME;
					break;
				case 'd':
					boothowto |= RB_KDB;
					break;
				case 's':
					boothowto |= RB_SINGLE;
					break;
				}
				i++;
			}
		}
	}

	/*
	 * Load the rest of the available pages into the VM system.
	 * The first chunk is tricky because we have to avoid the
	 * kernel, but the rest are easy.
	 */
	first = round_page(MIPS_KSEG0_TO_PHYS(kernend));
	last = mem_clusters[0].start + mem_clusters[0].size;
	uvm_page_physload(atop(first), atop(last), atop(first), atop(last),
		VM_FREELIST_DEFAULT);

	for (i = 1; i < mem_cluster_cnt; i++) {
		first = round_page(mem_clusters[i].start);
		last = mem_clusters[i].start + mem_clusters[i].size;
		uvm_page_physload(atop(first), atop(last), atop(first),
		    atop(last), VM_FREELIST_DEFAULT);
	}

	/*
	 * Initialize error message buffer (at end of core).
	 */
	mips_init_msgbuf();

	/*
	 * Allocate space for proc0's USPACE
	 */
	p0 = (void *)pmap_steal_memory(USPACE, NULL, NULL);
	lwp0.l_addr = proc0paddr = (struct user *)p0;
	lwp0.l_md.md_regs = (struct frame *)((char *)p0 + USPACE) - 1;
	proc0paddr->u_pcb.pcb_context[11] =
	    MIPS_INT_MASK | MIPS_SR_INT_IE; /* SR */

	pmap_bootstrap();

	/*
	 * Initialize debuggers, and break into them, if appropriate.
	 */
#if NKSYMS || defined(DDB) || defined(LKM)
	ksyms_init(((uintptr_t)ksym_end - (uintptr_t)ksym_start),
	    ksym_start, ksym_end);
#endif

	if (boothowto & RB_KDB) {
#if defined(DDB)
		Debugger();
#endif
	}
}
Example #21
void *
initarm(void *arg, void *arg2)
{
#define	next_chunk2(a,b)	(((a) + (b)) &~ ((b)-1))
#define	next_page(a)		next_chunk2(a,PAGE_SIZE)
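/*
 * Note: next_chunk2() always advances past an already-aligned address
 * (e.g. next_page(0x1000) yields 0x2000, just as next_page(0x1001)
 * does); the usual round-up idiom adds (b)-1 rather than (b).  The
 * uses below only need an aligned boundary at or beyond lastaddr, so
 * the extra chunk in the aligned case appears harmless here.
 */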
	struct pv_addr  kernel_l1pt;
	struct pv_addr  dpcpu;
	int loop, i;
	u_int l1pagetable;
	vm_offset_t freemempos;
	vm_offset_t freemem_pt;
	vm_offset_t afterkern;
	vm_offset_t freemem_after;
	vm_offset_t lastaddr;
	uint32_t memsize;

	set_cpufuncs();		/* NB: sets cputype */
	lastaddr = fake_preload_metadata();
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
      	init_param1();
		
	/*
	 * We allocate memory downwards from where we were loaded
	 * by RedBoot; first the L1 page table, then NUM_KERNEL_PTS
	 * entries in the L2 page table.  Past that we re-align the
	 * allocation boundary so later data structures (stacks, etc)
	 * can be mapped with different attributes (write-back vs
	 * write-through).  Note this leaves a gap for expansion
	 * (or might be repurposed).
	 */
	freemempos = KERNPHYSADDR;

	/* macros to simplify initial memory allocation */
#define alloc_pages(var, np) do {					\
	freemempos -= (np * PAGE_SIZE);					\
	(var) = freemempos;						\
	/* NB: this works because locore maps PA=VA */			\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));			\
} while (0)
#define	valloc_pages(var, np) do {					\
	alloc_pages((var).pv_pa, (np));					\
	(var).pv_va = (var).pv_pa + (KERNVIRTADDR - KERNPHYSADDR);	\
} while (0)

	/* force L1 page table alignment */
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos -= PAGE_SIZE;
	/* allocate contiguous L1 page table */
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	/* now allocate L2 page tables; they are linked to L1 below */
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[loop],
			    L2_TABLE_SIZE / PAGE_SIZE);
		} else {
			kernel_pt_table[loop].pv_pa = freemempos +
			    (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
			    L2_TABLE_SIZE_REAL;
			kernel_pt_table[loop].pv_va = 
			    kernel_pt_table[loop].pv_pa +
				(KERNVIRTADDR - KERNPHYSADDR);
		}
	}
	freemem_pt = freemempos;		/* base of allocated pt's */

	/*
	 * Re-align allocation boundary so we can map the area
	 * write-back instead of write-through for the stacks and
	 * related structures allocated below.
	 */
	freemempos = PHYSADDR + 0x100000;
	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, KSTACK_PAGES);
	alloc_pages(minidataclean.pv_pa, 1);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
#ifdef ARM_USE_SMALL_ALLOC
	freemempos -= PAGE_SIZE;
	freemem_pt = trunc_page(freemem_pt);
	freemem_after = freemempos - ((freemem_pt - (PHYSADDR + 0x100000)) /
	    PAGE_SIZE) * sizeof(struct arm_small_page);
	arm_add_smallalloc_pages(
	    (void *)(freemem_after + (KERNVIRTADDR - KERNPHYSADDR)),
	    (void *)0xc0100000,
	    freemem_pt - (PHYSADDR + 0x100000), 1);
	freemem_after -= ((freemem_after - (PHYSADDR + 0x1000)) / PAGE_SIZE) *
	    sizeof(struct arm_small_page);
	arm_add_smallalloc_pages(
	    (void *)(freemem_after + (KERNVIRTADDR - KERNPHYSADDR)),
	    (void *)0xc0001000,
	    trunc_page(freemem_after) - (PHYSADDR + 0x1000), 0);
	freemempos = trunc_page(freemem_after);
	freemempos -= PAGE_SIZE;
#endif

	/*
	 * Now construct the L1 page table.  First map the L2
	 * page tables into the L1 so we can replace L1 mappings
	 * later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/* Map the L2 page tables into the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
	pmap_link_l2pt(l1pagetable, IXP425_IO_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO]);
	pmap_link_l2pt(l1pagetable, IXP425_MCU_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO + 1]);
	pmap_link_l2pt(l1pagetable, IXP425_PCI_MEM_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO + 2]);
	pmap_link_l2pt(l1pagetable, KERNBASE,
	    &kernel_pt_table[KERNEL_PT_BEFOREKERN]);
	pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR, 0x100000,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, PHYSADDR + 0x100000,
	    0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE, KERNEL_TEXT_PHYS,
	    next_chunk2(((uint32_t)lastaddr) - KERNEL_TEXT_BASE, L1_S_SIZE),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	freemem_after = next_page((int)lastaddr);
	afterkern = round_page(next_chunk2((vm_offset_t)lastaddr, L1_S_SIZE));
	for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
		pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000,
		    &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
	}
	pmap_map_entry(l1pagetable, afterkern, minidataclean.pv_pa, 
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

#ifdef ARM_USE_SMALL_ALLOC
	if ((freemem_after + 2 * PAGE_SIZE) <= afterkern) {
		arm_add_smallalloc_pages((void *)(freemem_after),
		    (void*)(freemem_after + PAGE_SIZE),
		    afterkern - (freemem_after + PAGE_SIZE), 0);
		    
	}
#endif

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, afterkern,
	    minidataclean.pv_pa);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	if (cpu_is_ixp43x())
		pmap_devmap_bootstrap(l1pagetable, ixp435_devmap);
	else
		pmap_devmap_bootstrap(l1pagetable, ixp425_devmap);
	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + IRQ_STACK_SIZE*PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ABT_STACK_SIZE*PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE, undstack.pv_va + UND_STACK_SIZE*PAGE_SIZE);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are bootstrapping, the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();
	/* ready to setup the console (XXX move earlier if possible) */
	cninit();
	/*
	 * Fetch the RAM size from the MCU registers.  The
	 * expansion bus was mapped above so we can now read 'em.
	 */
	if (cpu_is_ixp43x())
		memsize = ixp435_ddram_size();
	else
		memsize = ixp425_sdram_size();
	physmem = memsize / PAGE_SIZE;

	/* Set stack for exception handlers */

	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	undefined_init();

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kernelstack.pv_va;
	thread0.td_pcb = (struct pcb *)
		(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	pmap_curmaxkvaddr = afterkern + PAGE_SIZE;
	dump_avail[0] = PHYSADDR;
	dump_avail[1] = PHYSADDR + memsize;
	dump_avail[2] = 0;
	dump_avail[3] = 0;

	pmap_bootstrap(pmap_curmaxkvaddr, 0xd0000000, &kernel_l1pt);
	msgbufp = (void*)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	i = 0;
#ifdef ARM_USE_SMALL_ALLOC
	phys_avail[i++] = PHYSADDR;
	phys_avail[i++] = PHYSADDR + PAGE_SIZE; 	/*
					 *XXX: Gross hack to get our
					 * pages in the vm_page_array.
					 */
#endif
	phys_avail[i++] = round_page(virtual_avail - KERNBASE + PHYSADDR);
	phys_avail[i++] = trunc_page(PHYSADDR + memsize - 1);
	phys_avail[i++] = 0;
	phys_avail[i] = 0;

	init_param2(physmem);
	kdb_init();

	/* use static kernel environment if so configured */
	if (envmode == 1)
		kern_envp = static_env;

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
#undef next_page
#undef next_chunk2
}
Example #22
static void
mips_init(void)
{
	struct mem_region mr[FDT_MEM_REGIONS];
	uint64_t val;
	int i, j, mr_cnt;
	char *memsize;

	printf("entry: mips_init()\n");

	bootverbose = 1;

	for (i = 0; i < 10; i++)
		phys_avail[i] = 0;

	dump_avail[0] = phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);

	/*
	 * The maximum amount of low memory the MT7621 can have. Currently
	 * the MT7621 is the chip that supports the most memory, so this
	 * seems a reasonable default.
	 */
	realmem = btoc(448 * 1024 * 1024);

	if (fdt_get_mem_regions(mr, &mr_cnt, &val) == 0) {
		physmem = btoc(val);

		printf("RAM size: %ldMB (from FDT)\n",
		    ctob(physmem) / (1024 * 1024));

		KASSERT((phys_avail[0] >= mr[0].mr_start) && \
			(phys_avail[0] < (mr[0].mr_start + mr[0].mr_size)),
			("First region is not within FDT memory range"));

		/* Limit size of the first region */
		phys_avail[1] = (mr[0].mr_start +
		    MIN(mr[0].mr_size, ctob(realmem)));
		dump_avail[1] = phys_avail[1];

		/* Add the rest of the regions */
		for (i = 1, j = 2; i < mr_cnt; i++, j+=2) {
			phys_avail[j] = mr[i].mr_start;
			phys_avail[j+1] = (mr[i].mr_start + mr[i].mr_size);
			dump_avail[j] = phys_avail[j];
			dump_avail[j+1] = phys_avail[j+1];
		}
	} else {
		if ((memsize = kern_getenv("memsize")) != NULL) {
			physmem = btoc(strtol(memsize, NULL, 0) << 20);
			printf("RAM size: %ldMB (from memsize)\n",
			    ctob(physmem) / (1024 * 1024));
		} else { /* All else failed, assume 32MB */
			physmem = btoc(32 * 1024 * 1024);
			printf("RAM size: %ldMB (assumed)\n",
			    ctob(physmem) / (1024 * 1024));
		}

		if (ctob(physmem) < (448 * 1024 * 1024)) {
			/*
			 * Anything up to 448MB is assumed to be directly
			 * mappable as low memory...
			 */
			dump_avail[1] = phys_avail[1] = ctob(physmem);
		} else if (mtk_soc_get_socid() == MTK_SOC_MT7621) {
			/*
			 * On MT7621 the low memory is limited to 448MB, the
			 * rest is high memory, mapped at 0x20000000
			 */
			phys_avail[1] = 448 * 1024 * 1024;
			phys_avail[2] = 0x20000000;
			phys_avail[3] = phys_avail[2] + ctob(physmem) -
			    phys_avail[1];
			dump_avail[1] = phys_avail[1] - phys_avail[0];
			dump_avail[2] = phys_avail[2];
			dump_avail[3] = phys_avail[3] - phys_avail[2];
		} else {
			/*
			 * We have > 448MB RAM and we're not MT7621? Currently
			 * there is no such chip, so we'll just limit the RAM to
			 * 32MB and let the user know...
			 */
			printf("Unknown chip, assuming 32MB RAM\n");
			physmem = btoc(32 * 1024 * 1024);
			dump_avail[1] = phys_avail[1] = ctob(physmem);
		}
	}

	if (physmem < realmem)
		realmem = physmem;

	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
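
The MT7621 branch above splits RAM at the 448MB low-memory boundary and
re-bases the remainder at 0x20000000.  A self-contained sketch of just
that split (the function name is hypothetical, and unlike the real code
this ignores the kernel-end offset in the first region):

#include <stdint.h>

#define LOWMEM_LIMIT	(448u * 1024 * 1024)	/* MT7621 low-memory window */
#define HIGHMEM_BASE	0x20000000u		/* where the overflow reappears */

static void
mt7621_mem_split(uint64_t membytes, uint64_t avail[4])
{
	if (membytes <= LOWMEM_LIMIT) {
		/* Everything fits in directly mappable low memory. */
		avail[0] = 0;
		avail[1] = membytes;
		avail[2] = avail[3] = 0;
	} else {
		/* Low window first, then the overflow re-based high. */
		avail[0] = 0;
		avail[1] = LOWMEM_LIMIT;
		avail[2] = HIGHMEM_BASE;
		avail[3] = HIGHMEM_BASE + (membytes - LOWMEM_LIMIT);
	}
}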
Example #23
/*
 * u_int initarm(...)
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 */
u_int
initarm(void *arg)
{
	extern vaddr_t xscale_cache_clean_addr;
#ifdef DIAGNOSTIC
	extern vsize_t xscale_minidata_clean_size;
#endif
	int loop;
	int loop1;
	u_int l1pagetable;
	paddr_t memstart;
	psize_t memsize;

	/*
	 * Clear out the 7-segment display.  Whee, the first visual
	 * indication that we're running kernel code.
	 */
	iq80321_7seg(' ', ' ');

	/* Calibrate the delay loop. */
	i80321_calibrate_delay();
	i80321_hardclock_hook = iq80321_hardclock_hook;

	/*
	 * Since we map the on-board devices VA==PA, and the kernel
	 * is running VA==PA, it's possible for us to initialize
	 * the console now.
	 */
	consinit();

#ifdef VERBOSE_INIT_ARM
	/* Talk to the user */
	printf("\nNetBSD/evbarm (IQ80321) booting ...\n");
#endif

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	if (set_cpufuncs())
		panic("CPU not recognized!");

	/*
	 * We are currently running with the MMU enabled and the
	 * entire address space mapped VA==PA, except that the
	 * first 64M of RAM is also double-mapped at 0xc0000000.
	 * There is an L1 page table at 0xa0004000.
	 */

	/*
	 * Fetch the SDRAM start/size from the i80321 SDRAM configuration
	 * registers.
	 */
	i80321_sdram_bounds(&obio_bs_tag, VERDE_PMMR_BASE + VERDE_MCU_BASE,
	    &memstart, &memsize);

#ifdef VERBOSE_INIT_ARM
	printf("initarm: Configuring system ...\n");
#endif

	/* Fake bootconfig structure for the benefit of pmap.c */
	/* XXX must make the memory description h/w independent */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = memstart;
	bootconfig.dram[0].pages = memsize / PAGE_SIZE;

	/*
	 * Set up the variables that define the availability of
	 * physical memory.  For now, we're going to set
	 * physical_freestart to 0xa0200000 (where the kernel
	 * was loaded), and allocate the memory we need downwards.
	 * If we get too close to the L1 table that we set up, we
	 * will panic.  We will update physical_freestart and
	 * physical_freeend later to reflect what pmap_bootstrap()
	 * wants to see.
	 *
	 * XXX pmap_bootstrap() needs an enema.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);

	physical_freestart = 0xa0009000UL;
	physical_freeend = 0xa0200000UL;

	physmem = (physical_end - physical_start) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	/* Tell the user about the memory */
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	    physical_start, physical_end - 1);
#endif

	/*
	 * Okay, the kernel starts 2MB in from the bottom of physical
	 * memory.  We are going to allocate our bootstrap pages downwards
	 * from there.
	 *
	 * We need to allocate some fixed page tables to get the kernel
	 * going.  We allocate one page directory and a number of page
	 * tables and store the physical addresses in the kernel_pt_table
	 * array.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	       physical_freestart, free_pages, free_pages);
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)				\
	alloc_pages((var).pv_pa, (np));			\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define alloc_pages(var, np)				\
	physical_freeend -= ((np) * PAGE_SIZE);		\
	if (physical_freeend < physical_freestart)	\
		panic("initarm: out of memory");	\
	(var) = physical_freeend;			\
	free_pages -= (np);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));
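
	/*
	 * Worked example of the downward allocator above (assumed numbers):
	 * with physical_freeend == 0xa0200000, alloc_pages(x, 4) first moves
	 * physical_freeend down by 4 * PAGE_SIZE == 0x4000 to 0xa01fc000,
	 * panics if that would cross physical_freestart, stores the new
	 * bottom in 'var', and zeroes the claimed pages.
	 */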

	loop1 = 0;
	kernel_l1pt.pv_pa = 0;
	kernel_l1pt.pv_va = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}

	/* This should never be able to happen but better confirm that. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

	/* Allocate enough pages for cleaning the Mini-Data cache. */
	KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
	valloc_pages(minidataclean, 1);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va); 
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va); 
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va); 
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va); 
#endif

	/*
	 * XXX Defer this to later so that we can reclaim the memory
	 * XXX used by the RedBoot page tables.
	 */
	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE,
	    &kernel_pt_table[KERNEL_PT_IOPXS]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel static code/data */
	{
		extern char etext[], _end[];
		size_t textsize = (uintptr_t) etext - KERNEL_TEXT_BASE;
		size_t totalsize = (uintptr_t) _end - KERNEL_TEXT_BASE;
		u_int logical;

		textsize = (textsize + PGOFSET) & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;
		
		logical = 0x00200000;	/* offset of kernel in RAM */

		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
	    minidataclean.pv_pa);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the statically mapped devices. */
	pmap_devmap_bootstrap(l1pagetable, iq80321_devmap);

	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/*
	 * Update the physical_freestart/physical_freeend/free_pages
	 * variables.
	 */
	{
		extern char _end[];

		physical_freestart = physical_start +
		    (((((uintptr_t) _end) + PGOFSET) & ~PGOFSET) -
		     KERNEL_BASE);
		physical_freeend = physical_end;
		free_pages =
		    (physical_freeend - physical_freestart) / PAGE_SIZE;
	}

	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	       physical_freestart, free_pages, free_pages);
	printf("switching to new L1 page table  @%#lx...", kernel_l1pt.pv_pa);
#endif
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init
	 */
	proc0paddr = (struct user *)kernelstack.pv_va;
	lwp0.l_addr = proc0paddr;

#ifdef VERBOSE_INIT_ARM
	printf("done!\n");
#endif

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	/* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

	/* Setup the IRQ system */
#ifdef VERBOSE_INIT_ARM
	printf("irq ");
#endif
	i80321_intr_init();

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef BOOTHOWTO
	boothowto = BOOTHOWTO;
#endif

#if NKSYMS || defined(DDB) || defined(LKM)
	/* Firmware doesn't load symbols. */
	ksyms_init(0, NULL, NULL);
#endif

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
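
The allocation loop above walks memory downward and places the 16KB L1
page directory at the first 16KB-aligned position it encounters, spending
the unaligned steps on 4KB L2 tables.  A simplified standalone sketch of
the placement rule (names and the page-at-a-time step are assumptions;
the real loop allocates whole L2 tables on the way down):

#include <stdint.h>

#define PAGE_SIZE	4096u
#define L1_TABLE_SIZE	(16u * 1024)

static uint32_t
place_l1_table(uint32_t freeend, uint32_t freestart)
{
	/* Step down one page at a time until the L1 would be aligned. */
	while (((freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) {
		freeend -= PAGE_SIZE;	/* this page would go to an L2 */
		if (freeend - L1_TABLE_SIZE < freestart)
			return (0);	/* out of bootstrap memory */
	}
	return (freeend - L1_TABLE_SIZE);	/* base of the L1 table */
}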
Example #24
void *
initarm(struct arm_boot_params *abp)
{
	struct pv_addr  kernel_l1pt;
	struct pv_addr  dpcpu;
	int loop, i;
	u_int l1pagetable;
	vm_offset_t freemempos;
	vm_offset_t freemem_pt;
	vm_offset_t afterkern;
	vm_offset_t freemem_after;
	vm_offset_t lastaddr;
	uint32_t memsize, memstart;

	lastaddr = parse_boot_param(abp);
	arm_physmem_kernaddr = abp->abp_physaddr;
	set_cpufuncs();
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
	init_param1();

	freemempos = 0xa0200000;
	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)			\
	alloc_pages((var).pv_pa, (np));		\
	(var).pv_va = (var).pv_pa + 0x20000000;

#define alloc_pages(var, np)			\
	freemempos -= (np * PAGE_SIZE);		\
	(var) = freemempos;		\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos -= PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[loop],
			    L2_TABLE_SIZE / PAGE_SIZE);
		} else {
			kernel_pt_table[loop].pv_pa = freemempos +
			    (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
			    L2_TABLE_SIZE_REAL;
			kernel_pt_table[loop].pv_va =
			    kernel_pt_table[loop].pv_pa + 0x20000000;
		}
	}
	freemem_pt = freemempos;
	freemempos = 0xa0100000;
	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, KSTACK_PAGES);
	alloc_pages(minidataclean.pv_pa, 1);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
	/*
	 * Allocate memory for the l1 and l2 page tables. The scheme to avoid
	 * wasting memory by allocating the l1pt on the first 16k memory was
	 * taken from NetBSD rpc_machdep.c. NKPT should be greater than 12 for
	 * this to work (which is supposed to be the case).
	 */

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
	pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE,
	    &kernel_pt_table[KERNEL_PT_IOPXS]);
	pmap_link_l2pt(l1pagetable, KERNBASE,
	    &kernel_pt_table[KERNEL_PT_BEFOREKERN]);
	pmap_map_chunk(l1pagetable, KERNBASE, IQ80321_SDRAM_START, 0x100000,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, IQ80321_SDRAM_START + 0x100000,
	    0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, IQ80321_SDRAM_START + 0x200000,
	    (((uint32_t)(lastaddr) - KERNBASE - 0x200000) + L1_S_SIZE) & ~(L1_S_SIZE - 1),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	freemem_after = ((int)lastaddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	afterkern = round_page(((vm_offset_t)lastaddr + L1_S_SIZE) & ~(L1_S_SIZE
	    - 1));
	for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
		pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000,
		    &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
	}
	pmap_map_entry(l1pagetable, afterkern, minidataclean.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, afterkern,
	    minidataclean.pv_pa);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	arm_devmap_bootstrap(l1pagetable, ep80219_devmap);
	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();
	cpu_setup("");

	/*
	 * Fetch the SDRAM start/size from the i80321 SDRAM configuration
	 * registers.
	 */
	i80321_calibrate_delay();
	i80321_sdram_bounds(obio_bs_tag, IQ80321_80321_VBASE + VERDE_MCU_BASE,
	    &memstart, &memsize);
	physmem = memsize / PAGE_SIZE;
	cninit();

	undefined_init();
				
	init_proc0(kernelstack.pv_va);
	
	/* Enable MMU, I-cache, D-cache, write buffer. */

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	vm_max_kernel_address = 0xd0000000;
	pmap_bootstrap(pmap_curmaxkvaddr, &kernel_l1pt);
	msgbufp = (void*)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	
	/*
	 * Add the physical ram we have available.
	 *
	 * Exclude the kernel (and all the things we allocated which immediately
	 * follow the kernel) from the VM allocation pool but not from crash
	 * dumps.  virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_hardware_region(IQ80321_SDRAM_START, memsize);
	arm_physmem_exclude_region(abp->abp_physaddr, 
	    virtual_avail - KERNVIRTADDR, EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	kdb_init();
	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
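
The arm_physmem calls above first register the whole SDRAM bank and then
carve the kernel image back out of the allocation pool.  Reduced to its
core, excluding one range from another leaves at most two surviving
spans; a minimal sketch of that operation (types and names are
illustrative, the real code tracks many regions plus per-region flags):

#include <stdint.h>

struct span {
	uint64_t start;		/* half-open interval [start, end) */
	uint64_t end;
};

static int
exclude_span(struct span ram, struct span ex, struct span out[2])
{
	int n = 0;

	if (ex.start > ram.start)	/* memory below the exclusion */
		out[n++] = (struct span){ ram.start,
		    ex.start < ram.end ? ex.start : ram.end };
	if (ex.end < ram.end)		/* memory above the exclusion */
		out[n++] = (struct span){ ex.end > ram.start ?
		    ex.end : ram.start, ram.end };
	return (n);			/* number of surviving spans */
}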
Example #25
static void
mips_init(void)
{
    int i, j, cfe_mem_idx, tmp;
    uint64_t maxmem;

#ifdef CFE_ENV
    cfe_env_init();
#endif

    TUNABLE_INT_FETCH("boothowto", &boothowto);

    if (boothowto & RB_VERBOSE)
        bootverbose++;

#ifdef MAXMEM
    tmp = MAXMEM;
#else
    tmp = 0;
#endif
    TUNABLE_INT_FETCH("hw.physmem", &tmp);
    maxmem = (uint64_t)tmp * 1024;

    /*
     * XXX
     * If we used vm_paddr_t consistently in pmap, etc., we could
     * use 64-bit page numbers on !n64 systems, too, like i386
     * does with PAE.
     */
#if !defined(__mips_n64)
    if (maxmem == 0 || maxmem > 0xffffffff)
        maxmem = 0xffffffff;
#endif

#ifdef CFE
    /*
     * Query DRAM memory map from CFE.
     */
    physmem = 0;
    cfe_mem_idx = 0;
    for (i = 0; i < 10; i += 2) {
        int result;
        uint64_t addr, len, type;

        result = cfe_enummem(cfe_mem_idx++, 0, &addr, &len, &type);
        if (result < 0) {
            phys_avail[i] = phys_avail[i + 1] = 0;
            break;
        }

        KASSERT(type == CFE_MI_AVAILABLE,
                ("CFE DRAM region is not available?"));

        if (bootverbose)
            printf("cfe_enummem: 0x%016jx/%ju.\n", addr, len);

        if (maxmem != 0) {
            if (addr >= maxmem) {
                printf("Ignoring %ju bytes of memory at 0x%jx "
                       "that is above maxmem %dMB\n",
                       len, addr,
                       (int)(maxmem / (1024 * 1024)));
                continue;
            }

            if (addr + len > maxmem) {
                printf("Ignoring %ju bytes of memory "
                       "that is above maxmem %dMB\n",
                       (addr + len) - maxmem,
                       (int)(maxmem / (1024 * 1024)));
                len = maxmem - addr;
            }
        }

        phys_avail[i] = addr;
        if (i == 0 && addr == 0) {
            /*
             * If this is the first physical memory segment probed
             * from CFE, omit the region at the start of physical
             * memory where the kernel has been loaded.
             */
            phys_avail[i] += MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
        }
        phys_avail[i + 1] = addr + len;
        physmem += len;
    }

    realmem = btoc(physmem);
#endif

    for (j = 0; j < i; j++)
        dump_avail[j] = phys_avail[j];

    physmem = realmem;

    init_param1();
    init_param2(physmem);
    mips_cpu_init();

    /*
     * Sibyte has a L1 data cache coherent with DMA. This includes
     * on-chip network interfaces as well as PCI/HyperTransport bus
     * masters.
     */
    cpuinfo.cache_coherent_dma = TRUE;

    /*
     * XXX
     * The kernel is running in 32-bit mode but the CFE is running in
     * 64-bit mode. So the SR_KX bit in the status register is turned
     * on by the CFE every time we call into it - e.g. for CFE_CONSOLE.
     *
     * This means that if we get a TLB miss for any address above 0xc0000000
     * and the SR_KX bit is set then we will end up in the XTLB exception
     * vector.
     *
     * For now work around this by copying the TLB exception handling
     * code to the XTLB exception vector.
     */
    {
        bcopy(MipsTLBMiss, (void *)MIPS3_XTLB_MISS_EXC_VEC,
              MipsTLBMissEnd - MipsTLBMiss);

        mips_icache_sync_all();
        mips_dcache_wbinv_all();
    }

    pmap_bootstrap();
    mips_proc0_init();
    mutex_init();

    kdb_init();
#ifdef KDB
    if (boothowto & RB_KDB)
        kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
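
The CFE enumeration loop above clamps every DRAM region it discovers
against the hw.physmem limit (maxmem), dropping regions that lie entirely
above it and trimming ones that straddle it.  The same rule as a
standalone helper (an assumption for illustration, not a kernel API):

#include <stdint.h>

static uint64_t
clamp_to_maxmem(uint64_t addr, uint64_t len, uint64_t maxmem)
{
	if (maxmem == 0)
		return (len);		/* 0 means no limit was set */
	if (addr >= maxmem)
		return (0);		/* region entirely above the limit */
	if (addr + len > maxmem)
		len = maxmem - addr;	/* trim a straddling region */
	return (len);
}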
Example #26
void
platform_start(__register_t a0, __register_t a1, __register_t a2 __unused,
    __register_t a3)
{
	const struct octeon_feature_description *ofd;
	uint64_t platform_counter_freq;
	int rv;

	mips_postboot_fixup();

	/*
	 * Initialize boot parameters so that we can determine things like
	 * which console we should use, etc.
	 */
	octeon_boot_params_init(a3);

	/* Initialize pcpu stuff */
	mips_pcpu0_init();
	mips_timer_early_init(cvmx_sysinfo_get()->cpu_clock_hz);

	/* Initialize console.  */
	cninit();

	/*
	 * Display information about the CPU.
	 */
#if !defined(OCTEON_MODEL)
	printf("Using runtime CPU model checks.\n");
#else
	printf("Compiled for CPU model: " __XSTRING(OCTEON_MODEL) "\n");
#endif
	strcpy(cpu_model, octeon_model_get_string(cvmx_get_proc_id()));
	printf("CPU Model: %s\n", cpu_model);
	printf("CPU clock: %uMHz  Core Mask: %#x\n",
	       cvmx_sysinfo_get()->cpu_clock_hz / 1000000,
	       cvmx_sysinfo_get()->core_mask);
	rv = octeon_model_version_check(cvmx_get_proc_id());
	if (rv == -1)
		panic("%s: kernel not compatible with this processor.", __func__);

	/*
	 * Display information about the board.
	 */
#if defined(OCTEON_BOARD_CAPK_0100ND)
	strcpy(cpu_board, "CAPK-0100ND");
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_CN3010_EVB_HS5) {
		panic("Compiled for %s, but board type is %s.", cpu_board,
		       cvmx_board_type_to_string(cvmx_sysinfo_get()->board_type));
	}
#else
	strcpy(cpu_board,
	       cvmx_board_type_to_string(cvmx_sysinfo_get()->board_type));
#endif
	printf("Board: %s\n", cpu_board);
	printf("Board Type: %u  Revision: %u/%u\n",
	       cvmx_sysinfo_get()->board_type,
	       cvmx_sysinfo_get()->board_rev_major,
	       cvmx_sysinfo_get()->board_rev_minor);
	printf("Serial number: %s\n", cvmx_sysinfo_get()->board_serial_number);

	/*
	 * Additional on-chip hardware/settings.
	 *
	 * XXX Display PCI host/target?  What else?
	 */
	printf("MAC address base: %6D (%u configured)\n",
	       cvmx_sysinfo_get()->mac_addr_base, ":",
	       cvmx_sysinfo_get()->mac_addr_count);


	octeon_ciu_reset();
	/*
	 * Convert U-Boot 'bootoctlinux' loader command line arguments into
	 * boot flags and kernel environment variables.
	 */
	bootverbose = 1;
	octeon_init_kenv(a3);

	/*
	 * For some reason, on the cn38xx simulator the ebase register is set
	 * to 0x80001000 at bootup time.  Move it back to the default, but
	 * when we move to having support for multiple executives, we need
	 * to rethink this.
	 */
	mips_wr_ebase(0x80000000);

	octeon_memory_init();
	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
	cpu_clock = cvmx_sysinfo_get()->cpu_clock_hz;
	platform_counter_freq = cpu_clock;
	octeon_timecounter.tc_frequency = cpu_clock;
	platform_timecounter = &octeon_timecounter;
	mips_timer_init_params(platform_counter_freq, 0);
	set_cputicker(octeon_get_ticks, cpu_clock, 0);

#ifdef SMP
	/*
	 * Clear any pending IPIs.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(0), 0xffffffff);
#endif

	printf("Octeon SDK: %s\n", OCTEON_SDK_VERSION_STRING);
	printf("Available Octeon features:");
	for (ofd = octeon_feature_descriptions; ofd->ofd_string != NULL; ofd++)
		if (octeon_has_feature(ofd->ofd_feature))
			printf(" %s", ofd->ofd_string);
	printf("\n");
}
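
The feature printout above iterates a descriptor table terminated by a
NULL string pointer, a common kernel idiom for mapping capability bits to
names.  A self-contained version of the pattern (the table contents and
names here are made up for illustration):

#include <stdio.h>

struct feat_desc {
	const char	*fd_name;
	unsigned int	 fd_bit;
};

static const struct feat_desc feat_table[] = {
	{ "crypto",	0 },
	{ "pcie",	1 },
	{ "zip",	2 },
	{ NULL,		0 }	/* terminator */
};

static void
print_feats(unsigned int mask)
{
	const struct feat_desc *fd;

	for (fd = feat_table; fd->fd_name != NULL; fd++)
		if (mask & (1u << fd->fd_bit))
			printf(" %s", fd->fd_name);
	printf("\n");
}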
Example #27
void *
initarm(struct arm_boot_params *abp)
{
	struct pv_addr	kernel_l1pt;
	int loop;
	u_int l1pagetable;
	vm_offset_t freemempos;
	vm_offset_t afterkern;
	vm_offset_t lastaddr;

	int i;
	uint32_t memsize;

	boothowto = 0;  /* Likely not needed */
	lastaddr = parse_boot_param(abp);
	i = 0;
	set_cpufuncs();
	cpufuncs.cf_sleep = s3c24x0_sleep;

	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

#define KERNEL_TEXT_BASE (KERNBASE)
	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;
	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)			\
	alloc_pages((var).pv_va, (np));		\
	(var).pv_pa = (var).pv_va + (KERNPHYSADDR - KERNVIRTADDR);

#define alloc_pages(var, np)			\
	(var) = freemempos;			\
	freemempos += (np * PAGE_SIZE);		\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));
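
	/*
	 * Note: unlike the downward allocators in the earlier examples,
	 * this one grows upward from the end of the kernel image: 'var'
	 * receives the current freemempos and the cursor then advances by
	 * np pages.
	 */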

	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[loop],
			    L2_TABLE_SIZE / PAGE_SIZE);
		} else {
			kernel_pt_table[loop].pv_va = freemempos -
			    (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
			    L2_TABLE_SIZE_REAL;
			kernel_pt_table[loop].pv_pa =
			    kernel_pt_table[loop].pv_va - KERNVIRTADDR +
			    KERNPHYSADDR;
		}
	}
	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, KSTACK_PAGES);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[KERNEL_PT_SYS]);
	for (i = 0; i < KERNEL_PT_KERN_NUM; i++)
		pmap_link_l2pt(l1pagetable, KERNBASE + i * L1_S_SIZE,
		    &kernel_pt_table[KERNEL_PT_KERN + i]);
	pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR,
	   (((uint32_t)(lastaddr) - KERNBASE) + PAGE_SIZE) & ~(PAGE_SIZE - 1),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	afterkern = round_page((lastaddr + L1_S_SIZE) & ~(L1_S_SIZE
	    - 1));
	for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
		pmap_link_l2pt(l1pagetable, afterkern + i * L1_S_SIZE,
		    &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
	}

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	pmap_map_chunk(l1pagetable, msgbufpv.pv_va, msgbufpv.pv_pa,
	    msgbufsize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);


	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	arm_devmap_bootstrap(l1pagetable, s3c24x0_devmap);

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */

	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);
	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	/* Disable all peripheral interrupts */
	ioreg_write32(S3C24X0_INTCTL_BASE + INTCTL_INTMSK, ~0);
	memsize = board_init();
	/* Find pclk for uart */
	switch(ioreg_read32(S3C24X0_GPIO_BASE + GPIO_GSTATUS1) >> 16) {
	case 0x3241:
		s3c2410_clock_freq2(S3C24X0_CLKMAN_BASE, NULL, NULL,
		    &s3c2410_pclk);
		break;
	case 0x3244:
		s3c2440_clock_freq2(S3C24X0_CLKMAN_BASE, NULL, NULL,
		    &s3c2410_pclk);
		break;
	}
	cninit();

	/* Set stack for exception handlers */
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	undefined_init();
	
	init_proc0(kernelstack.pv_va);			
	
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	pmap_curmaxkvaddr = afterkern + 0x100000 * (KERNEL_PT_KERN_NUM - 1);
	arm_dump_avail_init(memsize, sizeof(dump_avail) / sizeof(dump_avail[0]));
	vm_max_kernel_address = KERNVIRTADDR + 3 * memsize;
	pmap_bootstrap(freemempos, &kernel_l1pt);
	msgbufp = (void*)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	physmem = memsize / PAGE_SIZE;

	phys_avail[0] = virtual_avail - KERNVIRTADDR + KERNPHYSADDR;
	phys_avail[1] = PHYSADDR + memsize;
	phys_avail[2] = 0;
	phys_avail[3] = 0;

	init_param2(physmem);
	kdb_init();

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
Example #28
/*
 * u_int initarm(...)
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 */
u_int
initarm(void *arg)
{
	/*
	 * When we enter here, we are using a temporary first level
	 * translation table with section entries in it to cover the TIPB
	 * peripherals and SDRAM.  The temporary first level translation table
	 * is at the end of SDRAM.
	 */

	/* Heads up ... Setup the CPU / MMU / TLB functions. */
	if (set_cpufuncs())
		panic("cpu not recognized!");

	init_clocks();

	/* The console is going to try to map things.  Give pmap a devmap. */
	pmap_devmap_register(devmap);
	consinit();
#ifdef KGDB
	kgdb_port_init();
#endif

#ifdef VERBOSE_INIT_ARM
	/* Talk to the user */
	printf("\nNetBSD/evbarm (OSK5912) booting ...\n");
#endif

#ifdef BOOT_ARGS
	char mi_bootargs[] = BOOT_ARGS;
	parse_mi_bootargs(mi_bootargs);
#endif

#ifdef VERBOSE_INIT_ARM
	printf("initarm: Configuring system ...\n");
#endif

	/*
	 * Set up the variables that define the availability of physical
	 * memory.
	 */
	physical_start = KERNEL_BASE_PHYS;
	physical_end = physical_start + MEMSIZE_BYTES;
	physmem = MEMSIZE_BYTES / PAGE_SIZE;

	/* Fake bootconfig structure for the benefit of pmap.c. */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = physical_start;
	bootconfig.dram[0].pages = physmem;

	/*
	 * Our kernel is at the beginning of memory, so set our free space to
	 * all the memory after the kernel.
	 */
	physical_freestart = KERN_VTOPHYS(round_page((vaddr_t) _end));
	physical_freeend = physical_end;
	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

	/*
	 * This is going to do all the hard work of setting up the first
	 * and second level page tables.  Pages of memory will be allocated
	 * and mapped for other structures that are required for system
	 * operation.  When it returns, physical_freestart and free_pages will
	 * have been updated to reflect the allocations that were made.  In
	 * addition, kernel_l1pt, kernel_pt_table[], systempage, irqstack,
	 * abtstack, undstack, kernelstack, msgbufphys will be set to point to
	 * the memory that was allocated for them.
	 */
	setup_real_page_tables();

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init.
	 */
	proc0paddr = (struct user *)kernelstack.pv_va;
	lwp0.l_addr = proc0paddr;

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE, undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();        /* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	/* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef KGDB
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

#ifdef DDB
	db_machine_init();

	/* Firmware doesn't load symbols. */
	ddb_init(0, NULL, NULL);

	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
Example #29
void *
initarm(void *arg, void *arg2)
{
	struct pcpu *pc;
	struct pv_addr  kernel_l1pt;
	struct pv_addr	md_addr;
	struct pv_addr	md_bla;
	struct pv_addr  dpcpu;
	int loop;
	u_int l1pagetable;
	vm_offset_t freemempos;
	vm_offset_t lastalloced;
	vm_offset_t lastaddr;
	uint32_t memsize = 32 * 1024 * 1024;
	sa1110_uart_vaddr = SACOM1_VBASE;

	boothowto = RB_VERBOSE | RB_SINGLE;
	cninit();
	set_cpufuncs();
	lastaddr = fake_preload_metadata();
	physmem = memsize / PAGE_SIZE;
	pc = &__pcpu;
	pcpu_init(pc, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
	init_param1();
		
	physical_start = (vm_offset_t) KERNBASE;
	physical_end =  lastaddr;
	physical_freestart = (((vm_offset_t)physical_end) + PAGE_MASK) & ~PAGE_MASK;
	md_addr.pv_va = md_addr.pv_pa = MDROOT_ADDR;
	freemempos = (vm_offset_t)round_page(physical_freestart);
	memset((void *)freemempos, 0, 256*1024);
		/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)			\
	alloc_pages((var).pv_pa, (np));		\
	(var).pv_va = (var).pv_pa;

#define alloc_pages(var, np)			\
	(var) = freemempos;		\
	freemempos += ((np) * PAGE_SIZE);\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	while ((freemempos & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	valloc_pages(md_bla, L2_TABLE_SIZE / PAGE_SIZE);
	alloc_pages(sa1_cache_clean_addr, CPU_SA110_CACHE_CLEAN_SIZE / PAGE_SIZE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[loop],
			    L2_TABLE_SIZE / PAGE_SIZE);
		} else {
			kernel_pt_table[loop].pv_pa = freemempos +
			    (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
			    L2_TABLE_SIZE_REAL;
			kernel_pt_table[loop].pv_va = 
			    kernel_pt_table[loop].pv_pa;
		}
	}

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, KSTACK_PAGES);
	lastalloced = kernelstack.pv_va;

	/*
	 * Allocate memory for the l1 and l2 page tables. The scheme to avoid
	 * wasting memory by allocating the l1pt on the first 16k memory was
	 * taken from NetBSD rpc_machdep.c. NKPT should be greater than 12 for
	 * this to work (which is supposed to be the case).
	 */

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;


	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
	pmap_link_l2pt(l1pagetable, KERNBASE,
	    &kernel_pt_table[KERNEL_PT_KERNEL]);
	pmap_link_l2pt(l1pagetable, 0xd0000000,
	    &kernel_pt_table[KERNEL_PT_IO]);
	pmap_link_l2pt(l1pagetable, lastalloced & ~((L1_S_SIZE * 4) - 1),
	    &kernel_pt_table[KERNEL_PT_L1]);
	pmap_link_l2pt(l1pagetable, 0x90000000, &kernel_pt_table[KERNEL_PT_IRQ]);
	pmap_link_l2pt(l1pagetable, MDROOT_ADDR,
	    &md_bla);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00100000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
	pmap_map_chunk(l1pagetable, KERNBASE, KERNBASE,
	    ((uint32_t)lastaddr - KERNBASE), VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE);
	/* Map the DPCPU pages */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa, DPCPU_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, md_addr.pv_va, md_addr.pv_pa,
	    MD_ROOT_SIZE * 1024, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}
	pmap_map_chunk(l1pagetable, md_bla.pv_va, md_bla.pv_pa, L2_TABLE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	/* Map the vector page. */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	/* Map the statically mapped devices. */
	pmap_devmap_bootstrap(l1pagetable, assabet_devmap);
	pmap_map_chunk(l1pagetable, sa1_cache_clean_addr, 0xf0000000, 
	    CPU_SA110_CACHE_CLEAN_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	undefined_init();
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	bootverbose = 1;

	/* Set stack for exception handlers */
	
	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kernelstack.pv_va;
	thread0.td_pcb = (struct pcb *)
		(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_frame = &proc0_tf;
	
	
	/* Enable MMU, I-cache, D-cache, write buffer. */

	cpufunc_control(0x337f, 0x107d);
	arm_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	pmap_curmaxkvaddr = freemempos + KERNEL_PT_VMDATA_NUM * 0x400000;

	dump_avail[0] = phys_avail[0] = round_page(virtual_avail);
	dump_avail[1] = phys_avail[1] = 0xc0000000 + 0x02000000 - 1;
	dump_avail[2] = phys_avail[2] = 0;
	dump_avail[3] = phys_avail[3] = 0;
					
	mutex_init();
	pmap_bootstrap(freemempos, 0xd0000000, &kernel_l1pt);

	init_param2(physmem);
	kdb_init();
	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
Example #30
void
platform_start(__register_t a0 __unused, __register_t a1 __unused, 
    __register_t a2 __unused, __register_t a3 __unused)
{
	uint64_t platform_counter_freq;
	uint32_t reg;
	int argc, i, count = 0;
	char **argv, **envp;
	vm_offset_t kernend;

	/* 
	 * clear the BSS and SBSS segments, this should be first call in
	 * the function
	 */
	kernend = (vm_offset_t)&end;
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	mips_postboot_fixup();

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	argc = a0;
	argv = (char**)a1;
	envp = (char**)a2;
	/* 
	 * Protect ourselves from garbage in registers 
	 */
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i += 2)
		{
			if (strcmp(envp[i], "memsize") == 0)
				realmem = btoc(strtoul(envp[i+1], NULL, 16));
			else if (strcmp(envp[i], "ethaddr") == 0) {
				count = sscanf(envp[i+1], "%x.%x.%x.%x.%x.%x", 
				    &ar711_base_mac[0], &ar711_base_mac[1],
				    &ar711_base_mac[2], &ar711_base_mac[3],
				    &ar711_base_mac[4], &ar711_base_mac[5]);
				if (count < 6)
					memset(ar711_base_mac, 0,
					    sizeof(ar711_base_mac));
			}
		}
	}

	/*
	 * Just a wild guess.  RedBoot let us down and didn't report the
	 * memory size.
	 */
	if (realmem == 0)
		realmem = btoc(32*1024*1024);

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	physmem = realmem;

	/*
	 * The ns8250 uart code uses DELAY, so the ticker should be
	 * initialized before cninit.  And tick_init_params refers to hz,
	 * so init_param1 should be called first.
	 */
	init_param1();
	platform_counter_freq = ar71xx_cpu_freq();
	mips_timer_init_params(platform_counter_freq, 1);
	cninit();
	init_static_kenv(boot1_env, sizeof(boot1_env));

	printf("platform frequency: %lld\n", platform_counter_freq);
	printf("arguments: \n");
	printf("  a0 = %08x\n", a0);
	printf("  a1 = %08x\n", a1);
	printf("  a2 = %08x\n", a2);
	printf("  a3 = %08x\n", a3);

	printf("Cmd line:");
	if (MIPS_IS_VALID_PTR(argv)) {
		for (i = 0; i < argc; i++) {
			printf(" %s", argv[i]);
			parse_argv(argv[i]);
		}
	}
	else
		printf ("argv is invalid");
	printf("\n");

	printf("Environment:\n");
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i+=2) {
			printf("  %s = %s\n", envp[i], envp[i+1]);
			setenv(envp[i], envp[i+1]);
		}
	}
	else 
		printf ("envp is invalid\n");

	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();

	/*
	 * Reset USB devices 
	 */
	reg = ATH_READ_REG(AR71XX_RST_RESET);
	reg |= 
	    RST_RESET_USB_OHCI_DLL | RST_RESET_USB_HOST | RST_RESET_USB_PHY;
	ATH_WRITE_REG(AR71XX_RST_RESET, reg);
	DELAY(1000);
	reg &= 
	    ~(RST_RESET_USB_OHCI_DLL | RST_RESET_USB_HOST | RST_RESET_USB_PHY);
	ATH_WRITE_REG(AR71XX_RST_RESET, reg);
	
	ATH_WRITE_REG(AR71XX_USB_CTRL_CONFIG,
	    USB_CTRL_CONFIG_OHCI_DES_SWAP | USB_CTRL_CONFIG_OHCI_BUF_SWAP |
	    USB_CTRL_CONFIG_EHCI_DES_SWAP | USB_CTRL_CONFIG_EHCI_BUF_SWAP);

	ATH_WRITE_REG(AR71XX_USB_CTRL_FLADJ, 
	    (32 << USB_CTRL_FLADJ_HOST_SHIFT) | (3 << USB_CTRL_FLADJ_A5_SHIFT));
	DELAY(1000);

	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
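
RedBoot passes its environment as alternating key/value strings, which
the loop near the top of this example scans for "memsize" and "ethaddr".
That scan reduced to a standalone helper (hypothetical name; the real
code also validates the pointer with MIPS_IS_VALID_PTR first):

#include <stdlib.h>
#include <string.h>

static unsigned long
redboot_env_memsize(char **envp)
{
	int i;

	/* envp holds pairs: envp[i] is a name, envp[i + 1] its value. */
	for (i = 0; envp[i] != NULL; i += 2)
		if (strcmp(envp[i], "memsize") == 0)
			return (strtoul(envp[i + 1], NULL, 16));
	return (0);	/* RedBoot didn't report a size */
}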