Example #1
static void
mips_init(void)
{
	int i;

	printf("entry: mips_init()\n");

	bootverbose = 1;
	realmem = btoc(32 << 20);

	for (i = 0; i < 10; i++) {
		phys_avail[i] = 0;
	}

	/* phys_avail regions are in bytes */
	dump_avail[0] = phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	dump_avail[1] = phys_avail[1] = ctob(realmem);

	physmem = realmem;

	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
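
A note on the convention Example #1 relies on: phys_avail[] and dump_avail[] hold [start, end) byte-address pairs, and the loop that zeroes ten entries guarantees a zero pair acts as the terminator. The following standalone sketch (illustrative names, not kernel code) shows how a consumer walks that layout.

#include <stdint.h>
#include <stdio.h>

/* Two [start, end) pairs would look like this; zeroes terminate. */
static uint64_t phys_avail_demo[10] = {
	0x00100000, 0x02000000,		/* 1 MB .. 32 MB */
	0, 0				/* terminator */
};

/* Sum the bytes described by a zero-terminated pair array. */
static uint64_t
total_avail(const uint64_t *pa)
{
	uint64_t total = 0;
	int i;

	for (i = 0; pa[i + 1] != 0; i += 2)
		total += pa[i + 1] - pa[i];
	return (total);
}

int
main(void)
{
	printf("available: %llu bytes\n",
	    (unsigned long long)total_avail(phys_avail_demo));
	return (0);
}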
Example #2
static void
mips_init(void)
{
	int i;

	for (i = 0; i < 10; i++) {
		phys_avail[i] = 0;
	}

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS((vm_offset_t)&end);
	phys_avail[1] = ctob(realmem);

	physmem = realmem;

	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
#ifdef DDB
	kdb_init();
#endif
}
Example #3
static void
mips_init(void)
{
	int i;
#ifdef FDT
	struct mem_region mr[FDT_MEM_REGIONS];
	uint64_t val;
	int mr_cnt;
	int j;
#endif

	for (i = 0; i < 10; i++) {
		phys_avail[i] = 0;
	}

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	dump_avail[0] = phys_avail[0];
	dump_avail[1] = phys_avail[1];

	physmem = realmem;

#ifdef FDT
	if (fdt_get_mem_regions(mr, &mr_cnt, &val) == 0) {

		physmem = btoc(val);

		KASSERT((phys_avail[0] >= mr[0].mr_start) &&
			(phys_avail[0] < (mr[0].mr_start + mr[0].mr_size)),
			("First region is not within FDT memory range"));

		/* Limit size of the first region */
		phys_avail[1] = (mr[0].mr_start + MIN(mr[0].mr_size, ctob(realmem)));
		dump_avail[1] = phys_avail[1];

		/* Add the rest of regions */
		for (i = 1, j = 2; i < mr_cnt; i++, j+=2) {
			phys_avail[j] = mr[i].mr_start;
			phys_avail[j+1] = (mr[i].mr_start + mr[i].mr_size);
			dump_avail[j] = phys_avail[j];
			dump_avail[j+1] = phys_avail[j+1];
		}
	}
#endif

	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
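
The FDT branch in Example #3 packs mem_region start/size records into that same pair layout, starting at index 2 because the first pair is already pinned around the kernel. A compact userland restatement of just the packing step (struct and function names are assumptions for this sketch):

#include <stdint.h>

struct mem_region_demo {
	uint64_t mr_start;
	uint64_t mr_size;
};

/*
 * Convert start/size records into [start, end) pairs and add the
 * zero terminator; the caller provides room for mr_cnt pairs plus
 * the terminator. Example #3 starts j at 2 instead of 0.
 */
static void
regions_to_pairs(const struct mem_region_demo *mr, int mr_cnt,
    uint64_t *avail)
{
	int i, j;

	for (i = 0, j = 0; i < mr_cnt; i++, j += 2) {
		avail[j] = mr[i].mr_start;
		avail[j + 1] = mr[i].mr_start + mr[i].mr_size;
	}
	avail[j] = avail[j + 1] = 0;
}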
Example #4
void *
initarm(struct arm_boot_params *abp)
{
	struct pv_addr	kernel_l1pt;
	int loop;
	u_int l1pagetable;
	vm_offset_t freemempos;
	vm_offset_t afterkern;
	vm_offset_t lastaddr;

	int i;
	uint32_t memsize;

	boothowto = 0;  /* Likely not needed */
	lastaddr = parse_boot_param(abp);
	i = 0;
	set_cpufuncs();
	cpufuncs.cf_sleep = s3c24x0_sleep;

	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

#define KERNEL_TEXT_BASE (KERNBASE)
	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;
	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)			\
	alloc_pages((var).pv_va, (np));		\
	(var).pv_pa = (var).pv_va + (KERNPHYSADDR - KERNVIRTADDR);

#define alloc_pages(var, np)			\
	(var) = freemempos;			\
	freemempos += (np * PAGE_SIZE);		\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[loop],
			    L2_TABLE_SIZE / PAGE_SIZE);
		} else {
			kernel_pt_table[loop].pv_va = freemempos -
			    (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
			    L2_TABLE_SIZE_REAL;
			kernel_pt_table[loop].pv_pa =
			    kernel_pt_table[loop].pv_va - KERNVIRTADDR +
			    KERNPHYSADDR;
		}
	}
	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, KSTACK_PAGES);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[KERNEL_PT_SYS]);
	for (i = 0; i < KERNEL_PT_KERN_NUM; i++)
		pmap_link_l2pt(l1pagetable, KERNBASE + i * L1_S_SIZE,
		    &kernel_pt_table[KERNEL_PT_KERN + i]);
	pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR,
	   (((uint32_t)(lastaddr) - KERNBASE) + PAGE_SIZE) & ~(PAGE_SIZE - 1),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	afterkern = round_page((lastaddr + L1_S_SIZE) & ~(L1_S_SIZE
	    - 1));
	for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
		pmap_link_l2pt(l1pagetable, afterkern + i * L1_S_SIZE,
		    &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
	}

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	pmap_map_chunk(l1pagetable, msgbufpv.pv_va, msgbufpv.pv_pa,
	    msgbufsize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);


	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	arm_devmap_bootstrap(l1pagetable, s3c24x0_devmap);

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */

	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);
	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	/* Disable all peripheral interrupts */
	ioreg_write32(S3C24X0_INTCTL_BASE + INTCTL_INTMSK, ~0);
	memsize = board_init();
	/* Find pclk for uart */
	switch(ioreg_read32(S3C24X0_GPIO_BASE + GPIO_GSTATUS1) >> 16) {
	case 0x3241:
		s3c2410_clock_freq2(S3C24X0_CLKMAN_BASE, NULL, NULL,
		    &s3c2410_pclk);
		break;
	case 0x3244:
		s3c2440_clock_freq2(S3C24X0_CLKMAN_BASE, NULL, NULL,
		    &s3c2410_pclk);
		break;
	}
	cninit();

	/* Set stack for exception handlers */
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	undefined_init();
	
	init_proc0(kernelstack.pv_va);			
	
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	pmap_curmaxkvaddr = afterkern + 0x100000 * (KERNEL_PT_KERN_NUM - 1);
	arm_dump_avail_init(memsize, sizeof(dump_avail) / sizeof(dump_avail[0]));
	vm_max_kernel_address = KERNVIRTADDR + 3 * memsize;
	pmap_bootstrap(freemempos, &kernel_l1pt);
	msgbufp = (void*)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	physmem = memsize / PAGE_SIZE;

	phys_avail[0] = virtual_avail - KERNVIRTADDR + KERNPHYSADDR;
	phys_avail[1] = PHYSADDR + memsize;
	phys_avail[2] = 0;
	phys_avail[3] = 0;

	init_param2(physmem);
	kdb_init();

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
Example #5
static void
mips_init(void)
{
	struct mem_region mr[FDT_MEM_REGIONS];
	uint64_t val;
	int i, j, mr_cnt;
	char *memsize;

	printf("entry: mips_init()\n");

	bootverbose = 1;

	for (i = 0; i < 10; i++)
		phys_avail[i] = 0;

	dump_avail[0] = phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);

	/*
	 * The maximum amount of low memory the MT7621 can have. Currently
	 * the MT7621 is the chip that supports the most memory, so that
	 * seems a reasonable upper bound.
	 */
	realmem = btoc(448 * 1024 * 1024);

	if (fdt_get_mem_regions(mr, &mr_cnt, &val) == 0) {
		physmem = btoc(val);

		printf("RAM size: %ldMB (from FDT)\n",
		    ctob(physmem) / (1024 * 1024));

		KASSERT((phys_avail[0] >= mr[0].mr_start) &&
			(phys_avail[0] < (mr[0].mr_start + mr[0].mr_size)),
			("First region is not within FDT memory range"));

		/* Limit size of the first region */
		phys_avail[1] = (mr[0].mr_start +
		    MIN(mr[0].mr_size, ctob(realmem)));
		dump_avail[1] = phys_avail[1];

		/* Add the rest of the regions */
		for (i = 1, j = 2; i < mr_cnt; i++, j+=2) {
			phys_avail[j] = mr[i].mr_start;
			phys_avail[j+1] = (mr[i].mr_start + mr[i].mr_size);
			dump_avail[j] = phys_avail[j];
			dump_avail[j+1] = phys_avail[j+1];
		}
	} else {
		if ((memsize = kern_getenv("memsize")) != NULL) {
			physmem = btoc(strtol(memsize, NULL, 0) << 20);
			printf("RAM size: %ldMB (from memsize)\n",
			    ctob(physmem) / (1024 * 1024));
		} else { /* All else failed, assume 32MB */
			physmem = btoc(32 * 1024 * 1024);
			printf("RAM size: %ldMB (assumed)\n",
			    ctob(physmem) / (1024 * 1024));
		}

		if (ctob(physmem) < (448 * 1024 * 1024)) {
			/*
			 * Anything up to 448MB is assumed to be directly
			 * mappable as low memory...
			 */
			dump_avail[1] = phys_avail[1] = ctob(physmem);
		} else if (mtk_soc_get_socid() == MTK_SOC_MT7621) {
			/*
			 * On MT7621 the low memory is limited to 448MB, the
			 * rest is high memory, mapped at 0x20000000
			 */
			phys_avail[1] = 448 * 1024 * 1024;
			phys_avail[2] = 0x20000000;
			phys_avail[3] = phys_avail[2] + ctob(physmem) -
			    phys_avail[1];
			dump_avail[1] = phys_avail[1] - phys_avail[0];
			dump_avail[2] = phys_avail[2];
			dump_avail[3] = phys_avail[3] - phys_avail[2];
		} else {
			/*
			 * We have > 448MB RAM and we're not MT7621? Currently
			 * there is no such chip, so we'll just limit the RAM to
			 * 32MB and let the user know...
			 */
			printf("Unknown chip, assuming 32MB RAM\n");
			physmem = btoc(32 * 1024 * 1024);
			dump_avail[1] = phys_avail[1] = ctob(physmem);
		}
	}

	if (physmem < realmem)
		realmem = physmem;

	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
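
The MT7621 branch above splits RAM at the 448MB low-memory boundary and re-bases the remainder at physical 0x20000000. The arithmetic, restated as a standalone helper (names are assumed; the real code also carves the kernel image out of the front of the first region):

#include <stdint.h>

#define	LOWMEM_MAX_DEMO		(448ULL * 1024 * 1024)
#define	HIGHMEM_BASE_DEMO	0x20000000ULL

/* Fill two [start, end) pairs describing total_bytes of RAM. */
static void
mt7621_split_demo(uint64_t total_bytes, uint64_t pairs[4])
{
	if (total_bytes <= LOWMEM_MAX_DEMO) {
		pairs[0] = 0;
		pairs[1] = total_bytes;
		pairs[2] = pairs[3] = 0;
		return;
	}
	pairs[0] = 0;
	pairs[1] = LOWMEM_MAX_DEMO;
	pairs[2] = HIGHMEM_BASE_DEMO;
	pairs[3] = HIGHMEM_BASE_DEMO + (total_bytes - LOWMEM_MAX_DEMO);
}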
Example #6
static void
mips_init(void)
{
    int i, j, cfe_mem_idx, tmp;
    uint64_t maxmem;

#ifdef CFE_ENV
    cfe_env_init();
#endif

    TUNABLE_INT_FETCH("boothowto", &boothowto);

    if (boothowto & RB_VERBOSE)
        bootverbose++;

#ifdef MAXMEM
    tmp = MAXMEM;
#else
    tmp = 0;
#endif
    TUNABLE_INT_FETCH("hw.physmem", &tmp);
    maxmem = (uint64_t)tmp * 1024;

    /*
     * XXX
     * If we used vm_paddr_t consistently in pmap, etc., we could
     * use 64-bit page numbers on !n64 systems, too, like i386
     * does with PAE.
     */
#if !defined(__mips_n64)
    if (maxmem == 0 || maxmem > 0xffffffff)
        maxmem = 0xffffffff;
#endif

#ifdef CFE
    /*
     * Query DRAM memory map from CFE.
     */
    physmem = 0;
    cfe_mem_idx = 0;
    for (i = 0; i < 10; i += 2) {
        int result;
        uint64_t addr, len, type;

        result = cfe_enummem(cfe_mem_idx++, 0, &addr, &len, &type);
        if (result < 0) {
            phys_avail[i] = phys_avail[i + 1] = 0;
            break;
        }

        KASSERT(type == CFE_MI_AVAILABLE,
                ("CFE DRAM region is not available?"));

        if (bootverbose)
            printf("cfe_enummem: 0x%016jx/%ju.\n", addr, len);

        if (maxmem != 0) {
            if (addr >= maxmem) {
                printf("Ignoring %ju bytes of memory at 0x%jx "
                       "that is above maxmem %dMB\n",
                       len, addr,
                       (int)(maxmem / (1024 * 1024)));
                continue;
            }

            if (addr + len > maxmem) {
                printf("Ignoring %ju bytes of memory "
                       "that is above maxmem %dMB\n",
                       (addr + len) - maxmem,
                       (int)(maxmem / (1024 * 1024)));
                len = maxmem - addr;
            }
        }

        phys_avail[i] = addr;
        if (i == 0 && addr == 0) {
            /*
             * If this is the first physical memory segment probed
             * from CFE, omit the region at the start of physical
             * memory where the kernel has been loaded.
             */
            phys_avail[i] += MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
        }
        phys_avail[i + 1] = addr + len;
        physmem += len;
    }

    realmem = btoc(physmem);
#endif

    for (j = 0; j < i; j++)
        dump_avail[j] = phys_avail[j];

    physmem = realmem;

    init_param1();
    init_param2(physmem);
    mips_cpu_init();

    /*
     * Sibyte has a L1 data cache coherent with DMA. This includes
     * on-chip network interfaces as well as PCI/HyperTransport bus
     * masters.
     */
    cpuinfo.cache_coherent_dma = TRUE;

    /*
     * XXX
     * The kernel is running in 32-bit mode but the CFE is running in
     * 64-bit mode. So the SR_KX bit in the status register is turned
     * on by the CFE every time we call into it - for e.g. CFE_CONSOLE.
     *
     * This means that if we get a TLB miss for any address above 0xc0000000
     * and the SR_KX bit is set then we will end up in the XTLB exception
     * vector.
     *
     * For now work around this by copying the TLB exception handling
     * code to the XTLB exception vector.
     */
    {
        bcopy(MipsTLBMiss, (void *)MIPS3_XTLB_MISS_EXC_VEC,
              MipsTLBMissEnd - MipsTLBMiss);

        mips_icache_sync_all();
        mips_dcache_wbinv_all();
    }

    pmap_bootstrap();
    mips_proc0_init();
    mutex_init();

    kdb_init();
#ifdef KDB
    if (boothowto & RB_KDB)
        kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
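
Example #6's CFE loop makes one decision per DRAM region: drop it if it starts above the hw.physmem limit, truncate it if it straddles the limit, keep it otherwise. The same logic as a standalone helper (clamp_region is an illustrative name):

#include <stdint.h>

/*
 * Return the usable length of [addr, addr + len) under limit;
 * 0 means the region lies entirely above the limit, and a limit
 * of 0 means "no limit configured".
 */
static uint64_t
clamp_region(uint64_t addr, uint64_t len, uint64_t limit)
{
	if (limit == 0)
		return (len);
	if (addr >= limit)
		return (0);
	if (addr + len > limit)
		return (limit - addr);
	return (len);
}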
Example #7
void
platform_start(__register_t a0 __unused, __register_t a1 __unused, 
    __register_t a2 __unused, __register_t a3 __unused)
{
	uint64_t platform_counter_freq;
	int argc = 0, i;
	char **argv = NULL, **envp = NULL;
	vm_offset_t kernend;

	/* 
	 * clear the BSS and SBSS segments, this should be first call in
	 * the function
	 */
	kernend = (vm_offset_t)&end;
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	mips_postboot_fixup();

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	/*
	 * Until we have some more sensible abstractions for uboot/redboot
	 * environment handling, this has to be a compile-time hack.  The
	 * existing code handles the uboot environment very incorrectly, so
	 * we just skip initialising the relevant pointers.
	 */
#ifndef	AR71XX_ENV_UBOOT
	argc = a0;
	argv = (char**)a1;
	envp = (char**)a2;
#endif
	/* 
	 * Protect ourselves from garbage in registers 
	 */
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i += 2) {
			if (strcmp(envp[i], "memsize") == 0)
				realmem = btoc(strtoul(envp[i+1], NULL, 16));
		}
	}

	/*
	 * Just a wild guess: RedBoot let us down and didn't report
	 * the memory size.
	 */
	if (realmem == 0)
		realmem = btoc(32*1024*1024);

	/*
	 * Allow a build-time override in case RedBoot lies, or in other
	 * situations (e.g. where there's u-boot) where there isn't (yet)
	 * a convenient method of being told how much RAM is available.
	 *
	 * This happens on at least the Ubiquiti LS-SR71A board, where
	 * RedBoot says there's 16MB of RAM but in fact there's 32MB.
	 */
#if	defined(AR71XX_REALMEM)
		realmem = btoc(AR71XX_REALMEM);
#endif

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	dump_avail[0] = phys_avail[0];
	dump_avail[1] = phys_avail[1] - phys_avail[0];

	physmem = realmem;

	/*
	 * The ns8250 uart code uses DELAY, so the ticker should be
	 * initialized before cninit. And tick_init_params refers to hz,
	 * so init_param1 should be called first.
	 */
	init_param1();

	/* Detect the system type - this is needed for subsequent chipset-specific calls */
	ar71xx_detect_sys_type();
	ar71xx_detect_sys_frequency();

	platform_counter_freq = ar71xx_cpu_freq();
	mips_timer_init_params(platform_counter_freq, 1);
	cninit();
	init_static_kenv(boot1_env, sizeof(boot1_env));

	printf("CPU platform: %s\n", ar71xx_get_system_type());
	printf("CPU Frequency=%d MHz\n", u_ar71xx_cpu_freq / 1000000);
	printf("CPU DDR Frequency=%d MHz\n", u_ar71xx_ddr_freq / 1000000);
	printf("CPU AHB Frequency=%d MHz\n", u_ar71xx_ahb_freq / 1000000);
	printf("platform frequency: %lld\n", platform_counter_freq);
	printf("CPU reference clock: %d MHz\n", u_ar71xx_refclk / 1000000);
	printf("arguments: \n");
	printf("  a0 = %08x\n", a0);
	printf("  a1 = %08x\n", a1);
	printf("  a2 = %08x\n", a2);
	printf("  a3 = %08x\n", a3);

	/*
	 * XXX this code is very redboot specific.
	 */
	printf("Cmd line:");
	if (MIPS_IS_VALID_PTR(argv)) {
		for (i = 0; i < argc; i++) {
			printf(" %s", argv[i]);
			parse_argv(argv[i]);
		}
	}
	else
		printf("argv is invalid");
	printf("\n");

	printf("Environment:\n");
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i+=2) {
			printf("  %s = %s\n", envp[i], envp[i+1]);
			setenv(envp[i], envp[i+1]);
		}
	}
	else
		printf("envp is invalid\n");

	/* Redboot if_arge MAC address is in the environment */
	ar71xx_redboot_get_macaddr();

	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();

	/*
	 * Reset USB devices 
	 */
	ar71xx_init_usb_peripheral();

	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
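
Example #7 treats the RedBoot environment as a flat array of key/value string pairs terminated by a NULL key, which is why its loops step by two. A self-contained lookup over the same layout (find_env is an illustrative helper, not a kernel function):

#include <stddef.h>
#include <string.h>

/* Return the value for key, or NULL; envp is key,value,...,NULL. */
static const char *
find_env(char **envp, const char *key)
{
	int i;

	for (i = 0; envp[i] != NULL; i += 2)
		if (strcmp(envp[i], key) == 0)
			return (envp[i + 1]);
	return (NULL);
}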
Example #8
void *
initarm(void *arg, void *arg2)
{
#define	next_chunk2(a,b)	(((a) + (b)) &~ ((b)-1))
#define	next_page(a)		next_chunk2(a,PAGE_SIZE)
	struct pv_addr  kernel_l1pt;
	struct pv_addr  dpcpu;
	int loop, i;
	u_int l1pagetable;
	vm_offset_t freemempos;
	vm_offset_t freemem_pt;
	vm_offset_t afterkern;
	vm_offset_t freemem_after;
	vm_offset_t lastaddr;
	uint32_t memsize;

	set_cpufuncs();		/* NB: sets cputype */
	lastaddr = fake_preload_metadata();
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
	init_param1();

	/*
	 * We allocate memory downwards from where we were loaded
	 * by RedBoot; first the L1 page table, then NUM_KERNEL_PTS
	 * entries in the L2 page table.  Past that we re-align the
	 * allocation boundary so later data structures (stacks, etc)
	 * can be mapped with different attributes (write-back vs
	 * write-through).  Note this leaves a gap for expansion
	 * (or might be repurposed).
	 */
	freemempos = KERNPHYSADDR;

	/* macros to simplify initial memory allocation */
#define alloc_pages(var, np) do {					\
	freemempos -= (np * PAGE_SIZE);					\
	(var) = freemempos;						\
	/* NB: this works because locore maps PA=VA */			\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));			\
} while (0)
#define	valloc_pages(var, np) do {					\
	alloc_pages((var).pv_pa, (np));					\
	(var).pv_va = (var).pv_pa + (KERNVIRTADDR - KERNPHYSADDR);	\
} while (0)

	/* force L1 page table alignment */
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos -= PAGE_SIZE;
	/* allocate contiguous L1 page table */
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	/* now allocate L2 page tables; they are linked to L1 below */
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[loop],
			    L2_TABLE_SIZE / PAGE_SIZE);
		} else {
			kernel_pt_table[loop].pv_pa = freemempos +
			    (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
			    L2_TABLE_SIZE_REAL;
			kernel_pt_table[loop].pv_va = 
			    kernel_pt_table[loop].pv_pa +
				(KERNVIRTADDR - KERNPHYSADDR);
		}
	}
	freemem_pt = freemempos;		/* base of allocated pt's */

	/*
	 * Re-align allocation boundary so we can map the area
	 * write-back instead of write-through for the stacks and
	 * related structures allocated below.
	 */
	freemempos = PHYSADDR + 0x100000;
	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, KSTACK_PAGES);
	alloc_pages(minidataclean.pv_pa, 1);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
#ifdef ARM_USE_SMALL_ALLOC
	freemempos -= PAGE_SIZE;
	freemem_pt = trunc_page(freemem_pt);
	freemem_after = freemempos - ((freemem_pt - (PHYSADDR + 0x100000)) /
	    PAGE_SIZE) * sizeof(struct arm_small_page);
	arm_add_smallalloc_pages(
	    (void *)(freemem_after + (KERNVIRTADDR - KERNPHYSADDR)),
	    (void *)0xc0100000,
	    freemem_pt - (PHYSADDR + 0x100000), 1);
	freemem_after -= ((freemem_after - (PHYSADDR + 0x1000)) / PAGE_SIZE) *
	    sizeof(struct arm_small_page);
	arm_add_smallalloc_pages(
	    (void *)(freemem_after + (KERNVIRTADDR - KERNPHYSADDR)),
	    (void *)0xc0001000,
	    trunc_page(freemem_after) - (PHYSADDR + 0x1000), 0);
	freemempos = trunc_page(freemem_after);
	freemempos -= PAGE_SIZE;
#endif

	/*
	 * Now construct the L1 page table.  First map the L2
	 * page tables into the L1 so we can replace L1 mappings
	 * later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
	pmap_link_l2pt(l1pagetable, IXP425_IO_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO]);
	pmap_link_l2pt(l1pagetable, IXP425_MCU_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO + 1]);
	pmap_link_l2pt(l1pagetable, IXP425_PCI_MEM_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO + 2]);
	pmap_link_l2pt(l1pagetable, KERNBASE,
	    &kernel_pt_table[KERNEL_PT_BEFOREKERN]);
	pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR, 0x100000,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, PHYSADDR + 0x100000,
	    0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE, KERNEL_TEXT_PHYS,
	    next_chunk2(((uint32_t)lastaddr) - KERNEL_TEXT_BASE, L1_S_SIZE),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	freemem_after = next_page((int)lastaddr);
	afterkern = round_page(next_chunk2((vm_offset_t)lastaddr, L1_S_SIZE));
	for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
		pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000,
		    &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
	}
	pmap_map_entry(l1pagetable, afterkern, minidataclean.pv_pa, 
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

#ifdef ARM_USE_SMALL_ALLOC
	if ((freemem_after + 2 * PAGE_SIZE) <= afterkern) {
		arm_add_smallalloc_pages((void *)(freemem_after),
		    (void*)(freemem_after + PAGE_SIZE),
		    afterkern - (freemem_after + PAGE_SIZE), 0);
		    
	}
#endif

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, afterkern,
	    minidataclean.pv_pa);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	if (cpu_is_ixp43x())
		pmap_devmap_bootstrap(l1pagetable, ixp435_devmap);
	else
		pmap_devmap_bootstrap(l1pagetable, ixp425_devmap);
	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + IRQ_STACK_SIZE*PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ABT_STACK_SIZE*PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE, undstack.pv_va + UND_STACK_SIZE*PAGE_SIZE);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();
	/* ready to setup the console (XXX move earlier if possible) */
	cninit();
	/*
	 * Fetch the RAM size from the MCU registers.  The
	 * expansion bus was mapped above so we can now read 'em.
	 */
	if (cpu_is_ixp43x())
		memsize = ixp435_ddram_size();
	else
		memsize = ixp425_sdram_size();
	physmem = memsize / PAGE_SIZE;

	/* Set stack for exception handlers */

	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	undefined_init();

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kernelstack.pv_va;
	thread0.td_pcb = (struct pcb *)
		(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	pmap_curmaxkvaddr = afterkern + PAGE_SIZE;
	dump_avail[0] = PHYSADDR;
	dump_avail[1] = PHYSADDR + memsize;
	dump_avail[2] = 0;
	dump_avail[3] = 0;

	pmap_bootstrap(pmap_curmaxkvaddr, 0xd0000000, &kernel_l1pt);
	msgbufp = (void*)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	i = 0;
#ifdef ARM_USE_SMALL_ALLOC
	phys_avail[i++] = PHYSADDR;
	phys_avail[i++] = PHYSADDR + PAGE_SIZE;	/*
						 * XXX: Gross hack to get our
						 * pages in the vm_page_array.
						 */
#endif
	phys_avail[i++] = round_page(virtual_avail - KERNBASE + PHYSADDR);
	phys_avail[i++] = trunc_page(PHYSADDR + memsize - 1);
	phys_avail[i++] = 0;
	phys_avail[i] = 0;

	init_param2(physmem);
	kdb_init();

	/* use static kernel environment if so configured */
	if (envmode == 1)
		kern_envp = static_env;

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
#undef next_page
#undef next_chunk2
}
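
The alloc_pages()/valloc_pages() macros in Example #8 implement a downward bump allocator: freemempos is decremented by whole pages, and each VA is the PA plus the constant kernel offset. A userland restatement of that bookkeeping (demo names and offset; the real macros also zero the pages through the PA == VA mapping set up by locore):

#include <stdint.h>

#define	PAGE_SIZE_DEMO	4096u
#define	VA_PA_OFFSET	0x20000000u	/* stands in for KERNVIRTADDR - KERNPHYSADDR */

struct pv_addr_demo {
	uintptr_t pv_pa;	/* physical address */
	uintptr_t pv_va;	/* kernel virtual address */
};

/* Set to the load address before the first allocation. */
static uintptr_t freemempos_demo;

static void
alloc_pages_demo(uintptr_t *pa, unsigned np)
{
	freemempos_demo -= (uintptr_t)np * PAGE_SIZE_DEMO;
	*pa = freemempos_demo;
}

static void
valloc_pages_demo(struct pv_addr_demo *v, unsigned np)
{
	alloc_pages_demo(&v->pv_pa, np);
	v->pv_va = v->pv_pa + VA_PA_OFFSET;
}

Before the first call, freemempos_demo would be set to the address the loader left off at, mirroring freemempos = KERNPHYSADDR in the example.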
Example #9
void
initarm(struct arm64_bootparams *abp)
{
	struct efi_map_header *efihdr;
	struct pcpu *pcpup;
	vm_offset_t lastaddr;
	caddr_t kmdp;
	vm_paddr_t mem_len;
	int i;

	/* Set the module data location */
	preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);

#ifdef FDT
	try_load_dtb(kmdp);
#endif

	/* Find the address to start allocating from */
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

	/* Load the physical memory ranges */
	physmap_idx = 0;
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	add_efi_map_entries(efihdr, physmap, &physmap_idx);

	/* Print the memory map */
	mem_len = 0;
	for (i = 0; i < physmap_idx; i += 2) {
		dump_avail[i] = physmap[i];
		dump_avail[i + 1] = physmap[i + 1];
		mem_len += physmap[i + 1] - physmap[i];
	}
	dump_avail[i] = 0;
	dump_avail[i + 1] = 0;

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();

	/* Bootstrap enough of pmap  to enter the kernel proper */
	pmap_bootstrap(abp->kern_l1pt, KERNBASE - abp->kern_delta,
	    lastaddr - KERNBASE);

	arm_devmap_bootstrap(0, NULL);

	cninit();

	init_proc0(abp->kern_stack);
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);

	dbg_monitor_init();
	kdb_init();

	early_boot = 0;
}
Example #10
void *
initarm(struct arm_boot_params *abp)
{
    struct pv_addr  kernel_l1pt;
    struct pv_addr  dpcpu;
    int loop;
    u_int l1pagetable;
    vm_offset_t freemempos;
    vm_offset_t freemem_pt;
    vm_offset_t afterkern;
    vm_offset_t freemem_after;
    vm_offset_t lastaddr;
    int i, j;
    uint32_t memsize[PXA2X0_SDRAM_BANKS], memstart[PXA2X0_SDRAM_BANKS];

    lastaddr = parse_boot_param(abp);
    set_cpufuncs();
    pcpu_init(pcpup, 0, sizeof(struct pcpu));
    PCPU_SET(curthread, &thread0);

    /* Do basic tuning, hz etc */
    init_param1();

    freemempos = 0xa0200000;
    /* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)			\
	alloc_pages((var).pv_pa, (np));		\
	(var).pv_va = (var).pv_pa + 0x20000000;

#define alloc_pages(var, np)			\
	freemempos -= (np * PAGE_SIZE);		\
	(var) = freemempos;		\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

    while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
        freemempos -= PAGE_SIZE;
    valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
    for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
        if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
            valloc_pages(kernel_pt_table[loop],
                         L2_TABLE_SIZE / PAGE_SIZE);
        } else {
            kernel_pt_table[loop].pv_pa = freemempos +
                                          (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
                                          L2_TABLE_SIZE_REAL;
            kernel_pt_table[loop].pv_va =
                kernel_pt_table[loop].pv_pa + 0x20000000;
        }
    }
    freemem_pt = freemempos;
    freemempos = 0xa0100000;
    /*
     * Allocate a page for the system page mapped to V0x00000000
     * This page will just contain the system vectors and can be
     * shared by all processes.
     */
    valloc_pages(systempage, 1);

    /* Allocate dynamic per-cpu area. */
    valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
    dpcpu_init((void *)dpcpu.pv_va, 0);

    /* Allocate stacks for all modes */
    valloc_pages(irqstack, IRQ_STACK_SIZE);
    valloc_pages(abtstack, ABT_STACK_SIZE);
    valloc_pages(undstack, UND_STACK_SIZE);
    valloc_pages(kernelstack, KSTACK_PAGES);
    alloc_pages(minidataclean.pv_pa, 1);
    valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
#ifdef ARM_USE_SMALL_ALLOC
    freemempos -= PAGE_SIZE;
    freemem_pt = trunc_page(freemem_pt);
    freemem_after = freemempos - ((freemem_pt - 0xa0100000) /
                                  PAGE_SIZE) * sizeof(struct arm_small_page);
    arm_add_smallalloc_pages((void *)(freemem_after + 0x20000000)
                             , (void *)0xc0100000, freemem_pt - 0xa0100000, 1);
    freemem_after -= ((freemem_after - 0xa0001000) / PAGE_SIZE) *
                     sizeof(struct arm_small_page);
    arm_add_smallalloc_pages((void *)(freemem_after + 0x20000000)
                             , (void *)0xc0001000, trunc_page(freemem_after) - 0xa0001000, 0);
    freemempos = trunc_page(freemem_after);
    freemempos -= PAGE_SIZE;
#endif
    /*
     * Allocate memory for the l1 and l2 page tables. The scheme to avoid
     * wasting memory by allocating the l1pt on the first 16k memory was
     * taken from NetBSD rpc_machdep.c. NKPT should be greater than 12 for
     * this to work (which is supposed to be the case).
     */

    /*
     * Now we start construction of the L1 page table
     * We start by mapping the L2 page tables into the L1.
     * This means that we can replace L1 mappings later on if necessary
     */
    l1pagetable = kernel_l1pt.pv_va;

    /* Map the L2 pages tables in the L1 page table */
    pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1),
                   &kernel_pt_table[KERNEL_PT_SYS]);
#if 0 /* XXXBJR: What is this?  Don't know if there's an analogue. */
    pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE,
                   &kernel_pt_table[KERNEL_PT_IOPXS]);
#endif
    pmap_link_l2pt(l1pagetable, KERNBASE,
                   &kernel_pt_table[KERNEL_PT_BEFOREKERN]);
    pmap_map_chunk(l1pagetable, KERNBASE, SDRAM_START, 0x100000,
                   VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, SDRAM_START + 0x100000,
                   0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
    pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, SDRAM_START + 0x200000,
                   (((uint32_t)(lastaddr) - KERNBASE - 0x200000) + L1_S_SIZE) & ~(L1_S_SIZE - 1),
                   VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    freemem_after = ((int)lastaddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
    afterkern = round_page(((vm_offset_t)lastaddr + L1_S_SIZE) &
                           ~(L1_S_SIZE - 1));
    for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
        pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000,
                       &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
    }
    pmap_map_entry(l1pagetable, afterkern, minidataclean.pv_pa,
                   VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

#ifdef ARM_USE_SMALL_ALLOC
    if ((freemem_after + 2 * PAGE_SIZE) <= afterkern) {
        arm_add_smallalloc_pages((void *)(freemem_after),
                                 (void*)(freemem_after + PAGE_SIZE),
                                 afterkern - (freemem_after + PAGE_SIZE), 0);
    }
#endif

    /* Map the Mini-Data cache clean area. */
    xscale_setup_minidata(l1pagetable, afterkern,
                          minidataclean.pv_pa);

    /* Map the vector page. */
    pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
                   VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    pmap_devmap_bootstrap(l1pagetable, pxa_devmap);

    /*
     * Give the XScale global cache clean code an appropriately
     * sized chunk of unmapped VA space starting at 0xff000000
     * (our device mappings end before this address).
     */
    xscale_cache_clean_addr = 0xff000000U;

    cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
    setttb(kernel_l1pt.pv_pa);
    cpu_tlb_flushID();
    cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

    /*
     * Pages were allocated during the secondary bootstrap for the
     * stacks for different CPU modes.
     * We must now set the r13 registers in the different CPU modes to
     * point to these stacks.
     * Since the ARM stacks use STMFD etc. we must set r13 to the top end
     * of the stack memory.
     */
    set_stackptrs(0);

    /*
     * We must now clean the cache again....
     * Cleaning may be done by reading new data to displace any
     * dirty data in the cache. This will have happened in setttb()
     * but since we are boot strapping the addresses used for the read
     * may have just been remapped and thus the cache could be out
     * of sync. A re-clean after the switch will cure this.
     * After booting there are no gross relocations of the kernel thus
     * this problem will not occur after initarm().
     */
    cpu_idcache_wbinv_all();

    /*
     * Sort out bus_space for on-board devices.
     */
    pxa_obio_tag_init();

    /*
     * Fetch the SDRAM start/size from the PXA2X0 SDRAM configuration
     * registers.
     */
    pxa_probe_sdram(obio_tag, PXA2X0_MEMCTL_BASE, memstart, memsize);

    physmem = 0;
    for (i = 0; i < PXA2X0_SDRAM_BANKS; i++) {
        physmem += memsize[i] / PAGE_SIZE;
    }

    /* Fire up consoles. */
    cninit();

    /* Set stack for exception handlers */
    data_abort_handler_address = (u_int)data_abort_handler;
    prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
    undefined_handler_address = (u_int)undefinedinstruction_bounce;
    undefined_init();

    init_proc0(kernelstack.pv_va);

    /* Enable MMU, I-cache, D-cache, write buffer. */
    arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

    pmap_curmaxkvaddr = afterkern + PAGE_SIZE;
    /*
     * ARM USE_SMALL_ALLOC uses dump_avail, so it must be filled before
     * calling pmap_bootstrap.
     */
    i = 0;
    for (j = 0; j < PXA2X0_SDRAM_BANKS; j++) {
        if (memsize[j] > 0) {
            dump_avail[i++] = round_page(memstart[j]);
            dump_avail[i++] =
                trunc_page(memstart[j] + memsize[j]);
        }
    }
    dump_avail[i] = 0;
    dump_avail[i + 1] = 0;
    vm_max_kernel_address = 0xd0000000;
    pmap_bootstrap(pmap_curmaxkvaddr, &kernel_l1pt);
    msgbufp = (void*)msgbufpv.pv_va;
    msgbufinit(msgbufp, msgbufsize);
    mutex_init();

    i = 0;
#ifdef ARM_USE_SMALL_ALLOC
    phys_avail[i++] = 0xa0000000;
    phys_avail[i++] = 0xa0001000;	/*
					 * XXX: Gross hack to get our
					 * pages in the vm_page_array.
					 */
#endif
    for (j = 0; j < PXA2X0_SDRAM_BANKS; j++) {
        if (memsize[j] > 0) {
            phys_avail[i] = round_page(memstart[j]);
            dump_avail[i++] = round_page(memstart[j]);
            phys_avail[i] =
                trunc_page(memstart[j] + memsize[j]);
            dump_avail[i++] =
                trunc_page(memstart[j] + memsize[j]);
        }
    }

    dump_avail[i] = 0;
    phys_avail[i++] = 0;
    dump_avail[i] = 0;
    phys_avail[i] = 0;
#ifdef ARM_USE_SMALL_ALLOC
    phys_avail[2] = round_page(virtual_avail - KERNBASE + phys_avail[2]);
#else
    phys_avail[0] = round_page(virtual_avail - KERNBASE + phys_avail[0]);
#endif

    init_param2(physmem);
    kdb_init();
    return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
                     sizeof(struct pcb)));
}
Example #11
void
platform_start(__register_t a0, __register_t a1,
    __register_t a2 __unused, __register_t a3 __unused)
{
	uint64_t platform_counter_freq;
	vm_offset_t kernend;
	int argc = a0;
	char **argv = (char **)a1;
	int i, mem;


	/* clear the BSS and SBSS segments */
	kernend = (vm_offset_t)&end;
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	mips_postboot_fixup();

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	/*
	 * Looking for mem=XXM argument
	 */
	mem = 0; /* Just something to start with */
	for (i=0; i < argc; i++) {
		if (strncmp(argv[i], "mem=", 4) == 0) {
			mem = strtol(argv[i] + 4, NULL, 0);
			break;
		}
	}

	bootverbose = 1;
	if (mem > 0)
		realmem = btoc(mem << 20);
	else
		realmem = btoc(32 << 20);

	for (i = 0; i < 10; i++) {
		phys_avail[i] = 0;
	}

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	dump_avail[0] = phys_avail[0];
	dump_avail[1] = phys_avail[1];

	physmem = realmem;

	/*
	 * The ns8250 uart code uses DELAY, so the ticker should be
	 * initialized before cninit. And tick_init_params refers to hz,
	 * so init_param1 should be called first.
	 */
	init_param1();
	/* TODO: parse argc,argv */
	platform_counter_freq = 330000000UL;
	mips_timer_init_params(platform_counter_freq, 1);
	cninit();
	/* Panic here, after cninit */ 
	if (mem == 0)
		panic("No mem=XX parameter in arguments");

	printf("cmd line: ");
	for (i=0; i < argc; i++)
		printf("%s ", argv[i]);
	printf("\n");

	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
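
Example #11 recovers the RAM size from a RedBoot-style mem=XXM argument and falls back to 32MB (panicking only after the console is up). The scan isolated as a standalone helper (parse_mem_arg is an assumed name; returns megabytes, or 0 when the argument is absent):

#include <stdlib.h>
#include <string.h>

static long
parse_mem_arg(int argc, char **argv)
{
	int i;

	for (i = 0; i < argc; i++)
		if (strncmp(argv[i], "mem=", 4) == 0)
			return (strtol(argv[i] + 4, NULL, 0));
	return (0);
}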
Example #12
void
platform_start(__register_t a0 __unused, __register_t a1 __unused, 
    __register_t a2 __unused, __register_t a3 __unused)
{
	uint64_t platform_counter_freq;
	uint32_t reg;
	int argc, i, count = 0;
	char **argv, **envp;
	vm_offset_t kernend;

	/* 
	 * clear the BSS and SBSS segments, this should be first call in
	 * the function
	 */
	kernend = (vm_offset_t)&end;
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	mips_postboot_fixup();

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	argc = a0;
	argv = (char**)a1;
	envp = (char**)a2;
	/* 
	 * Protect ourselves from garbage in registers 
	 */
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i += 2)
		{
			if (strcmp(envp[i], "memsize") == 0)
				realmem = btoc(strtoul(envp[i+1], NULL, 16));
			else if (strcmp(envp[i], "ethaddr") == 0) {
				count = sscanf(envp[i+1], "%x.%x.%x.%x.%x.%x", 
				    &ar711_base_mac[0], &ar711_base_mac[1],
				    &ar711_base_mac[2], &ar711_base_mac[3],
				    &ar711_base_mac[4], &ar711_base_mac[5]);
				if (count < 6)
					memset(ar711_base_mac, 0,
					    sizeof(ar711_base_mac));
			}
		}
	}

	/*
	 * Just a wild guess: RedBoot let us down and didn't report
	 * the memory size.
	 */
	if (realmem == 0)
		realmem = btoc(32*1024*1024);

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	physmem = realmem;

	/*
	 * The ns8250 uart code uses DELAY, so the ticker should be
	 * initialized before cninit. And tick_init_params refers to hz,
	 * so init_param1 should be called first.
	 */
	init_param1();
	platform_counter_freq = ar71xx_cpu_freq();
	mips_timer_init_params(platform_counter_freq, 1);
	cninit();
	init_static_kenv(boot1_env, sizeof(boot1_env));

	printf("platform frequency: %lld\n", platform_counter_freq);
	printf("arguments: \n");
	printf("  a0 = %08x\n", a0);
	printf("  a1 = %08x\n", a1);
	printf("  a2 = %08x\n", a2);
	printf("  a3 = %08x\n", a3);

	printf("Cmd line:");
	if (MIPS_IS_VALID_PTR(argv)) {
		for (i = 0; i < argc; i++) {
			printf(" %s", argv[i]);
			parse_argv(argv[i]);
		}
	}
	else
		printf("argv is invalid");
	printf("\n");

	printf("Environment:\n");
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i+=2) {
			printf("  %s = %s\n", envp[i], envp[i+1]);
			setenv(envp[i], envp[i+1]);
		}
	}
	else
		printf("envp is invalid\n");

	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();

	/*
	 * Reset USB devices 
	 */
	reg = ATH_READ_REG(AR71XX_RST_RESET);
	reg |= 
	    RST_RESET_USB_OHCI_DLL | RST_RESET_USB_HOST | RST_RESET_USB_PHY;
	ATH_WRITE_REG(AR71XX_RST_RESET, reg);
	DELAY(1000);
	reg &= 
	    ~(RST_RESET_USB_OHCI_DLL | RST_RESET_USB_HOST | RST_RESET_USB_PHY);
	ATH_WRITE_REG(AR71XX_RST_RESET, reg);
	
	ATH_WRITE_REG(AR71XX_USB_CTRL_CONFIG,
	    USB_CTRL_CONFIG_OHCI_DES_SWAP | USB_CTRL_CONFIG_OHCI_BUF_SWAP |
	    USB_CTRL_CONFIG_EHCI_DES_SWAP | USB_CTRL_CONFIG_EHCI_BUF_SWAP);

	ATH_WRITE_REG(AR71XX_USB_CTRL_FLADJ, 
	    (32 << USB_CTRL_FLADJ_HOST_SHIFT) | (3 << USB_CTRL_FLADJ_A5_SHIFT));
	DELAY(1000);

	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
Example #13
void
platform_start(__register_t a0, __register_t a1, __register_t a2 __unused,
    __register_t a3)
{
	const struct octeon_feature_description *ofd;
	uint64_t platform_counter_freq;
	int rv;

	mips_postboot_fixup();

	/*
	 * Initialize boot parameters so that we can determine things like
	 * which console we should use, etc.
	 */
	octeon_boot_params_init(a3);

	/* Initialize pcpu stuff */
	mips_pcpu0_init();
	mips_timer_early_init(cvmx_sysinfo_get()->cpu_clock_hz);

	/* Initialize console.  */
	cninit();

	/*
	 * Display information about the CPU.
	 */
#if !defined(OCTEON_MODEL)
	printf("Using runtime CPU model checks.\n");
#else
	printf("Compiled for CPU model: " __XSTRING(OCTEON_MODEL) "\n");
#endif
	strcpy(cpu_model, octeon_model_get_string(cvmx_get_proc_id()));
	printf("CPU Model: %s\n", cpu_model);
	printf("CPU clock: %uMHz  Core Mask: %#x\n",
	       cvmx_sysinfo_get()->cpu_clock_hz / 1000000,
	       cvmx_sysinfo_get()->core_mask);
	rv = octeon_model_version_check(cvmx_get_proc_id());
	if (rv == -1)
		panic("%s: kernel not compatible with this processor.", __func__);

	/*
	 * Display information about the board.
	 */
#if defined(OCTEON_BOARD_CAPK_0100ND)
	strcpy(cpu_board, "CAPK-0100ND");
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_CN3010_EVB_HS5) {
		panic("Compiled for %s, but board type is %s.", cpu_board,
		       cvmx_board_type_to_string(cvmx_sysinfo_get()->board_type));
	}
#else
	strcpy(cpu_board,
	       cvmx_board_type_to_string(cvmx_sysinfo_get()->board_type));
#endif
	printf("Board: %s\n", cpu_board);
	printf("Board Type: %u  Revision: %u/%u\n",
	       cvmx_sysinfo_get()->board_type,
	       cvmx_sysinfo_get()->board_rev_major,
	       cvmx_sysinfo_get()->board_rev_minor);
	printf("Serial number: %s\n", cvmx_sysinfo_get()->board_serial_number);

	/*
	 * Additional on-chip hardware/settings.
	 *
	 * XXX Display PCI host/target?  What else?
	 */
	printf("MAC address base: %6D (%u configured)\n",
	       cvmx_sysinfo_get()->mac_addr_base, ":",
	       cvmx_sysinfo_get()->mac_addr_count);


	octeon_ciu_reset();
	/*
	 * Convert U-Boot 'bootoctlinux' loader command line arguments into
	 * boot flags and kernel environment variables.
	 */
	bootverbose = 1;
	octeon_init_kenv(a3);

	/*
	 * For some reason on the cn38xx simulator the ebase register is set to
	 * 0x80001000 at bootup time.  Move it back to the default, but
	 * when we move to having support for multiple executives, we need
	 * to rethink this.
	 */
	mips_wr_ebase(0x80000000);

	octeon_memory_init();
	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
	cpu_clock = cvmx_sysinfo_get()->cpu_clock_hz;
	platform_counter_freq = cpu_clock;
	octeon_timecounter.tc_frequency = cpu_clock;
	platform_timecounter = &octeon_timecounter;
	mips_timer_init_params(platform_counter_freq, 0);
	set_cputicker(octeon_get_ticks, cpu_clock, 0);

#ifdef SMP
	/*
	 * Clear any pending IPIs.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(0), 0xffffffff);
#endif

	printf("Octeon SDK: %s\n", OCTEON_SDK_VERSION_STRING);
	printf("Available Octeon features:");
	for (ofd = octeon_feature_descriptions; ofd->ofd_string != NULL; ofd++)
		if (octeon_has_feature(ofd->ofd_feature))
			printf(" %s", ofd->ofd_string);
	printf("\n");
}
Example #14
static void
mips_init(void)
{
	int i, j;

	printf("entry: mips_init()\n");

#ifdef CFE
	/*
	 * Query DRAM memory map from CFE.
	 */
	physmem = 0;
	for (i = 0; i < 10; i += 2) {
		int result;
		uint64_t addr, len, type;

		result = cfe_enummem(i / 2, 0, &addr, &len, &type);
		if (result < 0) {
			BCM_TRACE("There is no phys memory for: %d\n", i);
			phys_avail[i] = phys_avail[i + 1] = 0;
			break;
		}
		if (type != CFE_MI_AVAILABLE) {
			BCM_TRACE("phys memory is not available: %d\n", i);
			continue;
		}

		phys_avail[i] = addr;
		if (i == 0 && addr == 0) {
			/*
			 * If this is the first physical memory segment probed
			 * from CFE, omit the region at the start of physical
			 * memory where the kernel has been loaded.
			 */
			phys_avail[i] += MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
		}
		
		BCM_TRACE("phys memory is available for: %d\n", i);
		BCM_TRACE(" => addr =  %jx\n", addr);
		BCM_TRACE(" => len =  %jd\n", len);

		phys_avail[i + 1] = addr + len;
		physmem += len;
	}

	BCM_TRACE("Total phys memory is : %ld\n", physmem);
	realmem = btoc(physmem);
#endif

	for (j = 0; j < i; j++)
		dump_avail[j] = phys_avail[j];

	physmem = realmem;

	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
Example #15
void *
initarm(struct arm_boot_params *abp)
{
	struct pv_addr  kernel_l1pt;
	struct pv_addr  dpcpu;
	int loop, i;
	u_int l1pagetable;
	vm_offset_t freemempos;
	vm_offset_t freemem_pt;
	vm_offset_t afterkern;
	vm_offset_t freemem_after;
	vm_offset_t lastaddr;
	uint32_t memsize, memstart;

	lastaddr = parse_boot_param(abp);
	arm_physmem_kernaddr = abp->abp_physaddr;
	set_cpufuncs();
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
	init_param1();

	freemempos = 0xa0200000;
	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)			\
	alloc_pages((var).pv_pa, (np));		\
	(var).pv_va = (var).pv_pa + 0x20000000;

#define alloc_pages(var, np)			\
	freemempos -= (np * PAGE_SIZE);		\
	(var) = freemempos;		\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos -= PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[loop],
			    L2_TABLE_SIZE / PAGE_SIZE);
		} else {
			kernel_pt_table[loop].pv_pa = freemempos +
			    (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
			    L2_TABLE_SIZE_REAL;
			kernel_pt_table[loop].pv_va =
			    kernel_pt_table[loop].pv_pa + 0x20000000;
		}
	}
	freemem_pt = freemempos;
	freemempos = 0xa0100000;
	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, KSTACK_PAGES);
	alloc_pages(minidataclean.pv_pa, 1);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
	/*
	 * Allocate memory for the l1 and l2 page tables. The scheme to avoid
	 * wasting memory by allocating the l1pt on the first 16k memory was
	 * taken from NetBSD rpc_machdep.c. NKPT should be greater than 12 for
	 * this to work (which is supposed to be the case).
	 */

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
	pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE,
	    &kernel_pt_table[KERNEL_PT_IOPXS]);
	pmap_link_l2pt(l1pagetable, KERNBASE,
	    &kernel_pt_table[KERNEL_PT_BEFOREKERN]);
	pmap_map_chunk(l1pagetable, KERNBASE, IQ80321_SDRAM_START, 0x100000,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, IQ80321_SDRAM_START + 0x100000,
	    0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, IQ80321_SDRAM_START + 0x200000,
	    (((uint32_t)(lastaddr) - KERNBASE - 0x200000) + L1_S_SIZE) & ~(L1_S_SIZE - 1),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	freemem_after = ((int)lastaddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	afterkern = round_page(((vm_offset_t)lastaddr + L1_S_SIZE) & ~(L1_S_SIZE
	    - 1));
	for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
		pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000,
		    &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
	}
	pmap_map_entry(l1pagetable, afterkern, minidataclean.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, afterkern,
	    minidataclean.pv_pa);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	arm_devmap_bootstrap(l1pagetable, ep80219_devmap);
	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptrs(0);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();
	cpu_setup("");

	/*
	 * Fetch the SDRAM start/size from the i80321 SDRAM configuration
	 * registers.
	 */
	i80321_calibrate_delay();
	i80321_sdram_bounds(obio_bs_tag, IQ80321_80321_VBASE + VERDE_MCU_BASE,
	    &memstart, &memsize);
	physmem = memsize / PAGE_SIZE;
	cninit();

	undefined_init();
				
	init_proc0(kernelstack.pv_va);
	
	/* Enable MMU, I-cache, D-cache, write buffer. */

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	vm_max_kernel_address = 0xd0000000;
	pmap_bootstrap(pmap_curmaxkvaddr, &kernel_l1pt);
	msgbufp = (void*)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	
	/*
	 * Add the physical ram we have available.
	 *
	 * Exclude the kernel (and all the things we allocated which immediately
	 * follow the kernel) from the VM allocation pool but not from crash
	 * dumps.  virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_hardware_region(IQ80321_SDRAM_START, memsize);
	arm_physmem_exclude_region(abp->abp_physaddr, 
	    virtual_avail - KERNVIRTADDR, EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	kdb_init();
	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
Example #16
void *
initarm(void *arg, void *arg2)
{
	struct pcpu *pc;
	struct pv_addr  kernel_l1pt;
	struct pv_addr	md_addr;
	struct pv_addr	md_bla;
	struct pv_addr  dpcpu;
	int loop;
	u_int l1pagetable;
	vm_offset_t freemempos;
	vm_offset_t lastalloced;
	vm_offset_t lastaddr;
	uint32_t memsize = 32 * 1024 * 1024;
	sa1110_uart_vaddr = SACOM1_VBASE;

	boothowto = RB_VERBOSE | RB_SINGLE;
	cninit();
	set_cpufuncs();
	lastaddr = fake_preload_metadata();
	physmem = memsize / PAGE_SIZE;
	pc = &__pcpu;
	pcpu_init(pc, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
	init_param1();
		
	physical_start = (vm_offset_t) KERNBASE;
	physical_end =  lastaddr;
	physical_freestart = (((vm_offset_t)physical_end) + PAGE_MASK) & ~PAGE_MASK;
	md_addr.pv_va = md_addr.pv_pa = MDROOT_ADDR;
	freemempos = (vm_offset_t)round_page(physical_freestart);
	memset((void *)freemempos, 0, 256*1024);
		/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)			\
	alloc_pages((var).pv_pa, (np));		\
	(var).pv_va = (var).pv_pa;

#define alloc_pages(var, np)			\
	(var) = freemempos;		\
	freemempos += ((np) * PAGE_SIZE);\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));
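	/*
	 * Note: the kernel runs with VA == PA during this bootstrap
	 * (pv_va is set equal to pv_pa above), so these macros hand out
	 * identity-mapped pages growing upward from freemempos.
	 */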

	while ((freemempos & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	valloc_pages(md_bla, L2_TABLE_SIZE / PAGE_SIZE);
	alloc_pages(sa1_cache_clean_addr, CPU_SA110_CACHE_CLEAN_SIZE / PAGE_SIZE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[loop],
			    L2_TABLE_SIZE / PAGE_SIZE);
		} else {
			kernel_pt_table[loop].pv_pa = freemempos +
			    (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
			    L2_TABLE_SIZE_REAL;
			kernel_pt_table[loop].pv_va = 
			    kernel_pt_table[loop].pv_pa;
		}
	}

	/*
	 * Allocate a page for the system page mapped at virtual address
	 * 0x00000000.  This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, KSTACK_PAGES);
	lastalloced = kernelstack.pv_va;

	/*
	 * Allocate memory for the L1 and L2 page tables.  The scheme to
	 * avoid wasting memory by allocating the l1pt in the first 16 KB
	 * of memory was taken from NetBSD's rpc_machdep.c.  NKPT should
	 * be greater than 12 for this to work (which is supposed to be
	 * the case).
	 */

	/*
	 * Now we start construction of the L1 page table.  We begin by
	 * mapping the L2 page tables into the L1 so that we can replace
	 * L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 page tables into the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
	pmap_link_l2pt(l1pagetable, KERNBASE,
	    &kernel_pt_table[KERNEL_PT_KERNEL]);
	pmap_link_l2pt(l1pagetable, 0xd0000000,
	    &kernel_pt_table[KERNEL_PT_IO]);
	pmap_link_l2pt(l1pagetable, lastalloced & ~((L1_S_SIZE * 4) - 1),
	    &kernel_pt_table[KERNEL_PT_L1]);
	pmap_link_l2pt(l1pagetable, 0x90000000,
	    &kernel_pt_table[KERNEL_PT_IRQ]);
	pmap_link_l2pt(l1pagetable, MDROOT_ADDR, &md_bla);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00100000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
	pmap_map_chunk(l1pagetable, KERNBASE, KERNBASE,
	    ((uint32_t)lastaddr - KERNBASE), VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE);
	/* Map the DPCPU pages */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa, DPCPU_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, md_addr.pv_va, md_addr.pv_pa,
	    MD_ROOT_SIZE * 1024, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    KSTACK_PAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}
	pmap_map_chunk(l1pagetable, md_bla.pv_va, md_bla.pv_pa, L2_TABLE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	/* Map the vector page. */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	/* Map the statically mapped devices. */
	pmap_devmap_bootstrap(l1pagetable, assabet_devmap);
	pmap_map_chunk(l1pagetable, sa1_cache_clean_addr, 0xf0000000, 
	    CPU_SA110_CACHE_CLEAN_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
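	/*
	 * The StrongARM data cache is cleaned by reading from a
	 * dedicated region; the chunk mapped above backs
	 * sa1_cache_clean_addr for that purpose.
	 */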

	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	undefined_init();
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for the different CPU modes.  We must now point the r13
	 * register of each mode at its stack.  Because the ARM stacks
	 * grow downward (STMFD etc.), r13 must hold the top end of the
	 * stack memory.
	 */
	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * We must now clean the cache again.
	 * Cleaning may be done by reading new data to displace any dirty
	 * data in the cache.  This will have happened in setttb(), but
	 * since we are bootstrapping, the addresses used for the read may
	 * have just been remapped, leaving the cache out of sync.  A
	 * re-clean after the switch cures this.  After booting there are
	 * no gross relocations of the kernel, so this problem will not
	 * recur after initarm().
	 */
	cpu_idcache_wbinv_all();

	bootverbose = 1;

	/* Set up proc0 and thread0. */
	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kernelstack.pv_va;
	thread0.td_pcb = (struct pcb *)
		(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_frame = &proc0_tf;

	/* Enable MMU, I-cache, D-cache, write buffer. */

	cpufunc_control(0x337f, 0x107d);
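	/*
	 * cpufunc_control() read-modify-writes the CP15 control
	 * register: the first argument selects the bits to change and
	 * the second supplies their new values.
	 */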
	arm_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	pmap_curmaxkvaddr = freemempos + KERNEL_PT_VMDATA_NUM * 0x400000;

	dump_avail[0] = phys_avail[0] = round_page(virtual_avail);
	dump_avail[1] = phys_avail[1] = 0xc0000000 + 0x02000000 - 1;
	dump_avail[2] = phys_avail[2] = 0;
	dump_avail[3] = phys_avail[3] = 0;
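	/*
	 * phys_avail[]/dump_avail[] hold (start, end) address pairs;
	 * the zero entries above terminate the lists.
	 */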
					
	mutex_init();
	pmap_bootstrap(freemempos, 0xd0000000, &kernel_l1pt);

	init_param2(physmem);
	kdb_init();
	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
Beispiel #17
0
void *
initarm(struct arm_boot_params *abp)
{
#define	next_chunk2(a,b)	(((a) + (b)) &~ ((b)-1))
#define	next_page(a)		next_chunk2(a,PAGE_SIZE)
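/*
 * next_chunk2(a, b) advances 'a' to the next 'b' boundary, where 'b'
 * must be a power of two; a value already on a boundary moves up by a
 * full chunk.  next_page() does the same with PAGE_SIZE.
 */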
	struct pv_addr  kernel_l1pt;
	struct pv_addr  dpcpu;
	int loop, i;
	u_int l1pagetable;
	vm_offset_t freemempos;
	vm_offset_t freemem_pt;
	vm_offset_t afterkern;
	vm_offset_t freemem_after;
	vm_offset_t lastaddr;
	uint32_t memsize;

	/* kernel text starts where we were loaded at boot */
#define	KERNEL_TEXT_OFF		(abp->abp_physaddr - PHYSADDR)
#define	KERNEL_TEXT_BASE	(KERNBASE + KERNEL_TEXT_OFF)
#define	KERNEL_TEXT_PHYS	(PHYSADDR + KERNEL_TEXT_OFF)

	lastaddr = parse_boot_param(abp);
	arm_physmem_kernaddr = abp->abp_physaddr;
	set_cpufuncs();		/* NB: sets cputype */
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);

	if (envmode == 1)
		kern_envp = static_env;
	/* Do basic tuning, hz etc */
	init_param1();

	/*
	 * We allocate memory downwards from where we were loaded
	 * by RedBoot; first the L1 page table, then NUM_KERNEL_PTS
	 * entries in the L2 page table.  Past that we re-align the
	 * allocation boundary so later data structures (stacks, etc)
	 * can be mapped with different attributes (write-back vs
	 * write-through).  Note this leaves a gap for expansion
	 * (or might be repurposed).
	 */
	freemempos = abp->abp_physaddr;

	/* macros to simplify initial memory allocation */
#define alloc_pages(var, np) do {					\
	freemempos -= (np * PAGE_SIZE);					\
	(var) = freemempos;						\
	/* NB: this works because locore maps PA=VA */			\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));			\
} while (0)
#define	valloc_pages(var, np) do {					\
	alloc_pages((var).pv_pa, (np));					\
	(var).pv_va = (var).pv_pa + (KERNVIRTADDR - abp->abp_physaddr);	\
} while (0)

	/* force L1 page table alignment (the table must be 16 KB aligned) */
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos -= PAGE_SIZE;
	/* allocate contiguous L1 page table */
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	/* now allocate L2 page tables; they are linked to L1 below */
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[loop],
			    L2_TABLE_SIZE / PAGE_SIZE);
		} else {
			kernel_pt_table[loop].pv_pa = freemempos +
			    (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
			    L2_TABLE_SIZE_REAL;
			kernel_pt_table[loop].pv_va =
			    kernel_pt_table[loop].pv_pa +
				(KERNVIRTADDR - abp->abp_physaddr);
		}
	}
	freemem_pt = freemempos;		/* base of allocated pt's */

	/*
	 * Re-align allocation boundary so we can map the area
	 * write-back instead of write-through for the stacks and
	 * related structures allocated below.
	 */
	freemempos = PHYSADDR + 0x100000;
	/*
	 * Allocate a page for the system page mapped at virtual address
	 * 0x00000000.  This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, KSTACK_PAGES);
	alloc_pages(minidataclean.pv_pa, 1);
	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now construct the L1 page table.  First map the L2 page
	 * tables into the L1 so we can replace L1 mappings later on if
	 * necessary.
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/* Map the L2 page tables into the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
	pmap_link_l2pt(l1pagetable, IXP425_IO_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO]);
	pmap_link_l2pt(l1pagetable, IXP425_MCU_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO + 1]);
	pmap_link_l2pt(l1pagetable, IXP425_PCI_MEM_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO + 2]);
	pmap_link_l2pt(l1pagetable, KERNBASE,
	    &kernel_pt_table[KERNEL_PT_BEFOREKERN]);
	pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR, 0x100000,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, PHYSADDR + 0x100000,
	    0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE, KERNEL_TEXT_PHYS,
	    next_chunk2(((uint32_t)lastaddr) - KERNEL_TEXT_BASE, L1_S_SIZE),
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
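	/*
	 * lastaddr marks the end of the loaded kernel; the mapping above
	 * rounds it up to a full 1 MB section.
	 */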
	freemem_after = next_page((vm_offset_t)lastaddr);
	afterkern = round_page(next_chunk2((vm_offset_t)lastaddr, L1_S_SIZE));
	for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
		pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000,
		    &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
	}
	pmap_map_entry(l1pagetable, afterkern, minidataclean.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, afterkern,
	    minidataclean.pv_pa);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	if (cpu_is_ixp43x())
		arm_devmap_bootstrap(l1pagetable, ixp435_devmap);
	else
		arm_devmap_bootstrap(l1pagetable, ixp425_devmap);
	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for the different CPU modes.  We must now point the r13
	 * register of each mode at its stack.  Because the ARM stacks
	 * grow downward (STMFD etc.), r13 must hold the top end of the
	 * stack memory.
	 */
	set_stackptrs(0);

	/*
	 * We must now clean the cache again.
	 * Cleaning may be done by reading new data to displace any dirty
	 * data in the cache.  This will have happened in setttb(), but
	 * since we are bootstrapping, the addresses used for the read may
	 * have just been remapped, leaving the cache out of sync.  A
	 * re-clean after the switch cures this.  After booting there are
	 * no gross relocations of the kernel, so this problem will not
	 * recur after initarm().
	 */
	cpu_idcache_wbinv_all();
	cpu_setup();

	/* ready to setup the console (XXX move earlier if possible) */
	cninit();
	/*
	 * Fetch the RAM size from the MCU registers.  The
	 * expansion bus was mapped above so we can now read them.
	 */
	if (cpu_is_ixp43x())
		memsize = ixp435_ddram_size();
	else
		memsize = ixp425_sdram_size();

	undefined_init();

	init_proc0(kernelstack.pv_va);

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	pmap_curmaxkvaddr = afterkern + PAGE_SIZE;
	vm_max_kernel_address = 0xe0000000;
	pmap_bootstrap(pmap_curmaxkvaddr, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Add the physical ram we have available.
	 *
	 * Exclude the kernel, and all the things we allocated which immediately
	 * follow the kernel, from the VM allocation pool but not from crash
	 * dumps.  virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	arm_physmem_hardware_region(PHYSADDR, memsize);
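	/*
	 * Carve out what the bootstrap consumed: the page tables
	 * allocated downward from the load address, the early data
	 * structures below them, and the kernel image together with
	 * everything allocated immediately after it.
	 */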
	arm_physmem_exclude_region(freemem_pt, KERNPHYSADDR -
	    freemem_pt, EXFLAG_NOALLOC);
	arm_physmem_exclude_region(freemempos, KERNPHYSADDR - 0x100000 -
	    freemempos, EXFLAG_NOALLOC);
	arm_physmem_exclude_region(abp->abp_physaddr, 
	    virtual_avail - KERNVIRTADDR, EXFLAG_NOALLOC);
	arm_physmem_init_kernel_globals();

	init_param2(physmem);
	kdb_init();

	/* use static kernel environment if so configured */
	if (envmode == 1)
		kern_envp = static_env;

	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
#undef next_page
#undef next_chunk2
}