Example #1
File: pmap.c  Project: Darge/mimiker
int main() { /* Simple test */
  pmap_t pmap;
  pmap_init(&pmap);
  pmap.asid = 10;

  set_active_pmap(&pmap);
  vm_page_t *pg1 = pm_alloc(4);

  vaddr_t ex_addr = PAGESIZE * 10;
  pmap_map(&pmap, ex_addr, pg1->paddr, pg1->size, PMAP_VALID | PMAP_DIRTY);

  int *x = (int *) ex_addr;
  for (int i = 0; i < 1024 * pg1->size; i++)
    *(x + i) = i;
  for (int i = 0; i < 1024 * pg1->size; i++)
    assert(*(x + i) == i);

  vm_page_t *pg2 = pm_alloc(1);

  ex_addr = PAGESIZE * 2000;
  pmap_map(&pmap, ex_addr, pg2->paddr, pg2->size, PMAP_VALID | PMAP_DIRTY);

  x = (int *) ex_addr;
  for (int i = 0; i < 1024 * pg2->size; i++)
    *(x + i) = i;
  for (int i = 0; i < 1024 * pg2->size; i++)
    assert(*(x + i) == i);

  pm_free(pg1);
  pm_free(pg2);

  pmap_delete(&pmap);
  kprintf("Tests passed\n");
  return 0;
}
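A note on what this test assumes: the declarations below are inferred from the call sites above, not taken from the mimiker headers, so treat every name and type as an assumption. The loop bound also encodes the page size: 1024 four-byte ints fill one 4 KiB page, so 1024 * pg->size walks the whole allocation.

/* Hypothetical declarations inferred from the test above. */
typedef uintptr_t vaddr_t;
typedef uintptr_t paddr_t;

typedef struct pmap {
	unsigned asid;			/* address-space ID used by the TLB */
	/* ... page-table state ... */
} pmap_t;

typedef struct vm_page {
	paddr_t paddr;			/* physical address of the first page */
	size_t size;			/* length of the block, in pages */
} vm_page_t;

void pmap_init(pmap_t *);		/* prepare an empty address space */
void set_active_pmap(pmap_t *);		/* install it on the current CPU */
vm_page_t *pm_alloc(size_t npages);	/* physically contiguous pages */
void pm_free(vm_page_t *);
void pmap_map(pmap_t *, vaddr_t va, paddr_t pa, size_t npages,
	      unsigned flags);		/* enter va -> pa translations */
void pmap_delete(pmap_t *);		/* tear the address space down */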
Example #2
File: pmap.c  Project: DJHartley/xnu
/**
 * io_map
 *
 * Maps an IO region and returns its virtual address.
 */
vm_offset_t
io_map(vm_offset_t phys_addr, vm_size_t size, unsigned int flags)
{
	vm_offset_t	start;

	if (kernel_map == VM_MAP_NULL) {
	    /*
	     * VM is not initialized.  Grab memory.
	     */
	    start = virt_begin;
	    virt_begin += round_page(size);

	    (void) pmap_map_bd(start, phys_addr, phys_addr + round_page(size),
			       VM_PROT_READ|VM_PROT_WRITE,
			       flags);
	}
	else {
	    (void) kmem_alloc_pageable(kernel_map, &start, round_page(size));
	    (void) pmap_map(start, phys_addr, phys_addr + round_page(size),
			    VM_PROT_READ|VM_PROT_WRITE,
			    flags);
	}

	return (start);
}
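A sketch of a typical caller follows; the device name, register layout, physical address, and flag value are illustrative assumptions, not taken from xnu.

/* Hypothetical device; every constant here is made up for illustration. */
#define EXDEV_PHYS	0xF4000000	/* physical base of the registers */

struct exdev_regs {
	volatile unsigned int status;
	volatile unsigned int control;
};

static struct exdev_regs *exdev;

void
exdev_attach(void)
{
	/*
	 * io_map rounds the size up to whole pages and returns a kernel
	 * virtual address aliasing the physical region, whether or not
	 * the VM system is up yet.
	 */
	exdev = (struct exdev_regs *)io_map(EXDEV_PHYS,
	    sizeof(struct exdev_regs), 0 /* flags: platform-specific */);
	exdev->control = 1;
}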
Example #3
int
sun68k_bus_map(bus_space_tag_t t, bus_type_t iospace, bus_addr_t addr,
    bus_size_t size, int flags, vaddr_t vaddr, bus_space_handle_t *hp)
{
	bus_size_t	offset;
	vaddr_t v;

	/*
	 * If we suspect there might be one, try to find
	 * and use a PROM mapping.
	 */
	if ((flags & _SUN68K_BUS_MAP_USE_PROM) != 0 &&
	     find_prom_map(addr, iospace, size, &v) == 0) {
		*hp = (bus_space_handle_t)v;
		return (0);
	}

	/*
	 * Adjust the user's request to be page-aligned.
	 */
	offset = addr & PGOFSET;
	addr -= offset;
	size += offset;
	size = m68k_round_page(size);
	if (size == 0) {
		printf("sun68k_bus_map: zero size\n");
		return (EINVAL);
	}

	/* Get some kernel virtual address space. */
	if (vaddr)
		v = vaddr;
	else
		v = uvm_km_alloc(kernel_map, size, 0,
		    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (v == 0)
		panic("sun68k_bus_map: no memory");

	/* note: preserve page offset */
	*hp = (bus_space_handle_t)(v | offset);

	/*
	 * Map the device.  
	 */
	addr |= iospace | PMAP_NC;
	pmap_map(v, addr, addr + size, VM_PROT_ALL);

	return (0);
}
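The alignment arithmetic deserves a worked example: the caller's address may begin mid-page, so the routine maps whole pages and re-applies the sub-page offset to the returned handle. A sketch with made-up numbers, assuming an 8 KiB page so that PGOFSET is 0x1fff:

/* Illustrative only: page-align an (addr, size) request while
 * preserving the sub-page offset, as sun68k_bus_map does above. */
bus_addr_t addr = 0x00fe2010;		/* starts 0x10 bytes into a page */
bus_size_t size = 0x30;

bus_size_t offset = addr & PGOFSET;	/* 0x10 */
addr -= offset;				/* 0x00fe2000, page-aligned base */
size += offset;				/* 0x40, covers the shifted start */
size = m68k_round_page(size);		/* 0x2000, one whole page */

/* After mapping `size' bytes at some VA v, the handle becomes
 * v | offset, so byte 0 of the handle is physical 0x00fe2010 again. */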
Example #4
vaddr_t
pmap_bootstrap_md(vaddr_t vaddr)
{
	/*
	 * Get ethernet buffer - need ETHERPAGES pages physically contiguous
	 * below 16MB.
	 */
	if (vaddr < 0x01000000 - ptoa(ETHERPAGES)) {
		etherlen = ptoa(ETHERPAGES);
		etherbuf = (void *)vaddr;

		vaddr = pmap_map(vaddr, avail_start, avail_start + etherlen,
		    UVM_PROT_RW, CACHE_INH);

		virtual_avail += etherlen;
		avail_start += etherlen;
	}

	return vaddr;
}
Example #5
File: bus_subr.c  Project: ryo/netbsd-src
/*
 * Make a permanent mapping for a device.
 */
void *
bus_mapin(int bustype, int pa, int sz)
{
	vaddr_t va;
	int off;

	if ((bustype < 0) || (bustype >= BUS__NTYPES))
		panic("bus_mapin: bustype");

	off = pa & PGOFSET;
	pa -= off;
	sz += off;
	sz = m68k_round_page(sz);

	/* Borrow PROM mappings if we can. */
	if (bustype == BUS_OBIO) {
		if (find_prom_map(pa, PMAP_OBIO, sz, &va) == 0)
			goto done;
	}

	pa &= bus_info[bustype].mask;
	pa |= bus_info[bustype].base;
	pa |= bus_info[bustype].type;
	pa |= PMAP_NC;	/* non-cached */

	/* Get some kernel virtual address space. */
	va = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (va == 0)
		panic("bus_mapin");

	/* Map it to the specified bus. */
	pmap_map(va, pa, pa + sz, VM_PROT_ALL);

done:
	return ((void*)(va + off));
}
Example #6
File: machdep.c  Project: mosconi/openbsd
/*
 * Machine-dependent startup code
 */
void
cpu_startup()
{
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	vaddr_t minaddr, maxaddr;
	paddr_t msgbufpa;
	extern struct user *proc0paddr;

#ifdef DEBUG
	pmapdebug = 0;
#endif

	if (CPU_ISSUN4M)
		stackgap_random = STACKGAP_RANDOM_SUN4M;

	/*
	 * Re-map the message buffer from its temporary address
	 * at KERNBASE to MSGBUF_VA.
	 */

	/* Get physical address of the message buffer */
	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &msgbufpa);

	/* Invalidate the current mapping at KERNBASE. */
	pmap_kremove((vaddr_t)KERNBASE, PAGE_SIZE);
	pmap_update(pmap_kernel());

	/* Enter the new mapping */
	pmap_map(MSGBUF_VA, msgbufpa, msgbufpa + PAGE_SIZE,
	    PROT_READ | PROT_WRITE);

	/* Re-initialize the message buffer. */
	initmsgbuf((caddr_t)(MSGBUF_VA + (CPU_ISSUN4 ? 4096 : 0)), MSGBUFSIZE);

	proc0.p_addr = proc0paddr;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	/*identifycpu();*/
	printf("real mem = %lu (%luMB)\n", ptoa(physmem),
	    ptoa(physmem)/1024/1024);

	/*
	 * uvm_km_init() has allocated all the virtual memory below the
	 * end of the kernel image. If VM_MIN_KERNEL_ADDRESS is below
	 * KERNBASE, we need to reclaim that range.
	 */
	if (vm_min_kernel_address < (vaddr_t)KERNBASE) {
		uvm_unmap(kernel_map, vm_min_kernel_address, (vaddr_t)KERNBASE);
	}

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Set up userland PIE limits. PIE is disabled on sun4/4c/4e due
	 * to the limited address space.
	 */
	if (CPU_ISSUN4M) {
		vm_pie_max_addr = VM_MAXUSER_ADDRESS / 4;
	}

	dvma_init();

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free)/1024/1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}
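The message-buffer relocation at the top of cpu_startup() is a small reusable pattern: recover the physical page behind a temporary mapping, remove that mapping, commit the removal, then enter the page at its permanent address. Condensed from the calls above, with error handling omitted:

paddr_t pa;

pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &pa); /* 1: find phys page */
pmap_kremove((vaddr_t)KERNBASE, PAGE_SIZE);          /* 2: drop the old VA */
pmap_update(pmap_kernel());                          /* 3: commit removal */
pmap_map(MSGBUF_VA, pa, pa + PAGE_SIZE,              /* 4: enter new VA */
    PROT_READ | PROT_WRITE);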
Example #7
void
hp700_init(int argc, char *argv[], char *envp[])
{
	int hpmc_br_instr;
	int *p = (int *) i_hpmach_chk;
	register struct mapping *mp;
	int i;
	vm_offset_t addr;
	int pdcerr;
	vm_offset_t first_page;

	struct pdc_coproc pdc_coproc;
	struct pdc_cache pdc_cache;
	struct pdc_model pdc_model;
	struct pdc_iodc_read pdc_iodc;
	extern int crashdump(void);
#ifdef	BTLB
	struct pdc_btlb pdc_btlb;
#endif
#ifdef	HPT
	struct pdc_hwtlb pdc_hwtlb;
	extern struct hpt_entry *hpt_table;
	extern int usehpt;
#endif	

	first_page = move_bootstrap();

	if (argc >= 1 && argc <= 4) {
		char *btstring = boot_string;
		char *src = (argc == 1 ? envp[5] : argv[2]);

		i = 0;
		while (*src != '\0' && i++ <= BOOT_LINE_LENGTH)
			*btstring++ = *src++;
		*btstring = '\0';
	}

	pdc = PAGE0->mem_pdc;

	delay_init();
	pdc_console_init();

	printf("%s", version);

	/*
	 * Determine what the boot program is using as its console
	 * so that we can use the same device.
	 */
	pdcerr = (*pdc)(PDC_IODC, PDC_IODC_READ, &pdc_iodc,
			PAGE0->mem_cons.pz_hpa, PDC_IODC_INDEX_DATA,
			&cons_iodc, sizeof(cons_iodc));
	if (pdcerr == 0)
		bcopy((char *)&PAGE0->mem_cons.pz_dp, (char *)&cons_dp,
		      sizeof(struct device_path));
	else
		printf("Warning: can't id console boot device (PDC Ret'd %d)\n",
		       pdcerr);

        /*
         * Read boot device from PROM
         */
	pdcerr = (*PAGE0->mem_pdc)(PDC_IODC, PDC_IODC_READ, &pdc_iodc,
	                           PAGE0->mem_boot.pz_hpa, PDC_IODC_INDEX_DATA,
	                           &boot_iodc, sizeof(boot_iodc));
	if (pdcerr == 0)
		bcopy((char *)&PAGE0->mem_boot.pz_dp, (char *)&boot_dp,
		      sizeof(struct device_path));
	else
		printf("Warning: can't id boot device (PDC Ret'd %d)\n",
		       pdcerr);
	
	/*
	 * Set up the transfer of control addr to point to the crash dump
	 * initialization code.
	 */
	PAGE0->ivec_toc = crashdump;

	/*
	 * get cache parameters from the PDC
	 */
	(*PAGE0->mem_pdc)(PDC_CACHE, PDC_CACHE_DFLT, &pdc_cache);

	dcache_line_size = pdc_cache.dc_conf.cc_line * 16;
	dcache_line_mask = dcache_line_size - 1;
	dcache_block_size = dcache_line_size * pdc_cache.dc_conf.cc_block;

	dcache_size = pdc_cache.dc_size;
	dcache_base = pdc_cache.dc_base;
	dcache_stride = pdc_cache.dc_stride;
	dcache_count = pdc_cache.dc_count;
	dcache_loop = pdc_cache.dc_loop;

	icache_line_size = pdc_cache.ic_conf.cc_line * 16;
	icache_line_mask = icache_line_size - 1;
	icache_block_size = icache_line_size * pdc_cache.ic_conf.cc_block;

	icache_base = pdc_cache.ic_base;
	icache_stride = pdc_cache.ic_stride;
	icache_count = pdc_cache.ic_count;
	icache_loop = pdc_cache.ic_loop;

	/*
	 * purge TLBs and flush caches
	 */
	ptlball(&pdc_cache);

#ifdef	BTLB
        /*
         * get block tlb information for clearing
         */
	pdcerr = (*pdc)(PDC_BLOCK_TLB, PDC_BTLB_DEFAULT, &pdc_btlb);
	
        if (pdcerr != 0)
                printf("Warning: PDC_BTLB call Ret'd %d\n", pdcerr);

	switch (pdc_btlb.finfo.num_c) {
	/* S-Chip specific */
	case 0: 
		cputype = CPU_PCXS;
		for (i = 0; i < pdc_btlb.finfo.num_i; i++)
			purge_block_itlb(i);
		for (i = 0; i < pdc_btlb.finfo.num_d; i++)
			purge_block_dtlb(i);
		break;
	/* L-Chip specific */
	case 8:
		cputype = CPU_PCXL;
		for (i = 0; i < pdc_btlb.finfo.num_c; i++)
			purge_L_block_ctlb(i);
		break;
	/* T-Chip specific */
	case 16:
		cputype = CPU_PCXT;
		for (i = 0; i < pdc_btlb.finfo.num_c; i++)
			purge_block_ctlb(i);
		break;
	default:
		panic("unrecognized block-TLB, cannot purge block TLB(s)");
		/* NOTREACHED */
	}
#endif

	fcacheall();

	/*
	 * get the cpu type
	 */
	(*PAGE0->mem_pdc)(PDC_MODEL, PDC_MODEL_INFO, &pdc_model);

	machtype = pdc_model.hvers >> 4;

	cpuinfo(&pdc_cache);

	if (dcache_line_size != CACHE_LINE_SIZE)
		printf("WARNING: data cache line size = %d bytes, %s\n",
		       dcache_line_size, "THIS IS *VERY* BAD!");

	/*
	 * Get the instruction to do branch to PDC_HPMC from PDC.  If
	 * successful, then insert the instruction at the beginning
	 * of the HPMC handler.
	 */
	if ((*PAGE0->mem_pdc)(PDC_INSTR, PDC_INSTR_DFLT, &hpmc_br_instr) == 0)
		p[0] = hpmc_br_instr;
	else
		p[0] = 0;

	/* 
	 * Now compute the checksum of the hpmc interrupt vector entry
	 */
	p[5] = -(p[0] + p[1] + p[2] + p[3] + p[4] + p[6] + p[7]);

	/*
	 * setup page size for Mach
	 */
	page_size = HP700_PGBYTES;
	vm_set_page_size();

	/*
	 * configure the devices including memory. Passes back size of 
	 * physical memory in mem_size.
	 */
	busconf();

	/* 
	 * Zero out BSS of kernel before doing anything else. The location
	 * pointed to by &edata is included in the data section.
	 */
	bzero((char*)((vm_offset_t) &edata + 4), (vm_offset_t) &end - 
	      (vm_offset_t) &edata - 4);

        /*
         * Locate any coprocessors and enable them by setting up the CCR.
         * SFUs are ignored (since we don't have any).  Also, initialize
         * the floating point registers here.
         */
        if ((pdcerr = (*pdc)(PDC_COPROC, PDC_COPROC_DFLT, &pdc_coproc)) < 0)
                printf("Warning: PDC_COPROC call Ret'd %d\n", pdcerr);
        copr_sfu_config = pdc_coproc.ccr_enable;
        mtctl(CR_CCR, copr_sfu_config & CCR_MASK);
        fprinit(&fpcopr_version);
	fpcopr_version = (fpcopr_version & 0x003ff800) >> 11;
        mtctl(CR_CCR, 0);

        /*
         * Clear the FAULT light (so we know when we get a real one)
         * PDC_COPROC apparently turns it on (for whatever reason).
         */
        pdcerr = PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0;
        (void) (*pdc)(PDC_CHASSIS, PDC_CHASSIS_DISP, pdcerr);

#ifdef TIMEX
	/*
	 * Enable the quad-store instruction.
	 */
	pdcerr = (*pdc)(PDC_MODEL, PDC_MODEL_ENSPEC,
			&pdc_model, pdc_model.pot_key);
	if (pdcerr < 0)
		printf("Warning: PDC enable FP quad-store Ret'd %d\n", pdcerr);
#endif


	/*
	 * Initialize the Event Trace Analysis Package
	 * Static Phase: 1 of 2
	 */
	etap_init_phase1();

	/*
	 * on the hp700 the value in &etext is a pointer to the last word
	 * in the text section. Similarly &edata and &end are pointers to
	 * the last words in the section. We want to change this so that 
	 * these pointers point past the sections that they terminate.
	 */
	text_start = trunc_page((vm_offset_t) &start_text);
	text_end = round_page((vm_offset_t) &etext + 4);

	/*
	 * before we go to all the work to initialize the VM see if we really 
	 * linked the image past the end of the PDC/IODC area.
	 */
	if (text_start < 0x10800)
		panic("kernel text mapped over PDC and IODC memory");

	/*
	 * find ranges of physical memory that aren't allocated to the kernel
	 */

	avail_start = round_page(first_page);
	first_avail = avail_start;
	avail_end = trunc_page(mem_size);
	
	/*
	 * bootstrap the rest of the virtual memory system
	 */
#ifdef MAXMEMBYTES
	if ((avail_end - avail_start) > MAXMEMBYTES) {
		mem_size  = trunc_page(MAXMEMBYTES);
		avail_end = mem_size;
	}
#endif

#ifdef HPT
	/*
	 * If we want to use the HW TLB support, ensure that it exists.
	 */
	if (usehpt &&
	    !((*pdc)(PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) == 0 &&
	      (pdc_hwtlb.min_size || pdc_hwtlb.max_size)))
		usehpt = 0;
#endif

	pmap_bootstrap(&avail_start, &avail_end);

	/*
	 * set limits on virtual memory and kernel equivalenced memory
	 */
	virtual_avail = avail_end;
	virtual_end = trunc_page(VM_MAX_KERNEL_ADDRESS);

	/*
	 * pmap_bootstrap allocated memory for data structures that must
	 * be equivalently mapped.
	 */
	equiv_end = (long) round_page((vm_offset_t) &end);
	io_end = 0xF0000000;	/* XXX */

	/*
	 * Do block mapping. We are mapping from 0, up through the first
	 * power of 2 address above the end of the equiv region. This 
	 * means some memory gets block mapped that should not be, but
	 * so be it (we make the text writable also :-)). We do this to
	 * conserve block entries since we hope to use them for other
	 * purposes (someday).
	 */
	addr = avail_start;
	if (addr != 1 << log2(addr))
		addr = 1 << log2(addr);

#ifdef	BTLB
	if(pdc_btlb.finfo.num_c)
		printf("%d BTLB entries found.  Block mapping up to 0x%x (0x%x)\n",
		       pdc_btlb.finfo.num_c, addr, avail_start);

	/*
	 * XXX L-CHIP vs T-CHIP vs S-CHIP difference in Block TLB insertion.
	 */
	switch (pdc_btlb.finfo.num_c) {
	/* S-CHIP */
	case 0:
		pmap_block_map(0, addr, VM_PROT_ALL, 0, BLK_ICACHE);
		pmap_block_map(0, addr, VM_PROT_READ|VM_PROT_WRITE,
			       0, BLK_DCACHE);
		break;
	/* L-CHIP */
	case 8:
		pmap_block_map(0, addr, VM_PROT_ALL, 0, BLK_LCOMBINED);
		break;
	/* T-CHIP */
	case 16:
		pmap_block_map(0, addr, VM_PROT_ALL, 0, BLK_COMBINED);
		break;
	default:
		panic("unrecognized block-TLB, cannot map kernel");
		/* NOTREACHED */
	}
#endif

#ifdef	HPT
	/*
	 * Turn on the HW TLB assist.
	 */
	if (usehpt) {
		pdcerr = (*pdc)(PDC_TLB, PDC_TLB_CONFIG,
				&pdc_hwtlb, hpt_table,
				sizeof(struct hpt_entry) * HP700_HASHSIZE,
				PDC_TLB_WORD3);
		if (pdcerr) {
			printf("Warning: HW TLB init failed (%d), disabled\n",
			       pdcerr);
			usehpt = 0;
		} else
			printf("HW TLB initialized (%d entries at 0x%x)\n",
			       HP700_HASHSIZE, hpt_table);
	}
#endif

	/*
	 * map the PDC and IODC area for kernel read/write
	 * XXX - should this be read only?
	 */
	(void) pmap_map(0, 0, text_start, VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * map the kernel text area.
	 */
#if KGDB
	(void) pmap_map(text_start, text_start, text_end, 
			VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_WRITE);
#else
	(void) pmap_map(text_start, text_start, text_end, 
			VM_PROT_READ | VM_PROT_EXECUTE);
#endif

	/*
	 * map the data section of the kernel
	 */
	(void) pmap_map(text_end, text_end, avail_start,
			VM_PROT_READ | VM_PROT_WRITE);

#ifndef IO_HACK
	/*
	 * map the I/O pages
	 */
	(void) pmap_map(trunc_page(io_size), trunc_page(io_size),
			0, VM_PROT_READ | VM_PROT_WRITE);
#endif

#if 0
	/*
	 * map the breakpoint page
	 */
	(void) pmap_map(break_page, break_page, break_page+HP700_PAGE_SIZE,
			VM_PROT_READ | VM_PROT_EXECUTE);
#endif

	/*
	 * map the interrupt stack red zone.
	 */
	addr = trunc_page((vm_offset_t) &intstack_top);
	(void) pmap_map(addr, addr, addr + PAGE_SIZE, VM_PROT_READ);

	vm_on = 1;
}
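One detail worth isolating from hp700_init() is the block-map sizing: addr is rounded up to a power of two so the whole range can be covered by block TLB entries, and the expression relies on log2() taking the ceiling. A self-contained equivalent, written as a hypothetical helper rather than anything from this source:

/*
 * Round up to the next power of two (identity on exact powers of two).
 * Matches the intent of `if (addr != 1 << log2(addr)) ...' above,
 * assuming log2() rounds up.
 */
static vm_offset_t
round_pow2(vm_offset_t x)
{
	vm_offset_t p = 1;

	while (p < x)
		p <<= 1;
	return (p);
}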
Example #8
/*
 * Machine-dependent startup code
 */
void
cpu_startup()
{
	caddr_t v;
	int sz;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	vaddr_t minaddr, maxaddr;
	extern struct user *proc0paddr;

#ifdef DEBUG
	pmapdebug = 0;
#endif

	if (CPU_ISSUN4M) {
		extern int stackgap_random;

		stackgap_random = STACKGAP_RANDOM_SUN4M;
	}

	/*
	 * fix message buffer mapping, note phys addr of msgbuf is 0
	 */
	pmap_map(MSGBUF_VA, 0, MSGBUFSIZE, VM_PROT_READ|VM_PROT_WRITE);
	initmsgbuf((caddr_t)(MSGBUF_VA + (CPU_ISSUN4 ? 4096 : 0)), MSGBUFSIZE);

	proc0.p_addr = proc0paddr;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	/*identifycpu();*/
	printf("real mem = %u (%uMB)\n", ptoa(physmem),
	    ptoa(physmem)/1024/1024);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (int)allocsys((caddr_t)0);

	if ((v = (caddr_t)uvm_km_alloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");

	if (allocsys(v) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Determine how many buffers to allocate.
	 * We allocate bufcachepercent% of memory for buffer space.
	 */
	if (bufpages == 0)
		bufpages = physmem * bufcachepercent / 100;

	/* Restrict to at most 25% filled kvm */
	if (bufpages >
	    (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) 
		bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
		    PAGE_SIZE / 4;

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a map for physio.  Others use a submap of the kernel
	 * map, but we want one completely separate, even though it uses
	 * the same pmap.
	 */
	dvma_base = CPU_ISSUN4M ? DVMA4M_BASE : DVMA_BASE;
	dvma_end = CPU_ISSUN4M ? DVMA4M_END : DVMA_END;
#if defined(SUN4M)
	if (CPU_ISSUN4M) {
		/*
		 * The DVMA space we want partially overrides kernel_map.
		 * Allocate it in kernel_map as well to prevent it from being
		 * used for other things.
		 */
		if (uvm_map(kernel_map, &dvma_base,
		    vm_map_max(kernel_map) - dvma_base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
			UVM_ADV_NORMAL, 0)))
			panic("startup: can not steal dvma map");
	}
#endif
	phys_map = uvm_map_create(pmap_kernel(), dvma_base, dvma_end,
	    VM_MAP_INTRSAFE);
	if (phys_map == NULL)
		panic("unable to create DVMA map");

	/*
	 * Allocate DVMA space and dump into a privately managed
	 * resource map for double mappings which is usable from
	 * interrupt contexts.
	 */
	if (uvm_km_valloc_wait(phys_map, (dvma_end-dvma_base)) != dvma_base)
		panic("unable to allocate from DVMA map");
	dvmamap_extent = extent_create("dvmamap", dvma_base, dvma_end,
				       M_DEVBUF, NULL, 0, EX_NOWAIT);
	if (dvmamap_extent == 0)
		panic("unable to allocate extent for dvma");

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free)/1024/1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/* Early interrupt handlers initialization */
	intr_init();
}
Example #9
void
dumpsys(void)
{
	u_long totalbytesleft, bytes, i, n, memseg;
	u_long maddr;
	daddr_t blkno;
	int (*dump)(dev_t, daddr_t, caddr_t, size_t);
	int error;

	/* Save registers. */
	savectx(&dumppcb);

	if (dumpdev == NODEV)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already been configured...
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo <= 0 || dumpsize == 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	error = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (error == -1) {
		printf("area unavailable\n");
		return;
	}

	if ((error = cpu_dump()) != 0)
		goto err;

	totalbytesleft = ptoa(cpu_dump_mempagecnt());
	blkno = dumplo + cpu_dumpsize();
	dump = bdevsw[major(dumpdev)].d_dump;
	error = 0;

	for (memseg = 0; memseg < mem_cluster_cnt; memseg++) {
		maddr = mem_clusters[memseg].start;
		bytes = mem_clusters[memseg].size;

		for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
			/* Print out how many MBs we have left to go. */
			if ((totalbytesleft % (1024*1024)) == 0)
				printf("%ld ", totalbytesleft / (1024 * 1024));

			/* Limit size for next transfer. */
			n = bytes - i;
			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;

			(void) pmap_map(dumpspace, maddr, maddr + n,
			    VM_PROT_READ);

			error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, n);
			if (error)
				goto err;
			maddr += n;
			blkno += btodb(n);		/* XXX? */

#if 0	/* XXX this doesn't work.  grr. */
			/* operator aborting dump? */
			if (sget() != NULL) {
				error = EINTR;
				break;
			}
#endif
		}
	}

 err:
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
	printf("\n\n");
	delay(5000000);		/* 5 seconds */
}
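The inner loop of dumpsys() is the classic sliding-window dump: one fixed kernel VA range (dumpspace) is remapped over successive physical chunks so segments of any size can be written through a small window. The skeleton with the bookkeeping stripped, illustrative rather than a drop-in replacement:

/* Sliding-window dump of one physical segment, sketched. */
for (i = 0; i < bytes; i += n) {
	n = bytes - i;
	if (n > BYTES_PER_DUMP)		/* clamp to the window size */
		n = BYTES_PER_DUMP;
	(void)pmap_map(dumpspace, maddr, maddr + n, VM_PROT_READ);
	error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, n);
	if (error)
		break;
	maddr += n;
	blkno += btodb(n);
}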
Example #10
static int
loadk(char *file, u_long *marks)
{
    int fd, error, flags;
    vaddr_t va;
    paddr_t pa;
    u_long minsize, size;
    vaddr_t extra;

    /*
     * Regardless of the address where we load the kernel, we need to
     * make sure it has enough valid space to use during pmap_bootstrap.
     * locore.s tries to map the 512KB following the kernel image, and
     * we need to make sure this extra room does not overwrite PROM data
     * (such as the PROM page tables which are immediately below 4MB on
     * most sun4c).
     */
    extra = 512 * 1024;

    if ((fd = open(file, O_RDONLY)) < 0)
        return (errno ? errno : ENOENT);

    /*
     * We need to know whether we are booting off a tape or not,
     * because we can not seek backwards off tapes.
     */

    if (files[fd].f_flags & F_RAW) {
        flags = (COUNT_KERNEL & ~COUNT_SYM) | (LOAD_KERNEL & ~LOAD_SYM);
        minsize = FOURMB;
        va = 0xf8000000;		/* KERNBASE */
#ifdef DEBUG
        printf("Tape boot: expecting a bsd.rd kernel smaller than %p\n",
               minsize);
#endif
        /* compensate for extra room below */
        minsize -= extra;
    } else {
        /*
         * If we did not load a random.seed file yet, try and load
         * one.
         */
        if (rnd_loaded == 0) {
            /*
             * Some PROM do not like having a network device
             * open()ed twice; better close and reopen after
             * trying to get randomness.
             */
            close(fd);

            rnd_loaded = loadrandom(BOOTRANDOM, rnddata,
                                    sizeof(rnddata));

            if ((fd = open(file, O_RDONLY)) < 0)
                return (errno ? errno : ENOENT);
        }

        flags = LOAD_KERNEL;
        marks[MARK_START] = 0;

        /*
         * Even though we just have opened the file, the gzip code
         * has tried to read from it. Be sure to reset position in
         * case the file is not compressed (transparent mode isn't
         * so transparent...)
         */
        if (lseek(fd, 0, SEEK_SET) == (off_t)-1) {
            error = errno;
            goto out;
        }

        if ((error = fdloadfile(fd, marks, COUNT_KERNEL)) != 0)
            goto out;

        /* rewind file for the actual load operation later */
        if (lseek(fd, 0, SEEK_SET) == (off_t)-1) {
            error = errno;
            goto out;
        }

        minsize = marks[MARK_END] - marks[MARK_START];

        /* We want that leading 16K in front of the kernel image */
        minsize += PROM_LOADADDR;
        va = marks[MARK_START] - PROM_LOADADDR;
    }

    /*
     * If the kernel would entirely fit under the boot code, and the
     * boot code has been loaded 1:1, we do not need to allocate
     * breathing room after it.
     */
    size = minsize + extra;
    if (compat != 0) {
        if (minsize + extra <= RELOC2 - LOWSTACK)
            size = RELOC2 - LOWSTACK;
        else
            compat = 0;
    }

    /* Get a physical load address */
#ifdef DEBUG
    printf("kernel footprint %p, requesting %p\n", minsize, size);
#endif
    pa = getphysmem(size);
    if (pa == (paddr_t)-1) {
        /*
         * The extra bootstrap memory estimate might have been
         * too much, if physical memory doesn't have any contiguous
         * large chunks (e.g. on sun4c systems with 4MB regions).
         * If that increase caused us to cross a 4MB boundary, try
         * to limit ourselves to a 4MB multiple.
         */
        if (compat == 0 && size / FOURMB != minsize / FOURMB) {
            size = roundup(minsize, FOURMB);
#ifdef DEBUG
            printf("now trying %p\n", size);
#endif
            pa = getphysmem(size);
        }
        if (pa == (paddr_t)-1) {
            error = EFBIG;
            goto out;
        }
    }

    printf("Loading at physical address %lx\n", pa);
    if (pmap_map(va, pa, size) != 0) {
        error = EFAULT;
        goto out;
    }

    /* try and double-map at VA 0 for compatibility */
    if (pa + size > bstart) {
#ifdef DEBUG
        printf("WARNING: %s is too large for compat mode.\n"
               "If your kernel is too old, it will not run correctly.\n",
               file);
#endif
    } else {
        if (pa != 0 && pmap_map(0, pa, size) != 0) {
            error = EFAULT;
            goto out;
        }
    }

    marks[MARK_START] = 0;
    error = fdloadfile(fd, marks, flags);
out:
    close(fd);
    return (error);
}
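The size negotiation in loadk() is easier to follow as arithmetic. A worked example with made-up figures, taking PROM_LOADADDR as the 16 KB leading gap and FOURMB as 4 MB, as the comments above imply:

/* Hypothetical figures to illustrate the sizing logic above. */
u_long minsize = 0x3c0000;	/* kernel footprint, ~3.75 MB */
u_long extra = 512 * 1024;	/* breathing room for locore.s */
u_long size;

minsize += PROM_LOADADDR;	/* + 16 KB gap -> 0x3c4000 */
size = minsize + extra;		/* 0x444000: now crosses 4 MB */

/*
 * If getphysmem(size) fails on fragmented memory, and the extra room
 * pushed us over a 4 MB boundary that minsize alone does not cross,
 * retry with a 4 MB multiple of the bare footprint:
 */
if (size / FOURMB != minsize / FOURMB)
	size = roundup(minsize, FOURMB);	/* 0x400000 */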
Example #11
void ppc_vm_init(unsigned int memory_size, boot_args *args)
{
	unsigned int htabmask;
	unsigned int i;
	vm_offset_t  addr;
	int boot_task_end_offset;
#if	NCPUS > 1
	const char *cpus;
#endif	/* NCPUS > 1 */

	printf("mem_size = %d M\n",memory_size / (1024 * 1024));

#ifdef __MACHO__
	/* Now retrieve addresses for end, edata, and etext 
	 * from MACH-O headers.
	 */


	etext = (vm_offset_t) sectTEXTB + sectSizeTEXT;
	edata = (vm_offset_t) sectDATAB + sectSizeDATA;
	end = getlastaddr();
#endif

	/* Stitch valid memory regions together - they may be contiguous
	 * even though they're not already glued together
	 */

	/* Go through the list of memory regions passed in via the args
	 * and copy valid entries into the pmap_mem_regions table, adding
	 * further calculated entries.
	 */
	
	
	/* Initialise the pmap system, using space above `first_avail'*/

#ifndef	__MACHO__
	free_regions[free_regions_count].start =
	  	round_page((unsigned int)&_ExceptionVectorsEnd -
			   (unsigned int)&_ExceptionVectorsStart);
#else
	/* On MACH-O generated kernels, the Exception Vectors
	 * are already mapped and loaded at 0 -- no relocation
	 * or freeing of memory is needed
	 */

	free_regions[free_regions_count].start = round_page((unsigned int)&_ExceptionVectorsEnd) + 4096;
#endif

	/* If we are on a PDM machine, memory at 1M might be used
	 * for video. TODO NMGS call video driver to do this
	 * somehow
	 */


	/* For PowerMac, first_avail is set to above the bootstrap task.
         * TODO NMGS - different screen modes - might free mem?
         */

	first_avail = round_page(args->first_avail);


	/* map in the exception vectors */
	/*
	 * map the kernel text, data and bss. Don't forget other regions too
	 */
	for (i = 0; i < args->kern_info.region_count; i++) {
#if	MACH_KDB
		if (args->kern_info.regions[i].prot == VM_PROT_NONE &&
		    i == args->kern_info.region_count - 1) {
			/* assume that's the kernel symbol table */
			kern_sym_start = args->kern_info.regions[i].addr;
			kern_sym_size = args->kern_info.regions[i].size;
			printf("kernel symbol table at 0x%x size 0x%x\n",
			       kern_sym_start, kern_sym_size);
			args->kern_info.regions[i].prot |=
				(VM_PROT_WRITE|VM_PROT_READ);
		}
#endif	/* MACH_KDB */

#ifdef __MACHO__
		/* Skip the VECTORS segment */
		if (args->kern_info.regions[i].addr == 0)
			continue;
#endif

		/*
		 * Map the region 1:1 with its declared protection.
		 * (The call below is assumed from context; the listing
		 * dropped it along with the loop's closing brace.)
		 */
		(void)pmap_map(args->kern_info.regions[i].addr,
			       args->kern_info.regions[i].addr,
			       args->kern_info.regions[i].addr +
				   args->kern_info.regions[i].size,
			       args->kern_info.regions[i].prot);
	}

	boot_region_count = args->task_info.region_count;
	boot_size = 0;
	boot_task_end_offset = 0;
	/* Map bootstrap task pages 1-1 so that user_bootstrap can find it */
	for (i = 0; i < boot_region_count; i++) {
		if (args->task_info.regions[i].mapped) {
			/* kernel requires everything page aligned */
#if DEBUG
			printf("mapping virt 0x%08x to phys 0x%08x end 0x%x, prot=0x%b\n",
				 ppc_trunc_page(args->task_info.base_addr + 
					args->task_info.regions[i].offset),
				 ppc_trunc_page(args->task_info.base_addr + 
					args->task_info.regions[i].offset),
				 ppc_round_page(args->task_info.base_addr + 
					args->task_info.regions[i].offset +
					args->task_info.regions[i].size),
				 args->task_info.regions[i].prot,
				 "\x10\1READ\2WRITE\3EXEC");
#endif /* DEBUG */

			(void)pmap_map(
				  ppc_trunc_page(args->task_info.base_addr + 
				      args->task_info.regions[i].offset),
			          ppc_trunc_page(args->task_info.base_addr + 
				      args->task_info.regions[i].offset),
			          ppc_round_page(args->task_info.base_addr +
				      args->task_info.regions[i].offset +
				      args->task_info.regions[i].size),
			          args->task_info.regions[i].prot);

			/* Count the size of mapped space */
			boot_size += args->task_info.regions[i].size;

			/* There may be an overlapping physical page
			 * mapped to two different virtual addresses
			 */
			if (boot_task_end_offset >
			    args->task_info.regions[i].offset) {
				boot_size -= boot_task_end_offset - 
					args->task_info.regions[i].offset;
#if DEBUG
				printf("WARNING - bootstrap overlaps regions\n");
#endif /* DEBUG */
			}

			boot_task_end_offset =
				args->task_info.regions[i].offset +
				args->task_info.regions[i].size;
		}
	}

	if (boot_region_count) {

		/* Add a new region to the bootstrap task for its stack */
		args->task_info.regions[boot_region_count].addr =
			BOOT_STACK_BASE;
		args->task_info.regions[boot_region_count].size =
			BOOT_STACK_SIZE;
		args->task_info.regions[boot_region_count].mapped = FALSE;
		boot_region_count++;
		
		boot_start        = args->task_info.base_addr;
		boot_region_desc  = (vm_offset_t) args->task_info.regions;
		/* TODO NMGS need to put param info onto top of boot stack */
		boot_task_thread_state.r1   = BOOT_STACK_PTR-0x100;
		boot_task_thread_state.srr0 = args->task_info.entry;
		boot_task_thread_state.srr1 =
			MSR_MARK_SYSCALL(MSR_EXPORT_MASK_SET);
		
		boot_thread_state_flavor = PPC_THREAD_STATE;
		boot_thread_state_count  = PPC_THREAD_STATE_COUNT;
		boot_thread_state        =
			(thread_state_t)&boot_task_thread_state;
	}
}
Example #12
int
grfattach(struct hp_device *hd)
{
	register char *rom;
	register struct sti_entry *ep;
	register char *cp;
	struct modtab *mptr = (struct modtab *)hd->hp_addr;
	struct grf_softc *gp = &grf_softc[hd->hp_unit];
	struct grfdev *gd = &grfdev[hd->hp_unit];
	int devtype;
	static int firstime = 1;

	if (gp->g_flags & GF_ALIVE)
		return(1);

	/*
	 * Locate STI ROM.
	 * On some machines it may not be part of the HPA space.
	 * On these, busconf will stash the address in m_stirom.
	 */
	rom = (char *)mptr->m_stirom;
	if (rom == 0)
		rom = (char *)mptr->m_hpa;

	/*
	 * Change page protection on `sticode' to KERNEL:rwx USER:rx.
	 * At this time, I don't know if users will be executing these
	 * routines; for now we'll give them permission to do so.
	 */
	if (firstime) {
#ifdef MACH_KERNEL
		pmap_map(STICODE_ALGN, STICODE_ALGN,
			 STICODE_ALGN + (STI_CODESIZ * STI_CODECNT * NGRF),
			 VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_WRITE);
#else
		register u_int pg = btop(STICODE_ALGN);
		register u_int pgcnt =
			(STI_CODESIZ * STI_CODECNT * NGRF + (NBPG-1)) / NBPG;

		while (pgcnt--)
			setaccess(pg++, PDE_AR_URXKW, 0, PDEAR);
#endif
		firstime = 0;
	}

	devtype = STI_DEVTYP(STI_TYPE_BWGRF, rom);

	/*
	 * Set addrs and type for stiload
	 */
	gd->romaddr = rom;
	gd->hpa = (char *)mptr->m_hpa;
	gd->type = devtype;

	/*
	 * Set `ep' to unit's STI routine entry points and `cp' to
	 * page-aligned code space.  Load STI routines and be sure
	 * to flush the (data) cache afterward; we actually flush
	 * both caches as we only call this routine a couple times.
	 */
	ep = &stientry[hd->hp_unit];
	cp = (char *) (STICODE_ALGN + STI_CODESIZ * STI_CODECNT * hd->hp_unit);

	cp = stiload(&ep->init_graph, gd,
		     STI_IGADDR(devtype, rom), STI_SMADDR(devtype, rom), cp);
	cp = stiload(&ep->state_mgmt, gd,
		     STI_SMADDR(devtype, rom), STI_FUADDR(devtype, rom), cp);
	cp = stiload(&ep->font_unpmv, gd,
		     STI_FUADDR(devtype, rom), STI_BMADDR(devtype, rom), cp);
	cp = stiload(&ep->block_move, gd,
		     STI_BMADDR(devtype, rom), STI_STADDR(devtype, rom), cp);
	cp = stiload(&ep->self_test, gd,
		     STI_STADDR(devtype, rom), STI_EHADDR(devtype, rom), cp);
	cp = stiload(&ep->excep_hdlr, gd,
		     STI_EHADDR(devtype, rom), STI_ICADDR(devtype, rom), cp);
	cp = stiload(&ep->inq_conf, gd,
		     STI_ICADDR(devtype, rom), STI_EADDR(devtype, rom), cp);

	fcacheall();

	gd->ep = &stientry[hd->hp_unit];
	gp->g_data = (caddr_t) gd;
	gp->g_sw = &grfsw[0];
	if ((*gp->g_sw->gd_init)(gp) == 0) {
		gp->g_data = (caddr_t) 0;
		return(0);
	}

	gp->g_flags = GF_ALIVE;
	return(1);
}
Example #13
/*
 * Machine-dependent startup code
 */
void
cpu_startup()
{
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	vaddr_t minaddr, maxaddr;
	extern struct user *proc0paddr;

#ifdef DEBUG
	pmapdebug = 0;
#endif

	/*
	 * fix message buffer mapping
	 */
	pmap_map(MSGBUF_VA, MSGBUF_PA, MSGBUF_PA + MSGBUFSIZE, UVM_PROT_RW);
	initmsgbuf((caddr_t)(MSGBUF_VA + (CPU_ISSUN4 ? 4096 : 0)), MSGBUFSIZE);

	proc0.p_addr = proc0paddr;

	/* I would print this earlier, but I want it in the message buffer */
	if (kap_maskcheck() == 0) {
		printf("WARNING: KAP M2C3 or earlier mask detected.\n"
"THE PROCESSOR IN THIS MACHINE SUFFERS FROM SEVERE HARDWARE ISSUES.\n"
"M2C3 PROCESSORS MAY RUN RELIABLY ENOUGH, OLDER WILL DEFINITELY NOT.\n\n");
	}

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	/*identifycpu();*/
	printf("real mem = %d (%dMB)\n", ptoa(physmem),
	    ptoa(physmem) / 1024 / 1024);

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a map for physio.  Others use a submap of the kernel
	 * map, but we want one completely separate, even though it uses
	 * the same pmap.
	 */
	dvma_base = CPU_ISSUN4M ? DVMA4M_BASE : DVMA_BASE;
	dvma_end = CPU_ISSUN4M ? DVMA4M_END : DVMA_END;
	phys_map = uvm_map_create(pmap_kernel(), dvma_base, dvma_end,
	    VM_MAP_INTRSAFE);
	if (phys_map == NULL)
		panic("unable to create DVMA map");

	/*
	 * Allocate DVMA space and dump into a privately managed
	 * resource map for double mappings which is usable from
	 * interrupt contexts.
	 */
	if (uvm_km_valloc_wait(phys_map, (dvma_end-dvma_base)) != dvma_base)
		panic("unable to allocate from DVMA map");
	dvmamap_extent = extent_create("dvmamap", dvma_base, dvma_end,
				       M_DEVBUF, NULL, 0, EX_NOWAIT);
	if (dvmamap_extent == 0)
		panic("unable to allocate extent for dvma");

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free) / 1024 / 1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/* Early interrupt handlers initialization */
	intr_init();
}
Example #14
static int
loadk(char *kernel, u_long *marks)
{
	int fd, error;
	vaddr_t va;
	paddr_t pa;
	u_long size;
	int flags = LOAD_KERNEL;

	if ((fd = open(kernel, 0)) < 0)
		return (errno ? errno : ENOENT);

	marks[MARK_START] = 0;
	if ((error = fdloadfile(fd, marks, COUNT_KERNEL)) != 0)
		goto out;

	size = marks[MARK_END] - marks[MARK_START];

	/* We want that leading 16K in front of the kernel image */
	size += PROM_LOADADDR;
	va = marks[MARK_START] - PROM_LOADADDR;

	/*
	 * Extra space for bootinfo and kernel bootstrap.
	 * In compat mode, we get to re-use the space occupied by the
	 * boot program. Traditionally, we've silently assumed that
	 * is enough for the kernel to work with.
	 */
	size += BOOTINFO_SIZE;
	if (!compatmode)
		size += 512 * 1024;

	/* Get a physical load address */
	pa = getphysmem(size);
	if (pa == (paddr_t)-1) {
		error = EFBIG;
		goto out;
	}

	if (boothowto & AB_VERBOSE)
		printf("Loading at physical address %lx\n", pa);
	if (pmap_map(va, pa, size) != 0) {
		error = EFAULT;
		goto out;
	}

	/* XXX - to do: inspect kernel image and set compat mode */
	if (compatmode) {
		/* Double-map at VA 0 for compatibility */
		if (pa + size >= bstart) {
			printf("%s: too large for compat mode\n", kernel);
			error = EFBIG;
			goto out;
		}

		if (pa != 0 && pmap_map(0, pa, size) != 0) {
			error = EFAULT;
			goto out;
		}
		loadaddrmask = 0x07ffffffUL;
	}

	if (bootdev_isfloppy(prom_bootdevice))
		flags &= ~LOAD_BACKWARDS;

	marks[MARK_START] = 0;
	error = fdloadfile(fd, marks, flags);
out:
	close(fd);
	return (error);
}
Example #15
/*
 * Machine-dependent startup code
 */
void
cpu_startup()
{
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	vaddr_t minaddr, maxaddr;
	paddr_t msgbufpa;
	extern struct user *proc0paddr;

#ifdef DEBUG
	pmapdebug = 0;
#endif

	if (CPU_ISSUN4M) {
		extern int stackgap_random;

		stackgap_random = STACKGAP_RANDOM_SUN4M;
	}

	/*
	 * Re-map the message buffer from its temporary address
	 * at KERNBASE to MSGBUF_VA.
	 */

	/* Get physical address of the message buffer */
	pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &msgbufpa);

	/* Invalidate the current mapping at KERNBASE. */
	pmap_kremove((vaddr_t)KERNBASE, PAGE_SIZE);
	pmap_update(pmap_kernel());

	/* Enter the new mapping */
	pmap_map(MSGBUF_VA, msgbufpa, msgbufpa + PAGE_SIZE,
	    VM_PROT_READ | VM_PROT_WRITE);

	/* Re-initialize the message buffer. */
	initmsgbuf((caddr_t)(MSGBUF_VA + (CPU_ISSUN4 ? 4096 : 0)), MSGBUFSIZE);

	proc0.p_addr = proc0paddr;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	/*identifycpu();*/
	printf("real mem = %u (%uMB)\n", ptoa(physmem),
	    ptoa(physmem)/1024/1024);

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a map for physio.  Others use a submap of the kernel
	 * map, but we want one completely separate, even though it uses
	 * the same pmap.
	 */
	dvma_base = CPU_ISSUN4M ? DVMA4M_BASE : DVMA_BASE;
	dvma_end = CPU_ISSUN4M ? DVMA4M_END : DVMA_END;
	dvmamap_extent = extent_create("dvmamap", dvma_base, dvma_end,
				       M_DEVBUF, NULL, 0, EX_NOWAIT);
	if (dvmamap_extent == NULL)
		panic("unable to allocate extent for dvma");

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free)/1024/1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/* Early interrupt handlers initialization */
	intr_init();
}