Example #1
/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize cpu, and do autoconfiguration.
 */
void
cpu_startup()
{
	unsigned i;
	caddr_t v;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;

	pmapdebug = 0;
#endif

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
	 */
	for (i = 0; i < atop(MSGBUFSIZE); i++)
		pmap_kenter_pa((vaddr_t)msgbufp + i * PAGE_SIZE,
		    avail_end + i * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());
	initmsgbuf((caddr_t)msgbufp, round_page(MSGBUFSIZE));

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s", version);
	identifycpu();
	printf("real mem = %u (%uMB)\n", ptoa(physmem),
	    ptoa(physmem) / 1024 / 1024);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	size = (vsize_t)allocsys((caddr_t)0);
	if ((v = (caddr_t) uvm_km_zalloc(kernel_map, round_page(size))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v) - v != size)
		panic("startup: table size inconsistency");

	/*
	 * Determine how many buffers to allocate.
	 * We allocate bufcachepercent% of memory for buffer space.
	 */
	if (bufpages == 0)
		bufpages = physmem * bufcachepercent / 100;

	/* Restrict to at most 25% filled kvm */
	if (bufpages >
	    (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) 
		bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
		    PAGE_SIZE / 4;

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, FALSE, NULL);

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	printf("avail mem = %u (%uMB)\n",
	    ptoa(uvmexp.free), ptoa(uvmexp.free) / 1024 / 1024);

	/*
	 * Configure the system.
	 */
	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}
}
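A minimal user-space sketch of the bufpages sizing policy above: take bufcachepercent% of physical memory, then clamp to at most a quarter of the kernel virtual address range. The constants and sample values are hypothetical stand-ins, not the real kernel definitions.

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define VM_MIN_KERNEL_ADDRESS	0xc0000000UL	/* hypothetical */
#define VM_MAX_KERNEL_ADDRESS	0xf0000000UL	/* hypothetical */

int
main(void)
{
	unsigned long physmem = 262144;		/* pages; 1GB of 4K pages */
	unsigned long bufcachepercent = 10;
	unsigned long bufpages, kva_limit;

	bufpages = physmem * bufcachepercent / 100;

	/* Restrict to at most 25% filled kvm, as in cpu_startup() */
	kva_limit = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) /
	    PAGE_SIZE / 4;
	if (bufpages > kva_limit)
		bufpages = kva_limit;

	printf("bufpages = %lu of %lu physical pages\n", bufpages, physmem);
	return 0;
}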
Example #2
/*
 * void cpu_startup(void)
 *
 * Machine dependent startup code.
 *
 */
void
cpu_startup()
{
	u_int loop;
	paddr_t minaddr;
	paddr_t maxaddr;

	proc0paddr = (struct user *)kernelstack.pv_va;
	proc0.p_addr = proc0paddr;

	/* Set the cpu control register */
	cpu_setup();

	/* Lock down zero page */
	vector_page_setprot(VM_PROT_READ|VM_PROT_EXECUTE);

	/*
	 * Give pmap a chance to set up a few more things now the vm
	 * is initialised
	 */
	pmap_postinit();

	/*
	 * Allow per-board specific initialization
	 */
	board_startup();

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* msgbufphys was set up during the secondary bootstrap */
	for (loop = 0; loop < atop(MSGBUFSIZE); ++loop)
		pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
		    msgbufphys + loop * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Identify ourselves for the msgbuf (everything printed earlier will
	 * not be buffered).
	 */
	printf(version);

	printf("real mem  = %u (%uMB)\n", ptoa(physmem),
	    ptoa(physmem)/1024/1024);

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit(); 

	printf("avail mem = %lu (%uMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free)/1024/1024);

	curpcb = &proc0.p_addr->u_pcb;
	curpcb->pcb_flags = 0;
	curpcb->pcb_un.un_32.pcb32_und_sp = (u_int)proc0.p_addr +
	    USPACE_UNDEF_STACK_TOP;
	curpcb->pcb_un.un_32.pcb32_sp = (u_int)proc0.p_addr +
	    USPACE_SVC_STACK_TOP;
	pmap_set_pcb_pagedir(pmap_kernel(), curpcb);

	curpcb->pcb_tf = (struct trapframe *)curpcb->pcb_un.un_32.pcb32_sp - 1;
}
Example #3
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	struct arm32_dma_range *dr;
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	pd_entry_t *pde;
	pt_entry_t pte;
	int seg;
	pmap_t pmap;
	pt_entry_t *ptep;

#ifdef DEBUG_DMA
	printf("_bus_dmamem_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
	    buf, buflen, flags, first);
#endif	/* DEBUG_DMA */

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask  = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->_dm_flags &=
					    ~ARM32_DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KDASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV);
				if (__predict_false((pte & L2_TYPE_MASK)
						    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->_dm_flags &=
						    ~ARM32_DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->_dm_flags &=
						    ~ARM32_DMAMAP_COHERENT;
					}
				}
			}
		} else {
			(void) pmap_extract(pmap, vaddr, &curaddr);
			map->_dm_flags &= ~ARM32_DMAMAP_COHERENT;
		}

		/*
		 * Make sure we're in an allowed DMA range.
		 */
		if (t->_ranges != NULL) {
			/* XXX cache last result? */
			dr = _bus_dma_inrange(t->_ranges, t->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
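The coalescing test above can be exercised in isolation: a chunk merges into the current segment only if it is physically contiguous with it, the merged length still fits the maximum segment size, and both ends fall in the same boundary window. A stand-alone model with hypothetical types, not the bus_dma API itself:

#include <stdio.h>

struct seg {
	unsigned long addr;	/* segment start (bus address) */
	unsigned long len;	/* segment length */
};

static int
can_coalesce(const struct seg *s, unsigned long curaddr,
    unsigned long sgsize, unsigned long lastaddr,
    unsigned long maxsegsz, unsigned long boundary)
{
	unsigned long bmask = ~(boundary - 1);

	if (curaddr != lastaddr)
		return 0;		/* not physically contiguous */
	if (s->len + sgsize > maxsegsz)
		return 0;		/* merged segment too large */
	if (boundary != 0 && (s->addr & bmask) != (curaddr & bmask))
		return 0;		/* would span a boundary window */
	return 1;
}

int
main(void)
{
	struct seg s = { 0x1000, 0x1000 };

	/* contiguous chunk at 0x2000 merges; one at 0x5000 does not */
	printf("%d %d\n",
	    can_coalesce(&s, 0x2000, 0x1000, 0x2000, 0x10000, 0x8000),
	    can_coalesce(&s, 0x5000, 0x1000, 0x2000, 0x10000, 0x8000));
	return 0;
}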
Example #4
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t lastaddr, baddr, bmask;
	paddr_t curaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask  = ~(map->_dm_boundary - 1);
	if (t->_dma_mask != 0)
		bmask &= t->_dma_mask;

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap_extract(pmap, vaddr, &curaddr) == FALSE)
			panic("_dmapmap_load_buffer: pmap_extract(%x, %x) failed!",
			    pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = ((bus_addr_t)curaddr + map->_dm_boundary) &
			    bmask;
			if (sgsize > (baddr - (bus_addr_t)curaddr))
				sgsize = (baddr - (bus_addr_t)curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr =
			    (*t->_pa_to_device)(curaddr);
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_paddr = curaddr;
			map->dm_segs[seg]._ds_vaddr = vaddr;
			first = 0;
		} else {
			if ((bus_addr_t)curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			     (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     ((bus_addr_t)curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr =
				    (*t->_pa_to_device)(curaddr);
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_paddr = curaddr;
				map->dm_segs[seg]._ds_vaddr = vaddr;
			}
		}

		lastaddr = (bus_addr_t)curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}
Example #5
void
cpu_startup()
{
	caddr_t v;
	int sz, i;
	vsize_t size;
	int base, residual;
	vaddr_t minaddr, maxaddr, uarea_pages;

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in luna88k_bootstrap() to compensate.
	 */
	for (i = 0; i < btoc(MSGBUFSIZE); i++)
		pmap_kenter_pa((paddr_t)msgbufp + i * NBPG,
		    avail_end + i * NBPG, VM_PROT_READ | VM_PROT_WRITE);
	pmap_update(pmap_kernel());
	initmsgbuf((caddr_t)msgbufp, round_page(MSGBUFSIZE));

	/* Determine the machine type from FUSE ROM data */
	get_fuse_rom_data();
	if (strncmp(fuse_rom_data, "MNAME=LUNA88K+", 14) == 0) {
		machtype = LUNA_88K2;
	}

	/* Determine the 'auto-boot' device from NVRAM data */
	get_nvram_data();
	get_autoboot_device();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	identifycpu();
	printf("real mem  = %d\n", ctob(physmem));

	/*
	 * Check front DIP switch setting
	 */
	printf("dipsw = 0x%x\n", dipswitch);

	/* Check DIP switch 1 - 1 */
	if ((0x8000 & dipswitch) == 0) {
		boothowto |= RB_SINGLE;
	}

	/* Check DIP switch 1 - 3 */
	if ((0x2000 & dipswitch) == 0) {
		boothowto |= RB_ASKNAME;
	}

	/* Check DIP switch 1 - 4 */
	if ((0x1000 & dipswitch) == 0) {
		boothowto |= RB_CONFIG;
	}

	/*
	 * Check frame buffer depth.
	 */
	switch (hwplanebits) {
	case 0:				/* No frame buffer */
	case 1:
	case 4:
	case 8:
		break;
	default:
		printf("unexpected frame buffer depth = %d\n", hwplanebits);
		hwplanebits = 0;
		break;
	}

#if 0 /* just for test */
	/*
	 * Get boot arguments
	 */
	{
		char buf[256];
		volatile char **p = (volatile char **)0x00001120;

		strncpy(buf, (const char *)*p, 256);
		if (buf[255] != '\0')
			buf[255] = '\0';

		printf("boot arg: (0x%x) %s\n", *p, buf);
	}
#endif

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (int)allocsys((caddr_t)0);

	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Grab UADDR virtual address
	 */
	uarea_pages = UADDR;
	uvm_map(kernel_map, (vaddr_t *)&uarea_pages, USPACE,
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	      UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	        UVM_ADV_NORMAL, 0));
	if (uarea_pages != UADDR)
		panic("uarea_pages %lx: UADDR not free", uarea_pages);

	/*
	 * Grab the OBIO space that we hardwired in pmap_bootstrap
	 */
	obiova = OBIO_START;
	uvm_map(kernel_map, (vaddr_t *)&obiova, OBIO_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	      UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	        UVM_ADV_NORMAL, 0));
	if (obiova != OBIO_START)
		panic("obiova %lx: OBIO not free", obiova);

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	      UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_NORMAL, 0)))
		panic("cpu_startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;

	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;

	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t)buffers + (i * MAXBSIZE);
		curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				      "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate map for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	printf("avail mem = %ld (%d pages)\n", ptoa(uvmexp.free), uvmexp.free);
	printf("using %d buffers containing %d bytes of memory\n", nbuf,
	    bufpages * PAGE_SIZE);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/*
	 * Initialize the autovectored interrupt list.
	 */
	isrinit();

	/*
	 * Configure the system.
	 */
	if (boothowto & RB_CONFIG) {
#ifdef BOOT_CONFIG
		user_config();
#else
		printf("kernel does not support -c; continuing..\n");
#endif
	}

	/*
	 * Say hello to the world on LCD.
	 */
	greeting();
}
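The base/residual arithmetic above distributes bufpages physical pages over nbuf buffers so that the first "residual" buffers get one extra page each. A stand-alone sketch with made-up values:

#include <stdio.h>

int
main(void)
{
	int bufpages = 1030, nbuf = 100;
	int base = bufpages / nbuf;		/* 10 pages per buffer */
	int residual = bufpages % nbuf;		/* 30 buffers get one more */
	int i, total = 0;

	for (i = 0; i < nbuf; i++)
		total += (i < residual) ? base + 1 : base;

	printf("distributed %d of %d pages over %d buffers\n",
	    total, bufpages, nbuf);
	return 0;
}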
Example #6
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error, pmap_flags;

#if defined(TGT_INDIGO2)
	/*
	 * On ECC MC systems, which do not allow uncached writes to memory
	 * during regular operation, fail requests for uncached (coherent)
	 * memory, unless the caller tells us it is aware of this and will
	 * do the right thing, by passing BUS_DMA_BUS1 as well.
	 */
	if ((flags & (BUS_DMA_COHERENT | BUS_DMA_BUS1)) == BUS_DMA_COHERENT &&
	    ip22_ecc)
		return EINVAL;
#endif

#ifdef TGT_COHERENT
	/* coherent mappings do not need to be uncached on these platforms */
	flags &= ~BUS_DMA_COHERENT;
#endif

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_NC);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	pmap_flags = PMAP_WIRED | PMAP_CANFAIL;
	if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
		pmap_flags |= PMAP_NOCACHE;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
#ifdef DIAGNOSTIC
			if (size == 0)
				panic("_dmamem_map: size botch");
#endif
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | pmap_flags);
			if (error) {
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}

			/*
			 * This is redundant with what pmap_enter() did
			 * above, but will take care of forcing other
			 * mappings of the same page (if any) to be
			 * uncached.
			 * If there are no multiple mappings of that
			 * page, this amounts to a noop.
			 */
			if (flags & (BUS_DMA_COHERENT | BUS_DMA_NOCACHE))
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PV_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
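The single-segment fast path above returns a direct-map address instead of entering a new mapping. A stand-alone sketch of the PHYS_TO_XKPHYS() construction it relies on, assuming the usual MIPS64 layout (bit 63 selects the XKPHYS segment, bits 61:59 carry the cache coherency attribute, the low bits carry the physical address):

#include <stdio.h>
#include <stdint.h>

#define CCA_NC		2	/* uncached */
#define CCA_CACHED	3	/* cacheable noncoherent */

static uint64_t
phys_to_xkphys(uint64_t pa, uint64_t cca)
{
	return (1ULL << 63) | (cca << 59) | pa;
}

int
main(void)
{
	printf("cached:   %#llx\n",
	    (unsigned long long)phys_to_xkphys(0x1000, CCA_CACHED));
	printf("uncached: %#llx\n",
	    (unsigned long long)phys_to_xkphys(0x1000, CCA_NC));
	return 0;
}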
Example #7
/*
 * Handle a single exception.
 */
void
itsa(struct trapframe *trapframe, struct cpu_info *ci, struct proc *p,
    int type)
{
	int i;
	unsigned ucode = 0;
	vm_prot_t ftype;
	extern vaddr_t onfault_table[];
	int onfault;
	int typ = 0;
	union sigval sv;
	struct pcb *pcb;

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address */
		if (trapframe->badvaddr < 0) {
			if (pmap_emulate_modify(pmap_kernel(),
			    trapframe->badvaddr)) {
				/* write to read only page in the kernel */
				ftype = PROT_WRITE;
				pcb = &p->p_addr->u_pcb;
				goto kernel_fault;
			}
			return;
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
		if (pmap_emulate_modify(p->p_vmspace->vm_map.pmap,
		    trapframe->badvaddr)) {
			/* write to read only page */
			ftype = PROT_WRITE;
			pcb = &p->p_addr->u_pcb;
			goto fault_common_no_miss;
		}
		return;

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? PROT_WRITE : PROT_READ;
		pcb = &p->p_addr->u_pcb;
		/* check for kernel address */
		if (trapframe->badvaddr < 0) {
			vaddr_t va;
			int rv;

	kernel_fault:
			va = trunc_page((vaddr_t)trapframe->badvaddr);
			onfault = pcb->pcb_onfault;
			pcb->pcb_onfault = 0;
			KERNEL_LOCK();
			rv = uvm_fault(kernel_map, va, 0, ftype);
			KERNEL_UNLOCK();
			pcb->pcb_onfault = onfault;
			if (rv == 0)
				return;
			if (onfault != 0) {
				pcb->pcb_onfault = 0;
				trapframe->pc = onfault_table[onfault];
				return;
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if (pcb->pcb_onfault != 0) {
			/*
			 * We want to resolve the TLB fault before invoking
			 * pcb_onfault if necessary.
			 */
			goto fault_common;
		} else {
			goto err;
		}

	case T_TLB_LD_MISS+T_USER:
		ftype = PROT_READ;
		pcb = &p->p_addr->u_pcb;
		goto fault_common;

	case T_TLB_ST_MISS+T_USER:
		ftype = PROT_WRITE;
		pcb = &p->p_addr->u_pcb;
fault_common:

#ifdef CPU_R4000
		if (r4000_errata != 0) {
			if (eop_tlb_miss_handler(trapframe, ci, p) != 0)
				return;
		}
#endif

fault_common_no_miss:

#ifdef CPU_R4000
		if (r4000_errata != 0) {
			eop_cleanup(trapframe, p);
		}
#endif

	    {
		vaddr_t va;
		struct vmspace *vm;
		vm_map_t map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page((vaddr_t)trapframe->badvaddr);

		onfault = pcb->pcb_onfault;
		pcb->pcb_onfault = 0;
		KERNEL_LOCK();

		rv = uvm_fault(map, va, 0, ftype);
		pcb->pcb_onfault = onfault;

		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == 0)
				uvm_grow(p, va);
			else if (rv == EACCES)
				rv = EFAULT;
		}
		KERNEL_UNLOCK();
		if (rv == 0)
			return;
		if (!USERMODE(trapframe->sr)) {
			if (onfault != 0) {
				pcb->pcb_onfault = 0;
				trapframe->pc =  onfault_table[onfault];
				return;
			}
			goto err;
		}

		ucode = ftype;
		i = SIGSEGV;
		typ = SEGV_MAPERR;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
		ucode = 0;		/* XXX should be PROT_something */
		i = SIGBUS;
		typ = BUS_ADRALN;
		break;
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		ucode = 0;		/* XXX should be PROT_something */
		i = SIGBUS;
		typ = BUS_OBJERR;
		break;

	case T_SYSCALL+T_USER:
	    {
		struct trapframe *locr0 = p->p_md.md_regs;
		struct sysent *callp;
		unsigned int code;
		register_t tpc;
		int numsys, error;
		struct args {
			register_t i[8];
		} args;
		register_t rval[2];

		atomic_inc_int(&uvmexp.syscalls);

		/* compute next PC after syscall instruction */
		tpc = trapframe->pc; /* Remember if restart */
		if (trapframe->cause & CR_BR_DELAY)
			locr0->pc = MipsEmulateBranch(locr0,
			    trapframe->pc, 0, 0);
		else
			locr0->pc += 4;
		callp = p->p_p->ps_emul->e_sysent;
		numsys = p->p_p->ps_emul->e_nsysent;
		code = locr0->v0;
		switch (code) {
		case SYS_syscall:
		case SYS___syscall:
			/*
			 * Code is first argument, followed by actual args.
			 * __syscall provides the code as a quad to maintain
			 * proper alignment of 64-bit arguments on 32-bit
			 * platforms, which doesn't change anything here.
			 */
			code = locr0->a0;
			if (code >= numsys)
				callp += p->p_p->ps_emul->e_nosys; /* (illegal) */
			else
				callp += code;
			i = callp->sy_argsize / sizeof(register_t);
			args.i[0] = locr0->a1;
			args.i[1] = locr0->a2;
			args.i[2] = locr0->a3;
			if (i > 3) {
				args.i[3] = locr0->a4;
				args.i[4] = locr0->a5;
				args.i[5] = locr0->a6;
				args.i[6] = locr0->a7;
				if (i > 7)
					if ((error = copyin((void *)locr0->sp,
					    &args.i[7], sizeof(register_t))))
						goto bad;
			}
			break;
		default:
			if (code >= numsys)
				callp += p->p_p->ps_emul->e_nosys; /* (illegal) */
			else
				callp += code;

			i = callp->sy_narg;
			args.i[0] = locr0->a0;
			args.i[1] = locr0->a1;
			args.i[2] = locr0->a2;
			args.i[3] = locr0->a3;
			if (i > 4) {
				args.i[4] = locr0->a4;
				args.i[5] = locr0->a5;
				args.i[6] = locr0->a6;
				args.i[7] = locr0->a7;
			}
		}

		rval[0] = 0;
		rval[1] = locr0->v1;

#if defined(DDB) || defined(DEBUG)
		trapdebug[TRAPSIZE * ci->ci_cpuid + (trppos[ci->ci_cpuid] == 0 ?
		    TRAPSIZE : trppos[ci->ci_cpuid]) - 1].code = code;
#endif

		error = mi_syscall(p, code, callp, args.i, rval);

		switch (error) {
		case 0:
			locr0->v0 = rval[0];
			locr0->v1 = rval[1];
			locr0->a3 = 0;
			break;

		case ERESTART:
			locr0->pc = tpc;
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
		bad:
			locr0->v0 = error;
			locr0->a3 = 1;
		}

		mi_syscall_return(p, code, error, rval);

		return;
	    }

	case T_BREAK:
#ifdef DDB
		db_ktrap(type, trapframe);
#endif
		/* Reenable interrupts if necessary */
		if (trapframe->sr & SR_INT_ENAB) {
			enableintr();
		}
		return;

	case T_BREAK+T_USER:
	    {
		caddr_t va;
		u_int32_t instr;
		struct trapframe *locr0 = p->p_md.md_regs;

		/* compute address of break instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;

		/* read break instruction */
		copyin(va, &instr, sizeof(int32_t));

		switch ((instr & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT) {
		case 6:	/* gcc range error */
			i = SIGFPE;
			typ = FPE_FLTSUB;
			/* skip instruction */
			if (trapframe->cause & CR_BR_DELAY)
				locr0->pc = MipsEmulateBranch(locr0,
				    trapframe->pc, 0, 0);
			else
				locr0->pc += 4;
			break;
		case 7:	/* gcc3 divide by zero */
			i = SIGFPE;
			typ = FPE_INTDIV;
			/* skip instruction */
			if (trapframe->cause & CR_BR_DELAY)
				locr0->pc = MipsEmulateBranch(locr0,
				    trapframe->pc, 0, 0);
			else
				locr0->pc += 4;
			break;
#ifdef PTRACE
		case BREAK_SSTEP_VAL:
			if (p->p_md.md_ss_addr == (long)va) {
#ifdef DEBUG
				printf("trap: %s (%d): breakpoint at %p "
				    "(insn %08x)\n",
				    p->p_comm, p->p_pid,
				    (void *)p->p_md.md_ss_addr,
				    p->p_md.md_ss_instr);
#endif

				/* Restore original instruction and clear BP */
				KERNEL_LOCK();
				process_sstep(p, 0);
				KERNEL_UNLOCK();
				typ = TRAP_BRKPT;
			} else {
				typ = TRAP_TRACE;
			}
			i = SIGTRAP;
			break;
#endif
#ifdef FPUEMUL
		case BREAK_FPUEMUL_VAL:
			/*
			 * If this is a genuine FP emulation break,
			 * resume execution to our branch destination.
			 */
			if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
			    p->p_md.md_fppgva + 4 == (vaddr_t)va) {
				struct vm_map *map = &p->p_vmspace->vm_map;

				p->p_md.md_flags &= ~MDP_FPUSED;
				locr0->pc = p->p_md.md_fpbranchva;

				/*
				 * Prevent access to the relocation page.
				 * XXX needs to be fixed to work with rthreads
				 */
				KERNEL_LOCK();
				uvm_fault_unwire(map, p->p_md.md_fppgva,
				    p->p_md.md_fppgva + PAGE_SIZE);
				KERNEL_UNLOCK();
				(void)uvm_map_protect(map, p->p_md.md_fppgva,
				    p->p_md.md_fppgva + PAGE_SIZE,
				    PROT_NONE, FALSE);
				return;
			}
			/* FALLTHROUGH */
#endif
		default:
			typ = TRAP_TRACE;
			i = SIGTRAP;
			break;
		}
		break;
	    }

	case T_IWATCH+T_USER:
	case T_DWATCH+T_USER:
	    {
		caddr_t va;
		/* compute address of trapped instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		printf("watch exception @ %p\n", va);
#ifdef RM7K_PERFCNTR
		if (rm7k_watchintr(trapframe)) {
			/* Return to user, don't add any more overhead */
			return;
		}
#endif
		i = SIGTRAP;
		typ = TRAP_BRKPT;
		break;
	    }

	case T_TRAP+T_USER:
	    {
		caddr_t va;
		u_int32_t instr;
		struct trapframe *locr0 = p->p_md.md_regs;

		/* compute address of trap instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		/* read trap instruction */
		copyin(va, &instr, sizeof(int32_t));

		if (trapframe->cause & CR_BR_DELAY)
			locr0->pc = MipsEmulateBranch(locr0,
			    trapframe->pc, 0, 0);
		else
			locr0->pc += 4;
#ifdef RM7K_PERFCNTR
		if (instr == 0x040c0000) { /* Performance cntr trap */
			int result;

			result = rm7k_perfcntr(trapframe->a0, trapframe->a1,
						trapframe->a2, trapframe->a3);
			locr0->v0 = -result;
			/* Return to user, don't add any more overhead */
			return;
		} else
#endif
		/*
		 * GCC 4 uses teq with code 7 to signal divide by
		 * zero at runtime. This is one instruction shorter
		 * than the BEQ + BREAK combination used by gcc 3.
		 */
		if ((instr & 0xfc00003f) == 0x00000034 /* teq */ &&
		    (instr & 0x001fffc0) == ((ZERO << 16) | (7 << 6))) {
			i = SIGFPE;
			typ = FPE_INTDIV;
		} else {
			i = SIGEMT;	/* Stuff it with something for now */
			typ = 0;
		}
		break;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		typ = ILL_ILLOPC;
		break;

	case T_COP_UNUSABLE+T_USER:
		/*
		 * Note MIPS IV COP1X instructions issued with FPU
		 * disabled correctly report coprocessor 1 as the
		 * unusable coprocessor number.
		 */
		if ((trapframe->cause & CR_COP_ERR) != CR_COP1_ERR) {
			i = SIGILL;	/* only FPU instructions allowed */
			typ = ILL_ILLOPC;
			break;
		}
#ifdef FPUEMUL
		MipsFPTrap(trapframe);
#else
		enable_fpu(p);
#endif
		return;

	case T_FPE:
		printf("FPU Trap: PC %lx CR %lx SR %lx\n",
			trapframe->pc, trapframe->cause, trapframe->sr);
		goto err;

	case T_FPE+T_USER:
		MipsFPTrap(trapframe);
		return;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		typ = FPE_FLTOVF;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		pcb = &p->p_addr->u_pcb;
		if ((onfault = pcb->pcb_onfault) != 0) {
			pcb->pcb_onfault = 0;
			trapframe->pc = onfault_table[onfault];
			return;
		}
		goto err;

#ifdef CPU_R10000
	case T_BUS_ERR_IFETCH:
		/*
		 * At least R16000 processors have been found to trigger
		 * reproducible bus errors on instruction fetch in the
		 * kernel code, which are trivially recoverable (and
		 * look like an obscure erratum to me).
		 *
		 * Thus, ignore these exceptions if the faulting address
		 * is in the kernel.
		 */
	    {
		extern void *kernel_text;
		extern void *etext;
		vaddr_t va;

		va = (vaddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		if (va > (vaddr_t)&kernel_text && va < (vaddr_t)&etext)
			return;
	    }
		goto err;
#endif

	default:
	err:
		disableintr();
#if !defined(DDB) && defined(DEBUG)
		trapDump("trap", printf);
#endif
		printf("\nTrap cause = %d Frame %p\n", type, trapframe);
		printf("Trap PC %p RA %p fault %p\n",
		    (void *)trapframe->pc, (void *)trapframe->ra,
		    (void *)trapframe->badvaddr);
#ifdef DDB
		stacktrace(!USERMODE(trapframe->sr) ? trapframe : p->p_md.md_regs);
		db_ktrap(type, trapframe);
#endif
		panic("trap");
	}

#ifdef FPUEMUL
	/*
	 * If a relocated delay slot causes an exception, blame the
	 * original delay slot address - userland is not supposed to
	 * know anything about emulation bowels.
	 */
	if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
	    trapframe->badvaddr == p->p_md.md_fppgva)
		trapframe->badvaddr = p->p_md.md_fpslotva;
#endif
	p->p_md.md_regs->pc = trapframe->pc;
	p->p_md.md_regs->cause = trapframe->cause;
	p->p_md.md_regs->badvaddr = trapframe->badvaddr;
	sv.sival_ptr = (void *)trapframe->badvaddr;
	KERNEL_LOCK();
	trapsignal(p, i, ucode, typ, sv);
	KERNEL_UNLOCK();
}
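The "teq with code 7" test in the T_TRAP case above decodes a MIPS trap-if-equal instruction: SPECIAL opcode (bits 31:26 zero) with function 0x34, rt in bits 20:16 required to be $zero, and the trap code field in bits 15:6 required to be 7. A stand-alone decoder sketch:

#include <stdio.h>
#include <stdint.h>

#define ZERO	0	/* register number of $zero */

static int
is_divzero_teq(uint32_t instr)
{
	return (instr & 0xfc00003f) == 0x00000034 &&	/* teq */
	    (instr & 0x001fffc0) == ((ZERO << 16) | (7 << 6));
}

int
main(void)
{
	/* "teq $a0, $zero, 7" as emitted for integer divide by zero */
	uint32_t teq = (4 << 21) | (ZERO << 16) | (7 << 6) | 0x34;

	/* matching instruction, then one with rt != $zero */
	printf("%d %d\n", is_divzero_teq(teq),
	    is_divzero_teq(teq | (5 << 16)));
	return 0;
}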
Example #8
/*
 * Machine-dependent startup code
 */
void
cpu_startup()
{
	caddr_t v;
	int sz;
#ifdef DEBUG
	extern int pmapdebug;
	int opmapdebug = pmapdebug;
#endif
	vaddr_t minaddr, maxaddr;
	extern struct user *proc0paddr;

#ifdef DEBUG
	pmapdebug = 0;
#endif

	if (CPU_ISSUN4M) {
		extern int stackgap_random;

		stackgap_random = STACKGAP_RANDOM_SUN4M;
	}

	/*
	 * fix message buffer mapping, note phys addr of msgbuf is 0
	 */
	pmap_map(MSGBUF_VA, 0, MSGBUFSIZE, VM_PROT_READ|VM_PROT_WRITE);
	initmsgbuf((caddr_t)(MSGBUF_VA + (CPU_ISSUN4 ? 4096 : 0)), MSGBUFSIZE);

	proc0.p_addr = proc0paddr;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	/*identifycpu();*/
	printf("real mem = %u (%uMB)\n", ptoa(physmem),
	    ptoa(physmem)/1024/1024);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (int)allocsys((caddr_t)0);

	if ((v = (caddr_t)uvm_km_alloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");

	if (allocsys(v) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Determine how many buffers to allocate.
	 * We allocate bufcachepercent% of memory for buffer space.
	 */
	if (bufpages == 0)
		bufpages = physmem * bufcachepercent / 100;

	/* Restrict to at most 25% filled kvm */
	if (bufpages >
	    (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE / 4) 
		bufpages = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
		    PAGE_SIZE / 4;

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	minaddr = vm_map_min(kernel_map);
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a map for physio.  Others use a submap of the kernel
	 * map, but we want one completely separate, even though it uses
	 * the same pmap.
	 */
	dvma_base = CPU_ISSUN4M ? DVMA4M_BASE : DVMA_BASE;
	dvma_end = CPU_ISSUN4M ? DVMA4M_END : DVMA_END;
#if defined(SUN4M)
	if (CPU_ISSUN4M) {
		/*
		 * The DVMA space we want partially overrides kernel_map.
		 * Allocate it in kernel_map as well to prevent it from being
		 * used for other things.
		 */
		if (uvm_map(kernel_map, &dvma_base,
		    vm_map_max(kernel_map) - dvma_base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
			UVM_ADV_NORMAL, 0)))
			panic("startup: can not steal dvma map");
	}
#endif
	phys_map = uvm_map_create(pmap_kernel(), dvma_base, dvma_end,
	    VM_MAP_INTRSAFE);
	if (phys_map == NULL)
		panic("unable to create DVMA map");

	/*
	 * Allocate DVMA space and dump into a privately managed
	 * resource map for double mappings which is usable from
	 * interrupt contexts.
	 */
	if (uvm_km_valloc_wait(phys_map, (dvma_end-dvma_base)) != dvma_base)
		panic("unable to allocate from DVMA map");
	dvmamap_extent = extent_create("dvmamap", dvma_base, dvma_end,
				       M_DEVBUF, NULL, 0, EX_NOWAIT);
	if (dvmamap_extent == 0)
		panic("unable to allocate extent for dvma");

#ifdef DEBUG
	pmapdebug = opmapdebug;
#endif
	printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free),
	    ptoa(uvmexp.free)/1024/1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/* Early interrupt handlers initialization */
	intr_init();
}
Example #9
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct_common(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, bus_size_t buflen, struct vmspace *vm, int flags,
    paddr_t *lastaddrp, int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg, cacheable, coherent;
	pmap_t pmap;
	bool rv;

	coherent = BUS_DMA_COHERENT;
	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);
	if (!VMSPACE_IS_KERNEL_P(vm))
		pmap = vm_map_pmap(&vm->vm_map);
	else
		pmap = pmap_kernel();

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		rv = pmap_extract(pmap, vaddr, &curaddr);
		KASSERT(rv);

		cacheable = _pmap_page_is_cacheable(pmap, vaddr);

		if (cacheable)
			coherent = 0;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_flags =
			    cacheable ? 0 : BUS_DMA_COHERENT;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_flags =
				    cacheable ? 0 : BUS_DMA_COHERENT;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;
	map->_dm_flags &= ~BUS_DMA_COHERENT;
	/* BUS_DMA_COHERENT is set only if all segments are uncached */
	map->_dm_flags |= coherent;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return EFBIG;		/* XXX better return value here? */
	}

	return 0;
}
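The coherency bookkeeping above marks the whole map BUS_DMA_COHERENT only if no segment turned out to be cacheable; "coherent" starts at the flag value and is knocked down to zero by the first cacheable page. A tiny model (the flag value here is arbitrary):

#include <stdio.h>

#define BUS_DMA_COHERENT	0x004	/* illustrative value */

int
main(void)
{
	int cacheable[] = { 0, 1, 0 };	/* per-segment lookup results */
	int i, coherent = BUS_DMA_COHERENT;

	for (i = 0; i < 3; i++)
		if (cacheable[i])
			coherent = 0;	/* any cacheable page clears it */

	printf("map gains flags %#x\n", coherent);
	return 0;
}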
Example #10
void
cpu_attach(struct device *parent, struct device *self, void *aux)
{
	struct cpu_info *ci = (struct cpu_info *)self;
	struct cpu_attach_args *caa = (struct cpu_attach_args *)aux;

#ifdef MULTIPROCESSOR
	int cpunum = ci->ci_dev.dv_unit;
	vaddr_t kstack;
	struct pcb *pcb;
#endif

	if (caa->cpu_role == CPU_ROLE_AP) {
#ifdef MULTIPROCESSOR
		if (cpu_info[cpunum] != NULL)
			panic("cpu at apic id %d already attached?", cpunum);
		cpu_info[cpunum] = ci;
#endif
	} else {
		ci = &cpu_info_primary;
#ifdef MULTIPROCESSOR
		if (caa->cpu_number != lapic_cpu_number()) {
			panic("%s: running cpu is at apic %d"
			    " instead of at expected %d",
			    self->dv_xname, lapic_cpu_number(), caa->cpu_number);
		}
#endif
		bcopy(self, &ci->ci_dev, sizeof *self);
	}

	ci->ci_self = ci;
	ci->ci_apicid = caa->cpu_number;
#ifdef MULTIPROCESSOR
	ci->ci_cpuid = cpunum;
#else
	ci->ci_cpuid = 0;	/* False for APs, so what, they're not used */
#endif
	ci->ci_signature = caa->cpu_signature;
	ci->ci_feature_flags = caa->feature_flags;
	ci->ci_func = caa->cpu_func;

#ifdef MULTIPROCESSOR
	/*
	 * Allocate UPAGES contiguous pages for the idle PCB and stack.
	 */

	kstack = uvm_km_alloc(kernel_map, USPACE);
	if (kstack == 0) {
		if (cpunum == 0) { /* XXX */
			panic("cpu_attach: unable to allocate idle stack for"
			    " primary");
		}
		printf("%s: unable to allocate idle stack\n",
		    ci->ci_dev.dv_xname);
		return;
	}
	pcb = ci->ci_idle_pcb = (struct pcb *)kstack;
	memset(pcb, 0, USPACE);

	pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	pcb->pcb_tss.tss_esp0 = kstack + USPACE - 16 -
	    sizeof (struct trapframe);
	pcb->pcb_tss.tss_esp = kstack + USPACE - 16 -
	    sizeof (struct trapframe);
	pcb->pcb_pmap = pmap_kernel();
	pcb->pcb_cr3 = pcb->pcb_pmap->pm_pdirpa;
#endif
	ci->ci_curpmap = pmap_kernel();

	/* further PCB init done later. */

	printf(": ");

	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		printf("(uniprocessor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY;
		identifycpu(ci);
#ifdef MTRR
		mem_range_attach();
#endif
		cpu_init(ci);
		cpu_init_mwait(&ci->ci_dev);
		break;

	case CPU_ROLE_BP:
		printf("apid %d (boot processor)\n", caa->cpu_number);
		ci->ci_flags |= CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY;
		identifycpu(ci);
#ifdef MTRR
		mem_range_attach();
#endif
		cpu_init(ci);

#if NLAPIC > 0
		/*
		 * Enable local apic
		 */
		lapic_enable();
		lapic_calibrate_timer(ci);
#endif
#if NIOAPIC > 0
		ioapic_bsp_id = caa->cpu_number;
#endif
		cpu_init_mwait(&ci->ci_dev);
		break;

	case CPU_ROLE_AP:
		/*
		 * report on an AP
		 */
		printf("apid %d (application processor)\n", caa->cpu_number);

#ifdef MULTIPROCESSOR
		gdt_alloc_cpu(ci);
		ci->ci_flags |= CPUF_PRESENT | CPUF_AP;
		identifycpu(ci);
		sched_init_cpu(ci);
		ci->ci_next = cpu_info_list->ci_next;
		cpu_info_list->ci_next = ci;
		ncpus++;
#endif
		break;

	default:
		panic("unknown processor type??");
	}

#ifdef MULTIPROCESSOR
	if (mp_verbose) {
		printf("%s: kstack at 0x%lx for %d bytes\n",
		    ci->ci_dev.dv_xname, kstack, USPACE);
		printf("%s: idle pcb at %p, idle sp at 0x%x\n",
		    ci->ci_dev.dv_xname, pcb, pcb->pcb_esp);
	}
#endif
}
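A sketch of the idle-stack carving above: the ring-0 stack pointer is placed at the top of the USPACE allocation, minus a 16-byte pad and room for one trapframe. The structure layout and constants are stand-ins, not the real i386 definitions:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define USPACE	(4 * 4096)	/* hypothetical u-area size */

struct trapframe { uint32_t regs[19]; };	/* hypothetical layout */

int
main(void)
{
	uintptr_t kstack = (uintptr_t)malloc(USPACE);
	uintptr_t esp0 = kstack + USPACE - 16 - sizeof(struct trapframe);

	printf("stack %#lx..%#lx, esp0 %#lx (%zu-byte trapframe)\n",
	    (unsigned long)kstack, (unsigned long)(kstack + USPACE),
	    (unsigned long)esp0, sizeof(struct trapframe));
	free((void *)kstack);
	return 0;
}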
Example #11
/*
 * Initialize the BIOS32 interface.
 */
void
bios32_init(void)
{
#if 0	/* XXXfvdl need to set up compatibility segment for this */
	paddr_t entry = 0;
	void *p;
	unsigned char cksum;
	int i;

	for (p = (void *)ISA_HOLE_VADDR(BIOS32_START);
	     p < (void *)ISA_HOLE_VADDR(BIOS32_END);
	     p += 16) {
		if (*(int *)p != BIOS32_MAKESIG('_', '3', '2', '_'))
			continue;

		cksum = 0;
		for (i = 0; i < 16; i++)
			cksum += *(unsigned char *)(p + i);
		if (cksum != 0)
			continue;

		if (*(p + 9) != 1)
			continue;

		entry = *(uint32_t *)(p + 4);

		aprint_verbose("BIOS32 rev. %d found at 0x%lx\n",
		    *(p + 8), entry);

		if (entry < BIOS32_START ||
		    entry >= BIOS32_END) {
			aprint_error("BIOS32 entry point outside "
			    "allowable range\n");
			entry = 0;
		}
		break;
	}

	if (entry != 0) {
		bios32_entry.offset = (void *)ISA_HOLE_VADDR(entry);
		bios32_entry.segment = GSEL(GCODE_SEL, SEL_KPL);
	}
#endif
	uint8_t *p;
	int i;

	/* see if we have SMBIOS extensions */
	for (p = ISA_HOLE_VADDR(SMBIOS_START);
	    p < (uint8_t *)ISA_HOLE_VADDR(SMBIOS_END); p+= 16) {
		struct smbhdr * sh = (struct smbhdr *)p;
		uint8_t chksum;
		vaddr_t eva;
		paddr_t pa, end;

		if (sh->sig != BIOS32_MAKESIG('_', 'S', 'M', '_'))
			continue;
		i = sh->len;
		for (chksum = 0; i--; )
			chksum += p[i];
		if (chksum != 0)
			continue;
		p += 0x10;
		if (p[0] != '_' && p[1] != 'D' && p[2] != 'M' &&
		    p[3] != 'I' && p[4] != '_')
			continue;
		for (chksum = 0, i = 0xf; i--; )
			chksum += p[i];
		if (chksum != 0)
			continue;

		pa = trunc_page(sh->addr);
		end = round_page(sh->addr + sh->size);
		eva = uvm_km_alloc(kernel_map, end - pa, 0, UVM_KMF_VAONLY);
		if (eva == 0)
			break;

		smbios_entry.addr = (uint8_t *)(eva +
		    (sh->addr & PGOFSET));
		smbios_entry.len = sh->size;
		smbios_entry.mjr = sh->majrev;
		smbios_entry.min = sh->minrev;
		smbios_entry.count = sh->count;

		for (; pa < end; pa += NBPG, eva += NBPG)
#ifdef XEN
			pmap_kenter_ma(eva, pa, VM_PROT_READ, 0);
#else
			pmap_kenter_pa(eva, pa, VM_PROT_READ, 0);
#endif
		pmap_update(pmap_kernel());

		aprint_debug("SMBIOS rev. %d.%d @ 0x%lx (%d entries)\n",
			    sh->majrev, sh->minrev, (u_long)sh->addr,
			    sh->count);

		break;
	}
}
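The anchor scan above walks the BIOS window on 16-byte boundaries looking for the "_SM_" signature and then verifies that the entry point's bytes sum to zero modulo 256. A user-space model against a synthetic buffer:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int
checksum_ok(const uint8_t *p, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += p[i];
	return sum == 0;
}

int
main(void)
{
	uint8_t region[64] = { 0 };	/* synthetic BIOS window */
	uint8_t *p, *ep = region + 16;
	uint8_t sum = 0;
	int i, len = 0x1f;

	memcpy(ep, "_SM_", 4);		/* plant an entry point */
	ep[5] = len;			/* entry point length */
	for (i = 0; i < len; i++)
		sum += ep[i];
	ep[4] = (uint8_t)-sum;		/* fix up the checksum byte */

	for (p = region; p < region + sizeof(region); p += 16)
		if (memcmp(p, "_SM_", 4) == 0 && checksum_ok(p, p[5]))
			printf("anchor at offset %td\n", p - region);
	return 0;
}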
Example #12
void
sfasinitialize(struct sfas_softc *dev)
{
	int		 i;

	dev->sc_led_status = 0;

	TAILQ_INIT(&dev->sc_xs_pending);
	TAILQ_INIT(&dev->sc_xs_free);

/*
 * Initialize the sfas_pending structs and link them into the free list. We
 * have to set vm_link_data.pages to 0 or the vm FIX won't work.
 */
	for(i=0; i<MAXPENDING; i++) {
		TAILQ_INSERT_TAIL(&dev->sc_xs_free, &dev->sc_xs_store[i],
				  link);
	}

/*
 * Calculate the correct clock conversion factor 2 <= factor <= 8, i.e. set
 * the factor to clock_freq / 5 (int).
 */
	if (dev->sc_clock_freq <= 10)
		dev->sc_clock_conv_fact = 2;
	else if (dev->sc_clock_freq <= 40)
		dev->sc_clock_conv_fact = 2+((dev->sc_clock_freq-10)/5);
	else
		panic("sfasinitialize: Clock frequency too high");

/* Setup and save the basic configuration registers */
	dev->sc_config1 = (dev->sc_host_id & SFAS_CFG1_BUS_ID_MASK);
	dev->sc_config2 = SFAS_CFG2_FEATURES_ENABLE;
	dev->sc_config3 = (dev->sc_clock_freq > 25 ? SFAS_CFG3_FASTCLK : 0);

/* Precalculate timeout value and clock period. */
/* Ekkk ... floating point in the kernel !!!! */
/*	dev->sc_timeout_val  = 1+dev->sc_timeout*dev->sc_clock_freq/
				 (7.682*dev->sc_clock_conv_fact);*/
	dev->sc_timeout_val  = 1+dev->sc_timeout*dev->sc_clock_freq/
				 ((7682*dev->sc_clock_conv_fact)/1000);
	dev->sc_clock_period = 1000/dev->sc_clock_freq;

	sfasreset(dev, 1 | 2);	/* Reset Chip and Bus */

	dev->sc_units_disconnected = 0;
	dev->sc_msg_in_len = 0;
	dev->sc_msg_out_len = 0;

	dev->sc_flags = 0;

	for(i=0; i<8; i++)
		sfas_init_nexus(dev, &dev->sc_nexus[i]);

	if (dev->sc_ixfer == NULL)
		dev->sc_ixfer = sfas_ixfer;

/*
 * Setup bump buffer.
 */
	dev->sc_bump_va = (u_char *)uvm_km_alloc(kernel_map, dev->sc_bump_sz, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	(void) pmap_extract(pmap_kernel(), (vaddr_t)dev->sc_bump_va,
	    (paddr_t *)&dev->sc_bump_pa);

/*
 * Set the pages to noncacheable, so we don't have to flush the cache
 * every time we need a "bumped" transfer.
 */
	pt_entry_t * const ptep = vtopte((vaddr_t) dev->sc_bump_va);
	const pt_entry_t opte = *ptep;
	const pt_entry_t npte = opte & ~(L2_C | L2_B);
	l2pte_set(ptep, npte, opte);
	PTE_SYNC(ptep);
	cpu_tlb_flushD();
	cpu_dcache_wbinv_range((vaddr_t)dev->sc_bump_va, PAGE_SIZE);

	printf(" dmabuf V0x%08x P0x%08x", (u_int)dev->sc_bump_va, (u_int)dev->sc_bump_pa);
}
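The commented-out floating-point expression above is replaced by scaling 7.682 to 7682/1000, so the timeout can be computed entirely in integer arithmetic. A stand-alone illustration with arbitrary sample values:

#include <stdio.h>

int
main(void)
{
	int timeout = 250;	/* arbitrary timeout units */
	int clock_freq = 25;	/* MHz */
	int conv_fact = 2 + (clock_freq - 10) / 5;	/* as in the driver */
	int timeout_val;

	/* 1 + timeout*clock_freq/(7.682*conv_fact), in integers */
	timeout_val = 1 + timeout * clock_freq /
	    ((7682 * conv_fact) / 1000);

	printf("conv_fact %d, timeout_val %d\n", conv_fact, timeout_val);
	return 0;
}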
Example #13
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	vaddr_t va;
	paddr_t pa;
	int o, c;
	struct iovec *iov;
	int error = 0;
	static int physlock;
	vm_prot_t prot;
	extern void *vmmap;

	if (minor(dev) == DEV_MEM) {
		/* lock against other uses of shared vmmap */
		while (physlock > 0) {
			physlock++;
			error = tsleep((void *)&physlock, PZERO | PCATCH,
			    "mmrw", 0);
			if (error)
				return (error);
		}
		physlock = 1;
	}
	while (uio->uio_resid > 0 && error == 0) {
		int n;
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}

		/* Note how much is still to go */
		n = uio->uio_resid;

		switch (minor(dev)) {

		case DEV_MEM:
			pa = (paddr_t)uio->uio_offset;
			if (!pmap_pa_exists(pa)) {
				error = EFAULT;
				goto unlock;
			}
			prot = uio->uio_rw == UIO_READ ? VM_PROT_READ :
			    VM_PROT_WRITE;
			pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
			    trunc_page(pa), prot, prot|PMAP_WIRED);
			pmap_update(pmap_kernel());
			o = uio->uio_offset & PGOFSET;
			c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
			error = uiomove((char *)vmmap + o, c, uio);
			pmap_remove(pmap_kernel(),
			    (vaddr_t)vmmap, (vaddr_t)vmmap + PAGE_SIZE);
			pmap_update(pmap_kernel());
			break;

		case DEV_KMEM:
			va = (vaddr_t)uio->uio_offset;
			if (va >= MSGBUF_VA && va < MSGBUF_VA+PAGE_SIZE) {
				c = min(iov->iov_len, 4096);
			} else if (va >= prom_vstart && va < prom_vend &&
				   uio->uio_rw == UIO_READ) {
				/* Allow read-only access to the PROM */
				c = min(iov->iov_len, prom_vend - prom_vstart);
			} else {
				c = min(iov->iov_len, MAXPHYS);
				if (!uvm_kernacc((void *)va, c,
				    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
					return (EFAULT);
			}
			error = uiomove((void *)va, c, uio);
			break;

		case DEV_NULL:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

/* XXX should add sbus, etc */

#if defined(SUN4)
		case DEV_EEPROM:
			if (cputyp == CPU_SUN4)
				error = eeprom_uio(uio);
			else
				error = ENXIO;

			break;
#endif /* SUN4 */

		case DEV_ZERO:
			if (uio->uio_rw == UIO_WRITE) {
				uio->uio_resid = 0;
				return(0);
			}
			if (zeropage == NULL) {
				zeropage = (void *)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
				bzero(zeropage, PAGE_SIZE);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zeropage, c, uio);
			break;

		default:
			return (ENXIO);
		}

		/* If we didn't make any progress (i.e. EOF), we're done here */
		if (n == uio->uio_resid)
			break;
	}
	if (minor(dev) == 0) {
unlock:
		if (physlock > 1)
			wakeup((void *)&physlock);
		physlock = 0;
	}
	return (error);
}
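The DEV_MEM case above transfers through a single-page window: each pass maps the page containing the current offset and copies at most the remainder of that page. The arithmetic in isolation:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PGOFSET		(PAGE_SIZE - 1)

int
main(void)
{
	unsigned long off = 0x12f80;	/* byte offset into memory */
	unsigned long resid = 0x300;	/* bytes left to transfer */

	while (resid > 0) {
		unsigned long o = off & PGOFSET;
		unsigned long c = PAGE_SIZE - o;

		if (c > resid)
			c = resid;
		printf("map page %#lx, copy %#lx bytes at offset %#lx\n",
		    off & ~PGOFSET, c, o);
		off += c;
		resid -= c;
	}
	return 0;
}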
Example #14
/*
 * void cpu_startup(void)
 *
 * Machine dependent startup code. 
 *
 */
void
cpu_startup(void)
{
	vaddr_t minaddr;
	vaddr_t maxaddr;
	char pbuf[9];

	/*
	 * Until we have better locking, we have to live under the kernel lock.
	 */
	//KERNEL_LOCK(1, NULL);

	/* Set the CPU control register */
	cpu_setup(boot_args);

#ifndef ARM_HAS_VBAR
	/* Lock down zero page */
	vector_page_setprot(VM_PROT_READ);
#endif

	/*
	 * Give pmap a chance to set up a few more things now the vm
	 * is initialised
	 */
	pmap_postinit();

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* msgbufphys was set up during the secondary bootstrap */
	if (!pmap_extract(pmap_kernel(), (vaddr_t)msgbufaddr, NULL)) {
		for (u_int loop = 0; loop < btoc(MSGBUFSIZE); ++loop) {
			pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
			    msgbufphys + loop * PAGE_SIZE,
			    VM_PROT_READ|VM_PROT_WRITE, 0);
		}
	}
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Identify ourselves for the msgbuf (everything printed earlier will
	 * not be buffered).
	 */
	printf("%s%s", copyright, version);

	format_bytes(pbuf, sizeof(pbuf), arm_ptob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	struct lwp * const l = &lwp0;
	struct pcb * const pcb = lwp_getpcb(l);
	pcb->pcb_ksp = uvm_lwp_getuarea(l) + USPACE_SVC_STACK_TOP;
	lwp_settrapframe(l, (struct trapframe *)pcb->pcb_ksp - 1);
}
Example #15
/*
 * Load a contiguous kva buffer into a dmamap.  The physical pages are
 * not assumed to be contiguous.  Two passes are made through the buffer
 * and both call pmap_extract() for the same va->pa translations.  It
 * is possible to run out of pa->dvma mappings; the code should be smart
 * enough to resize the iomap (when the "flags" permit allocation).  It
 * is trivial to compute the number of entries required (round the length
 * up to the page size and then divide by the page size)...
 */
int
viommu_dvmamap_load(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map,
    void *buf, bus_size_t buflen, struct proc *p, int flags)
{
	int err = 0;
	bus_size_t sgsize;
	u_long dvmaddr, sgstart, sgend;
	bus_size_t align, boundary;
	struct iommu_state *is;
	struct iommu_map_state *ims = map->_dm_cookie;
	pmap_t pmap;

#ifdef DIAGNOSTIC
	if (ims == NULL)
		panic("viommu_dvmamap_load: null map state");
	if (ims->ims_iommu == NULL)
		panic("viommu_dvmamap_load: null iommu");
#endif
	is = ims->ims_iommu;

	if (map->dm_nsegs) {
		/*
		 * Is it still in use? _bus_dmamap_load should have taken care
		 * of this.
		 */
#ifdef DIAGNOSTIC
		panic("iommu_dvmamap_load: map still in use");
#endif
		bus_dmamap_unload(t0, map);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;

	if (buflen < 1 || buflen > map->_dm_size) {
		DPRINTF(IDB_BUSDMA,
		    ("iommu_dvmamap_load(): error %d > %d -- "
		     "map size exceeded!\n", (int)buflen, (int)map->_dm_size));
		return (EINVAL);
	}

	/*
	 * A boundary presented to bus_dmamem_alloc() takes precedence
	 * over boundary in the map.
	 */
	if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
		boundary = map->_dm_boundary;
	align = MAX(map->dm_segs[0]._ds_align, PAGE_SIZE);

	pmap = p ? p->p_vmspace->vm_map.pmap : pmap_kernel();

	/* Count up the total number of pages we need */
	iommu_iomap_clear_pages(ims);
	{ /* Scope */
		bus_addr_t a, aend;
		bus_addr_t addr = (vaddr_t)buf;
		int seg_len = buflen;

		aend = round_page(addr + seg_len);
		for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
			paddr_t pa;

			if (pmap_extract(pmap, a, &pa) == FALSE) {
				printf("iomap pmap error addr 0x%llx\n", a);
				iommu_iomap_clear_pages(ims);
				return (EFBIG);
			}

			err = iommu_iomap_insert_page(ims, pa);
			if (err) {
				printf("iomap insert error: %d for "
				    "va 0x%llx pa 0x%lx "
				    "(buf %p len %lld/%llx)\n",
				    err, a, pa, buf, buflen, buflen);
				iommu_iomap_clear_pages(ims);
				return (EFBIG);
			}
		}
	}
	sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;

	mtx_enter(&is->is_mtx);
	if (flags & BUS_DMA_24BIT) {
		sgstart = MAX(is->is_dvmamap->ex_start, 0xff000000);
		sgend = MIN(is->is_dvmamap->ex_end, 0xffffffff);
	} else {
		sgstart = is->is_dvmamap->ex_start;
		sgend = is->is_dvmamap->ex_end;
	}

	/* 
	 * If our segment size is larger than the boundary we need to 
	 * split the transfer up into little pieces ourselves.
	 */
	err = extent_alloc_subregion(is->is_dvmamap, sgstart, sgend,
	    sgsize, align, 0, (sgsize > boundary) ? 0 : boundary, 
	    EX_NOWAIT | EX_BOUNDZERO, (u_long *)&dvmaddr);
	mtx_leave(&is->is_mtx);

#ifdef DEBUG
	if (err || (dvmaddr == (bus_addr_t)-1))	{ 
		printf("iommu_dvmamap_load(): extent_alloc(%d, %x) failed!\n",
		    (int)sgsize, flags);
#ifdef DDB
		if (iommudebug & IDB_BREAK)
			Debugger();
#endif
	}		
#endif	
	if (err != 0)
		return (err);

	if (dvmaddr == (bus_addr_t)-1)
		return (ENOMEM);

	/* Set the active DVMA map */
	map->_dm_dvmastart = dvmaddr;
	map->_dm_dvmasize = sgsize;

	map->dm_mapsize = buflen;

	if (viommu_iomap_load_map(is, ims, dvmaddr, flags))
		return (EFBIG);

	{ /* Scope */
		bus_addr_t a, aend;
		bus_addr_t addr = (vaddr_t)buf;
		int seg_len = buflen;

		aend = round_page(addr + seg_len);
		for (a = trunc_page(addr); a < aend; a += PAGE_SIZE) {
			bus_addr_t pgstart;
			bus_addr_t pgend;
			paddr_t pa;
			int pglen;

			/* Yuck... Redoing the same pmap_extract... */
			if (pmap_extract(pmap, a, &pa) == FALSE) {
				printf("iomap pmap error addr 0x%llx\n", a);
				iommu_iomap_clear_pages(ims);
				return (EFBIG);
			}

			pgstart = pa | (MAX(a, addr) & PAGE_MASK);
			pgend = pa | (MIN(a + PAGE_SIZE - 1,
			    addr + seg_len - 1) & PAGE_MASK);
			pglen = pgend - pgstart + 1;

			if (pglen < 1)
				continue;

			err = viommu_dvmamap_append_range(t, map, pgstart,
			    pglen, flags, boundary);
			if (err == EFBIG)
				return (err);
			if (err) {
				printf("iomap load seg page: %d for "
				    "va 0x%llx pa %lx (%llx - %llx) "
				    "for %d/0x%x\n",
				    err, a, pa, pgstart, pgend, pglen, pglen);
				return (err);
			}
		}
	}

	return (err);
}
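The second pass above clips each page's contribution to the intersection of the page and the buffer, via the MAX/MIN expressions on pgstart and pgend. The clipping arithmetic on its own, with synthetic addresses and no pmap lookups:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(PAGE_SIZE - 1)
#define MAXV(x, y)	((x) > (y) ? (x) : (y))
#define MINV(x, y)	((x) < (y) ? (x) : (y))

int
main(void)
{
	unsigned long addr = 0x10f00, len = 0x1200;
	unsigned long a, aend = (addr + len + PAGE_MASK) & ~PAGE_MASK;

	for (a = addr & ~PAGE_MASK; a < aend; a += PAGE_SIZE) {
		unsigned long start = MAXV(a, addr) & PAGE_MASK;
		unsigned long end = MINV(a + PAGE_SIZE - 1,
		    addr + len - 1) & PAGE_MASK;

		printf("page %#lx: offsets %#lx..%#lx (%lu bytes)\n",
		    a, start, end, end - start + 1);
	}
	return 0;
}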
Example #16
void
vdsp_read_desc(struct vdsp_softc *sc, struct vdsk_desc_msg *dm)
{
	struct ldc_conn *lc = &sc->sc_lc;
	struct proc *p = curproc;
	struct iovec iov;
	struct uio uio;
	caddr_t buf;
	vaddr_t va;
	paddr_t pa;
	uint64_t size, off;
	psize_t nbytes;
	int err, i;

	if (sc->sc_vp == NULL)
		return;

	buf = malloc(dm->size, M_DEVBUF, M_WAITOK);

	iov.iov_base = buf;
	iov.iov_len = dm->size;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = dm->offset * DEV_BSIZE;
	uio.uio_resid = dm->size;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_procp = p;

	vn_lock(sc->sc_vp, LK_EXCLUSIVE | LK_RETRY, p);
	dm->status = VOP_READ(sc->sc_vp, &uio, 0, p->p_ucred);
	VOP_UNLOCK(sc->sc_vp, 0, p);

	KERNEL_UNLOCK();
	if (dm->status == 0) {
		i = 0;
		va = (vaddr_t)buf;
		size = dm->size;
		off = 0;
		while (size > 0 && i < dm->ncookies) {
			pmap_extract(pmap_kernel(), va, &pa);
			nbytes = MIN(size, dm->cookie[i].size - off);
			nbytes = MIN(nbytes, PAGE_SIZE - (off & PAGE_MASK));
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_OUT,
			    dm->cookie[i].addr + off, pa, nbytes, &nbytes);
			if (err != H_EOK) {
				printf("%s: hv_ldc_copy: %d\n", __func__, err);
				dm->status = EIO;
				KERNEL_LOCK();
				goto fail;
			}
			va += nbytes;
			size -= nbytes;
			off += nbytes;
			if (off >= dm->cookie[i].size) {
				off = 0;
				i++;
			}
		}
	}
	KERNEL_LOCK();

fail:
	free(buf, M_DEVBUF, 0);

	/* ACK the descriptor. */
	dm->tag.stype = VIO_SUBTYPE_ACK;
	dm->tag.sid = sc->sc_local_sid;
	vdsp_sendmsg(sc, dm, sizeof(*dm) +
	    (dm->ncookies - 1) * sizeof(struct ldc_cookie), 1);
}
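Each hypervisor copy in the loop above is limited both by what remains in the current cookie and by the current page boundary, advancing to the next cookie when one is exhausted. A stand-alone walk with invented cookie sizes:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(PAGE_SIZE - 1)
#define MINV(x, y)	((x) < (y) ? (x) : (y))

int
main(void)
{
	unsigned long cookie_size[] = { 0x1800, 0x2000 };
	unsigned long size = 0x2800, off = 0;
	int i = 0;

	while (size > 0 && i < 2) {
		unsigned long nbytes = MINV(size, cookie_size[i] - off);

		nbytes = MINV(nbytes, PAGE_SIZE - (off & PAGE_MASK));
		printf("cookie %d off %#lx: copy %#lx bytes\n", i, off, nbytes);
		size -= nbytes;
		off += nbytes;
		if (off >= cookie_size[i]) {
			off = 0;
			i++;
		}
	}
	return 0;
}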
Example #17
/*
 * Handle an exception.
 * In the case of a kernel trap, we resume at the pc designated by
 * pcb_onfault if it is set; otherwise we resume at the old pc.
 */
void
trap(struct trap_frame *trapframe)
{
	struct cpu_info *ci = curcpu();
	int type, i;
	unsigned ucode = 0;
	struct proc *p = ci->ci_curproc;
	vm_prot_t ftype;
	extern vaddr_t onfault_table[];
	int onfault;
	int typ = 0;
	union sigval sv;

	trapdebug_enter(ci, trapframe, -1);

	type = (trapframe->cause & CR_EXC_CODE) >> CR_EXC_CODE_SHIFT;
	if (USERMODE(trapframe->sr)) {
		type |= T_USER;
	}

	/*
	 * Enable hardware interrupts if they were on before the trap;
	 * enable IPI interrupts only otherwise.
	 */
	if (type != T_BREAK) {
		if (ISSET(trapframe->sr, SR_INT_ENAB))
			enableintr();
		else {
#ifdef MULTIPROCESSOR
			ENABLEIPI();
#endif
		}
	}

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address */
		if (trapframe->badvaddr < 0) {
			pt_entry_t *pte, entry;
			paddr_t pa;
			vm_page_t pg;

			pte = kvtopte(trapframe->badvaddr);
			entry = *pte;
#ifdef DIAGNOSTIC
			if (!(entry & PG_V) || (entry & PG_M))
				panic("trap: ktlbmod: invalid pte");
#endif
			if (pmap_is_page_ro(pmap_kernel(),
			    trunc_page(trapframe->badvaddr), entry)) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			entry |= PG_M;
			*pte = entry;
			KERNEL_LOCK();
			pmap_update_kernel_page(trapframe->badvaddr & ~PGOFSET,
			    entry);
			pa = pfn_to_pad(entry);
			pg = PHYS_TO_VM_PAGE(pa);
			if (pg == NULL)
				panic("trap: ktlbmod: unmanaged page");
			pmap_set_modify(pg);
			KERNEL_UNLOCK();
			return;
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		pt_entry_t *pte, entry;
		paddr_t pa;
		vm_page_t pg;
		pmap_t pmap = p->p_vmspace->vm_map.pmap;

		if (!(pte = pmap_segmap(pmap, trapframe->badvaddr)))
			panic("trap: utlbmod: invalid segmap");
		pte += uvtopte(trapframe->badvaddr);
		entry = *pte;
#ifdef DIAGNOSTIC
		if (!(entry & PG_V) || (entry & PG_M))
			panic("trap: utlbmod: invalid pte");
#endif
		if (pmap_is_page_ro(pmap,
		    trunc_page(trapframe->badvaddr), entry)) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			goto fault_common;
		}
		entry |= PG_M;
		*pte = entry;
		KERNEL_LOCK();
		pmap_update_user_page(pmap, (trapframe->badvaddr & ~PGOFSET), 
		    entry);
		pa = pfn_to_pad(entry);
		pg = PHYS_TO_VM_PAGE(pa);
		if (pg == NULL)
			panic("trap: utlbmod: unmanaged page");
		pmap_set_modify(pg);
		KERNEL_UNLOCK();
		if (!USERMODE(trapframe->sr))
			return;
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		/* check for kernel address */
		if (trapframe->badvaddr < 0) {
			vaddr_t va;
			int rv;

	kernel_fault:
			va = trunc_page((vaddr_t)trapframe->badvaddr);
			onfault = p->p_addr->u_pcb.pcb_onfault;
			p->p_addr->u_pcb.pcb_onfault = 0;
			KERNEL_LOCK();
			rv = uvm_fault(kernel_map, trunc_page(va), 0, ftype);
			KERNEL_UNLOCK();
			p->p_addr->u_pcb.pcb_onfault = onfault;
			if (rv == 0)
				return;
			if (onfault != 0) {
				p->p_addr->u_pcb.pcb_onfault = 0;
				trapframe->pc = onfault_table[onfault];
				return;
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if (p->p_addr->u_pcb.pcb_onfault != 0) {
			/*
			 * Try to resolve the TLB fault first; invoke
			 * pcb_onfault only if that fails.
			 */
			goto fault_common;
		} else {
			goto err;
		}

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto fault_common;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
fault_common:
	    {
		vaddr_t va;
		struct vmspace *vm;
		vm_map_t map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page((vaddr_t)trapframe->badvaddr);

		onfault = p->p_addr->u_pcb.pcb_onfault;
		p->p_addr->u_pcb.pcb_onfault = 0;
		KERNEL_LOCK();

		rv = uvm_fault(map, trunc_page(va), 0, ftype);
		p->p_addr->u_pcb.pcb_onfault = onfault;

		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == 0)
				uvm_grow(p, va);
			else if (rv == EACCES)
				rv = EFAULT;
		}
		KERNEL_UNLOCK();
		if (rv == 0) {
			if (!USERMODE(trapframe->sr))
				return;
			goto out;
		}
		if (!USERMODE(trapframe->sr)) {
			if (onfault != 0) {
				p->p_addr->u_pcb.pcb_onfault = 0;
				trapframe->pc = onfault_table[onfault];
				return;
			}
			goto err;
		}

#ifdef ADEBUG
printf("SIG-SEGV @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapframe->ra);
#endif
		ucode = ftype;
		i = SIGSEGV;
		typ = SEGV_MAPERR;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
		ucode = 0;		/* XXX should be VM_PROT_something */
		i = SIGBUS;
		typ = BUS_ADRALN;
#ifdef ADEBUG
printf("SIG-BUSA @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapframe->ra);
#endif
		break;
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		ucode = 0;		/* XXX should be VM_PROT_something */
		i = SIGBUS;
		typ = BUS_OBJERR;
#ifdef ADEBUG
printf("SIG-BUSB @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapframe->ra);
#endif
		break;

	case T_SYSCALL+T_USER:
	    {
		struct trap_frame *locr0 = p->p_md.md_regs;
		struct sysent *callp;
		unsigned int code;
		unsigned long tpc;
		int numsys;
		struct args {
			register_t i[8];
		} args;
		register_t rval[2];

		uvmexp.syscalls++;

		/* compute next PC after syscall instruction */
		tpc = trapframe->pc; /* remember, in case of restart */
		if (trapframe->cause & CR_BR_DELAY)
			locr0->pc = MipsEmulateBranch(locr0,
			    trapframe->pc, 0, 0);
		else
			locr0->pc += 4;
		callp = p->p_emul->e_sysent;
		numsys = p->p_emul->e_nsysent;
		code = locr0->v0;
		switch (code) {
		case SYS_syscall:
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = locr0->a0;
			if (code >= numsys)
				callp += p->p_emul->e_nosys; /* (illegal) */
			else
				callp += code;
			i = callp->sy_argsize / sizeof(register_t);
			args.i[0] = locr0->a1;
			args.i[1] = locr0->a2;
			args.i[2] = locr0->a3;
			if (i > 3) {
				args.i[3] = locr0->a4;
				args.i[4] = locr0->a5;
				args.i[5] = locr0->a6;
				args.i[6] = locr0->a7;
				i = copyin((void *)locr0->sp,
				    &args.i[7], sizeof(register_t));
			}
			break;

		case SYS___syscall:
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = locr0->a0;
			args.i[0] = locr0->a1;
			args.i[1] = locr0->a2;
			args.i[2] = locr0->a3;

			if (code >= numsys)
				callp += p->p_emul->e_nosys; /* (illegal) */
			else
				callp += code;
			i = callp->sy_argsize / sizeof(register_t);
			if (i > 3) {
				args.i[3] = locr0->a4;
				args.i[4] = locr0->a5;
				args.i[5] = locr0->a6;
				args.i[6] = locr0->a7;
				i = copyin((void *)locr0->sp, &args.i[7],
				    sizeof(register_t));
			}
			break;

		default:
			if (code >= numsys)
				callp += p->p_emul->e_nosys; /* (illegal) */
			else
				callp += code;

			i = callp->sy_narg;
			args.i[0] = locr0->a0;
			args.i[1] = locr0->a1;
			args.i[2] = locr0->a2;
			args.i[3] = locr0->a3;
			if (i > 4) {
				args.i[4] = locr0->a4;
				args.i[5] = locr0->a5;
				args.i[6] = locr0->a6;
				args.i[7] = locr0->a7;
			}
		}
#ifdef SYSCALL_DEBUG
		KERNEL_LOCK();
		scdebug_call(p, code, args.i);
		KERNEL_UNLOCK();
#endif
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL)) {
			KERNEL_LOCK();
			ktrsyscall(p, code, callp->sy_argsize, args.i);
			KERNEL_UNLOCK();
		}
#endif
		rval[0] = 0;
		rval[1] = locr0->v1;
#if defined(DDB) || defined(DEBUG)
		trapdebug[TRAPSIZE * ci->ci_cpuid + (trppos[ci->ci_cpuid] == 0 ?
		    TRAPSIZE : trppos[ci->ci_cpuid]) - 1].code = code;
#endif

#if NSYSTRACE > 0
		if (ISSET(p->p_flag, P_SYSTRACE)) {
			KERNEL_LOCK();
			i = systrace_redirect(code, p, args.i, rval);
			KERNEL_UNLOCK();
		} else
#endif
		{
			int nolock = (callp->sy_flags & SY_NOLOCK);
			if (!nolock)
				KERNEL_LOCK();
			i = (*callp->sy_call)(p, &args, rval);
			if (!nolock)
				KERNEL_UNLOCK();
		}
		switch (i) {
		case 0:
			locr0->v0 = rval[0];
			locr0->v1 = rval[1];
			locr0->a3 = 0;
			break;

		case ERESTART:
			locr0->pc = tpc;
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
			locr0->v0 = i;
			locr0->a3 = 1;
		}
#ifdef SYSCALL_DEBUG
		KERNEL_LOCK();
		scdebug_ret(p, code, i, rval);
		KERNEL_UNLOCK();
#endif
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET)) {
			KERNEL_LOCK();
			ktrsysret(p, code, i, rval[0]);
			KERNEL_UNLOCK();
		}
#endif
		goto out;
	    }

	case T_BREAK:
#ifdef DDB
		kdb_trap(type, trapframe);
#endif
		/* Reenable interrupts if necessary */
		if (trapframe->sr & SR_INT_ENAB) {
			enableintr();
		}
		return;

	case T_BREAK+T_USER:
	    {
		caddr_t va;
		u_int32_t instr;
		struct trap_frame *locr0 = p->p_md.md_regs;

		/* compute address of break instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;

		/* read break instruction */
		copyin(va, &instr, sizeof(int32_t));

		switch ((instr & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT) {
		case 6:	/* gcc range error */
			i = SIGFPE;
			typ = FPE_FLTSUB;
			/* skip instruction */
			if (trapframe->cause & CR_BR_DELAY)
				locr0->pc = MipsEmulateBranch(locr0,
				    trapframe->pc, 0, 0);
			else
				locr0->pc += 4;
			break;
		case 7:	/* gcc3 divide by zero */
			i = SIGFPE;
			typ = FPE_INTDIV;
			/* skip instruction */
			if (trapframe->cause & CR_BR_DELAY)
				locr0->pc = MipsEmulateBranch(locr0,
				    trapframe->pc, 0, 0);
			else
				locr0->pc += 4;
			break;
#ifdef PTRACE
		case BREAK_SSTEP_VAL:
			if (p->p_md.md_ss_addr == (long)va) {
#ifdef DEBUG
				printf("trap: %s (%d): breakpoint at %p "
				    "(insn %08x)\n",
				    p->p_comm, p->p_pid,
				    p->p_md.md_ss_addr, p->p_md.md_ss_instr);
#endif

				/* Restore original instruction and clear BP */
				process_sstep(p, 0);
				typ = TRAP_BRKPT;
			} else {
				typ = TRAP_TRACE;
			}
			i = SIGTRAP;
			break;
#endif
#ifdef FPUEMUL
		case BREAK_FPUEMUL_VAL:
			/*
			 * If this is a genuine FP emulation break,
			 * resume execution to our branch destination.
			 */
			if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
			    p->p_md.md_fppgva + 4 == (vaddr_t)va) {
				struct vm_map *map = &p->p_vmspace->vm_map;

				p->p_md.md_flags &= ~MDP_FPUSED;
				locr0->pc = p->p_md.md_fpbranchva;

				/*
				 * Prevent access to the relocation page.
				 * XXX needs to be fixed to work with rthreads
				 */
				uvm_fault_unwire(map, p->p_md.md_fppgva,
				    p->p_md.md_fppgva + PAGE_SIZE);
				(void)uvm_map_protect(map, p->p_md.md_fppgva,
				    p->p_md.md_fppgva + PAGE_SIZE,
				    UVM_PROT_NONE, FALSE);
				goto out;
			}
			/* FALLTHROUGH */
#endif
		default:
			typ = TRAP_TRACE;
			i = SIGTRAP;
			break;
		}

		break;
	    }

	case T_IWATCH+T_USER:
	case T_DWATCH+T_USER:
	    {
		caddr_t va;
		/* compute address of trapped instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		printf("watch exception @ %p\n", va);
#ifdef RM7K_PERFCNTR
		if (rm7k_watchintr(trapframe)) {
			/* Return to user, don't add any more overhead */
			goto out;
		}
#endif
		i = SIGTRAP;
		typ = TRAP_BRKPT;
		break;
	    }

	case T_TRAP+T_USER:
	    {
		caddr_t va;
		u_int32_t instr;
		struct trap_frame *locr0 = p->p_md.md_regs;

		/* compute address of trap instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		/* read the trap instruction */
		copyin(va, &instr, sizeof(int32_t));

		if (trapframe->cause & CR_BR_DELAY)
			locr0->pc = MipsEmulateBranch(locr0,
			    trapframe->pc, 0, 0);
		else
			locr0->pc += 4;
#ifdef RM7K_PERFCNTR
		if (instr == 0x040c0000) { /* Performance cntr trap */
			int result;

			result = rm7k_perfcntr(trapframe->a0, trapframe->a1,
						trapframe->a2, trapframe->a3);
			locr0->v0 = -result;
			/* Return to user, don't add any more overhead */
			goto out;
		} else
#endif
		/*
		 * GCC 4 uses teq with code 7 to signal divide by
		 * zero at runtime. This is one instruction shorter
		 * than the BEQ + BREAK combination used by gcc 3.
		 */
		if ((instr & 0xfc00003f) == 0x00000034 /* teq */ &&
		    (instr & 0x001fffc0) == ((ZERO << 16) | (7 << 6))) {
			i = SIGFPE;
			typ = FPE_INTDIV;
		} else {
			i = SIGEMT;	/* Stuff it with something for now */
			typ = 0;
		}
		break;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		typ = ILL_ILLOPC;
		break;

	case T_COP_UNUSABLE+T_USER:
		/*
		 * Note MIPS IV COP1X instructions issued with FPU
		 * disabled correctly report coprocessor 1 as the
		 * unusable coprocessor number.
		 */
		if ((trapframe->cause & CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			typ = ILL_ILLOPC;
			break;
		}
#ifdef FPUEMUL
		MipsFPTrap(trapframe);
#else
		enable_fpu(p);
#endif
		goto out;

	case T_FPE:
		printf("FPU Trap: PC %x CR %x SR %x\n",
			trapframe->pc, trapframe->cause, trapframe->sr);
		goto err;

	case T_FPE+T_USER:
		MipsFPTrap(trapframe);
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		typ = FPE_FLTOVF;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		if ((onfault = p->p_addr->u_pcb.pcb_onfault) != 0) {
			p->p_addr->u_pcb.pcb_onfault = 0;
			trapframe->pc = onfault_table[onfault];
			return;
		}
		goto err;

	default:
	err:
		disableintr();
#if !defined(DDB) && defined(DEBUG)
		trapDump("trap");
#endif
		printf("\nTrap cause = %d Frame %p\n", type, trapframe);
		printf("Trap PC %p RA %p fault %p\n",
		    trapframe->pc, trapframe->ra, trapframe->badvaddr);
#ifdef DDB
		stacktrace(!USERMODE(trapframe->sr) ? trapframe : p->p_md.md_regs);
		kdb_trap(type, trapframe);
#endif
		panic("trap");
	}
#ifdef FPUEMUL
	/*
	 * If a relocated delay slot causes an exception, blame the
	 * original delay slot address - userland is not supposed to
	 * know anything about emulation bowels.
	 */
	if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
	    trapframe->badvaddr == p->p_md.md_fppgva)
		trapframe->badvaddr = p->p_md.md_fpslotva;
#endif
	p->p_md.md_regs->pc = trapframe->pc;
	p->p_md.md_regs->cause = trapframe->cause;
	p->p_md.md_regs->badvaddr = trapframe->badvaddr;
	sv.sival_ptr = (void *)trapframe->badvaddr;
	KERNEL_LOCK();
	trapsignal(p, i, ucode, typ, sv);
	KERNEL_UNLOCK();
out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	userret(p);
}
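For reference, a sketch of what the "teq" test in the T_TRAP case above
is matching (the macro and function names here are illustrative; the
bit layout is the standard MIPS encoding).  A trap-if-equal instruction
is encoded as opcode(6) rs(5) rt(5) code(10) funct(6), with opcode 0
(SPECIAL) and funct 0x34; gcc emits "teq rs, $zero, 7" for an integer
divide by zero, so rt must be $zero and the 10-bit code field must be 7:

#define MIPS_OP_FUNCT_MASK	0xfc00003fU	/* opcode and funct bits */
#define MIPS_SPECIAL_TEQ	0x00000034U	/* opcode SPECIAL, funct teq */
#define MIPS_RT_CODE_MASK	0x001fffc0U	/* rt (20:16), code (15:6) */

static int
is_gcc_divzero_teq(u_int32_t insn)
{
	return (insn & MIPS_OP_FUNCT_MASK) == MIPS_SPECIAL_TEQ &&
	    (insn & MIPS_RT_CODE_MASK) == (7 << 6);	/* rt=$zero, code=7 */
}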
Example #18
0
void
vdsp_write_dring(void *arg1, void *arg2)
{
	struct vdsp_softc *sc = arg1;
	struct ldc_conn *lc = &sc->sc_lc;
	struct vd_desc *vd = arg2;
	struct proc *p = curproc;
	struct iovec iov;
	struct uio uio;
	caddr_t buf;
	vaddr_t va;
	paddr_t pa;
	uint64_t size, off;
	psize_t nbytes;
	int err, i;

	if (sc->sc_vp == NULL)
		return;

	buf = malloc(vd->size, M_DEVBUF, M_WAITOK);

	KERNEL_UNLOCK();
	i = 0;
	va = (vaddr_t)buf;
	size = vd->size;
	off = 0;
	while (size > 0 && i < vd->ncookies) {
		pmap_extract(pmap_kernel(), va, &pa);
		nbytes = MIN(size, vd->cookie[i].size - off);
		nbytes = MIN(nbytes, PAGE_SIZE - (off & PAGE_MASK));
		err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
		    vd->cookie[i].addr + off, pa, nbytes, &nbytes);
		if (err != H_EOK) {
			printf("%s: hv_ldc_copy: %d\n", __func__, err);
			vd->status = EIO;
			KERNEL_LOCK();
			goto fail;
		}
		va += nbytes;
		size -= nbytes;
		off += nbytes;
		if (off >= vd->cookie[i].size) {
			off = 0;
			i++;
		}
	}
	KERNEL_LOCK();

	iov.iov_base = buf;
	iov.iov_len = vd->size;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = vd->offset * DEV_BSIZE;
	uio.uio_resid = vd->size;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_procp = p;

	vn_lock(sc->sc_vp, LK_EXCLUSIVE | LK_RETRY, p);
	vd->status = VOP_WRITE(sc->sc_vp, &uio, 0, p->p_ucred);
	VOP_UNLOCK(sc->sc_vp, 0, p);

fail:
	free(buf, M_DEVBUF, 0);

	/* ACK the descriptor. */
	vd->hdr.dstate = VIO_DESC_DONE;
	vdsp_ack_desc(sc, vd);
}
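vdsp_write_dring() mirrors vdsp_read_desc() above: it pulls the guest's
data in over the channel (LDC_COPY_IN) before writing it to the backing
vnode, while the read path reads the vnode first and then pushes the
data out (LDC_COPY_OUT).  Both functions build the same single-segment
kernel uio; as a sketch (this helper is hypothetical, not part of the
driver), the setup could be shared:

static void
vdsp_init_uio(struct uio *uio, struct iovec *iov, caddr_t buf,
    uint64_t size, uint64_t offset, enum uio_rw rw, struct proc *p)
{
	iov->iov_base = buf;
	iov->iov_len = size;
	uio->uio_iov = iov;
	uio->uio_iovcnt = 1;
	uio->uio_offset = offset * DEV_BSIZE;	/* disk sectors to bytes */
	uio->uio_resid = size;
	uio->uio_segflg = UIO_SYSSPACE;		/* buffer is in kernel space */
	uio->uio_rw = rw;
	uio->uio_procp = p;
}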
Example #19
0
/*
 * Finish a fork operation, with process p2 nearly set up.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct trapframe *tf;
	struct callframe *cf;
	struct switchframe *sf;
	caddr_t stktop1, stktop2;
	extern void fork_trampoline(void);
	struct pcb *pcb = &p2->p_addr->u_pcb;
	struct cpu_info *ci = curcpu();

	if (p1 == ci->ci_fpuproc)
		save_fpu();
	*pcb = p1->p_addr->u_pcb;
	
#ifdef ALTIVEC
	if (p1->p_addr->u_pcb.pcb_vr != NULL) {
		if (p1 == ci->ci_vecproc)
			save_vec(p1);
		pcb->pcb_vr = pool_get(&ppc_vecpl, PR_WAITOK);
		*pcb->pcb_vr = *p1->p_addr->u_pcb.pcb_vr;
	} else
		pcb->pcb_vr = NULL;

#endif /* ALTIVEC */

	pcb->pcb_pm = p2->p_vmspace->vm_map.pmap;

	pmap_extract(pmap_kernel(),
	    (vaddr_t)pcb->pcb_pm, (paddr_t *)&pcb->pcb_pmreal);
	
	/*
	 * Setup the trap frame for the new process
	 */
	stktop1 = (caddr_t)trapframe(p1);
	stktop2 = (caddr_t)trapframe(p2);
	bcopy(stktop1, stktop2, sizeof(struct trapframe));

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL) {
		tf = trapframe(p2);
		tf->fixreg[1] = (register_t)stack + stacksize;
	}

	stktop2 = (caddr_t)((u_long)stktop2 & ~15);  /* Align stack pointer */
	
	/*
	 * There happens to be a callframe, too.
	 */
	cf = (struct callframe *)stktop2;
	cf->lr = (int)fork_trampoline;
	
	/*
	 * Below the trap frame, there is another call frame:
	 */
	stktop2 -= 16;
	cf = (struct callframe *)stktop2;
	cf->r31 = (register_t)func;
	cf->r30 = (register_t)arg;
	
	/*
	 * Below that, we allocate the switch frame:
	 */
	/* must match SFRAMELEN in genassym */
	stktop2 -= roundup(sizeof *sf, 16);

	sf = (struct switchframe *)stktop2;
	bzero((void *)sf, sizeof *sf);		/* just in case */
	sf->sp = (int)cf;
	sf->user_sr = pmap_kernel()->pm_sr[PPC_USER_SR]; /* just in case */
	pcb->pcb_sp = (int)stktop2;
}
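The child's kernel stack built above ends up layered as follows (a
sketch derived from the code; addresses grow downward):

	trap frame	copy of the parent's, possibly with a new user
			stack pointer in fixreg[1]
	call frame	lr = fork_trampoline
	call frame	r31 = func, r30 = arg (16 bytes lower)
	switch frame	sp points at the call frame above; pcb_sp
			points here

When the child is first switched to, it resumes in fork_trampoline,
which recovers func and arg from the call frame and calls func(arg).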
Example #20
0
void
vdsp_rx_vio_dring_data(struct vdsp_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_msg *dm = (struct vio_dring_msg *)tag;
	struct vd_desc *vd;
	vaddr_t va;
	paddr_t pa;
	uint64_t size, off;
	psize_t nbytes;
	int err;

	switch (tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("DATA/INFO/DRING_DATA\n"));

		if (dm->dring_ident != sc->sc_dring_ident ||
		    dm->start_idx >= sc->sc_num_descriptors) {
			dm->tag.stype = VIO_SUBTYPE_NACK;
			vdsp_sendmsg(sc, dm, sizeof(*dm), 0);
			return;
		}

		off = dm->start_idx * sc->sc_descriptor_size;
		vd = (struct vd_desc *)(sc->sc_vd + off);
		va = (vaddr_t)vd;
		size = sc->sc_descriptor_size;
		while (size > 0) {
			pmap_extract(pmap_kernel(), va, &pa);
			nbytes = MIN(size, PAGE_SIZE - (off & PAGE_MASK));
			err = hv_ldc_copy(sc->sc_lc.lc_id, LDC_COPY_IN,
			    sc->sc_dring_cookie.addr + off, pa,
			    nbytes, &nbytes);
			if (err != H_EOK) {
				printf("%s: hv_ldc_copy %d\n", __func__, err);
				return;
			}
			va += nbytes;
			size -= nbytes;
			off += nbytes;
		}

		sc->sc_vd_ring[sc->sc_vd_prod % sc->sc_num_descriptors] = vd;
		membar_producer();
		sc->sc_vd_prod++;
		task_add(systq, &sc->sc_vd_task);

		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("DATA/ACK/DRING_DATA\n"));
		break;

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DRING_DATA\n"));
		break;

	default:
		DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
		break;
	}
}
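A note on the INFO path above: the membar_producer() between storing
the descriptor pointer into sc_vd_ring[] and incrementing sc_vd_prod
orders the two stores.  The consuming task (not shown in this excerpt)
would pair it with membar_consumer(), along these lines (a sketch; the
consumer index name is a guess):

	while (sc->sc_vd_cons != sc->sc_vd_prod) {
		membar_consumer();	/* read index before slot */
		vd = sc->sc_vd_ring[sc->sc_vd_cons++ %
		    sc->sc_num_descriptors];
		/* ... process vd ... */
	}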
Example #21
0
/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize cpu, and do autoconfiguration.
 *
 * This is called early in init_main.c:main(), after the
 * kernel memory allocator is ready for use, but before
 * the creation of processes 1 and 2, mountroot, etc.
 */
void
cpu_startup()
{
	caddr_t v;
	int sz, i;
	vsize_t size;
	int base, residual;
	vaddr_t minaddr, maxaddr;
	char pbuf[9];

	/*
	 * Initialize the message buffer (for kernel printf).
	 * It is placed in physical pages four through seven, so it
	 * will always be in the same place after a reboot (physical
	 * pages 0-3 are reserved by the PROM for its vector table
	 * and other stuff).  Its mapping was prepared in
	 * pmap_bootstrap().  Also, offset it slightly to avoid PROM
	 * scribbles.
	 */
	v = (caddr_t) (NBPG * 4);
	msgbufaddr = (caddr_t)(v + MSGBUFOFF);
	initmsgbuf(msgbufaddr, MSGBUFSIZE);

#ifdef DDB
	{
		extern int end[];
		extern char *esym;

		ddb_init(end[0], end + 1, (int*)esym);
	}
#endif /* DDB */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	identifycpu();
	fputype = FPU_NONE;
#ifdef  FPU_EMULATE
	printf("fpu: emulator\n");
#else
	printf("fpu: no math support\n");
#endif

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * XXX fredette - we force a small number of buffers
	 * to help me debug this on my low-memory machine.
	 * this should go away at some point, allowing the
	 * normal automatic buffer-sizing to happen.
	 */
	bufpages = 37;

	/*
	 * Get scratch page for dumpsys().
	 */
	if ((dumppage = uvm_km_alloc(kernel_map, NBPG)) == 0)
		panic("startup: alloc dumppage");

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (int)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_alloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v, NULL) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != 0)
		panic("startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = NBPG * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL) 
				panic("cpu_startup: not enough memory for "
				    "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
				       VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
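	/*
	 * Worked example of the base/residual split (nbuf value
	 * illustrative): with bufpages = 37 and nbuf = 16, base = 2
	 * and residual = 5, so buffers 0-4 get three pages each and
	 * buffers 5-15 get two: 5*3 + 11*2 = 37 pages, exactly
	 * bufpages.
	 */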
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * We don't use a submap for physio, and use a separate map
	 * for DVMA allocations.  Our vmapbuf just maps pages into
	 * the kernel map (any kernel mapping is OK) and then the
	 * device drivers clone the kernel mappings into DVMA space.
	 */

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				 nmbclusters * mclbytes, VM_MAP_INTRSAFE,
				 FALSE, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Allocate a virtual page (for use by /dev/mem)
	 * This page is handed to pmap_enter() therefore
	 * it has to be in the normal kernel VA range.
	 */
	vmmap = uvm_km_valloc_wait(kernel_map, NBPG);

	/*
	 * Allocate dma map for devices on the bus.
	 */
	dvmamap = extent_create("dvmamap",
	    DVMA_MAP_BASE, DVMA_MAP_BASE + DVMA_MAP_AVAIL,
	    M_DEVBUF, 0, 0, EX_NOWAIT);
	if (dvmamap == NULL)
		panic("unable to allocate DVMA map");

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}
Example #22
0
/*
 * Early initialization, right before main is called.
 */
void
mvme68k_init(void)
{
	int i;

	/*
	 * Since mvme68k boards can have anything from 4MB of onboard RAM
	 * on up, we would rather set pager_map_size at runtime, based on
	 * the amount of onboard RAM.
	 *
	 * Set pager_map_size to half the size of onboard RAM, up to a
	 * maximum of 16MB.
	 * (Note: just use ps_end here, since onboard RAM starts at 0x0.)
	 */
	pager_map_size = phys_seg_list[0].ps_end / 2;
	if (pager_map_size > (16 * 1024 * 1024))
		pager_map_size = 16 * 1024 * 1024;
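	/*
	 * Example: 8MB of onboard RAM yields a 4MB pager_map_size;
	 * boards with more than 32MB hit the 16MB ceiling.
	 */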

	/*
	 * Tell the VM system about available physical memory.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		if (phys_seg_list[i].ps_start == phys_seg_list[i].ps_end) {
			/*
			 * Segment has been completely gobbled up.
			 */
			continue;
		}
		/*
		 * Note the index of the mem cluster is the free
		 * list we want to put the memory on (0 == default,
		 * 1 == VME).  There can only be two.
		 */
		uvm_page_physload(atop(phys_seg_list[i].ps_start),
				 atop(phys_seg_list[i].ps_end),
				 atop(phys_seg_list[i].ps_start),
				 atop(phys_seg_list[i].ps_end), i);
	}

	switch (machineid) {
#ifdef MVME147
	case MVME_147:
		mvme147_init();
		break;
#endif
#ifdef MVME167
	case MVME_167:
#endif
#ifdef MVME162
	case MVME_162:
#endif
#ifdef MVME177
	case MVME_177:
#endif
#ifdef MVME172
	case MVME_172:
#endif
#if defined(MVME162) || defined(MVME167) || defined(MVME172) || defined(MVME177)
		mvme1xx_init();
		break;
#endif
	default:
		panic("%s: impossible machineid", __func__);
	}

	/*
	 * Initialize error message buffer (at end of core).
	 */
	for (i = 0; i < btoc(round_page(MSGBUFSIZE)); i++)
		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * PAGE_SIZE,
		    msgbufpa + i * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
	pmap_update(pmap_kernel());
}
Example #23
0
/*
 * u_int initarm(...)
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 */
u_int
initarm(void *arg0, void *arg1, void *arg2)
{
	extern vaddr_t xscale_cache_clean_addr;
	extern cpu_kcore_hdr_t cpu_kcore_hdr;
	int loop;
	int loop1;
	u_int l1pagetable;
	pv_addr_t kernel_l1pt;
	paddr_t memstart;
	psize_t memsize;
	extern u_int32_t esym;	/* &_end if no symbols are loaded */

#ifdef DIAGNOSTIC
	extern vsize_t xscale_minidata_clean_size; /* used in KASSERT */
#endif

	/* setup a serial console for very early boot */
	consinit();

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	if (set_cpufuncs())
		panic("cpu not recognized!");

	/*
	 * Examine the boot args string for options we need to know about
	 * now.
	 */
	/* XXX should really be done after setting up the console, but we
	 * XXX need to parse the console selection flags right now. */
	process_kernel_args((char *)0xa0200000 - MAX_BOOT_STRING - 1);

	/* Calibrate the delay loop. */
#if 1
	i80321_calibrate_delay();
#endif

	/* Talk to the user */
	printf("\nOpenBSD/armish booting ...\n");

	/*
	 * Reset the secondary PCI bus.  RedBoot doesn't stop devices
	 * on the PCI bus before handing us control, so we have to
	 * do this.
	 *
	 * XXX This is arguably a bug in RedBoot, and doing this reset
	 * XXX could be problematic in the future if we encounter an
	 * XXX application where the PPB in the i80312 is used as a
	 * XXX PPB.
	 */
//#define VERBOSE_INIT_ARM

	/*
	 * Fetch the SDRAM start/size from the i80312 SDRAM configuration
	 * registers.
	 */
	i80321_sdram_bounds(&obio_bs_tag, VERDE_PMMR_BASE + VERDE_MCU_BASE,
	    &memstart, &memsize);

#define DEBUG
#ifdef DEBUG
	printf("initarm: Configuring system ...\n");
#endif

	/* Fake bootconfig structure for the benefit of pmap.c */
	/* XXX must make the memory description h/w independent */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = memstart;
	bootconfig.dram[0].pages = memsize / PAGE_SIZE;

	/*
	 * Set up the variables that define the availability of
	 * physical memory.  For now, we're going to set
	 * physical_freestart to 0xa0200000 (where the kernel
	 * was loaded), and allocate the memory we need downwards.
	 * If we get too close to the page tables that RedBoot
	 * set up, we will panic.  We will update physical_freestart
	 * and physical_freeend later to reflect what pmap_bootstrap()
	 * wants to see.
	 *
	 * XXX pmap_bootstrap() needs an enema.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);

	physical_freestart = 0xa0009000UL;
	physical_freeend = 0xa0200000UL;

	physmem = (physical_end - physical_start) / PAGE_SIZE;

#ifdef DEBUG
	/* Tell the user about the memory */
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	    physical_start, physical_end - 1);
#endif

	/*
	 * Okay, the kernel starts 2MB in from the bottom of physical
	 * memory.  We are going to allocate our bootstrap pages downwards
	 * from there.
	 *
	 * We need to allocate some fixed page tables to get the kernel
	 * going.  We allocate one page directory and a number of page
	 * tables and store the physical addresses in the kernel_pt_table
	 * array.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	       physical_freestart, free_pages, free_pages);
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)				\
	alloc_pages((var).pv_pa, (np));			\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define alloc_pages(var, np)				\
	physical_freeend -= ((np) * PAGE_SIZE);		\
	if (physical_freeend < physical_freestart)	\
		panic("initarm: out of memory");	\
	(var) = physical_freeend;			\
	free_pages -= (np);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	loop1 = 0;
	kernel_l1pt.pv_pa = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}
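	/*
	 * Why the loop above always finds room for the L1 table
	 * (sketch of the argument): allocations are carved downward
	 * in 4KB L2-table units, so physical_freeend passes a
	 * 16KB-aligned value at least once in every four iterations;
	 * with at least three L2 tables allocated, a 16KB-aligned
	 * slot for the 16KB L1 table is guaranteed to turn up.
	 */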

	/* This should never happen, but better to make sure. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

	/* Allocate enough pages for cleaning the Mini-Data cache. */
	KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
	valloc_pages(minidataclean, 1);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va); 
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va); 
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va); 
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va); 
#endif

	/*
	 * XXX Defer this to later so that we can reclaim the memory
	 * XXX used by the RedBoot page tables.
	 */
	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

#ifdef HIGH_VECT
	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
#else
	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
#endif
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
#if 0
	pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE,
	    &kernel_pt_table[KERNEL_PT_IOPXS]);
#endif

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel static code/data
	 * and the symbol table. */
	{
		extern char etext[];
#ifdef VERBOSE_INIT_ARM
		extern char _end[];
#endif
		size_t textsize = (u_int32_t) etext - KERNEL_TEXT_BASE;
		size_t totalsize = esym - KERNEL_TEXT_BASE;
		u_int logical;

#ifdef VERBOSE_INIT_ARM
		printf("kernelsize text %x total %x end %xesym %x\n",
		    textsize, totalsize, _end, esym);
#endif

		textsize = round_page(textsize);
		totalsize = round_page(totalsize);
		
		logical = 0x00200000;	/* offset of kernel in RAM */

		/* Update dump information */
		cpu_kcore_hdr.kernelbase = KERNEL_BASE;
		cpu_kcore_hdr.kerneloffs = logical;
		cpu_kcore_hdr.staticsize = totalsize;

		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
		pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
	    minidataclean.pv_pa);

	/* Map the vector page. */
#ifdef HIGH_VECT
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#else
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#endif

	pmap_devmap_bootstrap(l1pagetable, iq80321_devmap);

	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/*
	 * Update the physical_freestart/physical_freeend/free_pages
	 * variables.
	 */
	{
		physical_freestart = physical_start - KERNEL_BASE +
		    round_page(esym);
		physical_freeend = physical_end;
		free_pages =
		    (physical_freeend - physical_freestart) / PAGE_SIZE;
	}
#ifdef VERBOSE_INIT_ARM
	printf("physical_freestart %x end %x\n", physical_freestart,
	    physical_freeend);
#endif

	/* be a client to all domains */
	cpu_domains(0x55555555);
	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	       physical_freestart, free_pages, free_pages);
	printf("switching to new L1 page table  @%#lx...", kernel_l1pt.pv_pa);
#endif


	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init
	 */
	proc0paddr = (struct user *)kernelstack.pv_va;
	proc0.p_addr = proc0paddr;

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

#ifdef HIGH_VECT
	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
#else
	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);
#endif

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * We should set a data abort handler here.  Once things get
	 * going this will change, as we will need a proper handler.
	 * Until then we will use a handler that just panics but tells
	 * us why.  Initialisation of the vectors will just panic on a
	 * data abort; this fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend), 0);

	/* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap((pd_entry_t *)kernel_l1pt.pv_va, KERNEL_VM_BASE,
	    KERNEL_VM_BASE + KERNEL_VM_SIZE);

	/* Update dump information */
	cpu_kcore_hdr.pmap_kernel_l1 = (u_int32_t)pmap_kernel()->pm_l1;
	cpu_kcore_hdr.pmap_kernel_l2 = (u_int32_t)&(pmap_kernel()->pm_l2);

	/* Setup the IRQ system */
#ifdef VERBOSE_INIT_ARM
	printf("irq ");
#endif
	i80321intc_intr_init();

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef DDB
	db_machine_init();

	/* Firmware doesn't load symbols. */
	ddb_init();

	if (boothowto & RB_KDB)
		Debugger();
#endif
    
	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
Example #24
0
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp, int *segp,
    int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask  = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		pmap_extract(pmap, vaddr, (paddr_t *)&curaddr);

		if (curaddr > dma_constraint.ucr_high)
			panic("Non dma-reachable buffer at curaddr %#lx(raw)",
			    curaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
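Worked example of the boundary clamp above (numbers illustrative):
with _dm_boundary = 0x10000 (64KB), bmask = ~0xffff.  For
curaddr = 0x1fffe000:

	baddr  = (0x1fffe000 + 0x10000) & ~0xffff = 0x20000000
	sgsize = MIN(sgsize, baddr - curaddr)     = 0x2000

so the segment is cut exactly at the 64KB boundary and the next chunk
starts a fresh segment at 0x20000000.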
Example #25
0
/*
 * Allocate memory for variable-sized tables.
 */
void
cpu_startup()
{
	unsigned i;
	int base, residual;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	char pbuf[9];

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("%s memory", pbuf);

	/*
	 * Virtual memory is bootstrapped -- notify the bus spaces
	 * that memory allocation is now safe.
	 */
	malta_configuration.mc_mallocsafe = 1;

	/*
	 * Allocate virtual address space for file I/O buffers.
	 * Note they are different than the array of headers, 'buf',
	 * and usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *)&buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
		    UVM_ADV_NORMAL, 0)) != 0)
		panic("startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = NBPG * ((i < residual) ? (base + 1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
					"buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
				       VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
	pmap_update(pmap_kernel());

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				    16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				    VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * (No need to allocate an mbuf cluster submap.  Mbuf clusters
	 * are allocated via the pool allocator, and we use KSEG to
	 * map those pages.)
	 */

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf(", %s free", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf(", %s in %d buffers\n", pbuf, nbuf);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}
Example #26
0
File: xbd.c Project: MarginC/kame
static void
fill_ring(struct xbdreq *xr)
{
	struct xbdreq *pxr = xr->xr_parent;
	paddr_t pa;
	unsigned long ma;
	vaddr_t addr, off;
	blk_ring_req_entry_t *ring_req;
	int breq, nr_sectors;

	/* Fill out a communications ring structure. */
	ring_req = &blk_ring->ring[MASK_BLK_IDX(req_prod)].req;
	ring_req->id = (unsigned long)xr;
	ring_req->operation = pxr->xr_bp->b_flags & B_READ ? XEN_BLOCK_READ :
		XEN_BLOCK_WRITE;
	ring_req->sector_number = (xen_sector_t)pxr->xr_bn;
	ring_req->device = pxr->xr_sc->sc_xd_device;

	DPRINTF(XBDB_IO, ("fill_ring(%d): bp %p sector %llu pxr %p xr %p\n",
	    MASK_BLK_IDX(req_prod), pxr->xr_bp, (unsigned long long)pxr->xr_bn,
	    pxr, xr));

	xr->xr_breq = 0;
	ring_req->nr_segments = 0;
	addr = trunc_page(pxr->xr_data);
	off = pxr->xr_data - addr;
	while (pxr->xr_bqueue > 0) {
#if 0
		pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    addr, &pa);
#else
		pmap_extract(pmap_kernel(), addr, &pa);
#endif
		ma = xpmap_ptom_masked(pa) + off;
		DIAGCONDPANIC((ma & (XEN_BSIZE - 1)) != 0,
		    ("xbd request ma not sector aligned"));

		if (pxr->xr_bqueue > PAGE_SIZE - off)
			breq = PAGE_SIZE - off;
		else
			breq = pxr->xr_bqueue;
		nr_sectors = breq >> XEN_BSHIFT;
		DIAGCONDPANIC(nr_sectors >= XEN_BSIZE,
		    ("xbd request nr_sectors >= XEN_BSIZE"));

		DPRINTF(XBDB_IO, ("fill_ring(%d): va 0x%08lx pa 0x%08lx "
		    "ma 0x%08lx, sectors %d, left %ld/%ld\n",
		    MASK_BLK_IDX(req_prod), addr, pa, ma, nr_sectors,
		    pxr->xr_bqueue >> XEN_BSHIFT, pxr->xr_bqueue));

		ring_req->buffer_and_sects[ring_req->nr_segments++] =
			ma | nr_sectors;
		addr += PAGE_SIZE;
		pxr->xr_bqueue -= breq;
		pxr->xr_bn += nr_sectors;
		xr->xr_breq += breq;
		off = 0;
		if (ring_req->nr_segments == MAX_BLK_SEGS)
			break;
	}
	pxr->xr_data = addr;

	req_prod++;
}
Example #27
0
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep/*, pte*/;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				*ptep &= ~L2_S_CACHE_MASK;
				PTE_SYNC(ptep);
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
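Typical usage, as a sketch (error handling abbreviated; the calls are
the standard bus_dma(9) interface that this backend implements):
allocate DMA-safe pages, then map them with BUS_DMA_COHERENT so they
are made uncacheable as above.

	bus_dma_segment_t seg;
	caddr_t kva;
	int rseg;

	if (bus_dmamem_alloc(t, PAGE_SIZE, 0, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT) != 0)
		return (ENOMEM);
	if (bus_dmamem_map(t, &seg, rseg, PAGE_SIZE, &kva,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT) != 0) {
		bus_dmamem_free(t, &seg, rseg);
		return (ENOMEM);
	}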
Example #28
0
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
    register vaddr_t o, v;
    register int c;
    register struct iovec *iov;
    int error = 0;
    vm_prot_t prot;

    while (uio->uio_resid > 0 && !error) {
        iov = uio->uio_iov;
        if (iov->iov_len == 0) {
            uio->uio_iov++;
            uio->uio_iovcnt--;
            if (uio->uio_iovcnt < 0)
                panic("mmrw");
            continue;
        }
        switch (minor(dev)) {
        case DEV_MEM:
            /* lock against other uses of shared vmmap */
            mutex_enter(&mm_lock);
            v = uio->uio_offset;
            prot = uio->uio_rw == UIO_READ ? VM_PROT_READ :
                   VM_PROT_WRITE;
            error = check_pa_acc(uio->uio_offset, prot);
            if (error) {
                mutex_exit(&mm_lock);
                break;
            }
            pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
                       trunc_page(v), prot, PMAP_WIRED|prot);
            pmap_update(pmap_kernel());
            o = uio->uio_offset & PGOFSET;
            c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
            error = uiomove((char *)vmmap + o, c, uio);
            pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
                        (vaddr_t)vmmap + PAGE_SIZE);
            pmap_update(pmap_kernel());
            mutex_exit(&mm_lock);
            break;

        case DEV_KMEM:
            v = uio->uio_offset;
            c = min(iov->iov_len, MAXPHYS);
            if (!uvm_kernacc((void *)v, c,
                             uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
                return (EFAULT);
            error = uiomove((void *)v, c, uio);
            break;

        case DEV_NULL:
            if (uio->uio_rw == UIO_WRITE)
                uio->uio_resid = 0;
            return (0);

        case DEV_ZERO:
            if (uio->uio_rw == UIO_WRITE) {
                uio->uio_resid = 0;
                return (0);
            }
            c = min(iov->iov_len, PAGE_SIZE);
            error = uiomove(zeropage, c, uio);
            break;

        default:
            return (ENXIO);
        }
    }

    return (error);
}
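Chunking example for the DEV_MEM case (numbers illustrative, assuming
a 4096-byte page): a 10000-byte read starting at physical offset
0x1ff0 computes o = 0xff0, so the first pass copies
c = min(10000, 4096 - 0xff0) = 16 bytes; the loop then remaps vmmap at
the next page and proceeds in page-sized chunks until uio_resid
reaches zero.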
Example #29
0
/*
 * cpu_lwp_fork: finish a new LWP (l2) operation.
 *
 * First LWP (l1) is the process being forked.  If it is &lwp0, then we
 * are creating a kthread, where return path and argument are specified
 * with `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize arguments), then set up the user stack
 * pointer accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
             void (*func)(void *), void *arg)
{
    struct pcb *pcb1, *pcb2;
    struct trapframe *tf;
    struct switchframe *sf;
    vaddr_t uv;

    pcb1 = lwp_getpcb(l1);
    pcb2 = lwp_getpcb(l2);

    /*
     * If parent LWP was using FPU, then we have to save the FPU h/w
     * state to PCB so that we can copy it.
     */
    fpusave_lwp(l1, true);

    /*
     * Sync the PCB before we copy it.
     */
    if (l1 == curlwp) {
        KASSERT(pcb1 == curpcb);
        savectx(pcb1);
    } else {
        KASSERT(l1 == &lwp0);
    }

    /* Copy the PCB from parent. */
    memcpy(pcb2, pcb1, sizeof(struct pcb));
    /* Copy any additional fpu state */
    fpu_save_area_fork(pcb2, pcb1);

#if defined(XEN)
    pcb2->pcb_iopl = SEL_KPL;
#endif

    /*
     * Set the kernel stack address (from the address to uarea) and
     * trapframe address for child.
     *
     * Rig kernel stack so that it would start out in lwp_trampoline()
     * and call child_return() with l2 as an argument.  This causes the
     * newly-created child process to go directly to user level with a
     * parent return value of 0 from fork(), while the parent process
     * returns normally.
     */
    uv = uvm_lwp_getuarea(l2);

#ifdef __x86_64__
    pcb2->pcb_rsp0 = (uv + USPACE - 16) & ~0xf;
    tf = (struct trapframe *)pcb2->pcb_rsp0 - 1;
#else
    pcb2->pcb_esp0 = (uv + USPACE - 16);
    tf = (struct trapframe *)pcb2->pcb_esp0 - 1;

    pcb2->pcb_iomap = NULL;
#endif
    l2->l_md.md_regs = tf;

    /*
     * Copy the trapframe from parent, so that return to userspace
     * will be to right address, with correct registers.
     */
    memcpy(tf, l1->l_md.md_regs, sizeof(struct trapframe));

    /* Child LWP might get aston() before returning to userspace. */
    tf->tf_trapno = T_ASTFLT;

#if 0 /* DIAGNOSTIC */
    /* Set a red zone in the kernel stack after the uarea. */
    pmap_kremove(uv, PAGE_SIZE);
    pmap_update(pmap_kernel());
#endif

    /* If specified, set a different user stack for a child. */
    if (stack != NULL) {
#ifdef __x86_64__
        tf->tf_rsp = (uint64_t)stack + stacksize;
#else
        tf->tf_esp = (uint32_t)stack + stacksize;
#endif
    }

    l2->l_md.md_flags = l1->l_md.md_flags;
    l2->l_md.md_astpending = 0;

    sf = (struct switchframe *)tf - 1;

#ifdef __x86_64__
    sf->sf_r12 = (uint64_t)func;
    sf->sf_r13 = (uint64_t)arg;
    sf->sf_rip = (uint64_t)lwp_trampoline;
    pcb2->pcb_rsp = (uint64_t)sf;
    pcb2->pcb_rbp = (uint64_t)l2;
#else
    /*
     * XXX Is there a reason sf->sf_edi isn't initialized here?
     * Could this leak potentially sensitive information to new
     * userspace processes?
     */
    sf->sf_esi = (int)func;
    sf->sf_ebx = (int)arg;
    sf->sf_eip = (int)lwp_trampoline;
    pcb2->pcb_esp = (int)sf;
    pcb2->pcb_ebp = (int)l2;
#endif
}
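The resulting child stack (sketch, from the code above):

	uv + USPACE	top of the uarea / kernel stack
	tf		trapframe (copy of the parent's, with
			tf_trapno = T_ASTFLT)
	sf		switchframe (sf_rip/sf_eip = lwp_trampoline,
			sf_r12/sf_esi = func, sf_r13/sf_ebx = arg)

pcb_rsp/pcb_esp points at the switchframe, so the first cpu_switchto()
to the child "returns" into lwp_trampoline, which calls func(arg); for
a normal fork, func is child_return, and the child proceeds to user
level with a return value of 0.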
Example #30
0
int
writeback(struct frame *fp, int docachepush)
{
	struct fmt7 *f = &fp->f_fmt7;
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct pcb *pcb = lwp_getpcb(l);
	int err = 0;
	u_int fa;
	void *oonfault = pcb->pcb_onfault;
	extern int suline(void *, void *);	/* locore.s */

#ifdef DEBUG
	if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid)) {
		printf(" pid=%d, fa=%x,", p->p_pid, f->f_fa);
		dumpssw(f->f_ssw);
	}
	wbstats.calls++;
#endif
	/*
	 * Deal with special cases first.
	 */
	if ((f->f_ssw & SSW4_TMMASK) == SSW4_TMDCP) {
		/*
		 * Dcache push fault.
		 * Line-align the address and write out the push data to
		 * the indicated physical address.
		 */
#ifdef DEBUG
		if ((mmudebug & MDB_WBFOLLOW) || MDB_ISPID(p->p_pid)) {
			printf(" pushing %s to PA %x, data %x",
			       f7sz[(f->f_ssw & SSW4_SZMASK) >> 5],
			       f->f_fa, f->f_pd0);
			if ((f->f_ssw & SSW4_SZMASK) == SSW4_SZLN)
				printf("/%x/%x/%x",
				       f->f_pd1, f->f_pd2, f->f_pd3);
			printf("\n");
		}
		if (f->f_wb1s & SSW4_WBSV)
			panic("writeback: cache push with WB1S valid");
		wbstats.cpushes++;
#endif
		/*
		 * XXX there are security problems if we attempt to do a
		 * cache push after a signal handler has been called.
		 */
		if (docachepush) {
			paddr_t pa;
			pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
			    trunc_page(f->f_fa), VM_PROT_WRITE,
			    VM_PROT_WRITE|PMAP_WIRED);
			pmap_update(pmap_kernel());
			fa = (u_int)&vmmap[(f->f_fa & PGOFSET) & ~0xF];
			fastcopy16(&f->f_pd0, (u_int *)fa);
			(void) pmap_extract(pmap_kernel(), (vaddr_t)fa, &pa);
			DCFL_40(pa);
			pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
				    (vaddr_t)&vmmap[PAGE_SIZE]);
			pmap_update(pmap_kernel());
		} else
			printf("WARNING: pid %d(%s) uid %d: CPUSH not done\n",
			    p->p_pid, p->p_comm, kauth_cred_geteuid(l->l_cred));
	} else if ((f->f_ssw & (SSW4_RW|SSW4_TTMASK)) == SSW4_TTM16) {