Code example #1
/*
 * Map an IO request into kernel virtual address space.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t uva, kva;
	paddr_t pa;
	vsize_t size, off;
	int npf;
	struct pmap *upmap, *kpmap;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
#endif
	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
	bp->b_saveaddr = bp->b_data;
	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	size = round_page(off + len);
	kva = uvm_km_alloc(phys_map, size, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(kva + off);
	npf = btoc(size);
	while (npf--) {
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_enter(kpmap, kva, pa,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
	pmap_update(kpmap);

	return 0;
}
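
For completeness, the reverse operation removes the wired mappings and returns the borrowed KVA. A minimal sketch of the matching vunmapbuf(), assuming the same NetBSD-style uvm_km_alloc()/uvm_km_free() API used above:

/*
 * Unmap an IO request from kernel virtual address space (sketch).
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	kva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = round_page(off + len);
	/* Tear down the wired mappings entered by vmapbuf(). */
	pmap_remove(vm_map_pmap(phys_map), kva, kva + len);
	pmap_update(vm_map_pmap(phys_map));
	/* Release the VA-only range allocated from phys_map. */
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	/* Restore the original user buffer address. */
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}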
Code example #2
File: vm_machdep.c  Project: alenichev/openbsd-kernel
/*
 * Map an IO request into kernel virtual address space.
 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	
#ifdef	DIAGNOSTIC
	if (!(bp->b_flags & B_PHYS))
		panic("vmapbuf");
#endif
	faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_valloc_wait(phys_map, len);
	bp->b_data = (caddr_t)(taddr + off);
	for (; len > 0; len -= NBPG) {
		pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &pa);
		pmap_enter(vm_map_pmap(phys_map), taddr, pa,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
		faddr += NBPG;
		taddr += NBPG;
	}
	pmap_update(vm_map_pmap(phys_map));
}
Code example #3
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use a direct-mapped address
	 * (KSEG0/KSEG1, or XKPHYS on _LP64) to avoid TLB thrashing.
	 */
#ifdef _LP64
	if (nsegs == 1) {
		if (((mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT) == 0)
		&&  (flags & BUS_DMA_COHERENT))
			*kvap = (void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(
			    segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(
			    segs[0].ds_addr);
		return 0;
	}
#else
	if ((nsegs == 1) && (segs[0].ds_addr < MIPS_PHYS_MASK)) {
		if (((mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT) == 0)
		&&  (flags & BUS_DMA_COHERENT))
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return (0);
	}
#endif	/* _LP64 */

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
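
The unmap side is symmetric. A sketch of the matching _bus_dmamem_unmap(), assuming the usual MIPS_KSEG0_P()/MIPS_KSEG1_P() predicates to recognize the direct-mapped addresses handed out by the single-segment fast path above (an _LP64 XKPHYS address would need a similar check):

void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{
	vaddr_t va = (vaddr_t)kva;

	/* Direct-mapped addresses were never entered into the pmap. */
	if (MIPS_KSEG0_P(va) || MIPS_KSEG1_P(va))
		return;

	size = round_page(size);
	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, size, UVM_KMF_VAONLY);
}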
Code example #4
/*
 * uvm_pagermapin: map pages into KVA for I/O that needs mappings
 *
 * We just reserve a blank chunk of kernel VA with uvm_pseg_get() and
 * then use pmap_enter() to put the mappings in by hand.
 */
vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vaddr_t kva, cva;
	vm_prot_t prot;
	vsize_t size;
	struct vm_page *pp;

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;
	size = ptoa(npages);

	KASSERT(size <= MAXBSIZE);

	kva = uvm_pseg_get(flags);
	if (kva == 0)
		return 0;

	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->pg_flags & PG_BUSY);
		/* Allow pmap_enter to fail. */
		if (pmap_enter(pmap_kernel(), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | PMAP_CANFAIL | prot) != 0) {
			pmap_remove(pmap_kernel(), kva, cva);
			pmap_update(pmap_kernel());
			uvm_pseg_release(kva);
			return 0;
		}
	}
	pmap_update(pmap_kernel());
	return kva;
}
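
The counterpart that tears the window down once the I/O completes is short. A sketch matching the uvm_pseg_get()/uvm_pseg_release() pairing above:

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = ptoa(npages);

	/* Remove the wired mappings, then return the segment. */
	pmap_remove(pmap_kernel(), kva, kva + size);
	pmap_update(pmap_kernel());
	uvm_pseg_release(kva);
}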
Code example #5
/* This code was originally stolen from the alpha port. */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	struct proc *p;
	vm_prot_t prot;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	p = bp->b_proc;
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);
	len = atop(len);
	prot = bp->b_flags & B_READ ? VM_PROT_READ | VM_PROT_WRITE :
				      VM_PROT_READ;
	while (len--) {
		if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map), faddr,
		    &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
		    prot, prot | PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(vm_map_pmap(phys_map));

	return 0;
}
Code example #6
File: leds.c  Project: MarginC/kame
/*
 * Map the LED page and setup the KVA to access it.
 */
void
ledinit()
{

	pmap_enter(pmap_kernel(), (vm_offset_t)ledbase, (vm_offset_t)LED_ADDR,
		   VM_PROT_READ|VM_PROT_WRITE, TRUE);
	ledaddr = (u_int8_t *) ((long)ledbase | (LED_ADDR & PGOFSET));
}
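
This snippet uses the historical five-argument pmap_enter(), whose final TRUE is the wired flag. With the modern interface seen in the other examples in this collection, the equivalent mapping would look like this (a sketch, following the same flags convention as above):

	pmap_enter(pmap_kernel(), (vaddr_t)ledbase, (paddr_t)LED_ADDR,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
	pmap_update(pmap_kernel());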
Code example #7
int
mappedcopyout(void *f, void *t, size_t count)
{
	void *fromp = f, *top = t;
	vaddr_t kva;
	paddr_t upa;
	size_t len;
	int off, alignable;
	pmap_t upmap;
#define CADDR2 caddr1

#ifdef DEBUG
	if (mappedcopydebug & MDB_COPYOUT)
		printf("mappedcopyout(%p, %p, %lu), pid %d\n",
		    fromp, top, (u_long)count, curproc->p_pid);
	mappedcopyoutcount++;
#endif

	if (CADDR2 == 0)
		CADDR2 = (void *) uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
		    UVM_KMF_VAONLY);

	kva = (vaddr_t) CADDR2;
	off = (int)((u_long)top & PAGE_MASK);
	alignable = (off == ((u_long)fromp & PAGE_MASK));
	upmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
	while (count > 0) {
		/*
		 * First access of a page, use subyte to make sure
		 * page is faulted in and write access allowed.
		 */
		if (subyte(top, *((char *)fromp)) == -1)
			return EFAULT;
		/*
		 * Map in the page and memcpy data out to it
		 */
		if (pmap_extract(upmap, trunc_page((vaddr_t)top), &upa)
		    == false)
			panic("mappedcopyout: null page frame");
		len = min(count, (PAGE_SIZE - off));
		pmap_enter(pmap_kernel(), kva, upa,
		    VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		pmap_update(pmap_kernel());
		if (len == PAGE_SIZE && alignable && off == 0)
			copypage(fromp, (void *)kva);
		else
			memcpy((void *)(kva + off), fromp, len);
		fromp += len;
		top += len;
		count -= len;
		off = 0;
	}
	pmap_remove(pmap_kernel(), kva, kva + PAGE_SIZE);
	pmap_update(pmap_kernel());
	return 0;
#undef CADDR2
}
Code example #8
File: machdep.c  Project: MarginC/kame
/*
 * Early initialization, right before main is called.
 */
void
mvme68k_init()
{
	int i;

	/*
	 * Tell the VM system about available physical memory.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		if (phys_seg_list[i].ps_start == phys_seg_list[i].ps_end) {
			/*
			 * Segment has been completely gobbled up.
			 */
			continue;
		}
		/*
		 * Note the index of the mem cluster is the free
		 * list we want to put the memory on (0 == default,
		 * 1 == VME).  There can only be two.
		 */
		uvm_page_physload(atop(phys_seg_list[i].ps_start),
				 atop(phys_seg_list[i].ps_end),
				 atop(phys_seg_list[i].ps_start),
				 atop(phys_seg_list[i].ps_end), i);
	}

	/* Initialize interrupt handlers. */
	isrinit();

	switch (machineid) {
#ifdef MVME147
	case MVME_147:
		mvme147_init();
		break;
#endif
#ifdef MVME162
	case MVME_162:
		mvme162_init();
		break;
#endif
#ifdef MVME167
	case MVME_167:
		mvme167_init();
		break;
#endif
	default:
		panic("mvme68k_init: impossible machineid");
	}

	/*
	 * Initialize error message buffer (at end of core).
	 */
	for (i = 0; i < btoc(round_page(MSGBUFSIZE)); i++)
		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * NBPG,
		    msgbufpa + i * NBPG, VM_PROT_READ|VM_PROT_WRITE, TRUE,
		    VM_PROT_READ|VM_PROT_WRITE);
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
}
Code example #9
File: viommu.c  Project: repos-holder/openbsd-patches
/*
 * Map the DVMA mappings into the kernel pmap.
 * Check the flags to see whether we're streaming or coherent.
 */
int
viommu_dvmamem_map(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dma_segment_t *segs,
    int nsegs, size_t size, caddr_t *kvap, int flags)
{
	struct vm_page *m;
	vaddr_t va;
	bus_addr_t addr;
	struct pglist *mlist;
	bus_addr_t cbit = 0;

	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: segp %p nsegs %d size %lx\n",
	    segs, nsegs, size));

	/*
	 * Allocate some space in the kernel map, and then map these pages
	 * into this space.
	 */
	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	/* 
	 * digest flags:
	 */
#if 0
	if (flags & BUS_DMA_COHERENT)	/* Disable vcache */
		cbit |= PMAP_NVC;
#endif
	if (flags & BUS_DMA_NOCACHE)	/* side effects */
		cbit |= PMAP_NC;

	/*
	 * Now take this and map it into the CPU.
	 */
	mlist = segs[0]._ds_mlist;
	TAILQ_FOREACH(m, mlist, pageq) {
#ifdef DIAGNOSTIC
		if (size == 0)
			panic("iommu_dvmamem_map: size botch");
#endif
		addr = VM_PAGE_TO_PHYS(m);
		DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_map: "
		    "mapping va %lx at %llx\n", va,
		    (unsigned long long)addr | cbit));
		pmap_enter(pmap_kernel(), va, addr | cbit,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return (0);
}
Code example #10
File: machdep.c  Project: lacombar/netbsd-alc
/*
 * Early initialization, before main() is called.
 */
void
luna68k_init()
{
	volatile unsigned char *pio0 = (void *)0x49000000;
	int sw1, i;
	char *cp;
	extern char bootarg[64];

	extern paddr_t avail_start, avail_end;

	/*
	 * Tell the VM system about available physical memory.  The
	 * luna68k only has one segment.
	 */
	uvm_page_physload(atop(avail_start), atop(avail_end),
	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
	 */
	for (i = 0; i < btoc(MSGBUFSIZE); i++)
		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * PAGE_SIZE,
		    avail_end + i * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));


	pio0[3] = 0xb6;
	pio0[2] = 1 << 6;		/* enable parity check */
	pio0[3] = 0xb6;
	sw1 = pio0[0];			/* DIP SW1 value */
	sw1 ^= 0xff;
	sysconsole = !(sw1 & 0x2);	/* console selection */

	boothowto = 0;
	i = 0;
	/*
	 * 'bootarg' has:
	 *   "<args of x command> ENADDR=<addr> HOST=<host> SERVER=<name>"
	 * where <addr> is the MAC address the network loader used (not
	 * necessarily the same as the one at 0x4101.FFE0), and <host> and
	 * <name> are the values of the HOST and SERVER environment variables.
	 *
	 * NetBSD/luna68k cares only about the first argument; any of "sda".
	 */
	for (cp = bootarg; *cp != ' '; cp++) {
		BOOT_FLAG(*cp, boothowto);
		if (i++ >= sizeof(bootarg))
			break;
	}
#if 0 /* overload 1:sw1, which now means 'go ROM monitor' after poweron */
	if (boothowto == 0)
		boothowto = (sw1 & 0x1) ? RB_SINGLE : 0;
#endif
}
Code example #11
File: vm_kern.c  Project: derekmarcotte/freebsd
/*
 *	Allocates a region from the kernel address map and physically
 *	contiguous pages within the specified address range to the kernel
 *	object.  Creates a wired mapping from this region to these pages, and
 *	returns the region's starting virtual address.  If M_ZERO is specified
 *	through the given flags, then the pages are zeroed before they are
 *	mapped.
 */
vm_offset_t
kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vmem_t *vmem;
	vm_object_t object = kernel_object;
	vm_offset_t addr, offset, tmp;
	vm_page_t end_m, m;
	u_long npages;
	int pflags, tries;
 
	size = round_page(size);
	vmem = vm_dom[domain].vmd_kernel_arena;
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	pflags |= VM_ALLOC_NOWAIT;
	npages = atop(size);
	VM_OBJECT_WLOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig_domain(object, atop(offset), domain, pflags,
	    npages, low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			if (!vm_page_reclaim_contig_domain(domain, pflags,
			    npages, low, high, alignment, boundary) &&
			    (flags & M_WAITOK) != 0)
				vm_wait_domain(domain);
			VM_OBJECT_WLOCK(object);
			tries++;
			goto retry;
		}
		vmem_free(vmem, addr, size);
		return (0);
	}
	KASSERT(vm_phys_domain(m) == domain,
	    ("kmem_alloc_contig_domain: Domain mismatch %d != %d",
	    vm_phys_domain(m), domain));
	end_m = m + npages;
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
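
A hypothetical call site (the bounds and attribute are illustrative): allocate a zeroed, physically contiguous 64 KB buffer below 4 GB from domain 0, with default cache attributes:

	vm_offset_t va;

	va = kmem_alloc_contig_domain(0, 64 * 1024, M_WAITOK | M_ZERO,
	    0, 0xffffffffUL, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	if (va == 0)
		/* allocation failed even after reclaim attempts */
		return (ENOMEM);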
Code example #12
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	paddr_t pa;
	bus_addr_t addr;
	int curseg, error;

	if (nsegs == 1) {
		pa = (*t->_device_to_pa)(segs[0].ds_addr);
		if (flags & BUS_DMA_COHERENT)
			*kvap = (caddr_t)PHYS_TO_UNCACHED(pa);
		else
			*kvap = (caddr_t)PHYS_TO_XKPHYS(pa, CCA_CACHED);
		return (0);
	}

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_dmamem_map: size botch");
			pa = (*t->_device_to_pa)(addr);
			error = pmap_enter(pmap_kernel(), va, pa,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}

			if (flags & BUS_DMA_COHERENT)
				pmap_page_cache(PHYS_TO_VM_PAGE(pa),
				    PV_UNCACHED);
		}
		pmap_update(pmap_kernel());
	}

	return (0);
}
Code example #13
void
lapic_init(void)
{
	int		result;
	vm_map_entry_t	entry;
	uint32_t	lo;
	uint32_t	hi;
	boolean_t	is_boot_processor;
	boolean_t	is_lapic_enabled;
	vm_offset_t	lapic_base;

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	lapic_base = (lo &  MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE %p %s %s\n", (void *) lapic_base,
		is_lapic_enabled ? "enabled" : "disabled",
		is_boot_processor ? "BSP" : "AP");
	if (!is_boot_processor || !is_lapic_enabled)
		panic("Unexpected local APIC state\n");

	/* Establish a map to the local apic */
	lapic_start = (vm_offset_t)vm_map_min(kernel_map);
	result = vm_map_find_space(kernel_map,
				   (vm_map_address_t *) &lapic_start,
				   round_page(LAPIC_SIZE), 0,
				   VM_MAKE_TAG(VM_MEMORY_IOKIT), &entry);
	if (result != KERN_SUCCESS) {
		panic("smp_init: vm_map_find_entry FAILED (err=%d)", result);
	}
	vm_map_unlock(kernel_map);
	/*
	 * Map in the local APIC non-cacheable, as recommended by Intel
	 * in section 8.4.1 of the "System Programming Guide".
	 */
	pmap_enter(pmap_kernel(),
			lapic_start,
			(ppnum_t) i386_btop(lapic_base),
			VM_PROT_READ|VM_PROT_WRITE,
			VM_WIMG_IO,
			TRUE);
	lapic_id = (unsigned long)(lapic_start + LAPIC_ID);

	if ((LAPIC_READ(VERSION)&LAPIC_VERSION_MASK) < 0x14) {
		panic("Local APIC version 0x%x, 0x14 or more expected\n",
			(LAPIC_READ(VERSION)&LAPIC_VERSION_MASK));
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map((LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
	kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
}
Code example #14
File: vm_kern.c  Project: derekmarcotte/freebsd
/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
    vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_page_t m, mpred;
	int pflags;

	KASSERT(object == kernel_object,
	    ("kmem_back_domain: only supports kernel object."));

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	if (flags & M_WAITOK)
		pflags |= VM_ALLOC_WAITFAIL;

	i = 0;
	VM_OBJECT_WLOCK(object);
retry:
	mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
	for (; i < size; i += PAGE_SIZE, mpred = m) {
		m = vm_page_alloc_domain_after(object, atop(offset + i),
		    domain, pflags, mpred);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0)
				goto retry;
			VM_OBJECT_WUNLOCK(object);
			kmem_unback(object, addr, i);
			return (KERN_NO_SPACE);
		}
		KASSERT(vm_phys_domain(m) == domain,
		    ("kmem_back_domain: Domain mismatch %d != %d",
		    vm_phys_domain(m), domain));
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_malloc: page %p is managed", m));
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);

	return (KERN_SUCCESS);
}
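
A hypothetical caller sketch, reusing the arena naming from the allocators above: back a reserved KVA range with pages and release the reservation if that fails. Note that kmem_back_domain() already calls kmem_unback() internally on failure, so the caller only frees the vmem reservation:

	if (kmem_back_domain(0, kernel_object, addr, size,
	    M_NOWAIT | M_ZERO) != KERN_SUCCESS) {
		/* Release the KVA reservation; nothing was left mapped. */
		vmem_free(vm_dom[0].vmd_kernel_arena, addr, size);
		return (0);
	}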
Code example #15
File: vm_kern.c  Project: derekmarcotte/freebsd
/*
 *	Allocates a region from the kernel address map and physical pages
 *	within the specified address range to the kernel object.  Creates a
 *	wired mapping from this region to these pages, and returns the
 *	region's starting virtual address.  The allocated pages are not
 *	necessarily physically contiguous.  If M_ZERO is specified through the
 *	given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vmem_t *vmem;
	vm_object_t object = kernel_object;
	vm_offset_t addr, i, offset;
	vm_page_t m;
	int pflags, tries;

	size = round_page(size);
	vmem = vm_dom[domain].vmd_kernel_arena;
	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	pflags |= VM_ALLOC_NOWAIT;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		tries = 0;
retry:
		m = vm_page_alloc_contig_domain(object, atop(offset + i),
		    domain, pflags, 1, low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
				if (!vm_page_reclaim_contig_domain(domain,
				    pflags, 1, low, high, PAGE_SIZE, 0) &&
				    (flags & M_WAITOK) != 0)
					vm_wait_domain(domain);
				VM_OBJECT_WLOCK(object);
				tries++;
				goto retry;
			}
			kmem_unback(object, addr, i);
			vmem_free(vmem, addr, size);
			return (0);
		}
		KASSERT(vm_phys_domain(m) == domain,
		    ("kmem_alloc_attr_domain: Domain mismatch %d != %d",
		    vm_phys_domain(m), domain));
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
		    VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
Code example #16
File: bus_dma.c  Project: lacombar/netbsd-alc
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0]._ds_paddr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0]._ds_paddr);
		return 0;
	}

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg]._ds_vaddr = va;
		for (addr = segs[curseg]._ds_paddr;
		    addr < (segs[curseg]._ds_paddr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

			/* XXX Do something about COHERENT here. */
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}
Code example #17
File: bus_dma.c  Project: ajinkya93/OpenBSD
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, pmapflags = 0, ret;
	const struct kmem_dyn_mode *kd;

	if (flags & BUS_DMA_NOCACHE)
		pmapflags |= PMAP_NOCACHE;

	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			/*
			 * we don't want pmap to panic here if it can't
			 * alloc
			 */
			ret = pmap_enter(pmap_kernel(), va, addr | pmapflags,
			    PROT_READ | PROT_WRITE,
			    PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (ret) {
				pmap_update(pmap_kernel());
				km_free((void *)sva, ssize, &kv_any, &kp_none);
				return (ret);
			}

		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Code example #18
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, pmapflags = 0, error;

	if (nsegs == 1 && (flags & BUS_DMA_NOCACHE) == 0) {
		*kvap = (caddr_t)PMAP_DIRECT_MAP(segs[0].ds_addr);
		return (0);
	}

	if (flags & BUS_DMA_NOCACHE)
		pmapflags |= PMAP_NOCACHE;

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			error = pmap_enter(pmap_kernel(), va, addr | pmapflags,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Code example #19
File: pmap.c  Project: DJHartley/xnu
/**
 * pmap_map
 *
 * Map specified virtual address range to a physical one.
 */
vm_offset_t
pmap_map(vm_offset_t virt,
         vm_map_offset_t start_addr,
         vm_map_offset_t end_addr,
         vm_prot_t prot,
         unsigned int flags)
{
    int        ps;
    
    ps = PAGE_SIZE;
    while (start_addr < end_addr) {
        pmap_enter(kernel_pmap, (vm_map_offset_t)virt, (start_addr), prot, flags, FALSE, TRUE);
        virt += ps;
        start_addr += ps;
    }
    return(virt);
}
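
A hypothetical early-bootstrap use, mapping a one-page device register window uncached; virt_avail and dev_base_pa are illustrative names, and VM_WIMG_IO is the same mapping attribute the lapic_init() example below passes to pmap_enter():

    /* Map one wired, uncached page of device registers (sketch). */
    virt_avail = pmap_map(virt_avail, dev_base_pa, dev_base_pa + PAGE_SIZE,
                          VM_PROT_READ | VM_PROT_WRITE, VM_WIMG_IO);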
Code example #20
/*
 * Physical address to virtual address
 */
vaddr_t
kloader_phystov(paddr_t pa)
{
	vaddr_t va;
	int error;

#if defined(CPU_XSCALE_PXA250) || defined(CPU_XSCALE_PXA270)
	va = KERNEL_BASE + pa - PXA2X0_SDRAM0_START;
#else
#error	KLOADER is not supported for this CPU type.
#endif
	error = pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	if (error) {
		printf("%s: map failed: pa=0x%lx, va=0x%lx, error=%d\n",
		    __func__, pa, va, error);
	}
	return va;
}
Code example #21
File: bus_dma.c  Project: lacombar/netbsd-alc
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

			/* Cache-inhibit the page if necessary */
			if ((flags & BUS_DMA_COHERENT) != 0)
				_pmap_set_page_cacheinhibit(pmap_kernel(), va);

			segs[curseg]._ds_flags &= ~BUS_DMA_COHERENT;
			segs[curseg]._ds_flags |= (flags & BUS_DMA_COHERENT);
		}
	}
	pmap_update(pmap_kernel());

	if ((flags & BUS_DMA_COHERENT) != 0)
		TBIAS();

	return 0;
}
Code example #22
File: dma.c  Project: alenichev/openbsd-kernel
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
    caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg, error;

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			error = pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
			    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
			if (error) {
				/*
				 * Clean up after ourselves.
				 * XXX uvm_wait on WAITOK
				 */
				pmap_update(pmap_kernel());
				uvm_km_free(kernel_map, sva, ssize);
				return (error);
			}
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Code example #23
File: ifpga_io.c  Project: ryo/netbsd-src
int
ifpga_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int cacheable, bus_space_handle_t *bshp)
{
    bus_addr_t startpa, endpa;
    vaddr_t va;
    const struct pmap_devmap *pd;
    bus_addr_t pa = bpa + (bus_addr_t) t;

    if ((pd = pmap_devmap_find_pa(pa, size)) != NULL) {
        /* Device was statically mapped. */
        *bshp = pd->pd_va + (pa - pd->pd_pa);
        return 0;
    }

    /* Round the allocation to page boundaries */
    startpa = trunc_page(bpa);
    endpa = round_page(bpa + size);

    /* Get some VM.  */
    va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
                      UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
    if (va == 0)
        return ENOMEM;

    /* Store the bus space handle */
    *bshp = va + (bpa & PGOFSET);

    /* Now map the pages */
    /* The cookie is the physical base address for the I/O area */
    while (startpa < endpa) {
        /* XXX pmap_kenter_pa maps pages cacheable -- not what
           we want.  */
        pmap_enter(pmap_kernel(), va, (bus_addr_t)t + startpa,
                   VM_PROT_READ | VM_PROT_WRITE, 0);
        va += PAGE_SIZE;
        startpa += PAGE_SIZE;
    }
    pmap_update(pmap_kernel());

    return 0;
}
Code example #24
File: ep93xx_space.c  Project: lacombar/netbsd-alc
int
ep93xx_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
	      int cacheable, bus_space_handle_t *bshp)
{
	const struct pmap_devmap	*pd;

	paddr_t		startpa;
	paddr_t		endpa;
	paddr_t		pa;
	paddr_t		offset;
	vaddr_t		va;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return 0;
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);

	/* Get some VM.  */
	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0)
		return ENOMEM;

	/* Store the bus space handle */
	*bshp = va + offset;

	/* Now map the pages */
	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_enter(pmap_kernel(), va, pa,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
	}
	pmap_update(pmap_kernel());

	return(0);
}
Code example #25
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;


#ifdef PMAP_DEBUG
	if (pmap_debug_level > 0)
		printf("vmapbuf: bp=%08x buf=%08x len=%08x\n", (u_int)bp,
		    (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, atop(faddr) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	bp->b_data = (void *)(taddr + off);

	/*
	 * The region is locked, so we expect that pmap_extract() will
	 * always succeed.
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_enter(pmap_kernel(), taddr, fpa,
			VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return 0;
}
Code example #26
File: bus.c  Project: yazshel/netbsd-kernel
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int 
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	struct vm_page *m;
	vaddr_t va;
	struct pglist *mlist;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	if (nsegs != 1)
		panic("_bus_dmamem_map: nsegs = %d", nsegs);

	size = m68k_round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	segs[0]._ds_va = va;
	*kvap = (void *)va;

	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {
		paddr_t pa;

		if (size == 0)
			panic("_bus_dmamem_map: size botch");

		pa = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), va, pa | PMAP_NC,
			   VM_PROT_READ | VM_PROT_WRITE,
			   VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return (0);
}
Code example #27
/*
 * _bus_dmamem_map_common --
 *	Map memory allocated with _bus_dmamem_alloc_range_common() into
 *	the kernel virtual address space.
 */
int
_bus_dmamem_map_common(bus_dma_tag_t t,
		       bus_dma_segment_t *segs,
		       int nsegs,
		       size_t size,
		       void **kvap,
		       int flags,
		       int pmapflags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (__predict_false(va == 0))
		return (ENOMEM);
	
	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			KASSERT(size != 0);
			/* XXX pmap_kenter_pa()? */
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    pmapflags | PMAP_WIRED |
			    	VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Code example #28
File: vm_machdep.c  Project: orumin/openbsd-efivars
void
vmapbuf(struct buf *bp, vsize_t len)
{
    vaddr_t faddr, taddr, off;
    paddr_t fpa;
    pmap_t kpmap, upmap;

    if ((bp->b_flags & B_PHYS) == 0)
        panic("vmapbuf");
    bp->b_saveaddr = bp->b_data;
    faddr = trunc_page((vaddr_t)bp->b_data);
    off = (vaddr_t)bp->b_data - faddr;
    len = round_page(off + len);
    taddr = uvm_km_valloc_prefer_wait(phys_map, len, faddr);
    bp->b_data = (caddr_t)(taddr + off);
    /*
     * The region is locked, so we expect that pmap_extract() will
     * succeed.
     * XXX: unwise to expect this in a multithreaded environment.
     * anything can happen to a pmap between the time we lock a
     * region, release the pmap lock, and then relock it for
     * the pmap_extract().
     *
     * no need to flush TLB since we expect nothing to be mapped
     * where we just allocated (the TLB will be flushed when our
     * mapping is removed).
     */
    upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
    kpmap = vm_map_pmap(phys_map);
    while (len) {
        pmap_extract(upmap, faddr, &fpa);
        pmap_enter(kpmap, taddr, fpa,
                   PROT_READ | PROT_WRITE, PMAP_WIRED);
        faddr += PAGE_SIZE;
        taddr += PAGE_SIZE;
        len -= PAGE_SIZE;
    }
    pmap_update(kpmap);
}
Code example #29
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *upmap, *kpmap __unused;
	vaddr_t uva;		/* User VA (map from) */
	vaddr_t kva;		/* Kernel VA (new to) */
	paddr_t pa; 		/* physical address */
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	uva = m68k_trunc_page(bp->b_saveaddr = bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = m68k_round_page(off + len);
	kva = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(kva + off);

	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
	do {
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
#ifdef M68K_VAC
		pmap_enter(kpmap, kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
#else
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
#endif
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(kpmap);

	return 0;
}
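
Mappings entered with pmap_kenter_pa() on the non-VAC path are torn down with pmap_kremove() rather than pmap_remove(). A sketch of the corresponding teardown in vunmapbuf(), under the same M68K_VAC split (kva and len as computed there):

#ifdef M68K_VAC
	pmap_remove(kpmap, kva, kva + len);
#else
	pmap_kremove(kva, len);
#endif
	pmap_update(kpmap);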
Code example #30
void fic_init(void)
{
	int i;

	extern paddr_t avail_start, avail_end;

	boothowto = RB_SINGLE; /* XXX for now */
	boothowto |= RB_KDB; /* XXX for now */

	delay_divisor = 30; /* XXX */

	/*
	 * Tell the VM system about available physical memory.  The
	 * fic uses one segment.
	 */
	uvm_page_physload(atop(avail_start), atop(avail_end),
	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);

	/*
	 * map and init interrupt controller
	 */
	physaccess((void*)virtual_avail, (void*)0x44000000,
	    PAGE_SIZE, PG_RW|PG_CI);
	sicinit((void*)virtual_avail);
	virtual_avail += PAGE_SIZE;

	/*
	 * Initialize error message buffer (at end of core).
	 * avail_end was pre-decremented in pmap_bootstrap to compensate.
	 */
	for (i = 0; i < btoc(MSGBUFSIZE); i++)
		pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * PAGE_SIZE,
		    avail_end + i * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));
}