Example no. 1
/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages underlying the specified virtual
 *	address range.
 *
 *	A physical page must exist within the specified object at each index
 *	that is being unmapped.
 */
static int
_kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	vm_page_t m, next;
	vm_offset_t end, offset;
	int domain;

	KASSERT(object == kernel_object,
	    ("kmem_unback: only supports kernel object."));

	if (size == 0)
		return (0);
	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	end = offset + size;
	VM_OBJECT_WLOCK(object);
	m = vm_page_lookup(object, atop(offset)); 
	domain = vm_phys_domain(m);
	for (; offset < end; offset += PAGE_SIZE, m = next) {
		next = vm_page_next(m);
		vm_page_unwire(m, PQ_NONE);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);

	return (domain);
}
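For context, a hedged sketch of how a public wrapper might sit on top of this helper (kmem_unback_sketch is illustrative, not code from the example above): a FreeBSD-style kmem_unback() would simply discard the domain returned by _kmem_unback(), while a kmem_free()-style caller could use it to return the virtual range to the matching per-domain arena.

/* Hedged sketch: a void wrapper that ignores the returned domain. */
static void
kmem_unback_sketch(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	(void)_kmem_unback(object, addr, size);
}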
Example no. 2
int 
sun68k_bus_unmap(bus_space_tag_t t, bus_space_handle_t bh, bus_size_t size)
{
	bus_size_t	offset;
	vaddr_t va = (vaddr_t)bh;

	/*
	 * Adjust the user's request to be page-aligned.
	 */
	offset = va & PGOFSET;
	va -= offset;
	size += offset;
	size = m68k_round_page(size);
	if (size == 0) {
		printf("sun68k_bus_unmap: zero size\n");
		return (EINVAL);
	}

	/*
	 * If any part of the request is in the PROM's address space,
	 * don't unmap it.
	 */
#ifdef	DIAGNOSTIC
	if ((va >= SUN_MONSTART && va < SUN_MONEND) !=
	    ((va + size) >= SUN_MONSTART && (va + size) < SUN_MONEND))
		panic("sun68k_bus_unmap: bad PROM mapping");
#endif
	if (va >= SUN_MONSTART && va < SUN_MONEND)
		return (0);

	pmap_remove(pmap_kernel(), va, va + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, size, UVM_KMF_VAONLY);
	return (0);
}
Example no. 3
/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

#ifdef PMAP_DEBUG
	if (pmap_debug_level > 0)
		printf("vunmapbuf: bp=%08x buf=%08x len=%08x\n",
		    (u_int)bp, (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	/*
	 * Make sure the cache does not have dirty data for the
	 * pages we had mapped.
	 */
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);

	pmap_remove(pmap_kernel(), addr, addr + len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
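This vunmapbuf() tears down the temporary kernel mapping of a user I/O buffer. Below is a hedged sketch of the usual vmapbuf() counterpart it undoes (vmapbuf_sketch is illustrative; it assumes NetBSD-style uvm_km_alloc()/pmap_extract()/pmap_enter() interfaces and a b_proc field on the buffer, and is not taken from the examples collected here).

/*
 * Hedged sketch: wire the user pages behind bp->b_data into phys_map and
 * remember the user address in b_saveaddr so vunmapbuf() can restore it.
 */
void
vmapbuf_sketch(struct buf *bp, vsize_t len)
{
	vaddr_t uva, kva, off;
	paddr_t pa;
	pmap_t upmap;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf_sketch");

	bp->b_saveaddr = bp->b_data;
	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = round_page(off + len);

	/* Reserve kernel VA only; the physical pages already exist. */
	kva = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(kva + off);

	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	while (len > 0) {
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf_sketch: null page frame");
		pmap_enter(pmap_kernel(), kva, pa,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}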
Example no. 4
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((uintptr_t)kva & PGOFSET)
		panic("_bus_dmamem_unmap: bad alignment on %p", kva);
#endif

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2 or XKSEG).
	 */
	if (MIPS_KSEG0_P(kva) || MIPS_KSEG1_P(kva))
		return;
#ifdef _LP64
	if (MIPS_XKPHYS_P((vaddr_t)kva))
		return;
#endif

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
Example no. 5
/*
 * Set a red zone in the kernel stack after the u. area.
 */
void
setguardpage(struct proc *p)
{
	pmap_remove(pmap_kernel(), (vaddr_t)p->p_addr + PAGE_SIZE,
	    (vaddr_t)p->p_addr + 2 * PAGE_SIZE);
	pmap_update(pmap_kernel());
}
Example no. 6
/*
 * uvm_pagermapin: map pages into KVA for I/O that needs mappings
 *
 * We basically just km_valloc a blank map entry to reserve the space in the
 * kernel map and then use pmap_enter() to put the mappings in by hand.
 */
vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vaddr_t kva, cva;
	vm_prot_t prot;
	vsize_t size;
	struct vm_page *pp;

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;
	size = ptoa(npages);

	KASSERT(size <= MAXBSIZE);

	kva = uvm_pseg_get(flags);
	if (kva == 0)
		return 0;

	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->pg_flags & PG_BUSY);
		/* Allow pmap_enter to fail. */
		if (pmap_enter(pmap_kernel(), cva, VM_PAGE_TO_PHYS(pp),
		    prot, PMAP_WIRED | PMAP_CANFAIL | prot) != 0) {
			pmap_remove(pmap_kernel(), kva, cva);
			pmap_update(pmap_kernel());
			uvm_pseg_release(kva);
			return 0;
		}
	}
	pmap_update(pmap_kernel());
	return kva;
}
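A hedged usage sketch of how a pager might bracket a transfer with this function and its uvm_pagermapout() counterpart (shown later as Example no. 12); pager_io_sketch and the UVMPAGER_MAPIN_WAITOK flag are assumptions, not taken from the sources collected here.

/*
 * Hedged sketch: temporarily map the busy pages, perform the I/O against
 * the returned KVA, then drop the mapping again.
 */
static int
pager_io_sketch(struct vm_page **pps, int npages)
{
	vaddr_t kva;

	kva = uvm_pagermapin(pps, npages, UVMPAGER_MAPIN_WAITOK);
	if (kva == 0)
		return EAGAIN;

	/* ... start the transfer against [kva, kva + ptoa(npages)) ... */

	uvm_pagermapout(kva, npages);
	return 0;
}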
Example no. 7
/*
 * Passively intercepts the thread switch function to increase
 * the thread priority from a user priority to a kernel priority, reducing
 * syscall and trap overhead for the case where no switch occurs.
 *
 * Synchronizes td_ucred with p_ucred.  This is used by system calls,
 * signal handling, faults, AST traps, and anything else that enters the
 * kernel from userland and provides the kernel with a stable read-only
 * copy of the process ucred.
 *
 * To avoid races with another thread updating p_ucred we obtain p_spin.
 * The other thread doing the update will obtain both p_token and p_spin.
 * In the case where the cached cred pointer matches, we will already have
 * the ref and we don't have to do one blessed thing.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *ocred;
	struct ucred *ncred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		spin_lock(&curp->p_spin);
		ncred = crhold(curp->p_ucred);
		spin_unlock(&curp->p_spin);
		ocred = curtd->td_ucred;
		curtd->td_ucred = ncred;
		if (ocred)
			crfree(ocred);
	}

#ifdef DDB
	/*
	 * Debugging, remove top two user stack pages to catch kernel faults
	 */
	if (freeze_on_seg_fault > 1 && curtd->td_lwp) {
		pmap_remove(vmspace_pmap(curtd->td_lwp->lwp_vmspace),
			    0x00007FFFFFFFD000LU,
			    0x0000800000000000LU);
	}
#endif
}
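To make the locking protocol in the comment concrete, here is a hedged sketch of the update side it alludes to (proc_set_ucred_sketch is hypothetical and the real DragonFly update path may differ): the writer holds both p_token and p_spin, which is why userenter() only needs p_spin for a consistent snapshot.

/*
 * Hedged sketch: replace p_ucred while holding p_token and p_spin, then
 * drop the old reference outside the locks.
 */
static void
proc_set_ucred_sketch(struct proc *p, struct ucred *ncred)
{
	struct ucred *ocred;

	lwkt_gettoken(&p->p_token);
	spin_lock(&p->p_spin);
	ocred = p->p_ucred;
	p->p_ucred = crhold(ncred);
	spin_unlock(&p->p_spin);
	lwkt_reltoken(&p->p_token);
	if (ocred)
		crfree(ocred);
}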
Example no. 8
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{
	vaddr_t va;
	size_t s;

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	size = round_page(size);

	/*
	 * Re-enable cacheing on the range
	 * XXXSCW: There should be some way to indicate that the pages
	 * were mapped DMA_MAP_COHERENT in the first place...
	 */
	for (s = 0, va = (vaddr_t)kva; s < size;
	    s += PAGE_SIZE, va += PAGE_SIZE)
		_pmap_set_page_cacheable(pmap_kernel(), va);

	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
Example no. 9
void 
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	bus_dma_segment_t *segs;
	vaddr_t dva;
	vsize_t sgsize;
	int error, s;

#ifdef DIAGNOSTIC
	if (map->dm_nsegs != 1)
		panic("%s: invalid nsegs = %d", __func__, map->dm_nsegs);
#endif

	segs = map->dm_segs;
	dva = segs[0]._ds_va & ~PGOFSET;
	sgsize = segs[0]._ds_sgsize;

	/* Unmap the DVMA addresses. */
	pmap_remove(pmap_kernel(), dva, dva + sgsize);
	pmap_update(pmap_kernel());

	/* Free the DVMA addresses. */
	s = splvm();
	error = extent_free(dvma_extent, dva, sgsize, EX_NOWAIT);
	splx(s);
#ifdef DIAGNOSTIC
	if (error)
		panic("%s: unable to free DVMA region", __func__);
#endif

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}
Example no. 10
kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
{
    pmap_t	pmap = map->pmap;

    pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));

    return( KERN_SUCCESS );
}
Example no. 11
int
mappedcopyout(void *f, void *t, size_t count)
{
	void *fromp = f, *top = t;
	vaddr_t kva;
	paddr_t upa;
	size_t len;
	int off, alignable;
	pmap_t upmap;
#define CADDR2 caddr1

#ifdef DEBUG
	if (mappedcopydebug & MDB_COPYOUT)
		printf("mappedcopyout(%p, %p, %lu), pid %d\n",
		    fromp, top, (u_long)count, curproc->p_pid);
	mappedcopyoutcount++;
#endif

	if (CADDR2 == 0)
		CADDR2 = (void *) uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
		    UVM_KMF_VAONLY);

	kva = (vaddr_t) CADDR2;
	off = (int)((u_long)top & PAGE_MASK);
	alignable = (off == ((u_long)fromp & PAGE_MASK));
	upmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
	while (count > 0) {
		/*
		 * First access of a page, use subyte to make sure
		 * page is faulted in and write access allowed.
		 */
		if (subyte(top, *((char *)fromp)) == -1)
			return EFAULT;
		/*
		 * Map in the page and memcpy data out to it
		 */
		if (pmap_extract(upmap, trunc_page((vaddr_t)top), &upa)
		    == false)
			panic("mappedcopyout: null page frame");
		len = min(count, (PAGE_SIZE - off));
		pmap_enter(pmap_kernel(), kva, upa,
		    VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		pmap_update(pmap_kernel());
		if (len == PAGE_SIZE && alignable && off == 0)
			copypage(fromp, (void *)kva);
		else
			memcpy((void *)(kva + off), fromp, len);
		fromp += len;
		top += len;
		count -= len;
		off = 0;
	}
	pmap_remove(pmap_kernel(), kva, kva + PAGE_SIZE);
	pmap_update(pmap_kernel());
	return 0;
#undef CADDR2
}
Example no. 12
/*
 * uvm_pagermapout: remove KVA mapping
 *
 * We remove our mappings by hand and then release the KVA range back to
 * the pager segment.
 */
void
uvm_pagermapout(vaddr_t kva, int npages)
{

	pmap_remove(pmap_kernel(), kva, kva + (npages << PAGE_SHIFT));
	pmap_update(pmap_kernel());
	uvm_pseg_release(kva);

}
Example no. 13
/*
 * Set a red zone in the kernel stack after the u. area.
 */
void
setredzone(struct proc *p)
{
#if 0
	pmap_remove(pmap_kernel(), (vaddr_t)p->p_addr + PAGE_SIZE,
	    (vaddr_t)p->p_addr + 2 * PAGE_SIZE);
	pmap_update(pmap_kernel());
#endif
}
Example no. 14
/*
 * Identify the physical page mapped at the given kernel virtual
 * address.  Insert this physical page into the given address space at
 * the given virtual address, replacing the physical page, if any,
 * that already exists there.
 */
static int
vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
{
	vm_map_t map = mapa;
	vm_page_t kern_pg, user_pg;
	vm_object_t uobject;
	vm_map_entry_t entry;
	vm_pindex_t upindex;
	vm_prot_t prot;
	boolean_t wired;

	KASSERT((uaddr & PAGE_MASK) == 0,
	    ("vm_pgmoveco: uaddr is not page aligned"));

	/*
	 * Herein the physical page is validated and dirtied.  It is
	 * unwired in sf_buf_mext().
	 */
	kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
	kern_pg->valid = VM_PAGE_BITS_ALL;
	KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
	    ("vm_pgmoveco: kern_pg is not correctly wired"));

	if ((vm_map_lookup(&map, uaddr,
			   VM_PROT_WRITE, &entry, &uobject,
			   &upindex, &prot, &wired)) != KERN_SUCCESS) {
		return(EFAULT);
	}
	VM_OBJECT_LOCK(uobject);
retry:
	if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
		if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
			goto retry;
		vm_page_lock_queues();
		pmap_remove_all(user_pg);
		vm_page_free(user_pg);
	} else {
		/*
		 * Even if a physical page does not exist in the
		 * object chain's first object, a physical page from a
		 * backing object may be mapped read only.
		 */
		if (uobject->backing_object != NULL)
			pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
		vm_page_lock_queues();
	}
	vm_page_insert(kern_pg, uobject, upindex);
	vm_page_dirty(kern_pg);
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(uobject);
	vm_map_lookup_done(map, entry);
	return(KERN_SUCCESS);
}
Example no. 15
void
kmem_io_map_deallocate(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	/*
	 *	Remove the mappings.  The pmap_remove is needed.
	 */
	
	pmap_remove(vm_map_pmap(map), addr, addr + size);
	vm_map_remove(map, addr, addr + size);
}
Example no. 16
/*
 * _bus_dmamem_unmap_common --
 *	Remove a mapping created with _bus_dmamem_map_common().
 */
void
_bus_dmamem_unmap_common(bus_dma_tag_t t,
			 void *kva,
			 size_t size)
{

	KASSERT(((vaddr_t)kva & PAGE_MASK) == 0);

	size = round_page(size);
	/* XXX pmap_kremove()?  See above... */
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
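Since the comment says this undoes a mapping created with _bus_dmamem_map_common(), a hedged sketch of that mapping side may help; it follows the generic _bus_dmamem_map() pattern (bus_dmamem_map_sketch is illustrative, and the actual _bus_dmamem_map_common() may differ): reserve kernel VA with UVM_KMF_VAONLY, then enter each physical segment page by page.

/*
 * Hedged sketch: map DMA-safe memory segments into kernel VA.
 */
int
bus_dmamem_map_sketch(bus_dma_segment_t *segs, int nsegs, size_t size,
    void **kvap)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	size = round_page(size);
	va = uvm_km_alloc(kernel_map, size, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (va == 0)
		return ENOMEM;
	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("bus_dmamem_map_sketch: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());
	return 0;
}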
Example no. 17
void
uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
{
	vm_page_t m;

	if (!hw_direct_map)
		pmap_remove(kernel_pmap,(vm_offset_t)mem,
		    (vm_offset_t)mem + PAGE_SIZE);

	m = PHYS_TO_VM_PAGE((vm_offset_t)mem);
	m->wire_count--;
	vm_page_free(m);
	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
	atomic_subtract_int(&hw_uma_mdpages, 1);
}
Example no. 18
/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_remove(vm_map_pmap(phys_map), addr, addr + len);
	pmap_update(vm_map_pmap(phys_map));
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
Example no. 19
void
ifpga_mem_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
    vaddr_t startva, endva;

    if (pmap_devmap_find_va(bsh, size) != NULL) {
        /* Device was statically mapped; nothing to do. */
        return;
    }

    startva = trunc_page(bsh);
    endva = round_page(bsh + size);

    pmap_remove(pmap_kernel(), startva, endva);
    pmap_update(pmap_kernel());
    uvm_km_free(kernel_map, startva, endva - startva, UVM_KMF_VAONLY);
}
Example no. 20
/*
 * Unmap DVMA mappings from kernel
 */
void
viommu_dvmamem_unmap(bus_dma_tag_t t, bus_dma_tag_t t0, caddr_t kva,
    size_t size)
{
	
	DPRINTF(IDB_BUSDMA, ("iommu_dvmamem_unmap: kvm %p size %lx\n",
	    kva, size));
	    
#ifdef DIAGNOSTIC
	if ((u_long)kva & PAGE_MASK)
		panic("iommu_dvmamem_unmap");
#endif
	
	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}
Example no. 21
/*
 * Free the io map addresses associated with this IO operation.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;
	
#ifdef	DIAGNOSTIC
	if (!(bp->b_flags & B_PHYS))
		panic("vunmapbuf");
#endif
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_remove(vm_map_pmap(phys_map), addr, addr + len);
	pmap_update(vm_map_pmap(phys_map));
	uvm_km_free_wakeup(phys_map, addr, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
Example no. 22
/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
    vaddr_t addr, off;
    pmap_t kpmap;

    if ((bp->b_flags & B_PHYS) == 0)
        panic("vunmapbuf");
    addr = trunc_page((vaddr_t)bp->b_data);
    off = (vaddr_t)bp->b_data - addr;
    len = round_page(off + len);
    kpmap = vm_map_pmap(phys_map);
    pmap_remove(kpmap, addr, addr + len);
    pmap_update(kpmap);
    uvm_km_free_wakeup(phys_map, addr, len);
    bp->b_data = bp->b_saveaddr;
    bp->b_saveaddr = 0;
}
Example no. 23
/*
 * Unmap IO request from the kernel virtual address space.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *pmap;
	vaddr_t kva;
	vsize_t off;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
#endif
	kva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = round_page(off + len);
	pmap = vm_map_pmap(phys_map);
	pmap_remove(pmap, kva, kva + len);
	pmap_update(pmap);
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
Example no. 24
void
bus_mapout(void *ptr, int sz)
{
	vaddr_t va;
	int off;

	va = (vaddr_t)ptr;

	/* If it was a PROM mapping, do NOT free it! */
	if ((va >= SUN3_MONSTART) && (va < SUN3_MONEND))
		return;

	off = va & PGOFSET;
	va -= off;
	sz += off;
	sz = m68k_round_page(sz);

	pmap_remove(pmap_kernel(), va, va + sz);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, sz, UVM_KMF_VAONLY);
}
Example no. 25
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2).
	 */
	if (kva >= (void *)MIPS_KSEG0_START &&
	    kva < (void *)MIPS_KSEG2_START)
		return;

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
Example no. 26
static inline void
cpu_activate_pcb(struct lwp *l)
{
	struct trapframe *tf = l->l_md.md_regs;
	struct pcb *pcb = lwp_getpcb(l);
#ifdef DIAGNOSTIC
	vaddr_t uarea = (vaddr_t)pcb;
	vaddr_t maxsp = uarea + USPACE;
#endif
	KASSERT(tf == (void *)(uarea + PAGE_SIZE));

	/*
	 * Stash the physical address of FP regs for later perusal
	 */
	tf->tf_cr30 = (u_int)pcb->pcb_fpregs;

#ifdef DIAGNOSTIC
	/* Create the kernel stack red zone. */
	pmap_remove(pmap_kernel(), maxsp - PAGE_SIZE, maxsp);
	pmap_update(pmap_kernel());
#endif
}
Example no. 27
/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = m68k_trunc_page(bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = m68k_round_page(off + len);

#ifdef M68K_VAC
	pmap_remove(vm_map_pmap(phys_map), kva, kva + len);
#else
	pmap_kremove(kva, len);
#endif
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
Example no. 28
/*
 * Unmaps a vm_page mapped by kkt_do_mappings().
 */
void kkt_unmap(vm_offset_t vaddr)
{
  unsigned int range_index, free_index;

  simple_lock(&kkt_virt_lock);
  range_index = (vaddr - kkt_virt_start_vaddr) / PAGE_SIZE;
  if (kkt_virt_status[range_index] != USED_VADDR)
    printf("kkt_unmap error: virt_addr= %x, range_index= %d\n",
	   vaddr, range_index);
  kkt_virt_status[range_index] = FREE_VADDR ;
  kkt_virt_vmp[range_index] = (vm_page_t) NULL ;
  kkt_virt_free_index++;
  free_index = kkt_virt_free_index;
  simple_unlock(&kkt_virt_lock);

  pmap_remove(kernel_pmap, vaddr, (vaddr + PAGE_SIZE));

  /* 
   * If we are releasing the only available virtual address of the range,
   * wake up the waiting threads.
   */
  if (free_index == 1)
    thread_wakeup((event_t) kkt_virt_status);
}
Example no. 29
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	vaddr_t o, v;
	int c;
	struct iovec *iov;
	int error = 0;
	static int physlock;
	vm_prot_t prot;

	if (minor(dev) == DEV_MEM) {
		/* lock against other uses of shared vmmap */
		while (physlock > 0) {
			physlock++;
			error = tsleep((void *)&physlock, PZERO | PCATCH,
			    "mmrw", 0);
			if (error)
				return error;
		}
		physlock = 1;
	}
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

		case DEV_MEM:
			v = uio->uio_offset;

			/*
			 * Only allow reads in physical RAM.
			 */
			if (v >= 0xFFFFFFFC || v < lowram) {
				error = EFAULT;
				goto unlock;
			}

			prot = uio->uio_rw == UIO_READ ? VM_PROT_READ :
			    VM_PROT_WRITE;
			pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
			    trunc_page(v), prot, prot|PMAP_WIRED);
			pmap_update(pmap_kernel());
			o = m68k_page_offset(uio->uio_offset);
			c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
			error = uiomove(vmmap + o, c, uio);
			pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
			    (vaddr_t)vmmap + PAGE_SIZE);
			pmap_update(pmap_kernel());
			continue;

		case DEV_KMEM:
			v = uio->uio_offset;
			c = min(iov->iov_len, MAXPHYS);
			if (!uvm_kernacc((void *)v, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
				return EFAULT;

			/*
			 * Don't allow reading intio
			 * device space.  This could lead to
			 * corruption of device registers.
			 */
			if (ISIIOVA(v))
				return EFAULT;

			error = uiomove((void *)v, c, uio);
			continue;

		case DEV_NULL:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return 0;

		case DEV_ZERO:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			/*
			 * On the first call, allocate and zero a page
			 * of memory for use with /dev/zero.
			 */
			if (devzeropage == NULL)
				devzeropage = (void *)
				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK|M_ZERO);
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(devzeropage, c, uio);
			continue;

		default:
			return ENXIO;
		}
		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (minor(dev) == DEV_MEM) {
 unlock:
		if (physlock > 1)
			wakeup((void *)&physlock);
		physlock = 0;
	}
	return error;
}
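The DEV_MEM case above borrows a single-page kernel window (vmmap) for each chunk: map the physical page, uiomove through it, then remove the mapping. A hedged, stand-alone sketch of the same idiom follows (copy_phys_sketch and the vmmap_window parameter are hypothetical).

/*
 * Hedged sketch: read up to one page of physical memory through a
 * borrowed single-page kernel window.
 */
static int
copy_phys_sketch(paddr_t pa, void *buf, size_t len, vaddr_t vmmap_window)
{
	vaddr_t off = pa & PAGE_MASK;

	if (off + len > PAGE_SIZE)
		return EINVAL;

	pmap_enter(pmap_kernel(), vmmap_window, trunc_page(pa),
	    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
	pmap_update(pmap_kernel());

	memcpy(buf, (void *)(vmmap_window + off), len);

	pmap_remove(pmap_kernel(), vmmap_window, vmmap_window + PAGE_SIZE);
	pmap_update(pmap_kernel());
	return 0;
}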
Example no. 30
static void
pccard_attach(struct device *parent, struct device *myself, void *aux)
{
	struct pccard_softc *self = (struct pccard_softc *) myself;
	struct pcmciabus_attach_args paa;
	vaddr_t pcmcia_base;
	vaddr_t i;

	printf("\n");

	gayle_init();

	pcmcia_base = uvm_km_alloc(kernel_map,
				   GAYLE_PCMCIA_END - GAYLE_PCMCIA_START,
				   0, UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (pcmcia_base == 0) {
		printf("attach failed (no virtual memory)\n");
		return;
	}

	for (i = GAYLE_PCMCIA_START; i < GAYLE_PCMCIA_END; i += PAGE_SIZE)
		pmap_enter(vm_map_pmap(kernel_map),
		    i - GAYLE_PCMCIA_START + pcmcia_base, i,
		    VM_PROT_READ | VM_PROT_WRITE, true);
	pmap_update(vm_map_pmap(kernel_map));

	/* override the one-byte access methods for I/O space */
	pcmio_bs_methods = amiga_bus_stride_1;
	pcmio_bs_methods.bsr1 = pcmio_bsr1;
	pcmio_bs_methods.bsw1 = pcmio_bsw1;
	pcmio_bs_methods.bsrm1 = pcmio_bsrm1;
	pcmio_bs_methods.bswm1 = pcmio_bswm1;
	pcmio_bs_methods.bsrr1 = pcmio_bsrr1;
	pcmio_bs_methods.bswr1 = pcmio_bswr1;
	pcmio_bs_methods.bssr1 = pcmio_bssr1;
	pcmio_bs_methods.bscr1 = pcmio_bscr1;

	reset_card_reg = (u_int8_t *) pcmcia_base +
	    (GAYLE_PCMCIA_RESET - GAYLE_PCMCIA_START);

	self->io_space.base = (bus_addr_t) pcmcia_base +
	    (GAYLE_PCMCIA_IO_START - GAYLE_PCMCIA_START);
	self->io_space.absm = &pcmio_bs_methods;

	self->attr_space.base = (bus_addr_t) pcmcia_base +
	    (GAYLE_PCMCIA_ATTR_START - GAYLE_PCMCIA_START);
	self->attr_space.absm = &amiga_bus_stride_1;

	/* XXX we should check if the 4M of common memory are actually
	 *	RAM or PCMCIA usable.
	 * For now, we just do as if the 4M were RAM and make common memory
	 * point to attribute memory, which is OK for some I/O cards.
	 */
	self->mem_space.base = (bus_addr_t) pcmcia_base;
	self->mem_space.absm = &amiga_bus_stride_1;

	self->devs[0].sc = self;
	self->devs[0].intr_func = NULL;
	self->devs[0].intr_arg = NULL;
	self->devs[0].flags = 0;

	gayle.pcc_status = 0;
	gayle.intreq = 0;
	gayle.pcc_config = 0;
	gayle.intena &= GAYLE_INT_IDE;

	paa.paa_busname = "pcmcia";
	paa.pct = &chip_functions;
	paa.pch = &self->devs[0];
	paa.iobase = 0;
	paa.iosize = 0;
	self->devs[0].card =
		config_found(myself, &paa, simple_devprint);
	if (self->devs[0].card == NULL) {
		printf("attach failed, config_found() returned NULL\n");
		pmap_remove(kernel_map->pmap, pcmcia_base,
		    pcmcia_base + (GAYLE_PCMCIA_END - GAYLE_PCMCIA_START));
		pmap_update(kernel_map->pmap);
		uvm_deallocate(kernel_map, pcmcia_base,
			GAYLE_PCMCIA_END - GAYLE_PCMCIA_START);
		return;
	}

	self->intr6.isr_intr = pccard_intr6;
	self->intr6.isr_arg = self;
	self->intr6.isr_ipl = 6;
	add_isr(&self->intr6);

	self->intr2.isr_intr = pccard_intr2;
	self->intr2.isr_arg = self;
	self->intr2.isr_ipl = 2;
	add_isr(&self->intr2);

	if (kthread_create(PRI_NONE, 0, NULL, pccard_kthread, self,
	    NULL, "pccard")) {
		printf("%s: can't create kernel thread\n",
			self->sc_dev.dv_xname);
		panic("pccard kthread_create() failed");
	}

	gayle.intena |= GAYLE_INT_DETECT | GAYLE_INT_IREQ;

	/* reset the card if it's already there */
	if (gayle.pcc_status & GAYLE_CCMEM_DETECT) {
		volatile u_int8_t x;
		*reset_card_reg = 0x0;
		delay(1000);
		x = *reset_card_reg;
		gayle.pcc_status = GAYLE_CCMEM_WP | GAYLE_CCIO_SPKR;
	}

	pccard_attach_slot(&self->devs[0]);
}