Example #1
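/*
 * Allocate cPages physically contiguous pages below VmPhysAddrHigh with the
 * given alignment, insert them into pObject starting at iPIndex and, when
 * fWire is set, wire them.  Returns the first page on success, NULL on
 * failure.
 */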
static vm_page_t rtR0MemObjFreeBSDContigPhysAllocHelper(vm_object_t pObject, vm_pindex_t iPIndex,
                                                        u_long cPages, vm_paddr_t VmPhysAddrHigh,
                                                        u_long uAlignment, bool fWire)
{
    vm_page_t pPages;
    int cTries = 0;

#if __FreeBSD_version > 1000000
    int fFlags = VM_ALLOC_INTERRUPT | VM_ALLOC_NOBUSY;
    if (fWire)
        fFlags |= VM_ALLOC_WIRED;

    while (cTries <= 1)
    {
        VM_OBJECT_LOCK(pObject);
        pPages = vm_page_alloc_contig(pObject, iPIndex, fFlags, cPages, 0,
                                      VmPhysAddrHigh, uAlignment, 0, VM_MEMATTR_DEFAULT);
        VM_OBJECT_UNLOCK(pObject);
        if (pPages)
            break;
        vm_pageout_grow_cache(cTries, 0, VmPhysAddrHigh);
        cTries++;
    }

    return pPages;
#else
    while (cTries <= 1)
    {
        pPages = vm_phys_alloc_contig(cPages, 0, VmPhysAddrHigh, uAlignment, 0);
        if (pPages)
            break;
        vm_contig_grow_cache(cTries, 0, VmPhysAddrHigh);
        cTries++;
    }

    if (!pPages)
        return pPages;
    VM_OBJECT_LOCK(pObject);
    for (vm_pindex_t iPage = 0; iPage < cPages; iPage++)
    {
        vm_page_t pPage = pPages + iPage;
        vm_page_insert(pPage, pObject, iPIndex + iPage);
        pPage->valid = VM_PAGE_BITS_ALL;
        if (fWire)
        {
            pPage->wire_count = 1;
            atomic_add_int(&cnt.v_wire_count, 1);
        }
    }
    VM_OBJECT_UNLOCK(pObject);
    return pPages;
#endif
}
Example #2
/*
 * Identify the physical page mapped at the given kernel virtual
 * address.  Insert this physical page into the given address space at
 * the given virtual address, replacing the physical page, if any,
 * that already exists there.
 */
static int
vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
{
	vm_map_t map = mapa;
	vm_page_t kern_pg, user_pg;
	vm_object_t uobject;
	vm_map_entry_t entry;
	vm_pindex_t upindex;
	vm_prot_t prot;
	boolean_t wired;

	KASSERT((uaddr & PAGE_MASK) == 0,
	    ("vm_pgmoveco: uaddr is not page aligned"));

	/*
	 * Herein the physical page is validated and dirtied.  It is
	 * unwired in sf_buf_mext().
	 */
	kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
	kern_pg->valid = VM_PAGE_BITS_ALL;
	KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
	    ("vm_pgmoveco: kern_pg is not correctly wired"));

	if ((vm_map_lookup(&map, uaddr,
			   VM_PROT_WRITE, &entry, &uobject,
			   &upindex, &prot, &wired)) != KERN_SUCCESS) {
		return(EFAULT);
	}
	VM_OBJECT_LOCK(uobject);
retry:
	if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
		if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
			goto retry;
		vm_page_lock_queues();
		pmap_remove_all(user_pg);
		vm_page_free(user_pg);
	} else {
		/*
		 * Even if a physical page does not exist in the
		 * object chain's first object, a physical page from a
		 * backing object may be mapped read only.
		 */
		if (uobject->backing_object != NULL)
			pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
		vm_page_lock_queues();
	}
	vm_page_insert(kern_pg, uobject, upindex);
	vm_page_dirty(kern_pg);
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(uobject);
	vm_map_lookup_done(map, entry);
	return(KERN_SUCCESS);
}
Example #3
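/*
 * Device pager fault handler used by netmap: translate the faulting offset
 * into a physical address in netmap memory and return a fictitious page for
 * it, either updating the caller's fake page in place or replacing *mres
 * with a freshly allocated one.
 */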
static int
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
                       int prot, vm_page_t *mres)
{
    struct netmap_vm_handle_t *vmh = object->handle;
    struct netmap_priv_d *priv = vmh->priv;
    vm_paddr_t paddr;
    vm_page_t page;
    vm_memattr_t memattr;
    vm_pindex_t pidx;

    ND("object %p offset %jd prot %d mres %p",
       object, (intmax_t)offset, prot, mres);
    memattr = object->memattr;
    pidx = OFF_TO_IDX(offset);
    paddr = netmap_mem_ofstophys(priv->np_mref, offset);
    if (paddr == 0)
        return VM_PAGER_FAIL;

    if (((*mres)->flags & PG_FICTITIOUS) != 0) {
        /*
         * If the passed in result page is a fake page, update it with
         * the new physical address.
         */
        page = *mres;
        vm_page_updatefake(page, paddr, memattr);
    } else {
        /*
         * Replace the passed in reqpage page with our own fake page and
         * free up all of the original pages.
         */
#ifndef VM_OBJECT_WUNLOCK	/* FreeBSD < 10.x */
#define VM_OBJECT_WUNLOCK VM_OBJECT_UNLOCK
#define VM_OBJECT_WLOCK	VM_OBJECT_LOCK
#endif /* VM_OBJECT_WUNLOCK */

        VM_OBJECT_WUNLOCK(object);
        page = vm_page_getfake(paddr, memattr);
        VM_OBJECT_WLOCK(object);
        vm_page_free(*mres);
        *mres = page;
        vm_page_insert(page, object, pidx);
    }
    page->valid = VM_PAGE_BITS_ALL;
    return (VM_PAGER_OK);
}
Example #4
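/*
 * Legacy device pager fault handler: ask the driver's d_mmap routine for the
 * physical address backing the offset, then either update the existing
 * fictitious page in place or allocate a fresh fake page and insert it into
 * the object.
 */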
static int old_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
	vm_paddr_t paddr;
	vm_page_t page;
	vm_offset_t pidx = OFF_TO_IDX(offset);
	cdev_t dev;

	page = *mres;
	dev = object->handle;

	paddr = pmap_phys_address(
		    dev_dmmap(dev, offset, prot, NULL));
	KASSERT(paddr != -1,("dev_pager_getpage: map function returns error"));
	KKASSERT(object->type == OBJT_DEVICE);

	if (page->flags & PG_FICTITIOUS) {
		/*
		 * If the passed in reqpage page is already a fake page,
		 * update it with the new physical address.
		 */
		page->phys_addr = paddr;
		page->valid = VM_PAGE_BITS_ALL;
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page
		 * and free up all the original pages.
		 */
		page = dev_pager_getfake(paddr, object->memattr);
		TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist,
				  page, pageq);
		vm_object_hold(object);
		vm_page_free(*mres);
		if (vm_page_insert(page, object, pidx) == FALSE) {
			panic("dev_pager_getpage: page (%p,%016jx) exists",
			      object, (uintmax_t)pidx);
		}
		vm_object_drop(object);
	}

	return (VM_PAGER_OK);
}
Example #5
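/*
 * Fault handler for Xen privcmd mappings: look up the fictitious page that
 * backs the faulting offset, free any page the fault code passed in via
 * *mres, and insert the fictitious page into the object busied and fully
 * valid.
 */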
static int
privcmd_pg_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
	struct privcmd_map *map = object->handle;
	vm_pindex_t pidx;
	vm_page_t page, oldm;

	if (map->mapped != true)
		return (VM_PAGER_FAIL);

	pidx = OFF_TO_IDX(offset);
	if (pidx >= map->size || BIT_ISSET(map->size, pidx, map->err))
		return (VM_PAGER_FAIL);

	page = PHYS_TO_VM_PAGE(map->phys_base_addr + offset);
	if (page == NULL)
		return (VM_PAGER_FAIL);

	KASSERT((page->flags & PG_FICTITIOUS) != 0,
	    ("not fictitious %p", page));
	KASSERT(page->wire_count == 1, ("wire_count not 1 %p", page));
	KASSERT(vm_page_busied(page) == 0, ("page %p is busy", page));

	if (*mres != NULL) {
		oldm = *mres;
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	}

	vm_page_insert(page, object, pidx);
	page->valid = VM_PAGE_BITS_ALL;
	vm_page_xbusy(page);
	*mres = page;
	return (VM_PAGER_OK);
}
Example #6
/* See: old_dev_pager_fault() in device_pager.c as an example. */
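/*
 * Validate the requested protection and offset against the CFB pool, compute
 * the backing physical address, and substitute an uncacheable fake page for
 * *mres.
 */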
static int
cheri_compositor_cfb_pg_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	vm_pindex_t pidx;
	vm_paddr_t paddr;
	vm_page_t page;
	struct cfb_vm_object *cfb_vm_obj;
	struct cdev *dev;
	struct cheri_compositor_softc *sc;
	struct cdevsw *csw;
	vm_memattr_t memattr;
	int ref;
	int retval;

	pidx = OFF_TO_IDX(offset);

	VM_OBJECT_WUNLOCK(vm_obj);

	cfb_vm_obj = vm_obj->handle;
	dev = cfb_vm_obj->dev;
	sc = dev->si_drv1;

	retval = VM_PAGER_OK;

	CHERI_COMPOSITOR_DEBUG(sc, "vm_obj: %p, offset: %lu, prot: %i", vm_obj,
	    offset, prot);

	csw = dev_refthread(dev, &ref);

	if (csw == NULL) {
		retval = VM_PAGER_FAIL;
		goto done_unlocked;
	}

	/* Traditional d_mmap() call. */
	CHERI_COMPOSITOR_DEBUG(sc, "offset: %lu, nprot: %i", offset, prot);

	if (validate_prot_and_offset(sc, cfb_vm_obj->pool->mapped_fd,
	    prot, offset) != 0) {
		retval = VM_PAGER_FAIL;
		goto done_unlocked;
	}

	paddr = calculate_physical_address(sc, cfb_vm_obj->pool, offset);
	memattr = VM_MEMATTR_UNCACHEABLE;

	CHERI_COMPOSITOR_DEBUG(sc, "paddr: %p, memattr: %i",
	    (void *) paddr, memattr);

	dev_relthread(dev, ref);

	/* Sanity checks. */
	KASSERT((((*mres)->flags & PG_FICTITIOUS) == 0),
	    ("Expected non-fictitious page."));

	/*
	 * Replace the passed in reqpage page with our own fake page and
	 * free up all of the original pages.
	 */
	page = vm_page_getfake(paddr, memattr);
	VM_OBJECT_WLOCK(vm_obj);
	vm_page_lock(*mres);
	vm_page_free(*mres);
	vm_page_unlock(*mres);
	*mres = page;
	vm_page_insert(page, vm_obj, pidx);

	page->valid = VM_PAGE_BITS_ALL;

	/* Success! */
	retval = VM_PAGER_OK;
	goto done;

done_unlocked:
	VM_OBJECT_WLOCK(vm_obj);
done:
	CHERI_COMPOSITOR_DEBUG(sc, "Finished with mres: %p (retval: %i)", *mres,
	    retval);

	return (retval);
}
Example #7
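/*
 * Fault handler for TTM buffer objects: reserve the BO, wait for any
 * pipelined move to complete, resolve the faulting offset to either an
 * I/O-memory fictitious page or a populated TTM page, and insert it into the
 * VM object, retrying if the page is busy or the insertion fails.
 */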
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{

	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	oldm = *mres;
	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_remove(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			kern_yield(0);
			goto reserve;
		}
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTART:
		case -EINTR:
			kern_yield(0);
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	mtx_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninterruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of this situation is the Xorg process
		 * which uses SIGALRM internally. The signal could
		 * interrupt the wait, causing the page fault to fail
		 * and the process to receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		mtx_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all pages at once, most common usage */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

	if (bo->mem.bus.is_iomem) {
		m = PHYS_TO_VM_PAGE(bo->mem.bus.base + bo->mem.bus.offset +
		    offset);
		KASSERT((m->flags & PG_FICTITIOUS) != 0,
		    ("physical address %#jx not fictitious",
		    (uintmax_t)(bo->mem.bus.base + bo->mem.bus.offset
		    + offset)));
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

	VM_OBJECT_WLOCK(vm_obj);
	if (vm_page_busied(m)) {
		vm_page_lock(m);
		VM_OBJECT_WUNLOCK(vm_obj);
		vm_page_busy_sleep(m, "ttmpbs");
		VM_OBJECT_WLOCK(vm_obj);
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_WAIT;
			VM_OBJECT_WLOCK(vm_obj);
			ttm_mem_io_unlock(man);
			ttm_bo_unreserve(bo);
			goto retry;
		}
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	vm_page_xbusy(m);

	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
	}

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}
Example #8
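/*
 * getpages method of the scatter/gather-list pager: locate the physical
 * address backing the requested page in the sglist, construct a fake page
 * for it, free the originally passed-in pages, and substitute the fake page
 * for the requested one.
 */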
static int
sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	struct sglist *sg;
	vm_page_t m_paddr, page;
	vm_pindex_t offset;
	vm_paddr_t paddr;
	vm_memattr_t memattr;
	size_t space;
	int i;

	VM_OBJECT_ASSERT_WLOCKED(object);
	sg = object->handle;
	memattr = object->memattr;
	VM_OBJECT_WUNLOCK(object);
	offset = m[reqpage]->pindex;

	/*
	 * Lookup the physical address of the requested page.  An initial
	 * value of '1' instead of '0' is used so we can assert that the
	 * page is found since '0' can be a valid page-aligned physical
	 * address.
	 */
	space = 0;
	paddr = 1;
	for (i = 0; i < sg->sg_nseg; i++) {
		if (space + sg->sg_segs[i].ss_len <= (offset * PAGE_SIZE)) {
			space += sg->sg_segs[i].ss_len;
			continue;
		}
		paddr = sg->sg_segs[i].ss_paddr + offset * PAGE_SIZE - space;
		break;
	}
	KASSERT(paddr != 1, ("invalid SG page index"));

	/* If "paddr" is a real page, perform a sanity check on "memattr". */
	if ((m_paddr = vm_phys_paddr_to_vm_page(paddr)) != NULL &&
	    pmap_page_get_memattr(m_paddr) != memattr) {
		memattr = pmap_page_get_memattr(m_paddr);
		printf(
	    "WARNING: A device driver has set \"memattr\" inconsistently.\n");
	}

	/* Return a fake page for the requested page. */
	KASSERT(!(m[reqpage]->flags & PG_FICTITIOUS),
	    ("backing page for SG is fake"));

	/* Construct a new fake page. */
	page = vm_page_getfake(paddr, memattr);
	VM_OBJECT_WLOCK(object);
	TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, pageq);

	/* Free the original pages and insert this fake page into the object. */
	for (i = 0; i < count; i++) {
		vm_page_lock(m[i]);
		vm_page_free(m[i]);
		vm_page_unlock(m[i]);
	}
	vm_page_insert(page, object, offset);
	m[reqpage] = page;
	page->valid = VM_PAGE_BITS_ALL;

	return (VM_PAGER_OK);
}
Example #9
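/*
 * GEM pager fault handler for pscnv: compute the backing physical address
 * (FIFO control window, VRAM or system RAM depending on the buffer type),
 * look up the matching fictitious page, set its memory attribute and insert
 * it into the VM object in place of *mres.
 */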
static int
pscnv_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct drm_gem_object *gem_obj = vm_obj->handle;
	struct pscnv_bo *bo = gem_obj->driver_private;
	struct drm_device *dev = gem_obj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	vm_page_t m = NULL;
	vm_page_t oldm;
	vm_memattr_t mattr;
	vm_paddr_t paddr;
	const char *what;

	if (bo->chan) {
		paddr = dev_priv->fb_phys + offset +
			nvc0_fifo_ctrl_offs(dev, bo->chan->cid);
		mattr = VM_MEMATTR_UNCACHEABLE;
		what = "fifo";
	} else switch (bo->flags & PSCNV_GEM_MEMTYPE_MASK) {
	case PSCNV_GEM_VRAM_SMALL:
	case PSCNV_GEM_VRAM_LARGE:
		paddr = dev_priv->fb_phys + bo->map1->start + offset;
		mattr = VM_MEMATTR_WRITE_COMBINING;
		what = "vram";
		break;
	case PSCNV_GEM_SYSRAM_SNOOP:
	case PSCNV_GEM_SYSRAM_NOSNOOP:
		paddr = bo->dmapages[OFF_TO_IDX(offset)];
		mattr = VM_MEMATTR_WRITE_BACK;
		what = "sysram";
		break;
	default: return (EINVAL);
	}

	if (offset >= bo->size) {
		if (pscnv_mem_debug > 0)
			NV_WARN(dev, "Reading %p + %08llx (%s) is past max size %08llx\n",
				bo, offset, what, bo->size);
		return (VM_PAGER_ERROR);
	}
	DRM_LOCK(dev);
	if (pscnv_mem_debug > 0)
		NV_WARN(dev, "Connecting %p+%08llx (%s) at phys %010llx\n",
			bo, offset, what, paddr);
	vm_object_pip_add(vm_obj, 1);

	if (*mres != NULL) {
		oldm = *mres;
		vm_page_lock(oldm);
		vm_page_remove(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
	//VM_OBJECT_LOCK(vm_obj);
	m = vm_phys_fictitious_to_vm_page(paddr);
	if (m == NULL) {
		DRM_UNLOCK(dev);
		return -EFAULT;
	}
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("not fictitious %p", m));
	KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));

	if ((m->flags & VPO_BUSY) != 0) {
		DRM_UNLOCK(dev);
		return -EFAULT;
	}
	pmap_page_set_memattr(m, mattr);
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	vm_page_lock(m);
	vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
	vm_page_unlock(m);
	vm_page_busy(m);

	printf("fault %p %jx %x phys %x", gem_obj, offset, prot,
	    m->phys_addr);
	DRM_UNLOCK(dev);
	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
	}
	vm_object_pip_wakeup(vm_obj);
	return (VM_PAGER_OK);
}
Example #10
/*
 * vm_contig_pg_kmap:
 *
 * Map a previously allocated (vm_contig_pg_alloc) range of pages from
 * vm_page_array[] into the KVA.  Once mapped, the pages are part of
 * the kernel and are to be freed with kmem_free(&kernel_map, addr, size).
 *
 * No requirements.
 */
vm_offset_t
vm_contig_pg_kmap(int start, u_long size, vm_map_t map, int flags)
{
	vm_offset_t addr, tmp_addr;
	vm_page_t pga = vm_page_array;
	int i, count;

	size = round_page(size);
	if (size == 0)
		panic("vm_contig_pg_kmap: size must not be 0");

	crit_enter();
	lwkt_gettoken(&vm_token);

	/*
	 * We've found a contiguous chunk that meets our requirements.
	 * Allocate KVM, assign the physical pages, and return a kernel
	 * virtual address.
	 */
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE, 0, &addr) !=
	    KERN_SUCCESS) {
		/*
		 * XXX We almost never run out of kernel virtual
		 * space, so we don't make the allocated memory
		 * above available.
		 */
		vm_map_unlock(map);
		vm_map_entry_release(count);
		lwkt_reltoken(&vm_token);
		crit_exit();
		return (0);
	}

	/*
	 * kernel_object maps 1:1 to kernel_map.
	 */
	vm_object_hold(&kernel_object);
	vm_object_reference(&kernel_object);
	vm_map_insert(map, &count, 
		      &kernel_object, addr,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	tmp_addr = addr;
	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		vm_page_t m = &pga[i];
		vm_page_insert(m, &kernel_object, OFF_TO_IDX(tmp_addr));
		if ((flags & M_ZERO) && !(m->flags & PG_ZERO))
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
		m->flags = 0;
		tmp_addr += PAGE_SIZE;
 	}
	vm_map_wire(map, addr, addr + size, 0);

	vm_object_drop(&kernel_object);

	lwkt_reltoken(&vm_token);
	crit_exit();
	return (addr);
}
Example #11
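/*
 * Allocate wired kernel virtual memory: grab any guard pages and all of the
 * wired pages up front, carve out a map entry, then insert each page into
 * the backing object and enter it into the kernel pmap.
 */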
kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t 		object;
	vm_object_offset_t 	offset;
	vm_object_offset_t 	pg_offset;
	vm_map_entry_t 		entry;
	vm_map_offset_t 	map_addr, fill_start;
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, fill_size;
	kern_return_t 		kr;
	vm_page_t		mem;
	vm_page_t		guard_page_list = NULL;
	vm_page_t		wired_page_list = NULL;
	int			guard_page_count = 0;
	int			wired_page_count = 0;
	int			i;
	int			vm_alloc_flags;

	if (! vm_kernel_ready) {
		panic("kernel_memory_allocate: VM is not ready");
	}

	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}
	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t) mask;
	vm_alloc_flags = 0;


	/*
	 * limit the size of a single extent of wired memory
	 * to try and limit the damage to the system if
	 * too many pages get wired down
	 */
        if (map_size > (1 << 30)) {
                return KERN_RESOURCE_SHORTAGE;
        }

	/*
	 * Guard pages:
	 *
	 * Guard pages are implemented as fictitious pages.  By placing guard pages
	 * on either end of a stack, they can help detect cases where a thread walks
	 * off either end of its stack.  They are allocated and set up here and attempts
	 * to access those pages are trapped in vm_fault_page().
	 *
	 * The map_size we were passed may include extra space for
	 * guard pages.  If those were requested, then back it out of fill_size
	 * since vm_map_find_space() takes just the actual size not including
	 * guard pages.  Similarly, fill_start indicates where the actual pages
	 * will begin in the range.
	 */

	fill_start = 0;
	fill_size = map_size;

	if (flags & KMA_GUARD_FIRST) {
		vm_alloc_flags |= VM_FLAGS_GUARD_BEFORE;
		fill_start += PAGE_SIZE_64;
		fill_size -= PAGE_SIZE_64;
		if (map_size < fill_start + fill_size) {
			/* no space for a guard page */
			*addrp = 0;
			return KERN_INVALID_ARGUMENT;
		}
		guard_page_count++;
	}
	if (flags & KMA_GUARD_LAST) {
		vm_alloc_flags |= VM_FLAGS_GUARD_AFTER;
		fill_size -= PAGE_SIZE_64;
		if (map_size <= fill_start + fill_size) {
			/* no space for a guard page */
			*addrp = 0;
			return KERN_INVALID_ARGUMENT;
		}
		guard_page_count++;
	}
	wired_page_count = (int) (fill_size / PAGE_SIZE_64);
	assert(wired_page_count * PAGE_SIZE_64 == fill_size);

	for (i = 0; i < guard_page_count; i++) {
		for (;;) {
			mem = vm_page_grab_guard();

			if (mem != VM_PAGE_NULL)
				break;
			if (flags & KMA_NOPAGEWAIT) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			vm_page_more_fictitious();
		}
		mem->pageq.next = (queue_entry_t)guard_page_list;
		guard_page_list = mem;
	}

	for (i = 0; i < wired_page_count; i++) {
		uint64_t	unavailable;
		
		for (;;) {
		        if (flags & KMA_LOMEM)
			        mem = vm_page_grablo();
			else
			        mem = vm_page_grab();

		        if (mem != VM_PAGE_NULL)
			        break;

			if (flags & KMA_NOPAGEWAIT) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;

			if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
				kr = KERN_RESOURCE_SHORTAGE;
				goto out;
			}
			VM_PAGE_WAIT();
		}
		mem->pageq.next = (queue_entry_t)wired_page_list;
		wired_page_list = mem;
	}

	/*
	 *	Allocate a new object (if necessary).  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr,
			       fill_size, map_mask,
			       vm_alloc_flags, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		goto out;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ? 
		        map_addr : 0;

	entry->wired_count++;

	if (flags & KMA_PERMANENT)
		entry->permanent = TRUE;

	if (object != kernel_object)
		vm_object_reference(object);

	vm_object_lock(object);
	vm_map_unlock(map);

	pg_offset = 0;

	if (fill_start) {
		if (guard_page_list == NULL)
			panic("kernel_memory_allocate: guard_page_list == NULL");

		mem = guard_page_list;
		guard_page_list = (vm_page_t)mem->pageq.next;
		mem->pageq.next = NULL;

		vm_page_insert(mem, object, offset + pg_offset);

		mem->busy = FALSE;
		pg_offset += PAGE_SIZE_64;
	}
	for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) {
		if (wired_page_list == NULL)
			panic("kernel_memory_allocate: wired_page_list == NULL");

		mem = wired_page_list;
		wired_page_list = (vm_page_t)mem->pageq.next;
		mem->pageq.next = NULL;
		mem->wire_count++;

		vm_page_insert(mem, object, offset + pg_offset);

		mem->busy = FALSE;
		mem->pmapped = TRUE;
		mem->wpmapped = TRUE;

		PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem, 
			   VM_PROT_READ | VM_PROT_WRITE, object->wimg_bits & VM_WIMG_MASK, TRUE);

		if (flags & KMA_NOENCRYPT) {
			bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE);

			pmap_set_noencrypt(mem->phys_page);
		}
	}
Example #12
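/*
 * Allocate a physically contiguous region of wired kernel memory: reserve
 * map space, obtain the contiguous pages from cpm_allocate(), insert them
 * into the backing object and wire the mapping down.
 */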
kern_return_t
kmem_alloc_contig(
	vm_map_t		map,
	vm_offset_t		*addrp,
	vm_size_t		size,
	vm_offset_t 		mask,
	ppnum_t			max_pnum,
	ppnum_t			pnum_mask,
	int 			flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		map_addr; 
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, i;
	vm_map_entry_t		entry;
	vm_page_t		m, pages;
	kern_return_t		kr;

	if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT))) 
		return KERN_INVALID_ARGUMENT;
	
	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t)mask;

	/*
	 *	Allocate a new object (if necessary) and the reference we
	 *	will be donating to the map entry.  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ? 
		        map_addr : 0;

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, pnum_mask, FALSE, flags);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		*addrp = 0;
		return kr;
	}

	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), VM_PROT_DEFAULT, FALSE)) 
		!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, vm_map_trunc_page(map_addr), 
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = (vm_offset_t) map_addr;
	assert((vm_map_offset_t) *addrp == map_addr);
	return KERN_SUCCESS;
}