Example #1
/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages underlying the specified virtual
 *	address range.
 *
 *	A physical page must exist within the specified object at each index
 *	that is being unmapped.
 */
static int
_kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	vm_page_t m, next;
	vm_offset_t end, offset;
	int domain;

	KASSERT(object == kernel_object,
	    ("kmem_unback: only supports kernel object."));

	if (size == 0)
		return (0);
	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	end = offset + size;
	VM_OBJECT_WLOCK(object);
	m = vm_page_lookup(object, atop(offset));
	domain = vm_phys_domain(m);
	for (; offset < end; offset += PAGE_SIZE, m = next) {
		next = vm_page_next(m);
		vm_page_unwire(m, PQ_NONE);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);

	return (domain);
}
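
A note on the return value: _kmem_unback() reports the NUMA domain of the pages it released so that a caller freeing the KVA can hand the range back to the matching per-domain arena. A caller that does not need the domain can simply discard it; a minimal sketch of such a wrapper (illustrative only, not necessarily the exact in-tree kmem_unback()) would be:

void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{

	/* Drop the returned domain; only KVA-freeing callers need it. */
	(void)_kmem_unback(object, addr, size);
}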
Example #2
int
exec_map_first_page(struct image_params *imgp)
{
	int rv, i, after, initial_pagein;
	vm_page_t ma[VM_INITIAL_PAGEIN];
	vm_object_t object;

	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	object = imgp->vp->v_object;
	if (object == NULL)
		return (EACCES);
	VM_OBJECT_WLOCK(object);
#if VM_NRESERVLEVEL > 0
	vm_object_color(object, 0);
#endif
	ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
	if (ma[0]->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(ma[0]);
		if (!vm_pager_has_page(object, 0, NULL, &after)) {
			vm_page_lock(ma[0]);
			vm_page_free(ma[0]);
			vm_page_unlock(ma[0]);
			VM_OBJECT_WUNLOCK(object);
			return (EIO);
		}
		initial_pagein = min(after, VM_INITIAL_PAGEIN);
		KASSERT(initial_pagein <= object->size,
		    ("%s: initial_pagein %d object->size %ju",
	    __func__, initial_pagein, (uintmax_t)object->size));
		for (i = 1; i < initial_pagein; i++) {
			if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
				if (ma[i]->valid)
					break;
				if (!vm_page_tryxbusy(ma[i]))
					break;
			} else {
				ma[i] = vm_page_alloc(object, i,
				    VM_ALLOC_NORMAL);
				if (ma[i] == NULL)
					break;
			}
		}
		initial_pagein = i;
		rv = vm_pager_get_pages(object, ma, initial_pagein, NULL, NULL);
		if (rv != VM_PAGER_OK) {
			for (i = 0; i < initial_pagein; i++) {
				vm_page_lock(ma[i]);
				vm_page_free(ma[i]);
				vm_page_unlock(ma[i]);
			}
			VM_OBJECT_WUNLOCK(object);
			return (EIO);
		}
		vm_page_xunbusy(ma[0]);
		for (i = 1; i < initial_pagein; i++)
			vm_page_readahead_finish(ma[i]);
	}
	vm_page_lock(ma[0]);
	vm_page_hold(ma[0]);
	vm_page_activate(ma[0]);
	vm_page_unlock(ma[0]);
	VM_OBJECT_WUNLOCK(object);

	imgp->firstpage = sf_buf_alloc(ma[0], 0);
	imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);

	return (0);
}
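
On success, exec_map_first_page() leaves ma[0] held (vm_page_hold()) and mapped through an sf_buf, so the counterpart exec_unmap_first_page() called at the top of the function has to release both. A minimal sketch of that release path, assuming the sf_buf_page()/sf_buf_free() and vm_page_unhold() interfaces of the same FreeBSD era (illustrative, not necessarily the exact in-tree code):

void
exec_unmap_first_page(struct image_params *imgp)
{
	vm_page_t m;

	if (imgp->firstpage != NULL) {
		/* Recover the page backing the sf_buf before tearing it down. */
		m = sf_buf_page(imgp->firstpage);
		sf_buf_free(imgp->firstpage);
		imgp->firstpage = NULL;
		/* Drop the hold taken in exec_map_first_page(). */
		vm_page_lock(m);
		vm_page_unhold(m);
		vm_page_unlock(m);
	}
}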
Example #3
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)pMem;
    int rc;

    switch (pMemFreeBSD->Core.enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            rc = vm_map_remove(kernel_map,
                                (vm_offset_t)pMemFreeBSD->Core.pv,
                                (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;

        case RTR0MEMOBJTYPE_LOCK:
        {
            vm_map_t pMap = kernel_map;

            if (pMemFreeBSD->Core.u.Lock.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Lock.R0Process)->p_vmspace->vm_map;

            rc = vm_map_unwire(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb,
                               VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_RES_VIRT:
        {
            vm_map_t pMap = kernel_map;
            if (pMemFreeBSD->Core.u.ResVirt.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.ResVirt.R0Process)->p_vmspace->vm_map;
            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_MAPPING:
        {
            vm_map_t pMap = kernel_map;

            if (pMemFreeBSD->Core.u.Mapping.R0Process != NIL_RTR0PROCESS)
                pMap = &((struct proc *)pMemFreeBSD->Core.u.Mapping.R0Process)->p_vmspace->vm_map;
            rc = vm_map_remove(pMap,
                               (vm_offset_t)pMemFreeBSD->Core.pv,
                               (vm_offset_t)pMemFreeBSD->Core.pv + pMemFreeBSD->Core.cb);
            AssertMsg(rc == KERN_SUCCESS, ("%#x", rc));
            break;
        }

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
        {
#if __FreeBSD_version >= 1000030
            VM_OBJECT_WLOCK(pMemFreeBSD->pObject);
#else
            VM_OBJECT_LOCK(pMemFreeBSD->pObject);
#endif
            vm_page_lock_queues();
            for (vm_page_t pPage = vm_page_find_least(pMemFreeBSD->pObject, 0);
                 pPage != NULL;
                 pPage = vm_page_next(pPage))
            {
                vm_page_unwire(pPage, 0);
            }
            vm_page_unlock_queues();
#if __FreeBSD_version >= 1000030
            VM_OBJECT_WUNLOCK(pMemFreeBSD->pObject);
#else
            VM_OBJECT_UNLOCK(pMemFreeBSD->pObject);
#endif
            vm_object_deallocate(pMemFreeBSD->pObject);
            break;
        }

        default:
            AssertMsgFailed(("enmType=%d\n", pMemFreeBSD->Core.enmType));
            return VERR_INTERNAL_ERROR;
    }

    return VINF_SUCCESS;
}
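
The RTR0MEMOBJTYPE_PHYS/PHYS_NC branch unwires every page found in pObject before dropping the object reference, so the allocation side presumably wires each page as it populates the object. A rough sketch of such a loop, using hypothetical pObject/cPages names and vm_page_alloc() with VM_ALLOC_WIRED (an assumption for illustration, not the actual VirtualBox allocator):

    /* Sketch: fill pObject with cPages wired pages; the PHYS/PHYS_NC
     * branch of rtR0MemObjNativeFree() later walks the object and
     * unwires them before vm_object_deallocate(). */
    VM_OBJECT_WLOCK(pObject);
    for (vm_pindex_t iPage = 0; iPage < cPages; iPage++)
    {
        vm_page_t pPage = vm_page_alloc(pObject, iPage,
                                        VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOBUSY);
        if (pPage == NULL)
            break; /* Out of memory; a real allocator would undo the pages added so far. */
    }
    VM_OBJECT_WUNLOCK(pObject);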