Example #1
/*
 * shmfd object management including creation and reference counting
 * routines.
 */
static struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode)
{
	struct shmfd *shmfd;

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_size = 0;
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	shmfd->shm_object = vm_pager_allocate(OBJT_DEFAULT, NULL,
	    shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
	KASSERT(shmfd->shm_object != NULL, ("shm_alloc: vm_pager_allocate"));
	VM_OBJECT_LOCK(shmfd->shm_object);
	vm_object_clear_flag(shmfd->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shmfd->shm_object, OBJ_NOSPLIT);
	VM_OBJECT_UNLOCK(shmfd->shm_object);
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	refcount_init(&shmfd->shm_refs, 1);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}
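
The release path is not shown in this excerpt; a minimal sketch of what it has to do, assuming the shm_drop() name and M_SHMFD malloc type from FreeBSD's uipc_shm.c: the final reference drop must also release the VM object created by vm_pager_allocate().

/*
 * Hedged sketch (not part of the excerpt above): releasing a reference
 * obtained in shm_alloc(). The last release drops the backing VM object.
 */
static void
shm_drop(struct shmfd *shmfd)
{

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		vm_object_deallocate(shmfd->shm_object);
		free(shmfd, M_SHMFD);
	}
}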
Example #2
File: vmm_mem.c  Project: Alkzndr/freebsd
vm_object_t
vmm_mmio_alloc(struct vmspace *vmspace, vm_paddr_t gpa, size_t len,
	       vm_paddr_t hpa)
{
	int error;
	vm_object_t obj;
	struct sglist *sg;

	sg = sglist_alloc(1, M_WAITOK);
	error = sglist_append_phys(sg, hpa, len);
	KASSERT(error == 0, ("error %d appending physaddr to sglist", error));

	obj = vm_pager_allocate(OBJT_SG, sg, len, VM_PROT_RW, 0, NULL);
	if (obj != NULL) {
		/*
		 * VT-x ignores the MTRR settings when figuring out the
		 * memory type for translations obtained through EPT.
		 *
		 * Therefore we explicitly force the pages provided by
		 * this object to be mapped as uncacheable.
		 */
		VM_OBJECT_WLOCK(obj);
		error = vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE);
		VM_OBJECT_WUNLOCK(obj);
		if (error != KERN_SUCCESS) {
			panic("vmm_mmio_alloc: vm_object_set_memattr error %d",
				error);
		}
		error = vm_map_find(&vmspace->vm_map, obj, 0, &gpa, len, 0,
				    VMFS_NO_SPACE, VM_PROT_RW, VM_PROT_RW, 0);
		if (error != KERN_SUCCESS) {
			vm_object_deallocate(obj);
			obj = NULL;
		}
	}

	/*
	 * Drop the reference on the sglist.
	 *
	 * If the scatter/gather object was successfully allocated then it
	 * has incremented the reference count on the sglist. Dropping the
	 * initial reference count ensures that the sglist will be freed
	 * when the object is deallocated.
	 * 
	 * If the object could not be allocated then we end up freeing the
	 * sglist.
	 */
	sglist_free(sg);

	return (obj);
}
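
The inverse operation is presumably a one-liner: removing the guest mapping drops the map's object reference, and the final vm_object_deallocate() lets the SG pager release its sglist reference. A hedged sketch of that teardown:

/*
 * Hedged sketch of the matching free path: vm_map_remove() drops the
 * mapping and, with it, the last object reference, which in turn
 * releases the sglist reference taken by the SG pager.
 */
void
vmm_mmio_free(struct vmspace *vmspace, vm_paddr_t gpa, size_t len)
{

	vm_map_remove(&vmspace->vm_map, gpa, gpa + len);
}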
Example #3
File: drm_gem.c  Project: coyizumi/cs111
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
    KASSERT((size & (PAGE_SIZE - 1)) == 0,
            ("Bad size %ju", (uintmax_t)size));

    obj->dev = dev;
    obj->vm_obj = vm_pager_allocate(OBJT_DEFAULT, NULL, size,
                                    VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);

    obj->refcount = 1;
    obj->handle_count = 0;
    obj->size = size;

    return 0;
}
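
The KASSERT above rejects any size that is not a multiple of PAGE_SIZE, so callers are expected to round up first. A hypothetical caller sketch (my_gem_create() is illustrative, not part of the driver):

/* Hypothetical caller: GEM object sizes must be page-aligned. */
static int
my_gem_create(struct drm_device *dev, struct drm_gem_object *obj,
    size_t size)
{

    return (drm_gem_object_init(dev, obj, round_page(size)));
}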
Example #4
static int
spigen_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
                   vm_size_t size, struct vm_object **object, int nprot)
{
    device_t dev = cdev->si_drv1;
    struct spigen_softc *sc = device_get_softc(dev);
    vm_page_t *m;
    size_t n, pages;

    if (size == 0 ||
            (nprot & (PROT_EXEC | PROT_READ | PROT_WRITE))
            != (PROT_READ | PROT_WRITE))
        return (EINVAL);
    size = roundup2(size, PAGE_SIZE);
    pages = size / PAGE_SIZE;

    mtx_lock(&sc->sc_mtx);
    if (sc->sc_mmap_buffer != NULL) {
        mtx_unlock(&sc->sc_mtx);
        return (EBUSY);
    } else if (size > sc->sc_command_length_max + sc->sc_data_length_max) {
        mtx_unlock(&sc->sc_mtx);
        return (E2BIG);
    }
    sc->sc_mmap_buffer_size = size;
    *offset = 0;
    /* Unmanaged physical-memory object backing the user mapping. */
    sc->sc_mmap_buffer = *object = vm_pager_allocate(OBJT_PHYS, 0, size,
                                   nprot, *offset, curthread->td_ucred);
    m = malloc(sizeof(*m) * pages, M_TEMP, M_WAITOK);
    VM_OBJECT_WLOCK(*object);
    vm_object_reference_locked(*object); /* kernel and userland both */
    /* Populate the object with wired, zeroed pages and mark them valid. */
    for (n = 0; n < pages; n++) {
        m[n] = vm_page_grab(*object, n,
                            VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
        m[n]->valid = VM_PAGE_BITS_ALL;
    }
    VM_OBJECT_WUNLOCK(*object);
    /* Map the same pages into kernel VA so the driver can access them. */
    sc->sc_mmap_kvaddr = kva_alloc(size);
    pmap_qenter(sc->sc_mmap_kvaddr, m, pages);
    free(m, M_TEMP);
    mtx_unlock(&sc->sc_mtx);

    if (*object == NULL)
        return (EINVAL);
    return (0);
}
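
A hedged sketch of the matching teardown (roughly what the driver's close path would need, assuming sc_mtx is held): unmap and free the KVA window, then drop the kernel-side reference taken with vm_object_reference_locked(); the userland reference disappears with the user mapping itself.

    /*
     * Hedged teardown sketch: undo the KVA mapping and release the
     * kernel-side object reference.
     */
    pmap_qremove(sc->sc_mmap_kvaddr, sc->sc_mmap_buffer_size / PAGE_SIZE);
    kva_free(sc->sc_mmap_kvaddr, sc->sc_mmap_buffer_size);
    vm_object_deallocate(sc->sc_mmap_buffer);
    sc->sc_mmap_buffer = NULL;
    sc->sc_mmap_buffer_size = 0;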
Example #5
File: ttm_tt.c  Project: Alkzndr/freebsd
int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
{
	vm_object_t obj;
	vm_page_t from_page, to_page;
	int i;

	MPASS(ttm->state == tt_unbound || ttm->state == tt_unpopulated);
	MPASS(ttm->caching_state == tt_cached);

	if (persistent_swap_storage == NULL) {
		obj = vm_pager_allocate(OBJT_SWAP, NULL,
		    IDX_TO_OFF(ttm->num_pages), VM_PROT_DEFAULT, 0,
		    curthread->td_ucred);
		if (obj == NULL) {
			printf("[TTM] Failed allocating swap storage\n");
			return (-ENOMEM);
		}
	} else
		obj = persistent_swap_storage;

	VM_OBJECT_WLOCK(obj);
	vm_object_pip_add(obj, 1);
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL);
		pmap_copy_page(from_page, to_page);
		to_page->valid = VM_PAGE_BITS_ALL;
		vm_page_dirty(to_page);
		vm_page_xunbusy(to_page);
	}
	vm_object_pip_wakeup(obj);
	VM_OBJECT_WUNLOCK(obj);

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = obj;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage != NULL)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
	return (0);
}
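
Note that vm_page_grab() returns each destination page exclusively busied, which is why the copy loop ends with vm_page_xunbusy(). When the pages are later swapped back in, the transient object must be dropped; a hedged sketch of that cleanup, mirroring what a matching ttm_tt_swapin() would do:

	/*
	 * Hedged sketch of the cleanup after a later swap-in: the
	 * transient OBJT_SWAP object is released unless the caller
	 * supplied persistent storage.
	 */
	if ((ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) == 0)
		vm_object_deallocate(ttm->swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~(TTM_PAGE_FLAG_SWAPPED |
	    TTM_PAGE_FLAG_PERSISTENT_SWAP);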
Example #6
/*
 * Create the 1:1 virtual to physical map for EFI
 */
bool
efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
{
	struct efi_md *p;
	pt_entry_t *l3;
	vm_offset_t va;
	uint64_t idx;
	int i, mode;

	obj_1t1_pt = vm_pager_allocate(OBJT_PHYS, NULL, L0_ENTRIES +
	    L0_ENTRIES * Ln_ENTRIES + L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES +
	    L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES * Ln_ENTRIES,
	    VM_PROT_ALL, 0, NULL);
	VM_OBJECT_WLOCK(obj_1t1_pt);
	efi_l0_page = efi_1t1_page(0);
	VM_OBJECT_WUNLOCK(obj_1t1_pt);
	efi_l0 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(efi_l0_page));
	bzero(efi_l0, L0_ENTRIES * sizeof(*efi_l0));

	for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p,
	    descsz)) {
		if ((p->md_attr & EFI_MD_ATTR_RT) == 0)
			continue;
		if (p->md_virt != NULL) {
			if (bootverbose)
				printf("EFI Runtime entry %d is mapped\n", i);
			goto fail;
		}
		if ((p->md_phys & EFI_PAGE_MASK) != 0) {
			if (bootverbose)
				printf("EFI Runtime entry %d is not aligned\n",
				    i);
			goto fail;
		}
		if (p->md_phys + p->md_pages * EFI_PAGE_SIZE < p->md_phys ||
		    p->md_phys + p->md_pages * EFI_PAGE_SIZE >=
		    VM_MAXUSER_ADDRESS) {
			printf("EFI Runtime entry %d is not in mappable for RT:"
			    "base %#016jx %#jx pages\n",
			    i, (uintmax_t)p->md_phys,
			    (uintmax_t)p->md_pages);
			goto fail;
		}
		if ((p->md_attr & EFI_MD_ATTR_WB) != 0)
			mode = VM_MEMATTR_WRITE_BACK;
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			mode = VM_MEMATTR_WRITE_THROUGH;
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			mode = VM_MEMATTR_WRITE_COMBINING;
		else if ((p->md_attr & EFI_MD_ATTR_UC) != 0)
			mode = VM_MEMATTR_UNCACHEABLE;
		else {
			if (bootverbose)
				printf("EFI Runtime entry %d mapping "
				    "attributes unsupported\n", i);
			mode = VM_MEMATTR_UNCACHEABLE;
		}

		printf("MAP %lx mode %x pages %lu\n", p->md_phys, mode, p->md_pages);
		VM_OBJECT_WLOCK(obj_1t1_pt);
		for (va = p->md_phys, idx = 0; idx < p->md_pages; idx++,
		    va += PAGE_SIZE) {
			l3 = efi_1t1_l3(va);
			*l3 = va | ATTR_DEFAULT | ATTR_IDX(mode) |
			    ATTR_AP(ATTR_AP_RW) | L3_PAGE;
		}
		VM_OBJECT_WUNLOCK(obj_1t1_pt);
	}

	return (true);
fail:
	efi_destroy_1t1_map();
	return (false);
}
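
efi_1t1_page() is not part of this excerpt; given the OBJT_PHYS object and the VM_PAGE_TO_PHYS() use above, it presumably grabs a wired, zeroed page of obj_1t1_pt at the requested index. A hedged reconstruction:

/*
 * Hedged reconstruction of the helper used above: grab a wired,
 * pre-zeroed page of the page-table object at index 'idx'.
 */
static vm_page_t
efi_1t1_page(vm_pindex_t idx)
{

	return (vm_page_grab(obj_1t1_pt, idx, VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO));
}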
Example #7
	sx_xlock(&shared_page_alloc_sx);
	res = shared_page_alloc_locked(size, align);
	if (res != -1)
		shared_page_write(res, size, data);
	sx_xunlock(&shared_page_alloc_sx);
	return (res);
}

static void
shared_page_init(void *dummy __unused)
{
	vm_page_t m;
	vm_offset_t addr;

	sx_init(&shared_page_alloc_sx, "shpsx");
	shared_page_obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE,
	    VM_PROT_DEFAULT, 0, NULL);
	VM_OBJECT_LOCK(shared_page_obj);
	m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_RETRY | VM_ALLOC_NOBUSY |
	    VM_ALLOC_ZERO);
	m->valid = VM_PAGE_BITS_ALL;
	VM_OBJECT_UNLOCK(shared_page_obj);
	addr = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
	pmap_qenter(addr, &m, 1);
	shared_page_mapping = (char *)addr;
}

SYSINIT(shp, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t)shared_page_init,
    NULL);
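
shared_page_write(), called in the fragment above, presumably just copies through the permanent kernel mapping established by shared_page_init(); a hedged sketch:

/*
 * Hedged sketch of the writer used above: copy into the wired,
 * permanently mapped shared page.
 */
void
shared_page_write(int base, int size, const void *data)
{

	bcopy(data, shared_page_mapping + base, size);
}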

static void
timehands_update(struct sysentvec *sv)
Example #8
bool
efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
{
	struct efi_md *p;
	pt_entry_t *pte;
	vm_offset_t va;
	uint64_t idx;
	int bits, i, mode;

	obj_1t1_pt = vm_pager_allocate(OBJT_PHYS, NULL, ptoa(1 +
	    NPML4EPG + NPML4EPG * NPDPEPG + NPML4EPG * NPDPEPG * NPDEPG),
	    VM_PROT_ALL, 0, NULL);
	efi_1t1_idx = 0;
	VM_OBJECT_WLOCK(obj_1t1_pt);
	efi_pml4_page = efi_1t1_page();
	VM_OBJECT_WUNLOCK(obj_1t1_pt);
	efi_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(efi_pml4_page));
	pmap_pinit_pml4(efi_pml4_page);

	for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p,
	    descsz)) {
		if ((p->md_attr & EFI_MD_ATTR_RT) == 0)
			continue;
		if (p->md_virt != NULL && (uint64_t)p->md_virt != p->md_phys) {
			if (bootverbose)
				printf("EFI Runtime entry %d is mapped\n", i);
			goto fail;
		}
		if ((p->md_phys & EFI_PAGE_MASK) != 0) {
			if (bootverbose)
				printf("EFI Runtime entry %d is not aligned\n",
				    i);
			goto fail;
		}
		if (p->md_phys + p->md_pages * EFI_PAGE_SIZE < p->md_phys ||
		    p->md_phys + p->md_pages * EFI_PAGE_SIZE >=
		    VM_MAXUSER_ADDRESS) {
			printf("EFI Runtime entry %d is not in mappable for RT:"
			    "base %#016jx %#jx pages\n",
			    i, (uintmax_t)p->md_phys,
			    (uintmax_t)p->md_pages);
			goto fail;
		}
		if ((p->md_attr & EFI_MD_ATTR_WB) != 0)
			mode = VM_MEMATTR_WRITE_BACK;
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			mode = VM_MEMATTR_WRITE_THROUGH;
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			mode = VM_MEMATTR_WRITE_COMBINING;
		else if ((p->md_attr & EFI_MD_ATTR_WP) != 0)
			mode = VM_MEMATTR_WRITE_PROTECTED;
		else if ((p->md_attr & EFI_MD_ATTR_UC) != 0)
			mode = VM_MEMATTR_UNCACHEABLE;
		else {
			if (bootverbose)
				printf("EFI Runtime entry %d mapping "
				    "attributes unsupported\n", i);
			mode = VM_MEMATTR_UNCACHEABLE;
		}
		bits = pmap_cache_bits(kernel_pmap, mode, FALSE) | X86_PG_RW |
		    X86_PG_V;
		VM_OBJECT_WLOCK(obj_1t1_pt);
		for (va = p->md_phys, idx = 0; idx < p->md_pages; idx++,
		    va += PAGE_SIZE) {
			pte = efi_1t1_pte(va);
			pte_store(pte, va | bits);
		}
		VM_OBJECT_WUNLOCK(obj_1t1_pt);
	}

	return (true);

fail:
	efi_destroy_1t1_map();
	return (false);
}
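
efi_destroy_1t1_map(), invoked on the failure paths of both this and the arm64 variant, is not shown. It has to unwire the grabbed page-table pages before dropping the object; a hedged sketch, with vm_page_unwire_noq() standing in for whatever unwiring the real code performs:

/*
 * Hedged sketch of the failure-path cleanup: unwire every resident
 * page-table page, then drop the OBJT_PHYS object itself.
 */
void
efi_destroy_1t1_map(void)
{
	vm_page_t m;

	if (obj_1t1_pt != NULL) {
		VM_OBJECT_WLOCK(obj_1t1_pt);
		TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
			vm_page_unwire_noq(m);	/* assumption: see lead-in */
		VM_OBJECT_WUNLOCK(obj_1t1_pt);
		vm_object_deallocate(obj_1t1_pt);
		obj_1t1_pt = NULL;
	}
	efi_pml4 = NULL;
	efi_pml4_page = NULL;
}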