Example #1
File: vm_machdep.c Project: hlcherub/src
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
    struct sf_buf *sf_bufs;
    vm_offset_t sf_base;
    int i;

    /* Don't bother on systems with a direct map */
    if (hw_direct_map)
        return;

    nsfbufs = NSFBUFS;
    TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

    sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
    TAILQ_INIT(&sf_buf_freelist);
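    /*
     * kva_alloc() only reserves kernel virtual address space; no physical
     * pages are mapped here, they are entered into these slots later,
     * when an sf_buf is actually handed out.
     */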
    sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
    sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
                     M_NOWAIT | M_ZERO);

    for (i = 0; i < nsfbufs; i++) {
        sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
        TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
    }
    sf_buf_alloc_want = 0;
    mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
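The early return above works because, with a hardware direct map, an sf_buf needs no dedicated KVA at all. A minimal sketch of the accessor side, assuming the powerpc-style convention that the handle is then simply the vm_page_t itself (the name below is illustrative, not taken from this file):

static inline vm_offset_t
sf_buf_kva_sketch(struct sf_buf *sf)
{
    /* Sketch only: with a direct map the page's address is usable directly. */
    if (hw_direct_map)
        return (VM_PAGE_TO_PHYS((vm_page_t)sf));
    return (sf->kva);       /* pool slot set up in sf_buf_init() */
}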
Example #2
static int
gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct xen_add_to_physmap xatp;
	unsigned int i = end_idx;

	/*
	 * Loop backwards, so that the first hypercall has the largest index,
	 * ensuring that the table will grow only once.
	 */
	do {
		xatp.domid = DOMID_SELF;
		xatp.idx = i;
		xatp.space = XENMAPSPACE_grant_table;
		xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i;
		if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
			panic("HYPERVISOR_memory_op failed to map gnttab");
	} while (i-- > start_idx);

	if (shared == NULL) {
		vm_offset_t area;

		area = kva_alloc(PAGE_SIZE * max_nr_grant_frames());
		KASSERT(area, ("can't allocate VM space for grant table"));
		shared = (grant_entry_t *)area;
	}

	for (i = start_idx; i <= end_idx; i++) {
		pmap_kenter((vm_offset_t) shared + i * PAGE_SIZE,
		    resume_frames + i * PAGE_SIZE);
	}

	return (0);
}
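A sketch of how such a helper is typically driven on resume, assuming a counter such as nr_grant_frames tracks how many grant-table frames are currently in use (the caller name and that counter are assumptions, not taken from the snippet):

static int
gnttab_resume_sketch(void)
{
	/* nr_grant_frames is assumed to hold the number of frames in use. */
	if (max_nr_grant_frames() < nr_grant_frames)
		return (ENOSYS);

	return (gnttab_map(0, nr_grant_frames - 1));
}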
Example #3
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
    struct sf_buf *sf_bufs;
    vm_offset_t sf_base;
    int i;

#ifdef SFBUF_OPTIONAL_DIRECT_MAP
    if (SFBUF_OPTIONAL_DIRECT_MAP)
        return;
#endif

    nsfbufs = NSFBUFS;
    TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

    sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
    TAILQ_INIT(&sf_buf_freelist);
    sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
    sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
                     M_WAITOK | M_ZERO);
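    /* Unlike M_NOWAIT, M_WAITOK cannot return NULL, so no check is needed. */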
    for (i = 0; i < nsfbufs; i++) {
        sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
        TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
    }
    sf_buf_alloc_want = 0;
    mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
Example #4
/*
 * Map a set of physical memory pages into the kernel virtual address space.
 * Return a pointer to where it is mapped.
 *
 * This uses a pre-established static mapping if one exists for the requested
 * range, otherwise it allocates kva space and maps the physical pages into it.
 *
 * This routine is intended to be used for mapping device memory, NOT real
 * memory; the mapping type is inherently VM_MEMATTR_DEVICE in
 * pmap_kenter_device().
 */
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
	vm_offset_t va, offset;
	void * rva;

	/* First look in the static mapping table. */
	if ((rva = devmap_ptov(pa, size)) != NULL)
		return (rva);

	offset = pa & PAGE_MASK;
	pa = trunc_page(pa);
	size = round_page(size + offset);

#if defined(__aarch64__) || defined(__riscv__)
	if (early_boot) {
		akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - size);
		va = akva_devmap_vaddr;
		KASSERT(va >= VM_MAX_KERNEL_ADDRESS - L2_SIZE,
		    ("Too many early devmap mappings"));
	} else
#endif
		va = kva_alloc(size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	pmap_kenter_device(va, size, pa);

	return ((void *)(va + offset));
}
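The release path mirrors this: addresses that came from the static devmap table are left alone, while dynamically mapped ranges get their pages removed and their KVA returned. A rough sketch, assuming devmap_vtop() and pmap_kremove_device() are available; this is not the exact in-tree pmap_unmapdev():

void
pmap_unmapdev_sketch(vm_offset_t va, vm_size_t size)
{
	vm_offset_t offset;

	/* Static devmap mappings are permanent; nothing to tear down. */
	if (devmap_vtop((void *)va, size) != DEVMAP_PADDR_NOTFOUND)
		return;

	offset = va & PAGE_MASK;
	va = trunc_page(va);
	size = round_page(size + offset);

	pmap_kremove_device(va, size);	/* undo pmap_kenter_device() */
	kva_free(va, size);		/* give the KVA back */
}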
Example #5
static int
spigen_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
                   vm_size_t size, struct vm_object **object, int nprot)
{
    device_t dev = cdev->si_drv1;
    struct spigen_softc *sc = device_get_softc(dev);
    vm_page_t *m;
    size_t n, pages;

    if (size == 0 ||
            (nprot & (PROT_EXEC | PROT_READ | PROT_WRITE))
            != (PROT_READ | PROT_WRITE))
        return (EINVAL);
    size = roundup2(size, PAGE_SIZE);
    pages = size / PAGE_SIZE;

    mtx_lock(&sc->sc_mtx);
    if (sc->sc_mmap_buffer != NULL) {
        mtx_unlock(&sc->sc_mtx);
        return (EBUSY);
    } else if (size > sc->sc_command_length_max + sc->sc_data_length_max) {
        mtx_unlock(&sc->sc_mtx);
        return (E2BIG);
    }
    sc->sc_mmap_buffer_size = size;
    *offset = 0;
    sc->sc_mmap_buffer = *object = vm_pager_allocate(OBJT_PHYS, 0, size,
                                   nprot, *offset, curthread->td_ucred);
    m = malloc(sizeof(*m) * pages, M_TEMP, M_WAITOK);
    VM_OBJECT_WLOCK(*object);
    vm_object_reference_locked(*object); // kernel and userland both
    for (n = 0; n < pages; n++) {
        m[n] = vm_page_grab(*object, n,
                            VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
        m[n]->valid = VM_PAGE_BITS_ALL;
    }
    VM_OBJECT_WUNLOCK(*object);
    sc->sc_mmap_kvaddr = kva_alloc(size);
    pmap_qenter(sc->sc_mmap_kvaddr, m, pages);
    free(m, M_TEMP);
    mtx_unlock(&sc->sc_mtx);

    if (*object == NULL)
        return (EINVAL);
    return (0);
}
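A matching teardown has to undo each step: unmap the pages from the kernel KVA, release that KVA, and drop the object reference taken for the kernel side. A minimal sketch with a hypothetical helper name (the wired pages would also need to be unwired before the object finally goes away):

static void
spigen_mmap_cleanup_sketch(struct spigen_softc *sc)
{
    vm_size_t size = sc->sc_mmap_buffer_size;

    pmap_qremove(sc->sc_mmap_kvaddr, size / PAGE_SIZE); /* undo pmap_qenter() */
    kva_free(sc->sc_mmap_kvaddr, size);                 /* undo kva_alloc() */
    vm_object_deallocate(sc->sc_mmap_buffer);           /* drop kernel reference */
    sc->sc_mmap_buffer = NULL;
    sc->sc_mmap_buffer_size = 0;
}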
Example #6
File: os.c Project: nf-mlo/open-vm-tools
Mapping
OS_MapPageHandle(PageHandle handle)     // IN
{
#if __FreeBSD_version < 1000000
   vm_offset_t res = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
#else
   vm_offset_t res = kva_alloc(PAGE_SIZE);
#endif

   vm_page_t page = (vm_page_t)handle;

   if (!res) {
      return MAPPING_INVALID;
   }

   pmap_qenter(res, &page, 1);

   return (Mapping)res;
}
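The reverse operation is symmetric: remove the one-page mapping and return the KVA through whichever interface matches the kernel version. A sketch of what the counterpart typically looks like; the project's real OS_UnmapPage() may differ in detail:

void
OS_UnmapPage_sketch(Mapping mapping)   // IN
{
   vm_offset_t va = (vm_offset_t)mapping;

   pmap_qremove(va, 1);                /* undo pmap_qenter() */
#if __FreeBSD_version < 1000000
   kmem_free(kernel_map, va, PAGE_SIZE);
#else
   kva_free(va, PAGE_SIZE);
#endif
}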
Example #7
File: gnttab.c Project: coyizumi/cs111
static int
gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	u_long *frames;

	unsigned int nr_gframes = end_idx + 1;
	int i, rc;

	frames = malloc(nr_gframes * sizeof(unsigned long), M_DEVBUF, M_NOWAIT);
	if (!frames)
		return (ENOMEM);

	setup.dom        = DOMID_SELF;
	setup.nr_frames  = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		free(frames, M_DEVBUF);
		return (ENOSYS);
	}
	KASSERT(!(rc || setup.status),
	    ("unexpected result from grant_table_op"));

	if (shared == NULL) {
		vm_offset_t area;

		area = kva_alloc(PAGE_SIZE * max_nr_grant_frames());
		KASSERT(area, ("can't allocate VM space for grant table"));
		shared = (grant_entry_t *)area;
	}

	for (i = 0; i < nr_gframes; i++)
		PT_SET_MA(((caddr_t)shared) + i*PAGE_SIZE, 
		    ((vm_paddr_t)frames[i]) << PAGE_SHIFT | PG_RW | PG_V);

	free(frames, M_DEVBUF);

	return (0);
}
Example #8
void
kmem_init_zero_region(void)
{
	vm_offset_t addr, i;
	vm_page_t m;

	/*
	 * Map a single physical page of zeros to a larger virtual range.
	 * This requires less looping in places that want large amounts of
	 * zeros, while not using much more physical resources.
	 */
	addr = kva_alloc(ZERO_REGION_SIZE);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);
	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);

	zero_region = (const void *)addr;
}
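Consumers can then source up to ZERO_REGION_SIZE bytes of zeros at a time while only ever touching one wired physical page. A hypothetical consumer sketch (uiomove_zeros_sketch is not a real kernel function):

static int
uiomove_zeros_sketch(size_t n, struct uio *uio)
{
	size_t chunk;
	int error;

	while (n > 0 && uio->uio_resid > 0) {
		chunk = MIN(n, ZERO_REGION_SIZE);
		/* zero_region is read-only, hence the __DECONST. */
		error = uiomove(__DECONST(void *, zero_region), chunk, uio);
		if (error != 0)
			return (error);
		n -= chunk;
	}
	return (0);
}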
Example #9

static void
shared_page_init(void *dummy __unused)
{
	vm_page_t m;
	vm_offset_t addr;

	sx_init(&shared_page_alloc_sx, "shpsx");
	shared_page_obj = vm_pager_allocate(OBJT_PHYS, 0, PAGE_SIZE,
	    VM_PROT_DEFAULT, 0, NULL);
	VM_OBJECT_WLOCK(shared_page_obj);
	m = vm_page_grab(shared_page_obj, 0, VM_ALLOC_NOBUSY | VM_ALLOC_ZERO);
	m->valid = VM_PAGE_BITS_ALL;
	VM_OBJECT_WUNLOCK(shared_page_obj);
	addr = kva_alloc(PAGE_SIZE);
	pmap_qenter(addr, &m, 1);
	shared_page_mapping = (char *)addr;
}

SYSINIT(shp, SI_SUB_EXEC, SI_ORDER_FIRST, (sysinit_cfunc_t)shared_page_init,
    NULL);

/*
 * Push the timehands update to the shared page.
 *
 * The lockless update scheme is similar to the one used to update the
 * in-kernel timehands, see sys/kern/kern_tc.c:tc_windup() (which
 * calls us after the timehands are updated).
 */
Example #10
File: sb_zbpci.c Project: 2asoft/freebsd
static int
zbpci_attach(device_t dev)
{
	int n, rid, size;
	vm_offset_t va;
	struct resource *res;
	
	/*
	 * Reserve the physical memory window used to map PCI I/O space.
	 */
	rid = 0;
	res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
				 PCI_IOSPACE_ADDR,
				 PCI_IOSPACE_ADDR + PCI_IOSPACE_SIZE - 1,
				 PCI_IOSPACE_SIZE, 0);
	if (res == NULL)
		panic("Cannot allocate resource for PCI I/O space mapping.");

	port_rman.rm_start = 0;
	port_rman.rm_end = PCI_IOSPACE_SIZE - 1;
	port_rman.rm_type = RMAN_ARRAY;
	port_rman.rm_descr = "PCI I/O ports";
	if (rman_init(&port_rman) != 0 ||
	    rman_manage_region(&port_rman, 0, PCI_IOSPACE_SIZE - 1) != 0)
		panic("%s: port_rman", __func__);

	/*
	 * Reserve the physical memory that is used to read/write to the
	 * pci config space but don't activate it. We are using a page worth
	 * of KVA as a window over this region.
	 */
	rid = 1;
	size = (PCI_BUSMAX + 1) * (PCI_SLOTMAX + 1) * (PCI_FUNCMAX + 1) * 256;
	res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, CFG_PADDR_BASE,
				 CFG_PADDR_BASE + size - 1, size, 0);
	if (res == NULL)
		panic("Cannot allocate resource for config space accesses.");

	/*
	 * Allocate the entire "match bit lanes" address space.
	 */
#if _BYTE_ORDER == _BIG_ENDIAN
	rid = 2;
	res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 
				 PCI_MATCH_BIT_LANES_START,
				 PCI_MATCH_BIT_LANES_END,
				 PCI_MATCH_BIT_LANES_SIZE, 0);
	if (res == NULL)
		panic("Cannot allocate resource for pci match bit lanes.");
#endif	/* _BYTE_ORDER == _BIG_ENDIAN */

	/*
	 * Allocate KVA for accessing PCI config space.
	 */
	va = kva_alloc(PAGE_SIZE * mp_ncpus);
	if (va == 0) {
		device_printf(dev, "Cannot allocate virtual addresses for "
				   "config space access.\n");
		return (ENOMEM);
	}

	for (n = 0; n < mp_ncpus; ++n)
		zbpci_config_space[n].vaddr = va + n * PAGE_SIZE;

	/*
	 * Sibyte has the PCI bus hierarchy rooted at bus 0 and HT-PCI
	 * hierarchy rooted at bus 1.
	 */
	if (device_add_child(dev, "pci", 0) == NULL)
		panic("zbpci_attach: could not add pci bus 0.\n");

	if (device_add_child(dev, "pci", 1) == NULL)
		panic("zbpci_attach: could not add pci bus 1.\n");

	if (bootverbose)
		device_printf(dev, "attached.\n");

	return (bus_generic_attach(dev));
}
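Each CPU gets a private one-page window out of that allocation, and a config-space access temporarily maps the target page into the window of the current CPU. A rough sketch of such an accessor; the helper name and the 32-bit access width are assumptions, not the in-tree code:

static uint32_t
zbpci_cfg_read_sketch(vm_paddr_t cfg_paddr)
{
	vm_offset_t va;
	uint32_t val;

	critical_enter();		/* stay on this CPU and its window */
	va = zbpci_config_space[PCPU_GET(cpuid)].vaddr;
	pmap_kenter(va, trunc_page(cfg_paddr));
	val = *(volatile uint32_t *)(va + (cfg_paddr & PAGE_MASK));
	pmap_kremove(va);
	critical_exit();

	return (val);
}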
Example #11
int
sparc64_bus_mem_map(bus_space_tag_t tag, bus_addr_t addr, bus_size_t size,
    int flags, vm_offset_t vaddr, bus_space_handle_t *hp)
{
	vm_offset_t sva;
	vm_offset_t va;
	vm_paddr_t pa;
	vm_size_t vsz;
	u_long pm_flags;

	/*
	 * Given that we use physical access for bus_space(9) there's no need
	 * to map anything in unless BUS_SPACE_MAP_LINEAR is requested.
	 */
	if ((flags & BUS_SPACE_MAP_LINEAR) == 0) {
		*hp = addr;
		return (0);
	}

	if (tag->bst_cookie == NULL) {
		printf("%s: resource cookie not set\n", __func__);
		return (EINVAL);
	}

	size = round_page(size);
	if (size == 0) {
		printf("%s: zero size\n", __func__);
		return (EINVAL);
	}

	switch (tag->bst_type) {
	case PCI_CONFIG_BUS_SPACE:
	case PCI_IO_BUS_SPACE:
	case PCI_MEMORY_BUS_SPACE:
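		/*
		 * PCI is little-endian while the CPU is big-endian, so these
		 * mappings get the TTE invert-endianness bit (TD_IE).
		 */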
		pm_flags = TD_IE;
		break;
	default:
		pm_flags = 0;
		break;
	}

	if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0)
		pm_flags |= TD_E;

	if (vaddr != 0L)
		sva = trunc_page(vaddr);
	else {
		if ((sva = kva_alloc(size)) == 0)
			panic("%s: cannot allocate virtual memory", __func__);
	}

	pa = trunc_page(addr);
	if ((flags & BUS_SPACE_MAP_READONLY) == 0)
		pm_flags |= TD_W;

	va = sva;
	vsz = size;
	do {
		pmap_kenter_flags(va, pa, pm_flags);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	} while ((vsz -= PAGE_SIZE) > 0);
	tlb_range_demap(kernel_pmap, sva, sva + size - 1);

	/* Note: we preserve the page offset. */
	rman_set_virtual(tag->bst_cookie, (void *)(sva | (addr & PAGE_MASK)));
	return (0);
}