Code example #1
File: seg_mf.c Project: bahamas10/openzfs
static void
segmf_softunlock(struct hat *hat, struct seg *seg, caddr_t addr, size_t len)
{
	struct segmf_data *data = seg->s_data;

	hat_unlock(hat, addr, len);

	mutex_enter(&freemem_lock);
	ASSERT(data->softlockcnt >= btopr(len));
	data->softlockcnt -= btopr(len);
	mutex_exit(&freemem_lock);

	if (data->softlockcnt == 0) {
		struct as *as = seg->s_as;

		if (AS_ISUNMAPWAIT(as)) {
			mutex_enter(&as->a_contents);
			if (AS_ISUNMAPWAIT(as)) {
				AS_CLRUNMAPWAIT(as);
				cv_broadcast(&as->a_cv);
			}
			mutex_exit(&as->a_contents);
		}
	}
}
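
Every snippet in this listing leans on the bytes-to-pages conversion macros. As a rough sketch of how they behave (assumed definitions modeled on <sys/param.h>; the my_/MY_ names and the 4 KB page size are hypothetical stand-ins for the illustration):

#define	MY_PAGESHIFT	12					/* assumed 4 KB pages */
#define	MY_PAGESIZE	(1UL << MY_PAGESHIFT)
#define	MY_PAGEOFFSET	(MY_PAGESIZE - 1)

#define	my_btop(x)	((x) >> MY_PAGESHIFT)			/* bytes to pages, rounded down */
#define	my_btopr(x)	(((x) + MY_PAGEOFFSET) >> MY_PAGESHIFT)	/* bytes to pages, rounded up */
#define	my_ptob(x)	((unsigned long)(x) << MY_PAGESHIFT)	/* pages back to bytes */

Read this way, segmf_softunlock() above simply decrements softlockcnt by the number of pages spanned by len.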
Code example #2
static int
swapfs_recalc(pgcnt_t pgs)
{
	pgcnt_t new_swapfs_desfree;
	pgcnt_t new_swapfs_minfree;
	pgcnt_t new_swapfs_reserve;

	new_swapfs_desfree = initial_swapfs_desfree;
	new_swapfs_minfree = initial_swapfs_minfree;
	new_swapfs_reserve = initial_swapfs_reserve;

	if (new_swapfs_desfree == 0)
		new_swapfs_desfree = btopr(7 * 512 * 1024); /* 3-1/2Mb */

	if (new_swapfs_minfree == 0) {
		/*
		 * We set this lower than we'd like here, 2Mb, because we
		 * always boot on swapfs. It's upped to a safer value,
		 * swapfs_desfree, when/if we add physical swap devices
		 * in swapadd(). Users who want to change the amount of
		 * memory that can be used as swap space should do so by
		 * setting swapfs_desfree at boot time, not swapfs_minfree.
		 * However, swapfs_minfree is tunable by install as a
		 * workaround for bugid 1147463.
		 */
		new_swapfs_minfree = MAX(btopr(2 * 1024 * 1024), pgs >> 3);
	}
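
A quick standalone check of the MAX() expression in the comment above (a hypothetical calculation; the 4 KB page size and the 1 GB memory figure are assumptions made only for the illustration):

#include <stdio.h>

#define	PAGESHIFT	12			/* assumed 4 KB pages */
#define	PAGESIZE	(1UL << PAGESHIFT)
#define	PAGEOFFSET	(PAGESIZE - 1)
#define	btopr(x)	(((x) + PAGEOFFSET) >> PAGESHIFT)
#define	MAX(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	unsigned long pgs = 262144;		/* 1 GB of memory in 4 KB pages */

	/* btopr(2 MB) = 512 pages; pgs >> 3 = 32768 pages; MAX picks 32768 */
	printf("swapfs_minfree = %lu pages\n",
	    MAX(btopr(2UL * 1024 * 1024), pgs >> 3));
	return (0);
}

So on this hypothetical machine the floor would be one eighth of memory rather than the 2 MB minimum.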
Code example #3
static int
sbmem_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
	size_t *maplen, uint_t model)
{
	struct sbusmem_unit *un;
	int instance, error;

#if defined(lint) || defined(__lint)
	model = model;
#endif /* lint || __lint */

	instance = getminor(dev);
	if ((un = ddi_get_soft_state(sbusmem_state_head, instance)) == NULL) {
		return (ENXIO);
	}
	if (off + len > un->size) {
		return (ENXIO);
	}
	if ((error = devmap_devmem_setup(dhp, un->dip, NULL, 0,
	    off, len, PROT_ALL, DEVMAP_DEFAULTS, NULL)) < 0) {
		return (error);
	}
	*maplen = ptob(btopr(len));
	return (0);
}
Code example #4
File: fbio.c Project: JackieXie168/mac-zfs
/*
 * Similar to fbread() but we call segmap_pagecreate instead of using
 * segmap_fault for SOFTLOCK to create the pages without using VOP_GETPAGE
 * and then we zero up to the length rounded to a page boundary.
 * XXX - this won't work right when bsize < PAGESIZE!!!
 */
void
fbzero(vnode_t *vp, offset_t off, uint_t len, struct fbuf **fbpp)
{
	caddr_t addr;
	ulong_t o, zlen;
	struct fbuf *fbp;

	o = (ulong_t)(off & MAXBOFFSET);
	if (o + len > MAXBSIZE)
		cmn_err(CE_PANIC, "fbzero: Bad offset/length");

	if (segmap_kpm) {
		addr = segmap_getmapflt(segkmap, vp, off & (offset_t)MAXBMASK,
				MAXBSIZE, SM_PAGECREATE, S_WRITE) + o;
	} else {
		addr = segmap_getmap(segkmap, vp, off & (offset_t)MAXBMASK) + o;
	}

	*fbpp = fbp = kmem_alloc(sizeof (struct fbuf), KM_SLEEP);
	fbp->fb_addr = addr;
	fbp->fb_count = len;

	(void) segmap_pagecreate(segkmap, addr, len, 1);

	/*
	 * Now we zero all the memory in the mapping we are interested in.
	 */
	zlen = (caddr_t)ptob(btopr((uintptr_t)(len + addr))) - addr;
	if (zlen < len || (o + zlen > MAXBSIZE))
		cmn_err(CE_PANIC, "fbzero: Bad zlen");
	bzero(addr, zlen);
}
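
The zlen computation above is the round-up-to-the-next-page-boundary idiom: round the end address addr + len up to a page boundary with btopr()/ptob(), then subtract addr. A standalone illustration with hypothetical values (4 KB pages assumed, macros redefined locally just for the demo):

#include <stdio.h>
#include <stdint.h>

#define	PAGESHIFT	12			/* assumed 4 KB pages */
#define	PAGESIZE	(1UL << PAGESHIFT)
#define	PAGEOFFSET	(PAGESIZE - 1)
#define	btopr(x)	(((x) + PAGEOFFSET) >> PAGESHIFT)
#define	ptob(x)		((uintptr_t)(x) << PAGESHIFT)

int
main(void)
{
	uintptr_t addr = 0x10000200;		/* hypothetical mapping address */
	size_t len = 0x300;			/* bytes the caller asked to zero */
	size_t zlen = ptob(btopr(addr + len)) - addr;

	/* end 0x10000500 rounds up to 0x10001000, so zlen = 0xe00 >= len */
	printf("zlen = 0x%zx\n", zlen);
	return (0);
}

Because zlen always reaches at least len and never crosses the next page boundary, the sanity check before bzero() should never fire.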
Code example #5
File: seg_kmem.c Project: bahamas10/openzfs
/*ARGSUSED*/
static int
segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
	page_t ***ppp, enum lock_type type, enum seg_rw rw)
{
	page_t **pplist, *pp;
	pgcnt_t npages;
	spgcnt_t pg;
	size_t nb;
	struct vnode *vp = seg->s_data;

	ASSERT(ppp != NULL);

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_PAGELOCK(segkp, addr, len, ppp, type, rw));

	npages = btopr(len);
	nb = sizeof (page_t *) * npages;

	if (type == L_PAGEUNLOCK) {
		pplist = *ppp;
		ASSERT(pplist != NULL);

		for (pg = 0; pg < npages; pg++) {
			pp = pplist[pg];
			page_unlock(pp);
		}
		kmem_free(pplist, nb);
		return (0);
	}

	ASSERT(type == L_PAGELOCK);

	pplist = kmem_alloc(nb, KM_NOSLEEP);
	if (pplist == NULL) {
		*ppp = NULL;
		return (ENOTSUP);	/* take the slow path */
	}

	for (pg = 0; pg < npages; pg++) {
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
		if (pp == NULL) {
			while (--pg >= 0)
				page_unlock(pplist[pg]);
			kmem_free(pplist, nb);
			*ppp = NULL;
			return (ENOTSUP);
		}
		pplist[pg] = pp;
		addr += PAGESIZE;
	}

	*ppp = pplist;
	return (0);
}
Code example #6
File: seg_kmem.c Project: bahamas10/openzfs
/*
 * Any changes to this routine must also be carried over to
 * devmap_free_pages() in the seg_dev driver. This is because
 * we currently don't have a special kernel segment for non-paged
 * kernel memory that is exported by drivers to user space.
 */
static void
segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
    void (*func)(page_t *))
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
		segkmem_gc_list_t *gc = inaddr;
		gc->gc_arena = vmp;
		gc->gc_size = size;
		gc->gc_next = segkmem_gc_list;
		segkmem_gc_list = gc;
		return;
	}

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
#if defined(__x86)
		pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL)
			panic("segkmem_free: page not found");
		if (!page_tryupgrade(pp)) {
			/*
			 * Some other thread has a sharelock. Wait for
			 * it to drop the lock so we can free this page.
			 */
			page_unlock(pp);
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
		}
#else
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
#endif
		if (pp == NULL)
			panic("segkmem_free: page not found");
		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
		pp->p_lckcnt = 0;
		if (func)
			func(pp);
		else
			page_destroy(pp, 0);
	}
	if (func == NULL)
		page_unresv(npages);

	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);

}
Code example #7
File: memlist.c Project: apprisi/illumos-gate
pgcnt_t
size_virtalloc(prom_memlist_t *avail, size_t nelems)
{

	u_longlong_t	start, end;
	pgcnt_t		allocpages = 0;
	uint_t		hole_allocated = 0;
	uint_t		i;

	for (i = 0; i < nelems - 1; i++) {

		start = avail[i].addr + avail[i].size;
		end = avail[i + 1].addr;

		/*
		 * Notes:
		 *
		 * (1) OBP on platforms with US I/II pre-allocates the hole
		 * represented by [spec_hole_start, spec_hole_end);
		 * pre-allocation is done to make this range unavailable
		 * for any allocation.
		 *
		 * (2) OBP on starcat always pre-allocates the hole similar to
		 * platforms with US I/II.
		 *
		 * (3) OBP on serengeti does _not_ pre-allocate the hole.
		 *
		 * (4) OBP ignores Spitfire Errata #21; i.e. it does _not_
		 * fill up or pre-allocate an additional 4GB on both sides
		 * of the hole.
		 *
		 * (5) kernel virtual range [spec_hole_start, spec_hole_end)
		 * is _not_ used on any platform including those with
		 * UltraSPARC III where there is no hole.
		 *
		 * Algorithm:
		 *
		 * Check if range [spec_hole_start, spec_hole_end) is
		 * pre-allocated by OBP; if so, subtract that range from
		 * allocpages.
		 */
		if (end >= spec_hole_end && start <= spec_hole_start)
			hole_allocated = 1;

		allocpages += btopr(end - start);
	}

	if (hole_allocated)
		allocpages -= btop(spec_hole_end - spec_hole_start);

	return (allocpages);
}
Code example #8
File: xpvtap.c Project: pcd1193182/openzfs
/*
 * xpvtap_segmf_register()
 */
static int
xpvtap_segmf_register(xpvtap_state_t *state)
{
	struct seg *seg;
	uint64_t pte_ma;
	struct as *as;
	caddr_t uaddr;
	uint_t pgcnt;
	int i;


	as = state->bt_map.um_as;
	pgcnt = btopr(state->bt_map.um_guest_size);
	uaddr = state->bt_map.um_guest_pages;

	if (pgcnt == 0) {
		return (DDI_FAILURE);
	}

	AS_LOCK_ENTER(as, RW_READER);

	seg = as_findseg(as, state->bt_map.um_guest_pages, 0);
	if ((seg == NULL) || ((uaddr + state->bt_map.um_guest_size) >
	    (seg->s_base + seg->s_size))) {
		AS_LOCK_EXIT(as);
		return (DDI_FAILURE);
	}

	/*
	 * lock down the htables so the HAT can't steal them. Register the
	 * PTE MA's for each gref page with seg_mf so we can do user space
	 * gref mappings.
	 */
	for (i = 0; i < pgcnt; i++) {
		hat_prepare_mapping(as->a_hat, uaddr, &pte_ma);
		hat_devload(as->a_hat, uaddr, PAGESIZE, (pfn_t)0,
		    PROT_READ | PROT_WRITE | PROT_USER | HAT_UNORDERED_OK,
		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
		hat_release_mapping(as->a_hat, uaddr);
		segmf_add_gref_pte(seg, uaddr, pte_ma);
		uaddr += PAGESIZE;
	}

	state->bt_map.um_registered = B_TRUE;

	AS_LOCK_EXIT(as);

	return (DDI_SUCCESS);
}
Code example #9
File: xmem_vnops.c Project: andreiw/polaris
/* ARGSUSED2 */
static int
xmem_getattr(struct vnode *vp, struct vattr *vap, int flags, struct cred *cred)
{
	struct xmemnode *xp = (struct xmemnode *)VTOXN(vp);
	struct xmount *xm = (struct xmount *)VTOXM(vp);

	mutex_enter(&xp->xn_tlock);

	*vap = xp->xn_attr;

	vap->va_mode = xp->xn_mode & MODEMASK;
	vap->va_type = vp->v_type;
	vap->va_blksize = xm->xm_bsize;
	vap->va_nblocks = (fsblkcnt64_t)btodb(ptob(btopr(vap->va_size)));

	mutex_exit(&xp->xn_tlock);
	return (0);
}
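
The va_nblocks line above rounds the file size up to whole pages and then converts the result to 512-byte disk blocks. A quick check with hypothetical numbers (4 KB pages and btodb(x) = x >> 9 are assumptions made for the illustration):

#include <stdio.h>
#include <stdint.h>

#define	PAGESHIFT	12			/* assumed 4 KB pages */
#define	PAGESIZE	(1ULL << PAGESHIFT)
#define	PAGEOFFSET	(PAGESIZE - 1)
#define	btopr(x)	(((x) + PAGEOFFSET) >> PAGESHIFT)
#define	ptob(x)		((x) << PAGESHIFT)
#define	btodb(x)	((x) >> 9)		/* bytes to 512-byte blocks */

int
main(void)
{
	uint64_t va_size = 5000;		/* hypothetical file size in bytes */

	/* 5000 bytes -> 2 pages -> 8192 bytes -> 16 disk blocks */
	printf("va_nblocks = %llu\n",
	    (unsigned long long)btodb(ptob(btopr(va_size))));
	return (0);
}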
Code example #10
File: seg_kmem.c Project: bahamas10/openzfs
/*
 * Get pages from boot and hash them into the kernel's vp.
 * Used after page structs have been allocated, but before segkmem is ready.
 */
void *
boot_alloc(void *inaddr, size_t size, uint_t align)
{
	caddr_t addr = inaddr;

	if (bootops == NULL)
		prom_panic("boot_alloc: attempt to allocate memory after "
		    "BOP_GONE");

	size = ptob(btopr(size));
#ifdef __sparc
	if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
		panic("boot_alloc: bop_alloc_chunk failed");
#else
	if (BOP_ALLOC(bootops, addr, size, align) != addr)
		panic("boot_alloc: BOP_ALLOC failed");
#endif
	boot_mapin((caddr_t)addr, size);
	return (addr);
}
Code example #11
File: xpvtap.c Project: pcd1193182/openzfs
/*ARGSUSED*/
static void
xpvtap_segmf_unregister(struct as *as, void *arg, uint_t event)
{
	xpvtap_state_t *state;
	caddr_t uaddr;
	uint_t pgcnt;
	int i;


	state = (xpvtap_state_t *)arg;
	if (!state->bt_map.um_registered) {
		/* remove the callback (which is this routine) */
		(void) as_delete_callback(as, arg);
		return;
	}

	pgcnt = btopr(state->bt_map.um_guest_size);
	uaddr = state->bt_map.um_guest_pages;

	/* unmap any outstanding req's grefs */
	xpvtap_rs_flush(state->bt_map.um_rs, xpvtap_user_request_unmap, state);

	/* Unlock the gref pages */
	for (i = 0; i < pgcnt; i++) {
		AS_LOCK_ENTER(as, RW_WRITER);
		hat_prepare_mapping(as->a_hat, uaddr, NULL);
		hat_unload(as->a_hat, uaddr, PAGESIZE, HAT_UNLOAD_UNLOCK);
		hat_release_mapping(as->a_hat, uaddr);
		AS_LOCK_EXIT(as);
		uaddr += PAGESIZE;
	}

	/* remove the callback (which is this routine) */
	(void) as_delete_callback(as, arg);

	state->bt_map.um_registered = B_FALSE;
}
Code example #12
File: seg_kmem.c Project: bahamas10/openzfs
static void
segkmem_free_one_lp(caddr_t addr, size_t size)
{
	page_t		*pp, *rootpp = NULL;
	pgcnt_t 	pgs_left = btopr(size);

	ASSERT(size == segkmem_lpsize);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
		if (pp == NULL)
			panic("segkmem_free_one_lp: page not found");
		ASSERT(PAGE_EXCL(pp));
		pp->p_lckcnt = 0;
		if (rootpp == NULL)
			rootpp = pp;
	}
	ASSERT(rootpp != NULL);
	page_destroy_pages(rootpp);

	/* page_unresv() is done by the caller */
}
Code example #13
int
tmp_convnum(char *str, pgcnt_t *maxpg)
{
	uint64_t num = 0, oldnum;
#ifdef _LP64
	uint64_t max_bytes = ULONG_MAX;
#else
	uint64_t max_bytes = PAGESIZE * (uint64_t)ULONG_MAX;
#endif
	char *c;

	if (str == NULL)
		return (EINVAL);
	c = str;

	/*
	 * Convert str to number
	 */
	while ((*c >= '0') && (*c <= '9')) {
		oldnum = num;
		num = num * 10 + (*c++ - '0');
		if (oldnum > num) /* overflow */
			return (EINVAL);
	}

	/*
	 * Terminate on null
	 */
	while (*c != '\0') {
		switch (*c++) {

		/*
		 * convert from kilobytes
		 */
		case 'k':
		case 'K':
			if (num > max_bytes / 1024) /* will overflow */
				return (EINVAL);
			num *= 1024;
			break;

		/*
		 * convert from megabytes
		 */
		case 'm':
		case 'M':
			if (num > max_bytes / (1024 * 1024)) /* will overflow */
				return (EINVAL);
			num *= 1024 * 1024;
			break;

		default:
			return (EINVAL);
		}
	}

	/*
	 * Since btopr() rounds up to page granularity, this round-up can
	 * cause an overflow only if 'num' is between (max_bytes - PAGESIZE)
	 * and (max_bytes). In this case the resulting number is zero, which
	 * is what we check for below.
	 */
	if ((*maxpg = (pgcnt_t)btopr(num)) == 0 && num != 0)
		return (EINVAL);
	return (0);
}
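
To make the final overflow check concrete: if num lands within a page of the maximum, adding PAGEOFFSET inside btopr() wraps the 64-bit value around, so the rounded page count collapses to zero and the function rejects the input. A standalone illustration (4 KB pages assumed, btopr redefined locally just for the demo):

#include <stdio.h>
#include <stdint.h>

#define	PAGESHIFT	12			/* assumed 4 KB pages */
#define	PAGEOFFSET	((1ULL << PAGESHIFT) - 1)
#define	btopr(x)	(((x) + PAGEOFFSET) >> PAGESHIFT)

int
main(void)
{
	uint64_t num = UINT64_MAX - 100;	/* within one page of the maximum */

	/* num + PAGEOFFSET wraps past 2^64, so the page count comes out as 0 */
	printf("btopr(0x%llx) = %llu\n",
	    (unsigned long long)num, (unsigned long long)btopr(num));
	return (0);
}

That zero result with a nonzero num is exactly the case the closing test turns into EINVAL.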
Code example #14
File: seg_kmem.c Project: bahamas10/openzfs
	    segkmem_page_create_large, NULL);
	return (addr);
}

/*
 * segkmem_free_lpi() returns virtual memory back into large page heap arena
 * from kmem_lp arena. Before doing this it unmaps the segment and frees
 * large pages used to map it.
 */
static void
segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
{
	pgcnt_t		nlpages = size >> segkmem_lpshift;
	size_t		lpsize = segkmem_lpsize;
	caddr_t		addr = inaddr;
	pgcnt_t 	npages = btopr(size);
	int		i;

	ASSERT(vmp == heap_lp_arena);
	ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
	ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);

	for (i = 0; i < nlpages; i++) {
		segkmem_free_one_lp(addr, lpsize);
		addr += lpsize;
	}

	page_unresv(npages);

	vmem_free(vmp, inaddr, size);
}
Code example #15
/*
 * Find the largest contiguous block which contains `addr' for file offset
 * `offset' in it while living within the file system block sizes (`vp_off'
 * and `vp_len') and the address space limits for which no pages currently
 * exist and which map to consecutive file offsets.
 */
page_t *
pvn_read_kluster(
    struct vnode *vp,
    u_offset_t off,
    struct seg *seg,
    caddr_t addr,
    u_offset_t *offp,			/* return values */
    size_t *lenp,				/* return values */
    u_offset_t vp_off,
    size_t vp_len,
    int isra)
{
    ssize_t deltaf, deltab;
    page_t *pp;
    page_t *plist = NULL;
    spgcnt_t pagesavail;
    u_offset_t vp_end;

    ASSERT(off >= vp_off && off < vp_off + vp_len);

    /*
     * We only want to do klustering/read ahead if there
     * is more than minfree pages currently available.
     */
    pagesavail = freemem - minfree;

    if (pagesavail <= 0)
        if (isra)
            return ((page_t *)NULL);    /* ra case - give up */
        else
            pagesavail = 1;		    /* must return a page */

    /* We calculate in pages instead of bytes due to 32-bit overflows */
    if (pagesavail < (spgcnt_t)btopr(vp_len)) {
        /*
         * Don't have enough free memory for the
         * max request, try sizing down vp request.
         */
        deltab = (ssize_t)(off - vp_off);
        vp_len -= deltab;
        vp_off += deltab;
        if (pagesavail < btopr(vp_len)) {
            /*
             * Still not enough memory, just settle for
             * pagesavail which is at least 1.
             */
            vp_len = ptob(pagesavail);
        }
    }

    vp_end = vp_off + vp_len;
    ASSERT(off >= vp_off && off < vp_end);

    if (isra && SEGOP_KLUSTER(seg, addr, 0))
        return ((page_t *)NULL);	/* segment driver says no */

    if ((plist = page_create_va(vp, off,
                                PAGESIZE, PG_EXCL | PG_WAIT, seg, addr)) == NULL)
        return ((page_t *)NULL);

    if (vp_len <= PAGESIZE || pvn_nofodklust) {
        *offp = off;
        *lenp = MIN(vp_len, PAGESIZE);
    } else {
        /*
         * Scan back from front by incrementing "deltab" and
         * comparing "off" with "vp_off + deltab" to avoid
         * "signed" versus "unsigned" conversion problems.
         */
        for (deltab = PAGESIZE; off >= vp_off + deltab;
                deltab += PAGESIZE) {
            /*
             * Call back to the segment driver to verify that
             * the klustering/read ahead operation makes sense.
             */
            if (SEGOP_KLUSTER(seg, addr, -deltab))
                break;		/* page not eligible */
            if ((pp = page_create_va(vp, off - deltab,
                                     PAGESIZE, PG_EXCL, seg, addr - deltab))
                    == NULL)
                break;		/* already have the page */
            /*
             * Add page to front of page list.
             */
            page_add(&plist, pp);
        }
        deltab -= PAGESIZE;

        /* scan forward from front */
        for (deltaf = PAGESIZE; off + deltaf < vp_end;
                deltaf += PAGESIZE) {
            /*
             * Call back to the segment driver to verify that
             * the klustering/read ahead operation makes sense.
             */
            if (SEGOP_KLUSTER(seg, addr, deltaf))
                break;		/* page not file extension */
            if ((pp = page_create_va(vp, off + deltaf,
                                     PAGESIZE, PG_EXCL, seg, addr + deltaf))
                    == NULL)
                break;		/* already have page */

            /*
             * Add page to end of page list.
             */
            page_add(&plist, pp);
            plist = plist->p_next;
        }
        *offp = off = off - deltab;
        *lenp = deltab + deltaf;
        ASSERT(off >= vp_off);

        /*
         * If we ended up getting more than was actually
         * requested, retract the returned length to only
         * reflect what was requested.  This might happen
         * if we were allowed to kluster pages across a
         * span of (say) 5 frags, and frag size is less
         * than PAGESIZE.  We need a whole number of
         * pages to contain those frags, but the returned
         * size should only allow the returned range to
         * extend as far as the end of the frags.
         */
        if ((vp_off + vp_len) < (off + *lenp)) {
            ASSERT(vp_end > off);
            *lenp = vp_end - off;
        }
    }
    TRACE_3(TR_FAC_VM, TR_PVN_READ_KLUSTER,
            "pvn_read_kluster:seg %p addr %x isra %x",
            seg, addr, isra);
    return (plist);
}
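
The length retraction at the end of the klustering path is plain page-versus-frag arithmetic. A standalone sketch with hypothetical numbers (1 KB frags and 4 KB pages are assumptions for the illustration):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* hypothetical request: 5 frags of 1 KB starting at file offset 0 */
	uint64_t vp_off = 0, vp_len = 5 * 1024;
	uint64_t vp_end = vp_off + vp_len;

	/* klustering created two whole 4 KB pages starting at offset 0 */
	uint64_t off = 0, len = 2 * 4096;

	/* the page-rounded range overshoots the frags, so retract the length */
	if (vp_off + vp_len < off + len)
		len = vp_end - off;

	printf("returned length = %llu bytes\n", (unsigned long long)len);
	return (0);
}

The pages themselves still cover 8 KB in memory, but the reported length stops at 5120 bytes, the end of the frags, just as the comment describes.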
Code example #16
File: seg_kmem.c Project: bahamas10/openzfs
/*
 * Allocate a large page to back the virtual address range
 * [addr, addr + size).  If addr is NULL, allocate the virtual address
 * space as well.
 */
static void *
segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
    uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
    void *pcarg)
{
	caddr_t addr = inaddr, pa;
	size_t  lpsize = segkmem_lpsize;
	pgcnt_t npages = btopr(size);
	pgcnt_t nbpages = btop(lpsize);
	pgcnt_t nlpages = size >> segkmem_lpshift;
	size_t  ppasize = nbpages * sizeof (page_t *);
	page_t *pp, *rootpp, **ppa, *pplist = NULL;
	int i;

	vmflag |= VM_NOSLEEP;

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		return (NULL);
	}

	/*
	 * allocate an array we need for hat_memload_array.
	 * we use a separate arena to avoid recursion.
	 * we will not need this array when hat_memload_array learns pp++
	 */
	if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
		goto fail_array_alloc;
	}

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		goto fail_vmem_alloc;

	ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);

	/* create all the pages */
	for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
		if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
			goto fail_page_create;
		page_list_concat(&pplist, &pp);
	}

	/* at this point we have all the resource to complete the request */
	while ((rootpp = pplist) != NULL) {
		for (i = 0; i < nbpages; i++) {
			ASSERT(pplist != NULL);
			pp = pplist;
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
			ppa[i] = pp;
		}
		/*
		 * Load the locked entry. It's OK to preload the entry into the
		 * TSB since we now support large mappings in the kernel TSB.
		 */
		hat_memload_array(kas.a_hat,
		    (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
		    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK);

		for (--i; i >= 0; --i) {
			ppa[i]->p_lckcnt = 1;
			page_unlock(ppa[i]);
		}
	}

	vmem_free(segkmem_ppa_arena, ppa, ppasize);
	return (addr);

fail_page_create:
	while ((rootpp = pplist) != NULL) {
		for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
			ASSERT(pp != NULL);
			page_sub(&pplist, pp);
			ASSERT(page_iolock_assert(pp));
			page_io_unlock(pp);
		}
		page_destroy_pages(rootpp);
	}

	if (inaddr == NULL)
		vmem_free(vmp, addr, size);

fail_vmem_alloc:
	vmem_free(segkmem_ppa_arena, ppa, ppasize);

fail_array_alloc:
	page_unresv(npages);

	return (NULL);
}
Code example #17
File: seg_kmem.c Project: bahamas10/openzfs
/*
 * Allocate pages to back the virtual address range [addr, addr + size).
 * If addr is NULL, allocate the virtual address space as well.
 */
void *
segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
	page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
{
	page_t *ppl;
	caddr_t addr = inaddr;
	pgcnt_t npages = btopr(size);
	int allocflag;

	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
		return (NULL);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		return (NULL);
	}

	ppl = page_create_func(addr, size, vmflag, pcarg);
	if (ppl == NULL) {
		if (inaddr == NULL)
			vmem_free(vmp, addr, size);
		page_unresv(npages);
		return (NULL);
	}

	/*
	 * Under certain conditions, we need to let the HAT layer know
	 * that it cannot safely allocate memory.  Allocations from
	 * the hat_memload vmem arena always need this, to prevent
	 * infinite recursion.
	 *
	 * In addition, the x86 hat cannot safely do memory
	 * allocations while in vmem_populate(), because there
	 * is no simple bound on its usage.
	 */
	if (vmflag & VM_MEMLOAD)
		allocflag = HAT_NO_KALLOC;
#if defined(__x86)
	else if (vmem_is_populator())
		allocflag = HAT_NO_KALLOC;
#endif
	else
		allocflag = 0;

	while (ppl != NULL) {
		page_t *pp = ppl;
		page_sub(&ppl, pp);
		ASSERT(page_iolock_assert(pp));
		ASSERT(PAGE_EXCL(pp));
		page_io_unlock(pp);
		hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
		    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
		    HAT_LOAD_LOCK | allocflag);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		if (vmflag & SEGKMEM_SHARELOCKED)
			page_downgrade(pp);
		else
			page_unlock(pp);
#endif
	}

	return (addr);
}
Code example #18
File: seg_kmem.c Project: bahamas10/openzfs
/*ARGSUSED*/
static faultcode_t
segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
	enum fault_type type, enum seg_rw rw)
{
	pgcnt_t npages;
	spgcnt_t pg;
	page_t *pp;
	struct vnode *vp = seg->s_data;

	ASSERT(RW_READ_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_fault: bad args");

	/*
	 * If it is one of segkp pages, call segkp_fault.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));

	if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
		return (FC_NOSUPPORT);

	npages = btopr(size);

	switch (type) {
	case F_SOFTLOCK:	/* lock down already-loaded translations */
		for (pg = 0; pg < npages; pg++) {
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_SHARED);
			if (pp == NULL) {
				/*
				 * Hmm, no page. Does a kernel mapping
				 * exist for it?
				 */
				if (!hat_probe(kas.a_hat, addr)) {
					addr -= PAGESIZE;
					while (--pg >= 0) {
						pp = page_find(vp, (u_offset_t)
						    (uintptr_t)addr);
						if (pp)
							page_unlock(pp);
						addr -= PAGESIZE;
					}
					return (FC_NOMAP);
				}
			}
			addr += PAGESIZE;
		}
		if (rw == S_OTHER)
			hat_reserve(seg->s_as, addr, size);
		return (0);
	case F_SOFTUNLOCK:
		while (npages--) {
			pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
			if (pp)
				page_unlock(pp);
			addr += PAGESIZE;
		}
		return (0);
	default:
		return (FC_NOSUPPORT);
	}
	/*NOTREACHED*/
}