Example #1
/**
 * Allocates one page.
 *
 * @param virtAddr       The virtual address to which this page may be mapped in
 *                       the future.
 *
 * @returns Pointer to the allocated page, NULL on failure.
 */
static page_t *rtR0MemObjSolPageAlloc(caddr_t virtAddr)
{
    u_offset_t      offPage;
    seg_t           KernelSeg;

    /*
     * Allocate a unique 64-bit offset into g_PageVnode for this page:
     * that is 16777215 terabytes of total memory for all VMs, or
     * restarting 8000 1GB VMs 2147483 times before the offset wraps around!
     */
    mutex_enter(&g_OffsetMtx);
    AssertCompileSize(u_offset_t, sizeof(uint64_t)); NOREF(RTASSERTVAR);
    g_offPage = RT_ALIGN_64(g_offPage, PAGE_SIZE) + PAGE_SIZE;
    offPage   = g_offPage;
    mutex_exit(&g_OffsetMtx);

    KernelSeg.s_as = &kas;
    page_t *pPage = page_create_va(&g_PageVnode, offPage, PAGE_SIZE, PG_WAIT | PG_NORELOC, &KernelSeg, virtAddr);
    if (RT_LIKELY(pPage))
    {
        /*
         * Lock this page into memory "long term" to prevent this page from being paged out
         * when we drop the page lock temporarily (during free). Downgrade to a shared lock
         * to prevent page relocation.
         */
        page_pp_lock(pPage, 0 /* COW */, 1 /* Kernel */);
        page_io_unlock(pPage);
        page_downgrade(pPage);
        Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));
    }

    return pPage;
}
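The matching free path has to win back the exclusive lock that page_downgrade() gave up before the page can be destroyed. Below is a minimal sketch of that path, assuming the same g_PageVnode and lock discipline as above; the function name and exact steps are illustrative, not necessarily VirtualBox's actual code.

/* Hedged sketch: free one page allocated by rtR0MemObjSolPageAlloc(). */
static void rtR0MemObjSolPageDestroy(page_t *pPage)
{
    /*
     * We hold only a shared lock; freeing needs the exclusive lock. If the
     * in-place upgrade fails, drop the lock and look the page up again
     * exclusively by its vnode offset.
     */
    u_offset_t offPage = pPage->p_offset;
    if (!page_tryupgrade(pPage))
    {
        page_unlock(pPage);
        page_t *pFoundPage = page_lookup(&g_PageVnode, offPage, SE_EXCL);
        Assert(pFoundPage == pPage); NOREF(pFoundPage);
    }
    page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    page_destroy(pPage, 0 /* move it to the free list */);
}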
Example #2
/*
 * Back a segkmem allocation with pages: translate the caller's VM_*
 * vmem flags into PG_* page_create flags, then create the pages at the
 * vnode offset that equals the kernel virtual address.
 */
/*ARGSUSED*/
page_t *
segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
{
	struct seg kseg;
	int pgflags;
	struct vnode *vp = arg;

	if (vp == NULL)
		vp = &kvp;

	kseg.s_as = &kas;
	pgflags = PG_EXCL;

	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
		pgflags |= PG_NORELOC;
	if ((vmflag & VM_NOSLEEP) == 0)
		pgflags |= PG_WAIT;
	if (vmflag & VM_PANIC)
		pgflags |= PG_PANIC;
	if (vmflag & VM_PUSHPAGE)
		pgflags |= PG_PUSHPAGE;
	if (vmflag & VM_NORMALPRI) {
		ASSERT(vmflag & VM_NOSLEEP);
		pgflags |= PG_NORMALPRI;
	}

	return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
	    pgflags, &kseg, addr));
}
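segkmem_page_create() is normally handed to segkmem_xalloc() as its page-create callback rather than called directly. The sketch below, loosely following the illumos segkmem_xalloc() loop, shows how a caller might wire the returned pages into the kernel address space; the function name is hypothetical and error handling is omitted.

/* Hedged sketch, not the real segkmem_xalloc(): wire the page list that
 * segkmem_page_create() returns. Assumes addr came from a kernel arena. */
static void
example_wire_pages(void *addr, size_t size)
{
	page_t *ppl = segkmem_page_create(addr, size, VM_SLEEP, NULL);

	while (ppl != NULL) {
		page_t *pp = ppl;
		page_sub(&ppl, pp);	/* detach the head of the list */
		page_io_unlock(pp);	/* pages come back io-locked */
		/* The vnode offset was the VA, so p_offset recovers it. */
		hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
		    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC, HAT_LOAD_LOCK);
	}
}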
Example #3
/*
 * getpage entry point for bootfs: return the page backing (vp, off),
 * creating it and copying its contents from the boot-loaded physical
 * range on first touch.
 */
/*ARGSUSED*/
static int
bootfs_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp,
    page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr, enum seg_rw rw,
    cred_t *cr)
{
	bootfs_node_t *bnp = vp->v_data;
	page_t *pp, *fpp;
	pfn_t pfn;

	for (;;) {
		/* Easy case where the page exists */
		pp = page_lookup(vp, off, rw == S_CREATE ? SE_EXCL : SE_SHARED);
		if (pp != NULL) {
			if (pl != NULL) {
				pl[0] = pp;
				pl[1] = NULL;
			} else {
				page_unlock(pp);
			}
			return (0);
		}

		pp = page_create_va(vp, off, PAGESIZE, PG_EXCL | PG_WAIT, seg,
		    addr);

		/*
		 * page_create_va() returns NULL if someone else beat us
		 * to creating this page, in which case we loop back and
		 * retry the lookup.
		 */
		if (pp != NULL)
			break;
	}

	pfn = btop((bnp->bvn_addr + off) & PAGEMASK);
	fpp = page_numtopp_nolock(pfn);

	if (ppcopy(fpp, pp) == 0) {
		pvn_read_done(pp, B_ERROR);
		return (EIO);
	}

	if (pl != NULL) {
		pvn_plist_init(pp, pl, plsz, off, PAGESIZE, rw);
	} else {
		pvn_io_done(pp);
	}

	return (0);
}
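Callers reach bootfs_getapage() through the VOP_GETPAGE() vnode interface; the pl[]/plsz protocol above is what pvn_plist_init() fills in on their behalf. A rough sketch of such a caller follows; the function name is hypothetical and error handling is simplified.

/* Hedged sketch: fault in the single page backing (vp, off). */
static int
example_fault_one_page(vnode_t *vp, u_offset_t off, struct seg *seg,
    caddr_t addr, cred_t *cr)
{
	page_t *pl[2];			/* one page plus NULL terminator */
	uint_t prot = PROT_ALL;
	int err;

	err = VOP_GETPAGE(vp, off, PAGESIZE, &prot, pl, PAGESIZE,
	    seg, addr, S_READ, cr, NULL);
	if (err == 0 && pl[0] != NULL)
		page_unlock(pl[0]);	/* drop the shared page lock */
	return (err);
}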
Example #4
/*
 * Find the largest contiguous block containing `addr' and file offset
 * `off' for which no pages currently exist and whose pages map to
 * consecutive file offsets, staying within the file system block
 * limits (`vp_off' and `vp_len') and the address space limits.
 */
page_t *
pvn_read_kluster(
    struct vnode *vp,
    u_offset_t off,
    struct seg *seg,
    caddr_t addr,
    u_offset_t *offp,			/* return values */
    size_t *lenp,				/* return values */
    u_offset_t vp_off,
    size_t vp_len,
    int isra)
{
    ssize_t deltaf, deltab;
    page_t *pp;
    page_t *plist = NULL;
    spgcnt_t pagesavail;
    u_offset_t vp_end;

    ASSERT(off >= vp_off && off < vp_off + vp_len);

    /*
     * We only want to do klustering/read ahead if there
     * are more than minfree pages currently available.
     */
    pagesavail = freemem - minfree;

    if (pagesavail <= 0) {
        if (isra)
            return ((page_t *)NULL);    /* ra case - give up */
        else
            pagesavail = 1;             /* must return a page */
    }

    /* We calculate in pages instead of bytes due to 32-bit overflows */
    if (pagesavail < (spgcnt_t)btopr(vp_len)) {
        /*
         * Don't have enough free memory for the
         * max request, try sizing down vp request.
         */
        deltab = (ssize_t)(off - vp_off);
        vp_len -= deltab;
        vp_off += deltab;
        if (pagesavail < (spgcnt_t)btopr(vp_len)) {
            /*
             * Still not enough memory, just settle for
             * pagesavail which is at least 1.
             */
            vp_len = ptob(pagesavail);
        }
    }

    vp_end = vp_off + vp_len;
    ASSERT(off >= vp_off && off < vp_end);

    if (isra && SEGOP_KLUSTER(seg, addr, 0))
        return ((page_t *)NULL);	/* segment driver says no */

    if ((plist = page_create_va(vp, off,
                                PAGESIZE, PG_EXCL | PG_WAIT, seg, addr)) == NULL)
        return ((page_t *)NULL);

    if (vp_len <= PAGESIZE || pvn_nofodklust) {
        *offp = off;
        *lenp = MIN(vp_len, PAGESIZE);
    } else {
        /*
         * Scan back from front by incrementing "deltab" and
         * comparing "off" with "vp_off + deltab" to avoid
         * "signed" versus "unsigned" conversion problems.
         */
        for (deltab = PAGESIZE; off >= vp_off + deltab;
                deltab += PAGESIZE) {
            /*
             * Call back to the segment driver to verify that
             * the klustering/read ahead operation makes sense.
             */
            if (SEGOP_KLUSTER(seg, addr, -deltab))
                break;		/* page not eligible */
            if ((pp = page_create_va(vp, off - deltab,
                                     PAGESIZE, PG_EXCL, seg, addr - deltab))
                    == NULL)
                break;		/* already have the page */
            /*
             * Add page to front of page list.
             */
            page_add(&plist, pp);
        }
        deltab -= PAGESIZE;

        /* scan forward from front */
        for (deltaf = PAGESIZE; off + deltaf < vp_end;
                deltaf += PAGESIZE) {
            /*
             * Call back to the segment driver to verify that
             * the klustering/read ahead operation makes sense.
             */
            if (SEGOP_KLUSTER(seg, addr, deltaf))
                break;		/* page not file extension */
            if ((pp = page_create_va(vp, off + deltaf,
                                     PAGESIZE, PG_EXCL, seg, addr + deltaf))
                    == NULL)
                break;		/* already have page */

            /*
             * Add page to end of page list.
             */
            page_add(&plist, pp);
            plist = plist->p_next;
        }
        *offp = off = off - deltab;
        *lenp = deltab + deltaf;
        ASSERT(off >= vp_off);

        /*
         * If we ended up getting more than was actually
         * requested, retract the returned length to only
         * reflect what was requested.  This might happen
         * if we were allowed to kluster pages across a
         * span of (say) 5 frags, and frag size is less
         * than PAGESIZE.  We need a whole number of
         * pages to contain those frags, but the returned
         * size should only allow the returned range to
         * extend as far as the end of the frags.
         */
        if ((vp_off + vp_len) < (off + *lenp)) {
            ASSERT(vp_end > off);
            *lenp = vp_end - off;
        }
    }
    TRACE_3(TR_FAC_VM, TR_PVN_READ_KLUSTER,
            "pvn_read_kluster:seg %p addr %x isra %x",
            seg, addr, isra);
    return (plist);
}
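In a real filesystem getpage path, pvn_read_kluster() is the first step of a read: claim the run of pages, issue one I/O for the whole range, and hand the list back via pvn_plist_init(). Below is a rough sketch of that pattern; the function name is hypothetical, the starting disk block bn and device vnode devvp are assumed to come from the filesystem's block map, and vp_off/vp_len are simplified to the caller's window.

/* Hedged sketch: klustered read for (vp, off); devvp is the device vnode
 * and bn the disk block backing offset `off' (both assumptions here). */
static int
example_getpage_io(struct vnode *vp, struct vnode *devvp, u_offset_t off,
    struct seg *seg, caddr_t addr, page_t *pl[], size_t plsz,
    enum seg_rw rw, daddr_t bn)
{
    u_offset_t io_off;
    size_t io_len;
    page_t *pp;
    struct buf *bp;
    int err;

    pp = pvn_read_kluster(vp, off, seg, addr, &io_off, &io_len,
        off, plsz, 0 /* not read-ahead */);
    if (pp == NULL)
        return (0);    /* pages already exist; caller retries the lookup */

    /* One I/O covers the whole klustered range. */
    bp = pageio_setup(pp, io_len, devvp, B_READ);
    bp->b_edev = devvp->v_rdev;
    bp->b_blkno = bn + btodb(io_off - off);
    bp->b_un.b_addr = (caddr_t)0;
    (void) bdev_strategy(bp);
    err = biowait(bp);
    pageio_done(bp);

    if (err != 0) {
        pvn_read_done(pp, B_ERROR);
        return (err);
    }
    pvn_plist_init(pp, pl, plsz, off, io_len, rw);
    return (0);
}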