Example #1
File: lacy.c Project: teneighty/lacy
/*
 * Look up the page for "file_path" in the global page list; on a
 * miss, load the file and cache the new page.
 */
struct page *
page_find(char *file_path)
{
    struct page *p = page_list;
    while (NULL != p) {
        if (0 == strcmp(p->file_path, file_path))
            return p;

        p = p->next;
    }
    p = page_slurp(file_path);
    page_add(p);

    return p;
}
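For context, the snippet leans on a few helpers from the same project. Below is a minimal sketch of the shapes they appear to have, inferred only from the call sites above; the actual teneighty/lacy definitions (in particular page_slurp(), assumed here to load the file into a freshly allocated page) may differ.

/*
 * Assumed declarations, inferred from the call sites above; not the
 * actual lacy definitions.
 */
struct page {
    char *file_path;                /* lookup key used by page_find() */
    struct page *next;              /* next entry in the global list */
    /* ... page contents elided ... */
};

static struct page *page_list;      /* head of the cached-page list */

struct page *page_slurp(char *file_path);  /* assumed: read file into a new page */

/* Assumed behaviour: prepend the new page to the global list. */
void
page_add(struct page *p)
{
    p->next = page_list;
    page_list = p;
}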
Example #2
/*
 * Find the largest contiguous range of pages that contains `addr'
 * and file offset `off', stays within the file limits (`vp_off'
 * and `vp_len') and the address space limits, and consists only of
 * pages that do not currently exist and that map to consecutive
 * file offsets.
 */
page_t *
pvn_read_kluster(
    struct vnode *vp,
    u_offset_t off,
    struct seg *seg,
    caddr_t addr,
    u_offset_t *offp,			/* return values */
    size_t *lenp,				/* return values */
    u_offset_t vp_off,
    size_t vp_len,
    int isra)
{
    ssize_t deltaf, deltab;
    page_t *pp;
    page_t *plist = NULL;
    spgcnt_t pagesavail;
    u_offset_t vp_end;

    ASSERT(off >= vp_off && off < vp_off + vp_len);

    /*
     * We only want to do klustering/read ahead if there
     * are more than minfree pages currently available.
     */
    pagesavail = freemem - minfree;

    if (pagesavail <= 0) {
        if (isra)
            return ((page_t *)NULL);    /* ra case - give up */
        else
            pagesavail = 1;		    /* must return a page */
    }

    /* We calculate in pages instead of bytes due to 32-bit overflows */
    if (pagesavail < (spgcnt_t)btopr(vp_len)) {
        /*
         * Don't have enough free memory for the
         * max request, try sizing down vp request.
         */
        deltab = (ssize_t)(off - vp_off);
        vp_len -= deltab;
        vp_off += deltab;
        if (pagesavail < btopr(vp_len)) {
            /*
             * Still not enough memory, just settle for
             * pagesavail which is at least 1.
             */
            vp_len = ptob(pagesavail);
        }
    }

    vp_end = vp_off + vp_len;
    ASSERT(off >= vp_off && off < vp_end);

    if (isra && SEGOP_KLUSTER(seg, addr, 0))
        return ((page_t *)NULL);	/* segment driver says no */

    if ((plist = page_create_va(vp, off,
                                PAGESIZE, PG_EXCL | PG_WAIT, seg, addr)) == NULL)
        return ((page_t *)NULL);

    if (vp_len <= PAGESIZE || pvn_nofodklust) {
        *offp = off;
        *lenp = MIN(vp_len, PAGESIZE);
    } else {
        /*
         * Scan backwards from the initial page by incrementing
         * "deltab" and comparing "off" with "vp_off + deltab" to
         * avoid "signed" versus "unsigned" conversion problems.
         */
        for (deltab = PAGESIZE; off >= vp_off + deltab;
                deltab += PAGESIZE) {
            /*
             * Call back to the segment driver to verify that
             * the klustering/read ahead operation makes sense.
             */
            if (SEGOP_KLUSTER(seg, addr, -deltab))
                break;		/* page not eligible */
            if ((pp = page_create_va(vp, off - deltab,
                                     PAGESIZE, PG_EXCL, seg, addr - deltab))
                    == NULL)
                break;		/* already have the page */
            /*
             * Add page to front of page list.
             */
            page_add(&plist, pp);
        }
        deltab -= PAGESIZE;

        /* now scan forward from the initial page */
        for (deltaf = PAGESIZE; off + deltaf < vp_end;
                deltaf += PAGESIZE) {
            /*
             * Call back to the segment driver to verify that
             * the klustering/read ahead operation makes sense.
             */
            if (SEGOP_KLUSTER(seg, addr, deltaf))
                break;		/* page not file extension */
            if ((pp = page_create_va(vp, off + deltaf,
                                     PAGESIZE, PG_EXCL, seg, addr + deltaf))
                    == NULL)
                break;		/* already have page */

            /*
             * Add page to end of page list.
             */
            page_add(&plist, pp);
            plist = plist->p_next;
        }
        *offp = off = off - deltab;
        *lenp = deltab + deltaf;
        ASSERT(off >= vp_off);

        /*
         * If we ended up getting more than was actually
         * requested, retract the returned length to only
         * reflect what was requested.  This might happen
         * if we were allowed to kluster pages across a
         * span of (say) 5 frags, and frag size is less
         * than PAGESIZE.  We need a whole number of
         * pages to contain those frags, but the returned
         * size should only allow the returned range to
         * extend as far as the end of the frags.
         */
        if ((vp_off + vp_len) < (off + *lenp)) {
            ASSERT(vp_end > off);
            *lenp = vp_end - off;
        }
    }
    TRACE_3(TR_FAC_VM, TR_PVN_READ_KLUSTER,
            "pvn_read_kluster:seg %p addr %x isra %x",
            seg, addr, isra);
    return (plist);
}
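Both klustering loops above rely on page_add() linking the new page in at the head of a circular, doubly linked page list. The sketch below models that list manipulation as it works in illumos; the real routine also asserts locking invariants, which are omitted here, and page_t, p_next and p_prev come from the kernel's page definitions rather than this sketch.

/*
 * Sketch of page_add() as used by pvn_read_kluster(): insert "pp"
 * just before the current head "*ppp" of a circular doubly linked
 * list and make it the new head.  Locking asserts of the real
 * illumos routine are omitted.
 */
void
page_add(page_t **ppp, page_t *pp)
{
    if (*ppp == NULL) {
        pp->p_next = pp->p_prev = pp;   /* first page: one-element circle */
    } else {
        pp->p_next = *ppp;
        pp->p_prev = (*ppp)->p_prev;
        (*ppp)->p_prev = pp;
        pp->p_prev->p_next = pp;
    }
    *ppp = pp;
}

This explains the asymmetry between the two scans: the backward scan lets each lower-offset page become the new head, while the forward scan follows page_add(&plist, pp) with plist = plist->p_next, which links the new page in just before the old head (i.e. at the tail of the circle) and then restores the old head, so the returned list stays in ascending file-offset order starting at *offp.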
Example #3
/*
 * Handle pages for this vnode on either side of the page "pp",
 * which has been locked by the caller.  This routine will also
 * do klustering in the range [vp_off, vp_off + vp_len], stopping
 * at the first page that is not found.  The offset and length
 * of the pages included are returned in "*offp" and "*lenp".
 *
 * Returns a list of dirty locked pages all ready to be
 * written back.
 */
page_t *
pvn_write_kluster(
    struct vnode *vp,
    page_t *pp,
    u_offset_t *offp,		/* return values */
    size_t *lenp,			/* return values */
    u_offset_t vp_off,
    size_t vp_len,
    int flags)
{
    u_offset_t off;
    page_t *dirty;
    size_t deltab, deltaf;
    se_t se;
    u_offset_t vp_end;

    off = pp->p_offset;

    /*
     * Klustering should not be done if we are invalidating
     * pages, since we could destroy pages that belong to
     * some other process if this is a swap vnode.
     */
    if (pvn_write_noklust || ((flags & B_INVAL) && IS_SWAPVP(vp))) {
        *offp = off;
        *lenp = PAGESIZE;
        return (pp);
    }

    if (flags & (B_FREE | B_INVAL))
        se = SE_EXCL;
    else
        se = SE_SHARED;

    dirty = pp;
    /*
     * Scan backwards looking for pages to kluster by incrementing
     * "deltab" and comparing "off" with "vp_off + deltab" to
     * avoid "signed" versus "unsigned" conversion problems.
     */
    for (deltab = PAGESIZE; off >= vp_off + deltab; deltab += PAGESIZE) {
        pp = page_lookup_nowait(vp, off - deltab, se);
        if (pp == NULL)
            break;		/* page not found */
        if (pvn_getdirty(pp, flags | B_DELWRI) == 0)
            break;
        page_add(&dirty, pp);
    }
    deltab -= PAGESIZE;

    vp_end = vp_off + vp_len;
    /* now scan forwards looking for pages to kluster */
    for (deltaf = PAGESIZE; off + deltaf < vp_end; deltaf += PAGESIZE) {
        pp = page_lookup_nowait(vp, off + deltaf, se);
        if (pp == NULL)
            break;		/* page not found */
        if (pvn_getdirty(pp, flags | B_DELWRI) == 0)
            break;
        page_add(&dirty, pp);
        dirty = dirty->p_next;
    }

    *offp = off - deltab;
    *lenp = deltab + deltaf;
    return (dirty);
}
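As a worked example of the return values, assume 8K pages (PAGESIZE = 0x2000), vp_off = 0, vp_len = 0x10000 and a locked page at off = 0x6000. If the backward scan finds dirty pages at 0x4000 and 0x2000 but none at 0, the loop exits with deltab = 0x6000 and the deltab -= PAGESIZE that follows brings it back to 0x4000; if the forward scan then finds dirty pages at 0x8000 and 0xA000 but none at 0xC000, it exits with deltaf = 0x6000. The routine therefore returns five pages with *offp = 0x2000 and *lenp = 0xA000, i.e. the contiguous dirty range [0x2000, 0xC000). The numbers are illustrative only; which pages are actually klustered depends on page_lookup_nowait() and pvn_getdirty() at run time.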