/*
 * BIO completion handler for tmpfs swap-pager writes.
 *
 * If we were unable to commit the pages to swap make sure they are marked
 * as needing a commit (again).  If we were, clear the flag to allow the
 * pages to be freed.
 */
static void
tmpfs_strategy_done(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	int failed = (bp->b_flags & B_ERROR) != 0;
	int n;

	if (failed) {
		/*
		 * The swap commit failed.  Eat the error so the buffer
		 * completes "successfully"; the pages below are re-marked
		 * so a commit will be attempted again later.
		 */
		bp->b_flags &= ~B_ERROR;
		bp->b_error = 0;
		bp->b_resid = 0;
	}

	for (n = 0; n < bp->b_xio.xio_npages; ++n) {
		vm_page_t pg = bp->b_xio.xio_pages[n];

		if (failed)
			vm_page_need_commit(pg);
		else
			vm_page_clear_commit(pg);
	}

	/* Unwind our pushed bio and complete the original request. */
	bio = pop_bio(bio);
	biodone(bio);
}
/* * The strategy function is typically only called when memory pressure * forces the system to attempt to pageout pages. It can also be called * by [n]vtruncbuf() when a truncation cuts a page in half. Normal write * operations */ static int tmpfs_strategy(struct vop_strategy_args *ap) { struct bio *bio = ap->a_bio; struct bio *nbio; struct buf *bp = bio->bio_buf; struct vnode *vp = ap->a_vp; struct tmpfs_node *node; vm_object_t uobj; vm_page_t m; int i; if (vp->v_type != VREG) { bp->b_resid = bp->b_bcount; bp->b_flags |= B_ERROR | B_INVAL; bp->b_error = EINVAL; biodone(bio); return(0); } lwkt_gettoken(&vp->v_mount->mnt_token); node = VP_TO_TMPFS_NODE(vp); uobj = node->tn_reg.tn_aobj; /* * Don't bother flushing to swap if there is no swap, just * ensure that the pages are marked as needing a commit (still). */ if (bp->b_cmd == BUF_CMD_WRITE && vm_swap_size == 0) { for (i = 0; i < bp->b_xio.xio_npages; ++i) { m = bp->b_xio.xio_pages[i]; vm_page_need_commit(m); } bp->b_resid = 0; bp->b_error = 0; biodone(bio); } else { nbio = push_bio(bio); nbio->bio_done = tmpfs_strategy_done; nbio->bio_offset = bio->bio_offset; swap_pager_strategy(uobj, nbio); } lwkt_reltoken(&vp->v_mount->mnt_token); return 0; }