/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
 * try to free pages, when deleting a file.
 *
 * Locking: the vcache entry's lock is held. It may be dropped and
 * re-obtained.
 *
 * Since we drop and re-obtain the lock, we can't guarantee that there won't
 * be some pages around when we return, newly created by concurrent activity.
 */
void
osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
    struct vnode *vp;
    int tries, code;
    int islocked;		/* lock state we found, so we can restore it on exit */

    vp = AFSTOV(avc);

    /* A vnode being reclaimed has nothing left to smush; VI_DOOMED must
     * be sampled under the vnode interlock. */
    VI_LOCK(vp);
    if (vp->v_iflag & VI_DOOMED) {
	VI_UNLOCK(vp);
	return;
    }
    VI_UNLOCK(vp);

    /* The work below needs the vnode lock held exclusively.  Upgrade a
     * shared hold, acquire fresh if unheld, and panic rather than smush
     * over a lock exclusively owned by another thread. */
    islocked = islocked_vnode(vp);
    if (islocked == LK_EXCLOTHER)
	panic("Trying to Smush over someone else's lock");
    else if (islocked == LK_SHARED) {
	afs_warn("Trying to Smush with a shared lock");
	lock_vnode(vp, LK_UPGRADE);
    } else if (!islocked)
	lock_vnode(vp, LK_EXCLUSIVE);

    if (vp->v_bufobj.bo_object != NULL) {
	AFS_VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
	/*
	 * Do we really want OBJPC_SYNC? OBJPC_INVAL would be
	 * faster, if invalidation is really what we are being
	 * asked to do. (It would make more sense, too, since
	 * otherwise this function is practically identical to
	 * osi_VM_StoreAllSegments().) -GAW
	 */
	/*
	 * Dunno. We no longer resemble osi_VM_StoreAllSegments,
	 * though maybe that's wrong, now. And OBJPC_SYNC is the
	 * common thing in 70 file systems, it seems. Matt.
	 */
	vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
	AFS_VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
    }

    /* Flush/invalidate the buffer cache, retrying a bounded number of
     * times on failure (PCATCH lets a signal interrupt the wait). */
    tries = 5;
    code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
    while (code && (tries > 0)) {
	afs_warn("TryToSmush retrying vinvalbuf");
	code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
	--tries;
    }

    /* Put the vnode lock back the way the caller had it. */
    if (islocked == LK_SHARED)
	lock_vnode(vp, LK_DOWNGRADE);
    else if (!islocked)
	unlock_vnode(vp);
}
/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
 * try to free pages, when deleting a file.
 *
 * Locking: the vcache entry's lock is held. It may be dropped and
 * re-obtained.
 *
 * Since we drop and re-obtain the lock, we can't guarantee that there won't
 * be some pages around when we return, newly created by concurrent activity.
 */
void
osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
    struct vnode *vp;
    int tries, code;
    SPLVAR;			/* legacy spl priority save slot */

    vp = AFSTOV(avc);

    /* NOTE(review): v_iflag is read here without taking the vnode
     * interlock, unlike the sibling variant that brackets this test with
     * VI_LOCK/VI_UNLOCK — presumably acceptable on the kernel versions
     * this variant targets; confirm.  Also, USERPRI is issued below with
     * no visible NETPRI pairing; confirm these macros are no-ops here. */
    if (vp->v_iflag & VI_DOOMED) {
	USERPRI;
	return;
    }

    if (vp->v_bufobj.bo_object != NULL) {
	VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
	/*
	 * Do we really want OBJPC_SYNC? OBJPC_INVAL would be
	 * faster, if invalidation is really what we are being
	 * asked to do. (It would make more sense, too, since
	 * otherwise this function is practically identical to
	 * osi_VM_StoreAllSegments().) -GAW
	 */
	/*
	 * Dunno. We no longer resemble osi_VM_StoreAllSegments,
	 * though maybe that's wrong, now. And OBJPC_SYNC is the
	 * common thing in 70 file systems, it seems. Matt.
	 */
	vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
	VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
    }

    /* Flush/invalidate the buffer cache, retrying a bounded number of
     * times on failure (PCATCH lets a signal interrupt the wait). */
    tries = 5;
    code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
    while (code && (tries > 0)) {
	code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
	--tries;
    }
    USERPRI;
}
/* Purge VM for a file when its callback is revoked.
 *
 * Locking: No lock is held, not even the global lock.
 */
void
osi_VM_FlushPages(struct vcache *avc, afs_ucred_t *credp)
{
    struct vm_object *vmobj;
    struct vnode *vp = AFSTOV(avc);

    ASSERT_VOP_LOCKED(vp, __func__);

    /* If the vnode is backed by a VM object, throw away every resident
     * page under the object lock, then discard any cached buffers. */
    if (VOP_GETVOBJECT(vp, &vmobj) == 0) {
	VM_OBJECT_LOCK(vmobj);
	vm_object_page_remove(vmobj, 0, 0, FALSE);
	VM_OBJECT_UNLOCK(vmobj);
    }

    osi_vinvalbuf(vp, 0, 0, 0);
}
/* Purge VM for a file when its callback is revoked.
 *
 * Locking: No lock is held, not even the global lock.
 */
void
osi_VM_FlushPages(struct vcache *avc, afs_ucred_t *credp)
{
    struct vm_object *vmobj;
    struct vnode *vp = AFSTOV(avc);

    ASSERT_VOP_LOCKED(vp, __func__);

    /* If the vnode is backed by a VM object, throw away every resident
     * page under the object write lock, then discard any cached buffers. */
    vmobj = vp->v_object;
    if (vmobj != NULL) {
	AFS_VM_OBJECT_WLOCK(vmobj);
	vm_object_page_remove(vmobj, 0, 0, FALSE);
	AFS_VM_OBJECT_WUNLOCK(vmobj);
    }

    osi_vinvalbuf(vp, 0, 0, 0);
}