/*
 * Release all user-space mappings of a TTM buffer object.
 *
 * Look up the cdev pager VM object backing the buffer, free every
 * resident page from it, and drop the pager reference so the object
 * can be reclaimed.
 */
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_WLOCK(vm_obj);
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup_busy_wait(vm_obj, i, TRUE, "ttm_unm");
		if (m == NULL)
			continue;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}
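/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * tearing down a buffer object would typically revoke user mappings
 * before dropping its last reference, so no process can fault a page
 * back in against storage that is about to be recycled.  The driver
 * function named here is hypothetical; ttm_bo_release_mmap() and
 * ttm_bo_unref() are the real TTM calls:
 *
 *	static void
 *	mydrv_free_object(struct ttm_buffer_object *bo)	// hypothetical
 *	{
 *		ttm_bo_release_mmap(bo);
 *		ttm_bo_unref(&bo);
 *	}
 */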
/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * NOTE: This routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 *
 * NOTE: vp->v_filesize is initialized to NOOFFSET (-1), so be sure we
 * do not blow up in that case.  nsize will always be >= 0, however.
 */
void
vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
{
	vm_pindex_t nobjsize;
	vm_pindex_t oobjsize;
	vm_object_t object;

	object = vp->v_object;
	if (object == NULL)
		return;
	vm_object_hold(object);
	KKASSERT(vp->v_object == object);

	/*
	 * Hasn't changed size
	 */
	if (nsize == vp->v_filesize) {
		vm_object_drop(object);
		return;
	}

	/*
	 * Has changed size.  Adjust the VM object's size and v_filesize
	 * before we start scanning pages to prevent new pages from being
	 * allocated during the scan.
	 */
	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
	oobjsize = object->size;
	object->size = nobjsize;

	/*
	 * File has shrunk.  Toss any cached pages beyond the new EOF.
	 */
	if (nsize < vp->v_filesize) {
		vp->v_filesize = nsize;
		if (nobjsize < oobjsize) {
			vm_object_page_remove(object, nobjsize, oobjsize,
					      FALSE);
		}

		/*
		 * This gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.  Since we are setting
		 * the entire page valid & clean after we are done we have
		 * to be sure that the portion of the page within the file
		 * bounds is already valid.  If it isn't then making it
		 * valid would create a corrupt block.
		 */
		if (nsize & PAGE_MASK) {
			vm_offset_t kva;
			vm_page_t m;

			m = vm_page_lookup_busy_wait(object, OFF_TO_IDX(nsize),
						     TRUE, "vsetsz");
			if (m && m->valid) {
				int base = (int)nsize & PAGE_MASK;
				int size = PAGE_SIZE - base;
				struct lwbuf *lwb;
				struct lwbuf lwb_cache;

				/*
				 * Clear out partial-page garbage in case
				 * the page has been mapped.
				 *
				 * This is byte aligned.
				 */
				lwb = lwbuf_alloc(m, &lwb_cache);
				kva = lwbuf_kva(lwb);
				bzero((caddr_t)kva + base, size);
				lwbuf_free(lwb);

				/*
				 * XXX work around SMP data integrity race
				 * by unmapping the page from user processes.
				 * The garbage we just cleared may be mapped
				 * to a user process running on another cpu
				 * and this code is not running through normal
				 * I/O channels which handle SMP issues for
				 * us, so unmap page to synchronize all cpus.
				 *
				 * XXX should vm_pager_unmap_page() have
				 * dealt with this?
				 */
				vm_page_protect(m, VM_PROT_NONE);

				/*
				 * Clear out partial-page dirty bits.  This
				 * has the side effect of setting the valid
				 * bits, but that is ok.  There are a bunch
				 * of places in the VM system where we expect
				 * m->dirty == VM_PAGE_BITS_ALL.  The file EOF
				 * case is one of them.  If the page is still
				 * partially dirty, make it fully dirty.
				 *
				 * NOTE: We do not clear out the valid
				 *	 bits.  This would prevent bogus_page
				 *	 replacement from working properly.
				 *
				 * NOTE: We do not want to clear the dirty
				 *	 bit for a partial DEV_BSIZE'd
				 *	 truncation!  This is DEV_BSIZE
				 *	 aligned!
				 */
				vm_page_clear_dirty_beg_nonincl(m, base, size);
				if (m->dirty != 0)
					m->dirty = VM_PAGE_BITS_ALL;
				vm_page_wakeup(m);
			} else if (m) {
				vm_page_wakeup(m);
			}
		}
	} else {
		vp->v_filesize = nsize;
	}
	vm_object_drop(object);
}
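/*
 * Usage sketch (illustrative only): a filesystem's truncate path calls
 * vnode_pager_setsize() once the new length is decided, so the VM
 * object never caches pages beyond EOF.  The surrounding code below is
 * a hypothetical outline, not any particular filesystem's code:
 *
 *	// shrink the file to 'length' bytes (hypothetical fs helper)
 *	error = myfs_shrink_blocks(vp, length);
 *	if (error == 0)
 *		vnode_pager_setsize(vp, length);
 *
 * Growing a file takes the cheap 'else' branch above (v_filesize is
 * simply advanced); shrinking triggers the page removal and the
 * partial-page zeroing of the new last page.
 */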