/*
 * Returns the number of matching invalid lsegs remaining in the list
 * after the call.
 */
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs)) {
		/* Reset MDS Threshold I/O counters */
		NFS_I(lo->plh_inode)->write_io = 0;
		NFS_I(lo->plh_inode)->read_io = 0;
		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
			put_layout_hdr_locked(lo);
		return 0;
	}
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode,
				lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}
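/*
 * Hedged usage sketch (not part of the source above): the return value of
 * mark_matching_lsegs_invalid() counts lsegs that matched the recall range
 * but are still referenced and could only be marked invalid, so a caller that
 * needs the layout fully drained can simply test for zero. The helper name
 * below is hypothetical.
 */
static bool pnfs_layout_drained(struct pnfs_layout_hdr *lo,
				struct pnfs_layout_range *range,
				struct list_head *free_me)
{
	/* zero means every matching lseg was moved onto free_me */
	return mark_matching_lsegs_invalid(lo, free_me, range) == 0;
}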
/*
 * Retrieve a page from fscache
 */
int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
				struct inode *inode, struct page *page)
{
	int ret;

	dfprintk(FSCACHE,
		 "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
		 NFS_I(inode)->fscache, page, page->index, page->flags, inode);

	ret = fscache_read_or_alloc_page(NFS_I(inode)->fscache,
					 page,
					 nfs_readpage_from_fscache_complete,
					 ctx,
					 GFP_KERNEL);

	switch (ret) {
	case 0: /* read BIO submitted (page in fscache) */
		dfprintk(FSCACHE,
			 "NFS: readpage_from_fscache: BIO submitted\n");
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK, 1);
		return ret;

	case -ENOBUFS: /* inode not in cache */
	case -ENODATA: /* page not in cache */
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL, 1);
		dfprintk(FSCACHE, "NFS: readpage_from_fscache %d\n", ret);
		return 1;

	default:
		dfprintk(FSCACHE, "NFS: readpage_from_fscache %d\n", ret);
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL, 1);
	}
	return ret;
}
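/*
 * Hedged caller sketch illustrating the return convention above: 0 means the
 * cache now owns the read and will complete the page asynchronously, any
 * other value means the caller must read the page from the NFS server
 * instead. nfs_read_page_from_server() is a hypothetical fallback helper,
 * not part of the code above.
 */
static int nfs_readpage_cached_or_server(struct nfs_open_context *ctx,
					 struct inode *inode,
					 struct page *page)
{
	int ret = __nfs_readpage_from_fscache(ctx, inode, page);

	if (ret == 0)
		return 0;	/* fscache will complete and unlock the page */
	return nfs_read_page_from_server(ctx, inode, page);
}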
/*
 * Enable or disable caching for a file that is being opened as appropriate.
 * The cookie is allocated when the inode is initialised, but is not enabled
 * at that time.  Enablement is deferred to file-open time to avoid stat() and
 * access() thrashing the cache.
 *
 * For now, with NFS, only regular files that are open read-only will be able
 * to use the cache.
 *
 * We enable the cache for an inode if we open it read-only and it isn't
 * currently open for writing.  We disable the cache if the inode is open
 * write-only.
 *
 * The caller uses the file struct to pin i_writecount on the inode before
 * calling us when a file is opened for writing, so we can make use of that.
 *
 * Note that this may be invoked multiple times in parallel by parallel
 * nfs_open() functions.
 */
void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct fscache_cookie *cookie = nfs_i_fscache(inode);

	if (!fscache_cookie_valid(cookie))
		return;

	memset(&auxdata, 0, sizeof(auxdata));
	auxdata.mtime = timespec64_to_timespec(nfsi->vfs_inode.i_mtime);
	auxdata.ctime = timespec64_to_timespec(nfsi->vfs_inode.i_ctime);

	if (inode_is_open_for_write(inode)) {
		dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
		clear_bit(NFS_INO_FSCACHE, &nfsi->flags);
		fscache_disable_cookie(cookie, &auxdata, true);
		fscache_uncache_all_inode_pages(cookie, inode);
	} else {
		dfprintk(FSCACHE, "NFS: nfsi 0x%p enabling cache\n", nfsi);
		fscache_enable_cookie(cookie, &auxdata, nfsi->vfs_inode.i_size,
				      nfs_fscache_can_enable, inode);
		if (fscache_cookie_enabled(cookie))
			set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
	}
}
static void nfs3_cache_acls(struct inode *inode, struct posix_acl *acl,
			    struct posix_acl *dfacl)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	dprintk("nfs3_cache_acls(%s/%ld, %p, %p)\n",
		inode->i_sb->s_id, inode->i_ino, acl, dfacl);

	spin_lock(&inode->i_lock);
	__nfs3_forget_cached_acls(NFS_I(inode));
	nfsi->acl_access = posix_acl_dup(acl);
	nfsi->acl_default = posix_acl_dup(dfacl);
	spin_unlock(&inode->i_lock);
}
/*
 * Turn off the cache with regard to a per-inode cookie if opened for writing,
 * invalidating all the pages in the page cache relating to the associated
 * inode to clear the per-page caching.
 */
static void nfs_fscache_disable_inode_cookie(struct inode *inode)
{
	clear_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);

	if (NFS_I(inode)->fscache) {
		dfprintk(FSCACHE, "NFS: nfsi 0x%p turning cache off\n",
			 NFS_I(inode));

		/* Need to uncache any pages attached to this inode that
		 * fscache knows about before turning off the cache.
		 */
		fscache_uncache_all_inode_pages(NFS_I(inode)->fscache, inode);
		nfs_fscache_zap_inode_cookie(inode);
	}
}
/*
 * Invalidate the local caches
 */
static void nfs_zap_caches_locked(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int mode = inode->i_mode;

	nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);

	nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
	nfsi->attrtimeo_timestamp = jiffies;

	memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
		nfsi->cache_validity |= NFS_INO_INVALID_ATTR |
					NFS_INO_INVALID_DATA |
					NFS_INO_INVALID_ACCESS |
					NFS_INO_INVALID_ACL |
					NFS_INO_REVAL_PAGECACHE;
	else
		nfsi->cache_validity |= NFS_INO_INVALID_ATTR |
					NFS_INO_INVALID_ACCESS |
					NFS_INO_INVALID_ACL |
					NFS_INO_REVAL_PAGECACHE;
}
static struct posix_acl *nfs3_get_cached_acl(struct inode *inode, int type)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct posix_acl *acl = ERR_PTR(-EINVAL);

	spin_lock(&inode->i_lock);
	switch (type) {
	case ACL_TYPE_ACCESS:
		acl = nfsi->acl_access;
		break;

	case ACL_TYPE_DEFAULT:
		acl = nfsi->acl_default;
		break;

	default:
		goto out;
	}
	if (IS_ERR(acl))
		acl = ERR_PTR(-EAGAIN);
	else
		acl = posix_acl_dup(acl);
out:
	spin_unlock(&inode->i_lock);
	dprintk("NFS: nfs3_get_cached_acl(%s/%ld, %d) = %p\n",
		inode->i_sb->s_id, inode->i_ino, type, acl);
	return acl;
}
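/*
 * Hedged sketch of how a caller might consume nfs3_get_cached_acl(): a real
 * posix_acl pointer is a cache hit, ERR_PTR(-EAGAIN) means the slot is
 * uncached and the ACL should be fetched over the wire, and any other error
 * is passed through. nfs3_fetch_acl_from_server() is a hypothetical helper,
 * not part of the code above.
 */
static struct posix_acl *nfs3_acl_lookup(struct inode *inode, int type)
{
	struct posix_acl *acl = nfs3_get_cached_acl(inode, type);

	if (acl == ERR_PTR(-EAGAIN))
		acl = nfs3_fetch_acl_from_server(inode, type);
	return acl;
}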
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	error = radix_tree_preload(GFP_NOFS);
	if (error != 0)
		goto out;

	/* Lock the request! */
	nfs_lock_request_dontget(req);

	spin_lock(&inode->i_lock);
	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error);
	if (!nfsi->npages) {
		igrab(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	nfsi->npages++;
	kref_get(&req->wb_kref);
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
			   NFS_PAGE_TAG_LOCKED);
	spin_unlock(&inode->i_lock);
	radix_tree_preload_end();
out:
	return error;
}
/*
 * Wait for a request to complete.
 *
 * Interruptible by fatal signals only.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start,
				       unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	pgoff_t idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req,
					  next, 1, NFS_PAGE_TAG_LOCKED)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		kref_get(&req->wb_kref);
		spin_unlock(&inode->i_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&inode->i_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	unsigned long *bitlock = &NFS_I(inode)->flags;
	struct nfs_pageio_descriptor pgio;
	int err;

	/* Stop dirtying of new pages while we sync */
	err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
			       nfs_wait_bit_killable, TASK_KILLABLE);
	if (err)
		goto out_err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	nfs_pageio_complete(&pgio);

	clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
	smp_mb__after_clear_bit();
	wake_up_bit(bitlock, NFS_INO_FLUSHING);

	if (err < 0)
		goto out_err;
	err = pgio.pg_error;
	if (err < 0)
		goto out_err;
	return 0;
out_err:
	return err;
}
struct inode *nfs_dq_reserve_inode(struct inode *dir)
{
	struct inode *inode;
	struct nfs_inode *nfsi;

	/* Second, allocate "quota" inode and initialize required fields */
	inode = new_inode(dir->i_sb);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);

	nfsi = NFS_I(inode);
	nfsi->access_cache = RB_ROOT;
#ifdef CONFIG_NFS_FSCACHE
	nfsi->fscache = NULL;
#endif

	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	/* Is this optional? */
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;

	if (vfs_dq_alloc_inode(inode) == NO_QUOTA)
		goto err_drop;

	dprintk("NFS: DQ reserve inode (ino: %ld)\n", inode->i_ino);
	return inode;

err_drop:
	vfs_dq_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	iput(inode);
	return ERR_PTR(-EDQUOT);
}
/*
 * This does the "real" work of the write. We must allocate and lock the
 * page to be sent back to the generic routine, which then copies the
 * data from user space.
 *
 * If the writer ends up delaying the write, the writer needs to
 * increment the page use counts until he is done with the page.
 */
static int nfs_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned flags,
			   struct page **pagep, void **fsdata)
{
	int ret;
	pgoff_t index;
	struct page *page;

	index = pos >> PAGE_CACHE_SHIFT;

	dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n",
		 file->f_path.dentry->d_parent->d_name.name,
		 file->f_path.dentry->d_name.name,
		 mapping->host->i_ino, len, (long long) pos);

	/*
	 * Prevent starvation issues if someone is doing a consistency
	 * sync-to-disk
	 */
	ret = wait_on_bit(&NFS_I(mapping->host)->flags, NFS_INO_FLUSHING,
			  nfs_wait_bit_killable, TASK_KILLABLE);
	if (ret)
		return ret;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	ret = nfs_flush_incompatible(file, page);
	if (ret) {
		unlock_page(page);
		page_cache_release(page);
	}
	return ret;
}
static void nfs_check_dirty_writeback(struct page *page,
				      bool *dirty, bool *writeback)
{
	struct nfs_inode *nfsi;
	struct address_space *mapping = page_file_mapping(page);

	if (!mapping || PageSwapCache(page))
		return;

	/*
	 * Check if an unstable page is currently being committed and
	 * if so, have the VM treat it as if the page is under writeback
	 * so it will not block due to pages that will shortly be freeable.
	 */
	nfsi = NFS_I(mapping->host);
	if (test_bit(NFS_INO_COMMIT, &nfsi->flags)) {
		*writeback = true;
		return;
	}

	/*
	 * If PagePrivate() is set, then the page is not freeable and as the
	 * inode is not being committed, it's not going to be cleaned in the
	 * near future so treat it as dirty
	 */
	if (PagePrivate(page))
		*dirty = true;
}
static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
			     struct posix_acl *dfacl)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_fattr fattr;
	struct page *pages[NFSACL_MAXPAGES] = { };
	struct nfs3_setaclargs args = {
		.inode = inode,
		.mask = NFS_ACL,
		.acl_access = acl,
		.pages = pages,
	};
	int status, count;

	status = -EOPNOTSUPP;
	if (!nfs_server_capable(inode, NFS_CAP_ACLS))
		goto out;

	/* We are doing this here, because XDR marshalling can only
	   return -ENOMEM. */
	status = -ENOSPC;
	if (acl != NULL && acl->a_count > NFS_ACL_MAX_ENTRIES)
		goto out;
	if (dfacl != NULL && dfacl->a_count > NFS_ACL_MAX_ENTRIES)
		goto out;
	if (S_ISDIR(inode->i_mode)) {
		args.mask |= NFS_DFACL;
		args.acl_default = dfacl;
	}

	dprintk("NFS call setacl\n");
	nfs_begin_data_update(inode);
	status = rpc_call(server->client_acl, ACLPROC3_SETACL, &args, &fattr, 0);
	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS;
	spin_unlock(&inode->i_lock);
	nfs_end_data_update(inode);
	dprintk("NFS reply setacl: %d\n", status);

	/* pages may have been allocated at the xdr layer. */
	for (count = 0; count < NFSACL_MAXPAGES && args.pages[count]; count++)
		__free_page(args.pages[count]);

	switch (status) {
	case 0:
		status = nfs_refresh_inode(inode, &fattr);
		break;
	case -EPFNOSUPPORT:
	case -EPROTONOSUPPORT:
		dprintk("NFS_V3_ACL SETACL RPC not supported "
			"(will not retry)\n");
		server->caps &= ~NFS_CAP_ACLS;
		/* fall through */
	case -ENOTSUPP:
		status = -EOPNOTSUPP;
	}
out:
	return status;
}
/*
 * For the moment, the only task for the NFS clear_inode method is to
 * release the mmap credential
 */
static void nfs_clear_inode(struct inode *inode)
{
	struct rpc_cred *cred = NFS_I(inode)->mm_cred;

	if (cred)
		put_rpccred(cred);
}
static void free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr in pnfs_insert_layout */
	put_layout_hdr(NFS_I(ino)->layout);
}
void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
{
	if (mapping->nrpages != 0) {
		spin_lock(&inode->i_lock);
		NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
		spin_unlock(&inode->i_lock);
	}
}
/**
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req:
 */
int nfs_set_page_tag_locked(struct nfs_page *req)
{
	if (!nfs_lock_request_dontget(req))
		return 0;
	if (test_bit(PG_MAPPED, &req->wb_flags))
		radix_tree_tag_set(&NFS_I(req->wb_context->dentry->d_inode)->nfs_page_tree,
				   req->wb_index, NFS_PAGE_TAG_LOCKED);
	return 1;
}
void nfs3_forget_cached_acls(struct inode *inode)
{
	dprintk("NFS: nfs3_forget_cached_acls(%s/%ld)\n",
		inode->i_sb->s_id, inode->i_ino);

	spin_lock(&inode->i_lock);
	__nfs3_forget_cached_acls(NFS_I(inode));
	spin_unlock(&inode->i_lock);
}
/**
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req:
 */
int nfs_set_page_tag_locked(struct nfs_page *req)
{
	if (!nfs_lock_request_dontget(req))
		return 0;
	if (req->wb_page != NULL)
		radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree,
				   req->wb_index, NFS_PAGE_TAG_LOCKED);
	return 1;
}
/*
 * Lock against someone else trying to also acquire or relinquish a cookie
 */
static inline void nfs_fscache_inode_lock(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	while (test_and_set_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags))
		wait_on_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK,
			    nfs_fscache_wait_bit, TASK_UNINTERRUPTIBLE);
}
/*
 * Retrieve a set of pages from fscache
 */
int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
				 struct inode *inode,
				 struct address_space *mapping,
				 struct list_head *pages,
				 unsigned *nr_pages)
{
	unsigned npages = *nr_pages;
	int ret;

	dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
		 NFS_I(inode)->fscache, npages, inode);

	ret = fscache_read_or_alloc_pages(NFS_I(inode)->fscache,
					  mapping, pages, nr_pages,
					  nfs_readpage_from_fscache_complete,
					  ctx,
					  mapping_gfp_mask(mapping));
	if (*nr_pages < npages)
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK,
				      npages);
	if (*nr_pages > 0)
		nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL,
				      *nr_pages);

	switch (ret) {
	case 0: /* read submitted to the cache for all pages */
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: submitted\n");
		return ret;

	case -ENOBUFS: /* some pages aren't cached and can't be */
	case -ENODATA: /* some pages aren't cached */
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: no page: %d\n", ret);
		return 1;

	default:
		dfprintk(FSCACHE,
			 "NFS: nfs_getpages_from_fscache: ret  %d\n", ret);
	}
	return ret;
}
/*
 * Turn off the cache with regard to a per-inode cookie if opened for writing,
 * invalidating all the pages in the page cache relating to the associated
 * inode to clear the per-page caching.
 */
static void nfs_fscache_disable_inode_cookie(struct inode *inode)
{
	clear_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);

	if (NFS_I(inode)->fscache) {
		dfprintk(FSCACHE,
			 "NFS: nfsi 0x%p turning cache off\n", NFS_I(inode));

		/* Need to invalidate any mapped pages that were read in before
		 * turning off the cache.
		 */
		if (inode->i_mapping && inode->i_mapping->nrpages)
			invalidate_inode_pages2(inode->i_mapping);

		nfs_fscache_zap_inode_cookie(inode);
	}
}
static void destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
	pnfs_free_layout_hdr(lo);
}
static void destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
	pnfs_free_layout_hdr(lo);
}
/**
 * nfs_set_page_tag_locked - Tag a request as locked
 * @req:
 */
static int nfs_set_page_tag_locked(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);

	if (!nfs_lock_request(req))
		return 0;
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
			   NFS_PAGE_TAG_LOCKED);
	return 1;
}
/*
 * Unlock cookie management lock
 */
static inline void nfs_fscache_inode_unlock(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	smp_mb__before_clear_bit();
	clear_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK);
}
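/*
 * Hedged sketch of the intended pairing of nfs_fscache_inode_lock() and
 * nfs_fscache_inode_unlock() shown above: the NFS_INO_FSCACHE_LOCK bit acts
 * as a simple mutex around cookie acquisition/relinquishment, so any cookie
 * manipulation sits between the two calls. The function body below is
 * illustrative only, not taken from the source.
 */
static void nfs_fscache_example_cookie_update(struct inode *inode)
{
	nfs_fscache_inode_lock(inode);
	/* ... acquire or relinquish NFS_I(inode)->fscache here ... */
	nfs_fscache_inode_unlock(inode);
}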
/**
 * nfs_set_page_writeback_locked - Lock a request for writeback
 * @req:
 */
int nfs_set_page_writeback_locked(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);

	if (!nfs_lock_request(req))
		return 0;
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
			   NFS_PAGE_TAG_WRITEBACK);
	return 1;
}
/*
 * Retire a per-inode cookie, destroying the data attached to it.
 */
void nfs_fscache_zap_inode_cookie(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	dfprintk(FSCACHE, "NFS: zapping cookie (0x%p/0x%p)\n",
		 nfsi, nfsi->fscache);

	fscache_relinquish_cookie(nfsi->fscache, 1);
	nfsi->fscache = NULL;
}
/*
 * Release a per-inode cookie.
 */
void nfs_fscache_release_inode_cookie(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n",
		 nfsi, nfsi->fscache);

	fscache_relinquish_cookie(nfsi->fscache, 0);
	nfsi->fscache = NULL;
}