/**
 * Implementation of struct cl_req_operations::cro_attr_set() for the VVP
 * layer. VVP is responsible for:
 *
 * - o_[mac]time
 * - o_mode
 * - o_parent_seq
 * - o_[ug]id
 * - o_parent_oid
 * - o_parent_ver
 * - o_ioepoch
 */
static void vvp_req_attr_set(const struct lu_env *env,
                             const struct cl_req_slice *slice,
                             const struct cl_object *obj,
                             struct cl_req_attr *attr, u64 flags)
{
        struct inode *inode;
        struct obdo *oa;
        u32 valid_flags;

        oa = attr->cra_oa;
        inode = vvp_object_inode(obj);
        valid_flags = OBD_MD_FLTYPE;

        if (slice->crs_req->crq_type == CRT_WRITE) {
                if (flags & OBD_MD_FLEPOCH) {
                        oa->o_valid |= OBD_MD_FLEPOCH;
                        oa->o_ioepoch = ll_i2info(inode)->lli_ioepoch;
                        valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                                       OBD_MD_FLUID | OBD_MD_FLGID;
                }
        }
        obdo_from_inode(oa, inode, valid_flags & flags);
        obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
        if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
                oa->o_parent_oid++;
        memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid,
               LUSTRE_JOBID_SIZE);
}
/**
 * Check for layout swap. The file's layout may have changed since this IO
 * was initialized. To avoid populating pages into the wrong stripe, verify
 * that the layout generation is still current. This works because a
 * layout-swapping process must hold the group lock.
 */
static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
                               struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct ccc_io *cio = ccc_env_io(env);
        bool rc = true;

        switch (io->ci_type) {
        case CIT_READ:
        case CIT_WRITE:
                /* No lock is needed to check lli_layout_gen here: we hold
                 * the extent lock, and the GROUP lock must be held to swap
                 * the layout. */
                if (lli->lli_layout_gen != cio->cui_layout_gen) {
                        io->ci_need_restart = 1;
                        /* this returns a short read/write to the
                         * application */
                        io->ci_continue = 0;
                        rc = false;
                }
        case CIT_FAULT:
                /* fault is okay because we already have a page. */
        default:
                break;
        }

        return rc;
}
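/*
 * A minimal caller sketch (not from the tree; the function name and the
 * surrounding IO machinery are assumed): a read/write start handler would
 * consult can_populate_pages() after the extent lock is taken and return
 * early on a generation mismatch, so the generic IO loop sees
 * ci_need_restart and re-runs the IO against the refreshed layout.
 */
static int vvp_io_rw_start_sketch(const struct lu_env *env, struct cl_io *io,
                                  struct inode *inode)
{
        if (!can_populate_pages(env, io, inode))
                return 0;       /* short IO; the outer loop restarts it */

        /* ... populate pages for the current stripe layout ... */
        return 0;
}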
int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
                 const struct cl_object_conf *conf)
{
        struct ll_inode_info *lli = ll_i2info(conf->coc_inode);

        if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
                lli->lli_layout_gen = LL_LAYOUT_GEN_NONE;
                return 0;
        }

        if (conf->coc_opc != OBJECT_CONF_SET)
                return 0;

        if (conf->u.coc_md != NULL && conf->u.coc_md->lsm != NULL) {
                CDEBUG(D_VFSTRACE, "layout lock change: %u -> %u\n",
                       lli->lli_layout_gen,
                       conf->u.coc_md->lsm->lsm_layout_gen);

                lli->lli_has_smd = true;
                lli->lli_layout_gen = conf->u.coc_md->lsm->lsm_layout_gen;
        } else {
                CDEBUG(D_VFSTRACE, "layout lock destroyed: %u.\n",
                       lli->lli_layout_gen);

                lli->lli_has_smd = false;
                lli->lli_layout_gen = LL_LAYOUT_GEN_EMPTY;
        }
        return 0;
}
static int cl_io_get(struct inode *inode, struct lu_env **envout,
                     struct cl_io **ioout, int *refcheck)
{
        struct lu_env *env;
        struct cl_io *io;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct cl_object *clob = lli->lli_clob;
        int result;

        if (S_ISREG(inode->i_mode)) {
                env = cl_env_get(refcheck);
                if (!IS_ERR(env)) {
                        io = vvp_env_thread_io(env);
                        io->ci_obj = clob;
                        *envout = env;
                        *ioout = io;
                        result = 1;
                } else {
                        result = PTR_ERR(env);
                }
        } else {
                result = 0;
        }
        return result;
}
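/*
 * A minimal pairing sketch (hypothetical helper; assumes the standard
 * cl_io API): a successful cl_io_get() (return value 1) must be balanced
 * by cl_io_fini() on the initialized IO and cl_env_put() on the
 * environment.
 */
static int ll_misc_io_sketch(struct inode *inode)
{
        struct lu_env *env;
        struct cl_io *io;
        int refcheck;
        int result;

        result = cl_io_get(inode, &env, &io, &refcheck);
        if (result <= 0)
                return result;  /* not a regular file, or env error */

        result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (result == 0)
                result = cl_io_loop(env, io);
        cl_io_fini(env, io);
        cl_env_put(env, &refcheck);
        return result;
}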
static void ll_inode_destroy_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct ll_inode_info *ptr = ll_i2info(inode);

        kmem_cache_free(ll_inode_cachep, ptr);
}
/**
 * Implements the Linux VM address_space::invalidatepage() method. This
 * method is called when a page is truncated from a file, either as a result
 * of an explicit truncate, or when the inode is removed from memory (on
 * final iput(), umount, or memory-pressure-induced icache shrinking).
 *
 * [0, offset] bytes of the page remain valid (this covers a non-page-aligned
 * truncate). Lustre leaves the partially truncated page in the cache,
 * relying on struct inode::i_size to limit further accesses.
 */
static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
                              unsigned int length)
{
        struct inode *inode;
        struct lu_env *env;
        struct cl_page *page;
        struct cl_object *obj;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageWriteback(vmpage));

        /*
         * It is safe to not check anything in invalidatepage/releasepage
         * below because they are run with the page locked and all our io
         * is happening with locked pages too.
         */
        if (offset == 0 && length == PAGE_SIZE) {
                /* See the comment in ll_releasepage() */
                env = cl_env_percpu_get();
                LASSERT(!IS_ERR(env));

                inode = vmpage->mapping->host;
                obj = ll_i2info(inode)->lli_clob;
                if (obj) {
                        page = cl_vmpage_page(vmpage, obj);
                        if (page) {
                                cl_page_delete(env, page);
                                cl_page_put(env, page);
                        }
                } else {
                        LASSERT(vmpage->private == 0);
                }
                cl_env_percpu_put(env);
        }
}
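/*
 * Illustrative wiring sketch (field subset assumed from the standard
 * address_space_operations of this kernel era): the invalidate/release
 * methods are installed in the client's aops table, so the VM calls
 * back into the cl_page machinery above.
 */
static const struct address_space_operations ll_aops_sketch = {
        .invalidatepage = ll_invalidatepage,
        .releasepage    = ll_releasepage,
        /* ... readpage, writepages, etc. ... */
};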
int vvp_object_invariant(const struct cl_object *obj)
{
        struct inode *inode = vvp_object_inode(obj);
        struct ll_inode_info *lli = ll_i2info(inode);

        return (S_ISREG(inode->i_mode) || inode->i_mode == 0) &&
               lli->lli_clob == obj;
}
/**
 * API-independent part of page fault initialization.
 * \param env      - corresponding lu_env for processing
 * \param vma      - virtual memory area addressed by the page fault
 * \param index    - page index corresponding to the fault
 * \param ra_flags - vma readahead flags
 *
 * \return error codes from cl_io_init.
 */
static struct cl_io *
ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
                 pgoff_t index, unsigned long *ra_flags)
{
        struct file *file = vma->vm_file;
        struct inode *inode = file_inode(file);
        struct cl_io *io;
        struct cl_fault_io *fio;
        int rc;
        ENTRY;

        if (ll_file_nolock(file))
                RETURN(ERR_PTR(-EOPNOTSUPP));

restart:
        io = vvp_env_thread_io(env);
        io->ci_obj = ll_i2info(inode)->lli_clob;
        LASSERT(io->ci_obj != NULL);

        fio = &io->u.ci_fault;
        fio->ft_index = index;
        fio->ft_executable = vma->vm_flags & VM_EXEC;

        /*
         * disable VM_SEQ_READ and use VM_RAND_READ to make sure that
         * the kernel will not read other pages not covered by ldlm in
         * filemap_nopage. we do our readahead in ll_readpage.
         */
        if (ra_flags != NULL)
                *ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
        vma->vm_flags &= ~VM_SEQ_READ;
        vma->vm_flags |= VM_RAND_READ;

        CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
               fio->ft_index, fio->ft_executable);

        rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
        if (rc == 0) {
                struct vvp_io *vio = vvp_env_io(env);
                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

                LASSERT(vio->vui_cl.cis_io == io);

                /* mmap lock must be MANDATORY because it has to cache
                 * pages. */
                io->ci_lockreq = CILR_MANDATORY;
                vio->vui_fd = fd;
        } else {
                LASSERT(rc < 0);
                cl_io_fini(env, io);
                if (io->ci_need_restart)
                        goto restart;
                io = ERR_PTR(rc);
        }

        RETURN(io);
}
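/*
 * A minimal fault-path sketch (hypothetical wrapper; the vmpage handling
 * of the real fault handler is elided): initialize the IO, run it, then
 * restore the readahead flags that ll_fault_io_init() saved before it
 * forced VM_RAND_READ.
 */
static int ll_fault_sketch(struct lu_env *env, struct vm_area_struct *vma,
                           pgoff_t index)
{
        unsigned long ra_flags;
        struct cl_io *io;
        int result;

        io = ll_fault_io_init(env, vma, index, &ra_flags);
        if (IS_ERR(io))
                return PTR_ERR(io);

        result = cl_io_loop(env, io);
        cl_io_fini(env, io);

        vma->vm_flags |= ra_flags;      /* restore caller's readahead hints */
        return result;
}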
static void vvp_req_attr_set(const struct lu_env *env, struct cl_object *obj,
                             struct cl_req_attr *attr)
{
        struct inode *inode;
        struct obdo *oa;
        u64 valid_flags = OBD_MD_FLTYPE;

        oa = attr->cra_oa;
        inode = vvp_object_inode(obj);

        if (attr->cra_type == CRT_WRITE)
                valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                               OBD_MD_FLUID | OBD_MD_FLGID;
        obdo_from_inode(oa, inode, valid_flags & attr->cra_flags);
        obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
        if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
                oa->o_parent_oid++;
        memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid,
               LUSTRE_JOBID_SIZE);
}
struct vvp_object *cl_inode2vvp(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct cl_object *obj = lli->lli_clob;
        struct lu_object *lu;

        lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
        LASSERT(lu);
        return lu2vvp(lu);
}
static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
{
        struct lu_env *env;
        struct cl_object *obj;
        struct cl_page *page;
        struct address_space *mapping;
        int result = 0;

        LASSERT(PageLocked(vmpage));
        if (PageWriteback(vmpage) || PageDirty(vmpage))
                return 0;

        mapping = vmpage->mapping;
        if (mapping == NULL)
                return 1;

        obj = ll_i2info(mapping->host)->lli_clob;
        if (obj == NULL)
                return 1;

        /* 1 for caller, 1 for cl_page and 1 for page cache */
        if (page_count(vmpage) > 3)
                return 0;

        page = cl_vmpage_page(vmpage, obj);
        if (page == NULL)
                return 1;

        env = cl_env_percpu_get();
        LASSERT(!IS_ERR(env));

        if (!cl_page_in_use(page)) {
                result = 1;
                cl_page_delete(env, page);
        }

        /* To use the percpu env array, the call path cannot be rescheduled;
         * otherwise the percpu array will be corrupted if ll_releasepage()
         * is called again on the same CPU.
         *
         * If this page holds the last reference on the cl_object, the
         * following call path may reschedule:
         *   cl_page_put -> cl_page_free -> cl_object_put ->
         *     lu_object_put -> lu_object_free -> lov_delete_raid0.
         *
         * However, the kernel can't get rid of this inode until all pages
         * have been cleaned up. Since we hold the page lock here, it is
         * safe to assume we won't enter the object delete path.
         */
        LASSERT(cl_object_refc(obj) > 1);
        cl_page_put(env, page);
        cl_env_percpu_put(env);
        return result;
}
/** Records that a write is in flight. */
void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
{
        struct ll_inode_info *lli = ll_i2info(club->cob_inode);

        spin_lock(&lli->lli_lock);
        lli->lli_flags |= LLIF_SOM_DIRTY;
        if (page != NULL && list_empty(&page->cpg_pending_linkage))
                list_add(&page->cpg_pending_linkage,
                         &club->cob_pending_list);
        spin_unlock(&lli->lli_lock);
}
static struct dentry *
ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid,
                struct lu_fid *parent)
{
        struct inode *inode;
        struct dentry *result;

        CDEBUG(D_INFO, "Get dentry for fid: "DFID"\n", PFID(fid));
        if (!fid_is_sane(fid))
                return ERR_PTR(-ESTALE);

        inode = search_inode_for_lustre(sb, fid);
        if (IS_ERR(inode))
                return ERR_CAST(inode);

        if (is_bad_inode(inode)) {
                /* we didn't find the right inode */
                iput(inode);
                return ERR_PTR(-ESTALE);
        }

        /*
         * This is an anonymous dentry without OST objects created yet.
         * We have to find the parent to tell the MDS how to init the lov
         * objects.
         */
        if (S_ISREG(inode->i_mode) && !ll_i2info(inode)->lli_has_smd &&
            parent != NULL) {
                struct ll_inode_info *lli = ll_i2info(inode);

                spin_lock(&lli->lli_lock);
                lli->lli_pfid = *parent;
                spin_unlock(&lli->lli_lock);
        }

        result = d_obtain_alias(inode);
        if (IS_ERR(result)) {
                iput(inode);
                return result;
        }

        return result;
}
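/*
 * Illustrative export_operations hookup (a sketch; the lustre_nfs_fid
 * field names are assumed): the NFS layer hands back the opaque file
 * handle, which is unpacked and resolved through ll_iget_for_nfs().
 */
static struct dentry *ll_fh_to_dentry_sketch(struct super_block *sb,
                                             struct fid *fid, int fh_len,
                                             int fh_type)
{
        struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;

        return ll_iget_for_nfs(sb, &nfs_fid->lnf_child,
                               &nfs_fid->lnf_parent);
}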
/**
 * Queues DONE_WRITING if:
 * - done writing is allowed;
 * - the inode has no dirty pages.
 */
void ll_queue_done_writing(struct inode *inode, unsigned long flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
        ENTRY;

        spin_lock(&lli->lli_lock);
        lli->lli_flags |= flags;

        if ((lli->lli_flags & LLIF_DONE_WRITING) &&
            cfs_list_empty(&club->cob_pending_list)) {
                struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;

                if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
                        CWARN("ino %lu/%u(flags %u) som valid just after "
                              "recovery\n", inode->i_ino,
                              inode->i_generation, lli->lli_flags);
                /* DONE_WRITING is allowed and inode has no dirty page. */
                spin_lock(&lcq->lcq_lock);

                LASSERT(cfs_list_empty(&lli->lli_close_list));
                CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
                       inode->i_ino, inode->i_generation);
                cfs_list_add_tail(&lli->lli_close_list, &lcq->lcq_head);

                /* Avoid a concurrent insertion into the close thread queue:
                 * an inode is already in the close thread, then open(),
                 * write(), close() happen and the epoch is closed as the
                 * inode is marked LLIF_EPOCH_PENDING. When the pages are
                 * written, the inode should not be inserted into the queue
                 * again; clear this flag to avoid it. */
                lli->lli_flags &= ~LLIF_DONE_WRITING;

                cfs_waitq_signal(&lcq->lcq_waitq);
                spin_unlock(&lcq->lcq_lock);
        }
        spin_unlock(&lli->lli_lock);
        EXIT;
}
/** Records that a write has completed. */
void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
{
        struct ll_inode_info *lli = ll_i2info(club->cob_inode);
        int rc = 0;

        spin_lock(&lli->lli_lock);
        if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
                list_del_init(&page->cpg_pending_linkage);
                rc = 1;
        }
        spin_unlock(&lli->lli_lock);
        if (rc)
                ll_queue_done_writing(club->cob_inode, 0);
}
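/*
 * A minimal lifecycle sketch (hypothetical call sites): a page is
 * accounted via vvp_write_pending() when it is queued for write-out and
 * via vvp_write_complete() when the write RPC finishes; once the pending
 * list drains, vvp_write_complete() queues DONE_WRITING.
 */
static void vvp_writeback_sketch(struct ccc_object *club,
                                 struct ccc_page *cp)
{
        vvp_write_pending(club, cp);    /* page handed off for write-out */
        /* ... write RPC runs and completes ... */
        vvp_write_complete(club, cp);   /* may queue DONE_WRITING */
}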
/**
 * Client updates SOM attributes on the MDS (including llog cookies):
 * obd_getattr with no lock and md_setattr.
 */
int ll_som_update(struct inode *inode, struct md_op_data *op_data)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct ptlrpc_request *request = NULL;
        __u32 old_flags;
        struct obdo *oa;
        int rc;
        ENTRY;

        LASSERT(op_data != NULL);
        if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
                CERROR("ino %lu/%u(flags %u) som valid just after "
                       "recovery\n", inode->i_ino, inode->i_generation,
                       lli->lli_flags);

        OBDO_ALLOC(oa);
        if (!oa) {
                CERROR("can't allocate memory for Size-on-MDS update.\n");
                RETURN(-ENOMEM);
        }

        old_flags = op_data->op_flags;
        op_data->op_flags = MF_SOM_CHANGE;

        /* If the inode is already in another epoch, skip getattr from the
         * OSTs. */
        if (lli->lli_ioepoch == op_data->op_ioepoch) {
                rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch,
                                      old_flags & MF_GETATTR_LOCK);
                if (rc) {
                        oa->o_valid = 0;
                        if (rc != -ENOENT)
                                CERROR("inode_getattr failed (%d): unable "
                                       "to send a Size-on-MDS attribute "
                                       "update for inode %lu/%u\n", rc,
                                       inode->i_ino, inode->i_generation);
                } else {
                        CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
                               PFID(&lli->lli_fid));
                }
                /* Install attributes into op_data. */
                md_from_obdo(op_data, oa, oa->o_valid);
        }

        rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data,
                        NULL, 0, NULL, 0, &request, NULL);
        ptlrpc_req_finished(request);

        OBDO_FREE(oa);
        RETURN(rc);
}
/** Packs SOM attributes into @op_data for the CLOSE or DONE_WRITING rpc. */
void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
{
        struct ll_inode_info *lli = ll_i2info(inode);

        op_data->op_flags |= MF_SOM_CHANGE;
        /* Check if Size-on-MDS attributes are valid. */
        if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
                CERROR("ino %lu/%u(flags %u) som valid just after "
                       "recovery\n", inode->i_ino, inode->i_generation,
                       lli->lli_flags);

        if (!cl_local_size(inode)) {
                /* Send Size-on-MDS attributes if valid. */
                op_data->op_attr.ia_valid |= ATTR_MTIME_SET |
                        ATTR_CTIME_SET | ATTR_ATIME_SET | ATTR_SIZE |
                        ATTR_BLOCKS;
        }
}
static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
                        const struct cl_object_conf *conf)
{
        struct ll_inode_info *lli = ll_i2info(conf->coc_inode);

        if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
                CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n",
                       PFID(&lli->lli_fid));

                ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);

                /* Clean up page mmap for this inode.
                 * If a page has already been installed into the memory
                 * space, a process can access it without interacting with
                 * Lustre, so the page may become stale after a layout
                 * change and the process would never be notified.
                 * This operation is expensive, but mmap processes have to
                 * pay a price themselves. */
                unmap_mapping_range(conf->coc_inode->i_mapping,
                                    0, OBD_OBJECT_EOF, 0);
                return 0;
        }

        if (conf->coc_opc != OBJECT_CONF_SET)
                return 0;

        if (conf->u.coc_md && conf->u.coc_md->lsm) {
                CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
                       PFID(&lli->lli_fid), lli->lli_layout_gen,
                       conf->u.coc_md->lsm->lsm_layout_gen);

                lli->lli_has_smd = lsm_has_objects(conf->u.coc_md->lsm);
                ll_layout_version_set(lli,
                                      conf->u.coc_md->lsm->lsm_layout_gen);
        } else {
                CDEBUG(D_VFSTRACE, DFID ": layout nuked: %u.\n",
                       PFID(&lli->lli_fid), lli->lli_layout_gen);

                lli->lli_has_smd = false;
                ll_layout_version_set(lli, LL_LAYOUT_GEN_EMPTY);
        }
        return 0;
}
static int vvp_io_read_lock(const struct lu_env *env,
                            const struct cl_io_slice *ios)
{
        struct cl_io *io = ios->cis_io;
        struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
        int result;
        ENTRY;

        /* XXX: Layer violation, we shouldn't see lsm at llite level. */
        if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
                result = vvp_io_rw_lock(env, io, CLM_READ,
                                        io->u.ci_rd.rd.crw_pos,
                                        io->u.ci_rd.rd.crw_pos +
                                        io->u.ci_rd.rd.crw_count - 1);
        else
                result = 0;
        RETURN(result);
}
/* Find any ldlm lock of the inode in mdc and lov.
 * Return: 0 if no lock is found;
 *         1 if a lock is found;
 *         < 0 on error. */
static int find_cbdata(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc = 0;
        ENTRY;

        LASSERT(inode);
        rc = md_find_cbdata(sbi->ll_md_exp, ll_inode2fid(inode),
                            return_if_equal, NULL);
        if (rc != 0)
                RETURN(rc);

        if (lli->lli_smd)
                rc = obd_find_cbdata(sbi->ll_dt_exp, lli->lli_smd,
                                     return_if_equal, NULL);

        RETURN(rc);
}
/**
 * Implements the Linux VM address_space::invalidatepage() method. This
 * method is called when a page is truncated from a file, either as a result
 * of an explicit truncate, or when the inode is removed from memory (on
 * final iput(), umount, or memory-pressure-induced icache shrinking).
 *
 * [0, offset] bytes of the page remain valid (this covers a non-page-aligned
 * truncate). Lustre leaves the partially truncated page in the cache,
 * relying on struct inode::i_size to limit further accesses.
 */
static void ll_invalidatepage(struct page *vmpage,
#ifdef HAVE_INVALIDATE_RANGE
                              unsigned int offset, unsigned int length
#else
                              unsigned long offset
#endif
                             )
{
        struct inode *inode;
        struct lu_env *env;
        struct cl_page *page;
        struct cl_object *obj;
        int refcheck;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageWriteback(vmpage));

        /*
         * It is safe to not check anything in invalidatepage/releasepage
         * below because they are run with the page locked and all our io
         * is happening with locked pages too
         */
#ifdef HAVE_INVALIDATE_RANGE
        if (offset == 0 && length == PAGE_CACHE_SIZE) {
#else
        if (offset == 0) {
#endif
                env = cl_env_get(&refcheck);
                if (!IS_ERR(env)) {
                        inode = vmpage->mapping->host;
                        obj = ll_i2info(inode)->lli_clob;
                        if (obj != NULL) {
                                page = cl_vmpage_page(vmpage, obj);
                                if (page != NULL) {
                                        cl_page_delete(env, page);
                                        cl_page_put(env, page);
                                }
                        } else
                                LASSERT(vmpage->private == 0);
                        cl_env_put(env, &refcheck);
                }
        }
}
static int vvp_object_print(const struct lu_env *env, void *cookie,
                            lu_printer_t p, const struct lu_object *o)
{
        struct vvp_object *obj = lu2vvp(o);
        struct inode *inode = obj->vob_inode;
        struct ll_inode_info *lli;

        (*p)(env, cookie, "(%d %d) inode: %p ",
             atomic_read(&obj->vob_transient_pages),
             atomic_read(&obj->vob_mmap_cnt),
             inode);
        if (inode) {
                lli = ll_i2info(inode);
                (*p)(env, cookie, "%lu/%u %o %u %d %p "DFID,
                     inode->i_ino, inode->i_generation, inode->i_mode,
                     inode->i_nlink, atomic_read(&inode->i_count),
                     lli->lli_clob, PFID(&lli->lli_fid));
        }
        return 0;
}
                /* check again */
                if (save != lli->lli_rmtperm_time) {
                        rc = do_check_remote_perm(lli, mask);
                        if (!rc || (rc != -ENOENT && i)) {
                                mutex_unlock(&lli->lli_rmtperm_mutex);
                                break;
                        }
                }

                if (i++ > 5) {
                        CERROR("check remote perm falls into a dead loop!\n");
                        LBUG();
                }

                oc = ll_mdscapa_get(inode);
                rc = md_get_remote_perm(sbi->ll_md_exp, ll_inode2fid(inode),
                                        oc, ll_i2suppgid(inode), &req);
                capa_put(oc);
                if (rc) {
                        mutex_unlock(&lli->lli_rmtperm_mutex);
                        break;
                }

                perm = req_capsule_server_swab_get(&req->rq_pill, &RMF_ACL,
                                                lustre_swab_mdt_remote_perm);
                if (unlikely(perm == NULL)) {
                        mutex_unlock(&lli->lli_rmtperm_mutex);
                        rc = -EPROTO;
                        break;
                }

                rc = ll_update_remote_perm(inode, perm);
                mutex_unlock(&lli->lli_rmtperm_mutex);
                if (rc == -ENOMEM)
                        break;

                ptlrpc_req_finished(req);
                req = NULL;
        } while (1);

        ptlrpc_req_finished(req);
        return rc;
}

#if 0
/* NB: remote perms can't be freed in ll_mdc_blocking_ast of the UPDATE
 * lock, because it will fail sanity test 48. */
void ll_free_remote_perms(struct inode *inode)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct hlist_head *hash = lli->lli_remote_perms;
        struct ll_remote_perm *lrp;
        struct hlist_node *node, *next;
        int i;

        LASSERT(hash);

        spin_lock(&lli->lli_lock);

        for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
                hlist_for_each_entry_safe(lrp, node, next, hash + i,
                                          lrp_list)
                        free_ll_remote_perm(lrp);
        }

        spin_unlock(&lli->lli_lock);
}
/**
 * Implements the Linux VM address_space::invalidatepage() method. This
 * method is called when a page is truncated from a file, either as a result
 * of an explicit truncate, or when the inode is removed from memory (on
 * final iput(), umount, or memory-pressure-induced icache shrinking).
 *
 * [0, offset] bytes of the page remain valid (this covers a non-page-aligned
 * truncate). Lustre leaves the partially truncated page in the cache,
 * relying on struct inode::i_size to limit further accesses.
 */
static int cl_invalidatepage(struct page *vmpage, unsigned long offset)
{
        struct inode *inode;
        struct lu_env *env;
        struct cl_page *page;
        struct cl_object *obj;
        int result;
        int refcheck;

        LASSERT(PageLocked(vmpage));
        LASSERT(!PageWriteback(vmpage));

        /*
         * It is safe to not check anything in invalidatepage/releasepage
         * below because they are run with the page locked and all our io
         * is happening with locked pages too
         */
        result = 0;
        if (offset == 0) {
                env = cl_env_get(&refcheck);
                if (!IS_ERR(env)) {
                        inode = vmpage->mapping->host;
                        obj = ll_i2info(inode)->lli_clob;
                        if (obj != NULL) {
                                page = cl_vmpage_page(vmpage, obj);
                                if (page != NULL) {
                                        lu_ref_add(&page->cp_reference,
                                                   "delete", vmpage);
                                        cl_page_delete(env, page);
                                        result = 1;
                                        lu_ref_del(&page->cp_reference,
                                                   "delete", vmpage);
                                        cl_page_put(env, page);
                                }
                        } else
                                LASSERT(vmpage->private == 0);
                        cl_env_put(env, &refcheck);
                }
        }
        return result;
}
static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
                        struct cl_attr *attr)
{
        struct inode *inode = vvp_object_inode(obj);

        /*
         * lov overwrites most of these fields in
         * lov_attr_get()->...lov_merge_lvb_kms(), except when inode
         * attributes are newer.
         */
        attr->cat_size = i_size_read(inode);
        attr->cat_mtime = inode->i_mtime.tv_sec;
        attr->cat_atime = inode->i_atime.tv_sec;
        attr->cat_ctime = inode->i_ctime.tv_sec;
        attr->cat_blocks = inode->i_blocks;
        attr->cat_uid = from_kuid(&init_user_ns, inode->i_uid);
        attr->cat_gid = from_kgid(&init_user_ns, inode->i_gid);
        attr->cat_projid = ll_i2info(inode)->lli_projid;
        /* KMS is not known by this layer */
        return 0; /* layers below have to fill in the rest */
}
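/*
 * A minimal reader sketch (hypothetical helper; cl_object_attr_lock/get/
 * unlock are the standard cl_object API): attributes are fetched under
 * the attribute lock so that all layers, vvp_attr_get() included, merge
 * a consistent view.
 */
static int ll_read_attr_sketch(const struct lu_env *env,
                               struct cl_object *obj, struct cl_attr *attr)
{
        int rc;

        cl_object_attr_lock(obj);
        rc = cl_object_attr_get(env, obj, attr);
        cl_object_attr_unlock(obj);
        return rc;
}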
void ll_release_page(struct inode *inode, struct page *page, bool remove)
{
        kunmap(page);

        /*
         * Always remove the page for a striped dir, because the page was
         * built only temporarily in the LMV layer.
         */
        if (inode && S_ISDIR(inode->i_mode) &&
            ll_i2info(inode)->lli_lsm_md) {
                __free_page(page);
                return;
        }

        if (remove) {
                lock_page(page);
                if (likely(page->mapping))
                        truncate_complete_page(page->mapping, page);
                unlock_page(page);
        }
        put_page(page);
}
static int ll_readlink_internal(struct inode *inode,
                                struct ptlrpc_request **request,
                                char **symname)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc, symlen = i_size_read(inode) + 1;
        struct mdt_body *body;
        struct md_op_data *op_data;

        *request = NULL;

        if (lli->lli_symlink_name) {
                int print_limit = min_t(int, PAGE_SIZE - 128, symlen);

                *symname = lli->lli_symlink_name;
                /* If the total CDEBUG() size is larger than a page, it
                 * will print a warning to the console; avoid this by
                 * printing just the last part of the symlink. */
                CDEBUG(D_INODE, "using cached symlink %s%.*s, len = %d\n",
                       print_limit < symlen ? "..." : "", print_limit,
                       (*symname) + symlen - print_limit, symlen);
                return 0;
        }
static int vvp_attr_update(const struct lu_env *env, struct cl_object *obj,
                           const struct cl_attr *attr, unsigned valid)
{
        struct inode *inode = vvp_object_inode(obj);

        if (valid & CAT_UID)
                inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
        if (valid & CAT_GID)
                inode->i_gid = make_kgid(&init_user_ns, attr->cat_gid);
        if (valid & CAT_ATIME)
                inode->i_atime.tv_sec = attr->cat_atime;
        if (valid & CAT_MTIME)
                inode->i_mtime.tv_sec = attr->cat_mtime;
        if (valid & CAT_CTIME)
                inode->i_ctime.tv_sec = attr->cat_ctime;
        if (0 && valid & CAT_SIZE)
                i_size_write(inode, attr->cat_size);
        if (valid & CAT_PROJID)
                ll_i2info(inode)->lli_projid = attr->cat_projid;
        /* not currently necessary */
        if (0 && valid & (CAT_UID | CAT_GID | CAT_SIZE | CAT_PROJID))
                mark_inode_dirty(inode);
        return 0;
}
/** Closes the ioepoch and packs the Size-on-MDS attribute into @op_data
 * if needed. */
void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
                      struct obd_client_handle **och, unsigned long flags)
{
        struct ll_inode_info *lli = ll_i2info(inode);
        struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);

        spin_lock(&lli->lli_lock);
        if (!(list_empty(&club->cob_pending_list))) {
                if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
                        LASSERT(*och != NULL);
                        LASSERT(lli->lli_pending_och == NULL);
                        /* Inode is dirty and there is no pending write done
                         * request yet; DONE_WRITE is to be sent later. */
                        lli->lli_flags |= LLIF_EPOCH_PENDING;
                        lli->lli_pending_och = *och;
                        spin_unlock(&lli->lli_lock);

                        inode = igrab(inode);
                        LASSERT(inode);
                        GOTO(out, 0);
                }
                if (flags & LLIF_DONE_WRITING) {
                        /* Some pages are still dirty; it is too early to
                         * send DONE_WRITE. Wait until all pages are flushed
                         * and try DONE_WRITE again later. */
                        LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
                        lli->lli_flags |= LLIF_DONE_WRITING;
                        spin_unlock(&lli->lli_lock);

                        inode = igrab(inode);
                        LASSERT(inode);
                        GOTO(out, 0);
                }
        }
        CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID"\n",
               ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid));
        op_data->op_flags |= MF_EPOCH_CLOSE;

        if (flags & LLIF_DONE_WRITING) {
                LASSERT(lli->lli_flags & LLIF_SOM_DIRTY);
                LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
                *och = lli->lli_pending_och;
                lli->lli_pending_och = NULL;
                lli->lli_flags &= ~LLIF_EPOCH_PENDING;
        } else {
                /* Pack Size-on-MDS inode attributes only if they have
                 * changed. */
                if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
                        spin_unlock(&lli->lli_lock);
                        GOTO(out, 0);
                }

                /* There is a pending DONE_WRITE -- close the epoch with no
                 * attribute change. */
                if (lli->lli_flags & LLIF_EPOCH_PENDING) {
                        spin_unlock(&lli->lli_lock);
                        GOTO(out, 0);
                }
        }

        LASSERT(list_empty(&club->cob_pending_list));
        lli->lli_flags &= ~LLIF_SOM_DIRTY;
        spin_unlock(&lli->lli_lock);
        ll_done_writing_attr(inode, op_data);

out:
        return;
}
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
        struct inode *inode = dentry->d_inode;
        int rc = 0, rc2 = 0;
        struct lov_mds_md *lmm = NULL;
        struct ptlrpc_request *request = NULL;
        int lmmsize;

        LASSERT(inode);

        CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
               inode->i_ino, inode->i_generation, inode);

        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1);

        rc = ll_getxattr_common(inode, NULL, buffer, size,
                                OBD_MD_FLXATTRLS);
        if (rc < 0)
                GOTO(out, rc);

        if (buffer != NULL) {
                struct ll_sb_info *sbi = ll_i2sbi(inode);
                char *xattr_name = buffer;
                int xlen, rem = rc;

                while (rem > 0) {
                        xlen = strnlen(xattr_name, rem - 1) + 1;
                        rem -= xlen;
                        if (xattr_type_filter(sbi,
                                        get_xattr_type(xattr_name)) == 0) {
                                /* skip OK xattr type,
                                 * leave it in the buffer */
                                xattr_name += xlen;
                                continue;
                        }

                        /* move up remaining xattrs in the buffer,
                         * removing the xattr that is not OK */
                        memmove(xattr_name, xattr_name + xlen, rem);
                        rc -= xlen;
                }
        }
        if (S_ISREG(inode->i_mode)) {
                if (!ll_i2info(inode)->lli_has_smd)
                        rc2 = -1;
        } else if (S_ISDIR(inode->i_mode)) {
                rc2 = ll_dir_getstripe(inode, &lmm, &lmmsize, &request);
        }

        if (rc2 < 0) {
                GOTO(out, rc2 = 0);
        } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) {
                const int prefix_len = sizeof(XATTR_LUSTRE_PREFIX) - 1;
                const size_t name_len = sizeof("lov") - 1;
                const size_t total_len = prefix_len + name_len + 1;

                if (((rc + total_len) > size) && (buffer != NULL)) {
                        ptlrpc_req_finished(request);
                        return -ERANGE;
                }

                if (buffer != NULL) {
                        buffer += rc;
                        memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len);
                        memcpy(buffer + prefix_len, "lov", name_len);
                        buffer[prefix_len + name_len] = '\0';
                }
                rc2 = total_len;
        }
out:
        ptlrpc_req_finished(request);
        rc = rc + rc2;

        return rc;
}