int __v9fs_readpages_from_fscache(struct inode *inode,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(v9inode->fscache,
					  mapping, pages, nr_pages,
					  v9fs_vfs_readpage_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
		return 1;
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}
static void req_done(struct virtqueue *vq)
{
	struct virtio_chan *chan = vq->vdev->priv;
	struct p9_fcall *rc;
	unsigned int len;
	struct p9_req_t *req;
	unsigned long flags;

	p9_debug(P9_DEBUG_TRANS, ": request done\n");

	while (1) {
		spin_lock_irqsave(&chan->lock, flags);
		rc = virtqueue_get_buf(chan->vq, &len);
		if (rc == NULL) {
			spin_unlock_irqrestore(&chan->lock, flags);
			break;
		}
		chan->ring_bufs_avail = 1;
		spin_unlock_irqrestore(&chan->lock, flags);
		/* Wakeup if anyone waiting for VirtIO ring space. */
		wake_up(chan->vc_wq);
		p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc);
		p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
		req = p9_tag_lookup(chan->client, rc->tag);
		req->status = REQ_STATUS_RCVD;
		p9_client_cb(chan->client, req);
	}
}
int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(v9inode->fscache,
					 page,
					 v9fs_vfs_readpage_complete,
					 NULL,
					 GFP_KERNEL);
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
		return 1;
	case 0:
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}
/**
 * parse_opts - parse mount options into rdma options structure
 * @params: options string passed from mount
 * @opts: rdma transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */
static int parse_opts(char *params, struct p9_rdma_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->sq_depth = P9_RDMA_SQ_DEPTH;
	opts->rq_depth = P9_RDMA_RQ_DEPTH;
	opts->timeout = P9_RDMA_TIMEOUT;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		p9_debug(P9_DEBUG_ERROR,
			 "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;

		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		if (token == Opt_err)
			continue;
		r = match_int(&args[0], &option);
		if (r < 0) {
			p9_debug(P9_DEBUG_ERROR,
				 "integer field, but no integer?\n");
			continue;
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_sq_depth:
			opts->sq_depth = option;
			break;
		case Opt_rq_depth:
			opts->rq_depth = option;
			break;
		case Opt_timeout:
			opts->timeout = option;
			break;
		default:
			continue;
		}
	}
	/* RQ must be at least as large as the SQ */
	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
	kfree(tmp_options);
	return 0;
}
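/*
 * Illustrative sketch only, not part of the transport: how a caller
 * might feed a typical RDMA mount string through parse_opts(). The
 * option names ("port", "sq", "rq", "timeout") assume the usual
 * trans_rdma tokens table; example_parse_rdma_opts() itself is a
 * hypothetical helper.
 */
static int example_parse_rdma_opts(void)
{
	struct p9_rdma_opts opts;
	/* parse_opts() works on a kstrdup'd copy, so the caller's
	 * string is left untouched by strsep(). */
	char params[] = "port=5640,sq=16,rq=8,timeout=30000";
	int err = parse_opts(params, &opts);

	/* On success opts.rq_depth has been raised to opts.sq_depth
	 * (16 here), since the RQ must be at least as large as the SQ. */
	return err;
}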
static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
{
	bool over;
	struct p9_wstat st;
	int err = 0;
	struct p9_fid *fid;
	int buflen;
	int reclen = 0;
	struct p9_rdir *rdir;
	struct kvec kvec;

	p9_debug(P9_DEBUG_VFS, "name %pD\n", file);
	fid = file->private_data;

	buflen = fid->clnt->msize - P9_IOHDRSZ;

	rdir = v9fs_alloc_rdir_buf(file, buflen);
	if (!rdir)
		return -ENOMEM;
	kvec.iov_base = rdir->buf;
	kvec.iov_len = buflen;

	while (1) {
		if (rdir->tail == rdir->head) {
			struct iov_iter to;
			int n;

			iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buflen);
			n = p9_client_read(file->private_data, ctx->pos, &to,
					   &err);
			if (err)
				return err;
			/* a zero-length read is EOF; without this check
			 * the loop would spin forever */
			if (n == 0)
				return 0;

			rdir->head = 0;
			rdir->tail = n;
		}
		while (rdir->head < rdir->tail) {
			p9stat_init(&st);
			err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
					  rdir->tail - rdir->head, &st);
			if (err) {
				p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
				p9stat_free(&st);
				return -EIO;
			}
			/* on-wire stat record is its size field plus the
			 * 2-byte size prefix itself */
			reclen = st.size + 2;

			over = !dir_emit(ctx, st.name, strlen(st.name),
					 v9fs_qid2ino(&st.qid), dt_type(&st));
			p9stat_free(&st);
			if (over)
				return 0;

			rdir->head += reclen;
			ctx->pos += reclen;
		}
	}
}
void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);

	ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL);
	p9_debug(P9_DEBUG_FSC, "ret = %d\n", ret);
	if (ret != 0)
		v9fs_uncache_page(inode, page);
}
static int p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
{
	int err;
	int in, out, out_sgs, in_sgs;
	unsigned long flags;
	struct virtio_chan *chan = client->trans;
	struct scatterlist *sgs[2];

	p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n");

	req->status = REQ_STATUS_SENT;
req_retry:
	spin_lock_irqsave(&chan->lock, flags);

	out_sgs = in_sgs = 0;
	/* Handle out VirtIO ring buffers */
	out = pack_sg_list(chan->sg, 0,
			   VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
	if (out)
		sgs[out_sgs++] = chan->sg;

	in = pack_sg_list(chan->sg, out,
			  VIRTQUEUE_NUM, req->rc->sdata, req->rc->capacity);
	if (in)
		sgs[out_sgs + in_sgs++] = chan->sg + out;

	err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc,
				GFP_ATOMIC);
	if (err < 0) {
		if (err == -ENOSPC) {
			chan->ring_bufs_avail = 0;
			spin_unlock_irqrestore(&chan->lock, flags);
			err = wait_event_interruptible(*chan->vc_wq,
						       chan->ring_bufs_avail);
			if (err == -ERESTARTSYS)
				return err;

			p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
			goto req_retry;
		} else {
			spin_unlock_irqrestore(&chan->lock, flags);
			p9_debug(P9_DEBUG_TRANS,
				 "virtio rpc add_sgs returned failure\n");
			return -EIO;
		}
	}
	virtqueue_kick(chan->vq);
	spin_unlock_irqrestore(&chan->lock, flags);

	p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
	return 0;
}
static void v9fs_kill_super(struct super_block *s)
{
	struct v9fs_session_info *v9ses = s->s_fs_info;

	p9_debug(P9_DEBUG_VFS, " %p\n", s);

	kill_anon_super(s);

	v9fs_session_cancel(v9ses);
	v9fs_session_close(v9ses);
	kfree(v9ses);
	s->s_fs_info = NULL;
	p9_debug(P9_DEBUG_VFS, "exiting kill_super\n");
}
/**
 * v9fs_dir_readdir_dotl - iterate through a directory
 * @file: opened file structure
 * @ctx: actor we feed the entries to
 *
 */
static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx)
{
	int err = 0;
	struct p9_fid *fid;
	int buflen;
	struct p9_rdir *rdir;
	struct p9_dirent curdirent;

	p9_debug(P9_DEBUG_VFS, "name %pD\n", file);
	fid = file->private_data;

	buflen = fid->clnt->msize - P9_READDIRHDRSZ;

	rdir = v9fs_alloc_rdir_buf(file, buflen);
	if (!rdir)
		return -ENOMEM;

	while (1) {
		if (rdir->tail == rdir->head) {
			err = p9_client_readdir(fid, rdir->buf, buflen,
						ctx->pos);
			if (err <= 0)
				return err;

			rdir->head = 0;
			rdir->tail = err;
		}

		while (rdir->head < rdir->tail) {
			err = p9dirent_read(fid->clnt, rdir->buf + rdir->head,
					    rdir->tail - rdir->head,
					    &curdirent);
			if (err < 0) {
				p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
				return -EIO;
			}

			if (!dir_emit(ctx, curdirent.d_name,
				      strlen(curdirent.d_name),
				      v9fs_qid2ino(&curdirent.qid),
				      curdirent.d_type))
				return 0;

			ctx->pos = curdirent.d_off;
			rdir->head += err;
		}
	}
}
/*
 * v9fs_xattr_set()
 *
 * Create, replace or remove an extended attribute for this inode. Buffer
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int v9fs_xattr_set(struct dentry *dentry, const char *name,
		   const void *value, size_t value_len, int flags)
{
	u64 offset = 0;
	int retval, msize, write_count;
	struct p9_fid *fid = NULL;

	p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
		 name, value_len, flags);

	fid = v9fs_fid_clone(dentry);
	if (IS_ERR(fid)) {
		retval = PTR_ERR(fid);
		fid = NULL;
		goto error;
	}
	/*
	 * On success fid points to xattr
	 */
	retval = p9_client_xattrcreate(fid, name, value_len, flags);
	if (retval < 0) {
		p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n",
			 retval);
		goto error;
	}
	msize = fid->clnt->msize;
	while (value_len) {
		if (value_len > (msize - P9_IOHDRSZ))
			write_count = msize - P9_IOHDRSZ;
		else
			write_count = value_len;
		write_count = p9_client_write(fid, ((char *)value) + offset,
					      NULL, offset, write_count);
		if (write_count < 0) {
			/* error in xattr write */
			retval = write_count;
			goto error;
		}
		offset += write_count;
		value_len -= write_count;
	}
	/* Total written xattr bytes */
	retval = offset;
error:
	if (fid)
		retval = p9_client_clunk(fid);
	return retval;
}
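/*
 * Userspace view of the flag semantics described above; a minimal
 * sketch only, assuming a mounted 9p tree at `path`. XATTR_CREATE
 * fails with EEXIST if the attribute already exists, XATTR_REPLACE
 * fails with ENODATA if it does not, and flags of 0 accept either.
 * xattr_set_demo() is a hypothetical helper, not kernel code.
 *
 * #include <sys/xattr.h>
 */
static int xattr_set_demo(const char *path)
{
	static const char v[] = "bar";

	/* Create only: errors out if "user.foo" is already present. */
	if (setxattr(path, "user.foo", v, sizeof(v) - 1, XATTR_CREATE) < 0)
		return -1;
	/* Replace only: errors out if "user.foo" has since vanished. */
	return setxattr(path, "user.foo", v, sizeof(v) - 1, XATTR_REPLACE);
}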
int p9dirent_read(struct p9_client *clnt, char *buf, int len,
		  struct p9_dirent *dirent)
{
	struct p9_fcall fake_pdu;
	int ret;
	char *nameptr;

	fake_pdu.size = len;
	fake_pdu.capacity = len;
	fake_pdu.sdata = buf;
	fake_pdu.offset = 0;

	ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid,
			  &dirent->d_off, &dirent->d_type, &nameptr);
	if (ret) {
		p9_debug(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
		trace_9p_protocol_dump(clnt, &fake_pdu);
		goto out;
	}

	/* d_name is a fixed-size array; bound the copy of the
	 * server-supplied name rather than trusting its length */
	strlcpy(dirent->d_name, nameptr, sizeof(dirent->d_name));
	kfree(nameptr);

out:
	return fake_pdu.offset;
}
/*
 * wait for a page to complete writing to the cache
 */
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (PageFsCache(page))
		fscache_wait_on_page_write(v9inode->fscache, page);
}
ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
			   void *buffer, size_t buffer_size)
{
	ssize_t retval;
	u64 attr_size;
	struct p9_fid *attr_fid;
	struct kvec kvec = {.iov_base = buffer, .iov_len = buffer_size};
	struct iov_iter to;
	int err;

	iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buffer_size);

	attr_fid = p9_client_xattrwalk(fid, name, &attr_size);
	if (IS_ERR(attr_fid)) {
		retval = PTR_ERR(attr_fid);
		p9_debug(P9_DEBUG_VFS, "p9_client_attrwalk failed %zd\n",
			 retval);
		return retval;
	}
	if (attr_size > buffer_size) {
		if (!buffer_size) /* request to get the attr_size */
			retval = attr_size;
		else
			retval = -ERANGE;
	} else {
		iov_iter_truncate(&to, attr_size);
		retval = p9_client_read(attr_fid, 0, &to, &err);
		if (err)
			retval = err;
	}
	p9_client_clunk(attr_fid);
	return retval;
}
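/*
 * The buffer_size == 0 branch above is what lets userspace do the
 * standard two-call getxattr(2) dance. A minimal sketch of that
 * pattern; xattr_read_demo() is a hypothetical userspace helper,
 * not kernel code.
 *
 * #include <stdlib.h>
 * #include <sys/xattr.h>
 */
static ssize_t xattr_read_demo(const char *path, const char *name,
			       char **out)
{
	/* First call with size 0 reports the value length... */
	ssize_t len = getxattr(path, name, NULL, 0);

	if (len < 0)
		return len;
	*out = malloc(len);
	if (!*out)
		return -1;
	/* ...second call fills a buffer of exactly that size. It can
	 * still fail with ERANGE if the value grew in between. */
	return getxattr(path, name, *out, len);
}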
static int post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_recv_wr wr;
	struct ib_sge sge;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc.sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	c->cqe.done = recv_done;

	sge.addr = c->busa;
	sge.length = client->msize;
	sge.lkey = rdma->pd->local_dma_lkey;

	wr.next = NULL;
	wr.wr_cqe = &c->cqe;
	wr.sg_list = &sge;
	wr.num_sge = 1;
	return ib_post_recv(rdma->qp, &wr, NULL);

 error:
	p9_debug(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
}
static void
handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
			    DMA_FROM_DEVICE);

	if (status != IB_WC_SUCCESS)
		goto err_out;

	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	req->rc = c->rc;
	req->status = REQ_STATUS_RCVD;
	p9_client_cb(client, req);

	return;

 err_out:
	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n",
		 req, err, status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
}
void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
{
	p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
		 v9ses, v9ses->fscache);

	fscache_relinquish_cookie(v9ses->fscache, 0);
	v9ses->fscache = NULL;
}
static int post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge sge;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc->sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	sge.addr = c->busa;
	sge.length = client->msize;
	sge.lkey = rdma->lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_RECV;
	wr.wr_id = (unsigned long) c;
	wr.sg_list = &sge;
	wr.num_sge = 1;
	return ib_post_recv(rdma->qp, &wr, &bad_wr);

 error:
	p9_debug(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
}
static int v9fs_file_flock_dotl(struct file *filp, int cmd,
				struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);
	int ret = -ENOLCK;

	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
		 filp, cmd, fl, filp);

	/* No mandatory locks */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		goto out_err;

	if (!(fl->fl_flags & FL_FLOCK))
		goto out_err;

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}
	/* Convert flock to posix lock */
	fl->fl_flags |= FL_POSIX;
	fl->fl_flags ^= FL_FLOCK;

	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
		ret = v9fs_file_do_lock(filp, cmd, fl);
	else
		ret = -EINVAL;
out_err:
	return ret;
}
int v9fs_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;
	struct p9_fid *fid;
	int omode;

	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
	v9inode = V9FS_I(inode);
	v9ses = v9fs_inode2v9ses(inode);
	if (v9fs_proto_dotl(v9ses))
		omode = v9fs_open_to_dotl_flags(file->f_flags);
	else
		omode = v9fs_uflags2omode(file->f_flags,
					  v9fs_proto_dotu(v9ses));
	fid = file->private_data;
	if (!fid) {
		fid = v9fs_fid_clone(file->f_path.dentry);
		if (IS_ERR(fid))
			return PTR_ERR(fid);

		err = p9_client_open(fid, omode);
		if (err < 0) {
			p9_client_clunk(fid);
			return err;
		}
		if ((file->f_flags & O_APPEND) &&
		    (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
			generic_file_llseek(file, 0, SEEK_END);
	}

	file->private_data = fid;
	mutex_lock(&v9inode->v_mutex);
	if (v9ses->cache && !v9inode->writeback_fid &&
	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
		/*
		 * clone a fid and add it to writeback_fid
		 * we do it during open time instead of
		 * page dirty time via write_begin/page_mkwrite
		 * because we want write after unlink usecase
		 * to work.
		 */
		fid = v9fs_writeback_fid(file->f_path.dentry);
		if (IS_ERR(fid)) {
			err = PTR_ERR(fid);
			mutex_unlock(&v9inode->v_mutex);
			goto out_error;
		}
		v9inode->writeback_fid = (void *) fid;
	}
	mutex_unlock(&v9inode->v_mutex);
	if (v9ses->cache)
		v9fs_cache_inode_set_cookie(inode, file);
	return 0;
out_error:
	p9_client_clunk(file->private_data);
	file->private_data = NULL;
	return err;
}
static int
v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct v9fs_inode *v9inode;
	struct page *page = vmf->page;
	struct file *filp = vma->vm_file;
	struct inode *inode = file_inode(filp);

	p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
		 page, (unsigned long)filp->private_data);

	/* Update file times before taking page lock */
	file_update_time(filp);

	v9inode = V9FS_I(inode);
	/* make sure the cache has finished storing the page */
	v9fs_fscache_wait_on_page_write(inode, page);
	BUG_ON(!v9inode->writeback_fid);
	lock_page(page);
	if (page->mapping != inode->i_mapping)
		goto out_unlock;
	wait_for_stable_page(page);

	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return VM_FAULT_NOPAGE;
}
static int v9fs_file_lock_dotl(struct file *filp, int cmd,
			       struct file_lock *fl)
{
	struct inode *inode = file_inode(filp);
	int ret = -ENOLCK;

	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n",
		 filp, cmd, fl, filp->f_path.dentry->d_name.name);

	/* No mandatory locks */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		goto out_err;

	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
		filemap_write_and_wait(inode->i_mapping);
		invalidate_mapping_pages(&inode->i_data, 0, -1);
	}

	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
		ret = v9fs_file_do_lock(filp, cmd, fl);
	else if (IS_GETLK(cmd))
		ret = v9fs_file_getlock(filp, fl);
	else
		ret = -EINVAL;
out_err:
	return ret;
}
/**
 * v9fs_fid_readn - read from a fid
 * @fid: fid to read
 * @data: data buffer to read data into
 * @udata: user data buffer to read data into
 * @count: size of buffer
 * @offset: offset at which to read data
 *
 */
ssize_t
v9fs_fid_readn(struct p9_fid *fid, char *data, char __user *udata, u32 count,
	       u64 offset)
{
	int n, total, size;

	p9_debug(P9_DEBUG_VFS, "fid %d offset %llu count %d\n",
		 fid->fid, (long long unsigned)offset, count);
	n = 0;
	total = 0;
	size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ;
	do {
		n = p9_client_read(fid, data, udata, offset, count);
		if (n <= 0)
			break;

		if (data)
			data += n;
		if (udata)
			udata += n;

		offset += n;
		count -= n;
		total += n;
	} while (count > 0 && n == size);

	if (n < 0)
		total = n;

	return total;
}
ssize_t
v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
			 const char __user *data, size_t count,
			 loff_t *offset, int invalidate)
{
	int n;
	loff_t i_size;
	size_t total = 0;
	loff_t origin = *offset;
	unsigned long pg_start, pg_end;

	p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n",
		 data, (int)count, (int)*offset);

	do {
		n = p9_client_write(fid, NULL, data+total, origin+total, count);
		if (n <= 0)
			break;
		count -= n;
		total += n;
	} while (count > 0);

	if (invalidate && (total > 0)) {
		pg_start = origin >> PAGE_CACHE_SHIFT;
		pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT;
		if (inode->i_mapping && inode->i_mapping->nrpages)
			invalidate_inode_pages2_range(inode->i_mapping,
						      pg_start, pg_end);
		*offset += total;
		i_size = i_size_read(inode);
		if (*offset > i_size) {
			inode_add_bytes(inode, *offset - i_size);
			i_size_write(inode, *offset);
		}
	}
	/* a write error aborts the loop above and is reported even
	 * after partial progress */
	if (n < 0)
		return n;

	return total;
}
int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
		       const void *value, size_t value_len, int flags)
{
	struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
	struct iov_iter from;
	int retval;

	iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);

	p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
		 name, value_len, flags);

	/* Clone it */
	fid = p9_client_walk(fid, 0, NULL, 1);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	/*
	 * On success fid points to xattr
	 */
	retval = p9_client_xattrcreate(fid, name, value_len, flags);
	if (retval < 0)
		p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n",
			 retval);
	else
		p9_client_write(fid, 0, &from, &retval);
	p9_client_clunk(fid);
	return retval;
}

ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	return v9fs_xattr_get(dentry, NULL, buffer, buffer_size);
}

const struct xattr_handler *v9fs_xattr_handlers[] = {
	&v9fs_xattr_user_handler,
	&v9fs_xattr_trusted_handler,
#ifdef CONFIG_9P_FS_POSIX_ACL
	&v9fs_xattr_acl_access_handler,
	&v9fs_xattr_acl_default_handler,
#endif
#ifdef CONFIG_9P_FS_SECURITY
	&v9fs_xattr_security_handler,
#endif
	NULL
};
static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data,
					 void *buffer, uint16_t bufmax)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path));
	p9_debug(P9_DEBUG_FSC, "inode %p get key %llu\n",
		 &v9inode->vfs_inode, v9inode->qid.path);
	return sizeof(v9inode->qid.path);
}
static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data,
				      uint64_t *size)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	*size = i_size_read(&v9inode->vfs_inode);
	p9_debug(P9_DEBUG_FSC, "inode %p get attr %llu\n",
		 &v9inode->vfs_inode, *size);
}
static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data,
					 void *buffer, uint16_t buflen)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version));
	p9_debug(P9_DEBUG_FSC, "inode %p get aux %u\n",
		 &v9inode->vfs_inode, v9inode->qid.version);
	return sizeof(v9inode->qid.version);
}
int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
		       const void *value, size_t value_len, int flags)
{
	u64 offset = 0;
	int retval, msize, write_count;

	p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
		 name, value_len, flags);

	/* Clone it */
	fid = p9_client_walk(fid, 0, NULL, 1);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	/*
	 * On success fid points to xattr
	 */
	retval = p9_client_xattrcreate(fid, name, value_len, flags);
	if (retval < 0) {
		p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n",
			 retval);
		p9_client_clunk(fid);
		return retval;
	}
	msize = fid->clnt->msize;
	while (value_len) {
		if (value_len > (msize - P9_IOHDRSZ))
			write_count = msize - P9_IOHDRSZ;
		else
			write_count = value_len;
		write_count = p9_client_write(fid, ((char *)value) + offset,
					      NULL, offset, write_count);
		if (write_count < 0) {
			/* error in xattr write */
			retval = write_count;
			break;
		}
		offset += write_count;
		value_len -= write_count;
	}
	return p9_client_clunk(fid);
}
int v9fs_dir_release(struct inode *inode, struct file *filp)
{
	struct p9_fid *fid;

	fid = filp->private_data;
	p9_debug(P9_DEBUG_VFS, "inode: %p filp: %p fid: %d\n",
		 inode, filp, fid ? fid->fid : -1);
	if (fid)
		p9_client_clunk(fid);
	return 0;
}
static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data,
					   void *buffer, uint16_t bufmax)
{
	struct v9fs_session_info *v9ses;
	uint16_t klen = 0;

	v9ses = (struct v9fs_session_info *)cookie_netfs_data;
	p9_debug(P9_DEBUG_FSC, "session %p buf %p size %u\n",
		 v9ses, buffer, bufmax);

	if (v9ses->cachetag)
		klen = strlen(v9ses->cachetag);

	if (klen > bufmax)
		return 0;

	memcpy(buffer, v9ses->cachetag, klen);
	p9_debug(P9_DEBUG_FSC, "cache session tag %s\n", v9ses->cachetag);
	return klen;
}