int HgfsDestroySession(void) { HgfsReq *req; int result; HgfsStatus status; HgfsOp opUsed; LOG(4, ("Entry()\n")); if (!gState->sessionEnabled) { return 0; } req = HgfsGetNewRequest(); if (!req) { LOG(4, ("Out of memory while getting new request.\n")); result = -ENOMEM; goto out; } opUsed = hgfsVersionDestroySession; result = HgfsPackDestroySessionRequest(opUsed, req); if (result != 0) { LOG(4, ("Error packing request.\n")); goto out; } result = HgfsSendRequest(req); if (result == 0) { LOG(6, ("Got reply.\n")); status = HgfsGetReplyStatus(req); result = HgfsStatusConvertToLinux(status); switch (result) { case 0: status = HgfsDestroySessionProcessResult(HGFS_REQ_PAYLOAD(req), req->payloadSize); ASSERT(status == HGFS_STATUS_SUCCESS); break; case -EPROTO: /* Fallthrough. */ default: LOG(6, ("Session was not created, error %d\n", result)); break; } } else if (result == -EIO) { LOG(4, ("Timed out. error: %d\n", result)); } else if (result == -EPROTO) { LOG(4, ("Server returned error: %d\n", result)); } else { LOG(4, ("Unknown error: %d\n", result)); } out: HgfsFreeRequest(req); LOG(4, ("Exit(%d)\n", result)); return result; }
int HgfsSymlink(const char* source, // IN: Source name const char *symname) // IN: Target name { HgfsReq *req; int result = 0; HgfsOp opUsed; HgfsStatus replyStatus; req = HgfsGetNewRequest(); if (!req) { LOG(4, ("Out of memory while getting new request.\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionCreateSymlink; result = HgfsPackSymlinkCreateRequest(source, symname, opUsed, req); if (result != 0) { LOG(4, ("Error packing request.\n")); goto out; } result = HgfsSendRequest(req); if (result == 0) { LOG(6, ("Got reply.\n")); replyStatus = HgfsGetReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); if (result == 0) { LOG(6, ("Symlink created successfully, instantiating dentry.\n")); } else if (result == -EPROTO) { /* Retry with older version(s). Set globally. */ if (opUsed == HGFS_OP_CREATE_SYMLINK_V3) { LOG(4, ("Version 3 not supported. Falling back to version 2.\n")); hgfsVersionCreateSymlink = HGFS_OP_CREATE_SYMLINK; goto retry; } else { LOG(6, ("Symlink was not created, error %d\n", result)); } } } else if (result == -EIO) { LOG(4, ("Timed out. error: %d\n", result)); } else if (result == -EPROTO) { LOG(4, ("Server returned error: %d\n", result)); } else { LOG(4, ("Unknown error: %d\n", result)); } out: HgfsFreeRequest(req); return result; }
/*
 * HgfsOpen --
 *
 *    VFS open entry point. Packs and sends an HGFS open request for the
 *    file, unpacks the returned server handle and lock state, and binds
 *    the handle to the struct file via HgfsCreateFileInfo. Falls back
 *    from open V3 to V2 to V1 when the server rejects a newer opcode
 *    with a protocol error.
 *
 * Results:
 *    Zero on success, negative error otherwise.
 *
 * Side effects:
 *    May downgrade the global hgfsVersionOpen on fallback. On an
 *    O_CREAT open, attempts to set the new file's uid/gid on the server.
 *    On failure of a created-but-unopened file, forces revalidation of
 *    the dentry (see comment at the bottom).
 */

static int
HgfsOpen(struct inode *inode,  // IN: Inode of the file to open
         struct file *file)    // IN: File pointer for this open
{
   HgfsReq *req;
   HgfsOp opUsed;
   HgfsStatus replyStatus;
   HgfsHandle replyFile;
   HgfsServerLock replyLock;
   HgfsInodeInfo *iinfo;
   int result = 0;

   ASSERT(inode);
   ASSERT(inode->i_sb);
   ASSERT(file);
   ASSERT(file->f_dentry);
   ASSERT(file->f_dentry->d_inode);

   iinfo = INODE_GET_II_P(inode);

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsOpen: out of memory while "
              "getting new request\n"));
      result = -ENOMEM;
      goto out;
   }

retry:
   /*
    * Set up pointers using the proper struct This lets us check the
    * version exactly once and use the pointers later.
    */
   opUsed = hgfsVersionOpen;
   result = HgfsPackOpenRequest(inode, file, opUsed, req);
   if (result != 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsOpen: error packing request\n"));
      goto out;
   }

   /* Send the request and process the reply. */
   result = HgfsSendRequest(req);
   if (result == 0) {
      /* Get the reply and check return status. */
      replyStatus = HgfsReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);

      switch (result) {
      case 0:
         /* The file now exists on the server; it is no longer a fake. */
         iinfo->createdAndUnopened = FALSE;
         LOG(10, (KERN_DEBUG "VMware hgfs: HgfsOpen: old hostFileId = "
                  "%"FMT64"u\n", iinfo->hostFileId));
         /*
          * Invalidate the hostFileId as we need to retrieve it from
          * the server.
          */
         iinfo->hostFileId = 0;
         result = HgfsUnpackOpenReply(req, opUsed, &replyFile, &replyLock);
         if (result != 0) {
            break;
         }
         result = HgfsCreateFileInfo(file, replyFile);
         if (result != 0) {
            break;
         }
         LOG(6, (KERN_DEBUG "VMware hgfs: HgfsOpen: set handle to %u\n",
                 replyFile));
         /*
          * HgfsCreate faked all of the inode's attributes, so by the time
          * we're done in HgfsOpen, we need to make sure that the attributes
          * in the inode are real. The following is only necessary when
          * O_CREAT is set, otherwise we got here after HgfsLookup (which sent
          * a getattr to the server and got the real attributes).
          *
          * In particular, we'd like to at least try and set the inode's
          * uid/gid to match the caller's. We don't expect this to work,
          * because Windows servers will ignore it, and Linux servers running
          * as non-root won't be able to change it, but we're forward thinking
          * people.
          *
          * Either way, we force a revalidate following the setattr so that
          * we'll get the actual uid/gid from the server.
          */
         if (file->f_flags & O_CREAT) {
            struct dentry *dparent;
            struct inode *iparent;

            /*
             * This is not the root of our file system so there should always
             * be a parent.
             */
            ASSERT(file->f_dentry->d_parent);

            /*
             * Here we obtain a reference on the parent to make sure it doesn't
             * go away. This might not be necessary, since the existence of
             * a child (which we hold a reference to in this call) should
             * account for a reference in the parent, but it's safe to do so.
             * Overly cautious and safe is better than risky and broken.
             *
             * XXX Note that this and a handful of other hacks wouldn't be
             * necessary if we actually created the file in our create
             * implementation (where references and locks are properly held).
             * We could do this if we were willing to give up support for
             * O_EXCL on 2.4 kernels.
             */
            dparent = dget(file->f_dentry->d_parent);
            iparent = dparent->d_inode;

            HgfsSetUidGid(iparent, file->f_dentry,
                          current_fsuid(), current_fsgid());

            dput(dparent);
         }
         break;

      case -EPROTO:
         /* Retry with older version(s). Set globally. */
         if (opUsed == HGFS_OP_OPEN_V3) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsOpen: Version 3 not "
                    "supported. Falling back to version 2.\n"));
            hgfsVersionOpen = HGFS_OP_OPEN_V2;
            goto retry;
         }

         /* Retry with Version 1 of Open. Set globally. */
         if (opUsed == HGFS_OP_OPEN_V2) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsOpen: Version 2 not "
                    "supported. Falling back to version 1.\n"));
            hgfsVersionOpen = HGFS_OP_OPEN;
            goto retry;
         }

         /* Fallthrough. */
      default:
         break;
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsOpen: timed out\n"));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsOpen: server "
              "returned error: %d\n", result));
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsOpen: unknown error: "
              "%d\n", result));
   }

out:
   HgfsFreeRequest(req);

   /*
    * If the open failed (for any reason) and we tried to open a newly created
    * file, we must ensure that the next operation on this inode triggers a
    * revalidate to the server. This is because the file wasn't created on the
    * server, yet we currently believe that it was, because we created a fake
    * inode with a hashed dentry for it in HgfsCreate. We will continue to
    * believe this until the dentry's ttl expires, which will cause a
    * revalidate to the server that will reveal the truth. So in order to find
    * the truth as soon as possible, we'll reset the dentry's last revalidate
    * time now to force a revalidate the next time someone uses the dentry.
    *
    * We're using our own flag to track this case because using O_CREAT isn't
    * good enough: HgfsOpen will be called with O_CREAT even if the file exists
    * on the server, and if that's the case, there's no need to revalidate.
    *
    * XXX: Note that this will need to be reworked if/when we support hard
    * links, because multiple dentries will point to the same inode, and
    * forcing a revalidate on one will not force it on any others.
    */
   if (result != 0 && iinfo->createdAndUnopened == TRUE) {
      HgfsDentryAgeForce(file->f_dentry);
   }
   return result;
}
/*
 * HgfsRelease --
 *
 *    VFS release entry point. Flushes the file's dirty pages, drops the
 *    local file info, and sends a close request for the server handle.
 *    Falls back from close V3 to V1 when the server rejects V3 with a
 *    protocol error.
 *
 * Results:
 *    Zero on success, negative error otherwise.
 *
 * Side effects:
 *    May downgrade the global hgfsVersionClose on fallback.
 */

static int
HgfsRelease(struct inode *inode,  // IN: Inode that this file points to
            struct file *file)    // IN: File that is getting released
{
   HgfsReq *req;
   HgfsHandle handle;
   HgfsOp opUsed;
   HgfsStatus replyStatus;
   int result = 0;

   ASSERT(inode);
   ASSERT(file);
   ASSERT(file->f_dentry);
   ASSERT(file->f_dentry->d_sb);

   /* Grab the server handle before the file info is released below. */
   handle = FILE_GET_FI_P(file)->handle;
   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsRelease: close fh %u\n", handle));

   /*
    * This may be our last open handle to an inode, so we should flush our
    * dirty pages before closing it.
    */
   compat_filemap_write_and_wait(inode->i_mapping);

   HgfsReleaseFileInfo(file);

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: out of memory while "
              "getting new request\n"));
      result = -ENOMEM;
      goto out;
   }

retry:
   opUsed = hgfsVersionClose;
   if (opUsed == HGFS_OP_CLOSE_V3) {
      /* V3 requests carry a generic header followed by the V3 payload. */
      HgfsRequest *header;
      HgfsRequestCloseV3 *request;

      header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req));
      header->id = req->id;
      header->op = opUsed;

      request = (HgfsRequestCloseV3 *)(HGFS_REQ_PAYLOAD_V3(req));
      request->file = handle;
      request->reserved = 0;
      req->payloadSize = HGFS_REQ_PAYLOAD_SIZE_V3(request);
   } else {
      /* Legacy request embeds the header inside the request struct. */
      HgfsRequestClose *request;

      request = (HgfsRequestClose *)(HGFS_REQ_PAYLOAD(req));
      request->header.id = req->id;
      request->header.op = opUsed;
      request->file = handle;
      req->payloadSize = sizeof *request;
   }

   /* Send the request and process the reply. */
   result = HgfsSendRequest(req);
   if (result == 0) {
      /* Get the reply. */
      replyStatus = HgfsReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);

      switch (result) {
      case 0:
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: released handle %u\n",
                 handle));
         break;
      case -EPROTO:
         /* Retry with older version(s). Set globally. */
         if (opUsed == HGFS_OP_CLOSE_V3) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: Version 3 not "
                    "supported. Falling back to version 1.\n"));
            hgfsVersionClose = HGFS_OP_CLOSE;
            goto retry;
         }
         break;
      default:
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: failed handle %u\n",
                 handle));
         break;
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: timed out\n"));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: server "
              "returned error: %d\n", result));
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: unknown error: "
              "%d\n", result));
   }

out:
   HgfsFreeRequest(req);
   return result;
}
static int HgfsDoWrite(HgfsHandle handle, // IN: Handle for this file HgfsDataPacket dataPacket[], // IN: Data description uint32 numEntries, // IN: Number of entries in dataPacket loff_t offset) // IN: Offset to begin writing at { HgfsReq *req; int result = 0; HgfsOp opUsed; uint32 requiredSize = 0; uint32 actualSize = 0; char *payload = NULL; uint32 reqSize; HgfsStatus replyStatus; char *buf; uint32 count; ASSERT(numEntries == 1); count = dataPacket[0].len; req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionWrite; if (opUsed == HGFS_OP_WRITE_FAST_V4) { HgfsRequest *header; HgfsRequestWriteV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestWriteV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->flags = 0; request->offset = offset; request->requiredSize = count; request->reserved = 0; payload = request->payload; requiredSize = request->requiredSize; req->dataPacket = kmalloc(numEntries * sizeof req->dataPacket[0], GFP_KERNEL); if (!req->dataPacket) { LOG(4, (KERN_WARNING "%s: Failed to allocate mem\n", __func__)); result = -ENOMEM; goto out; } memcpy(req->dataPacket, dataPacket, numEntries * sizeof req->dataPacket[0]); req->numEntries = numEntries; reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); req->payloadSize = reqSize; LOG(4, (KERN_WARNING "VMware hgfs: Fast Write V4\n")); } else if (opUsed == HGFS_OP_WRITE_V3) { HgfsRequest *header; HgfsRequestWriteV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestWriteV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->flags = 0; request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *header - sizeof *request, count); LOG(4, (KERN_WARNING "VMware hgfs: Using write V3\n")); 
request->reserved = 0; payload = request->payload; requiredSize = request->requiredSize; reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); req->dataPacket = NULL; req->numEntries = 0; buf = kmap(dataPacket[0].page) + dataPacket[0].offset; memcpy(payload, buf, requiredSize); kunmap(dataPacket[0].page); req->payloadSize = reqSize + requiredSize - 1; } else { HgfsRequestWrite *request; request = (HgfsRequestWrite *)(HGFS_REQ_PAYLOAD(req)); request->header.id = req->id; request->header.op = opUsed; request->file = handle; request->flags = 0; request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *request, count); payload = request->payload; requiredSize = request->requiredSize; reqSize = sizeof *request; req->dataPacket = NULL; req->numEntries = 0; buf = kmap(dataPacket[0].page) + dataPacket[0].offset; memcpy(payload, buf, requiredSize); kunmap(dataPacket[0].page); req->payloadSize = reqSize + requiredSize - 1; } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: res %u\n", result)); switch (result) { case 0: if (opUsed == HGFS_OP_WRITE_V3 || opUsed == HGFS_OP_WRITE_FAST_V4) { actualSize = ((HgfsReplyWriteV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize; } else { actualSize = ((HgfsReplyWrite *)HGFS_REQ_PAYLOAD(req))->actualSize; } /* Return result. */ LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoWrite: wrote %u bytes\n", actualSize)); result = actualSize; break; case -EPROTO: /* Retry with older version(s). Set globally. */ switch (opUsed) { case HGFS_OP_WRITE_FAST_V4: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: Fast Write V4 not " "supported. 
Falling back to V3 write.\n")); if (req->dataPacket) { kfree(req->dataPacket); req->dataPacket = NULL; } hgfsVersionWrite = HGFS_OP_WRITE_V3; goto retry; case HGFS_OP_WRITE_V3: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: Version 3 not " "supported. Falling back to version 1.\n")); hgfsVersionWrite = HGFS_OP_WRITE; goto retry; default: break; } break; default: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: server " "returned error: %d\n", result)); break; } } else if (result == -EIO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: unknown error: " "%d\n", result)); } out: if (req->dataPacket) { kfree(req->dataPacket); } HgfsFreeRequest(req); return result; }
static int HgfsDoRead(HgfsHandle handle, // IN: Handle for this file HgfsDataPacket dataPacket[], // IN/OUT: Data description uint32 numEntries, // IN: Number of entries in dataPacket loff_t offset) // IN: Offset at which to read { HgfsReq *req; HgfsOp opUsed; int result = 0; uint32 actualSize = 0; char *payload = NULL; HgfsStatus replyStatus; char *buf; uint32 count; ASSERT(numEntries == 1); count = dataPacket[0].len; req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionRead; if (opUsed == HGFS_OP_READ_FAST_V4) { HgfsRequest *header; HgfsRequestReadV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestReadV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->offset = offset; request->requiredSize = count; request->reserved = 0; req->dataPacket = kmalloc(numEntries * sizeof req->dataPacket[0], GFP_KERNEL); if (!req->dataPacket) { LOG(4, (KERN_WARNING "%s: Failed to allocate mem\n", __func__)); result = -ENOMEM; goto out; } memcpy(req->dataPacket, dataPacket, numEntries * sizeof req->dataPacket[0]); req->numEntries = numEntries; LOG(4, (KERN_WARNING "VMware hgfs: Fast Read V4\n")); } else if (opUsed == HGFS_OP_READ_V3) { HgfsRequest *header; HgfsRequestReadV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestReadV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *request - sizeof *header, count); request->reserved = 0; req->dataPacket = NULL; req->numEntries = 0; req->payloadSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); } else { HgfsRequestRead *request; request = (HgfsRequestRead *)(HGFS_REQ_PAYLOAD(req)); request->header.id = req->id; request->header.op = opUsed; request->file = handle; 
request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *request, count); req->dataPacket = NULL; req->numEntries = 0; req->payloadSize = sizeof *request; } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); switch (result) { case 0: if (opUsed == HGFS_OP_READ_FAST_V4) { actualSize = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize; } else if (opUsed == HGFS_OP_READ_V3) { actualSize = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize; payload = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->payload; } else { actualSize = ((HgfsReplyRead *)HGFS_REQ_PAYLOAD(req))->actualSize; payload = ((HgfsReplyRead *)HGFS_REQ_PAYLOAD(req))->payload; } /* Sanity check on read size. */ if (actualSize > count) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: read too big!\n")); result = -EPROTO; goto out; } if (!actualSize) { /* We got no bytes. */ LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoRead: server returned " "zero\n")); result = actualSize; goto out; } /* Return result. */ if (opUsed == HGFS_OP_READ_V3 || opUsed == HGFS_OP_READ) { buf = kmap(dataPacket[0].page) + dataPacket[0].offset; ASSERT(buf); memcpy(buf, payload, actualSize); LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoRead: copied %u\n", actualSize)); kunmap(dataPacket[0].page); } result = actualSize; break; case -EPROTO: /* Retry with older version(s). Set globally. */ switch (opUsed) { case HGFS_OP_READ_FAST_V4: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: Fast Read V4 not " "supported. Falling back to V3 Read.\n")); if (req->dataPacket) { kfree(req->dataPacket); req->dataPacket = NULL; } hgfsVersionRead = HGFS_OP_READ_V3; goto retry; case HGFS_OP_READ_V3: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: Version 3 not " "supported. 
Falling back to version 1.\n")); hgfsVersionRead = HGFS_OP_READ; goto retry; default: break; } break; default: break; } } else if (result == -EIO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: unknown error: " "%d\n", result)); } out: if (req->dataPacket) { kfree(req->dataPacket); } HgfsFreeRequest(req); return result; }
int HgfsStatfs(const char* path, // IN : Path to the file struct statvfs *stat) // OUT: Stat to fill in { HgfsReq *req; int result = 0; HgfsOp opUsed; HgfsStatus replyStatus; uint64 freeBytes; uint64 totalBytes; LOG(6, ("Entered.\n")); memset(stat, 0, sizeof *stat); req = HgfsGetNewRequest(); if (!req) { LOG(4, ("Out of memory while getting new request.\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionQueryVolumeInfo; result = HgfsPackQueryVolumeRequest(path, opUsed, req); if (result != 0) { LOG(4, ("Error packing request.\n")); goto out; } result = HgfsSendRequest(req); if (result == 0) { LOG(6, ("Got reply.\n")); replyStatus = HgfsGetReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); /* * If the statfs succeeded on the server, copy the stats * into the statvfs struct, otherwise return an error. */ switch (result) { case 0: stat->f_bsize = HGFS_BLOCKSIZE; if (opUsed == HGFS_OP_QUERY_VOLUME_INFO_V3) { HgfsReplyQueryVolumeV3 * replyV3 = HgfsGetReplyPayload(req); totalBytes = replyV3->totalBytes; freeBytes = replyV3->freeBytes; } else { totalBytes = ((HgfsReplyQueryVolume *)HGFS_REQ_PAYLOAD(req))->totalBytes; freeBytes = ((HgfsReplyQueryVolume *)HGFS_REQ_PAYLOAD(req))->freeBytes; } stat->f_blocks = (totalBytes + HGFS_BLOCKSIZE - 1) / HGFS_BLOCKSIZE; stat->f_bfree = (freeBytes + HGFS_BLOCKSIZE - 1) / HGFS_BLOCKSIZE; stat->f_bavail = stat->f_bfree; break; case -EPERM: /* * We're cheating! This will cause statfs will return success. * We're doing this because an old server will complain when it gets * a statfs on a per-share mount. Rather than have 'df' spit an * error, let's just return all zeroes. */ result = 0; break; case -EPROTO: /* Retry with older version(s). Set globally. */ if (opUsed == HGFS_OP_QUERY_VOLUME_INFO_V3) { LOG(4, ("Version 3 not supported. Falling back to version 1.\n")); hgfsVersionQueryVolumeInfo = HGFS_OP_QUERY_VOLUME_INFO; goto retry; } break; default: break; } } else if (result == -EIO) { LOG(4, ("Timed out. 
error: %d\n", result)); } else if (result == -EPROTO) { LOG(4, ("Server returned error: %d\n", result)); } else { LOG(4, ("Unknown error: %d\n", result)); } out: HgfsFreeRequest(req); return result; }
static int HgfsPrivateDirRelease(struct file *file, // IN: File for the dir getting released HgfsHandle handle) // IN: Hgfs handle { HgfsReq *req; HgfsStatus replyStatus; HgfsOp opUsed; int result = 0; ASSERT(file); ASSERT(file->f_dentry); ASSERT(file->f_dentry->d_sb); LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: close fh %u\n", handle)); req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionSearchClose; if (opUsed == HGFS_OP_SEARCH_CLOSE_V3) { HgfsRequestSearchCloseV3 *request; HgfsRequest *header; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestSearchCloseV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->search = handle; request->reserved = 0; req->payloadSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); } else { HgfsRequestSearchClose *request; request = (HgfsRequestSearchClose *)(HGFS_REQ_PAYLOAD(req)); request->header.id = req->id; request->header.op = opUsed; request->search = handle; req->payloadSize = sizeof *request; } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); switch (result) { case 0: LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: release handle %u\n", handle)); break; case -EPROTO: /* Retry with older version(s). Set globally. */ if (opUsed == HGFS_OP_SEARCH_CLOSE_V3) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: Version 3 not " "supported. 
Falling back to version 1.\n")); hgfsVersionSearchClose = HGFS_OP_SEARCH_CLOSE; goto retry; } break; default: LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: failed handle %u\n", handle)); break; } } else if (result == -EIO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: unknown error: " "%d\n", result)); } out: HgfsFreeRequest(req); return result; }
static int HgfsPrivateDirOpen(struct file *file, // IN: File pointer for this open HgfsHandle *handle) // IN: Hgfs handle { HgfsReq *req; int result; HgfsOp opUsed; HgfsStatus replyStatus; HgfsHandle *replySearch; ASSERT(file); req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionSearchOpen; if (opUsed == HGFS_OP_SEARCH_OPEN_V3) { replySearch = &((HgfsReplySearchOpenV3 *)HGFS_REP_PAYLOAD_V3(req))->search; } else { replySearch = &((HgfsReplySearchOpen *)HGFS_REQ_PAYLOAD(req))->search; } result = HgfsPackDirOpenRequest(file, opUsed, req); if (result != 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen error packing request\n")); goto out; } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply and check return status. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); switch (result) { case 0: /* Save the handle value */ *handle = *replySearch; LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: Handle returned = %u\n", *replySearch)); break; case -EPROTO: /* Retry with older version(s). Set globally. */ if (opUsed == HGFS_OP_SEARCH_OPEN_V3) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: Version 3 not " "supported. 
Falling back to version 1.\n")); hgfsVersionSearchOpen = HGFS_OP_SEARCH_OPEN; goto retry; } LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: server " "returned error: %d\n", result)); break; default: LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: server " "returned error: %d\n", result)); break; } } else if (result == -EIO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: unknown error: " "%d\n", result)); } out: HgfsFreeRequest(req); return result; }
/*
 * HgfsGetNextDirEntry --
 *
 *    Fetch the directory entry at "offset" from the server-side search
 *    identified by searchHandle, filling in the entry's attributes and
 *    name. Falls back from search-read V3 to V2 to V1 when the server
 *    rejects a newer opcode with a protocol error.
 *
 * Results:
 *    Zero on success, negative error otherwise. On success with a NULL
 *    *entryName, *done is set TRUE to signal end of directory.
 *
 * Side effects:
 *    May downgrade the global hgfsVersionSearchRead on fallback. Stores
 *    the opcode used in attr->requestType so the unpacker (and the
 *    retry logic below) know which reply format to expect.
 */

static int
HgfsGetNextDirEntry(HgfsSuperInfo *si,       // IN: Superinfo for this SB
                    HgfsHandle searchHandle, // IN: Handle of dir
                    uint32 offset,           // IN: Offset of next dentry to get
                    HgfsAttrInfo *attr,      // OUT: File attributes of dentry
                    char **entryName,        // OUT: File name
                    Bool *done)              // OUT: Set true when there are
                                             // no more dentries
{
   HgfsReq *req;
   HgfsOp opUsed;
   HgfsStatus replyStatus;
   int result = 0;

   ASSERT(si);
   ASSERT(attr);
   ASSERT(done);

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: out of memory "
              "while getting new request\n"));
      /* No request was allocated, so return directly instead of via cleanup. */
      return -ENOMEM;
   }

retry:
   opUsed = hgfsVersionSearchRead;
   if (opUsed == HGFS_OP_SEARCH_READ_V3) {
      /* V3 requests carry a generic header followed by the V3 payload. */
      HgfsRequest *header;
      HgfsRequestSearchReadV3 *request;

      header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req));
      /* Record the opcode in attr so the reply can be unpacked correctly. */
      header->op = attr->requestType = opUsed;
      header->id = req->id;

      request = (HgfsRequestSearchReadV3 *)(HGFS_REQ_PAYLOAD_V3(req));
      request->search = searchHandle;
      request->offset = offset;
      request->flags = 0;
      request->reserved = 0;
      req->payloadSize = HGFS_REQ_PAYLOAD_SIZE_V3(request);
   } else {
      /* Legacy request embeds the header inside the request struct. */
      HgfsRequestSearchRead *request;

      request = (HgfsRequestSearchRead *)(HGFS_REQ_PAYLOAD(req));
      request->header.op = attr->requestType = opUsed;
      request->header.id = req->id;
      request->search = searchHandle;
      request->offset = offset;
      req->payloadSize = sizeof *request;
   }

   /* Send the request and process the reply. */
   result = HgfsSendRequest(req);
   if (result == 0) {
      LOG(6, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: got reply\n"));
      replyStatus = HgfsReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);

      switch(result) {
      case 0:
         result = HgfsUnpackSearchReadReply(req, attr, entryName);
         if (result == 0 && *entryName == NULL) {
            /* We're at the end of the directory. */
            LOG(6, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: end of "
                    "dir\n"));
            *done = TRUE;
         }
         break;

      case -EPROTO:
         /* Retry with older version(s). Set globally. */
         if (attr->requestType == HGFS_OP_SEARCH_READ_V3) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: Version 3 "
                    "not supported. Falling back to version 2.\n"));
            hgfsVersionSearchRead = HGFS_OP_SEARCH_READ_V2;
            goto retry;
         } else if (attr->requestType == HGFS_OP_SEARCH_READ_V2) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: Version 2 "
                    "not supported. Falling back to version 1.\n"));
            hgfsVersionSearchRead = HGFS_OP_SEARCH_READ;
            goto retry;
         }

         /* Fallthrough. */
      default:
         break;
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: timed out\n"));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: server "
              "returned error: %d\n", result));
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: unknown error: "
              "%d\n", result));
   }

   HgfsFreeRequest(req);
   return result;
}