/*
 * HgfsGetReplyPayload --
 *
 *    Locate the start of the reply payload inside a reply packet.
 *    When the session protocol is enabled the payload sits behind the
 *    v2 header; otherwise it is at the classic V3 reply offset.
 */
INLINE void *
HgfsGetReplyPayload(HgfsReq *rep)  // IN: reply packet
{
   return gState->sessionEnabled ? (void *)HGFS_REP_GET_PAYLOAD_HDRV2(rep)
                                 : (void *)HGFS_REP_PAYLOAD_V3(rep);
}
static int HgfsUnpackOpenReply(HgfsReq *req, // IN: Packet with reply inside HgfsOp opUsed, // IN: What request op did we send HgfsHandle *file, // OUT: Handle in reply packet HgfsServerLock *lock) // OUT: The server lock we got { HgfsReplyOpenV3 *replyV3; HgfsReplyOpenV2 *replyV2; HgfsReplyOpen *replyV1; size_t replySize; ASSERT(req); ASSERT(file); ASSERT(lock); switch (opUsed) { case HGFS_OP_OPEN_V3: replyV3 = (HgfsReplyOpenV3 *)HGFS_REP_PAYLOAD_V3(req); replySize = HGFS_REP_PAYLOAD_SIZE_V3(replyV3); *file = replyV3->file; *lock = replyV3->acquiredLock; break; case HGFS_OP_OPEN_V2: replyV2 = (HgfsReplyOpenV2 *)(HGFS_REQ_PAYLOAD(req)); replySize = sizeof *replyV2; *file = replyV2->file; *lock = replyV2->acquiredLock; break; case HGFS_OP_OPEN: replyV1 = (HgfsReplyOpen *)(HGFS_REQ_PAYLOAD(req)); replySize = sizeof *replyV1; *file = replyV1->file; *lock = HGFS_LOCK_NONE; break; default: /* This really shouldn't happen since we set opUsed ourselves. */ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackOpenReply: unexpected " "OP type encountered\n")); ASSERT(FALSE); return -EPROTO; } if (req->payloadSize != replySize) { /* * The reply to Open is a fixed size. So the size of the payload * really ought to match the expected size of an HgfsReplyOpen[V2]. */ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackOpenReply: wrong packet " "size\n")); return -EPROTO; } return 0; }
static int HgfsDoWrite(HgfsHandle handle, // IN: Handle for this file HgfsDataPacket dataPacket[], // IN: Data description uint32 numEntries, // IN: Number of entries in dataPacket loff_t offset) // IN: Offset to begin writing at { HgfsReq *req; int result = 0; HgfsOp opUsed; uint32 requiredSize = 0; uint32 actualSize = 0; char *payload = NULL; uint32 reqSize; HgfsStatus replyStatus; char *buf; uint32 count; ASSERT(numEntries == 1); count = dataPacket[0].len; req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionWrite; if (opUsed == HGFS_OP_WRITE_FAST_V4) { HgfsRequest *header; HgfsRequestWriteV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestWriteV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->flags = 0; request->offset = offset; request->requiredSize = count; request->reserved = 0; payload = request->payload; requiredSize = request->requiredSize; req->dataPacket = kmalloc(numEntries * sizeof req->dataPacket[0], GFP_KERNEL); if (!req->dataPacket) { LOG(4, (KERN_WARNING "%s: Failed to allocate mem\n", __func__)); result = -ENOMEM; goto out; } memcpy(req->dataPacket, dataPacket, numEntries * sizeof req->dataPacket[0]); req->numEntries = numEntries; reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); req->payloadSize = reqSize; LOG(4, (KERN_WARNING "VMware hgfs: Fast Write V4\n")); } else if (opUsed == HGFS_OP_WRITE_V3) { HgfsRequest *header; HgfsRequestWriteV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestWriteV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->flags = 0; request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *header - sizeof *request, count); LOG(4, (KERN_WARNING "VMware hgfs: Using write V3\n")); 
request->reserved = 0; payload = request->payload; requiredSize = request->requiredSize; reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); req->dataPacket = NULL; req->numEntries = 0; buf = kmap(dataPacket[0].page) + dataPacket[0].offset; memcpy(payload, buf, requiredSize); kunmap(dataPacket[0].page); req->payloadSize = reqSize + requiredSize - 1; } else { HgfsRequestWrite *request; request = (HgfsRequestWrite *)(HGFS_REQ_PAYLOAD(req)); request->header.id = req->id; request->header.op = opUsed; request->file = handle; request->flags = 0; request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *request, count); payload = request->payload; requiredSize = request->requiredSize; reqSize = sizeof *request; req->dataPacket = NULL; req->numEntries = 0; buf = kmap(dataPacket[0].page) + dataPacket[0].offset; memcpy(payload, buf, requiredSize); kunmap(dataPacket[0].page); req->payloadSize = reqSize + requiredSize - 1; } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: res %u\n", result)); switch (result) { case 0: if (opUsed == HGFS_OP_WRITE_V3 || opUsed == HGFS_OP_WRITE_FAST_V4) { actualSize = ((HgfsReplyWriteV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize; } else { actualSize = ((HgfsReplyWrite *)HGFS_REQ_PAYLOAD(req))->actualSize; } /* Return result. */ LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoWrite: wrote %u bytes\n", actualSize)); result = actualSize; break; case -EPROTO: /* Retry with older version(s). Set globally. */ switch (opUsed) { case HGFS_OP_WRITE_FAST_V4: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: Fast Write V4 not " "supported. 
Falling back to V3 write.\n")); if (req->dataPacket) { kfree(req->dataPacket); req->dataPacket = NULL; } hgfsVersionWrite = HGFS_OP_WRITE_V3; goto retry; case HGFS_OP_WRITE_V3: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: Version 3 not " "supported. Falling back to version 1.\n")); hgfsVersionWrite = HGFS_OP_WRITE; goto retry; default: break; } break; default: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: server " "returned error: %d\n", result)); break; } } else if (result == -EIO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: unknown error: " "%d\n", result)); } out: if (req->dataPacket) { kfree(req->dataPacket); } HgfsFreeRequest(req); return result; }
static int HgfsDoRead(HgfsHandle handle, // IN: Handle for this file HgfsDataPacket dataPacket[], // IN/OUT: Data description uint32 numEntries, // IN: Number of entries in dataPacket loff_t offset) // IN: Offset at which to read { HgfsReq *req; HgfsOp opUsed; int result = 0; uint32 actualSize = 0; char *payload = NULL; HgfsStatus replyStatus; char *buf; uint32 count; ASSERT(numEntries == 1); count = dataPacket[0].len; req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionRead; if (opUsed == HGFS_OP_READ_FAST_V4) { HgfsRequest *header; HgfsRequestReadV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestReadV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->offset = offset; request->requiredSize = count; request->reserved = 0; req->dataPacket = kmalloc(numEntries * sizeof req->dataPacket[0], GFP_KERNEL); if (!req->dataPacket) { LOG(4, (KERN_WARNING "%s: Failed to allocate mem\n", __func__)); result = -ENOMEM; goto out; } memcpy(req->dataPacket, dataPacket, numEntries * sizeof req->dataPacket[0]); req->numEntries = numEntries; LOG(4, (KERN_WARNING "VMware hgfs: Fast Read V4\n")); } else if (opUsed == HGFS_OP_READ_V3) { HgfsRequest *header; HgfsRequestReadV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestReadV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *request - sizeof *header, count); request->reserved = 0; req->dataPacket = NULL; req->numEntries = 0; req->payloadSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); } else { HgfsRequestRead *request; request = (HgfsRequestRead *)(HGFS_REQ_PAYLOAD(req)); request->header.id = req->id; request->header.op = opUsed; request->file = handle; 
request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *request, count); req->dataPacket = NULL; req->numEntries = 0; req->payloadSize = sizeof *request; } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); switch (result) { case 0: if (opUsed == HGFS_OP_READ_FAST_V4) { actualSize = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize; } else if (opUsed == HGFS_OP_READ_V3) { actualSize = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize; payload = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->payload; } else { actualSize = ((HgfsReplyRead *)HGFS_REQ_PAYLOAD(req))->actualSize; payload = ((HgfsReplyRead *)HGFS_REQ_PAYLOAD(req))->payload; } /* Sanity check on read size. */ if (actualSize > count) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: read too big!\n")); result = -EPROTO; goto out; } if (!actualSize) { /* We got no bytes. */ LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoRead: server returned " "zero\n")); result = actualSize; goto out; } /* Return result. */ if (opUsed == HGFS_OP_READ_V3 || opUsed == HGFS_OP_READ) { buf = kmap(dataPacket[0].page) + dataPacket[0].offset; ASSERT(buf); memcpy(buf, payload, actualSize); LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoRead: copied %u\n", actualSize)); kunmap(dataPacket[0].page); } result = actualSize; break; case -EPROTO: /* Retry with older version(s). Set globally. */ switch (opUsed) { case HGFS_OP_READ_FAST_V4: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: Fast Read V4 not " "supported. Falling back to V3 Read.\n")); if (req->dataPacket) { kfree(req->dataPacket); req->dataPacket = NULL; } hgfsVersionRead = HGFS_OP_READ_V3; goto retry; case HGFS_OP_READ_V3: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: Version 3 not " "supported. 
Falling back to version 1.\n")); hgfsVersionRead = HGFS_OP_READ; goto retry; default: break; } break; default: break; } } else if (result == -EIO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: unknown error: " "%d\n", result)); } out: if (req->dataPacket) { kfree(req->dataPacket); } HgfsFreeRequest(req); return result; }
/*
 *----------------------------------------------------------------------
 *
 * HgfsPrivateDirOpen --
 *
 *    Sends a SEARCH_OPEN request to the server and returns the search
 *    handle the server allocated. Tries the V3 op first; on -EPROTO it
 *    permanently downgrades hgfsVersionSearchOpen to V1 and retries.
 *
 * Results:
 *    Zero on success (with *handle filled in), negative errno on
 *    failure.
 *
 * Side effects:
 *    May permanently lower hgfsVersionSearchOpen.
 *
 *----------------------------------------------------------------------
 */

static int
HgfsPrivateDirOpen(struct file *file,  // IN: File pointer for this open
                   HgfsHandle *handle) // OUT: Search handle from the server
{
   HgfsReq *req;
   int result;
   HgfsOp opUsed;
   HgfsStatus replyStatus;
   HgfsHandle *replySearch;

   ASSERT(file);

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: out of memory while "
              "getting new request\n"));
      result = -ENOMEM;
      goto out;
   }

retry:
   /*
    * Compute where the search handle will live in the reply before
    * sending; recomputed on retry since the offset differs per version.
    */
   opUsed = hgfsVersionSearchOpen;
   if (opUsed == HGFS_OP_SEARCH_OPEN_V3) {
      replySearch = &((HgfsReplySearchOpenV3 *)HGFS_REP_PAYLOAD_V3(req))->search;
   } else {
      replySearch = &((HgfsReplySearchOpen *)HGFS_REQ_PAYLOAD(req))->search;
   }

   result = HgfsPackDirOpenRequest(file, opUsed, req);
   if (result != 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen error packing request\n"));
      goto out;
   }

   /* Send the request and process the reply. */
   result = HgfsSendRequest(req);
   if (result == 0) {
      /* Get the reply and check return status. */
      replyStatus = HgfsReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);

      switch (result) {
      case 0:
         /* Save the handle value */
         *handle = *replySearch;
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: Handle returned = %u\n", *replySearch));
         break;

      case -EPROTO:
         /* Retry with older version(s). Set globally. */
         if (opUsed == HGFS_OP_SEARCH_OPEN_V3) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: Version 3 not "
                    "supported. Falling back to version 1.\n"));
            hgfsVersionSearchOpen = HGFS_OP_SEARCH_OPEN;
            goto retry;
         }
         /* V1 already in use: the -EPROTO is a genuine server error. */
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: server "
                 "returned error: %d\n", result));
         break;

      default:
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: server "
                 "returned error: %d\n", result));
         break;
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: timed out\n"));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: server "
              "returned error: %d\n", result));
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: unknown error: "
              "%d\n", result));
   }

out:
   /* NOTE(review): req can be NULL here (allocation failure path) —
    * presumably HgfsFreeRequest tolerates NULL; confirm in request.c. */
   HgfsFreeRequest(req);
   return result;
}
/*
 *----------------------------------------------------------------------
 *
 * HgfsUnpackSearchReadReply --
 *
 *    This function abstracts the differences between the SearchRead
 *    reply versions (V1, V2, V3). The caller provides the packet
 *    containing the reply and we populate the AttrInfo with
 *    version-independent information and return a freshly allocated
 *    copy of the entry name (caller frees with kfree).
 *
 *    Note that attr->requestType has already been populated so that we
 *    know which reply version to expect.
 *
 * Results:
 *    0 on success, anything else on failure. On success *entryName is
 *    either a kmalloc'd NUL-terminated copy of the name, or NULL when
 *    the end of the directory was reached (zero-length name).
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsUnpackSearchReadReply(HgfsReq *req,        // IN: Reply packet
                          HgfsAttrInfo *attr,  // IN/OUT: Attributes
                          char **entryName)    // OUT: file name
{
   char *fileName;
   uint32 fileNameLength;
   uint32 replySize;
   int result;

   ASSERT(req);
   ASSERT(attr);

   /* Pull the version-independent attributes out first. */
   result = HgfsUnpackCommonAttr(req, attr);
   if (result != 0) {
      return result;
   }

   switch(attr->requestType) {
   case HGFS_OP_SEARCH_READ_V3: {
      HgfsReplySearchReadV3 *replyV3;
      HgfsDirEntry *dirent;

      /* Currently V3 returns only 1 entry. */
      replyV3 = (HgfsReplySearchReadV3 *)(HGFS_REP_PAYLOAD_V3(req));
      /* Force the count to one entry; this client consumes exactly one
       * dirent per V3 reply. */
      replyV3->count = 1;
      replySize = HGFS_REP_PAYLOAD_SIZE_V3(replyV3) + sizeof *dirent;

      dirent = (HgfsDirEntry *)replyV3->payload;
      fileName = dirent->fileName.name;
      fileNameLength = dirent->fileName.length;
      break;
   }
   case HGFS_OP_SEARCH_READ_V2: {
      HgfsReplySearchReadV2 *replyV2;

      replyV2 = (HgfsReplySearchReadV2 *)(HGFS_REQ_PAYLOAD(req));
      replySize = sizeof *replyV2;
      fileName = replyV2->fileName.name;
      fileNameLength = replyV2->fileName.length;
      break;
   }
   case HGFS_OP_SEARCH_READ: {
      HgfsReplySearchRead *replyV1;

      replyV1 = (HgfsReplySearchRead *)(HGFS_REQ_PAYLOAD(req));
      replySize = sizeof *replyV1;
      fileName = replyV1->fileName.name;
      fileNameLength = replyV1->fileName.length;
      break;
   }
   default:
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackSearchReadReply: unexpected "
              "OP type encountered\n"));
      return -EPROTO;
   }

   /*
    * Make sure name length is legal: bounded by NAME_MAX and by the
    * space left in the reply buffer after the fixed-size reply.
    * NOTE(review): req->bufferSize - replySize is unsigned arithmetic;
    * assumes replySize <= bufferSize for every version — verify.
    */
   if (fileNameLength > NAME_MAX ||
       fileNameLength > req->bufferSize - replySize) {
      return -ENAMETOOLONG;
   }

   /*
    * If the size of the name is valid (meaning the end of the directory has
    * not yet been reached), copy the name to the AttrInfo struct.
    *
    * XXX: This operation happens often and the length of the filename is
    * bounded by NAME_MAX. Perhaps I should just put a statically-sized
    * array in HgfsAttrInfo and use a slab allocator to allocate the struct.
    */
   if (fileNameLength > 0) {
      /* Sanity check on name length. This also guarantees fileName is
       * NUL-terminated at fileNameLength, making the +1 copy below safe. */
      if (fileNameLength != strlen(fileName)) {
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackSearchReadReply: name "
                 "length mismatch %u/%Zu, name \"%s\"\n",
                 fileNameLength, strlen(fileName), fileName));
         return -EPROTO;
      }

      *entryName = kmalloc(fileNameLength + 1, GFP_KERNEL);
      if (*entryName == NULL) {
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackSearchReadReply: out of "
                 "memory allocating filename, ignoring\n"));
         return -ENOMEM;
      }
      /* +1 copies the NUL terminator validated by the strlen check. */
      memcpy(*entryName, fileName, fileNameLength + 1);
   } else {
      /* Zero-length name: end of directory. */
      *entryName = NULL;
   }

   return 0;
}