static int HgfsBdChannelSend(HgfsTransportChannel *channel, // IN: Channel HgfsReq *req) // IN: request to send { char const *replyPacket = NULL; size_t payloadSize; int ret; ASSERT(req); ASSERT(req->state == HGFS_REQ_STATE_UNSENT); ASSERT(req->payloadSize <= req->bufferSize); LOG(8, ("VMware hgfs: %s: backdoor sending.\n", __func__)); payloadSize = req->payloadSize; ret = HgfsBd_Dispatch(channel->priv, HGFS_REQ_PAYLOAD(req), &payloadSize, &replyPacket); if (ret == 0) { LOG(8, ("VMware hgfs: %s: Backdoor reply received.\n", __func__)); /* Request sent successfully. Copy the reply and wake the client. */ ASSERT(replyPacket); ASSERT(payloadSize <= req->bufferSize); memcpy(HGFS_REQ_PAYLOAD(req), replyPacket, payloadSize); req->payloadSize = payloadSize; HgfsCompleteReq(req); } return ret; }
static int HgfsUnpackOpenReply(HgfsReq *req, // IN: Packet with reply inside HgfsOp opUsed, // IN: What request op did we send HgfsHandle *file, // OUT: Handle in reply packet HgfsServerLock *lock) // OUT: The server lock we got { HgfsReplyOpenV3 *replyV3; HgfsReplyOpenV2 *replyV2; HgfsReplyOpen *replyV1; size_t replySize; ASSERT(req); ASSERT(file); ASSERT(lock); switch (opUsed) { case HGFS_OP_OPEN_V3: replyV3 = (HgfsReplyOpenV3 *)HGFS_REP_PAYLOAD_V3(req); replySize = HGFS_REP_PAYLOAD_SIZE_V3(replyV3); *file = replyV3->file; *lock = replyV3->acquiredLock; break; case HGFS_OP_OPEN_V2: replyV2 = (HgfsReplyOpenV2 *)(HGFS_REQ_PAYLOAD(req)); replySize = sizeof *replyV2; *file = replyV2->file; *lock = replyV2->acquiredLock; break; case HGFS_OP_OPEN: replyV1 = (HgfsReplyOpen *)(HGFS_REQ_PAYLOAD(req)); replySize = sizeof *replyV1; *file = replyV1->file; *lock = HGFS_LOCK_NONE; break; default: /* This really shouldn't happen since we set opUsed ourselves. */ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackOpenReply: unexpected " "OP type encountered\n")); ASSERT(FALSE); return -EPROTO; } if (req->payloadSize != replySize) { /* * The reply to Open is a fixed size. So the size of the payload * really ought to match the expected size of an HgfsReplyOpen[V2]. */ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackOpenReply: wrong packet " "size\n")); return -EPROTO; } return 0; }
static int HgfsPackQueryVolumeRequest(const char *path, // IN: File pointer for this open HgfsOp opUsed, // IN: Op to be used. HgfsReq *req) // IN/OUT: Packet to write into { char *name; uint32 *nameLength; size_t requestSize; int result; ASSERT(req); switch (opUsed) { case HGFS_OP_QUERY_VOLUME_INFO_V3: { HgfsRequestQueryVolumeV3 *requestV3 = HgfsGetRequestPayload(req); /* We'll use these later. */ name = requestV3->fileName.name; nameLength = &requestV3->fileName.length; requestV3->fileName.flags = 0; requestV3->fileName.fid = HGFS_INVALID_HANDLE; requestV3->fileName.caseType = HGFS_FILE_NAME_CASE_SENSITIVE; requestV3->reserved = 0; requestSize = sizeof(*requestV3) + HgfsGetRequestHeaderSize(); break; } case HGFS_OP_QUERY_VOLUME_INFO: { HgfsRequestQueryVolume *request; request = (HgfsRequestQueryVolume *)(HGFS_REQ_PAYLOAD(req)); /* We'll use these later. */ name = request->fileName.name; nameLength = &request->fileName.length; requestSize = sizeof *request; break; } default: LOG(4, ("Unexpected OP type encountered. opUsed = %d\n", opUsed)); return -EPROTO; } /* Convert to CP name. */ result = CPName_ConvertTo(path, HGFS_LARGE_PACKET_MAX - (requestSize - 1), name); if (result < 0) { LOG(4, ("CP conversion failed.\n")); return -EINVAL; } *nameLength = (uint32) result; req->payloadSize = requestSize + result; /* Fill in header here as payloadSize needs to be there. */ HgfsPackHeader(req, opUsed); return 0; }
/*
 * HgfsGetReplyStatus --
 *
 *    Pull the status code out of a reply packet, coping with both the
 *    session-based (HgfsHeader) and legacy (HgfsReply) header formats.
 *    A reply too small for the new header format downgrades the session
 *    flag; a stale-session status triggers an attempt to recreate the
 *    session (the stale status itself is still returned to the caller).
 */

HgfsStatus
HgfsGetReplyStatus(HgfsReq *req)  // IN
{
   HgfsStatus status;

   if (req->payloadSize < sizeof (HgfsReply)) {
      LOG(4, ("Malformed packet received.\n"));
      status = HGFS_STATUS_PROTOCOL_ERROR;
      goto out;
   }

   if (gState->sessionEnabled && req->payloadSize < sizeof (HgfsHeader)) {
      /*
       * We have enabled an HGFS protocol session which uses the new header
       * format. And an HGFS protocol session uses the new header format only.
       * A reply without the new header indicates a message with the
       * old reply header format.
       */
      gState->sessionEnabled = FALSE;
   }

   if (!gState->sessionEnabled) {
      /* Legacy header: the status lives in the old HgfsReply layout. */
      status = ((HgfsReply *)(HGFS_REQ_PAYLOAD(req)))->status;
      goto out;
   }

   status = ((HgfsHeader *)(HGFS_REQ_PAYLOAD(req)))->status;
   if (status == HGFS_STATUS_STALE_SESSION) {
      LOG(4, ("Session stale! Try to recreate session ...\n"));
      HgfsCreateSession();
      /*
       * XXX: User might want to retry it later, and status will not be
       * changed here. But due to the fail safe directory access like
       * searching for dynamic library path, user hardly ever notice
       * the failure.
       */
   }

out:
   LOG(4, ("Exit(status = %d)\n", status));
   return status;
}
int HgfsDestroySession(void) { HgfsReq *req; int result; HgfsStatus status; HgfsOp opUsed; LOG(4, ("Entry()\n")); if (!gState->sessionEnabled) { return 0; } req = HgfsGetNewRequest(); if (!req) { LOG(4, ("Out of memory while getting new request.\n")); result = -ENOMEM; goto out; } opUsed = hgfsVersionDestroySession; result = HgfsPackDestroySessionRequest(opUsed, req); if (result != 0) { LOG(4, ("Error packing request.\n")); goto out; } result = HgfsSendRequest(req); if (result == 0) { LOG(6, ("Got reply.\n")); status = HgfsGetReplyStatus(req); result = HgfsStatusConvertToLinux(status); switch (result) { case 0: status = HgfsDestroySessionProcessResult(HGFS_REQ_PAYLOAD(req), req->payloadSize); ASSERT(status == HGFS_STATUS_SUCCESS); break; case -EPROTO: /* Fallthrough. */ default: LOG(6, ("Session was not created, error %d\n", result)); break; } } else if (result == -EIO) { LOG(4, ("Timed out. error: %d\n", result)); } else if (result == -EPROTO) { LOG(4, ("Server returned error: %d\n", result)); } else { LOG(4, ("Unknown error: %d\n", result)); } out: HgfsFreeRequest(req); LOG(4, ("Exit(%d)\n", result)); return result; }
/*
 * HgfsReplyStatus --
 *
 *    Return the status field from a legacy-format (HgfsReply) reply
 *    packet sitting in the request buffer.
 */

HgfsStatus
HgfsReplyStatus(HgfsReq *req)  // IN
{
   return ((HgfsReply *)(HGFS_REQ_PAYLOAD(req)))->status;
}
HgfsStatus HgfsPackHeader(HgfsReq *req, // IN/OUT: HgfsOp opUsed) // IN { if (gState->sessionEnabled) { /* use new header */ HgfsHeader *header = (HgfsHeader*)HGFS_REQ_PAYLOAD(req); LOG(4, ("sessionEnabled, use HgfsHeader. opUsed = %d\n", opUsed)); header->version = gState->headerVersion; header->dummy = HGFS_OP_NEW_HEADER; header->headerSize = sizeof *header; header->packetSize = req->payloadSize; header->requestId = req->id; header->op = opUsed; header->sessionId = gState->sessionId; header->flags = HGFS_PACKET_FLAG_REQUEST; /* * Currently unused fields which can be used in version 2 and later. * Version 1 didn't zero these fields, hence the server cannot determine * their validity. */ header->status = 0; header->information = 0; header->reserved = 0; memset(&header->reserved1[0], 0, sizeof header->reserved1); } else { HgfsRequest *header = (HgfsRequest*)HGFS_REQ_PAYLOAD(req); LOG(4, ("not sessionEnabled, use HgfsRequest. opUsed = %d\n", opUsed)); header->id = req->id; header->op = opUsed; } return HGFS_STATUS_SUCCESS; }
/*
 * HgfsCompleteReq --
 *
 *    Copy a reply packet into the request's buffer and mark the request
 *    completed, then unlink it from any pending-request list it is on.
 *
 *    Note: the reply bytes and payloadSize are written BEFORE the state
 *    transition to COMPLETED, so a waiter that observes the completed
 *    state sees a fully-populated reply. Do not reorder.
 */

void
HgfsCompleteReq(HgfsReq *req,       // IN: Request
                char const *reply,  // IN: Reply packet
                size_t replySize)   // IN: Size of reply packet
{
   ASSERT(req);
   ASSERT(reply);
   ASSERT(replySize <= HGFS_LARGE_PACKET_MAX);

   /* Hand the reply bytes to the requester. */
   memcpy(HGFS_REQ_PAYLOAD(req), reply, replySize);
   req->payloadSize = replySize;
   req->state = HGFS_REQ_STATE_COMPLETED;

   /* Drop the request from the pending list, if it is queued on one. */
   if (!list_empty(&req->list)) {
      list_del_init(&req->list);
   }
}
static int HgfsBdChannelSend(HgfsTransportChannel *channel, // IN: Channel HgfsReq *req) // IN: request to send { char const *replyPacket = NULL; size_t payloadSize; int ret; ASSERT(req); ASSERT(req->state == HGFS_REQ_STATE_UNSENT); ASSERT(req->payloadSize <= HGFS_LARGE_PACKET_MAX); pthread_mutex_lock(&channel->connLock); if (channel->status != HGFS_CHANNEL_CONNECTED) { LOG(6, ("Backdoor not opened.\n")); pthread_mutex_unlock(&channel->connLock); return -ENOTCONN; } payloadSize = req->payloadSize; LOG(8, ("Backdoor sending.\n")); ret = HgfsBd_Dispatch(channel->priv, HGFS_REQ_PAYLOAD(req), &payloadSize, &replyPacket); if (ret == 0) { LOG(8, ("Backdoor reply received.\n")); /* Request sent successfully. Copy the reply and wake the client. */ ASSERT(replyPacket); HgfsCompleteReq(req, replyPacket, payloadSize); } else { /* Map rpc failure to EIO. */ ret = -EIO; } pthread_mutex_unlock(&channel->connLock); return ret; }
/*
 * HgfsStatfs --
 *
 *    Fill in a statvfs structure for the HGFS share containing "path" by
 *    sending a query-volume request to the server. Falls back from the V3
 *    opcode to V1 when the server rejects V3 (fallback is recorded
 *    globally in hgfsVersionQueryVolumeInfo).
 *
 *    Returns 0 on success (including the deliberate all-zeroes result for
 *    old servers that refuse per-share statfs), negative errno otherwise.
 */

int
HgfsStatfs(const char* path,       // IN : Path to the file
           struct statvfs *stat)   // OUT: Stat to fill in
{
   HgfsReq *req;
   int result = 0;
   HgfsOp opUsed;
   HgfsStatus replyStatus;
   uint64 freeBytes;
   uint64 totalBytes;

   LOG(6, ("Entered.\n"));

   /* Start from a clean slate; unset fields stay zero on all paths. */
   memset(stat, 0, sizeof *stat);

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, ("Out of memory while getting new request.\n"));
      result = -ENOMEM;
      goto out;
   }

retry:
   /* Re-read on every attempt: a failed attempt may have downgraded it. */
   opUsed = hgfsVersionQueryVolumeInfo;
   result = HgfsPackQueryVolumeRequest(path, opUsed, req);
   if (result != 0) {
      LOG(4, ("Error packing request.\n"));
      goto out;
   }

   result = HgfsSendRequest(req);
   if (result == 0) {
      LOG(6, ("Got reply.\n"));
      replyStatus = HgfsGetReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);

      /*
       * If the statfs succeeded on the server, copy the stats
       * into the statvfs struct, otherwise return an error.
       */
      switch (result) {
      case 0:
         stat->f_bsize = HGFS_BLOCKSIZE;

         if (opUsed == HGFS_OP_QUERY_VOLUME_INFO_V3) {
            HgfsReplyQueryVolumeV3 * replyV3 = HgfsGetReplyPayload(req);
            totalBytes = replyV3->totalBytes;
            freeBytes = replyV3->freeBytes;
         } else {
            totalBytes = ((HgfsReplyQueryVolume *)HGFS_REQ_PAYLOAD(req))->totalBytes;
            freeBytes = ((HgfsReplyQueryVolume *)HGFS_REQ_PAYLOAD(req))->freeBytes;
         }

         /* Round byte counts up to whole blocks. */
         stat->f_blocks = (totalBytes + HGFS_BLOCKSIZE - 1) / HGFS_BLOCKSIZE;
         stat->f_bfree = (freeBytes + HGFS_BLOCKSIZE - 1) / HGFS_BLOCKSIZE;
         stat->f_bavail = stat->f_bfree;
         break;

      case -EPERM:
         /*
          * We're cheating! This will cause statfs will return success.
          * We're doing this because an old server will complain when it gets
          * a statfs on a per-share mount. Rather than have 'df' spit an
          * error, let's just return all zeroes.
          */
         result = 0;
         break;

      case -EPROTO:
         /* Retry with older version(s). Set globally. */
         if (opUsed == HGFS_OP_QUERY_VOLUME_INFO_V3) {
            LOG(4, ("Version 3 not supported. Falling back to version 1.\n"));
            hgfsVersionQueryVolumeInfo = HGFS_OP_QUERY_VOLUME_INFO;
            goto retry;
         }
         break;

      default:
         break;
      }
   } else if (result == -EIO) {
      LOG(4, ("Timed out. error: %d\n", result));
   } else if (result == -EPROTO) {
      LOG(4, ("Server returned error: %d\n", result));
   } else {
      LOG(4, ("Unknown error: %d\n", result));
   }

out:
   /* NOTE(review): assumes HgfsFreeRequest tolerates NULL — matches its use
    * on the ENOMEM path elsewhere in this file; confirm in its definition. */
   HgfsFreeRequest(req);
   return result;
}
/*
 * HgfsPackDirOpenRequest --
 *
 *    Set up a search-open (directory open) request for the directory
 *    backing "file", in the layout for the given op version. Builds the
 *    dentry path into the request buffer, converts it in place to
 *    cross-platform name format, and sets req->payloadSize.
 *
 *    Returns 0 on success, -EPROTO on an unexpected op, -EINVAL when path
 *    construction or CP name conversion fails.
 */

static int
HgfsPackDirOpenRequest(struct file *file, // IN: File pointer for this open
                       HgfsOp opUsed,     // IN: Op to be used
                       HgfsReq *req)      // IN/OUT: Packet to write into
{
   char *name;
   uint32 *nameLength;
   size_t requestSize;
   int result;

   ASSERT(file);
   ASSERT(req);

   switch (opUsed) {
   case HGFS_OP_SEARCH_OPEN_V3: {
      HgfsRequest *requestHeader;
      HgfsRequestSearchOpenV3 *requestV3;

      requestHeader = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req));
      requestHeader->op = opUsed;
      requestHeader->id = req->id;

      requestV3 = (HgfsRequestSearchOpenV3 *)HGFS_REQ_PAYLOAD_V3(req);

      /* We'll use these later. */
      name = requestV3->dirName.name;
      nameLength = &requestV3->dirName.length;
      requestV3->dirName.flags = 0;
      requestV3->dirName.caseType = HGFS_FILE_NAME_CASE_SENSITIVE;
      requestV3->dirName.fid = HGFS_INVALID_HANDLE;
      requestV3->reserved = 0;
      requestSize = HGFS_REQ_PAYLOAD_SIZE_V3(requestV3);
      break;
   }
   case HGFS_OP_SEARCH_OPEN: {
      HgfsRequestSearchOpen *request;

      request = (HgfsRequestSearchOpen *)(HGFS_REQ_PAYLOAD(req));
      request->header.op = opUsed;
      request->header.id = req->id;

      /* We'll use these later. */
      name = request->dirName.name;
      nameLength = &request->dirName.length;
      requestSize = sizeof *request;
      break;
   }
   default:
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackDirOpenRequest: unexpected "
              "OP type encountered\n"));
      return -EPROTO;
   }

   /* Build full name to send to server. */
   /* f_dentry was removed in Linux 3.19; use f_path.dentry there. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
   if (HgfsBuildPath(name, req->bufferSize - (requestSize - 1),
                     file->f_dentry) < 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackDirOpenRequest: build path failed\n"));
      return -EINVAL;
   }
#else
   if (HgfsBuildPath(name, req->bufferSize - (requestSize - 1),
                     file->f_path.dentry) < 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackDirOpenRequest: build path failed\n"));
      return -EINVAL;
   }
#endif
   LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackDirOpenRequest: opening \"%s\"\n",
           name));

   /* Convert to CP name. */
   /* In-place conversion: source and destination are the same buffer. */
   result = CPName_ConvertTo(name,
                             req->bufferSize - (requestSize - 1),
                             name);
   if (result < 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackDirOpenRequest: CP conversion failed\n"));
      return -EINVAL;
   }

   *nameLength = (uint32) result;
   req->payloadSize = requestSize + result;

   return 0;
}
/*
 * HgfsPrivateDirOpen --
 *
 *    Open a directory search on the server for "file" and return the
 *    server's search handle. Falls back from the V3 search-open opcode to
 *    V1 when the server rejects V3 (recorded globally in
 *    hgfsVersionSearchOpen).
 *
 *    Returns 0 on success with *handle set, negative errno on failure.
 */

static int
HgfsPrivateDirOpen(struct file *file,   // IN: File pointer for this open
                   HgfsHandle *handle)  // IN: Hgfs handle
{
   HgfsReq *req;
   int result;
   HgfsOp opUsed;
   HgfsStatus replyStatus;
   HgfsHandle *replySearch;

   ASSERT(file);

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: out of memory while "
              "getting new request\n"));
      result = -ENOMEM;
      goto out;
   }

retry:
   /* Re-read on every attempt: a failed attempt may have downgraded it. */
   opUsed = hgfsVersionSearchOpen;

   /*
    * Precompute where the search handle will sit in the reply; the reply
    * reuses the request buffer, so this pointer is valid only after a
    * successful send/receive below.
    */
   if (opUsed == HGFS_OP_SEARCH_OPEN_V3) {
      replySearch = &((HgfsReplySearchOpenV3 *)HGFS_REP_PAYLOAD_V3(req))->search;
   } else {
      replySearch = &((HgfsReplySearchOpen *)HGFS_REQ_PAYLOAD(req))->search;
   }

   result = HgfsPackDirOpenRequest(file, opUsed, req);
   if (result != 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen error packing request\n"));
      goto out;
   }

   /* Send the request and process the reply. */
   result = HgfsSendRequest(req);
   if (result == 0) {
      /* Get the reply and check return status. */
      replyStatus = HgfsReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);

      switch (result) {
      case 0:
         /* Save the handle value */
         *handle = *replySearch;
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: Handle returned = %u\n",
                 *replySearch));
         break;
      case -EPROTO:
         /* Retry with older version(s). Set globally. */
         if (opUsed == HGFS_OP_SEARCH_OPEN_V3) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: Version 3 not "
                    "supported. Falling back to version 1.\n"));
            hgfsVersionSearchOpen = HGFS_OP_SEARCH_OPEN;
            goto retry;
         }
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: server "
                 "returned error: %d\n", result));
         break;
      default:
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: server "
                 "returned error: %d\n", result));
         break;
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: timed out\n"));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: server "
              "returned error: %d\n", result));
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: unknown error: "
              "%d\n", result));
   }

out:
   HgfsFreeRequest(req);
   return result;
}
/*
 * HgfsPackOpenRequest --
 *
 *    Set up an open request for "file", in the layout for the given op
 *    version (V3, V2, or V1). Translates the Linux open flags into HGFS
 *    mode/flags, copies the inode permission bits into the request,
 *    builds and CP-converts the dentry path, and sets req->payloadSize.
 *
 *    Returns 0 on success, -EPROTO on an unexpected op, -EINVAL when
 *    mode/flag translation, path construction, or CP conversion fails.
 */

static int
HgfsPackOpenRequest(struct inode *inode, // IN: Inode of the file to open
                    struct file *file,   // IN: File pointer for this open
                    HgfsOp opUsed,       // IN: Op to use
                    HgfsReq *req)        // IN/OUT: Packet to write into
{
   char *name;
   uint32 *nameLength;
   size_t requestSize;
   int result;

   ASSERT(inode);
   ASSERT(file);
   ASSERT(req);

   switch (opUsed) {
   case HGFS_OP_OPEN_V3: {
      HgfsRequest *requestHeader;
      HgfsRequestOpenV3 *requestV3;

      requestHeader = (HgfsRequest *)HGFS_REQ_PAYLOAD(req);
      requestHeader->op = opUsed;
      requestHeader->id = req->id;

      requestV3 = (HgfsRequestOpenV3 *)HGFS_REQ_PAYLOAD_V3(req);
      requestSize = HGFS_REQ_PAYLOAD_SIZE_V3(requestV3);

      /* We'll use these later. */
      name = requestV3->fileName.name;
      nameLength = &requestV3->fileName.length;

      requestV3->mask = HGFS_FILE_OPEN_MASK;

      /* Linux clients need case-sensitive lookups. */
      requestV3->fileName.flags = 0;
      requestV3->fileName.caseType = HGFS_FILE_NAME_CASE_SENSITIVE;
      requestV3->fileName.fid = HGFS_INVALID_HANDLE;

      /* Set mode. */
      result = HgfsGetOpenMode(file->f_flags);
      if (result < 0) {
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: failed to get "
                 "open mode\n"));
         return -EINVAL;
      }
      requestV3->mode = result;

      /* Set flags. */
      result = HgfsGetOpenFlags(file->f_flags);
      if (result < 0) {
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: failed to get "
                 "open flags\n"));
         return -EINVAL;
      }
      requestV3->flags = result;

      /* Set permissions. */
      /* setuid/setgid/sticky bits travel in their own field, shifted down. */
      requestV3->specialPerms =
         (inode->i_mode & (S_ISUID | S_ISGID | S_ISVTX)) >> 9;
      requestV3->ownerPerms = (inode->i_mode & S_IRWXU) >> 6;
      requestV3->groupPerms = (inode->i_mode & S_IRWXG) >> 3;
      requestV3->otherPerms = (inode->i_mode & S_IRWXO);

      /* XXX: Request no lock for now. */
      requestV3->desiredLock = HGFS_LOCK_NONE;
      requestV3->reserved1 = 0;
      requestV3->reserved2 = 0;
      break;
   }
   case HGFS_OP_OPEN_V2: {
      HgfsRequestOpenV2 *requestV2;

      requestV2 = (HgfsRequestOpenV2 *)(HGFS_REQ_PAYLOAD(req));
      requestV2->header.op = opUsed;
      requestV2->header.id = req->id;

      /* We'll use these later. */
      name = requestV2->fileName.name;
      nameLength = &requestV2->fileName.length;
      requestSize = sizeof *requestV2;

      requestV2->mask = HGFS_FILE_OPEN_MASK;

      /* Set mode. */
      result = HgfsGetOpenMode(file->f_flags);
      if (result < 0) {
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: failed to get "
                 "open mode\n"));
         return -EINVAL;
      }
      requestV2->mode = result;

      /* Set flags. */
      result = HgfsGetOpenFlags(file->f_flags);
      if (result < 0) {
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: failed to get "
                 "open flags\n"));
         return -EINVAL;
      }
      requestV2->flags = result;

      /* Set permissions. */
      requestV2->specialPerms =
         (inode->i_mode & (S_ISUID | S_ISGID | S_ISVTX)) >> 9;
      requestV2->ownerPerms = (inode->i_mode & S_IRWXU) >> 6;
      requestV2->groupPerms = (inode->i_mode & S_IRWXG) >> 3;
      requestV2->otherPerms = (inode->i_mode & S_IRWXO);

      /* XXX: Request no lock for now. */
      requestV2->desiredLock = HGFS_LOCK_NONE;
      break;
   }
   case HGFS_OP_OPEN: {
      HgfsRequestOpen *request;

      request = (HgfsRequestOpen *)(HGFS_REQ_PAYLOAD(req));
      request->header.op = opUsed;
      request->header.id = req->id;

      /* We'll use these later. */
      name = request->fileName.name;
      nameLength = &request->fileName.length;
      requestSize = sizeof *request;

      /* Set mode. */
      result = HgfsGetOpenMode(file->f_flags);
      if (result < 0) {
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: failed to get "
                 "open mode\n"));
         return -EINVAL;
      }
      request->mode = result;

      /* Set flags. */
      result = HgfsGetOpenFlags(file->f_flags);
      if (result < 0) {
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: failed to get "
                 "open flags\n"));
         return -EINVAL;
      }
      request->flags = result;

      /* Set permissions. */
      /* Version 1 carries only the owner permission bits. */
      request->permissions = (inode->i_mode & S_IRWXU) >> 6;
      break;
   }
   default:
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: unexpected "
              "OP type encountered\n"));
      return -EPROTO;
   }

   /* Build full name to send to server. */
   if (HgfsBuildPath(name, req->bufferSize - (requestSize - 1),
                     file->f_dentry) < 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: build path "
              "failed\n"));
      return -EINVAL;
   }
   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: opening \"%s\", "
           "flags %o, create perms %o\n", name, file->f_flags, file->f_mode));

   /* Convert to CP name. */
   /* In-place conversion: source and destination are the same buffer. */
   result = CPName_ConvertTo(name,
                             req->bufferSize - (requestSize - 1),
                             name);
   if (result < 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: CP conversion "
              "failed\n"));
      return -EINVAL;
   }

   *nameLength = (uint32) result;
   req->payloadSize = requestSize + result;

   return 0;
}
/*
 * HgfsRelease --
 *
 *    Release (close) a file: flush dirty pages, free the per-file HGFS
 *    info, and send a close request to the server for the file's handle.
 *    Falls back from the V3 close opcode to V1 when the server rejects V3
 *    (recorded globally in hgfsVersionClose).
 *
 *    Returns 0 on success, negative errno on failure.
 */

static int
HgfsRelease(struct inode *inode,  // IN: Inode that this file points to
            struct file *file)    // IN: File that is getting released
{
   HgfsReq *req;
   HgfsHandle handle;
   HgfsOp opUsed;
   HgfsStatus replyStatus;
   int result = 0;

   ASSERT(inode);
   ASSERT(file);
   ASSERT(file->f_dentry);
   ASSERT(file->f_dentry->d_sb);

   /* Grab the server handle before the file info is released below. */
   handle = FILE_GET_FI_P(file)->handle;
   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsRelease: close fh %u\n", handle));

   /*
    * This may be our last open handle to an inode, so we should flush our
    * dirty pages before closing it.
    */
   compat_filemap_write_and_wait(inode->i_mapping);

   HgfsReleaseFileInfo(file);

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: out of memory while "
              "getting new request\n"));
      result = -ENOMEM;
      goto out;
   }

retry:
   /* Re-read on every attempt: a failed attempt may have downgraded it. */
   opUsed = hgfsVersionClose;
   if (opUsed == HGFS_OP_CLOSE_V3) {
      HgfsRequest *header;
      HgfsRequestCloseV3 *request;

      header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req));
      header->id = req->id;
      header->op = opUsed;

      request = (HgfsRequestCloseV3 *)(HGFS_REQ_PAYLOAD_V3(req));
      request->file = handle;
      request->reserved = 0;
      req->payloadSize = HGFS_REQ_PAYLOAD_SIZE_V3(request);
   } else {
      HgfsRequestClose *request;

      request = (HgfsRequestClose *)(HGFS_REQ_PAYLOAD(req));
      request->header.id = req->id;
      request->header.op = opUsed;
      request->file = handle;
      req->payloadSize = sizeof *request;
   }

   /* Send the request and process the reply. */
   result = HgfsSendRequest(req);
   if (result == 0) {
      /* Get the reply. */
      replyStatus = HgfsReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);

      switch (result) {
      case 0:
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: released handle %u\n",
                 handle));
         break;
      case -EPROTO:
         /* Retry with older version(s). Set globally. */
         if (opUsed == HGFS_OP_CLOSE_V3) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: Version 3 not "
                    "supported. Falling back to version 1.\n"));
            hgfsVersionClose = HGFS_OP_CLOSE;
            goto retry;
         }
         break;
      default:
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: failed handle %u\n",
                 handle));
         break;
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: timed out\n"));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: server "
              "returned error: %d\n", result));
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: unknown error: "
              "%d\n", result));
   }

out:
   HgfsFreeRequest(req);
   return result;
}
/*
 * HgfsGetNextDirEntry --
 *
 *    Fetch the directory entry at "offset" from an open server search.
 *    Falls back from V3 to V2 to V1 of the search-read opcode when the
 *    server rejects the newer version (recorded globally in
 *    hgfsVersionSearchRead). Sets *done when the search is exhausted.
 *
 *    Returns 0 on success (with *entryName NULL and *done TRUE at end of
 *    directory), negative errno on failure.
 */

static int
HgfsGetNextDirEntry(HgfsSuperInfo *si,       // IN: Superinfo for this SB
                    HgfsHandle searchHandle, // IN: Handle of dir
                    uint32 offset,           // IN: Offset of next dentry to get
                    HgfsAttrInfo *attr,      // OUT: File attributes of dentry
                    char **entryName,        // OUT: File name
                    Bool *done)              // OUT: Set true when there are
                                             // no more dentries
{
   HgfsReq *req;
   HgfsOp opUsed;
   HgfsStatus replyStatus;
   int result = 0;

   ASSERT(si);
   ASSERT(attr);
   ASSERT(done);

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: out of memory "
              "while getting new request\n"));
      return -ENOMEM;
   }

retry:
   /* Re-read on every attempt: a failed attempt may have downgraded it. */
   opUsed = hgfsVersionSearchRead;
   if (opUsed == HGFS_OP_SEARCH_READ_V3) {
      HgfsRequest *header;
      HgfsRequestSearchReadV3 *request;

      header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req));
      /* Record the op in attr too: the unpack path dispatches on it. */
      header->op = attr->requestType = opUsed;
      header->id = req->id;

      request = (HgfsRequestSearchReadV3 *)(HGFS_REQ_PAYLOAD_V3(req));
      request->search = searchHandle;
      request->offset = offset;
      request->flags = 0;
      request->reserved = 0;
      req->payloadSize = HGFS_REQ_PAYLOAD_SIZE_V3(request);
   } else {
      HgfsRequestSearchRead *request;

      request = (HgfsRequestSearchRead *)(HGFS_REQ_PAYLOAD(req));
      /* Record the op in attr too: the unpack path dispatches on it. */
      request->header.op = attr->requestType = opUsed;
      request->header.id = req->id;
      request->search = searchHandle;
      request->offset = offset;
      req->payloadSize = sizeof *request;
   }

   /* Send the request and process the reply. */
   result = HgfsSendRequest(req);
   if (result == 0) {
      LOG(6, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: got reply\n"));
      replyStatus = HgfsReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);

      switch(result) {
      case 0:
         result = HgfsUnpackSearchReadReply(req, attr, entryName);
         if (result == 0 && *entryName == NULL) {
            /* We're at the end of the directory. */
            LOG(6, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: end of "
                    "dir\n"));
            *done = TRUE;
         }
         break;

      case -EPROTO:
         /* Retry with older version(s). Set globally. */
         if (attr->requestType == HGFS_OP_SEARCH_READ_V3) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: Version 3 "
                    "not supported. Falling back to version 2.\n"));
            hgfsVersionSearchRead = HGFS_OP_SEARCH_READ_V2;
            goto retry;
         } else if (attr->requestType == HGFS_OP_SEARCH_READ_V2) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: Version 2 "
                    "not supported. Falling back to version 1.\n"));
            hgfsVersionSearchRead = HGFS_OP_SEARCH_READ;
            goto retry;
         }

         /* Fallthrough. */
      default:
         break;
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: timed out\n"));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: server "
              "returned error: %d\n", result));
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: unknown error: "
              "%d\n", result));
   }

   HgfsFreeRequest(req);
   return result;
}
/*
 * HgfsPrivateDirRelease --
 *
 *    Close a directory search on the server for the given search handle.
 *    Falls back from the V3 search-close opcode to V1 when the server
 *    rejects V3 (recorded globally in hgfsVersionSearchClose).
 *
 *    Returns 0 on success, negative errno on failure.
 */

static int
HgfsPrivateDirRelease(struct file *file,  // IN: File for the dir getting released
                      HgfsHandle handle)  // IN: Hgfs handle
{
   HgfsReq *req;
   HgfsStatus replyStatus;
   HgfsOp opUsed;
   int result = 0;

   ASSERT(file);
   ASSERT(file->f_dentry);
   ASSERT(file->f_dentry->d_sb);

   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: close fh %u\n",
           handle));

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: out of memory while "
              "getting new request\n"));
      result = -ENOMEM;
      goto out;
   }

retry:
   /* Re-read on every attempt: a failed attempt may have downgraded it. */
   opUsed = hgfsVersionSearchClose;
   if (opUsed == HGFS_OP_SEARCH_CLOSE_V3) {
      HgfsRequestSearchCloseV3 *request;
      HgfsRequest *header;

      header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req));
      header->id = req->id;
      header->op = opUsed;

      request = (HgfsRequestSearchCloseV3 *)(HGFS_REQ_PAYLOAD_V3(req));
      request->search = handle;
      request->reserved = 0;
      req->payloadSize = HGFS_REQ_PAYLOAD_SIZE_V3(request);
   } else {
      HgfsRequestSearchClose *request;

      request = (HgfsRequestSearchClose *)(HGFS_REQ_PAYLOAD(req));
      request->header.id = req->id;
      request->header.op = opUsed;
      request->search = handle;
      req->payloadSize = sizeof *request;
   }

   /* Send the request and process the reply. */
   result = HgfsSendRequest(req);
   if (result == 0) {
      /* Get the reply. */
      replyStatus = HgfsReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);

      switch (result) {
      case 0:
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: release handle %u\n",
                 handle));
         break;
      case -EPROTO:
         /* Retry with older version(s). Set globally. */
         if (opUsed == HGFS_OP_SEARCH_CLOSE_V3) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: Version 3 not "
                    "supported. Falling back to version 1.\n"));
            hgfsVersionSearchClose = HGFS_OP_SEARCH_CLOSE;
            goto retry;
         }
         break;
      default:
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: failed handle %u\n",
                 handle));
         break;
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: timed out\n"));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: server "
              "returned error: %d\n", result));
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: unknown error: "
              "%d\n", result));
   }

out:
   HgfsFreeRequest(req);
   return result;
}
/*
 * HgfsPackSymlinkCreateRequest --
 *
 *    Set up a create-symlink request: the link's own path (CP-converted)
 *    followed immediately by the link target (CPName-lite converted), for
 *    either the V3 or V1 opcode layout. Sets req->payloadSize and packs
 *    the header last, since the header needs the final payload size.
 *
 *    Returns 0 on success, -EPROTO on an unexpected op, -EINVAL when a
 *    name does not fit or fails conversion.
 */

static int
HgfsPackSymlinkCreateRequest(const char* symlink,  // IN: path of the link
                             const char *symname,  // IN: Target name
                             HgfsOp opUsed,        // IN: Op to be used
                             HgfsReq *req)         // IN/OUT: Packet to write into
{
   HgfsRequestSymlinkCreateV3 *requestV3 = NULL;
   HgfsRequestSymlinkCreate *request = NULL;
   char *symlinkName;
   uint32 *symlinkNameLength;
   char *targetName;
   uint32 *targetNameLength;
   size_t targetNameBytes;
   size_t requestSize;
   int result;

   switch (opUsed) {
   case HGFS_OP_CREATE_SYMLINK_V3: {
      requestV3 = HgfsGetRequestPayload(req);

      /* We'll use these later. */
      symlinkName = requestV3->symlinkName.name;
      symlinkNameLength = &requestV3->symlinkName.length;
      requestV3->symlinkName.flags = 0;
      requestV3->symlinkName.fid = HGFS_INVALID_HANDLE;
      requestV3->symlinkName.caseType = HGFS_FILE_NAME_CASE_SENSITIVE;
      requestV3->reserved = 0;
      requestSize = sizeof(*requestV3) + HgfsGetRequestHeaderSize();
      break;
   }
   case HGFS_OP_CREATE_SYMLINK: {
      request = (HgfsRequestSymlinkCreate *)(HGFS_REQ_PAYLOAD(req));

      /* We'll use these later. */
      symlinkName = request->symlinkName.name;
      symlinkNameLength = &request->symlinkName.length;
      requestSize = sizeof *request;
      break;
   }
   default:
      LOG(4, ("Unexpected OP type encountered. opUsed = %d\n", opUsed));
      return -EPROTO;
   }

   /* Convert symlink name to CP format. */
   result = CPName_ConvertTo(symlink,
                             HGFS_LARGE_PACKET_MAX - (requestSize - 1),
                             symlinkName);
   if (result < 0) {
      LOG(4, ("SymlinkName CP conversion failed.\n"));
      return -EINVAL;
   }

   *symlinkNameLength = result;
   req->payloadSize = requestSize + result;

   /*
    * Note the different buffer length. This is because HgfsRequestSymlink
    * contains two filenames, and once we place the first into the packet we
    * must account for it when determining the amount of buffer available for
    * the second.
    *
    * Also note that targetNameBytes accounts for the NUL character. Once
    * we've converted it to CP name, it won't be NUL-terminated and the length
    * of the string in the packet itself won't account for it.
    */
   if (opUsed == HGFS_OP_CREATE_SYMLINK_V3) {
      HgfsFileNameV3 *fileNameP;
      /* The target name struct starts right after the converted link name. */
      fileNameP = (HgfsFileNameV3 *)((char *)&requestV3->symlinkName +
                                     sizeof requestV3->symlinkName + result);
      targetName = fileNameP->name;
      targetNameLength = &fileNameP->length;
      fileNameP->flags = 0;
      fileNameP->fid = HGFS_INVALID_HANDLE;
      fileNameP->caseType = HGFS_FILE_NAME_CASE_SENSITIVE;
   } else {
      HgfsFileName *fileNameP;
      /* The target name struct starts right after the converted link name. */
      fileNameP = (HgfsFileName *)((char *)&request->symlinkName +
                                   sizeof request->symlinkName + result);
      targetName = fileNameP->name;
      targetNameLength = &fileNameP->length;
   }
   targetNameBytes = strlen(symname) + 1;

   /* Copy target name into request packet. */
   /*
    * NOTE(review): this bound does not subtract the "result" bytes already
    * consumed by the converted symlink name above, so the check is looser
    * than the space actually remaining — confirm against the buffer layout
    * before relying on it for very long name pairs.
    */
   if (targetNameBytes > HGFS_LARGE_PACKET_MAX - (requestSize - 1)) {
      LOG(4, ("Target name is too long.\n"));
      return -EINVAL;
   }
   memcpy(targetName, symname, targetNameBytes);
   LOG(6, ("Target name: \"%s\"\n", targetName));

   /* Convert target name to CPName-lite format. */
   CPNameLite_ConvertTo(targetName, targetNameBytes - 1, '/');

   *targetNameLength = targetNameBytes - 1;
   req->payloadSize += targetNameBytes - 1;

   /* Fill in header here as payloadSize needs to be there. */
   HgfsPackHeader(req, opUsed);

   return 0;
}
/*
 * HgfsPackOpenRequest --
 *
 *    (FUSE variant) Set up an open request for "path" using the flags in
 *    the fuse_file_info structure, for the V3 or V2 opcode layout.
 *
 *    NOTE(review): the remainder of this function (the HGFS_OP_OPEN case,
 *    the default case, and the name-conversion tail) is not visible in
 *    this chunk; only the visible portion is annotated here.
 */

static int
HgfsPackOpenRequest(const char *path,           // IN: Path to file
                    struct fuse_file_info *fi,  // IN: File info structure
                    mode_t permsMode,           // IN: Permissions, in this context
                    HgfsOpenValid mask,         // IN: Open validation mask
                    HgfsOp opUsed,              // IN: Op to use
                    HgfsReq *req)               // IN/OUT: Packet to write into
{
   char *name;
   uint32 *nameLength;
   size_t reqSize;
   int result;
   int openMode, openFlags;

   ASSERT(path);
   ASSERT(req);

   /* Translate the FUSE open flags before touching the packet. */
   openMode = HgfsGetOpenMode(fi->flags);
   if (openMode < 0) {
      LOG(4, ("Failed to get open mode.\n"));
      return -EINVAL;
   }
   openFlags = HgfsGetOpenFlags(fi->flags);
   if (openFlags < 0) {
      LOG(4, ("Failed to get open flags.\n"));
      return -EINVAL;
   }

   switch (opUsed) {
   case HGFS_OP_OPEN_V3: {
      HgfsRequestOpenV3 *requestV3 = HgfsGetRequestPayload(req);

      reqSize = sizeof(*requestV3) + HgfsGetRequestHeaderSize();

      /* We'll use these later. */
      name = requestV3->fileName.name;
      nameLength = &requestV3->fileName.length;

      /* Linux clients need case-sensitive lookups. */
      requestV3->fileName.flags = 0;
      requestV3->fileName.caseType = HGFS_FILE_NAME_CASE_SENSITIVE;
      requestV3->fileName.fid = HGFS_INVALID_HANDLE;

      requestV3->mask = mask;
      requestV3->mode = openMode;
      requestV3->flags = openFlags;

      /* Set permissions. */
      /* Only meaningful when the caller asked for perms via the mask. */
      if (requestV3->mask & HGFS_FILE_OPEN_PERMS) {
         requestV3->specialPerms =
            (permsMode & (S_ISUID | S_ISGID | S_ISVTX)) >> 9;
         requestV3->ownerPerms = (permsMode & S_IRWXU) >> 6;
         requestV3->groupPerms = (permsMode & S_IRWXG) >> 3;
         requestV3->otherPerms = (permsMode & S_IRWXO);
      }

      /* XXX: Request no lock for now. */
      requestV3->desiredLock = HGFS_LOCK_NONE;
      requestV3->reserved1 = 0;
      requestV3->reserved2 = 0;
      break;
   }
   case HGFS_OP_OPEN_V2: {
      HgfsRequestOpenV2 *requestV2;

      requestV2 = (HgfsRequestOpenV2 *)(HGFS_REQ_PAYLOAD(req));

      /* We'll use these later. */
      name = requestV2->fileName.name;
      nameLength = &requestV2->fileName.length;
      reqSize = sizeof *requestV2;

      requestV2->mask = mask;
      requestV2->mode = openMode;
      requestV2->flags = openFlags;

      /* Set permissions, requires discussion... default, will set max permission*/
      if (requestV2->mask & HGFS_FILE_OPEN_PERMS) {
         requestV2->specialPerms =
            (permsMode & (S_ISUID | S_ISGID | S_ISVTX)) >> 9;
         requestV2->ownerPerms = (permsMode & S_IRWXU) >> 6;
         requestV2->groupPerms = (permsMode & S_IRWXG) >> 3;
         requestV2->otherPerms = (permsMode & S_IRWXO);
      }

      /* XXX: Request no lock for now. */
      requestV2->desiredLock = HGFS_LOCK_NONE;
      break;
   }
static int HgfsDoRead(HgfsHandle handle, // IN: Handle for this file HgfsDataPacket dataPacket[], // IN/OUT: Data description uint32 numEntries, // IN: Number of entries in dataPacket loff_t offset) // IN: Offset at which to read { HgfsReq *req; HgfsOp opUsed; int result = 0; uint32 actualSize = 0; char *payload = NULL; HgfsStatus replyStatus; char *buf; uint32 count; ASSERT(numEntries == 1); count = dataPacket[0].len; req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionRead; if (opUsed == HGFS_OP_READ_FAST_V4) { HgfsRequest *header; HgfsRequestReadV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestReadV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->offset = offset; request->requiredSize = count; request->reserved = 0; req->dataPacket = kmalloc(numEntries * sizeof req->dataPacket[0], GFP_KERNEL); if (!req->dataPacket) { LOG(4, (KERN_WARNING "%s: Failed to allocate mem\n", __func__)); result = -ENOMEM; goto out; } memcpy(req->dataPacket, dataPacket, numEntries * sizeof req->dataPacket[0]); req->numEntries = numEntries; LOG(4, (KERN_WARNING "VMware hgfs: Fast Read V4\n")); } else if (opUsed == HGFS_OP_READ_V3) { HgfsRequest *header; HgfsRequestReadV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestReadV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *request - sizeof *header, count); request->reserved = 0; req->dataPacket = NULL; req->numEntries = 0; req->payloadSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); } else { HgfsRequestRead *request; request = (HgfsRequestRead *)(HGFS_REQ_PAYLOAD(req)); request->header.id = req->id; request->header.op = opUsed; request->file = handle; 
request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *request, count); req->dataPacket = NULL; req->numEntries = 0; req->payloadSize = sizeof *request; } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); switch (result) { case 0: if (opUsed == HGFS_OP_READ_FAST_V4) { actualSize = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize; } else if (opUsed == HGFS_OP_READ_V3) { actualSize = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize; payload = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->payload; } else { actualSize = ((HgfsReplyRead *)HGFS_REQ_PAYLOAD(req))->actualSize; payload = ((HgfsReplyRead *)HGFS_REQ_PAYLOAD(req))->payload; } /* Sanity check on read size. */ if (actualSize > count) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: read too big!\n")); result = -EPROTO; goto out; } if (!actualSize) { /* We got no bytes. */ LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoRead: server returned " "zero\n")); result = actualSize; goto out; } /* Return result. */ if (opUsed == HGFS_OP_READ_V3 || opUsed == HGFS_OP_READ) { buf = kmap(dataPacket[0].page) + dataPacket[0].offset; ASSERT(buf); memcpy(buf, payload, actualSize); LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoRead: copied %u\n", actualSize)); kunmap(dataPacket[0].page); } result = actualSize; break; case -EPROTO: /* Retry with older version(s). Set globally. */ switch (opUsed) { case HGFS_OP_READ_FAST_V4: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: Fast Read V4 not " "supported. Falling back to V3 Read.\n")); if (req->dataPacket) { kfree(req->dataPacket); req->dataPacket = NULL; } hgfsVersionRead = HGFS_OP_READ_V3; goto retry; case HGFS_OP_READ_V3: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: Version 3 not " "supported. 
Falling back to version 1.\n")); hgfsVersionRead = HGFS_OP_READ; goto retry; default: break; } break; default: break; } } else if (result == -EIO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: unknown error: " "%d\n", result)); } out: if (req->dataPacket) { kfree(req->dataPacket); } HgfsFreeRequest(req); return result; }
static int HgfsDoWrite(HgfsHandle handle, // IN: Handle for this file HgfsDataPacket dataPacket[], // IN: Data description uint32 numEntries, // IN: Number of entries in dataPacket loff_t offset) // IN: Offset to begin writing at { HgfsReq *req; int result = 0; HgfsOp opUsed; uint32 requiredSize = 0; uint32 actualSize = 0; char *payload = NULL; uint32 reqSize; HgfsStatus replyStatus; char *buf; uint32 count; ASSERT(numEntries == 1); count = dataPacket[0].len; req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionWrite; if (opUsed == HGFS_OP_WRITE_FAST_V4) { HgfsRequest *header; HgfsRequestWriteV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestWriteV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->flags = 0; request->offset = offset; request->requiredSize = count; request->reserved = 0; payload = request->payload; requiredSize = request->requiredSize; req->dataPacket = kmalloc(numEntries * sizeof req->dataPacket[0], GFP_KERNEL); if (!req->dataPacket) { LOG(4, (KERN_WARNING "%s: Failed to allocate mem\n", __func__)); result = -ENOMEM; goto out; } memcpy(req->dataPacket, dataPacket, numEntries * sizeof req->dataPacket[0]); req->numEntries = numEntries; reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); req->payloadSize = reqSize; LOG(4, (KERN_WARNING "VMware hgfs: Fast Write V4\n")); } else if (opUsed == HGFS_OP_WRITE_V3) { HgfsRequest *header; HgfsRequestWriteV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestWriteV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->flags = 0; request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *header - sizeof *request, count); LOG(4, (KERN_WARNING "VMware hgfs: Using write V3\n")); 
request->reserved = 0; payload = request->payload; requiredSize = request->requiredSize; reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); req->dataPacket = NULL; req->numEntries = 0; buf = kmap(dataPacket[0].page) + dataPacket[0].offset; memcpy(payload, buf, requiredSize); kunmap(dataPacket[0].page); req->payloadSize = reqSize + requiredSize - 1; } else { HgfsRequestWrite *request; request = (HgfsRequestWrite *)(HGFS_REQ_PAYLOAD(req)); request->header.id = req->id; request->header.op = opUsed; request->file = handle; request->flags = 0; request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *request, count); payload = request->payload; requiredSize = request->requiredSize; reqSize = sizeof *request; req->dataPacket = NULL; req->numEntries = 0; buf = kmap(dataPacket[0].page) + dataPacket[0].offset; memcpy(payload, buf, requiredSize); kunmap(dataPacket[0].page); req->payloadSize = reqSize + requiredSize - 1; } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: res %u\n", result)); switch (result) { case 0: if (opUsed == HGFS_OP_WRITE_V3 || opUsed == HGFS_OP_WRITE_FAST_V4) { actualSize = ((HgfsReplyWriteV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize; } else { actualSize = ((HgfsReplyWrite *)HGFS_REQ_PAYLOAD(req))->actualSize; } /* Return result. */ LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoWrite: wrote %u bytes\n", actualSize)); result = actualSize; break; case -EPROTO: /* Retry with older version(s). Set globally. */ switch (opUsed) { case HGFS_OP_WRITE_FAST_V4: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: Fast Write V4 not " "supported. 
Falling back to V3 write.\n")); if (req->dataPacket) { kfree(req->dataPacket); req->dataPacket = NULL; } hgfsVersionWrite = HGFS_OP_WRITE_V3; goto retry; case HGFS_OP_WRITE_V3: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: Version 3 not " "supported. Falling back to version 1.\n")); hgfsVersionWrite = HGFS_OP_WRITE; goto retry; default: break; } break; default: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: server " "returned error: %d\n", result)); break; } } else if (result == -EIO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: unknown error: " "%d\n", result)); } out: if (req->dataPacket) { kfree(req->dataPacket); } HgfsFreeRequest(req); return result; }
/* *---------------------------------------------------------------------- * * HgfsUnpackSearchReadReply -- * * This function abstracts the differences between a SearchReadV1 and * a SearchReadV2. The caller provides the packet containing the reply * and we populate the AttrInfo with version-independent information. * * Note that attr->requestType has already been populated so that we * know whether to expect a V1 or V2 reply. * * Results: * 0 on success, anything else on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsUnpackSearchReadReply(HgfsReq *req, // IN: Reply packet HgfsAttrInfo *attr, // IN/OUT: Attributes char **entryName) // OUT: file name { char *fileName; uint32 fileNameLength; uint32 replySize; int result; ASSERT(req); ASSERT(attr); result = HgfsUnpackCommonAttr(req, attr); if (result != 0) { return result; } switch(attr->requestType) { case HGFS_OP_SEARCH_READ_V3: { HgfsReplySearchReadV3 *replyV3; HgfsDirEntry *dirent; /* Currently V3 returns only 1 entry. */ replyV3 = (HgfsReplySearchReadV3 *)(HGFS_REP_PAYLOAD_V3(req)); replyV3->count = 1; replySize = HGFS_REP_PAYLOAD_SIZE_V3(replyV3) + sizeof *dirent; dirent = (HgfsDirEntry *)replyV3->payload; fileName = dirent->fileName.name; fileNameLength = dirent->fileName.length; break; } case HGFS_OP_SEARCH_READ_V2: { HgfsReplySearchReadV2 *replyV2; replyV2 = (HgfsReplySearchReadV2 *)(HGFS_REQ_PAYLOAD(req)); replySize = sizeof *replyV2; fileName = replyV2->fileName.name; fileNameLength = replyV2->fileName.length; break; } case HGFS_OP_SEARCH_READ: { HgfsReplySearchRead *replyV1; replyV1 = (HgfsReplySearchRead *)(HGFS_REQ_PAYLOAD(req)); replySize = sizeof *replyV1; fileName = replyV1->fileName.name; fileNameLength = replyV1->fileName.length; break; } default: LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackSearchReadReply: unexpected " "OP type encountered\n")); return -EPROTO; } /* * Make sure name length is legal. 
*/ if (fileNameLength > NAME_MAX || fileNameLength > req->bufferSize - replySize) { return -ENAMETOOLONG; } /* * If the size of the name is valid (meaning the end of the directory has * not yet been reached), copy the name to the AttrInfo struct. * * XXX: This operation happens often and the length of the filename is * bounded by NAME_MAX. Perhaps I should just put a statically-sized * array in HgfsAttrInfo and use a slab allocator to allocate the struct. */ if (fileNameLength > 0) { /* Sanity check on name length. */ if (fileNameLength != strlen(fileName)) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackSearchReadReply: name " "length mismatch %u/%Zu, name \"%s\"\n", fileNameLength, strlen(fileName), fileName)); return -EPROTO; } *entryName = kmalloc(fileNameLength + 1, GFP_KERNEL); if (*entryName == NULL) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackSearchReadReply: out of " "memory allocating filename, ignoring\n")); return -ENOMEM; } memcpy(*entryName, fileName, fileNameLength + 1); } else { *entryName = NULL; } return 0; }