static struct sf_dir_buf *sf_dir_buf_alloc(void)
{
    struct sf_dir_buf *b;

    TRACE();
    b = kmalloc(sizeof(*b), GFP_KERNEL);
    if (!b)
    {
        LogRelFunc(("could not alloc directory buffer\n"));
        return NULL;
    }

#ifdef USE_VMALLOC
    b->buf = vmalloc(DIR_BUFFER_SIZE);
#else
    b->buf = kmalloc(DIR_BUFFER_SIZE, GFP_KERNEL);
#endif
    if (!b->buf)
    {
        kfree(b);
        LogRelFunc(("could not alloc directory buffer storage\n"));
        return NULL;
    }

    INIT_LIST_HEAD(&b->head);
    b->cEntries = 0;
    b->cbUsed = 0;
    b->cbFree = DIR_BUFFER_SIZE;
    return b;
}
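For orientation, here is a minimal sketch of the descriptor this allocator fills in. The field names are taken from the code above; the exact declaration (types and field order) lives in the driver's headers and is assumed here:

/* Hypothetical sketch -- the authoritative definition is in the vboxsf headers. */
struct sf_dir_buf {
    struct list_head head;   /* links this buffer into sf_dir_info::info_list */
    uint32_t cEntries;       /* number of directory entries stored in buf */
    uint32_t cbUsed;         /* bytes of buf filled so far */
    uint32_t cbFree;         /* bytes still free, starts at DIR_BUFFER_SIZE */
    void *buf;               /* DIR_BUFFER_SIZE bytes of entry storage */
};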
/**
 * Try to dynamically load the PulseAudio libraries.  This function is not
 * thread-safe, and should be called before attempting to use any of the
 * PulseAudio functions.
 *
 * @returns IPRT status code.
 */
int audioLoadPulseLib(void)
{
    int rc = VINF_SUCCESS;
    unsigned i;
    static enum { NO = 0, YES, FAIL } isLibLoaded = NO;
    RTLDRMOD hLib;

    LogFlowFunc(("\n"));

    /* If this is not NO then the function has obviously been called twice,
       which is likely to be a bug. */
    if (NO != isLibLoaded)
    {
        AssertMsgFailed(("isLibLoaded == %s\n", YES == isLibLoaded ? "YES" : "FAIL"));
        return YES == isLibLoaded ? VINF_SUCCESS : VERR_NOT_SUPPORTED;
    }

    isLibLoaded = FAIL;
    rc = RTLdrLoad(VBOX_PULSE_LIB, &hLib);
    if (RT_FAILURE(rc))
    {
        LogRelFunc(("Failed to load library %s\n", VBOX_PULSE_LIB));
        return rc;
    }

    for (i = 0; i < RT_ELEMENTS(SharedFuncs); i++)
    {
        rc = RTLdrGetSymbol(hLib, SharedFuncs[i].name, (void **)SharedFuncs[i].fn);
        if (RT_FAILURE(rc))
            return rc;
    }

    isLibLoaded = YES;
    return rc;
}
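The loop above walks a table pairing exported symbol names with the addresses of function pointers to resolve. A minimal sketch of what such a table could look like; the entry type, the pfn_* slot, and the symbol chosen are assumptions for illustration, not the real declarations:

/* Hypothetical sketch of the symbol table walked by audioLoadPulseLib(). */
typedef struct
{
    const char *name;   /* exported symbol name inside VBOX_PULSE_LIB */
    void       *fn;     /* address of the function pointer to fill in */
} SHAREDFUNC;

static int (*pfn_pa_context_connect)();  /* illustrative pointer slot */

static SHAREDFUNC SharedFuncs[] =
{
    { "pa_context_connect", &pfn_pa_context_connect }, /* illustrative entry */
    /* ... one entry per imported PulseAudio symbol ... */
};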
virtual int run(bool fDaemonised /* = false */)
{
    int rc = vboxClipboardConnect();
    if (RT_SUCCESS(rc))
        rc = vboxClipboardMain();
    if (RT_FAILURE(rc))
        LogRelFunc(("guest clipboard service terminated abnormally: return code %Rrc\n", rc));
    return rc;
}
/**
 * Waits for a seamless state change event from the host and dispatches it.
 *
 * @returns IPRT return code.
 */
int SeamlessMain::nextStateChangeEvent(void)
{
    VMMDevSeamlessMode newMode = VMMDev_Seamless_Disabled;

    LogRelFlowFunc(("\n"));
    int rc = VbglR3SeamlessWaitEvent(&newMode);
    if (RT_SUCCESS(rc))
    {
        mMode = newMode;
        switch (newMode)
        {
            case VMMDev_Seamless_Visible_Region:
                /* A simplified seamless mode, obtained by making the host VM window
                 * borderless and making the guest desktop transparent. */
                LogRelFlowFunc(("\"Visible region\" mode requested (VBoxClient).\n"));
                break;
            case VMMDev_Seamless_Disabled:
                LogRelFlowFunc(("\"Disabled\" mode requested (VBoxClient).\n"));
                break;
            case VMMDev_Seamless_Host_Window:
                /* One host window represents one guest window.  Not yet implemented. */
                LogRelFunc(("Unsupported \"host window\" mode requested (VBoxClient).\n"));
                return VERR_NOT_SUPPORTED;
            default:
                LogRelFunc(("Unsupported mode %d requested (VBoxClient).\n", newMode));
                return VERR_NOT_SUPPORTED;
        }
    }
    if (RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN)
    {
        if (mMode == VMMDev_Seamless_Visible_Region)
            mfPaused = false;
        else
            mfPaused = true;
        mX11Monitor.interruptEventWait();
    }
    else
        LogRelFunc(("VbglR3SeamlessWaitEvent returned %Rrc (VBoxClient)\n", rc));
    LogRelFlowFunc(("returning %Rrc\n", rc));
    return rc;
}
/**
 * This should allocate memory for sf_inode_info, compute a unique inode
 * number, get an inode from vfs, initialize inode info, and instantiate
 * the dentry.
 *
 * @param parent    inode entry of the directory
 * @param dentry    directory cache entry
 * @param path      path name
 * @param info      file information
 * @param handle    handle
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_instantiate(struct inode *parent, struct dentry *dentry,
                          SHFLSTRING *path, PSHFLFSOBJINFO info, SHFLHANDLE handle)
{
    int err;
    ino_t ino;
    struct inode *inode;
    struct sf_inode_info *sf_new_i;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb);

    TRACE();
    BUG_ON(!sf_g);

    sf_new_i = kmalloc(sizeof(*sf_new_i), GFP_KERNEL);
    if (!sf_new_i)
    {
        LogRelFunc(("could not allocate inode info.\n"));
        err = -ENOMEM;
        goto fail0;
    }

    ino = iunique(parent->i_sb, 1);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
    inode = iget_locked(parent->i_sb, ino);
#else
    inode = iget(parent->i_sb, ino);
#endif
    if (!inode)
    {
        LogFunc(("iget failed\n"));
        err = -ENOMEM;
        goto fail1;
    }

    sf_init_inode(sf_g, inode, info);
    sf_new_i->path = path;
    SET_INODE_INFO(inode, sf_new_i);
    sf_new_i->force_restat = 1;
    sf_new_i->force_reread = 0;

    d_instantiate(dentry, inode);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
    unlock_new_inode(inode);
#endif

    /* Store this handle if we leave the handle open. */
    sf_new_i->handle = handle;
    return 0;

fail1:
    kfree(sf_new_i);
fail0:
    return err;
}
/**
 * Update the set of visible rectangles in the host.
 */
static void sendRegionUpdate(RTRECT *pRects, size_t cRects)
{
    LogRelFlowFunc(("\n"));
    if (cRects && !pRects)  /* Assertion */
    {
        LogRelFunc(("ERROR: called with null pointer!\n"));
        return;
    }
    VbglR3SeamlessSendRects(cRects, pRects);
    LogRelFlowFunc(("returning\n"));
}
static int sf_make_path(const char *caller, struct sf_inode_info *sf_i,
                        const char *d_name, size_t d_len, SHFLSTRING **result)
{
    size_t path_len, shflstring_len;
    SHFLSTRING *tmp;
    uint16_t p_len;
    uint8_t *p_name;
    int fRoot = 0;

    TRACE();
    p_len = sf_i->path->u16Length;
    p_name = sf_i->path->String.utf8;

    if (p_len == 1 && *p_name == '/')
    {
        path_len = d_len + 1;
        fRoot = 1;
    }
    else
    {
        /* lengths of constituents plus terminating zero plus slash */
        path_len = p_len + d_len + 2;
        if (path_len > 0xffff)
        {
            LogFunc(("path too long.  caller=%s, path_len=%zu\n", caller, path_len));
            return -ENAMETOOLONG;
        }
    }

    shflstring_len = offsetof(SHFLSTRING, String.utf8) + path_len;
    tmp = kmalloc(shflstring_len, GFP_KERNEL);
    if (!tmp)
    {
        LogRelFunc(("kmalloc failed, caller=%s\n", caller));
        return -ENOMEM;
    }
    tmp->u16Length = path_len - 1;
    tmp->u16Size = path_len;

    if (fRoot)
        memcpy(&tmp->String.utf8[0], d_name, d_len + 1);
    else
    {
        memcpy(&tmp->String.utf8[0], p_name, p_len);
        tmp->String.utf8[p_len] = '/';
        memcpy(&tmp->String.utf8[p_len + 1], d_name, d_len);
        tmp->String.utf8[p_len + 1 + d_len] = '\0';
    }

    *result = tmp;
    return 0;
}
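Illustrative usage only (the caller below is made up): building the host-side SHFLSTRING path for a child of a directory inode and releasing it afterwards, which is roughly what the dentry-based callers in this driver do with the result:

/* Illustrative caller, not part of the driver. */
static int example_child_path(struct sf_inode_info *sf_i, struct dentry *dentry)
{
    SHFLSTRING *path;
    int err = sf_make_path(__func__, sf_i, (const char *)dentry->d_name.name,
                           dentry->d_name.len, &path);
    if (err)
        return err;     /* -ENOMEM or -ENAMETOOLONG */
    /* ... hand |path| to a host call such as sf_stat() ... */
    kfree(path);
    return 0;
}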
/**
 * This is called when vfs wants to populate internal buffers with
 * [dir]'s contents.  [opaque] is an argument to [filldir].  [filldir]
 * magically modifies its argument - [opaque] - and takes the following
 * additional arguments (which I in turn get from the host via sf_getdent):
 *
 * name : name of the entry (I must also supply its length, huh?)
 * type : type of the entry (FILE | DIR | etc) (I elect to use DT_UNKNOWN)
 * pos  : position/index of the entry
 * ino  : inode number of the entry (I fake those)
 *
 * [dir] contains:
 * f_pos : cursor into the directory listing
 * private_data : means of communication with the host side
 *
 * Extract elements from the directory listing (incrementing f_pos
 * along the way) and feed them to [filldir] until:
 *
 * a. there are no more entries (i.e. sf_getdent set done to 1)
 * b. failure to compute fake inode number
 * c. filldir returns an error (see comment on that)
 */
static int sf_dir_read(struct file *dir, void *opaque, filldir_t filldir)
{
    TRACE();
    for (;;)
    {
        int err;
        ino_t fake_ino;
        loff_t sanity;
        char d_name[NAME_MAX];

        err = sf_getdent(dir, d_name);
        switch (err)
        {
            case 1:
                return 0;

            case 0:
                break;

            case -1:
            default:
                /* skip erroneous entry and proceed */
                LogFunc(("sf_getdent error %d\n", err));
                dir->f_pos += 1;
                continue;
        }

        /* d_name now contains a valid entry name */
        sanity = dir->f_pos + 0xbeef;
        fake_ino = sanity;
        if (sanity - fake_ino)
        {
            LogRelFunc(("can not compute ino\n"));
            return -EINVAL;
        }

        err = filldir(opaque, d_name, strlen(d_name), dir->f_pos, fake_ino, DT_UNKNOWN);
        if (err)
        {
            LogFunc(("filldir returned error %d\n", err));
            /* Rely on the fact that filldir returns error
               only when it runs out of space in opaque */
            return 0;
        }

        dir->f_pos += 1;
    }

    BUG();
}
/**
 * Start the X11 window configuration change monitor thread.
 */
int SeamlessMain::startX11MonitorThread(void)
{
    int rc;

    mX11MonitorThreadStopping = false;
    if (isX11MonitorThreadRunning())
        return VINF_SUCCESS;
    rc = RTThreadCreate(&mX11MonitorThread, x11MonitorThread, this, 0,
                        RTTHREADTYPE_MSG_PUMP, RTTHREADFLAGS_WAITABLE,
                        "X11 events");
    if (RT_FAILURE(rc))
        LogRelFunc(("Warning: failed to start X11 monitor thread (VBoxClient).\n"));
    return rc;
}
/**
 * Create a new directory buffer descriptor.
 */
struct sf_dir_info *sf_dir_info_alloc(void)
{
    struct sf_dir_info *p;

    TRACE();
    p = kmalloc(sizeof(*p), GFP_KERNEL);
    if (!p)
    {
        LogRelFunc(("could not alloc directory info\n"));
        return NULL;
    }

    INIT_LIST_HEAD(&p->info_list);
    return p;
}
static int run(struct VBCLSERVICE **ppInterface, bool fDaemonised)
{
    int rc;

    NOREF(ppInterface);

    /* Initialise the guest library. */
    rc = VbglR3InitUser();
    if (RT_FAILURE(rc))
        VBClFatalError(("Failed to connect to the VirtualBox kernel service, rc=%Rrc\n", rc));
    rc = vboxClipboardConnect();
    /* Deliberately not RT_SUCCESS here: VINF_PERMISSION_DENIED means the host
     * service is not present. */
    if (rc == VINF_SUCCESS)
        rc = vboxClipboardMain();
    if (rc == VERR_NOT_SUPPORTED)
        rc = VINF_SUCCESS;  /* Prevent automatic restart. */
    if (RT_FAILURE(rc))
        LogRelFunc(("guest clipboard service terminated abnormally: return code %Rrc\n", rc));
    return rc;
}
/**
 * This is called (by sf_read_super_[24|26]) when vfs mounts the fs and
 * wants to read the super_block.
 *
 * calls [sf_glob_alloc] to map the folder and allocate the global
 * information structure.
 *
 * initializes [sb], initializes the root inode and dentry.
 *
 * should respect [flags]
 */
static int sf_read_super_aux(struct super_block *sb, void *data, int flags)
{
    int err;
    struct dentry *droot;
    struct inode *iroot;
    struct sf_inode_info *sf_i;
    struct sf_glob_info *sf_g;
    SHFLFSOBJINFO fsinfo;
    struct vbsf_mount_info_new *info;

    TRACE();
    if (!data)
    {
        LogFunc(("no mount info specified\n"));
        return -EINVAL;
    }

    info = data;

    if (flags & MS_REMOUNT)
    {
        LogFunc(("remounting is not supported\n"));
        return -ENOSYS;
    }

    err = sf_glob_alloc(info, &sf_g);
    if (err)
        goto fail0;

    sf_i = kmalloc(sizeof(*sf_i), GFP_KERNEL);
    if (!sf_i)
    {
        err = -ENOMEM;
        LogRelFunc(("could not allocate memory for root inode info\n"));
        goto fail1;
    }

    sf_i->handle = SHFL_HANDLE_NIL;
    sf_i->path = kmalloc(sizeof(SHFLSTRING) + 1, GFP_KERNEL);
    if (!sf_i->path)
    {
        err = -ENOMEM;
        LogRelFunc(("could not allocate memory for root inode path\n"));
        goto fail2;
    }

    sf_i->path->u16Length = 1;
    sf_i->path->u16Size = 2;
    sf_i->path->String.utf8[0] = '/';
    sf_i->path->String.utf8[1] = 0;

    err = sf_stat(__func__, sf_g, sf_i->path, &fsinfo, 0);
    if (err)
    {
        LogFunc(("could not stat root of share\n"));
        goto fail3;
    }

    sb->s_magic = 0xface;
    sb->s_blocksize = 1024;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 3)
    /* Required for seek/sendfile.
     *
     * Must be less than or equal to INT64_MAX despite the fact that the
     * declaration of this variable is unsigned long long.  See determination
     * of 'loff_t max' in fs/read_write.c / do_sendfile().  I don't know the
     * correct limit but MAX_LFS_FILESIZE (8TB-1 on 32-bit boxes) takes the
     * page cache into account and is the suggested limit. */
# if defined MAX_LFS_FILESIZE
    sb->s_maxbytes = MAX_LFS_FILESIZE;
# else
    sb->s_maxbytes = 0x7fffffffffffffffULL;
# endif
#endif
    sb->s_op = &sf_super_ops;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
    iroot = iget_locked(sb, 0);
#else
    iroot = iget(sb, 0);
#endif
    if (!iroot)
    {
        err = -ENOMEM;  /* XXX */
        LogFunc(("could not get root inode\n"));
        goto fail3;
    }

    if (sf_init_backing_dev(sf_g))
    {
        err = -EINVAL;
        LogFunc(("could not init bdi\n"));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
        unlock_new_inode(iroot);
#endif
        goto fail4;
    }

    sf_init_inode(sf_g, iroot, &fsinfo);
    SET_INODE_INFO(iroot, sf_i);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
    unlock_new_inode(iroot);
#endif

    droot = d_alloc_root(iroot);
    if (!droot)
    {
        err = -ENOMEM;  /* XXX */
        LogFunc(("d_alloc_root failed\n"));
        goto fail5;
    }

    sb->s_root = droot;
    SET_GLOB_INFO(sb, sf_g);
    return 0;

fail5:
    sf_done_backing_dev(sf_g);

fail4:
    iput(iroot);

fail3:
    kfree(sf_i->path);

fail2:
    kfree(sf_i);

fail1:
    sf_glob_free(sf_g);

fail0:
    return err;
}
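The sf_read_super_[24|26] wrappers mentioned in the comment are not shown here. A plausible sketch of the 2.6-style one, assuming it does little more than forward to the worker above (the real wrappers also differ in how the super block reaches them on 2.4 vs. 2.6 kernels):

/* Hypothetical sketch of the version-specific wrapper; not the real code. */
static int sf_read_super_26(struct super_block *sb, void *data, int flags)
{
    TRACE();
    return sf_read_super_aux(sb, data, flags);
}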
static int sf_symlink(struct inode *parent, struct dentry *dentry, const char *symname)
{
    int err;
    int rc;
    struct sf_inode_info *sf_i;
    struct sf_glob_info *sf_g;
    SHFLSTRING *path, *ssymname;
    SHFLFSOBJINFO info;
    int symname_len = strlen(symname) + 1;

    TRACE();
    sf_g = GET_GLOB_INFO(parent->i_sb);
    sf_i = GET_INODE_INFO(parent);

    BUG_ON(!sf_g);
    BUG_ON(!sf_i);

    err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path);
    if (err)
        goto fail0;

    ssymname = kmalloc(offsetof(SHFLSTRING, String.utf8) + symname_len, GFP_KERNEL);
    if (!ssymname)
    {
        LogRelFunc(("kmalloc failed, caller=sf_symlink\n"));
        err = -ENOMEM;
        goto fail1;
    }

    ssymname->u16Length = symname_len - 1;
    ssymname->u16Size = symname_len;
    memcpy(ssymname->String.utf8, symname, symname_len);

    rc = vboxCallSymlink(&client_handle, &sf_g->map, path, ssymname, &info);
    kfree(ssymname);
    if (RT_FAILURE(rc))
    {
        if (rc == VERR_WRITE_PROTECT)
        {
            err = -EROFS;
            goto fail1;
        }
        LogFunc(("vboxCallSymlink(%s) failed rc=%Rrc\n", sf_i->path->String.utf8, rc));
        err = -EPROTO;
        goto fail1;
    }

    err = sf_instantiate(parent, dentry, path, &info, SHFL_HANDLE_NIL);
    if (err)
    {
        LogFunc(("could not instantiate dentry for %s err=%d\n",
                 sf_i->path->String.utf8, err));
        goto fail1;
    }

    sf_i->force_restat = 1;
    return 0;

fail1:
    kfree(path);
fail0:
    return err;
}
/**
 * Open a regular file.
 *
 * @param inode  the inode
 * @param file   the file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_reg_open(struct inode *inode, struct file *file)
{
    int rc, rc_linux = 0;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_reg_info *sf_r;
    SHFLCREATEPARMS params;

    TRACE();
    BUG_ON(!sf_g);
    BUG_ON(!sf_i);

    LogFunc(("open %s\n", sf_i->path->String.utf8));

    sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
    if (!sf_r)
    {
        LogRelFunc(("could not allocate reg info\n"));
        return -ENOMEM;
    }

    /* Already open? */
    if (sf_i->handle != SHFL_HANDLE_NIL)
    {
        /*
         * This inode was created with sf_create_aux().  Check the CreateFlags:
         * O_CREAT, O_TRUNC: inherent true (file was just created).  Not sure
         * about the access flags (SHFL_CF_ACCESS_*).
         */
        sf_i->force_restat = 1;
        sf_r->handle = sf_i->handle;
        sf_i->handle = SHFL_HANDLE_NIL;
        sf_i->file = file;
        file->private_data = sf_r;
        return 0;
    }

    RT_ZERO(params);
    params.Handle = SHFL_HANDLE_NIL;
    /* We check the value of params.Handle afterwards to find out if
     * the call succeeded or failed, as the API does not seem to cleanly
     * distinguish error and informational messages.
     *
     * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to
     * make the shared folders host service use our fMode parameter */

    if (file->f_flags & O_CREAT)
    {
        LogFunc(("O_CREAT set\n"));
        params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
        /* We ignore O_EXCL, as the Linux kernel seems to call create
           beforehand itself, so O_EXCL should always fail. */
        if (file->f_flags & O_TRUNC)
        {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= (  SHFL_CF_ACT_OVERWRITE_IF_EXISTS
                                   | SHFL_CF_ACCESS_WRITE);
        }
        else
            params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
    }
    else
    {
        params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
        if (file->f_flags & O_TRUNC)
        {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= (  SHFL_CF_ACT_OVERWRITE_IF_EXISTS
                                   | SHFL_CF_ACCESS_WRITE);
        }
    }

    if (!(params.CreateFlags & SHFL_CF_ACCESS_READWRITE))
    {
        switch (file->f_flags & O_ACCMODE)
        {
            case O_RDONLY:
                params.CreateFlags |= SHFL_CF_ACCESS_READ;
                break;

            case O_WRONLY:
                params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
                break;

            case O_RDWR:
                params.CreateFlags |= SHFL_CF_ACCESS_READWRITE;
                break;

            default:
                BUG();
        }
    }

    if (file->f_flags & O_APPEND)
    {
        LogFunc(("O_APPEND set\n"));
        params.CreateFlags |= SHFL_CF_ACCESS_APPEND;
    }

    params.Info.Attr.fMode = inode->i_mode;
    LogFunc(("sf_reg_open: calling vboxCallCreate, file %s, flags=%#x, %#x\n",
             sf_i->path->String.utf8, file->f_flags, params.CreateFlags));
    rc = vboxCallCreate(&client_handle, &sf_g->map, sf_i->path, &params);
    if (RT_FAILURE(rc))
    {
        LogFunc(("vboxCallCreate failed flags=%d,%#x rc=%Rrc\n",
                 file->f_flags, params.CreateFlags, rc));
        kfree(sf_r);
        return -RTErrConvertToErrno(rc);
    }

    if (SHFL_HANDLE_NIL == params.Handle)
    {
        switch (params.Result)
        {
            case SHFL_PATH_NOT_FOUND:
            case SHFL_FILE_NOT_FOUND:
                rc_linux = -ENOENT;
                break;
            case SHFL_FILE_EXISTS:
                rc_linux = -EEXIST;
                break;
            default:
                break;
        }
    }

    sf_i->force_restat = 1;
    sf_r->handle = params.Handle;
    sf_i->file = file;
    file->private_data = sf_r;
    return rc_linux;
}
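A worked example of the flag translation above (illustrative; this assumes SHFL_CF_ACCESS_READWRITE is the combined READ|WRITE mask, as the guard on the O_ACCMODE switch suggests):

/*
 * open(path, O_WRONLY | O_CREAT | O_TRUNC) walks the branches above like so:
 *   O_CREAT            -> SHFL_CF_ACT_CREATE_IF_NEW
 *   O_CREAT + O_TRUNC  -> SHFL_CF_ACT_OVERWRITE_IF_EXISTS | SHFL_CF_ACCESS_WRITE
 *   O_ACCMODE switch   -> skipped, an access bit is already present
 * so the host sees CreateFlags =
 *   SHFL_CF_ACT_CREATE_IF_NEW | SHFL_CF_ACT_OVERWRITE_IF_EXISTS
 *   | SHFL_CF_ACCESS_WRITE.
 */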
/* allocate global info, try to map host share */
static int sf_glob_alloc(struct vbsf_mount_info_new *info, struct sf_glob_info **sf_gp)
{
    int err, rc;
    SHFLSTRING *str_name;
    size_t name_len, str_len;
    struct sf_glob_info *sf_g;

    TRACE();
    sf_g = kmalloc(sizeof(*sf_g), GFP_KERNEL);
    if (!sf_g)
    {
        err = -ENOMEM;
        LogRelFunc(("could not allocate memory for global info\n"));
        goto fail0;
    }

    RT_ZERO(*sf_g);

    if (   info->nullchar != '\0'
        || info->signature[0] != VBSF_MOUNT_SIGNATURE_BYTE_0
        || info->signature[1] != VBSF_MOUNT_SIGNATURE_BYTE_1
        || info->signature[2] != VBSF_MOUNT_SIGNATURE_BYTE_2)
    {
        /* An old version of mount.vboxsf made the syscall.  Translate the
         * old parameters to the new structure. */
        struct vbsf_mount_info_old *info_old = (struct vbsf_mount_info_old *)info;
        static struct vbsf_mount_info_new info_compat;

        info = &info_compat;
        memset(info, 0, sizeof(*info));
        memcpy(&info->name, &info_old->name, MAX_HOST_NAME);
        memcpy(&info->nls_name, &info_old->nls_name, MAX_NLS_NAME);
        info->length = offsetof(struct vbsf_mount_info_new, dmode);
        info->uid = info_old->uid;
        info->gid = info_old->gid;
        info->ttl = info_old->ttl;
    }

    info->name[sizeof(info->name) - 1] = 0;
    info->nls_name[sizeof(info->nls_name) - 1] = 0;

    name_len = strlen(info->name);
    if (name_len > 0xfffe)
    {
        err = -ENAMETOOLONG;
        LogFunc(("map name too big\n"));
        goto fail1;
    }

    str_len = offsetof(SHFLSTRING, String.utf8) + name_len + 1;
    str_name = kmalloc(str_len, GFP_KERNEL);
    if (!str_name)
    {
        err = -ENOMEM;
        LogRelFunc(("could not allocate memory for host name\n"));
        goto fail1;
    }

    str_name->u16Length = name_len;
    str_name->u16Size = name_len + 1;
    memcpy(str_name->String.utf8, info->name, name_len + 1);

    if (info->nls_name[0] && strcmp(info->nls_name, "utf8"))
    {
        sf_g->nls = load_nls(info->nls_name);
        if (!sf_g->nls)
        {
            err = -EINVAL;
            LogFunc(("failed to load nls %s\n", info->nls_name));
            kfree(str_name);
            goto fail1;
        }
    }
    else
        sf_g->nls = NULL;

    rc = vboxCallMapFolder(&client_handle, str_name, &sf_g->map);
    kfree(str_name);
    if (RT_FAILURE(rc))
    {
        err = -EPROTO;
        LogFunc(("vboxCallMapFolder failed rc=%d\n", rc));
        goto fail2;
    }

    sf_g->ttl = info->ttl;
    sf_g->uid = info->uid;
    sf_g->gid = info->gid;

    if ((unsigned)info->length >= sizeof(struct vbsf_mount_info_new))
    {
        /* new fields */
        sf_g->dmode = info->dmode;
        sf_g->fmode = info->fmode;
        sf_g->dmask = info->dmask;
        sf_g->fmask = info->fmask;
    }
    else
    {
        sf_g->dmode = ~0;
        sf_g->fmode = ~0;
    }

    *sf_gp = sf_g;
    return 0;

fail2:
    if (sf_g->nls)
        unload_nls(sf_g->nls);

fail1:
    kfree(sf_g);

fail0:
    return err;
}
/* Module initialization/finalization handlers */
static int __init init(void)
{
    int rcVBox;
    int rcRet = 0;
    int err;

    TRACE();

    if (sizeof(struct vbsf_mount_info_new) > PAGE_SIZE)
    {
        printk(KERN_ERR
               "Mount information structure is too large %lu\n"
               "Must be less than or equal to %lu\n",
               (unsigned long)sizeof(struct vbsf_mount_info_new),
               (unsigned long)PAGE_SIZE);
        return -EINVAL;
    }

    err = register_filesystem(&vboxsf_fs_type);
    if (err)
    {
        LogFunc(("register_filesystem err=%d\n", err));
        return err;
    }

    rcVBox = vboxInit();
    if (RT_FAILURE(rcVBox))
    {
        LogRelFunc(("vboxInit failed, rc=%d\n", rcVBox));
        rcRet = -EPROTO;
        goto fail0;
    }

    rcVBox = vboxConnect(&client_handle);
    if (RT_FAILURE(rcVBox))
    {
        LogRelFunc(("vboxConnect failed, rc=%d\n", rcVBox));
        rcRet = -EPROTO;
        goto fail1;
    }

    rcVBox = vboxCallSetUtf8(&client_handle);
    if (RT_FAILURE(rcVBox))
    {
        LogRelFunc(("vboxCallSetUtf8 failed, rc=%d\n", rcVBox));
        rcRet = -EPROTO;
        goto fail2;
    }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    if (!follow_symlinks)
    {
        rcVBox = vboxCallSetSymlinks(&client_handle);
        if (RT_FAILURE(rcVBox))
            printk(KERN_WARNING
                   "vboxsf: Host unable to show symlinks, rc=%d\n", rcVBox);
    }
#endif

    printk(KERN_DEBUG
           "vboxsf: Successfully loaded version " VBOX_VERSION_STRING
           " (interface " RT_XSTR(VMMDEV_VERSION) ")\n");

    return 0;

fail2:
    vboxDisconnect(&client_handle);

fail1:
    vboxUninit();

fail0:
    unregister_filesystem(&vboxsf_fs_type);
    return rcRet;
}
/**
 * Enables the Hyper-V TSC page.
 *
 * @returns VBox status code.
 * @param pVM            Pointer to the VM.
 * @param GCPhysTscPage  Where to map the TSC page.
 * @param fUseThisTscSeq Whether to set the TSC sequence number to the one
 *                       specified in @a uTscSeq.
 * @param uTscSeq        The TSC sequence value to use.  Ignored if
 *                       @a fUseThisTscSeq is false.
 */
VMMR3_INT_DECL(int) gimR3HvEnableTscPage(PVM pVM, RTGCPHYS GCPhysTscPage, bool fUseThisTscSeq, uint32_t uTscSeq)
{
    PPDMDEVINSR3    pDevIns = pVM->gim.s.pDevInsR3;
    PGIMMMIO2REGION pRegion = &pVM->gim.s.u.Hv.aMmio2Regions[GIM_HV_REF_TSC_PAGE_REGION_IDX];
    AssertPtrReturn(pDevIns, VERR_GIM_DEVICE_NOT_REGISTERED);

    int rc;
    if (pRegion->fMapped)
    {
        /*
         * Is it already enabled at the given guest-address?
         */
        if (pRegion->GCPhysPage == GCPhysTscPage)
            return VINF_SUCCESS;

        /*
         * If it's mapped at a different address, unmap the previous address.
         */
        rc = gimR3HvDisableTscPage(pVM);
        AssertRC(rc);
    }

    /*
     * Map the TSC-page at the specified address.
     */
    Assert(!pRegion->fMapped);
    rc = GIMR3Mmio2Map(pVM, pRegion, GCPhysTscPage);
    if (RT_SUCCESS(rc))
    {
        Assert(pRegion->GCPhysPage == GCPhysTscPage);

        /*
         * Update the TSC scale.  Windows guests expect a non-zero TSC sequence, otherwise
         * they fall back to using the reference count MSR, which is not ideal in terms of VM-exits.
         *
         * Also, Hyper-V normalizes the time in 10 MHz, see:
         * http://technet.microsoft.com/it-it/sysinternals/dn553408%28v=vs.110%29
         */
        PGIMHVREFTSC pRefTsc = (PGIMHVREFTSC)pRegion->pvPageR3;
        Assert(pRefTsc);

        uint64_t const u64TscKHz = TMCpuTicksPerSecond(pVM) / UINT64_C(1000);
        uint32_t       u32TscSeq = 1;
        if (   fUseThisTscSeq
            && uTscSeq < UINT32_C(0xfffffffe))
            u32TscSeq = uTscSeq + 1;
        pRefTsc->u32TscSequence = u32TscSeq;
        pRefTsc->u64TscScale    = ((INT64_C(10000) << 32) / u64TscKHz) << 32;
        pRefTsc->i64TscOffset   = 0;

        LogRel(("GIM: HyperV: Enabled TSC page at %#RGp - u64TscScale=%#RX64 u64TscKHz=%#RX64 (%'RU64) Seq=%#RU32\n",
                GCPhysTscPage, pRefTsc->u64TscScale, u64TscKHz, u64TscKHz, pRefTsc->u32TscSequence));

        TMR3CpuTickParavirtEnable(pVM);
        return VINF_SUCCESS;
    }
    else
        LogRelFunc(("GIMR3Mmio2Map failed. rc=%Rrc\n", rc));

    return VERR_GIM_OPERATION_FAILED;
}
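The scale written above is a 64.64 fixed-point factor: 2^64 * 10000 / u64TscKHz. Per the Hyper-V TLFS, guests derive the reference time (in 100 ns units) from the TSC page as ReferenceTime = ((Tsc * TscScale) >> 64) + TscOffset, which with this scale works out to Tsc * 10000 / kHz, i.e. milliseconds expressed in 100 ns ticks. A sketch of that guest-side computation, assuming a GCC-style 128-bit multiply is available:

#include <stdint.h>

/* Illustrative guest-side use of the TSC page fields set above. */
static inline uint64_t hvRefTimeFromTsc(uint64_t uTsc, uint64_t u64TscScale,
                                        int64_t i64TscOffset)
{
    /* 64x64 -> 128-bit multiply; keep the high 64 bits (the integer part). */
    return (uint64_t)(((unsigned __int128)uTsc * u64TscScale) >> 64)
         + (uint64_t)i64TscOffset;
}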
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
static int sf_dir_iterate(struct file *dir, struct dir_context *ctx)
#else
static int sf_dir_read(struct file *dir, void *opaque, filldir_t filldir)
#endif
{
    TRACE();
    for (;;)
    {
        int err;
        ino_t fake_ino;
        loff_t sanity;
        char d_name[NAME_MAX];
        int d_type = DT_UNKNOWN;

        err = sf_getdent(dir, d_name, &d_type);
        switch (err)
        {
            case 1:
                return 0;

            case 0:
                break;

            case -1:
            default:
                /* skip erroneous entry and proceed */
                LogFunc(("sf_getdent error %d\n", err));
                dir->f_pos += 1;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
                ctx->pos += 1;
#endif
                continue;
        }

        /* d_name now contains a valid entry name */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
        sanity = ctx->pos + 0xbeef;
#else
        sanity = dir->f_pos + 0xbeef;
#endif
        fake_ino = sanity;
        if (sanity - fake_ino)
        {
            LogRelFunc(("can not compute ino\n"));
            return -EINVAL;
        }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
        if (!dir_emit(ctx, d_name, strlen(d_name), fake_ino, d_type))
        {
            LogFunc(("dir_emit failed\n"));
            return 0;
        }
#else
        err = filldir(opaque, d_name, strlen(d_name), dir->f_pos, fake_ino, d_type);
        if (err)
        {
            LogFunc(("filldir returned error %d\n", err));
            /* Rely on the fact that filldir returns error
               only when it runs out of space in opaque */
            return 0;
        }
#endif

        dir->f_pos += 1;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
        ctx->pos += 1;
#endif
    }

    BUG();
}
/**
 * Open a directory.  Read the complete content into a buffer.
 *
 * @param inode  inode
 * @param file   file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_dir_open(struct inode *inode, struct file *file)
{
    int rc;
    int err;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_dir_info *sf_d;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    SHFLCREATEPARMS params;

    TRACE();
    BUG_ON(!sf_g);
    BUG_ON(!sf_i);

    if (file->private_data)
    {
        LogFunc(("sf_dir_open() called on already opened directory '%s'\n",
                 sf_i->path->String.utf8));
        return 0;
    }

    sf_d = sf_dir_info_alloc();
    if (!sf_d)
    {
        LogRelFunc(("could not allocate directory info for '%s'\n",
                    sf_i->path->String.utf8));
        return -ENOMEM;
    }

    RT_ZERO(params);
    params.Handle = SHFL_HANDLE_NIL;
    params.CreateFlags = 0
                       | SHFL_CF_DIRECTORY
                       | SHFL_CF_ACT_OPEN_IF_EXISTS
                       | SHFL_CF_ACT_FAIL_IF_NEW
                       | SHFL_CF_ACCESS_READ;

    LogFunc(("sf_dir_open(): calling vboxCallCreate, folder %s, flags %#x\n",
             sf_i->path->String.utf8, params.CreateFlags));
    rc = vboxCallCreate(&client_handle, &sf_g->map, sf_i->path, &params);
    if (RT_SUCCESS(rc))
    {
        if (params.Result == SHFL_FILE_EXISTS)
        {
            err = sf_dir_read_all(sf_g, sf_i, sf_d, params.Handle);
            if (!err)
                file->private_data = sf_d;
        }
        else
            err = -ENOENT;

        rc = vboxCallClose(&client_handle, &sf_g->map, params.Handle);
        if (RT_FAILURE(rc))
            LogFunc(("sf_dir_open(): vboxCallClose(%s) after err=%d failed rc=%Rrc\n",
                     sf_i->path->String.utf8, err, rc));
    }
    else
        err = -EPERM;

    if (err)
        sf_dir_info_free(sf_d);

    return err;
}
int sf_dir_read_all(struct sf_glob_info *sf_g, struct sf_inode_info *sf_i,
                    struct sf_dir_info *sf_d, SHFLHANDLE handle)
{
    int err;
    SHFLSTRING *mask;
    struct sf_dir_buf *b;

    TRACE();
    err = sf_make_path(__func__, sf_i, "*", 1, &mask);
    if (err)
        goto fail0;

    for (;;)
    {
        int rc;
        void *buf;
        uint32_t cbSize;
        uint32_t cEntries;

        b = sf_get_empty_dir_buf(sf_d);
        if (!b)
        {
            b = sf_dir_buf_alloc();
            if (!b)
            {
                err = -ENOMEM;
                LogRelFunc(("could not alloc directory buffer\n"));
                goto fail1;
            }
            list_add(&b->head, &sf_d->info_list);
        }

        buf = b->buf;
        cbSize = b->cbFree;

        rc = VbglR0SfDirInfo(&client_handle, &sf_g->map, handle, mask,
                             0, 0, &cbSize, buf, &cEntries);
        switch (rc)
        {
            case VINF_SUCCESS:
                /* fallthrough */
            case VERR_NO_MORE_FILES:
                break;

            case VERR_NO_TRANSLATION:
                LogFunc(("host could not translate entry\n"));
                /* XXX */
                break;

            default:
                err = -RTErrConvertToErrno(rc);
                LogFunc(("VbglR0SfDirInfo failed rc=%Rrc\n", rc));
                goto fail1;
        }

        b->cEntries += cEntries;
        b->cbFree   -= cbSize;
        b->cbUsed   += cbSize;

        if (RT_FAILURE(rc))
            break;
    }
    err = 0;

fail1:
    kfree(mask);

fail0:
    return err;
}
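sf_get_empty_dir_buf() is not shown in this excerpt. Based on how the loop uses it, a plausible sketch is a scan of the buffer list for one that is still unused; this is an assumption, not the actual implementation:

/* Hypothetical sketch; the real helper lives elsewhere in the driver. */
static struct sf_dir_buf *sf_get_empty_dir_buf(struct sf_dir_info *sf_d)
{
    struct list_head *pos;

    list_for_each(pos, &sf_d->info_list)
    {
        struct sf_dir_buf *b = list_entry(pos, struct sf_dir_buf, head);
        if (b->cbUsed == 0)
            return b;   /* unused buffer with its full cbFree budget */
    }
    return NULL;        /* caller allocates a fresh one */
}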
/**
 * This is called when vfs failed to locate the dentry in the cache.  The
 * job of this function is to allocate an inode and link it to the dentry.
 * [dentry] contains the name to be looked up in the [parent] directory.
 * Failure to locate the name is not a "hard" error, in this case a NULL
 * inode is added to [dentry] and vfs should proceed trying to create
 * the entry via other means.  NULL (or a "positive" pointer) ought to be
 * returned in case of success and a "negative" pointer on error.
 */
static struct dentry *sf_lookup(struct inode *parent, struct dentry *dentry
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
                                , unsigned int flags
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
                                , struct nameidata *nd
#endif
                               )
{
    int err;
    struct sf_inode_info *sf_i, *sf_new_i;
    struct sf_glob_info *sf_g;
    SHFLSTRING *path;
    struct inode *inode;
    ino_t ino;
    SHFLFSOBJINFO fsinfo;

    TRACE();
    sf_g = GET_GLOB_INFO(parent->i_sb);
    sf_i = GET_INODE_INFO(parent);

    BUG_ON(!sf_g);
    BUG_ON(!sf_i);

    err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path);
    if (err)
        goto fail0;

    err = sf_stat(__func__, sf_g, path, &fsinfo, 1);
    if (err)
    {
        if (err == -ENOENT)
        {
            /* -ENOENT: add NULL inode to dentry so it later can be
               created via call to create/mkdir/open */
            kfree(path);
            inode = NULL;
        }
        else
            goto fail1;
    }
    else
    {
        sf_new_i = kmalloc(sizeof(*sf_new_i), GFP_KERNEL);
        if (!sf_new_i)
        {
            LogRelFunc(("could not allocate memory for new inode info\n"));
            err = -ENOMEM;
            goto fail1;
        }
        sf_new_i->handle = SHFL_HANDLE_NIL;
        sf_new_i->force_reread = 0;

        ino = iunique(parent->i_sb, 1);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
        inode = iget_locked(parent->i_sb, ino);
#else
        inode = iget(parent->i_sb, ino);
#endif
        if (!inode)
        {
            LogFunc(("iget failed\n"));
            err = -ENOMEM;  /* XXX: ??? */
            goto fail2;
        }

        SET_INODE_INFO(inode, sf_new_i);
        sf_init_inode(sf_g, inode, &fsinfo);
        sf_new_i->path = path;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
        unlock_new_inode(inode);
#endif
    }

    sf_i->force_restat = 0;
    dentry->d_time = jiffies;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
    d_set_d_op(dentry, &sf_dentry_ops);
#else
    dentry->d_op = &sf_dentry_ops;
#endif
    d_add(dentry, inode);
    return NULL;

fail2:
    kfree(sf_new_i);

fail1:
    kfree(path);

fail0:
    return ERR_PTR(err);
}
static int sf_reg_release(struct inode *inode, struct file *file)
{
    int rc;
    struct sf_reg_info *sf_r;
    struct sf_glob_info *sf_g;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);

    TRACE();
    sf_g = GET_GLOB_INFO(inode->i_sb);
    sf_r = file->private_data;

    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
    /* filemap_write_and_wait(inode->i_mapping); */
    if (   inode->i_mapping->nrpages
        && filemap_fdatawrite(inode->i_mapping) != -EIO)
        filemap_fdatawait(inode->i_mapping);
#endif

    rc = vboxCallClose(&client_handle, &sf_g->map, sf_r->handle);
    if (RT_FAILURE(rc))
        LogFunc(("vboxCallClose failed rc=%Rrc\n", rc));

    kfree(sf_r);
    sf_i->file = NULL;
    sf_i->handle = SHFL_HANDLE_NIL;
    file->private_data = NULL;
    return 0;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int *type)
# define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int unused)
# define SET_TYPE(t)
#endif
{
    struct page *page;
    char *buf;
    loff_t off;
    uint32_t nread = PAGE_SIZE;
    int err;
    struct file *file = vma->vm_file;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;

    TRACE();
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    if (vmf->pgoff > vma->vm_end)
        return VM_FAULT_SIGBUS;
#else
    if (vaddr > vma->vm_end)
    {
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
    }
#endif

    /* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls vboxCallRead()
     * which works on virtual addresses.  On Linux we cannot reliably determine
     * the physical address for high memory, see rtR0MemObjNativeLockKernel(). */
    page = alloc_page(GFP_USER);
    if (!page)
    {
        LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_OOM;
#else
        SET_TYPE(VM_FAULT_OOM);
        return NOPAGE_OOM;
#endif
    }

    buf = kmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    off = (vmf->pgoff << PAGE_SHIFT);
#else
    off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
    err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (err)
    {
        kunmap(page);
        put_page(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_SIGBUS;
#else
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
#endif
    }

    BUG_ON(nread > PAGE_SIZE);
    if (!nread)
    {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        clear_user_page(page_address(page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
        clear_user_page(page_address(page), vaddr, page);
#else
        clear_user_page(page_address(page), vaddr);
#endif
    }
    else
        memset(buf + nread, 0, PAGE_SIZE - nread);

    flush_dcache_page(page);
    kunmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    vmf->page = page;
    return 0;
#else
    SET_TYPE(VM_FAULT_MAJOR);
    return page;
#endif
}
/* allocate global info, try to map host share */
static int sf_glob_alloc(struct vbsf_mount_info_new *info, struct sf_glob_info **sf_gp)
{
    int err, rc;
    SHFLSTRING *str_name;
    size_t name_len, str_len;
    struct sf_glob_info *sf_g;

    TRACE();
    sf_g = kmalloc(sizeof(*sf_g), GFP_KERNEL);
    if (!sf_g)
    {
        err = -ENOMEM;
        LogRelFunc(("could not allocate memory for global info\n"));
        goto fail0;
    }

    RT_ZERO(*sf_g);

    if (   info->nullchar != '\0'
        || info->signature[0] != VBSF_MOUNT_SIGNATURE_BYTE_0
        || info->signature[1] != VBSF_MOUNT_SIGNATURE_BYTE_1
        || info->signature[2] != VBSF_MOUNT_SIGNATURE_BYTE_2)
    {
        /* An old version of mount.vboxsf made the syscall.  Translate the
         * old parameters to the new structure. */
        struct vbsf_mount_info_old *info_old = (struct vbsf_mount_info_old *)info;
        static struct vbsf_mount_info_new info_compat;

        info = &info_compat;
        memset(info, 0, sizeof(*info));
        memcpy(&info->name, &info_old->name, MAX_HOST_NAME);
        memcpy(&info->nls_name, &info_old->nls_name, MAX_NLS_NAME);
        info->length = offsetof(struct vbsf_mount_info_new, dmode);
        info->uid = info_old->uid;
        info->gid = info_old->gid;
        info->ttl = info_old->ttl;
    }

    info->name[sizeof(info->name) - 1] = 0;
    info->nls_name[sizeof(info->nls_name) - 1] = 0;

    name_len = strlen(info->name);
    if (name_len > 0xfffe)
    {
        err = -ENAMETOOLONG;
        LogFunc(("map name too big\n"));
        goto fail1;
    }

    str_len = offsetof(SHFLSTRING, String.utf8) + name_len + 1;
    str_name = kmalloc(str_len, GFP_KERNEL);
    if (!str_name)
    {
        err = -ENOMEM;
        LogRelFunc(("could not allocate memory for host name\n"));
        goto fail1;
    }

    str_name->u16Length = name_len;
    str_name->u16Size = name_len + 1;
    memcpy(str_name->String.utf8, info->name, name_len + 1);

#define _IS_UTF8(_str)  (strcmp(_str, "utf8") == 0)
#define _IS_EMPTY(_str) (strcmp(_str, "") == 0)

    /* Check if the NLS charset is valid and does not point to the UTF-8 table. */
    if (info->nls_name[0])
    {
        if (_IS_UTF8(info->nls_name))
            sf_g->nls = NULL;
        else
        {
            sf_g->nls = load_nls(info->nls_name);
            if (!sf_g->nls)
            {
                err = -EINVAL;
                LogFunc(("failed to load nls %s\n", info->nls_name));
                kfree(str_name);
                goto fail1;
            }
        }
    }
    else
    {
#ifdef CONFIG_NLS_DEFAULT
        /* If no NLS charset was specified, try to load the default one,
         * unless it points to UTF-8. */
        if (!_IS_UTF8(CONFIG_NLS_DEFAULT) && !_IS_EMPTY(CONFIG_NLS_DEFAULT))
            sf_g->nls = load_nls_default();
        else
            sf_g->nls = NULL;
#else
        sf_g->nls = NULL;
#endif
    }

#undef _IS_UTF8
#undef _IS_EMPTY

    rc = VbglR0SfMapFolder(&client_handle, str_name, &sf_g->map);
    kfree(str_name);
    if (RT_FAILURE(rc))
    {
        err = -EPROTO;
        LogFunc(("VbglR0SfMapFolder failed rc=%d\n", rc));
        goto fail2;
    }

    sf_g->ttl = info->ttl;
    sf_g->uid = info->uid;
    sf_g->gid = info->gid;

    if ((unsigned)info->length >= sizeof(struct vbsf_mount_info_new))
    {
        /* new fields */
        sf_g->dmode = info->dmode;
        sf_g->fmode = info->fmode;
        sf_g->dmask = info->dmask;
        sf_g->fmask = info->fmask;
    }
    else
    {
        sf_g->dmode = ~0;
        sf_g->fmode = ~0;
    }

    *sf_gp = sf_g;
    return 0;

fail2:
    if (sf_g->nls)
        unload_nls(sf_g->nls);

fail1:
    kfree(sf_g);

fail0:
    return err;
}
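For reference, a sketch of the mount information block this function decodes. The field order and types are inferred from the accesses above and from the old-to-new translation; the authoritative definition lives in the mount helper's shared header:

/* Hypothetical sketch inferred from sf_glob_alloc(); not the real header. */
struct vbsf_mount_info_new {
    char nullchar;               /* '\0' -- the old struct began with the name,
                                    so a leading NUL marks the new format */
    char signature[3];           /* VBSF_MOUNT_SIGNATURE_BYTE_0..2 */
    int  length;                 /* size of the struct the mount helper built */
    char name[MAX_HOST_NAME];    /* share name */
    char nls_name[MAX_NLS_NAME]; /* NLS table name, "utf8", or empty */
    int  uid, gid, ttl;
    int  dmode, fmode;           /* forced dir/file mode if != ~0 */
    int  dmask, fmask;           /* dir/file umask */
};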