/**
 * Convert guest absolute VFS path (starting from VFS root) to a host path
 * within mounted shared folder (returning it as a char *).
 *
 * On success the caller owns the returned buffer and must free it with
 * RTMemFree(). *cbHostPath is the string length NOT counting the
 * terminating '\0' (the buffer itself is one byte larger and zero-filled).
 *
 * @param mp           Mount data structure
 * @param pszGuestPath Guest absolute VFS path (starting from VFS root)
 * @param cbGuestPath  Size of pszGuestPath
 * @param pszHostPath  Returned char * which contains host path
 * @param cbHostPath   Returned pszHostPath size
 *
 * @return 0 on success, error code otherwise
 */
int vboxvfs_guest_path_to_char_path_internal(mount_t mp, char *pszGuestPath, int cbGuestPath, char **pszHostPath, int *cbHostPath)
{
    vboxvfs_mount_t *pMount;

    /* Guest side: mount point path buffer and its size */
    char *pszMntPointPath;
    int   cbMntPointPath = MAXPATHLEN;

    /* Host side: path within mounted shared folder and its size */
    char  *pszHostPathInternal;
    size_t cbHostPathInternal;

    int rc;

    AssertReturn(mp, EINVAL);
    AssertReturn(pszGuestPath, EINVAL);
    AssertReturn(cbGuestPath >= 0, EINVAL);
    AssertReturn(pszHostPath, EINVAL);
    AssertReturn(cbHostPath, EINVAL);

    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);
    AssertReturn(pMount, EINVAL);
    AssertReturn(pMount->pRootVnode, EINVAL);

    /* Get mount point path */
    pszMntPointPath = (char *)RTMemAllocZ(cbMntPointPath);
    if (pszMntPointPath)
    {
        rc = vn_getpath(pMount->pRootVnode, pszMntPointPath, &cbMntPointPath);
        if (rc == 0 && cbGuestPath >= cbMntPointPath)
        {
            /* Strip the mount point prefix; +1 byte for the terminating '\0'
             * (RTMemAllocZ zero-fills, so the memcpy below leaves the result
             * NUL-terminated). */
            cbHostPathInternal = cbGuestPath - cbMntPointPath + 1;
            pszHostPathInternal = (char *)RTMemAllocZ(cbHostPathInternal);
            if (pszHostPathInternal)
            {
                memcpy(pszHostPathInternal, pszGuestPath + cbMntPointPath, cbGuestPath - cbMntPointPath);
                PDEBUG("guest<->host path converion result: '%s' mounted to '%s'", pszHostPathInternal, pszMntPointPath);

                RTMemFree(pszMntPointPath);

                *pszHostPath = pszHostPathInternal;
                *cbHostPath  = cbGuestPath - cbMntPointPath;

                return 0;
            }
            else
            {
                PDEBUG("No memory to allocate buffer for guest<->host path conversion (cbHostPathInternal)");
                rc = ENOMEM;
            }
        }
        else
        {
            PDEBUG("Unable to get guest vnode path: %d", rc);
            /* Bug fix: previously, if vn_getpath() succeeded (rc == 0) but
             * the guest path was shorter than the mount point path, we fell
             * through and returned 0 (success) without ever setting the
             * output parameters, leaving the caller with garbage. Report an
             * explicit error instead. */
            if (rc == 0)
                rc = EINVAL;
        }

        RTMemFree(pszMntPointPath);
    }
    else
    {
        PDEBUG("No memory to allocate buffer for guest<->host path conversion (pszMntPointPath)");
        rc = ENOMEM;
    }

    return rc;
}
/*
 * Open the block device backing this vdev via the XNU vnode layer, validate
 * it, and report its size and alignment shift to the caller.
 *
 * On success, 0 is returned, dvd->vd_devvp holds an opened/referenced vnode,
 * and *psize / *max_psize / *ashift are filled in.  On failure an errno is
 * returned, the vnode (if any) is closed, and vs_aux is set to
 * VDEV_AUX_OPEN_FAILED.  A reopen (vd->vdev_reopening) skips straight to the
 * size probe using the already-open vnode.
 */
static int
vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	spa_t *spa = vd->vdev_spa;
	vdev_disk_t *dvd = vd->vdev_tsd;
	vnode_t *devvp = NULLVP;
	vfs_context_t context = NULL;
	uint64_t blkcnt;
	uint32_t blksize;
	int fmode = 0;
	int error = 0;
	/*
	 * Bug fix: isssd must be initialized.  If the DKIOCISSOLIDSTATE
	 * ioctl below fails, the trailing dprintf() would otherwise read
	 * an uninitialized value (undefined behavior).
	 */
	int isssd = 0;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Reopen the device if it's not currently open. Otherwise,
	 * just update the physical size of the device.
	 */
	if (dvd != NULL) {
		if (dvd->vd_offline) {
			/*
			 * If we are opening a device in its offline notify
			 * context, the LDI handle was just closed. Clean
			 * up the LDI event callbacks and free vd->vdev_tsd.
			 */
			vdev_disk_free(vd);
		} else {
			ASSERT(vd->vdev_reopening);
			devvp = dvd->vd_devvp;
			/*
			 * NOTE(review): on this reopen path 'context' remains
			 * NULL for the VNOP_IOCTL() calls after skip_open —
			 * confirm those ioctls tolerate a NULL vfs context.
			 */
			goto skip_open;
		}
	}

	/*
	 * Create vd->vdev_tsd.
	 */
	vdev_disk_alloc(vd);
	dvd = vd->vdev_tsd;

	/*
	 * When opening a disk device, we want to preserve the user's original
	 * intent. We always want to open the device by the path the user gave
	 * us, even if it is one of multiple paths to the same device. But we
	 * also want to be able to survive disks being removed/recabled.
	 * Therefore the sequence of opening devices is:
	 *
	 * 1. Try opening the device by path. For legacy pools without the
	 *    'whole_disk' property, attempt to fix the path by appending 's0'.
	 *
	 * 2. If the devid of the device matches the stored value, return
	 *    success.
	 *
	 * 3. Otherwise, the device may have moved. Try opening the device
	 *    by the devid instead.
	 */
	/* ### APPLE TODO ### */
#ifdef illumos
	if (vd->vdev_devid != NULL) {
		if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid,
		    &dvd->vd_minor) != 0) {
			vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
			return (SET_ERROR(EINVAL));
		}
	}
#endif

	error = EINVAL;		/* presume failure */

	if (vd->vdev_path != NULL) {
		context = vfs_context_create( spl_vfs_context_kernel() );

		/* Obtain an opened/referenced vnode for the device. */
		if ((error = vnode_open(vd->vdev_path, spa_mode(spa), 0, 0,
		    &devvp, context))) {
			goto out;
		}
		if (!vnode_isblk(devvp)) {
			error = ENOTBLK;
			goto out;
		}
		/*
		 * ### APPLE TODO ###
		 * vnode_authorize devvp for KAUTH_VNODE_READ_DATA and
		 * KAUTH_VNODE_WRITE_DATA
		 */

		/*
		 * Disallow opening of a device that is currently in use.
		 * Flush out any old buffers remaining from a previous use.
		 */
		if ((error = vfs_mountedon(devvp))) {
			goto out;
		}
		if (VNOP_FSYNC(devvp, MNT_WAIT, context) != 0) {
			error = ENOTBLK;
			goto out;
		}
		if ((error = buf_invalidateblks(devvp, BUF_WRITE_DATA, 0, 0))) {
			goto out;
		}
	} else {
		goto out;
	}

	/* Cache the resolved device name; failure is non-fatal. */
	int len = MAXPATHLEN;
	if (vn_getpath(devvp, dvd->vd_readlinkname, &len) == 0) {
		dprintf("ZFS: '%s' resolved name is '%s'\n",
		    vd->vdev_path, dvd->vd_readlinkname);
	} else {
		dvd->vd_readlinkname[0] = 0;
	}

skip_open:
	/*
	 * Determine the actual size of the device.
	 */
	if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0,
	    context) != 0 ||
	    VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0,
	    context) != 0) {
		error = EINVAL;
		goto out;
	}

	*psize = blkcnt * (uint64_t)blksize;
	*max_psize = *psize;

	dvd->vd_ashift = highbit(blksize) - 1;
	dprintf("vdev_disk: Device %p ashift set to %d\n", devvp,
	    dvd->vd_ashift);

	*ashift = highbit(MAX(blksize, SPA_MINBLOCKSIZE)) - 1;

	/*
	 * ### APPLE TODO ###
	 */
#ifdef illumos
	if (vd->vdev_wholedisk == 1) {
		int wce = 1;
		if (error == 0) {
			/*
			 * If we have the capability to expand, we'd have
			 * found out via success from DKIOCGMEDIAINFO{,EXT}.
			 * Adjust max_psize upward accordingly since we know
			 * we own the whole disk now.
			 */
			*max_psize += vdev_disk_get_space(vd, capacity, blksz);
			zfs_dbgmsg("capacity change: vdev %s, psize %llu, "
			    "max_psize %llu", vd->vdev_path, *psize,
			    *max_psize);
		}

		/*
		 * Since we own the whole disk, try to enable disk write
		 * caching. We ignore errors because it's OK if we can't do it.
		 */
		(void) ldi_ioctl(dvd->vd_lh, DKIOCSETWCE, (intptr_t)&wce,
		    FKIOCTL, kcred, NULL);
	}
#endif

	/*
	 * Clear the nowritecache bit, so that on a vdev_reopen() we will
	 * try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	/* Inform the ZIO pipeline that we are non-rotational */
	vd->vdev_nonrot = B_FALSE;
	if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0,
	    context) == 0) {
		if (isssd)
			vd->vdev_nonrot = B_TRUE;
	}
	dprintf("ZFS: vdev_disk(%s) isSSD %d\n",
	    vd->vdev_path ? vd->vdev_path : "", isssd);

	dvd->vd_devvp = devvp;

out:
	if (error) {
		if (devvp) {
			/*
			 * NOTE(review): closed with fmode == 0 even though
			 * the open used spa_mode(spa) — confirm the vnode
			 * open/write counts stay balanced.
			 */
			vnode_close(devvp, fmode, context);
			dvd->vd_devvp = NULL;
		}
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
	}
	if (context)
		(void) vfs_context_rele(context);

	if (error)
		printf("ZFS: vdev_disk_open('%s') failed error %d\n",
		    vd->vdev_path ? vd->vdev_path : "", error);
	return (error);
}
/*
 * Open the device backing this vdev through the LDI (Layered Driver
 * Interface), determine its size and minimum transfer size, and register
 * LDI event callbacks.
 *
 * Returns 0 on success with *psize, *max_psize and *ashift filled in;
 * otherwise an errno, with vs_aux set to indicate the failure class.
 * On a reopen (vd->vdev_reopening) the existing LDI handle is reused and
 * control jumps straight to the size probe at skip_open.
 *
 * NOTE(review): much of this body is illumos-only (#ifdef illumos) and is
 * compiled out on Apple builds; the Apple-specific pieces are marked
 * __APPLE__.  Kept byte-identical — documentation changes only.
 */
static int
vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	spa_t *spa = vd->vdev_spa;
	vdev_disk_t *dvd = vd->vdev_tsd;
	ldi_ev_cookie_t ecookie;
	vdev_disk_ldi_cb_t *lcb;
	/* Union lets one buffer serve both media-info ioctl variants. */
	union {
		struct dk_minfo_ext ude;
		struct dk_minfo ud;
	} dks;
	struct dk_minfo_ext *dkmext = &dks.ude;
	struct dk_minfo *dkm = &dks.ud;
	int error;
	/* XXX Apple - must leave devid unchanged */
#ifdef illumos
	dev_t dev;
	int otyp;
	boolean_t validate_devid = B_FALSE;
	ddi_devid_t devid;
#endif
	uint64_t capacity = 0, blksz = 0, pbsize;
#ifdef __APPLE__
	int isssd;
#endif

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Reopen the device if it's not currently open. Otherwise,
	 * just update the physical size of the device.
	 */
	if (dvd != NULL) {
		if (dvd->vd_ldi_offline && dvd->vd_lh == NULL) {
			/*
			 * If we are opening a device in its offline notify
			 * context, the LDI handle was just closed. Clean
			 * up the LDI event callbacks and free vd->vdev_tsd.
			 */
			vdev_disk_free(vd);
		} else {
			ASSERT(vd->vdev_reopening);
			goto skip_open;
		}
	}

	/*
	 * Create vd->vdev_tsd.
	 */
	vdev_disk_alloc(vd);
	dvd = vd->vdev_tsd;

	/*
	 * When opening a disk device, we want to preserve the user's original
	 * intent. We always want to open the device by the path the user gave
	 * us, even if it is one of multiple paths to the same device. But we
	 * also want to be able to survive disks being removed/recabled.
	 * Therefore the sequence of opening devices is:
	 *
	 * 1. Try opening the device by path. For legacy pools without the
	 *    'whole_disk' property, attempt to fix the path by appending 's0'.
	 *
	 * 2. If the devid of the device matches the stored value, return
	 *    success.
	 *
	 * 3. Otherwise, the device may have moved. Try opening the device
	 *    by the devid instead.
	 */
	/*
	 * XXX We must not set or modify the devid as this check would prevent
	 * import on Solaris/illumos.
	 */
#ifdef illumos
	if (vd->vdev_devid != NULL) {
		if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid,
		    &dvd->vd_minor) != 0) {
			vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
			vdev_dbgmsg(vd, "vdev_disk_open: invalid "
			    "vdev_devid '%s'", vd->vdev_devid);
			return (SET_ERROR(EINVAL));
		}
	}
#endif

	error = EINVAL;		/* presume failure */

	if (vd->vdev_path != NULL) {
		/*
		 * XXX This assumes that if vdev_path refers to a device path
		 * /dev/dsk/cNtNdN, then the whole disk can be found by slice 0
		 * at path /dev/dsk/cNtNdNs0.
		 */
#ifdef illumos
		if (vd->vdev_wholedisk == -1ULL) {
			size_t len = strlen(vd->vdev_path) + 3;
			char *buf = kmem_alloc(len, KM_SLEEP);

			(void) snprintf(buf, len, "%ss0", vd->vdev_path);

			error = ldi_open_by_name(buf, spa_mode(spa), kcred,
			    &dvd->vd_lh, zfs_li);
			if (error == 0) {
				spa_strfree(vd->vdev_path);
				vd->vdev_path = buf;
				vd->vdev_wholedisk = 1ULL;
			} else {
				kmem_free(buf, len);
			}
		}
#endif

		/*
		 * If we have not yet opened the device, try to open it by the
		 * specified path.
		 */
		if (error != 0) {
			error = ldi_open_by_name(vd->vdev_path, spa_mode(spa),
			    kcred, &dvd->vd_lh, zfs_li);
		}

		/* XXX Apple - must leave devid unchanged */
#ifdef illumos
		/*
		 * Compare the devid to the stored value.
		 */
		if (error == 0 && vd->vdev_devid != NULL &&
		    ldi_get_devid(dvd->vd_lh, &devid) == 0) {
			if (ddi_devid_compare(devid, dvd->vd_devid) != 0) {
				error = SET_ERROR(EINVAL);
				(void) ldi_close(dvd->vd_lh, spa_mode(spa),
				    kcred);
				dvd->vd_lh = NULL;
			}
			ddi_devid_free(devid);
		}
#endif

		/*
		 * If we succeeded in opening the device, but 'vdev_wholedisk'
		 * is not yet set, then this must be a slice.
		 */
		if (error == 0 && vd->vdev_wholedisk == -1ULL)
			vd->vdev_wholedisk = 0;
	}

	/* XXX Apple - must leave devid unchanged */
#ifdef illumos
	/*
	 * If we were unable to open by path, or the devid check fails, open by
	 * devid instead.
	 */
	if (error != 0 && vd->vdev_devid != NULL) {
		error = ldi_open_by_devid(dvd->vd_devid, dvd->vd_minor,
		    spa_mode(spa), kcred, &dvd->vd_lh, zfs_li);
	}
#endif

	/*
	 * If all else fails, then try opening by physical path (if available)
	 * or the logical path (if we failed due to the devid check). While not
	 * as reliable as the devid, this will give us something, and the higher
	 * level vdev validation will prevent us from opening the wrong device.
	 */
	if (error) {
		/* XXX Apple - must leave devid unchanged */
#ifdef illumos
		if (vd->vdev_devid != NULL)
			validate_devid = B_TRUE;
#endif

		/* XXX Apple to do - make ddi_ interface for this, using IORegistry path */
#ifdef illumos
		if (vd->vdev_physpath != NULL &&
		    (dev = ddi_pathname_to_dev_t(vd->vdev_physpath)) != NODEV)
			error = ldi_open_by_dev(&dev, OTYP_BLK, spa_mode(spa),
			    kcred, &dvd->vd_lh, zfs_li);
#endif

		/*
		 * Note that we don't support the legacy auto-wholedisk support
		 * as above. This hasn't been used in a very long time and we
		 * don't need to propagate its oddities to this edge condition.
		 */
		if (error && vd->vdev_path != NULL)
			error = ldi_open_by_name(vd->vdev_path, spa_mode(spa),
			    kcred, &dvd->vd_lh, zfs_li);
	}

	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		vdev_dbgmsg(vd, "vdev_disk_open: failed to open [error=%d]",
		    error);
		return (error);
	}

	/*
	 * XXX Apple - We must not set or modify the devid. Import on
	 * Solaris/illumos expects a valid devid and fails if it cannot be
	 * decoded.
	 */
#ifdef illumos
	/*
	 * Now that the device has been successfully opened, update the devid
	 * if necessary.
	 */
	if (validate_devid && spa_writeable(spa) &&
	    ldi_get_devid(dvd->vd_lh, &devid) == 0) {
		if (ddi_devid_compare(devid, dvd->vd_devid) != 0) {
			char *vd_devid;

			vd_devid = ddi_devid_str_encode(devid, dvd->vd_minor);
			vdev_dbgmsg(vd, "vdev_disk_open: update devid from "
			    "'%s' to '%s'", vd->vdev_devid, vd_devid);
			spa_strfree(vd->vdev_devid);
			vd->vdev_devid = spa_strdup(vd_devid);
			ddi_devid_str_free(vd_devid);
		}
		ddi_devid_free(devid);
	}
#endif

	/* XXX Apple to do, needs IORegistry physpath interface */
#ifdef illumos
	/*
	 * Once a device is opened, verify that the physical device path (if
	 * available) is up to date.
	 */
	if (ldi_get_dev(dvd->vd_lh, &dev) == 0 &&
	    ldi_get_otyp(dvd->vd_lh, &otyp) == 0) {
		char *physpath, *minorname;

		physpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		minorname = NULL;
		if (ddi_dev_pathname(dev, otyp, physpath) == 0 &&
		    ldi_get_minor_name(dvd->vd_lh, &minorname) == 0 &&
		    (vd->vdev_physpath == NULL ||
		    strcmp(vd->vdev_physpath, physpath) != 0)) {
			if (vd->vdev_physpath)
				spa_strfree(vd->vdev_physpath);
			(void) strlcat(physpath, ":", MAXPATHLEN);
			(void) strlcat(physpath, minorname, MAXPATHLEN);
			vd->vdev_physpath = spa_strdup(physpath);
		}
		if (minorname)
			kmem_free(minorname, strlen(minorname) + 1);
		kmem_free(physpath, MAXPATHLEN);
	}
#endif

	/*
	 * Register callbacks for the LDI offline event.
	 */
	if (ldi_ev_get_cookie(dvd->vd_lh, LDI_EV_OFFLINE, &ecookie) ==
	    LDI_EV_SUCCESS) {
		lcb = kmem_zalloc(sizeof (vdev_disk_ldi_cb_t), KM_SLEEP);
		list_insert_tail(&dvd->vd_ldi_cbs, lcb);
		(void) ldi_ev_register_callbacks(dvd->vd_lh, ecookie,
		    &vdev_disk_off_callb, (void *) vd, &lcb->lcb_id);
	}

	/* XXX Apple to do - we could support the degrade event, or just no-op */
#ifdef illumos
	/*
	 * Register callbacks for the LDI degrade event.
	 */
	if (ldi_ev_get_cookie(dvd->vd_lh, LDI_EV_DEGRADE, &ecookie) ==
	    LDI_EV_SUCCESS) {
		lcb = kmem_zalloc(sizeof (vdev_disk_ldi_cb_t), KM_SLEEP);
		list_insert_tail(&dvd->vd_ldi_cbs, lcb);
		(void) ldi_ev_register_callbacks(dvd->vd_lh, ecookie,
		    &vdev_disk_dgrd_callb, (void *) vd, &lcb->lcb_id);
	}
#endif

	/* Dead code kept for reference: resolved-name caching from the
	 * vnode-based open path (devvp is not used in this LDI variant). */
#if 0
	int len = MAXPATHLEN;
	if (vn_getpath(devvp, dvd->vd_readlinkname, &len) == 0) {
		dprintf("ZFS: '%s' resolved name is '%s'\n",
		    vd->vdev_path, dvd->vd_readlinkname);
	} else {
		dvd->vd_readlinkname[0] = 0;
	}
#endif

skip_open:
	/*
	 * Determine the actual size of the device.
	 */
	if (ldi_get_size(dvd->vd_lh, psize) != 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		vdev_dbgmsg(vd, "vdev_disk_open: failed to get size");
		return (SET_ERROR(EINVAL));
	}

	*max_psize = *psize;

	/*
	 * Determine the device's minimum transfer size.
	 * If the ioctl isn't supported, assume DEV_BSIZE.
	 */
	if ((error = ldi_ioctl(dvd->vd_lh, DKIOCGMEDIAINFOEXT,
	    (intptr_t)dkmext, FKIOCTL, kcred, NULL)) == 0) {
		capacity = dkmext->dki_capacity - 1;
		blksz = dkmext->dki_lbsize;
		pbsize = dkmext->dki_pbsize;
	} else if ((error = ldi_ioctl(dvd->vd_lh, DKIOCGMEDIAINFO,
	    (intptr_t)dkm, FKIOCTL, kcred, NULL)) == 0) {
		VDEV_DEBUG(
		    "vdev_disk_open(\"%s\"): fallback to DKIOCGMEDIAINFO\n",
		    vd->vdev_path);
		capacity = dkm->dki_capacity - 1;
		blksz = dkm->dki_lbsize;
		pbsize = blksz;
	} else {
		VDEV_DEBUG("vdev_disk_open(\"%s\"): "
		    "both DKIOCGMEDIAINFO{,EXT} calls failed, %d\n",
		    vd->vdev_path, error);
		pbsize = DEV_BSIZE;
	}

	*ashift = highbit64(MAX(pbsize, SPA_MINBLOCKSIZE)) - 1;

	/* XXX Now that we opened the device, determine if it is a whole disk. */
#ifdef __APPLE__
	/*
	 * XXX Apple to do - provide an ldi_ mechanism
	 * to report whether this is a whole disk or a
	 * partition.
	 * Return 0 (no), 1 (yes), or -1 (error).
	 */
	// vd->vdev_wholedisk = ldi_is_wholedisk(vd->vd_lh);
#endif

	if (vd->vdev_wholedisk == 1) {
		int wce = 1;

		/* Gets information about the disk if it has GPT partitions */
#ifdef illumos
		if (error == 0) {
			/*
			 * If we have the capability to expand, we'd have
			 * found out via success from DKIOCGMEDIAINFO{,EXT}.
			 * Adjust max_psize upward accordingly since we know
			 * we own the whole disk now.
			 */
			*max_psize = capacity * blksz;
		}
#endif

		/*
		 * Since we own the whole disk, try to enable disk write
		 * caching. We ignore errors because it's OK if we can't do it.
		 */
		(void) ldi_ioctl(dvd->vd_lh, DKIOCSETWCE, (intptr_t)&wce,
		    FKIOCTL, kcred, NULL);
	}

	/*
	 * Clear the nowritecache bit, so that on a vdev_reopen() we will
	 * try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

#ifdef __APPLE__
	/* Inform the ZIO pipeline that we are non-rotational */
	vd->vdev_nonrot = B_FALSE;
	if (ldi_ioctl(dvd->vd_lh, DKIOCISSOLIDSTATE, (intptr_t)&isssd,
	    FKIOCTL, kcred, NULL) == 0) {
		vd->vdev_nonrot = (isssd ? B_TRUE : B_FALSE);
	}
#endif //__APPLE__

	return (0);
}
/*
 * MAC policy hook fired on exec: captures a process event (pid, credentials,
 * file metadata, path, and a bounded copy of the argv/envv vectors) and
 * publishes it to the osquery circular queue.
 *
 * Always returns 0 (never blocks the exec); on any failure the event is
 * simply dropped.  Most parameters are required by the MAC hook signature
 * and are unused here.
 */
static int process_cred_label_update_execvew(kauth_cred_t old_cred,
                                             kauth_cred_t new_cred,
                                             struct proc *p,
                                             struct vnode *vp,
                                             off_t offset,
                                             struct vnode *scriptvp,
                                             struct label *vnodelabel,
                                             struct label *scriptvnodelabel,
                                             struct label *execlabel,
                                             u_int *csflags,
                                             void *macpolicyattr,
                                             size_t macpolicyattrlen,
                                             int *disjointp) {
  int path_len = MAXPATHLEN;

  /* Only record executions of regular files. */
  if (!vnode_isreg(vp)) {
    goto error_exit;
  }

  // Determine address of image_params based off of csflags pointer. (HACKY)
  // NOTE(review): this depends on csflags pointing into the kernel's
  // struct image_params (ip_csflags member) — fragile across XNU versions;
  // confirm the struct layout matches the running kernel.
  struct image_params *img =
      (struct image_params *)((char *)csflags -
                              offsetof(struct image_params, ip_csflags));

  // Find the length of arg and env we will copy.
  // NOTE(review): presumably ip_startargv <= ip_endargv <= ip_endenvv; if
  // not, the subtraction wraps but MIN caps the copy at MAX_VECTOR_LENGTH.
  size_t arg_length =
      MIN(MAX_VECTOR_LENGTH, img->ip_endargv - img->ip_startargv);
  size_t env_length = MIN(MAX_VECTOR_LENGTH, img->ip_endenvv - img->ip_endargv);

  /* Reserve one variable-size slot in the circular queue; NULL means the
   * queue is full and the event is dropped. */
  osquery_process_event_t *e =
      (osquery_process_event_t *)osquery_cqueue_reserve(
          cqueue,
          OSQUERY_PROCESS_EVENT,
          sizeof(osquery_process_event_t) + arg_length + env_length);
  if (!e) {
    goto error_exit;
  }
  // Copy the arg and env vectors.
  e->argv_offset = 0;
  e->envv_offset = arg_length;
  e->arg_length = arg_length;
  e->env_length = env_length;
  memcpy(&(e->flexible_data[e->argv_offset]), img->ip_startargv, arg_length);
  memcpy(&(e->flexible_data[e->envv_offset]), img->ip_endargv, env_length);

  e->actual_argc = img->ip_argc;
  e->actual_envc = img->ip_envc;

  // Calculate our argc and envc based on the number of null bytes we find in
  // the buffer.
  e->argc = MIN(e->actual_argc,
                str_num(&(e->flexible_data[e->argv_offset]), arg_length));
  e->envc = MIN(e->actual_envc,
                str_num(&(e->flexible_data[e->envv_offset]), env_length));

  e->pid = proc_pid(p);
  e->ppid = proc_ppid(p);

  /* Defaults in case the attribute lookup below fails. */
  e->owner_uid = 0;
  e->owner_gid = 0;
  e->mode = -1;

  /* Best-effort: fetch ownership, mode, and timestamps of the executable. */
  vfs_context_t context = vfs_context_create(NULL);
  if (context) {
    struct vnode_attr vattr = {0};
    VATTR_INIT(&vattr);
    VATTR_WANTED(&vattr, va_uid);
    VATTR_WANTED(&vattr, va_gid);
    VATTR_WANTED(&vattr, va_mode);
    VATTR_WANTED(&vattr, va_create_time);
    VATTR_WANTED(&vattr, va_access_time);
    VATTR_WANTED(&vattr, va_modify_time);
    VATTR_WANTED(&vattr, va_change_time);

    if (vnode_getattr(vp, &vattr, context) == 0) {
      e->owner_uid = vattr.va_uid;
      e->owner_gid = vattr.va_gid;
      e->mode = vattr.va_mode;
      e->create_time = vattr.va_create_time.tv_sec;
      e->access_time = vattr.va_access_time.tv_sec;
      e->modify_time = vattr.va_modify_time.tv_sec;
      e->change_time = vattr.va_change_time.tv_sec;
    }
    vfs_context_rele(context);
  }

  /* Real and effective uid/gid from the post-exec credential. */
  e->uid = kauth_cred_getruid(new_cred);
  e->euid = kauth_cred_getuid(new_cred);
  e->gid = kauth_cred_getrgid(new_cred);
  e->egid = kauth_cred_getgid(new_cred);

  /* NOTE(review): return value ignored — on failure e->path may hold
   * whatever the reserved slot contained; confirm the queue zeroes slots. */
  vn_getpath(vp, e->path, &path_len);

  osquery_cqueue_commit(cqueue, e);
error_exit:

  return 0;
}
/*
 * Process-execution listener: records pid/ppid/uid, timestamp, and the
 * executable path of a newly exec'd process into the global ring buffer
 * g_processes under g_collector_1_mutex.
 *
 * Built two ways: with _USE_KAUTH this is a kauth scope callback (the
 * kauth-variant signature, and its arg0/arg1/action parameters, are declared
 * in the #if branch above this chunk); without it, it is a MAC-style hook
 * with the signature below.  Never blocks execution: returns
 * KAUTH_RESULT_DEFER / 0 in all cases.
 */
static int new_proc_listener
(
    kauth_cred_t cred,
    struct vnode *vp,

    struct vnode *scriptvp,
    struct label *vnodelabel,
    struct label *scriptlabel,
    struct label *execlabel,

    struct componentname *cnp,
    u_int *csflags,
    void *macpolicyattr,
    size_t macpolicyattrlen
)
#endif
{
#ifdef _USE_KAUTH
    vnode_t prog = (vnode_t)arg0;
    const char* file_path = (const char*)arg1;
#else
    int pathLen = sizeof( g_processes[ 0 ].path );
#endif
    pid_t pid = 0;
    pid_t ppid = 0;
    uid_t uid = 0;

#ifdef _USE_KAUTH
    /* Only interested in exec events of regular files. */
    if( KAUTH_FILEOP_EXEC != action ||
        ( NULL != prog &&
          VREG != vnode_vtype( prog ) ) )
    {
        return KAUTH_RESULT_DEFER;
    }
#endif

    uid = kauth_getuid();
    pid = proc_selfpid();
    ppid = proc_selfppid();

    // We skip a known false positive
    if( 0 == ppid && 1 == pid )
    {
#ifdef _USE_KAUTH
        return KAUTH_RESULT_DEFER;
#else
        return 0; // Always allow
#endif
    }

    if( NULL != file_path )
    {
        // rpal_debug_info( "!!!!!! process start: %d/%d/%d %s", ppid, pid, uid, file_path );
    }

    /* Serialize access to the shared g_processes ring buffer. */
    rpal_mutex_lock( g_collector_1_mutex );

#ifdef _USE_KAUTH
    if( NULL != file_path )
    {
        /* NOTE(review): strncpy does not guarantee NUL termination on
         * truncation; presumably g_processes entries are pre-zeroed —
         * confirm. */
        strncpy( g_processes[ g_nextProcess ].path,
                 file_path,
                 sizeof( g_processes[ g_nextProcess ].path ) - 1 );
    }
#else
    /* Return value ignored: on failure the path slot keeps its old value. */
    vn_getpath( vp, g_processes[ g_nextProcess ].path, &pathLen );
#endif

    g_processes[ g_nextProcess ].pid = pid;
    g_processes[ g_nextProcess ].ppid = ppid;
    g_processes[ g_nextProcess ].uid = uid;
    g_processes[ g_nextProcess ].ts = rpal_time_getLocal();

    g_nextProcess++;

    /* Ring buffer wrap-around: oldest entries are overwritten. */
    if( g_nextProcess == _NUM_BUFFERED_PROCESSES )
    {
        g_nextProcess = 0;
        rpal_debug_warning( "overflow of the execution buffer" );
    }

    // rpal_debug_info( "now %d processes in buffer", g_nextProcess );

    rpal_mutex_unlock( g_collector_1_mutex );

#ifdef _USE_KAUTH
    return KAUTH_RESULT_DEFER;
#else
    return 0; // Always allow
#endif
}
/*
 * mremap_encrypted system call: remap a page-aligned, file-backed range of
 * the calling process through an "apple protect" pager so that encrypted
 * __TEXT segments are transparently decrypted on fault.
 *
 * Flow: validate alignment and cryptid -> resolve the backing vnode of the
 * range -> obtain its path (passed to the decrypter) -> create a decrypter
 * via text_crypter_create -> vm_map_apple_protected() to install it.
 *
 * Returns 0 on success; EINVAL for bad alignment/cryptid/unbacked range,
 * ENOTSUP if no crypter is registered, ENOMEM/EPERM on decrypter or
 * mapping failure.  cryptid 0 is a no-op (unencrypted load command).
 */
int
mremap_encrypted(__unused struct proc *p, struct mremap_encrypted_args *uap,
                 __unused int32_t *retval)
{
	mach_vm_offset_t	user_addr;
	mach_vm_size_t	user_size;
	kern_return_t	result;
	vm_map_t	user_map;
	uint32_t	cryptid;
	cpu_type_t	cputype;
	cpu_subtype_t	cpusubtype;
	pager_crypt_info_t	crypt_info;
	const char * cryptname = 0;
	char *vpath;
	int len, ret;
	struct proc_regioninfo_internal pinfo;
	vnode_t vp;
	uintptr_t vnodeaddr;
	uint32_t vid;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	user_map = current_map();
	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;

	cryptid = uap->cryptid;
	cputype = uap->cputype;
	cpusubtype = uap->cpusubtype;

	if (user_addr & vm_map_page_mask(user_map)) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}

	/* Map the cryptid to a registered crypter name. */
	switch(cryptid) {
		case 0:
			/* not encrypted, just an empty load command */
			return 0;
		case 1:
			cryptname="com.apple.unfree";
			break;
		case 0x10:
			/* some random cryptid that you could manually put into
			 * your binary if you want NULL */
			cryptname="com.apple.null";
			break;
		default:
			return EINVAL;
	}

	/* No crypter factory has been registered by any kext. */
	if (NULL == text_crypter_create) return ENOTSUP;

	ret = fill_procregioninfo_onlymappedvnodes( proc_task(p), user_addr, &pinfo, &vnodeaddr, &vid);
	if (ret == 0 || !vnodeaddr) {
		/* No really, this returns 0 if the memory address is not backed by a file */
		return (EINVAL);
	}

	/* Take a ref on the vnode (validated by vid) just long enough to
	 * copy out its path; vpath outlives the vnode reference. */
	vp = (vnode_t)vnodeaddr;
	if ((vnode_getwithvid(vp, vid)) == 0) {
		MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
		if(vpath == NULL) {
			vnode_put(vp);
			return (ENOMEM);
		}

		len = MAXPATHLEN;
		ret = vn_getpath(vp, vpath, &len);
		if(ret) {
			FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
			vnode_put(vp);
			return (ret);
		}
		vnode_put(vp);
	} else {
		return (EINVAL);
	}

#if 0
	kprintf("%s vpath %s cryptid 0x%08x cputype 0x%08x cpusubtype 0x%08x range 0x%016llx size 0x%016llx\n",
			__FUNCTION__, vpath, cryptid, cputype, cpusubtype, (uint64_t)user_addr, (uint64_t)user_size);
#endif

	/* set up decrypter first */
	crypt_file_data_t crypt_data = {
		.filename = vpath,
		.cputype = cputype,
		.cpusubtype = cpusubtype };
	result = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data);
#if VM_MAP_DEBUG_APPLE_PROTECT
	if (vm_map_debug_apple_protect) {
		printf("APPLE_PROTECT: %d[%s] map %p [0x%llx:0x%llx] %s(%s) -> 0x%x\n",
		       p->p_pid, p->p_comm,
		       user_map,
		       (uint64_t) user_addr,
		       (uint64_t) (user_addr + user_size),
		       __FUNCTION__, vpath, result);
	}
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
	/* Path is only needed for decrypter creation; free it either way. */
	FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);

	if(result) {
		printf("%s: unable to create decrypter %s, kr=%d\n",
		       __FUNCTION__, cryptname, result);
		if (result == kIOReturnNotPrivileged) {
			/* text encryption returned decryption failure */
			return (EPERM);
		} else {
			return (ENOMEM);
		}
	}

	/* now remap using the decrypter */
	vm_object_offset_t crypto_backing_offset;
	crypto_backing_offset = -1;	/* i.e. use map entry's offset */
	result = vm_map_apple_protected(user_map,
					user_addr,
					user_addr+user_size,
					crypto_backing_offset,
					&crypt_info);
	if (result) {
		printf("%s: mapping failed with %d\n", __FUNCTION__, result);
	}

	if (result) {
		return (EPERM);
	}
	return 0;
}