/**
 * Device open. Called when /dev/vboxnetctl is opened.
 *
 * @param   Dev         The device number.
 * @param   fFlags      Open flags.
 * @param   fDevType    The device type.
 * @param   pProcess    The process issuing this request.
 */
static int VBoxNetAdpDarwinOpen(dev_t Dev, int fFlags, int fDevType, struct proc *pProcess)
{
    char szName[128];
    szName[0] = '\0';
    proc_name(proc_pid(pProcess), szName, sizeof(szName));
    Log(("VBoxNetAdpDarwinOpen: pid=%d '%s'\n", proc_pid(pProcess), szName));
    return 0;
}
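A note on the API shown here: proc_pid() and this variant of proc_name() are kernel KPIs. User space on macOS has a libproc counterpart of a similar shape; the following is a minimal sketch under that assumption, using only the documented <libproc.h> interface and the caller's own PID as a stand-in.

#include <stdio.h>
#include <unistd.h>
#include <libproc.h>

int main(void)
{
    pid_t pid = getpid();   /* any PID you are allowed to inspect */
    char  szName[128];

    szName[0] = '\0';
    /* Like the kernel handler above: resolve a PID to a process name. */
    proc_name((int)pid, szName, sizeof(szName));
    printf("pid=%d '%s'\n", (int)pid, szName);
    return 0;
}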
Example 2
/* Log information about external modification of a process,
 * using MessageTracer formatting. Assumes that both the caller
 * and target are appropriately locked.
 * Currently prints the following information:
 *  1. Caller process name (truncated to 16 characters)
 *  2. Caller process Mach-O UUID
 *  3. Target process name (truncated to 16 characters)
 *  4. Target process Mach-O UUID
 */
void
fslog_extmod_msgtracer(proc_t caller, proc_t target)
{
	if ((caller != PROC_NULL) && (target != PROC_NULL)) {

		/*
		 * Print into buffer large enough for "ThisIsAnApplicat(BC223DD7-B314-42E0-B6B0-C5D2E6638337)",
		 * including space for escaping, and NUL byte included in sizeof(uuid_string_t).
		 */

		uuid_string_t uuidstr;
		char c_name[2*MAXCOMLEN + 2 /* () */ + sizeof(uuid_string_t)];
		char t_name[2*MAXCOMLEN + 2 /* () */ + sizeof(uuid_string_t)];

		strlcpy(c_name, caller->p_comm, sizeof(c_name));
		uuid_unparse_upper(caller->p_uuid, uuidstr);
		strlcat(c_name, "(", sizeof(c_name));
		strlcat(c_name, uuidstr, sizeof(c_name));
		strlcat(c_name, ")", sizeof(c_name));
		if (0 != escape_str(c_name, strlen(c_name), sizeof(c_name))) {
			return;
		}

		strlcpy(t_name, target->p_comm, sizeof(t_name));
		uuid_unparse_upper(target->p_uuid, uuidstr);
		strlcat(t_name, "(", sizeof(t_name));
		strlcat(t_name, uuidstr, sizeof(t_name));
		strlcat(t_name, ")", sizeof(t_name));
		if (0 != escape_str(t_name, strlen(t_name), sizeof(t_name))) {
			return;
		}
#if DEBUG
		printf("EXTMOD: %s(%d) -> %s(%d)\n",
			   c_name,
			   proc_pid(caller),
			   t_name,
			   proc_pid(target));
#endif

		kern_asl_msg(LOG_DEBUG, "messagetracer",
							5,
							"com.apple.message.domain", "com.apple.kernel.external_modification", /* 0 */
							"com.apple.message.signature", c_name, /* 1 */
							"com.apple.message.signature2", t_name, /* 2 */
							"com.apple.message.result", "noop", /* 3 */
							"com.apple.message.summarize", "YES", /* 4 */
							NULL);
	}
}
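The "name(UUID)" signature built above can be reproduced in user space with the uuid(3) API; below is a small sketch of just the formatting step, assuming macOS/BSD strlcpy()/strlcat() and the uuid_string_t typedef from <uuid/uuid.h> (the literal name and the generated UUID are placeholders, not real process data).

#include <stdio.h>
#include <string.h>
#include <sys/param.h>   /* MAXCOMLEN */
#include <uuid/uuid.h>

int main(void)
{
    uuid_t        uuid;
    uuid_string_t uuidstr;
    char          name[2*MAXCOMLEN + 2 /* () */ + sizeof(uuid_string_t)];

    uuid_generate(uuid);                  /* stand-in for a process Mach-O UUID */
    uuid_unparse_upper(uuid, uuidstr);

    strlcpy(name, "ThisIsAnApplicat", sizeof(name));
    strlcat(name, "(", sizeof(name));
    strlcat(name, uuidstr, sizeof(name));
    strlcat(name, ")", sizeof(name));

    printf("%s\n", name);                 /* e.g. ThisIsAnApplicat(BC22...) */
    return 0;
}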
Example 3
static void
ktrace_set_owning_proc(proc_t p)
{
	lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);
	assert(p);

	if (ktrace_state != KTRACE_STATE_FG) {
		if (proc_uniqueid(p) == ktrace_bg_unique_id) {
			ktrace_state = KTRACE_STATE_BG;
		} else {
			if (ktrace_state == KTRACE_STATE_BG) {
				if (ktrace_active_mask & KTRACE_KPERF) {
					kperf_reset();
				}
				if (ktrace_active_mask & KTRACE_KDEBUG) {
					kdebug_reset();
				}

				ktrace_active_mask = 0;
			}
			ktrace_state = KTRACE_STATE_FG;
			should_notify_on_init = FALSE;
		}
	}

	ktrace_owning_unique_id = proc_uniqueid(p);
	ktrace_owning_pid = proc_pid(p);
	strlcpy(ktrace_last_owner_execname, proc_name_address(p),
		sizeof(ktrace_last_owner_execname));
}
Example 4
term_t bif_spawn0_1(term_t F, process_t *ctx)
{
	process_t *proc;
	term_t mod, fun, args = nil;
	term_t cons = nil;
	term_t fridge;
	int i, nfree;

	if (!is_fun(F))
		return A_BADARG;

	fridge = fun_fridge(F);
	nfree = int_value2(tup_size(fridge));

	if (int_value2(fun_arity(F)) != nfree)
		return A_BADARG;

	for (i = 0; i < nfree; i++)
		lst_add(args, cons, tup_elts(fridge)[i], proc_gc_pool(ctx));

	mod = fun_amod(F);
	fun = fun_afun(F);

	proc = proc_spawn(proc_code_base(ctx), proc_atoms(ctx), mod, fun, args);
	if (proc == 0)
		return A_BADARG;

	result(proc_pid(proc, proc_gc_pool(ctx)));
	return AI_OK;
}
Example 5
/**
 * Device I/O Control entry point.
 *
 * @returns Darwin errno for slow IOCtls and VBox status code for the fast ones.
 * @param   Dev         The device number (major+minor).
 * @param   iCmd        The IOCtl command.
 * @param   pData       Pointer to the data (if any, it's a VBOXGUESTIOCTLDATA (kernel copy)).
 * @param   fFlags      Flag saying we're a character device (like we didn't know already).
 * @param   pProcess    The process issuing this request.
 */
static int VbgdDarwinIOCtl(dev_t Dev, u_long iCmd, caddr_t pData, int fFlags, struct proc *pProcess)
{
    //const bool          fUnrestricted = minor(Dev) == 0;
    const RTPROCESS     Process = proc_pid(pProcess);
    const unsigned      iHash = SESSION_HASH(Process);
    PVBOXGUESTSESSION   pSession;

    /*
     * Find the session.
     */
    RTSpinlockAcquire(g_Spinlock);
    pSession = g_apSessionHashTab[iHash];
    while (pSession && pSession->Process != Process && (/*later: pSession->fUnrestricted != fUnrestricted ||*/  !pSession->fOpened))
        pSession = pSession->pNextHash;
    RTSpinlockRelease(g_Spinlock);
    if (!pSession)
    {
        Log(("VBoxDrvDarwinIOCtl: WHAT?!? pSession == NULL! This must be a mistake... pid=%d iCmd=%#lx\n",
             (int)Process, iCmd));
        return EINVAL;
    }

    /*
     * No high speed IOCtls here yet.
     */

    return VbgdDarwinIOCtlSlow(pSession, iCmd, pData, pProcess);
}
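For reference, the request that reaches this handler originates from a plain open()/ioctl() pair in user space. The sketch below is hypothetical where noted: the command macro and request structure stand in for the real definitions from the VirtualBox Guest Additions headers, which are not shown here.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

/* Hypothetical placeholders; the real command numbers and request layout
   come from the VirtualBox Guest Additions headers. */
struct example_req { unsigned int u32Value; };
#define VBOXGUEST_IOCTL_EXAMPLE  _IOWR('V', 0, struct example_req)

int main(void)
{
    struct example_req Req = { 0 };
    int fd = open("/dev/vboxguest", O_RDWR);
    if (fd < 0) {
        perror("open /dev/vboxguest");
        return 1;
    }
    /* The kernel side looks up the session for proc_pid(pProcess) and
       dispatches the request in VbgdDarwinIOCtl()/VbgdDarwinIOCtlSlow(). */
    if (ioctl(fd, VBOXGUEST_IOCTL_EXAMPLE, &Req) < 0)
        perror("ioctl");
    close(fd);
    return 0;
}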
Example 6
/**
 * Device I/O Control entry point.
 *
 * @returns Darwin errno for slow IOCtls and VBox status code for the fast ones.
 * @param   Dev         The device number (major+minor).
 * @param   iCmd        The IOCtl command.
 * @param   pData       Pointer to the data (if any, it's a SUPDRVIOCTLDATA (kernel copy)).
 * @param   fFlags      Flag saying we're a character device (like we didn't know already).
 * @param   pProcess    The process issuing this request.
 */
static int VBoxDrvDarwinIOCtl(dev_t Dev, u_long iCmd, caddr_t pData, int fFlags, struct proc *pProcess)
{
    const bool          fUnrestricted = minor(Dev) == 0;
    const RTPROCESS     Process = proc_pid(pProcess);
    const unsigned      iHash = SESSION_HASH(Process);
    PSUPDRVSESSION      pSession;

    /*
     * Find the session.
     */
    RTSpinlockAcquire(g_Spinlock);
    pSession = g_apSessionHashTab[iHash];
    while (pSession && pSession->Process != Process && pSession->fUnrestricted == fUnrestricted && pSession->fOpened)
        pSession = pSession->pNextHash;
    RTSpinlockReleaseNoInts(g_Spinlock);
    if (!pSession)
    {
        OSDBGPRINT(("VBoxDrvDarwinIOCtl: WHAT?!? pSession == NULL! This must be a mistake... pid=%d iCmd=%#lx\n",
                    (int)Process, iCmd));
        return EINVAL;
    }

    /*
     * Deal with the high-speed IOCtls that take their arguments from
     * the session and iCmd, and only return a VBox status code.
     */
    if (   (    iCmd == SUP_IOCTL_FAST_DO_RAW_RUN
            ||  iCmd == SUP_IOCTL_FAST_DO_HM_RUN
            ||  iCmd == SUP_IOCTL_FAST_DO_NOP)
        && fUnrestricted)
        return supdrvIOCtlFast(iCmd, *(uint32_t *)pData, &g_DevExt, pSession);
    return VBoxDrvDarwinIOCtlSlow(pSession, iCmd, pData, pProcess);
}
Example 7
IOReturn RootDomainUserClient::secureSleepSystemOptions(
    const void      *inOptions,
    IOByteCount     inOptionsSize,
    uint32_t        *returnCode)
{

    int             local_priv = 0;
    int             admin_priv = 0;
    IOReturn        ret = kIOReturnNotPrivileged;
    OSDictionary    *unserializedOptions =  NULL;
    OSString        *unserializeErrorString = NULL;

    ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeLocalUser);
    local_priv = (kIOReturnSuccess == ret);

    ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator);
    admin_priv = (kIOReturnSuccess == ret);


    if (inOptions)
    {
        unserializedOptions = OSDynamicCast( OSDictionary,
                                             OSUnserializeXML((const char *)inOptions, inOptionsSize, &unserializeErrorString));

        if (!unserializedOptions) {
            IOLog("IOPMRootDomain SleepSystem unserialization failure: %s\n",
                unserializeErrorString ? unserializeErrorString->getCStringNoCopy() : "Unknown");
        }
    }

    if ( (local_priv || admin_priv) && fOwner )
    {
        proc_t p;
        p = (proc_t)get_bsdtask_info(fOwningTask);
        if (p) {
            fOwner->setProperty("SleepRequestedByPID", proc_pid(p), 32);
        }

        if (unserializedOptions)
        {
            // Publish Sleep Options in registry under root_domain
            fOwner->setProperty( kRootDomainSleepOptionsKey, unserializedOptions);

            *returnCode = fOwner->sleepSystemOptions( unserializedOptions );

            unserializedOptions->release();
        } else {
            // No options
            // Clear any pre-existing options
            fOwner->removeProperty( kRootDomainSleepOptionsKey );

            *returnCode = fOwner->sleepSystemOptions( NULL );
        }

    } else {
        *returnCode = kIOReturnNotPrivileged;
    }

    return kIOReturnSuccess;
}
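The user-space route into the root domain user client is the public IOPMLib API; below is a minimal sketch (build with -framework IOKit). Whether IOPMSleepSystem() lands in secureSleepSystemOptions() or a sibling sleep method is an implementation detail of the particular OS build, so treat this only as an illustration of how the owning task gets populated.

#include <stdio.h>
#include <mach/mach.h>
#include <IOKit/IOKitLib.h>
#include <IOKit/pwr_mgt/IOPMLib.h>

int main(void)
{
    /* Opens a connection whose kernel-side object is the root domain's
       user client; the caller's task is what the privilege checks above see. */
    io_connect_t fb = IOPMFindPowerManagement(kIOMasterPortDefault);
    if (fb == MACH_PORT_NULL) {
        fprintf(stderr, "IOPMFindPowerManagement failed\n");
        return 1;
    }
    /* Requires the same local-user/administrator privilege checked above,
       and really does put the machine to sleep on success. */
    IOReturn ret = IOPMSleepSystem(fb);
    printf("IOPMSleepSystem -> 0x%x\n", ret);
    IOServiceClose(fb);
    return 0;
}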
Example 8
/*
 * pid_for_task
 *
 * Find the BSD process ID for the Mach task associated with the given Mach port 
 * name
 *
 * Parameters:	args		User argument descriptor (see below)
 *
 * Indirect parameters:	args->t		Mach port name
 * 			args->pid	Process ID (returned value; see below)
 *
 * Returns:	KERN_SUCCESS	Success
 * 		KERN_FAILURE	Not success           
 *
 * Implicit returns: args->pid		Process ID
 *
 */
kern_return_t
pid_for_task(
	struct pid_for_task_args *args)
{
	mach_port_name_t	t = args->t;
	user_addr_t		pid_addr  = args->pid;  
	proc_t p;
	task_t		t1;
	int	pid = -1;
	kern_return_t	err = KERN_SUCCESS;

	AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
	AUDIT_ARG(mach_port1, t);

	t1 = port_name_to_task(t);

	if (t1 == TASK_NULL) {
		err = KERN_FAILURE;
		goto pftout;
	} else {
		p = get_bsdtask_info(t1);
		if (p) {
			pid  = proc_pid(p);
			err = KERN_SUCCESS;
		} else {
			err = KERN_FAILURE;
		}
	}
	task_deallocate(t1);
pftout:
	AUDIT_ARG(pid, pid);
	(void) copyout((char *) &pid, pid_addr, sizeof(int));
	AUDIT_MACH_SYSCALL_EXIT(err);
	return(err);
}
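The trap above is callable directly from user space; here is a tiny sketch, declaring the prototype explicitly in case the SDK headers in use do not expose it.

#include <stdio.h>
#include <unistd.h>
#include <mach/mach.h>

/* User-space stub for the Mach trap implemented above. */
extern kern_return_t pid_for_task(mach_port_name_t t, int *pid);

int main(void)
{
    int pid = -1;
    kern_return_t kr = pid_for_task(mach_task_self(), &pid);
    if (kr != KERN_SUCCESS) {
        fprintf(stderr, "pid_for_task failed: %d\n", kr);
        return 1;
    }
    /* For our own task port this is simply our own PID. */
    printf("pid_for_task=%d getpid=%d\n", pid, (int)getpid());
    return 0;
}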
Example 9
/*
 *	Initialize all of the debugging state in a port.
 *	Insert the port into a global list of all allocated ports.
 */
void
ipc_port_init_debug(
	ipc_port_t	port,
	uintptr_t 	*callstack,
	unsigned int	callstack_max)
{
	unsigned int	i;

	port->ip_thread = current_thread();
	port->ip_timetrack = port_timestamp++;
	for (i = 0; i < callstack_max; ++i)
		port->ip_callstack[i] = callstack[i];	
	for (i = 0; i < IP_NSPARES; ++i)
		port->ip_spares[i] = 0;	

#ifdef MACH_BSD
	task_t task = current_task();
	if (task != TASK_NULL) {
		struct proc* proc = (struct proc*) get_bsdtask_info(task);
		if (proc)
			port->ip_spares[0] = proc_pid(proc);
	}
#endif /* MACH_BSD */

#if 0
	lck_spin_lock(&port_alloc_queue_lock);
	++port_count;
	if (port_count_warning > 0 && port_count >= port_count_warning)
		assert(port_count < port_count_warning);
	queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links);
	lck_spin_unlock(&port_alloc_queue_lock);
#endif
}
Example 10
DldIPCUserClient* DldIPCUserClient::withTask( __in task_t owningTask, __in bool trustedClient, uid_t proc_uid )
{
    DldIPCUserClient* client;
    
    DBG_PRINT(("DldIPCUserClient::withTask( %p, %d )\n", (void*)owningTask, (int)trustedClient ));
    
    client = new DldIPCUserClient();
    if( !client )
        return NULL;
    
    if (client->init() == false) {
        
        client->release();
        return NULL;
    }

    client->fClientUID = proc_uid;
    client->fClient = owningTask;
    client->trustedClient = trustedClient;
    client->fClientProc = DldTaskToBsdProc( owningTask );
    assert( client->fClientProc );
    if( client->fClientProc )
        client->fClientPID = proc_pid( client->fClientProc );
    
    return client;
}
Example 11
bool IOHIDEventSystemUserClient::
initWithTask(task_t owningTask, void * /* security_id */, UInt32 /* type */)
{
    bool result = false;
    
    OSObject* entitlement = copyClientEntitlement(owningTask, kIOHIDSystemUserAccessServiceEntitlement);
    if (entitlement) {
        result = (entitlement == kOSBooleanTrue);
        entitlement->release();
    }
    if (!result) {
        proc_t      process;
        process = (proc_t)get_bsdtask_info(owningTask);
        char name[255];
        bzero(name, sizeof(name));
        proc_name(proc_pid(process), name, sizeof(name));
        HIDLogError("%s is not entitled", name);
        goto exit;
    }
    
    result = super::init();
    require_action(result, exit, HIDLogError("failed"));
    
exit:
    return result;
}
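initWithTask() runs when some process calls IOServiceOpen(); the owningTask argument checked for the entitlement above is that caller's task port. A hedged sketch follows (build with -framework IOKit): the service class name "IOHIDSystem" and connection type 0 are illustrative placeholders, not taken from this driver's documentation.

#include <stdio.h>
#include <mach/mach.h>
#include <IOKit/IOKitLib.h>

int main(void)
{
    io_service_t svc = IOServiceGetMatchingService(kIOMasterPortDefault,
                                                   IOServiceMatching("IOHIDSystem"));
    if (svc == IO_OBJECT_NULL) {
        fprintf(stderr, "service not found\n");
        return 1;
    }

    /* mach_task_self() is what arrives as owningTask in initWithTask(). */
    io_connect_t conn = IO_OBJECT_NULL;
    kern_return_t kr = IOServiceOpen(svc, mach_task_self(), 0, &conn);
    printf("IOServiceOpen -> 0x%x\n", kr);

    if (conn != IO_OBJECT_NULL)
        IOServiceClose(conn);
    IOObjectRelease(svc);
    return 0;
}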
Example 12
/*
 * /proc/pid/fd needs a special permission handler so that a process can still
 * access /proc/self/fd after it has executed a setuid().
 */
int proc_fd_permission(struct inode *inode, int mask)
{
	int rv = generic_permission(inode, mask);
	if (rv == 0)
		return 0;
	if (task_tgid(current) == proc_pid(inode))
		rv = 0;
	return rv;
}
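The effect can be observed from user space: after a uid change the /proc/<pid> entries may no longer pass generic_permission() for the new uid, yet the task can still read its own fd directory thanks to this handler. A hedged Linux sketch (start it as root so setuid() succeeds; uid 65534 is just an example):

#include <stdio.h>
#include <unistd.h>
#include <dirent.h>

int main(void)
{
    /* Drop privileges; after this, /proc/self may be owned by root with
       restrictive modes, depending on the dumpable setting. */
    if (setuid(65534) != 0)
        perror("setuid");

    /* proc_fd_permission() still lets the task into its own fd directory. */
    DIR *d = opendir("/proc/self/fd");
    if (!d) {
        perror("opendir /proc/self/fd");
        return 1;
    }
    struct dirent *de;
    while ((de = readdir(d)) != NULL)
        printf("fd entry: %s\n", de->d_name);
    closedir(d);
    return 0;
}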
Example 13
/**
 * Close device.
 */
static int VbgdDarwinClose(dev_t Dev, int fFlags, int fDevType, struct proc *pProcess)
{
    Log(("VbgdDarwinClose: pid=%d\n", (int)RTProcSelf()));
    Assert(proc_pid(pProcess) == (int)RTProcSelf());

    /*
     * Hand the session closing to org_virtualbox_VBoxGuestClient.
     */
    org_virtualbox_VBoxGuestClient::sessionClose(RTProcSelf());
    return 0;
}
Example 14
term_t bif_spawn0_3(term_t Mod, term_t Fun, term_t Args, process_t *ctx)
{
	process_t *proc;

	if (!is_atom(Mod) || !is_atom(Fun) || !is_list(Args))
		return A_BADARG;

	proc = proc_spawn(proc_code_base(ctx), proc_atoms(ctx), Mod, Fun, Args);
	if (proc == 0)
		return A_BADARG;

	result(proc_pid(proc, proc_gc_pool(ctx)));
	return AI_OK;
}
Example 15
term_t bif_open_socket2(term_t LocIP, term_t LocPort, process_t *ctx)
{
	apr_status_t rs;
	apr_pool_t *p;
	apr_sockaddr_t *sa;
	apr_socket_t *socket;
	port_t *port;
	term_t id;

	const char *host;
	apr_port_t udp_port;

	if (LocIP != A_ANY && !is_binary(LocIP))
		return A_BADARG;
	if (!is_int(LocPort))
		return A_BADARG;

	host = (LocIP == A_ANY) ?0 :(const char *)bin_data(LocIP);
	udp_port = (apr_port_t)int_value(LocPort);

	apr_pool_create(&p, 0);

	rs = apr_sockaddr_info_get(&sa, host, APR_INET, udp_port, 0, p);
	if (rs == 0)
		rs = apr_socket_create(&socket,
			APR_INET, SOCK_DGRAM, APR_PROTO_UDP, p); //only APR_INET is supported, not APR_INET6
	if (rs == 0)
		rs = apr_socket_bind(socket, sa);
	if (rs == 0)
		rs = apr_socket_opt_set(socket, APR_SO_NONBLOCK, 1);

	if (rs != 0)
	{
		apr_pool_destroy(p);
		return decipher_status(rs);
	}

	port = port_udp_make(socket);	//takes care of pool p

	//add to poll ring
	port_register(port);

	//set initial port owner
	port->owner_in = port->owner_out = proc_pid(ctx, port->xp);

	id = make_port(my_node, port->key, my_creation, proc_gc_pool(ctx));
	result(id);
	return AI_OK;
}
Example 16
/*
 * /proc/pid/fd needs a special permission handler so that a process can still
 * access /proc/self/fd after it has executed a setuid().
 */
int proc_fd_permission(struct inode *inode, int mask)
{
	struct task_struct *p;
	int rv;

	rv = generic_permission(inode, mask);
	if (rv == 0)
		return rv;

	rcu_read_lock();
	p = pid_task(proc_pid(inode), PIDTYPE_PID);
	if (p && same_thread_group(p, current))
		rv = 0;
	rcu_read_unlock();

	return rv;
}
Example 17
static struct net *get_proc_task_net(struct inode *dir)
{
	struct task_struct *task;
	struct nsproxy *ns;
	struct net *net = NULL;

	rcu_read_lock();
	task = pid_task(proc_pid(dir), PIDTYPE_PID);
	if (task != NULL) {
		ns = task_nsproxy(task);
		if (ns != NULL)
			net = get_net(ns->net_ns);
	}
	rcu_read_unlock();

	return net;
}
Example 18
static pid_t pid_of_stack(struct proc_maps_private *priv,
				struct vm_area_struct *vma, bool is_pid)
{
	struct inode *inode = priv->inode;
	struct task_struct *task;
	pid_t ret = 0;

	rcu_read_lock();
	task = pid_task(proc_pid(inode), PIDTYPE_PID);
	if (task) {
		task = task_of_stack(task, vma, is_pid);
		if (task)
			ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
	}
	rcu_read_unlock();

	return ret;
}
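Helpers like this feed the "[stack]" / "[stack:<tid>]" annotation that the maps files print for stack VMAs; the result is easy to inspect from user space.

#include <stdio.h>
#include <string.h>

int main(void)
{
    FILE *f = fopen("/proc/self/maps", "r");
    if (!f) {
        perror("fopen /proc/self/maps");
        return 1;
    }
    char line[512];
    while (fgets(line, sizeof(line), f) != NULL) {
        /* Lines the kernel tagged as stack mappings. */
        if (strstr(line, "[stack"))
            fputs(line, stdout);
    }
    fclose(f);
    return 0;
}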
Example 19
/*
 * /proc/pid/fd needs a special permission handler so that a process can still
 * access /proc/self/fd after it has executed a setuid().
 */
int proc_fd_permission(struct inode *inode, int mask)
{
	struct task_struct *task;
	int rv = generic_permission(inode, mask);

	if (task_tgid(current) == proc_pid(inode))
		rv = 0;

	task = get_proc_task(inode);
	if (task == NULL)
		return rv;

	if (gr_acl_handle_procpidmem(task))
		rv = -EACCES;

	put_task_struct(task);

	return rv;
}
Example 20
static int
handle_lowresource(__unused int scope, int action, __unused int policy, int policy_subtype, __unused user_addr_t attrp, proc_t proc, __unused uint64_t target_threadid)
{
	int error = 0;

	switch(policy_subtype) {
		case PROC_POLICY_RS_NONE:
		case PROC_POLICY_RS_VIRTUALMEM:
			break;
		default:
			return(EINVAL);	
	}
	
	if (action == PROC_POLICY_ACTION_RESTORE)
		error = proc_resetpcontrol(proc_pid(proc));
	else
		error = EINVAL;

	return(error);
}
Example 21
static errno_t
ktrace_init_background(void)
{
	int err = 0;

	lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);

	if ((err = priv_check_cred(kauth_cred_get(), PRIV_KTRACE_BACKGROUND, 0))) {
		return err;
	}

	/*
	 * When a background tool first checks in, send a notification if ktrace
	 * is available.
	 */
	if (should_notify_on_init) {
		if (ktrace_state == KTRACE_STATE_OFF) {
			/*
			 * This notification can only fail if a process does not
			 * hold the receive right for the host special port.
			 * Return an error and don't make the current process
			 * the background tool.
			 */
			if (ktrace_background_available_notify_user() == KERN_FAILURE) {
				return EINVAL;
			}
		}
		should_notify_on_init = FALSE;
	}

	proc_t p = current_proc();

	ktrace_bg_unique_id = proc_uniqueid(p);
	ktrace_bg_pid = proc_pid(p);

	if (ktrace_state == KTRACE_STATE_BG) {
		ktrace_set_owning_proc(p);
	}

	return 0;
}
Example 22
static int is_stack(struct proc_maps_private *priv,
		    struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	int stack = 0;

	if (is_pid) {
		stack = vma->vm_start <= mm->start_stack &&
			vma->vm_end >= mm->start_stack;
	} else {
		struct inode *inode = priv->inode;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(proc_pid(inode), PIDTYPE_PID);
		if (task)
			stack = vma_is_stack_for_task(vma, task);
		rcu_read_unlock();
	}
	return stack;
}
Example 23
bool IOHIDLibUserClient::initWithTask(task_t owningTask, void * /* security_id */, UInt32 /* type */)
{
    if (!super::init())
        return false;

    if (IOUserClient::clientHasPrivilege(owningTask, kIOClientPrivilegeAdministrator) != kIOReturnSuccess) {
        // Preparing for extended data. Set a temporary key.
        setProperty(kIOHIDLibClientExtendedData, true);
    }

    fClient = owningTask;
    task_reference (fClient);

    proc_t p = (proc_t)get_bsdtask_info(fClient);
    fPid = proc_pid(p);

    fQueueMap = OSArray::withCapacity(4);
    if (!fQueueMap)
        return false;
    
    return true;
}
Example 24
void
unix_syscall64(x86_saved_state_t *state)
{
	thread_t	thread;
	void			*vt;
	unsigned int	code;
	struct sysent	*callp;
	int		args_in_regs;
	boolean_t	args_start_at_rdi;
	int		error;
	struct proc	*p;
	struct uthread	*uthread;
	x86_saved_state64_t *regs;
	pid_t		pid;

	assert(is_saved_state64(state));
	regs = saved_state64(state);
#if	DEBUG
	if (regs->rax == 0x2000800)
		thread_exception_return();
#endif
	thread = current_thread();
	uthread = get_bsdthread_info(thread);

#if PROC_REF_DEBUG
	uthread_reset_proc_refcount(uthread);
#endif

	/* Get the appropriate proc; may be different from task's for vfork() */
	if (__probable(!(uthread->uu_flag & UT_VFORK)))
		p = (struct proc *)get_bsdtask_info(current_task());
	else 
		p = current_proc();

	/* Verify that we are not being called from a task without a proc */
	if (__improbable(p == NULL)) {
		regs->rax = EPERM;
		regs->isf.rflags |= EFL_CF;
		task_terminate_internal(current_task());
		thread_exception_return();
		/* NOTREACHED */
	}

	code = regs->rax & SYSCALL_NUMBER_MASK;
	DEBUG_KPRINT_SYSCALL_UNIX(
		"unix_syscall64: code=%d(%s) rip=%llx\n",
		code, syscallnames[code >= nsysent ? SYS_invalid : code], regs->isf.rip);
	callp = (code >= nsysent) ? &sysent[SYS_invalid] : &sysent[code];

	vt = (void *)uthread->uu_arg;

	if (__improbable(callp == sysent)) {
	        /*
		 * indirect system call... system call number
		 * passed as 'arg0'
		 */
		code = regs->rdi;
		callp = (code >= nsysent) ? &sysent[SYS_invalid] : &sysent[code];
		args_start_at_rdi = FALSE;
		args_in_regs = 5;
	} else {
		args_start_at_rdi = TRUE;
		args_in_regs = 6;
	}

	if (callp->sy_narg != 0) {
		assert(callp->sy_narg <= 8); /* size of uu_arg */

		args_in_regs = MIN(args_in_regs, callp->sy_narg);
		memcpy(vt, args_start_at_rdi ? &regs->rdi : &regs->rsi, args_in_regs * sizeof(syscall_arg_t));


		if (!code_is_kdebug_trace(code)) {
			uint64_t *ip = (uint64_t *)vt;

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
				BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				(int)(*ip), (int)(*(ip+1)), (int)(*(ip+2)), (int)(*(ip+3)), 0);
		}

		if (__improbable(callp->sy_narg > args_in_regs)) {
			int copyin_count;

			copyin_count = (callp->sy_narg - args_in_regs) * sizeof(syscall_arg_t);

			error = copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&uthread->uu_arg[args_in_regs], copyin_count);
			if (error) {
				regs->rax = error;
				regs->isf.rflags |= EFL_CF;
				thread_exception_return();
				/* NOTREACHED */
			}
		}
	} else
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
			0, 0, 0, 0, 0);

	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	kauth_cred_uthread_update(uthread, p);

	uthread->uu_rval[0] = 0;
	uthread->uu_rval[1] = 0;
	uthread->uu_flag |= UT_NOTCANCELPT;
	uthread->syscall_code = code;
	pid = proc_pid(p);

#ifdef JOE_DEBUG
        uthread->uu_iocount = 0;
        uthread->uu_vpindex = 0;
#endif

	AUDIT_SYSCALL_ENTER(code, p, uthread);
	error = (*(callp->sy_call))((void *) p, vt, &(uthread->uu_rval[0]));
	AUDIT_SYSCALL_EXIT(code, p, uthread, error);

#ifdef JOE_DEBUG
        if (uthread->uu_iocount)
               printf("system call returned with uu_iocount != 0\n");
#endif

#if CONFIG_DTRACE
	uthread->t_dtrace_errno = error;
#endif /* CONFIG_DTRACE */
	
	if (__improbable(error == ERESTART)) {
		/*
		 * all system calls come through via the syscall instruction
		 * in 64-bit mode... it's 2 bytes in length
		 * move the user's pc back to repeat the syscall:
		 */
		pal_syscall_restart( thread, state );
	}
	else if (error != EJUSTRETURN) {
		if (__improbable(error)) {
			regs->rax = error;
			regs->isf.rflags |= EFL_CF;	/* carry bit */
		} else { /* (not error) */

			switch (callp->sy_return_type) {
			case _SYSCALL_RET_INT_T:
				regs->rax = uthread->uu_rval[0];
				regs->rdx = uthread->uu_rval[1];
				break;
			case _SYSCALL_RET_UINT_T:
				regs->rax = ((u_int)uthread->uu_rval[0]);
				regs->rdx = ((u_int)uthread->uu_rval[1]);
				break;
			case _SYSCALL_RET_OFF_T:
			case _SYSCALL_RET_ADDR_T:
			case _SYSCALL_RET_SIZE_T:
			case _SYSCALL_RET_SSIZE_T:
			case _SYSCALL_RET_UINT64_T:
			        regs->rax = *((uint64_t *)(&uthread->uu_rval[0]));
				regs->rdx = 0;
				break;
			case _SYSCALL_RET_NONE:
				break;
			default:
				panic("unix_syscall: unknown return type");
				break;
			}
			regs->isf.rflags &= ~EFL_CF;
		} 
	}

	DEBUG_KPRINT_SYSCALL_UNIX(
		"unix_syscall64: error=%d retval=(%llu,%llu)\n",
		error, regs->rax, regs->rdx);
	
	uthread->uu_flag &= ~UT_NOTCANCELPT;

	if (__improbable(uthread->uu_lowpri_window)) {
	        /*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(1);
	}
	if (__probable(!code_is_kdebug_trace(code)))
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
			BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
			error, uthread->uu_rval[0], uthread->uu_rval[1], pid, 0);

#if PROC_REF_DEBUG
	if (__improbable(uthread_get_proc_refcount(uthread))) {
		panic("system call returned with uu_proc_refcount != 0");
	}
#endif

	thread_exception_return();
	/* NOTREACHED */
}
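For orientation, everything above is driven by a single syscall instruction issued from user space; the libc syscall() wrapper makes the entry visible (SYS_getpid is just a convenient, side-effect-free choice).

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
    /* Enters the kernel through the 64-bit system call path; the call
       number travels in rax and the first arguments in rdi, rsi, ... */
    long pid = syscall(SYS_getpid);
    printf("SYS_getpid -> %ld, getpid() -> %d\n", pid, (int)getpid());
    return 0;
}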
Example 25
static int process_cred_label_update_execvew(kauth_cred_t old_cred,
                                             kauth_cred_t new_cred,
                                             struct proc *p,
                                             struct vnode *vp,
                                             off_t offset,
                                             struct vnode *scriptvp,
                                             struct label *vnodelabel,
                                             struct label *scriptvnodelabel,
                                             struct label *execlabel,
                                             u_int *csflags,
                                             void *macpolicyattr,
                                             size_t macpolicyattrlen,
                                             int *disjointp) {
  int path_len = MAXPATHLEN;

  if (!vnode_isreg(vp)) {
    goto error_exit;
  }

  // Determine address of image_params based off of csflags pointer. (HACKY)
  struct image_params *img =
      (struct image_params *)((char *)csflags -
                              offsetof(struct image_params, ip_csflags));

  // Find the length of arg and env we will copy.
  size_t arg_length =
      MIN(MAX_VECTOR_LENGTH, img->ip_endargv - img->ip_startargv);
  size_t env_length = MIN(MAX_VECTOR_LENGTH, img->ip_endenvv - img->ip_endargv);

  osquery_process_event_t *e =
      (osquery_process_event_t *)osquery_cqueue_reserve(
          cqueue,
          OSQUERY_PROCESS_EVENT,
          sizeof(osquery_process_event_t) + arg_length + env_length);

  if (!e) {
    goto error_exit;
  }
  // Copy the arg and env vectors.
  e->argv_offset = 0;
  e->envv_offset = arg_length;

  e->arg_length = arg_length;
  e->env_length = env_length;

  memcpy(&(e->flexible_data[e->argv_offset]), img->ip_startargv, arg_length);
  memcpy(&(e->flexible_data[e->envv_offset]), img->ip_endargv, env_length);

  e->actual_argc = img->ip_argc;
  e->actual_envc = img->ip_envc;

  // Calculate our argc and envc based on the number of null bytes we find in
  // the buffer.
  e->argc = MIN(e->actual_argc,
                str_num(&(e->flexible_data[e->argv_offset]), arg_length));
  e->envc = MIN(e->actual_envc,
                str_num(&(e->flexible_data[e->envv_offset]), env_length));

  e->pid = proc_pid(p);
  e->ppid = proc_ppid(p);
  e->owner_uid = 0;
  e->owner_gid = 0;
  e->mode = -1;
  vfs_context_t context = vfs_context_create(NULL);
  if (context) {
    struct vnode_attr vattr = {0};
    VATTR_INIT(&vattr);
    VATTR_WANTED(&vattr, va_uid);
    VATTR_WANTED(&vattr, va_gid);
    VATTR_WANTED(&vattr, va_mode);
    VATTR_WANTED(&vattr, va_create_time);
    VATTR_WANTED(&vattr, va_access_time);
    VATTR_WANTED(&vattr, va_modify_time);
    VATTR_WANTED(&vattr, va_change_time);

    if (vnode_getattr(vp, &vattr, context) == 0) {
      e->owner_uid = vattr.va_uid;
      e->owner_gid = vattr.va_gid;
      e->mode = vattr.va_mode;
      e->create_time = vattr.va_create_time.tv_sec;
      e->access_time = vattr.va_access_time.tv_sec;
      e->modify_time = vattr.va_modify_time.tv_sec;
      e->change_time = vattr.va_change_time.tv_sec;
    }

    vfs_context_rele(context);
  }

  e->uid = kauth_cred_getruid(new_cred);
  e->euid = kauth_cred_getuid(new_cred);

  e->gid = kauth_cred_getrgid(new_cred);
  e->egid = kauth_cred_getgid(new_cred);

  vn_getpath(vp, e->path, &path_len);

  osquery_cqueue_commit(cqueue, e);
error_exit:

  return 0;
}
Example 26
PUBLIC void syscall_handle(void)
{
  struct proc* target_proc;
  pid_t pid;
  u32_t syscall_num;
  u8_t res;

  arch_printf("syscall_handle\n");

  struct thread* th = cur_th;

  /* Get current thread */
  if (th == NULL)
    {
      res = IPC_FAILURE;
      goto end;
    }

  /* Get syscall number from source register */
  syscall_num = arch_ctx_get((arch_ctx_t*)th, ARCH_CONST_SOURCE);

  /* Put originator proc into source register instead */
  arch_ctx_set((arch_ctx_t*)th, ARCH_CONST_SOURCE,th->proc->pid);

  /* Destination proc, stored in EDI */
  pid = (pid_t)arch_ctx_get((arch_ctx_t*)th, ARCH_CONST_DEST);
  if ( pid == IPC_ANY)
    {
      target_proc = NULL;
    }
  else
    {
      /* Get proc structure from given id */
      target_proc = proc_pid(pid);
      if ( target_proc == NULL )
	{
	  res = IPC_FAILURE;
	  goto end;
	}
    }
  
  /* Dispatch call to effective primitives */
  switch(syscall_num)
    {
    case SYSCALL_SEND:
      {
	res = syscall_send(th, target_proc);
	break;
      }

    case SYSCALL_RECEIVE:
      {
	res = syscall_receive(th, target_proc);
	break;
      }

    case SYSCALL_NOTIFY:
      {
	res = syscall_notify(th, target_proc);
	break;
      }
    default:
      {
	arch_printf("not a syscall number\n");
	res = IPC_FAILURE;
	break;
      }  
    }

 end:
	
  /* Set result in caller's return register */
  arch_ctx_set((arch_ctx_t*)th, ARCH_CONST_RETURN,res);

  arch_printf("end of syscall :%u\n",arch_ctx_get((arch_ctx_t*)th, ARCH_CONST_RETURN));

  return;
}
Example 27
/**
 * Close device.
 */
static int VBoxNetAdpDarwinClose(dev_t Dev, int fFlags, int fDevType, struct proc *pProcess)
{
    Log(("VBoxNetAdpDarwinClose: pid=%d\n", proc_pid(pProcess)));
    return 0;
}
Example 28
void
unix_syscall(x86_saved_state_t *state)
{
	thread_t		thread;
	void			*vt;
	unsigned int		code;
	struct sysent		*callp;

	int			error;
	vm_offset_t		params;
	struct proc		*p;
	struct uthread		*uthread;
	x86_saved_state32_t	*regs;
	boolean_t		is_vfork;
	pid_t			pid;

	assert(is_saved_state32(state));
	regs = saved_state32(state);
#if DEBUG
	if (regs->eax == 0x800)
		thread_exception_return();
#endif
	thread = current_thread();
	uthread = get_bsdthread_info(thread);

#if PROC_REF_DEBUG
	uthread_reset_proc_refcount(uthread);
#endif

	/* Get the appropriate proc; may be different from task's for vfork() */
	is_vfork = uthread->uu_flag & UT_VFORK;
	if (__improbable(is_vfork != 0))
		p = current_proc();
	else 
		p = (struct proc *)get_bsdtask_info(current_task());

	code = regs->eax & I386_SYSCALL_NUMBER_MASK;
	DEBUG_KPRINT_SYSCALL_UNIX("unix_syscall: code=%d(%s) eip=%u\n",
							  code, syscallnames[code >= nsysent ? SYS_invalid : code], (uint32_t)regs->eip);
	params = (vm_offset_t) (regs->uesp + sizeof (int));

	regs->efl &= ~(EFL_CF);

	callp = (code >= nsysent) ? &sysent[SYS_invalid] : &sysent[code];

	if (__improbable(callp == sysent)) {
		code = fuword(params);
		params += sizeof(int);
		callp = (code >= nsysent) ? &sysent[SYS_invalid] : &sysent[code];
	}

	vt = (void *)uthread->uu_arg;

	if (callp->sy_arg_bytes != 0) {
#if CONFIG_REQUIRES_U32_MUNGING
		sy_munge_t	*mungerp;
#else
#error U32 syscalls on x86_64 kernel requires munging
#endif
		uint32_t	 nargs;

		assert((unsigned) callp->sy_arg_bytes <= sizeof (uthread->uu_arg));
		nargs = callp->sy_arg_bytes;
		error = copyin((user_addr_t) params, (char *) vt, nargs);
		if (error) {
			regs->eax = error;
			regs->efl |= EFL_CF;
			thread_exception_return();
			/* NOTREACHED */
		}

		if (__probable(!code_is_kdebug_trace(code))) {
			int *ip = (int *)vt;

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
				*ip, *(ip+1), *(ip+2), *(ip+3), 0);
		}

#if CONFIG_REQUIRES_U32_MUNGING
		mungerp = callp->sy_arg_munge32;

		if (mungerp != NULL)
			(*mungerp)(vt);
#endif
	} else
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
			BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
			0, 0, 0, 0, 0);

	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	kauth_cred_uthread_update(uthread, p);

	uthread->uu_rval[0] = 0;
	uthread->uu_rval[1] = 0;
	uthread->uu_flag |= UT_NOTCANCELPT;
	uthread->syscall_code = code;
	pid = proc_pid(p);

#ifdef JOE_DEBUG
        uthread->uu_iocount = 0;
        uthread->uu_vpindex = 0;
#endif

	AUDIT_SYSCALL_ENTER(code, p, uthread);
	error = (*(callp->sy_call))((void *) p, (void *) vt, &(uthread->uu_rval[0]));
	AUDIT_SYSCALL_EXIT(code, p, uthread, error);

#ifdef JOE_DEBUG
        if (uthread->uu_iocount)
                printf("system call returned with uu_iocount != 0\n");
#endif
#if CONFIG_DTRACE
	uthread->t_dtrace_errno = error;
#endif /* CONFIG_DTRACE */

	if (__improbable(error == ERESTART)) {
		/*
		 * Move the user's pc back to repeat the syscall:
		 * 5 bytes for a sysenter, or 2 for an int 8x.
		 * The SYSENTER_TF_CS covers single-stepping over a sysenter
		 * - see debug trap handler in idt.s/idt64.s
		 */

		pal_syscall_restart(thread, state);
	}
	else if (error != EJUSTRETURN) {
		if (__improbable(error)) {
		    regs->eax = error;
		    regs->efl |= EFL_CF;	/* carry bit */
		} else { /* (not error) */
			/*
			 * We split retval across two registers, in case the
			 * syscall had a 64-bit return value, in which case
			 * eax/edx matches the function call ABI.
			 */
		    regs->eax = uthread->uu_rval[0];
		    regs->edx = uthread->uu_rval[1];
		} 
	}

	DEBUG_KPRINT_SYSCALL_UNIX(
		"unix_syscall: error=%d retval=(%u,%u)\n",
		error, regs->eax, regs->edx);

	uthread->uu_flag &= ~UT_NOTCANCELPT;

	if (__improbable(uthread->uu_lowpri_window)) {
	        /*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(1);
	}
	if (__probable(!code_is_kdebug_trace(code)))
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
			BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
			error, uthread->uu_rval[0], uthread->uu_rval[1], pid, 0);

	if (__improbable(!is_vfork && callp->sy_call == (sy_call_t *)execve && !error)) {
		pal_execve_return(thread);
	}

#if PROC_REF_DEBUG
	if (__improbable(uthread_get_proc_refcount(uthread) != 0)) {
		panic("system call returned with uu_proc_refcount != 0");
	}
#endif

	thread_exception_return();
	/* NOTREACHED */
}
Example 29
/**
 * Device open. Called on open /dev/vboxguest and (later) /dev/vboxguestu.
 *
 * @param   Dev         The device number.
 * @param   fFlags      ???.
 * @param   fDevType    ???.
 * @param   pProcess    The process issuing this request.
 */
static int VbgdDarwinOpen(dev_t Dev, int fFlags, int fDevType, struct proc *pProcess)
{
    /*
     * Only two minor devices numbers are allowed.
     */
    if (minor(Dev) != 0 && minor(Dev) != 1)
        return EACCES;

    /*
     * Find the session created by org_virtualbox_VBoxGuestClient, fail
     * if no such session, and mark it as opened. We set the uid & gid
     * here too, since that is more straightforward at this point.
     */
    //const bool          fUnrestricted = minor(Dev) == 0;
    int                 rc = VINF_SUCCESS;
    PVBOXGUESTSESSION   pSession = NULL;
    kauth_cred_t        pCred = kauth_cred_proc_ref(pProcess);
    if (pCred)
    {
        RTPROCESS       Process = RTProcSelf();
        unsigned        iHash = SESSION_HASH(Process);
        RTSpinlockAcquire(g_Spinlock);

        pSession = g_apSessionHashTab[iHash];
        while (pSession && pSession->Process != Process)
            pSession = pSession->pNextHash;
        if (pSession)
        {
            if (!pSession->fOpened)
            {
                pSession->fOpened = true;
                /*pSession->fUnrestricted = fUnrestricted; - later */
            }
            else
                rc = VERR_ALREADY_LOADED;
        }
        else
            rc = VERR_GENERAL_FAILURE;

        RTSpinlockRelease(g_Spinlock);
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
        kauth_cred_unref(&pCred);
#else  /* 10.4 */
        /* The 10.4u SDK headers and 10.4.11 kernel source have inconsistent definitions
           of kauth_cred_unref(), so use the other (now deprecated) API for releasing it. */
        kauth_cred_rele(pCred);
#endif /* 10.4 */
    }
    else
        rc = VERR_INVALID_PARAMETER;

    Log(("VbgdDarwinOpen: g_DevExt=%p pSession=%p rc=%d pid=%d\n", &g_DevExt, pSession, rc, proc_pid(pProcess)));
    return VbgdDarwinErr2DarwinErr(rc);
}
Example 30
/**
 * Worker for VbgdDarwinIOCtl that takes the slow IOCtl functions.
 *
 * @returns Darwin errno.
 *
 * @param pSession  The session.
 * @param iCmd      The IOCtl command.
 * @param pData     Pointer to the kernel copy of the data buffer.
 * @param pProcess  The calling process.
 */
static int VbgdDarwinIOCtlSlow(PVBOXGUESTSESSION pSession, u_long iCmd, caddr_t pData, struct proc *pProcess)
{
    LogFlow(("VbgdDarwinIOCtlSlow: pSession=%p iCmd=%p pData=%p pProcess=%p\n", pSession, iCmd, pData, pProcess));


    /*
     * Buffered or unbuffered?
     */
    void *pvReqData;
    user_addr_t pUser = 0;
    void *pvPageBuf = NULL;
    uint32_t cbReq = IOCPARM_LEN(iCmd);
    if ((IOC_DIRMASK & iCmd) == IOC_INOUT)
    {
        /*
         * Raw buffered request data, common code validates it.
         */
        pvReqData = pData;
    }
    else if ((IOC_DIRMASK & iCmd) == IOC_VOID && !cbReq)
    {
        /*
         * Get the header and figure out how much we're gonna have to read.
         */
        VBGLBIGREQ Hdr;
        pUser = (user_addr_t)*(void **)pData;
        int rc = copyin(pUser, &Hdr, sizeof(Hdr));
        if (RT_UNLIKELY(rc))
        {
            Log(("VbgdDarwinIOCtlSlow: copyin(%llx,Hdr,) -> %#x; iCmd=%#lx\n", (unsigned long long)pUser, rc, iCmd));
            return rc;
        }
        if (RT_UNLIKELY(Hdr.u32Magic != VBGLBIGREQ_MAGIC))
        {
            Log(("VbgdDarwinIOCtlSlow: bad magic u32Magic=%#x; iCmd=%#lx\n", Hdr.u32Magic, iCmd));
            return EINVAL;
        }
        cbReq = Hdr.cbData;
        if (RT_UNLIKELY(cbReq > _1M*16))
        {
            Log(("VbgdDarwinIOCtlSlow: %#x; iCmd=%#lx\n", Hdr.cbData, iCmd));
            return EINVAL;
        }
        pUser = Hdr.pvDataR3;

        /*
         * Allocate buffer and copy in the data.
         */
        pvReqData = RTMemTmpAlloc(cbReq);
        if (!pvReqData)
            pvPageBuf = pvReqData = IOMallocAligned(RT_ALIGN_Z(cbReq, PAGE_SIZE), 8);
        if (RT_UNLIKELY(!pvReqData))
        {
            Log(("VbgdDarwinIOCtlSlow: failed to allocate buffer of %d bytes; iCmd=%#lx\n", cbReq, iCmd));
            return ENOMEM;
        }
        rc = copyin(pUser, pvReqData, Hdr.cbData);
        if (RT_UNLIKELY(rc))
        {
            Log(("VbgdDarwinIOCtlSlow: copyin(%llx,%p,%#x) -> %#x; iCmd=%#lx\n",
                 (unsigned long long)pUser, pvReqData, Hdr.cbData, rc, iCmd));
            if (pvPageBuf)
                IOFreeAligned(pvPageBuf, RT_ALIGN_Z(cbReq, PAGE_SIZE));
            else
                RTMemTmpFree(pvReqData);
            return rc;
        }
    }
    else
    {
        Log(("VbgdDarwinIOCtlSlow: huh? cbReq=%#x iCmd=%#lx\n", cbReq, iCmd));
        return EINVAL;
    }

    /*
     * Process the IOCtl.
     */
    size_t cbReqRet = 0;
    int rc = VBoxGuestCommonIOCtl(iCmd, &g_DevExt, pSession, pvReqData, cbReq, &cbReqRet);
    if (RT_SUCCESS(rc))
    {
        /*
         * If not buffered, copy back the buffer before returning.
         */
        if (pUser)
        {
            if (cbReqRet > cbReq)
            {
                Log(("VbgdDarwinIOCtlSlow: too much output! %#x > %#x; uCmd=%#lx!\n", cbReqRet, cbReq, iCmd));
                cbReqRet = cbReq;
            }
            rc = copyout(pvReqData, pUser, cbReqRet);
            if (RT_UNLIKELY(rc))
                Log(("VbgdDarwinIOCtlSlow: copyout(%p,%llx,%#x) -> %d; uCmd=%#lx!\n",
                     pvReqData, (unsigned long long)pUser, cbReqRet, rc, iCmd));

            /* cleanup */
            if (pvPageBuf)
                IOFreeAligned(pvPageBuf, RT_ALIGN_Z(cbReq, PAGE_SIZE));
            else
                RTMemTmpFree(pvReqData);
        }
        else
            rc = 0;
    }
    else
    {
        /*
         * The request failed, just clean up.
         */
        if (pUser)
        {
            if (pvPageBuf)
                IOFreeAligned(pvPageBuf, RT_ALIGN_Z(cbReq, PAGE_SIZE));
            else
                RTMemTmpFree(pvReqData);
        }

        Log(("VbgdDarwinIOCtlSlow: pid=%d iCmd=%lx pData=%p failed, rc=%d\n", proc_pid(pProcess), iCmd, (void *)pData, rc));
        rc = EINVAL;
    }

    Log2(("VbgdDarwinIOCtlSlow: returns %d\n", rc));
    return rc;
}