Example #1
errno_t
mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback)
{
	int i;
	errno_t error;

	if (callback == NULL)
		return (EINVAL);

	lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock);

	/* assume the worst */
	error = ENOENT;
	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		if (mbuf_tx_compl_table[i] == callback) {
			mbuf_tx_compl_table[i] = NULL;
			error = 0;
			goto unlock;
		}
	}
unlock:
	lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock);

	return (error);
}
Example #2
/*
 * Routine:	lck_rw_sleep_deadline
 */
wait_result_t
lck_rw_sleep_deadline(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t   res;
	lck_rw_type_t	lck_rw_type;

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	return res;
}
Example #3
errno_t
mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback)
{
	int i;
	errno_t error;

	if (callback == NULL)
		return (EINVAL);

	lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock);

	i = get_tx_compl_callback_index_locked(callback);
	if (i != -1) {
		error = EEXIST;
		goto unlock;
	}

	/* assume the worst */
	error = ENOSPC;
	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		if (mbuf_tx_compl_table[i] == NULL) {
			mbuf_tx_compl_table[i] = callback;
			error = 0;
			goto unlock;
		}
	}
unlock:
	lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock);

	return (error);
}
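Examples #1 and #3 are the two halves of a common pattern: a fixed-size callback table whose mutations are serialized by a single exclusive rw-lock hold. A minimal self-contained sketch of that pattern follows; every name in it (my_callback_func, my_table, my_tbl_lock, MY_TABLE_SIZE, my_register) is a hypothetical stand-in, not part of the XNU KPI:

#include <sys/types.h>
#include <sys/errno.h>
#include <kern/locks.h>

#define MY_TABLE_SIZE	32

typedef void (*my_callback_func)(void *arg);

static my_callback_func my_table[MY_TABLE_SIZE];
static lck_rw_t *my_tbl_lock;	/* assumed to be set up elsewhere via lck_rw_alloc_init() */

static errno_t
my_register(my_callback_func cb)
{
	int i;
	errno_t error = ENOSPC;	/* assume the worst, as in Example #3 */

	if (cb == NULL)
		return (EINVAL);

	lck_rw_lock_exclusive(my_tbl_lock);
	for (i = 0; i < MY_TABLE_SIZE; i++) {
		if (my_table[i] == cb) {
			error = EEXIST;		/* already registered */
			goto unlock;
		}
	}
	for (i = 0; i < MY_TABLE_SIZE; i++) {
		if (my_table[i] == NULL) {
			my_table[i] = cb;	/* claim the first free slot */
			error = 0;
			break;
		}
	}
unlock:
	lck_rw_unlock_exclusive(my_tbl_lock);
	return (error);
}

Only mutations need the exclusive flavor; a lookup path that merely reads the table could take lck_rw_lock_shared instead.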
Example #4
static int
vboxvfs_vnode_close(struct vnop_close_args *args)
{
    vnode_t          vnode;
    mount_t          mp;
    vboxvfs_vnode_t *pVnodeData;
    vboxvfs_mount_t *pMount;

    int rc;

    PDEBUG("Closing vnode...");

    AssertReturn(args, EINVAL);

    vnode           = args->a_vp;                              AssertReturn(vnode,      EINVAL);
    pVnodeData      = (vboxvfs_vnode_t *)vnode_fsnode(vnode);  AssertReturn(pVnodeData, EINVAL);
    mp              = vnode_mount(vnode);                      AssertReturn(mp,         EINVAL);
    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);             AssertReturn(pMount,     EINVAL);

    lck_rw_lock_exclusive(pVnodeData->pLock);

    if (vnode_isinuse(vnode, 0))
    {
        PDEBUG("vnode '%s' (handle 0x%X) is still in use, just return ok",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle);

        lck_rw_unlock_exclusive(pVnodeData->pLock);
        return 0;
    }

    /* At this point we must make sure that vnode has VBoxVFS object handle assigned */
    if (pVnodeData->pHandle == SHFL_HANDLE_NIL)
    {
        PDEBUG("vnode has no active VBoxVFS object handle set, aborting");
        lck_rw_unlock_exclusive(pVnodeData->pLock);
        return EINVAL;
    }

    rc = vboxvfs_close_internal(pMount, pVnodeData->pHandle);
    if (rc == 0)
    {
        PDEBUG("Close success: '%s' (handle 0x%X)",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle);

        /* Forget about previously assigned VBoxVFS object handle */
        pVnodeData->pHandle = SHFL_HANDLE_NIL;
    }
    else
    {
        PDEBUG("Unable to close: '%s' (handle 0x%X): %d",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle, rc);
    }

    lck_rw_unlock_exclusive(pVnodeData->pLock);

    return rc;
}
Example #5
errno_t kn_inject_after_http(mbuf_t otgn_data)
{
    errno_t retval = 0;
    mbuf_t otgn_data_dup;
    u_int16_t ms = 0;

    lck_rw_lock_exclusive(gMasterRecordLock);
    ms = master_record.http_delay_ms;
    lck_rw_unlock_exclusive(gMasterRecordLock);

    retval = mbuf_dup(otgn_data, MBUF_DONTWAIT, &otgn_data_dup);
    if (retval != 0) {
        kn_debug("mbuf_dup returned error %d\n", retval);
        return retval;
    }

    retval = kn_mbuf_set_tag(&otgn_data_dup, gidtag, kMY_TAG_TYPE, outgoing_direction);
    if (retval != 0) {
        kn_debug("kn_mbuf_set_tag returned error %d\n", retval);
        mbuf_freem(otgn_data_dup); /* don't leak the duplicate on failure */
        return retval;
    }

    /* inject the tagged duplicate, not the untagged original */
    retval = kn_delay_pkt_inject(otgn_data_dup, ms, outgoing_direction);
    if (retval != 0) {
        kn_debug("kn_delay_pkt_inject returned error %d\n", retval);
        mbuf_freem(otgn_data_dup); /* assumes the callee does not consume the mbuf on failure */
        return retval;
    }
    return 0;
}
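kn_inject_after_http() takes the exclusive flavor merely to read http_delay_ms. For a pure read, the shared flavor is sufficient and lets concurrent readers proceed; a sketch against the same globals, assuming no caller relies on readers being mutually excluded:

    /* read-only access needs only the shared flavor */
    lck_rw_lock_shared(gMasterRecordLock);
    ms = master_record.http_delay_ms;
    lck_rw_unlock_shared(gMasterRecordLock);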
Example #6
/*
 * Outer subroutine:
 * Connect from a socket to a specified address.
 * Both address and port must be specified in argument sin.
 * If we don't have a local address for this socket yet,
 * then pick one.
 */
int
in6_pcbconnect(
	struct inpcb *inp,
	struct sockaddr *nam,
	struct proc *p)
{
	struct in6_addr addr6;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)nam;
	struct inpcb *pcb;
	int error;
	unsigned int outif = 0;

	/*
	 * Call inner routine, to assign local interface address.
	 * in6_pcbladdr() may automatically fill in sin6_scope_id.
	 */
	if ((error = in6_pcbladdr(inp, nam, &addr6, &outif)) != 0)
		return(error);
	socket_unlock(inp->inp_socket, 0);
	pcb = in6_pcblookup_hash(inp->inp_pcbinfo, &sin6->sin6_addr,
			       sin6->sin6_port,
			      IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)
			      ? &addr6 : &inp->in6p_laddr,
			      inp->inp_lport, 0, NULL);
	socket_lock(inp->inp_socket, 0);
	if (pcb != NULL) {
		in_pcb_checkstate(pcb, WNT_RELEASE, pcb == inp ? 1 : 0);
		return (EADDRINUSE);
	}
	if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
		if (inp->inp_lport == 0) {
			error = in6_pcbbind(inp, (struct sockaddr *)0, p);
			if (error)
				return (error);
		}
		inp->in6p_laddr = addr6;
		inp->in6p_last_outif = outif;
	}
	if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->mtx)) {
		/* lock inversion issue, mostly with UDP multicast packets */
		socket_unlock(inp->inp_socket, 0);
		lck_rw_lock_exclusive(inp->inp_pcbinfo->mtx);
		socket_lock(inp->inp_socket, 0);
	}
	inp->in6p_faddr = sin6->sin6_addr;
	inp->inp_fport = sin6->sin6_port;
	/* update flowinfo - draft-itojun-ipv6-flowlabel-api-00 */
	inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK;
	if (inp->in6p_flags & IN6P_AUTOFLOWLABEL)
		inp->in6p_flowinfo |=
		    (htonl(ip6_flow_seq++) & IPV6_FLOWLABEL_MASK);

	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->mtx);
	return (0);
}
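The lck_rw_try_lock_exclusive() dance above exists to avoid a lock-order inversion: the pcbinfo lock must not be acquired blocking while the per-socket lock is held. Reduced to a skeleton (the helper name and the outer_rw parameter are illustrative only):

static void
take_outer_locked(struct socket *so, lck_rw_t *outer_rw)
{
	if (!lck_rw_try_lock_exclusive(outer_rw)) {
		/* Would block: drop the inner (socket) lock first, block on
		 * the outer lock, then retake the inner lock. */
		socket_unlock(so, 0);
		lck_rw_lock_exclusive(outer_rw);
		socket_lock(so, 0);
		/* Anything guarded by the socket lock may have changed while
		 * it was dropped and may need revalidation by the caller. */
	}
}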
Example #7
/*
 * Routine:	lck_rw_sleep
 */
wait_result_t
lck_rw_sleep(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible)
{
	wait_result_t	res;
	lck_rw_type_t	lck_rw_type;
	thread_t		thread = current_thread();

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		/*
		 * Although we are dropping the RW lock, the intent in most cases
		 * is that this thread remains as an observer, since it may hold
		 * some secondary resource, but must yield to avoid deadlock. In
		 * this situation, make sure that the thread is boosted to the
		 * RW lock ceiling while blocked, so that it can re-acquire the
		 * RW lock at that priority.
		 */
		thread->rwlock_count++;
	}

	res = assert_wait(event, interruptible);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		if ((thread->rwlock_count-- == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
			/* sched_flags checked without lock, but will be rechecked while clearing */

			/* Only if the caller wanted the lck_rw_t returned unlocked should we drop to 0 */
			assert(lck_sleep_action & LCK_SLEEP_UNLOCK);

			lck_rw_clear_promotion(thread);
		}
	}

	return res;
}
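From the caller's side, lck_rw_sleep() with LCK_SLEEP_DEFAULT drops the rw-lock around the block and re-takes it in the flavor it previously held (lck_rw_done() reports the type, lck_rw_lock() restores it). A hypothetical caller, with resource_lock and resource_ready standing in for real state:

	lck_rw_lock_exclusive(resource_lock);
	while (!resource_ready) {
		/* drops resource_lock, blocks, then re-takes it exclusively */
		(void) lck_rw_sleep(resource_lock, LCK_SLEEP_DEFAULT,
		    (event_t)&resource_ready, THREAD_UNINT);
	}
	/* ... consume the resource while still holding the lock ... */
	lck_rw_unlock_exclusive(resource_lock);

The condition is rechecked in a loop because the lock is released while blocked, so another thread may have consumed the resource before this one re-acquires it.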
Example #8
__private_extern__ void
nlock_9p(node_9p *np, lcktype_9p type)
{
//	DEBUG("%p: %s", np, type==NODE_LCK_SHARED? "shared": "exclusive");
	if (type == NODE_LCK_SHARED)
		lck_rw_lock_shared(np->lck);
	else
		lck_rw_lock_exclusive(np->lck);
	np->lcktype = type;
}
Example #9
/*
 *	Routine:	lck_rw_lock
 */
void
lck_rw_lock(
	lck_rw_t	*lck,
	lck_rw_type_t	lck_rw_type)
{
	if (lck_rw_type == LCK_RW_TYPE_SHARED)
		lck_rw_lock_shared(lck);
	else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE)
		lck_rw_lock_exclusive(lck);
	else
		panic("lck_rw_lock(): Invalid RW lock type: %x\n", lck_rw_type);
}
Example #10
void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
    if (rw == RW_READER) {
        lck_rw_lock_shared((lck_rw_t *)&rwlp->rw_lock[0]);
        OSIncrementAtomic((volatile SInt32 *)&rwlp->rw_readers);
    } else {
        if (rwlp->rw_owner == current_thread())
            panic("rw_enter: locking against myself!");
        lck_rw_lock_exclusive((lck_rw_t *)&rwlp->rw_lock[0]);
        rwlp->rw_owner = current_thread();
    }
}
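rw_enter() keeps its own bookkeeping (rw_readers, rw_owner) next to the underlying lck_rw_t so that an unlock routine can tell which flavor to release. A matching rw_exit(), reconstructed here as a sketch rather than quoted from the same source:

void
rw_exit(krwlock_t *rwlp)
{
    if (rwlp->rw_owner == current_thread()) {
        /* writer: clear ownership before dropping the lock */
        rwlp->rw_owner = NULL;
        lck_rw_unlock_exclusive((lck_rw_t *)&rwlp->rw_lock[0]);
    } else {
        /* reader: balance the OSIncrementAtomic() from rw_enter() */
        OSDecrementAtomic((volatile SInt32 *)&rwlp->rw_readers);
        lck_rw_unlock_shared((lck_rw_t *)&rwlp->rw_lock[0]);
    }
}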
Example #11
int Lpx_PCB_alloc( struct socket *so,
				   struct lpxpcb *head,
				   struct proc *td )
{
    register struct lpxpcb *lpxp;
	
    DEBUG_PRINT(DEBUG_MASK_PCB_TRACE, ("Lpx_PCB_alloc\n"));
    	
    MALLOC(lpxp, struct lpxpcb *, sizeof *lpxp, M_PCB, M_WAITOK);
    if (lpxp == NULL) {
        DEBUG_PRINT(DEBUG_MASK_PCB_ERROR, ("Lpx_PCB_alloc:==> Failed\n"));
        return (ENOBUFS);
    }
    bzero(lpxp, sizeof(*lpxp));
    
    lpxp->lpxp_socket = so;
    if (lpxcksum)
        lpxp->lpxp_flags |= LPXP_CHECKSUM;
	
    read_random(&lpxp->lpxp_messageid, sizeof(lpxp->lpxp_messageid));
	
	lck_rw_lock_exclusive(head->lpxp_list_rw);	
    insque(lpxp, head);
	lck_rw_unlock_exclusive(head->lpxp_list_rw);
	
	lpxp->lpxp_head = head;
	
    so->so_pcb = (caddr_t)lpxp;
    //so->so_options |= SO_DONTROUTE;
	
	if (so->so_proto->pr_flags & PR_PCBLOCK) {
		
		if (head == &lpx_stream_pcb) {
			lpxp->lpxp_mtx = lck_mtx_alloc_init(stream_mtx_grp, stream_mtx_attr);
			lpxp->lpxp_mtx_grp = stream_mtx_grp;
		} else {
			lpxp->lpxp_mtx = lck_mtx_alloc_init(datagram_mtx_grp, datagram_mtx_attr);
			lpxp->lpxp_mtx_grp = datagram_mtx_grp;
		}
		
		if (lpxp->lpxp_mtx == NULL) {
			DEBUG_PRINT(DEBUG_MASK_PCB_ERROR, ("Lpx_PCB_alloc: can't alloc mutex! so=%p\n", so));
			
			/* Undo the earlier insque() before freeing, so the PCB
			 * list does not keep a dangling pointer to freed memory. */
			lck_rw_lock_exclusive(head->lpxp_list_rw);
			remque(lpxp);
			lck_rw_unlock_exclusive(head->lpxp_list_rw);

			so->so_pcb = NULL;
			FREE(lpxp, M_PCB);

			return (ENOMEM);
		}
	}
	
    return (0);
}
Example #12
/*
 * Routine:	lck_rw_sleep_deadline
 */
wait_result_t
lck_rw_sleep_deadline(
	lck_rw_t		*lck,
	lck_sleep_action_t	lck_sleep_action,
	event_t			event,
	wait_interrupt_t	interruptible,
	uint64_t		deadline)
{
	wait_result_t   res;
	lck_rw_type_t	lck_rw_type;
	thread_t		thread = current_thread();

	if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0)
		panic("Invalid lock sleep action %x\n", lck_sleep_action);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		thread->rwlock_count++;
	}

	res = assert_wait_deadline(event, interruptible, deadline);
	if (res == THREAD_WAITING) {
		lck_rw_type = lck_rw_done(lck);
		res = thread_block(THREAD_CONTINUE_NULL);
		if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
			if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE)))
				lck_rw_lock(lck, lck_rw_type);
			else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE)
				lck_rw_lock_exclusive(lck);
			else
				lck_rw_lock_shared(lck);
		}
	}
	else if (lck_sleep_action & LCK_SLEEP_UNLOCK)
		(void)lck_rw_done(lck);

	if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
		if ((thread->rwlock_count-- == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
			/* sched_flags checked without lock, but will be rechecked while clearing */

			/* Only if the caller wanted the lck_rw_t returned unlocked should we drop to 0 */
			assert(lck_sleep_action & LCK_SLEEP_UNLOCK);

			lck_rw_clear_promotion(thread);
		}
	}

	return res;
}
Example #13
adt_status _adt_xnu_write_lock(ADT_LOCK lock)
{
    adt_status ret;

    if (NULL == lock)
    {
        ret = ADT_INVALID_PARAM;
        goto end;
    }

    lck_rw_lock_exclusive(lock->rw_lock);
    ret = ADT_OK;
end:
    return ret;
}
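The natural counterpart, hypothetical but mirroring the wrapper above and assuming the same ADT_LOCK layout:

adt_status _adt_xnu_write_unlock(ADT_LOCK lock)
{
    adt_status ret;

    if (NULL == lock)
    {
        ret = ADT_INVALID_PARAM;
        goto end;
    }

    lck_rw_unlock_exclusive(lock->rw_lock);
    ret = ADT_OK;
end:
    return ret;
}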
Example #14
void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
    if (rw == RW_READER) {
        lck_rw_lock_shared((lck_rw_t *)&rwlp->rw_lock[0]);
        atomic_inc_32((volatile uint32_t *)&rwlp->rw_readers);
        ASSERT(rwlp->rw_owner == 0);
    } else {
        if (rwlp->rw_owner == current_thread())
            panic("rw_enter: locking against myself!");
        lck_rw_lock_exclusive((lck_rw_t *)&rwlp->rw_lock[0]);
        ASSERT(rwlp->rw_owner == 0);
        ASSERT(rwlp->rw_readers == 0);
        rwlp->rw_owner = current_thread();
    }
}
Example #15
/*
 * Lock a webdavnode
 */
__private_extern__ int webdav_lock(struct webdavnode *pt, enum webdavlocktype locktype)
{
	if (locktype == WEBDAV_SHARED_LOCK)
		lck_rw_lock_shared(&pt->pt_rwlock);
	else
		lck_rw_lock_exclusive(&pt->pt_rwlock);

	pt->pt_lockState = locktype;
	
#if 0
	/* For Debugging... */
	if (locktype != WEBDAV_SHARED_LOCK) {
		pt->pt_activation = (void *) current_thread();
	}
#endif

	return (0);
}
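Because webdav_lock() records the flavor in pt_lockState, a matching unlock can choose the correct release call. A sketch of that counterpart (reconstructed, not quoted from the WebDAV sources):

__private_extern__ void webdav_unlock(struct webdavnode *pt)
{
	/* pt_lockState was written under the lock this thread still holds */
	if (pt->pt_lockState == WEBDAV_SHARED_LOCK)
		lck_rw_unlock_shared(&pt->pt_rwlock);
	else
		lck_rw_unlock_exclusive(&pt->pt_rwlock);
}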
Example #16
void
in6_pcbdisconnect(
	struct inpcb *inp)
{
	if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->mtx)) {
		/* lock inversion issue, mostly with UDP multicast packets */
		socket_unlock(inp->inp_socket, 0);
		lck_rw_lock_exclusive(inp->inp_pcbinfo->mtx);
		socket_lock(inp->inp_socket, 0);
	}
	bzero((caddr_t)&inp->in6p_faddr, sizeof(inp->in6p_faddr));
	inp->inp_fport = 0;
	/* clear flowinfo - draft-itojun-ipv6-flowlabel-api-00 */
	inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK;
	in_pcbrehash(inp);
	lck_rw_done(inp->inp_pcbinfo->mtx);
	if (inp->inp_socket->so_state & SS_NOFDREF)
		in6_pcbdetach(inp);
}
Example #17
void Lpx_PCB_dispense(struct lpxpcb *lpxp)
{	
	struct stream_pcb *cb = NULL;

	DEBUG_PRINT(DEBUG_MASK_PCB_TRACE, ("Lpx_PCB_dispense: Entered.\n"));

	if (lpxp == 0) {
		return;
	}
	
	cb = (struct stream_pcb *)lpxp->lpxp_pcb;
	
	if (cb != 0) {
		
		register struct lpx_stream_q *q;
				
		for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
			q = q->si_prev;
			remque(q->si_next);
		}
		
		m_freem(dtom(cb->s_lpx));
		FREE(cb, M_PCB);
		lpxp->lpxp_pcb = 0;
	}	
	
    // Free Lock.
	if (lpxp->lpxp_mtx != NULL) {
		lck_mtx_free(lpxp->lpxp_mtx, lpxp->lpxp_mtx_grp);  
	}
				
	lck_rw_lock_exclusive(lpxp->lpxp_head->lpxp_list_rw);
	remque(lpxp);
	lck_rw_unlock_exclusive(lpxp->lpxp_head->lpxp_list_rw);
		
	FREE(lpxp, M_PCB);		
}
Example #18
void sysctl_register_oid(struct sysctl_oid *oidp)
{
	struct sysctl_oid_list *parent = oidp->oid_parent;
	struct sysctl_oid *p;
	struct sysctl_oid *q;
	int n;
	funnel_t *fnl;

	fnl = spl_kernel_funnel();

	if(sysctl_geometry_lock == NULL)
	{
		/* Initialise the geometry lock for reading/modifying the sysctl tree
		 * This is done here because IOKit registers some sysctls before bsd_init()
		 * calls sysctl_register_fixed().
		 */

		lck_grp_t* lck_grp  = lck_grp_alloc_init("sysctl", NULL);
		sysctl_geometry_lock = lck_rw_alloc_init(lck_grp, NULL);
	}
	/* Get the write lock to modify the geometry */
	lck_rw_lock_exclusive(sysctl_geometry_lock);

	/*
	 * If this oid has a number OID_AUTO, give it a number which
	 * is greater than any current oid.  Make sure it is at least
	 * OID_AUTO_START to leave space for pre-assigned oid numbers.
	 */
	if (oidp->oid_number == OID_AUTO) {
		/* First, find the highest oid in the parent list >OID_AUTO_START-1 */
		n = OID_AUTO_START;
		SLIST_FOREACH(p, parent, oid_link) {
			if (p->oid_number > n)
				n = p->oid_number;
		}
		oidp->oid_number = n + 1;
	}
Example #19
void
lock_write(
	register lock_t	* l)
{
	lck_rw_lock_exclusive(l);
}
Example #20
void
lock_write_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_exclusive(lock);
}
Example #21
static int
vboxvfs_vnode_open(struct vnop_open_args *args)
{
    vnode_t           vnode;
    vboxvfs_vnode_t  *pVnodeData;
    uint32_t          fHostFlags;
    mount_t           mp;
    vboxvfs_mount_t  *pMount;

    int rc;

    PDEBUG("Opening vnode...");

    AssertReturn(args, EINVAL);

    vnode           = args->a_vp;                              AssertReturn(vnode,      EINVAL);
    pVnodeData      = (vboxvfs_vnode_t *)vnode_fsnode(vnode);  AssertReturn(pVnodeData, EINVAL);
    mp              = vnode_mount(vnode);                      AssertReturn(mp,         EINVAL);
    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);             AssertReturn(pMount,     EINVAL);

    lck_rw_lock_exclusive(pVnodeData->pLock);

    if (vnode_isinuse(vnode, 0))
    {
        PDEBUG("vnode '%s' (handle 0x%X) already has VBoxVFS object handle assigned, just return ok",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle);

        lck_rw_unlock_exclusive(pVnodeData->pLock);
        return 0;
    }

    /* At this point we must make sure that nobody is using VBoxVFS object handle */
    //if (pVnodeData->Handle != SHFL_HANDLE_NIL)
    //{
    //    PDEBUG("vnode has active VBoxVFS object handle set, aborting");
    //    lck_rw_unlock_exclusive(pVnodeData->pLock);
    //    return EINVAL;
    //}

    fHostFlags  = vboxvfs_g2h_mode_inernal(args->a_mode);
    fHostFlags |= (vnode_isdir(vnode) ? SHFL_CF_DIRECTORY : 0);

    SHFLHANDLE Handle;
    rc = vboxvfs_open_internal(pMount, pVnodeData->pPath, fHostFlags, &Handle);
    if (rc == 0)
    {
        PDEBUG("Open success: '%s' (handle 0x%X)",
               (char *)pVnodeData->pPath->String.utf8,
               (int)Handle);

        pVnodeData->pHandle = Handle;
    }
    else
    {
        PDEBUG("Unable to open: '%s': %d",
               (char *)pVnodeData->pPath->String.utf8,
               rc);
    }

    lck_rw_unlock_exclusive(pVnodeData->pLock);

    return rc;
}
Example #22
static int
vboxvfs_vnode_lookup(struct vnop_lookup_args *args)
{
    int rc;

    vnode_t          vnode;
    vboxvfs_vnode_t *pVnodeData;

    PDEBUG("Looking up for vnode...");

    AssertReturn(args,                      EINVAL);
    AssertReturn(args->a_dvp,               EINVAL);
    AssertReturn(vnode_isdir(args->a_dvp),  EINVAL);
    AssertReturn(args->a_cnp,               EINVAL);
    AssertReturn(args->a_cnp->cn_nameptr,   EINVAL);
    AssertReturn(args->a_vpp,               EINVAL);

    pVnodeData = (vboxvfs_vnode_t *)vnode_fsnode(args->a_dvp);
    AssertReturn(pVnodeData, EINVAL);
    AssertReturn(pVnodeData->pLock, EINVAL);

    /*
    todo: take care about args->a_cnp->cn_nameiop
    */

    if      (args->a_cnp->cn_nameiop == LOOKUP) PDEBUG("LOOKUP");
    else if (args->a_cnp->cn_nameiop == CREATE) PDEBUG("CREATE");
    else if (args->a_cnp->cn_nameiop == RENAME) PDEBUG("RENAME");
    else if (args->a_cnp->cn_nameiop == DELETE) PDEBUG("DELETE");
    else PDEBUG("Unknown cn_nameiop: 0x%X", (int)args->a_cnp->cn_nameiop);

    lck_rw_lock_exclusive(pVnodeData->pLock);

    /* Take care about '.' and '..' entries */
    if (vboxvfs_vnode_lookup_dot_handler(args, &vnode) == 0)
    {
        vnode_get(vnode);
        *args->a_vpp = vnode;

        lck_rw_unlock_exclusive(pVnodeData->pLock);

        return 0;
    }

    /* Look into the VFS cache and attempt to find a previously allocated vnode there. */
    rc = cache_lookup(args->a_dvp, &vnode, args->a_cnp);
    if (rc == -1) /* Record found */
    {
        PDEBUG("Found record in VFS cache");

        /* Check if the VFS object still exists on the host side */
        if (vboxvfs_exist_internal(vnode))
        {
            /* Prepare & return cached vnode */
            vnode_get(vnode);
            *args->a_vpp = vnode;

            rc = 0;
        }
        else
        {
            /* If the vnode exists in the guest VFS cache but not on the host -- just forget it. */
            cache_purge(vnode);
            /* todo: free vnode data here */
            rc = ENOENT;
        }
    }
    else
    {
        PDEBUG("cache_lookup() returned %d, create new VFS vnode", rc);

        rc = vboxvfs_vnode_lookup_instantinate_vnode(args->a_dvp, args->a_cnp->cn_nameptr, &vnode);
        if (rc == 0)
        {
            cache_enter(args->a_dvp, vnode, args->a_cnp);
            *args->a_vpp = vnode;
        }
        else
        {
            rc = ENOENT;
        }
    }

    lck_rw_unlock_exclusive(pVnodeData->pLock);

    return rc;
}
Example #23
/*
 * XXX: this is borrowed from in6_pcbbind(). If possible, we should
 * share this function by all *bsd*...
 */
int
in6_pcbsetport(
	__unused struct in6_addr *laddr,
	struct inpcb *inp,
	struct proc *p,
	int locked)
{
	struct socket *so = inp->inp_socket;
	u_int16_t lport = 0, first, last, *lastport;
	int count, error = 0, wild = 0;
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	kauth_cred_t cred;
	if (!locked) { /* Make sure we don't run into a deadlock: 4052373 */
		if (!lck_rw_try_lock_exclusive(pcbinfo->mtx)) {
			socket_unlock(inp->inp_socket, 0);
			lck_rw_lock_exclusive(pcbinfo->mtx);
			socket_lock(inp->inp_socket, 0);
		}
	}

	/* XXX: this is redundant when called from in6_pcbbind */
	if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0)
		wild = INPLOOKUP_WILDCARD;

	inp->inp_flags |= INP_ANONPORT;

	if (inp->inp_flags & INP_HIGHPORT) {
		first = ipport_hifirstauto;	/* sysctl */
		last  = ipport_hilastauto;
		lastport = &pcbinfo->lasthi;
	} else if (inp->inp_flags & INP_LOWPORT) {
		cred = kauth_cred_proc_ref(p);
		error = priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT, 0);
		kauth_cred_unref(&cred);
		if (error != 0) {
			if (!locked)
				lck_rw_done(pcbinfo->mtx);
			return error;
		}
		first = ipport_lowfirstauto;	/* 1023 */
		last  = ipport_lowlastauto;	/* 600 */
		lastport = &pcbinfo->lastlow;
	} else {
		first = ipport_firstauto;	/* sysctl */
		last  = ipport_lastauto;
		lastport = &pcbinfo->lastport;
	}
	/*
	 * Simple check to ensure all ports are not used up causing
	 * a deadlock here.
	 *
	 * We split the two cases (up and down) so that the direction
	 * is not being tested on each round of the loop.
	 */
	if (first > last) {
		/*
		 * counting down
		 */
		count = first - last;

		do {
			if (count-- < 0) {	/* completely used? */
				/*
				 * Undo any address bind that may have
				 * occurred above.
				 */
				inp->in6p_laddr = in6addr_any;
				inp->in6p_last_outif = 0;
				if (!locked)
					lck_rw_done(pcbinfo->mtx);
				return (EAGAIN);
			}
			--*lastport;
			if (*lastport > first || *lastport < last)
				*lastport = first;
			lport = htons(*lastport);
		} while (in6_pcblookup_local(pcbinfo,
					     &inp->in6p_laddr, lport, wild));
	} else {
		/*
		 * counting up
		 */
		count = last - first;

		do {
			if (count-- < 0) {	/* completely used? */
				/*
				 * Undo any address bind that may have
				 * occurred above.
				 */
				inp->in6p_laddr = in6addr_any;
				inp->in6p_last_outif = 0;
				if (!locked)
					lck_rw_done(pcbinfo->mtx);
				return (EAGAIN);
			}
			++*lastport;
			if (*lastport < first || *lastport > last)
				*lastport = first;
			lport = htons(*lastport);
		} while (in6_pcblookup_local(pcbinfo,
					     &inp->in6p_laddr, lport, wild));
	}

	inp->inp_lport = lport;
	if (in_pcbinshash(inp, 1) != 0) {
		inp->in6p_laddr = in6addr_any;
		inp->inp_lport = 0;
		inp->in6p_last_outif = 0;
		if (!locked)
			lck_rw_done(pcbinfo->mtx);
		return (EAGAIN);
	}

	if (!locked)
		lck_rw_done(pcbinfo->mtx);
	return(0);
}
Example #24
int
in6_pcbbind(
	struct inpcb *inp,
	struct sockaddr *nam,
	struct proc *p)
{
	struct socket *so = inp->inp_socket;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)NULL;
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	u_short	lport = 0;
	int wild = 0, reuseport = (so->so_options & SO_REUSEPORT);

	if (!in6_ifaddrs) /* XXX broken! */
		return (EADDRNOTAVAIL);
	if (inp->inp_lport || !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
		return(EINVAL);
	if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0)
		wild = 1;
	socket_unlock(so, 0); /* keep reference */
	lck_rw_lock_exclusive(pcbinfo->mtx);
	if (nam) {
		sin6 = (struct sockaddr_in6 *)nam;
		if (nam->sa_len != sizeof(*sin6)) {
			lck_rw_done(pcbinfo->mtx);
			socket_lock(so, 0);
			return(EINVAL);
		}
		/*
		 * family check.
		 */
		if (nam->sa_family != AF_INET6) {
			lck_rw_done(pcbinfo->mtx);
			socket_lock(so, 0);
			return(EAFNOSUPPORT);
		}

		/* KAME hack: embed scopeid */
		if (in6_embedscope(&sin6->sin6_addr, sin6, inp, NULL) != 0) {
			lck_rw_done(pcbinfo->mtx);
			socket_lock(so, 0);
			return EINVAL;
		}
		/* this must be cleared for ifa_ifwithaddr() */
		sin6->sin6_scope_id = 0;

		lport = sin6->sin6_port;
		if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
			/*
			 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
			 * allow complete duplication of binding if
			 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
			 * and a multicast address is bound on both
			 * new and duplicated sockets.
			 */
			if (so->so_options & SO_REUSEADDR)
				reuseport = SO_REUSEADDR|SO_REUSEPORT;
		} else if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
			struct ifaddr *ia = NULL;

			sin6->sin6_port = 0;		/* yech... */
			if ((ia = ifa_ifwithaddr((struct sockaddr *)sin6)) == 0) {
				lck_rw_done(pcbinfo->mtx);
				socket_lock(so, 0);
				return(EADDRNOTAVAIL);
			}

			/*
			 * XXX: bind to an anycast address might accidentally
			 * cause sending a packet with anycast source address.
			 * We should allow binding to a deprecated address, since
			 * the application dares to use it.
			 */
			if (ia &&
			    ((struct in6_ifaddr *)ia)->ia6_flags &
			    (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY|IN6_IFF_DETACHED)) {
			    	ifafree(ia);
				lck_rw_done(pcbinfo->mtx);
				socket_lock(so, 0);
				return(EADDRNOTAVAIL);
			}
			ifafree(ia);
			ia = NULL;
		}
		if (lport) {
			struct inpcb *t;

			/* GROSS */
			if (ntohs(lport) < IPV6PORT_RESERVED && p &&
                            ((so->so_state & SS_PRIV) == 0)) {
				lck_rw_done(pcbinfo->mtx);
				socket_lock(so, 0);
				return(EACCES);
			}

			if (so->so_uid &&
			    !IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				t = in6_pcblookup_local_and_cleanup(pcbinfo,
				    &sin6->sin6_addr, lport,
				    INPLOOKUP_WILDCARD);
				if (t &&
				    (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
				     !IN6_IS_ADDR_UNSPECIFIED(&t->in6p_laddr) ||
				     (t->inp_socket->so_options &
				      SO_REUSEPORT) == 0) &&
				     (so->so_uid != t->inp_socket->so_uid) &&
				     ((t->inp_socket->so_flags & SOF_REUSESHAREUID) == 0)) {
					lck_rw_done(pcbinfo->mtx);
					socket_lock(so, 0);
					return (EADDRINUSE);
				}
				if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
				    IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
					struct sockaddr_in sin;

					in6_sin6_2_sin(&sin, sin6);
					t = in_pcblookup_local_and_cleanup(pcbinfo,
						sin.sin_addr, lport,
						INPLOOKUP_WILDCARD);
					if (t && (t->inp_socket->so_options & SO_REUSEPORT) == 0 &&
					    (so->so_uid !=
					     t->inp_socket->so_uid) &&
					    (ntohl(t->inp_laddr.s_addr) !=
					     INADDR_ANY ||
					     INP_SOCKAF(so) ==
					     INP_SOCKAF(t->inp_socket))) {

						lck_rw_done(pcbinfo->mtx);
						socket_lock(so, 0);
						return (EADDRINUSE);
					}
				}
			}
			t = in6_pcblookup_local_and_cleanup(pcbinfo, &sin6->sin6_addr,
						lport, wild);
			if (t && (reuseport & t->inp_socket->so_options) == 0) {
				lck_rw_done(pcbinfo->mtx);
				socket_lock(so, 0);
				return(EADDRINUSE);
			}
			if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0 &&
			    IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
				struct sockaddr_in sin;

				in6_sin6_2_sin(&sin, sin6);
				t = in_pcblookup_local_and_cleanup(pcbinfo, sin.sin_addr,
						       lport, wild);
				if (t &&
				    (reuseport & t->inp_socket->so_options)
				    == 0 &&
				    (ntohl(t->inp_laddr.s_addr)
				     != INADDR_ANY ||
				     INP_SOCKAF(so) ==
				     INP_SOCKAF(t->inp_socket))) {
					lck_rw_done(pcbinfo->mtx);
					socket_lock(so, 0);
					return (EADDRINUSE);
				}
			}
		}
		inp->in6p_laddr = sin6->sin6_addr;
	}
	socket_lock(so, 0);
	if (lport == 0) {
		int e;
		if ((e = in6_pcbsetport(&inp->in6p_laddr, inp, p, 1)) != 0) {
			lck_rw_done(pcbinfo->mtx);
			return(e);
		}
	}
	else {
		inp->inp_lport = lport;
		if (in_pcbinshash(inp, 1) != 0) {
			inp->in6p_laddr = in6addr_any;
			inp->inp_lport = 0;
			lck_rw_done(pcbinfo->mtx);
			return (EAGAIN);
		}
	}	
	lck_rw_done(pcbinfo->mtx);
	sflt_notify(so, sock_evt_bound, NULL);
	return(0);
}
Example #25
__private_extern__ int
get_pcblist_n(short proto, struct sysctl_req *req, struct inpcbinfo *pcbinfo)
{
	int error = 0;
	int i, n;
	struct inpcb *inp, **inp_list = NULL;
	inp_gen_t gencnt;
	struct xinpgen xig;
	void *buf = NULL;
	size_t item_size = ROUNDUP64(sizeof (struct xinpcb_n)) +
	    ROUNDUP64(sizeof (struct xsocket_n)) +
	    2 * ROUNDUP64(sizeof (struct xsockbuf_n)) +
	    ROUNDUP64(sizeof (struct xsockstat_n));

	if (proto == IPPROTO_TCP)
		item_size += ROUNDUP64(sizeof (struct xtcpcb_n));

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	lck_rw_lock_exclusive(pcbinfo->ipi_lock);
	if (req->oldptr == USER_ADDR_NULL) {
		n = pcbinfo->ipi_count;
		req->oldidx = 2 * (sizeof (xig)) + (n + n/8) * item_size;
		goto done;
	}

	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}

	/*
	 * OK, now we're committed to doing something.
	 */
	gencnt = pcbinfo->ipi_gencnt;
	n = pcbinfo->ipi_count;

	bzero(&xig, sizeof (xig));
	xig.xig_len = sizeof (xig);
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof (xig));
	if (error) {
		goto done;
	}
	/*
	 * We are done if there is no pcb
	 */
	if (n == 0) {
		goto done;
	}

	buf = _MALLOC(item_size, M_TEMP, M_WAITOK);
	if (buf == NULL) {
		error = ENOMEM;
		goto done;
	}

	inp_list = _MALLOC(n * sizeof (*inp_list), M_TEMP, M_WAITOK);
	if (inp_list == NULL) {
		error = ENOMEM;
		goto done;
	}

	for (inp = pcbinfo->ipi_listhead->lh_first, i = 0; inp && i < n;
	    inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD)
			inp_list[i++] = inp;
	}
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD) {
			struct xinpcb_n *xi = (struct xinpcb_n *)buf;
			struct xsocket_n *xso = (struct xsocket_n *)
			    ADVANCE64(xi, sizeof (*xi));
			struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
			    ADVANCE64(xso, sizeof (*xso));
			struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
			    ADVANCE64(xsbrcv, sizeof (*xsbrcv));
			struct xsockstat_n *xsostats = (struct xsockstat_n *)
			    ADVANCE64(xsbsnd, sizeof (*xsbsnd));

			bzero(buf, item_size);

			inpcb_to_xinpcb_n(inp, xi);
			sotoxsocket_n(inp->inp_socket, xso);
			sbtoxsockbuf_n(inp->inp_socket ?
			    &inp->inp_socket->so_rcv : NULL, xsbrcv);
			sbtoxsockbuf_n(inp->inp_socket ?
			    &inp->inp_socket->so_snd : NULL, xsbsnd);
			sbtoxsockstat_n(inp->inp_socket, xsostats);
			if (proto == IPPROTO_TCP) {
				struct  xtcpcb_n *xt = (struct xtcpcb_n *)
				    ADVANCE64(xsostats, sizeof (*xsostats));

				/*
				 * inp->inp_ppcb can only be NULL during
				 * an initialization race window.
				 * No need to lock.
				 */
				if (inp->inp_ppcb == NULL)
					continue;

				tcpcb_to_xtcpcb_n((struct tcpcb *)
				    inp->inp_ppcb, xt);
			}
			error = SYSCTL_OUT(req, buf, item_size);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		bzero(&xig, sizeof (xig));
		xig.xig_len = sizeof (xig);
		xig.xig_gen = pcbinfo->ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = pcbinfo->ipi_count;
		error = SYSCTL_OUT(req, &xig, sizeof (xig));
	}
done:
	lck_rw_done(pcbinfo->ipi_lock);
	if (inp_list != NULL)
		FREE(inp_list, M_TEMP);
	if (buf != NULL)
		FREE(buf, M_TEMP);
	return (error);
}
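get_pcblist_n() follows the standard two-phase sysctl protocol under a single exclusive hold: a size-probe pass (req->oldptr == USER_ADDR_NULL) only reports an estimate via req->oldidx, padded by n/8 so a list that grows slightly between the two calls still fits, and the real pass emits rows with SYSCTL_OUT(). The protocol reduced to a skeleton, with my_lock, my_count, MY_ITEM_SIZE, and my_emit_rows() as hypothetical stand-ins:

static int
my_list_sysctl(struct sysctl_req *req)
{
	int error;

	lck_rw_lock_exclusive(my_lock);

	if (req->oldptr == USER_ADDR_NULL) {
		/* phase 1: size probe only; pad the estimate for growth */
		req->oldidx = (my_count + my_count / 8) * MY_ITEM_SIZE;
		error = 0;
		goto done;
	}
	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;	/* the node is read-only */
		goto done;
	}
	/* phase 2: copy the rows out under the same lock hold */
	error = my_emit_rows(req);
done:
	lck_rw_done(my_lock);
	return (error);
}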