static int vboxvfs_vnode_close(struct vnop_close_args *args) { vnode_t vnode; mount_t mp; vboxvfs_vnode_t *pVnodeData; vboxvfs_mount_t *pMount; int rc; PDEBUG("Closing vnode..."); AssertReturn(args, EINVAL); vnode = args->a_vp; AssertReturn(vnode, EINVAL); pVnodeData = (vboxvfs_vnode_t *)vnode_fsnode(vnode); AssertReturn(pVnodeData, EINVAL); mp = vnode_mount(vnode); AssertReturn(mp, EINVAL); pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp); AssertReturn(pMount, EINVAL); lck_rw_lock_exclusive(pVnodeData->pLock); if (vnode_isinuse(vnode, 0)) { PDEBUG("vnode '%s' (handle 0x%X) is still in use, just return ok", (char *)pVnodeData->pPath->String.utf8, (int)pVnodeData->pHandle); lck_rw_unlock_exclusive(pVnodeData->pLock); return 0; } /* At this point we must make sure that vnode has VBoxVFS object handle assigned */ if (pVnodeData->pHandle == SHFL_HANDLE_NIL) { PDEBUG("vnode has no active VBoxVFS object handle set, aborting"); lck_rw_unlock_exclusive(pVnodeData->pLock); return EINVAL; } rc = vboxvfs_close_internal(pMount, pVnodeData->pHandle); if (rc == 0) { PDEBUG("Close success: '%s' (handle 0x%X)", (char *)pVnodeData->pPath->String.utf8, (int)pVnodeData->pHandle); /* Forget about previously assigned VBoxVFS object handle */ pVnodeData->pHandle = SHFL_HANDLE_NIL; } else { PDEBUG("Unable to close: '%s' (handle 0x%X): %d", (char *)pVnodeData->pPath->String.utf8, (int)pVnodeData->pHandle, rc); } lck_rw_unlock_exclusive(pVnodeData->pLock); return rc; }
/*
 * Duplicate an outgoing packet, tag the duplicate so our filter can
 * recognize it later, then schedule delayed injection of the packet.
 *
 * Fix: the mbuf chain allocated by mbuf_dup() was leaked on every error
 * path; it is now released with mbuf_freem() before bailing out.
 *
 * Returns 0 on success or the errno from the failing KPI call.
 */
errno_t kn_inject_after_http (mbuf_t otgn_data)
{
    errno_t retval = 0;
    mbuf_t otgn_data_dup;
    u_int16_t ms = 0;

    /* Snapshot the configured HTTP delay under the master-record lock.
     * NOTE(review): this is a read-only access taken exclusively; a shared
     * lock would suffice — left as-is to preserve behavior. */
    lck_rw_lock_exclusive(gMasterRecordLock);
    ms = master_record.http_delay_ms;
    lck_rw_unlock_exclusive(gMasterRecordLock);

    retval = mbuf_dup(otgn_data, MBUF_DONTWAIT, &otgn_data_dup);
    if (retval != 0) {
        kn_debug("mbuf_dup returned error %d\n", retval);
        return retval;
    }

    retval = kn_mbuf_set_tag(&otgn_data_dup, gidtag, kMY_TAG_TYPE, outgoing_direction);
    if (retval != 0) {
        kn_debug("kn_mbuf_set_tag returned error %d\n", retval);
        mbuf_freem(otgn_data_dup);   /* don't leak the duplicate */
        return retval;
    }

    /* NOTE(review): the tagged duplicate is never injected — the ORIGINAL
     * otgn_data is passed below, and on success the dup's last reference is
     * dropped without a free. If the dup was meant to be the injected packet,
     * this call should take otgn_data_dup instead; confirm with the author
     * before changing the success-path behavior. */
    retval = kn_delay_pkt_inject(otgn_data, ms, outgoing_direction);
    if (retval != 0) {
        kn_debug("kn_delay_pkt_inject returned error %d\n", retval);
        mbuf_freem(otgn_data_dup);   /* don't leak the duplicate */
        return retval;
    }
    return KERN_SUCCESS;
}
/*
 * Remove a previously registered transmit-completion callback from the
 * global callback table.
 *
 * Returns 0 on success, EINVAL for a NULL callback, and ENOENT when the
 * callback was never registered.
 */
errno_t
mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback)
{
    errno_t result = ENOENT;    /* assume the worst */
    int slot;

    if (callback == NULL)
        return (EINVAL);

    lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock);
    for (slot = 0; slot < MAX_MBUF_TX_COMPL_FUNC; slot++) {
        if (mbuf_tx_compl_table[slot] == callback) {
            mbuf_tx_compl_table[slot] = NULL;
            result = 0;
            break;
        }
    }
    lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock);

    return (result);
}
/*
 * Register a transmit-completion callback in the first free slot of the
 * global callback table.
 *
 * Returns 0 on success, EINVAL for a NULL callback, EEXIST if it is
 * already registered, and ENOSPC when the table is full.
 */
errno_t
mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback)
{
    errno_t result;
    int slot;

    if (callback == NULL)
        return (EINVAL);

    lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock);

    if (get_tx_compl_callback_index_locked(callback) != -1) {
        /* Already present: reject duplicates. */
        result = EEXIST;
    } else {
        result = ENOSPC;    /* assume the worst */
        for (slot = 0; slot < MAX_MBUF_TX_COMPL_FUNC; slot++) {
            if (mbuf_tx_compl_table[slot] == NULL) {
                mbuf_tx_compl_table[slot] = callback;
                result = 0;
                break;
            }
        }
    }

    lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock);

    return (result);
}
/*
 * Drop a reader/writer lock.  If the calling thread is the recorded
 * exclusive owner, the write side is released; otherwise the thread is
 * assumed to hold a read reference, which is dropped before the shared
 * unlock.
 */
void rw_exit(krwlock_t *rwlp)
{
    lck_rw_t *lock = (lck_rw_t *)&rwlp->rw_lock[0];

    if (rwlp->rw_owner != current_thread()) {
        /* Reader: drop our reference count, then the shared hold. */
        OSDecrementAtomic((volatile SInt32 *)&rwlp->rw_readers);
        lck_rw_unlock_shared(lock);
    } else {
        /* Writer: clear ownership before releasing the exclusive hold. */
        rwlp->rw_owner = NULL;
        lck_rw_unlock_exclusive(lock);
    }
}
/*
 * Routine: lck_rw_unlock
 *
 * Release a reader/writer lock previously acquired with the matching
 * lck_rw_type; any other type value is a caller bug and panics.
 */
void
lck_rw_unlock(
	lck_rw_t	*lck,
	lck_rw_type_t	lck_rw_type)
{
	switch (lck_rw_type) {
	case LCK_RW_TYPE_SHARED:
		lck_rw_unlock_shared(lck);
		break;
	case LCK_RW_TYPE_EXCLUSIVE:
		lck_rw_unlock_exclusive(lck);
		break;
	default:
		panic("lck_rw_unlock(): Invalid RW lock type: %d\n", lck_rw_type);
	}
}
/*
 * Release a reader/writer lock, asserting the expected lock-state
 * invariants for each side before handing the lock back.
 */
void
rw_exit(krwlock_t *rwlp)
{
	if (rwlp->rw_owner != current_thread()) {
		/* Shared holder: drop our reader reference first. */
		atomic_dec_32((volatile uint32_t *)&rwlp->rw_readers);
		ASSERT(rwlp->rw_owner == 0);
		lck_rw_unlock_shared((lck_rw_t *)&rwlp->rw_lock[0]);
		return;
	}

	/* Exclusive holder: clear ownership before releasing. */
	rwlp->rw_owner = NULL;
	ASSERT(rwlp->rw_readers == 0);
	lck_rw_unlock_exclusive((lck_rw_t *)&rwlp->rw_lock[0]);
}
int Lpx_PCB_alloc( struct socket *so, struct lpxpcb *head, struct proc *td ) { register struct lpxpcb *lpxp; DEBUG_PRINT(DEBUG_MASK_PCB_TRACE, ("Lpx_PCB_alloc\n")); MALLOC(lpxp, struct lpxpcb *, sizeof *lpxp, M_PCB, M_WAITOK); if (lpxp == NULL) { DEBUG_PRINT(DEBUG_MASK_PCB_ERROR, ("Lpx_PCB_alloc:==> Failed\n")); return (ENOBUFS); } bzero(lpxp, sizeof(*lpxp)); lpxp->lpxp_socket = so; if (lpxcksum) lpxp->lpxp_flags |= LPXP_CHECKSUM; read_random(&lpxp->lpxp_messageid, sizeof(lpxp->lpxp_messageid)); lck_rw_lock_exclusive(head->lpxp_list_rw); insque(lpxp, head); lck_rw_unlock_exclusive(head->lpxp_list_rw); lpxp->lpxp_head = head; so->so_pcb = (caddr_t)lpxp; //so->so_options |= SO_DONTROUTE; if (so->so_proto->pr_flags & PR_PCBLOCK) { if (head == &lpx_stream_pcb) { lpxp->lpxp_mtx = lck_mtx_alloc_init(stream_mtx_grp, stream_mtx_attr); lpxp->lpxp_mtx_grp = stream_mtx_grp; } else { lpxp->lpxp_mtx = lck_mtx_alloc_init(datagram_mtx_grp, datagram_mtx_attr); lpxp->lpxp_mtx_grp = datagram_mtx_grp; } if (lpxp->lpxp_mtx == NULL) { DEBUG_PRINT(DEBUG_MASK_PCB_ERROR, ("Lpx_PCB_alloc: can't alloc mutex! so=%p\n", so)); FREE(lpxp, M_PCB); return(ENOMEM); } } return (0); }
/*
 * Release a write (exclusive) hold on the wrapped XNU rw-lock.
 *
 * Returns ADT_INVALID_PARAM when 'lock' is NULL, ADT_OK otherwise.
 */
adt_status _adt_xnu_write_unlock(ADT_LOCK lock)
{
    if (NULL == lock)
        return ADT_INVALID_PARAM;

    lck_rw_unlock_exclusive(lock->rw_lock);
    return ADT_OK;
}
/*
 * Release whatever lock mode is currently recorded on the node.
 *
 * NOTE(review): only the exclusive path resets lcktype — presumably because
 * several shared holders can go through the same node at once; confirm that
 * this asymmetry is intentional before changing it.
 */
__private_extern__ void
nunlock_9p(node_9p *np)
{
	// DEBUG("%p", np);
	if (np->lcktype == NODE_LCK_SHARED) {
		lck_rw_unlock_shared(np->lck);
	} else if (np->lcktype == NODE_LCK_EXCLUSIVE) {
		np->lcktype = NODE_LCK_NONE;
		lck_rw_unlock_exclusive(np->lck);
	}
	/* NODE_LCK_NONE: nothing to release */
}
/*
 * Tear down and free a protocol control block: drain its stream queue,
 * free the attached stream PCB and mutex, unlink it from the global PCB
 * list, and release its memory.  A NULL 'lpxp' is a no-op.
 */
void Lpx_PCB_dispense(struct lpxpcb *lpxp )
{
    struct stream_pcb *cb = NULL;

    DEBUG_PRINT(DEBUG_MASK_PCB_TRACE, ("Lpx_PCB_dispense: Entered.\n"));

    if (lpxp == 0) {
        return;
    }

    cb = (struct stream_pcb *)lpxp->lpxp_pcb;

    if (cb != 0) {
        register struct lpx_stream_q *q;

        /* Drain the queue: each iteration steps forward, then backs up one
         * and unlinks the node just ahead, keeping 'q' valid throughout.
         * NOTE(review): intricate pointer dance that depends on the si_next/
         * si_prev invariants of this queue — verify before restructuring. */
        for (q = cb->s_q.si_next; q != &cb->s_q; q = q->si_next) {
            q = q->si_prev;
            remque(q->si_next);
        }
        m_freem(dtom(cb->s_lpx));
        FREE(cb, M_PCB);
        lpxp->lpxp_pcb = 0;
    }

    // Free Lock.
    if (lpxp->lpxp_mtx != NULL) {
        lck_mtx_free(lpxp->lpxp_mtx, lpxp->lpxp_mtx_grp);
    }

    /* Unlink from the global PCB list under its rw-lock, then release. */
    lck_rw_lock_exclusive(lpxp->lpxp_head->lpxp_list_rw);
    remque(lpxp);
    lck_rw_unlock_exclusive(lpxp->lpxp_head->lpxp_list_rw);

    FREE(lpxp, M_PCB);
}
static int vboxvfs_vnode_open(struct vnop_open_args *args) { vnode_t vnode; vboxvfs_vnode_t *pVnodeData; uint32_t fHostFlags; mount_t mp; vboxvfs_mount_t *pMount; int rc; PDEBUG("Opening vnode..."); AssertReturn(args, EINVAL); vnode = args->a_vp; AssertReturn(vnode, EINVAL); pVnodeData = (vboxvfs_vnode_t *)vnode_fsnode(vnode); AssertReturn(pVnodeData, EINVAL); mp = vnode_mount(vnode); AssertReturn(mp, EINVAL); pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp); AssertReturn(pMount, EINVAL); lck_rw_lock_exclusive(pVnodeData->pLock); if (vnode_isinuse(vnode, 0)) { PDEBUG("vnode '%s' (handle 0x%X) already has VBoxVFS object handle assigned, just return ok", (char *)pVnodeData->pPath->String.utf8, (int)pVnodeData->pHandle); lck_rw_unlock_exclusive(pVnodeData->pLock); return 0; } /* At this point we must make sure that nobody is using VBoxVFS object handle */ //if (pVnodeData->Handle != SHFL_HANDLE_NIL) //{ // PDEBUG("vnode has active VBoxVFS object handle set, aborting"); // lck_rw_unlock_exclusive(pVnodeData->pLock); // return EINVAL; //} fHostFlags = vboxvfs_g2h_mode_inernal(args->a_mode); fHostFlags |= (vnode_isdir(vnode) ? SHFL_CF_DIRECTORY : 0); SHFLHANDLE Handle; rc = vboxvfs_open_internal(pMount, pVnodeData->pPath, fHostFlags, &Handle); if (rc == 0) { PDEBUG("Open success: '%s' (handle 0x%X)", (char *)pVnodeData->pPath->String.utf8, (int)Handle); pVnodeData->pHandle = Handle; } else { PDEBUG("Unable to open: '%s': %d", (char *)pVnodeData->pPath->String.utf8, rc); } lck_rw_unlock_exclusive(pVnodeData->pLock); return rc; }
/*
 * VNOP lookup handler: resolve a name within directory a_dvp to a vnode,
 * checking the dot/dotdot shortcuts first, then the VFS name cache, and
 * finally creating a fresh vnode from the host object when nothing is
 * cached.  The resolved vnode is returned through args->a_vpp with an
 * extra reference (vnode_get).
 *
 * Returns 0 on success, ENOENT when the name does not exist on the host,
 * or EINVAL on bad arguments.
 */
static int vboxvfs_vnode_lookup(struct vnop_lookup_args *args)
{
    int rc;

    vnode_t          vnode;
    vboxvfs_vnode_t *pVnodeData;

    PDEBUG("Looking up for vnode...");

    AssertReturn(args, EINVAL);
    AssertReturn(args->a_dvp, EINVAL);
    AssertReturn(vnode_isdir(args->a_dvp), EINVAL);
    AssertReturn(args->a_cnp, EINVAL);
    AssertReturn(args->a_cnp->cn_nameptr, EINVAL);
    AssertReturn(args->a_vpp, EINVAL);

    pVnodeData = (vboxvfs_vnode_t *)vnode_fsnode(args->a_dvp);
    AssertReturn(pVnodeData, EINVAL);
    AssertReturn(pVnodeData->pLock, EINVAL);

    /* todo: take care about args->a_cnp->cn_nameiop */

    if      (args->a_cnp->cn_nameiop == LOOKUP) PDEBUG("LOOKUP");
    else if (args->a_cnp->cn_nameiop == CREATE) PDEBUG("CREATE");
    else if (args->a_cnp->cn_nameiop == RENAME) PDEBUG("RENAME");
    else if (args->a_cnp->cn_nameiop == DELETE) PDEBUG("DELETE");
    else PDEBUG("Unknown cn_nameiop: 0x%X", (int)args->a_cnp->cn_nameiop);

    lck_rw_lock_exclusive(pVnodeData->pLock);

    /* Take care about '.' and '..' entries */
    if (vboxvfs_vnode_lookup_dot_handler(args, &vnode) == 0)
    {
        /* NOTE(review): vnode_get()'s return value is not checked here. */
        vnode_get(vnode);
        *args->a_vpp = vnode;

        lck_rw_unlock_exclusive(pVnodeData->pLock);

        return 0;
    }

    /* Look into VFS cache and attempt to find previously allocated vnode there. */
    rc = cache_lookup(args->a_dvp, &vnode, args->a_cnp);
    if (rc == -1) /* Record found */
    {
        PDEBUG("Found record in VFS cache");

        /* Check if VFS object still exist on a host side */
        if (vboxvfs_exist_internal(vnode))
        {
            /* Prepare & return cached vnode */
            vnode_get(vnode);
            *args->a_vpp = vnode;

            rc = 0;
        }
        else
        {
            /* If vnode exist in guets VFS cache, but not exist on a host -- just forget it. */
            cache_purge(vnode);
            /* todo: free vnode data here */
            rc = ENOENT;
        }
    }
    else
    {
        PDEBUG("cache_lookup() returned %d, create new VFS vnode", rc);

        /* Not cached: build a new vnode for the host object and cache it.
         * (Helper name "instantinate" is a typo preserved from its
         * definition elsewhere in the project.) */
        rc = vboxvfs_vnode_lookup_instantinate_vnode(args->a_dvp, args->a_cnp->cn_nameptr, &vnode);
        if (rc == 0)
        {
            cache_enter(args->a_dvp, vnode, args->a_cnp);
            *args->a_vpp = vnode;
        }
        else
        {
            rc = ENOENT;
        }
    }

    lck_rw_unlock_exclusive(pVnodeData->pLock);

    return rc;
}