/*ARGSUSED*/
int
consioctl(__unused dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
    dev_t device;
    boolean_t funnel_state;
    int error;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    if (constty)
        device = constty->t_dev;
    else
        device = cons.t_dev;
    /*
     * Superuser can always use this to wrest control of console
     * output from the "virtual" console.
     */
    if ((unsigned int)cmd == TIOCCONS && constty) {
        error = proc_suser(p);
        if (error) {
            goto out;
        }
        constty = NULL;
        error = 0;
        goto out;
    }
    error = (*cdevsw[major(device)].d_ioctl)(device, cmd, addr, flag, p);
out:
    thread_funnel_set(kernel_flock, funnel_state);

    return(error);
}
errno_t
xi_sock_send(xi_socket_t so, void *buf, size_t *len, int flags)
{
#ifdef __KPI_SOCKET__
    struct iovec aio;
    struct msghdr msg;
    size_t sentLen = *len;
    errno_t error;

    aio.iov_base = buf;
    aio.iov_len = sentLen;
    bzero(&msg, sizeof(msg));
    msg.msg_iov = (struct iovec *) &aio;
    msg.msg_iovlen = 1;

    error = sock_send(so, &msg, flags, &sentLen);
#if 1
    if (error)
        DebugPrint(1, false, "xi_sock_send: so = %p, buf_len = %d error = %d\n",
            so, (int)*len, error);
#endif /* if 1 */
    *len = sentLen;

    return error;
#else
    struct iovec aiov;
    struct uio auio = { 0 };
    register struct proc *p = current_proc();
    register int error = 0;

    aiov.iov_base = buf;
    aiov.iov_len = *len;
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    auio.uio_segflg = UIO_SYSSPACE;
    auio.uio_rw = UIO_WRITE;
    auio.uio_procp = p;
    auio.uio_offset = 0;    /* XXX */
    auio.uio_resid = *len;

    // IOLog("Before: so = %p, buf_len = %d, aiov.iov_len = %d, error = %d, auio.uio_resid = %d\n",
    //     so, (int)*len, (int)aiov.iov_len, error, auio.uio_resid);

    thread_funnel_set(network_flock, TRUE);
    error = sosend(so, NULL, &auio, NULL, 0, flags);
    (void) thread_funnel_set(network_flock, FALSE);
#if 0
    if (error)
        IOLog("After: so = %p, buf_len = %d, aiov.iov_len = %d, error = %d, auio.uio_resid = %d\n",
            so, (int)*len, (int)aiov.iov_len, error, auio.uio_resid);
#endif /* if 0 */
    *len = *len - auio.uio_resid;

    return error;
#endif
}
/*
 *	Routine:	macx_backing_store_recovery
 *	Function:
 *		Syscall interface to set a task's privilege
 *		level so that it is not subject to
 *		macx_backing_store_suspend
 */
int
macx_backing_store_recovery(
    int pid)
{
    int error;
    struct proc *p = current_proc();
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    if ((error = suser(p->p_ucred, &p->p_acflag)))
        goto backing_store_recovery_return;

    /* for now, restrict backing_store_recovery usage to the current task only */
    if (pid != p->p_pid) {
        error = EINVAL;
        goto backing_store_recovery_return;
    }

    task_backing_store_privileged(p->task);

backing_store_recovery_return:
    (void) thread_funnel_set(kernel_flock, FALSE);
    return(error);
}
errno_t
xi_sock_receivefrom(xi_socket_t so, void *buf, size_t *len, int flags,
                    struct sockaddr *from, int *fromlen)
{
    struct iovec aio;
    errno_t error;
#ifdef __KPI_SOCKET__
    struct msghdr msg;
    size_t recvLen;

    aio.iov_base = buf;
    aio.iov_len = *len;
    bzero(&msg, sizeof(msg));
    msg.msg_iov = (struct iovec *) &aio;
    msg.msg_iovlen = 1;

    if (from != NULL && fromlen != NULL && *fromlen > 0) {
        msg.msg_name = from;
        msg.msg_namelen = *fromlen;
    }

    error = sock_receive(so, &msg, flags, &recvLen);
    *len = recvLen;

    return error;
#else
    struct uio auio;
    struct sockaddr *fromsa = 0;

    aio.iov_base = (char *)buf;
    aio.iov_len = *len;
    auio.uio_iov = &aio;
    auio.uio_iovcnt = 1;
    auio.uio_segflg = UIO_SYSSPACE;
    auio.uio_rw = UIO_READ;
    auio.uio_offset = 0;    /* XXX */
    auio.uio_resid = *len;

    thread_funnel_set(network_flock, TRUE);
    error = soreceive(so, &fromsa, &auio, NULL, 0, &flags);
    (void)thread_funnel_set(network_flock, FALSE);

    if (from != NULL && fromsa) {
        bcopy(fromsa, from, sizeof(struct sockaddr_lpx));
        FREE(fromsa, M_SONAME);
    }

    *len = *len - auio.uio_resid;

    return error;
#endif
}
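/*
 * Hedged usage sketch, not from the original source: receiving a single
 * datagram with xi_sock_receivefrom() above. struct sockaddr_lpx is the
 * address type this codebase copies out in the non-KPI path; the helper
 * name xi_recv_one() is hypothetical and exists only for illustration.
 */
static errno_t
xi_recv_one(xi_socket_t so, void *buf, size_t buflen)
{
    struct sockaddr_lpx from;
    int fromlen = sizeof(from);
    size_t len = buflen;

    /* On success, len is updated to the number of bytes received. */
    return xi_sock_receivefrom(so, buf, &len, 0,
        (struct sockaddr *)&from, &fromlen);
}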
/*
 *	Routine:	macx_backing_store_recovery
 *	Function:
 *		Syscall interface to set a task's privilege
 *		level so that it is not subject to
 *		macx_backing_store_suspend
 */
int
macx_backing_store_recovery(
    struct macx_backing_store_recovery_args *args)
{
    int pid = args->pid;
    int error;
    struct proc *p = current_proc();
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    if ((error = suser(kauth_cred_get(), 0)))
        goto backing_store_recovery_return;

    /* for now, restrict backing_store_recovery usage to the current task only */
    if (pid != proc_selfpid()) {
        error = EINVAL;
        goto backing_store_recovery_return;
    }

    task_backing_store_privileged(p->task);

backing_store_recovery_return:
    (void) thread_funnel_set(kernel_flock, FALSE);
    return(error);
}
static void
splx_kernel_funnel(funnel_t *saved)
{
    if (saved != kernel_flock) {
        thread_funnel_set(kernel_flock, FALSE);
        if (saved != NULL)
            thread_funnel_set(saved, TRUE);
    }
}
static void
kmtimeout(struct tty *tp)
{
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    kmoutput(tp);
    (void) thread_funnel_set(kernel_flock, funnel_state);
}
static void
in6_mtutimo_funneled(void *rock)
{
#ifdef __APPLE__
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(network_flock, TRUE);
    in6_mtutimo(rock);
    (void) thread_funnel_set(network_flock, FALSE);
#endif
}
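/*
 * Hedged sketch, an assumption rather than code from the original source:
 * wrappers like kmtimeout() and in6_mtutimo_funneled() exist so that a
 * callback scheduled from a non-funneled context still runs under the
 * correct funnel. A wrapper for a hypothetical callout my_callout() would
 * take the same shape:
 */
static void
my_callout_funneled(void *arg)
{
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(network_flock, TRUE);
    my_callout(arg);    /* hypothetical callback; runs under network_flock */
    (void) thread_funnel_set(network_flock, funnel_state);
}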
void
xi_sock_close(xi_socket_t so)
{
#ifdef __KPI_SOCKET__
    sock_close(so);
#else
    thread_funnel_set(network_flock, TRUE);
    soclose(so);
    (void)thread_funnel_set(network_flock, FALSE);
#endif
}
void
lightning_bolt(__unused void *dummy)
{
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    thread_wakeup(&lbolt);
    timeout(lightning_bolt, 0, hz);
    klogwakeup();

    (void) thread_funnel_set(kernel_flock, FALSE);
}
/*
 * XXX this does not belong here
 */
static funnel_t *
spl_kernel_funnel(void)
{
    funnel_t *cfunnel;

    cfunnel = thread_funnel_get();
    if (cfunnel != kernel_flock) {
        if (cfunnel != NULL)
            thread_funnel_set(cfunnel, FALSE);
        thread_funnel_set(kernel_flock, TRUE);
    }
    return(cfunnel);
}
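/*
 * Hedged usage sketch, not from the original source: code that must run
 * under the kernel funnel can bracket its critical section with
 * spl_kernel_funnel() above and splx_kernel_funnel() earlier in this
 * section, restoring whatever funnel (or none) the thread held before.
 * do_kernel_work() is a hypothetical function used only for illustration.
 */
static void
do_kernel_work_funneled(void)
{
    funnel_t *saved;

    saved = spl_kernel_funnel();    /* enter: switch to kernel_flock */
    do_kernel_work();               /* work that requires kernel_flock */
    splx_kernel_funnel(saved);      /* exit: restore the previous funnel */
}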
errno_t
xi_sock_shutdown(xi_socket_t so, int how)
{
#ifdef __KPI_SOCKET__
    return sock_shutdown(so, how);
#else
    errno_t error;

    thread_funnel_set(network_flock, TRUE);
    error = soshutdown(so, how);
    (void)thread_funnel_set(network_flock, FALSE);

    return error;
#endif
}
errno_t
xi_sock_connect(xi_socket_t so, struct sockaddr *to, int flags)
{
#ifdef __KPI_SOCKET__
    return sock_connect(so, to, flags);
#else
    errno_t error;

    thread_funnel_set(network_flock, TRUE);
    error = soconnect(so, to);    /* flags are not used in this path */
    (void)thread_funnel_set(network_flock, FALSE);

    return error;
#endif
}
errno_t
xi_sock_socket(int domain, int type, int protocol, sock_upcall callback,
               void *cookie, xi_socket_t *new_so)
{
#ifdef __KPI_SOCKET__
    return sock_socket(domain, type, protocol, callback, cookie, new_so);
#else
    errno_t error;

    thread_funnel_set(network_flock, TRUE);
    error = socreate(domain, new_so, type, protocol);    /* callback/cookie unused here */
    (void)thread_funnel_set(network_flock, FALSE);

    return error;
#endif
}
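/*
 * Hedged usage sketch, not from the original source: a typical client-side
 * lifecycle over the xi_sock_* wrappers in this section. The AF_INET /
 * SOCK_STREAM / SHUT_RDWR constants are standard <sys/socket.h> values used
 * for illustration only; this codebase is LPX-oriented, so real callers
 * would supply their own domain, type, and address. Error handling is
 * abbreviated.
 */
static errno_t
xi_sock_example(struct sockaddr *to, void *buf, size_t buflen)
{
    xi_socket_t so;
    size_t len = buflen;
    errno_t error;

    error = xi_sock_socket(AF_INET, SOCK_STREAM, 0, NULL, NULL, &so);
    if (error)
        return error;

    error = xi_sock_connect(so, to, 0);
    if (error == 0)
        error = xi_sock_send(so, buf, &len, 0);    /* len returns bytes sent */

    xi_sock_shutdown(so, SHUT_RDWR);
    xi_sock_close(so);

    return error;
}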
void
machdep_syscall(struct i386_saved_state *regs)
{
    int trapno, nargs;
    machdep_call_t *entry;
    thread_t thread;
    struct proc *p;
    struct proc *current_proc();

    trapno = regs->eax;
    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->eax = (unsigned int)kern_invalid();

        thread_exception_return();
        /* NOTREACHED */
    }

    entry = &machdep_call_table[trapno];
    nargs = entry->nargs;

    if (nargs > 0) {
        int args[nargs];

        if (copyin((char *) regs->uesp + sizeof (int),
                   (char *) args,
                   nargs * sizeof (int))) {
            regs->eax = KERN_INVALID_ADDRESS;

            thread_exception_return();
            /* NOTREACHED */
        }

        switch (nargs) {
        case 1:
            regs->eax = (*entry->routine)(args[0]);
            break;
        case 2:
            regs->eax = (*entry->routine)(args[0], args[1]);
            break;
        case 3:
            regs->eax = (*entry->routine)(args[0], args[1], args[2]);
            break;
        case 4:
            regs->eax = (*entry->routine)(args[0], args[1], args[2], args[3]);
            break;
        default:
            panic("machdep_syscall(): too many args");
        }
    } else
        regs->eax = (*entry->routine)();

    if (current_thread()->funnel_lock)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}
/*ARGSUSED*/
int
consread(__unused dev_t dev, struct uio *uio, int ioflag)
{
    dev_t device;
    boolean_t funnel_state;
    int error;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    if (constty)
        device = constty->t_dev;
    else
        device = cons.t_dev;
    error = (*cdevsw[major(device)].d_read)(device, uio, ioflag);
    thread_funnel_set(kernel_flock, funnel_state);

    return(error);
}
int
cons_putc(__unused dev_t dev, char c)
{
    dev_t device;
    boolean_t funnel_state;
    int error;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    if (constty)
        device = constty->t_dev;
    else
        device = cons.t_dev;
    error = (*cdevsw[major(device)].d_putc)(device, c);
    thread_funnel_set(kernel_flock, funnel_state);

    return(error);
}
/*ARGSUSED*/
int
consopen(__unused dev_t dev, int flag, int devtype, struct proc *pp)
{
    dev_t device;
    boolean_t funnel_state;
    int error;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    if (constty)
        device = constty->t_dev;
    else
        device = cons.t_dev;
    error = (*cdevsw[major(device)].d_open)(device, flag, devtype, pp);
    thread_funnel_set(kernel_flock, funnel_state);

    return(error);
}
int
macx_backing_store_suspend(
    boolean_t suspend)
{
    int error;
    struct proc *p = current_proc();
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    if ((error = suser(p->p_ucred, &p->p_acflag)))
        goto backing_store_suspend_return;

    vm_backing_store_disable(suspend);

backing_store_suspend_return:
    (void) thread_funnel_set(kernel_flock, FALSE);
    return(error);
}
int
macx_backing_store_suspend(
    struct macx_backing_store_suspend_args *args)
{
    boolean_t suspend = args->suspend;
    int error;
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    if ((error = suser(kauth_cred_get(), 0)))
        goto backing_store_suspend_return;

    vm_backing_store_disable(suspend);

backing_store_suspend_return:
    (void) thread_funnel_set(kernel_flock, FALSE);
    return(error);
}
int
xi_lpx_isconnected(xi_socket_t so)
{
#ifdef __KPI_SOCKET__
    return sock_isconnected(so);
#else
    int retval;
    int s;

    thread_funnel_set(network_flock, TRUE);
    s = splnet();
    retval = (so->so_state & SS_ISCONNECTED) != 0;
    splx(s);
    (void) thread_funnel_set(network_flock, FALSE);

    return retval;
#endif
}
void
unix_syscall_return(int error)
{
    thread_act_t thread;
    volatile int *rval;
    struct i386_saved_state *regs;
    struct proc *p;
    struct proc *current_proc();
    unsigned short code;
    vm_offset_t params;
    struct sysent *callp;
    extern int nsysent;

    thread = current_act();
    rval = (int *)get_bsduthreadrval(thread);
    p = current_proc();

    regs = USER_REGS(thread);

    /* reconstruct code for tracing before blasting eax */
    code = regs->eax;
    params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
    callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    if (callp == sysent) {
        code = fuword(params);
    }

    if (error == ERESTART) {
        regs->eip -= 7;
    } else if (error != EJUSTRETURN) {
        if (error) {
            regs->eax = error;
            regs->efl |= EFL_CF;    /* carry bit */
        } else { /* (not error) */
            regs->eax = rval[0];
            regs->edx = rval[1];
            regs->efl &= ~EFL_CF;
        }
    }

    ktrsysret(p, code, error, rval[0], callp->sy_funnel);

    KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
        error, rval[0], rval[1], 0, 0);

    if (callp->sy_funnel != NO_FUNNEL)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}
static int
vnsize(dev_t dev)
{
    int secsize;
    struct vn_softc *vn;
    int unit;
    boolean_t funnel_state;

    unit = vnunit(dev);
    if (vnunit(dev) >= NVNDEVICE) {
        return (-1);
    }

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    vn = vn_table + unit;
    if ((vn->sc_flags & VNF_INITED) == 0)
        secsize = -1;
    else
        secsize = vn->sc_secsize;
    (void) thread_funnel_set(kernel_flock, funnel_state);

    return (secsize);
}
void
machdep_syscall64(x86_saved_state_t *state)
{
    int trapno;
    machdep_call_t *entry;
    x86_saved_state64_t *regs;

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);

    DEBUG_KPRINT_SYSCALL_MDEP(
        "machdep_syscall64: trapno=%d\n", trapno);

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->rax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table64[trapno];

    switch (entry->nargs) {
    case 0:
        regs->rax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->rax = (*entry->routine.args64_1)(regs->rdi);
        break;
    default:
        panic("machdep_syscall64: too many args");
    }
    if (current_thread()->funnel_lock)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%llu\n", regs->rax);

    throttle_lowpri_io(TRUE);

    thread_exception_return();
    /* NOTREACHED */
}
/*
 *	Routine:	macx_swapoff
 *	Function:
 *		Syscall interface to remove a file from backing store
 */
int
macx_swapoff(
    struct macx_swapoff_args *args)
{
    __unused int flags = args->flags;
    kern_return_t kr;
    mach_port_t backing_store;

    struct vnode *vp = 0;
    struct nameidata nd, *ndp;
    struct proc *p = current_proc();
    int i;
    int error;
    boolean_t funnel_state;
    vfs_context_t ctx = vfs_context_current();

    AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPOFF);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    backing_store = NULL;
    ndp = &nd;

    if ((error = suser(kauth_cred_get(), 0)))
        goto swapoff_bailout;

    /*
     * Get the vnode for the paging area.
     */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
        ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
        (user_addr_t) args->filename, ctx);

    if ((error = namei(ndp)))
        goto swapoff_bailout;
    nameidone(ndp);
    vp = ndp->ni_vp;

    if (vp->v_type != VREG) {
        error = EINVAL;
        goto swapoff_bailout;
    }
#if CONFIG_MACF
    vnode_lock(vp);
    error = mac_system_check_swapoff(vfs_context_ucred(ctx), vp);
    vnode_unlock(vp);
    if (error)
        goto swapoff_bailout;
#endif

    for (i = 0; i < MAX_BACKING_STORE; i++) {
        if (bs_port_table[i].vp == vp) {
            break;
        }
    }
    if (i == MAX_BACKING_STORE) {
        error = EINVAL;
        goto swapoff_bailout;
    }
    backing_store = (mach_port_t)bs_port_table[i].bs;

    kr = default_pager_backing_store_delete(backing_store);
    switch (kr) {
    case KERN_SUCCESS:
        error = 0;
        bs_port_table[i].vp = 0;
        /* This vnode is no longer used for swapfile */
        vnode_lock_spin(vp);
        CLR(vp->v_flag, VSWAP);
        vnode_unlock(vp);

        /* get rid of macx_swapon() "long term" reference */
        vnode_rele(vp);
        break;
    case KERN_FAILURE:
        error = EAGAIN;
        break;
    default:
        error = EAGAIN;
        break;
    }

swapoff_bailout:
    /* get rid of macx_swapoff() namei() reference */
    if (vp)
        vnode_put(vp);

    (void) thread_funnel_set(kernel_flock, FALSE);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return(error);
}
/*
 *	Routine:	macx_swapon
 *	Function:
 *		Syscall interface to add a file to backing store
 */
int
macx_swapon(
    struct macx_swapon_args *args)
{
    int size = args->size;
    vnode_t vp = (vnode_t)NULL;
    struct nameidata nd, *ndp;
    register int error;
    kern_return_t kr;
    mach_port_t backing_store;
    memory_object_default_t default_pager;
    int i;
    boolean_t funnel_state;
    off_t file_size;
    vfs_context_t ctx = vfs_context_current();
    struct proc *p = current_proc();
    int dp_cluster_size;

    AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
    AUDIT_ARG(value32, args->priority);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    ndp = &nd;

    if ((error = suser(kauth_cred_get(), 0)))
        goto swapon_bailout;

    /*
     * Get a vnode for the paging area.
     */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
        ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
        (user_addr_t) args->filename, ctx);

    if ((error = namei(ndp)))
        goto swapon_bailout;
    nameidone(ndp);
    vp = ndp->ni_vp;

    if (vp->v_type != VREG) {
        error = EINVAL;
        goto swapon_bailout;
    }

    /* get file size */
    if ((error = vnode_size(vp, &file_size, ctx)) != 0)
        goto swapon_bailout;
#if CONFIG_MACF
    vnode_lock(vp);
    error = mac_system_check_swapon(vfs_context_ucred(ctx), vp);
    vnode_unlock(vp);
    if (error)
        goto swapon_bailout;
#endif

    /* resize to desired size if it's too small */
    if ((file_size < (off_t)size) &&
            ((error = vnode_setsize(vp, (off_t)size, 0, ctx)) != 0))
        goto swapon_bailout;

    if (default_pager_init_flag == 0) {
        start_def_pager(NULL);
        default_pager_init_flag = 1;
    }

    /* add new backing store to list */
    i = 0;
    while (bs_port_table[i].vp != 0) {
        if (i == MAX_BACKING_STORE)
            break;
        i++;
    }
    if (i == MAX_BACKING_STORE) {
        error = ENOMEM;
        goto swapon_bailout;
    }

    /* remember the vnode. This vnode has namei() reference */
    bs_port_table[i].vp = vp;

    /*
     * Look to see if we are already paging to this file.
     */
    /* make certain the copy send of kernel call will work */
    default_pager = MEMORY_OBJECT_DEFAULT_NULL;
    kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
    if (kr != KERN_SUCCESS) {
        error = EAGAIN;
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    if (vp->v_mount->mnt_kern_flag & MNTK_SSD) {
        /*
         * keep the cluster size small since the
         * seek cost is effectively 0 which means
         * we don't care much about fragmentation
         */
        dp_isssd = TRUE;
        dp_cluster_size = 2 * PAGE_SIZE;
    } else {
        /*
         * use the default cluster size
         */
        dp_isssd = FALSE;
        dp_cluster_size = 0;
    }
    kr = default_pager_backing_store_create(default_pager,
        -1,    /* default priority */
        dp_cluster_size,
        &backing_store);
    memory_object_default_deallocate(default_pager);

    if (kr != KERN_SUCCESS) {
        error = ENOMEM;
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    /* Mark this vnode as being used for swapfile */
    vnode_lock_spin(vp);
    SET(vp->v_flag, VSWAP);
    vnode_unlock(vp);

    /*
     * NOTE: we are able to supply PAGE_SIZE here instead of
     *	an actual record size or block number because:
     *	a: we do not support offsets from the beginning of the
     *		file (allowing for non page size/record modulo offsets).
     *	b: because allow paging will be done modulo page size
     */

    kr = default_pager_add_file(backing_store, (vnode_ptr_t) vp,
        PAGE_SIZE, (int)(file_size / PAGE_SIZE));
    if (kr != KERN_SUCCESS) {
        bs_port_table[i].vp = 0;
        if (kr == KERN_INVALID_ARGUMENT)
            error = EINVAL;
        else
            error = ENOMEM;

        /* This vnode is not to be used for swapfile */
        vnode_lock_spin(vp);
        CLR(vp->v_flag, VSWAP);
        vnode_unlock(vp);

        goto swapon_bailout;
    }
    bs_port_table[i].bs = (void *)backing_store;
    error = 0;

    ubc_setthreadcred(vp, p, current_thread());

    /*
     * take a long term reference on the vnode to keep
     * vnreclaim() away from this vnode.
     */
    vnode_ref(vp);

swapon_bailout:
    if (vp) {
        vnode_put(vp);
    }
    (void) thread_funnel_set(kernel_flock, FALSE);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return(error);
}
/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
    thread_call_group_t group,
    wait_result_t wres)
{
    thread_t self = current_thread();
    boolean_t canwait;

    /*
     * A wakeup with THREAD_INTERRUPTED indicates that
     * we should terminate.
     */
    if (wres == THREAD_INTERRUPTED) {
        thread_terminate(self);

        /* NOTREACHED */
        panic("thread_terminate() returned?");
    }

    (void)disable_ints_and_lock();

    thread_sched_call(self, group->sched_call);

    while (group->pending_count > 0) {
        thread_call_t call;
        thread_call_func_t func;
        thread_call_param_t param0, param1;

        call = TC(dequeue_head(&group->pending_queue));
        group->pending_count--;

        func = call->tc_call.func;
        param0 = call->tc_call.param0;
        param1 = call->tc_call.param1;

        call->tc_call.queue = NULL;

        _internal_call_release(call);

        /*
         * Can only do wakeups for thread calls whose storage
         * we control.
         */
        if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) {
            canwait = TRUE;
            call->tc_refs++;    /* Delay free until we're done */
        } else
            canwait = FALSE;

        enable_ints_and_unlock();

        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
            VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

        (*func)(param0, param1);

        if (get_preemption_level() != 0) {
            int pl = get_preemption_level();
            panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
                pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
        }

        (void)thread_funnel_set(self->funnel_lock, FALSE);    /* XXX */

        (void) disable_ints_and_lock();

        if (canwait) {
            /* Frees if so desired */
            thread_call_finish(call);
        }
    }

    thread_sched_call(self, NULL);
    group->active_count--;

    if (group_isparallel(group)) {
        /*
         * For new style of thread group, thread always blocks.
         * If we have more than the target number of threads,
         * and this is the first to block, and it isn't active
         * already, set a timer for deallocating a thread if we
         * continue to have a surplus.
         */
        group->idle_count++;

        if (group->idle_count == 1) {
            group->idle_timestamp = mach_absolute_time();
        }

        if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) &&
                ((group->active_count + group->idle_count) > group->target_thread_count)) {
            group->flags |= TCG_DEALLOC_ACTIVE;
            thread_call_start_deallocate_timer(group);
        }

        /* Wait for more work (or termination) */
        wres = wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_INTERRUPTIBLE, 0);
        if (wres != THREAD_WAITING) {
            panic("kcall worker unable to assert wait?");
        }

        enable_ints_and_unlock();

        thread_block_parameter((thread_continue_t)thread_call_thread, group);
    } else {
        if (group->idle_count < group->target_thread_count) {
            group->idle_count++;

            wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0); /* Interrupted means to exit */

            enable_ints_and_unlock();

            thread_block_parameter((thread_continue_t)thread_call_thread, group);
            /* NOTREACHED */
        }
    }

    enable_ints_and_unlock();

    thread_terminate(self);
    /* NOTREACHED */
}
/*
 *	Routine:	macx_swapon
 *	Function:
 *		Syscall interface to add a file to backing store
 */
int
macx_swapon(
    char *filename,
    int flags,
    long size,
    long priority)
{
    struct vnode *vp = 0;
    struct nameidata nd, *ndp;
    struct proc *p = current_proc();
    pager_file_t pf;
    register int error;
    kern_return_t kr;
    mach_port_t backing_store;
    memory_object_default_t default_pager;
    int i;
    boolean_t funnel_state;

    struct vattr vattr;

    AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
    AUDIT_ARG(value, priority);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    ndp = &nd;

    if ((error = suser(p->p_ucred, &p->p_acflag)))
        goto swapon_bailout;

    if (default_pager_init_flag == 0) {
        start_def_pager(NULL);
        default_pager_init_flag = 1;
    }

    /*
     * Get a vnode for the paging area.
     */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
        UIO_USERSPACE, filename, p);

    if ((error = namei(ndp)))
        goto swapon_bailout;
    vp = ndp->ni_vp;

    if (vp->v_type != VREG) {
        error = EINVAL;
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }
    UBCINFOCHECK("macx_swapon", vp);

    if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p))) {
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }

    if (vattr.va_size < (u_quad_t)size) {
        vattr_null(&vattr);
        vattr.va_size = (u_quad_t)size;
        error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
        if (error) {
            VOP_UNLOCK(vp, 0, p);
            goto swapon_bailout;
        }
    }

    /* add new backing store to list */
    i = 0;
    while (bs_port_table[i].vp != 0) {
        if (i == MAX_BACKING_STORE)
            break;
        i++;
    }
    if (i == MAX_BACKING_STORE) {
        error = ENOMEM;
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }

    /* remember the vnode. This vnode has namei() reference */
    bs_port_table[i].vp = vp;

    /*
     * Look to see if we are already paging to this file.
     */
    /* make certain the copy send of kernel call will work */
    default_pager = MEMORY_OBJECT_DEFAULT_NULL;
    kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
    if (kr != KERN_SUCCESS) {
        error = EAGAIN;
        VOP_UNLOCK(vp, 0, p);
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    kr = default_pager_backing_store_create(default_pager,
        -1,    /* default priority */
        0,     /* default cluster size */
        &backing_store);
    memory_object_default_deallocate(default_pager);

    if (kr != KERN_SUCCESS) {
        error = ENOMEM;
        VOP_UNLOCK(vp, 0, p);
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    /*
     * NOTE: we are able to supply PAGE_SIZE here instead of
     *	an actual record size or block number because:
     *	a: we do not support offsets from the beginning of the
     *		file (allowing for non page size/record modulo offsets).
     *	b: because allow paging will be done modulo page size
     */

    VOP_UNLOCK(vp, 0, p);
    kr = default_pager_add_file(backing_store, vp,
        PAGE_SIZE, ((int)vattr.va_size) / PAGE_SIZE);
    if (kr != KERN_SUCCESS) {
        bs_port_table[i].vp = 0;
        if (kr == KERN_INVALID_ARGUMENT)
            error = EINVAL;
        else
            error = ENOMEM;
        goto swapon_bailout;
    }
    bs_port_table[i].bs = (void *)backing_store;
    error = 0;
    if (!ubc_hold(vp))
        panic("macx_swapon: hold");

    /* Mark this vnode as being used for swapfile */
    SET(vp->v_flag, VSWAP);

    ubc_setcred(vp, p);

    /*
     * take an extra reference on the vnode to keep
     * vnreclaim() away from this vnode.
     */
    VREF(vp);

    /* Hold on to the namei reference to the paging file vnode */
    vp = 0;

swapon_bailout:
    if (vp) {
        vrele(vp);
    }
    (void) thread_funnel_set(kernel_flock, FALSE);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return(error);
}
void
unix_syscall(struct i386_saved_state *regs)
{
    thread_act_t thread;
    void *vt;
    unsigned short code;
    struct sysent *callp;
    int nargs, error;
    volatile int *rval;
    int funnel_type;
    vm_offset_t params;
    extern int nsysent;
    struct proc *p;
    struct proc *current_proc();

    thread = current_act();
    p = current_proc();
    rval = (int *)get_bsduthreadrval(thread);

    //printf("[scall : eax %x]", regs->eax);
    code = regs->eax;
    params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
    callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    if (callp == sysent) {
        code = fuword(params);
        params += sizeof (int);
        callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    }

    vt = get_bsduthreadarg(thread);

    if ((nargs = (callp->sy_narg * sizeof (int))) &&
            (error = copyin((char *) params, (char *)vt, nargs)) != 0) {
        regs->eax = error;
        regs->efl |= EFL_CF;
        thread_exception_return();
        /* NOTREACHED */
    }

    rval[0] = 0;
    rval[1] = regs->edx;

    funnel_type = callp->sy_funnel;
    if (funnel_type == KERNEL_FUNNEL)
        (void) thread_funnel_set(kernel_flock, TRUE);
    else if (funnel_type == NETWORK_FUNNEL)
        (void) thread_funnel_set(network_flock, TRUE);

    set_bsduthreadargs(thread, regs, NULL);

    if (callp->sy_narg > 8)
        panic("unix_syscall max arg count exceeded (%d)", callp->sy_narg);

    ktrsyscall(p, code, callp->sy_narg, vt, funnel_type);

    {
        int *ip = (int *)vt;
        KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
            *ip, *(ip+1), *(ip+2), *(ip+3), 0);
    }

    error = (*(callp->sy_call))(p, (void *) vt, (int *) &rval[0]);

#if 0
    /* May be needed with vfork changes */
    regs = USER_REGS(thread);
#endif
    if (error == ERESTART) {
        regs->eip -= 7;
    } else if (error != EJUSTRETURN) {
        if (error) {
            regs->eax = error;
            regs->efl |= EFL_CF;    /* carry bit */
        } else { /* (not error) */
            regs->eax = rval[0];
            regs->edx = rval[1];
            regs->efl &= ~EFL_CF;
        }
    }

    ktrsysret(p, code, error, rval[0], funnel_type);

    KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
        error, rval[0], rval[1], 0, 0);

    if (funnel_type != NO_FUNNEL)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}
/*
 *	Routine:	macx_swapoff
 *	Function:
 *		Syscall interface to remove a file from backing store
 */
int
macx_swapoff(
    char *filename,
    int flags)
{
    kern_return_t kr;
    mach_port_t backing_store;

    struct vnode *vp = 0;
    struct nameidata nd, *ndp;
    struct proc *p = current_proc();
    int i;
    int error;
    boolean_t funnel_state;

    AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPOFF);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    backing_store = NULL;
    ndp = &nd;

    if ((error = suser(p->p_ucred, &p->p_acflag)))
        goto swapoff_bailout;

    /*
     * Get the vnode for the paging area.
     */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
        UIO_USERSPACE, filename, p);

    if ((error = namei(ndp)))
        goto swapoff_bailout;
    vp = ndp->ni_vp;

    if (vp->v_type != VREG) {
        error = EINVAL;
        VOP_UNLOCK(vp, 0, p);
        goto swapoff_bailout;
    }

    for (i = 0; i < MAX_BACKING_STORE; i++) {
        if (bs_port_table[i].vp == vp) {
            break;
        }
    }
    if (i == MAX_BACKING_STORE) {
        error = EINVAL;
        VOP_UNLOCK(vp, 0, p);
        goto swapoff_bailout;
    }
    backing_store = (mach_port_t)bs_port_table[i].bs;

    VOP_UNLOCK(vp, 0, p);
    kr = default_pager_backing_store_delete(backing_store);
    switch (kr) {
    case KERN_SUCCESS:
        error = 0;
        bs_port_table[i].vp = 0;
        ubc_rele(vp);
        /* This vnode is no longer used for swapfile */
        CLR(vp->v_flag, VSWAP);

        /* get rid of macx_swapon() namei() reference */
        vrele(vp);

        /* get rid of macx_swapon() "extra" reference */
        vrele(vp);
        break;
    case KERN_FAILURE:
        error = EAGAIN;
        break;
    default:
        error = EAGAIN;
        break;
    }

swapoff_bailout:
    /* get rid of macx_swapoff() namei() reference */
    if (vp)
        vrele(vp);

    (void) thread_funnel_set(kernel_flock, FALSE);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return(error);
}