/*
 * Routine: macx_backing_store_recovery
 * Function:
 *	Syscall interface to set a task's privilege
 *	level so that it is not subject to
 *	macx_backing_store_suspend
 */
int
macx_backing_store_recovery(
    struct macx_backing_store_recovery_args *args)
{
    int         pid = args->pid;
    int         error;
    struct proc *p = current_proc();
    boolean_t   funnel_state;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    if ((error = suser(kauth_cred_get(), 0)))
        goto backing_store_recovery_return;

    /* for now restrict backing_store_recovery */
    /* usage to only present task */
    if (pid != proc_selfpid()) {
        error = EINVAL;
        goto backing_store_recovery_return;
    }

    task_backing_store_privileged(p->task);

backing_store_recovery_return:
    (void) thread_funnel_set(kernel_flock, FALSE);

    return(error);
}
errno_t
IOWebFilterClass::tl_attach_func(void **cookie, socket_t so)
{
    LOG(LOG_DEBUG, "enter");

    SocketTracker *tracker = new SocketTracker();
    *cookie = (void*)tracker;
    if (tracker == NULL)
        return 0;

    bzero(tracker, sizeof(SocketTracker));
    tracker->lock = IOLockAlloc();
    if (tracker->lock == NULL) {
        tracker->magic = kSocketTrackerInvalid;
        return 0;
    }

    tracker->magic = kSocketTrackerAttach;
    tracker->pid = proc_selfpid();  //get the process id
    LOG(LOG_DEBUG, "pid=%d", tracker->pid);
    if (tracker->pid == 0) {
        tracker->magic = kSocketTrackerInvalid;
        return 0;
    }

    bzero(tracker->proc_name, sizeof(tracker->proc_name));
    proc_name(tracker->pid, tracker->proc_name,
              sizeof(tracker->proc_name) - 1);
    LOG(LOG_DEBUG, "proc=%s, magic=%ld",
        tracker->proc_name, tracker->magic);
    return 0;
}
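/*
 * A minimal sketch (assumed, not part of the original excerpt) of the
 * detach callback that would pair with tl_attach_func(): it tears down
 * the SocketTracker allocated above. The SocketTracker layout and
 * kSocketTracker* constants are taken from the attach path.
 */
void
IOWebFilterClass::tl_detach_func(void *cookie, socket_t so)
{
    SocketTracker *tracker = (SocketTracker*)cookie;
    if (tracker == NULL)
        return;

    tracker->magic = kSocketTrackerInvalid;  //mark dead before freeing
    if (tracker->lock != NULL)
        IOLockFree(tracker->lock);           //balance IOLockAlloc()
    delete tracker;                          //balance new SocketTracker()
}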
bool AppleSmartBatteryManagerUserClient::initWithTask(task_t owningTask,
                void *security_id, UInt32 type, OSDictionary * properties)
{
    uint32_t            _pid;

    /* 1. Only root processes may open a SmartBatteryManagerUserClient.
     * 2. Attempts to create exclusive UserClients will fail if an
     *    exclusive user client is attached.
     * 3. Non-exclusive clients will not be able to perform transactions
     *    while an exclusive client is attached.
     * 3a. Only battery firmware updaters should bother being exclusive.
     */
    if ( kIOReturnSuccess !=
            clientHasPrivilege(owningTask, kIOClientPrivilegeAdministrator))
    {
        return false;
    }

    if (!super::initWithTask(owningTask, security_id, type, properties)) {
        return false;
    }

    fUserClientType = type;

    _pid = proc_selfpid();
    setProperty("pid", _pid, 32);

    fOwningTask = owningTask;
    task_reference(fOwningTask);

    return true;
}
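/*
 * A hedged sketch (not Apple's actual code) of the matching teardown:
 * the task reference taken in initWithTask() has to be dropped when the
 * user client goes away, or the owning task can never be reclaimed.
 */
IOReturn AppleSmartBatteryManagerUserClient::clientClose(void)
{
    if (fOwningTask) {
        task_deallocate(fOwningTask);   //balance task_reference() above
        fOwningTask = NULL;
    }
    terminate();                        //detach from the provider service
    return kIOReturnSuccess;
}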
static errno_t
ppfilter_attach(void **cookie, socket_t so)
{
    pp_filter_cookie_t cp = OSMalloc(sizeof(*cp), pp_malloc_tag);
    if (cp) {
        cp->pid = proc_selfpid();
        cp->action = COOKIE_NO_ACTION;
    }
    *cookie = (void*)cp;
    return (0);
}
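/*
 * A minimal sketch (assumed; not shown in the original excerpt) of the
 * detach callback that frees the cookie allocated in ppfilter_attach(),
 * using the same size and tag that were passed to OSMalloc().
 */
static void
ppfilter_detach(void *cookie, socket_t so)
{
    pp_filter_cookie_t cp = (pp_filter_cookie_t)cookie;
    if (cp)
        OSFree(cp, sizeof(*cp), pp_malloc_tag);
}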
/**
 * __ntfs_error - output an error to the console
 * @function:	name of function outputting the error
 * @mp:		mounted ntfs file system
 * @fmt:	error string containing format specifications
 * @...:	a variable number of arguments specified in @fmt
 *
 * Outputs an error to the console for the mounted ntfs file system described
 * by @mp.
 *
 * @fmt and the corresponding @... are a printf style format string containing
 * the error string and the corresponding format arguments, respectively.
 *
 * @function is the name of the function from which __ntfs_error is being
 * called.
 *
 * Note, you should be using debug.h::ntfs_error(@mp, @fmt, @...) instead
 * as this provides the @function parameter automatically.
 */
void __ntfs_error(const char *function, struct mount *mp, const char *fmt, ...)
{
    va_list args;
    int flen = 0;

    if (function)
        flen = strlen(function);
    mtx_lock_spin(&ntfs_err_buf_lock);
    va_start(args, fmt);
    vsnprintf(ntfs_err_buf, sizeof(ntfs_err_buf), fmt, args);
    va_end(args);
    if (mp)
        printf("NTFS-fs error (device %s, pid %d): %s(): %s\n",
                vfs_statfs(mp)->f_mntfromname, proc_selfpid(),
                flen ? function : "", ntfs_err_buf);
    else
        printf("NTFS-fs error (pid %d): %s(): %s\n",
                proc_selfpid(), flen ? function : "", ntfs_err_buf);
#ifdef DEBUG
    OSReportWithBacktrace("");
#endif
    mtx_unlock_spin(&ntfs_err_buf_lock);
}
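/*
 * The wrapper the note above refers to, in a plausible form (the actual
 * macro lives in debug.h and may differ slightly): it forwards to
 * __ntfs_error() with the caller's function name filled in automatically.
 */
#define ntfs_error(mp, fmt, ...) \
    __ntfs_error(__FUNCTION__, mp, fmt, ##__VA_ARGS__)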
//called for new socket
// ->find rule, and attach entry (so know to allow/deny for later actions)
//   if no rule is found, that's ok (new proc), request user input in connect_out or sf_data_out, etc
static kern_return_t attach(void **cookie, socket_t so)
{
    //result
    kern_return_t result = kIOReturnError;

    //unset
    *cookie = NULL;

    //dbg msg
    IOLog("LULU: in %s\n", __FUNCTION__);

    //set cookie
    *cookie = (void*)OSMalloc(sizeof(struct cookieStruct), allocTag);
    if(NULL == *cookie)
    {
        //no memory
        result = ENOMEM;

        //bail
        goto bail;
    }

    //set rule action
    // not found, block, allow, etc
    ((struct cookieStruct*)(*cookie))->ruleAction = queryRule(proc_selfpid());

    //dbg msg
    IOLog("LULU: rule action for %d: %d\n", proc_selfpid(), ((struct cookieStruct*)(*cookie))->ruleAction);

    //happy
    result = kIOReturnSuccess;

bail:

    return result;
}
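//a hedged sketch (not LuLu's verbatim code) of the matching detach callback:
// frees the cookie allocated in attach() with the same size and tag
static void detach(void *cookie, socket_t so)
{
    //free cookie
    if(NULL != cookie)
    {
        //free with matching size/tag
        OSFree(cookie, sizeof(struct cookieStruct), allocTag);
    }

    return;
}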
//process
// block/allow, or ask user and put thread to sleep
kern_return_t process(void *cookie, socket_t so, const struct sockaddr *to)
{
    //result
    kern_return_t result = kIOReturnError;

    //event
    firewallEvent event = {0};

    //rule
    int action = RULE_STATE_NOT_FOUND;

    //awake reason
    int reason = 0;

    //socket type
    int socketType = 0;

    //length of socket type
    int socketTypeLength = 0;

    //process name
    char processName[PATH_MAX] = {0};

    //what does rule say?
    // loop until we have an answer
    while(true)
    {
        //reset
        bzero(&event, sizeof(event));

        //extract action
        action = ((struct cookieStruct*)cookie)->ruleAction;

        //get process name
        proc_selfname(processName, PATH_MAX);

        //block?
        if(RULE_STATE_BLOCK == action)
        {
            //dbg msg
            IOLog("LULU: rule says block for %s (pid: %d)\n", processName, proc_selfpid());

            //gtfo!
            result = EPERM;

            //all done
            goto bail;
        }
        //allow?
        else if(RULE_STATE_ALLOW == action)
        {
            //dbg msg
            IOLog("LULU: rule says allow for %s (pid: %d)\n", processName, proc_selfpid());

            //ok
            result = kIOReturnSuccess;

            //all done
            goto bail;
        }
        //not found
        // ->ask daemon and sleep for response
        else if(RULE_STATE_NOT_FOUND == action)
        {
            //dbg msg
            IOLog("LULU: no rule found for %s (pid: %d)\n", processName, proc_selfpid());

            //zero out
            bzero(&event, sizeof(firewallEvent));

            //set type
            event.networkOutEvent.type = EVENT_NETWORK_OUT;

            //add pid
            event.networkOutEvent.pid = proc_selfpid();

            //init length
            socketTypeLength = sizeof(socketType);

            //get socket type
            sock_getsockopt(so, SOL_SOCKET, SO_TYPE, &socketType, &socketTypeLength);

            //save type
            event.networkOutEvent.socketType = socketType;

            //a UDP socket's destination address might be null
            // so grab it via 'getpeername' and save as 'remote addr'
            if(NULL == to)
            {
                //copy into 'remote addr' for user mode
                if(0 != sock_getpeername(so, (struct sockaddr*)&(event.networkOutEvent.remoteAddress), sizeof(event.networkOutEvent.remoteAddress)))
                {
                    //err msg
                    IOLog("LULU ERROR: sock_getpeername() failed");

                    //bail
                    goto bail;
                }
            }
            //copy remote socket for user mode
            else
            {
                //add remote (destination) socket addr
                memcpy(&(event.networkOutEvent.remoteAddress), to, sizeof(event.networkOutEvent.remoteAddress));
            }

            //queue it up
            sharedDataQueue->enqueue_tail(&event, sizeof(firewallEvent));

            //dbg msg
            IOLog("LULU: queued response to user mode, now going to sleep!\n");

            //lock
            IOLockLock(ruleEventLock);

            //sleep
            reason = IOLockSleep(ruleEventLock, &ruleEventLock, THREAD_ABORTSAFE);

            //TODO: fix panic; if kext is unloaded (sets ruleEventLock to NULL) this can still wake up?
            // "Preemption level underflow, possible cause unlocking an unlocked mutex or spinlock"
            // seems to happen when process is killed or kext unloaded while in the IOLockSleep!?

            //unlock
            IOLockUnlock(ruleEventLock);

            //thread wakeup cuz of signal, etc
            // ->just bail (process likely exited, etc)
            if(THREAD_AWAKENED != reason)
            {
                //dbg msg
                IOLog("LULU: thread awoke, but because of %d!\n", reason);

                //gtfo!
                result = EPERM;

                //all done
                goto bail;
            }

            //dbg msg
            IOLog("LULU: thread awoke, will check/process response\n");

            //try get rule action again
            // ->not found, block, allow, etc
            ((struct cookieStruct*)(cookie))->ruleAction = queryRule(proc_selfpid());

            //loop to (re)process
        }

    }//while

bail:

    return result;
}
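//a hedged sketch (assumed; the real code lives elsewhere, e.g. in the user
// client that receives the daemon's verdict) of the wake-up side that pairs
// with the IOLockSleep() above: once the verdict is stored, wake the
// sleeping data-path thread(s) on the same event
IOLockLock(ruleEventLock);
IOLockWakeup(ruleEventLock, &ruleEventLock, false);  //false: wake all waiters
IOLockUnlock(ruleEventLock);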
/*
 * Routine: task_for_pid
 * Purpose:
 *	Get the task port for another "process", named by its
 *	process ID on the same host as "target_task".
 *
 *	Only permitted to privileged processes, or processes
 *	with the same user ID.
 *
 *	Note: if pid == 0, an error is returned no matter who is calling.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */
kern_return_t
task_for_pid(
    struct task_for_pid_args *args)
{
    mach_port_name_t    target_tport = args->target_tport;
    int                 pid = args->pid;
    user_addr_t         task_addr = args->t;
    proc_t              p = PROC_NULL;
    task_t              t1 = TASK_NULL;
    mach_port_name_t    tret = MACH_PORT_NULL;
    ipc_port_t          tfpport;
    void                *sright;
    int                 error = 0;

    AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
    AUDIT_ARG(pid, pid);
    AUDIT_ARG(mach_port1, target_tport);

    /* Always check if pid == 0 */
    if (pid == 0) {
        (void ) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
        AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
        return(KERN_FAILURE);
    }

    t1 = port_name_to_task(target_tport);
    if (t1 == TASK_NULL) {
        (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
        AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
        return(KERN_FAILURE);
    }

    p = proc_find(pid);
    if (p == PROC_NULL) {
        error = KERN_FAILURE;
        goto tfpout;
    }

#if CONFIG_AUDIT
    AUDIT_ARG(process, p);
#endif

    if (!(task_for_pid_posix_check(p))) {
        error = KERN_FAILURE;
        goto tfpout;
    }

    if (p->task != TASK_NULL) {
        /* If we aren't root and target's task access port is set... */
        if (!kauth_cred_issuser(kauth_cred_get()) &&
            p != current_proc() &&
            (task_get_task_access_port(p->task, &tfpport) == 0) &&
            (tfpport != IPC_PORT_NULL)) {

            if (tfpport == IPC_PORT_DEAD) {
                error = KERN_PROTECTION_FAILURE;
                goto tfpout;
            }

            /* Call up to the task access server */
            error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);

            if (error != MACH_MSG_SUCCESS) {
                if (error == MACH_RCV_INTERRUPTED)
                    error = KERN_ABORTED;
                else
                    error = KERN_FAILURE;
                goto tfpout;
            }
        }
#if CONFIG_MACF
        error = mac_proc_check_get_task(kauth_cred_get(), p);
        if (error) {
            error = KERN_FAILURE;
            goto tfpout;
        }
#endif

        /* Grant task port access */
        task_reference(p->task);
        extmod_statistics_incr_task_for_pid(p->task);

        sright = (void *) convert_task_to_port(p->task);
        tret = ipc_port_copyout_send(
                   sright,
                   get_task_ipcspace(current_task()));
    }
    error = KERN_SUCCESS;

tfpout:
    task_deallocate(t1);
    AUDIT_ARG(mach_port2, tret);
    (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
    if (p != PROC_NULL)
        proc_rele(p);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return(error);
}
#ifdef _USE_KAUTH
// Reconstructed header (assumed): the excerpt began mid-#if, so the
// _USE_KAUTH variant's signature was missing. This is the standard
// kauth_scope_callback_t signature that the body's action/arg0/arg1
// references require.
static int new_proc_listener( kauth_cred_t credential,
                              void *idata,
                              kauth_action_t action,
                              uintptr_t arg0,
                              uintptr_t arg1,
                              uintptr_t arg2,
                              uintptr_t arg3 )
#else
static int new_proc_listener( kauth_cred_t cred,
                              struct vnode *vp,
                              struct vnode *scriptvp,
                              struct label *vnodelabel,
                              struct label *scriptlabel,
                              struct label *execlabel,
                              struct componentname *cnp,
                              u_int *csflags,
                              void *macpolicyattr,
                              size_t macpolicyattrlen )
#endif
{
#ifdef _USE_KAUTH
    vnode_t prog = (vnode_t)arg0;
    const char* file_path = (const char*)arg1;
#else
    int pathLen = sizeof( g_processes[ 0 ].path );
#endif
    pid_t pid = 0;
    pid_t ppid = 0;
    uid_t uid = 0;

#ifdef _USE_KAUTH
    if( KAUTH_FILEOP_EXEC != action ||
        ( NULL != prog && VREG != vnode_vtype( prog ) ) )
    {
        return KAUTH_RESULT_DEFER;
    }
#endif

    uid = kauth_getuid();
    pid = proc_selfpid();
    ppid = proc_selfppid();

    // We skip a known false positive
    if( 0 == ppid && 1 == pid )
    {
#ifdef _USE_KAUTH
        return KAUTH_RESULT_DEFER;
#else
        return 0; // Always allow
#endif
    }

#ifdef _USE_KAUTH
    // guard added: file_path only exists in the _USE_KAUTH build
    if( NULL != file_path )
    {
        // rpal_debug_info( "!!!!!! process start: %d/%d/%d %s", ppid, pid, uid, file_path );
    }
#endif

    rpal_mutex_lock( g_collector_1_mutex );

#ifdef _USE_KAUTH
    if( NULL != file_path )
    {
        strncpy( g_processes[ g_nextProcess ].path,
                 file_path,
                 sizeof( g_processes[ g_nextProcess ].path ) - 1 );
    }
#else
    vn_getpath( vp, g_processes[ g_nextProcess ].path, &pathLen );
#endif

    g_processes[ g_nextProcess ].pid = pid;
    g_processes[ g_nextProcess ].ppid = ppid;
    g_processes[ g_nextProcess ].uid = uid;
    g_processes[ g_nextProcess ].ts = rpal_time_getLocal();

    g_nextProcess++;
    if( g_nextProcess == _NUM_BUFFERED_PROCESSES )
    {
        g_nextProcess = 0;
        rpal_debug_warning( "overflow of the execution buffer" );
    }

    // rpal_debug_info( "now %d processes in buffer", g_nextProcess );

    rpal_mutex_unlock( g_collector_1_mutex );

#ifdef _USE_KAUTH
    return KAUTH_RESULT_DEFER;
#else
    return 0; // Always allow
#endif
}
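/*
 * A hedged sketch (assumed; the registration code is not part of this
 * excerpt) of how the listener above would be installed in the
 * _USE_KAUTH build. g_exec_listener and register_exec_listener() are
 * hypothetical names.
 */
static kauth_listener_t g_exec_listener = NULL;

static int register_exec_listener( void )
{
    g_exec_listener = kauth_listen_scope( KAUTH_SCOPE_FILEOP,
                                          new_proc_listener, NULL );
    return ( NULL != g_exec_listener ) ? 0 : -1;
}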
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
    thread_t thread = current_thread();
    savearea_t *regs;
    user_addr_t pc, sp;
    volatile uint16_t *flags =
        (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
    uintptr_t oldcontext;
    size_t s1, s2;
#endif
    boolean_t is64Bit = proc_is64bit(current_proc());

    if (*flags & CPU_DTRACE_FAULT)
        return;

    if (pcstack_limit <= 0)
        return;

    /*
     * If there's no user context we still need to zero the stack.
     */
    if (thread == NULL)
        goto zero;

    regs = (savearea_t *)find_user_regs(thread);
    if (regs == NULL)
        goto zero;

    *pcstack++ = (uint64_t)proc_selfpid();
    pcstack_limit--;

    if (pcstack_limit <= 0)
        return;

    pc = regs->ss_32.eip;
    sp = regs->ss_32.ebp;

#if 0 /* XXX signal stack crawl */
    oldcontext = lwp->lwp_oldcontext;

    if (p->p_model == DATAMODEL_NATIVE) {
        s1 = sizeof (struct frame) + 2 * sizeof (long);
        s2 = s1 + sizeof (siginfo_t);
    } else {
        s1 = sizeof (struct frame32) + 3 * sizeof (int);
        s2 = s1 + sizeof (siginfo32_t);
    }
#endif

    if (dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp) == 1) {
        /*
         * we made a change.
         */
        *fpstack++ = 0;
        if (pcstack_limit <= 0)
            return;
    }

    while (pc != 0) {
        *pcstack++ = (uint64_t)pc;
        *fpstack++ = sp;
        pcstack_limit--;
        if (pcstack_limit <= 0)
            break;

        if (sp == 0)
            break;

#if 0 /* XXX signal stack crawl */
        if (oldcontext == sp + s1 || oldcontext == sp + s2) {
            if (p->p_model == DATAMODEL_NATIVE) {
                ucontext_t *ucp = (ucontext_t *)oldcontext;
                greg_t *gregs = ucp->uc_mcontext.gregs;

                sp = dtrace_fulword(&gregs[REG_FP]);
                pc = dtrace_fulword(&gregs[REG_PC]);

                oldcontext = dtrace_fulword(&ucp->uc_link);
            } else {
                ucontext_t *ucp = (ucontext_t *)oldcontext;
                greg_t *gregs = ucp->uc_mcontext.gregs;

                sp = dtrace_fuword32(&gregs[EBP]);
                pc = dtrace_fuword32(&gregs[EIP]);

                oldcontext = dtrace_fuword32(&ucp->uc_link);
            }
        } else
#endif
        {
            if (is64Bit) {
                pc = dtrace_fuword64((sp + RETURN_OFFSET64));
                sp = dtrace_fuword64(sp);
            } else {
                pc = dtrace_fuword32((sp + RETURN_OFFSET));
                sp = dtrace_fuword32(sp);
            }
        }

#if 0 /* XXX */
        /*
         * This is totally bogus: if we faulted, we're going to clear
         * the fault and break. This is to deal with the apparently
         * broken Java stacks on x86.
         */
        if (*flags & CPU_DTRACE_FAULT) {
            *flags &= ~CPU_DTRACE_FAULT;
            break;
        }
#endif
    }

zero:
    while (pcstack_limit-- > 0)
        *pcstack++ = 0;
}
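/*
 * For reference (values as assumed from xnu's x86 dtrace_isa.c): the
 * walker above reads the return address one word above the saved frame
 * pointer it just followed.
 */
#define RETURN_OFFSET   4   /* 32-bit frame: return pc at ebp + 4 */
#define RETURN_OFFSET64 8   /* 64-bit frame: return pc at rbp + 8 */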
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
    thread_t thread = current_thread();
    x86_saved_state_t *regs;
    user_addr_t pc, sp, fp;
    volatile uint16_t *flags =
        (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
    int n;
    boolean_t is64Bit = proc_is64bit(current_proc());

    if (*flags & CPU_DTRACE_FAULT)
        return;

    if (pcstack_limit <= 0)
        return;

    /*
     * If there's no user context we still need to zero the stack.
     */
    if (thread == NULL)
        goto zero;

    regs = (x86_saved_state_t *)find_user_regs(thread);
    if (regs == NULL)
        goto zero;

    *pcstack++ = (uint64_t)proc_selfpid();
    pcstack_limit--;

    if (pcstack_limit <= 0)
        return;

    if (is64Bit) {
        pc = regs->ss_64.isf.rip;
        sp = regs->ss_64.isf.rsp;
        fp = regs->ss_64.rbp;
    } else {
        pc = regs->ss_32.eip;
        sp = regs->ss_32.uesp;
        fp = regs->ss_32.ebp;
    }

    /*
     * The return value indicates if we've modified the stack.
     * Since there is nothing else to fix up in either case,
     * we can safely ignore it here.
     */
    (void)dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp);

    if (pcstack_limit <= 0)
        return;

    /*
     * Note that unlike ppc, the x86 code does not use
     * CPU_DTRACE_USTACK_FP. This is because x86 always
     * traces from the fp, even in syscall/profile/fbt
     * providers.
     */
    n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
    ASSERT(n >= 0);
    ASSERT(n <= pcstack_limit);

    pcstack += n;
    pcstack_limit -= n;

zero:
    while (pcstack_limit-- > 0)
        *pcstack++ = 0;
}
extern "C" int proc_event_pending(struct vcpu_t *vcpu) { int proc_id = proc_selfpid(); return (proc_issignal(proc_id, QEMU_SIGNAL_SIGMASK) || vcpu_event_pending(vcpu)); }
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
    thread_t thread = current_thread();
    ppc_saved_state_t *regs;
    user_addr_t pc, sp;
    volatile uint16_t *flags =
        (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
    uintptr_t oldcontext;
    size_t s1, s2;
#endif
    boolean_t is64Bit = proc_is64bit(current_proc());

    if (*flags & CPU_DTRACE_FAULT)
        return;

    if (pcstack_limit <= 0)
        return;

    /*
     * If there's no user context we still need to zero the stack.
     */
    if (thread == NULL)
        goto zero;

    regs = (ppc_saved_state_t *)find_user_regs(thread);
    if (regs == NULL)
        goto zero;

    *pcstack++ = (uint64_t)proc_selfpid();
    pcstack_limit--;

    if (pcstack_limit <= 0)
        return;

    pc = regs->REGPC;
    sp = regs->REGSP;

#if 0 /* XXX signal stack crawl */
    oldcontext = lwp->lwp_oldcontext;

    if (p->p_model == DATAMODEL_NATIVE) {
        s1 = sizeof (struct frame) + 2 * sizeof (long);
        s2 = s1 + sizeof (siginfo_t);
    } else {
        s1 = sizeof (struct frame32) + 3 * sizeof (int);
        s2 = s1 + sizeof (siginfo32_t);
    }
#endif

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
        *pcstack++ = (uint64_t)pc;
        *fpstack++ = 0;
        pcstack_limit--;
        if (pcstack_limit <= 0)
            return;

        /*
         * XXX This is wrong, but we do not yet support stack helpers.
         */
        if (is64Bit)
            pc = dtrace_fuword64(sp);
        else
            pc = dtrace_fuword32(sp);
    }

    while (pc != 0) {
        *pcstack++ = (uint64_t)pc;
        *fpstack++ = sp;
        pcstack_limit--;
        if (pcstack_limit <= 0)
            break;

        if (sp == 0)
            break;

#if 0 /* XXX signal stack crawl */
        if (oldcontext == sp + s1 || oldcontext == sp + s2) {
            if (p->p_model == DATAMODEL_NATIVE) {
                ucontext_t *ucp = (ucontext_t *)oldcontext;
                greg_t *gregs = ucp->uc_mcontext.gregs;

                sp = dtrace_fulword(&gregs[REG_FP]);
                pc = dtrace_fulword(&gregs[REG_PC]);

                oldcontext = dtrace_fulword(&ucp->uc_link);
            } else {
                ucontext_t *ucp = (ucontext_t *)oldcontext;
                greg_t *gregs = ucp->uc_mcontext.gregs;

                sp = dtrace_fuword32(&gregs[EBP]);
                pc = dtrace_fuword32(&gregs[EIP]);

                oldcontext = dtrace_fuword32(&ucp->uc_link);
            }
        } else
#endif
        {
            if (is64Bit) {
                pc = dtrace_fuword64((sp + RETURN_OFFSET64));
                sp = dtrace_fuword64(sp);
            } else {
                pc = dtrace_fuword32((sp + RETURN_OFFSET));
                sp = dtrace_fuword32(sp);
            }
        }
    }

zero:
    while (pcstack_limit-- > 0)
        *pcstack++ = 0;
}
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
    thread_t thread = current_thread();
    ppc_saved_state_t *regs;
    user_addr_t pc, sp;
    volatile uint16_t *flags =
        (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
    int n;
    boolean_t is64Bit = proc_is64bit(current_proc());

    if (*flags & CPU_DTRACE_FAULT)
        return;

    if (pcstack_limit <= 0)
        return;

    /*
     * If there's no user context we still need to zero the stack.
     */
    if (thread == NULL)
        goto zero;

    regs = (ppc_saved_state_t *)find_user_regs(thread);
    if (regs == NULL)
        goto zero;

    *pcstack++ = (uint64_t)proc_selfpid();
    pcstack_limit--;

    if (pcstack_limit <= 0)
        return;

    pc = regs->REGPC;
    sp = regs->REGSP;

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
        *pcstack++ = (uint64_t)pc;
        pcstack_limit--;
        if (pcstack_limit <= 0)
            return;

        pc = regs->save_lr;
    }

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_USTACK_FP)) {
        /*
         * If the ustack fp flag is set, the stack frame from sp to
         * fp contains no valid call information. Start with the fp.
         */
        if (is64Bit)
            sp = dtrace_fuword64(sp);
        else
            sp = (user_addr_t)dtrace_fuword32(sp);
    }

    n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
    ASSERT(n >= 0);
    ASSERT(n <= pcstack_limit);

    pcstack += n;
    pcstack_limit -= n;

zero:
    while (pcstack_limit-- > 0)
        *pcstack++ = 0;
}
RTDECL(RTPROCESS) RTProcSelf(void)
{
    return proc_selfpid();
}
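/*
 * Hedged usage sketch (names assumed, not from the original source):
 * callers use the portable IPRT wrapper instead of the Darwin-specific
 * proc_selfpid() so the same code builds on every host OS.
 */
static void logCurrentProcess(void)
{
    RTPROCESS pid = RTProcSelf();
    Log(("request from process %u\n", (unsigned)pid)); /* IPRT logging macro */
}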
/* system call implementation */
int
process_policy(__unused struct proc *p, struct process_policy_args * uap, __unused int32_t *retval)
{
    int error = 0;
    int scope = uap->scope;
    int policy = uap->policy;
    int action = uap->action;
    int policy_subtype = uap->policy_subtype;
    user_addr_t attrp = uap->attrp;
    pid_t target_pid = uap->target_pid;
    uint64_t target_threadid = uap->target_threadid;
    proc_t target_proc = PROC_NULL;
#if CONFIG_MACF || !CONFIG_EMBEDDED
    proc_t curp = current_proc();
#endif
    kauth_cred_t my_cred;
#if CONFIG_EMBEDDED
    kauth_cred_t target_cred;
#endif

    if ((scope != PROC_POLICY_SCOPE_PROCESS) && (scope != PROC_POLICY_SCOPE_THREAD)) {
        return(EINVAL);
    }

    if (target_pid == 0 || target_pid == proc_selfpid())
        target_proc = proc_self();
    else
        target_proc = proc_find(target_pid);

    if (target_proc == PROC_NULL)
        return(ESRCH);

    my_cred = kauth_cred_get();

#if CONFIG_EMBEDDED
    target_cred = kauth_cred_proc_ref(target_proc);

    if (!kauth_cred_issuser(my_cred) && kauth_cred_getruid(my_cred) &&
        kauth_cred_getuid(my_cred) != kauth_cred_getuid(target_cred) &&
        kauth_cred_getruid(my_cred) != kauth_cred_getuid(target_cred))
#else
    /*
     * Resource starvation control can be used by an unprivileged resource
     * owner that was privileged at the time of the ownership claim. This
     * is checked in the low resource handler routine, so bypass the
     * checks here.
     */
    if ((policy != PROC_POLICY_RESOURCE_STARVATION) &&
        (policy != PROC_POLICY_APPTYPE) &&
        (!kauth_cred_issuser(my_cred) && curp != p))
#endif
    {
        error = EPERM;
        goto out;
    }

#if CONFIG_MACF
    switch (policy) {
        case PROC_POLICY_BOOST:
        case PROC_POLICY_RESOURCE_USAGE:
#if CONFIG_EMBEDDED
        case PROC_POLICY_APPTYPE:
        case PROC_POLICY_APP_LIFECYCLE:
#endif
            /* These policies do their own appropriate mac checks */
            break;
        default:
            error = mac_proc_check_sched(curp, target_proc);
            if (error)
                goto out;
            break;
    }
#endif /* CONFIG_MACF */

    switch(policy) {
        case PROC_POLICY_BACKGROUND:
            error = ENOTSUP;
            break;
        case PROC_POLICY_HARDWARE_ACCESS:
            error = ENOTSUP;
            break;
        case PROC_POLICY_RESOURCE_STARVATION:
            error = handle_lowresource(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid);
            break;
        case PROC_POLICY_RESOURCE_USAGE:
            switch(policy_subtype) {
                case PROC_POLICY_RUSAGE_NONE:
                case PROC_POLICY_RUSAGE_WIREDMEM:
                case PROC_POLICY_RUSAGE_VIRTMEM:
                case PROC_POLICY_RUSAGE_DISK:
                case PROC_POLICY_RUSAGE_NETWORK:
                case PROC_POLICY_RUSAGE_POWER:
                    error = ENOTSUP;
                    goto out;
                default:
                    error = EINVAL;
                    goto out;
                case PROC_POLICY_RUSAGE_CPU:
                    break;
            }

            error = handle_cpuuse(action, attrp, target_proc, target_threadid);
            break;
#if CONFIG_EMBEDDED
        case PROC_POLICY_APP_LIFECYCLE:
            error = handle_applifecycle(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid);
            break;
#endif /* CONFIG_EMBEDDED */
        case PROC_POLICY_APPTYPE:
            error = handle_apptype(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid);
            break;
        case PROC_POLICY_BOOST:
            error = handle_boost(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid);
            break;
        default:
            error = EINVAL;
            break;
    }

out:
    proc_rele(target_proc);
#if CONFIG_EMBEDDED
    kauth_cred_unref(&target_cred);
#endif
    return(error);
}
/*
 * Routine: task_for_pid
 * Purpose:
 *	Get the task port for another "process", named by its
 *	process ID on the same host as "target_task".
 *
 *	Only permitted to privileged processes, or processes
 *	with the same user ID.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */
kern_return_t
task_for_pid(
    struct task_for_pid_args *args)
{
    mach_port_name_t    target_tport = args->target_tport;
    int                 pid = args->pid;
    user_addr_t         task_addr = args->t;
    struct uthread      *uthread;
    proc_t              p = PROC_NULL;
    task_t              t1 = TASK_NULL;
    mach_port_name_t    tret = MACH_PORT_NULL;
    ipc_port_t          tfpport;
    void                *sright;
    int                 error = 0;

    AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
    AUDIT_ARG(pid, pid);
    AUDIT_ARG(mach_port1, target_tport);

#if defined(SECURE_KERNEL)
    if (0 == pid) {
        (void ) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
        AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
        return(KERN_FAILURE);
    }
#endif

    t1 = port_name_to_task(target_tport);
    if (t1 == TASK_NULL) {
        (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
        AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
        return(KERN_FAILURE);
    }

    /*
     * Delayed binding of thread credential to process credential, if we
     * are not running with an explicitly set thread credential.
     */
    uthread = get_bsdthread_info(current_thread());
    kauth_cred_uthread_update(uthread, current_proc());

    p = proc_find(pid);
    AUDIT_ARG(process, p);

    if (!(task_for_pid_posix_check(p))) {
        error = KERN_FAILURE;
        goto tfpout;
    }

    if (p->task != TASK_NULL) {
        /* If we aren't root and target's task access port is set... */
        if (!kauth_cred_issuser(kauth_cred_get()) &&
            p != current_proc() &&
            (task_get_task_access_port(p->task, &tfpport) == 0) &&
            (tfpport != IPC_PORT_NULL)) {

            if (tfpport == IPC_PORT_DEAD) {
                error = KERN_PROTECTION_FAILURE;
                goto tfpout;
            }

            /* Call up to the task access server */
            error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);

            if (error != MACH_MSG_SUCCESS) {
                if (error == MACH_RCV_INTERRUPTED)
                    error = KERN_ABORTED;
                else
                    error = KERN_FAILURE;
                goto tfpout;
            }
        }
#if CONFIG_MACF
        error = mac_proc_check_get_task(kauth_cred_get(), p);
        if (error) {
            error = KERN_FAILURE;
            goto tfpout;
        }
#endif

        /* Grant task port access */
        task_reference(p->task);
        sright = (void *) convert_task_to_port(p->task);
        tret = ipc_port_copyout_send(
                   sright,
                   get_task_ipcspace(current_task()));
    }
    error = KERN_SUCCESS;

tfpout:
    task_deallocate(t1);
    AUDIT_ARG(mach_port2, tret);
    (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
    if (p != PROC_NULL)
        proc_rele(p);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return(error);
}