__private_extern__
kern_return_t chudxnu_current_thread_get_callstack(uint32_t *callStack,
                                                   mach_msg_type_number_t *count,
                                                   boolean_t user_only)
{
    kern_return_t kr;
    vm_address_t nextFramePointer = 0;
    vm_address_t currPC, currLR, currR0;
    vm_address_t framePointer;
    vm_address_t prevPC = 0;
    vm_address_t kernStackMin = min_valid_stack_address();
    vm_address_t kernStackMax = max_valid_stack_address();
    unsigned int *buffer = callStack;
    int bufferIndex = 0;
    int bufferMaxIndex = *count;
    boolean_t supervisor;
    struct savearea *sv;

    if(user_only) {
        sv = chudxnu_private_get_user_regs();
    } else {
        sv = chudxnu_private_get_regs();
    }

    if(!sv) {
        *count = 0;
        return KERN_FAILURE;
    }

    supervisor = SUPERVISOR_MODE(sv->save_srr1);

    if(!supervisor && ml_at_interrupt_context()) { // can't do copyin() if on interrupt stack
        *count = 0;
        return KERN_FAILURE;
    }

    bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
    if(bufferMaxIndex<2) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    currPC = sv->save_srr0;
    framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC)  */
    currLR = sv->save_lr;
    currR0 = sv->save_r0;

    bufferIndex = 0;  // start with a stack of size zero
    buffer[bufferIndex++] = currPC; // save PC in position 0.

    // Now, fill buffer with stack backtraces.
    while(bufferIndex<bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
        vm_address_t pc = 0;
        // Above the stack pointer, the following values are saved:
        // saved LR
        // saved CR
        // saved SP
        //-> SP
        // Here, we'll get the lr from the stack.
        volatile vm_address_t fp_link = (vm_address_t)(((unsigned *)framePointer)+FP_LINK_OFFSET);

        // Note that we read the pc even for the first stack frame (which, in theory,
        // is always empty because the callee fills it in just before it lowers the
        // stack.  However, if we catch the program in between filling in the return
        // address and lowering the stack, we want to still have a valid backtrace.
        // FixupStack correctly disregards this value if necessary.

        if(supervisor) {
            kr = chudxnu_private_task_read_bytes(kernel_task, fp_link, sizeof(unsigned int), &pc);
        } else {
            kr = chudxnu_private_task_read_bytes(current_task(), fp_link, sizeof(unsigned int), &pc);
        }
        if(kr!=KERN_SUCCESS) {
            //        IOLog("task_read_callstack: unable to read framePointer: %08x\n",framePointer);
            pc = 0;
            break;
        }

        // retrieve the contents of the frame pointer and advance to the next stack frame if it's valid

        if(supervisor) {
            kr = chudxnu_private_task_read_bytes(kernel_task, framePointer, sizeof(unsigned int), &nextFramePointer);
        } else {
            kr = chudxnu_private_task_read_bytes(current_task(), framePointer, sizeof(unsigned int), &nextFramePointer);
        }
        if(kr!=KERN_SUCCESS) {
            nextFramePointer = 0;
        }

        if(nextFramePointer) {
            buffer[bufferIndex++] = pc;
            prevPC = pc;
        }
    
        if(nextFramePointer<framePointer) {
            break;
        } else {
            framePointer = nextFramePointer;
        }
    }

    if(bufferIndex>=bufferMaxIndex) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    // Save link register and R0 at bottom of stack.  This means that we won't worry
    // about these values messing up stack compression.  These end up being used
    // by FixupStack.
    buffer[bufferIndex++] = currLR;
    buffer[bufferIndex++] = currR0;

    *count = bufferIndex;
    return KERN_SUCCESS;
}
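A hedged usage sketch (not from the CHUD sources): how a sampling client might call the routine above. The buffer depth and the handling of the trailing LR/R0 slots are illustrative assumptions.

/* Hypothetical caller: capture the current thread's callstack. */
#define SAMPLE_DEPTH 32

static void sample_current_thread(void)
{
    uint32_t frames[SAMPLE_DEPTH];
    mach_msg_type_number_t count = SAMPLE_DEPTH;

    if (chudxnu_current_thread_get_callstack(frames, &count, FALSE) == KERN_SUCCESS) {
        /* frames[0] holds the PC; the last two entries are the saved
         * LR and R0, reserved for FixupStack as the comments note. */
    }
}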
bool
com_VFSFilter0::start(
    __in IOService *provider
    )
{
    
    //__asm__ volatile( "int $0x3" );
    
    VNodeMap::Init();
    
    if( kIOReturnSuccess != VFSHookInit() ){
        
        DBG_PRINT_ERROR( ( "VFSHookInit() failed\n" ) );
        goto __exit_on_error;
    }
    
    if( ! QvrVnodeHooksHashTable::CreateStaticTableWithSize( 8, true ) ){
        
        DBG_PRINT_ERROR( ( "QvrVnodeHooksHashTable::CreateStaticTableWithSize() failed\n" ) );
        goto __exit_on_error;
    }

    
    //
    // gSuperUserContext must have a valid thread and process pointer.
    // TODO: redesign this! Holding a thread or task object indefinitely is bad behaviour.
    //
    thread_reference( current_thread() );
    task_reference( current_task() );
    
    gSuperUserContext = vfs_context_create(NULL); // vfs_context_kernel()
    
    //
    // create an object for the vnode KAuth callback and register the callback;
    // the callback might be called immediately after registration!
    //
    gVnodeGate = QvrIOKitKAuthVnodeGate::withCallbackRegistration( this );
    assert( NULL != gVnodeGate );
    if( NULL == gVnodeGate ){
        
        DBG_PRINT_ERROR( ( "QvrIOKitKAuthVnodeGate::withCallbackRegistration() failed\n" ) );
        goto __exit_on_error;
    }
    
    Instance = this;
    
    //
    // register with IOKit to allow the class matching
    //
    registerService();

    return true;
    
__exit_on_error:
    
    //
    // all cleanup will be done in stop() and free()
    //
    this->release();
    return false;
}
/*
 * Allocate the stack, and build the argument list.
 */
static void
build_args_and_stack(struct exec_info *boot_exec_info,
		     char **argv, char **envp)
{
	vm_offset_t	stack_base;
	vm_size_t	stack_size;
	char *		arg_ptr;
	int		arg_count, envc;
	int		arg_len;
	char *		arg_pos;
	int		arg_item_len;
	char *		string_pos;
	char *		zero = (char *)0;
	int i;

#define	STACK_SIZE	(64*1024)

	/*
	 * Calculate the size of the argument list.
	 */
	arg_len = 0;
	arg_count = 0;
	while (argv[arg_count] != 0) {
	    arg_ptr = argv[arg_count++];
	    arg_len += strlen(arg_ptr) + 1;
	}
	envc = 0;
	if (envp != 0)
	  while (envp[envc] != 0)
	    arg_len += strlen (envp[envc++]) + 1;

	/*
	 * Add space for:
	 *	arg count
	 *	pointers to arguments
	 *	trailing 0 pointer
	 *	pointers to environment variables
	 *	trailing 0 pointer
	 *	and align to integer boundary
	 */
	arg_len += (sizeof(integer_t)
		    + (arg_count + 1 + envc + 1) * sizeof(char *));
	arg_len = (arg_len + sizeof(integer_t) - 1) & ~(sizeof(integer_t)-1);

	/*
	 * Allocate the stack.
	 */
	stack_size = round_page(STACK_SIZE);
	stack_base = user_stack_low(stack_size);
	(void) vm_allocate(current_task()->map,
			&stack_base,
			stack_size,
			FALSE);

	arg_pos = (char *)
		set_user_regs(stack_base, stack_size, boot_exec_info, arg_len);

	/*
	 * Start the strings after the arg-count and pointers
	 */
	string_pos = (arg_pos
		      + sizeof(integer_t)
		      + (arg_count + 1 + envc + 1) * sizeof(char *));

	/*
	 * first the argument count
	 */
	(void) copyout(&arg_count,
			arg_pos,
			sizeof(integer_t));
	arg_pos += sizeof(integer_t);

	/*
	 * Then the strings and string pointers for each argument
	 */
	for (i = 0; i < arg_count; ++i) {
	    arg_ptr = argv[i];
	    arg_item_len = strlen(arg_ptr) + 1; /* include trailing 0 */

	    /* set string pointer */
	    (void) copyout(&string_pos,
			arg_pos,
			sizeof (char *));
	    arg_pos += sizeof(char *);

	    /* copy string */
	    (void) copyout(arg_ptr, string_pos, arg_item_len);
	    string_pos += arg_item_len;
	}

	/*
	 * Null terminator for argv.
	 */
	(void) copyout(&zero, arg_pos, sizeof(char *));
	arg_pos += sizeof(char *);

	/*
	 * Then the strings and string pointers for each environment variable
	 */
	for (i = 0; i < envc; ++i) {
	    arg_ptr = envp[i];
	    arg_item_len = strlen(arg_ptr) + 1; /* include trailing 0 */

	    /* set string pointer */
	    (void) copyout(&string_pos,
			arg_pos,
			sizeof (char *));
	    arg_pos += sizeof(char *);

	    /* copy string */
	    (void) copyout(arg_ptr, string_pos, arg_item_len);
	    string_pos += arg_item_len;
	}

	/*
	 * Null terminator for envp.
	 */
	(void) copyout(&zero, arg_pos, sizeof(char *));
}
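As a reading aid, the copyout sequence above produces the classic argument block layout; this summary is derived directly from the code, low addresses first:

/*
 * Argument block written at arg_pos:
 *
 *   integer_t  argc              argument count
 *   char      *argv[argc]        pointers into the string area below
 *   char      *NULL              argv terminator
 *   char      *envp[envc]        pointers into the string area below
 *   char      *NULL              envp terminator
 *   char       strings[]         argv strings then envp strings,
 *                                each NUL-terminated
 */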
Example #4
void *
uthread_alloc(task_t task, thread_t thread, int noinherit)
{
	proc_t p;
	uthread_t uth;
	uthread_t uth_parent;
	void *ut;

	if (!uthread_zone_inited)
		uthread_zone_init();

	ut = (void *)zalloc(uthread_zone);
	bzero(ut, sizeof(struct uthread));

	p = (proc_t) get_bsdtask_info(task);
	uth = (uthread_t)ut;
	uth->uu_kwe.kwe_uth = uth;
	uth->uu_thread = thread;

	/*
	 * Thread inherits credential from the creating thread, if both
	 * are in the same task.
	 *
	 * If the creating thread has no credential or is from another
	 * task we can leave the new thread credential NULL.  If it needs
	 * one later, it will be lazily assigned from the task's process.
	 */
	uth_parent = (uthread_t)get_bsdthread_info(current_thread());
	if ((noinherit == 0) && task == current_task() && 
	    uth_parent != NULL &&
	    IS_VALID_CRED(uth_parent->uu_ucred)) {
		/*
		 * XXX The new thread is, in theory, being created in context
		 * XXX of parent thread, so a direct reference to the parent
		 * XXX is OK.
		 */
		kauth_cred_ref(uth_parent->uu_ucred);
		uth->uu_ucred = uth_parent->uu_ucred;
		/* the credential we just inherited is an assumed credential */
		if (uth_parent->uu_flag & UT_SETUID)
			uth->uu_flag |= UT_SETUID;
	} else {
		/* sometimes workqueue threads are created out of task context */
		if ((task != kernel_task) && (p != PROC_NULL))
			uth->uu_ucred = kauth_cred_proc_ref(p);
		else
			uth->uu_ucred = NOCRED;
	}

	
	if ((task != kernel_task) && p) {
		
		proc_lock(p);
		if (noinherit != 0) {
			/* workq threads will not inherit masks */
			uth->uu_sigmask = ~workq_threadmask;
		} else if (uth_parent) {
			if (uth_parent->uu_flag & UT_SAS_OLDMASK)
				uth->uu_sigmask = uth_parent->uu_oldmask;
			else
				uth->uu_sigmask = uth_parent->uu_sigmask;
		}
		uth->uu_context.vc_thread = thread;
		TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
		proc_unlock(p);

#if CONFIG_DTRACE
		if (p->p_dtrace_ptss_pages != NULL) {
			uth->t_dtrace_scratch = dtrace_ptss_claim_entry(p);
		}
#endif
#if CONFIG_MACF
		mac_thread_label_init(uth);
#endif
	}

	return (ut);
}
Example #5
int main(int argc, char *argv[]){

	while(1){
		int r=getinput();

		if(r==R_CODE_EXIT)
			break;
		else if(r==R_CODE_INFO)
			printf("-help info:\n"HELP);
		else if(r==R_CODE_PS||r==R_CODE_AT){
			GetBSDProcessList();

			int i = 0;
			for (i = 0; i < gprocCount; i++) {
				kinfo_proc *pro = (gprocList + i);
				if(r==R_CODE_PS){
					printf("%d pid:%d name:%s user_stack:%p\n", i, pro->kp_proc.p_pid,
						pro->kp_proc.p_comm, pro->kp_proc.user_stack);
				}else{
					pid_t targetpid = pro->kp_proc.p_pid;
					int num=-1;
					MioGetArg2Num(1,&num);
					if(num==targetpid){
						kern_return_t kr=task_for_pid(current_task(), targetpid, &gtask);
						if(kr==KERN_SUCCESS){
							printf("[attach process %s %d]\n",pro->kp_proc.p_comm,num);
							gproc=pro;
						}else{
							printf("task_for_pid fail %d pid:%d\n",kr,num);
							gproc=NULL;
						}
						break;
					}
				}
			}
		}else if(r==R_CODE_SUS){
			kern_return_t kr = task_suspend(gtask);
			if(kr==KERN_SUCCESS){
				printf("[suspend]\n");
			}else{
				printf("task_suspend fail %d\n",kr);
			}
		}else if(r==R_CODE_RES){
			kern_return_t kr = task_resume(gtask);
			if(kr==KERN_SUCCESS){
				printf("[resume]\n");
			}else{
				printf("task_resume fail %d\n",kr);
			}
		}else if(r==R_CODE_SSI){
			int num=-1;
			if(MioGetArg2Num(1,&num)!=0){
				printf("arg error");
				continue;
			}

			findmemoryspace();

			int i=0;
			int index=0;
			for(i=0;i<gspace_count;i++){
				space *target_space=gspaces+i;
				vm_address_t target_add=target_space->address;
				vm_address_t end_add=target_space->address+target_space->size;
				printf("start search %d from %p to %p of %dK space.\n",num,target_add,end_add,target_space->size/1024);
				do{
					int *buf;
					uint32_t sz;
					kern_return_t kr=vm_read(gtask,target_add,sizeof(int),&buf,&sz);
					if(kr!=KERN_SUCCESS){
						printf("error %d\n",kr);
						break;	/* don't dereference buf after a failed read */
					}

					if((*buf)==num){
						if(index<MAX_ADDS){
							printf("find the var at %p=%lu\n",target_add,target_add);
							gadds[index]=target_add;
							index++;
						}else{
							printf("gadds over flow\n");
						}
					}
					target_add=target_add+sizeof(int);
				}while(target_add<end_add);
				printf("there are %d vars\n",index);
				gadds[index]=0;
			}
			//end of start search int
		}else if(r==R_CODE_CSI){
			int num=-1;
			if(MioGetArg2Num(1,&num)!=0){
				printf("arg error");
				continue;
			}
			char *add=NULL;
			int index=0;
			int keep=0;	/* write index: surviving matches are compacted to the front */
			while((add=gadds[index])!=0){
				int *buf;
				uint32_t sz;
				kern_return_t kr=vm_read(gtask,(vm_address_t)add,sizeof(int),&buf,&sz);
				if(kr!=KERN_SUCCESS){
					printf("error %d\n",kr);
					break;
				}

				if((*buf)==num){
					printf("still find the var at %p=%lu\n",add,(unsigned long)add);
					gadds[keep++]=add;	/* keep this candidate */
				}
				index++;
			}
			gadds[keep]=0;	/* terminate the compacted list */
		}else if(r==R_CODE_MOD){
			char *add=NULL;
			if(MioGetArg2Long(1,&add)!=0){
				printf("address arg error");
				continue;
			}
			int num=-1;
			if(MioGetArg2Num(2,&num)!=0){
				printf("change to arg error");
				continue;
			}
			printf("mod %p to %d\n",add,num);
			kern_return_t kr=vm_write(gtask,add,(vm_offset_t)&num,sizeof(int));
			if(kr==KERN_SUCCESS){
				printf("OK!\n");
			}else{
				printf("vm_write fail %d\n",kr);
			}
		}

	}
	return 0;
}
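findmemoryspace() is referenced above but not shown. A minimal sketch of how it could enumerate the target's writable regions with mach_vm_region(); the space struct, the MAX_SPACES capacity, and the gtask/gspaces/gspace_count globals are assumptions taken from the calling code:

/* Hypothetical findmemoryspace(): record the target task's writable
 * regions into gspaces[]. Assumes struct space { vm_address_t address;
 * vm_size_t size; }; and the globals used by main() above. */
#include <mach/mach.h>

void findmemoryspace(void)
{
	mach_vm_address_t addr = 0;
	mach_vm_size_t size = 0;

	gspace_count = 0;
	for (;;) {
		vm_region_basic_info_data_64_t info;
		mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
		mach_port_t object_name = MACH_PORT_NULL;

		kern_return_t kr = mach_vm_region(gtask, &addr, &size,
				VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
				&info_count, &object_name);
		if (kr != KERN_SUCCESS)
			break;			/* walked past the end of the map */
		if ((info.protection & VM_PROT_WRITE) && gspace_count < MAX_SPACES) {
			gspaces[gspace_count].address = (vm_address_t)addr;
			gspaces[gspace_count].size = (vm_size_t)size;
			gspace_count++;
		}
		addr += size;			/* continue with the next region */
	}
}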
Example #6
/*
 * This function is called very early on in the Mach startup, from the
 * function start_kernel_threads() in osfmk/kern/startup.c.  It's called
 * in the context of the current (startup) task using a call to the
 * function kernel_thread_create() to jump into start_kernel_threads().
 * Internally, kernel_thread_create() calls thread_create_internal(),
 * which calls uthread_alloc().  The function of uthread_alloc() is
 * normally to allocate a uthread structure, and fill out the uu_sigmask,
 * uu_context fields.  It skips filling these out in the case of the "task"
 * being "kernel_task", because the order of operation is inverted.  To
 * account for that, we need to manually fill in at least the contents
 * of the uu_context.vc_ucred field so that the uthread structure can be
 * used like any other.
 */
void
bsd_init(void)
{
	struct uthread *ut;
	unsigned int i;
#if __i386__ || __x86_64__
	int error;
#endif	
	struct vfs_context context;
	kern_return_t	ret;
	struct ucred temp_cred;
	struct posix_cred temp_pcred;
#if NFSCLIENT || CONFIG_IMAGEBOOT
	boolean_t       netboot = FALSE;
#endif

#define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */

	kernel_flock = funnel_alloc(KERNEL_FUNNEL);
	if (kernel_flock == (funnel_t *)0 ) {
		panic("bsd_init: Failed to allocate kernel funnel");
	}
        
	printf(copyright);
	
	bsd_init_kprintf("calling kmeminit\n");
	kmeminit();
	
	bsd_init_kprintf("calling parse_bsd_args\n");
	parse_bsd_args();

	/* Initialize kauth subsystem before instancing the first credential */
	bsd_init_kprintf("calling kauth_init\n");
	kauth_init();

	/* Initialize process and pgrp structures. */
	bsd_init_kprintf("calling procinit\n");
	procinit();

	/* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/
	tty_init();

	kernproc = &proc0;	/* implicitly bzero'ed */

	/* kernel_task->proc = kernproc; */
	set_bsdtask_info(kernel_task,(void *)kernproc);

	/* give kernproc a name */
	bsd_init_kprintf("calling process_name\n");
	process_name("kernel_task", kernproc);

	/* allocate proc lock group attribute and group */
	bsd_init_kprintf("calling lck_grp_attr_alloc_init\n");
	proc_lck_grp_attr= lck_grp_attr_alloc_init();

	proc_lck_grp = lck_grp_alloc_init("proc",  proc_lck_grp_attr);
#if CONFIG_FINE_LOCK_GROUPS
	proc_slock_grp = lck_grp_alloc_init("proc-slock",  proc_lck_grp_attr);
	proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock",  proc_lck_grp_attr);
	proc_mlock_grp = lck_grp_alloc_init("proc-mlock",  proc_lck_grp_attr);
#endif
	/* Allocate proc lock attribute */
	proc_lck_attr = lck_attr_alloc_init();
#if 0
#if __PROC_INTERNAL_DEBUG
	lck_attr_setdebug(proc_lck_attr);
#endif
#endif

#if CONFIG_FINE_LOCK_GROUPS
	proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr);
#else
	proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr);
#endif

	assert(bsd_simul_execs != 0);
	execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	execargs_cache_size = bsd_simul_execs;
	execargs_free_count = bsd_simul_execs;
	execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t));
	bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t));
	
	if (current_task() != kernel_task)
		printf("bsd_init: We have a problem, "
				"current task is not kernel task\n");
	
	bsd_init_kprintf("calling get_bsdthread_info\n");
	ut = (uthread_t)get_bsdthread_info(current_thread());

#if CONFIG_MACF
	/*
	 * Initialize the MAC Framework
	 */
	mac_policy_initbsd();
	kernproc->p_mac_enforce = 0;

#if defined (__i386__) || defined (__x86_64__)
	/*
	 * We currently only support this on i386/x86_64, as that is the
	 * only lock code we have instrumented so far.
	 */
	check_policy_init(policy_check_flags);
#endif
#endif /* MAC */

	/*
	 * Create process 0.
	 */
	proc_list_lock();
	LIST_INSERT_HEAD(&allproc, kernproc, p_list);
	kernproc->p_pgrp = &pgrp0;
	LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
	LIST_INIT(&pgrp0.pg_members);
#ifdef CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
	lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
	/* There is no other bsd thread at this point, so this is safe without the pgrp lock */
	LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist);
	kernproc->p_listflag |= P_LIST_INPGRP;
	kernproc->p_pgrpid = 0;
	kernproc->p_uniqueid = 0;

	pgrp0.pg_session = &session0;
	pgrp0.pg_membercnt = 1;

	session0.s_count = 1;
	session0.s_leader = kernproc;
	session0.s_listflags = 0;
#ifdef CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr);
#else
	lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr);
#endif
	LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash);
	proc_list_unlock();

#if CONFIG_LCTX
	kernproc->p_lctx = NULL;
#endif

	kernproc->task = kernel_task;
	
	kernproc->p_stat = SRUN;
	kernproc->p_flag = P_SYSTEM;
	kernproc->p_lflag = 0;
	kernproc->p_ladvflag = 0;
	
#if DEVELOPMENT || DEBUG
	if (bootarg_disable_aslr)
		kernproc->p_flag |= P_DISABLE_ASLR;
#endif

	kernproc->p_nice = NZERO;
	kernproc->p_pptr = kernproc;

	TAILQ_INIT(&kernproc->p_uthlist);
	TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list);
	
	kernproc->sigwait = FALSE;
	kernproc->sigwait_thread = THREAD_NULL;
	kernproc->exit_thread = THREAD_NULL;
	kernproc->p_csflags = CS_VALID;

	/*
	 * Create credential.  This also Initializes the audit information.
	 */
	bsd_init_kprintf("calling bzero\n");
	bzero(&temp_cred, sizeof(temp_cred));
	bzero(&temp_pcred, sizeof(temp_pcred));
	temp_pcred.cr_ngroups = 1;

	temp_cred.cr_audit.as_aia_p = audit_default_aia_p;

	bsd_init_kprintf("calling kauth_cred_create\n");
	/*
	 * We have to label the temp cred before we create from it to
	 * properly set cr_ngroups, or the create will fail.
	 */
	posix_cred_label(&temp_cred, &temp_pcred);
	kernproc->p_ucred = kauth_cred_create(&temp_cred); 

	/* update cred on proc */
	PROC_UPDATE_CREDS_ONPROC(kernproc);

	/* give the (already existing) initial thread a reference on it */
	bsd_init_kprintf("calling kauth_cred_ref\n");
	kauth_cred_ref(kernproc->p_ucred);
	ut->uu_context.vc_ucred = kernproc->p_ucred;
	ut->uu_context.vc_thread = current_thread();

	TAILQ_INIT(&kernproc->p_aio_activeq);
	TAILQ_INIT(&kernproc->p_aio_doneq);
	kernproc->p_aio_total_count = 0;
	kernproc->p_aio_active_count = 0;

	bsd_init_kprintf("calling file_lock_init\n");
	file_lock_init();

#if CONFIG_MACF
	mac_cred_label_associate_kernel(kernproc->p_ucred);
	mac_task_label_update_cred (kernproc->p_ucred, (struct task *) kernproc->task);
#endif

	/* Create the file descriptor table. */
	filedesc0.fd_refcnt = 1+1;	/* +1 so shutdown will not _FREE_ZONE */
	kernproc->p_fd = &filedesc0;
	filedesc0.fd_cmask = cmask;
	filedesc0.fd_knlistsize = -1;
	filedesc0.fd_knlist = NULL;
	filedesc0.fd_knhash = NULL;
	filedesc0.fd_knhashmask = 0;

	/* Create the limits structures. */
	kernproc->p_limit = &limit0;
	for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++)
		limit0.pl_rlimit[i].rlim_cur = 
			limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY;
	limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
	limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack;
	limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data;
	limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core;
	limit0.pl_refcnt = 1;

	kernproc->p_stats = &pstats0;
	kernproc->p_sigacts = &sigacts0;

	/*
	 * Charge root for two processes: init and mach_init.
	 */
	bsd_init_kprintf("calling chgproccnt\n");
	(void)chgproccnt(0, 1);

	/*
	 *	Allocate a kernel submap for pageable memory
	 *	for temporary copying (execve()).
	 */
	{
		vm_offset_t	minimum;

		bsd_init_kprintf("calling kmem_suballoc\n");
		assert(bsd_pageable_map_size != 0);
		ret = kmem_suballoc(kernel_map,
				&minimum,
				(vm_size_t)bsd_pageable_map_size,
				TRUE,
				VM_FLAGS_ANYWHERE,
				&bsd_pageable_map);
		if (ret != KERN_SUCCESS) 
			panic("bsd_init: Failed to allocate bsd pageable map");
	}

	/*
	 * Initialize buffers and hash links for buffers
	 *
	 * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must
	 *		happen after a credential has been associated with
	 *		the kernel task.
	 */
	bsd_init_kprintf("calling bsd_bufferinit\n");
	bsd_bufferinit();

	/* Initialize the execve() semaphore */
	bsd_init_kprintf("calling semaphore_create\n");

	/* Note: no semaphore_create() call remains here, so this check
	 * still sees `ret` from the kmem_suballoc() above. */
	if (ret != KERN_SUCCESS)
		panic("bsd_init: Failed to create execve semaphore");

	/*
	 * Initialize the calendar.
	 */
	bsd_init_kprintf("calling IOKitInitializeTime\n");
	IOKitInitializeTime();

	bsd_init_kprintf("calling ubc_init\n");
	ubc_init();

	/*
	 * Initialize device-switches.
	 */
	bsd_init_kprintf("calling devsw_init() \n");
	devsw_init();

	/* Initialize the file systems. */
	bsd_init_kprintf("calling vfsinit\n");
	vfsinit();

#if SOCKETS
	/* Initialize per-CPU cache allocator */
	mcache_init();

	/* Initialize mbuf's. */
	bsd_init_kprintf("calling mbinit\n");
	mbinit();
	net_str_id_init(); /* for mbuf tags */
#endif /* SOCKETS */

	/*
	 * Initializes security event auditing.
	 * XXX: Should/could this occur later?
	 */
#if CONFIG_AUDIT
	bsd_init_kprintf("calling audit_init\n");
 	audit_init();  
#endif

	/* Initialize kqueues */
	bsd_init_kprintf("calling knote_init\n");
	knote_init();

	/* Initialize for async IO */
	bsd_init_kprintf("calling aio_init\n");
	aio_init();

	/* Initialize pipes */
	bsd_init_kprintf("calling pipeinit\n");
	pipeinit();

	/* Initialize SysV shm subsystem locks; the subsystem proper is
	 * initialized through a sysctl.
	 */
#if SYSV_SHM
	bsd_init_kprintf("calling sysv_shm_lock_init\n");
	sysv_shm_lock_init();
#endif
#if SYSV_SEM
	bsd_init_kprintf("calling sysv_sem_lock_init\n");
	sysv_sem_lock_init();
#endif
#if SYSV_MSG
	bsd_init_kprintf("sysv_msg_lock_init\n");
	sysv_msg_lock_init();
#endif
	bsd_init_kprintf("calling pshm_lock_init\n");
	pshm_lock_init();
	bsd_init_kprintf("calling psem_lock_init\n");
	psem_lock_init();

	pthread_init();
	/* POSIX Shm and Sem */
	bsd_init_kprintf("calling pshm_cache_init\n");
	pshm_cache_init();
	bsd_init_kprintf("calling psem_cache_init\n");
	psem_cache_init();
	bsd_init_kprintf("calling time_zone_slock_init\n");
	time_zone_slock_init();
	bsd_init_kprintf("calling select_wait_queue_init\n");
	select_wait_queue_init();

	/* Stack snapshot facility lock */
	stackshot_lock_init();
	/*
	 * Initialize protocols.  Block reception of incoming packets
	 * until everything is ready.
	 */
	bsd_init_kprintf("calling sysctl_register_fixed\n");
	sysctl_register_fixed(); 
	bsd_init_kprintf("calling sysctl_mib_init\n");
	sysctl_mib_init();
#if NETWORKING
	bsd_init_kprintf("calling dlil_init\n");
	dlil_init();
	bsd_init_kprintf("calling proto_kpi_init\n");
	proto_kpi_init();
#endif /* NETWORKING */
#if SOCKETS
	bsd_init_kprintf("calling socketinit\n");
	socketinit();
	bsd_init_kprintf("calling domaininit\n");
	domaininit();
#endif /* SOCKETS */

	kernproc->p_fd->fd_cdir = NULL;
	kernproc->p_fd->fd_rdir = NULL;

#if CONFIG_FREEZE
	/* Initialise background hibernation */
	bsd_init_kprintf("calling kern_hibernation_init\n");
	kern_hibernation_init();
#endif

#if CONFIG_EMBEDDED
	/* Initialize kernel memory status notifications */
	bsd_init_kprintf("calling kern_memorystatus_init\n");
	kern_memorystatus_init();
#endif

#ifdef GPROF
	/* Initialize kernel profiling. */
	kmstartup();
#endif

	/* kick off timeout driven events by calling first time */
	thread_wakeup(&lbolt);
	timeout(lightning_bolt, 0, hz);

	bsd_init_kprintf("calling bsd_autoconf\n");
	bsd_autoconf();

#if CONFIG_DTRACE
	dtrace_postinit();
#endif

	/*
	 * We attach the loopback interface *way* down here to ensure
	 * it happens after autoconf(), otherwise it becomes the
	 * "primary" interface.
	 */
#include <loop.h>
#if NLOOP > 0
	bsd_init_kprintf("calling loopattach\n");
	loopattach();			/* XXX */
#endif

#if PFLOG
	/* Initialize packet filter log interface */
	pfloginit();
#endif /* PFLOG */

#if NETHER > 0
	/* Register the built-in dlil ethernet interface family */
	bsd_init_kprintf("calling ether_family_init\n");
	ether_family_init();
#endif /* ETHER */

#if NETWORKING
	/* Call any kext code that wants to run just after network init */
	bsd_init_kprintf("calling net_init_run\n");
	net_init_run();
	
	/* register user tunnel kernel control handler */
	utun_register_control();
	netsrc_init();
	
	/* wait for network domain to finish */
	domainfin();
#endif /* NETWORKING */

	bsd_init_kprintf("calling vnode_pager_bootstrap\n");
	vnode_pager_bootstrap();
#if 0
	/* XXX Hack for early debug stop */
	printf("\nabout to sleep for 10 seconds\n");
	IOSleep( 10 * 1000 );
	/* Debugger("hello"); */
#endif

	bsd_init_kprintf("calling inittodr\n");
	inittodr(0);

	/* Mount the root file system. */
	while( TRUE) {
		int err;

		bsd_init_kprintf("calling setconf\n");
		setconf();
#if NFSCLIENT
		netboot = (mountroot == netboot_mountroot);
#endif

		bsd_init_kprintf("vfs_mountroot\n");
		if (0 == (err = vfs_mountroot()))
			break;
		rootdevice[0] = '\0';
#if NFSCLIENT
		if (netboot) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: failed to mount network root, error %d, %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
#endif
		printf("cannot mount root, errno = %d\n", err);
		boothowto |= RB_ASKNAME;
	}

	IOSecureBSDRoot(rootdevice);

	context.vc_thread = current_thread();
	context.vc_ucred = kernproc->p_ucred;
	mountlist.tqh_first->mnt_flag |= MNT_ROOTFS;

	bsd_init_kprintf("calling VFS_ROOT\n");
	/* Get the vnode for '/'.  Set fdp->fd_fd.fd_cdir to reference it. */
	if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context))
		panic("bsd_init: cannot find root vnode: %s", PE_boot_args());
	rootvnode->v_flag |= VROOT;
	(void)vnode_ref(rootvnode);
	(void)vnode_put(rootvnode);
	filedesc0.fd_cdir = rootvnode;

#if NFSCLIENT
	if (netboot) {
		int err;

		netboot = TRUE;
		/* post mount setup */
		if ((err = netboot_setup()) != 0) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: NetBoot could not find root, error %d: %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
	}
#endif
	

#if CONFIG_IMAGEBOOT
	/*
	 * See if a system disk image is present. If so, mount it and
	 * switch the root vnode to point to it
	 */ 
	if (netboot == FALSE && imageboot_needed()) {
		/* 
		 * An image was found.  No turning back: we're booted
		 * with a kernel from the disk image.
		 */
		imageboot_setup(); 
	}
#endif /* CONFIG_IMAGEBOOT */
  
	/* set initial time; all other resource data is already zero'ed */
	microtime(&kernproc->p_start);
	kernproc->p_stats->p_start = kernproc->p_start;	/* for compat */

#if DEVFS
	{
	    char mounthere[] = "/dev";	/* !const because of internal casting */

	    bsd_init_kprintf("calling devfs_kernel_mount\n");
	    devfs_kernel_mount(mounthere);
	}
#endif /* DEVFS */
	
	/* Initialize signal state for process 0. */
	bsd_init_kprintf("calling siginit\n");
	siginit(kernproc);

	bsd_init_kprintf("calling bsd_utaskbootstrap\n");
	bsd_utaskbootstrap();

#if defined(__LP64__)
	kernproc->p_flag |= P_LP64;
	printf("Kernel is LP64\n");
#endif

	pal_kernel_announce();

#if __i386__ || __x86_64__
	/* this should be done after the root filesystem is mounted */
	error = set_archhandler(kernproc, CPU_TYPE_POWERPC);
	if (error) /* XXX make more generic */
		exec_archhandler_ppc.path[0] = 0;
#endif	

	bsd_init_kprintf("calling mountroot_post_hook\n");

	/* invoke post-root-mount hook */
	if (mountroot_post_hook != NULL)
		mountroot_post_hook();

#if 0 /* not yet */
	consider_zone_gc(FALSE);
#endif

	bsd_init_kprintf("done\n");
}
Example #7
/*
 *	Routine:	ipc_mqueue_post
 *	Purpose:
 *		Post a message to a waiting receiver or enqueue it.  If a
 *		receiver is waiting, we can release our reserved space in
 *		the message queue.
 *
 *	Conditions:
 *		If we need to queue, our space in the message queue is reserved.
 */
void
ipc_mqueue_post(
	register ipc_mqueue_t 	mqueue,
	register ipc_kmsg_t		kmsg)
{
	spl_t s;

	/*
	 *	While the msg queue is locked, we have control of the
	 *	kmsg, so the ref in it for the port is still good.
	 *
	 *	Check for a receiver for the message.
	 */
	s = splsched();
	imq_lock(mqueue);
	for (;;) {
		wait_queue_t waitq = &mqueue->imq_wait_queue;
		thread_t receiver;
		mach_msg_size_t msize;

		receiver = wait_queue_wakeup64_identity_locked(
							waitq,
							IPC_MQUEUE_RECEIVE,
							THREAD_AWAKENED,
							FALSE);
		/* waitq still locked, thread locked */

		if (receiver == THREAD_NULL) {
			/* 
			 * no receivers; queue kmsg
			 */
			assert(mqueue->imq_msgcount > 0);
			ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
			break;
		}
	
		/*
		 * If the receiver waited with a facility not directly
		 * related to Mach messaging, then it isn't prepared to get
		 * handed the message directly.  Just set it running, and
		 * go look for another thread that can.
		 */
		if (receiver->ith_state != MACH_RCV_IN_PROGRESS) {
				  thread_unlock(receiver);
				  continue;
		}

	
		/*
		 * We found a waiting thread.
		 * If the message is too large or the scatter list is too small
		 * the thread we wake up will get that as its status.
		 */
		msize =	ipc_kmsg_copyout_size(kmsg, receiver->map);
		if (receiver->ith_msize <
				(msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(receiver), receiver->ith_option))) {
			receiver->ith_msize = msize;
			receiver->ith_state = MACH_RCV_TOO_LARGE;
		} else {
			receiver->ith_state = MACH_MSG_SUCCESS;
		}

		/*
		 * If there is no problem with the upcoming receive, or the
		 * receiver thread didn't specifically ask for the special
		 * too-large error condition, go ahead and select it anyway.
		 */
		if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
		    !(receiver->ith_option & MACH_RCV_LARGE)) {

			receiver->ith_kmsg = kmsg;
			receiver->ith_seqno = mqueue->imq_seqno++;
			thread_unlock(receiver);

			/* we didn't need our reserved spot in the queue */
			ipc_mqueue_release_msgcount(mqueue);
			break;
		}

		/*
		 * Otherwise, this thread needs to be released to run
		 * and handle its error without getting the message.  We
		 * need to go back and pick another one.
		 */
		receiver->ith_receiver_name = mqueue->imq_receiver_name;
		receiver->ith_kmsg = IKM_NULL;
		receiver->ith_seqno = 0;
		thread_unlock(receiver);
	}

	imq_unlock(mqueue);
	splx(s);
	
	current_task()->messages_sent++;
	return;
}
Example #8
long sys_timer_create(clockid_t clockid,struct sigevent *evp,
                      posixid_t *timerid)
{
  task_t *caller=current_task(), *target=NULL;
  posix_stuff_t *stuff;
  struct sigevent kevp;
  long id,r;
  posix_timer_t *ptimer=NULL;
  ksiginfo_t *ksiginfo;

  if( !s_check_system_capability(SYS_CAP_TIMER) ) {
    return ERR(-EPERM);
  }

  if( clockid != CLOCK_REALTIME ) {
    return ERR(-EINVAL);
  }

  if( evp ) {
    if( copy_from_user(&kevp,evp,sizeof(kevp)) ) {
      return ERR(-EFAULT);
    }
    if( !posix_validate_sigevent(&kevp) ) {
      return ERR(-EINVAL);
    }
  } else {
    INIT_SIGEVENT(kevp);
  }

  ptimer=memalloc(sizeof(*ptimer));
  if( !ptimer ) {
    return ERR(-ENOMEM);
  }

  memset(ptimer,0,sizeof(*ptimer));
  stuff=caller->posix_stuff;

  LOCK_POSIX_STUFF_W(stuff);
  r=-EAGAIN;
  if( ++stuff->timers > get_limit(caller->limits, LIMIT_TIMERS) ) {
    goto out;
  }
  id=posix_allocate_obj_id(stuff);
  if( id < 0 ) {
    goto out;
  }
  UNLOCK_POSIX_STUFF_W(stuff);

  if( !evp ) {
    kevp.sigev_value.sival_int=id;
  }

  POSIX_KOBJ_INIT(&ptimer->kpo,POSIX_OBJ_TIMER,id);
  init_timer(&ptimer->ktimer,0,DEF_ACTION_SIGACTION);
  ptimer->ktimer.da.kern_priv=ptimer;
  ptimer->overrun=0;

  ksiginfo=&ptimer->ktimer.da.d.siginfo;
  siginfo_initialize(current_task(), &ksiginfo->user_siginfo);
  ksiginfo->user_siginfo.si_signo=kevp.sigev_signo;
  ksiginfo->user_siginfo.si_value=kevp.sigev_value;

  switch( kevp.sigev_notify ) {
    case SIGEV_SIGNAL_THREAD:
      target=lookup_task(current_task()->pid,kevp.tid,0);
      if( !target ) {
        r=-ESRCH;
        goto free_id;
      }

#ifdef CONFIG_DEBUG_TIMERS
      kprintf_fault("sys_timer_create() [%d:%d] timer %d will target task %d:%d by signal %d\n",
                    current_task()->pid,current_task()->tid,
                    id,target->pid,target->tid);
#endif

      ksiginfo->target=target;
      break;
  }

  if( copy_to_user(timerid,&id,sizeof(id)) ) {
    r=-EFAULT;
    goto free_target;
  }

  LOCK_POSIX_STUFF_W(stuff);
  posix_insert_object(stuff,&ptimer->kpo,id);
  /* Note: stuff->timers was already incremented during the limit check
   * above; incrementing it again here would double-count this timer. */
  UNLOCK_POSIX_STUFF_W(stuff);

#ifdef CONFIG_DEBUG_TIMERS
  kprintf_fault("sys_timer_create() [%d:%d] created POSIX timer (%p) N %d %p\n",
                current_task()->pid,current_task()->tid,ptimer,id,
                &ptimer->ktimer);
#endif

  return 0;
free_target:
  if( target ) {
    release_task_struct(target);
  }
free_id:
  LOCK_POSIX_STUFF_W(stuff);
  posix_free_obj_id(stuff,id);
out:
  stuff->timers--;
  UNLOCK_POSIX_STUFF_W(stuff);

  if( ptimer ) {
    memfree(ptimer);
  }
  return ERR(r);
}
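For comparison, the user-side POSIX call this syscall implements; plain standard timer_create() usage, independent of this particular kernel:

#include <signal.h>
#include <string.h>
#include <time.h>

/* Create a CLOCK_REALTIME timer that delivers SIGUSR1 on expiry. */
int make_timer(timer_t *out)
{
    struct sigevent sev;

    memset(&sev, 0, sizeof(sev));
    sev.sigev_notify = SIGEV_SIGNAL;
    sev.sigev_signo = SIGUSR1;
    sev.sigev_value.sival_int = 42;	/* surfaces in siginfo.si_value */
    return timer_create(CLOCK_REALTIME, &sev, out);	/* 0 on success */
}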
int
mono_sgen_thread_handshake (int signum)
{
	task_t task = current_task ();
	thread_port_t cur_thread = mach_thread_self ();
	thread_act_array_t thread_list;
	mach_msg_type_number_t num_threads;
	mach_msg_type_number_t num_state;
	thread_state_t state;
	kern_return_t ret;
	ucontext_t ctx;
	mcontext_t mctx;
	pthread_t exception_thread = mono_gc_get_mach_exception_thread ();

	SgenThreadInfo *info;
	gpointer regs [ARCH_NUM_REGS];
	gpointer stack_start;

	int count, i;

	mono_mach_get_threads (&thread_list, &num_threads);

	for (i = 0, count = 0; i < num_threads; i++) {
		thread_port_t t = thread_list [i];
		pthread_t pt = pthread_from_mach_thread_np (t);
		if (t != cur_thread && pt != exception_thread && !mono_sgen_is_worker_thread (pt)) {
			if (signum == suspend_signal_num) {
				ret = thread_suspend (t);
				if (ret != KERN_SUCCESS) {
					mach_port_deallocate (task, t);
					continue;
				}

				state = (thread_state_t) alloca (mono_mach_arch_get_thread_state_size ());
				ret = mono_mach_arch_get_thread_state (t, state, &num_state);
				if (ret != KERN_SUCCESS) {
					mach_port_deallocate (task, t);
					continue;
				}


				info = mono_sgen_thread_info_lookup (pt);

				/* Ensure that the runtime is aware of this thread */
				if (info != NULL) {
					mctx = (mcontext_t) alloca (mono_mach_arch_get_mcontext_size ());
					mono_mach_arch_thread_state_to_mcontext (state, mctx);
					ctx.uc_mcontext = mctx;

					info->stopped_domain = mono_mach_arch_get_tls_value_from_thread (t, mono_pthread_key_for_tls (mono_domain_get_tls_key ()));
					info->stopped_ip = (gpointer) mono_mach_arch_get_ip (state);
					stack_start = (char*) mono_mach_arch_get_sp (state) - REDZONE_SIZE;
					/* If stack_start is not within the limits, then don't set it in info and we will be restarted. */
					if (stack_start >= info->stack_start_limit && stack_start <= info->stack_end) {
						info->stack_start = stack_start;

						ARCH_COPY_SIGCTX_REGS (regs, &ctx);
						info->stopped_regs = regs;
					} else {
						g_assert (!info->stack_start);
					}

					/* Notify the JIT */
					if (mono_gc_get_gc_callbacks ()->thread_suspend_func)
						mono_gc_get_gc_callbacks ()->thread_suspend_func (info->runtime_data, &ctx);
				}
			} else {
				ret = thread_resume (t);
				if (ret != KERN_SUCCESS) {
					mach_port_deallocate (task, t);
					continue;
				}
			}
			count ++;

			mach_port_deallocate (task, t);
		}
	}

	mach_port_deallocate (task, cur_thread);

	return count;
}
Example #10
kern_return_t
task_name_for_pid(
	struct task_name_for_pid_args *args)
{
	mach_port_name_t	target_tport = args->target_tport;
	int			pid = args->pid;
	user_addr_t		task_addr = args->t;
	proc_t		p = PROC_NULL;
	task_t		t1;
	mach_port_name_t	tret;
	void * sright;
	int error = 0, refheld = 0;
	kauth_cred_t target_cred;

	AUDIT_MACH_SYSCALL_ENTER(AUE_TASKNAMEFORPID);
	AUDIT_ARG(pid, pid);
	AUDIT_ARG(mach_port1, target_tport);

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	} 

	p = proc_find(pid);
	if (p != PROC_NULL) {
		AUDIT_ARG(process, p);
		target_cred = kauth_cred_proc_ref(p);
		refheld = 1;

		if ((p->p_stat != SZOMB)
		    && ((current_proc() == p)
			|| kauth_cred_issuser(kauth_cred_get()) 
			|| ((kauth_cred_getuid(target_cred) == kauth_cred_getuid(kauth_cred_get())) && 
			    ((kauth_cred_getruid(target_cred) == kauth_getruid()))))) {

			if (p->task != TASK_NULL) {
				task_reference(p->task);
#if CONFIG_MACF
				error = mac_proc_check_get_task_name(kauth_cred_get(),  p);
				if (error) {
					task_deallocate(p->task);
					goto noperm;
				}
#endif
				sright = (void *)convert_task_name_to_port(p->task);
				tret = ipc_port_copyout_send(sright, 
						get_task_ipcspace(current_task()));
			} else
				tret  = MACH_PORT_NULL;

			AUDIT_ARG(mach_port2, tret);
			(void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
			task_deallocate(t1);
			error = KERN_SUCCESS;
			goto tnfpout;
		}
	}

#if CONFIG_MACF
noperm:
#endif
	task_deallocate(t1);
	tret = MACH_PORT_NULL;
	(void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
	error = KERN_FAILURE;
tnfpout:
	if (refheld != 0)
		kauth_cred_unref(&target_cred);
	if (p != PROC_NULL)
		proc_rele(p);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
Example #11
long sys_timer_control(long id,long cmd,long arg1,long arg2,long arg3)
{
  long r=-EINVAL;
  task_t *caller=current_task();
  posix_stuff_t *stuff=caller->posix_stuff;
  posix_timer_t *ptimer=NULL;
  itimerspec_t tspec,kspec;
  ktimer_t *ktimer;

#ifdef CONFIG_DEBUG_TIMERS
  kprintf_fault("sys_timer_control(<BEGIN>) [%d:%d]: Tick=%d,(cmd=%d,id=%d)\n",
                current_task()->pid,current_task()->tid,
                system_ticks,cmd,id);
#endif

  switch( cmd ) {
    case __POSIX_TIMER_SETTIME:
      /* Arguments are the same as for POSIX 'timer_settime()':
       *    arg1: int flags, arg2: struct itimerspec *value
       *    arg3: struct itimerspec *ovalue
       */
      if( !arg2 || copy_from_user(&tspec,(void *)arg2,sizeof(tspec)) ) {
        r=-EFAULT;
      } else {
        bool valid_timeval=timeval_is_valid(&tspec.it_value) && timeval_is_valid(&tspec.it_interval);
        ulong_t tx=time_to_ticks(&tspec.it_value);
        ulong_t itx=time_to_ticks(&tspec.it_interval);

        /* We need to hold the lock during the whole process, so look up
         * the target timer explicitly.
         */
        LOCK_POSIX_STUFF_W(stuff);
        ptimer=(posix_timer_t*)__posix_locate_object(stuff,id,POSIX_OBJ_TIMER);
        if( !ptimer ) {
          UNLOCK_POSIX_STUFF_W(stuff); /* don't leak the lock on this error path */
          break;
        }

        ktimer=&ptimer->ktimer;
        if( !(tspec.it_value.tv_sec | tspec.it_value.tv_nsec) ) {
          if( ktimer->time_x && posix_timer_active(ptimer) ) { /* Disarm active timer */
#ifdef CONFIG_DEBUG_TIMERS
            kprintf_fault("sys_timer_control() [%d:%d]: Tick=%d, deactivating timer %p:(P=%d) to %d\n",
                          current_task()->pid,current_task()->tid,
                          system_ticks,ktimer,ptimer->interval,tx);
#endif
            deactivate_posix_timer(ptimer);
            delete_timer(ktimer);
          }
          r=0;
        } else if( valid_timeval ) {
          if( !(arg1 & TIMER_ABSTIME) ) {
            tx+=system_ticks;
          }

          ptimer->interval=itx;
          activate_posix_timer(ptimer);
          if( ktimer->time_x ) { /* New time for active timer. */
#ifdef CONFIG_DEBUG_TIMERS
            kprintf_fault("sys_timer_control() [%d:%d] <RE-ARM> Tick=%d, timer=%p:(Tv=%d/%d,Pv=%d/%d,ABS=%d) to %d\n",
                          current_task()->pid,current_task()->tid,
                          system_ticks,ktimer,
                          tspec.it_value.tv_sec,tspec.it_value.tv_nsec,
                          tspec.it_interval.tv_sec,tspec.it_interval.tv_nsec,
                          (arg1 & TIMER_ABSTIME) != 0,
                          tx);
#endif

            r=modify_timer(ktimer,tx);
          } else {
            TIMER_RESET_TIME(ktimer,tx);

#ifdef CONFIG_DEBUG_TIMERS
            kprintf_fault("sys_timer_control() <ARM> [%d:%d] Tick=%d, timer=%p:(Tv=%d/%d,Pv=%d/%d,ABS=%d) to %d\n",
                          current_task()->pid,current_task()->tid,
                          system_ticks,ktimer,
                          tspec.it_value.tv_sec,tspec.it_value.tv_nsec,
                          tspec.it_interval.tv_sec,tspec.it_interval.tv_nsec,
                          (arg1 & TIMER_ABSTIME) != 0,
                          tx);
#endif

            r=add_timer(ktimer);
          }
        }
        UNLOCK_POSIX_STUFF_W(stuff);

        if( !r && arg3 ) {
          __get_timer_status(ptimer,&kspec);
          if( copy_to_user((itimerspec_t *)arg3,&kspec,sizeof(kspec)) )  {
            r=-EFAULT;
            /* TODO: [mt] Cleanup timer upon -EFAULT. */
          }
        }
      }
      break;
    case __POSIX_TIMER_GETTIME:
      ptimer=posix_lookup_timer(stuff,id);
      if( !ptimer ) {
        break;
      }
      __get_timer_status(ptimer,&kspec);
      r=copy_to_user((itimerspec_t *)arg1,&kspec,sizeof(kspec)) ? -EFAULT : 0;
      break;
    case __POSIX_TIMER_GETOVERRUN:
      ptimer=posix_lookup_timer(stuff,id);
      if( !ptimer ) {
        break;
      }
      r=ptimer->overrun;
      break;
    default:
      r=-EINVAL;
      break;
  }

  if( ptimer ) {
    release_posix_timer(ptimer);
  }

#ifdef CONFIG_DEBUG_TIMERS
  kprintf_fault("sys_timer_control(<END>) [%d:%d]: Tick=%d, (cmd=%d,id=%d), result=%d\n",
                current_task()->pid,current_task()->tid,
                system_ticks,cmd,id,r);
#endif
  return ERR(r);
}
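The __POSIX_TIMER_SETTIME branch mirrors standard timer_settime() semantics (an all-zero it_value disarms; TIMER_ABSTIME switches to absolute expiry). A plain POSIX user-side arm call for contrast:

#include <string.h>
#include <time.h>

/* Arm 'id' to fire after one second and then every 250 ms; passing
 * a zeroed it_value instead would disarm the timer, exactly as the
 * kernel branch above does. */
int arm_timer(timer_t id)
{
    struct itimerspec its;

    memset(&its, 0, sizeof(its));
    its.it_value.tv_sec = 1;				/* first expiry */
    its.it_interval.tv_nsec = 250 * 1000 * 1000;	/* period */
    return timer_settime(id, 0, &its, NULL);
}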
Example #12
/*
 *	Routine:	task_for_pid
 *	Purpose:
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 *
 *		Note: if pid == 0, an error is returned no matter who is calling.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */
kern_return_t
task_for_pid(
	struct task_for_pid_args *args)
{
	mach_port_name_t	target_tport = args->target_tport;
	int			pid = args->pid;
	user_addr_t		task_addr = args->t;
	proc_t 			p = PROC_NULL;
	task_t			t1 = TASK_NULL;
	mach_port_name_t	tret = MACH_PORT_NULL;
 	ipc_port_t 		tfpport;
	void * sright;
	int error = 0;

	AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
	AUDIT_ARG(pid, pid);
	AUDIT_ARG(mach_port1, target_tport);

	/* Always check if pid == 0 */
	if (pid == 0) {
		(void ) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	}

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	} 


	p = proc_find(pid);
	if (p == PROC_NULL) {
		error = KERN_FAILURE;
		goto tfpout;
	}

#if CONFIG_AUDIT
	AUDIT_ARG(process, p);
#endif

	if (!(task_for_pid_posix_check(p))) {
		error = KERN_FAILURE;
		goto tfpout;
	}

	if (p->task != TASK_NULL) {
		/* If we aren't root and target's task access port is set... */
		if (!kauth_cred_issuser(kauth_cred_get()) &&
			p != current_proc() &&
			(task_get_task_access_port(p->task, &tfpport) == 0) &&
			(tfpport != IPC_PORT_NULL)) {

			if (tfpport == IPC_PORT_DEAD) {
				error = KERN_PROTECTION_FAILURE;
				goto tfpout;
			}

			/* Call up to the task access server */
			error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);

			if (error != MACH_MSG_SUCCESS) {
				if (error == MACH_RCV_INTERRUPTED)
					error = KERN_ABORTED;
				else
					error = KERN_FAILURE;
				goto tfpout;
			}
		}
#if CONFIG_MACF
		error = mac_proc_check_get_task(kauth_cred_get(), p);
		if (error) {
			error = KERN_FAILURE;
			goto tfpout;
		}
#endif

		/* Grant task port access */
		task_reference(p->task);
		extmod_statistics_incr_task_for_pid(p->task);

		sright = (void *) convert_task_to_port(p->task);
		tret = ipc_port_copyout_send(
				sright, 
				get_task_ipcspace(current_task()));
	} 
	error = KERN_SUCCESS;

tfpout:
	task_deallocate(t1);
	AUDIT_ARG(mach_port2, tret);
	(void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
	if (p != PROC_NULL)
		proc_rele(p);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
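From user space the trap is reached through the libsystem wrapper; the classic calling pattern (subject to the root/same-uid and task-access-port checks implemented above) is:

#include <mach/mach.h>
#include <unistd.h>

/* Ask the kernel for another process's task port; on failure the
 * checks above yield KERN_FAILURE (or KERN_PROTECTION_FAILURE). */
kern_return_t get_task_port(pid_t pid, task_t *out)
{
    return task_for_pid(mach_task_self(), pid, out);
}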
Example #13
static void
build_args_and_stack(struct exec_info *boot_exec_info, ...)
{
	vm_offset_t	stack_base;
	vm_size_t	stack_size;
	va_list		argv_ptr;
	register
	char *		arg_ptr;
	int		arg_len;
	int		arg_count;
	register
	char *		arg_pos;
	int		arg_item_len;
	char *		string_pos;
	char *		zero = (char *)0;

#define	STACK_SIZE	(64*1024)

	/*
	 * Calculate the size of the argument list.
	 */
	va_start(argv_ptr, boot_exec_info);
	arg_len = 0;
	arg_count = 0;
	for (;;) {
	    arg_ptr = va_arg(argv_ptr, char *);
	    if (arg_ptr == 0)
		break;
	    arg_count++;
	    arg_len += strlen(arg_ptr) + 1;
	}
	va_end(argv_ptr);

	/*
	 * Add space for:
	 *	arg count
	 *	pointers to arguments
	 *	trailing 0 pointer
	 *	dummy 0 pointer to environment variables
	 *	and align to integer boundary
	 */
	arg_len += sizeof(integer_t)
		 + (2 + arg_count) * sizeof(char *);
	arg_len = (arg_len + sizeof(integer_t) - 1) & ~(sizeof(integer_t)-1);

	/*
	 * Allocate the stack.
	 */
	stack_size = round_page(STACK_SIZE);
	stack_base = user_stack_low(stack_size);
	(void) vm_allocate(current_task()->map,
			&stack_base,
			stack_size,
			FALSE);

	arg_pos = (char *)
		set_user_regs(stack_base, stack_size, boot_exec_info, arg_len);

	/*
	 * Start the strings after the arg-count and pointers
	 */
	string_pos = arg_pos
		+ sizeof(integer_t)
		+ arg_count * sizeof(char *)
		+ 2 * sizeof(char *);

	/*
	 * first the argument count
	 */
	(void) copyout((char *)&arg_count,
			arg_pos,
			sizeof(integer_t));
	arg_pos += sizeof(integer_t);

	/*
	 * Then the strings and string pointers for each argument
	 */
	va_start(argv_ptr, boot_exec_info);
	while (--arg_count >= 0) {
	    arg_ptr = va_arg(argv_ptr, char *);
	    arg_item_len = strlen(arg_ptr) + 1; /* include trailing 0 */

	    /* set string pointer */
	    (void) copyout((char *)&string_pos,
			arg_pos,
			sizeof (char *));
	    arg_pos += sizeof(char *);

	    /* copy string */
	    (void) copyout(arg_ptr, string_pos, arg_item_len);
	    string_pos += arg_item_len;
	}
	va_end(argv_ptr);

	/*
	 * last, the trailing 0 argument and a null environment pointer.
	 */
	(void) copyout((char *)&zero, arg_pos, sizeof(char *));
	arg_pos += sizeof(char *);
	(void) copyout((char *)&zero, arg_pos, sizeof(char *));
}
Example #14
__private_extern__
task_t chudxnu_current_task(void)
{
	return current_task();
}
Example #15
/*
 * XXX
 * receive could take a task-local port number like a fd and speed lookup and
 * minimize locking.
 */
int
ipc_port_receive(ipc_port_t port, struct ipc_header *ipch, void **vpagep)
{
	struct ipc_message *ipcmsg;
	struct ipc_port *ipcp;
	struct task *task;
	vaddr_t vaddr;
	int error, error2;

	task = current_task();

	ASSERT(task != NULL, "Must have a running task.");
	ASSERT(ipch != NULL, "Must be able to copy out header.");

	IPC_PORTS_LOCK();
	ipcp = ipc_port_lookup(port);
	if (ipcp == NULL) {
		IPC_PORTS_UNLOCK();
		return (ERROR_NOT_FOUND);
	}
	IPC_PORTS_UNLOCK();

	if (!ipc_port_right_check(ipcp, task, IPC_PORT_RIGHT_RECEIVE)) {
		IPC_PORT_UNLOCK(ipcp);
		return (ERROR_NO_RIGHT);
	}

	if (TAILQ_EMPTY(&ipcp->ipcp_msgs)) {
		IPC_PORT_UNLOCK(ipcp);
		return (ERROR_AGAIN);
	}

	ipcmsg = TAILQ_FIRST(&ipcp->ipcp_msgs);
	ASSERT(ipcmsg != NULL, "Queue must not change out from under us.");
	ASSERT(ipcmsg->ipcmsg_header.ipchdr_dst == ipcp->ipcp_port,
	       "Destination must be this port.");
	TAILQ_REMOVE(&ipcp->ipcp_msgs, ipcmsg, ipcmsg_link);
	IPC_PORT_UNLOCK(ipcp);

	/*
	 * Insert any passed rights.
	 */
	if (ipcmsg->ipcmsg_header.ipchdr_right != IPC_PORT_RIGHT_NONE) {
		ipcp = ipc_port_lookup(ipcmsg->ipcmsg_header.ipchdr_src);
		if (ipcp == NULL)
			panic("%s: port disappeared.", __func__);
		error = ipc_port_right_insert(ipcp, task, ipcmsg->ipcmsg_header.ipchdr_right);
		if (error != 0)
			panic("%s: granting rights failed: %m", __func__,
			      error);
		IPC_PORT_UNLOCK(ipcp);
	}

	if (ipcmsg->ipcmsg_page == NULL) {
		if (vpagep != NULL)
			*vpagep = NULL;
	} else {
		if (vpagep == NULL) {
			/*
			 * A task may refuse a page flip for any number of reasons.
			 */
			page_release(ipcmsg->ipcmsg_page);
		} else {
			/*
			 * Map this page into the receiving task.
			 */
			if ((task->t_flags & TASK_KERNEL) == 0) {
				/*
				 * User task.
				 */
				error = vm_alloc_address(task->t_vm, &vaddr, 1, false);
				if (error != 0) {
					page_release(ipcmsg->ipcmsg_page);
					free(ipcmsg);
					return (error);
				}

				error = page_map(task->t_vm, vaddr, ipcmsg->ipcmsg_page);
				if (error != 0) {
					error2 = vm_free_address(task->t_vm, vaddr);
					if (error2 != 0)
						panic("%s: vm_free_address failed: %m", __func__, error2);
					page_release(ipcmsg->ipcmsg_page);
					free(ipcmsg);
					return (error);
				}
			} else {
				/*
				 * Kernel task.
				 */
				error = page_map_direct(&kernel_vm, ipcmsg->ipcmsg_page, &vaddr);
				if (error != 0) {
					page_release(ipcmsg->ipcmsg_page);
					free(ipcmsg);
					return (error);
				}
			}
			*vpagep = (void *)vaddr;
		}
	}

	*ipch = ipcmsg->ipcmsg_header;

	free(ipcmsg);

	return (0);
}
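A hedged consumer sketch built only from the API visible above (the header struct, the ERROR_* return codes, and the page-flip out-parameter); the polling loop is an assumption, since no blocking wait primitive is shown:

/* Hypothetical in-kernel consumer: drain one message from 'port',
 * accepting a page flip when one is attached. */
static int
drain_one(ipc_port_t port, struct ipc_header *ipch)
{
	void *vpage;
	int error;

	do {
		error = ipc_port_receive(port, ipch, &vpage);
	} while (error == ERROR_AGAIN);		/* queue empty: retry */
	if (error != 0)
		return (error);
	if (vpage != NULL) {
		/* a page was flipped into our address space at vpage */
	}
	return (0);
}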
int
i386_set_ldt(
	uint32_t		*retval,
	uint32_t		start_sel,
	uint32_t		descs,	/* out */
	uint32_t		num_sels)
{
	user_ldt_t	new_ldt, old_ldt;
	struct real_descriptor *dp;
	unsigned int	i;
	unsigned int	min_selector = LDTSZ_MIN;	/* do not allow the system selectors to be changed */
	task_t		task = current_task();
	unsigned int	ldt_count;
	kern_return_t err;

	if (start_sel != LDT_AUTO_ALLOC
	    && (start_sel != 0 || num_sels != 0)
	    && (start_sel < min_selector || start_sel >= LDTSZ))
	    return EINVAL;
	if (start_sel != LDT_AUTO_ALLOC
	    && (uint64_t)start_sel + (uint64_t)num_sels > LDTSZ) /* cast to uint64_t to detect wrap-around */
	    return EINVAL;

	task_lock(task);
	
	old_ldt = task->i386_ldt;

	if (start_sel == LDT_AUTO_ALLOC) {
	    if (old_ldt) {
		unsigned int null_count;
		struct real_descriptor null_ldt;
		
		bzero(&null_ldt, sizeof(null_ldt));

		/*
		 * Look for null selectors among the already-allocated
		 * entries.
		 */
		null_count = 0;
		i = 0;
		while (i < old_ldt->count)
		{
		    if (!memcmp(&old_ldt->ldt[i++], &null_ldt, sizeof(null_ldt))) {
			null_count++;
			if (null_count == num_sels)
			    break;  /* break out of while loop */
		    } else {
			null_count = 0;
		    }
		}

		/*
		 * If we broke out of the while loop, i points to the selector
		 * after num_sels null selectors.  Otherwise it points to the end
		 * of the old LDTs, and null_count is the number of null selectors
		 * at the end. 
		 *
		 * Either way, there are null_count null selectors just prior to
		 * the i-indexed selector, and either null_count >= num_sels,
		 * or we're at the end, so we can extend.
		 */
		start_sel = old_ldt->start + i - null_count;
	    } else {
		start_sel = LDTSZ_MIN;
	    }
		
	    if (start_sel + num_sels > LDTSZ) {
		task_unlock(task);
		return ENOMEM;
	    }
	}

	if (start_sel == 0 && num_sels == 0) {
	    new_ldt = NULL;
	} else {
	    /*
	     * Allocate new LDT
	     */

	    unsigned int    begin_sel = start_sel;
	    unsigned int    end_sel = begin_sel + num_sels;
	    
	    if (old_ldt != NULL) {
		if (old_ldt->start < begin_sel)
		    begin_sel = old_ldt->start;
		if (old_ldt->start + old_ldt->count > end_sel)
		    end_sel = old_ldt->start + old_ldt->count;
	    }

	    ldt_count = end_sel - begin_sel;

	    new_ldt = (user_ldt_t)kalloc(sizeof(struct user_ldt) + (ldt_count * sizeof(struct real_descriptor)));
	    if (new_ldt == NULL) {
		task_unlock(task);
		return ENOMEM;
	    }

	    new_ldt->start = begin_sel;
	    new_ldt->count = ldt_count;

	    /*
	     * Have new LDT.  If there was an old LDT, copy descriptors
	     * from old to new.
	     */
	    if (old_ldt) {
		bcopy(&old_ldt->ldt[0],
		      &new_ldt->ldt[old_ldt->start - begin_sel],
		      old_ldt->count * sizeof(struct real_descriptor));

		/*
		 * If the old and new LDTs are non-overlapping, fill the 
		 * center in with null selectors.
		 */
		 		 
		if (old_ldt->start + old_ldt->count < start_sel)
		    bzero(&new_ldt->ldt[old_ldt->count],
			  (start_sel - (old_ldt->start + old_ldt->count)) * sizeof(struct real_descriptor));
		else if (old_ldt->start > start_sel + num_sels)
		    bzero(&new_ldt->ldt[num_sels],
			  (old_ldt->start - (start_sel + num_sels)) * sizeof(struct real_descriptor));
	    }

	    /*
	     * Install new descriptors.
	     */
	    if (descs != 0) {
		err = copyin(descs, (char *)&new_ldt->ldt[start_sel - begin_sel],
			     num_sels * sizeof(struct real_descriptor));
		if (err != 0)
		{
		    task_unlock(task);
		    user_ldt_free(new_ldt);
		    return err;
		}
	    } else {
		bzero(&new_ldt->ldt[start_sel - begin_sel], num_sels * sizeof(struct real_descriptor));
	    }

	    /*
	     * Validate descriptors.
	     * Only allow descriptors with user privileges.
	     */
	    for (i = 0, dp = (struct real_descriptor *) &new_ldt->ldt[start_sel - begin_sel];
		 i < num_sels;
		 i++, dp++)
	    {
		switch (dp->access & ~ACC_A) {
		    case 0:
		    case ACC_P:
			/* valid empty descriptor */
			break;
		    case ACC_P | ACC_PL_U | ACC_DATA:
		    case ACC_P | ACC_PL_U | ACC_DATA_W:
		    case ACC_P | ACC_PL_U | ACC_DATA_E:
		    case ACC_P | ACC_PL_U | ACC_DATA_EW:
		    case ACC_P | ACC_PL_U | ACC_CODE:
		    case ACC_P | ACC_PL_U | ACC_CODE_R:
		    case ACC_P | ACC_PL_U | ACC_CODE_C:
		    case ACC_P | ACC_PL_U | ACC_CODE_CR:
		    case ACC_P | ACC_PL_U | ACC_CALL_GATE_16:
		    case ACC_P | ACC_PL_U | ACC_CALL_GATE:
			break;
		    default:
			task_unlock(task);
			user_ldt_free(new_ldt);
			return EACCES;
		}
	    }
	}

	task->i386_ldt = new_ldt; /* new LDT for task */

	/*
	 * Switch to new LDT.  We need to do this on all CPUs, since
	 * another thread in this same task may be currently running,
	 * and we need to make sure the new LDT is in place
	 * throughout the task before returning to the user.
	 */
	mp_rendezvous_no_intrs(user_ldt_set_action, task);

	task_unlock(task);

	/*
	 * Free the old LDT.  We can't do this until after we've
	 * rendezvoused with all CPUs, in case another thread
	 * in this task was in the process of context switching.
	 */
	if (old_ldt)
	    user_ldt_free(old_ldt);

	*retval = start_sel;

	return 0;
}
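
The range-merge arithmetic above (begin_sel/end_sel) decides how large the replacement LDT must be: the new table has to cover both the old selector range and the requested one.  A minimal, self-contained sketch of just that index computation, with hypothetical names and no kernel dependencies:

#include <assert.h>

/* Hypothetical mirror of the range merge in the LDT code above: the new
 * table must cover both the old range and the requested range. */
struct sel_range { unsigned start, count; };

static struct sel_range
merge_ranges(struct sel_range old, unsigned start_sel, unsigned num_sels)
{
	unsigned begin = start_sel, end = start_sel + num_sels;

	if (old.start < begin)
		begin = old.start;
	if (old.start + old.count > end)
		end = old.start + old.count;
	return (struct sel_range){ begin, end - begin };
}

int main(void)
{
	/* Old LDT covers selectors [10,20); request covers [30,35):
	 * the merged table must cover [10,35). */
	struct sel_range r = merge_ranges((struct sel_range){ 10, 10 }, 30, 5);
	assert(r.start == 10 && r.count == 25);
	return 0;
}
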
Example #17
int
ipc_port_send_page(const struct ipc_header *ipch, struct vm_page *page)
{
	struct ipc_message *ipcmsg;
	struct ipc_port *ipcp;
	struct task *task;

	task = current_task();

	ASSERT(task != NULL, "Must have a running task.");
	ASSERT(ipch != NULL, "Must have a header.");

	/*
	 * A message of IPC_MSG_NONE may always be sent to any port by any
	 * port, may not contain any data, and may be used to grant rights.
	 *
	 * XXX There is probably a DoS in allowing rights to be inserted
	 *     for arbitrary tasks, but it's limited to the number of ports,
	 *     so clamping that value for untrusted tasks is probably a fine
	 *     compromise for now.
	 */
	if (ipch->ipchdr_msg == IPC_MSG_NONE && page != NULL)
		return (ERROR_INVALID);

	IPC_PORTS_LOCK();

	/*
	 * Step 1:
	 * Check that the sending task has a receive right on the source port.
	 */
	ipcp = ipc_port_lookup(ipch->ipchdr_src);
	if (ipcp == NULL) {
		IPC_PORTS_UNLOCK();
		return (ERROR_INVALID);
	}

	if (!ipc_port_right_check(ipcp, task, IPC_PORT_RIGHT_RECEIVE)) {
		IPC_PORT_UNLOCK(ipcp);
		IPC_PORTS_UNLOCK();
		return (ERROR_NO_RIGHT);
	}
	IPC_PORT_UNLOCK(ipcp);

	/*
	 * Step 2:
	 * Check that the sending task has a send right on the destination port
	 * unless the destination port is providing a public service or a knock
	 * message is being sent.
	 */
	ipcp = ipc_port_lookup(ipch->ipchdr_dst);
	if (ipcp == NULL) {
		IPC_PORTS_UNLOCK();
		return (ERROR_NOT_FOUND);
	}
	IPC_PORTS_UNLOCK();

	if ((ipcp->ipcp_flags & IPC_PORT_FLAG_PUBLIC) == 0 &&
	    ipch->ipchdr_msg != IPC_MSG_NONE) {
		if (!ipc_port_right_check(ipcp, task, IPC_PORT_RIGHT_SEND)) {
			IPC_PORT_UNLOCK(ipcp);
			return (ERROR_NO_RIGHT);
		}
	}

	ipcmsg = malloc(sizeof *ipcmsg);
	ipcmsg->ipcmsg_header = *ipch;
	ipcmsg->ipcmsg_page = page;

	TAILQ_INSERT_TAIL(&ipcp->ipcp_msgs, ipcmsg, ipcmsg_link);
	cv_signal(ipcp->ipcp_cv);

	IPC_PORT_UNLOCK(ipcp);

	return (0);
}
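
The send path above makes two separate rights checks.  A hedged, self-contained model of that policy — every type and constant below is invented for illustration, not part of this kernel's API:

#include <stdbool.h>

#define RIGHT_RECEIVE	0x1
#define RIGHT_SEND	0x2
#define FLAG_PUBLIC	0x1
#define MSG_NONE	0

struct model_port { unsigned flags; unsigned task_rights; };

/* Step 1: the sender must hold a receive right on the source port.
 * Step 2: it must hold a send right on the destination, unless the
 * destination is public or the message is a knock (MSG_NONE). */
static bool
may_send(unsigned msg, const struct model_port *src, const struct model_port *dst)
{
	if ((src->task_rights & RIGHT_RECEIVE) == 0)
		return false;
	if ((dst->flags & FLAG_PUBLIC) == 0 && msg != MSG_NONE)
		return (dst->task_rights & RIGHT_SEND) != 0;
	return true;
}
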
Example #18
DWORD scheduler_run(void *ptr)
{
	CONTEXT target_contexts[CORE_COUNT];
	bool context_changed[CORE_COUNT];
	memset(&context_changed, 0, CORE_COUNT * sizeof(bool));

	// pause and resume requests
	for (int core = 1; core < CORE_COUNT; core++)
	{
		if (core_req_pause[core])
		{
			if (running_tasks[core] != NULL)
			{
				task_queue.push_back(std::move(running_tasks[core]));
			}
			core_paused[core] = true;
			core_req_pause[core] = false;
		}

		if (core_req_resume[core])
		{
			core_paused[core] = false;
			core_req_resume[core] = false;
		}
	}

	// clean up exited tasks
	while (!exit_task_queue.empty())
	{
		exit_task_queue.pop_front();
	}

	// check for new tasks
	semaphore_P(sched_new_task_lock, 1);
	while (!new_task_queue.empty())
	{
		std::unique_ptr<new_task_req> task_request(std::move(new_task_queue.front()));
		new_task_queue.pop_front();

		std::unique_ptr<task_control_block> tcb(new task_control_block);
		sched_create_task(*tcb, *task_request);
		task_queue.push_back(std::move(tcb));
	}
	semaphore_V(sched_new_task_lock, 1);

	for (int core = 0; core < CORE_COUNT; core++)
	{
		if (core_paused[core])
		{
			continue;
		}

		std::unique_ptr<task_control_block> new_task;
		std::unique_ptr<task_control_block> current_task(std::move(running_tasks[core]));

		// keep a raw pointer so we can later tell whether the same task stays on this core
		task_control_block *current_task_ptr = current_task.get();
		
		target_contexts[core] = default_context;
		context_changed[core] = true;

		// if something is on the core
		if (current_task.get() != NULL)
		{
			current_task->quantum -= TIME_QUANTUM_DECREASE;
			if (current_task->quantum < 0)
			{
				if (current_task->type == IDLE && !task_queue.empty() && sched_task_running())
				{
					current_task.reset();
				}
				else
				{
					current_task->quantum = TIME_QUANTUM;
					current_task->state = RUNNABLE;
					task_queue.push_back(std::move(current_task));
				}
			}
			else
			{
				target_contexts[core] = current_task->context;
				running_tasks[core] = std::move(current_task);
				context_changed[core] = false;
				SetEvent(cpu_int_table_handlers[core][INT_CORE_RESUME]);
				continue;
			}
		}

		if (task_queue.empty())
		{
			if (core == 0)
			{
				// create new IDLE task
				std::unique_ptr<new_task_req> task_request(new new_task_req);
				task_request->tcp = NULL;
				task_request->type = IDLE;
				task_request->task_id = task_counter++;

				new_task.reset(new task_control_block);
				sched_create_task(*new_task, *task_request);
			}
			else
			{
				// no runnable task for this core: suspend it
				SetEvent(cpu_int_table_handlers[core][INT_CORE_SUSPEND]);
				context_changed[core] = false;
				continue;
			}
		}
		else
		{
			new_task = std::move(task_queue.front());
			task_queue.pop_front();
		}

		if (current_task_ptr != NULL && new_task.get() == current_task_ptr)
		{
			context_changed[core] = false;
		}
		else
		{
			context_changed[core] = true;
		}

		new_task->state = RUNNING;
		target_contexts[core] = new_task->context;
		running_tasks[core] = std::move(new_task);
	}

	// send final reschedule events to the other cores
	for (int core = 1; core < CORE_COUNT; core++)
	{
		if (context_changed[core])
		{
			cpu_int_table_messages[core][INT_RESCHEDULE] = (void *)target_contexts[core].Esp;
			SetEvent(cpu_int_table_handlers[core][INT_RESCHEDULE]);
		}
	}

	return target_contexts[0].Esp;
}
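
The heart of the loop above is the quantum bookkeeping: decrement, then either keep the task on the core or recharge and requeue it.  A sketch of just that step, with invented types and assumed constant values:

#include <stdbool.h>

enum { TIME_QUANTUM = 20, TIME_QUANTUM_DECREASE = 1 };	/* assumed values */

struct model_tcb { int quantum; };

/* Returns true if the task keeps its core, false if the scheduler should
 * recharge its quantum and push it back onto the run queue. */
static bool
quantum_step(struct model_tcb *t)
{
	t->quantum -= TIME_QUANTUM_DECREASE;
	if (t->quantum < 0) {
		t->quantum = TIME_QUANTUM;
		return false;
	}
	return true;
}
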
Example #19
load_return_t
load_machfile(
	struct image_params	*imgp,
	struct mach_header	*header,
	thread_t 		thread,
	vm_map_t 		new_map,
	load_result_t		*result
)
{
	struct vnode		*vp = imgp->ip_vp;
	off_t			file_offset = imgp->ip_arch_offset;
	off_t			macho_size = imgp->ip_arch_size;
	off_t			file_size = imgp->ip_vattr->va_data_size;
	
	pmap_t			pmap = 0;	/* protected by create_map */
	vm_map_t		map;
	vm_map_t		old_map;
	task_t			old_task = TASK_NULL; /* protected by create_map */
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t create_map = FALSE;
	int spawn = (imgp->ip_flags & IMGPF_SPAWN);
	task_t task = current_task();
	proc_t p = current_proc();
	mach_vm_offset_t	aslr_offset = 0;
	mach_vm_offset_t	dyld_aslr_offset = 0;
	kern_return_t 		kret;

	if (macho_size > file_size) {
		return(LOAD_BADMACHO);
	}

	if (new_map == VM_MAP_NULL) {
		create_map = TRUE;
		old_task = current_task();
	}

	/*
	 * If we are spawning, we have created backing objects for the process
	 * already, which include non-lazily creating the task map.  So we
	 * are going to switch out the task map with one appropriate for the
	 * bitness of the image being loaded.
	 */
	if (spawn) {
		create_map = TRUE;
		old_task = get_threadtask(thread);
	}

	if (create_map) {
		pmap = pmap_create(get_task_ledger(task), (vm_map_size_t) 0,
				(imgp->ip_flags & IMGPF_IS_64BIT));
		pal_switch_pmap(thread, pmap, imgp->ip_flags & IMGPF_IS_64BIT);
		map = vm_map_create(pmap,
				0,
				vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
				TRUE);
	} else
		map = new_map;

#ifndef	CONFIG_ENFORCE_SIGNED_CODE
	/* This turns off faulting for executable pages, which makes it
	 * possible to circumvent Code Signing Enforcement.  The per-process
	 * flag (CS_ENFORCEMENT) is not set yet, but we can use the
	 * global flag.
	 */
	if ( !cs_enforcement(NULL) && (header->flags & MH_ALLOW_STACK_EXECUTION) )
	        vm_map_disable_NX(map);
#endif

	/* Forcibly disallow execution from data pages, even if the arch
	 * normally permits it. */
	if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC))
		vm_map_disallow_data_exec(map);
	
	/*
	 * Compute a random offset for ASLR, and an independent random offset for dyld.
	 */
	if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
		uint64_t max_slide_pages;

		max_slide_pages = vm_map_get_max_aslr_slide_pages(map);

		aslr_offset = random();
		aslr_offset %= max_slide_pages;
		aslr_offset <<= vm_map_page_shift(map);

		dyld_aslr_offset = random();
		dyld_aslr_offset %= max_slide_pages;
		dyld_aslr_offset <<= vm_map_page_shift(map);
	}
	
	if (!result)
		result = &myresult;

	*result = load_result_null;

	lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
	                      0, (int64_t)aslr_offset, (int64_t)dyld_aslr_offset, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		return(lret);
	}

	/*
	 * For 64-bit users, check for presence of a 4GB page zero
	 * which will enable the kernel to share the user's address space
	 * and hence avoid TLB flushes on kernel entry/exit
	 */ 

	if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
	     vm_map_has_4GB_pagezero(map)) {
		vm_map_set_4GB_pagezero(map);
	}
	/*
	 *	Commit to new map.
	 *
	 *	Swap the new map for the old, which consumes our new map
	 *	reference and leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */

	if (create_map) {
		/*
		 * If this is an exec, then we are going to destroy the old
		 * task, and it's correct to halt it; if it's spawn, the
		 * task is not yet running, and it makes no sense.
		 */
	 	if (!spawn) {
			/*
			 * Mark the task as halting and start the other
			 * threads towards terminating themselves.  Then
			 * make sure any threads waiting for a process
			 * transition get informed that we are committed to
			 * this transition, and then finally complete the
			 * task halting (wait for threads and then cleanup
			 * task resources).
			 *
			 * NOTE: task_start_halt() makes sure that no new
			 * threads are created in the task during the transition.
			 * We need to mark the workqueue as exiting before we
			 * wait for threads to terminate (at the end of which
			 * we no longer have a prohibition on thread creation).
			 * 
			 * Finally, clean up any lingering workqueue data structures
			 * that may have been left behind by the workqueue threads
			 * as they exited (and then clean up the work queue itself).
			 */
			kret = task_start_halt(task);
			if (kret != KERN_SUCCESS) {
				return(kret);		
			}
			proc_transcommit(p, 0);
			workqueue_mark_exiting(p);
			task_complete_halt(task);
			workqueue_exit(p);
		}
		old_map = swap_task_map(old_task, thread, map, !spawn);
		vm_map_clear_4GB_pagezero(old_map);
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}
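
The ASLR offsets above are a random page count clamped to the map's maximum slide, then shifted into a byte offset.  That computation in isolation — the use of random() as the entropy source matches the code above, while the page shift of 12 (4 KiB pages) is an assumption for the usage note:

#include <stdint.h>
#include <stdlib.h>

/* Sketch of the slide computation in load_machfile().
 * Assumes max_slide_pages > 0, as the kernel guarantees. */
static uint64_t
compute_slide(uint64_t max_slide_pages, int page_shift)
{
	uint64_t slide = (uint64_t)random();

	slide %= max_slide_pages;	/* clamp to the map's maximum slide */
	return slide << page_shift;	/* convert pages to a byte offset */
}

/* e.g. compute_slide(max_pages, 12) for 4 KiB pages */
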
Example #20
/* get hv object associated with the current task */
void*
hv_get_task_target(void) {
	return current_task()->hv_task_target;
}
Example #21
void
mono_threads_platform_free (MonoThreadInfo *info)
{
	mach_port_deallocate (current_task (), info->native_handle);
}
Example #22
/* associate an hv object with the current task */
void
hv_set_task_target(void *target) {
	current_task()->hv_task_target = target;
}
Example #23
AI::timer_t AI::push_script(AI::script_ptr ais) {
    current_task().emplace_back(std::move(ais));
    return current_script()->start(this);
}
Example #24
/*
 *	Routine:	macx_swapoff
 *	Function:
 *		Syscall interface to remove a file from backing store
 */
int
macx_swapoff(
	struct macx_swapoff_args *args)
{
	__unused int	flags = args->flags;
	kern_return_t	kr;
	mach_port_t	backing_store;

	struct vnode		*vp = 0; 
	struct nameidata 	nd, *ndp;
	struct proc		*p =  current_proc();
	int			i;
	int			error;
	boolean_t		funnel_state;
	vfs_context_t ctx = vfs_context_current();
	int			orig_iopol_disk;

	AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPOFF);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	backing_store = NULL;
	ndp = &nd;

	if ((error = suser(kauth_cred_get(), 0)))
		goto swapoff_bailout;

	/*
	 * Get the vnode for the paging area.
	 */
	NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
	       ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	       (user_addr_t) args->filename, ctx);

	if ((error = namei(ndp)))
		goto swapoff_bailout;
	nameidone(ndp);
	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto swapoff_bailout;
	}
#if CONFIG_MACF
	vnode_lock(vp);
	error = mac_system_check_swapoff(vfs_context_ucred(ctx), vp);
	vnode_unlock(vp);
	if (error)
		goto swapoff_bailout;
#endif

	for(i = 0; i < MAX_BACKING_STORE; i++) {
		if(bs_port_table[i].vp == vp) {
			break;
		}
	}
	if (i == MAX_BACKING_STORE) {
		error = EINVAL;
		goto swapoff_bailout;
	}
	backing_store = (mach_port_t)bs_port_table[i].bs;

	orig_iopol_disk = proc_get_task_policy(current_task(), current_thread(),
	                                       TASK_POLICY_INTERNAL, TASK_POLICY_IOPOL);

	proc_set_task_policy(current_task(), current_thread(), TASK_POLICY_INTERNAL,
	                     TASK_POLICY_IOPOL, IOPOL_THROTTLE);

	kr = default_pager_backing_store_delete(backing_store);

	proc_set_task_policy(current_task(), current_thread(), TASK_POLICY_INTERNAL,
	                     TASK_POLICY_IOPOL, orig_iopol_disk);

	switch (kr) {
		case KERN_SUCCESS:
			error = 0;
			bs_port_table[i].vp = 0;
			/* This vnode is no longer used for swapfile */
			vnode_lock_spin(vp);
			CLR(vp->v_flag, VSWAP);
			vnode_unlock(vp);

			/* get rid of macx_swapon() "long term" reference */
			vnode_rele(vp);

			break;
		case KERN_FAILURE:
			error = EAGAIN;
			break;
		default:
			error = EAGAIN;
			break;
	}

swapoff_bailout:
	/* get rid of macx_swapoff() namei() reference */
	if (vp)
		vnode_put(vp);

	(void) thread_funnel_set(kernel_flock, FALSE);
	AUDIT_MACH_SYSCALL_EXIT(error);

	if (error)
		printf("macx_swapoff FAILED - %d\n", error);
	else
		printf("macx_swapoff SUCCESS\n");

	return(error);
}
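
The interesting shape in macx_swapoff() is the save/set/restore bracket around the slow backing-store delete, so the caller's I/O policy survives the detour through IOPOL_THROTTLE.  A self-contained sketch of that pattern — every name below is a hypothetical stand-in, not a kernel API:

enum model_iopol { MODEL_IOPOL_NORMAL, MODEL_IOPOL_THROTTLE };

static enum model_iopol cur_policy = MODEL_IOPOL_NORMAL;

static enum model_iopol get_io_policy(void) { return cur_policy; }
static void set_io_policy(enum model_iopol p) { cur_policy = p; }

/* Run 'op' with a throttled I/O policy, then restore whatever policy the
 * caller had, whether or not the operation succeeded. */
static int
run_throttled(int (*op)(void))
{
	enum model_iopol saved = get_io_policy();
	int rc;

	set_io_policy(MODEL_IOPOL_THROTTLE);
	rc = op();
	set_io_policy(saved);
	return rc;
}
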
Example #25
/*
 * Supporting some variables requires us to do "real" work.  We 
 * gather some of that here.
 */
static int
sysctl_hw_generic(__unused struct sysctl_oid *oidp, __unused void *arg1,
	int arg2, struct sysctl_req *req)
{
	char dummy[65];
	int  epochTemp;
	ml_cpu_info_t cpu_info;
	int val, doquad;
	long long qval;
	host_basic_info_data_t hinfo;
	kern_return_t kret;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	/*
	 * Test and mask off the 'return quad' flag.
	 * Note that only some things here support it.
	 */
	doquad = arg2 & CTLHW_RETQUAD;
	arg2 &= ~CTLHW_RETQUAD;

	ml_cpu_get_info(&cpu_info);

#define BSD_HOST 1
	kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

	/*
	 * Handle various OIDs.
	 *
	 * OIDs that can return int or quad set val and qval and then break.
	 * Errors and int-only values return inline.
	 */
	switch (arg2) {
	case HW_NCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.max_cpus));
		} else {
			return(EINVAL);
		}
	case HW_AVAILCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.avail_cpus));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_PHYSICALCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.physical_cpu));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_PHYSICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.physical_cpu_max));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_LOGICALCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.logical_cpu));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_LOGICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.logical_cpu_max));
		} else {
			return(EINVAL);
		}
	case HW_PAGESIZE:
	{
		vm_map_t map = get_task_map(current_task());
		val = vm_map_page_size(map);
		qval = (long long)val;
		break;
	}
	case HW_CACHELINE:
		val = cpu_info.cache_line_size;
		qval = (long long)val;
		break;
	case HW_L1ICACHESIZE:
		val = cpu_info.l1_icache_size;
		qval = (long long)val;
		break;
	case HW_L1DCACHESIZE:
		val = cpu_info.l1_dcache_size;
		qval = (long long)val;
		break;
	case HW_L2CACHESIZE:
		if (cpu_info.l2_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		val = cpu_info.l2_cache_size;
		qval = (long long)val;
		break;
	case HW_L3CACHESIZE:
		if (cpu_info.l3_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		val = cpu_info.l3_cache_size;
		qval = (long long)val;
		break;

		/*
		 * Deprecated variables.  We still support these for
		 * backwards compatibility purposes only.
		 */
	case HW_MACHINE:
		bzero(dummy, sizeof(dummy));
		if(!PEGetMachineName(dummy,64))
			return(EINVAL);
		dummy[64] = 0;
		return(SYSCTL_OUT(req, dummy, strlen(dummy) + 1));
	case HW_MODEL:
		bzero(dummy, sizeof(dummy));
		if(!PEGetModelName(dummy,64))
			return(EINVAL);
		dummy[64] = 0;
		return(SYSCTL_OUT(req, dummy, strlen(dummy) + 1));
	case HW_USERMEM:
	{
		int usermem = mem_size - vm_page_wire_count * page_size;

		return(SYSCTL_RETURN(req, usermem));
	}
	case HW_EPOCH:
	        epochTemp = PEGetPlatformEpoch();
		if (epochTemp == -1)
			return(EINVAL);
		return(SYSCTL_RETURN(req, epochTemp));
	case HW_VECTORUNIT: {
		int vector = cpu_info.vector_unit == 0? 0 : 1;
		return(SYSCTL_RETURN(req, vector));
	}
	case HW_L2SETTINGS:
		if (cpu_info.l2_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		return(SYSCTL_RETURN(req, cpu_info.l2_settings));
	case HW_L3SETTINGS:
		if (cpu_info.l3_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		return(SYSCTL_RETURN(req, cpu_info.l3_settings));
	default:
		return(ENOTSUP);
	}
	/*
	 * Callers may come to us with either int or quad buffers.
	 */
	if (doquad) {
		return(SYSCTL_RETURN(req, qval));
	}
	return(SYSCTL_RETURN(req, val));
}
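
From user space, the HW_PAGESIZE case above is normally reached through sysctl(3).  A minimal caller using the documented sysctlbyname() interface; the quad-sized buffer matches the CTLHW_RETQUAD path described in the code above:

#include <stdio.h>
#include <sys/sysctl.h>

int main(void)
{
	long long pagesize = 0;	/* hw.pagesize is exported via the RETQUAD path */
	size_t len = sizeof(pagesize);

	/* "hw.pagesize" lands in the HW_PAGESIZE case of sysctl_hw_generic(). */
	if (sysctlbyname("hw.pagesize", &pagesize, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return 1;
	}
	printf("page size: %lld bytes\n", pagesize);
	return 0;
}
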
Example #26
static
void
ux_handler(void)
{
    task_t		self = current_task();
    mach_port_name_t	exc_port_name;
    mach_port_name_t	exc_set_name;

    /* self->kernel_vm_space = TRUE; */
    ux_handler_self = self;


    /*
     *	Allocate a port set that we will receive on.
     */
    if (mach_port_allocate(get_task_ipcspace(ux_handler_self), MACH_PORT_RIGHT_PORT_SET,  &exc_set_name) != MACH_MSG_SUCCESS)
	    panic("ux_handler: port_set_allocate failed");

    /*
     *	Allocate an exception port and use object_copyin to
     *	translate it to the global name.  Put it into the set.
     */
    if (mach_port_allocate(get_task_ipcspace(ux_handler_self), MACH_PORT_RIGHT_RECEIVE, &exc_port_name) != MACH_MSG_SUCCESS)
	panic("ux_handler: port_allocate failed");
    if (mach_port_move_member(get_task_ipcspace(ux_handler_self),
    			exc_port_name,  exc_set_name) != MACH_MSG_SUCCESS)
	panic("ux_handler: port_set_add failed");

    if (ipc_object_copyin(get_task_ipcspace(self), exc_port_name,
			MACH_MSG_TYPE_MAKE_SEND, 
			(void *) &ux_exception_port) != MACH_MSG_SUCCESS)
		panic("ux_handler: object_copyin(ux_exception_port) failed");

    proc_list_lock();
    thread_wakeup(&ux_exception_port);
    proc_list_unlock();

    /* Message handling loop. */

    for (;;) {
	struct rep_msg {
		mach_msg_header_t Head;
		NDR_record_t NDR;
		kern_return_t RetCode;
	} rep_msg;
	struct exc_msg {
		mach_msg_header_t Head;
		/* start of the kernel processed data */
		mach_msg_body_t msgh_body;
		mach_msg_port_descriptor_t thread;
		mach_msg_port_descriptor_t task;
		/* end of the kernel processed data */
		NDR_record_t NDR;
		exception_type_t exception;
		mach_msg_type_number_t codeCnt;
		mach_exception_data_t code;
		/* padding: occasionally avoids MACH_RCV_TOO_LARGE problems */
		char pad[512];
	} exc_msg;
	mach_port_name_t	reply_port;
	kern_return_t	 result;

	exc_msg.Head.msgh_local_port = CAST_MACH_NAME_TO_PORT(exc_set_name);
	exc_msg.Head.msgh_size = sizeof (exc_msg);
#if 0
	result = mach_msg_receive(&exc_msg.Head);
#else
	result = mach_msg_receive(&exc_msg.Head, MACH_RCV_MSG,
			     sizeof (exc_msg), exc_set_name,
			     MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL,
			     0);
#endif
	if (result == MACH_MSG_SUCCESS) {
	    reply_port = CAST_MACH_PORT_TO_NAME(exc_msg.Head.msgh_remote_port);

	    if (mach_exc_server(&exc_msg.Head, &rep_msg.Head)) {
		result = mach_msg_send(&rep_msg.Head, MACH_SEND_MSG,
			sizeof (rep_msg),MACH_MSG_TIMEOUT_NONE,MACH_PORT_NULL);
		if (reply_port != 0 && result != MACH_MSG_SUCCESS)
			mach_port_deallocate(get_task_ipcspace(ux_handler_self), reply_port);
	    }

	}
	else if (result == MACH_RCV_TOO_LARGE)
		/* ignore oversized messages */;
	else
		panic("exception_handler");
    }
}
Example #27
IOReturn RTL8139::newUserClient(	task_t			owningTask,
									void*,						// security id (unused)
									UInt32			type,		// connection type magic number
									IOUserClient	**handler )	// returned handler
{
	IOReturn			ior		= kIOReturnSuccess;
	RTL8139UserClient	*client	= NULL;
    bool				privileged;


	ELG( type, type, 'Usr+', "RTL8139::newUserClient" );
    
    privileged = IOUserClient::clientHasPrivilege( current_task(), kIOClientPrivilegeAdministrator ) == kIOReturnSuccess;
	if ( !privileged )
	{
		ELG( 0, 0, 'nuc-', "RTL8139::newUserClient - task is not privileged." );
		return kIOReturnNotPrivileged;
	}
		// Check that this is a user client type that we support.
		// type is known only to this driver's user and kernel
		// classes. It could be used, for example, to define
		// read or write privileges. In this case, we look for
		// a private value.
	if ( type != 'Rltk' )
	{		// TODO: rather than returning an error, call the superclass and return its code.
		ELG( 0, type, 'Usr-', "RTL8139::newUserClient - unlucky." );
		return kIOReturnError;
	}

		// Instantiate a new client for the requesting task:

	client = RTL8139UserClient::withTask( owningTask );
	if ( !client )
	{
		ELG( 0, 0, 'usr-', "Realtek::newUserClient: Can't create user client" );
		return kIOReturnError;
	}

	if ( ior == kIOReturnSuccess )
	{		// Attach ourself to the client so that this client instance can call us.
		if ( client->attach( this ) == false )
		{
			ior = kIOReturnError;
			ELG( 0, 0, 'USR-', "Realtek::newUserClient: Can't attach user client" );
		}
	}

	if ( ior == kIOReturnSuccess )
	{		// Start the client so it can accept requests.
		if ( client->start( this ) == false )
		{
			ior = kIOReturnError;
			ELG( 0, 0, 'USR-', "Realtek::newUserClient: Can't start user client" );
		}
	}

	if ( client && (ior != kIOReturnSuccess) )
	{
		client->detach( this );
		client->release();
		client = 0;
	}

	*handler = client;
	return ior;
}/* end newUserClient */
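
On the user side, this client would be instantiated through the standard IOKit calls with the same magic type.  A hedged sketch — the "RTL8139" service class name is an assumption, while 'Rltk' is the type checked in newUserClient() above:

#include <stdio.h>
#include <IOKit/IOKitLib.h>

int main(void)
{
	io_service_t service = IOServiceGetMatchingService(kIOMasterPortDefault,
	                           IOServiceMatching("RTL8139"));	/* assumed class name */
	if (service == IO_OBJECT_NULL)
		return 1;

	io_connect_t connect;
	kern_return_t kr = IOServiceOpen(service, mach_task_self(), 'Rltk', &connect);
	if (kr != kIOReturnSuccess) {
		fprintf(stderr, "IOServiceOpen failed: 0x%x\n", kr);
		IOObjectRelease(service);
		return 1;
	}

	IOServiceClose(connect);
	IOObjectRelease(service);
	return 0;
}
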
Example #28
kern_return_t
catch_mach_exception_raise(
        __unused mach_port_t exception_port,
        mach_port_t thread,
        mach_port_t task,
        exception_type_t exception,
        mach_exception_data_t code,
        __unused mach_msg_type_number_t codeCnt
)
{
	task_t			self = current_task();
	thread_t		th_act;
	ipc_port_t 		thread_port;
	struct proc		*p;
	kern_return_t		result = MACH_MSG_SUCCESS;
	int			ux_signal = 0;
	mach_exception_code_t 	ucode = 0;
	struct uthread 		*ut;
	mach_port_name_t thread_name = CAST_MACH_PORT_TO_NAME(thread);
	mach_port_name_t task_name = CAST_MACH_PORT_TO_NAME(task);

	/*
	 *	Convert local thread name to global port.
	 */
	if (MACH_PORT_VALID(thread_name) &&
	    (ipc_object_copyin(get_task_ipcspace(self), thread_name,
			MACH_MSG_TYPE_PORT_SEND,
			(void *) &thread_port) == MACH_MSG_SUCCESS)) {
	    if (IPC_PORT_VALID(thread_port)) {
		th_act = convert_port_to_thread(thread_port);
		ipc_port_release(thread_port);
	    } else {
		th_act = THREAD_NULL;
	    }

	/*
	 *	Catch bogus ports
	 */
	if (th_act != THREAD_NULL) {

	    /*
	     *	Convert exception to unix signal and code.
	     */
	    ux_exception(exception, code[0], code[1], &ux_signal, &ucode);

	    ut = get_bsdthread_info(th_act);
	    p = proc_findthread(th_act);

	    /* Can't deliver a signal without a bsd process reference */
	    if (p == NULL) {
		    ux_signal = 0;
		    result = KERN_FAILURE;
	    }

	    /*
	     * Stack overflow should result in a SIGSEGV signal
	     * on the alternate stack, but we have one or more
	     * guard pages after the stack top, so we would get a
	     * KERN_PROTECTION_FAILURE exception instead of
	     * KERN_INVALID_ADDRESS, resulting in a SIGBUS signal.
	     * Detect that situation and select the correct signal.
	     */
	    if (code[0] == KERN_PROTECTION_FAILURE &&
		ux_signal == SIGBUS) {
		    user_addr_t		sp, stack_min, stack_max;
		    int			mask;
		    struct sigacts	*ps;

		    sp = code[1];

		    stack_max = p->user_stack;
		    stack_min = p->user_stack - MAXSSIZ;
		    if (sp >= stack_min &&
			sp < stack_max) {
			    /*
			     * This is indeed a stack overflow.  Deliver a
			     * SIGSEGV signal.
			     */
			    ux_signal = SIGSEGV;

			    /*
			     * If the thread/process is not ready to handle
			     * SIGSEGV on an alternate stack, force-deliver
			     * SIGSEGV with a SIG_DFL handler.
			     */
			    mask = sigmask(ux_signal);
			    ps = p->p_sigacts;
			    if ((p->p_sigignore & mask) ||
				(ut->uu_sigwait & mask) ||
				(ut->uu_sigmask & mask) ||
				(ps->ps_sigact[SIGSEGV] == SIG_IGN) ||
				(! (ps->ps_sigonstack & mask))) {
				    p->p_sigignore &= ~mask;
				    p->p_sigcatch &= ~mask;
				    ps->ps_sigact[SIGSEGV] = SIG_DFL;
				    ut->uu_sigwait &= ~mask;
				    ut->uu_sigmask &= ~mask;
			    }
		    }
	    }
	    /*
	     *	Send signal.
	     */
	    if (ux_signal != 0) {
			ut->uu_exception = exception;
			//ut->uu_code = code[0]; // filled in by threadsignal
			ut->uu_subcode = code[1];			
			threadsignal(th_act, ux_signal, code[0]);
	    }
	    if (p != NULL) 
		    proc_rele(p);
	    thread_deallocate(th_act);
	}
	else
	    result = KERN_INVALID_ARGUMENT;
    }
    else
    	result = KERN_INVALID_ARGUMENT;

    /*
     *	Delete our send rights to the task port.
     */
    (void)mach_port_deallocate(get_task_ipcspace(ux_handler_self), task_name);

    return (result);
}
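
The overflow test above reduces to a range check: a faulting address within MAXSSIZ below the stack top is reclassified from SIGBUS to SIGSEGV.  That check in isolation — the 64 MB value for MAXSSIZ is an assumption for the sketch:

#include <stdbool.h>
#include <stdint.h>

#define MODEL_MAXSSIZ	(64ULL * 1024 * 1024)	/* assumed stack size limit */

/* Mirrors the sp-in-[stack_max - MAXSSIZ, stack_max) test above. */
static bool
is_stack_overflow(uint64_t sp, uint64_t stack_top)
{
	return sp >= stack_top - MODEL_MAXSSIZ && sp < stack_top;
}
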
Example #29
/*
 *	Routine:	exception
 *	Purpose:
 *		The current thread caught an exception.
 *		We make an up-call to the thread's exception server.
 *	Conditions:
 *		Nothing locked and no resources held.
 *		Called from an exception context, so
 *		thread_exception_return and thread_kdb_return
 *		are possible.
 *	Returns:
 *		Doesn't return, except when the exception is EXC_CRASH.
 */
void
exception_triage(
    exception_type_t	exception,
    mach_exception_data_t	code,
    mach_msg_type_number_t  codeCnt)
{
    thread_t		thread;
    task_t			task;
    host_priv_t		host_priv;
    struct exception_action *excp;
    lck_mtx_t			*mutex;
    kern_return_t		kr;

    assert(exception != EXC_RPC_ALERT);

    if (exception == KERN_SUCCESS)
        panic("exception");

    /*
     * Try to raise the exception at the activation level.
     */
    thread = current_thread();
    mutex = &thread->mutex;
    excp = &thread->exc_actions[exception];
    kr = exception_deliver(thread, exception, code, codeCnt, excp, mutex);
    if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
        goto out;

    /*
     * Maybe the task level will handle it.
     */
    task = current_task();
    mutex = &task->lock;
    excp = &task->exc_actions[exception];
    kr = exception_deliver(thread, exception, code, codeCnt, excp, mutex);
    if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
        goto out;

    /*
     * How about at the host level?
     */
    host_priv = host_priv_self();
    mutex = &host_priv->lock;
    excp = &host_priv->exc_actions[exception];
    kr = exception_deliver(thread, exception, code, codeCnt, excp, mutex);
    if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
        goto out;

    /*
     * Nobody handled it, terminate the task.
     */

#if	MACH_KDB
    if (debug_user_with_kdb) {
        /*
         *	Debug the exception with kdb.
         *	If kdb handles the exception,
         *	then thread_kdb_return won't return.
         */
        db_printf("No exception server, calling kdb...\n");
        thread_kdb_return();
    }
#endif	/* MACH_KDB */

    (void) task_terminate(task);

out:
    if (exception != EXC_CRASH)
        thread_exception_return();
    return;
}
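
The triage order above — thread, then task, then host, then terminate — is a first-responder cascade.  Schematically, with an invented function-pointer type standing in for the three exception_action levels:

/* Try each level in turn; stop at the first handler that accepts. */
typedef int (*deliver_fn)(int exception);	/* 0 ~ KERN_SUCCESS */

static int
triage(int exception, deliver_fn levels[], int nlevels)
{
	int i;

	for (i = 0; i < nlevels; i++)
		if (levels[i](exception) == 0)
			return 0;
	return -1;	/* nobody handled it: caller terminates the task */
}
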
Example #30
nw_buffer_t mk_rpc(nw_ep ep, nw_buffer_t msg, nw_options options,
		   int time_out) {
  nw_buffer_t rc;
  nw_result nrc;
  nw_ep sender;
  int dev;
  nw_pv_t pv;
  nw_ecb_t ecb;
  nw_tx_header_t header, first_header, previous_header;
  nw_hecb_t hecb;
  nw_waiter_t w;
  nw_ep_owned_t waited;

  nw_lock();
  if (ep >= MAX_EP || (pv = hect[ep].pv) == NULL) {
    rc = NW_BUFFER_ERROR;
  } else {
    while (pv != NULL && pv->owner != current_task())
      pv = pv->next;
    if (pv == NULL) {
      rc = NW_BUFFER_ERROR;
    } else {
      ecb = &ect[ep];
      if (ecb->state == NW_INEXISTENT ||
	  (ecb->protocol == NW_SEQ_PACKET && ecb->conn == NULL)) {
	rc = NW_BUFFER_ERROR;
      } else {
	first_header = header = nc_tx_header_allocate();
	previous_header = NULL;
	rc = NULL;
	while (header != NULL) {
	  if ((char *) msg < pv->buf_start ||
	      (char *) msg + sizeof(nw_buffer_s) > pv->buf_end ||
	      ((int) msg & 0x3) || (msg->block_offset & 0x3) ||
	      (msg->block_length & 0x3) || !msg->buf_used ||
	      (char *) msg + msg->buf_length > pv->buf_end ||
	      msg->block_offset + msg->block_length > msg->buf_length) {
	    rc = NW_BUFFER_ERROR;
	    break;
	  } else {
	    if (previous_header == NULL) {
	      if (ecb->protocol == NW_SEQ_PACKET)
		header->peer = ecb->conn->peer;
	      else
		header->peer = msg->peer;
	    } else {
	      previous_header->next = header;
	    }
	    header->buffer = (nw_buffer_t) ((char *) msg - pv->buf_start +
					    ecb->buf_start);
	    header->block = (char *) header->buffer + msg->block_offset;
	    if (!msg->block_deallocate)
	      header->buffer = NULL;
	    header->msg_length = 0;
	    header->block_length = msg->block_length;
	    first_header->msg_length += header->block_length;
	    header->next = NULL;
	    if (msg->buf_next == NULL)
	      break;
	    msg = msg->buf_next;
	    previous_header = header;
	    header = nc_tx_header_allocate();
	  }
	}
	if (header == NULL) {
	  nc_tx_header_deallocate(first_header);
	  rc = NW_BUFFER_ERROR;
	} else if (rc != NW_BUFFER_ERROR) {
	  dev = NW_DEVICE(first_header->peer.rem_addr_1);
	  if (ecb->protocol != NW_DATAGRAM ||
	      devct[dev].type != NW_CONNECTION_ORIENTED) {
	    sender = first_header->peer.local_ep;
	    nrc = NW_SUCCESS;
	  } else {
	    sender = nc_line_lookup(&first_header->peer);
	    if (sender == -1) {
	      nrc = NW_BAD_ADDRESS;
	    } else if (sender > 0) {
	      nrc = NW_SUCCESS;
	    } else {
	      nrc = mk_endpoint_allocate_internal(&sender, NW_LINE,
						  NW_AUTO_ACCEPT, 0, TRUE);
	      if (nrc == NW_SUCCESS) {
		nrc = mk_connection_open_internal(sender,
			                first_header->peer.rem_addr_1,
					first_header->peer.rem_addr_2,
					MASTER_LINE_EP);
		if (nrc == NW_SUCCESS) 
		  nc_line_update(&first_header->peer, sender);
	      }
	    }
	  }
	  if (nrc == NW_SUCCESS) {
	    first_header->sender = sender;
	    first_header->options = options;
	    rc = (*(devct[dev].entry->rpc)) (sender, first_header, options);
	    if (rc != NULL && rc != NW_BUFFER_ERROR) {
	      rc = (nw_buffer_t) ((char *) rc - ecb->buf_start +
				  pv->buf_start);
	    } else if (rc == NULL && time_out != 0 && nw_free_waiter != NULL &&
		       (time_out == -1 || nw_free_waited != NULL)) {
	      w = nw_free_waiter;
	      nw_free_waiter = w->next;
	      w->waiter = current_thread();
	      w->next = NULL;
	      hecb = &hect[ep];
	      if (hecb->rx_last == NULL)
		hecb->rx_first = hecb->rx_last = w;
	      else
		hecb->rx_last = hecb->rx_last->next = w;
	      assert_wait(0, TRUE);
	      if (time_out != -1) {
		waited = nw_free_waited;
		nw_free_waited = waited->next;
		waited->ep = ep;
		waited->next = NULL;
		current_thread()->nw_ep_waited = waited;
		current_thread()->wait_result = NULL;
		if (!current_thread()->timer.set) 
		  thread_set_timeout(time_out);
	      } else {
		current_thread()->nw_ep_waited = NULL;
	      }
	      simple_unlock(&nw_simple_lock);
	      thread_block(mk_return);
	    }
	  }
	}
      }
    }
  }
  nw_unlock();
  return rc;
}