Example #1
File: kern_fork.c Project: 0xffea/xnu
/*
 * proc_vfork_end
 *
 * Description:	stop a vfork on a process
 *
 * Parameters:	parent_proc		process leaving vfork state
 *
 * Returns:	(void)
 *
 * Notes:	Decrements the count; currently, reentrancy of vfork()
 *		is unsupported on the current process
 */
void
proc_vfork_end(proc_t parent_proc)
{
	proc_lock(parent_proc);
	parent_proc->p_vforkcnt--;
	if (parent_proc->p_vforkcnt < 0)
		panic("vfork cnt is -ve");
	/* reduce the vfork count; clear the flag when it goes to 0 */
	if (parent_proc->p_vforkcnt == 0)
		parent_proc->p_lflag  &= ~P_LVFORK;
	proc_unlock(parent_proc);
}
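The matching counterpart in the same file is proc_vfork_begin(), which takes the vfork reference that proc_vfork_end() drops here. The body below is a sketch reconstructed from the pattern above, not quoted from this project; treat the exact field updates as an assumption.

void
proc_vfork_begin(proc_t parent_proc)
{
	proc_lock(parent_proc);
	/* assumed mirror of proc_vfork_end(): mark the parent as in vfork and take a count */
	parent_proc->p_lflag |= P_LVFORK;
	parent_proc->p_vforkcnt++;
	proc_unlock(parent_proc);
}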
Example #2
zebra_image_data_handler_t*
zebra_processor_set_data_handler (zebra_processor_t *proc,
                                  zebra_image_data_handler_t *handler,
                                  const void *userdata)
{
    if(proc_lock(proc) < 0)
        return(NULL);
    zebra_image_data_handler_t *result = proc->handler;
    proc->handler = handler;
    proc->userdata = userdata;
    proc_unlock(proc);
    return(result);
}
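A minimal caller-side sketch of registering a decode callback with this function follows. The handler signature (an image pointer plus the opaque userdata pointer) and the my_data_handler/install_handler names are assumptions for illustration, not taken from the example above.

/* hypothetical application callback; signature assumed from the zebra image-data-handler typedef */
static void my_data_handler (zebra_image_t *image, const void *userdata)
{
    (void)image;            /* decoded symbols would be read from the image here */
    (void)userdata;         /* opaque pointer passed through from registration */
}

static void install_handler (zebra_processor_t *proc)
{
    /* register the callback; the previously installed handler (possibly NULL) is returned */
    zebra_image_data_handler_t *old =
        zebra_processor_set_data_handler(proc, my_data_handler, NULL);
    (void)old;
}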
Example #3
/* make a thread-local copy of polling data */
static inline void proc_cache_polling (zebra_processor_t *proc)
{
    (void)proc_lock(proc);
    int n = proc->polling.num;
    zprintf(5, "%d fds\n", n);
    proc->thr_polling.num = n;
    alloc_polls(&proc->thr_polling);
    memcpy(proc->thr_polling.fds, proc->polling.fds,
           n * sizeof(struct pollfd));
    memcpy(proc->thr_polling.handlers, proc->polling.handlers,
           n * sizeof(poll_handler_t*));
    proc_unlock(proc);
}
Example #4
static void
proc_set_wqptr(struct proc *p, void *y) {
	proc_lock(p);

	assert(y == NULL || p->p_wqptr == WQPTR_IS_INITING_VALUE);

	p->p_wqptr = y;

	if (y != NULL){
		wakeup(&p->p_wqptr);
	}

	proc_unlock(p);
}
Example #5
static inline int proc_event_wait_unthreaded (zebra_processor_t *proc,
                                              unsigned event,
                                              int timeout,
                                              struct timespec *abstime)
{
    int blocking = proc->active && zebra_video_get_fd(proc->video) < 0;
    proc->events &= ~event;
    /* unthreaded, poll here for window input */
    while(!(proc->events & event)) {
        if(blocking) {
            zebra_image_t *img = zebra_video_next_image(proc->video);
            if(!img)
                return(-1);
            process_image(proc, img);
            zebra_image_destroy(img);
        }
        int reltime = timeout;
        if(reltime >= 0) {
            struct timespec now;
#if _POSIX_TIMERS > 0
            clock_gettime(CLOCK_REALTIME, &now);
#else
            struct timeval ustime;
            gettimeofday(&ustime, NULL);
            now.tv_nsec = ustime.tv_usec * 1000;
            now.tv_sec = ustime.tv_sec;
#endif
            reltime = ((abstime->tv_sec - now.tv_sec) * 1000 +
                       (abstime->tv_nsec - now.tv_nsec) / 1000000);
            if(reltime <= 0)
                return(0);
        }
        if(blocking && (reltime < 0 || reltime > 10))
            reltime = 10;
        if(proc->polling.num)
            proc_poll_inputs(proc, (poll_desc_t*)&proc->polling, reltime);
        else if(!blocking) {
            proc_unlock(proc);
            struct timespec sleepns, remns;
            sleepns.tv_sec = timeout / 1000;
            sleepns.tv_nsec = (timeout % 1000) * 1000000;
            while(nanosleep(&sleepns, &remns) && errno == EINTR)
                sleepns = remns;
            (void)proc_lock(proc);
            return(0);
        }
    }
    return(1);
}
Example #6
int zebra_processor_user_wait (zebra_processor_t *proc,
                               int timeout)
{
    if(proc_lock(proc) < 0)
        return(-1);
    int rc = -1;
    if(proc->visible || proc->active || timeout > 0)
        rc = proc_event_wait(proc, EVENT_INPUT, timeout);
    if(rc > 0)
        rc = proc->input;
    proc_unlock(proc);
    if(!proc->visible)
        return(err_capture(proc, SEV_WARNING, ZEBRA_ERR_CLOSED, __func__,
                           "display window not available for input"));
    return(rc);
}
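A sketch of how an application might pair this with zebra_processor_set_visible() (see example #11) to show the window and block for one input event; the assumption that a negative timeout means "wait indefinitely" follows the usual zebra/zbar convention and should be verified against the headers.

/* hypothetical helper: show the display window, then wait for a single keypress */
static int wait_for_keypress (zebra_processor_t *proc)
{
    if(zebra_processor_set_visible(proc, 1) < 0)
        return(-1);
    /* timeout is in milliseconds; a negative value is assumed to mean "block forever" */
    return(zebra_processor_user_wait(proc, -1));
}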
Example #7
/* Not called from probe context */
proc_t * 
sprlock(pid_t pid)
{
	proc_t* p;

	if ((p = proc_find(pid)) == PROC_NULL) {
		return PROC_NULL;
	}

	task_suspend(p->task);

	proc_lock(p);

	lck_mtx_lock(&p->p_dtrace_sprlock);

	return p;
}
Example #8
File: dtrace_glue.c Project: argp/xnu
/* Not called from probe context */
proc_t *
sprlock(pid_t pid)
{
	proc_t* p;

	if ((p = proc_find(pid)) == PROC_NULL) {
		return PROC_NULL;
	}

	task_suspend_internal(p->task);

	dtrace_sprlock(p);

	proc_lock(p);

	return p;
}
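Both variants pair with an sprunlock() that tears the state down in roughly reverse order: drop the sprlock and the proc lock, resume the task, and release the proc reference taken by proc_find(). The body below is a reconstruction of that counterpart and is an assumption, not quoted from either project; the exact locking helper and resume call differ between XNU versions.

/* assumed counterpart to sprlock() */
void
sprunlock(proc_t *p)
{
	if (p != PROC_NULL) {
		lck_mtx_unlock(&p->p_dtrace_sprlock);
		proc_unlock(p);
		task_resume(p->task);
		proc_rele(p);
	}
}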
Example #9
void zebra_processor_destroy (zebra_processor_t *proc)
{
    (void)proc_lock(proc);
    proc_destroy_thread(proc->video_thread, &proc->video_started);
    proc_destroy_thread(proc->input_thread, &proc->input_started);
    if(proc->window) {
        zebra_window_destroy(proc->window);
        proc->window = NULL;
        _zebra_window_close(proc);
    }
    if(proc->video) {
        zebra_video_destroy(proc->video);
        proc->video = NULL;
    }
    if(proc->scanner) {
        zebra_image_scanner_destroy(proc->scanner);
        proc->scanner = NULL;
    }
    proc_unlock(proc);
    err_cleanup(&proc->err);
#ifdef HAVE_LIBPTHREAD
    pthread_cond_destroy(&proc->event);
    pthread_cond_destroy(&proc->cond);
    pthread_mutex_destroy(&proc->mutex);
#endif
    if(proc->polling.fds) {
        free(proc->polling.fds);
        proc->polling.fds = NULL;
    }
    if(proc->polling.handlers) {
        free(proc->polling.handlers);
        proc->polling.handlers = NULL;
    }
    if(proc->thr_polling.fds) {
        free(proc->thr_polling.fds);
        proc->thr_polling.fds = NULL;
    }
    if(proc->thr_polling.handlers) {
        free(proc->thr_polling.handlers);
        proc->thr_polling.handlers = NULL;
    }
    free(proc);
}
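For orientation, a typical create/init/use/destroy lifecycle around this teardown routine might look like the sketch below. The zebra_processor_create() and zebra_processor_init() argument meanings, the <zebra.h> header name, and the "/dev/video0" device path are assumptions to be checked against the library headers.

#include <zebra.h>    /* assumed public header */

int main (void)
{
    zebra_processor_t *proc = zebra_processor_create(1 /* threaded (assumed flag) */);
    if(!proc)
        return(1);
    if(!zebra_processor_init(proc, "/dev/video0", 1 /* enable display (assumed) */)) {
        /* scanning / event loop would run here */
    }
    zebra_processor_destroy(proc);
    return(0);
}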
Example #10
int zebra_process_image (zebra_processor_t *proc,
                         zebra_image_t *img)
{
    if(proc_lock(proc) < 0)
        return(-1);
    int rc = 0;
    if(img && proc->window)
        rc = _zebra_window_resize(proc,
                                  zebra_image_get_width(img),
                                  zebra_image_get_height(img));
    if(!rc) {
        zebra_image_scanner_enable_cache(proc->scanner, 0);
        rc = process_image(proc, img);
        if(proc->active)
            zebra_image_scanner_enable_cache(proc->scanner, 1);
    }
    proc_unlock(proc);
    return(rc);
}
Example #11
int zebra_processor_set_visible (zebra_processor_t *proc,
                                 int visible)
{
    if(proc_lock(proc) < 0)
        return(-1);
    int rc = 0;
    if(proc->window) {
        if(proc->video)
            rc = _zebra_window_resize(proc,
                                      zebra_video_get_width(proc->video),
                                      zebra_video_get_height(proc->video));
        if(!rc)
            rc = _zebra_window_set_visible(proc, visible);
    }
    else if(visible)
        rc = err_capture(proc, SEV_ERROR, ZEBRA_ERR_INVALID, __func__,
                         "processor display window not initialized");
    proc_unlock(proc);
    return(rc);
}
Example #12
static boolean_t
proc_init_wqptr_or_wait(struct proc *p) {
	proc_lock(p);

	if (p->p_wqptr == NULL){
		p->p_wqptr = WQPTR_IS_INITING_VALUE;
		proc_unlock(p);

		return TRUE;
	} else if (p->p_wqptr == WQPTR_IS_INITING_VALUE){
		assert_wait(&p->p_wqptr, THREAD_UNINT);
		proc_unlock(p);
		thread_block(THREAD_CONTINUE_NULL);

		return FALSE;
	} else {
		proc_unlock(p);

		return FALSE;
	}
}
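Taken together with proc_set_wqptr() from example #4, the intended caller-side pattern is an init-or-wait loop: the thread that wins the race publishes the pointer and wakes the others, while every other thread blocks until it does. The sketch below is a hypothetical caller; proc_get_wqptr() and create_workqueue() are stand-in names for whatever reads the field and allocates the object.

/* hypothetical caller illustrating the init-or-wait pattern (not from the source tree) */
static void *
get_or_init_wqptr(struct proc *p)
{
	for (;;) {
		void *wq = proc_get_wqptr(p);		/* assumed accessor for p->p_wqptr */
		if (wq != NULL && wq != WQPTR_IS_INITING_VALUE)
			return wq;
		if (proc_init_wqptr_or_wait(p)) {
			/* we won the race: allocate, then publish and wake any waiters */
			proc_set_wqptr(p, create_workqueue(p));	/* hypothetical allocator */
		}
		/* otherwise we blocked until the initializer finished; loop and re-check */
	}
}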
Example #13
static int
sd_callback3(proc_t p, void * args)
{
	struct sd_iterargs * sd = (struct sd_iterargs *)args;
	vfs_context_t ctx = vfs_context_current();

	int setsdstate = sd->setsdstate;

	proc_lock(p);
	p->p_shutdownstate = setsdstate;
	if (p->p_stat != SZOMB) {
	       /*
		* NOTE: following code ignores sig_lock and plays
		* with exit_thread correctly.  This is OK unless we
		* are a multiprocessor, in which case I do not
		* understand the sig_lock.  This needs to be fixed.
		* XXX
		*/
		if (p->exit_thread) {	/* someone already doing it */
			proc_unlock(p);
			/* give him a chance */
			thread_block(THREAD_CONTINUE_NULL);
		} else {
			p->exit_thread = current_thread();
			printf(".");

			sd_log(ctx, "%s[%d] had to be forced closed with exit1().\n", p->p_comm, p->p_pid);

			proc_unlock(p);
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
					      p->p_pid, 0, 1, 0, 0);
			sd->activecount++;
			exit1(p, 1, (int *)NULL);
		}
	} else {
		proc_unlock(p);
	}

	return PROC_RETURNED;
}
Example #14
/*
 * vfork_return
 *
 * Description:	"Return" to parent vfork thread() following execve/_exit;
 *		this is done by reassociating the parent process structure
 *		with the task, thread, and uthread.
 *
 *		Refer to the ASCII art above vfork() to figure out the
 *		state we're undoing.
 *
 * Parameters:	child_proc		Child process
 *		retval			System call return value array
 *		rval			Return value to present to parent
 *
 * Returns:	void
 *
 * Notes:	The caller resumes or exits the parent, as appropriate, after
 *		calling this function.
 */
void
vfork_return(proc_t child_proc, int32_t *retval, int rval)
{
	task_t parent_task = get_threadtask(child_proc->p_vforkact);
	proc_t parent_proc = get_bsdtask_info(parent_task);
	thread_t th = current_thread();
	uthread_t uth = get_bsdthread_info(th);
	
	act_thread_catt(uth->uu_userstate);

	/* clear vfork state in parent proc structure */
	proc_vfork_end(parent_proc);

	/* REPATRIATE PARENT TASK, THREAD, UTHREAD */
	uth->uu_userstate = 0;
	uth->uu_flag &= ~UT_VFORK;
	/* restore thread-set-id state */
	if (uth->uu_flag & UT_WASSETUID) {
		uth->uu_flag |= UT_SETUID;
		uth->uu_flag &= ~UT_WASSETUID;
	}
	uth->uu_proc = 0;
	uth->uu_sigmask = uth->uu_vforkmask;

	proc_lock(child_proc);
	child_proc->p_lflag &= ~P_LINVFORK;
	child_proc->p_vforkact = 0;
	proc_unlock(child_proc);

	thread_set_parent(th, rval);

	if (retval) {
		retval[0] = rval;
		retval[1] = 0;			/* mark parent */
	}
}
Example #15
File: kern_fork.c Project: 0xffea/xnu
/* 
 * This routine frees all the BSD context in the uthread except the credential.
 * It does not free the uthread structure itself.
 */
void
uthread_cleanup(task_t task, void *uthread, void * bsd_info)
{
	struct _select *sel;
	uthread_t uth = (uthread_t)uthread;
	proc_t p = (proc_t)bsd_info;


	if (uth->uu_lowpri_window || uth->uu_throttle_info) {
	        /*
		 * task is marked as a low priority I/O type
		 * and we've somehow managed to not dismiss the throttle
		 * through the normal exit paths back to user space...
		 * no need to throttle this thread since it's going away,
		 * but we do need to update our bookkeeping w/r/t throttled threads
		 *
		 * Calling this routine will clean up any throttle info reference
		 * still in use by the thread.
		 */
		throttle_lowpri_io(FALSE);
	}
	/*
	 * Per-thread audit state should never last beyond system
	 * call return.  Since we don't audit the thread creation/
	 * removal, the thread state pointer should never be
	 * non-NULL when we get here.
	 */
	assert(uth->uu_ar == NULL);

	sel = &uth->uu_select;
	/* cleanup the select bit space */
	if (sel->nbytes) {
		FREE(sel->ibits, M_TEMP);
		FREE(sel->obits, M_TEMP);
		sel->nbytes = 0;
	}

	if (uth->uu_cdir) {
		vnode_rele(uth->uu_cdir);
		uth->uu_cdir = NULLVP;
	}

	if (uth->uu_allocsize && uth->uu_wqset){
		kfree(uth->uu_wqset, uth->uu_allocsize);
		sel->count = 0;
		uth->uu_allocsize = 0;
		uth->uu_wqset = 0;
		sel->wql = 0;
	}

	if(uth->pth_name != NULL)
	{
		kfree(uth->pth_name, MAXTHREADNAMESIZE);
		uth->pth_name = 0;
	}
	if ((task != kernel_task) && p) {

		if (((uth->uu_flag & UT_VFORK) == UT_VFORK) && (uth->uu_proc != PROC_NULL))  {
			vfork_exit_internal(uth->uu_proc, 0, 1);
		}
		/*
		 * Remove the thread from the process list and
		 * transfer [appropriate] pending signals to the process.
		 */
		if (get_bsdtask_info(task) == p) { 
			proc_lock(p);
			TAILQ_REMOVE(&p->p_uthlist, uth, uu_list);
			p->p_siglist |= (uth->uu_siglist & execmask & (~p->p_sigignore | sigcantmask));
			proc_unlock(p);
		}
#if CONFIG_DTRACE
		struct dtrace_ptss_page_entry *tmpptr = uth->t_dtrace_scratch;
		uth->t_dtrace_scratch = NULL;
		if (tmpptr != NULL) {
			dtrace_ptss_release_entry(p, tmpptr);
		}
#endif
	}
}
Example #16
File: kern_fork.c Project: 0xffea/xnu
void *
uthread_alloc(task_t task, thread_t thread, int noinherit)
{
	proc_t p;
	uthread_t uth;
	uthread_t uth_parent;
	void *ut;

	if (!uthread_zone_inited)
		uthread_zone_init();

	ut = (void *)zalloc(uthread_zone);
	bzero(ut, sizeof(struct uthread));

	p = (proc_t) get_bsdtask_info(task);
	uth = (uthread_t)ut;

	/*
	 * Thread inherits credential from the creating thread, if both
	 * are in the same task.
	 *
	 * If the creating thread has no credential or is from another
	 * task we can leave the new thread credential NULL.  If it needs
	 * one later, it will be lazily assigned from the task's process.
	 */
	uth_parent = (uthread_t)get_bsdthread_info(current_thread());
	if ((noinherit == 0) && task == current_task() && 
	    uth_parent != NULL &&
	    IS_VALID_CRED(uth_parent->uu_ucred)) {
		/*
		 * XXX The new thread is, in theory, being created in context
		 * XXX of parent thread, so a direct reference to the parent
		 * XXX is OK.
		 */
		kauth_cred_ref(uth_parent->uu_ucred);
		uth->uu_ucred = uth_parent->uu_ucred;
		/* the credential we just inherited is an assumed credential */
		if (uth_parent->uu_flag & UT_SETUID)
			uth->uu_flag |= UT_SETUID;
	} else {
		/* sometimes workqueue threads are created outside of task context */
		if ((task != kernel_task) && (p != PROC_NULL))
			uth->uu_ucred = kauth_cred_proc_ref(p);
		else
			uth->uu_ucred = NOCRED;
	}

	
	if ((task != kernel_task) && p) {
		
		proc_lock(p);
		if (noinherit != 0) {
			/* workq threads will not inherit masks */
			uth->uu_sigmask = ~workq_threadmask;
		} else if (uth_parent) {
			if (uth_parent->uu_flag & UT_SAS_OLDMASK)
				uth->uu_sigmask = uth_parent->uu_oldmask;
			else
				uth->uu_sigmask = uth_parent->uu_sigmask;
		}
		uth->uu_context.vc_thread = thread;
		TAILQ_INSERT_TAIL(&p->p_uthlist, uth, uu_list);
		proc_unlock(p);

#if CONFIG_DTRACE
		if (p->p_dtrace_ptss_pages != NULL) {
			uth->t_dtrace_scratch = dtrace_ptss_claim_entry(p);
		}
#endif
	}

	return (ut);
}
Example #17
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands. If "map"==VM_MAP_NULL or
 * "thread"==THREAD_NULL, do not make permanent VM modifications,
 * just preflight the parse.
 */
static
load_return_t
parse_machfile(
	uint8_t 		*vp,       
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	int			depth,
	int64_t			aslr_offset,
	int64_t			dyld_aslr_offset,
	load_result_t		*result
)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	integer_t		dlarchbits = 0;
	void *			control;
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	vm_size_t		size,kl_size;
	size_t			offset;
	size_t			oldoffset;	/* for overflow check */
	int			pass;
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;
	boolean_t		got_code_signatures = FALSE;
	int64_t			slide = 0;

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
	    	mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 *	Break infinite recursion
	 */
	if (depth > 6) {
        printf("parse_machfile 1: %s\n", load_to_string(LOAD_FAILURE));
		return(LOAD_FAILURE);
	}

	depth++;

	/*
	 *	Check to see if right machine type.
	 */
    // this should be implemented by qemu somehow.
	/*if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) ||
	    !grade_binary(header->cputype, 
	    	header->cpusubtype & ~CPU_SUBTYPE_MASK))
		return(LOAD_BADARCH);*/
		
	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
    
	switch (header->filetype) {
	
	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1) {
            printf("parse_machfile 2: %s\n", load_to_string(LOAD_FAILURE));
			return (LOAD_FAILURE);
		}
		break;
		
	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1) {
            printf("parse_machfile 2: %s\n", load_to_string(LOAD_FAILURE));
			return (LOAD_FAILURE);
		}
		break;

	case MH_DYLINKER:
		if (depth != 2) {
            printf("parse_machfile 3: %s\n", load_to_string(LOAD_FAILURE));
			return (LOAD_FAILURE);
		}
		break;
		
	default:
        printf("parse_machfile 4: %s, header->filetype = %d\n", load_to_string(LOAD_FAILURE), header->filetype);
		return (LOAD_FAILURE);
	}

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
    if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size) {
        printf("parse_machfile 5: %s header->sizeofcmds: %u macho_size %lld\n", load_to_string(LOAD_BADMACHO), header->sizeofcmds, (long long)macho_size);
		return(LOAD_BADMACHO);
    }

	/*
	 *	Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(mach_header_sz + header->sizeofcmds);
    if (size <= 0) {
        printf("parse_machfile 6: %s\n", load_to_string(LOAD_BADMACHO));
		return(LOAD_BADMACHO);
    }

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	addr = (caddr_t)(vp);
    if (addr == NULL) {
        printf("parse_machfile 7: %s\n", load_to_string(LOAD_NOSPACE));
		return(LOAD_NOSPACE);
    }

	/*
	 *	For PIE and dyld, slide everything by the ASLR offset.
	 */
	if ((header->flags & MH_PIE) || (header->filetype == MH_DYLINKER)) {
		slide = aslr_offset;
	}

	 /*
	 *  Scan through the commands, processing each one as necessary.
	 *  We parse in three passes through the headers:
	 *  1: thread state, uuid, code signature
	 *  2: segments
	 *  3: dyld, encryption, check entry point
	 */
	
	for (pass = 1; pass <= 3; pass++) {

		/*
		 * Check that the entry point is contained in an executable segment
		 */ 
		if ((pass == 3) && (result->validentry == 0)) {
			ret = LOAD_FAILURE;
			break;
		}

		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;

		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;
			offset += lcp->cmdsize;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents.  Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (oldoffset > offset ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > header->sizeofcmds + mach_header_sz) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 */
            printf("Command: %s\n", command_to_string(lcp->cmd));
			switch(lcp->cmd) {
			case LC_SEGMENT:
				if (pass != 2)
					break;

				if (abi64) {
					/*
					 * Having an LC_SEGMENT command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
				                   header->filetype,
				                   control,
				                   file_offset,
				                   macho_size,
				                   vp,
				                   slide,
				                   result);
				break;
			case LC_SEGMENT_64:
				if (pass != 2)
					break;

				if (!abi64) {
					/*
					 * Having an LC_SEGMENT_64 command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
				                   header->filetype,
				                   control,
				                   file_offset,
				                   macho_size,
				                   vp,
				                   slide,
				                   result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 1)
					break;
				ret = load_unixthread(
						 (struct thread_command *) lcp,
						 result);
				break;
			case LC_MAIN:
				if (pass != 1)
					break;
				if (depth != 1)
					break;
				ret = load_main(
						 (struct entry_point_command *) lcp,
						 result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 3)
					break;
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
					dlarchbits = (header->cputype & CPU_ARCH_MASK);
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			case LC_UUID:
				if (pass == 1 && depth == 1) {
					ret = load_uuid((struct uuid_command *) lcp,
							(char *)addr + mach_header_sz + header->sizeofcmds,
							result);
				}
				break;
			case LC_CODE_SIGNATURE:
				/* CODE SIGNING */
				if (pass != 1)
					break;
				/* pager -> uip ->
				   load signatures & store in uip
				   set VM object "signed_pages"
				*/
				/*ret = load_code_signature(
					(struct linkedit_data_command *) lcp,
					vp,
					file_offset,
					macho_size,
					header->cputype,
					result);*/
				if (ret != LOAD_SUCCESS) {
					printf("proc: load code signature error %d ", ret);
					ret = LOAD_SUCCESS; /* ignore error */
				} else {
					got_code_signatures = TRUE;
				}
				break;
#if CONFIG_CODE_DECRYPTION
			case LC_ENCRYPTION_INFO:
			case LC_ENCRYPTION_INFO_64:
				if (pass != 3)
					break;
				ret = set_code_unprotect(
					(struct encryption_info_command *) lcp,
					addr, map, slide, vp,
					header->cputype, header->cpusubtype);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: set_code_unprotect() error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					/* 
					 * Don't let the app run if it's 
					 * encrypted but we failed to set up the
					 * decrypter. If the keys are missing it will
					 * return LOAD_DECRYPTFAIL.
					 */
					 if (ret == LOAD_DECRYPTFAIL) {
						/* failed to load due to missing FP keys */
						proc_lock(p);
						p->p_lflag |= P_LTERM_DECRYPTFAIL;
						proc_unlock(p);
					}
					 psignal(p, SIGKILL);
				}
				break;
#endif
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
            
            printf("parse_machfile 9: ret %s\n", load_to_string(ret));

			if (ret != LOAD_SUCCESS)
				break;
		}
        
		if (ret != LOAD_SUCCESS)
			break;
	}

    if (ret == LOAD_SUCCESS) {
	    if (! got_code_signatures) {
		    //struct cs_blob *blob;
		    /* no embedded signatures: look for detached ones */
		    //blob = ubc_cs_blob_get(vp, -1, file_offset);
		    //if (blob != NULL) {
			//unsigned int cs_flag_data = blob->csb_flags;
			//if(0 != ubc_cs_generation_check(vp)) {
			//	if (0 != ubc_cs_blob_revalidate(vp, blob)) {
			//		/* clear out the flag data if revalidation fails */
			//		cs_flag_data = 0;
			//		result->csflags &= ~CS_VALID;
			//	}
			//}
			/* get flags to be applied to the process */
			//result->csflags |= cs_flag_data;
		    //}
	    }

		/* Make sure if we need dyld, we got it */
		if (result->needs_dynlinker && !dlp) {
			ret = LOAD_FAILURE;
		}

	    if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
			/*
		 	* load the dylinker, and slide it by the independent DYLD ASLR
		 	* offset regardless of the PIE-ness of the main binary.
		 	*/
			ret = load_dylinker(dlp, dlarchbits, depth, dyld_aslr_offset, result);
		}

	    if((ret == LOAD_SUCCESS) && (depth == 1)) {
			if (result->thread_count == 0) {
				ret = LOAD_FAILURE;
			}
	    }
    }
    
    printf("parse_machfile 8: %s\n", load_to_string(ret));

	return(ret);
}
Example #18
File: unix_signal.c Project: argp/xnu
/*
 * Send an interrupt to process.
 *
 */
void
sendsig(
	struct proc * p,
	user_addr_t catcher,
	int sig,
	int mask,
	__unused uint32_t code
)
{
	union {
		struct ts32 {
			arm_thread_state_t ss;
		} ts32;
#if defined(__arm64__)
		struct ts64 {
			arm_thread_state64_t ss;
		} ts64;
#endif
	} ts;
	union { 
		struct user_sigframe32 uf32;
#if defined(__arm64__)
		struct user_sigframe64 uf64;
#endif
	} user_frame;

	user_siginfo_t sinfo;
	user_addr_t 	sp = 0, trampact;
	struct sigacts *ps = p->p_sigacts;
	int             oonstack, infostyle;
	thread_t        th_act;
	struct uthread *ut;
	user_size_t	stack_size = 0;
	user_addr_t     p_uctx, token_uctx;
	kern_return_t   kr;

	th_act = current_thread();
	ut = get_bsdthread_info(th_act);

	bzero(&ts, sizeof(ts));
	bzero(&user_frame, sizeof(user_frame));

	if (p->p_sigacts->ps_siginfo & sigmask(sig))
		infostyle = UC_FLAVOR;
	else
		infostyle = UC_TRAD;

	trampact = ps->ps_trampact[sig];
	oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;

	/*
	 * Get sundry thread state.
	 */
	if (proc_is64bit_data(p)) {
#ifdef __arm64__
		if (sendsig_get_state64(th_act, &ts.ts64.ss, &user_frame.uf64.mctx) != 0) {
			goto bad2;
		}
#else
	panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		if (sendsig_get_state32(th_act, &ts.ts32.ss, &user_frame.uf32.mctx) != 0) {
			goto bad2;
		}
	}

	/*
	 * Figure out where our new stack lives.
	 */
	if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (ps->ps_sigonstack & sigmask(sig))) {
		sp = ps->ps_sigstk.ss_sp;
		sp += ps->ps_sigstk.ss_size;
		stack_size = ps->ps_sigstk.ss_size;
		ps->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		/*
		 * Get stack pointer, and allocate enough space
		 * for signal handler data.
		 */
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sp = CAST_USER_ADDR_T(ts.ts64.ss.sp);
			sp = (sp - sizeof(user_frame.uf64) - C_64_REDZONE_LEN) & ~0xf; /* Make sure to align to 16 bytes and respect red zone */
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sp = CAST_USER_ADDR_T(ts.ts32.ss.sp);
			sp -= sizeof(user_frame.uf32);
#if defined(__arm__) && (__BIGGEST_ALIGNMENT__ > 4)
			sp &= ~0xf; /* Make sure to align to 16 bytes for armv7k */
#endif
		}
	}

	proc_unlock(p);

	/*
	 * Fill in ucontext (points to mcontext, i.e. thread states).
	 */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		sendsig_fill_uctx64(&user_frame.uf64.uctx, oonstack, mask, sp, (user64_size_t)stack_size,
				(user64_addr_t)&((struct user_sigframe64*)sp)->mctx);
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		sendsig_fill_uctx32(&user_frame.uf32.uctx, oonstack, mask, sp, (user32_size_t)stack_size, 
				(user32_addr_t)&((struct user_sigframe32*)sp)->mctx);
	}

	/*
	 * Setup siginfo.
	 */
	bzero((caddr_t) & sinfo, sizeof(sinfo));
	sinfo.si_signo = sig;

	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		sinfo.si_addr = ts.ts64.ss.pc;
		sinfo.pad[0] = ts.ts64.ss.sp;
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		sinfo.si_addr = ts.ts32.ss.pc;
		sinfo.pad[0] = ts.ts32.ss.sp;
	}

	switch (sig) {
	case SIGILL:
#ifdef	BER_XXX
		if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
			sinfo.si_code = ILL_ILLOPC;
		else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
			sinfo.si_code = ILL_PRVOPC;
		else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
			sinfo.si_code = ILL_ILLTRP;
		else
			sinfo.si_code = ILL_NOOP;
#else
		sinfo.si_code = ILL_ILLTRP;
#endif
		break;

	case SIGFPE:
		break;

	case SIGBUS:
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sinfo.si_addr = user_frame.uf32.mctx.es.far;
		}

		sinfo.si_code = BUS_ADRALN;
		break;

	case SIGSEGV:
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sinfo.si_addr = user_frame.uf32.mctx.es.far;
		}

#ifdef	BER_XXX
		/* First check in srr1 and then in dsisr */
		if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
			sinfo.si_code = SEGV_ACCERR;
		else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
			sinfo.si_code = SEGV_ACCERR;
		else
			sinfo.si_code = SEGV_MAPERR;
#else
		sinfo.si_code = SEGV_ACCERR;
#endif
		break;

	default:
	{
		int status_and_exitcode;

		/*
		 * All other signals need to fill out a minimum set of
		 * information for the siginfo structure passed into
		 * the signal handler, if SA_SIGINFO was specified.
		 *
		 * p->si_status actually contains both the status and
		 * the exit code; we save it off in its own variable
		 * for later breakdown.
		 */
		proc_lock(p);
		sinfo.si_pid = p->si_pid;
		p->si_pid = 0;
		status_and_exitcode = p->si_status;
		p->si_status = 0;
		sinfo.si_uid = p->si_uid;
		p->si_uid = 0;
		sinfo.si_code = p->si_code;
		p->si_code = 0;
		proc_unlock(p);
		if (sinfo.si_code == CLD_EXITED) {
			if (WIFEXITED(status_and_exitcode))
				sinfo.si_code = CLD_EXITED;
			else if (WIFSIGNALED(status_and_exitcode)) {
				if (WCOREDUMP(status_and_exitcode)) {
					sinfo.si_code = CLD_DUMPED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
				} else {
					sinfo.si_code = CLD_KILLED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
				}
			}
		}
		/*
		 * The recorded status contains the exit code and the
		 * signal information, but the information to be passed
		 * in the siginfo to the handler is supposed to only
		 * contain the status, so we have to shift it out.
		 */
		sinfo.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
		p->p_xhighbits = 0;
		break;
	}
	}

#if CONFIG_DTRACE	
	sendsig_do_dtrace(ut, &sinfo, sig, catcher);
#endif /* CONFIG_DTRACE */

	/* 
	 * Copy signal-handling frame out to user space, set thread state.
	 */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		user64_addr_t token;

		/*
		 * mctx filled in when we get state.  uctx filled in by 
		 * sendsig_fill_uctx64(). We fill in the sinfo now.
		 */
		siginfo_user_to_user64(&sinfo, &user_frame.uf64.sinfo);

		p_uctx = (user_addr_t)&((struct user_sigframe64*)sp)->uctx;
		/*
		 * Generate the validation token for sigreturn
		 */
		token_uctx = p_uctx;
		kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
		assert(kr == KERN_SUCCESS);
		token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;

		if (copyout(&user_frame.uf64, sp, sizeof(user_frame.uf64)) != 0) {
			goto bad; 
		} 

		if (sendsig_set_thread_state64(&ts.ts64.ss,
			catcher, infostyle, sig, (user64_addr_t)&((struct user_sigframe64*)sp)->sinfo,
			(user64_addr_t)p_uctx, token, trampact, sp, th_act) != KERN_SUCCESS)
			goto bad;

#else
	panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		user32_addr_t token;

		/*
		 * mctx filled in when we get state.  uctx filled in by 
		 * sendsig_fill_uctx32(). We fill in the sinfo, *pointer*
		 * to uctx and token now.
		 */
		siginfo_user_to_user32(&sinfo, &user_frame.uf32.sinfo);

		p_uctx = (user_addr_t)&((struct user_sigframe32*)sp)->uctx;
		/*
		 * Generate the validation token for sigreturn
		 */
		token_uctx = (user_addr_t)p_uctx;
		kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
		assert(kr == KERN_SUCCESS);
		token = (user32_addr_t)token_uctx ^ (user32_addr_t)ps->ps_sigreturn_token;

		user_frame.uf32.puctx = (user32_addr_t)p_uctx;
		user_frame.uf32.token = token;

		if (copyout(&user_frame.uf32, sp, sizeof(user_frame.uf32)) != 0) {
			goto bad; 
		} 

		if (sendsig_set_thread_state32(&ts.ts32.ss,
			CAST_DOWN_EXPLICIT(user32_addr_t, catcher), infostyle, sig, (user32_addr_t)&((struct user_sigframe32*)sp)->sinfo,
			CAST_DOWN_EXPLICIT(user32_addr_t, trampact), CAST_DOWN_EXPLICIT(user32_addr_t, sp), th_act) != KERN_SUCCESS)
			goto bad;
	}

	proc_lock(p);
	return;

bad:
	proc_lock(p);
bad2:
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	proc_unlock(p);
	psignal_locked(p, SIGILL);
	proc_lock(p);
}
Example #19
int
cs_invalid_page(
	addr64_t vaddr)
{
	struct proc	*p;
	int		send_kill = 0, retval = 0, verbose = cs_debug;
	uint32_t	csflags;

	p = current_proc();

	if (verbose)
		printf("CODE SIGNING: cs_invalid_page(0x%llx): p=%d[%s]\n",
		    vaddr, p->p_pid, p->p_comm);

	proc_lock(p);

	/* XXX for testing */
	if (cs_force_kill)
		p->p_csflags |= CS_KILL;
	if (cs_force_hard)
		p->p_csflags |= CS_HARD;

	/* CS_KILL triggers a kill signal, and no you can't have the page. Nothing else. */
	if (p->p_csflags & CS_KILL) {
		if (panic_on_cs_killed &&
		    vaddr >= SHARED_REGION_BASE &&
		    vaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE) {
			panic("<rdar://14393620> cs_invalid_page(va=0x%llx): killing p=%p\n", (uint64_t) vaddr, p);
		}
		p->p_csflags |= CS_KILLED;
		cs_procs_killed++;
		send_kill = 1;
		retval = 1;
	}
	
#if __x86_64__
	if (panic_on_cs_killed &&
	    vaddr >= SHARED_REGION_BASE &&
	    vaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE) {
		panic("<rdar://14393620> cs_invalid_page(va=0x%llx): cs error p=%p\n", (uint64_t) vaddr, p);
	}
#endif /* __x86_64__ */

	/* CS_HARD means fail the mapping operation so the process stays valid. */
	if (p->p_csflags & CS_HARD) {
		retval = 1;
	} else {
		if (p->p_csflags & CS_VALID) {
			p->p_csflags &= ~CS_VALID;
			cs_procs_invalidated++;
			verbose = 1;
		}
	}
	csflags = p->p_csflags;
	proc_unlock(p);

	if (verbose)
		printf("CODE SIGNING: cs_invalid_page(0x%llx): "
		       "p=%d[%s] final status 0x%x, %s page%s\n",
		       vaddr, p->p_pid, p->p_comm, p->p_csflags,
		       retval ? "denying" : "allowing (remove VALID)",
		       send_kill ? " sending SIGKILL" : "");

	if (send_kill)
		threadsignal(current_thread(), SIGKILL, EXC_BAD_ACCESS);


	return retval;
}
Example #20
void sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code)
{
    user_addr_t ua_sp;
    user_addr_t ua_sip;
    user_addr_t trampact;
    user_addr_t ua_uctxp;
    user_addr_t ua_mctxp;
    user_siginfo_t  sinfo32;

    struct uthread *ut;
    struct mcontext mctx32;
    struct user_ucontext32 uctx32;
    struct sigacts *ps = p->p_sigacts;

    void *state;
    arm_thread_state_t *tstate32;
    mach_msg_type_number_t state_count;

    int stack_size = 0;
    int infostyle = UC_TRAD;
    int oonstack, flavor, error;

    proc_unlock(p);

    thread_t thread = current_thread();
    ut = get_bsdthread_info(thread);

    /*
     * Set up thread state info.
     */
    flavor = ARM_THREAD_STATE;
    state = (void *)&mctx32.ss;
    state_count = ARM_THREAD_STATE_COUNT;
    if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
        goto bad;

    flavor = ARM_EXCEPTION_STATE;
    state = (void *)&mctx32.es;
    state_count = ARM_EXCEPTION_STATE_COUNT;
    if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
        goto bad;

    flavor = ARM_VFP_STATE;
    state = (void *)&mctx32.fs;
    state_count = ARM_VFP_STATE_COUNT;
    if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
        goto bad;

    tstate32 = &mctx32.ss;

    /*
     * Set the signal style.
     */
    if (p->p_sigacts->ps_siginfo & sigmask(sig))
        infostyle = UC_FLAVOR;

    /*
     * Get the signal disposition.
     */
    trampact = ps->ps_trampact[sig];

    /*
     * Figure out where our new stack lives.
     */
    oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
    if ((ut->uu_flag & UT_ALTSTACK) && !oonstack && (ps->ps_sigonstack & sigmask(sig))) {
        ua_sp = ut->uu_sigstk.ss_sp;
        ua_sp += ut->uu_sigstk.ss_size;
        stack_size = ut->uu_sigstk.ss_size;
        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
    } else {
        ua_sp = tstate32->sp;
    }

    /*
     * Set up the stack.
     */
    ua_sp -= UC_FLAVOR_SIZE;
    ua_mctxp = ua_sp;

    ua_sp -= sizeof(struct user_ucontext32);
    ua_uctxp = ua_sp;

    ua_sp -= sizeof(siginfo_t);
    ua_sip = ua_sp;

    /*
     * Align the stack pointer.
     */
    ua_sp = TRUNC_DOWN32(ua_sp, C_32_STK_ALIGN);

    /*
     * Build the signal context to be used by sigreturn.
     */
    uctx32.uc_onstack = oonstack;
    uctx32.uc_sigmask = mask;
    uctx32.uc_stack.ss_sp = ua_sp;
    uctx32.uc_stack.ss_size = stack_size;

    if (oonstack)
            uctx32.uc_stack.ss_flags |= SS_ONSTACK;

    uctx32.uc_link = 0;

    uctx32.uc_mcsize = UC_FLAVOR_SIZE;

    uctx32.uc_mcontext = ua_mctxp;

    /*
     * init siginfo
     */
    bzero((caddr_t)&sinfo32, sizeof(user_siginfo_t));

    sinfo32.si_signo = sig;
    sinfo32.pad[0]  = tstate32->sp;
    sinfo32.si_addr = tstate32->lr;

    switch (sig) {
        case SIGILL:
            sinfo32.si_code = ILL_NOOP;
            break;
        case SIGFPE:
            sinfo32.si_code = FPE_NOOP;
            break;
        case SIGBUS:
            sinfo32.si_code = BUS_ADRALN;
            break;
        case SIGSEGV:
            sinfo32.si_code = SEGV_ACCERR;
            break;
        default:
            {
                int status_and_exitcode;

                /*
                 * All other signals need to fill out a minimum set of
                 * information for the siginfo structure passed into
                 * the signal handler, if SA_SIGINFO was specified.
                 *
                 * p->si_status actually contains both the status and
                 * the exit code; we save it off in its own variable
                 * for later breakdown.
                 */
                proc_lock(p);
                sinfo32.si_pid = p->si_pid;
                p->si_pid = 0;
                status_and_exitcode = p->si_status;
                p->si_status = 0;
                sinfo32.si_uid = p->si_uid;
                p->si_uid = 0;
                sinfo32.si_code = p->si_code;
                p->si_code = 0;
                proc_unlock(p);
                if (sinfo32.si_code == CLD_EXITED) {
                    if (WIFEXITED(status_and_exitcode))
                        sinfo32.si_code = CLD_EXITED;
                    else if (WIFSIGNALED(status_and_exitcode)) {
                        if (WCOREDUMP(status_and_exitcode)) {
                            sinfo32.si_code = CLD_DUMPED;
                            status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
                        } else {
                            sinfo32.si_code = CLD_KILLED;
                            status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
                        }
                    }
                }
                /*
                 * The recorded status contains the exit code and the
                 * signal information, but the information to be passed
                 * in the siginfo to the handler is supposed to only
                 * contain the status, so we have to shift it out.
                 */
                sinfo32.si_status = WEXITSTATUS(status_and_exitcode);
                break;
            }
    }

    /*
     * Copy out context info.
     */
    if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof(struct user_ucontext32)) != KERN_SUCCESS)
        goto bad;
    if (copyout((caddr_t)&sinfo32, ua_sip, sizeof(user_siginfo_t)) != KERN_SUCCESS)
        goto bad;
    if (copyout((caddr_t)&mctx32, ua_mctxp, sizeof(struct mcontext)) != KERN_SUCCESS)
        goto bad;
    if (copyout((caddr_t)&ua_uctxp, ua_sp, sizeof(user_addr_t)) != KERN_SUCCESS)
        goto bad;

    /*
     * Set up registers for the trampoline.
     */
    tstate32->r[0] = ua_catcher;
    tstate32->r[1] = infostyle;
    tstate32->r[2] = sig;
    tstate32->r[3] = ua_sip;
    tstate32->sp = ua_sp;

    if (trampact & 0x01) {
        tstate32->lr = trampact;
        tstate32->cpsr = 0x10;      /* User mode */
    } else {
        trampact &= ~0x01;
        tstate32->lr = trampact;
        tstate32->cpsr = 0x10;      /* User mode */
        tstate32->cpsr |= (1 << 5); /* T-bit */
    }

    /*
     * Call the trampoline.
     */
    flavor = ARM_THREAD_STATE;
    state_count = ARM_THREAD_STATE_COUNT;
    state = (void *)tstate32;
    if ((error = thread_setstatus(thread, flavor, (thread_state_t)state, state_count)) != KERN_SUCCESS)
        panic("sendsig: thread_setstatus failed, ret = %08X\n", error);

    proc_lock(p);

    return;

bad:
    proc_lock(p);
    SIGACTION(p, SIGILL) = SIG_DFL;
    sig = sigmask(SIGILL);
    p->p_sigignore &= ~sig;
    p->p_sigcatch &= ~sig;
    ut->uu_sigmask &= ~sig;

    /*
     * sendsig is called with signal lock held
     */
    proc_unlock(p);
    psignal_locked(p, SIGILL);
    proc_lock(p);

    return;
}
Example #21
int
ptrace(struct proc *p, struct ptrace_args *uap, int32_t *retval)
{
	struct proc *t = current_proc();	/* target process */
	task_t		task;
	thread_t	th_act;
	struct uthread 	*ut;
	int tr_sigexc = 0;
	int error = 0;
	int stopped = 0;

	AUDIT_ARG(cmd, uap->req);
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(value32, uap->data);

	if (uap->req == PT_DENY_ATTACH) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			proc_unlock(p);
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
					      p->p_pid, W_EXITCODE(ENOTSUP, 0), 4, 0, 0);
			exit1(p, W_EXITCODE(ENOTSUP, 0), retval);

			thread_exception_return();
			/* NOTREACHED */
		}
		SET(p->p_lflag, P_LNOATTACH);
		proc_unlock(p);

		return(0);
	}

	if (uap->req == PT_FORCEQUOTA) {
		if (kauth_cred_issuser(kauth_cred_get())) {
			OSBitOrAtomic(P_FORCEQUOTA, &t->p_flag);
			return (0);
		} else
			return (EPERM);
	}

	/*
	 *	Intercept and deal with "please trace me" request.
	 */	 
	if (uap->req == PT_TRACE_ME) {
retry_trace_me:;
		proc_t pproc = proc_parent(p);
		if (pproc == NULL)
			return (EINVAL);
#if CONFIG_MACF
		/*
		 * NB: Cannot call kauth_authorize_process(..., KAUTH_PROCESS_CANTRACE, ...)
		 *     since that assumes the process being checked is the current process
		 *     when, in this case, it is the current process's parent.
		 *     Most of the other checks in cantrace() don't apply either.
		 */
		if ((error = mac_proc_check_debug(pproc, p)) == 0) {
#endif
			proc_lock(p);
			/* Make sure the process wasn't re-parented. */
			if (p->p_ppid != pproc->p_pid) {
				proc_unlock(p);
				proc_rele(pproc);
				goto retry_trace_me;
			}
			SET(p->p_lflag, P_LTRACED);
			/* Non-attached case, our tracer is our parent. */
			p->p_oppid = p->p_ppid;
			proc_unlock(p);
			/* Child and parent will have to be able to run modified code. */
			cs_allow_invalid(p);
			cs_allow_invalid(pproc);
#if CONFIG_MACF
		}
#endif
		proc_rele(pproc);
		return (error);
	}
	if (uap->req == PT_SIGEXC) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			SET(p->p_lflag, P_LSIGEXC);
			proc_unlock(p);
			return(0);
		} else {
			proc_unlock(p);
			return(EINVAL);
		}
	}

	/* 
	 * We do not want ptrace to do anything with kernel or launchd 
	 */
	if (uap->pid < 2) {
		return(EPERM);
	}

	/*
	 *	Locate victim, and make sure it is traceable.
	 */
	if ((t = proc_find(uap->pid)) == NULL)
			return (ESRCH);

	AUDIT_ARG(process, t);

	task = t->task;
	if (uap->req == PT_ATTACHEXC) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		uap->req = PT_ATTACH;
		tr_sigexc = 1;
	}
	if (uap->req == PT_ATTACH) {
#pragma clang diagnostic pop
		int		err;


		if ( kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE, 
									 t, (uintptr_t)&err, 0, 0) == 0 ) {
			/* it's OK to attach */
			proc_lock(t);
			SET(t->p_lflag, P_LTRACED);
			if (tr_sigexc) 
				SET(t->p_lflag, P_LSIGEXC);
	
			t->p_oppid = t->p_ppid;
			/* Check whether child and parent are allowed to run modified
			 * code (they'll have to) */
			proc_unlock(t);
			cs_allow_invalid(t);
			cs_allow_invalid(p);
			if (t->p_pptr != p)
				proc_reparentlocked(t, p, 1, 0);
	
			proc_lock(t);
			if (get_task_userstop(task) > 0 ) {
				stopped = 1;
			}
			t->p_xstat = 0;
			proc_unlock(t);
			psignal(t, SIGSTOP);
			/*
			 * If the process was stopped, wake up and run through
			 * issignal() again to properly connect to the tracing
			 * process.
			 */
			if (stopped)
				task_resume(task);       
			error = 0;
			goto out;
		}
		else {
			/* not allowed to attach, proper error code returned by kauth_authorize_process */
			if (ISSET(t->p_lflag, P_LNOATTACH)) {
				psignal(p, SIGSEGV);
			}
			
			error = err;
			goto out;
		}
	}

	/*
	 * You can't do what you want to the process if:
	 *	(1) It's not being traced at all,
	 */
	proc_lock(t);
	if (!ISSET(t->p_lflag, P_LTRACED)) {
		proc_unlock(t);
		error = EPERM;
		goto out;
	}

	/*
	 *	(2) it's not being traced by _you_, or
	 */
	if (t->p_pptr != p) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	(3) it's not currently stopped.
	 */
	if (t->p_stat != SSTOP) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	Mach version of ptrace executes request directly here,
	 *	thus simplifying the interaction of ptrace and signals.
	 */
	/* proc lock is held here */
	switch (uap->req) {

	case PT_DETACH:
		if (t->p_oppid != t->p_ppid) {
			struct proc *pp;

			proc_unlock(t);
			pp = proc_find(t->p_oppid);
			if (pp != PROC_NULL) {
				proc_reparentlocked(t, pp, 1, 0);
				proc_rele(pp);
			} else {
				/* original parent exited while traced */
				proc_list_lock();
				t->p_listflag |= P_LIST_DEADPARENT;
				proc_list_unlock();
				proc_reparentlocked(t, initproc, 1, 0);
			}
			proc_lock(t);
		}

		t->p_oppid = 0;
		CLR(t->p_lflag, P_LTRACED);
		CLR(t->p_lflag, P_LSIGEXC);
		proc_unlock(t);
		goto resume;
		
	case PT_KILL:
		/*
		 *	Tell child process to kill itself after it
		 *	is resumed by adding NSIG to p_cursig. [see issig]
		 */
		proc_unlock(t);
#if CONFIG_MACF
		error = mac_proc_check_signal(p, t, SIGKILL);
		if (0 != error)
			goto resume;
#endif
		psignal(t, SIGKILL);
		goto resume;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		proc_unlock(t);
		th_act = (thread_t)get_firstthread(task);
		if (th_act == THREAD_NULL) {
			error = EINVAL;
			goto out;
		}

		/* force use of Mach SPIs (and task_for_pid security checks) to adjust PC */
		if (uap->addr != (user_addr_t)1) {
			error = ENOTSUP;
			goto out;
		}

		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}

		if (uap->data != 0) {
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, uap->data);
			if (0 != error)
				goto out;
#endif
			psignal(t, uap->data);
		}

		if (uap->req == PT_STEP) {
		        /*
			 * set trace bit 
			 * we use sending SIGSTOP as a comparable security check.
			 */
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, SIGSTOP);
			if (0 != error) {
				goto out;
			}
#endif
			if (thread_setsinglestep(th_act, 1) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		} else {
		        /*
			 * clear trace bit if on
			 * we use sending SIGCONT as a comparable security check.
			 */
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, SIGCONT);
			if (0 != error) {
				goto out;
			}
#endif
			if (thread_setsinglestep(th_act, 0) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		}	
	resume:
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		if (t->sigwait) {
			wakeup((caddr_t)&(t->sigwait));
			proc_unlock(t);
			if ((t->p_lflag & P_LSIGEXC) == 0) {
				task_resume(task);
			}
		} else
			proc_unlock(t);
			
		break;
		
	case PT_THUPDATE:  {
		proc_unlock(t);
		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}
		th_act = port_name_to_thread(CAST_MACH_PORT_TO_NAME(uap->addr));
		if (th_act == THREAD_NULL) {
			error = ESRCH;
			goto out;
		}
		ut = (uthread_t)get_bsdthread_info(th_act);
		if (uap->data)
			ut->uu_siglist |= sigmask(uap->data);
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		proc_unlock(t);
		thread_deallocate(th_act);
		error = 0;
		}
		break;
	default:
		proc_unlock(t);
		error = EINVAL;
		goto out;
	}

	error = 0;
out:
	proc_rele(t);
	return(error);
}
Example #22
void sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code)
{
    struct mcontext mctx;
    thread_t th_act;
    struct uthread *ut;
    void *tstate;
    int flavor;
    user_addr_t p_mctx = USER_ADDR_NULL;    /* mcontext dest. */
    int infostyle = UC_TRAD;
    mach_msg_type_number_t state_count;
    user_addr_t trampact;
    int oonstack;
    struct user_ucontext32 uctx;
    user_addr_t sp;
    user_addr_t p_uctx;         /* user stack addr to copy the ucontext to */
    user_siginfo_t sinfo;
    user_addr_t p_sinfo;        /* user stack addr to copy the siginfo to */
    struct sigacts *ps = p->p_sigacts;
    int stack_size = 0;
    kern_return_t kretn;
    
    th_act = current_thread();
    kprintf("sendsig: Sending signal to thread %p, code %d.\n", th_act, sig);
    return;

    ut = get_bsdthread_info(th_act);

    if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
        infostyle = UC_FLAVOR;
    }

    flavor = ARM_THREAD_STATE;
    tstate = (void *) &mctx.ss;
    state_count = ARM_THREAD_STATE_COUNT;
    if (thread_getstatus(th_act, flavor, (thread_state_t) tstate, &state_count) != KERN_SUCCESS)
        goto bad;

    trampact = ps->ps_trampact[sig];
    oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;

    /*
     * figure out where our new stack lives 
     */
    if ((ut->uu_flag & UT_ALTSTACK) && !oonstack && (ps->ps_sigonstack & sigmask(sig))) {
        sp = ut->uu_sigstk.ss_sp;
        sp += ut->uu_sigstk.ss_size;
        stack_size = ut->uu_sigstk.ss_size;
        ut->uu_sigstk.ss_flags |= SA_ONSTACK;
    } else {
        sp = CAST_USER_ADDR_T(mctx.ss.sp);
    }

    /*
     * context goes first on stack 
     */
    sp -= sizeof(struct ucontext);
    p_uctx = sp;

    /*
     * this is where siginfo goes on stack 
     */
    sp -= sizeof(user32_siginfo_t);
    p_sinfo = sp;

    /*
     * final stack pointer 
     */
    sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN + C_32_LINKAGE_LEN, C_32_STK_ALIGN);

    uctx.uc_mcsize = (size_t) ((ARM_THREAD_STATE_COUNT) * sizeof(int));
    uctx.uc_onstack = oonstack;
    uctx.uc_sigmask = mask;
    uctx.uc_stack.ss_sp = sp;
    uctx.uc_stack.ss_size = stack_size;
    if (oonstack)
        uctx.uc_stack.ss_flags |= SS_ONSTACK;
    uctx.uc_link = 0;

    /*
     * setup siginfo 
     */
    bzero((caddr_t) & sinfo, sizeof(sinfo));
    sinfo.si_signo = sig;
    sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.pc);
    sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.sp);

    switch (sig) {
    case SIGILL:
        sinfo.si_code = ILL_NOOP;
        break;
    case SIGFPE:
        sinfo.si_code = FPE_NOOP;
        break;
    case SIGBUS:
        sinfo.si_code = BUS_ADRALN;
        break;
    case SIGSEGV:
        sinfo.si_code = SEGV_ACCERR;
        break;
    default:
        {
            int status_and_exitcode;

            /*
             * All other signals need to fill out a minimum set of
             * information for the siginfo structure passed into
             * the signal handler, if SA_SIGINFO was specified.
             *
             * p->si_status actually contains both the status and
             * the exit code; we save it off in its own variable
             * for later breakdown.
             */
            proc_lock(p);
            sinfo.si_pid = p->si_pid;
            p->si_pid = 0;
            status_and_exitcode = p->si_status;
            p->si_status = 0;
            sinfo.si_uid = p->si_uid;
            p->si_uid = 0;
            sinfo.si_code = p->si_code;
            p->si_code = 0;
            proc_unlock(p);
            if (sinfo.si_code == CLD_EXITED) {
                if (WIFEXITED(status_and_exitcode))
                    sinfo.si_code = CLD_EXITED;
                else if (WIFSIGNALED(status_and_exitcode)) {
                    if (WCOREDUMP(status_and_exitcode)) {
                        sinfo.si_code = CLD_DUMPED;
                        status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
                    } else {
                        sinfo.si_code = CLD_KILLED;
                        status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
                    }
                }
            }
            /*
             * The recorded status contains the exit code and the
             * signal information, but the information to be passed
             * in the siginfo to the handler is supposed to only
             * contain the status, so we have to shift it out.
             */
            sinfo.si_status = WEXITSTATUS(status_and_exitcode);
            break;
        }
    }

    if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext32)))
        goto bad;

    if (copyout(&sinfo, p_sinfo, sizeof(sinfo)))
        goto bad;

    /*
     * set signal registers, these are probably wrong.. 
     */
    {
        mctx.ss.r[0] = CAST_DOWN(uint32_t, ua_catcher);
        mctx.ss.r[1] = (uint32_t) infostyle;
        mctx.ss.r[2] = (uint32_t) sig;
        mctx.ss.r[3] = CAST_DOWN(uint32_t, p_sinfo);
        mctx.ss.r[4] = CAST_DOWN(uint32_t, p_uctx);
        mctx.ss.pc = CAST_DOWN(uint32_t, trampact);
        mctx.ss.sp = CAST_DOWN(uint32_t, sp);
        state_count = ARM_THREAD_STATE_COUNT;
        printf("sendsig: Sending signal to thread %p, code %d, new pc 0x%08x\n", th_act, sig, trampact);
        if ((kretn = thread_setstatus(th_act, ARM_THREAD_STATE, (void *) &mctx.ss, state_count)) != KERN_SUCCESS) {
            panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
        }
    }

    proc_lock(p);
    return;

 bad:
    proc_lock(p);
    SIGACTION(p, SIGILL) = SIG_DFL;
    sig = sigmask(SIGILL);
    p->p_sigignore &= ~sig;
    p->p_sigcatch &= ~sig;
    ut->uu_sigmask &= ~sig;
    /*
     * sendsig is called with signal lock held 
     */
    proc_unlock(p);
    psignal_locked(p, SIGILL);
    proc_lock(p);
    return;
}
Example #23
File: kern_cs.c Project: onlynone/xnu
int
cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed)
{
	struct proc	*p;
	int		send_kill = 0, retval = 0, verbose = cs_debug;
	uint32_t	csflags;

	p = current_proc();

	if (verbose)
		printf("CODE SIGNING: cs_invalid_page(0x%llx): p=%d[%s]\n",
		    vaddr, p->p_pid, p->p_comm);

	proc_lock(p);

	/* XXX for testing */
	if (cs_force_kill)
		p->p_csflags |= CS_KILL;
	if (cs_force_hard)
		p->p_csflags |= CS_HARD;

	/* CS_KILL triggers a kill signal, and no you can't have the page. Nothing else. */
	if (p->p_csflags & CS_KILL) {
		p->p_csflags |= CS_KILLED;
		cs_procs_killed++;
		send_kill = 1;
		retval = 1;
	}
	
	/* CS_HARD means fail the mapping operation so the process stays valid. */
	if (p->p_csflags & CS_HARD) {
		retval = 1;
	} else {
		if (p->p_csflags & CS_VALID) {
			p->p_csflags &= ~CS_VALID;
			cs_procs_invalidated++;
			verbose = 1;
		}
	}
	csflags = p->p_csflags;
	proc_unlock(p);

	if (verbose)
		printf("CODE SIGNING: cs_invalid_page(0x%llx): "
		       "p=%d[%s] final status 0x%x, %s page%s\n",
		       vaddr, p->p_pid, p->p_comm, p->p_csflags,
		       retval ? "denying" : "allowing (remove VALID)",
		       send_kill ? " sending SIGKILL" : "");

	if (send_kill) {
		/* We will set the exit reason for the thread later */
		threadsignal(current_thread(), SIGKILL, EXC_BAD_ACCESS, FALSE);
		if (cs_killed) {
			*cs_killed = TRUE;
		}
	} else if (cs_killed) {
		*cs_killed = FALSE;
	}


	return retval;
}
Example #24
/*
 * Write out process accounting information, on process exit.
 * The data to be written out are specified in Leffler, et al.,
 * and are enumerated below.  (They're also noted in the system
 * "acct.h" header file.)
 */
int
acct_process(proc_t p)
{
	struct acct an_acct;
	struct rusage rup, *r;
	struct timeval ut, st, tmp;
	int t;
	int error;
	struct vnode *vp;
	kauth_cred_t safecred;
	struct session * sessp;
	boolean_t fstate;

	/* If accounting isn't enabled, don't bother */
	vp = acctp;
	if (vp == NULLVP)
		return (0);

	/*
	 * Get process accounting information.
	 */

	/* (1) The name of the command that ran */
	bcopy(p->p_comm, an_acct.ac_comm, sizeof an_acct.ac_comm);

	/* (2) The amount of user and system time that was used */
	calcru(p, &ut, &st, NULL);
	an_acct.ac_utime = encode_comp_t(ut.tv_sec, ut.tv_usec);
	an_acct.ac_stime = encode_comp_t(st.tv_sec, st.tv_usec);

	/* (3) The elapsed time the command ran (and its starting time) */
	an_acct.ac_btime = p->p_start.tv_sec;
	microtime(&tmp);
	timevalsub(&tmp, &p->p_start);
	an_acct.ac_etime = encode_comp_t(tmp.tv_sec, tmp.tv_usec);

	/* (4) The average amount of memory used */
	proc_lock(p);
	rup = p->p_stats->p_ru;
	proc_unlock(p);
	r = &rup;
	tmp = ut;
	timevaladd(&tmp, &st);
	t = tmp.tv_sec * hz + tmp.tv_usec / tick;
	if (t)
		an_acct.ac_mem = (r->ru_ixrss + r->ru_idrss + r->ru_isrss) / t;
	else
		an_acct.ac_mem = 0;

	/* (5) The number of disk I/O operations done */
	an_acct.ac_io = encode_comp_t(r->ru_inblock + r->ru_oublock, 0);

	/* (6) The UID and GID of the process */
	safecred = kauth_cred_proc_ref(p);

	an_acct.ac_uid = safecred->cr_ruid;
	an_acct.ac_gid = safecred->cr_rgid;

	/* (7) The terminal from which the process was started */
	
	sessp = proc_session(p);
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) && (sessp->s_ttyp != TTY_NULL)) {
		fstate = thread_funnel_set(kernel_flock, TRUE);
		an_acct.ac_tty = sessp->s_ttyp->t_dev;
		(void) thread_funnel_set(kernel_flock, fstate);
	} else
		an_acct.ac_tty = NODEV;

	if (sessp != SESSION_NULL)
		session_rele(sessp);

	/* (8) The boolean flags that tell how the process terminated, etc. */
	an_acct.ac_flag = p->p_acflag;

	/*
	 * Now, just write the accounting information to the file.
	 */
	if ((error = vnode_getwithref(vp)) == 0) {
	        error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&an_acct, sizeof (an_acct),
				(off_t)0, UIO_SYSSPACE32, IO_APPEND|IO_UNIT, safecred,
				(int *)0, p);
		vnode_put(vp);
	}

	kauth_cred_unref(&safecred);
	return (error);
}
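Step (4) above converts the accumulated rss integrals into an average by dividing by the CPU time expressed in clock ticks. A minimal standalone sketch of that arithmetic, with hz and tick stood in by assumed constants:

#include <stdio.h>

#define HZ_ASSUMED   100                       /* scheduler ticks per second (assumption) */
#define TICK_ASSUMED (1000000 / HZ_ASSUMED)    /* microseconds per tick */

static long
average_mem(long ixrss, long idrss, long isrss,   /* integral rss sums from rusage */
            long cpu_sec, long cpu_usec)          /* user + system CPU time */
{
	long t = cpu_sec * HZ_ASSUMED + cpu_usec / TICK_ASSUMED;

	return t ? (ixrss + idrss + isrss) / t : 0;
}

int
main(void)
{
	/* e.g. 2.5 s of CPU time and some accumulated rss integrals */
	printf("avg mem = %ld\n", average_mem(120000, 80000, 4000, 2, 500000));
	return 0;
}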
Example #25
0
File: fasttrap_isa.c  Project: argp/xnu
static void
fasttrap_return_common(proc_t *p, arm_saved_state_t *regs, user_addr_t pc, user_addr_t new_pc)
{
	pid_t pid = p->p_pid;
	fasttrap_tracepoint_t *tp;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	lck_mtx_t *pid_mtx;
	int retire_tp = 1;

	pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
	lck_mtx_lock(pid_mtx);
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
	    	tp->ftt_proc->ftpc_acount != 0)
			break;
	}

	/*
	 * Don't sweat it if we can't find the tracepoint again; unlike
	 * when we're in fasttrap_pid_probe(), finding the tracepoint here
	 * is not essential to the correct execution of the process.
 	 */
	if (tp == NULL) {
		lck_mtx_unlock(pid_mtx);
		return;
	}

	for (id = tp->ftt_retids; id != NULL; id = id->fti_next) {
		fasttrap_probe_t *probe = id->fti_probe;
		/*
		 * If there's a branch that could act as a return site, we
		 * need to trace it, and check here if the program counter is
		 * external to the function.
		 */
		if (tp->ftt_type != FASTTRAP_T_LDM_PC &&
		    tp->ftt_type != FASTTRAP_T_POP_PC &&
		    new_pc - probe->ftp_faddr < probe->ftp_fsize)
			continue;

		if (probe->ftp_prov->ftp_provider_type == DTFTP_PROVIDER_ONESHOT) {
			uint8_t already_triggered = atomic_or_8(&probe->ftp_triggered, 1);
			if (already_triggered) {
				continue;
			}
		}
		/*
		 * If we have at least one probe associated that
		 * is not a oneshot probe, don't remove the
		 * tracepoint
		 */
		else {
			retire_tp = 0;
		}
#ifndef CONFIG_EMBEDDED
		if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) {
			dtrace_probe(dtrace_probeid_error, 0 /* state */, id->fti_probe->ftp_id,
				     1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV);
#else
		if (FALSE) {
#endif
		} else {
			dtrace_probe(id->fti_probe->ftp_id,
				     pc - id->fti_probe->ftp_faddr,
				     regs->r[0], 0, 0, 0);
		}
	}
	if (retire_tp) {
		fasttrap_tracepoint_retire(p, tp);
	}

	lck_mtx_unlock(pid_mtx);
}
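The return-site check in the loop above relies on unsigned arithmetic: (new_pc - ftp_faddr) < ftp_fsize holds only when new_pc lies inside the probed function, because an address below the start wraps to a huge value. A tiny sketch of that predicate:

#include <stdint.h>
#include <stdio.h>

/* Nonzero when pc is outside [faddr, faddr + fsize); wrap-around catches pc < faddr. */
static int
pc_outside_function(uint64_t pc, uint64_t faddr, uint64_t fsize)
{
	return (pc - faddr) >= fsize;
}

int
main(void)
{
	uint64_t faddr = 0x1000, fsize = 0x80;

	printf("%d %d %d\n",
	       pc_outside_function(0x0ff0, faddr, fsize),   /* 1: below start */
	       pc_outside_function(0x1040, faddr, fsize),   /* 0: inside      */
	       pc_outside_function(0x1100, faddr, fsize));  /* 1: past end    */
	return 0;
}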

static void
fasttrap_sigsegv(proc_t *p, uthread_t t, user_addr_t addr, arm_saved_state_t *regs)
{
	/* TODO: This function isn't implemented yet. In debug mode, panic the system to
	 * find out why we're hitting this point. In other modes, kill the process.
	 */
#if DEBUG
#pragma unused(p,t,addr,regs)
	panic("fasttrap: sigsegv not yet implemented");
#else
#pragma unused(p,t,addr)
	/* Kill the process */
	regs->pc = 0;
#endif

#if 0
	proc_lock(p);

	/* Set fault address and mark signal */
	t->uu_code = addr;
	t->uu_siglist |= sigmask(SIGSEGV);

	/* 
	 * XXX These two line may be redundant; if not, then we need
	 * XXX to potentially set the data address in the machine
	 * XXX specific thread state structure to indicate the address.
	 */         
	t->uu_exception = KERN_INVALID_ADDRESS;         /* SIGSEGV */
	t->uu_subcode = 0;      /* XXX pad */
                
	proc_unlock(p); 
                                     
	/* raise signal */
	signal_setast(t->uu_context.vc_thread);
#endif
}
Example #26
0
int zebra_processor_init (zebra_processor_t *proc,
                          const char *dev,
                          int enable_display)
{
    if(proc_lock(proc) < 0)
        return(-1);
    proc_destroy_thread(proc->video_thread, &proc->video_started);
    proc_destroy_thread(proc->input_thread, &proc->input_started);
    proc->video_started = proc->input_started = 0;
    int rc = 0;
    if(proc->video) {
        if(dev)
            zebra_video_open(proc->video, NULL);
        else {
            zebra_video_destroy(proc->video);
            proc->video = NULL;
        }
    }
    if(proc->window) {
        zebra_window_destroy(proc->window);
        proc->window = NULL;
        _zebra_window_close(proc);
    }

    if(!dev && !enable_display)
        /* nothing to do */
        goto done;

    if(enable_display) {
        proc->window = zebra_window_create();
        if(!proc->window) {
            rc = err_copy(proc, proc->window);
            goto done;
        }
    }

    if(dev) {
        proc->video = zebra_video_create();
        if(!proc->video ||
           zebra_video_open(proc->video, dev)) {
            rc = err_copy(proc, proc->video);
            goto done;
        }
        if(zebra_video_get_fd(proc->video) < 0 && proc->threaded) {
#ifdef HAVE_LIBPTHREAD
            /* spawn blocking video thread */
            if((rc = pthread_create(&proc->video_thread, NULL,
                                    proc_video_thread, proc))) {
                rc = err_capture_num(proc, SEV_ERROR, ZEBRA_ERR_SYSTEM,
                                     __func__, "spawning video thread", rc);
                goto done;
            }
            proc->video_started = 1;
            zprintf(4, "spawned video thread\n");
#endif
        }
    }

#ifdef HAVE_LIBPTHREAD
    if(proc->threaded && (proc->window ||
                          (proc->video && !proc->video_started))) {
        /* spawn input monitor thread */
        if((rc = pthread_create(&proc->input_thread, NULL,
                                proc_input_thread, proc))) {
            rc = err_capture_num(proc, SEV_ERROR, ZEBRA_ERR_SYSTEM, __func__,
                                 "spawning input thread", rc);
            goto done;
        }
        proc->input_started = 1;
        zprintf(4, "spawned input thread\n");
    }
#endif

    if(proc->window) {
        /* arbitrary default */
        int width = 640, height = 480;
        if(proc->video) {
            width = zebra_video_get_width(proc->video);
            height = zebra_video_get_height(proc->video);
        }

        if(_zebra_window_open(proc, "zebra barcode reader", width, height)) {
            rc = -1;
            goto done;
        }
        else if(zebra_window_attach(proc->window, proc->display, proc->xwin)) {
            rc = err_copy(proc, proc->window);
            goto done;
        }
    }

    if(proc->video && proc->force_input) {
        if(zebra_video_init(proc->video, proc->force_input))
            rc = err_copy(proc, proc->video);
    }
    else if(proc->video)
        while(zebra_negotiate_format(proc->video, proc->window)) {
            if(proc->video && proc->window) {
                fprintf(stderr,
                        "WARNING: no compatible input to output format\n"
                        "...trying again with output disabled\n");
                zebra_window_destroy(proc->window);
                proc->window = NULL;
            }
            else {
                zprintf(1, "ERROR: no compatible %s format\n",
                        (proc->video) ? "video input" : "window output");
                rc = err_capture(proc, SEV_ERROR, ZEBRA_ERR_UNSUPPORTED,
                                 __func__, "no compatible image format");
                goto done;
            }
        }

 done:
    proc_unlock(proc);
    return(rc);
}
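A sketch of how an application might drive this initializer. The surrounding calls (create, set_visible, set_active, user_wait, destroy) and the header name are assumed to follow the zbar-style processor API; verify them against the actual zebra headers.

#include <stdio.h>
#include <zebra.h>   /* assumed header name */

int
main(void)
{
    zebra_processor_t *proc = zebra_processor_create(1 /* threaded */);
    if(!proc)
        return(1);
    /* open a video device (path is an example) with display enabled */
    if(zebra_processor_init(proc, "/dev/video0", 1)) {
        zebra_processor_destroy(proc);
        return(1);
    }
    zebra_processor_set_visible(proc, 1);   /* show the preview window */
    zebra_processor_set_active(proc, 1);    /* start scanning */
    zebra_processor_user_wait(proc, -1);    /* block until the user quits */
    zebra_processor_destroy(proc);
    return(0);
}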
Example #27
0
int
ptrace(struct proc *p, struct ptrace_args *uap, register_t *retval)
{
	struct proc *t = current_proc();	/* target process */
	task_t		task;
	thread_t	th_act;
	struct uthread 	*ut;
	int tr_sigexc = 0;
	int error = 0;
	int stopped = 0;

	AUDIT_ARG(cmd, uap->req);
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(value, uap->data);

	if (uap->req == PT_DENY_ATTACH) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			proc_unlock(p);
			exit1(p, W_EXITCODE(ENOTSUP, 0), retval);
			/* drop funnel before we return */
			thread_exception_return();
			/* NOTREACHED */
		}
		SET(p->p_lflag, P_LNOATTACH);
		proc_unlock(p);

		return(0);
	}

	if (uap->req == PT_FORCEQUOTA) {
		if (is_suser()) {
			OSBitOrAtomic(P_FORCEQUOTA, (UInt32 *)&t->p_flag);
			return (0);
		} else
			return (EPERM);
	}

	/*
	 *	Intercept and deal with "please trace me" request.
	 */	 
	if (uap->req == PT_TRACE_ME) {
		proc_lock(p);
		SET(p->p_lflag, P_LTRACED);
		/* Non-attached case, our tracer is our parent. */
		p->p_oppid = p->p_ppid;
		proc_unlock(p);
		return(0);
	}
	if (uap->req == PT_SIGEXC) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			SET(p->p_lflag, P_LSIGEXC);
			proc_unlock(p);
			return(0);
		} else {
			proc_unlock(p);
			return(EINVAL);
		}
	}

	/* 
	 * We do not want ptrace to do anything with kernel or launchd 
	 */
	if (uap->pid < 2) {
		return(EPERM);
	}

	/*
	 *	Locate victim, and make sure it is traceable.
	 */
	if ((t = proc_find(uap->pid)) == NULL)
		return (ESRCH);

	AUDIT_ARG(process, t);

	task = t->task;
	if (uap->req == PT_ATTACHEXC) {
		uap->req = PT_ATTACH;
		tr_sigexc = 1;
	}
	if (uap->req == PT_ATTACH) {
		int		err;
		
		if ( kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE, 
									 t, (uintptr_t)&err, 0, 0) == 0 ) {
			/* it's OK to attach */
			proc_lock(t);
			SET(t->p_lflag, P_LTRACED);
			if (tr_sigexc) 
				SET(t->p_lflag, P_LSIGEXC);
	
			t->p_oppid = t->p_ppid;
			proc_unlock(t);
			if (t->p_pptr != p)
				proc_reparentlocked(t, p, 1, 0);
	
			proc_lock(t);
			if (get_task_userstop(task) > 0 ) {
				stopped = 1;
			}
			t->p_xstat = 0;
			proc_unlock(t);
			psignal(t, SIGSTOP);
			/*
			 * If the process was stopped, wake up and run through
			 * issignal() again to properly connect to the tracing
			 * process.
			 */
			if (stopped)
				task_resume(task);       
			error = 0;
			goto out;
		}
		else {
			/* not allowed to attach, proper error code returned by kauth_authorize_process */
			if (ISSET(t->p_lflag, P_LNOATTACH)) {
				psignal(p, SIGSEGV);
			}
			
			error = err;
			goto out;
		}
	}

	/*
	 * You can't do what you want to the process if:
	 *	(1) It's not being traced at all,
	 */
	proc_lock(t);
	if (!ISSET(t->p_lflag, P_LTRACED)) {
		proc_unlock(t);
		error = EPERM;
		goto out;
	}

	/*
	 *	(2) it's not being traced by _you_, or
	 */
	if (t->p_pptr != p) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	(3) it's not currently stopped.
	 */
	if (t->p_stat != SSTOP) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	Mach version of ptrace executes request directly here,
	 *	thus simplifying the interaction of ptrace and signals.
	 */
	/* proc lock is held here */
	switch (uap->req) {

	case PT_DETACH:
		if (t->p_oppid != t->p_ppid) {
			struct proc *pp;

			proc_unlock(t);
			pp = proc_find(t->p_oppid);
			proc_reparentlocked(t, pp ? pp : initproc, 1, 0);
			if (pp != PROC_NULL)
				proc_rele(pp);
			proc_lock(t);
			
		}

		t->p_oppid = 0;
		CLR(t->p_lflag, P_LTRACED);
		CLR(t->p_lflag, P_LSIGEXC);
		proc_unlock(t);
		goto resume;
		
	case PT_KILL:
		/*
		 *	Tell child process to kill itself after it
		 *	is resumed by adding NSIG to p_cursig. [see issig]
		 */
		proc_unlock(t);
		psignal(t, SIGKILL);
		goto resume;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		proc_unlock(t);
		th_act = (thread_t)get_firstthread(task);
		if (th_act == THREAD_NULL) {
			error = EINVAL;
			goto out;
		}

		if (uap->addr != (user_addr_t)1) {
#if defined(ppc)
#define ALIGNED(addr,size)	(((unsigned)(addr)&((size)-1))==0)
			if (!ALIGNED((int)uap->addr, sizeof(int)))
				return (ERESTART);
#undef 	ALIGNED
#endif
			thread_setentrypoint(th_act, uap->addr);
		}

		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}

		if (uap->data != 0) {
			psignal(t, uap->data);
		}

		if (uap->req == PT_STEP) {
		        /*
			 * set trace bit
			 */
			if (thread_setsinglestep(th_act, 1) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		} else {
		        /*
			 * clear trace bit if on
			 */
			if (thread_setsinglestep(th_act, 0) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		}	
	resume:
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		if (t->sigwait) {
			wakeup((caddr_t)&(t->sigwait));
			proc_unlock(t);
			if ((t->p_lflag & P_LSIGEXC) == 0) {
				task_resume(task);
			}
		} else
			proc_unlock(t);
			
		break;
		
	case PT_THUPDATE:  {
		proc_unlock(t);
		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}
		th_act = port_name_to_thread(CAST_DOWN(mach_port_name_t, uap->addr));
		if (th_act == THREAD_NULL) {
			error = ESRCH;
			goto out;
		}
		ut = (uthread_t)get_bsdthread_info(th_act);
		if (uap->data)
			ut->uu_siglist |= sigmask(uap->data);
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		proc_unlock(t);
		thread_deallocate(th_act);
		error = 0;
		}
		break;
	default:
		proc_unlock(t);
		error = EINVAL;
		goto out;
	}

	error = 0;
out:
	proc_rele(t);
	return(error);
}
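From user space, the two simplest requests handled above are PT_TRACE_ME and PT_DENY_ATTACH. A Darwin-oriented sketch (constants and prototype from <sys/ptrace.h>):

#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>

int
main(void)
{
	/* Ask the kernel to refuse any future PT_ATTACH on this process. */
	if (ptrace(PT_DENY_ATTACH, 0, (caddr_t)0, 0) == -1)
		perror("ptrace(PT_DENY_ATTACH)");
	else
		printf("attach denied for this process\n");

	/* A child that wants its parent to trace it would instead call:   */
	/*     ptrace(PT_TRACE_ME, 0, (caddr_t)0, 0);                      */
	/* before exec'ing the program to be debugged.                     */
	return 0;
}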
Example #28
0
void
sendsig(struct proc *p, user_addr_t catcher, int sig, int mask, __unused u_long code)
{
	kern_return_t kretn;
	struct mcontext mctx;
	user_addr_t p_mctx = USER_ADDR_NULL;		/* mcontext dest. */
	struct mcontext64 mctx64;
	user_addr_t p_mctx64 = USER_ADDR_NULL;		/* mcontext dest. */
	struct user_ucontext64 uctx;
	user_addr_t p_uctx;		/* user stack addr top copy ucontext */
	user_siginfo_t sinfo;
	user_addr_t p_sinfo;		/* user stack addr top copy siginfo */
	struct sigacts *ps = p->p_sigacts;
	int oonstack;
	user_addr_t sp;
	mach_msg_type_number_t state_count;
	thread_t th_act;
	struct uthread *ut;
	int infostyle = UC_TRAD;
	int dualcontext = 0;
	user_addr_t trampact;
	int vec_used = 0;
	int stack_size = 0;
	void * tstate;
	int flavor;
	int ctx32 = 1;

	th_act = current_thread();
	ut = get_bsdthread_info(th_act);

	/*
	 * XXX We conditionalize type passed here based on SA_SIGINFO, but
	 * XXX we always send up all the information, regardless; perhaps
	 * XXX this should not be conditionalized?  Defer making this change
	 * XXX now, due to possible tools impact.
	 */
	if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
		/*
		 * If SA_SIGINFO is set, then we must provide the user
		 * process both a siginfo_t and a context argument.  We call
		 * this "FLAVORED", as opposed to "TRADITIONAL", which doesn't
		 * expect a context.  "DUAL" is a type of "FLAVORED".
		 */
		if (is_64signalregset()) {
			/*
			 * If this is a 64 bit CPU, we must include a 64 bit
			 * context in the data we pass to user space; we may
			 * or may not also include a 32 bit context at the
			 * same time, for non-leaf functions.
			 *
			 * The user may also explicitly choose to not receive
			 * a 32 bit context, at their option; we only allow
			 * this to happen on 64 bit processors, for obvious
			 * reasons.
			 */
			if (IS_64BIT_PROCESS(p) ||
			    (p->p_sigacts->ps_64regset & sigmask(sig))) {
				 /*
				  * For a 64 bit process, there is no 32 bit
				  * context.
				  */
				ctx32 = 0;
				infostyle = UC_FLAVOR64;
			} else {
				/*
				 * For a 32 bit process on a 64 bit CPU, we
				 * may have 64 bit leaf functions, so we need
				 * both contexts.
				 */
				dualcontext = 1;
				infostyle = UC_DUAL;
			}
		} else {
			/*
			 * If this is a 32 bit CPU, then we only have a 32 bit
			 * context to contend with.
			 */
			infostyle = UC_FLAVOR;
		}
	} else {
		/*
		 * If SA_SIGINFO is not set, then we have a traditional style
		 * call which does not need additional context passed.  The
		 * default is 32 bit traditional.
		 *
		 * XXX The second check is redundant on PPC32; keep it anyway.
		 */
		if (is_64signalregset() || IS_64BIT_PROCESS(p)) {
			/*
			 * However, if this is a 64 bit CPU, we need to change
			 * this to 64 bit traditional, and drop the 32 bit
			 * context.
			 */
			ctx32 = 0;
			infostyle = UC_TRAD64;
		}
	}

	proc_unlock(p);

	/* I need this for SIGINFO anyway */
	flavor = PPC_THREAD_STATE;
	tstate = (void *)&mctx.ss;
	state_count = PPC_THREAD_STATE_COUNT;
	if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count)  != KERN_SUCCESS)
		goto bad;

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_THREAD_STATE64;
		tstate = (void *)&mctx64.ss;
		state_count = PPC_THREAD_STATE64_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 1) || dualcontext) {
		flavor = PPC_EXCEPTION_STATE;
		tstate = (void *)&mctx.es;
		state_count = PPC_EXCEPTION_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_EXCEPTION_STATE64;
		tstate = (void *)&mctx64.es;
		state_count = PPC_EXCEPTION_STATE64_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 1) || dualcontext) {
		flavor = PPC_FLOAT_STATE;
		tstate = (void *)&mctx.fs;
		state_count = PPC_FLOAT_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}

	if ((ctx32 == 0) || dualcontext) {
		flavor = PPC_FLOAT_STATE;
		tstate = (void *)&mctx64.fs;
		state_count = PPC_FLOAT_STATE_COUNT;
		if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
			goto bad;
	}


	if (find_user_vec_curr()) {
		vec_used = 1;

		if ((ctx32 == 1) || dualcontext) {
			flavor = PPC_VECTOR_STATE;
			tstate = (void *)&mctx.vs;
			state_count = PPC_VECTOR_STATE_COUNT;
			if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
				goto bad;
			infostyle += 5;
		}

		if ((ctx32 == 0) || dualcontext) {
			flavor = PPC_VECTOR_STATE;
			tstate = (void *)&mctx64.vs;
			state_count = PPC_VECTOR_STATE_COUNT;
			if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS)
				goto bad;
			infostyle += 5;
		}
	}

	trampact = ps->ps_trampact[sig];
	oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;

	/* figure out where our new stack lives */
	if ((ut->uu_flag & UT_ALTSTACK) && !oonstack &&
		(ps->ps_sigonstack & sigmask(sig))) {
		sp = ut->uu_sigstk.ss_sp;
		sp += ut->uu_sigstk.ss_size;
		stack_size = ut->uu_sigstk.ss_size;
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	}
	else {
		if (ctx32 == 0)
			sp = mctx64.ss.r1;
		else
			sp = CAST_USER_ADDR_T(mctx.ss.r1);
	}

	
	/* put siginfo on top */
        
	/* preserve RED ZONE area */
	if (IS_64BIT_PROCESS(p))
		sp = TRUNC_DOWN64(sp, C_64_REDZONE_LEN, C_64_STK_ALIGN);
	else
		sp = TRUNC_DOWN32(sp, C_32_REDZONE_LEN, C_32_STK_ALIGN);

	/* next are the saved registers */
	if ((ctx32 == 0) || dualcontext) {
		sp -= sizeof(struct mcontext64);
		p_mctx64 = sp;
	}
	if ((ctx32 == 1) || dualcontext) {
		sp -= sizeof(struct mcontext);
		p_mctx = sp;
	}
        
	if (IS_64BIT_PROCESS(p)) {
		/* context goes first on stack */
		sp -= sizeof(struct user_ucontext64);
		p_uctx = sp;

		/* this is where siginfo goes on stack */
		sp -= sizeof(user_siginfo_t);
		p_sinfo = sp;
		
		sp = TRUNC_DOWN64(sp, C_64_PARAMSAVE_LEN+C_64_LINKAGE_LEN, C_64_STK_ALIGN);
	} else {
		/*
		 * struct ucontext and struct ucontext64 are identical in
		 * size and content; the only difference is the internal
		 * pointer type for the last element, which makes no
		 * difference for the copyout().
		 */

		/* context goes first on stack */
		sp -= sizeof(struct ucontext64);
		p_uctx = sp;

		/* this is where siginfo goes on stack */
		sp -= sizeof(siginfo_t);
		p_sinfo = sp;

		sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN+C_32_LINKAGE_LEN, C_32_STK_ALIGN);
	}

	uctx.uc_onstack = oonstack;
	uctx.uc_sigmask = mask;
	uctx.uc_stack.ss_sp = sp;
	uctx.uc_stack.ss_size = stack_size;
	if (oonstack)
		uctx.uc_stack.ss_flags |= SS_ONSTACK;
		
	uctx.uc_link = 0;
	if (ctx32 == 0)
		uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE64_COUNT + PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));
	else
		uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int));
	
	if (vec_used) 
		uctx.uc_mcsize += (size_t)(PPC_VECTOR_STATE_COUNT * sizeof(int));
        
	if (ctx32 == 0)
		uctx.uc_mcontext64 = p_mctx64;
	else
		uctx.uc_mcontext64 = p_mctx;

	/* setup siginfo */
	bzero((caddr_t)&sinfo, sizeof(user_siginfo_t));
	sinfo.si_signo = sig;
	if (ctx32 == 0) {
		sinfo.si_addr = mctx64.ss.srr0;
		sinfo.pad[0] = mctx64.ss.r1;
	} else {
		sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.srr0);
		sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.r1);
	}

	switch (sig) {
		case SIGILL:
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 here.
			 */
			if(ctx32 == 0) {
				if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
					sinfo.si_code = ILL_ILLOPC;
				else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
					sinfo.si_code = ILL_PRVOPC;
				else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
					sinfo.si_code = ILL_ILLTRP;
				else
					sinfo.si_code = ILL_NOOP;
			} else {
				if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
					sinfo.si_code = ILL_ILLOPC;
				else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
					sinfo.si_code = ILL_PRVOPC;
				else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
					sinfo.si_code = ILL_ILLTRP;
				else
					sinfo.si_code = ILL_NOOP;
			}
			break;
		case SIGFPE:
#define FPSCR_VX	2
#define FPSCR_OX	3
#define FPSCR_UX	4
#define FPSCR_ZX	5
#define FPSCR_XX	6
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 here.
			 */
			if(ctx32 == 0) {
				if (mctx64.fs.fpscr & (1 << (31 - FPSCR_VX)))
					sinfo.si_code = FPE_FLTINV;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_OX)))
					sinfo.si_code = FPE_FLTOVF;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_UX)))
					sinfo.si_code = FPE_FLTUND;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_ZX)))
					sinfo.si_code = FPE_FLTDIV;
				else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_XX)))
					sinfo.si_code = FPE_FLTRES;
				else
					sinfo.si_code = FPE_NOOP;
			} else {
				if (mctx.fs.fpscr & (1 << (31 - FPSCR_VX)))
					sinfo.si_code = FPE_FLTINV;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_OX)))
					sinfo.si_code = FPE_FLTOVF;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_UX)))
					sinfo.si_code = FPE_FLTUND;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_ZX)))
					sinfo.si_code = FPE_FLTDIV;
				else if (mctx.fs.fpscr & (1 << (31 - FPSCR_XX)))
					sinfo.si_code = FPE_FLTRES;
				else
					sinfo.si_code = FPE_NOOP;
			}
			break;

		case SIGBUS:
			if (ctx32 == 0) {
				sinfo.si_addr = mctx64.es.dar;
			} else {
				sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
			}
			/* on ppc we generate only if EXC_PPC_UNALIGNED */
			sinfo.si_code = BUS_ADRALN;
			break;

		case SIGSEGV:
			/*
			 * If it's 64 bit and not a dual context, mctx will
			 * contain uninitialized data, so we have to use
			 * mctx64 here.
			 */
			if (ctx32 == 0) {
				sinfo.si_addr = mctx64.es.dar;
				/* First check in srr1 and then in dsisr */
				if (mctx64.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else if (mctx64.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else
					sinfo.si_code = SEGV_MAPERR;
			} else {
				sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar);
				/* First check in srr1 and then in dsisr */
				if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
					sinfo.si_code = SEGV_ACCERR;
				else
					sinfo.si_code = SEGV_MAPERR;
			}
			break;
		default:
		{
			int status_and_exitcode;

			/*
			 * All other signals need to fill out a minimum set of
			 * information for the siginfo structure passed into
			 * the signal handler, if SA_SIGINFO was specified.
			 *
			 * p->si_status actually contains both the status and
			 * the exit code; we save it off in its own variable
			 * for later breakdown.
			 */
			proc_lock(p);
			sinfo.si_pid = p->si_pid;
			p->si_pid = 0;
			status_and_exitcode = p->si_status;
			p->si_status = 0;
			sinfo.si_uid = p->si_uid;
			p->si_uid = 0;
			sinfo.si_code = p->si_code;
			p->si_code = 0;
			proc_unlock(p);
			if (sinfo.si_code == CLD_EXITED) {
				if (WIFEXITED(status_and_exitcode)) 
					sinfo.si_code = CLD_EXITED;
				else if (WIFSIGNALED(status_and_exitcode)) {
					if (WCOREDUMP(status_and_exitcode)) {
						sinfo.si_code = CLD_DUMPED;
						status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
					} else {
						sinfo.si_code = CLD_KILLED;
						status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode);
					}
				}
			}
			/*
			 * The recorded status contains the exit code and the
			 * signal information, but the information to be passed
			 * in the siginfo to the handler is supposed to only
			 * contain the status, so we have to shift it out.
			 */
			sinfo.si_status = WEXITSTATUS(status_and_exitcode);
			break;
		}
	}


	/* copy info out to user space */
	if (IS_64BIT_PROCESS(p)) {

		/* XXX truncates catcher address to uintptr_t */
		DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &sinfo,
			void (*)(void), CAST_DOWN(sig_t, catcher));

		if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext64)))
			goto bad;
		if (copyout(&sinfo, p_sinfo, sizeof(user_siginfo_t)))
			goto bad;
	} else {