Example #1
/*
 * cloneproc
 *
 * Description: Create a new process from a specified process.
 *
 * Parameters:	parent_task		The parent task to be cloned, or
 *					TASK_NULL if the new task is not to
 *					inherit the task and VM
 *					characteristics of the parent
 *		parent_proc		The parent process to be cloned
 *		inherit_memory		True if the child is to inherit
 *					memory from the parent; if this is
 *					true, then parent_task must also be
 *					non-NULL
 *
 * Returns:	!NULL			pointer to new child thread
 *		NULL			Failure (unspecified)
 *
 * Note:	On return, the newly created child process has its signal
 *		lock held to block delivery of signals to it if called with
 *		the lock set.  The fork() code needs to explicitly remove
 *		this lock before signals can be delivered.
 *
 *		In the case of bootstrap, this function can be called from
 *		bsd_utaskbootstrap() in order to bootstrap the first process;
 *		the net effect is to provide a uthread structure for the
 *		kernel process associated with the kernel task.
 *
 * XXX:		Tristating using the value parent_task as the major key
 *		and inherit_memory as the minor key is something we should
 *		refactor later; we owe the current semantics, ultimately,
 *		to the semantics of task_create_internal.  For now, we will
 *		live with this being somewhat awkward.
 */
thread_t
cloneproc(task_t parent_task, proc_t parent_proc, int inherit_memory)
{
	task_t child_task;
	proc_t child_proc;
	thread_t child_thread = NULL;

	if ((child_proc = forkproc(parent_proc)) == NULL) {
		/* Failed to allocate new process */
		goto bad;
	}

	child_thread = fork_create_child(parent_task, child_proc, inherit_memory,
	    (parent_task == TASK_NULL) ? FALSE : (parent_proc->p_flag & P_LP64));

	if (child_thread == NULL) {
		/*
		 * Failed to create thread; now we must deconstruct the new
		 * process previously obtained from forkproc().
		 */
		forkproc_free(child_proc);
		goto bad;
	}

	child_task = get_threadtask(child_thread);
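	/*
	 * Propagate the parent's 64-bit (LP64) state to both the child task
	 * and the child proc's p_flag, so the two views of the process agree.
	 */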
	if (parent_proc->p_flag & P_LP64) {
		task_set_64bit(child_task, TRUE);
		OSBitOrAtomic(P_LP64, (UInt32 *)&child_proc->p_flag);
#ifdef __ppc__
		/*
		 * PPC51: ppc64 is limited to 51-bit addresses.
		 * Memory above that limit is handled specially at
		 * the pmap level.
		 */
		pmap_map_sharedpage(child_task, get_map_pmap(get_task_map(child_task)));
#endif /* __ppc__ */
	} else {
		task_set_64bit(child_task, FALSE);
		OSBitAndAtomic(~((uint32_t)P_LP64), (UInt32 *)&child_proc->p_flag);
	}

	/* make child visible */
	pinsertchild(parent_proc, child_proc);

	/*
	 * Make child runnable, set start time.
	 */
	child_proc->p_stat = SRUN;
bad:
	return(child_thread);
}
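
The following is a minimal sketch, not the real fork path, of how a caller in
the style of fork() might drive cloneproc(). The function name fork1_sketch
and the ENOMEM error mapping are illustrative assumptions; the locking,
auditing, and per-process duplication that a real fork performs are omitted.

/*
 * Illustrative only: a hypothetical, stripped-down caller of cloneproc().
 * A real fork path does considerably more work before and after this point.
 */
static int
fork1_sketch(proc_t parent_proc)
{
	thread_t child_thread;

	/*
	 * Clone both the task and the process; passing TRUE asks that the
	 * child inherit the parent's memory, so a parent task must be
	 * supplied (here, the caller's own task).
	 */
	child_thread = cloneproc(current_task(), parent_proc, TRUE);
	if (child_thread == NULL)
		return (ENOMEM);	/* hypothetical error mapping */

	/*
	 * cloneproc() returns with the child's signal lock held (see the
	 * Note above); the real fork path drops that lock, finishes
	 * duplicating per-process state, and then resumes the child
	 * thread.  Those later steps are omitted from this sketch.
	 */
	return (0);
}
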
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * that is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is itself constrained to 32 bits
 * in the file format.  We read the commands section into a kernel buffer
 * and then parse it for the mach-o load_command segment(s); we are only
 * interested in a subset of the total set of possible commands.
 */
static
load_return_t
parse_machfile(
    struct vnode 		*vp,
    vm_map_t		map,
    thread_t		thread,
    struct mach_header	*header,
    off_t			file_offset,
    off_t			macho_size,
    int			depth,
    load_result_t		*result
)
{
    uint32_t		ncmds;
    struct load_command	*lcp;
    struct dylinker_command	*dlp = 0;
    integer_t		dlarchbits = 0;
    void *			pager;
    load_return_t		ret = LOAD_SUCCESS;
    caddr_t			addr;
    void *			kl_addr;
    vm_size_t		size, kl_size;
    size_t			offset;
    size_t			oldoffset;	/* for overflow check */
    int			pass;
    proc_t			p = current_proc();		/* XXXX */
    int			error;
    int			resid = 0;
    task_t task;
    size_t			mach_header_sz = sizeof(struct mach_header);
    boolean_t		abi64;
    boolean_t		got_code_signatures = FALSE;

    if (header->magic == MH_MAGIC_64 ||
            header->magic == MH_CIGAM_64) {
        mach_header_sz = sizeof(struct mach_header_64);
    }

    /*
     *	Break infinite recursion
     */
    if (depth > 6) {
        return(LOAD_FAILURE);
    }

    task = (task_t)get_threadtask(thread);

    depth++;

    /*
     *	Check to see if right machine type.
     */
    if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
            !grade_binary(header->cputype,
                          header->cpusubtype & ~CPU_SUBTYPE_MASK))
        return(LOAD_BADARCH);

    abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

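    /*
     * Constrain the image type by recursion depth: the main executable
     * (and object/preload images) may only appear at depth 1, a dylib
     * may not be exec'd directly at depth 1, and the dynamic linker is
     * only acceptable at depth 2, on behalf of the main executable.
     */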
    switch (header->filetype) {

    case MH_OBJECT:
    case MH_EXECUTE:
    case MH_PRELOAD:
        if (depth != 1) {
            return (LOAD_FAILURE);
        }
        break;

    case MH_FVMLIB:
    case MH_DYLIB:
        if (depth == 1) {
            return (LOAD_FAILURE);
        }
        break;

    case MH_DYLINKER:
        if (depth != 2) {
            return (LOAD_FAILURE);
        }
        break;

    default:
        return (LOAD_FAILURE);
    }

    /*
     *	Get the pager for the file.
     */
    pager = (void *) ubc_getpager(vp);

    /*
     *	Map portion that must be accessible directly into
     *	kernel's map.
     */
    if ((mach_header_sz + header->sizeofcmds) > macho_size)
        return(LOAD_BADMACHO);

    /*
     *	Round size of Mach-O commands up to page boundary.
     */
    size = round_page(mach_header_sz + header->sizeofcmds);
    if (size <= 0)
        return(LOAD_BADMACHO);

    /*
     * Map the load commands into kernel memory.
     */
    addr = 0;
    kl_size = size;
    kl_addr = kalloc(size);
    addr = (caddr_t)kl_addr;
    if (addr == NULL)
        return(LOAD_NOSPACE);

    error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
                    UIO_SYSSPACE32, 0, kauth_cred_get(), &resid, p);
    if (error) {
        if (kl_addr)
            kfree(kl_addr, kl_size);
        return(LOAD_IOERROR);
    }
    /* (void)ubc_map(vp, PROT_EXEC); */ /* NOT HERE */

    /*
     *	Scan through the commands, processing each one as necessary.
     */
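    /*
     * The commands are processed in two passes: segments (LC_SEGMENT and
     * LC_SEGMENT_64) are mapped on pass 1; thread state, the dynamic
     * linker request, and code signatures are handled on pass 2, after
     * the segments they refer to have been mapped.
     */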
    for (pass = 1; pass <= 2; pass++) {
        /*
         * Loop through each of the load_commands indicated by the
         * Mach-O header; if an absurd value is provided, the offset
         * simply increments past the end of the reserved section and
         * is rejected by the prevalidation check below, so we are
         * implicitly fail-safe.
         */
        offset = mach_header_sz;
        ncmds = header->ncmds;
        while (ncmds--) {
            /*
             *	Get a pointer to the command.
             */
            lcp = (struct load_command *)(addr + offset);
            oldoffset = offset;
            offset += lcp->cmdsize;

            /*
             * Perform prevalidation of the struct load_command
             * before we attempt to use its contents.  Invalid
             * values are ones which result in an overflow, or
             * which can not possibly be valid commands, or which
             * straddle or exist past the reserved section at the
             * start of the image.
             */
            if (oldoffset > offset ||
                    lcp->cmdsize < sizeof(struct load_command) ||
                    offset > header->sizeofcmds + mach_header_sz) {
                ret = LOAD_BADMACHO;
                break;
            }

            /*
             * Act on struct load_command's for which kernel
             * intervention is required.
             */
            switch(lcp->cmd) {
            case LC_SEGMENT_64:
                if (pass != 1)
                    break;
                ret = load_segment_64(
                          (struct segment_command_64 *)lcp,
                          pager,
                          file_offset,
                          macho_size,
                          ubc_getsize(vp),
                          map,
                          result);
                break;
            case LC_SEGMENT:
                if (pass != 1)
                    break;
                ret = load_segment(
                          (struct segment_command *) lcp,
                          pager,
                          file_offset,
                          macho_size,
                          ubc_getsize(vp),
                          map,
                          result);
                break;
            case LC_THREAD:
                if (pass != 2)
                    break;
                ret = load_thread((struct thread_command *)lcp,
                                  thread,
                                  result);
                break;
            case LC_UNIXTHREAD:
                if (pass != 2)
                    break;
                ret = load_unixthread(
                          (struct thread_command *) lcp,
                          thread,
                          result);
                break;
            case LC_LOAD_DYLINKER:
                if (pass != 2)
                    break;
                if ((depth == 1) && (dlp == 0)) {
                    dlp = (struct dylinker_command *)lcp;
                    dlarchbits = (header->cputype & CPU_ARCH_MASK);
                } else {
                    ret = LOAD_FAILURE;
                }
                break;
            case LC_CODE_SIGNATURE:
                /* CODE SIGNING */
                if (pass != 2)
                    break;
                /* pager -> uip ->
                   load signatures & store in uip
                   set VM object "signed_pages"
                */
                ret = load_code_signature(
                          (struct linkedit_data_command *) lcp,
                          vp,
                          file_offset,
                          macho_size,
                          header->cputype,
                          (depth == 1) ? result : NULL);
                if (ret != LOAD_SUCCESS) {
                    printf("proc %d: load code signature error %d "
                           "for file \"%s\"\n",
                           p->p_pid, ret, vp->v_name);
                    ret = LOAD_SUCCESS; /* ignore error */
                } else {
                    got_code_signatures = TRUE;
                }
                break;
            default:
                /* Other commands are ignored by the kernel */
                ret = LOAD_SUCCESS;
                break;
            }
            if (ret != LOAD_SUCCESS)
                break;
        }
        if (ret != LOAD_SUCCESS)
            break;
    }
    if (ret == LOAD_SUCCESS) {
        if (! got_code_signatures) {
            struct cs_blob *blob;
            /* no embedded signatures: look for detached ones */
            blob = ubc_cs_blob_get(vp, -1, file_offset);
            if (blob != NULL) {
                /* get flags to be applied to the process */
                result->csflags |= blob->csb_flags;
            }
        }

        if (dlp != 0)
            ret = load_dylinker(dlp, dlarchbits, map, thread, depth, result, abi64);

        if (depth == 1) {
            if (result->thread_count == 0) {
                ret = LOAD_FAILURE;
            } else if ( abi64 ) {
#ifdef __ppc__
                /* Map in 64-bit commpage */
                /* LP64todo - make this clean */
                /*
                 * PPC51: ppc64 is limited to 51-bit addresses.
                 * Memory above that limit is handled specially
                 * at the pmap level.
                 */
                pmap_map_sharedpage(current_task(), get_map_pmap(map));
#endif /* __ppc__ */
            }
        }
    }

    if (kl_addr)
        kfree(kl_addr, kl_size);

    if (ret == LOAD_SUCCESS)
        (void)ubc_map(vp, PROT_READ | PROT_EXEC);

    return(ret);
}
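
As a rough illustration of how this routine is entered, the sketch below shows
a hypothetical top-level caller in the style of a load_machfile() wrapper,
invoking parse_machfile() at depth 1 for a main executable. The wrapper name
load_machfile_sketch and its argument handling are assumptions; reading the
mach_header, constructing the target vm_map, architecture grading, and error
unwinding are all presumed to happen elsewhere.

/*
 * Illustrative only: a hypothetical wrapper that drives parse_machfile()
 * for a main executable.  It assumes the caller has already read the
 * mach_header from the vnode and constructed the destination vm_map.
 */
static load_return_t
load_machfile_sketch(
    struct vnode	*vp,
    vm_map_t		map,
    thread_t		thread,
    struct mach_header	*header,
    off_t		file_offset,
    off_t		macho_size,
    load_result_t	*result)
{
    load_return_t	lret;

    /* Start from a clean result; parse_machfile() fills it in. */
    bzero(result, sizeof(*result));

    /*
     * Depth 1 marks the main executable.  Recursion to depth 2 for the
     * dynamic linker happens inside parse_machfile() itself, via
     * load_dylinker(), when an LC_LOAD_DYLINKER command is found.
     */
    lret = parse_machfile(vp, map, thread, header,
                          file_offset, macho_size, 1, result);

    return (lret);
}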