/**
 * Gets the virtual memory map the specified object is mapped into.
 *
 * @returns VM map handle on success, NULL if no map.
 * @param   pMem                The memory object.
 */
DECLINLINE(vm_map_t) rtR0MemObjDarwinGetMap(PRTR0MEMOBJINTERNAL pMem)
{
    switch (pMem->enmType)
    {
        case RTR0MEMOBJTYPE_PAGE:
        case RTR0MEMOBJTYPE_LOW:
        case RTR0MEMOBJTYPE_CONT:
            return kernel_map;

        case RTR0MEMOBJTYPE_PHYS:
        case RTR0MEMOBJTYPE_PHYS_NC:
            return NULL; /* pretend these have no mapping at the moment. */

        case RTR0MEMOBJTYPE_LOCK:
            return pMem->u.Lock.R0Process == NIL_RTR0PROCESS
                ? kernel_map
                : get_task_map((task_t)pMem->u.Lock.R0Process);

        case RTR0MEMOBJTYPE_RES_VIRT:
            return pMem->u.ResVirt.R0Process == NIL_RTR0PROCESS
                ? kernel_map
                : get_task_map((task_t)pMem->u.ResVirt.R0Process);

        case RTR0MEMOBJTYPE_MAPPING:
            return pMem->u.Mapping.R0Process == NIL_RTR0PROCESS
                ? kernel_map
                : get_task_map((task_t)pMem->u.Mapping.R0Process);

        default:
            return NULL;
    }
}
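
For context, a caller typically checks the returned handle before touching the map. The sketch below is illustrative only: the helper name rtR0MemObjDarwinDoUnmap is hypothetical, and it assumes the object's pv/cb fields describe the mapping and that vm_map_remove() is available to the caller.

DECLINLINE(int) rtR0MemObjDarwinDoUnmap(PRTR0MEMOBJINTERNAL pMem)
{
    vm_map_t hMap = rtR0MemObjDarwinGetMap(pMem);
    if (hMap == NULL)
        return VERR_NOT_SUPPORTED; /* no mapping to undo (e.g. PHYS/PHYS_NC) */

    /* Hypothetical teardown: remove the object's range from the map. */
    kern_return_t kr = vm_map_remove(hMap,
                                     (vm_map_offset_t)pMem->pv,
                                     (vm_map_offset_t)pMem->pv + pMem->cb,
                                     VM_MAP_NO_FLAGS);
    return kr == KERN_SUCCESS ? VINF_SUCCESS : VERR_GENERAL_FAILURE;
}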
Example #2
/*
 * fork_create_child
 *
 * Description:	Common operations associated with the creation of a child
 *		process
 *
 * Parameters:	parent_task		parent task
 *		child_proc		child process
 *		inherit_memory		TRUE if the parent's address space is
 *					to be inherited by the child
 *		is64bit			TRUE if the child being created will
 *					be associated with a 64 bit process
 *					rather than a 32 bit process
 *
 * Note:	This code is called in the fork() case, from the execve() call
 *		graph, if implementing an execve() following a vfork(), from
 *		the posix_spawn() call graph (which implicitly includes a
 *		vfork() equivalent call), and in the system bootstrap case.
 *
 *		It creates a new task and thread (and as a side effect of the
 *		thread creation, a uthread), which is then associated with the
 *		process 'child'.  If the parent process address space is to
 *		be inherited, then a flag indicates that the newly created
 *		task should inherit this from the parent task.
 *
 *		As a special concession to bootstrapping the initial process
 *		in the system, it's possible for 'parent_task' to be TASK_NULL;
 *		in this case, 'inherit_memory' MUST be FALSE.
 */
thread_t
fork_create_child(task_t parent_task, proc_t child_proc, int inherit_memory, int is64bit)
{
	thread_t	child_thread = NULL;
	task_t		child_task;
	kern_return_t	result;

	/* Create a new task for the child process */
	result = task_create_internal(parent_task,
					inherit_memory,
					is64bit,
					&child_task);
	if (result != KERN_SUCCESS) {
		printf("execve: task_create_internal failed.  Code: %d\n", result);
		goto bad;
	}

	/* Set the child process task to the new task */
	child_proc->task = child_task;

	/* Set child task process to child proc */
	set_bsdtask_info(child_task, child_proc);

	/* Propagate CPU limit timer from parent */
	if (timerisset(&child_proc->p_rlim_cpu))
		task_vtimer_set(child_task, TASK_VTIMER_RLIM);

	/* Set/clear 64 bit vm_map flag */
	if (is64bit)
		vm_map_set_64bit(get_task_map(child_task));
	else
		vm_map_set_32bit(get_task_map(child_task));

#if CONFIG_MACF
	/* Update task for MAC framework */
	/* valid to use p_ucred as child is still not running ... */
	mac_task_label_update_cred(child_proc->p_ucred, child_task);
#endif

	/*
	 * Set child process BSD visible scheduler priority if nice value
	 * inherited from parent
	 */
	if (child_proc->p_nice != 0)
		resetpriority(child_proc);

	/* Create a new thread for the child process */
	result = thread_create(child_task, &child_thread);
	if (result != KERN_SUCCESS) {
		printf("execve: thread_create failed. Code: %d\n", result);
		task_deallocate(child_task);
		child_task = NULL;
	}
bad:
	thread_yield_internal(1);

	return(child_thread);
}
Example #3
int
cs_allow_invalid(struct proc *p)
{
#if MACH_ASSERT
	lck_mtx_assert(&p->p_mlock, LCK_MTX_ASSERT_NOTOWNED);
#endif
#if CONFIG_MACF && CONFIG_ENFORCE_SIGNED_CODE
	/* There needs to be a MAC policy to implement this hook, or else the
	 * kill bits will be cleared here every time. If we have 
	 * CONFIG_ENFORCE_SIGNED_CODE, we can assume there is a policy
	 * implementing the hook. 
	 */
	if (mac_proc_check_run_cs_invalid(p) != 0) {
		if (cs_debug)
			printf("CODE SIGNING: cs_allow_invalid() "
			       "not allowed: pid %d\n",
			       p->p_pid);
		return 0;
	}
	if (cs_debug)
		printf("CODE SIGNING: cs_allow_invalid() "
		       "allowed: pid %d\n",
		       p->p_pid);
	proc_lock(p);
	p->p_csflags &= ~(CS_KILL | CS_HARD);
	proc_unlock(p);
	vm_map_switch_protect(get_task_map(p->task), FALSE);
#endif
	return (p->p_csflags & (CS_KILL | CS_HARD)) == 0;
}
Example #4
__private_extern__ kern_return_t
chudxnu_task_write(
				   task_t		task,
				   uint64_t	useraddr,
				   void		*kernaddr,
				   vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;
	
	if (ml_at_interrupt_context()) {
		return KERN_FAILURE; // can't poke into tasks on interrupt stack
	}

	/*
	 * pmap layer requires interrupts to be on
	 */
	old_level = ml_set_interrupts_enabled(TRUE);

	if (current_task() == task) {
		if (copyout(kernaddr, useraddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		vm_map_t map = get_task_map(task);
		ret = vm_map_write_user(map, kernaddr, useraddr, size);
	}
	
	ml_set_interrupts_enabled(old_level);

	return ret;
}
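
The read direction is symmetric: XNU pairs vm_map_write_user() with vm_map_read_user(), and copyout() with copyin(). A companion routine would plausibly look like the following sketch, modeled on the function above rather than copied from the source:

__private_extern__ kern_return_t
chudxnu_task_read(
				  task_t	task,
				  void		*kernaddr,
				  uint64_t	usraddr,
				  vm_size_t	size)
{
	kern_return_t ret = KERN_SUCCESS;
	boolean_t old_level;

	if (ml_at_interrupt_context()) {
		return KERN_FAILURE; // can't peek into tasks on interrupt stack
	}

	/* pmap layer requires interrupts to be on */
	old_level = ml_set_interrupts_enabled(TRUE);

	if (current_task() == task) {
		/* same map: plain copyin suffices */
		if (copyin(usraddr, kernaddr, size)) {
			ret = KERN_FAILURE;
		}
	} else {
		/* foreign task: copy through its vm_map */
		vm_map_t map = get_task_map(task);
		ret = vm_map_read_user(map, usraddr, kernaddr, size);
	}

	ml_set_interrupts_enabled(old_level);

	return ret;
}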
Example #5
/* hw.pagesize and hw.tbfrequency are expected as 64 bit values */
static int
sysctl_pagesize
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	vm_map_t map = get_task_map(current_task());
	long long l = vm_map_page_size(map);
	return sysctl_io_number(req, l, sizeof(l), NULL, NULL);
}
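
From user space, the value this handler publishes can be read back with sysctlbyname(). A minimal consumer, assuming a 64-bit result as the comment above states:

#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int main(void)
{
	int64_t pagesize = 0;
	size_t len = sizeof(pagesize);

	/* hw.pagesize is delivered as a 64 bit quantity (see handler above) */
	if (sysctlbyname("hw.pagesize", &pagesize, &len, NULL, 0) != 0) {
		perror("sysctlbyname");
		return 1;
	}
	printf("hw.pagesize = %lld\n", (long long)pagesize);
	return 0;
}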
Example #6
/*
 * cloneproc
 *
 * Description: Create a new process from a specified process.
 *
 * Parameters:	parent_task		The parent task to be cloned, or
 *					TASK_NULL if the new task is not to
 *					inherit the VM characteristics of
 *					the parent
 *		parent_proc		The parent process to be cloned
 *		inherit_memory		TRUE if the child is to inherit
 *					memory from the parent; if this is
 *					TRUE, then parent_task must also
 *					be non-NULL
 *
 * Returns:	!NULL			pointer to new child thread
 *		NULL			Failure (unspecified)
 *
 * Note:	On return, the newly created child process has the signal
 *		lock held to block delivery of signals to it if called with
 *		the lock set.  The fork() code needs to explicitly remove
 *		this lock before signals can be delivered.
 *
 *		In the case of bootstrap, this function can be called from
 *		bsd_utaskbootstrap() in order to bootstrap the first process;
 *		the net effect is to provide a uthread structure for the
 *		kernel process associated with the kernel task.
 *
 * XXX:		Tristating using the value parent_task as the major key
 *		and inherit_memory as the minor key is something we should
 *		refactor later; we owe the current semantics, ultimately,
 *		to the semantics of task_create_internal.  For now, we will
 *		live with this being somewhat awkward.
 */
thread_t
cloneproc(task_t parent_task, proc_t parent_proc, int inherit_memory)
{
	task_t child_task;
	proc_t child_proc;
	thread_t child_thread = NULL;

	if ((child_proc = forkproc(parent_proc)) == NULL) {
		/* Failed to allocate new process */
		goto bad;
	}

	child_thread = fork_create_child(parent_task, child_proc, inherit_memory, (parent_task == TASK_NULL) ? FALSE : (parent_proc->p_flag & P_LP64));

	if (child_thread == NULL) {
		/*
		 * Failed to create thread; now we must deconstruct the new
		 * process previously obtained from forkproc().
		 */
		forkproc_free(child_proc);
		goto bad;
	}

	child_task = get_threadtask(child_thread);
	if (parent_proc->p_flag & P_LP64) {
		task_set_64bit(child_task, TRUE);
		OSBitOrAtomic(P_LP64, (UInt32 *)&child_proc->p_flag);
#ifdef __ppc__
		/*
		 * PPC51: ppc64 is limited to 51-bit addresses.
		 * Memory above that limit is handled specially at
		 * the pmap level.
		 */
		pmap_map_sharedpage(child_task, get_map_pmap(get_task_map(child_task)));
#endif /* __ppc__ */
	} else {
		task_set_64bit(child_task, FALSE);
		OSBitAndAtomic(~((uint32_t)P_LP64), (UInt32 *)&child_proc->p_flag);
	}

	/* make child visible */
	pinsertchild(parent_proc, child_proc);

	/*
	 * Make child runnable, set start time.
	 */
	child_proc->p_stat = SRUN;
bad:
	return(child_thread);
}
Example #7
void log_unnest_badness(vm_map_t m, vm_map_offset_t s, vm_map_offset_t e) {
	struct timeval tv;
	const char *pcommstr;

	if (shared_region_unnest_logging == 0)
		return;

	if (shared_region_unnest_logging == 1) {
		microtime(&tv);
		if ((tv.tv_sec - last_unnest_log_time) < vm_shared_region_unnest_log_interval) {
			if (shared_region_unnest_log_count++ > shared_region_unnest_log_count_threshold)
				return;
		}
		else {
			last_unnest_log_time = tv.tv_sec;
			shared_region_unnest_log_count = 0;
		}
	}

	pcommstr = current_proc()->p_comm;

	printf("%s (map: %p) triggered DYLD shared region unnest for map: %p, region 0x%qx->0x%qx. While not abnormal for debuggers, this increases system memory footprint until the target exits.\n", pcommstr, get_task_map(current_proc()->task), m, (uint64_t)s, (uint64_t)e);
}
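
The shared_region_unnest_logging knob that gates this function is exposed as a sysctl (vm.shared_region_unnest_logging on stock kernels, to the best of my knowledge), so it can be adjusted from user space; a brief sketch:

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int main(void)
{
	int off = 0;	/* 0 disables unnest logging per the check above */

	/* name assumed from the variable's sysctl registration */
	if (sysctlbyname("vm.shared_region_unnest_logging",
	                 NULL, NULL, &off, sizeof(off)) != 0) {
		perror("sysctlbyname");	/* typically requires root */
		return 1;
	}
	return 0;
}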
Example #8
/*
 * Supporting some variables requires us to do "real" work.  We 
 * gather some of that here.
 */
static int
sysctl_hw_generic(__unused struct sysctl_oid *oidp, __unused void *arg1,
	int arg2, struct sysctl_req *req)
{
	char dummy[65];
	int  epochTemp;
	ml_cpu_info_t cpu_info;
	int val, doquad;
	long long qval;
	host_basic_info_data_t hinfo;
	kern_return_t kret;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	/*
	 * Test and mask off the 'return quad' flag.
	 * Note that only some things here support it.
	 */
	doquad = arg2 & CTLHW_RETQUAD;
	arg2 &= ~CTLHW_RETQUAD;

	ml_cpu_get_info(&cpu_info);

#define BSD_HOST 1
	kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

	/*
	 * Handle various OIDs.
	 *
	 * OIDs that can return int or quad set val and qval and then break.
	 * Errors and int-only values return inline.
	 */
	switch (arg2) {
	case HW_NCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.max_cpus));
		} else {
			return(EINVAL);
		}
	case HW_AVAILCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.avail_cpus));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_PHYSICALCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.physical_cpu));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_PHYSICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.physical_cpu_max));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_LOGICALCPU:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.logical_cpu));
		} else {
			return(EINVAL);
		}
	case HW_LOCAL_LOGICALCPUMAX:
		if (kret == KERN_SUCCESS) {
			return(SYSCTL_RETURN(req, hinfo.logical_cpu_max));
		} else {
			return(EINVAL);
		}
	case HW_PAGESIZE:
	{
		vm_map_t map = get_task_map(current_task());
		val = vm_map_page_size(map);
		qval = (long long)val;
		break;
	}
	case HW_CACHELINE:
		val = cpu_info.cache_line_size;
		qval = (long long)val;
		break;
	case HW_L1ICACHESIZE:
		val = cpu_info.l1_icache_size;
		qval = (long long)val;
		break;
	case HW_L1DCACHESIZE:
		val = cpu_info.l1_dcache_size;
		qval = (long long)val;
		break;
	case HW_L2CACHESIZE:
		if (cpu_info.l2_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		val = cpu_info.l2_cache_size;
		qval = (long long)val;
		break;
	case HW_L3CACHESIZE:
		if (cpu_info.l3_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		val = cpu_info.l3_cache_size;
		qval = (long long)val;
		break;

		/*
		 * Deprecated variables.  We still support these for
		 * backwards compatibility purposes only.
		 */
	case HW_MACHINE:
		bzero(dummy, sizeof(dummy));
		if (!PEGetMachineName(dummy, 64))
			return(EINVAL);
		dummy[64] = 0;
		return(SYSCTL_OUT(req, dummy, strlen(dummy) + 1));
	case HW_MODEL:
		bzero(dummy, sizeof(dummy));
		if (!PEGetModelName(dummy, 64))
			return(EINVAL);
		dummy[64] = 0;
		return(SYSCTL_OUT(req, dummy, strlen(dummy) + 1));
	case HW_USERMEM:
		{
			int usermem = mem_size - vm_page_wire_count * page_size;

			return(SYSCTL_RETURN(req, usermem));
		}
	case HW_EPOCH:
		epochTemp = PEGetPlatformEpoch();
		if (epochTemp == -1)
			return(EINVAL);
		return(SYSCTL_RETURN(req, epochTemp));
	case HW_VECTORUNIT: {
		int vector = cpu_info.vector_unit == 0 ? 0 : 1;
		return(SYSCTL_RETURN(req, vector));
	}
	case HW_L2SETTINGS:
		if (cpu_info.l2_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		return(SYSCTL_RETURN(req, cpu_info.l2_settings));
	case HW_L3SETTINGS:
		if (cpu_info.l3_cache_size == 0xFFFFFFFF)
			return(EINVAL);
		return(SYSCTL_RETURN(req, cpu_info.l3_settings));
	default:
		return(ENOTSUP);
	}
	/*
	 * Callers may come to us with either int or quad buffers.
	 */
	if (doquad) {
		return(SYSCTL_RETURN(req, qval));
	}
	return(SYSCTL_RETURN(req, val));
}
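
User space reaches this handler through the sysctl MIB interface. A minimal sketch querying one of the int-only OIDs handled above (HW_NCPU):

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int main(void)
{
	int mib[2] = { CTL_HW, HW_NCPU };	/* routed to the HW_NCPU case above */
	int ncpu = 0;
	size_t len = sizeof(ncpu);

	if (sysctl(mib, 2, &ncpu, &len, NULL, 0) != 0) {
		perror("sysctl");
		return 1;
	}
	printf("hw.ncpu = %d\n", ncpu);
	return 0;
}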
Example #9
bool IOBufferMemoryDescriptor::initWithOptions(
    IOOptionBits options,
    vm_size_t    capacity,
    vm_offset_t  alignment,
    task_t	    inTask)
{
    vm_map_t map = 0;
    IOOptionBits iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options   	  = options;
    _capacity     = capacity;
    _physAddrs    = 0;
    _physSegCount = 0;
    _buffer	  = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions  |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (inTask == kernel_task)
        {
            /* Allocate some kernel address space. */
            _buffer = IOMallocPageable(capacity, alignment);
            if (_buffer)
                map = IOPageableMapForAddress((vm_address_t) _buffer);
        }
        else
        {
            kern_return_t kr;

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    return( false );
            }
            map = get_task_map(inTask);
            vm_map_reference(map);
            reserved->map = map;
            kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page_32(capacity),
                              VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != kr)
                return( false );

            // we have to make sure that these pages don't get copied on fork.
            kr = vm_inherit( map, (vm_address_t) _buffer, round_page_32(capacity), VM_INHERIT_NONE);
            if( KERN_SUCCESS != kr)
                return( false );
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this
        // Buffer should never auto prepare they should be prepared explicitly
        // But it never was enforced so what are you going to do?
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);
    }

    if (!_buffer)
        return false;

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable) {
        kern_return_t kr;
        ipc_port_t sharedMem = (ipc_port_t) _memEntry;
        vm_size_t size = round_page_32(_ranges.v[0].length);

        // must create the entry before any pages are allocated
        if( 0 == sharedMem) {

            // set memory entry cache
            vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
            switch (options & kIOMapCacheMask)
            {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                break;

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                break;

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                break;

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                break;

            case kIOMapDefaultCache:
            default:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                break;
            }

            kr = mach_make_memory_entry( map,
                                         &size, _ranges.v[0].address,
                                         memEntryCacheMode, &sharedMem,
                                         NULL );

            if( (KERN_SUCCESS == kr) && (size != round_page_32(_ranges.v[0].length))) {
                ipc_port_release_send( sharedMem );
                kr = kIOReturnVMError;
            }
            if( KERN_SUCCESS != kr)
                sharedMem = 0;
            _memEntry = (void *) sharedMem;
        }
    }

    setLength(capacity);

    return true;
}
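
A driver would normally reach initWithOptions() through a factory method rather than calling it directly. A hedged usage sketch, assuming the inTaskWithOptions() factory and a hypothetical clientTask handle:

// clientTask is a hypothetical task_t obtained from the user client.
IOBufferMemoryDescriptor *buf =
    IOBufferMemoryDescriptor::inTaskWithOptions(
        clientTask,                             // task the buffer is mapped into
        kIODirectionOutIn | kIOMemoryPageable,  // direction + pageable backing
        4096,                                   // capacity
        page_size);                             // alignment
if (buf) {
    void *p = buf->getBytesNoCopy();            // kernel address of the buffer
    // ... fill or hand the buffer to hardware ...
    buf->release();
}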
Example #10
/*
 * fork_create_child
 *
 * Description:	Common operations associated with the creation of a child
 *		process
 *
 * Parameters:	parent_task		parent task
 *		parent_coalitions	parent's set of coalitions
 *		child_proc		child process
 *		inherit_memory		TRUE if the parent's address space is
 *					to be inherited by the child
 *		is64bit			TRUE if the child being created will
 *					be associated with a 64 bit process
 *					rather than a 32 bit process
 *
 * Note:	This code is called in the fork() case, from the execve() call
 *		graph, if implementing an execve() following a vfork(), from
 *		the posix_spawn() call graph (which implicitly includes a
 *		vfork() equivalent call), and in the system bootstrap case.
 *
 *		It creates a new task and thread (and as a side effect of the
 *		thread creation, a uthread) in the parent coalition set, which is
 *		then associated with the process 'child'.  If the parent
 *		process address space is to be inherited, then a flag
 *		indicates that the newly created task should inherit this from
 *		the parent task.
 *
 *		As a special concession to bootstrapping the initial process
 *		in the system, it's possible for 'parent_task' to be TASK_NULL;
 *		in this case, 'inherit_memory' MUST be FALSE.
 */
thread_t
fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child_proc, int inherit_memory, int is64bit)
{
	thread_t	child_thread = NULL;
	task_t		child_task;
	kern_return_t	result;

	/* Create a new task for the child process */
	result = task_create_internal(parent_task,
					parent_coalitions,
					inherit_memory,
					is64bit,
					&child_task);
	if (result != KERN_SUCCESS) {
		printf("%s: task_create_internal failed.  Code: %d\n",
		    __func__, result);
		goto bad;
	}

	/* Set the child process task to the new task */
	child_proc->task = child_task;

	/* Set child task process to child proc */
	set_bsdtask_info(child_task, child_proc);

	/* Propagate CPU limit timer from parent */
	if (timerisset(&child_proc->p_rlim_cpu))
		task_vtimer_set(child_task, TASK_VTIMER_RLIM);

	/* Set/clear 64 bit vm_map flag */
	if (is64bit)
		vm_map_set_64bit(get_task_map(child_task));
	else
		vm_map_set_32bit(get_task_map(child_task));

	/*
	 * Set child process BSD visible scheduler priority if nice value
	 * inherited from parent
	 */
	if (child_proc->p_nice != 0)
		resetpriority(child_proc);

	/* Create a new thread for the child process */
	result = thread_create_with_continuation(child_task, &child_thread, (thread_continue_t)proc_wait_to_return);
	if (result != KERN_SUCCESS) {
		printf("%s: thread_create failed. Code: %d\n",
		    __func__, result);
		task_deallocate(child_task);
		child_task = NULL;
		goto bad;
	}

	/*
	 * Tag thread as being the first thread in its task.
	 */
	thread_set_tag(child_thread, THREAD_TAG_MAINTHREAD);

bad:
	thread_yield_internal(1);

	return(child_thread);
}