Example #1
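OSKextPingKextd() obtains a send right to kextd's port via host_get_kextd_port(host_priv_self(), ...), pings the daemon, and releases the port in the shared finish path.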
kern_return_t OSKextPingKextd(void)
{
    kern_return_t result     = KERN_FAILURE;
    mach_port_t   kextd_port = IPC_PORT_NULL;

    result = host_get_kextd_port(host_priv_self(), &kextd_port);
    if (result != KERN_SUCCESS || !IPC_PORT_VALID(kextd_port)) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogIPCFlag,
            "Can't get kextd port.");
        goto finish;
    }

    result = kextd_ping(kextd_port);
    if (result != KERN_SUCCESS) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogIPCFlag,
            "kextd ping failed (0x%x).", (int)result);
        goto finish;
    }

finish:
    if (IPC_PORT_VALID(kextd_port)) {
        ipc_port_release_send(kextd_port);
    }

    return result;
}
Example #2
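bsdinit_task() finishes bringing up PID 1: it names the process "init", installs ux_exception_port as the host-wide exception handler through host_set_exception_ports(host_priv_self(), ...), and then loads the init program.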
void
bsdinit_task(void)
{
	proc_t p = current_proc();
	struct uthread *ut;
	thread_t thread;

	process_name("init", p);

	ux_handler_init();

	thread = current_thread();
	(void) host_set_exception_ports(host_priv_self(),
					EXC_MASK_ALL & ~(EXC_MASK_RPC_ALERT), // pilotfish (shark) needs this port
					(mach_port_t) ux_exception_port,
					EXCEPTION_DEFAULT| MACH_EXCEPTION_CODES,
					0);

	ut = (uthread_t)get_bsdthread_info(thread);

	bsd_init_task = get_threadtask(thread);
	init_task_died = FALSE;

#if CONFIG_MACF
	mac_cred_label_associate_user(p->p_ucred);
#endif
	load_init_program(p);
	lock_trace = 1;
}
Example #3
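host_get_io_master() validates its host argument and forwards to host_get_io_master_port(host_priv_self(), ...) to return the host's I/O master port.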
/*
 * 	host_get_io_master
 *
 *	Return the IO master access port for this host.
 */
kern_return_t
host_get_io_master(
        host_t host,
        io_master_t *io_masterp)
{
	if (host == HOST_NULL)
		return KERN_INVALID_ARGUMENT;

	return (host_get_io_master_port(host_priv_self(), io_masterp));
}
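Example #4
AutoThrottler::setup(), apparently from a third-party CPU-throttling kext, builds a work loop and timer, iterates the IOCPU services to cache each CPU's Mach processor port, stores host_priv_self() in selfHost, and registers its sysctl knobs.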
bool AutoThrottler::setup(OSObject* owner) {
	if (setupDone) return true;
	
	workLoop = IOWorkLoop::workLoop();
	if (workLoop == 0) return false;
	
	perfTimer = IOTimerEventSource::timerEventSource(owner, (IOTimerEventSource::Action) &perfTimerWrapper);
	if (perfTimer == 0) return false;
	
	/* from Superhai (modified by mercurysquad) */
	cpu_count = 0;
	OSDictionary* service;
	mach_timespec_t serviceTimeout = { 60, 0 }; // in seconds
	totalTimerEvents = 0;
	
	IOService* firstCPU = IOService::waitForService(IOService::serviceMatching("IOCPU"), &serviceTimeout);

	if (!firstCPU) {
		warn("IOKit CPUs not found. Auto-throttle may not work.\n");
		return false;
	} else {
		// we got first cpu, so the others should also be available by now. get them
		service = IOService::serviceMatching("IOCPU");
	}
	
	OSIterator* iterator = IOService::getMatchingServices(service);
	
	if (!iterator) {
		warn("IOKit CPU iterator couldn't be created. Auto-throttle may not work.\n");
		return false;
	}

	IOCPU * cpu;
	while ((cpu = OSDynamicCast(IOCPU, iterator->getNextObject())))
	{
		/*dbg("Got I/O Kit CPU %d (%u) named %s", cpu_count, (unsigned int)(cpu->getCPUNumber(), cpu->getCPUName()->getCStringNoCopy());
    */
		mach_cpu[cpu_count] = cpu->getMachProcessor();
		if (++cpu_count >= max_cpus) break; // bounds check: don't write past the end of mach_cpu[]
	}
	iterator->release(); // the iterator returned by getMatchingServices() is owned by the caller
	selfHost = host_priv_self();
	if (workLoop->addEventSource(perfTimer) != kIOReturnSuccess) return false;
	currentPState = NumberOfPStates - 1;
	perfTimer->setTimeoutMS(throttleQuantum * (1 + currentPState));
	clock_get_uptime(&lastTime);
	if (!targetCPULoad) targetCPULoad = defaultTargetLoad; // % x10
	sysctl_register_oid(&sysctl__kern_cputhrottle_targetload);
	sysctl_register_oid(&sysctl__kern_cputhrottle_auto);
	setupDone = true;
	return true;
}
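Example #5
ktrace_background_available_notify_user() fetches the user-registered notification port with host_get_ktrace_background_port(host_priv_self(), ...), sends the background-available message, and releases the send right.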
/*
 * If user space has registered for background notifications, send one.
 */
kern_return_t
ktrace_background_available_notify_user(void)
{
	mach_port_t user_port;
	kern_return_t kr;

	kr = host_get_ktrace_background_port(host_priv_self(), &user_port);
	if (kr != KERN_SUCCESS || !IPC_PORT_VALID(user_port)) {
		return KERN_FAILURE;
	}

	kr = send_ktrace_background_available(user_port);
	ipc_port_release_send(user_port);
	return kr;
}
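Example #6
The munlock() syscall audits and converts its arguments, then calls mach_vm_wire(host_priv_self(), ...) with VM_PROT_NONE to remove a wiring from the current user map.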
int
munlock(__unused proc_t p, struct munlock_args *uap, __unused register_t *retval)
{
	mach_vm_offset_t addr;
	mach_vm_size_t size;
	vm_map_t user_map;
	kern_return_t	result;

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);

	addr = (mach_vm_offset_t) uap->addr;
	size = (mach_vm_size_t)uap->len;
	user_map = current_map();

	/* JMM - need to remove all wirings by spec - this just removes one */
	result = mach_vm_wire(host_priv_self(), user_map, addr, size, VM_PROT_NONE);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}
Example #7
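get_cpu_ticks() samples HOST_CPU_LOAD_INFO through host_statistics() and reports the idle and total tick deltas since the previous call, using static variables to remember the last sample.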
static int get_cpu_ticks(long * idle, long * total) {
	host_cpu_load_info_data_t loadinfo;
	static long idle_old, total_old;
	long total_new;
	mach_msg_type_number_t count;

	count = HOST_CPU_LOAD_INFO_COUNT;
	host_statistics(host_priv_self(), HOST_CPU_LOAD_INFO,
			(host_info_t)&loadinfo, &count);

	total_new = loadinfo.cpu_ticks[CPU_STATE_USER] + loadinfo.cpu_ticks[CPU_STATE_NICE] +
		    loadinfo.cpu_ticks[CPU_STATE_SYSTEM] + loadinfo.cpu_ticks[CPU_STATE_IDLE];

	if (idle)
		*idle =	loadinfo.cpu_ticks[CPU_STATE_IDLE] - idle_old;
	if (total)
		*total = total_new - total_old;

	idle_old = loadinfo.cpu_ticks[CPU_STATE_IDLE];
	total_old = total_new;

	return 0;
}
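A minimal sketch (not part of the original source) of how those deltas might be consumed; get_cpu_usage_percent() is a hypothetical helper:

/* Hypothetical caller: turns the idle/total tick deltas from
 * get_cpu_ticks() into a CPU-usage percentage. */
static int get_cpu_usage_percent(void)
{
	long idle, total;

	get_cpu_ticks(&idle, &total);
	if (total <= 0)
		return 0;	/* no ticks elapsed since the last sample */
	return (int)(100 - (100 * idle) / total);
}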
Example #8
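sys_perf_notify() builds a synthetic exception code carrying the pid and delivers EXC_RPC_ALERT to the handler registered on the host privileged port, after checking that the exception port is valid, active, and not owned by the current task.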
/*
 *	Handle interface for special performance monitoring
 *	This is a special case of the host exception handler
 */
kern_return_t sys_perf_notify(thread_t thread, int pid)
{
    host_priv_t		hostp;
    struct exception_action *excp;
    ipc_port_t		xport;
    wait_interrupt_t	wsave;
    kern_return_t		ret;
    mach_exception_data_type_t	code[EXCEPTION_CODE_MAX];
    struct task		*task;

    hostp = host_priv_self();	/* Get the host privileged ports */
    code[0] = 0xFF000001;	/* Set terminate code */
    code[1] = pid;		/* Pass out the pid */

    task = thread->task;
    excp = &hostp->exc_actions[EXC_RPC_ALERT];
    xport = excp->port;

    /* Make sure we're not catching our own exception */
    if (!IP_VALID(xport) ||
            !ip_active(xport) ||
            task->itk_space == xport->data.receiver) {

        return(KERN_FAILURE);
    }

    wsave = thread_interrupt_level(THREAD_UNINT);
    ret = exception_deliver(
              thread,
              EXC_RPC_ALERT,
              code,
              2,
              excp,
              &hostp->lock);
    (void)thread_interrupt_level(wsave);

    return(ret);
}
Example #9
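reboot_kernel() runs the shutdown sequence (process teardown, disk sync, unmount, buffer-cache drain, network down) and finally invokes host_reboot(host_priv_self(), hostboot_option), where the option is derived from the RB_* flags in howto.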
int
reboot_kernel(int howto, char *message)
{
	int hostboot_option=0;

	if (!OSCompareAndSwap(0, 1, &system_inshutdown)) {
		if ( (howto&RB_QUICK) == RB_QUICK)
			goto force_reboot;
		return (EBUSY);
	}
	/*
	 * Temporary hack to notify the power management root domain
	 * that the system will shut down.
	 */
	IOSystemShutdownNotification();

	if ((howto&RB_QUICK)==RB_QUICK) {
		printf("Quick reboot...\n");
		if ((howto&RB_NOSYNC)==0) {
			sync((proc_t)NULL, (void *)NULL, (int *)NULL);
		}
	}
	else if ((howto&RB_NOSYNC)==0) {
		int iter, nbusy;

		printf("syncing disks... ");

		/*
		 * Release vnodes held by texts before sync.
		 */

		/* handle live procs (deallocate their root and current directories), suspend initproc */
		proc_shutdown();

#if CONFIG_AUDIT
		audit_shutdown();
#endif

		if (unmountroot_pre_hook != NULL)
			unmountroot_pre_hook();

		sync((proc_t)NULL, (void *)NULL, (int *)NULL);

		if (kdebug_enable)
			kdbg_dump_trace_to_file("/var/log/shutdown/shutdown.trace");

		/*
		 * Unmount filesystems
		 */

#if DEVELOPMENT || DEBUG
		if (!(howto & RB_PANIC) || !kdp_has_polled_corefile())
#endif /* DEVELOPMENT || DEBUG */
		{
			vfs_unmountall();
		}

		/* Wait for the buffer cache to clean remaining dirty buffers */
		for (iter = 0; iter < 100; iter++) {
			nbusy = count_busy_buffers();
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			delay_for_interval( 1 * nbusy, 1000 * 1000);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
	}
#if NETWORKING
	/*
	 * Can't just use an splnet() here to disable the network
	 * because that will lock out softints which the disk
	 * drivers depend on to finish DMAs.
	 */
	if_down_all();
#endif /* NETWORKING */

force_reboot:

	if (howto & RB_PANIC) {
		if (strncmp(message, "Kernel memory has exceeded limits", 33) == 0) {
			kernel_hwm_panic_info();
		}
		panic ("userspace panic: %s", message);
	}

	if (howto & RB_POWERDOWN)
		hostboot_option = HOST_REBOOT_HALT;
	if (howto & RB_HALT)
		hostboot_option = HOST_REBOOT_HALT;

	if (howto & RB_UPSDELAY) {
		hostboot_option = HOST_REBOOT_UPSDELAY;
	}

	host_reboot(host_priv_self(), hostboot_option);
	/*
	 * should not be reached
	 */
	return (0);
}
Example #10
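default_pager_initialize() sets up the default pager's locks and the vstruct zone, allocates the exported DMM port, and registers it as the system's default memory manager via host_default_memory_manager(host_priv_self(), ...).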
void
default_pager_initialize(void)
{
	kern_return_t		kr;
	__unused static char	here[] = "default_pager_initialize";

	lck_grp_attr_setdefault(&default_pager_lck_grp_attr);
	lck_grp_init(&default_pager_lck_grp, "default_pager", &default_pager_lck_grp_attr);
	lck_attr_setdefault(&default_pager_lck_attr);	

	/*
	 * Vm variables.
	 */
#ifndef MACH_KERNEL
	vm_page_mask = vm_page_size - 1;
	assert((unsigned int) vm_page_size == vm_page_size);
	vm_page_shift = local_log2((unsigned int) vm_page_size);
#endif

	/*
	 * List of all vstructs.
	 */
	vstruct_zone = zinit(sizeof(struct vstruct),
			     10000 * sizeof(struct vstruct),
			     8192, "vstruct zone");
	zone_change(vstruct_zone, Z_CALLERACCT, FALSE);
	zone_change(vstruct_zone, Z_NOENCRYPT, TRUE);

	VSL_LOCK_INIT();
	queue_init(&vstruct_list.vsl_queue);
	vstruct_list.vsl_count = 0;

	VSTATS_LOCK_INIT(&global_stats.gs_lock);

	bs_initialize();

	/*
	 * Exported DMM port.
	 */
	default_pager_object = ipc_port_alloc_kernel();

	/*
	 * Export pager interfaces.
	 */
#ifdef	USER_PAGER
	if ((kr = netname_check_in(name_server_port, "UserPager",
				   default_pager_self,
				   default_pager_object))
	    != KERN_SUCCESS) {
		dprintf(("netname_check_in returned 0x%x\n", kr));
		exit(1);
	}
#else	/* USER_PAGER */
	{
		unsigned int clsize;
		memory_object_default_t dmm;

		dmm = default_pager_object;
		assert((unsigned int) vm_page_size == vm_page_size);
		clsize = ((unsigned int) vm_page_size << vstruct_def_clshift);
		kr = host_default_memory_manager(host_priv_self(), &dmm, clsize);
		if ((kr != KERN_SUCCESS) ||
		    (dmm != MEMORY_OBJECT_DEFAULT_NULL))
			Panic("default memory manager");

	}
#endif	/* USER_PAGER */
}
Example #11
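macx_triggers() looks up the default pager through host_default_memory_manager(host_priv_self(), ...), installs swap-encryption settings and high/low water-mark trigger ports, and boosts the calling thread's scheduling policy.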
/*
 *	Routine:	macx_triggers
 *	Function:
 *		Syscall interface to set the call backs for low and
 *		high water marks.
 */
int
macx_triggers(
	struct macx_triggers_args *args)
{
	int	hi_water = args->hi_water;
	int	low_water = args->low_water;
	int	flags = args->flags;
	mach_port_t	trigger_name = args->alert_port;
	kern_return_t kr;
	memory_object_default_t	default_pager;
	ipc_port_t		trigger_port;

	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(), 
					&default_pager, 0);
	if(kr != KERN_SUCCESS) {
		return EINVAL;
	}

	if ((flags & SWAP_ENCRYPT_ON) &&
	    (flags & SWAP_ENCRYPT_OFF)) {
		/* can't have it both ways */
		return EINVAL;
	}

	if (default_pager_init_flag == 0) {
		start_def_pager(NULL);
		default_pager_init_flag = 1;
	}

	if (flags & SWAP_ENCRYPT_ON) {
		/* ENCRYPTED SWAP: tell default_pager to encrypt */
		default_pager_triggers(default_pager,
				       0, 0,
				       SWAP_ENCRYPT_ON,
				       IP_NULL);
	} else if (flags & SWAP_ENCRYPT_OFF) {
		/* ENCRYPTED SWAP: tell default_pager not to encrypt */
		default_pager_triggers(default_pager,
				       0, 0,
				       SWAP_ENCRYPT_OFF,
				       IP_NULL);
	}

	if (flags & HI_WAT_ALERT) {
		trigger_port = trigger_name_to_port(trigger_name);
		if(trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port); 
		/* now unlocked */
		default_pager_triggers(default_pager, 
				       hi_water, low_water,
				       HI_WAT_ALERT, trigger_port);
	}

	if (flags & LO_WAT_ALERT) {
		trigger_port = trigger_name_to_port(trigger_name);
		if(trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port);
		/* and now it's unlocked */
		default_pager_triggers(default_pager, 
				       hi_water, low_water,
				       LO_WAT_ALERT, trigger_port);
	}

	/*
	 * Set thread scheduling priority and policy for the current thread
	 * it is assumed for the time being that the thread setting the alert
	 * is the same one which will be servicing it.
	 *
	 * XXX This does not belong in the kernel XXX
	 */
	{
		thread_precedence_policy_data_t		pre;
		thread_extended_policy_data_t		ext;

		ext.timeshare = FALSE;
		pre.importance = INT32_MAX;

		thread_policy_set(current_thread(),
				  THREAD_EXTENDED_POLICY,
				  (thread_policy_t)&ext,
				  THREAD_EXTENDED_POLICY_COUNT);

		thread_policy_set(current_thread(),
				  THREAD_PRECEDENCE_POLICY,
				  (thread_policy_t)&pre,
				  THREAD_PRECEDENCE_POLICY_COUNT);
	}
 
	current_thread()->options |= TH_OPT_VMPRIV;

	return 0;
}
Example #12
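macx_swapon() resolves the swap file's vnode, fetches the default pager via host_default_memory_manager(host_priv_self(), ...), creates a backing store (with a small cluster size on SSDs), and hands the file to the pager.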
/*
 *	Routine:	macx_swapon
 *	Function:
 *		Syscall interface to add a file to backing store
 */
int
macx_swapon(
	struct macx_swapon_args *args)
{
	int			size = args->size;
	vnode_t			vp = (vnode_t)NULL; 
	struct nameidata 	nd, *ndp;
	register int		error;
	kern_return_t		kr;
	mach_port_t		backing_store;
	memory_object_default_t	default_pager;
	int			i;
	boolean_t		funnel_state;
	off_t			file_size;
	vfs_context_t		ctx = vfs_context_current();
	struct proc		*p =  current_proc();
	int			dp_cluster_size;


	AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
	AUDIT_ARG(value32, args->priority);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	ndp = &nd;

	if ((error = suser(kauth_cred_get(), 0)))
		goto swapon_bailout;

	/*
	 * Get a vnode for the paging area.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
	       ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	       (user_addr_t) args->filename, ctx);

	if ((error = namei(ndp)))
		goto swapon_bailout;
	nameidone(ndp);
	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto swapon_bailout;
	}

	/* get file size */
	if ((error = vnode_size(vp, &file_size, ctx)) != 0)
		goto swapon_bailout;
#if CONFIG_MACF
	vnode_lock(vp);
	error = mac_system_check_swapon(vfs_context_ucred(ctx), vp);
	vnode_unlock(vp);
	if (error)
		goto swapon_bailout;
#endif

	/* resize to desired size if it's too small */
	if ((file_size < (off_t)size) && ((error = vnode_setsize(vp, (off_t)size, 0, ctx)) != 0))
		goto swapon_bailout;

	if (default_pager_init_flag == 0) {
		start_def_pager(NULL);
		default_pager_init_flag = 1;
	}

	/* add new backing store to list */
	i = 0;
	while (i < MAX_BACKING_STORE && bs_port_table[i].vp != 0)
		i++;
	if (i == MAX_BACKING_STORE) {
		error = ENOMEM;
		goto swapon_bailout;
	}

	/* remember the vnode. This vnode has namei() reference */
	bs_port_table[i].vp = vp;
	
	/*
	 * Look to see if we are already paging to this file.
	 */
	/* make certain the copy send of kernel call will work */
	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
	if(kr != KERN_SUCCESS) {
	   error = EAGAIN;
	   bs_port_table[i].vp = 0;
	   goto swapon_bailout;
	}

	if (vp->v_mount->mnt_kern_flag & MNTK_SSD) {
		/*
		 * keep the cluster size small since the
		 * seek cost is effectively 0 which means
		 * we don't care much about fragmentation
		 */
		dp_isssd = TRUE;
		dp_cluster_size = 2 * PAGE_SIZE;
	} else {
		/*
		 * use the default cluster size
		 */
		dp_isssd = FALSE;
		dp_cluster_size = 0;
	}
	kr = default_pager_backing_store_create(default_pager, 
					-1, /* default priority */
					dp_cluster_size,
					&backing_store);
	memory_object_default_deallocate(default_pager);

	if(kr != KERN_SUCCESS) {
	   error = ENOMEM;
	   bs_port_table[i].vp = 0;
	   goto swapon_bailout;
	}

	/* Mark this vnode as being used for swapfile */
	vnode_lock_spin(vp);
	SET(vp->v_flag, VSWAP);
	vnode_unlock(vp);

	/*
	 * NOTE: we are able to supply PAGE_SIZE here instead of
	 *	an actual record size or block number because:
	 *	a: we do not support offsets from the beginning of the
 *		file (allowing for non page size/record modulo offsets).
 *	b: because all paging will be done modulo page size
	 */

	kr = default_pager_add_file(backing_store, (vnode_ptr_t) vp,
				PAGE_SIZE, (int)(file_size/PAGE_SIZE));
	if(kr != KERN_SUCCESS) {
	   bs_port_table[i].vp = 0;
	   if(kr == KERN_INVALID_ARGUMENT)
		error = EINVAL;
	   else 
		error = ENOMEM;

	   /* This vnode is not to be used for swapfile */
	   vnode_lock_spin(vp);
	   CLR(vp->v_flag, VSWAP);
	   vnode_unlock(vp);

	   goto swapon_bailout;
	}
	bs_port_table[i].bs = (void *)backing_store;
	error = 0;

	ubc_setthreadcred(vp, p, current_thread());

	/*
	 * take a long term reference on the vnode to keep
	 * vnreclaim() away from this vnode.
	 */
	vnode_ref(vp);

swapon_bailout:
	if (vp) {
		vnode_put(vp);
	}
	(void) thread_funnel_set(kernel_flock, FALSE);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
Example #13
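mach_macx_triggers() extends macx_triggers() (Example #11) with emergency-swap-file selection, swap-file-creation-error notification, PROC_RESUME handling, and swap-compaction control.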
/*
 *	Routine:	mach_macx_triggers
 *	Function:
 *		Syscall interface to set the call backs for low and
 *		high water marks.
 */
int
mach_macx_triggers(
	struct macx_triggers_args *args)
{
	int	hi_water = args->hi_water;
	int	low_water = args->low_water;
	int	flags = args->flags;
	mach_port_t	trigger_name = args->alert_port;
	kern_return_t kr;
	memory_object_default_t	default_pager;
	ipc_port_t		trigger_port;

	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(), 
					&default_pager, 0);
	if(kr != KERN_SUCCESS) {
		return EINVAL;
	}

	if (((flags & SWAP_ENCRYPT_ON) && (flags & SWAP_ENCRYPT_OFF)) || 
	    ((flags & SWAP_COMPACT_ENABLE) && (flags & SWAP_COMPACT_DISABLE))) {
		/* can't have it both ways */
		return EINVAL;
	}

	if (default_pager_init_flag == 0) {
		start_def_pager(NULL);
		default_pager_init_flag = 1;
	}

	if (flags & SWAP_ENCRYPT_ON) {
		/* ENCRYPTED SWAP: tell default_pager to encrypt */
		default_pager_triggers(default_pager,
				       0, 0,
				       SWAP_ENCRYPT_ON,
				       IP_NULL);
	} else if (flags & SWAP_ENCRYPT_OFF) {
		/* ENCRYPTED SWAP: tell default_pager not to encrypt */
		default_pager_triggers(default_pager,
				       0, 0,
				       SWAP_ENCRYPT_OFF,
				       IP_NULL);
	}

	if (flags & USE_EMERGENCY_SWAP_FILE_FIRST) {
		/*
		 * Time to switch to the emergency segment.
		 */
		return default_pager_triggers(default_pager,
					0, 0, 
					USE_EMERGENCY_SWAP_FILE_FIRST,
					IP_NULL);
	}

	if (flags & SWAP_FILE_CREATION_ERROR) {
		/* 
		 * For some reason, the dynamic pager failed to create a swap file.
	 	 */
		trigger_port = trigger_name_to_port(trigger_name);
		if(trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port); 
		/* now unlocked */
		default_pager_triggers(default_pager,
					0, 0, 
					SWAP_FILE_CREATION_ERROR,
					trigger_port);
	}

	if (flags & HI_WAT_ALERT) {
		trigger_port = trigger_name_to_port(trigger_name);
		if(trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port); 
		/* now unlocked */
		default_pager_triggers(default_pager, 
				       hi_water, low_water,
				       HI_WAT_ALERT, trigger_port);
	}

	if (flags & LO_WAT_ALERT) {
		trigger_port = trigger_name_to_port(trigger_name);
		if(trigger_port == NULL) {
			return EINVAL;
		}
		/* trigger_port is locked and active */
		ipc_port_make_send_locked(trigger_port);
		/* and now it's unlocked */
		default_pager_triggers(default_pager, 
				       hi_water, low_water,
				       LO_WAT_ALERT, trigger_port);
	}


	if (flags & PROC_RESUME) {
		/*
		 * For this call, hi_water is used to pass in the pid of the process we want to resume
		 * or unthrottle.  This is of course restricted to the superuser (checked inside of 
		 * proc_resetpcontrol).
		 */

		return proc_resetpcontrol(hi_water);
	}

	/*
	 * Set thread scheduling priority and policy for the current thread
	 * it is assumed for the time being that the thread setting the alert
	 * is the same one which will be servicing it.
	 *
	 * XXX This does not belong in the kernel XXX
	 */
	if (flags & HI_WAT_ALERT) {
		thread_precedence_policy_data_t		pre;
		thread_extended_policy_data_t		ext;

		ext.timeshare = FALSE;
		pre.importance = INT32_MAX;

		thread_policy_set(current_thread(),
				  THREAD_EXTENDED_POLICY,
				  (thread_policy_t)&ext,
				  THREAD_EXTENDED_POLICY_COUNT);

		thread_policy_set(current_thread(),
				  THREAD_PRECEDENCE_POLICY,
				  (thread_policy_t)&pre,
				  THREAD_PRECEDENCE_POLICY_COUNT);

		current_thread()->options |= TH_OPT_VMPRIV;
	}
 
	if (flags & (SWAP_COMPACT_DISABLE | SWAP_COMPACT_ENABLE)) {
		return macx_backing_store_compaction(flags & (SWAP_COMPACT_DISABLE | SWAP_COMPACT_ENABLE));
	}

	return 0;
}
Example #14
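_OSMetaClassConsiderUnloads() iterates the registered kmods; any kmod with a zero reference count and no live class instances has its C++ destructors run and is torn down with kmod_destroy(host_priv_self(), ki->id), repeating until a pass unloads nothing.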
static void _OSMetaClassConsiderUnloads(thread_call_param_t p0,
                                        thread_call_param_t p1)
{
    OSSet *kmodClasses;
    OSSymbol *kmodName;
    OSCollectionIterator *kmods;
    OSCollectionIterator *classes;
    OSMetaClass *checkClass;
    kmod_info_t *ki = 0;
    kern_return_t ret;
    bool didUnload;

    mutex_lock(loadLock);

    do {

	kmods = OSCollectionIterator::withCollection(sKModClassesDict);
	if (!kmods)
	    break;

        didUnload = false;
        while ( (kmodName = (OSSymbol *) kmods->getNextObject()) ) {

            if (ki) {
                kfree((vm_offset_t) ki, sizeof(kmod_info_t));
                ki = 0;
            }

            ki = kmod_lookupbyname_locked((char *)kmodName->getCStringNoCopy());
            if (!ki)
                continue;

            if (ki->reference_count) {
                 continue;
            }

            kmodClasses = OSDynamicCast(OSSet,
                                sKModClassesDict->getObject(kmodName));
            classes = OSCollectionIterator::withCollection(kmodClasses);
            if (!classes)
                continue;
    
            while ((checkClass = (OSMetaClass *) classes->getNextObject())
              && (0 == checkClass->getInstanceCount()))
                {}
            classes->release();

            if (0 == checkClass) {
                OSRuntimeUnloadCPP(ki, 0);	// call destructors
                ret = kmod_destroy(host_priv_self(), ki->id);
                didUnload = true;
            }

        }

        kmods->release();

    } while (didUnload);

    if (ki) {
        kfree((vm_offset_t) ki, sizeof(kmod_info_t));	/* free the last kmod_info looked up */
    }

    mutex_unlock(loadLock);

    kmod_unload_cache();
}
Example #15
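boot() is an older counterpart of reboot_kernel() in Example #9: the same shutdown sequence ending in host_reboot(host_priv_self(), hostboot_option), additionally forcing a halt when handling a panic.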
int
boot(int paniced, int howto, char *command)
{
	struct proc *p = current_proc();	/* XXX */
	int hostboot_option=0;

	if (!OSCompareAndSwap(0, 1, &system_inshutdown)) {
		if ( (howto&RB_QUICK) == RB_QUICK)
			goto force_reboot;
		return (EBUSY);
	}
	/*
	 * Temporary hack to notify the power management root domain
	 * that the system will shut down.
	 */
	IOSystemShutdownNotification();

	md_prepare_for_shutdown(paniced, howto, command);

	if ((howto&RB_QUICK)==RB_QUICK) {
		printf("Quick reboot...\n");
		if ((howto&RB_NOSYNC)==0) {
			sync(p, (void *)NULL, (int *)NULL);
		}
	}
	else if ((howto&RB_NOSYNC)==0) {
		int iter, nbusy;

		printf("syncing disks... ");

		/*
		 * Release vnodes held by texts before sync.
		 */

		/* handle live procs (deallocate their root and current directories). */		
		proc_shutdown();

#if CONFIG_AUDIT
		audit_shutdown();
#endif

		if (unmountroot_pre_hook != NULL)
			unmountroot_pre_hook();

		sync(p, (void *)NULL, (int *)NULL);

		/*
		 * Now that all processes have been terminated and system is
		 * sync'ed up, suspend init
		 */
			
		if (initproc && p != initproc)
			task_suspend(initproc->task);

		if (kdebug_enable)
			kdbg_dump_trace_to_file("/var/log/shutdown/shutdown.trace");

		/*
		 * Unmount filesystems
		 */
		vfs_unmountall();

		/* Wait for the buffer cache to clean remaining dirty buffers */
		for (iter = 0; iter < 100; iter++) {
			nbusy = count_busy_buffers();
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			delay_for_interval( 1 * nbusy, 1000 * 1000);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
	}
#if NETWORKING
	/*
	 * Can't just use an splnet() here to disable the network
	 * because that will lock out softints which the disk
	 * drivers depend on to finish DMAs.
	 */
	if_down_all();
#endif /* NETWORKING */

force_reboot:
	if (howto & RB_POWERDOWN)
		hostboot_option = HOST_REBOOT_HALT;
	if (howto & RB_HALT)
		hostboot_option = HOST_REBOOT_HALT;
	if (paniced == RB_PANIC)
		hostboot_option = HOST_REBOOT_HALT;

	if (howto & RB_UPSDELAY) {
		hostboot_option = HOST_REBOOT_UPSDELAY;
	}

	host_reboot(host_priv_self(), hostboot_option);
	/*
	 * should not be reached
	 */
	return (0);
}
Example #16
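ipc_task_init() creates a task's IPC space and kernel ports; a task without a parent gets its host port from host_get_host_port(host_priv_self(), &port), while a child inherits registered, exception, and special ports from its parent.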
void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	ipc_port_t nport;
	kern_return_t kr;
	int i;

	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");

	space->is_task = task;

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	nport = ipc_port_alloc_kernel();
	if (nport == IP_NULL)
		panic("ipc_task_init");

	itk_lock_init(task);
	task->itk_self = kport;
	task->itk_nself = nport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	task->itk_sself = ipc_port_make_send(kport);
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

	if (parent == TASK_NULL) {
		ipc_port_t port;

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
		}/* for */
		
		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_seatbelt = IP_NULL;
		task->itk_gssd = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	} else {
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		    task->exc_actions[i].port =
		  		ipc_port_copy_send(parent->exc_actions[i].port);
		    task->exc_actions[i].flavor =
				parent->exc_actions[i].flavor;
		    task->exc_actions[i].behavior = 
				parent->exc_actions[i].behavior;
		    task->exc_actions[i].privileged =
				parent->exc_actions[i].privileged;
		}/* for */
		task->itk_host =
			ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_seatbelt =
			ipc_port_copy_send(parent->itk_seatbelt);

		task->itk_gssd =
			ipc_port_copy_send(parent->itk_gssd);

		task->itk_task_access =
			ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
}
Example #17
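An older macx_swapon() taking plain arguments: the same flow as Example #12, but sizing the file with VOP_GETATTR()/VOP_SETATTR() and pinning the vnode with ubc_hold() and VREF().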
/*
 *	Routine:	macx_swapon
 *	Function:
 *		Syscall interface to add a file to backing store
 */
int
macx_swapon(
    char 	*filename,
    int	flags,
    long	size,
    long	priority)
{
    struct vnode		*vp = 0;
    struct nameidata 	nd, *ndp;
    struct proc		*p =  current_proc();
    pager_file_t		pf;
    register int		error;
    kern_return_t		kr;
    mach_port_t		backing_store;
    memory_object_default_t	default_pager;
    int			i;
    boolean_t		funnel_state;

    struct vattr	vattr;

    AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
    AUDIT_ARG(value, priority);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    ndp = &nd;

    if ((error = suser(p->p_ucred, &p->p_acflag)))
        goto swapon_bailout;

    if(default_pager_init_flag == 0) {
        start_def_pager(NULL);
        default_pager_init_flag = 1;
    }

    /*
     * Get a vnode for the paging area.
     */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE,
           filename, p);

    if ((error = namei(ndp)))
        goto swapon_bailout;
    vp = ndp->ni_vp;

    if (vp->v_type != VREG) {
        error = EINVAL;
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }
    UBCINFOCHECK("macx_swapon", vp);

    if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p))) {
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }

    if (vattr.va_size < (u_quad_t)size) {
        vattr_null(&vattr);
        vattr.va_size = (u_quad_t)size;
        error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
        if (error) {
            VOP_UNLOCK(vp, 0, p);
            goto swapon_bailout;
        }
    }

    /* add new backing store to list */
    i = 0;
    while (i < MAX_BACKING_STORE && bs_port_table[i].vp != 0)
        i++;
    if (i == MAX_BACKING_STORE) {
        error = ENOMEM;
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }

    /* remember the vnode. This vnode has namei() reference */
    bs_port_table[i].vp = vp;

    /*
     * Look to see if we are already paging to this file.
     */
    /* make certain the copy send of kernel call will work */
    default_pager = MEMORY_OBJECT_DEFAULT_NULL;
    kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
    if(kr != KERN_SUCCESS) {
        error = EAGAIN;
        VOP_UNLOCK(vp, 0, p);
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    kr = default_pager_backing_store_create(default_pager,
                                            -1, /* default priority */
                                            0, /* default cluster size */
                                            &backing_store);
    memory_object_default_deallocate(default_pager);

    if(kr != KERN_SUCCESS) {
        error = ENOMEM;
        VOP_UNLOCK(vp, 0, p);
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    /*
     * NOTE: we are able to supply PAGE_SIZE here instead of
     *	an actual record size or block number because:
     *	a: we do not support offsets from the beginning of the
     *		file (allowing for non page size/record modulo offsets).
     *	b: because all paging will be done modulo page size
     */

    VOP_UNLOCK(vp, 0, p);
    kr = default_pager_add_file(backing_store, vp, PAGE_SIZE,
                                ((int)vattr.va_size)/PAGE_SIZE);
    if(kr != KERN_SUCCESS) {
        bs_port_table[i].vp = 0;
        if(kr == KERN_INVALID_ARGUMENT)
            error = EINVAL;
        else
            error = ENOMEM;
        goto swapon_bailout;
    }
    bs_port_table[i].bs = (void *)backing_store;
    error = 0;
    if (!ubc_hold(vp))
        panic("macx_swapon: hold");

    /* Mark this vnode as being used for swapfile */
    SET(vp->v_flag, VSWAP);

    ubc_setcred(vp, p);

    /*
     * take an extra reference on the vnode to keep
     * vnreclaim() away from this vnode.
     */
    VREF(vp);

    /* Hold on to the namei  reference to the paging file vnode */
    vp = 0;

swapon_bailout:
    if (vp) {
        vrele(vp);
    }
    (void) thread_funnel_set(kernel_flock, FALSE);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return(error);
}
Example #18
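macx_swapinfo() obtains the default pager from host_default_memory_manager(host_priv_self(), ...) and, if one is running, copies its total/free space, page size, and encryption flag out to the caller.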
/*
 *	Routine:	macx_swapinfo
 *	Function:
 *		Syscall interface to get general swap statistics
 */
int
macx_swapinfo(
	memory_object_size_t	*total_p,
	memory_object_size_t	*avail_p,
	vm_size_t		*pagesize_p,
	boolean_t		*encrypted_p)
{
	int			error;
	memory_object_default_t	default_pager;
	default_pager_info_64_t	dpi64;
	kern_return_t		kr;

	error = 0;

	/*
	 * Get a handle on the default pager.
	 */
	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
	if (kr != KERN_SUCCESS) {
		error = EAGAIN;	/* XXX why EAGAIN ? */
		goto done;
	}
	if (default_pager == MEMORY_OBJECT_DEFAULT_NULL) {
		/*
		 * The default pager has not initialized yet,
		 * so it can't be using any swap space at all.
		 */
		*total_p = 0;
		*avail_p = 0;
		*pagesize_p = 0;
		*encrypted_p = FALSE;
		goto done;
	}
	
	/*
	 * Get swap usage data from default pager.
	 */
	kr = default_pager_info_64(default_pager, &dpi64);
	if (kr != KERN_SUCCESS) {
		error = ENOTSUP;
		goto done;
	}

	/*
	 * Provide default pager info to caller.
	 */
	*total_p = dpi64.dpi_total_space;
	*avail_p = dpi64.dpi_free_space;
	*pagesize_p = dpi64.dpi_page_size;
	if (dpi64.dpi_flags & DPI_ENCRYPTED) {
		*encrypted_p = TRUE;
	} else {
		*encrypted_p = FALSE;
	}

done:
	if (default_pager != MEMORY_OBJECT_DEFAULT_NULL) {
		/* release our handle on default pager */
		memory_object_default_deallocate(default_pager);
	}
	return error;
}
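A usage illustration (a hedged sketch, not from the original source; report_swap_usage() is a hypothetical helper) of how those statistics might be consumed:

/* Hypothetical caller: prints the swap statistics gathered by
 * macx_swapinfo(). */
static void report_swap_usage(void)
{
	memory_object_size_t total, avail;
	vm_size_t pagesize;
	boolean_t encrypted;

	if (macx_swapinfo(&total, &avail, &pagesize, &encrypted) == 0) {
		printf("swap: %llu bytes total, %llu free, page size %lu%s\n",
		       (unsigned long long)total, (unsigned long long)avail,
		       (unsigned long)pagesize, encrypted ? ", encrypted" : "");
	}
}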
Example #19
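exception_triage() escalates a caught exception from the thread level to the task level and finally to the host level (host_priv_self()->exc_actions); if no server handles it, the task is terminated.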
/*
 *	Routine:	exception_triage
 *	Purpose:
 *		The current thread caught an exception.
 *		We make an up-call to the thread's exception server.
 *	Conditions:
 *		Nothing locked and no resources held.
 *		Called from an exception context, so
 *		thread_exception_return and thread_kdb_return
 *		are possible.
 *	Returns:
 *		Doesn't return.
 */
void
exception_triage(
	exception_type_t	exception,
	mach_exception_data_t	code,
	mach_msg_type_number_t  codeCnt)
{
	thread_t		thread;
	task_t			task;
	host_priv_t		host_priv;
	struct exception_action *excp;
	lck_mtx_t			*mutex;
	kern_return_t		kr;

	assert(exception != EXC_RPC_ALERT);

	if (exception == KERN_SUCCESS)
		panic("exception");

	/*
	 * Try to raise the exception at the activation level.
	 */
	thread = current_thread();
	mutex = &thread->mutex;
	excp = &thread->exc_actions[exception];
	kr = exception_deliver(thread, exception, code, codeCnt, excp, mutex);
	if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
		goto out;

	/*
	 * Maybe the task level will handle it.
	 */
	task = current_task();
	mutex = &task->lock;
	excp = &task->exc_actions[exception];
	kr = exception_deliver(thread, exception, code, codeCnt, excp, mutex);
	if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
		goto out;

	/*
	 * How about at the host level?
	 */
	host_priv = host_priv_self();
	mutex = &host_priv->lock;
	excp = &host_priv->exc_actions[exception];
	kr = exception_deliver(thread, exception, code, codeCnt, excp, mutex);
	if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED)
		goto out;

	/*
	 * Nobody handled it, terminate the task.
	 */

#if	MACH_KDB
	if (debug_user_with_kdb) {
		/*
		 *	Debug the exception with kdb.
		 *	If kdb handles the exception,
		 *	then thread_kdb_return won't return.
		 */
		db_printf("No exception server, calling kdb...\n");
		thread_kdb_return();
	}
#endif	/* MACH_KDB */

	(void) task_terminate(task);

out:
	if (exception != EXC_CRASH)
		thread_exception_return();
	return;
}