Example #1
int
i386_set_ldt(
	uint32_t		*retval,
	uint32_t		start_sel,
	uint32_t		descs,	/* user address of descriptors to install */
	uint32_t		num_sels)
{
	user_ldt_t	new_ldt, old_ldt;
	struct real_descriptor *dp;
	unsigned int	i;
	unsigned int	min_selector = LDTSZ_MIN;	/* do not allow the system selectors to be changed */
	task_t		task = current_task();
	unsigned int	ldt_count;
	kern_return_t err;

	if (start_sel != LDT_AUTO_ALLOC
	    && (start_sel != 0 || num_sels != 0)
	    && (start_sel < min_selector || start_sel >= LDTSZ))
	    return EINVAL;
	if (start_sel != LDT_AUTO_ALLOC
	    && (uint64_t)start_sel + (uint64_t)num_sels > LDTSZ) /* cast to uint64_t to detect wrap-around */
	    return EINVAL;

	task_lock(task);
	
	old_ldt = task->i386_ldt;

	if (start_sel == LDT_AUTO_ALLOC) {
	    if (old_ldt) {
		unsigned int null_count;
		struct real_descriptor null_ldt;
		
		bzero(&null_ldt, sizeof(null_ldt));

		/*
		 * Look for null selectors among the already-allocated
		 * entries.
		 */
		null_count = 0;
		i = 0;
		while (i < old_ldt->count)
		{
		    if (!memcmp(&old_ldt->ldt[i++], &null_ldt, sizeof(null_ldt))) {
			null_count++;
			if (null_count == num_sels)
			    break;  /* break out of while loop */
		    } else {
			null_count = 0;
		    }
		}

		/*
		 * If we broke out of the while loop, i points to the selector
		 * after num_sels null selectors.  Otherwise it points to the end
		 * of the old LDTs, and null_count is the number of null selectors
		 * at the end. 
		 *
		 * Either way, there are null_count null selectors just prior to
		 * the i-indexed selector, and either null_count >= num_sels,
		 * or we're at the end, so we can extend.
		 */
		start_sel = old_ldt->start + i - null_count;
	    } else {
		start_sel = LDTSZ_MIN;
	    }
		
	    if (start_sel + num_sels > LDTSZ) {
		task_unlock(task);
		return ENOMEM;
	    }
	}

	if (start_sel == 0 && num_sels == 0) {
	    new_ldt = NULL;
	} else {
	    /*
	     * Allocate new LDT
	     */

	    unsigned int    begin_sel = start_sel;
	    unsigned int    end_sel = begin_sel + num_sels;
	    
	    if (old_ldt != NULL) {
		if (old_ldt->start < begin_sel)
		    begin_sel = old_ldt->start;
		if (old_ldt->start + old_ldt->count > end_sel)
		    end_sel = old_ldt->start + old_ldt->count;
	    }

	    ldt_count = end_sel - begin_sel;

	    new_ldt = (user_ldt_t)kalloc(sizeof(struct user_ldt) + (ldt_count * sizeof(struct real_descriptor)));
	    if (new_ldt == NULL) {
		task_unlock(task);
		return ENOMEM;
	    }

	    new_ldt->start = begin_sel;
	    new_ldt->count = ldt_count;

	    /*
	     * Have new LDT.  If there was an old LDT, copy descriptors
	     * from old to new.
	     */
	    if (old_ldt) {
		bcopy(&old_ldt->ldt[0],
		      &new_ldt->ldt[old_ldt->start - begin_sel],
		      old_ldt->count * sizeof(struct real_descriptor));

		/*
		 * If the old and new LDTs are non-overlapping, fill the 
		 * center in with null selectors.
		 */
		 		 
		if (old_ldt->start + old_ldt->count < start_sel)
		    bzero(&new_ldt->ldt[old_ldt->count],
			  (start_sel - (old_ldt->start + old_ldt->count)) * sizeof(struct real_descriptor));
		else if (old_ldt->start > start_sel + num_sels)
		    bzero(&new_ldt->ldt[num_sels],
			  (old_ldt->start - (start_sel + num_sels)) * sizeof(struct real_descriptor));
	    }

	    /*
	     * Install new descriptors.
	     */
	    if (descs != 0) {
		err = copyin(descs, (char *)&new_ldt->ldt[start_sel - begin_sel],
			     num_sels * sizeof(struct real_descriptor));
		if (err != 0)
		{
		    task_unlock(task);
		    user_ldt_free(new_ldt);
		    return err;
		}
	    } else {
		bzero(&new_ldt->ldt[start_sel - begin_sel], num_sels * sizeof(struct real_descriptor));
	    }

	    /*
	     * Validate descriptors.
	     * Only allow descriptors with user privileges.
	     */
	    for (i = 0, dp = (struct real_descriptor *) &new_ldt->ldt[start_sel - begin_sel];
		 i < num_sels;
		 i++, dp++)
	    {
		switch (dp->access & ~ACC_A) {
		    case 0:
		    case ACC_P:
			/* valid empty descriptor */
			break;
		    case ACC_P | ACC_PL_U | ACC_DATA:
		    case ACC_P | ACC_PL_U | ACC_DATA_W:
		    case ACC_P | ACC_PL_U | ACC_DATA_E:
		    case ACC_P | ACC_PL_U | ACC_DATA_EW:
		    case ACC_P | ACC_PL_U | ACC_CODE:
		    case ACC_P | ACC_PL_U | ACC_CODE_R:
		    case ACC_P | ACC_PL_U | ACC_CODE_C:
		    case ACC_P | ACC_PL_U | ACC_CODE_CR:
		    case ACC_P | ACC_PL_U | ACC_CALL_GATE_16:
		    case ACC_P | ACC_PL_U | ACC_CALL_GATE:
			break;
		    default:
			task_unlock(task);
			user_ldt_free(new_ldt);
			return EACCES;
		}
	    }
	}

	task->i386_ldt = new_ldt; /* new LDT for task */

	/*
	 * Switch to new LDT.  We need to do this on all CPUs, since
	 * another thread in this same task may be currently running,
	 * and we need to make sure the new LDT is in place
	 * throughout the task before returning to the user.
	 */
	mp_rendezvous_no_intrs(user_ldt_set_action, task);

	task_unlock(task);

	/* free old LDT.  We can't do this until after we've
	 * rendezvoused with all CPUs, in case another thread
	 * in this task was in the process of context switching.
	 */
	if (old_ldt)
	    user_ldt_free(old_ldt);

	*retval = start_sel;

	return 0;
}
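
For context, user code reaches this through the machine-dependent i386_set_ldt() syscall wrapper; a minimal sketch, assuming LDT_AUTO_ALLOC and union ldt_entry come from <i386/user_ldt.h> and that the wrapper returns the first selector (the *retval above) or -1 on error:

#include <string.h>
#include <i386/user_ldt.h>	/* LDT_AUTO_ALLOC, union ldt_entry -- assumed header */

/* Reserve one auto-allocated LDT slot.  A zeroed descriptor has
 * access == 0, which the validation loop above accepts as a
 * "valid empty descriptor". */
int
reserve_ldt_slot(void)
{
	union ldt_entry desc;

	memset(&desc, 0, sizeof(desc));
	return i386_set_ldt(LDT_AUTO_ALLOC, &desc, 1);	/* first new selector, or -1 */
}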
Example #2
void *realloc(void *ptr, unsigned long size) {
	/* Note: this frees the old block before allocating the new one, so
	 * the previous contents are lost -- kalloc/kfree here track no
	 * per-block size, leaving nothing safe to copy from. */
	kfree(ptr);
	return kalloc(size);
}
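
A data-preserving variant needs the allocator to remember each block's size. A minimal sketch under that assumption, keeping the kalloc()/kfree(ptr) primitives above and stashing the size in a hidden header word (sized_alloc/sized_realloc are illustrative names, not an existing API):

#include <string.h>

/* Allocate size bytes plus one header word recording the size. */
void *sized_alloc(unsigned long size) {
	unsigned long *p = kalloc(size + sizeof(unsigned long));
	if (p == NULL)
		return NULL;
	*p = size;
	return p + 1;
}

/* Resize a sized_alloc() block, copying the surviving bytes. */
void *sized_realloc(void *ptr, unsigned long size) {
	unsigned long old_size;
	void *new;

	if (ptr == NULL)
		return sized_alloc(size);
	old_size = ((unsigned long *)ptr)[-1];
	new = sized_alloc(size);
	if (new != NULL)
		memcpy(new, ptr, old_size < size ? old_size : size);
	kfree((unsigned long *)ptr - 1);
	return new;
}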
Example #3
void
bsd_init(void)
{
	struct uthread *ut;
	unsigned int i;
	struct vfs_context context;
	kern_return_t	ret;
	struct ucred temp_cred;
	struct posix_cred temp_pcred;
#if NFSCLIENT || CONFIG_IMAGEBOOT
	boolean_t       netboot = FALSE;
#endif

#define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */

	throttle_init();

	printf(copyright);
	
	bsd_init_kprintf("calling kmeminit\n");
	kmeminit();
	
	bsd_init_kprintf("calling parse_bsd_args\n");
	parse_bsd_args();

#if CONFIG_DEV_KMEM
	bsd_init_kprintf("calling dev_kmem_init\n");
	dev_kmem_init();
#endif

	/* Initialize kauth subsystem before instancing the first credential */
	bsd_init_kprintf("calling kauth_init\n");
	kauth_init();

	/* Initialize process and pgrp structures. */
	bsd_init_kprintf("calling procinit\n");
	procinit();

	/* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/
	tty_init();

	kernproc = &proc0;	/* implicitly bzero'ed */

	/* kernel_task->proc = kernproc; */
	set_bsdtask_info(kernel_task,(void *)kernproc);

	/* give kernproc a name */
	bsd_init_kprintf("calling process_name\n");
	process_name("kernel_task", kernproc);

	/* allocate proc lock group attribute and group */
	bsd_init_kprintf("calling lck_grp_attr_alloc_init\n");
	proc_lck_grp_attr= lck_grp_attr_alloc_init();

	proc_lck_grp = lck_grp_alloc_init("proc",  proc_lck_grp_attr);
#if CONFIG_FINE_LOCK_GROUPS
	proc_slock_grp = lck_grp_alloc_init("proc-slock",  proc_lck_grp_attr);
	proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock",  proc_lck_grp_attr);
	proc_ucred_mlock_grp = lck_grp_alloc_init("proc-ucred-mlock",  proc_lck_grp_attr);
	proc_mlock_grp = lck_grp_alloc_init("proc-mlock",  proc_lck_grp_attr);
#endif
	/* Allocate proc lock attribute */
	proc_lck_attr = lck_attr_alloc_init();
#if 0
#if __PROC_INTERNAL_DEBUG
	lck_attr_setdebug(proc_lck_attr);
#endif
#endif

#if CONFIG_FINE_LOCK_GROUPS
	proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr);
#else
	proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr);
	lck_mtx_init(&kernproc->p_ucred_mlock, proc_lck_grp, proc_lck_attr);
	lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr);
#endif

	assert(bsd_simul_execs != 0);
	execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr);
	execargs_cache_size = bsd_simul_execs;
	execargs_free_count = bsd_simul_execs;
	execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t));
	bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t));
	
	if (current_task() != kernel_task)
		printf("bsd_init: We have a problem, "
				"current task is not kernel task\n");
	
	bsd_init_kprintf("calling get_bsdthread_info\n");
	ut = (uthread_t)get_bsdthread_info(current_thread());

#if CONFIG_MACF
	/*
	 * Initialize the MAC Framework
	 */
	mac_policy_initbsd();
	kernproc->p_mac_enforce = 0;

#if defined (__i386__) || defined (__x86_64__)
	/*
	 * We currently only support this on i386/x86_64, as that is the
	 * only lock code we have instrumented so far.
	 */
	check_policy_init(policy_check_flags);
#endif
#endif /* MAC */

	/* Initialize System Override call */
	init_system_override();
	
	/*
	 * Create process 0.
	 */
	proc_list_lock();
	LIST_INSERT_HEAD(&allproc, kernproc, p_list);
	kernproc->p_pgrp = &pgrp0;
	LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
	LIST_INIT(&pgrp0.pg_members);
#ifdef CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr);
#else
	lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr);
#endif
	/* No other BSD threads exist at this point, so this is safe without the pgrp lock */
	LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist);
	kernproc->p_listflag |= P_LIST_INPGRP;
	kernproc->p_pgrpid = 0;
	kernproc->p_uniqueid = 0;

	pgrp0.pg_session = &session0;
	pgrp0.pg_membercnt = 1;

	session0.s_count = 1;
	session0.s_leader = kernproc;
	session0.s_listflags = 0;
#ifdef CONFIG_FINE_LOCK_GROUPS
	lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr);
#else
	lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr);
#endif
	LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash);
	proc_list_unlock();

#if CONFIG_PERSONAS
	kernproc->p_persona = NULL;
#endif

	kernproc->task = kernel_task;
	
	kernproc->p_stat = SRUN;
	kernproc->p_flag = P_SYSTEM;
	kernproc->p_lflag = 0;
	kernproc->p_ladvflag = 0;
	
#if DEVELOPMENT || DEBUG
	if (bootarg_disable_aslr)
		kernproc->p_flag |= P_DISABLE_ASLR;
#endif

	kernproc->p_nice = NZERO;
	kernproc->p_pptr = kernproc;

	TAILQ_INIT(&kernproc->p_uthlist);
	TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list);
	
	kernproc->sigwait = FALSE;
	kernproc->sigwait_thread = THREAD_NULL;
	kernproc->exit_thread = THREAD_NULL;
	kernproc->p_csflags = CS_VALID;

	/*
	 * Create credential.  This also Initializes the audit information.
	 */
	bsd_init_kprintf("calling bzero\n");
	bzero(&temp_cred, sizeof(temp_cred));
	bzero(&temp_pcred, sizeof(temp_pcred));
	temp_pcred.cr_ngroups = 1;
	/* kern_proc, shouldn't call up to DS for group membership */
	temp_pcred.cr_flags = CRF_NOMEMBERD;
	temp_cred.cr_audit.as_aia_p = audit_default_aia_p;
	
	bsd_init_kprintf("calling kauth_cred_create\n");
	/*
	 * We have to label the temp cred before we create from it to
	 * properly set cr_ngroups, or the create will fail.
	 */
	posix_cred_label(&temp_cred, &temp_pcred);
	kernproc->p_ucred = kauth_cred_create(&temp_cred); 

	/* update cred on proc */
	PROC_UPDATE_CREDS_ONPROC(kernproc);

	/* give the (already existing) initial thread a reference on it */
	bsd_init_kprintf("calling kauth_cred_ref\n");
	kauth_cred_ref(kernproc->p_ucred);
	ut->uu_context.vc_ucred = kernproc->p_ucred;
	ut->uu_context.vc_thread = current_thread();

	TAILQ_INIT(&kernproc->p_aio_activeq);
	TAILQ_INIT(&kernproc->p_aio_doneq);
	kernproc->p_aio_total_count = 0;
	kernproc->p_aio_active_count = 0;

	bsd_init_kprintf("calling file_lock_init\n");
	file_lock_init();

#if CONFIG_MACF
	mac_cred_label_associate_kernel(kernproc->p_ucred);
#endif

	/* Create the file descriptor table. */
	kernproc->p_fd = &filedesc0;
	filedesc0.fd_cmask = cmask;
	filedesc0.fd_knlistsize = -1;
	filedesc0.fd_knlist = NULL;
	filedesc0.fd_knhash = NULL;
	filedesc0.fd_knhashmask = 0;

	/* Create the limits structures. */
	kernproc->p_limit = &limit0;
	for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++)
		limit0.pl_rlimit[i].rlim_cur = 
			limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY;
	limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
	limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack;
	limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data;
	limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core;
	limit0.pl_refcnt = 1;

	kernproc->p_stats = &pstats0;
	kernproc->p_sigacts = &sigacts0;

	/*
	 * Charge root for one process: launchd.
	 */
	bsd_init_kprintf("calling chgproccnt\n");
	(void)chgproccnt(0, 1);

	/*
	 *	Allocate a kernel submap for pageable memory
	 *	for temporary copying (execve()).
	 */
	{
		vm_offset_t	minimum;

		bsd_init_kprintf("calling kmem_suballoc\n");
		assert(bsd_pageable_map_size != 0);
		ret = kmem_suballoc(kernel_map,
				&minimum,
				(vm_size_t)bsd_pageable_map_size,
				TRUE,
				VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_BSD),
				&bsd_pageable_map);
		if (ret != KERN_SUCCESS) 
			panic("bsd_init: Failed to allocate bsd pageable map");
	}

	/*
	 * Initialize buffers and hash links for buffers
	 *
	 * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must
	 *		happen after a credential has been associated with
	 *		the kernel task.
	 */
	bsd_init_kprintf("calling bsd_bufferinit\n");
	bsd_bufferinit();

	/* Initialize the execve() semaphore */
	bsd_init_kprintf("calling semaphore_create\n");

	/* Note: no semaphore is actually created here any more; this check
	 * re-tests the kmem_suballoc() result from above. */
	if (ret != KERN_SUCCESS)
		panic("bsd_init: Failed to create execve semaphore");

	/*
	 * Initialize the calendar.
	 */
	bsd_init_kprintf("calling IOKitInitializeTime\n");
	IOKitInitializeTime();

	bsd_init_kprintf("calling ubc_init\n");
	ubc_init();

	/*
	 * Initialize device-switches.
	 */
	bsd_init_kprintf("calling devsw_init() \n");
	devsw_init();

	/* Initialize the file systems. */
	bsd_init_kprintf("calling vfsinit\n");
	vfsinit();

#if CONFIG_PROC_UUID_POLICY
	/* Initialize the proc_uuid_policy subsystem */
	bsd_init_kprintf("calling proc_uuid_policy_init()\n");
	proc_uuid_policy_init();
#endif

#if SOCKETS
	/* Initialize per-CPU cache allocator */
	mcache_init();

	/* Initialize mbufs. */
	bsd_init_kprintf("calling mbinit\n");
	mbinit();
	net_str_id_init(); /* for mbuf tags */
#endif /* SOCKETS */

	/*
	 * Initializes security event auditing.
	 * XXX: Should/could this occur later?
	 */
#if CONFIG_AUDIT
	bsd_init_kprintf("calling audit_init\n");
 	audit_init();  
#endif

	/* Initialize kqueues */
	bsd_init_kprintf("calling knote_init\n");
	knote_init();

	/* Initialize for async IO */
	bsd_init_kprintf("calling aio_init\n");
	aio_init();

	/* Initialize pipes */
	bsd_init_kprintf("calling pipeinit\n");
	pipeinit();

	/* Initialize SysV shm subsystem locks; the subsystem proper is
	 * initialized through a sysctl.
	 */
#if SYSV_SHM
	bsd_init_kprintf("calling sysv_shm_lock_init\n");
	sysv_shm_lock_init();
#endif
#if SYSV_SEM
	bsd_init_kprintf("calling sysv_sem_lock_init\n");
	sysv_sem_lock_init();
#endif
#if SYSV_MSG
	bsd_init_kprintf("sysv_msg_lock_init\n");
	sysv_msg_lock_init();
#endif
	bsd_init_kprintf("calling pshm_lock_init\n");
	pshm_lock_init();
	bsd_init_kprintf("calling psem_lock_init\n");
	psem_lock_init();

	pthread_init();
	/* POSIX Shm and Sem */
	bsd_init_kprintf("calling pshm_cache_init\n");
	pshm_cache_init();
	bsd_init_kprintf("calling psem_cache_init\n");
	psem_cache_init();
	bsd_init_kprintf("calling time_zone_slock_init\n");
	time_zone_slock_init();
	bsd_init_kprintf("calling select_waitq_init\n");
	select_waitq_init();

	/*
	 * Initialize protocols.  Block reception of incoming packets
	 * until everything is ready.
	 */
	bsd_init_kprintf("calling sysctl_register_fixed\n");
	sysctl_register_fixed(); 
	bsd_init_kprintf("calling sysctl_mib_init\n");
	sysctl_mib_init();
#if NETWORKING
	bsd_init_kprintf("calling dlil_init\n");
	dlil_init();
	bsd_init_kprintf("calling proto_kpi_init\n");
	proto_kpi_init();
#endif /* NETWORKING */
#if SOCKETS
	bsd_init_kprintf("calling socketinit\n");
	socketinit();
	bsd_init_kprintf("calling domaininit\n");
	domaininit();
	iptap_init();
#if FLOW_DIVERT
	flow_divert_init();
#endif	/* FLOW_DIVERT */
#endif /* SOCKETS */

	kernproc->p_fd->fd_cdir = NULL;
	kernproc->p_fd->fd_rdir = NULL;

#if CONFIG_FREEZE
#ifndef CONFIG_MEMORYSTATUS
    #error "CONFIG_FREEZE defined without matching CONFIG_MEMORYSTATUS"
#endif
	/* Initialise background freezing */
	bsd_init_kprintf("calling memorystatus_freeze_init\n");
	memorystatus_freeze_init();
#endif

#if CONFIG_MEMORYSTATUS
	/* Initialize kernel memory status notifications */
	bsd_init_kprintf("calling memorystatus_init\n");
	memorystatus_init();
#endif /* CONFIG_MEMORYSTATUS */

	bsd_init_kprintf("calling macx_init\n");
	macx_init();

	bsd_init_kprintf("calling acct_init\n");
	acct_init();

#ifdef GPROF
	/* Initialize kernel profiling. */
	kmstartup();
#endif

	bsd_init_kprintf("calling bsd_autoconf\n");
	bsd_autoconf();

#if CONFIG_DTRACE
	dtrace_postinit();
#endif

	/*
	 * We attach the loopback interface *way* down here to ensure
	 * it happens after autoconf(), otherwise it becomes the
	 * "primary" interface.
	 */
#include <loop.h>
#if NLOOP > 0
	bsd_init_kprintf("calling loopattach\n");
	loopattach();			/* XXX */
#endif
#if NGIF
	/* Initialize gif interface (after lo0) */
	gif_init();
#endif

#if PFLOG
	/* Initialize packet filter log interface */
	pfloginit();
#endif /* PFLOG */

#if NETHER > 0
	/* Register the built-in dlil ethernet interface family */
	bsd_init_kprintf("calling ether_family_init\n");
	ether_family_init();
#endif /* ETHER */

#if NETWORKING
	/* Call any kext code that wants to run just after network init */
	bsd_init_kprintf("calling net_init_run\n");
	net_init_run();
	
#if CONTENT_FILTER
	cfil_init();
#endif

#if PACKET_MANGLER
	pkt_mnglr_init();
#endif	

#if NECP
	/* Initialize Network Extension Control Policies */
	necp_init();
#endif

	netagent_init();

	/* register user tunnel kernel control handler */
	utun_register_control();
#if IPSEC
	ipsec_register_control();
#endif /* IPSEC */
	netsrc_init();
	nstat_init();
	tcp_cc_init();
#if MPTCP
	mptcp_control_register();
#endif /* MPTCP */
#endif /* NETWORKING */

	bsd_init_kprintf("calling vnode_pager_bootstrap\n");
	vnode_pager_bootstrap();

	bsd_init_kprintf("calling inittodr\n");
	inittodr(0);

	/* Mount the root file system. */
	while (TRUE) {
		int err;

		bsd_init_kprintf("calling setconf\n");
		setconf();
#if NFSCLIENT
		netboot = (mountroot == netboot_mountroot);
#endif

		bsd_init_kprintf("vfs_mountroot\n");
		if (0 == (err = vfs_mountroot()))
			break;
		rootdevice[0] = '\0';
#if NFSCLIENT
		if (netboot) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: failed to mount network root, error %d, %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
#endif
		printf("cannot mount root, errno = %d\n", err);
		boothowto |= RB_ASKNAME;
	}

	IOSecureBSDRoot(rootdevice);

	context.vc_thread = current_thread();
	context.vc_ucred = kernproc->p_ucred;
	mountlist.tqh_first->mnt_flag |= MNT_ROOTFS;

	bsd_init_kprintf("calling VFS_ROOT\n");
	/* Get the vnode for '/'.  Set filedesc0.fd_cdir to reference it. */
	if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context))
		panic("bsd_init: cannot find root vnode: %s", PE_boot_args());
	rootvnode->v_flag |= VROOT;
	(void)vnode_ref(rootvnode);
	(void)vnode_put(rootvnode);
	filedesc0.fd_cdir = rootvnode;

#if NFSCLIENT
	if (netboot) {
		int err;

		netboot = TRUE;
		/* post mount setup */
		if ((err = netboot_setup()) != 0) {
			PE_display_icon( 0, "noroot");  /* XXX a netboot-specific icon would be nicer */
			vc_progress_set(FALSE, 0);
			for (i=1; 1; i*=2) {
				printf("bsd_init: NetBoot could not find root, error %d: %s\n",
					err, PE_boot_args());
				printf("We are hanging here...\n");
				IOSleep(i*60*1000);
			}
			/*NOTREACHED*/
		}
	}
#endif
	

#if CONFIG_IMAGEBOOT
	/*
	 * See if a system disk image is present. If so, mount it and
	 * switch the root vnode to point to it
	 */ 
	if (netboot == FALSE && imageboot_needed()) {
		/* 
		 * An image was found.  No turning back: we're booted
		 * with a kernel from the disk image.
		 */
		imageboot_setup(); 
	}
#endif /* CONFIG_IMAGEBOOT */
  
	/* set initial time; all other resource data is already zero'ed */
	microtime_with_abstime(&kernproc->p_start, &kernproc->p_stats->ps_start);

#if DEVFS
	{
	    char mounthere[] = "/dev";	/* !const because of internal casting */

	    bsd_init_kprintf("calling devfs_kernel_mount\n");
	    devfs_kernel_mount(mounthere);
	}
#endif /* DEVFS */

	/* Initialize signal state for process 0. */
	bsd_init_kprintf("calling siginit\n");
	siginit(kernproc);

	bsd_init_kprintf("calling bsd_utaskbootstrap\n");
	bsd_utaskbootstrap();

#if defined(__LP64__)
	kernproc->p_flag |= P_LP64;
#endif

	pal_kernel_announce();

	bsd_init_kprintf("calling mountroot_post_hook\n");

	/* invoke post-root-mount hook */
	if (mountroot_post_hook != NULL)
		mountroot_post_hook();

#if 0 /* not yet */
	consider_zone_gc(FALSE);
#endif


	bsd_init_kprintf("done\n");
}
Example #4
u_int32_t vnode_trim_list (vnode_t vp, struct trim_list *tl, boolean_t route_only)
{
	int		error = 0;
	int		trim_index = 0;
	u_int32_t	blocksize = 0;
	struct vnode	*devvp;
	dk_extent_t	*extents;
	dk_unmap_t	unmap;
	_dk_cs_unmap_t	cs_unmap;

	if ( !(vp->v_mount->mnt_ioflags & MNT_IOFLAGS_UNMAP_SUPPORTED))
		return (ENOTSUP);

	if (tl == NULL)
		return (0);

	/*
	 * Get the underlying device vnode and physical block size
	 */
	devvp = vp->v_mount->mnt_devvp;
	blocksize = vp->v_mount->mnt_devblocksize;

	extents = kalloc(sizeof(dk_extent_t) * MAX_BATCH_TO_TRIM);
	if (extents == NULL)
		return (ENOMEM);

	if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_CSUNMAP_SUPPORTED) {
		memset (&cs_unmap, 0, sizeof(_dk_cs_unmap_t));
		cs_unmap.extents = extents;

		if (route_only == TRUE)
			cs_unmap.options = ROUTE_ONLY;
	} else {
		memset (&unmap, 0, sizeof(dk_unmap_t));
		unmap.extents = extents;
	}

	while (tl) {
		daddr64_t	io_blockno;	/* Block number corresponding to the start of the extent */
		size_t		io_bytecount;	/* Number of bytes in current extent for the specified range */
		size_t		trimmed;
		size_t		remaining_length;
		off_t		current_offset; 

		current_offset = tl->tl_offset;
		remaining_length = tl->tl_length;
		trimmed = 0;
		
		/* 
		 * We may not get the entire range from tl_offset -> tl_offset+tl_length in a single
		 * extent from the blockmap call.  Keep looping/going until we are sure we've hit
		 * the whole range or if we encounter an error.
		 */
		while (trimmed < tl->tl_length) {
			/*
			 * VNOP_BLOCKMAP will tell us the logical to physical block number mapping for the
			 * specified offset.  It returns blocks in contiguous chunks, so if the logical range is 
			 * broken into multiple extents, it must be called multiple times, increasing the offset
			 * in each call to ensure that the entire range is covered.
			 */
			error = VNOP_BLOCKMAP (vp, current_offset, remaining_length, 
					       &io_blockno, &io_bytecount, NULL, VNODE_READ, NULL);

			if (error) {
				goto trim_exit;
			}

			extents[trim_index].offset = (uint64_t) io_blockno * (u_int64_t) blocksize;
			extents[trim_index].length = io_bytecount;

			trim_index++;

			if (trim_index == MAX_BATCH_TO_TRIM) {

				if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_CSUNMAP_SUPPORTED) {
					cs_unmap.extentsCount = trim_index;
					error = VNOP_IOCTL(devvp, _DKIOCCSUNMAP, (caddr_t)&cs_unmap, 0, vfs_context_kernel());
				} else {
					unmap.extentsCount = trim_index;
					error = VNOP_IOCTL(devvp, DKIOCUNMAP, (caddr_t)&unmap, 0, vfs_context_kernel());
				}
				if (error) {
					goto trim_exit;
				}
				trim_index = 0;
			}
			trimmed += io_bytecount;
			current_offset += io_bytecount;
			remaining_length -= io_bytecount;
		}
		tl = tl->tl_next;
	}
	if (trim_index) {
		if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_CSUNMAP_SUPPORTED) {
			cs_unmap.extentsCount = trim_index;
			error = VNOP_IOCTL(devvp, _DKIOCCSUNMAP, (caddr_t)&cs_unmap, 0, vfs_context_kernel());
		} else {
			unmap.extentsCount = trim_index;
			error = VNOP_IOCTL(devvp, DKIOCUNMAP, (caddr_t)&unmap, 0, vfs_context_kernel());
		}
	}
trim_exit:
	kfree(extents, sizeof(dk_extent_t) * MAX_BATCH_TO_TRIM);

	return error;
}
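
Callers hand vnode_trim_list() a linked list of byte ranges; a minimal sketch of building one, using only the tl_offset/tl_length/tl_next fields the loop above reads:

/* Trim two discontiguous ranges of a vnode (sketch). */
static int
trim_two_ranges(vnode_t vp)
{
	struct trim_list a, b;

	a.tl_offset = 0;		/* first 64 KiB */
	a.tl_length = 64 * 1024;
	a.tl_next   = &b;

	b.tl_offset = 1024 * 1024;	/* 64 KiB starting at 1 MiB */
	b.tl_length = 64 * 1024;
	b.tl_next   = NULL;

	return (int)vnode_trim_list(vp, &a, FALSE);
}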
Example #5
void *
boot_script_malloc (unsigned int size)
{
  return (void *) kalloc (size);
}
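
The matching release hook is equally thin; a sketch, assuming the Mach-style kfree() that takes the block size and that boot-script callers pass the original size back:

void
boot_script_free (void *ptr, unsigned int size)
{
  kfree ((vm_offset_t) ptr, size);
}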
Example #6
/*--
Cat pdnet;RPC;XML-RPC;Encode;BGB
Form
	NetParse_Node *BGBRPC_EncodeValue(void *val);
Description
	Encode an BGB-RPC Value.
Status Internal
--*/
NetParse_Node *BGBRPC_EncodeValue(void *val)
{
	NetParse_Node *t, *t2;
	char buf[384];	/* was 16: "%f" on a large double needs ~318 bytes,
			 * and the ISO-8601 date below needs 18 */
	byte *s, *s2;
	int i;
	char *type;

	type=ObjType_GetTypeName(val);

	if(!strcmp(type, "int_t"))
	{
		sprintf(buf, "%d", *(int *)val);
		t2=NetParse_NewNode();
		t2->text=kstrdup(buf);

		t=NetParse_NewNode();
		t->key=kstrdup("i4");
		t->first=t2;

		return(t);
	}

	if(!strcmp(type, "bool_t"))
	{
		sprintf(buf, "%d", *(int *)val);
		t2=NetParse_NewNode();
		t2->text=kstrdup(buf);

		t=NetParse_NewNode();
		t->key=kstrdup("boolean");
		t->first=t2;

		return(t);
	}

	if(!strcmp(type, "char_t"))
	{
		sprintf(buf, "%d", *(int *)val);
		t2=NetParse_NewNode();
		t2->text=kstrdup(buf);

		t=NetParse_NewNode();
		t->key=kstrdup("char");
		t->first=t2;

		return(t);
	}

	if(!strcmp(type, "string_t"))
	{
//		sprintf(buf, "%s", val);
		t2=NetParse_NewNode();
		t2->text=kstrdup((char *)val);

		t=NetParse_NewNode();
		t->key=kstrdup("string");
		t->first=t2;

		return(t);
	}

	if(!strcmp(type, "symbol_t"))
	{
//		sprintf(buf, "%s", val);
		t2=NetParse_NewNode();
		t2->text=kstrdup((char *)val);

		t=NetParse_NewNode();
		t->key=kstrdup("symbol");
		t->first=t2;

		return(t);
	}

	if(!strcmp(type, "link_t"))
	{
//		sprintf(buf, "%s", val);
		t2=NetParse_NewNode();
		t2->text=kstrdup((char *)val);

		t=NetParse_NewNode();
		t->key=kstrdup("link");
		t->first=t2;

		return(t);
	}


	if(!strcmp(type, "float_t"))
	{
		sprintf(buf, "%f", *(double *)val);
		t2=NetParse_NewNode();
		t2->text=kstrdup(buf);

		t=NetParse_NewNode();
		t->key=kstrdup("double");
		t->first=t2;

		return(t);
	}

	if(!strcmp(type, "date_t"))
	{
		s=(byte *)val;
		sprintf(buf, "%04d%02d%02dT%02d:%02d:%02d",
			(s[0]<<8)+s[1], s[2], s[3], s[4], s[5], s[6]);
		t2=NetParse_NewNode();
		t2->text=kstrdup(buf);

		t=NetParse_NewNode();
		t->key=kstrdup("dateTime.iso8601");
		t->first=t2;

		return(t);
	}

	if(!strcmp(type, "data_t"))
	{
		s=(byte *)val;
		i=ObjType_GetSize(val);
		s2=kalloc(((i*4)/3)+5);
		HttpNode_EncodeMime(s2, s, i);

		t2=NetParse_NewNode();
		t2->text=(char *)s2;

		t=NetParse_NewNode();
		t->key=kstrdup("base64");
		t->first=t2;

		return(t);
	}

	if(!strcmp(type, "array_t"))
	{
		return(BGBRPC_EncodeArray2(val));
	}

	if(!strcmp(type, "d_array_t"))
	{
		return(BGBRPC_EncodeArray(val));
	}

	if(!strcmp(type, "d_struct_t"))
	{
		return(BGBRPC_EncodeStruct(val));
	}

	if(!strcmp(type, "d_vector_t"))
	{
		return(BGBRPC_EncodeVector(val));
	}

	kprint("BGBRPC_EncodeValue: Unknown type '%s'\n", type);
	return(NULL);
}
Example #7
/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
	processor_set_t			pset,
	mach_port_t				**thing_list,
	mach_msg_type_number_t	*count,
	int						type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0;
	addr = NULL;

	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock and allocate more memory */
		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the list locked */

	actual = 0;
	switch (type) {

	case THING_TASK: {
		task_t		task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
						!queue_end(&tasks, (queue_entry_t)task);
								task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
			if (task != kernel_task) {
#endif
				task_reference_internal(task);
				task_list[actual++] = task;
#if defined(SECURE_KERNEL)
			}
#endif
		}

		break;
	}

	case THING_THREAD: {
		thread_t	thread, *thread_list = (thread_t *)addr;

		for (thread = (thread_t)queue_first(&threads);
						!queue_end(&threads, (queue_entry_t)thread);
								thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}

	}
		
	lck_mtx_unlock(&tasks_threads_lock);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				switch (type) {

				case THING_TASK: {
					task_t		*task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD: {
					thread_t	*thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}

				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;

		/* do the conversion that Mig should handle */

		switch (type) {

		case THING_TASK: {
			task_t		*task_list = (task_t *)addr;

			for (i = 0; i < actual; i++)
				(*thing_list)[i] = convert_task_to_port(task_list[i]);
			break;
		}

		case THING_THREAD: {
			thread_t	*thread_list = (thread_t *)addr;

			for (i = 0; i < actual; i++)
			  	(*thing_list)[i] = convert_thread_to_port(thread_list[i]);
			break;
		}

		}
	}

	return (KERN_SUCCESS);
}
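
The grow-and-retry sizing loop above (it reappears in the following examples) is a standard Mach idiom: compute the needed size under the lock; if the current buffer is too small, drop the lock, reallocate, and re-check. A distilled sketch with illustrative names:

static void *
snapshot_alloc(lck_mtx_t *lock, vm_size_t (*needed)(void), vm_size_t *sizep)
{
	vm_size_t size = 0, size_needed;
	void *addr = NULL;

	for (;;) {
		lck_mtx_lock(lock);
		size_needed = (*needed)();	/* must be evaluated under the lock */
		if (size_needed <= size)
			break;			/* big enough: return with the lock held */

		/* too small: unlock, grow, retry */
		lck_mtx_unlock(lock);
		if (size != 0)
			kfree(addr, size);
		size = size_needed;
		addr = kalloc(size);
		if (addr == NULL)
			return NULL;		/* caller maps this to KERN_RESOURCE_SHORTAGE */
	}
	*sizep = size;
	return addr;				/* caller fills it in, then unlocks */
}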
Example #8
kern_return_t
host_processor_sets(
	host_t				host,
	processor_set_name_array_t	*pset_list,
	natural_t			*count)
{
	unsigned int actual;	/* this many psets */
	processor_set_t pset;
	processor_set_t *psets;
	int i;

	vm_size_t size;
	vm_size_t size_needed;
	vm_offset_t addr;

	if (host == HOST_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		simple_lock(&all_psets_lock);
		actual = all_psets_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock and allocate more memory */
		simple_unlock(&all_psets_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the all_psets_lock */

	psets = (processor_set_t *) addr;

	for (i = 0, pset = (processor_set_t) queue_first(&all_psets);
	     i < actual;
	     i++, pset = (processor_set_t) queue_next(&pset->all_psets)) {
		/* take ref for convert_pset_name_to_port */
		pset_reference(pset);
		psets[i] = pset;
	}
	assert(queue_end(&all_psets, (queue_entry_t) pset));

	/* can unlock now that we've got the pset refs */
	simple_unlock(&all_psets_lock);

	/*
	 *	Always have default port.
	 */

	assert(actual > 0);

	/* if we allocated too much, must copy */

	if (size_needed < size) {
		vm_offset_t newaddr;

		newaddr = kalloc(size_needed);
		if (newaddr == 0) {
			for (i = 0; i < actual; i++)
				pset_deallocate(psets[i]);
			kfree(addr, size);
			return KERN_RESOURCE_SHORTAGE;
		}

		memcpy((char *) newaddr, (char *) addr, size_needed);
		kfree(addr, size);
		psets = (processor_set_t *) newaddr;
	}

	*pset_list = (mach_port_t *) psets;
	*count = actual;

	/* do the conversion that Mig should handle */

	for (i = 0; i < actual; i++)
		((mach_port_t *) psets)[i] =
			(mach_port_t)convert_pset_name_to_port(psets[i]);

	return KERN_SUCCESS;
}
Example #9
int sysfs_tasks_read(struct sysfs_fnode *sfs, void *buf, int len)
{
    char *res = (char *)buf;
    struct fnode *fno = sfs->fnode;
    static char *task_txt;
    static int off;
    int i;
    int stack_used;
    char *name;
    int p_state;
    const char legend[]="pid\tstate\tstack\tname\r\n";
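    /* First read (fno->off == 0): render the whole task table into a
     * kalloc'd scratch buffer; later reads stream it out in len-sized
     * chunks, and the buffer is freed once the caller's offset reaches
     * the end. */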
    if (fno->off == 0) {
        frosted_mutex_lock(sysfs_mutex);
        task_txt = kalloc(MAX_SYSFS_BUFFER);
        if (!task_txt) {
            frosted_mutex_unlock(sysfs_mutex);
            return -1;
        }
        off = 0;

        strcpy(task_txt, legend);
        off += strlen(legend);

        for (i = 1; i < MAX_SYSFS_BUFFER; i++) {
            p_state = scheduler_task_state(i);
            if ((p_state != TASK_IDLE) && (p_state != TASK_OVER)) {
                off += ul_to_str(i, task_txt + off);
                task_txt[off++] = '\t';
                if (p_state == TASK_RUNNABLE)
                    task_txt[off++] = 'r';
                if (p_state == TASK_RUNNING)
                    task_txt[off++] = 'R';
                if (p_state == TASK_WAITING)
                    task_txt[off++] = 'W';
                if (p_state == TASK_FORKED)
                    task_txt[off++] = 'F';
                if (p_state == TASK_ZOMBIE)
                    task_txt[off++] = 'Z';

                task_txt[off++] = '\t';
                stack_used = scheduler_stack_used(i);
                off += ul_to_str(stack_used, task_txt + off);

                task_txt[off++] = '\t';
                name = scheduler_task_name(i);
                if (name)
                {
                    strcpy(&task_txt[off], name);
                    off += strlen(name);
                }

                task_txt[off++] = '\r';
                task_txt[off++] = '\n';
            }
        }
        task_txt[off++] = '\0';
    }
    if (off == fno->off) {
        kfree(task_txt);
        frosted_mutex_unlock(sysfs_mutex);
        return -1;
    }
    if (len > (off - fno->off)) {
       len = off - fno->off;
    }
    memcpy(res, task_txt + fno->off, len);
    fno->off += len;
    return len;
}
Example #10
uint txt_insert(uint elem_, char val)
{
    txt* elem=(txt*)elem_;
    txtContent* nextPos;
    uint size=sizeof(txtContent);
    uint x,y;
    uint minX, minY, maxX, maxY;
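    /* Insert one glyph after the cursor: carve a txtContent node out of
     * the current 4 KiB block (chaining in a fresh page from kalloc()
     * when the block is full), splice it into the doubly-linked glyph
     * list, then reflow the following characters and redraw the dirty
     * rectangle. */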

    if (elem->cursor==(txtContent*)(uint)-1)
        return -1;
    if (elem->chImgArray[(uint)val]==0)
        return -1;
    if (elem->tail==0)
        nextPos=(txtContent*)((uint)elem->blockTail+4);
    else if ((uint)elem->tail+size-(uint)elem->blockTail>4096-size)
    {
        if ((*(uint*)elem->blockTail=(uint)kalloc()) == 0)
        {
            panic("Bingolingo!");
            return -1;
        }
        elem->blockTail=(void*)(*(uint*)elem->blockTail);
        *(uint*)elem->blockTail=(uint)0;
        nextPos=(txtContent*)((uint)elem->blockTail+4);
    }
    else
        nextPos=(txtContent*)((uint)elem->tail+size);

    if (elem->cursor==0)
        x=y=0;
    else if (elem->cursor->data.ch=='\n')
    {
        x=0;
        y=elem->cursor->data.ds.y+elem->chHeight;
        if (y+elem->chHeight>elem->ds.height)
            elem->ds.height=y+elem->chHeight;
    }
    else if (elem->cursor->data.ds.x+2*elem->chWidth<=elem->ds.width)
    {
        x=elem->cursor->data.ds.x+elem->chWidth;
        y=elem->cursor->data.ds.y;
    }
    else
    {
        x=0;
        y=elem->cursor->data.ds.y+elem->chHeight;
        if (y+elem->chHeight>elem->ds.height)
            elem->ds.height=y+elem->chHeight;
    }

    cha_createDomOrphan((cha*)nextPos, x, y, elem->chWidth, elem->chHeight, elem->ds.pid);
    ((cha*)nextPos)->ds.parent=&elem->ds;
    if (elem->cursor!=0)
        ((cha*)nextPos)->ds.frater=&(elem->cursor->data.ds);
    else
        ((cha*)nextPos)->ds.frater=&(elem->cursorDiv->ds);
    if (elem->cursor!=0 && elem->cursor->next!=0)
        elem->cursor->next->data.ds.frater=&(((cha*)nextPos)->ds);
    else if (elem->cursor==0 && elem->head!=0)
        elem->head->data.ds.frater=&(((cha*)nextPos)->ds);
    else
        elem->ds.descent=&(((cha*)nextPos)->ds);
    if (elem->cursor!=0 && elem->cursor->next==0)
        nextPos->next=0;
    else if (elem->cursor==0 && elem->head==0)
        nextPos->next=0;
    else if (elem->cursor==0 && elem->head!=0)
    {
        nextPos->next=elem->head;
        elem->head->prev=nextPos;
    }
    else
    {
        nextPos->next=elem->cursor->next;
        elem->cursor->next->prev=nextPos;
    }
    if (elem->cursor==0)
    {
        nextPos->prev=0;
        elem->head=nextPos;
    }
    else
    {
        nextPos->prev=elem->cursor;
        elem->cursor->next=nextPos;
    }
    elem->tail=nextPos;
    cha_setContentNotRedraw((uint)nextPos, elem->chImgArray[(uint)val], val);
    cha_setColor((uint)nextPos, elem->txtColor);

    elem->cursor=nextPos;
    minX=elem->cursor->data.ds.x;
    minY=elem->cursor->data.ds.y;
    maxX=elem->cursor->data.ds.x+elem->chWidth;
    maxY=elem->cursor->data.ds.y+elem->chHeight;

    while (nextPos!=0)
    {
        if (nextPos==0)
            x=y=0;
        else if (nextPos->data.ch=='\n')
        {
            x=0;
            y=nextPos->data.ds.y+elem->chHeight;
            if (y+elem->chHeight>elem->ds.height)
                elem->ds.height=y+elem->chHeight;
        }
        else if (nextPos->data.ds.x+2*elem->chWidth<=elem->ds.width)
        {
            x=nextPos->data.ds.x+elem->chWidth;
            y=nextPos->data.ds.y;
        }
        else
        {
            x=0;
            y=nextPos->data.ds.y+elem->chHeight;
            if (y+elem->chHeight>elem->ds.height)
                elem->ds.height=y+elem->chHeight;
        }
        if (nextPos==elem->cursor)
        {
            minX=min3(minX,x,elem->cursorDiv->ds.x);
            minY=min3(minY,y,elem->cursorDiv->ds.y);
            maxX=max3(maxX,x+elem->chWidth,elem->cursorDiv->ds.x+elem->chWidth);
            maxY=max3(maxY,y+elem->chHeight,elem->cursorDiv->ds.y+elem->chHeight);
            elem->cursorDiv->ds.x=x;
            elem->cursorDiv->ds.y=y;
        }
        if (nextPos->next==0)
            break;
        if (x==nextPos->next->data.ds.x && y==nextPos->next->data.ds.y)
            break;
        minX=min3(minX,x,nextPos->next->data.ds.x);
        minY=min3(minY,y,nextPos->next->data.ds.y);
        maxX=max3(maxX,x+elem->chWidth,nextPos->next->data.ds.x+elem->chWidth);
        maxY=max3(maxY,y+elem->chHeight,nextPos->next->data.ds.y+elem->chHeight);
        nextPos->next->data.ds.x=x;
        nextPos->next->data.ds.y=y;

        nextPos=nextPos->next;
    }

    reDraw_(&elem->ds,minX,minY,maxX-minX,maxY-minY);

    return 0;
}
Example #11
/*
 * Return info on stack usage for threads in a specific processor set
 */
kern_return_t
processor_set_stack_usage(
	processor_set_t	pset,
	unsigned int	*totalp,
	vm_size_t	*spacep,
	vm_size_t	*residentp,
	vm_size_t	*maxusagep,
	vm_offset_t	*maxstackp)
{
#if !MACH_DEBUG
        return KERN_NOT_SUPPORTED;
#else
	unsigned int total;
	vm_size_t maxusage;
	vm_offset_t maxstack;

	register thread_t *thread_list;
	register thread_t thread;

	unsigned int actual;	/* this many things */
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return KERN_INVALID_ARGUMENT;

	size = 0;
	addr = NULL;

	for (;;) {
		mutex_lock(&tasks_threads_lock);

		actual = threads_count;

		/* do we have the memory we need? */

		size_needed = actual * sizeof(thread_t);
		if (size_needed <= size)
			break;

		mutex_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and list is locked */
	thread_list = (thread_t *) addr;
	for (i = 0, thread = (thread_t) queue_first(&threads);
					!queue_end(&threads, (queue_entry_t) thread);
					thread = (thread_t) queue_next(&thread->threads)) {
		thread_reference_internal(thread);
		thread_list[i++] = thread;
	}
	assert(i <= actual);

	mutex_unlock(&tasks_threads_lock);

	/* calculate maxusage and free thread references */

	total = 0;
	maxusage = 0;
	maxstack = 0;
	while (i > 0) {
		thread_t threadref = thread_list[--i];

		if (threadref->kernel_stack != 0)
			total++;

		thread_deallocate(threadref);
	}

	if (size != 0)
		kfree(addr, size);

	*totalp = total;
	*residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
	*maxusagep = maxusage;
	*maxstackp = maxstack;
	return KERN_SUCCESS;

#endif	/* MACH_DEBUG */
}
Example #12
uint txt_setStr(uint elem_, char* str_)
{
    txt* elem=(txt*)elem_;
    txtContent* p;
    txtContent* q;
    void* pp;
    void* qq;
    char* str=str_;
    txtContent* nextPos;
    uint size=sizeof(txtContent);
    uint x;
    uint y;
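    /* Replace the element's text: release every existing glyph node and
     * 4 KiB backing block, then rebuild the glyph list from str,
     * chaining in fresh kalloc() pages as each block fills. */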

    p=elem->head;
    while (p!=0){
        q=p->next;
        cha_release((uint)p);
        p=q;
    }

    pp=elem->blockHead;
    while (pp!=0){
        qq=(void*)(*((uint*)pp));
        kfree(pp);
        pp=qq;
    }

    elem->cursorDiv->ds.x=0;
    elem->cursorDiv->ds.y=0;
    elem->cursorDiv->ds.width=elem->chWidth;
    elem->cursorDiv->ds.height=elem->chHeight;
    elem->cursorDiv->bgColor.a=255;
    elem->cursor=(txtContent*)(uint)-1;

    if ((elem->blockTail=kalloc()) == 0)
    {
        panic("Bingolingo!");
        return -1;
    }
    elem->blockHead=elem->blockTail;
    elem->head=0;
    elem->tail=0;
    *(uint*)elem->blockTail=(uint)0;
    nextPos=(txtContent*)((uint)elem->blockTail+4);
    while (*str!=0)
    {
        if (elem->chImgArray[(uint)*str]==0)
        {
            str++;
            continue;
        }
        else if (elem->tail==0)
            x=y=0;
        else if (elem->tail->data.ch=='\n')
        {
            x=0;
            y=elem->tail->data.ds.y+elem->chHeight;
            if (y+elem->chHeight>elem->ds.height)
                elem->ds.height=y+elem->chHeight;
        }
        else if (elem->tail->data.ds.x+2*elem->chWidth<=elem->ds.width)
        {
            x=elem->tail->data.ds.x+elem->chWidth;
            y=elem->tail->data.ds.y;
        }
        else
        {
            x=0;
            y=elem->tail->data.ds.y+elem->chHeight;
            if (y+elem->chHeight>elem->ds.height)
                elem->ds.height=y+elem->chHeight;
        }

        cha_createDom((cha*)nextPos, x, y, elem->chWidth, elem->chHeight, (uint)elem, elem->ds.pid);
        cha_setContent((uint)nextPos, elem->chImgArray[(uint)*str], *str);
        cha_setColor((uint)nextPos, elem->txtColor);

        if (elem->tail==0)
        {
            elem->head=nextPos;
            elem->tail=nextPos;
            elem->tail->prev=0;
            elem->tail->next=0;
        }
        else
        {
            elem->tail->next=nextPos;
            nextPos->prev=elem->tail;
            nextPos->next=0;
            elem->tail=nextPos;
        }

        if ((uint)elem->tail+size-(uint)elem->blockTail>4096-size)
        {
            if ((*(uint*)elem->blockTail=(uint)kalloc()) == 0)
            {
                panic("Bingolingo!");
                return -1;
            }
            elem->blockTail=(void*)(*(uint*)elem->blockTail);
            *(uint*)elem->blockTail=(uint)0;
            nextPos=(txtContent*)((uint)elem->blockTail+4);
        }
        else
            nextPos=(txtContent*)((uint)nextPos+size);
        str++;
    }

    return 0;
}
Example #13
/*
 * Initialize and Run the default pager
 */
void
default_pager(void)
{
	int			i, id;
	static char		here[] = "default_pager";
	mach_msg_options_t 	server_options;
	default_pager_thread_t	dpt;
	default_pager_thread_t	**dpt_array;

	default_pager_thread_privileges();

	/*
	 * Wire down code, data, stack
	 */
	wire_all_memory();

	/*
	 * Give me space for the thread array and zero it.
	 */
	i = default_pager_internal_count + default_pager_external_count + 1;
	dpt_array = (default_pager_thread_t **)
	    kalloc(i * sizeof(default_pager_thread_t *));
	if (dpt_array == NULL)
		Panic("default_pager: no memory for thread array");
	memset(dpt_array, 0, i * sizeof(default_pager_thread_t *));

	/* Setup my thread structure.  */
	id = 0;
	dpt.dpt_thread = cthread_self();
	dpt.dpt_buffer = 0;
	dpt.dpt_internal = FALSE;
	dpt.dpt_id = id++;
	dpt.dpt_initialized_p = TRUE;
	cthread_set_data(cthread_self(), (char *) &dpt);
	dpt_array[0] = &dpt;

	/*
	 * Now we create the threads that will actually
	 * manage objects.
	 */

	for (i = 0; i < default_pager_internal_count; i++) {
		dpt_array[id] = start_default_pager_thread(id, TRUE);
		id++;
	 }

	for (i = 0; i < default_pager_external_count; i++) {
		dpt_array[id] = start_default_pager_thread(id, FALSE);
		id++;
	}

	/* Is everybody ready?  */
	for (i = 0; i < id; i++)
	    while (!dpt_array[i])
		cthread_yield();

	/* Tell the bootstrap process to go ahead.  */
	bootstrap_completed(bootstrap_port, mach_task_self());

	/* Start servicing requests.  */
	server_options = MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_SEQNO);
	for (;;) {
		mach_msg_server(default_pager_demux_default,
				default_pager_msg_size,
				default_pager_default_set,
				server_options);
		Panic("default server");
	}
}
Example #14
mach_msg_return_t
mach_msg_server(
	boolean_t		(*demux)(mach_msg_header_t *,
					 mach_msg_header_t *),
	mach_msg_size_t		max_size,
	mach_port_t		rcv_name,
	mach_msg_options_t	server_options)
{
	mig_reply_error_t 	*bufRequest, *bufReply, *bufTemp;
	mach_msg_return_t 	mr;
	mach_msg_options_t	options;
	static char here[] =	"mach_msg_server";

	bufRequest = (mig_reply_error_t *)kalloc(max_size + MAX_TRAILER_SIZE);
	if (bufRequest == 0)
		return KERN_RESOURCE_SHORTAGE;
	bufReply = (mig_reply_error_t *)kalloc(max_size + MAX_TRAILER_SIZE);
	if (bufReply == 0) {
		kfree((char *) bufRequest, max_size + MAX_TRAILER_SIZE);
		return KERN_RESOURCE_SHORTAGE;
	}

	for (;;) {
	    get_request:
		mr = mach_msg(&bufRequest->Head, MACH_RCV_MSG | server_options,
			      0, max_size, rcv_name, MACH_MSG_TIMEOUT_NONE,
			      MACH_PORT_NULL);
		while (mr == MACH_MSG_SUCCESS) {
			/* we have a request message */

			(void) (*demux)(&bufRequest->Head, &bufReply->Head);

			if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)
			    && bufReply->RetCode != KERN_SUCCESS) {
				if (bufReply->RetCode == MIG_NO_REPLY)
					goto get_request;

				/*
				 * Don't destroy the reply port right,
				 * so we can send an error message
				 */
				bufRequest->Head.msgh_remote_port =
					MACH_PORT_NULL;
				mach_msg_destroy(&bufRequest->Head);
			}

			if (bufReply->Head.msgh_remote_port == MACH_PORT_NULL) {
				/* no reply port, so destroy the reply */
				if (bufReply->Head.msgh_bits &
				    MACH_MSGH_BITS_COMPLEX)
					mach_msg_destroy(&bufReply->Head);

				goto get_request;
			}

			/* send reply and get next request */

			bufTemp = bufRequest;
			bufRequest = bufReply;
			bufReply = bufTemp;

			/*
			 * We don't want to block indefinitely because the
			 * client isn't receiving messages from the reply port.
			 * If we have a send-once right for the reply port,
			 * then this isn't a concern because the send won't
			 * block.
			 * If we have a send right, we need to use
			 * MACH_SEND_TIMEOUT.
			 * To avoid falling off the kernel's fast RPC path
			 * unnecessarily, we only supply MACH_SEND_TIMEOUT when
			 * absolutely necessary.
			 */

			options = MACH_SEND_MSG | MACH_RCV_MSG | server_options;
			if (MACH_MSGH_BITS_REMOTE(bufRequest->Head.msgh_bits)
			    != MACH_MSG_TYPE_MOVE_SEND_ONCE) {
				options |= MACH_SEND_TIMEOUT;
			}
			mr = mach_msg(&bufRequest->Head, options,
				      bufRequest->Head.msgh_size, max_size,
				      rcv_name, MACH_MSG_TIMEOUT_NONE,
				      MACH_PORT_NULL);
		}

		/* a message error occurred */

		switch (mr) {
		    case MACH_SEND_INVALID_DEST:
		    case MACH_SEND_TIMED_OUT:
			/* the reply can't be delivered, so destroy it */
			mach_msg_destroy(&bufRequest->Head);
			break;

		    case MACH_RCV_TOO_LARGE:
			/* the kernel destroyed the request */
			break;

		    default:
			dprintf(("mach_msg_overwrite_trap returned 0x%x %s\n",
				 mr, mach_error_string(mr)));
			Panic("mach_msg failed");
			/* should only happen if the server is buggy */
			kfree((char *) bufRequest, max_size + MAX_TRAILER_SIZE);
			kfree((char *) bufReply, max_size + MAX_TRAILER_SIZE);
			return mr;
		}
	}
}
Example #15
//PAGEBREAK: 41
void
trap(struct trapframe *tf)
{
  if(tf->trapno == T_SYSCALL){
    if(proc->killed)
      exit();
    proc->tf = tf;
    syscall();
    if(proc->killed)
      exit();
    return;
  }

  switch(tf->trapno){
  case T_IRQ0 + IRQ_TIMER:
    if(cpu->id == 0){
      acquire(&tickslock);
      ticks++;
      wakeup(&ticks);

      // We're returning to user space: handle a pending alarm.
      if(proc && (tf->cs & 3) == DPL_USER){
        // Is the alarm set?
        if(proc->alarmticks != 0 && proc->alarmhandler != 0){
          proc->alarmticksleft--;
          if(proc->alarmticksleft == 0){
            // Push the interrupted eip (4 bytes, not 1), then divert
            // execution to the alarm handler.
            proc->tf->esp -= 4;
            *(uint*)proc->tf->esp = proc->tf->eip;
            proc->tf->eip = (uint)proc->alarmhandler;
            proc->alarmticksleft = proc->alarmticks;
          }
        }
      }

      release(&tickslock);
    }
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_IDE:
    ideintr();
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_IDE+1:
    // Bochs generates spurious IDE1 interrupts.
    break;
  case T_IRQ0 + IRQ_KBD:
    kbdintr();
    lapiceoi();
    break;
  case T_IRQ0 + IRQ_COM1:
    uartintr();
    lapiceoi();
    break;
  case T_IRQ0 + 7:
  case T_IRQ0 + IRQ_SPURIOUS:
    cprintf("cpu%d: spurious interrupt at %x:%x\n",
            cpu->id, tf->cs, tf->eip);
    lapiceoi();
    break;
   
  //PAGEBREAK: 13
  default:
    if(proc == 0 || (tf->cs&3) == 0){
      // In kernel, it must be our mistake.
      cprintf("unexpected trap %d from cpu %d eip %x (cr2=0x%x)\n",
              tf->trapno, cpu->id, tf->eip, rcr2());
      panic("trap");
    }
    // In user space: check whether this is an access to a lazily
    // allocated address, and if so back it with a fresh zeroed page.
    if(tf->trapno == T_PGFLT){
      char *mem;
      uint a;

      a = PGROUNDDOWN(rcr2());
      mem = kalloc();
      if(mem == 0){
        cprintf("allocuvm out of memory\n");
        deallocuvm(proc->pgdir, a+PGSIZE, rcr2());
        exit();
      }
      memset(mem, 0, PGSIZE);
      if(mappages(proc->pgdir, (char*)a, PGSIZE, v2p(mem), PTE_W|PTE_U) < 0){
        cprintf("lazy alloc: mappages failed\n");
        kfree(mem);
        proc->killed = 1;
      }
    } else {
      // If not, the user program is misbehaving.
      cprintf("pid %d %s: trap %d err %d on cpu %d "
              "eip 0x%x addr 0x%x--kill proc\n",
              proc->pid, proc->name, tf->trapno, tf->err, cpu->id, tf->eip,
              rcr2());
      proc->killed = 1;
    }
  }

  // Force process exit if it has been killed and is in user space.
  // (If it is still executing in the kernel, let it keep running 
  // until it gets to the regular system call return.)
  if(proc && proc->killed && (tf->cs&3) == DPL_USER)
    exit();

  // Force process to give up CPU on clock tick.
  // If interrupts were on while locks held, would need to check nlock.
  if(proc && proc->state == RUNNING && tf->trapno == T_IRQ0+IRQ_TIMER)
    yield();

  // Check if the process has been killed since we yielded
  if(proc && proc->killed && (tf->cs&3) == DPL_USER)
    exit();
}
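
The alarmticks/alarmhandler fields consumed above are set from user space by a companion syscall; a hedged sketch along the lines of the classic xv6 alarm exercise, assuming an alarm(ticks, handler) syscall has been wired up:

#include "types.h"
#include "stat.h"
#include "user.h"

void
periodic(void)
{
  printf(1, "alarm!\n");
}

int
main(void)
{
  int i;

  alarm(10, periodic);        // run periodic() every 10 timer ticks
  for(i = 0; i < 25*500000; i++){
    if((i % 250000) == 0)
      write(2, ".", 1);       // keep busy so timer interrupts arrive
  }
  exit();
}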
Example #16
int sysfs_mem_read(struct sysfs_fnode *sfs, void *buf, int len)
{
    char *res = (char *)buf;
    struct fnode *fno = sfs->fnode;
    static char *mem_txt;
    static int off;
    if (fno->off == 0) {
        const char mem_stat_banner[NPOOLS][50] = {"\r\nKernel memory statistics\r\n",
                                          "\r\n\nUser memory statistics\r\n",
                                          "\r\n\nTask space statistics\r\n",
#ifdef CONFIG_TCPIP_MEMPOOL
                                          "\r\n\nTCP/IP space statistics\r\n",
#endif

        };

        const char malloc_banner[] = "\tObjects in use: ";
        const char mem_banner[] = "\tMemory in use: ";
        const char frags_banner[] = "\tReserved: ";
        int i;
        frosted_mutex_lock(sysfs_mutex);
        mem_txt = kalloc(MAX_SYSFS_BUFFER);
        if (!mem_txt) {
            frosted_mutex_unlock(sysfs_mutex);
            return -1;
        }
        off = 0;

        for (i = 0; i < NPOOLS; i++) {
            unsigned long allocated = f_malloc_stats[i].malloc_calls - f_malloc_stats[i].free_calls;
            strcpy(mem_txt + off, mem_stat_banner[i]);
            off += strlen(mem_stat_banner[i]);
            strcpy(mem_txt + off, malloc_banner);
            off += strlen(malloc_banner);
            off += ul_to_str(allocated, mem_txt + off);
            *(mem_txt + off) = '\r';
            off++;
            *(mem_txt + off) = '\n';
            off++;

            strcpy(mem_txt + off, mem_banner);
            off += strlen(mem_banner);
            off += ul_to_str(f_malloc_stats[i].mem_allocated, mem_txt + off);

            *(mem_txt + off) = ' ';
            off++;
            *(mem_txt + off) = 'B';
            off++;
            *(mem_txt + off) = '\r';
            off++;
            *(mem_txt + off) = '\n';
            off++;

            strcpy(mem_txt + off, frags_banner);
            off += strlen(frags_banner);
            off += ul_to_str(mem_stats_frag(i), mem_txt + off);
            *(mem_txt + off) = ' ';
            off++;
            *(mem_txt + off) = 'B';
            off++;
            *(mem_txt + off) = '\r';
            off++;
            *(mem_txt + off) = '\n';
            off++;
        }
        if (off > 0)
            mem_txt[off++] = '\0';
    }
    if (off == fno->off) {
        kfree(mem_txt);
        frosted_mutex_unlock(sysfs_mutex);
        return -1;
    }
    if (len > (off - fno->off)) {
       len = off - fno->off;
    }
    memcpy(res, mem_txt + fno->off, len);
    fno->off += len;
    return len;
}
Example #17
struct kern_direct_file_io_ref_t *
kern_open_file_for_direct_io(const char * name, 
			     kern_get_file_extents_callback_t callback, 
			     void * callback_ref,
			     dev_t * partition_device_result,
			     dev_t * image_device_result,
                             uint64_t * partitionbase_result,
                             uint64_t * maxiocount_result,
                             uint32_t * oflags,
                             off_t offset,
                             caddr_t addr,
                             vm_size_t len)
{
    struct kern_direct_file_io_ref_t * ref;

    proc_t			p;
    struct vnode_attr		va;
    int				error;
    off_t			f_offset;
    uint64_t                    fileblk;
    size_t                      filechunk;
    uint64_t                    physoffset;
    dev_t			device;
    dev_t			target = 0;
    int			        isssd = 0;
    uint32_t                    flags = 0;
    uint32_t			blksize;
    off_t 			maxiocount, count;
    boolean_t                   locked = FALSE;

    int (*do_ioctl)(void * p1, void * p2, u_long theIoctl, caddr_t result);
    void * p1 = NULL;
    void * p2 = NULL;

    error = EFAULT;

    ref = (struct kern_direct_file_io_ref_t *) kalloc(sizeof(struct kern_direct_file_io_ref_t));
    if (!ref)
    {
	error = EFAULT;
    	goto out;
    }

    bzero(ref, sizeof(*ref));
    p = kernproc;
    ref->ctx = vfs_context_create(vfs_context_current());

    if ((error = vnode_open(name, (O_CREAT | FWRITE), (0), 0, &ref->vp, ref->ctx)))
        goto out;

    if (addr && len)
    {
	if ((error = kern_write_file(ref, offset, addr, len)))
	    goto out;
    }

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_rdev);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_data_size);
    VATTR_WANTED(&va, va_nlink);
    error = EFAULT;
    if (vnode_getattr(ref->vp, &va, ref->ctx))
    	goto out;

    kprintf("vp va_rdev major %d minor %d\n", major(va.va_rdev), minor(va.va_rdev));
    kprintf("vp va_fsid major %d minor %d\n", major(va.va_fsid), minor(va.va_fsid));
    kprintf("vp size %qd\n", va.va_data_size);

    if (ref->vp->v_type == VREG)
    {
	/* Don't dump files with links. */
	if (va.va_nlink != 1)
	    goto out;

        device = va.va_fsid;
        p1 = &device;
        p2 = p;
        do_ioctl = &file_ioctl;
    }
    else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR))
    {
	/* Partition. */
        device = va.va_rdev;

        p1 = ref->vp;
        p2 = ref->ctx;
        do_ioctl = &device_ioctl;
    }
    else
    {
	/* Don't dump to non-regular files. */
	error = EFAULT;
        goto out;
    }
    ref->device = device;

    // get block size

    error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &ref->blksize);
    if (error)
        goto out;

    if (ref->vp->v_type == VREG)
        ref->filelength = va.va_data_size;
    else
    {
        error = do_ioctl(p1, p2, DKIOCGETBLOCKCOUNT, (caddr_t) &fileblk);
        if (error)
            goto out;
	ref->filelength = fileblk * ref->blksize;    
    }

    // pin logical extents

    error = kern_ioctl_file_extents(ref, _DKIOCCSPINEXTENT, 0, ref->filelength);
    if (error && (ENOTTY != error)) goto out;
    ref->pinned = (error == 0);

    // generate the block list

    error = do_ioctl(p1, p2, DKIOCLOCKPHYSICALEXTENTS, NULL);
    if (error)
        goto out;
    locked = TRUE;

    f_offset = 0;
    while (f_offset < ref->filelength) 
    {
        if (ref->vp->v_type == VREG)
        {
            filechunk = 1*1024*1024*1024;
            daddr64_t blkno;

            error = VNOP_BLOCKMAP(ref->vp, f_offset, filechunk, &blkno, &filechunk, NULL, 0, NULL);
            if (error)
                goto out;

            fileblk = blkno * ref->blksize;
        }
        else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR))
        {
            fileblk = f_offset;
            filechunk = f_offset ? 0 : ref->filelength;
        }

        physoffset = 0;
        while (physoffset < filechunk)
        {
            dk_physical_extent_t getphysreq;
            bzero(&getphysreq, sizeof(getphysreq));

            getphysreq.offset = fileblk + physoffset;
            getphysreq.length = (filechunk - physoffset);
            error = do_ioctl(p1, p2, DKIOCGETPHYSICALEXTENT, (caddr_t) &getphysreq);
            if (error)
                goto out;
            if (!target)
            {
                target = getphysreq.dev;
            }
            else if (target != getphysreq.dev)
            {
                error = ENOTSUP;
                goto out;
            }
            callback(callback_ref, getphysreq.offset, getphysreq.length);
            physoffset += getphysreq.length;
        }
        f_offset += filechunk;
    }
    callback(callback_ref, 0ULL, 0ULL);

    if (ref->vp->v_type == VREG)
        p1 = &target;

    // get partition base

    error = do_ioctl(p1, p2, DKIOCGETBASE, (caddr_t) partitionbase_result);
    if (error)
        goto out;

    // get block size & constraints

    error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &blksize);
    if (error)
        goto out;

    maxiocount = 1*1024*1024*1024;

    error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTREAD, (caddr_t) &count);
    if (error)
        count = 0;
    count *= blksize;
    if (count && (count < maxiocount))
        maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTWRITE, (caddr_t) &count);
    if (error)
        count = 0;
    count *= blksize;
    if (count && (count < maxiocount))
        maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTREAD, (caddr_t) &count);
    if (error)
        count = 0;
    if (count && (count < maxiocount))
        maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTWRITE, (caddr_t) &count);
    if (error)
        count = 0;
    if (count && (count < maxiocount))
        maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTREAD, (caddr_t) &count);
    if (error)
        count = 0;
    if (count && (count < maxiocount))
        maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTWRITE, (caddr_t) &count);
    if (error)
        count = 0;
    if (count && (count < maxiocount))
        maxiocount = count;

    kprintf("max io 0x%qx bytes\n", maxiocount);
    if (maxiocount_result)
        *maxiocount_result = maxiocount;

    error = do_ioctl(p1, p2, DKIOCISSOLIDSTATE, (caddr_t)&isssd);
    if (!error && isssd)
        flags |= kIOHibernateOptionSSD;

    if (partition_device_result)
        *partition_device_result = device;
    if (image_device_result)
        *image_device_result = target;
    if (flags)
        *oflags = flags;

out:
    kprintf("kern_open_file_for_direct_io(%d)\n", error);

    if (error && locked)
    {
        p1 = &device;
        (void) do_ioctl(p1, p2, DKIOCUNLOCKPHYSICALEXTENTS, NULL);
    }

    if (error && ref)
    {
	if (ref->vp)
	{
	    vnode_close(ref->vp, FWRITE, ref->ctx);
	    ref->vp = NULLVP;
	}
	vfs_context_rele(ref->ctx);
	kfree(ref, sizeof(struct kern_direct_file_io_ref_t));
	ref = NULL;
    }
    return(ref);
}
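Note on the callback contract: the extent walk above hands each physical (offset, length) pair to the caller-supplied callback and signals completion with a final (0, 0) call. Below is a minimal sketch of a collecting callback written against only that contract; the extent_list structure, the MAX_EXTENTS limit, and the names are hypothetical, not part of the kernel API.

#include <stdint.h>

struct extent {
    uint64_t offset;    /* physical byte offset on the target device */
    uint64_t length;    /* extent length in bytes */
};

#define MAX_EXTENTS 64

struct extent_list {
    struct extent ext[MAX_EXTENTS];
    unsigned int  count;
    int           done;   /* set once the (0, 0) terminator arrives */
};

static void collect_extent(void *ref, uint64_t offset, uint64_t length)
{
    struct extent_list *list = (struct extent_list *)ref;

    if (offset == 0 && length == 0) {   /* end-of-list marker */
        list->done = 1;
        return;
    }
    if (list->count < MAX_EXTENTS) {
        list->ext[list->count].offset = offset;
        list->ext[list->count].length = length;
        list->count++;
    }
}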
Example #18
int sysfs_mtab_read(struct sysfs_fnode *sfs, void *buf, int len)
{
    char *res = (char *)buf;
    struct fnode *fno = sfs->fnode;
    static char *mem_txt;
    static int off;
    struct mountpoint *m = MTAB;
    int l = 0;
    if (fno->off == 0) {
        const char mtab_banner[] = "Mountpoint\tDriver\t\tInfo\r\n--------------------------------------\r\n";
        frosted_mutex_lock(sysfs_mutex);
        mem_txt = kalloc(MAX_SYSFS_BUFFER);
        if (!mem_txt) {
            frosted_mutex_unlock(sysfs_mutex);
            return -1;
        }
        off = 0;
        strcpy(mem_txt + off, mtab_banner);
        off += strlen(mtab_banner);

        while (m) {
            l = fno_fullpath(m->target, mem_txt + off, MAX_SYSFS_BUFFER - off);
            if (l > 0)
                off += l;
            *(mem_txt + (off++)) = '\t';
            *(mem_txt + (off++)) = '\t';

            if (m->target->owner) {
                strcpy(mem_txt + off, m->target->owner->name);
                off += strlen(m->target->owner->name);
            }
            *(mem_txt + (off++)) = '\t';
            *(mem_txt + (off++)) = '\t';

            l = 0;
            if (m->target->owner && m->target->owner->mount_info) {
                l = m->target->owner->mount_info(m->target, mem_txt + off, MAX_SYSFS_BUFFER - off);
            }
            if (l > 0) {
                off += l;
            } else {
                strcpy(mem_txt + off, "None");
                off += 4;
            }
            *(mem_txt + (off++)) = '\r';
            *(mem_txt + (off++)) = '\n';

            m = m->next;
        }
        *(mem_txt + (off++)) = '\r';
        *(mem_txt + (off++)) = '\n';
    }
    if (off == fno->off) {
        kfree(mem_txt);
        frosted_mutex_unlock(sysfs_mutex);
        return -1;
    }
    if (len > (off - fno->off)) {
       len = off - fno->off;
    }
    memcpy(res, mem_txt + fno->off, len);
    fno->off += len;
    return len;
}
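Both sysfs handlers above implement the same read protocol: the first call (fno->off == 0) renders the whole text into a freshly kalloc'd buffer, later calls copy out successive chunks, and -1 signals end-of-file once everything has been consumed. A minimal, self-contained userspace rendition of that protocol, with toy_read and a static string standing in for the handler and its buffer:

#include <stdio.h>
#include <string.h>

static const char text[] = "Mountpoint\tDriver\tInfo\r\n";
static int toy_off;

static int toy_read(char *buf, int len)
{
    int avail = (int)(sizeof(text) - 1) - toy_off;

    if (avail == 0)
        return -1;              /* everything consumed: end of file */
    if (len > avail)
        len = avail;
    memcpy(buf, text + toy_off, len);
    toy_off += len;
    return len;
}

int main(void)
{
    char chunk[8];
    int n;

    while ((n = toy_read(chunk, sizeof(chunk))) > 0)
        fwrite(chunk, 1, (size_t)n, stdout);
    return 0;
}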
Example #19
int devspi_create(const struct spi_config *conf)
{
    struct dev_spi *spi = NULL;

    if (!conf)
        return -EINVAL;
    if (conf->base == 0)
        return -EINVAL;

    /* idx indexes the DEV_SPI table below, so it must be strictly less than MAX_SPIS */
    if ((conf->idx < 0) || (conf->idx >= MAX_SPIS))
        return -EINVAL;

    spi = kalloc(sizeof(struct dev_spi));
    if (!spi)
        return -ENOMEM;

    /* Claim pins for SCK/MOSI/MISO */
    gpio_create(&mod_spi, &conf->pio_sck);
    gpio_create(&mod_spi, &conf->pio_mosi);
    gpio_create(&mod_spi, &conf->pio_miso);

    /* Erase spi content */
    memset(spi, 0, sizeof(struct dev_spi));

    /* Enable clocks */
    rcc_periph_clock_enable(conf->rcc);
    rcc_periph_clock_enable(conf->dma_rcc);

    /* Startup routine */
    //spi_disable(conf->base);

    /**********************************/
    /* Reset the SPI peripheral */
    spi_reset(conf->base);
    /* Initialize it as SPI master */
    spi_init_master(conf->base,
                    SPI_CR1_BAUDRATE_FPCLK_DIV_64,
                    SPI_CR1_CPOL_CLK_TO_0_WHEN_IDLE,
                    SPI_CR1_CPHA_CLK_TRANSITION_1,
                    SPI_CR1_DFF_8BIT,
                    SPI_CR1_MSBFIRST);
    /* Enable the SPI peripheral */
    spi_enable(conf->base);
    /**********************************/

#if 0
    spi_set_master_mode(conf->base);
    spi_set_baudrate_prescaler(conf->base, SPI_CR1_BR_FPCLK_DIV_256); /* TODO: Calculate prescaler from baudrate */
    if(conf->polarity == 0) 
        spi_set_clock_polarity_0(conf->base);
    else                    
        spi_set_clock_polarity_1(conf->base);
    if(conf->phase == 0) spi_set_clock_phase_0(conf->base);
    else
        spi_set_clock_phase_1(conf->base);
    if(conf->rx_only == 0)      
        spi_set_full_duplex_mode(conf->base);
    else
        spi_set_receive_only_mode(conf->base);
    if(conf->bidir_mode == 0)      
        spi_set_unidirectional_mode(conf->base);
    else
        spi_set_bidirectional_mode(conf->base);
    if(conf->dff_16) 
        spi_set_dff_16bit(conf->base);
    else
        spi_set_dff_8bit(conf->base);
    if(conf->enable_software_slave_management) 
        spi_enable_software_slave_management(conf->base);
    else
        spi_disable_software_slave_management(conf->base);
    if(conf->send_msb_first) 
        spi_send_msb_first(conf->base);
    else
        spi_send_lsb_first(conf->base);
    spi_set_nss_high(conf->base);
#endif

    /* Set up device struct */
    spi->base = conf->base;
    spi->irq = conf->irq;
    //spi->tx_dma_config = &conf->tx_dma;
    //spi->rx_dma_config = &conf->rx_dma;
    spi->mutex = mutex_init();

    /* Store address in the DEV_SPI array. */
    DEV_SPI[conf->idx] = spi;

    /* Enable interrupts */
    //nvic_set_priority(conf->irq, 1);
    //nvic_enable_irq(conf->irq);
    return 0;
}
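A minimal, self-contained sketch of the registry pattern devspi_create() follows: validate the index against the table size, allocate and zero the descriptor, then publish it in a fixed slot. The names here (dev_create, DEV_TABLE) are illustrative, and the busy-slot check is an addition the original omits:

#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define MAX_DEVS 4

struct dev { int base; };

static struct dev *DEV_TABLE[MAX_DEVS];

static int dev_create(int idx, int base)
{
    struct dev *d;

    if (idx < 0 || idx >= MAX_DEVS)   /* >= : idx is a table index */
        return -EINVAL;
    if (DEV_TABLE[idx])
        return -EBUSY;                /* slot already claimed (extra check) */

    d = malloc(sizeof(*d));
    if (!d)
        return -ENOMEM;
    memset(d, 0, sizeof(*d));
    d->base = base;
    DEV_TABLE[idx] = d;               /* publish in the fixed slot */
    return 0;
}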
Example #20
void initializeFrameBitmap(u32 mem_size) {
    frame_count = mem_size / 0x1000;
    /* Round up so a frame_count that is not a multiple of 32 still gets a full word. */
    frame_bitmap = (u32*)kalloc((frame_count + 31) / 32, 0, 0);
    memSet32(frame_bitmap, 0, (frame_count + 31) / 32);
}
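A minimal sketch of the per-frame bit helpers such a bitmap implies, assuming the one-bit-per-4-KiB-frame layout used above; the helper names are hypothetical:

#include <stdint.h>

typedef uint32_t u32;

extern u32 *frame_bitmap;   /* allocated by initializeFrameBitmap() */

static void frame_set(u32 frame)   { frame_bitmap[frame / 32] |=  (u32)1 << (frame % 32); }
static void frame_clear(u32 frame) { frame_bitmap[frame / 32] &= ~((u32)1 << (frame % 32)); }
static int  frame_test(u32 frame)  { return (int)((frame_bitmap[frame / 32] >> (frame % 32)) & 1u); }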
Example #21
void
krealloc(
	vm_offset_t	*addrp,
	vm_size_t	old_size,
	vm_size_t	new_size,
	simple_lock_t	lock)
{
	register int zindex;
	register vm_size_t allocsize;
	vm_offset_t naddr;

	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		old_size = round_page_32(old_size);
		new_size = round_page_32(new_size);
		if (new_size > old_size) {

			if (kmem_realloc(kalloc_map, *addrp, old_size, &naddr,
					 new_size) != KERN_SUCCESS) {
				panic("krealloc: kmem_realloc");
				naddr = 0;
			}

			simple_lock(lock);

			/* kmem_realloc() doesn't free the old page range,
			 * so release it before publishing the new address. */
			kmem_free(kalloc_map, *addrp, old_size);
			*addrp = naddr;

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
			        kalloc_large_max = kalloc_large_total;
		}
		return;
	}

	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (kmem_alloc(kalloc_map, &naddr, new_size) != KERN_SUCCESS) {
			panic("krealloc: kmem_alloc");
			simple_lock(lock);
			*addrp = 0;
			return;
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
		        kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = naddr;
}
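The doubling loops above encode krealloc()'s size-to-zone mapping: kalloc zones hold power-of-two blocks starting at KALLOC_MINSIZE, so the zone for a request is found by doubling until the request fits. A minimal sketch of that mapping under the same assumption; the constant value here is illustrative:

#include <stddef.h>

#define KALLOC_MINSIZE 16   /* illustrative; the real value is configuration-dependent */

/* Return the zone index (relative to first_k_zone) and the rounded block size. */
static int zone_index_for(size_t size, size_t *allocsize_out)
{
    size_t allocsize = KALLOC_MINSIZE;
    int zindex = 0;

    while (allocsize < size) {
        allocsize <<= 1;
        zindex++;
    }
    *allocsize_out = allocsize;   /* actual block size the zone hands out */
    return zindex;
}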
Example #22
/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */      
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t 	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	vm_size_t 	size;
	int 		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	if ((VM_MAX_ADDRESS - sizeof(struct lock_set))/sizeof(struct ulock) < (unsigned)n_ulocks)
		return KERN_RESOURCE_SHORTAGE;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE; 


	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = (task == kernel_task) ? 1 : 2; /* one for kernel, one for port */

	/*
	 *  Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {	
		kfree(lock_set, size);
		return KERN_RESOURCE_SHORTAGE; 
	}

	ipc_kobject_set (lock_set->port,
			(ipc_kobject_t) lock_set,
			IKOT_LOCK_SET);

	/*
	 *  Initialize each ulock in the lock set
	 */

	for (x=0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set  = lock_set;
		ulock->holder	 = THREAD_NULL;
		ulock->blocked   = FALSE;
		ulock->unstable	 = FALSE;
		ulock->ho_wait	 = FALSE;
		ulock->accept_wait = FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}
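The check before the kalloc above is the standard guard for sizing a variable-length structure: divide the available range by the element size first, so the comparison itself cannot overflow. A minimal, self-contained sketch of the same guard with stand-in types:

#include <stdint.h>
#include <stddef.h>

struct hdr  { int n_items; };
struct item { int value; };

/* Nonzero if sizeof(struct hdr) + n * sizeof(struct item) would overflow size_t. */
static int would_overflow(size_t n)
{
    return (SIZE_MAX - sizeof(struct hdr)) / sizeof(struct item) < n;
}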
Example #23
elem XmlRpc_EncodeValue(elem val)
{
	char buf[256];
	int i;
	double x;
	elem t;

	char *s, *s2;

	if(ELEM_STRINGP(val))
	{
		t=val;
		t=CONS(t, MISC_EOL);
		t=CONS(MISC_EOL, t);
		t=CONS(SYM("string"), t);
		return(t);
	}
	if(ELEM_FIXNUMP(val))
	{
		i=TOINT(val);
		sprintf(buf, "%d", i);
		t=STRING(buf);
		t=CONS(t, MISC_EOL);
		t=CONS(MISC_EOL, t);
		t=CONS(SYM("i4"), t);
		return(t);
	}
	if(ELEM_FLONUMP(val))
	{
		x=TOFLOAT(val);
		sprintf(buf, "%g", x);
		t=STRING(buf);
		t=CONS(t, MISC_EOL);
		t=CONS(MISC_EOL, t);
		t=CONS(SYM("double"), t);
		return(t);
	}

	if(ELEM_CONSP(val))
	{
		if(CAR(val)==SYM("date-time:"))
		{
			t=XmlRpc_EncodeDate(val);
			return(t);
		}

		t=XmlRpc_EncodeArray(val);
		return(t);
	}

	if(ELEM_ENVOBJP(val))
	{
		t=XmlRpc_EncodeStruct(val);
		return(t);
	}

	if(ELEM_BYTEVECTORP(val))
	{
		s=TyFcn_ByteVectorBody(val);
		i=VECTOR_LEN(val);
		s2=kalloc(((i*4)/3)+5);
		HttpNode_EncodeMime(s2, s, i);

		kprint("send mime %d->%d\n", i, (i*4)/3);
		t=STRING(s2);
		kfree(s2);
		t=CONS(t, MISC_EOL);
		t=CONS(MISC_EOL, t);
		t=CONS(SYM("base64"), t);
		return(t);
	}

	if(val==MISC_TRUE)
	{
		t=STRING("1");
		t=CONS(t, MISC_EOL);
		t=CONS(MISC_EOL, t);
		t=CONS(SYM("boolean"), t);
		return(t);
	}
	if(val==MISC_FALSE)
	{
		t=STRING("0");
		t=CONS(t, MISC_EOL);
		t=CONS(MISC_EOL, t);
		t=CONS(SYM("boolean"), t);
		return(t);
	}

	if(val==MISC_NULL)
	{
		t=STRING("$null");
		t=CONS(t, MISC_EOL);
		t=CONS(MISC_EOL, t);
		t=CONS(SYM("boolean"), t);
		return(t);
	}

	t=STRING("$undefined");
	t=CONS(t, MISC_EOL);
	t=CONS(MISC_EOL, t);
	t=CONS(SYM("string"), t);
	return(t);
}
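The kalloc(((i*4)/3)+5) in the base64 branch above sizes the output buffer: every 3 input bytes become 4 output characters, with slack for padding and the terminating NUL. The exact requirement, for comparison, is:

#include <stddef.h>

/* Exact base64 output size: padded 4-character groups plus a NUL terminator. */
static size_t base64_len(size_t nbytes)
{
    return 4 * ((nbytes + 2) / 3) + 1;
}

For every input length, ((i*4)/3)+5 is at least base64_len(i), so the allocation above is safe, just slightly generous.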
Example #24
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands. If "map"==VM_MAP_NULL or
 * "thread"==THREAD_NULL, do not make permament VM modifications,
 * just preflight the parse.
 */
static
load_return_t
parse_machfile(
	struct vnode 		*vp,       
	vm_map_t		map,
	thread_t		thread,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	int			depth,
	int64_t			aslr_offset,
	int64_t			dyld_aslr_offset,
	load_result_t		*result
)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	integer_t		dlarchbits = 0;
	void *			control;
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	void *			kl_addr;
	vm_size_t		size,kl_size;
	size_t			offset;
	size_t			oldoffset;	/* for overflow check */
	int			pass;
	proc_t			p = current_proc();		/* XXXX */
	int			error;
	int resid=0;
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;
	boolean_t		got_code_signatures = FALSE;
	int64_t			slide = 0;

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
	    	mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 *	Break infinite recursion
	 */
	if (depth > 6) {
		return(LOAD_FAILURE);
	}

	depth++;

	/*
	 *	Check to see if right machine type.
	 */
	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) ||
	    !grade_binary(header->cputype, 
	    	header->cpusubtype & ~CPU_SUBTYPE_MASK))
		return(LOAD_BADARCH);
		
	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
		
	switch (header->filetype) {
	
	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1) {
			return (LOAD_FAILURE);
		}
		break;
		
	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1) {
			return (LOAD_FAILURE);
		}
		break;

	case MH_DYLINKER:
		if (depth != 2) {
			return (LOAD_FAILURE);
		}
		break;
		
	default:
		return (LOAD_FAILURE);
	}

	/*
	 *	Get the pager for the file.
	 */
	control = ubc_getobject(vp, UBC_FLAGS_NONE);

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 *	Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(mach_header_sz + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = (caddr_t)kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);

	error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
	    UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p);
	if (error) {
		if (kl_addr )
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}

	/*
	 *	For PIE and dyld, slide everything by the ASLR offset.
	 */
	if ((header->flags & MH_PIE) || (header->filetype == MH_DYLINKER)) {
		slide = aslr_offset;
	}

	 /*
	 *  Scan through the commands, processing each one as necessary.
	 *  We parse in three passes through the headers:
	 *  1: thread state, uuid, code signature
	 *  2: segments
	 *  3: dyld, encryption, check entry point
	 */
	
	for (pass = 1; pass <= 3; pass++) {

		/*
		 * Check that the entry point is contained in an executable segment
		 */ 
		if ((pass == 3) && (result->validentry == 0)) {
			thread_state_initialize(thread);
			ret = LOAD_FAILURE;
			break;
		}

		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;

		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;
			offset += lcp->cmdsize;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents.  Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (oldoffset > offset ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > header->sizeofcmds + mach_header_sz) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT:
				if (pass != 2)
					break;

				if (abi64) {
					/*
					 * Having an LC_SEGMENT command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
				                   header->filetype,
				                   control,
				                   file_offset,
				                   macho_size,
				                   vp,
				                   map,
				                   slide,
				                   result);
				break;
			case LC_SEGMENT_64:
				if (pass != 2)
					break;

				if (!abi64) {
					/*
					 * Having an LC_SEGMENT_64 command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
				                   header->filetype,
				                   control,
				                   file_offset,
				                   macho_size,
				                   vp,
				                   map,
				                   slide,
				                   result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 1)
					break;
				ret = load_unixthread(
						 (struct thread_command *) lcp,
						 thread,
						 slide,
						 result);
				break;
			case LC_MAIN:
				if (pass != 1)
					break;
				if (depth != 1)
					break;
				ret = load_main(
						 (struct entry_point_command *) lcp,
						 thread,
						 slide,
						 result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 3)
					break;
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
					dlarchbits = (header->cputype & CPU_ARCH_MASK);
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			case LC_UUID:
				if (pass == 1 && depth == 1) {
					ret = load_uuid((struct uuid_command *) lcp,
							(char *)addr + mach_header_sz + header->sizeofcmds,
							result);
				}
				break;
			case LC_CODE_SIGNATURE:
				/* CODE SIGNING */
				if (pass != 1)
					break;
				/* pager -> uip ->
				   load signatures & store in uip
				   set VM object "signed_pages"
				*/
				ret = load_code_signature(
					(struct linkedit_data_command *) lcp,
					vp,
					file_offset,
					macho_size,
					header->cputype,
					(depth == 1) ? result : NULL);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: load code signature error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					ret = LOAD_SUCCESS; /* ignore error */
				} else {
					got_code_signatures = TRUE;
				}
				break;
#if CONFIG_CODE_DECRYPTION
			case LC_ENCRYPTION_INFO:
			case LC_ENCRYPTION_INFO_64:
				if (pass != 3)
					break;
				ret = set_code_unprotect(
					(struct encryption_info_command *) lcp,
					addr, map, slide, vp,
					header->cputype, header->cpusubtype);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: set_code_unprotect() error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					/* 
					 * Don't let the app run if it's 
					 * encrypted but we failed to set up the
					 * decrypter. If the keys are missing it will
					 * return LOAD_DECRYPTFAIL.
					 */
					 if (ret == LOAD_DECRYPTFAIL) {
						/* failed to load due to missing FP keys */
						proc_lock(p);
						p->p_lflag |= P_LTERM_DECRYPTFAIL;
						proc_unlock(p);
					}
					 psignal(p, SIGKILL);
				}
				break;
#endif
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
	if (ret == LOAD_SUCCESS) { 
	    if (! got_code_signatures) {
		    struct cs_blob *blob;
		    /* no embedded signatures: look for detached ones */
		    blob = ubc_cs_blob_get(vp, -1, file_offset);
		    if (blob != NULL) {
			    /* get flags to be applied to the process */
			    result->csflags |= blob->csb_flags;
		    }
	    }

		/* Make sure if we need dyld, we got it */
		if (result->needs_dynlinker && !dlp) {
			ret = LOAD_FAILURE;
		}

	    if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
		/*
		 * load the dylinker, and slide it by the independent DYLD ASLR
		 * offset regardless of the PIE-ness of the main binary.
		 */

		ret = load_dylinker(dlp, dlarchbits, map, thread, depth,
		                    dyld_aslr_offset, result);
	    }

	    if((ret == LOAD_SUCCESS) && (depth == 1)) {
			if (result->thread_count == 0) {
				ret = LOAD_FAILURE;
			}
	    }
	}

	if (kl_addr )
		kfree(kl_addr, kl_size);

	return(ret);
}
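The prevalidation inside the command loop above is the general recipe for walking untrusted variable-length records: advance a cursor by each record's self-declared size, and reject sizes that wrap the cursor, are smaller than the record header, or run past the mapped region. A minimal, self-contained sketch of the same walk over simplified stand-in types:

#include <stdint.h>
#include <stddef.h>

struct cmd { uint32_t cmd; uint32_t cmdsize; };

static int walk_cmds(const uint8_t *base, size_t region, uint32_t ncmds)
{
    size_t offset = 0, oldoffset;

    while (ncmds--) {
        const struct cmd *c;

        if (region - offset < sizeof(struct cmd))
            return -1;                       /* header would straddle the end */
        c = (const struct cmd *)(base + offset);
        oldoffset = offset;
        offset += c->cmdsize;
        if (offset < oldoffset ||            /* size wrapped the cursor */
            c->cmdsize < sizeof(struct cmd) ||
            offset > region)                 /* record runs past the region */
            return -1;
        /* ... act on c->cmd here ... */
    }
    return 0;
}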
Example #25
void *malloc(unsigned long size) {
	void *ptr=kalloc(size);
	return ptr;
}
Example #26
/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}
 */
kern_return_t
processor_set_things(
	processor_set_t	pset,
	void **thing_list,
	mach_msg_type_number_t *count,
	int type)
{
	unsigned int i;
	task_t task;
	thread_t thread;

	task_t *task_list;
	unsigned int actual_tasks;
	vm_size_t task_size, task_size_needed;

	thread_t *thread_list;
	unsigned int actual_threads;
	vm_size_t thread_size, thread_size_needed;

	void *addr, *newaddr;
	vm_size_t size, size_needed;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	task_size = 0;
	task_size_needed = 0;
	task_list = NULL;
	actual_tasks = 0;

	thread_size = 0;
	thread_size_needed = 0;
	thread_list = NULL;
	actual_threads = 0;

	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		/* do we have the memory we need? */
		if (type == PSET_THING_THREAD)
			thread_size_needed = threads_count * sizeof(void *);
#if !CONFIG_MACF
		else
#endif
			task_size_needed = tasks_count * sizeof(void *);

		if (task_size_needed <= task_size &&
		    thread_size_needed <= thread_size)
			break;

		/* unlock and allocate more memory */
		lck_mtx_unlock(&tasks_threads_lock);

		/* grow task array */
		if (task_size_needed > task_size) {
			if (task_size != 0)
				kfree(task_list, task_size);

			assert(task_size_needed > 0);
			task_size = task_size_needed;

			task_list = (task_t *)kalloc(task_size);
			if (task_list == NULL) {
				if (thread_size != 0)
					kfree(thread_list, thread_size);
				return (KERN_RESOURCE_SHORTAGE);
			}
		}

		/* grow thread array */
		if (thread_size_needed > thread_size) {
			if (thread_size != 0)
				kfree(thread_list, thread_size);

			assert(thread_size_needed > 0);
			thread_size = thread_size_needed;

			thread_list = (thread_t *)kalloc(thread_size);
			if (thread_list == 0) {
				if (task_size != 0)
					kfree(task_list, task_size);
				return (KERN_RESOURCE_SHORTAGE);
			}
		}
	}

	/* OK, have memory and the list locked */

	/* If we need it, get the thread list */
	if (type == PSET_THING_THREAD) {
		for (thread = (thread_t)queue_first(&threads);
		     !queue_end(&threads, (queue_entry_t)thread);
		     thread = (thread_t)queue_next(&thread->threads)) {
#if defined(SECURE_KERNEL)
			if (thread->task != kernel_task) {
#endif
				thread_reference_internal(thread);
				thread_list[actual_threads++] = thread;
#if defined(SECURE_KERNEL)
			}
#endif
		}
	}
#if !CONFIG_MACF
	  else {
#endif
		/* get a list of the tasks */
		for (task = (task_t)queue_first(&tasks);
		     !queue_end(&tasks, (queue_entry_t)task);
		     task = (task_t)queue_next(&task->tasks)) {
#if defined(SECURE_KERNEL)
			if (task != kernel_task) {
#endif
				task_reference_internal(task);
				task_list[actual_tasks++] = task;
#if defined(SECURE_KERNEL)
			}
#endif
		}
#if !CONFIG_MACF
	}
#endif

	lck_mtx_unlock(&tasks_threads_lock);

#if CONFIG_MACF
	unsigned int j, used;

	/* for each task, make sure we are allowed to examine it */
	for (i = used = 0; i < actual_tasks; i++) {
		if (mac_task_check_expose_task(task_list[i])) {
			task_deallocate(task_list[i]);
			continue;
		}
		task_list[used++] = task_list[i];
	}
	actual_tasks = used;
	task_size_needed = actual_tasks * sizeof(void *);

	if (type == PSET_THING_THREAD) {

		/* for each thread (if any), make sure its task is in the allowed list */
		for (i = used = 0; i < actual_threads; i++) {
			boolean_t found_task = FALSE;

			task = thread_list[i]->task;
			for (j = 0; j < actual_tasks; j++) {
				if (task_list[j] == task) {
					found_task = TRUE;
					break;
				}
			}
			if (found_task)
				thread_list[used++] = thread_list[i];
			else
				thread_deallocate(thread_list[i]);
		}
		actual_threads = used;
		thread_size_needed = actual_threads * sizeof(void *);

		/* done with the task list */
		for (i = 0; i < actual_tasks; i++)
			task_deallocate(task_list[i]);
		kfree(task_list, task_size);
		task_size = 0;
		actual_tasks = 0;
		task_list = NULL;
	}
#endif

	if (type == PSET_THING_THREAD) {
		if (actual_threads == 0) {
			/* no threads available to return */
			assert(task_size == 0);
			if (thread_size != 0)
				kfree(thread_list, thread_size);
			*thing_list = NULL;
			*count = 0;
			return KERN_SUCCESS;
		}
		size_needed = actual_threads * sizeof(void *);
		size = thread_size;
		addr = thread_list;
	} else {
		if (actual_tasks == 0) {
			/* no tasks available to return */
			assert(thread_size == 0);
			if (task_size != 0)
				kfree(task_list, task_size);
			*thing_list = NULL;
			*count = 0;
			return KERN_SUCCESS;
		} 
		size_needed = actual_tasks * sizeof(void *);
		size = task_size;
		addr = task_list;
	}

	/* if we allocated too much, must copy */
	if (size_needed < size) {
		newaddr = kalloc(size_needed);
		if (newaddr == 0) {
			for (i = 0; i < actual_tasks; i++) {
				if (type == PSET_THING_THREAD)
					thread_deallocate(thread_list[i]);
				else
					task_deallocate(task_list[i]);
			}
			if (size)
				kfree(addr, size);
			return (KERN_RESOURCE_SHORTAGE);
		}

		bcopy((void *) addr, (void *) newaddr, size_needed);
		kfree(addr, size);

		addr = newaddr;
		size = size_needed;
	}

	*thing_list = (void **)addr;
	*count = (unsigned int)size / sizeof(void *);

	return (KERN_SUCCESS);
}
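processor_set_things() uses the classic size-under-lock, allocate-outside-lock retry loop: the counts are only trustworthy while tasks_threads_lock is held, so the buffer is grown with the lock dropped and the size re-checked after re-acquiring it. A minimal userspace sketch of the same loop, with pthread and malloc standing in for the kernel primitives:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned items_count;            /* protected by list_lock */

static void **snapshot_items(unsigned *count_out)
{
    void **list = NULL;
    unsigned capacity = 0, needed;

    for (;;) {
        pthread_mutex_lock(&list_lock);
        needed = items_count;           /* only valid while locked */
        if (needed <= capacity)
            break;                      /* big enough: stay locked */
        pthread_mutex_unlock(&list_lock);

        free(list);                     /* too small: grow and retry */
        capacity = needed;
        list = malloc(capacity * sizeof(void *));
        if (list == NULL)
            return NULL;
    }
    /* ... copy "needed" entries into list while still holding the lock ... */
    pthread_mutex_unlock(&list_lock);
    *count_out = needed;
    return list;
}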
Example #27
/*********************************************************************
* This function takes a dependency list containing a series of
* already-loaded module names, followed by a single name for a module
* that hasn't yet been loaded. It invokes kld_load_from_memory() to
* build symbol info for the already-loaded modules, and then finally
* loads the actually requested module.
*********************************************************************/
static
kern_return_t load_kmod(OSArray * dependencyList) {
    kern_return_t result = KERN_SUCCESS;

    unsigned int  num_dependencies = 0;
    kmod_info_t ** kmod_dependencies = NULL;
    unsigned int  i;
    OSString    * requestedKmodName;   // don't release
    const char  * requested_kmod_name;
    OSString    * currentKmodName;     // don't release
    char        * kmod_address;
    unsigned long kmod_size;
    struct mach_header * kmod_header;
    unsigned long kld_result;
    int           do_kld_unload = 0;
    kmod_info_t * kmod_info_freeme = 0;
    kmod_info_t * kmod_info = 0;
    kmod_t        kmod_id;


   /* Separate the requested kmod from its dependencies.
    */
    i = dependencyList->getCount();
    if (i == 0) {
        IOLog("load_kmod(): Called with empty list.\n");
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    } else {
        i--;  // make i be the index of the last entry
    }

    requestedKmodName = OSDynamicCast(OSString, dependencyList->getObject(i));
    if (!requestedKmodName) {
        IOLog("load_kmod(): Called with invalid list of kmod names.\n");
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }
    requested_kmod_name = requestedKmodName->getCStringNoCopy();
    dependencyList->removeObject(i);

   /* If the requested kmod is already loaded, there's no work to do.
    */
    kmod_info_freeme = kmod_lookupbyname_locked(requested_kmod_name);
    if (kmod_info_freeme) {
        // FIXME: Need to check for version mismatch if already loaded.
        result = KERN_SUCCESS;
        goto finish;
    }


   /* Do the KLD loads for the already-loaded modules in order to get
    * their symbols.
    */
    kld_address_func(&address_for_loaded_kmod);

    num_dependencies = dependencyList->getCount();
    kmod_dependencies = (kmod_info_t **)kalloc(num_dependencies *
        sizeof(kmod_info_t *));
    if (!kmod_dependencies) {
        IOLog("load_kmod(): Failed to allocate memory for dependency array "
            "during load of kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }

    bzero(kmod_dependencies, num_dependencies *
        sizeof(kmod_info_t *));

    for (i = 0; i < num_dependencies; i++) {

        currentKmodName = OSDynamicCast(OSString,
            dependencyList->getObject(i));

        if (!currentKmodName) {
            IOLog("load_kmod(): Invalid dependency name at index %d for "
                "kmod \"%s\".\n", i, requested_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

        const char * current_kmod_name = currentKmodName->getCStringNoCopy();

        // These globals are needed by the kld_address functions
        g_current_kmod_info = kmod_lookupbyname_locked(current_kmod_name);
        g_current_kmod_name = current_kmod_name;

        if (!g_current_kmod_info) {
            IOLog("load_kmod(): Missing dependency \"%s\".\n",
                current_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

       /* Record the current kmod as a dependency of the requested
        * one. This will be used in building references after the
        * load is complete.
        */
        kmod_dependencies[i] = g_current_kmod_info;

        /* If the current kmod's size is zero it means that we have a
         * fake in-kernel dependency.  If so, we don't have to arrange
         * for its symbol table to be reloaded, as it is already
         * part of the kernel's symbol table.
         */
        if (!g_current_kmod_info->size)
            continue;

	if (!kld_file_merge_OSObjects(current_kmod_name)) {
            IOLog("load_kmod(): Can't merge OSObjects \"%s\".\n",
		current_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

	kmod_address = (char *)
	    kld_file_getaddr(current_kmod_name, (long *) &kmod_size);
        if (!kmod_address) {

            IOLog("load_kmod() failed for dependency kmod "
                "\"%s\".\n", current_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

        kld_result = kld_load_from_memory(&kmod_header,
            current_kmod_name, kmod_address, kmod_size);

        if (kld_result) {
            do_kld_unload = 1;
        }

        if (!kld_result || !link_load_address) {
            IOLog("kld_load_from_memory() failed for dependency kmod "
                "\"%s\".\n", current_kmod_name);
            LOG_DELAY();
            result = KERN_FAILURE;
            goto finish;
        }

        kld_forget_symbol("_kmod_info");
    }

   /*****
    * Now that we've done all the dependencies, which should have already
    * been loaded, we do the last requested module, which should not have
    * already been loaded.
    */
    kld_address_func(&alloc_for_kmod);

    g_current_kmod_name = requested_kmod_name;
    g_current_kmod_info = 0;  // there is no kmod yet

    if (!map_and_patch(requested_kmod_name)) {
	IOLog("load_kmod: map_and_patch() failed for "
	    "kmod \"%s\".\n", requested_kmod_name);
	LOG_DELAY();
	result = KERN_FAILURE;
	goto finish;
    }

    kmod_address = (char *)
	kld_file_getaddr(requested_kmod_name, (long *) &kmod_size);
    if (!kmod_address) {
        IOLog("load_kmod: kld_file_getaddr()  failed internal error "
            "on \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }

    kld_result = kld_load_from_memory(&kmod_header,
			    requested_kmod_name, kmod_address, kmod_size);

    if (kld_result) {
        do_kld_unload = 1;
    }

    if (!kld_result || !link_load_address) {
        IOLog("load_kmod(): kld_load_from_memory() failed for "
            "kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }


   /* Copy the linked header and image into the vm_allocated buffer.
    * Move each onto the appropriate page-aligned boundary as given
    * by the global link_... variables.
    */
    bzero((char *)link_buffer_address, link_buffer_size);
    // bcopy() is (from, to, length)
    bcopy((char *)kmod_header, (char *)link_buffer_address, link_header_size);
    bcopy((char *)kmod_header + link_header_size,
        (char *)link_buffer_address + round_page_32(link_header_size),
        link_load_size - link_header_size);


   /* Get the kmod_info struct for the newly-loaded kmod.
    */
    if (!kld_lookup("_kmod_info", (unsigned long *)&kmod_info)) {
        IOLog("kld_lookup() of \"_kmod_info\" failed for "
            "kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }


    if (!stamp_kmod(requested_kmod_name, kmod_info)) {
        // stamp_kmod() logs a meaningful message
        result = KERN_FAILURE;
        goto finish;
    }


   /* kld_lookup of _kmod_info yielded the actual linked address,
    * so now that we've copied the data into its real place,
    * we can set this stuff.
    */
    kmod_info->address = link_buffer_address;
    kmod_info->size = link_buffer_size;
    kmod_info->hdr_size = round_page_32(link_header_size);

   /* We've written data and instructions, so *flush* the data cache
    * and *invalidate* the instruction cache.
    */
    flush_dcache64((addr64_t)link_buffer_address, link_buffer_size, false);
    invalidate_icache64((addr64_t)link_buffer_address, link_buffer_size, false);


   /* Register the new kmod with the kernel proper.
    */
    if (kmod_create_internal(kmod_info, &kmod_id) != KERN_SUCCESS) {
        IOLog("load_kmod(): kmod_create() failed for "
            "kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        result = KERN_FAILURE;
        goto finish;
    }

#if DEBUG
    IOLog("kmod id %d successfully created at 0x%lx, size %ld.\n",
        (unsigned int)kmod_id, link_buffer_address, link_buffer_size);
    LOG_DELAY();
#endif /* DEBUG */

   /* Record dependencies for the newly-loaded kmod.
    */
    for (i = 0; i < num_dependencies; i++) {
        kmod_info_t * cur_dependency_info;
        kmod_t packed_id;
        cur_dependency_info = kmod_dependencies[i];
        packed_id = KMOD_PACK_IDS(kmod_id, cur_dependency_info->id);
        if (kmod_retain(packed_id) != KERN_SUCCESS) {
            IOLog("load_kmod(): kmod_retain() failed for "
                "kmod \"%s\".\n", requested_kmod_name);
            LOG_DELAY();
            kmod_destroy_internal(kmod_id);
            result = KERN_FAILURE;
            goto finish;
        }
    }

   /* Start the kmod (which invokes constructors for I/O Kit
    * drivers).
    */
    // kmod_start_or_stop(id, start?, user data, datalen)
    if (kmod_start_or_stop(kmod_id, 1, 0, 0) != KERN_SUCCESS) {
        IOLog("load_kmod(): kmod_start_or_stop() failed for "
            "kmod \"%s\".\n", requested_kmod_name);
        LOG_DELAY();
        kmod_destroy_internal(kmod_id);
        result = KERN_FAILURE;
        goto finish;
    }

finish:

    if (kmod_info_freeme) {
        kfree((unsigned int)kmod_info_freeme, sizeof(kmod_info_t));
    }

   /* Only do a kld_unload_all() if at least one load happened.
    */
    if (do_kld_unload) {
        kld_unload_all(/* deallocate sets */ 1);
    }

   /* If the link failed, blow away the allocated link buffer.
    */
    if (result != KERN_SUCCESS && link_buffer_address) {
        vm_deallocate(kernel_map, link_buffer_address, link_buffer_size);
    }

    if (kmod_dependencies) {
        for (i = 0; i < num_dependencies; i++) {
            if (kmod_dependencies[i]) {
                kfree((unsigned int)kmod_dependencies[i], sizeof(kmod_info_t));
            }
        }
        kfree((unsigned int)kmod_dependencies,
            num_dependencies * sizeof(kmod_info_t *));
    }

   /* Reset these static global variables for the next call.
    */
    g_current_kmod_name = NULL;
    g_current_kmod_info = NULL;
    link_buffer_address = 0;
    link_load_address = 0;
    link_load_size = 0;
    link_buffer_size = 0;
    link_header_size = 0;

    return result;
}
Example #28
imageBuffer newImageBuffer(int width, int height)
{
  imageBuffer img = (imageBuffer){kalloc(width*height*getScreenDepth()),width,height};
  memset(img.buffer,0,width*height*getScreenDepth());
  return img;
}
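A self-contained rendition of the same idiom with explicit failure handling; malloc and a fixed depth stand in for kalloc and getScreenDepth(), and the struct layout is inferred from the compound literal above:

#include <stdlib.h>
#include <string.h>

#define SCREEN_DEPTH 4   /* bytes per pixel; illustrative */

typedef struct {
    unsigned char *buffer;
    int width, height;
} imageBuffer;

static imageBuffer newImageBufferSketch(int width, int height)
{
    size_t bytes = (size_t)width * (size_t)height * SCREEN_DEPTH;
    imageBuffer img = { malloc(bytes), width, height };

    if (img.buffer)   /* unlike the original, tolerate allocation failure */
        memset(img.buffer, 0, bytes);
    return img;
}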
Example #29
void WriteFile (void)
{
	FILE		*modelouthandle;
	int			total = 0;
	int			i;

	pStart = kalloc( 1, FILEBUFFER );

	StripExtension (outname);

	for (i = 1; i < numseqgroups; i++)
	{
		// write the non-default sequence group data to separate files
		char groupname[128], localname[128];

		sprintf( groupname, "%s%02d.mdl", outname, i );

		printf ("writing %s:\n", groupname);
		modelouthandle = SafeOpenWrite (groupname);

		pseqhdr = (studioseqhdr_t *)pStart;
		pseqhdr->id = IDSTUDIOSEQHEADER;
		pseqhdr->version = STUDIO_VERSION;

		pData = pStart + sizeof( studioseqhdr_t ); 

		pData = WriteAnimations( pData, pStart, i );

		ExtractFileBase( groupname, localname );
		sprintf( sequencegroup[i].name, "models\\%s.mdl", localname );
		strcpy( pseqhdr->name, sequencegroup[i].name );
		pseqhdr->length = pData - pStart;

		printf("total     %6d\n", pseqhdr->length );

		SafeWrite( modelouthandle, pStart, pseqhdr->length );

		fclose (modelouthandle);
		memset( pStart, 0, pseqhdr->length );
	}

	if (split_textures)
	{
		// write textures out to a separate file
		char texname[128];

		sprintf( texname, "%sT.mdl", outname );

		printf ("writing %s:\n", texname);
		modelouthandle = SafeOpenWrite (texname);

		phdr = (studiohdr_t *)pStart;
		phdr->id = IDSTUDIOHEADER;
		phdr->version = STUDIO_VERSION;

		pData = (byte *)phdr + sizeof( studiohdr_t );

		WriteTextures( );

		phdr->length = pData - pStart;
		printf("textures  %6d bytes\n", phdr->length );

		SafeWrite( modelouthandle, pStart, phdr->length );

		fclose (modelouthandle);
		memset( pStart, 0, phdr->length );
		pData = pStart;
	}

//
// write the model output file
//
	strcat (outname, ".mdl");
	
	printf ("---------------------\n");
	printf ("writing %s:\n", outname);
	modelouthandle = SafeOpenWrite (outname);

	phdr = (studiohdr_t *)pStart;

	phdr->id = IDSTUDIOHEADER;
	phdr->version = STUDIO_VERSION;
	strcpy( phdr->name, outname );
	VectorCopy( eyeposition, phdr->eyeposition );
	VectorCopy( bbox[0], phdr->min ); 
	VectorCopy( bbox[1], phdr->max ); 
	VectorCopy( cbox[0], phdr->bbmin ); 
	VectorCopy( cbox[1], phdr->bbmax ); 

	phdr->flags = gflags;

	pData = (byte *)phdr + sizeof( studiohdr_t );

	WriteBoneInfo( );
	printf("bones     %6d bytes (%d)\n", pData - pStart - total, numbones );
	total = pData - pStart;

	pData = WriteAnimations( pData, pStart, 0 );

	WriteSequenceInfo( );
	printf("sequences %6d bytes (%d frames) [%d:%02d]\n", pData - pStart - total, totalframes, (int)totalseconds / 60, (int)totalseconds % 60 );
	total  = pData - pStart;

	WriteModel( );
	printf("models    %6d bytes\n", pData - pStart - total );
	total  = pData - pStart;

	if (!split_textures)
	{
		WriteTextures( );
		printf("textures  %6d bytes\n", pData - pStart - total );
	}

	phdr->length = pData - pStart;

	printf("total     %6d\n", phdr->length );

	SafeWrite( modelouthandle, pStart, phdr->length );

	fclose (modelouthandle);
}
Example #30
extern int
kperf_timer_set_count(unsigned count)
{
	struct time_trigger *new_timerv = NULL, *old_timerv = NULL;
	unsigned old_count, i;

	/* easy no-op */
	if( count == timerc )
		return 0;

	/* TODO: allow shrinking? */
	if( count < timerc )
		return EINVAL;

	/* cap it for good measure */
	if( count > TIMER_MAX )
		return EINVAL;

	/* creating the timer array for the first time. create a few
	 * more things, too.
	 */
	if( timerc == 0 )
	{
		int r;

		/* main kperf */
		r = kperf_init();
		if( r )
			return r;

		/* get the PET thread going */
		r = kperf_pet_init();
		if( r )
			return r;
	}

	/* first shut down any running timers since we will be messing
	 * with the timer call structures
	 */
	if( kperf_timer_stop() )
		return EBUSY;

	/* create a new array */
	new_timerv = kalloc( count * sizeof(*new_timerv) );
	if( new_timerv == NULL )
		return ENOMEM;

	old_timerv = timerv;
	old_count = timerc;

	if( old_timerv != NULL )
		bcopy( timerv, new_timerv, timerc * sizeof(*timerv) );

	/* zero the new entries */
	bzero( &new_timerv[timerc], (count - old_count) * sizeof(*new_timerv) );

	/* (re-)setup the timer call info for all entries */
	for( i = 0; i < count; i++ )
		setup_timer_call( &new_timerv[i] );

	timerv = new_timerv;
	timerc = count;

	if( old_timerv != NULL )
		kfree( old_timerv, old_count * sizeof(*timerv) );

	return 0;
}