Example #1
/*
 *	Routine:	cpu_bootstrap
 *	Function:
 *		Initialize the locks needed early in processor startup.
 */
void
cpu_bootstrap(
	void)
{
	simple_lock_init(&rht_lock,0);
	simple_lock_init(&SignalReadyLock,0);
	mutex_init(&ppt_lock,0);
}
Example #2
int hypcninit(struct consdev *cp)
{
	if (console)
		return 0;	/* already mapped */
	simple_lock_init(&outlock);
	simple_lock_init(&inlock);
	/* Map the shared Xen console page and make it writable. */
	console = (void*) mfn_to_kv(boot_info.console_mfn);
	pmap_set_page_readwrite(console);
	/* Hook the console interrupt handler to its event channel. */
	hyp_evt_handler(boot_info.console_evtchn, hypcnintr, 0, SPL6);
	return 0;
}
Example #3
/*
 * Initialize inode hash table.
 */
void
ntfs_nthashinit()
{

	ntfs_nthashtbl = hashinit(desiredvnodes, M_NTFSNTHASH, &ntfs_nthash);
	simple_lock_init(&ntfs_nthash_slock);
}
Example #4
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init()
{
	size_t max_len, min_len;
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	simple_lock_init(&gdt_simplelock);
	lockinit(&gdt_lock_store, PZERO, "gdtlck", 0, 0);

	max_len = MAXGDTSIZ * sizeof(union descriptor);
	min_len = MINGDTSIZ * sizeof(union descriptor);

	gdt_size = MINGDTSIZ;
	gdt_count = NGDT;
	gdt_next = NGDT;
	gdt_free = GNULL_SEL;

	gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len; va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_init: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	bcopy(bootstrap_gdt, gdt, NGDT * sizeof(union descriptor));
	ci->ci_gdt = gdt;
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);

	gdt_init_cpu(ci);
}
Example #5
void
udv_init()
{

	LIST_INIT(&udv_list);
	simple_lock_init(&udv_lock);
}
Example #6
static void
acpitz_init_envsys(struct acpitz_softc *sc)
{
	int i;

	simple_lock_init(&sc->sc_slock);

	for (i = 0; i < ATZ_NUMSENSORS; i++) {
		sc->sc_data[i].sensor = sc->sc_info[i].sensor = i;
		sc->sc_data[i].validflags = ENVSYS_FVALID;
		sc->sc_info[i].validflags = ENVSYS_FVALID;
		sc->sc_data[i].warnflags = ENVSYS_WARN_OK;
	}
#define INITDATA(index, unit, string) \
	sc->sc_data[index].units = unit;				   \
	sc->sc_info[index].units = unit;				   \
	snprintf(sc->sc_info[index].desc, sizeof(sc->sc_info[index].desc), \
	    "%s %s", sc->sc_dev.dv_xname, string);

	INITDATA(ATZ_SENSOR_TEMP, ENVSYS_STEMP, "temperature");

	/* hook into sysmon */
	sc->sc_sysmon.sme_ranges = acpitz_ranges;
	sc->sc_sysmon.sme_sensor_info = sc->sc_info;
	sc->sc_sysmon.sme_sensor_data = sc->sc_data;
	sc->sc_sysmon.sme_cookie = sc;
	sc->sc_sysmon.sme_gtredata = acpitz_gtredata;
	sc->sc_sysmon.sme_streinfo = acpitz_streinfo;
	sc->sc_sysmon.sme_nsensors = ATZ_NUMSENSORS;
	sc->sc_sysmon.sme_envsys_version = 1000;

	if (sysmon_envsys_register(&sc->sc_sysmon))
		printf("%s: unable to register with sysmon\n",
		    sc->sc_dev.dv_xname);
}
Example #7
/*
 * Initialize inode hash table.
 */
void
ufs_ihashinit()
{

	ihashtbl = hashinit(desiredvnodes, M_UFSMNT, &ihash);
	simple_lock_init(&ufs_ihash_slock);
}
Example #8
/*
 * allocate anons
 */
void
uvm_anon_init()
{
	struct vm_anon *anon;
	int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */
	int lcv;

	/*
	 * Allocate the initial anons.
	 */
	anon = (struct vm_anon *)uvm_km_alloc(kernel_map,
	    sizeof(*anon) * nanon);
	if (anon == NULL) {
		printf("uvm_anon_init: can not allocate %d anons\n", nanon);
		panic("uvm_anon_init");
	}

	memset(anon, 0, sizeof(*anon) * nanon);
	uvm.afree = NULL;
	uvmexp.nanon = uvmexp.nfreeanon = nanon;
	for (lcv = 0 ; lcv < nanon ; lcv++) {
		anon[lcv].u.an_nxt = uvm.afree;
		uvm.afree = &anon[lcv];
	}
	simple_lock_init(&uvm.afreelock);
}
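The loop above threads every anon onto uvm.afree as a singly-linked free
list guarded by uvm.afreelock. A sketch of the matching pop, roughly what
this era's allocator would do (simplified, not the verbatim uvm_analloc):

	simple_lock(&uvm.afreelock);
	anon = uvm.afree;
	if (anon != NULL) {
		uvm.afree = anon->u.an_nxt;	/* unlink head of free list */
		uvmexp.nfreeanon--;
	}
	simple_unlock(&uvm.afreelock);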
Example #9
/*
 *	Routine:	lock_init
 *	Function:
 *		Initialize a lock; required before use.
 *		Note that clients declare the "struct lock"
 *		variables and then initialize them, rather
 *		than getting a new one from this module.
 */
void
lock_init(
	lock_t		*l,
	boolean_t	can_sleep,
	etap_event_t	event,
	etap_event_t	i_event)
{
	(void) memset((void *) l, 0, sizeof(lock_t));

#if     ETAP_LOCK_TRACE
	etap_event_table_assign(&l->u.event_table_chain, event);
	l->u.s.start_list = SD_ENTRY_NULL;
#endif  /* ETAP_LOCK_TRACE */

	simple_lock_init(&l->interlock, i_event);
	l->want_write = FALSE;
	l->want_upgrade = FALSE;
	l->read_count = 0;
	l->can_sleep = can_sleep;

#if     ETAP_LOCK_ACCUMULATE
	l->cbuff_write = etap_cbuff_reserve(lock_event_table(l));
	if (l->cbuff_write != CBUFF_ENTRY_NULL) {
		l->cbuff_write->event    = event;
		l->cbuff_write->instance = (unsigned long) l;
		l->cbuff_write->kind     = WRITE_LOCK;
	}
	l->cbuff_read = CBUFF_ENTRY_NULL;
#endif  /* ETAP_LOCK_ACCUMULATE */
}
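Per the comment above, callers own the lock storage. A minimal caller
sketch (hypothetical names; MY_LOCK_EVENT stands in for whatever
etap_event_t value a given build defines, it is not a real constant):

	static lock_t my_lock;		/* client-declared storage */

	void
	my_module_init(void)
	{
		lock_init(&my_lock, TRUE, MY_LOCK_EVENT, MY_LOCK_EVENT);
	}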
Example #10
void PE_init_kprintf(boolean_t vm_initialized)
{
	unsigned int	boot_arg;

	if (PE_state.initialized == FALSE)
		panic("Platform Expert not initialized");

	if (!vm_initialized) {
		unsigned int new_disable_serial_output = TRUE;

		simple_lock_init(&kprintf_lock, 0);

		if (PE_parse_boot_argn("debug", &boot_arg, sizeof (boot_arg)))
			if (boot_arg & DB_KPRT)
				new_disable_serial_output = FALSE;

		/* If we are newly enabling serial, make sure we only
		 * call pal_serial_init() if our previous state was
		 * not enabled */
		if (!new_disable_serial_output && (!disable_serial_output || pal_serial_init()))
			PE_kputc = pal_serial_putc;
		else
			PE_kputc = cnputc;

		disable_serial_output = new_disable_serial_output;
	}
}
Example #11
io_return_t
chips_init(struct vc_info * info) 
{
	extern Boot_Video       boot_video_info;
	unsigned char		val;

	if (kernel_map) {
		regBase = (volatile unsigned char *)
			io_map((unsigned int)regBasePhys, 4096);
		simple_lock_init(&chips_lock, ETAP_IO_TTY);
	}

	strcpy(chips_info.v_name, chips_node->name);
	chips_info.v_width = boot_video_info.v_width;
	chips_info.v_height = boot_video_info.v_height;
	chips_info.v_depth = boot_video_info.v_depth;
	chips_info.v_rowbytes = boot_video_info.v_rowBytes;
	chips_info.v_physaddr = boot_video_info.v_baseAddr;
	chips_info.v_baseaddr = chips_info.v_physaddr;
	chips_info.v_type = VC_TYPE_PCI;

	memcpy(info, &chips_info, sizeof(chips_info));

	return	D_SUCCESS;
}
Example #12
void
uvm_pager_init()
{
	int lcv;

	/*
	 * init pager map
	 */

	pager_map = uvm_km_suballoc(kernel_map, &uvm.pager_sva, &uvm.pager_eva,
	    PAGER_MAP_SIZE, FALSE, FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;

	/*
	 * init ASYNC I/O queue
	 */
	
	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}
Example #13
int
boot_script_exec_cmd (void *hook, task_t task, char *path, int argc,
		      char **argv, char *strings, int stringlen)
{
  struct multiboot_module *mod = hook;

  int err;

  if (task != MACH_PORT_NULL)
    {
      thread_t thread;
      struct user_bootstrap_info info = { mod, argv, 0, };
      simple_lock_init (&info.lock);
      simple_lock (&info.lock);

      err = thread_create ((task_t)task, &thread);
      assert(err == 0);
      thread->saved.other = &info;
      thread_start (thread, user_bootstrap);
      thread_resume (thread);

      /* We need to synchronize with the new thread and block this
	 main thread until it has finished referring to our local state.  */
      while (! info.done)
	{
	  thread_sleep ((event_t) &info, simple_lock_addr(info.lock), FALSE);
	  simple_lock (&info.lock);
	}
      printf ("\n");
    }

  return 0;
}
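The simple_lock/thread_sleep loop above is the waiting half of a
condition handshake. A hypothetical sketch of the waking half (in the
real kernel this logic lives in user_bootstrap, which may differ):

  simple_lock (&info->lock);
  info->done = 1;
  simple_unlock (&info->lock);
  thread_wakeup ((event_t) info);	/* unblock boot_script_exec_cmd */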
Example #14
void
processor_bootstrap(void)
{
	/* Set up the default processor set and its node. */
	pset_init(&pset0, &pset_node0);
	pset_node0.psets = &pset0;

	simple_lock_init(&pset_node_lock, 0);

	/* Global queues of all tasks and threads in the system. */
	queue_init(&tasks);
	queue_init(&threads);

	simple_lock_init(&processor_list_lock, 0);

	/* Bring up the bootstrap processor in the default pset. */
	master_processor = cpu_to_processor(master_cpu);

	processor_init(master_processor, master_cpu, &pset0);
}
Example #15
void printf_init(void)
{
	if (!_doprnt_lock_initialized)
	{
		_doprnt_lock_initialized = TRUE;
		simple_lock_init(&_doprnt_lock);
	}
}
Example #16
/*
 * Initialize inode hash table.
 */
void
ntfs_nthashinit()
{
	lockinit(&ntfs_hashlock, PINOD, "ntfs_nthashlock", 0, 0);
	ntfs_nthashtbl = HASHINIT(desiredvnodes, M_NTFSNTHASH, M_WAITOK,
	    &ntfs_nthash);
	simple_lock_init(&ntfs_nthash_slock);
}
Example #17
void
tunattach(int unused)
{

	simple_lock_init(&tun_softc_lock);
	LIST_INIT(&tun_softc_list);
	LIST_INIT(&tunz_softc_list);
	if_clone_attach(&tun_cloner);
}
Example #18
void
stack_init(void)
{
	simple_lock_init(&stack_lock_data, 0);

	if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE))
		panic("stack_init: stack size %d not a multiple of page size %d\n",
		    KERNEL_STACK_SIZE, PAGE_SIZE);

	stack_addr_mask = KERNEL_STACK_SIZE - 1;
}
Example #19
static int
rf_ConfigureRDFreeList(RF_ShutdownList_t **listp)
{

	rf_pool_init(&rf_pools.rad, sizeof(RF_RaidAccessDesc_t),
		     "rf_rad_pl", RF_MIN_FREE_RAD, RF_MAX_FREE_RAD);
	rf_ShutdownCreate(listp, rf_ShutdownRDFreeList, NULL);
	simple_lock_init(&rf_rad_lock);
	return (0);
}
Example #20
void
timeout_startup(void)
{
	int b;

	CIRCQ_INIT(&timeout_todo);
	for (b = 0; b < BUCKETS; b++)
		CIRCQ_INIT(&timeout_wheel[b]);
	simple_lock_init(&_timeout_lock);
}
Example #21
void
timer_call_setup(
	timer_call_t			call,
	timer_call_func_t		func,
	timer_call_param_t		param0)
{
	DBG("timer_call_setup(%p,%p,%p)\n", call, func, param0);
	call_entry_setup(CE(call), func, param0);
	simple_lock_init(&(call)->lock, 0);
	call->async_dequeue = FALSE;
}
Example #22
void
wskbd_hotkey_init(void)
{

	if (wskbd_hotkey_initted == 0) {
		simple_lock_init(&queue_lock);
		queue_head = queue_tail = 0;
		kthread_create_deferred(init_hotkey_thread, NULL);
		wskbd_hotkey_initted = 1;
	}
}
Example #23
void
spi_transfer_init(struct spi_transfer *st)
{

	simple_lock_init(&st->st_lock);
	st->st_flags = 0;
	st->st_errno = 0;
	st->st_done = NULL;
	st->st_chunks = NULL;
	st->st_private = NULL;
	st->st_slave = -1;
}
Example #24
/*
 *	Initialize a usimple_lock.
 *
 *	No change in preemption state.
 */
void
usimple_lock_init(
	usimple_lock_t	l,
	__unused unsigned short	tag)
{
#ifndef	MACHINE_SIMPLE_LOCK
	USLDBG(usld_lock_init(l, tag));
	hw_lock_init(&l->interlock);
#else
	simple_lock_init((simple_lock_t)l,tag);
#endif
}
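A minimal sketch of typical use, assuming xnu's usual declaration macro
(the names here are hypothetical):

	decl_simple_lock_data(static, my_data_lock)

	void
	my_subsystem_init(void)
	{
		simple_lock_init(&my_data_lock, 0);	/* 0: no ETAP event/tag */
	}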
Example #25
/*
 *	Routine:	lock_init
 *	Function:
 *		Initialize a lock; required before use.
 *		Note that clients declare the "struct lock"
 *		variables and then initialize them, rather
 *		than getting a new one from this module.
 */
void lock_init(
	lock_t		l,
	boolean_t	can_sleep)
{
	memset(l, 0, sizeof(lock_data_t));
	simple_lock_init(&l->interlock);
	l->want_write = FALSE;
	l->want_upgrade = FALSE;
	l->read_count = 0;
	l->can_sleep = can_sleep;
	l->thread = (struct thread *)-1;	/* XXX */
	l->recursion_depth = 0;
}
Example #26
/*
 * allocate an anon
 */
struct vm_anon *
uvm_analloc()
{
	struct vm_anon *anon;

	anon = pool_get(&uvm_anon_pool, PR_NOWAIT);
	if (anon) {
		simple_lock_init(&anon->an_lock);
		anon->an_ref = 1;
		anon->an_page = NULL;
		anon->an_swslot = 0;
		simple_lock(&anon->an_lock);
	}
	return(anon);
}
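Note that the anon comes back with an_lock held. A hedged caller sketch
(hypothetical; real callers fill in more state before unlocking):

	struct vm_anon *anon = uvm_analloc();
	if (anon != NULL) {
		/* ... install a page, swap slot, etc. ... */
		simple_unlock(&anon->an_lock);
	}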
Example #27
void
thread_daemon_init(void)
{
	kern_return_t	result;
	thread_t	thread = NULL;

	simple_lock_init(&thread_terminate_lock, 0);
	queue_init(&thread_terminate_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_terminate_daemon");

	thread_deallocate(thread);	/* drop the reference returned by kernel_thread_start_priority */

	simple_lock_init(&thread_stack_lock, 0);
	queue_init(&thread_stack_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_stack_daemon");

	thread_deallocate(thread);
}
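Each daemon started above consumes its queue under the matching lock. A
hypothetical sketch of that shape (the real thread_terminate_daemon
differs in detail, e.g. it blocks waiting for more work):

	simple_lock(&thread_terminate_lock);
	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		/* ... finish destroying the thread ... */
		simple_lock(&thread_terminate_lock);
	}
	simple_unlock(&thread_terminate_lock);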
Example #28
void
panic_init(void)
{
	unsigned long uuidlen = 0;
	void *uuid;

	uuid = getuuidfromheader(&_mh_execute_header, &uuidlen);
	if ((uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
		uuid_unparse_upper(*(uuid_t *)uuid, kernel_uuid);
	}

	simple_lock_init(&panic_lock, 0);
	panic_is_inited = 1;
	panic_caller = 0;
}
Example #29
void
timer_call_initialize(void)
{
	spl_t				s;

	simple_lock_init(&timer_call_lock, 0);

	s = splclock();
	simple_lock(&timer_call_lock);

	clock_set_timer_func((clock_timer_func_t)timer_call_interrupt);

	simple_unlock(&timer_call_lock);
	splx(s);
}
Example #30
void
mca_cpu_init(void)
{
	unsigned int	i;

	/*
	 * The first (boot) processor is responsible for discovering the
	 * machine check architecture present on this machine.
	 */
	if (!mca_initialized) {
		mca_get_availability();
		mca_initialized = TRUE;
		simple_lock_init(&mca_lock, 0);
	}

	if (mca_MCA_present) {

		/* Enable all MCA features */
		if (mca_control_MSR_present)
			wrmsr64(IA32_MCG_CTL, IA32_MCG_CTL_ENABLE);
	
		switch (mca_family) {
		case 0x06:
			/* Enable all but mc0 */
			for (i = 1; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_CTL(i),0xFFFFFFFFFFFFFFFFULL); 
			
			/* Clear all errors */
			for (i = 0; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_STATUS(i), 0ULL);
			break;
		case 0x0F:
			/* Enable all banks */
			for (i = 0; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_CTL(i),0xFFFFFFFFFFFFFFFFULL); 
			
			/* Clear all errors */
			for (i = 0; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_STATUS(i), 0ULL);
			break;
		}
	}

	/* Enable machine check exception handling if available */
	if (mca_MCE_present) {
		set_cr4(get_cr4()|CR4_MCE);
	}
}