Example #1
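/*
 * Capture the machine-check architecture (MCA) state of the current
 * processor: the global MCG control/status MSRs plus, for every error
 * bank, its control and status registers and, when the status is valid,
 * the MISC and ADDR registers.  The assert requires interrupts disabled
 * or preemption disabled so all reads refer to the same CPU.
 */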
static void
mca_save_state(mca_state_t *mca_state)
{
	mca_mci_bank_t  *bank;
	unsigned int	i;

	assert(!ml_get_interrupts_enabled() || get_preemption_level() > 0);

	if (mca_state == NULL)
		return;

	mca_state->mca_mcg_ctl = mca_control_MSR_present ?
					rdmsr64(IA32_MCG_CTL) : 0ULL;	
	mca_state->mca_mcg_status.u64 = rdmsr64(IA32_MCG_STATUS);

	bank = (mca_mci_bank_t *) &mca_state->mca_error_bank[0];
	for (i = 0; i < mca_error_bank_count; i++, bank++) {
		bank->mca_mci_ctl        = rdmsr64(IA32_MCi_CTL(i));	
		bank->mca_mci_status.u64 = rdmsr64(IA32_MCi_STATUS(i));	
		if (!bank->mca_mci_status.bits.val)
			continue;
		bank->mca_mci_misc = (bank->mca_mci_status.bits.miscv)?
					rdmsr64(IA32_MCi_MISC(i)) : 0ULL;	
		bank->mca_mci_addr = (bank->mca_mci_status.bits.addrv)?
					rdmsr64(IA32_MCi_ADDR(i)) : 0ULL;	
	} 

	/*
	 * If we're the first thread with MCA state, point our package to it
	 * and don't care about races
	 */
	if (x86_package()->mca_state == NULL)
		x86_package()->mca_state = mca_state;
}
Example #2
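/*
 * Release every per-CPU PCID still allocated to this pmap.  The
 * pmap_assert requires interrupts disabled or preemption disabled so
 * the per-CPU PCID assignments cannot change underneath the loop.
 */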
void	pmap_destroy_pcid_sync(pmap_t p) {
	int i;
	pmap_assert(ml_get_interrupts_enabled() == FALSE || get_preemption_level() != 0);
	for (i = 0; i < PMAP_PCID_MAX_CPUS; i++)
		if (p->pmap_pcid_cpus[i] != PMAP_PCID_INVALID_PCID)
			pmap_pcid_deallocate_pcid(i, p);
}
Example #3
/*
 * Real-time clock device interrupt.
 */
void
rtclock_intr(
	x86_saved_state_t	*tregs)
{
	uint64_t	rip;
	boolean_t	user_mode = FALSE;

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	if (is_saved_state64(tregs) == TRUE) {
		x86_saved_state64_t	*regs;
		  
		regs = saved_state64(tregs);

		if (regs->isf.cs & 0x03)
			user_mode = TRUE;
		rip = regs->isf.rip;
	} else {
		x86_saved_state32_t	*regs;

		regs = saved_state32(tregs);

		if (regs->cs & 0x03)
			user_mode = TRUE;
		rip = regs->eip;
	}

	/* call the generic etimer */
	timer_intr(user_mode, rip);
}
Example #4
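/*
 * Release ownership of the per-CPU trace data and drop the serializing
 * spinlock.  Pending "death row" pages are purged only at preemption
 * level zero, presumably because the purge may block.
 */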
void __trace_put_tcd(struct trace_cpu_data *tcd)
{
	/*
	 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */
	LASSERT(trace_owner == current_thread());
	trace_owner = NULL;
	spin_unlock(&trace_cpu_serializer);
	if (get_preemption_level() == 0)
		/* purge all pending pages */
		raw_page_death_row_clean();
}
Example #5
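/*
 * Delay until an absolute deadline.  Spin in machine_delay_until() when
 * the remaining time is short (under eight context-switch times) or when
 * blocking is unsafe (preemption disabled or interrupts masked);
 * otherwise block the thread until shortly before the deadline.
 */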
void
clock_delay_until(
	uint64_t		deadline)
{
	uint64_t		now = mach_absolute_time();

	if (now >= deadline)
		return;

	if (	(deadline - now) < (8 * sched_cswtime)	||
			get_preemption_level() != 0				||
			ml_get_interrupts_enabled() == FALSE	)
		machine_delay_until(deadline);
	else {
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);

		thread_block(THREAD_CONTINUE_NULL);
	}
}
Example #6
/*
 * Preserve the original precise interval that the client
 * requested for comparison to the spin threshold.
 */
void
_clock_delay_until_deadline(
	uint64_t		interval,
	uint64_t		deadline)
{

	if (interval == 0)
		return;

	if (	ml_delay_should_spin(interval)	||
			get_preemption_level() != 0				||
			ml_get_interrupts_enabled() == FALSE	) {
		machine_delay_until(deadline);
	} else {
		assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);

		thread_block(THREAD_CONTINUE_NULL);
	}
}
Example #7
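/*
 * Forward a scheduler urgency hint, with any real-time period and
 * deadline, to the power-management dispatch table.  When the
 * notification threshold is configured, the callout is timed and the
 * kernel panics if it runs too long.  The assert requires preemption
 * disabled or interrupts masked on entry.
 */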
void
thread_tell_urgency(int urgency,
    uint64_t rt_period,
    uint64_t rt_deadline,
    thread_t nthread)
{
	uint64_t	urgency_notification_time_start, delta;
	boolean_t	urgency_assert = (urgency_notification_assert_abstime_threshold != 0);
	assert(get_preemption_level() > 0 || ml_get_interrupts_enabled() == FALSE);
#if	DEBUG
	urgency_stats[cpu_number() % 64][urgency]++;
#endif
	if (!pmInitDone
	    || pmDispatch == NULL
	    || pmDispatch->pmThreadTellUrgency == NULL)
		return;

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, rt_deadline, 0, 0);

	if (__improbable((urgency_assert == TRUE)))
		urgency_notification_time_start = mach_absolute_time();

	current_cpu_datap()->cpu_nthread = nthread;
	pmDispatch->pmThreadTellUrgency(urgency, rt_period, rt_deadline);

	if (__improbable((urgency_assert == TRUE))) {
		delta = mach_absolute_time() - urgency_notification_time_start;

		if (__improbable(delta > urgency_notification_max_recorded)) {
			/* This is not synchronized, but it doesn't matter
			 * if we (rarely) miss an event, as it is statistically
			 * unlikely that it will never recur.
			 */
			urgency_notification_max_recorded = delta;

			if (__improbable((delta > urgency_notification_assert_abstime_threshold) && !machine_timeout_suspended()))
				panic("Urgency notification callout %p exceeded threshold, 0x%llx abstime units", pmDispatch->pmThreadTellUrgency, delta);
		}
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, rt_deadline, 0, 0);
}
Example #8
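/*
 * Acquire the per-CPU trace data: detect recursive entry, optionally
 * refill the stock pages (only at preemption level zero, where the
 * allocation may block), then take the serializing spinlock and record
 * the owning thread.
 */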
struct trace_cpu_data *trace_get_tcd(void)
{
	struct trace_cpu_data *tcd;
	int nr_pages;
	struct list_head pages;

	/*
	 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	/*
	 * debugging check for recursive call to libcfs_debug_msg()
	 */
	if (trace_owner == current_thread()) {
		/*
		 * Cannot assert here.
		 */
		printk(KERN_EMERG "recursive call to %s", __FUNCTION__);
		/*
		 * "The death of God left the angels in a strange position."
		 */
		cfs_enter_debugger();
	}
	tcd = &trace_data[0].tcd;
	CFS_INIT_LIST_HEAD(&pages);
	if (get_preemption_level() == 0)
		nr_pages = trace_refill_stock(tcd, CFS_ALLOC_STD, &pages);
	else
		nr_pages = 0;
	spin_lock(&trace_cpu_serializer);
	trace_owner = current_thread();
	tcd->tcd_cur_stock_pages += nr_pages;
	list_splice(&pages, &tcd->tcd_stock_pages);
	return tcd;
}
Example #9
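/*
 * Probe for and enable Process-Context Identifier (PCID) support on the
 * current CPU: honor the -pmap_pcid_disable boot-arg and no_shared_cr3,
 * check CPUID, then set CR4.PCIDE (and CR4.PGE if necessary) and
 * initialize the per-CPU PCID bookkeeping.
 */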
void	pmap_pcid_configure(void) {
	int ccpu = cpu_number();
	uintptr_t cr4 = get_cr4();
	boolean_t pcid_present = FALSE;

	pmap_pcid_log("PCID configure invoked on CPU %d\n", ccpu);
	pmap_assert(ml_get_interrupts_enabled() == FALSE || get_preemption_level() != 0);
	pmap_assert(cpu_mode_is64bit());

	if (PE_parse_boot_argn("-pmap_pcid_disable", &pmap_pcid_disabled, sizeof (pmap_pcid_disabled))) {
		pmap_pcid_log("PMAP: PCID feature disabled\n");
		printf("PMAP: PCID feature disabled, %u\n", pmap_pcid_disabled);
		kprintf("PMAP: PCID feature disabled %u\n", pmap_pcid_disabled);
	}
	 /* no_shared_cr3+PCID is currently unsupported */
#if	DEBUG
	if (pmap_pcid_disabled == FALSE)
		no_shared_cr3 = FALSE;
	else
		no_shared_cr3 = TRUE;
#else
	if (no_shared_cr3)
		pmap_pcid_disabled = TRUE;
#endif
	if (pmap_pcid_disabled || no_shared_cr3) {
		unsigned i;
		/* Reset PCID status, as we may have picked up
		 * strays if discovered prior to platform
		 * expert initialization.
		 */
		for (i = 0; i < real_ncpus; i++) {
			if (cpu_datap(i)) {
				cpu_datap(i)->cpu_pmap_pcid_enabled = FALSE;
			}
			pmap_pcid_ncpus = 0;
		}
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = FALSE;
		return;
	}
	/* DRKTODO: assert if features haven't been discovered yet. Redundant
	 * invocation of cpu_mode_init and descendants masks this for now.
	 */
	if ((cpuid_features() & CPUID_FEATURE_PCID))
		pcid_present = TRUE;
	else {
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = FALSE;
		pmap_pcid_log("PMAP: PCID not detected CPU %d\n", ccpu);
		return;
	}
	if ((cr4 & (CR4_PCIDE | CR4_PGE)) == (CR4_PCIDE|CR4_PGE)) {
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = TRUE;
		pmap_pcid_log("PMAP: PCID already enabled %d\n", ccpu);
		return;
	}
	if (pcid_present == TRUE) {
		pmap_pcid_log("Pre-PCID:CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n", get_cr0(), get_cr3_raw(), ccpu, cr4);

		if (cpu_number() >= PMAP_PCID_MAX_CPUS) {
			panic("PMAP_PCID_MAX_CPUS %d\n", cpu_number());
		}
		if ((get_cr4() & CR4_PGE) == 0) {
			set_cr4(get_cr4() | CR4_PGE);
			pmap_pcid_log("Toggled PGE ON (CPU: %d\n", ccpu);
		}
		set_cr4(get_cr4() | CR4_PCIDE);
		pmap_pcid_log("Post PCID: CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n", get_cr0(), get_cr3_raw(), ccpu, get_cr4());
		tlb_flush_global();
		cpu_datap(ccpu)->cpu_pmap_pcid_enabled = TRUE;

		if (OSIncrementAtomic(&pmap_pcid_ncpus) == machine_info.max_cpus) {
			pmap_pcid_log("All PCIDs enabled: real_ncpus: %d, pmap_pcid_ncpus: %d\n", real_ncpus, pmap_pcid_ncpus);
		}
		cpu_datap(ccpu)->cpu_pmap_pcid_coherentp =
		    cpu_datap(ccpu)->cpu_pmap_pcid_coherentp_kernel =
		    &(kernel_pmap->pmap_pcid_coherency_vector[ccpu]);
		cpu_datap(ccpu)->cpu_pcid_refcounts[0] = 1;
	}
}
Example #10
 * 		   switching of space
 * 
 */
struct thread_shuttle*
switch_context(
	struct thread_shuttle *old,
	void (*continuation)(void),
	struct thread_shuttle *new)
{
	register thread_act_t old_act = old->top_act, new_act = new->top_act;
	register struct thread_shuttle* retval;
	pmap_t	new_pmap;
#if MACH_RT
	assert(old_act->kernel_loaded ||
	       active_stacks[cpu_number()] == old_act->thread->kernel_stack);
	assert (get_preemption_level() == 1);
#endif
	check_simple_locks();

#if	NCPUS > 1
	/* Our context might wake up on another processor, so we must
	 * not keep hot state in our FPU, it must go back to the pcb
	 * so that it can be found by the other if needed
	 */
	fpu_save();
#endif	/* NCPUS > 1 */

#if DEBUG
	if (watchacts & WA_PCB) {
		printf("switch_context(0x%08x, 0x%x, 0x%08x)\n",
		       old,continuation,new);
Example #11
/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
		thread_call_group_t		group,
		wait_result_t			wres)
{
	thread_t	self = current_thread();
	boolean_t	canwait;

	/*
	 * A wakeup with THREAD_INTERRUPTED indicates that 
	 * we should terminate.
	 */
	if (wres == THREAD_INTERRUPTED) {
		thread_terminate(self);

		/* NOTREACHED */
		panic("thread_terminate() returned?");
	}

	(void)disable_ints_and_lock();

	thread_sched_call(self, group->sched_call);

	while (group->pending_count > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->tc_call.func;
		param0 = call->tc_call.param0;
		param1 = call->tc_call.param1;

		call->tc_call.queue = NULL;

		_internal_call_release(call);

		/*
		 * Can only do wakeups for thread calls whose storage
		 * we control.
		 */
		if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) {
			canwait = TRUE;
			call->tc_refs++;	/* Delay free until we're done */
		} else
			canwait = FALSE;

		enable_ints_and_unlock();

		KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

		(*func)(param0, param1);

		if (get_preemption_level() != 0) {
			int pl = get_preemption_level();
			panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
					pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
		}

		(void)thread_funnel_set(self->funnel_lock, FALSE);		/* XXX */

		(void) disable_ints_and_lock();
		
		if (canwait) {
			/* Frees if so desired */
			thread_call_finish(call);
		}
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group_isparallel(group)) {
		/*
		 * For new style of thread group, thread always blocks. 
		 * If we have more than the target number of threads,
		 * and this is the first to block, and it isn't active 
		 * already, set a timer for deallocating a thread if we 
		 * continue to have a surplus.
		 */
		group->idle_count++;

		if (group->idle_count == 1) {
			group->idle_timestamp = mach_absolute_time();
		}   

		if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) &&
				((group->active_count + group->idle_count) > group->target_thread_count)) {
			group->flags |= TCG_DEALLOC_ACTIVE;
			thread_call_start_deallocate_timer(group);
		}   

		/* Wait for more work (or termination) */
		wres = wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_INTERRUPTIBLE, 0); 
		if (wres != THREAD_WAITING) {
			panic("kcall worker unable to assert wait?");
		}   

		enable_ints_and_unlock();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
	} else {
		if (group->idle_count < group->target_thread_count) {
			group->idle_count++;

			wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0); /* Interrupted means to exit */

			enable_ints_and_unlock();

			thread_block_parameter((thread_continue_t)thread_call_thread, group);
			/* NOTREACHED */
		}
	}

	enable_ints_and_unlock();

	thread_terminate(self);
	/* NOTREACHED */
}
Example #12
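/*
 * Guarded zone allocation: when the zone falls within the configured
 * gzalloc size band, place the element on its own page(s) with a guard
 * page, fill it with a poison pattern, and record the originating zone,
 * size and signature in a header (a footer in underflow mode).  Early
 * allocations, before kmem is ready, are carved from a static reserve.
 */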
vm_offset_t
gzalloc_alloc(zone_t zone, boolean_t canblock) {
	vm_offset_t addr = 0;

	if (__improbable(gzalloc_mode &&
		(((zone->elem_size >= gzalloc_min) &&
		    (zone->elem_size <= gzalloc_max))) &&
		(zone->gzalloc_exempt == 0))) {

		if (get_preemption_level() != 0) {
			if (canblock == TRUE) {
				pdzalloc_count++;
			}
			else
				return 0;
		}

		vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE);
		vm_offset_t residue = rounded_size - zone->elem_size;
		vm_offset_t gzaddr = 0;
		gzhdr_t *gzh;

		if (!kmem_ready || (vm_page_zone == ZONE_NULL)) {
			/* Early allocations are supplied directly from the
			 * reserve.
			 */
			if (gzalloc_reserve_size < rounded_size)
				panic("gzalloc reserve exhausted");
			gzaddr = gzalloc_reserve;
			/* No guard page for these early allocations, just
			 * waste an additional page.
			 */
			gzalloc_reserve += rounded_size + PAGE_SIZE;
			gzalloc_reserve_size -= rounded_size + PAGE_SIZE;
			OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_alloc);
		}
		else {
			kern_return_t kr = kernel_memory_allocate(gzalloc_map,
			    &gzaddr, rounded_size + (1*PAGE_SIZE),
			    0, KMA_KOBJECT | gzalloc_guard);
			if (kr != KERN_SUCCESS)
				panic("gzalloc: kernel_memory_allocate for size 0x%llx failed with %d", (uint64_t)rounded_size, kr);

		}

		if (gzalloc_uf_mode) {
			gzaddr += PAGE_SIZE;
			/* The "header" becomes a "footer" in underflow
			 * mode.
			 */
			gzh = (gzhdr_t *) (gzaddr + zone->elem_size);
			addr = gzaddr;
		} else {
			gzh = (gzhdr_t *) (gzaddr + residue - GZHEADER_SIZE);
			addr = (gzaddr + residue);
		}

		/* Fill with a pattern on allocation to trap uninitialized
		 * data use. Since the element size may be "rounded up"
		 * by higher layers such as the kalloc layer, this may
		 * also identify overruns between the originally requested
		 * size and the rounded size via visual inspection.
		 * TBD: plumb through the originally requested size,
		 * prior to rounding by kalloc/IOMalloc etc.
		 * We also add a signature and the zone of origin in a header
		 * prefixed to the allocation.
		 */
		memset((void *)gzaddr, gzalloc_fill_pattern, rounded_size);

		gzh->gzone = (kmem_ready && vm_page_zone) ? zone : GZDEADZONE;
		gzh->gzsize = (uint32_t) zone->elem_size;
		gzh->gzsig = GZALLOC_SIGNATURE;

		lock_zone(zone);
		zone->count++;
		zone->sum_count++;
		zone->cur_size += rounded_size;
		unlock_zone(zone);

		OSAddAtomic64((SInt32) rounded_size, &gzalloc_allocated);
		OSAddAtomic64((SInt32) (rounded_size - zone->elem_size), &gzalloc_wasted);
	}
	return addr;
}
Example #13
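/*
 * Common entry work for panic(): optionally dump the kdebug trace,
 * raise spl and disable preemption, print the raw panic string, then
 * take the panic lock and either claim the panic for this CPU or deal
 * with a nested/double panic.  Returns the previous interrupt level.
 */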
static spl_t
panic_prologue(const char *str)
{
	spl_t	s;

	if (write_trace_on_panic && kdebug_enable) {
		if (get_preemption_level() == 0 && !ml_at_interrupt_context()) {
			ml_set_interrupts_enabled(TRUE);
			kdbg_dump_trace_to_file("/var/tmp/panic.trace");
		}
	}

	s = splhigh();
	disable_preemption();

#if	defined(__i386__) || defined(__x86_64__)
	/* Attempt to display the unparsed panic string */
	const char *tstr = str;

	kprintf("Panic initiated, string: ");
	while (tstr && *tstr)
		kprintf("%c", *tstr++);
	kprintf("\n");
#endif

	panic_safe();

	if (logPanicDataToScreen)
		disable_debug_output = FALSE;
		
	debug_mode = TRUE;

restart:
	PANIC_LOCK();

	if (panicstr) {
		if (cpu_number() != paniccpu) {
			PANIC_UNLOCK();
			/*
			 * Wait until message has been printed to identify correct
			 * cpu that made the first panic.
			 */
			while (panicwait)
				continue;
			goto restart;
		} else {
			nestedpanic += 1;
			PANIC_UNLOCK();
			Debugger("double panic");
			printf("double panic:  We are hanging here...\n");
			panic_stop();
			/* NOTREACHED */
		}
	}
	panicstr = str;
	paniccpu = cpu_number();
	panicwait = 1;

	PANIC_UNLOCK();
	return(s);
}
Example #14
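/*
 * chudxnu accessor: report the caller's current preemption level.
 */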
__private_extern__ int
chudxnu_get_preemption_level(void)
{
	return get_preemption_level();
}
Example #15
/**
 * sleh_abort
 *
 * Handle prefetch and data aborts. (EXC_BAD_ACCESS IS NOT HERE YET)
 */
void sleh_abort(void* context, int reason)
{
    vm_map_t map;
    thread_t thread;
    kern_return_t kr;
    abort_information_context_t* abort_context = (abort_information_context_t*)context;
    uint32_t prot = VM_PROT_READ;

    if(!abort_context) {
        panic("sleh_abort: abort handler called but with no context");
    }

    /* Could be a page fault */
    {
        map = kernel_map;
        thread = current_thread();
#if 0
        /* Dump current register values */
        kprintf("*** POSSIBLE PAGE FAULT ***\n");
        kprintf("sleh_abort: register dump %d: fault_addr=0x%x\n"
                          "r0: 0x%08x  r1: 0x%08x  r2: 0x%08x  r3: 0x%08x\n"
                          "r4: 0x%08x  r5: 0x%08x  r6: 0x%08x  r7: 0x%08x\n"
                          "r8: 0x%08x  r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                          "12: 0x%08x  sp: 0x%08x  lr: 0x%08x  pc: 0x%08x\n"
                          "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                          reason, abort_context->far,
                          abort_context->gprs[0], abort_context->gprs[1], abort_context->gprs[2], abort_context->gprs[3],
                          abort_context->gprs[4], abort_context->gprs[5], abort_context->gprs[6], abort_context->gprs[7],
                          abort_context->gprs[8], abort_context->gprs[9], abort_context->gprs[10], abort_context->gprs[11],
                          abort_context->gprs[12], abort_context->sp, abort_context->lr, abort_context->pc,
                          abort_context->cpsr, abort_context->fsr, abort_context->far
                          );
#endif
        /* If it was a hardware error, let us know. */
        if((abort_context->fsr & 0xF) == 0x8) {
            if(abort_context->fsr & (1 << 12))
                panic_context(0, (void*)abort_context, "sleh_abort: axi slave error %d: fault_addr=0x%x\n"
                              "r0: 0x%08x  r1: 0x%08x  r2: 0x%08x  r3: 0x%08x\n"
                              "r4: 0x%08x  r5: 0x%08x  r6: 0x%08x  r7: 0x%08x\n"
                              "r8: 0x%08x  r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                              "12: 0x%08x  sp: 0x%08x  lr: 0x%08x  pc: 0x%08x\n"
                              "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                              reason, abort_context->far,
                              abort_context->gprs[0], abort_context->gprs[1], abort_context->gprs[2], abort_context->gprs[3],
                              abort_context->gprs[4], abort_context->gprs[5], abort_context->gprs[6], abort_context->gprs[7],
                              abort_context->gprs[8], abort_context->gprs[9], abort_context->gprs[10], abort_context->gprs[11],
                              abort_context->gprs[12], abort_context->sp, abort_context->lr, abort_context->pc,
                              abort_context->cpsr, abort_context->fsr, abort_context->far
                              );
            else
                panic_context(0, (void*)abort_context, "sleh_abort: axi decode error %d: fault_addr=0x%x\n"
                              "r0: 0x%08x  r1: 0x%08x  r2: 0x%08x  r3: 0x%08x\n"
                              "r4: 0x%08x  r5: 0x%08x  r6: 0x%08x  r7: 0x%08x\n"
                              "r8: 0x%08x  r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                              "12: 0x%08x  sp: 0x%08x  lr: 0x%08x  pc: 0x%08x\n"
                              "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                              reason, abort_context->far,
                              abort_context->gprs[0], abort_context->gprs[1], abort_context->gprs[2], abort_context->gprs[3],
                              abort_context->gprs[4], abort_context->gprs[5], abort_context->gprs[6], abort_context->gprs[7],
                              abort_context->gprs[8], abort_context->gprs[9], abort_context->gprs[10], abort_context->gprs[11],
                              abort_context->gprs[12], abort_context->sp, abort_context->lr, abort_context->pc,
                              abort_context->cpsr, abort_context->fsr, abort_context->far
                              );
        }

        /* Make sure the kernel map isn't null. Dump context if it is. */
        if(!kernel_map) {
            kprintf("*** kernel_map is null, did something fail during VM initialization?\n");
            goto panicOut;
        }
        
        /* Check to see if the thread isn't null */
        if(!thread) {
            kprintf("*** thread was null, did something break before the first thread was set?\n");
            goto panicOut;
        }
        
        /* Eeek. */
        if(get_preemption_level()) {
            kprintf("*** data abort called but preemption level %d is not zero!\n", get_preemption_level());
            goto panicOut;
        }
        
        /* Check to see if it is a fault */
        if(((abort_context->fsr & FSR_FAIL) == FAILURE_TRANSLATION) ||
           ((abort_context->fsr & FSR_FAIL) == FAILURE_SECTION)) {
            map = thread->map;
            assert(map);
            /* Attempt to fault it */
            kr = vm_fault(map, vm_map_trunc_page(abort_context->far), prot,
                        FALSE, THREAD_UNINT, NULL, 0);
            if(kr != KERN_SUCCESS)
                kprintf("*** vm_fault failed with code 0x%08x\n", kr);
            if(kr == KERN_SUCCESS)
                return;
        }
        
        /* Call the recovery routine if there is one. */
        if (thread != THREAD_NULL && thread->recover) {
            abort_context->pc = thread->recover;
            thread->recover = 0;
            return;
        }
        
        /* If it's a user process, let us know. */
        {
            ;
        }
        
    }
panicOut:
    switch(reason) {
        case SLEH_ABORT_TYPE_PREFETCH_ABORT: {
            /* Print out the long long nice prefetch abort. */
            panic_context(0, (void*)abort_context, "sleh_abort: prefetch abort type %d: fault_addr=0x%x\n"
                          "r0: 0x%08x  r1: 0x%08x  r2: 0x%08x  r3: 0x%08x\n"
                          "r4: 0x%08x  r5: 0x%08x  r6: 0x%08x  r7: 0x%08x\n"
                          "r8: 0x%08x  r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                          "12: 0x%08x  sp: 0x%08x  lr: 0x%08x  pc: 0x%08x\n"
                          "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                          reason, abort_context->far,
                          abort_context->gprs[0], abort_context->gprs[1], abort_context->gprs[2], abort_context->gprs[3],
                          abort_context->gprs[4], abort_context->gprs[5], abort_context->gprs[6], abort_context->gprs[7],
                          abort_context->gprs[8], abort_context->gprs[9], abort_context->gprs[10], abort_context->gprs[11],
                          abort_context->gprs[12], abort_context->sp, abort_context->lr, abort_context->pc,
                          abort_context->cpsr, abort_context->fsr, abort_context->far
                          );
        }
        case SLEH_ABORT_TYPE_DATA_ABORT: {
            /* Print out the long long nice data abort. */
            panic_context(0, (void*)abort_context, "sleh_abort: data abort type %d: fault_addr=0x%x\n"
                       "r0: 0x%08x  r1: 0x%08x  r2: 0x%08x  r3: 0x%08x\n"
                       "r4: 0x%08x  r5: 0x%08x  r6: 0x%08x  r7: 0x%08x\n"
                       "r8: 0x%08x  r9: 0x%08x r10: 0x%08x r11: 0x%08x\n"
                       "12: 0x%08x  sp: 0x%08x  lr: 0x%08x  pc: 0x%08x\n"
                       "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n",
                       reason, abort_context->far,
                       abort_context->gprs[0], abort_context->gprs[1], abort_context->gprs[2], abort_context->gprs[3],
                       abort_context->gprs[4], abort_context->gprs[5], abort_context->gprs[6], abort_context->gprs[7],
                       abort_context->gprs[8], abort_context->gprs[9], abort_context->gprs[10], abort_context->gprs[11],
                       abort_context->gprs[12], abort_context->sp, abort_context->lr, abort_context->pc,
                       abort_context->cpsr, abort_context->fsr, abort_context->far
                       );
        }
        default:
            panic("sleh_abort: unknown abort called (context: %p, reason: %d)!\n", context, reason);
    }
    
    while(1);
}
Example #16
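/*
 * Guarded zone free: validate the element header (signature, zone and
 * size) when consistency checks are enabled, then either write-protect
 * the pages and rotate the element through the protected free-element
 * cache, unmapping the evicted entry, or unmap the pages immediately
 * when no cache is configured.
 */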
boolean_t gzalloc_free(zone_t zone, void *addr) {
	boolean_t gzfreed = FALSE;
	kern_return_t kr;

	if (__improbable(gzalloc_mode &&
		(((zone->elem_size >= gzalloc_min) &&
		    (zone->elem_size <= gzalloc_max))) &&
		(zone->gzalloc_exempt == 0))) {
		gzhdr_t *gzh;
		vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE);
		vm_offset_t residue = rounded_size - zone->elem_size;
		vm_offset_t saddr;
		vm_offset_t free_addr = 0;

		if (gzalloc_uf_mode) {
			gzh = (gzhdr_t *)((vm_offset_t)addr + zone->elem_size);
			saddr = (vm_offset_t) addr - PAGE_SIZE;
		} else {
			gzh = (gzhdr_t *)((vm_offset_t)addr - GZHEADER_SIZE);
			saddr = ((vm_offset_t)addr) - residue;
		}

		assert((saddr & PAGE_MASK) == 0);

		if (gzalloc_consistency_checks) {
			if (gzh->gzsig != GZALLOC_SIGNATURE) {
				panic("GZALLOC signature mismatch for element %p, expected 0x%x, found 0x%x", addr, GZALLOC_SIGNATURE, gzh->gzsig);
			}

			if (gzh->gzone != zone && (gzh->gzone != GZDEADZONE))
				panic("%s: Mismatched zone or under/overflow, current zone: %p, recorded zone: %p, address: %p", __FUNCTION__, zone, gzh->gzone, (void *)addr);
			/* Partially redundant given the zone check, but may flag header corruption */
			if (gzh->gzsize != zone->elem_size) {
				panic("Mismatched zfree or under/overflow for zone %p, recorded size: 0x%x, element size: 0x%x, address: %p\n", zone, gzh->gzsize, (uint32_t) zone->elem_size, (void *)addr);
			}
		}

		if (!kmem_ready || gzh->gzone == GZDEADZONE) {
			/* For now, just leak frees of early allocations
			 * performed before kmem is fully configured.
			 * They don't seem to get freed currently;
			 * consider ml_static_mfree in the future.
			 */
			OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_free);
			return TRUE;
		}

		if (get_preemption_level() != 0) {
			pdzfree_count++;
		}

		if (gzfc_size) {
			/* Either write protect or unmap the newly freed
			 * allocation
			 */
			kr = vm_map_protect(
				gzalloc_map,
				saddr,
				saddr + rounded_size + (1 * PAGE_SIZE),
				gzalloc_prot,
				FALSE);
			if (kr != KERN_SUCCESS)
				panic("%s: vm_map_protect: %p, 0x%x", __FUNCTION__, (void *)saddr, kr);
		} else {
			free_addr = saddr;
		}

		lock_zone(zone);

		/* Insert newly freed element into the protected free element
		 * cache, and rotate out the LRU element.
		 */
		if (gzfc_size) {
			if (zone->gz.gzfc_index >= gzfc_size) {
				zone->gz.gzfc_index = 0;
			}
			free_addr = zone->gz.gzfc[zone->gz.gzfc_index];
			zone->gz.gzfc[zone->gz.gzfc_index++] = saddr;
		}

		if (free_addr) {
			zone->count--;
			zone->cur_size -= rounded_size;
		}

		unlock_zone(zone);

		if (free_addr) {
			kr = vm_map_remove(
				gzalloc_map,
				free_addr,
				free_addr + rounded_size + (1 * PAGE_SIZE),
				VM_MAP_REMOVE_KUNWIRE);
			if (kr != KERN_SUCCESS)
				panic("gzfree: vm_map_remove: %p, 0x%x", (void *)free_addr, kr);

			OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed);
			OSAddAtomic64(-((SInt32) (rounded_size - zone->elem_size)), &gzalloc_wasted);
		}

		gzfreed = TRUE;
	}
	return gzfreed;
}
Example #17
/*
 *	thread_call_thread:
 */
static void
thread_call_thread(
	thread_call_group_t		group)
{
	thread_t		self = current_thread();

	(void) splsched();
	thread_call_lock_spin();

	thread_sched_call(self, sched_call_thread);

	while (group->pending_count > 0) {
		thread_call_t			call;
		thread_call_func_t		func;
		thread_call_param_t		param0, param1;

		call = TC(dequeue_head(&group->pending_queue));
		group->pending_count--;

		func = call->func;
		param0 = call->param0;
		param1 = call->param1;
	
		call->queue = NULL;

		_internal_call_release(call);

		thread_call_unlock();
		(void) spllo();

		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
				func, param0, param1, 0, 0);

		(*func)(param0, param1);

		if (get_preemption_level() != 0) {
			int pl = get_preemption_level();
			panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
				  pl, func, param0, param1);
		}
		
		(void)thread_funnel_set(self->funnel_lock, FALSE);		/* XXX */

		(void) splsched();
		thread_call_lock_spin();
	}

	thread_sched_call(self, NULL);
	group->active_count--;

	if (group->idle_count < thread_call_thread_min) {
		group->idle_count++;

		wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0);
	
		thread_call_unlock();
		(void) spllo();

		thread_block_parameter((thread_continue_t)thread_call_thread, group);
		/* NOTREACHED */
	}

	thread_call_unlock();
	(void) spllo();

	thread_terminate(self);
	/* NOTREACHED */
}