Example #1
/*
 *	stack_alloc_try:
 *
 *	Non-blocking attempt to allocate a
 *	stack for a thread.
 *
 *	Returns TRUE on success.
 *
 *	Called at splsched.
 */
boolean_t
stack_alloc_try(
	thread_t		thread)
{
	struct stack_cache	*cache;
	vm_offset_t			stack;

	/* The per-processor cache is private at splsched: no lock needed. */
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	stack = cache->free;
	if (stack != 0) {
		cache->free = stack_next(stack);
		cache->count--;
	}
	else {
		/* Unlocked peek at the global list; recheck under the lock. */
		if (stack_free_list != 0) {
			stack_lock();
			stack = stack_free_list;
			if (stack != 0) {
				stack_free_list = stack_next(stack);
				stack_free_count--;
				stack_free_delta--;
			}
			stack_unlock();
		}
	}

	/* Last resort: fall back to the stack the thread reserved, if any. */
	if (stack != 0 || (stack = thread->reserved_stack) != 0) {
		machine_stack_attach(thread, stack);
		return (TRUE);
	}

	return (FALSE);
}
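Two things carry this function: the per-processor cache can be read with no lock because the caller runs at splsched and the cache belongs to the current processor, and the global list gets an unlocked peek (stack_free_list != 0) followed by a recheck once stack_lock() is held. A user-space sketch of the same shape, with hypothetical names; the intrusive link stored in the first word of a free stack is what stack_next() appears to be in XNU:

#include <stddef.h>
#include <pthread.h>

/* Hypothetical model: a free stack stores the link to the next free stack
 * in its own first word, as the stack_next() macro appears to do in XNU. */
#define stack_next(s)	(*(void **)(s))

struct stack_cache {
	void	*free;		/* per-CPU list: touched only by its owner */
	int	count;
};

static void		*global_free;	/* shared list, guarded by a lock */
static pthread_mutex_t	global_lock = PTHREAD_MUTEX_INITIALIZER;

/* Non-blocking attempt: local cache first, then the shared list. */
void *
stack_alloc_try_model(struct stack_cache *cache)
{
	void *stack = cache->free;

	if (stack != NULL) {
		cache->free = stack_next(stack);
		cache->count--;
	} else if (global_free != NULL) {	/* unlocked peek... */
		pthread_mutex_lock(&global_lock);
		stack = global_free;
		if (stack != NULL)		/* ...recheck under the lock */
			global_free = stack_next(stack);
		pthread_mutex_unlock(&global_lock);
	}
	return stack;				/* NULL: the caller must block */
}

The fallback to thread->reserved_stack at the end is what keeps this path non-blocking: rather than wait for memory, the function returns FALSE and leaves the blocking stack_alloc() route to the caller.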
Example #2
/*
 *	stack_free_stack:
 *
 *	Return a stack to the per-processor
 *	cache if there is room, otherwise to
 *	the global free list.
 */
void
stack_free_stack(
	vm_offset_t		stack)
{
	struct stack_cache	*cache;
	spl_t				s;

	s = splsched();
	cache = &PROCESSOR_DATA(current_processor(), stack_cache);
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	}
	else {
		stack_lock();
		stack_next(stack) = stack_free_list;
		stack_free_list = stack;
		if (++stack_free_count > stack_free_hiwat)
			stack_free_hiwat = stack_free_count;
		stack_free_delta++;
		stack_unlock();
	}
	splx(s);
}
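The free path is the mirror image, again sketched in user space; STACK_CACHE_SIZE here is an assumed small constant, and the types and globals come from the sketch under Example #1. The pthread mutex only models stack_lock(); the splsched()/splx() pair in the real code additionally keeps the thread from migrating off its processor while it touches the per-processor cache:

#define STACK_CACHE_SIZE	2	/* assumed small, per-processor */

/* Free path, mirroring the alloc path: keep up to STACK_CACHE_SIZE
 * stacks warm in the local cache, overflow to the shared list. */
void
stack_free_model(struct stack_cache *cache, void *stack)
{
	if (cache->count < STACK_CACHE_SIZE) {
		stack_next(stack) = cache->free;
		cache->free = stack;
		cache->count++;
	} else {
		pthread_mutex_lock(&global_lock);
		stack_next(stack) = global_free;
		global_free = stack;
		pthread_mutex_unlock(&global_lock);
	}
}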
Example #3
File: stack.c Project: TalAloni/xnu
/*
 *	stack_collect:
 *
 *	Free excess kernel stacks; may
 *	block.
 */
void
stack_collect(void)
{
	if (stack_collect_tick != last_stack_tick) {
		unsigned int	target;
		vm_offset_t		stack;
		spl_t			s;

		s = splsched();
		stack_lock();

		target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
		target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;

		while (stack_free_count > target) {
			stack = stack_free_list;
			stack_free_list = stack_next(stack);
			stack_free_count--; stack_total--;

			/*
			 * vm_map_remove() may block: drop the lock and spl
			 * around it, then recompute the target once relocked.
			 */
			stack_unlock();
			splx(s);

			/*
			 * Get the stack base address, then decrement by one page
			 * to account for the lower guard page.  Add two extra pages
			 * to the size to account for the guard pages on both ends
			 * that were originally requested when the stack was allocated
			 * back in stack_alloc().
			 */

			stack = (vm_offset_t)vm_map_trunc_page(
				stack,
				VM_MAP_PAGE_MASK(kernel_map));
			stack -= PAGE_SIZE;
			if (vm_map_remove(
				    kernel_map,
				    stack,
				    stack + kernel_stack_size+(2*PAGE_SIZE),
				    VM_MAP_REMOVE_KUNWIRE)
			    != KERN_SUCCESS)
				panic("stack_collect: vm_map_remove");
			stack = 0;

			s = splsched();
			stack_lock();

			target = stack_free_target + (STACK_CACHE_SIZE * processor_count);
			target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta;
		}

		last_stack_tick = stack_collect_tick;

		stack_unlock();
		splx(s);
	}
}
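Beyond the guard-page arithmetic spelled out in the comment (back up one page from the stack base, then remove the stack plus both guards), the interesting shape is the trim loop: the lock and spl are dropped around vm_map_remove() because it may block, and the target is recomputed after relocking since other processors may have freed or allocated stacks in the meantime. A minimal, self-contained sketch of that loop, with hypothetical names:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t	list_lock = PTHREAD_MUTEX_INITIALIZER;
static void		*free_list;	/* intrusive link in first word */
static unsigned		free_count;

static unsigned	compute_target(void) { return 4; }	/* placeholder policy */
static void	destroy_may_block(void *stack) { free(stack); }

/* The shape of stack_collect(): trim the shared list down to a target,
 * dropping the lock around each blocking teardown and recomputing the
 * target after relocking, because other CPUs may have changed the list. */
void
collect_model(void)
{
	pthread_mutex_lock(&list_lock);
	while (free_count > compute_target()) {
		void *victim = free_list;
		free_list = *(void **)victim;
		free_count--;
		pthread_mutex_unlock(&list_lock);

		destroy_may_block(victim);	/* stands in for vm_map_remove() */

		pthread_mutex_lock(&list_lock);	/* recheck everything */
	}
	pthread_mutex_unlock(&list_lock);
}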
Example #4
/*
 *	stack_alloc:
 *
 *	Allocate a stack for a thread; may
 *	block.
 */
void
stack_alloc(
	thread_t	thread)
{
	vm_offset_t		stack;
	spl_t			s;
	int			guard_flags;

	assert(thread->kernel_stack == 0);

	s = splsched();
	stack_lock();
	stack = stack_free_list;
	if (stack != 0) {
		stack_free_list = stack_next(stack);
		stack_free_count--;
	}
	else {
		/* Nothing free: a brand-new stack is allocated below. */
		if (++stack_total > stack_hiwat)
			stack_hiwat = stack_total;
		stack_new_count++;
	}
	stack_free_delta--;
	stack_unlock();
	splx(s);
		
	if (stack == 0) {

		/*
		 * Request guard pages on either side of the stack.  Ask
		 * kernel_memory_allocate() for two extra pages to account
		 * for these.
		 */

		guard_flags = KMA_GUARD_FIRST | KMA_GUARD_LAST;
		if (kernel_memory_allocate(kernel_map, &stack,
					   KERNEL_STACK_SIZE + (2*PAGE_SIZE),
					   stack_addr_mask,
					   KMA_KOBJECT | guard_flags)
		    != KERN_SUCCESS)
			panic("stack_alloc: kernel_memory_allocate");

		/*
		 * The stack address that comes back is the address of the lower
		 * guard page.  Skip past it to get the actual stack base address.
		 */

		stack += PAGE_SIZE;
	}

	machine_stack_attach(thread, stack);
}
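The layout kernel_memory_allocate() produces here is [guard page][stack][guard page], with the returned address pointing at the lower guard; that is why the function adds PAGE_SIZE before attaching, and why stack_collect() subtracts it again before unmapping. A user-space analog of the same layout, assuming POSIX mmap()/mprotect() and a hypothetical stack size:

#include <sys/mman.h>
#include <unistd.h>
#include <stdio.h>

/* User-space analog of the layout stack_alloc() requests: one inaccessible
 * page below the stack and one above, so an overflow faults immediately
 * instead of silently corrupting a neighboring allocation. */
int
main(void)
{
	size_t page  = (size_t)sysconf(_SC_PAGESIZE);
	size_t stack = 16 * page;		/* hypothetical stack size */
	size_t total = stack + 2 * page;	/* plus a guard on each end */

	char *region = mmap(NULL, total, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (region == MAP_FAILED) { perror("mmap"); return 1; }

	/* Revoke access to the first and last pages: these are the guards. */
	if (mprotect(region, page, PROT_NONE) != 0 ||
	    mprotect(region + page + stack, page, PROT_NONE) != 0) {
		perror("mprotect"); return 1;
	}

	char *base = region + page;	/* skip the lower guard, as stack_alloc() does */
	base[0] = 1;			/* usable */
	/* base[-1] = 1; would fault on the lower guard */

	/* Teardown mirrors stack_collect(): back up over the lower guard
	 * and unmap the whole region, guards included. */
	munmap(base - page, total);
	return 0;
}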
Example #5
bool dbgsymengine::stack_first (CONTEXT* pctx)
{
	if (!pctx || IsBadReadPtr(pctx, sizeof(CONTEXT))) 
		return false;

	if (!check())
		return false;

	if (!m_pframe) 
	{
		m_pframe = new STACKFRAME;
		if (!m_pframe) return false;
	}
				
	memset(m_pframe, 0, sizeof(STACKFRAME));

	#ifdef _X86_
	/* x86: seed the walk from the instruction, stack, and frame pointers. */
	m_pframe->AddrPC.Offset    = pctx->Eip;
	m_pframe->AddrPC.Mode      = AddrModeFlat;
	m_pframe->AddrStack.Offset = pctx->Esp;
	m_pframe->AddrStack.Mode   = AddrModeFlat;
	m_pframe->AddrFrame.Offset = pctx->Ebp;
	m_pframe->AddrFrame.Mode   = AddrModeFlat;
	#else
	/* Non-x86 (Alpha-style CONTEXT): Fir holds the faulting instruction,
	   IntRa/IntSp/IntFp the return-address, stack, and frame registers. */
	m_pframe->AddrPC.Offset     = (DWORD)pctx->Fir;
	m_pframe->AddrPC.Mode       = AddrModeFlat;
	m_pframe->AddrReturn.Offset = (DWORD)pctx->IntRa;
	m_pframe->AddrReturn.Mode   = AddrModeFlat;
	m_pframe->AddrStack.Offset  = (DWORD)pctx->IntSp;
	m_pframe->AddrStack.Mode    = AddrModeFlat;
	m_pframe->AddrFrame.Offset  = (DWORD)pctx->IntFp;
	m_pframe->AddrFrame.Mode    = AddrModeFlat;
	#endif

	m_pctx = pctx;
	return stack_next();
}
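stack_next() is not shown, but with a STACKFRAME primed this way it presumably wraps dbghelp's StackWalk(). A minimal sketch of such a loop against the documented 64-bit dbghelp API (x86 seeding only; it assumes SymInitialize() has already succeeded on the process handle):

#include <windows.h>
#include <dbghelp.h>
#include <stdio.h>
#pragma comment(lib, "dbghelp.lib")

/* Each StackWalk64 call advances one frame; the loop ends when the walker
 * fails or reports a zero PC.  SymFunctionTableAccess64 and
 * SymGetModuleBase64 are the stock dbghelp helper callbacks. */
void
walk_stack(HANDLE process, HANDLE thread, CONTEXT *ctx)
{
	STACKFRAME64 frame = {0};
#ifdef _M_IX86
	frame.AddrPC.Offset    = ctx->Eip;	frame.AddrPC.Mode    = AddrModeFlat;
	frame.AddrStack.Offset = ctx->Esp;	frame.AddrStack.Mode = AddrModeFlat;
	frame.AddrFrame.Offset = ctx->Ebp;	frame.AddrFrame.Mode = AddrModeFlat;
#endif
	while (StackWalk64(IMAGE_FILE_MACHINE_I386, process, thread,
			   &frame, ctx, NULL,
			   SymFunctionTableAccess64, SymGetModuleBase64, NULL)) {
		if (frame.AddrPC.Offset == 0)
			break;
		printf("pc=%016llx\n", (unsigned long long)frame.AddrPC.Offset);
	}
}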