Example #1
void save_stack_trace(struct stack_trace *trace)
{
	unsigned long sp;

	sp = current_stack_pointer();

	save_context_stack(trace, sp, current, 1);
}
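Every example on this page relies on an architecture-specific current_stack_pointer() helper. As a rough approximation only (not any particular kernel's definition), it can be sketched with a GCC/Clang builtin; real implementations typically use a register variable or inline assembly instead:

/* Illustrative only: approximate the current stack pointer.  The frame
 * address of the current function is close to, but not exactly, the SP. */
static inline unsigned long current_stack_pointer(void)
{
	return (unsigned long)__builtin_frame_address(0);
}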
Example #2
void save_stack_trace(struct stack_trace *trace)
{
	unsigned long sp;

	sp = current_stack_pointer();
	dump_trace(save_address, trace, NULL, sp);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
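Both variants above fill a caller-supplied struct stack_trace and, if there is room, append a ULONG_MAX end-of-trace marker. A minimal, hypothetical caller (field names taken from the examples on this page; the exact interface depends on the kernel version) might look like:

	unsigned long entries[16];
	struct stack_trace trace = {
		.entries     = entries,
		.max_entries = ARRAY_SIZE(entries),
		.nr_entries  = 0,
	};

	save_stack_trace(&trace);
	/* trace.entries[0 .. trace.nr_entries - 1] now holds return addresses;
	 * the final slot may be the ULONG_MAX terminator if space allowed. */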
Example #3
// Bang the shadow pages if they need to be touched to be mapped.
inline void os::bang_stack_shadow_pages() {
  // Write to each page of our new frame to force OS mapping.
  // If we decrement the stack pointer by more than one page at a time,
  // the OS may not map an intervening page into our address space
  // and may fault on a memory access to the interior of our frame.
  address sp = current_stack_pointer();
  for (size_t pages = 1; pages <= (JavaThread::stack_shadow_zone_size() / os::vm_page_size()); pages++) {
    *((int *)(sp - (pages * vm_page_size()))) = 0;
  }
}
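The same idea outside the JVM: pre-touch one word per page below the current stack pointer so the OS maps those pages before a large frame is built there. A standalone sketch, assuming a downward-growing stack and using a local variable's address as an SP approximation:

#include <stddef.h>

static void touch_pages_below_sp(size_t n_pages, size_t page_size)
{
	/* The address of a local is a good-enough stand-in for the SP here. */
	volatile char *sp = (volatile char *)&n_pages;
	size_t i;

	for (i = 1; i <= n_pages; i++) {
		/* One write per page forces the OS to map it (or to deliver
		 * the fault now rather than deep inside a new frame). */
		*(sp - i * page_size) = 0;
	}
}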
Example #4
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp;

	sp = tsk->thread.ksp;
	if (tsk == current)
		sp = current_stack_pointer();
	dump_trace(save_address_nosched, trace, tsk, sp);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Example #5
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp;

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	save_context_stack(trace, sp, tsk, 0);
}
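Examples #4 and #5 share one rule: only the currently running task has a live stack pointer that can be read directly; for any other task the unwind must start from the SP saved at its last context switch. As an illustrative helper (not part of the kernel), that choice can be factored out:

/* Illustrative only: pick the starting SP for unwinding a task. */
static unsigned long task_start_sp(struct task_struct *tsk)
{
	if (tsk == current)
		return current_stack_pointer();	/* live SP of the running task */
	return tsk->thread.ksp;			/* SP saved at the last switch */
}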
Example #6
// Entered (via a rewritten instruction pointer) after a stack-overflow
// exception: find the memory region containing the current stack pointer
// and hand its base address and protection to the Dylan overflow handler.
void call_dylan_stack_overflow_handler ()
{
  MEMORY_BASIC_INFORMATION memBuf;
  PVOID stack_ptr = current_stack_pointer();
  int res = VirtualQuery(stack_ptr, &memBuf, sizeof(memBuf));

  PVOID baseAddress    = memBuf.BaseAddress;     // base address of region
  PVOID allocationBase = memBuf.AllocationBase;  // allocation base address
  DWORD protect        = memBuf.Protect;         // current access protection

  dylan_stack_overflow_handler(baseAddress, VPAGESIZE, PAGE_GUARD + protect);

}
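The handler receives the base address of the faulting stack region, the page size, and PAGE_GUARD combined with the region's previous protection; re-establishing the guard later is essentially one VirtualProtect call. A hypothetical sketch (the Dylan runtime code that actually does this is not shown on this page):

#include <windows.h>

/* Hypothetical: re-arm the tripped guard page using the values passed to
 * dylan_stack_overflow_handler() above. */
static BOOL rearm_stack_guard(PVOID base, SIZE_T page_size, DWORD guard_protect)
{
  DWORD old;
  /* guard_protect is expected to be PAGE_GUARD + protect, exactly as built
   * by call_dylan_stack_overflow_handler(). */
  return VirtualProtect(base, page_size, guard_protect, &old);
}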
Example #7
frame os::current_frame() {
  // The only thing that calls this is the stack printing code in
  // VMError::report:
  //   - Step 110 (printing stack bounds) uses the sp in the frame
  //     to determine the amount of free space on the stack.  We
  //     set the sp to a close approximation of the real value in
  //     order to allow this step to complete.
  //   - Step 120 (printing native stack) tries to walk the stack.
  //     The frame we create has a NULL pc, which is ignored as an
  //     invalid frame.
  frame dummy = frame();
  dummy.set_sp((intptr_t *) current_stack_pointer());
  return dummy;
}
Example #8
/*
 * handle normal device IRQs
 */
asmlinkage void do_IRQ(void)
{
	unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
	int irq;

	sp = current_stack_pointer();
	if (sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN)
		BUG();

	/* make sure local_irq_enable() doesn't muck up the interrupt priority
	 * setting in EPSW */
	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw;
	local_save_flags(epsw);
	__mn10300_irq_enabled_epsw = EPSW_IE | (EPSW_IM & epsw);
	irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;

	__IRQ_STAT(smp_processor_id(), __irq_count)++;

	irq_enter();

	for (;;) {
		/* ask the interrupt controller for the next IRQ to process
		 * - the result we get depends on EPSW.IM
		 */
		irq = IAGR & IAGR_GN;
		if (!irq)
			break;

		local_irq_restore(irq_disabled_epsw);

		generic_handle_irq(irq >> 2);

		/* restore IRQ controls for IAGR access */
		local_irq_restore(epsw);
	}

	__mn10300_irq_enabled_epsw = old_irq_enabled_epsw;

	irq_exit();
}
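The overflow check at the top of do_IRQ() relies on the kernel stack being THREAD_SIZE bytes long and THREAD_SIZE-aligned, so masking the SP yields the stack base. Spelled out under that assumption:

static void check_irq_stack_headroom(void)
{
	unsigned long sp   = current_stack_pointer();
	unsigned long base = sp & ~(THREAD_SIZE - 1);	/* lowest address of this stack */
	unsigned long left = sp - base;			/* bytes still free (stack grows down) */

	if (left < STACK_WARN)
		BUG();					/* about to run out of kernel stack */
}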
Example #9
LONG DylanExceptionFilter (LPEXCEPTION_POINTERS info)
{

  LPEXCEPTION_RECORD er = info->ExceptionRecord;

  if (inside_dylan_ffi_barrier() == 0) {
    return(EXCEPTION_CONTINUE_SEARCH);
  }

  switch (er->ExceptionCode)
  {
  case EXCEPTION_STACK_OVERFLOW:
    {
      // On a stack overflow, the filter calls into Dylan to signal
      // an error, via dylan_signal_overflow_handler. The dylan
      // code will arrange to re-establish the guard protection on
      // the appropriate page of the stack (probably during the
      // rewind when recovering from the error). Before calling the
      // handler, we do a check to ensure that there is sufficient
      // spare stack space after the guard to allow the handler itself
      // to run.

      MEMORY_BASIC_INFORMATION memBuf;
      PVOID stack_ptr = current_stack_pointer();
      int res = VirtualQuery(stack_ptr, &memBuf, sizeof(memBuf));

      PVOID baseAddress    = memBuf.BaseAddress;    // base address of region
      PVOID allocationBase = memBuf.AllocationBase; // allocation base addr

      if (((int)baseAddress - (int)allocationBase) >= (2 * VPAGESIZE)) {
        // There's enough space past the guard to invoke the Dylan handler.
        // Rather than attempt a long-jump within the filter (by simply
        // calling the Dylan handler) we destructively modify the execution
        // context, so that when Windows continues from the exception, it
        // actually continues in the Dylan handler calling code instead.
        // This handler will never return - instead it will ultimately NLX

        info->ContextRecord->Eip = (unsigned long) &call_dylan_stack_overflow_handler;
        return(EXCEPTION_CONTINUE_EXECUTION);
      } else {
        return(EXCEPTION_CONTINUE_SEARCH);
      }
    }
  case EXCEPTION_INT_OVERFLOW:
    { info->ContextRecord->Eip = (unsigned long) &dylan_integer_overflow_handler;
      return(EXCEPTION_CONTINUE_EXECUTION);
    }
  case EXCEPTION_INT_DIVIDE_BY_ZERO:
    { info->ContextRecord->Eip = (unsigned long) &dylan_integer_divide_0_handler;
      return(EXCEPTION_CONTINUE_EXECUTION);
    }
  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
    { info->ContextRecord->Eip = (unsigned long) &dylan_float_divide_0_handler;
      return(EXCEPTION_CONTINUE_EXECUTION);
    }
  case EXCEPTION_FLT_INVALID_OPERATION:
    { info->ContextRecord->Eip = (unsigned long) &dylan_float_invalid_handler;
      return(EXCEPTION_CONTINUE_EXECUTION);
    }
  case EXCEPTION_FLT_OVERFLOW:
    { info->ContextRecord->Eip = (unsigned long) &dylan_float_overflow_handler;
      return(EXCEPTION_CONTINUE_EXECUTION);
    }
  case EXCEPTION_FLT_UNDERFLOW:
    { info->ContextRecord->Eip = (unsigned long) &dylan_float_underflow_handler;
      return(EXCEPTION_CONTINUE_EXECUTION);
    }
  /*
  case  DBG_CONTROL_C:
    { dylan_keyboard_interruptQ = TRUE;
      return(EXCEPTION_CONTINUE_EXECUTION);
    }
   */

  default:
    return(EXCEPTION_CONTINUE_SEARCH);
  }
}
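For completeness, here is one plausible way such a filter gets used; the page does not show how the Dylan runtime actually installs it, so treat the wrapper and the hypothetical run_dylan_code() entry point below as assumptions:

#include <windows.h>

extern LONG DylanExceptionFilter(LPEXCEPTION_POINTERS info);
extern void run_dylan_code(void);   /* hypothetical entry point */

void run_with_filter(void)
{
  __try {
    run_dylan_code();
  } __except (DylanExceptionFilter(GetExceptionInformation())) {
    /* Not reached for the codes handled above: the filter either resumes
     * execution at a Dylan handler or declines with
     * EXCEPTION_CONTINUE_SEARCH. */
  }
}

It could equally be registered process-wide with SetUnhandledExceptionFilter, modulo the WINAPI calling-convention qualifier that the declaration above lacks.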
Example #10
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
	u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
	unsigned cpu = smp_processor_id();
	u64 next_tlb_gen;

	/*
	 * NB: The scheduler will call us with prev == next when switching
	 * from lazy TLB mode to normal mode if active_mm isn't changing.
	 * When this happens, we don't assume that CR3 (and hence
	 * cpu_tlbstate.loaded_mm) matches next.
	 *
	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
	 */

	/* We don't want flush_tlb_func_* to run concurrently with us. */
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(!irqs_disabled());

	/*
	 * Verify that CR3 is what we think it is.  This will catch
	 * hypothetical buggy code that directly switches to swapper_pg_dir
	 * without going through leave_mm() / switch_mm_irqs_off() or that
	 * does something like write_cr3(read_cr3_pa()).
	 *
	 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
	 * isn't free.
	 */
#ifdef CONFIG_DEBUG_VM
	if (WARN_ON_ONCE(__read_cr3() !=
			 (__sme_pa(real_prev->pgd) | prev_asid))) {
		/*
		 * If we were to BUG here, we'd be very likely to kill
		 * the system so hard that we don't see the call trace.
		 * Try to recover instead by ignoring the error and doing
		 * a global flush to minimize the chance of corruption.
		 *
		 * (This is far from being a fully correct recovery.
		 *  Architecturally, the CPU could prefetch something
		 *  back into an incorrect ASID slot and leave it there
		 *  to cause trouble down the road.  It's better than
		 *  nothing, though.)
		 */
		__flush_tlb_all();
	}
#endif

	if (real_prev == next) {
		VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
			  next->context.ctx_id);

		if (cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * There's nothing to do: we weren't lazy, and we
			 * aren't changing our mm.  We don't need to flush
			 * anything, nor do we need to update CR3, CR4, or
			 * LDTR.
			 */
			return;
		}

		/* Resume remote flushes and then read tlb_gen. */
		cpumask_set_cpu(cpu, mm_cpumask(next));
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);

		if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) <
		    next_tlb_gen) {
			/*
			 * Ideally, we'd have a flush_tlb() variant that
			 * takes the known CR3 value as input.  This would
			 * be faster on Xen PV and on hypothetical CPUs
			 * on which INVPCID is fast.
			 */
			this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
				       next_tlb_gen);
			write_cr3(__sme_pa(next->pgd) | prev_asid);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
					TLB_FLUSH_ALL);
		}

		/*
		 * We just exited lazy mode, which means that CR4 and/or LDTR
		 * may be stale.  (Changes to the required CR4 and LDTR states
		 * are not reflected in tlb_gen.)
		 */
	} else {
		u16 new_asid;
		bool need_flush;

		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
			/*
			 * If our current stack is in vmalloc space and isn't
			 * mapped in the new pgd, we'll double-fault.  Forcibly
			 * map it.
			 */
			unsigned int index = pgd_index(current_stack_pointer());
			pgd_t *pgd = next->pgd + index;

			if (unlikely(pgd_none(*pgd)))
				set_pgd(pgd, init_mm.pgd[index]);
		}

		/* Stop remote flushes for the previous mm */
		if (cpumask_test_cpu(cpu, mm_cpumask(real_prev)))
			cpumask_clear_cpu(cpu, mm_cpumask(real_prev));

		VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));

		/*
		 * Start remote flushes and then read tlb_gen.
		 */
		cpumask_set_cpu(cpu, mm_cpumask(next));
		next_tlb_gen = atomic64_read(&next->context.tlb_gen);

		choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);

		if (need_flush) {
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
			write_cr3(__sme_pa(next->pgd) | new_asid);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
					TLB_FLUSH_ALL);
		} else {
			/* The new ASID is already up to date. */
			write_cr3(__sme_pa(next->pgd) | new_asid | CR3_NOFLUSH);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
		}

		this_cpu_write(cpu_tlbstate.loaded_mm, next);
		this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
	}

	load_mm_cr4(next);
	switch_ldt(real_prev, next);
}
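The two write_cr3() calls in the else-branch differ only in the no-flush bit. Under the layout this code assumes (x86-64 with PCIDs enabled), the low 12 bits of CR3 carry the ASID/PCID and bit 63 asks the CPU to keep the TLB entries tagged with that PCID. A purely illustrative composition, with macro names of my own choosing:

/* Illustrative only: how the CR3 values written above are put together. */
#define CR3_ASID_MASK	0xfffULL		/* PCID lives in CR3[11:0] */
#define CR3_NOFLUSH_BIT	(1ULL << 63)		/* keep TLB entries for this PCID */

static unsigned long long make_cr3(unsigned long long pgd_pa,
				   unsigned short asid, int noflush)
{
	unsigned long long cr3 = pgd_pa | (asid & CR3_ASID_MASK);

	if (noflush)
		cr3 |= CR3_NOFLUSH_BIT;
	return cr3;
}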
Example #11
int
save_stack_trace_tsk_reliable(struct task_struct *tsk,
				struct stack_trace *trace)
{
	unsigned long sp;
	unsigned long stack_page = (unsigned long)task_stack_page(tsk);
	unsigned long stack_end;
	int graph_idx = 0;

	/*
	 * The last frame (unwinding first) may not yet have saved
	 * its LR onto the stack.
	 */
	int firstframe = 1;

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	stack_end = stack_page + THREAD_SIZE;
	if (!is_idle_task(tsk)) {
		/*
		 * For user tasks, this is the SP value loaded on
		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
		 * system_call_common()/EXCEPTION_PROLOG_COMMON().
		 *
		 * Likewise for non-swapper kernel threads,
		 * this also happens to be the top of the stack
		 * as setup by copy_thread().
		 *
		 * Note that stack backlinks are not properly setup by
		 * copy_thread() and thus, a forked task() will have
		 * an unreliable stack trace until it's been
		 * _switch()'ed to for the first time.
		 */
		stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	} else {
		/*
		 * idle tasks have a custom stack layout,
		 * c.f. cpu_idle_thread_init().
		 */
		stack_end -= STACK_FRAME_OVERHEAD;
	}

	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return 1;
	}

	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		/* sanity check: ABI requires SP to be aligned 16 bytes. */
		if (sp & 0xF)
			return 1;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			return 1;
		}

		newsp = stack[0];
		/* Stack grows downwards; unwinder may only go up. */
		if (newsp <= sp)
			return 1;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return 1; /* invalid backlink, too far up. */
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe && !__kernel_text_address(ip))
			return 1;
		firstframe = 0;

		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code, they are generic.
		 */
		ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, NULL);
#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (ip == (unsigned long)kretprobe_trampoline)
			return 1;
#endif

		if (!trace->skip)
			trace->entries[trace->nr_entries++] = ip;
		else
			trace->skip--;

		if (newsp == stack_end)
			break;

		if (trace->nr_entries >= trace->max_entries)
			return -E2BIG;

		sp = newsp;
	}
	return 0;
}