/*
 * do_notify_resume - handle pending work before returning to userspace.
 * @regs:               user-mode register state at the return site
 * @orig_i0:            original first syscall argument register, forwarded to
 *                      do_signal() (presumably for syscall-restart handling —
 *                      TODO confirm against the arch's signal code)
 * @thread_info_flags:  snapshot of the thread's TIF_* work flags
 *
 * Signals are delivered before TIF_NOTIFY_RESUME callbacks; the flag is
 * cleared before invoking tracehook_notify_resume() so a re-set during the
 * callback is not lost. The body runs bracketed by user_exit()/user_enter().
 */
void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
		      unsigned long thread_info_flags)
{
	user_exit();

	/* Deal with pending signal delivery first. */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs, orig_i0);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		/* Clear before the callback so a new request is not lost. */
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}

	user_enter();
}
/* Called with IRQs disabled. */
__visible void prepare_exit_to_usermode(struct pt_regs *regs)
{
	/* Enforce the entry contract; recover rather than crash if violated. */
	if (WARN_ON(!irqs_disabled()))
		local_irq_disable();

	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
	 * _TIF_UPROBE, or _TIF_NEED_RESCHED set. Several of these flags
	 * can be set at any time on preemptable kernels if we have IRQs on,
	 * so we need to loop. Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* Re-read the flags each pass; work below may set new ones. */
		u32 cached_flags =
			READ_ONCE(pt_regs_to_thread_info(regs)->flags);

		if (!(cached_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME |
				      _TIF_UPROBE | _TIF_NEED_RESCHED |
				      _TIF_USER_RETURN_NOTIFY)))
			break;

		/* We have work to do: the handlers below may sleep. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			/* Clear before the callback so a re-set is not lost. */
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();
	}

	user_enter();
}
/* Called with IRQs disabled. */ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs) { u32 cached_flags; if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled())) local_irq_disable(); lockdep_sys_exit(); cached_flags = READ_ONCE(pt_regs_to_thread_info(regs)->flags); if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS)) exit_to_usermode_loop(regs, cached_flags); user_enter(); }
/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 *
 * @regs:               user-mode register state at the return site
 * @unused:             unused slot kept for the arch's calling convention
 * @thread_info_flags:  snapshot of the thread's TIF_* work flags
 *
 * IRQs are enabled before doing any work (the handlers below may sleep);
 * the body runs bracketed by user_exit()/user_enter().
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	local_irq_enable();

	user_exit();

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		/* Clear before the callback so a new request is not lost. */
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}

	user_enter();
}
/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 *
 * Reports syscall exit to auditing, the syscall tracepoint, and the
 * ptrace/tracehook machinery, in that order.
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	/* Tracepoint check is unlikely-annotated: tracing is usually off. */
	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs_return_value(regs));

	/* step=0: not reporting a single-step trap here. */
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	user_enter();
}
void task_entry(const char *exe_name) { __asm__ ("sti"); screen_print("spawned new task\n"); screen_print("loading "); screen_print(exe_name); screen_put('\n'); const char *paths[] = {"bin", exe_name}; auto mData = _kernel_state.fs.GetInode(2, paths); if(mData.IsNothing()) { kernel_panic("failed to get inode"); } auto data = mData.FromJust(); _kernel_state.pager->Enable(_kernel_state.task->context); ELF elf(data); user_enter(elf.entry(), &_kernel_state.task->stack[PAGE_ALLOCATOR_PAGE_SIZE * Task::STACK_PAGES]); for(;;) { __asm__ ("hlt"); } // unreachable }