void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	int cpu;

	set_user_gs(regs, 0);
	regs->fs = 0;
	set_fs(USER_DS);
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;
	regs->ip = new_ip;
	regs->sp = new_sp;
	/* Exec Shield variant: refresh this CPU's user code segment
	 * descriptor for the new mm while pinned to the CPU. */
	cpu = get_cpu();
	load_user_cs_desc(cpu, current->mm);
	put_cpu();
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}

int import_thread_struct(struct epm_action *action, ghost_t *ghost,
			 struct task_struct *tsk)
{
	int r;

	r = ghost_read(ghost, &tsk->thread, sizeof(tsk->thread));
	if (r)
		goto out;

	/*
	 * Make get_wchan return do_exit for zombies
	 * We only set a marker to let copy_thread() do the right thing.
	 */
	if (tsk->exit_state)
		tsk->thread.sp = ~0UL;
	else
		tsk->thread.sp = 0;

	if (tsk->thread.xstate) {
		r = -ENOMEM;
		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!tsk->thread.xstate)
			goto out;
		r = ghost_read(ghost, tsk->thread.xstate, xstate_size);
		if (r)
			free_thread_xstate(tsk);
	}

out:
	return r;
}

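/*
 * For context, the matching export path presumably mirrors the reads
 * above with writes. This is an inferred sketch, not code from this
 * listing: ghost_write() is assumed to take the same (ghost, buffer,
 * length) arguments as ghost_read() above, and export_thread_struct()
 * is a hypothetical name for the counterpart.
 */
int export_thread_struct(struct epm_action *action, ghost_t *ghost,
			 struct task_struct *tsk)
{
	int r;

	/* Write the whole thread struct, including the xstate pointer;
	 * on import, a non-NULL pointer signals that xstate data follows. */
	r = ghost_write(ghost, &tsk->thread, sizeof(tsk->thread));
	if (r)
		goto out;

	if (tsk->thread.xstate)
		r = ghost_write(ghost, tsk->thread.xstate, xstate_size);
out:
	return r;
}
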
void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	set_user_gs(regs, 0);
	regs->fs = 0;
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;
	regs->ip = new_ip;
	regs->sp = new_sp;
	free_thread_xstate(current);
}

void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	set_user_gs(regs, 0);
	regs->fs = 0;
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;
	regs->ip = new_ip;
	regs->sp = new_sp;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}

void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	set_user_gs(regs, 0);
	regs->fs = 0;
	set_fs(USER_DS);
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;
	regs->ip = new_ip;
	regs->sp = new_sp;
#ifndef CONFIG_IPIPE	/* Lazily handled, init_fpu() will reset the state. */
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
#endif
}

void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
#ifdef CONFIG_L4_VCPU
	/* L4Linux: the kernel itself runs in user mode on the L4
	 * microkernel, so new threads reuse the current cs/ds selectors
	 * instead of the fixed __USER_* values. */
	unsigned cs, ds;

	__asm__("mov %%cs, %0; mov %%ds, %1 \n"
		: "=r"(cs), "=r"(ds));
#endif
	//set_user_gs(regs, 0);
	regs->fs = 0;
#ifdef CONFIG_L4_VCPU
	regs->ds = ds;
	regs->es = ds;
	regs->ss = ds;
	regs->cs = cs;
#else
	//regs->ds = __USER_DS;
	//regs->es = __USER_DS;
	//regs->ss = __USER_DS;
	//regs->cs = __USER_CS;
#endif
	regs->ip = new_ip;
	regs->sp = new_sp;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
	current->thread.gs = 0;
#ifndef CONFIG_L4_VCPU
	current->thread.restart = 1;
#endif
	if (new_ip > TASK_SIZE)
		force_sig(SIGSEGV, current);
}

void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_ORDER);
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	kmem_cache_free(thread_info_cache, ti);
}

void unimport_thread_struct(struct task_struct *task)
{
	free_thread_xstate(task);
}

void free_task_struct(struct task_struct *task)
{
	free_thread_xstate(task);
	kmem_cache_free(task_struct_cachep, task);
}

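/*
 * All of the variants above delegate to free_thread_xstate(). For
 * reference, a minimal sketch of its usual definition in kernels of
 * this vintage (arch/x86/kernel/process.c); the exact body may differ
 * per tree, but the shape is the same: release the slab-allocated
 * extended state, if any, and clear the pointer so a later call is a
 * harmless no-op.
 */
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}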