int l4x_kvm_destroy_vm(struct kvm *kvm) { printk("%s: cap = %08lx\n", __func__, kvm->arch.l4vmcap); if (!l4lx_task_delete_task(kvm->arch.l4vmcap, 1)) { printk("%s: kvm task destruction failed cap=%08lx\n", __func__, kvm->arch.l4vmcap); l4lx_task_number_free(kvm->arch.l4vmcap); return -ENOENT; } l4lx_task_number_free(kvm->arch.l4vmcap); return 0; }
int l4x_kvm_create_vm(struct kvm *kvm) { l4_msgtag_t t; l4_utcb_t *u = l4_utcb(); int r; L4XV_V(f); kvm->arch.l4vmcap = L4_INVALID_CAP; if (l4lx_task_get_new_task(L4_INVALID_CAP, &kvm->arch.l4vmcap)) { printk("%s: could not allocate task cap\n", __func__); return -ENOENT; } L4XV_L(f); t = l4_factory_create_vm_u(l4re_env()->factory, kvm->arch.l4vmcap, u); if (unlikely((r = l4_error_u(t, u)))) { printk("%s: kvm task creation failed cap=%08lx: %d\n", __func__, kvm->arch.l4vmcap, r); l4lx_task_number_free(kvm->arch.l4vmcap); L4XV_U(f); return -ENOENT; } L4XV_U(f); printk("%s: cap = %08lx\n", __func__, kvm->arch.l4vmcap); #ifdef CONFIG_L4_DEBUG_REGISTER_NAMES L4XV_L(f); l4_debugger_set_object_name(kvm->arch.l4vmcap, "kvmVM"); L4XV_U(f); #endif return 0; }
/*
 * l4x_exit_thread() - release the per-thread L4 resources of the
 * exiting task (the non-vCPU configuration only; with CONFIG_L4_VCPU
 * this whole teardown is compiled out).
 *
 * Two things are cleaned up:
 *  1. If the thread was "hybrid" (has its own IPC gate), the gate's
 *     associated memory and the gate capability itself are freed.
 *  2. Every per-CPU user thread capability of the task is deleted and
 *     its cap number returned to the allocator.
 */
void l4x_exit_thread(void)
{
#ifndef CONFIG_L4_VCPU
	int i;

	if (unlikely(current->thread.is_hybrid)) {
		l4_cap_idx_t hybgate;
		l4_msgtag_t tag;
		l4_umword_t o = 0;

		/* Hybrid gates live at a fixed cap range indexed by pid. */
		hybgate = L4LX_KERN_CAP_HYBRID_BASE
		          + (current->pid << L4_CAP_SHIFT);

		/* The gate's label was set to a kmalloc'ed pointer; fetch
		 * it back so it can be kfree'd. */
		tag = l4_ipc_gate_get_infos(hybgate, &o);
		if (l4_error(tag))
			printk("hybrid: Could not get gate info, leaking mem.\n");
		else
			kfree((void *)o);

		/* Unmap the gate capability from all address spaces,
		 * which destroys the gate. */
		tag = l4_task_unmap(L4_BASE_TASK_CAP,
		                    l4_obj_fpage(hybgate, 0, L4_FPAGE_RWX),
		                    L4_FP_ALL_SPACES);
		if (l4_error(tag))
			printk("hybrid: Delete of gate failed.\n");
	}

	for (i = 0; i < NR_CPUS; i++) {
		l4_cap_idx_t thread_id = current->thread.user_thread_ids[i];

		/* check if we were a non-user thread (i.e., have no
		 * user-space partner) */
		if (unlikely(l4_is_invalid_cap(thread_id) || !thread_id))
			continue;

#ifdef DEBUG
		LOG_printf("exit_thread: trying to delete %s(%d, " PRINTF_L4TASK_FORM ")\n",
		           current->comm, current->pid,
		           PRINTF_L4TASK_ARG(thread_id));
#endif

		/* If task_delete fails we don't free the task number so that it
		 * won't be used again. */
		if (likely(!l4lx_task_delete_thread(thread_id))) {
			l4x_hybrid_remove(current);
			current->thread.user_thread_ids[i] = L4_INVALID_CAP;
			l4lx_task_number_free(thread_id);
			current->thread.started = 0;
		} else
			printk("%s: failed to delete task " PRINTF_L4TASK_FORM "\n",
			       __func__, PRINTF_L4TASK_ARG(thread_id));
	}
#endif

#ifdef CONFIG_X86_DS
	/* Branch/debug-store bookkeeping teardown (x86 only). */
	ds_exit_thread(current);
#endif
}
/*
 * l4x_evict_tasks() - reclaim L4 task capabilities from other processes.
 *
 * Walks the process list (under RCU) and, for up to 11 victims, atomically
 * steals mm->context.task (via cmpxchg against the value read earlier),
 * deletes the L4 task behind it and frees its cap number.  @exclude is
 * skipped, as are kernel threads (no mm), mms whose task cap is already
 * invalid, and mms containing VM_LOCKED mappings.
 *
 * NOTE(review): the eviction presumably forces the victim to re-create
 * its L4 task lazily on next use — the re-creation path is not visible
 * in this file.
 */
void l4x_evict_tasks(struct task_struct *exclude)
{
	struct task_struct *p;
	int cnt = 0;

	rcu_read_lock();
	for_each_process(p) {
		l4_cap_idx_t t;
		struct mm_struct *mm;

		if (p == exclude)
			continue;

		task_lock(p);
		mm = p->mm;
		if (!mm) {
			/* Kernel thread -- nothing to evict. */
			task_unlock(p);
			continue;
		}

		/* Single read: this exact value is what the cmpxchg below
		 * must still find for the steal to count. */
		t = ACCESS_ONCE(mm->context.task);

		if (l4_is_invalid_cap(t)) {
			task_unlock(p);
			continue;
		}

		if (down_read_trylock(&mm->mmap_sem)) {
			struct vm_area_struct *vma;
			/* Skip address spaces with mlock'ed regions;
			 * t = L4_INVALID_CAP marks "do not evict". */
			for (vma = mm->mmap; vma; vma = vma->vm_next)
				if (vma->vm_flags & VM_LOCKED) {
					t = L4_INVALID_CAP;
					break;
				}
			up_read(&mm->mmap_sem);
			/* vma == NULL here means the loop completed without
			 * finding a locked region. */
			if (!vma)
				/* Claim the cap; if someone changed it since
				 * our read, back off. */
				if (cmpxchg(&mm->context.task, t, L4_INVALID_CAP) != t)
					t = L4_INVALID_CAP;
		} else
			/* Could not inspect the mappings -- skip. */
			t = L4_INVALID_CAP;
		task_unlock(p);

		if (!l4_is_invalid_cap(t)) {
			/* We own the cap now: destroy the task and recycle
			 * the cap number. */
			l4lx_task_delete_task(t);
			l4lx_task_number_free(t);

			if (++cnt > 10)
				break;
		}
	}
	rcu_read_unlock();

	if (cnt == 0)
		pr_info_ratelimited("l4x-evict: Found no process to free.\n");
}
/*
 * destroy_context() - arch hook run when an mm is torn down.
 *
 * After the generic arch teardown, deletes the L4 task associated with
 * @mm (if any) and recycles its capability number.  A failed deletion
 * is fatal for the caller (do_exit).
 */
void destroy_context(struct mm_struct *mm)
{
	l4_cap_idx_t cap;

	destroy_context_origarch(mm);

	/* Nothing to do without an mm or without a valid task cap. */
	if (!mm)
		return;
	cap = mm->context.task;
	if (!cap || l4_is_invalid_cap(cap))
		return;

	if (l4lx_task_delete_task(cap))
		do_exit(9);

	mm->context.task = L4_INVALID_CAP;
	l4lx_task_number_free(cap);
}
/* kernel-internal execve() */ asmlinkage int l4_kernelinternal_execve(const char * file, const char * const * argv, const char * const * envp) { int ret; struct thread_struct *t = ¤t->thread; ASSERT(l4_is_invalid_cap(t->user_thread_id)); /* we are going to become a real user task now, so prepare a real * pt_regs structure. */ /* Enable Interrupts, Set IOPL (needed for X, hwclock etc.) */ t->regs.flags = 0x3200; /* XXX hardcoded */ /* do_execve() will create the user task for us in start_thread() and call set_fs(USER_DS) in flush_thread. I know this sounds strange but there are places in the kernel (kernel/kmod.c) which call execve with parameters inside the kernel. They set fs to KERNEL_DS before calling execve so we can't set it back to USER_DS before execve had a chance to look at the name of the executable. */ ASSERT(segment_eq(get_fs(), KERNEL_DS)); ret = do_execve(file, argv, envp, &t->regs); if (ret < 0) { /* we failed -- become a kernel thread again */ if (!l4_is_invalid_cap(t->user_thread_id)) l4lx_task_number_free(t->user_thread_id); set_fs(KERNEL_DS); t->user_thread_id = L4_INVALID_CAP; return -1; } l4x_user_dispatcher(); /* not reached */ return 0; }