/* Switch the clockevent between one-shot/shutdown and periodic operation
 * by stopping or (re)starting the external timer thread. */
static void timer_set_mode(enum clock_event_mode mode,
                           struct clock_event_device *clk)
{
	const l4timer_time_t increment = 1000000 / HZ;
	int r;

	switch (mode) {
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_ONESHOT:
		r = L4XV_FN_i(l4_error(l4timer_stop(timer_srv)));
		if (r)
			printk(KERN_WARNING "l4timer: stop failed (%d)\n", r);
		/* Drain any trigger that might already be pending. */
		while (L4XV_FN_i(l4_ipc_error(l4_irq_receive(timer_irq_cap,
		                                             L4_IPC_BOTH_TIMEOUT_0),
		                              l4_utcb())) != L4_IPC_RETIMEOUT)
			;
		break;
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_RESUME:
		r = L4XV_FN_i(l4_error(l4timer_start(timer_srv, 0,
		                                     l4lx_kinfo->clock,
		                                     increment)));
		if (r)
			printk(KERN_WARNING "l4timer: start failed (%d)\n", r);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		break;
	default:
		printk("l4timer_set_mode: Unknown mode %d\n", mode);
		break;
	}
}
void l4x_exit_thread(void)
{
#ifndef CONFIG_L4_VCPU
	int i;

	if (unlikely(current->thread.is_hybrid)) {
		l4_cap_idx_t hybgate;
		l4_msgtag_t tag;
		l4_umword_t o = 0;

		hybgate = L4LX_KERN_CAP_HYBRID_BASE
		          + (current->pid << L4_CAP_SHIFT);

		tag = l4_ipc_gate_get_infos(hybgate, &o);
		if (l4_error(tag))
			printk("hybrid: Could not get gate info, leaking mem.\n");
		else
			kfree((void *)o);

		tag = l4_task_unmap(L4_BASE_TASK_CAP,
		                    l4_obj_fpage(hybgate, 0, L4_FPAGE_RWX),
		                    L4_FP_ALL_SPACES);
		if (l4_error(tag))
			printk("hybrid: Delete of gate failed.\n");
	}

	for (i = 0; i < NR_CPUS; i++) {
		l4_cap_idx_t thread_id = current->thread.user_thread_ids[i];

		/* check if we were a non-user thread (i.e., have no
		 * user-space partner) */
		if (unlikely(l4_is_invalid_cap(thread_id) || !thread_id))
			continue;

#ifdef DEBUG
		LOG_printf("exit_thread: trying to delete %s(%d, "
		           PRINTF_L4TASK_FORM ")\n",
		           current->comm, current->pid,
		           PRINTF_L4TASK_ARG(thread_id));
#endif

		/* If task_delete fails we don't free the task number so that
		 * it won't be used again. */
		if (likely(!l4lx_task_delete_thread(thread_id))) {
			l4x_hybrid_remove(current);
			current->thread.user_thread_ids[i] = L4_INVALID_CAP;
			l4lx_task_number_free(thread_id);
			current->thread.started = 0;
		} else
			printk("%s: failed to delete task " PRINTF_L4TASK_FORM "\n",
			       __func__, PRINTF_L4TASK_ARG(thread_id));
	}
#endif

#ifdef CONFIG_X86_DS
	ds_exit_thread(current);
#endif
}
/* Enter the guest: copy the VMCB into the vCPU's extended state area,
 * resume into the VM, and copy the resulting state back for KVM. */
int l4x_kvm_svm_run(struct kvm_vcpu *kvcpu, unsigned long vmcb)
{
	l4_msgtag_t tag;
	unsigned cpu;
	unsigned long orig_state, orig_saved_state;
	l4_vcpu_state_t *vcpu;
	L4XV_V(f);

	L4XV_L(f);
	cpu = smp_processor_id();
	vcpu = l4x_vcpu_state(cpu);

	orig_state = vcpu->state;
	vcpu->state = L4_VCPU_F_FPU_ENABLED;
	orig_saved_state = vcpu->saved_state;
	vcpu->saved_state = L4_VCPU_F_USER_MODE | L4_VCPU_F_FPU_ENABLED;
	vcpu->user_task = kvcpu->kvm->arch.l4vmcap;

	l4x_kvm_kvm_to_l4vcpu(kvcpu, vcpu);
	memcpy((char *)vcpu + L4_VCPU_OFFSET_EXT_STATE, (void *)vmcb,
	       L4_PAGESIZE - L4_VCPU_OFFSET_EXT_STATE);

	tag = l4_thread_vcpu_resume_start();
	tag = l4_thread_vcpu_resume_commit(L4_INVALID_CAP, tag);

	l4x_kvm_l4vcpu_to_kvm(vcpu, kvcpu);
	memcpy((void *)vmcb, (char *)vcpu + L4_VCPU_OFFSET_EXT_STATE,
	       L4_PAGESIZE - L4_VCPU_OFFSET_EXT_STATE);

	vcpu->user_task = current->mm->context.task;
	vcpu->state = orig_state;
	vcpu->saved_state = orig_saved_state;

	if (l4_error(tag) < 0) {
		L4XV_U(f);
		printk("%s: vm run failed with %ld\n",
		       __func__, l4_error(tag));
		return 1;
	}
	L4XV_U(f);

	return 0;
}
static void l4x_flush_page(struct mm_struct *mm,
                           unsigned long address,
                           unsigned long vaddr,
                           int size,
                           unsigned long flush_rights,
                           unsigned long caller)
{
	l4_msgtag_t tag;

	if (IS_ENABLED(CONFIG_ARM))
		return;

	if (mm && mm->context.l4x_unmap_mode == L4X_UNMAP_MODE_SKIP)
		return;

	if ((address & PAGE_MASK) == 0)
		address = PAGE0_PAGE_ADDRESS;

	if (likely(mm)) {
		unmap_log_add(mm, vaddr, size, flush_rights, caller);
		return;
	}

	/* do the real flush */
	if (mm && !l4_is_invalid_cap(mm->context.task)) {
		/* Direct flush in the child, use virtual address in the
		 * child address space */
		tag = L4XV_FN(l4_msgtag_t,
		              l4_task_unmap(mm->context.task,
		                            l4_fpage(vaddr & PAGE_MASK, size,
		                                     flush_rights),
		                            L4_FP_ALL_SPACES));
	} else {
		/* Flush all pages in all childs using the 'physical'
		 * address known in the Linux server */
		tag = L4XV_FN(l4_msgtag_t,
		              l4_task_unmap(L4RE_THIS_TASK_CAP,
		                            l4_fpage(address & PAGE_MASK, size,
		                                     flush_rights),
		                            L4_FP_OTHER_SPACES));
	}

	if (l4_error(tag))
		l4x_printf("l4_task_unmap error %ld\n", l4_error(tag));
}
/* Replay the per-CPU unmap log and issue the deferred l4_task_unmap calls. */
void l4x_unmap_log_flush(void)
{
	unsigned i;
	struct unmap_log_t *log;
	unsigned long flags;

	local_irq_save(flags);

	log = this_cpu_ptr(&unmap_log);

	for (i = 0; i < log->cnt; ++i) {
		l4_msgtag_t tag;
		struct mm_struct *mm = log->log[i].mm;

		if (unlikely(l4_is_invalid_cap(mm->context.task)))
			continue;

		tag = L4XV_FN(l4_msgtag_t,
		              l4_task_unmap(mm->context.task,
		                            l4_fpage(log->log[i].addr,
		                                     log->log[i].size,
		                                     log->log[i].rights),
		                            L4_FP_ALL_SPACES));
		if (unlikely(l4_error(tag))) {
			l4x_printf("l4_task_unmap error %ld: t=%lx\n",
			           l4_error(tag), mm->context.task);
			WARN_ON(1);
		} else if (0)
			l4x_printf("flushing(%d) %lx:%08lx[%d,%x]\n",
			           i, mm->context.task, log->log[i].addr,
			           log->log[i].size, log->log[i].rights);
	}

	log->cnt = 0;

	local_irq_restore(flags);
}
/* Unmap a single page from the Linux server's own address space. */
static inline void l4x_unmap_self(unsigned long a)
{
	l4_msgtag_t t;

	if (0)
		printk("dma-self-unmap: %08lx\n", a);

	a &= PAGE_MASK;
	t = L4XV_FN(l4_msgtag_t,
	            l4_task_unmap(L4_BASE_TASK_CAP,
	                          l4_fpage(a, PAGE_SHIFT, L4_FPAGE_RWX),
	                          L4_FP_ALL_SPACES));
	if (l4_error(t))
		printk("dma-remap: internal unmapping of %08lx failed\n", a);
}
/* Map the page at src to dst within the Linux server's own address space. */
static inline void l4x_map_self(unsigned long src, unsigned long dst,
                                unsigned mapflags)
{
	l4_msgtag_t t;

	if (0)
		printk("dma-self-map: %08lx -> %08lx [%x]\n",
		       src, dst, mapflags);

	src &= PAGE_MASK;
	dst &= PAGE_MASK;
	t = L4XV_FN(l4_msgtag_t,
	            l4_task_map(L4_BASE_TASK_CAP, L4_BASE_TASK_CAP,
	                        l4_fpage(src, PAGE_SHIFT, L4_FPAGE_RWX),
	                        dst | L4_MAP_ITEM_MAP | mapflags));
	if (l4_error(t))
		printk("dma-remap: internal mapping failed: %08lx -> %08lx\n",
		       src, dst);
}
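/*
 * Hedged usage sketch, not part of the original sources: the two helpers
 * above could be combined to alias a source page at a new virtual address
 * inside the Linux server, e.g. while remapping DMA memory. The function
 * name and the mapflags value of 0 are illustrative assumptions only.
 */
static inline void l4x_remap_self_sketch(unsigned long src, unsigned long dst)
{
	l4x_unmap_self(dst);       /* drop a possibly stale mapping at dst */
	l4x_map_self(src, dst, 0); /* establish the new alias src -> dst */
}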
int main(void)
{
	l4_msgtag_t tag;
#ifdef MEASURE
	l4_cpu_time_t s, e;
#endif
	l4_utcb_t *u = l4_utcb();
	l4_exc_regs_t exc;
	l4_umword_t mr0, mr1;

	printf("Alien feature testing\n");

	l4_debugger_set_object_name(l4re_env()->main_thread, "alientest");

	/* Start alien thread */
	if (l4_is_invalid_cap(alien = l4re_util_cap_alloc()))
		return 1;

	l4_touch_rw(alien_thread_stack, sizeof(alien_thread_stack));

	tag = l4_factory_create_thread(l4re_env()->factory, alien);
	if (l4_error(tag))
		return 1;

	l4_debugger_set_object_name(alien, "alienth");

	l4_thread_control_start();
	l4_thread_control_pager(l4re_env()->main_thread);
	l4_thread_control_exc_handler(l4re_env()->main_thread);
	l4_thread_control_bind((l4_utcb_t *)l4re_env()->first_free_utcb,
	                       L4RE_THIS_TASK_CAP);
	l4_thread_control_alien(1);
	tag = l4_thread_control_commit(alien);
	if (l4_error(tag))
		return 2;

	tag = l4_thread_ex_regs(alien, (l4_umword_t)alien_thread,
	                        (l4_umword_t)alien_thread_stack
	                        + sizeof(alien_thread_stack), 0);
	if (l4_error(tag))
		return 3;

	l4_sched_param_t sp = l4_sched_param(1, 0);
	tag = l4_scheduler_run_thread(l4re_env()->scheduler, alien, &sp);
	if (l4_error(tag))
		return 4;

#ifdef MEASURE
	l4_calibrate_tsc(l4re_kip());
#endif

	/* Pager/Exception loop */
	if (l4_msgtag_has_error(tag = l4_ipc_receive(alien, u, L4_IPC_NEVER))) {
		printf("l4_ipc_receive failed\n");
		return 1;
	}

	memcpy(&exc, l4_utcb_exc(), sizeof(exc));
	mr0 = l4_utcb_mr()->mr[0];
	mr1 = l4_utcb_mr()->mr[1];

	for (;;) {
#ifdef MEASURE
		s = l4_rdtsc();
#endif
		if (l4_msgtag_is_exception(tag)) {
#ifndef MEASURE
			printf("PC=%08lx SP=%08lx Err=%08lx Trap=%lx, %s syscall, SC-Nr: %lx\n",
			       l4_utcb_exc_pc(&exc), exc.sp, exc.err, exc.trapno,
			       (exc.err & 4) ? " after" : "before",
			       exc.err >> 3);
#endif
			tag = l4_msgtag((exc.err & 4) ? 0 : L4_PROTO_ALLOW_SYSCALL,
			                L4_UTCB_EXCEPTION_REGS_SIZE, 0, 0);
		} else
			printf("Umm, non-handled request (like PF): %lx %lx\n",
			       mr0, mr1);

		memcpy(l4_utcb_exc(), &exc, sizeof(exc));

		/* Reply and wait */
		if (l4_msgtag_has_error(tag = l4_ipc_call(alien, u, tag,
		                                          L4_IPC_NEVER))) {
			printf("l4_ipc_call failed\n");
			return 1;
		}

		memcpy(&exc, l4_utcb_exc(), sizeof(exc));
		mr0 = l4_utcb_mr()->mr[0];
		mr1 = l4_utcb_mr()->mr[1];

#ifdef MEASURE
		e = l4_rdtsc();
		printf("time %lld\n", l4_tsc_to_ns(e - s));
#endif
	}
}
/* Timer server loop: waits for start/stop requests and triggers the
 * notification IRQ whenever the programmed absolute timeout expires. */
static void L4_CV timer_thread(void *data)
{
	l4_timeout_t to;
	l4_utcb_t *u = l4_utcb();
	l4_cap_idx_t irq_cap = *(l4_cap_idx_t *)data;
	l4_msgtag_t t;
	l4_umword_t l;
	l4_msg_regs_t *v = l4_utcb_mr_u(u);
	l4timer_time_t increment = 0;
	l4_cpu_time_t next_to = 0;

	enum {
		idx_at = 2,
		idx_increment = idx_at + sizeof(l4timer_time_t) / sizeof(v->mr[0]),
	};

	to = L4_IPC_NEVER;

	t = l4_ipc_wait(u, &l, to);
	while (1) {
		int reply = 1;
		int r = 0;

		if (l4_ipc_error(t, u) == L4_IPC_RETIMEOUT) {
			/* Timeout hit: notify Linux and program the next period. */
			if (l4_error(l4_irq_trigger_u(irq_cap, u)) != -1)
				LOG_printf("IRQ timer trigger failed\n");

			if (increment) {
				next_to += increment;
				to = l4_timeout(L4_IPC_TIMEOUT_0,
				                l4_timeout_abs_u(next_to, 1, u));
			} else
				to = L4_IPC_NEVER;

			reply = 0;
		} else if (l4_error(t) == L4_PROTO_TIMER) {
			switch (v->mr[0]) {
			case L4_TIMER_OP_START:
				next_to = *(l4timer_time_t *)&v->mr[idx_at];
				to = l4_timeout(L4_IPC_TIMEOUT_0,
				                l4_timeout_abs_u(next_to, 1, u));
				increment = *(l4timer_time_t *)&v->mr[idx_increment];
				r = 0;
				break;
			case L4_TIMER_OP_STOP:
				to = L4_IPC_NEVER;
				increment = 0;
				r = 0;
				break;
			default:
				LOG_printf("l4timer: invalid opcode\n");
				r = -ENOSYS;
				break;
			};
		} else
			LOG_printf("l4timer: msg r=%ld\n", l4_error(t));

		t = l4_msgtag(r, 0, 0, 0);
		if (reply)
			t = l4_ipc_reply_and_wait(u, t, &l, to);
		else
			t = l4_ipc_wait(u, &l, to);
	}
}
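/*
 * Hedged sketch, not from the original sources: a client-side l4timer_start()
 * could marshal the request that timer_thread() above unmarshals -- the opcode
 * in mr[0], the absolute start time at idx_at, the period at idx_increment --
 * and send it with the L4_PROTO_TIMER label. It assumes the L4_TIMER_OP_*,
 * L4_PROTO_TIMER and l4timer_time_t definitions used above; the real
 * l4timer_start()/l4timer_stop() called by timer_set_mode() may differ.
 */
static inline l4_msgtag_t
l4timer_start_sketch(l4_cap_idx_t timer, l4timer_time_t at,
                     l4timer_time_t increment)
{
	l4_utcb_t *u = l4_utcb();
	l4_msg_regs_t *v = l4_utcb_mr_u(u);
	enum {
		idx_at        = 2,
		idx_increment = idx_at + sizeof(l4timer_time_t) / sizeof(v->mr[0]),
		words         = idx_increment + sizeof(l4timer_time_t) / sizeof(v->mr[0]),
	};

	v->mr[0] = L4_TIMER_OP_START;                          /* opcode */
	*(l4timer_time_t *)&v->mr[idx_at] = at;                /* absolute first expiry */
	*(l4timer_time_t *)&v->mr[idx_increment] = increment;  /* period, 0 = one-shot */
	return l4_ipc_call(timer, u, l4_msgtag(L4_PROTO_TIMER, words, 0, 0),
	                   L4_IPC_NEVER);
}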
static int __init l4x_timer_init_ret(void)
{
	int r;
	l4lx_thread_t thread;
	int irq;
	L4XV_V(f);

	timer_irq_cap = l4x_cap_alloc();
	if (l4_is_invalid_cap(timer_irq_cap)) {
		printk(KERN_ERR "l4timer: Failed to alloc\n");
		return -ENOMEM;
	}

	r = L4XV_FN_i(l4_error(l4_factory_create_irq(l4re_env()->factory,
	                                             timer_irq_cap)));
	if (r) {
		printk(KERN_ERR "l4timer: Failed to create irq: %d\n", r);
		goto out1;
	}

	if ((irq = l4x_register_irq(timer_irq_cap)) < 0) {
		r = -ENOMEM;
		goto out2;
	}

	printk("l4timer: Using IRQ%d\n", irq);

	setup_irq(irq, &l4timer_irq);

	L4XV_L(f);
	thread = l4lx_thread_create
	           (timer_thread,                          /* thread function */
	            smp_processor_id(),                    /* cpu */
	            NULL,                                  /* stack */
	            &timer_irq_cap, sizeof(timer_irq_cap), /* data */
	            l4x_cap_alloc(),                       /* cap */
	            PRIO_TIMER,                            /* prio */
	            0,                                     /* vcpup */
	            "timer",                               /* name */
	            NULL);
	L4XV_U(f);

	timer_srv = l4lx_thread_get_cap(thread);

	if (!l4lx_thread_is_valid(thread)) {
		printk(KERN_ERR "l4timer: Failed to create thread\n");
		r = -ENOMEM;
		goto out3;
	}

	l4timer_clockevent.irq = irq;
	l4timer_clockevent.mult =
		div_sc(1000000, NSEC_PER_SEC, l4timer_clockevent.shift);
	l4timer_clockevent.max_delta_ns =
		clockevent_delta2ns(0xffffffff, &l4timer_clockevent);
	l4timer_clockevent.min_delta_ns =
		clockevent_delta2ns(0xf, &l4timer_clockevent);
	l4timer_clockevent.cpumask = cpumask_of(0);
	clockevents_register_device(&l4timer_clockevent);

	return 0;

out3:
	l4x_unregister_irq(irq);
out2:
	L4XV_FN_v(l4_task_delete_obj(L4RE_THIS_TASK_CAP, timer_irq_cap));
out1:
	l4x_cap_free(timer_irq_cap);
	return r;
}
static void l4x_flush_page(struct mm_struct *mm,
                           unsigned long address,
                           unsigned long vaddr,
                           int size,
                           unsigned long flush_rights)
{
	l4_msgtag_t tag;

	if (mm && mm->context.l4x_unmap_mode == L4X_UNMAP_MODE_SKIP)
		return;

	/* some checks: */
	if (address > 0x80000000UL) {
		unsigned long remap;
		remap = find_ioremap_entry(address);

		/* VU: it may happen, that memory is not remapped but mapped in
		 * user space, if a task mmaps /dev/mem but never accesses it.
		 * Therefore, we fail silently... */
		if (!remap)
			return;

		address = remap;
	} else if ((address & PAGE_MASK) == 0)
		address = PAGE0_PAGE_ADDRESS;
#if 0
	/* only for debugging */
	else {
		if ((address >= (unsigned long)high_memory)
		    && (address < 0x80000000UL)) {
			printk("flushing non physical page (0x%lx)\n", address);
			enter_kdebug("flush_page: non physical page");
		}
	}
#endif

	/* do the real flush */
	if (mm && !l4_is_invalid_cap(mm->context.task)) {
		L4XV_V(f);
		if (!mm->context.task)
			l4x_printf("%s: Ups, task == 0\n", __func__);

		/* Direct flush in the child, use virtual address in the
		 * child address space */
		L4XV_L(f);
		tag = l4_task_unmap(mm->context.task,
		                    l4_fpage(vaddr & PAGE_MASK, size, flush_rights),
		                    L4_FP_ALL_SPACES);
		L4XV_U(f);
	} else {
		L4XV_V(f);
		/* Flush all pages in all childs using the 'physical'
		 * address known in the Linux server */
		L4XV_L(f);
		tag = l4_task_unmap(L4RE_THIS_TASK_CAP,
		                    l4_fpage(address & PAGE_MASK, size, flush_rights),
		                    L4_FP_OTHER_SPACES);
		L4XV_U(f);
	}

	if (l4_error(tag))
		l4x_printf("l4_task_unmap error %ld\n", l4_error(tag));
}
/* Our main function */
int main(void)
{
	/* Get a capability slot for our new thread. */
	l4_cap_idx_t t1 = l4re_util_cap_alloc();
	l4_utcb_t *u = l4_utcb();
	l4_exc_regs_t *e = l4_utcb_exc_u(u);
	l4_msgtag_t tag;
	int err;

	printf("Example showing how to start a thread with an exception.\n");

	/* We do not want to implement a pager here, take the shortcut. */
	printf("Make sure to start this program with ldr-flags=eager_map\n");

	if (l4_is_invalid_cap(t1))
		return 1;

	/* Create the thread using our default factory */
	tag = l4_factory_create_thread(l4re_env()->factory, t1);
	if (l4_error(tag))
		return 1;

	/* Setup the thread by setting the pager and task. */
	l4_thread_control_start();
	l4_thread_control_pager(l4re_env()->main_thread);
	l4_thread_control_exc_handler(l4re_env()->main_thread);
	l4_thread_control_bind((l4_utcb_t *)l4re_env()->first_free_utcb,
	                       L4RE_THIS_TASK_CAP);
	tag = l4_thread_control_commit(t1);
	if (l4_error(tag))
		return 2;

	/* Start the thread by finally setting instruction and stack pointer */
	tag = l4_thread_ex_regs(t1, (l4_umword_t)thread,
	                        (l4_umword_t)thread_stack + sizeof(thread_stack),
	                        L4_THREAD_EX_REGS_TRIGGER_EXCEPTION);
	if (l4_error(tag))
		return 3;

	l4_sched_param_t sp = l4_sched_param(1, 0);
	tag = l4_scheduler_run_thread(l4re_env()->scheduler, t1, &sp);
	if (l4_error(tag))
		return 4;

	/* Receive initial exception from just started thread */
	tag = l4_ipc_receive(t1, u, L4_IPC_NEVER);
	if ((err = l4_ipc_error(tag, u))) {
		printf("Umm, ipc error: %x\n", err);
		return 1;
	}

	/* We expect an exception IPC */
	if (!l4_msgtag_is_exception(tag)) {
		printf("PF?: %lx %lx (not prepared to handle this) %ld\n",
		       l4_utcb_mr_u(u)->mr[0], l4_utcb_mr_u(u)->mr[1],
		       l4_msgtag_label(tag));
		return 1;
	}

	/* Fill out the complete register set of the new thread */
	e->sp = (l4_umword_t)(thread_stack + sizeof(thread_stack));
#ifdef ARCH_x86
	e->ip = (l4_umword_t)thread;
	e->eax = 1;
	e->ebx = 4;
	e->ecx = 2;
	e->edx = 3;
	e->esi = 6;
	e->edi = 7;
	e->ebp = 5;
#endif
#ifdef ARCH_arm
	e->pc = (l4_umword_t)thread;
	e->r[0] = 0;
	e->r[1] = 1;
	e->r[2] = 2;
	e->r[3] = 3;
	e->r[4] = 4;
	e->r[5] = 5;
	e->r[6] = 6;
	e->r[7] = 7;
#endif

	/* Send a complete exception */
	tag = l4_msgtag(0, L4_UTCB_EXCEPTION_REGS_SIZE, 0, 0);

	/* Send reply and start the thread with the defined CPU register set */
	tag = l4_ipc_send(t1, u, tag, L4_IPC_NEVER);
	if ((err = l4_ipc_error(tag, u)))
		printf("Error sending IPC: %x\n", err);

	/* Idle around */
	while (1)
		l4_sleep(10000);

	return 0;
}
/* Our main function */
int main(void)
{
	/* Get a capability slot for our new thread. */
	l4_cap_idx_t t1 = l4re_util_cap_alloc();
	l4_utcb_t *u = l4_utcb();
	l4_exc_regs_t *e = l4_utcb_exc_u(u);
	l4_msgtag_t tag;
	int err;
	extern char _start[], _end[], _sdata[];

	if (l4_is_invalid_cap(t1))
		return 1;

	/* Prevent pagefaults of our new thread because we do not want to
	 * implement a pager as well. */
	l4_touch_ro(_start, _sdata - _start + 1);
	l4_touch_rw(_sdata, _end - _sdata);

	/* Create the thread using our default factory */
	tag = l4_factory_create_thread(l4re_env()->factory, t1);
	if (l4_error(tag))
		return 1;

	/* Setup the thread by setting the pager and task. */
	l4_thread_control_start();
	l4_thread_control_pager(l4re_env()->main_thread);
	l4_thread_control_exc_handler(l4re_env()->main_thread);
	l4_thread_control_bind((l4_utcb_t *)l4re_env()->first_free_utcb,
	                       L4RE_THIS_TASK_CAP);
	tag = l4_thread_control_commit(t1);
	if (l4_error(tag))
		return 2;

	/* Start the thread by finally setting instruction and stack pointer */
	tag = l4_thread_ex_regs(t1, (l4_umword_t)thread,
	                        (l4_umword_t)thread_stack + sizeof(thread_stack),
	                        L4_THREAD_EX_REGS_TRIGGER_EXCEPTION);
	if (l4_error(tag))
		return 3;

	l4_sched_param_t sp = l4_sched_param(1, 0);
	tag = l4_scheduler_run_thread(l4re_env()->scheduler, t1, &sp);
	if (l4_error(tag))
		return 4;

	/* Receive initial exception from just started thread */
	tag = l4_ipc_receive(t1, u, L4_IPC_NEVER);
	if ((err = l4_ipc_error(tag, u))) {
		printf("Umm, ipc error: %x\n", err);
		return 1;
	}

	/* We expect an exception IPC */
	if (!l4_msgtag_is_exception(tag)) {
		printf("PF?: %lx %lx (not prepared to handle this) %ld\n",
		       l4_utcb_mr_u(u)->mr[0], l4_utcb_mr_u(u)->mr[1],
		       l4_msgtag_label(tag));
		return 1;
	}

	/* Fill out the complete register set of the new thread */
	e->ip = (l4_umword_t)thread;
	e->sp = (l4_umword_t)(thread_stack + sizeof(thread_stack));
	e->eax = 1;
	e->ebx = 4;
	e->ecx = 2;
	e->edx = 3;
	e->esi = 6;
	e->edi = 7;
	e->ebp = 5;

	/* Send a complete exception */
	tag = l4_msgtag(0, L4_UTCB_EXCEPTION_REGS_SIZE, 0, 0);

	/* Send reply and start the thread with the defined CPU register set */
	tag = l4_ipc_send(t1, u, tag, L4_IPC_NEVER);
	if ((err = l4_ipc_error(tag, u)))
		printf("Error sending IPC: %x\n", err);

	/* Idle around */
	while (1)
		l4_sleep(10000);

	return 0;
}