PUBLIC
L4_msg_tag
Irq_muxer::kinvoke(L4_obj_ref, L4_fpage::Rights /*rights*/, Syscall_frame *f,
                   Utcb const *utcb, Utcb *)
{
  // Kernel-object invocation entry point for the IRQ multiplexer:
  // validate the message, then dispatch on the opcode in values[0].
  Context *const c_thread = ::current();
  assert_opt (c_thread);
  Space *const c_space = c_thread->space();
  assert_opt (c_space);

  L4_msg_tag const tag = f->tag();

  // Only IRQ-protocol calls are accepted, and they must carry at least
  // the opcode word.
  if (EXPECT_FALSE(tag.proto() != L4_msg_tag::Label_irq))
    return commit_result(-L4_err::EBadproto);

  if (EXPECT_FALSE(tag.words() < 1))
    return commit_result(-L4_err::EInval);

  Mword const op = utcb->values[0] & 0xffff;

  if (op == Op_chain)
    return sys_attach(tag, utcb, f, c_space);

  if (op == Op_trigger)
    {
      log();
      hit(0);
      // A software trigger gets no IPC reply.
      return no_reply();
    }

  return commit_result(-L4_err::EInval);
}
L4_msg_tag
Ipc_gate_ctl::bind_thread(L4_obj_ref, L4_fpage::Rights, Syscall_frame *f,
                          Utcb const *in, Utcb *)
{
  // Bind a server thread to this IPC gate.
  // Message layout: values[0] = opcode (consumed by kinvoke),
  // values[1] = label delivered with forwarded IPC, plus one message item
  // carrying the thread capability.
  L4_msg_tag tag = f->tag();
  if (tag.words() < 2)
    return commit_result(-L4_err::EMsgtooshort);

  L4_fpage::Rights t_rights(0);
  Thread *t = Ko::deref<Thread>(&tag, in, &t_rights);
  if (!t)
    return tag; // deref already encoded the error into the tag

  // Binding requires the CS right on the presented thread capability.
  if (!(t_rights & L4_fpage::Rights::CS()))
    return commit_result(-L4_err::EPerm);

  Ipc_gate_obj *g = static_cast<Ipc_gate_obj*>(this);
  g->_id = in->values[1];
  Mem::mp_wmb();      // publish the label before the thread pointer
  t->inc_ref();       // the gate keeps a reference on the bound thread
  // NOTE(review): a previously bound thread's reference does not appear
  // to be dropped here -- confirm rebinding is handled/forbidden elsewhere.
  g->_thread = t;
  Mem::mp_wmb();
  // Release senders blocked on the unbound gate; repeat after an RCU
  // grace period so waiters racing with the pointer update are freed too.
  g->unblock_all();
  current()->rcu_wait();
  g->unblock_all();
  return commit_result(0);
}
PUBLIC
L4_msg_tag
Vlog::kinvoke(L4_obj_ref ref, Mword rights, Syscall_frame *f,
              Utcb const *r_msg, Utcb *s_msg)
{
  // Invocation entry for the kernel console object: IRQ-protocol calls
  // go to the ICU mix-in, log-protocol calls dispatch on values[0].
  L4_msg_tag const t = f->tag();

  if (t.proto() == L4_msg_tag::Label_irq)
    return Icu_h<Vlog>::icu_invoke(ref, rights, f, r_msg, s_msg);
  else if (t.proto() != L4_msg_tag::Label_log)
    return commit_result(-L4_err::EBadproto);

  // Fix: the opcode lives in values[0]; reject messages with no payload
  // words instead of dispatching on stale UTCB contents. The sibling
  // invoke handlers (Irq_muxer, Ipc_gate_ctl) perform the same check.
  if (EXPECT_FALSE(t.words() < 1))
    return commit_result(-L4_err::EInval);

  switch (r_msg->values[0])
    {
    case 0: // write a string to the console
      log_string(f, r_msg);
      return no_reply();
    case 2: // set attr
      return set_attr(rights, f, r_msg);
    case 3: // get attr
      return get_attr(rights, f, s_msg);
    default: // anything else: read console input
      return get_input(rights, f, s_msg);
    }
}
PRIVATE
L4_msg_tag
Irq_sender::sys_attach(L4_msg_tag const &tag, Utcb const *utcb,
                       Syscall_frame * /*f*/, Obj_space *o_space)
{
  // Attach this IRQ to a receiver thread, or detach it.
  // Zero message items means detach; otherwise the first item is expected
  // to carry a thread capability. values[1] supplies the IRQ label.
  L4_snd_item_iter snd_items(utcb, tag.words());
  Thread *thread = 0;

  if (tag.items() == 0)
    {
      // detach
      Reap_list rl;
      free(_irq_thread, rl.list());
      _irq_id = ~0UL; // invalidate the label
      // Reaped objects must be destroyed with the CPU lock released.
      cpu_lock.clear();
      rl.del();
      cpu_lock.lock();
      return commit_result(0);
    }

  if (tag.items() && snd_items.next())
    {
      L4_fpage bind_thread(snd_items.get()->d);
      if (EXPECT_FALSE(!bind_thread.is_objpage()))
        return commit_error(utcb, L4_error::Overflow);

      // Resolve the capability in the caller's object space; non-thread
      // objects yield 0 from the dcast.
      thread = Kobject::dcast<Thread_object*>(o_space->lookup_local(bind_thread.obj_index()));
    }

  if (!thread)
    thread = current_thread(); // no valid cap given: bind to the caller

  if (alloc(thread))
    {
      // NOTE(review): values[1] is read without verifying tag.words() >= 2;
      // confirm all callers guarantee at least two message words.
      _irq_id = utcb->values[1];
      return commit_result(0);
    }

  return commit_result(-L4_err::EInval);
}
PRIVATE
L4_msg_tag
Irq_muxer::sys_attach(L4_msg_tag const &tag, Utcb const *utcb,
                      Syscall_frame * /*f*/, Obj_space *o_space)
{
  // Chain an existing IRQ object into this multiplexer. The first message
  // item must carry an IRQ capability; there is no detach path here.
  L4_snd_item_iter snd_items(utcb, tag.words());
  Irq *irq = 0;

  if (tag.items() == 0)
    return commit_result(-L4_err::EInval); // nothing to chain

  if (tag.items() && snd_items.next())
    {
      L4_fpage bind_irq(snd_items.get()->d);
      if (EXPECT_FALSE(!bind_irq.is_objpage()))
        return commit_error(utcb, L4_error::Overflow);

      irq = Kobject::dcast<Irq*>(o_space->lookup_local(bind_irq.obj_index()));
    }

  if (!irq)
    return commit_result(-L4_err::EInval);

  // Take the IRQ away from any previous owner before chaining it.
  irq->unbind();

  if (!irq->masked())
    {
      // Lock-free increment of _mask_cnt via a CAS retry loop; the counter
      // appears to track the number of unmasked chained sources -- confirm
      // against the mask()/unmask() implementations.
      Smword old;
      do
        old = _mask_cnt;
      while (!mp_cas(&_mask_cnt, old, old + 1));
    }

  bind(irq, 0);

  // Push the IRQ onto the muxer's intrusive singly linked source chain.
  irq->Irq_base::_next = Irq_base::_next;
  Irq_base::_next = irq;
  return commit_result(0);
}
/**
 * Append a human-readable rendering of an IPC message tag to a buffer.
 *
 * Known protocols print as "<name><low 16 tag bits in hex>", anything
 * else as the raw tag value. Advances \a buf and shrinks \a maxlen by
 * the number of characters actually emitted; does nothing when the
 * buffer is already full.
 */
static void
tag_interpreter_snprintf(char *&buf, int &maxlen, L4_msg_tag const &tag)
{
  if (maxlen <= 0)
    return;

  char const *proto_name = tag_to_string(tag);
  int written = proto_name
    ? snprintf(buf, maxlen, "%s%04lx", proto_name, tag.raw() & 0xffff)
    : snprintf(buf, maxlen, L4_PTR_FMT, tag.raw());

  // On error or truncation snprintf reports what it *wanted* to write;
  // clamp to the space actually consumed (excluding the terminator).
  if (written < 0 || written >= maxlen)
    written = maxlen - 1;

  buf += written;
  maxlen -= written;
}
PUBLIC
L4_msg_tag
Ipc_gate_ctl::kinvoke(L4_obj_ref self, L4_fpage::Rights rights,
                      Syscall_frame *f, Utcb const *in, Utcb *out)
{
  // Control-protocol dispatcher for IPC gates: validate the message,
  // then route on the opcode in values[0].
  L4_msg_tag const tag = f->tag();

  // Gate control runs under the generic kernel-object protocol and the
  // message must carry at least the opcode word.
  if (EXPECT_FALSE(tag.proto() != L4_msg_tag::Label_kobject))
    return commit_result(-L4_err::EBadproto);

  if (EXPECT_FALSE(tag.words() < 1))
    return commit_result(-L4_err::EInval);

  Mword const op = in->values[0];

  if (op == Op_bind)
    return bind_thread(self, rights, f, in, out);

  if (op == Op_get_info)
    return get_infos(self, rights, f, in, out);

  // Unknown opcode: fall back to the generic kobject operations.
  return static_cast<Ipc_gate_obj*>(this)
    ->kobject_invoke(self, rights, f, in, out);
}
PRIVATE inline
bool
Task::invoke_arch(L4_msg_tag &tag, Utcb *utcb)
{
  // Architecture-specific task operations. Returns true when the opcode
  // in values[0] was handled here (the result is then in tag), false to
  // let the generic code take over. The trampoline/host-syscall path
  // indicates this is the Fiasco-UX (kernel as Linux process) variant.
  switch (utcb->values[0])
    {
    case Ldt_set_x86:
      {
        enum
          {
            // Number of UTCB message words occupied by one LDT descriptor.
            Utcb_values_per_ldt_entry
              = Cpu::Ldt_entry_size / sizeof(utcb->values[0]),
          };
        // Message layout: values[0] = opcode, values[1] = first LDT entry
        // number, values[2..] = raw descriptors (a whole number of them).
        if (EXPECT_FALSE(tag.words() < 3
                         || tag.words() % Utcb_values_per_ldt_entry))
          {
            tag = commit_result(-L4_err::EInval);
            return true;
          }

        unsigned entry_number = utcb->values[1];
        unsigned idx = 2;
        Mword *trampoline_page = (Mword *)Kmem::kernel_trampoline_page;

        for (; idx < tag.words()
             ; idx += Utcb_values_per_ldt_entry, ++entry_number)
          {
            Gdt_entry *d = (Gdt_entry *)&utcb->values[idx];
            // Descriptors with a zero limit are skipped (presumably
            // "empty slot" markers -- confirm with the userland ABI).
            if (!d->limit())
              continue;

            // Repack the GDT-format descriptor into the Linux
            // modify_ldt user_desc layout.
            Ldt_user_desc info;
            info.entry_number    = entry_number;
            info.base_addr       = d->base();
            info.limit           = d->limit();
            info.seg_32bit       = d->seg32();
            info.contents        = d->contents();
            info.read_exec_only  = !d->writable();
            info.limit_in_pages  = d->granularity();
            info.seg_not_present = !d->present();
            info.useable         = d->avl();

            // Set up data on trampoline
            for (unsigned i = 0; i < sizeof(info) / sizeof(Mword); i++)
              *(trampoline_page + i + 1) = *(((Mword *)&info) + i);

            // Call modify_ldt for given user process
            Trampoline::syscall(pid(), __NR_modify_ldt,
                                1, // write LDT
                                Mem_layout::Trampoline_page + sizeof(Mword),
                                sizeof(info));

            // Also set this for the fiasco kernel so that
            // segment registers can be set, this is necessary for signal
            // handling, esp. for sigreturn to work in the Fiasco kernel
            // with the context of the client (gs/fs values).
            if (*(trampoline_page + 1))
              Emulation::modify_ldt(*(trampoline_page + 1), // entry
                                    0, // base
                                    1); // size
          }
      }
      return true;
    }
  return false;
}
PRIVATE inline L4_msg_tag Thread_object::sys_vcpu_resume(L4_msg_tag const &tag, Utcb *utcb) { if (this != current() || !(state() & Thread_vcpu_enabled)) return commit_result(-L4_err::EInval); Space *s = space(); Vcpu_state *vcpu = vcpu_state().access(true); L4_obj_ref user_task = vcpu->user_task; if (user_task.valid()) { L4_fpage::Rights task_rights = L4_fpage::Rights(0); Task *task = Kobject::dcast<Task*>(s->lookup_local(user_task.cap(), &task_rights)); if (EXPECT_FALSE(task && !(task_rights & L4_fpage::Rights::W()))) return commit_result(-L4_err::EPerm); if (task != vcpu_user_space()) vcpu_set_user_space(task); vcpu->user_task = L4_obj_ref(); } else if (user_task.op() == L4_obj_ref::Ipc_reply) vcpu_set_user_space(0); L4_snd_item_iter snd_items(utcb, tag.words()); int items = tag.items(); if (vcpu_user_space()) for (; items && snd_items.more(); --items) { if (EXPECT_FALSE(!snd_items.next())) break; Lock_guard<Lock> guard; if (!guard.check_and_lock(&static_cast<Task *>(vcpu_user_space())->existence_lock)) return commit_result(-L4_err::ENoent); cpu_lock.clear(); L4_snd_item_iter::Item const *const item = snd_items.get(); L4_fpage sfp(item->d); Reap_list rl; L4_error err = fpage_map(space(), sfp, vcpu_user_space(), L4_fpage::all_spaces(), item->b, &rl); rl.del(); cpu_lock.lock(); if (EXPECT_FALSE(!err.ok())) return commit_error(utcb, err); } if ((vcpu->_saved_state & Vcpu_state::F_irqs) && (vcpu->sticky_flags & Vcpu_state::Sf_irq_pending)) { assert_kdb(cpu_lock.test()); do_ipc(L4_msg_tag(), 0, 0, true, 0, L4_timeout_pair(L4_timeout::Zero, L4_timeout::Zero), &vcpu->_ipc_regs, L4_fpage::Rights::FULL()); vcpu = vcpu_state().access(true); if (EXPECT_TRUE(!vcpu->_ipc_regs.tag().has_error() || this->utcb().access(true)->error.error() == L4_error::R_timeout)) { vcpu->_ts.set_ipc_upcall(); Address sp; // tried to resume to user mode, so an IRQ enters from user mode if (vcpu->_saved_state & Vcpu_state::F_user_mode) sp = vcpu->_entry_sp; else sp = vcpu->_ts.sp(); 
arch_load_vcpu_kern_state(vcpu, true); LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log, l->type = 4; l->state = vcpu->state; l->ip = vcpu->_entry_ip; l->sp = sp; l->space = static_cast<Task*>(_space.vcpu_aware())->dbg_id(); );