IMPLEMENT
Vm *
Vm_factory::create(Ram_quota *quota)
{
  // Select the VM implementation matching the hardware virtualization
  // feature available on the current CPU: AMD SVM is probed first,
  // then Intel VMX.  Without either extension no VM can be created.
  Vm *vm = 0;

  if (Svm::cpus.cpu(current_cpu()).svm_enabled())
    vm = allocate<Vm_svm>(quota);
  else if (Vmx::cpus.cpu(current_cpu()).vmx_enabled())
    vm = allocate<Vm_vmx>(quota);

  return vm;
}
PUBLIC static
int
Vkey::check_()
{
  // Drain all pending characters from the kernel console, translating
  // special keys into ANSI/VT escape sequences and feeding them into the
  // virtual-key buffer via add().
  // Returns 0 when a debugger-entry key was seen (caller should enter the
  // kernel debugger), 1 otherwise.
  int ret = 1;
  bool hit = false;

  // disable last branch recording, branch trace recording ...
  Cpu::cpus.cpu(current_cpu()).debugctl_disable();

  while (1)
    {
      int c = Kconsole::console()->getchar(false); // non-blocking read
      if (c == -1)
        break; // input queue empty

      if (is_debugger_entry_key(c))
        {
          ret = 0; // break into kernel debugger
          break;
        }

      // Map special keys to their terminal escape sequences; everything
      // else is passed through unchanged.
      switch (c)
        {
        case KEY_CURSOR_UP:    hit |= add("\033[A");  break;
        case KEY_CURSOR_DOWN:  hit |= add("\033[B");  break;
        case KEY_CURSOR_LEFT:  hit |= add("\033[D");  break;
        case KEY_CURSOR_RIGHT: hit |= add("\033[C");  break;
        case KEY_CURSOR_HOME:  hit |= add("\033[1~"); break;
        case KEY_CURSOR_END:   hit |= add("\033[4~"); break;
        case KEY_PAGE_UP:      hit |= add("\033[5~"); break;
        case KEY_PAGE_DOWN:    hit |= add("\033[6~"); break;
        case KEY_INSERT:       hit |= add("\033[2~"); break;
        case KEY_DELETE:       hit |= add("\033[3~"); break;
        case KEY_F1:           hit |= add("\033OP");  break;
        case KEY_BACKSPACE:    hit |= add(127);       break;
        case KEY_TAB:          hit |= add(9);         break;
        case KEY_ESC:          hit |= add(27);        break;
        case KEY_RETURN:       hit |= add(13);        break;
        default:               hit |= add(c);         break;
        }
    }

  if (hit)
    trigger(); // notify consumers that new input is buffered

  // reenable debug stuff (undo debugctl_disable)
  Cpu::cpus.cpu(current_cpu()).debugctl_enable();

  return ret;
}
void
Fpu::init_state(Fpu_state *s)
{
  // Initialize an FPU state buffer to the architectural power-on
  // defaults, using the FXSAVE (SSE) layout when the CPU supports it
  // and the legacy FSAVE layout otherwise.
  Cpu const &cpu = Cpu::cpus.cpu(current_cpu());

  if (cpu.features() & FEAT_FXSR)
    {
      static_assert(sizeof (sse_regs) == 512, "SSE-regs size not 512 bytes");
      assert (_state_size >= sizeof (sse_regs));

      sse_regs *fx = reinterpret_cast<sse_regs *>(s->state_buffer());
      memset(fx, 0, sizeof (*fx));
      fx->cwd = 0x37f; // default x87 control word

      if (cpu.features() & FEAT_SSE)
        fx->mxcsr = 0x1f80; // default MXCSR: all exceptions masked

      // With XSAVE support the state buffer carries an extra header that
      // must start out zeroed.
      if (cpu.ext_features() & FEATX_XSAVE)
        memset(reinterpret_cast<Xsave_buffer *>(s->state_buffer())->header,
               0, sizeof (((Xsave_buffer *) 0)->header));
    }
  else
    {
      fpu_regs *legacy = reinterpret_cast<fpu_regs *>(s->state_buffer());
      assert (_state_size >= sizeof (*legacy));

      memset(legacy, 0, sizeof (*legacy));
      legacy->cwd = 0xffff037f;
      legacy->swd = 0xffff0000;
      legacy->twd = 0xffffffff;
      legacy->fos = 0xffff0000;
    }
}
Obj_space_virt<SPACE>::v_delete(V_pfn virt, Order size,
                                L4_fpage::Rights page_attribs)
{
  // Delete or downgrade the capability entry at `virt`.
  // Only single-slot operations are supported here (order-0 pages).
  (void)size;
  assert (size == Order(0));
  Entry *c;

  // Fast path: when this object space belongs to the currently active
  // memory space, the slot can be read through its virtual mapping.
  if (Optimize_local
      && SPACE::mem_space(this) == Mem_space::current_mem_space(current_cpu()))
    {
      c = cap_virt(virt);
      if (!c)
        return L4_fpage::Rights(0);

      // read_special_safe tolerates an unmapped slot; an invalid
      // capability means there is nothing to delete.
      Capability cap = Mem_layout::read_special_safe((Capability*)c);
      if (!cap.valid())
        return L4_fpage::Rights(0);
    }
  else
    c = get_cap(virt); // slow path: explicit lookup in a foreign space

  if (c && c->valid())
    {
      // Revoking the R right invalidates the whole entry; otherwise only
      // the requested subset of the C/W/S/D rights is removed.
      if (page_attribs & L4_fpage::Rights::R())
        c->invalidate();
      else
        c->del_rights(page_attribs & L4_fpage::Rights::CWSD());
    }

  return L4_fpage::Rights(0);
}
PRIVATE inline bool NO_INSTRUMENT
Switch_lock::set_lock_owner(Context *o)
{
  // Try to install `o` as the owner of this lock.
  // Returns true on success, false when another context won the race
  // for _lock_owner.
  bool have_no_locks = access_once(&o->_lock_cnt) < 1;

  if (have_no_locks)
    {
      assert_kdb (current_cpu() == o->home_cpu());
      // This would be `o`s first lock: spin until we can atomically mark
      // it as running under a lock.
      for (;;)
        {
          if (EXPECT_FALSE(access_once(&o->_running_under_lock)))
            continue;
          if (EXPECT_TRUE(mp_cas(&o->_running_under_lock, Mword(false),
                                 Mword(true))))
            break;
        }
    }
  else
    // `o` already holds locks, so the flag must already be set.
    assert_kdb (o->_running_under_lock);

  // Make the _running_under_lock update visible before publishing
  // ourselves as lock owner.
  Mem::mp_wmb();

  if (EXPECT_FALSE(!mp_cas(&_lock_owner, Mword(0), Address(o))))
    {
      // Lost the race for the lock: roll back the marking we did above.
      if (have_no_locks)
        {
          Mem::mp_wmb();
          write_now(&o->_running_under_lock, Mword(false));
        }
      return false;
    }
  return true;
}
unsigned long
Generic_obj_space<SPACE>::v_delete(Page_number virt, Size size,
                                   unsigned long page_attribs = L4_fpage::CRWSD)
{
  // Delete or downgrade the capability entry at `virt`.
  // Only single-slot operations are supported (size must be 1).
  // Always returns 0 (no page attributes to report).
  (void)size;
  assert (size.value() == 1);
  Entry *c;

  // Fast path: the slot of the currently active space can be read
  // through its virtual mapping.
  if (Optimize_local
      && mem_space() == Mem_space::current_mem_space(current_cpu()))
    {
      c = cap_virt(virt.value());
      if (!c)
        return 0;

      // Safe read tolerates an unmapped slot; invalid means nothing to do.
      Capability cap = Mem_layout::read_special_safe((Capability*)c);
      if (!cap.valid())
        return 0;
    }
  else
    c = get_cap(virt.value()); // slow path for foreign spaces

  if (c && c->valid())
    {
      // Revoking R invalidates the whole entry, otherwise only the
      // requested subset of the C/W/S/D rights is removed.
      if (page_attribs & L4_fpage::R)
        c->invalidate();
      else
        c->del_rights(page_attribs & L4_fpage::CWSD);
    }

  return 0;
}
bool
Generic_obj_space<SPACE>::v_lookup(Addr const &virt, Phys_addr *phys = 0,
                                   Size *size = 0, unsigned *attribs = 0)
{
  // Look up the capability at `virt`; optionally report the referenced
  // object (*phys), mapping size (*size), and rights (*attribs).
  // Returns true iff a valid capability is mapped at `virt`.
  if (size)
    size->set_value(1); // capabilities are single-slot mappings

  Entry *cap;

  // Fast path: read the slot through its virtual mapping when this is
  // the currently active space.
  if (Optimize_local
      && mem_space() == Mem_space::current_mem_space(current_cpu()))
    cap = cap_virt(virt.value());
  else
    cap = get_cap(virt.value());

  if (EXPECT_FALSE(!cap))
    {
      // No capability page mapped here: report the whole unmapped area.
      if (size)
        size->set_value(Caps_per_page);
      return false;
    }

  if (Optimize_local)
    {
      // Safe read through the virtual mapping (tolerates faults).
      Capability c = Mem_layout::read_special_safe((Capability*)cap);

      if (phys)
        *phys = c.obj();
      if (c.valid() && attribs)
        *attribs = c.rights();

      return c.valid();
    }
  else
    {
      Obj::set_entry(virt, cap);
      if (phys)
        *phys = cap->obj();
      if (cap->valid() && attribs)
        *attribs = cap->rights();
      return cap->valid();
    }
}
PRIVATE
void
Thread::ipc_send_msg(Receiver *recv)
{
  // Transfer the pending IPC message of this sender to `recv`, then
  // update both threads' states and hand over the CPU if appropriate.
  Syscall_frame *regs = _snd_regs;
  bool success = transfer_msg(regs->tag(), nonull_static_cast<Thread*>(recv),
                              regs, _ipc_send_rights);
  sender_dequeue(recv->sender_list());
  recv->vcpu_update_state();
  //printf("  done\n");

  // Store the transfer result (error flag) in the message tag.
  regs->tag(L4_msg_tag(regs->tag(), success ? 0 : L4_msg_tag::Error));

  Mword state_del = Thread_ipc_mask | Thread_ipc_transfer;
  Mword state_add = Thread_ready;
  if (Receiver::prepared())
    // same as in Receiver::prepare_receive_dirty_2
    state_add |= Thread_receive_wait;

  if (cpu() == current_cpu())
    {
      // Local case: modify the state directly and possibly switch to the
      // receiver when it should be deblocked.
      state_change_dirty(~state_del, state_add);
      if (current_sched()->deblock(cpu(), current_sched(), true))
        recv->switch_to_locked(this);
    }
  else
    {
      // Remote case: the state change must travel via DRQ to the
      // thread's home CPU.
      drq_state_change(~state_del, state_add);
      current()->schedule_if(current()->handle_drq());
    }
}
bool
Ipc_sender_base::handle_shortcut(Syscall_frame *dst_regs,
                                 Receiver *receiver)
{
  // Try the IRQ-IPC shortcut: wake the receiver directly instead of
  // running the full IPC path.  Returns true when the shortcut (or a
  // direct switch) was taken, false when the caller must fall back to
  // regular IPC delivery.
  if (EXPECT_TRUE
      ((current() != receiver
        && receiver->sched()->deblock(current_cpu(), current()->sched(), true)
        // avoid race in do_ipc() after Thread_send_in_progress
        // flag was deleted from receiver's thread state
        // also: no shortcut for alien threads, they need to see the
        // after-syscall exception
        && !(receiver->state() & (Thread_ready_mask | Thread_alien))
        && !current()->schedule_in_progress()))) // no schedule in progress
    {
      // we don't need to manipulate the state in a safe way
      // because we are still running with interrupts turned off
      receiver->state_add_dirty(Thread_ready);

      if (!Config::Irq_shortcut)
        {
          // no shortcut: switch to the interrupt thread which will
          // call Irq::ipc_receiver_ready
          current()->switch_to_locked(receiver);
          return true;
        }

      // The following shortcut optimization does not work if PROFILE
      // is defined because fast_ret_from_irq does not handle the
      // different implementation of the kernel lock in profiling mode

      // At this point we are sure that the connected interrupt
      // thread is waiting for the next interrupt and that its
      // thread priority is higher than the current one. So we
      // choose a short cut: Instead of doing the full ipc handshake
      // we simply build up the return stack frame and go out as
      // quick as possible.
      //
      // XXX We must own the kernel lock for this optimization!
      //

      Mword *esp = reinterpret_cast<Mword*>(dst_regs);

      // set return address of irq_thread
      *--esp = reinterpret_cast<Mword>(fast_ret_from_irq);

      // XXX set stack pointer of irq_thread
      receiver->set_kernel_sp(esp);

      // directly switch to the interrupt thread context and go out
      // fast using fast_ret_from_irq (implemented in assembler).
      // kernel-unlock is done in switch_exec() (on switchee's side).

      // no shortcut if profiling: switch to the interrupt thread
      current()->switch_to_locked (receiver);
      return true;
    }
  return false;
}
Unsigned64
Timer::system_clock()
{
  // Return the KIP system clock; CPU 0 additionally refreshes it from
  // the TSC-based time source when configured to do so.
  bool const is_cpu0 = !current_cpu();

  if (is_cpu0 && Config::Kip_timer_uses_rdtsc)
    Kip::k()->clock = Cpu::cpus.cpu(_cpu).time_us();

  return Kip::k()->clock;
}
PRIVATE static
Context::Drq::Result
Irq_sender::handle_remote_hit(Context::Drq *, Context *, void *arg)
{
  // DRQ handler run on the IRQ thread's home CPU after a remote IRQ hit:
  // rebind the IRQ to this CPU and deliver the pending IRQ message.
  // `arg` is the Irq_sender passed to Context::drq() by count_and_send().
  Irq_sender *irq = static_cast<Irq_sender *>(arg); // was a C-style cast
  irq->set_cpu(current_cpu());
  irq->send_msg(irq->_irq_thread);
  return Context::Drq::no_answer();
}
void Timer::update_system_clock() { if (current_cpu()) return; if (Config::kinfo_timer_uses_rdtsc) Kip::k()->clock = Cpu::cpus.cpu(_cpu).time_us(); else Kip::k()->clock += Config::scheduler_granularity; }
// Screen spinner for debugging: increments one CGA VRAM character cell
// per (CPU, IRQ number) so IRQ activity is visible on screen.
// Compiled to a no-op unless CONFIG_IRQ_SPINNER is set.
static inline
void
irq_spinners(int irqnum)
{
#ifdef CONFIG_IRQ_SPINNER
  Unsigned16 *cell = (Unsigned16 *)Mem_layout::Adap_vram_cga_beg;
  // Row 20 + CPU number, column = IRQ number.
  cell += (20 + current_cpu()) * 80 + irqnum;
  if (cell < (Unsigned16 *)Mem_layout::Adap_vram_cga_end)
    ++*cell;
#else
  (void)irqnum;
#endif
}
PRIVATE inline
void
Irq_sender::count_and_send(Smword queued)
{
  // Deliver an IRQ message to the bound thread, but only when this hit
  // is the first one queued (`queued == 0`) and a thread is attached.
  if (EXPECT_TRUE (queued == 0) && EXPECT_TRUE(_irq_thread != 0))
    // increase hit counter
    {
      if (EXPECT_FALSE(_irq_thread->home_cpu() != current_cpu()))
        // The IRQ thread lives on another CPU: deliver via DRQ on its
        // home CPU (handle_remote_hit), without waiting for an answer.
        _irq_thread->drq(&_drq, handle_remote_hit, this, 0,
                         Context::Drq::Target_ctxt, Context::Drq::No_wait);
      else
        send_msg(_irq_thread);
    }
}
typename Obj_space_virt<SPACE>::Capability __attribute__((__flatten__))
Obj_space_virt<SPACE>::lookup(Cap_index virt)
{
  // Fetch the capability stored at index `virt`; returns the void
  // capability when no slot exists.

  // Mask the index down to the bits covered by this capability space.
  virt &= Cap_index(~(~0UL << Whole_space));

  // If this object space belongs to the currently active memory space
  // the slot is reachable through its virtual mapping; otherwise do an
  // explicit lookup.
  bool const active =
    SPACE::mem_space(this) == Mem_space::current_mem_space(current_cpu());

  Capability *slot = active
                     ? reinterpret_cast<Capability*>(cap_virt(virt))
                     : get_cap(virt);

  if (EXPECT_FALSE(!slot))
    return Capability(0); // void

  // Fault-tolerant read: the virtual slot may be unmapped.
  return Mem_layout::read_special_safe(slot);
}
typename Generic_obj_space<SPACE>::Capability
Generic_obj_space<SPACE>::lookup(Address virt)
{
  // Fetch the capability stored at index `virt`; returns the void
  // capability when no slot exists.

  // Restrict the index to the bits covered by this capability space.
  virt &= ~(~0UL << Whole_space);

  // Active space: read through the virtual mapping.  Foreign space:
  // explicit lookup.
  bool const active =
    mem_space() == Mem_space::current_mem_space(current_cpu());

  Capability *slot = active
                     ? reinterpret_cast<Capability*>(cap_virt(virt))
                     : get_cap(virt);

  if (EXPECT_FALSE(!slot))
    return Capability(0); // void

  // Fault-tolerant read: the virtual slot may be unmapped.
  return Mem_layout::read_special_safe(slot);
}
PUBLIC static //inline NEEDS["cpu_lock.h", "globals.h", "lock_guard.h", "logdefs.h"]
void
Rcu::call(Rcu_item *i, bool (*cb)(Rcu_item *))
{
  // Register callback `cb` to be invoked for `i` after the current RCU
  // grace period has elapsed.  The item is queued on the current CPU's
  // RCU data with the CPU lock held.
  i->_call_back = cb;
  LOG_TRACE("Rcu call", "rcu", ::current(), Log_rcu,
      l->cpu   = current_cpu();
      l->event = Rcu_call;
      l->item = i;
      l->cb = (void*)cb);

  // Queueing must not race with interrupts on this CPU.
  auto guard = lock_guard(cpu_lock);

  Rcu_data *rdp = &_rcu_data.current();
  rdp->enqueue(i);
}
typename Generic_obj_space<SPACE>::Status
Generic_obj_space<SPACE>::v_insert(Phys_addr phys, Addr const &virt, Size size,
                                   unsigned char page_attribs)
{
  // Insert a capability for object `phys` with rights `page_attribs`
  // at index `virt`.  Only single-slot insertions are supported.
  // Returns one of the Insert_* status codes.
  (void)size;
  assert (size.value() == 1);
  Entry *c;

  // Fast path for the currently active space: probe the slot through
  // its virtual mapping, allocating a capability page on demand.
  if (Optimize_local
      && mem_space() == Mem_space::current_mem_space(current_cpu()))
    {
      c = cap_virt(virt.value());
      if (!c)
        return Insert_err_nomem;

      Capability cap;
      if (!Mem_layout::read_special_safe((Capability*)c, cap)
          && !caps_alloc(virt.value()))
        return Insert_err_nomem;
    }
  else
    {
      // Slow path for a foreign space: explicit lookup plus on-demand
      // allocation of the capability page.
      c = alien_lookup(virt.value());
      if (!c && !(c = caps_alloc(virt.value())))
        return Insert_err_nomem;
      Obj::set_entry(virt, c);
    }

  if (c->valid())
    {
      if (c->obj() == phys)
        {
          // Same object already mapped: either nothing to add or an
          // upgrade of the rights.
          if (EXPECT_FALSE(c->rights() == page_attribs))
            return Insert_warn_exists;

          c->add_rights(page_attribs);
          return Insert_warn_attrib_upgrade;
        }
      else
        return Insert_err_exists; // slot occupied by a different object
    }

  c->set(phys, page_attribs);
  return Insert_ok;
}
Obj_space_virt<SPACE>::v_insert(Phys_addr phys, V_pfn const &virt, Order size,
                                Attr page_attribs)
{
  // Insert a capability for object `phys` with rights `page_attribs`
  // at index `virt`.  Only single-slot insertions (order 0) are
  // supported.  Returns one of the Obj::Insert_* status codes.
  (void)size;
  assert (size == Order(0));
  Entry *c;

  // Fast path for the currently active space: probe the slot through
  // its virtual mapping, allocating a capability page on demand.
  if (Optimize_local
      && SPACE::mem_space(this) == Mem_space::current_mem_space(current_cpu()))
    {
      c = cap_virt(virt);
      if (!c)
        return Obj::Insert_err_nomem;

      Capability cap;
      if (!Mem_layout::read_special_safe((Capability*)c, cap)
          && !caps_alloc(virt))
        return Obj::Insert_err_nomem;
    }
  else
    {
      // Slow path: explicit lookup plus on-demand allocation of the
      // capability page.
      c = get_cap(virt);
      if (!c && !(c = caps_alloc(virt)))
        return Obj::Insert_err_nomem;
      Obj::set_entry(virt, c);
    }

  if (c->valid())
    {
      if (c->obj() == phys)
        {
          // Same object already mapped: either nothing to add or an
          // upgrade of the rights.
          if (EXPECT_FALSE(c->rights() == page_attribs))
            return Obj::Insert_warn_exists;

          c->add_rights(page_attribs);
          return Obj::Insert_warn_attrib_upgrade;
        }
      else
        return Obj::Insert_err_exists; // slot occupied by another object
    }

  c->set(phys, page_attribs);
  return Obj::Insert_ok;
}
PUBLIC
Rcu_data::~Rcu_data()
{
  // Tear down the per-CPU RCU data of a CPU other than the current one:
  // report the dying CPU as quiescent if a grace period is pending and
  // migrate its queued batches to the current CPU's RCU data.
  if (current_cpu() == _cpu)
    return;

  Rcu_glbl *glbl = Rcu::rcu();
  Rcu_data *dst  = &Rcu::_rcu_data.current();

  {
    auto guard = lock_guard(glbl->_lock);
    // A grace period is in flight: the vanished CPU can no longer
    // report quiescence itself, so do it on its behalf.
    if (glbl->_current != glbl->_completed)
      glbl->cpu_quiet(_cpu);
  }

  // Hand over all pending items so no callback is lost.
  dst->move_batch(_c);
  dst->move_batch(_n);
  dst->move_batch(_d);
}
PRIVATE inline NOEXPORT
L4_error
Ipc_gate::block(Thread *ct, L4_timeout const &to, Utcb *u)
{
  // Block thread `ct` on this gate's wait queue until it is woken or the
  // timeout `to` expires.  Returns L4_error::Timeout when the timeout
  // hit while still enqueued, L4_error::None otherwise.
  Unsigned64 t = 0;
  if (!to.is_never())
    {
      // Convert the relative timeout into an absolute time; a result of
      // zero means "already expired".
      t = to.microsecs(Timer::system_clock(), u);
      if (!t)
        return L4_error::Timeout;
    }

  {
    // Enqueue under the wait-queue lock, ordered by scheduling priority.
    auto g = lock_guard(_wait_q.lock());
    ct->set_wait_queue(&_wait_q);
    ct->sender_enqueue(&_wait_q, ct->sched_context()->prio());
  }
  ct->state_change_dirty(~Thread_ready, Thread_send_wait);

  // Arm an IPC timeout on this CPU when a finite timeout was given.
  IPC_timeout timeout;
  if (t)
    {
      timeout.set(t, current_cpu());
      ct->set_timeout(&timeout);
    }

  ct->schedule(); // block until woken or timed out

  ct->state_change(~Thread_ipc_mask, Thread_ready);
  ct->reset_timeout();

  if (EXPECT_FALSE(ct->in_sender_list() && timeout.has_hit()))
    {
      // Timeout fired; re-check under the lock because a concurrent
      // wake-up may have dequeued us in the meantime.
      auto g = lock_guard(_wait_q.lock());
      if (!ct->in_sender_list())
        return L4_error::None;

      ct->sender_dequeue(&_wait_q);
      return L4_error::Timeout;
    }
  return L4_error::None;
}
/** Page fault handler. This handler suspends any ongoing IPC, then sets up page-fault IPC. Finally, the ongoing IPC's state (if any) is restored. @param pfa page-fault virtual address @param error_code page-fault error code. */ PRIVATE bool Thread::handle_page_fault_pager(Thread_ptr const &_pager, Address pfa, Mword error_code, L4_msg_tag::Protocol protocol) { #ifndef NDEBUG // do not handle user space page faults from kernel mode if we're // already handling a request if (EXPECT_FALSE(!PF::is_usermode_error(error_code) && thread_lock()->test() == Thread_lock::Locked)) { kdb_ke("Fiasco BUG: page fault, under lock"); panic("page fault in locked operation"); } #endif if (EXPECT_FALSE((state() & Thread_alien))) return false; Lock_guard<Cpu_lock> guard(&cpu_lock); unsigned char rights; Kobject_iface *pager = _pager.ptr(space(), &rights); if (!pager) { WARN("CPU%d: Pager of %lx is invalid (pfa=" L4_PTR_FMT ", errorcode=" L4_PTR_FMT ") to %lx (pc=%lx)\n", current_cpu(), dbg_id(), pfa, error_code, _pager.raw(), regs()->ip()); LOG_TRACE("Page fault invalid pager", "pf", this, __fmt_page_fault_invalid_pager, Log_pf_invalid *l = tbe->payload<Log_pf_invalid>(); l->cap_idx = _pager.raw(); l->err = error_code; l->pfa = pfa); pager = this; // block on ourselves }
Switch_lock::Status NO_INSTRUMENT
Switch_lock::lock_dirty()
{
  // Acquire this lock, helping the current owner to run until the lock
  // becomes free.  Must be called with the CPU lock held.
  // Returns Invalid when the lock is (or becomes) invalid, Locked when
  // the caller already owned it, Not_locked on fresh acquisition.
  assert(cpu_lock.test());

  if (!valid())
    return Invalid;

  // do we already hold the lock? (low bit of _lock_owner is a flag)
  if ((_lock_owner & ~1UL) == Address(current()))
    return Locked;

  do
    {
      for (;;)
        {
          Mword o = access_once(&_lock_owner);
          if (o & 1)
            return Invalid; // lock was invalidated
          if (!o)
            break; // lock is free, try to take it

          // Help lock owner until lock becomes free
          //       while (test())
          Context *c = current();
          if (   c->switch_exec_helping((Context *)o, Context::Helping,
                                        &_lock_owner, o)
              == Context::Switch::Failed
              && c->home_cpu() != current_cpu())
            c->schedule();

          // Allow pending preemption before retrying.
          Proc::preemption_point();

          if (!valid())
            return Invalid;
        }
    }
  while (!set_lock_owner(current())); // retry on lost CAS race

  Mem::mp_wmb();
  current()->inc_lock_cnt();   // Do not lose this lock if current is deleted
  return Not_locked;
}
/** Thread context switchin.  Called on every re-activation of a
    thread (switch_exec()).  This method is public only because it is
    called from assembly code in switch_cpu().
    @param from the context that was previously running on this CPU
 */
IMPLEMENT
void
Context::switchin_context(Context *from)
{
  assert_kdb (this == current());
  assert_kdb (state() & Thread_ready_mask);

  // Set kernel-esp in case we want to return to the user.
  // kmem::kernel_sp() returns a pointer to the kernel SP (in the
  // TSS) the CPU uses when next switching from user to kernel mode.
  // regs() + 1 returns a pointer to the end of our kernel stack.
  Cpu::cpus.cpu(cpu()).kernel_sp() = reinterpret_cast<Address>(regs() + 1);

  // switch to our page directory if necessary
  vcpu_aware_space()->switchin_context(from->vcpu_aware_space());

  // load new segment selectors
  load_segments();

  // update the global UTCB pointer to make the thread find its UTCB
  // using fs:[0]
  Mem_layout::user_utcb_ptr(current_cpu()) = utcb().usr();
}
IMPLEMENTATION [!mp]:

// On uniprocessor builds there are no application CPUs to park.
static void suspend_ap_cpus() {}

IMPLEMENTATION:

/**
 * \brief Initiate a full system suspend to RAM.
 * \pre must run on the boot CPU
 *
 * DRQ handler: spills FPU state, parks the application CPUs, programs
 * the ACPI FACS wake vector, performs the platform suspend, and on
 * resume re-initializes the per-CPU timer/FPU state and restarts the
 * application CPUs.  On failure, -L4_err::EInval is stored through
 * `data`.
 */
static Context::Drq::Result
do_system_suspend(Context::Drq *, Context *, void *data)
{
  Context::spill_current_fpu(current_cpu());
  suspend_ap_cpus();

  // Program the firmware wake vector; clear the extended (64-bit)
  // vector when the FACS is new enough to have one.
  facs->fw_wake_vector = phys_wake_vector;

  if (facs->len > 32 && facs->version >= 1)
    facs->x_fw_wake_vector = 0;

  // `data` carries the requested sleep type in and the status code out
  // (0 on success).
  Mword sleep_type = *(Mword *)data;
  *reinterpret_cast<Mword*>(data) = 0;

  Pm_object::run_on_suspend_hooks(current_cpu());

  Cpu::cpus.current().pm_suspend();

  if (acpi_save_cpu_and_suspend(sleep_type,
                                (_pm1b << 16) | _pm1a,
                                (_pm1b_sts << 16) | _pm1a_sts))
    *reinterpret_cast<Mword *>(data) = -L4_err::EInval;

  // --- execution continues here after wake-up ---
  Cpu::cpus.current().pm_resume();

  Pm_object::run_on_resume_hooks(current_cpu());

  Fpu::init(current_cpu(), true);

  Timer::init(current_cpu());
  Timer_tick::enable(current_cpu());
  Kernel_thread::boot_app_cpus();

  return Context::Drq::no_answer_resched();
}
Obj_space_virt<SPACE>::v_lookup(V_pfn const &virt, Phys_addr *phys,
                                Page_order *size, Attr *attribs)
{
  // Look up the capability at `virt`; optionally report the referenced
  // object (*phys), mapping order (*size), and rights (*attribs).
  // Returns true iff a valid capability is mapped at `virt`.
  if (size)
    *size = Order(0); // capabilities are single-slot (order-0) mappings

  Entry *cap;

  // Fast path: read the slot through its virtual mapping when this is
  // the currently active space.
  if (Optimize_local
      && SPACE::mem_space(this) == Mem_space::current_mem_space(current_cpu()))
    cap = cap_virt(virt);
  else
    cap = get_cap(virt);

  if (EXPECT_FALSE(!cap))
    {
      // No capability page mapped here: report the whole unmapped area.
      if (size)
        *size = Order(Obj::Caps_per_page_ld2);
      return false;
    }

  if (Optimize_local)
    {
      // Safe read through the virtual mapping (tolerates faults).
      Capability c = Mem_layout::read_special_safe((Capability*)cap);

      if (phys)
        *phys = c.obj();
      if (c.valid() && attribs)
        *attribs = Attr(c.rights());

      return c.valid();
    }
  else
    {
      Obj::set_entry(virt, cap);
      if (phys)
        *phys = cap->obj();
      if (cap->valid() && attribs)
        *attribs = Attr(cap->rights());
      return cap->valid();
    }
}
struct svalue * debug_command(char *debcmd, int argc, struct svalue *argv) { static struct svalue retval; int dbnum, dbi, il; char buff[200]; for (dbi = -1, dbnum = 0; debc[dbnum]; dbnum++) { if (strcmp(debcmd, debc[dbnum]) == 0) dbi = dbnum; } if (dbi < 0) { retval.type = T_NUMBER; retval.u.number = 0; return &retval; } switch (dbi) { case 0: /* index */ retval.type = T_POINTER; retval.u.vec = allocate_array(dbnum); for (il = 0; il < dbnum; il++) { retval.u.vec->item[il].type = T_STRING; retval.u.vec->item[il].string_type = STRING_CSTRING; retval.u.vec->item[il].u.string = debc[il]; } return &retval; case 1: /* malloc */ retval.type = T_STRING; retval.string_type = STRING_MSTRING; retval.u.string = make_mstring((char *)dump_malloc_data()); return &retval; case 2: /* status */ case 3: /* status tables */ retval.type = T_STRING; retval.string_type = STRING_MSTRING; retval.u.string = (char *)get_gamedriver_info(debc[dbi]); return &retval; case 4: /* mudstatus on/off eval_lim time_lim */ if (argc < 3 || argv[0].type != T_STRING || argv[1].type != T_NUMBER || argv[2].type != T_NUMBER) break; if (strcmp(argv[0].u.string, "on") == 0) mudstatus_set(1, argv[1].u.number, argv[2].u.number); else if (strcmp(argv[0].u.string, "off") == 0) mudstatus_set(0, argv[1].u.number, argv[2].u.number); else break; retval.type = T_NUMBER; retval.u.number = 1; return &retval; case 5: /* functionlist object */ if (argc < 1 || argv[0].type != T_OBJECT) break; retval.type = T_POINTER; retval.u.vec = allocate_array(argv[0].u.ob->prog->num_functions); for (il = 0; il < (int)argv[0].u.ob->prog->num_functions; il++) { retval.u.vec->item[il].type = T_STRING; retval.u.vec->item[il].string_type = STRING_SSTRING; retval.u.vec->item[il].u.string = reference_sstring(argv[0].u.ob->prog->functions[il].name); } return &retval; case 6: /* rusage */ { #ifdef RUSAGE /* Only defined if we compile GD with RUSAGE */ char buff[500]; struct rusage rus; long utime, stime; long maxrss; if (getrusage(RUSAGE_SELF, 
&rus) < 0) buff[0] = 0; else { utime = rus.ru_utime.tv_sec * 1000 + rus.ru_utime.tv_usec / 1000; stime = rus.ru_stime.tv_sec * 1000 + rus.ru_stime.tv_usec / 1000; maxrss = rus.ru_maxrss; (void)sprintf(buff, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld", utime, stime, maxrss, rus.ru_ixrss, rus.ru_idrss, rus.ru_isrss, rus.ru_minflt, rus.ru_majflt, rus.ru_nswap, rus.ru_inblock, rus.ru_oublock, rus.ru_msgsnd, rus.ru_msgrcv, rus.ru_nsignals, rus.ru_nvcsw, rus.ru_nivcsw); } retval.type = T_STRING; retval.string_type = STRING_MSTRING; retval.u.string = make_mstring(buff); return &retval; #else retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Only valid if GD compiled with RUSAGE flag.\n"; return &retval; #endif } #if defined(PROFILE_LPC) case 7: /* top_ten_cpu */ { #define NUMBER_OF_TOP_TEN 100 struct program *p[NUMBER_OF_TOP_TEN]; struct vector *v; struct program *prog; int i, j; for(i = 0; i < NUMBER_OF_TOP_TEN; i++) p[i] = (struct program *)0L; prog = prog_list; do { for(i = NUMBER_OF_TOP_TEN-1; i >= 0; i--) { if ( p[i] && (prog->cpu <= p[i]->cpu)) break; } if (i < (NUMBER_OF_TOP_TEN - 1)) for (j = 0; j <= i; j++) if (strcmp(p[j]->name,prog->name) == 0) { i = NUMBER_OF_TOP_TEN-1; break; } if (i < (NUMBER_OF_TOP_TEN - 1)) { j = NUMBER_OF_TOP_TEN - 2; while(j > i) { p[j + 1] = p[j]; j--; } p[i + 1] = prog; } } while (prog_list != (prog = prog->next_all)); v = make_cpu_array(NUMBER_OF_TOP_TEN, p); if (v) { retval.type = T_POINTER; retval.u.vec = v; return &retval; } break; #undef NUMBER_OF_TOP_TEN } #else case 7: retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Only valid if GD compiled with PROFILE_LPC flag.\n"; return &retval; #endif case 8: /* object_cpu object */ { long long c_num; if (argc && (argv[0].type == T_OBJECT)) { #if defined(PROFILE_LPC) c_num = argv[0].u.ob->prog->cpu * 1e6; #else retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Only valid if GD 
compiled with PROFILE_LPC flag.\n"; return &retval; #endif } else { #ifdef RUSAGE struct rusage rus; if (getrusage(RUSAGE_SELF, &rus) < 0) { c_num = -1; } else { c_num = (long long)rus.ru_utime.tv_sec * 1000000 + rus.ru_utime.tv_usec + (long long)rus.ru_stime.tv_sec * 1000000 + rus.ru_stime.tv_usec; } #else retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Only valid if GD compiled with RUSAGE flag.\n"; return &retval; #endif } retval.type = T_NUMBER; retval.u.number = c_num; return &retval; } case 9: /* swap, object */ #if 0 /* can not swap while executing */ if (argc && (argv[0].type == T_OBJECT)) (void)swap(argv[0].u.ob); #endif retval = const1; return &retval; case 10: /* version, */ { char buff[64]; (void)snprintf(buff, sizeof(buff), "%6.6s%02d %s %s", GAME_VERSION, PATCH_LEVEL, __DATE__, __TIME__); retval.type = T_STRING; retval.string_type = STRING_MSTRING; retval.u.string = make_mstring(buff); return &retval; } case 11: /* wizlist, wizname */ /* * Prints information, will be changed */ retval = const1; return &retval; case 12: /* trace, bitmask */ { int ot = -1; extern struct object *current_interactive; if (current_interactive && current_interactive->interactive) { if (argc && (argv[0].type == T_NUMBER)) { ot = current_interactive->interactive->trace_level; current_interactive->interactive->trace_level = argv[0].u.number; } } retval.type = T_NUMBER; retval.u.number = ot; return &retval; } case 13: /* traceprefix, pathstart */ { char *old = 0; extern struct object *current_interactive; if (current_interactive && current_interactive->interactive) { if (argc) { old = current_interactive->interactive->trace_prefix; if (argv[0].type == T_STRING) { current_interactive->interactive->trace_prefix = make_sstring(argv[0].u.string); } else current_interactive->interactive->trace_prefix = 0; } } if (old) { retval.type = T_STRING; retval.string_type = STRING_SSTRING; retval.u.string = old; } else retval = const0; return &retval; } case 14: 
/* call_out_info, */ { extern struct vector *get_calls(struct object *); if (argv[0].type != T_OBJECT) break; retval.type = T_POINTER; retval.u.vec = get_calls(argv[0].u.ob); return &retval; } case 15: /* inherit_list, object */ if (argc && (argv[0].type == T_OBJECT)) { retval.type = T_POINTER; retval.u.vec = inherit_list(argv[0].u.ob); return &retval; } else { retval = const0; return &retval; } case 16: /* load_average, */ retval.type = T_STRING; retval.string_type = STRING_MSTRING; retval.u.string = make_mstring(query_load_av()); return &retval; case 17: /* shutdown, */ startshutdowngame(0); retval = const1; return &retval; case 18: /* "object_info", num object */ { struct object *ob; char db_buff[1024], tdb[200]; int i; if (argc < 2 || argv[0].type != T_NUMBER || argv[1].type != T_OBJECT) break; if (argv[0].u.number == 0) { int flags; struct object *obj2; if ( argv[1].type != T_OBJECT) break; ob = argv[1].u.ob; flags = ob->flags; (void)sprintf(db_buff,"O_ENABLE_COMMANDS : %s\nO_CLONE : %s\nO_DESTRUCTED : %s\nO_SWAPPED : %s\nO_ONCE_INTERACTIVE: %s\nO_CREATED : %s\n", flags&O_ENABLE_COMMANDS ?"TRUE":"FALSE", flags&O_CLONE ?"TRUE":"FALSE", flags&O_DESTRUCTED ?"TRUE":"FALSE", flags&O_SWAPPED ?"TRUE":"FALSE", flags&O_ONCE_INTERACTIVE?"TRUE":"FALSE", flags&O_CREATED ?"TRUE":"FALSE"); (void)sprintf(tdb,"time_of_ref : %d\n", ob->time_of_ref); (void)strcat(db_buff, tdb); (void)sprintf(tdb,"ref : %d\n", ob->ref); (void)strcat(db_buff, tdb); #ifdef DEBUG (void)sprintf(tdb,"extra_ref : %d\n", ob->extra_ref); (void)strcat(db_buff, tdb); #endif (void)sprintf(tdb,"swap_num : %d\n", ob->swap_num); (void)strcat(db_buff, tdb); (void)snprintf(tdb, sizeof(tdb), "name : '%s'\n", ob->name); (void)strcat(db_buff, tdb); (void)snprintf(tdb, sizeof(tdb), "next_all : OBJ(%s)\n", ob->next_all?ob->next_all->name: "NULL"); (void)strcat(db_buff, tdb); if (obj_list == ob) { (void)strcat(db_buff, "This object is the head of the object list.\n"); } obj2 = obj_list; i = 1; do if (obj2->next_all 
== ob) { (void)snprintf(tdb, sizeof(tdb), "Previous object in object list: OBJ(%s)\n", obj2->name); (void)strcat(db_buff, tdb); (void)sprintf(tdb, "position in object list:%d\n",i); (void)strcat(db_buff, tdb); } while (obj_list != (obj2 = obj2->next_all)); } else if (argv[0].u.number == 1) { if (argv[1].type != T_OBJECT) break; ob = argv[1].u.ob; (void)sprintf(db_buff,"program ref's %d\n", ob->prog->ref); (void)snprintf(tdb, sizeof(tdb), "Name %s\n", ob->prog->name); (void)strcat(db_buff, tdb); (void)sprintf(tdb,"program size %d\n", ob->prog->program_size); (void)strcat(db_buff, tdb); (void)sprintf(tdb, "num func's %u (%u) \n", ob->prog->num_functions ,ob->prog->num_functions * (unsigned) sizeof(struct function)); (void)strcat(db_buff, tdb); (void)sprintf(tdb,"sizeof rodata %d\n", ob->prog->rodata_size); (void)strcat(db_buff, tdb); (void)sprintf(tdb,"num vars %u (%u)\n", ob->prog->num_variables ,ob->prog->num_variables * (unsigned) sizeof(struct variable)); (void)strcat(db_buff, tdb); (void)sprintf(tdb,"num inherits %u (%u)\n", ob->prog->num_inherited ,ob->prog->num_inherited * (unsigned) sizeof(struct inherit)); (void)strcat(db_buff, tdb); (void)sprintf(tdb,"total size %d\n", ob->prog->total_size); (void)strcat(db_buff, tdb); } else { (void)sprintf(db_buff, "Bad number argument to object_info: %lld\n", argv[0].u.number); } retval.type = T_STRING; retval.string_type = STRING_MSTRING; retval.u.string = make_mstring(db_buff); return &retval; } case 19: /* opcdump, 19 */ { #ifdef OPCPROF opcdump(); retval = const1; return &retval; #else retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Only valid if GD compiled with OPCPROF flag.\n"; return &retval; #endif } case 20: /* send_udp, 20 host, port, msg */ { #ifdef CATCH_UDP_PORT extern udpsvc_t *udpsvc; #endif if (argc < 3 || argv[0].type != T_STRING || argv[1].type != T_NUMBER || argv[2].type != T_STRING) break; #ifdef CATCH_UDP_PORT tmp = udpsvc_send(udpsvc, argv[0].u.string, 
argv[1].u.number, argv[2].u.string); if (tmp) retval = const1; else #endif retval = const0; return &retval; } case 21: /* mud_port, 21 */ { extern int port_number; retval.type = T_NUMBER; retval.u.number = port_number; return &retval; } case 22: /* udp_port, 22 */ { #ifdef CATCH_UDP_PORT extern int udp_port; retval.u.number = udp_port; #else retval.u.number = -1; #endif retval.type = T_NUMBER; return &retval; } case 23: /* set_wizard, object */ if (argc && (argv[0].type == T_OBJECT)) { retval = const1; return &retval; } else { retval = const0; return &retval; } case 24: /* ob_flags, 24 ob */ { if (argc && (argv[0].type == T_OBJECT)) { retval.type = T_NUMBER; retval.u.number = argv[0].u.ob->flags; return &retval; } retval = const0; return &retval; } case 25: /* get_variables, 25 object NULL/string */ { struct svalue get_variables(struct object *); struct svalue get_variable(struct object *, char *); switch (argc) { case 1: if ( argv[0].type != T_OBJECT) { retval = const0; return &retval; } retval = get_variables(argv[0].u.ob); return &retval; case 2: if ( argv[0].type != T_OBJECT || argv[1].type != T_STRING) { retval = const0; return &retval; } retval = get_variable(argv[0].u.ob, argv[1].u.string); return &retval; case 3: if ( argv[0].type == T_OBJECT && argv[1].type == T_STRING) { retval = get_variable(argv[0].u.ob, argv[1].u.string); return &retval; } if ( argv[0].type == T_OBJECT) { retval = get_variables(argv[0].u.ob); return &retval; } retval = const0; return &retval; default: retval = const0; return &retval; } } case 26: /* get_eval_cost, 26 */ { extern int eval_cost; retval.type = T_NUMBER; retval.u.number = eval_cost; return &retval; } case 27: /* debug malloc, 27 */ { retval = const1; return &retval; } case 28: /* getprofile, 28 object */ { int format = 0; #ifndef PROFILE_LPC retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Only valid if GD compiled with PROFILE_LPC flag.\n"; return &retval; #else if (argc < 1 || argv[0].type 
!= T_OBJECT) break; if (argc >= 2 && argv[1].type == T_NUMBER) format = argv[1].u.number; if (format == 0) { retval.type = T_POINTER; retval.u.vec = allocate_array(argv[0].u.ob->prog->num_functions); for (il = 0; il < (int)argv[0].u.ob->prog->num_functions; il++) { (void)snprintf(buff, sizeof(buff), "%016lld:%020lld: %s", (long long)argv[0].u.ob->prog->functions[il].num_calls, (long long)(argv[0].u.ob->prog->functions[il].time_spent * 1e6), argv[0].u.ob->prog->functions[il].name); retval.u.vec->item[il].type = T_STRING; retval.u.vec->item[il].string_type = STRING_MSTRING; retval.u.vec->item[il].u.string = make_mstring(buff); } } else if (format == 1) { retval.type = T_POINTER; retval.u.vec = allocate_array(argv[0].u.ob->prog->num_functions); double now = current_cpu(); struct program *prog = argv[0].u.ob->prog; for (il = 0; il < (int)prog->num_functions; il++) { struct function *func = prog->functions + il; struct vector *res = allocate_array(7); update_func_profile(func, now, 0.0, 0.0, 0); res->item[0].type = T_STRING; res->item[0].string_type = STRING_MSTRING; res->item[0].u.string = make_mstring(func->name); res->item[1].type = T_FLOAT; res->item[1].u.real = func->time_spent * 1e6; res->item[2].type = T_FLOAT; res->item[2].u.real = func->tot_time_spent * 1e6; res->item[3].type = T_FLOAT; res->item[3].u.real = func->num_calls; res->item[4].type = T_FLOAT; res->item[4].u.real = func->avg_time * 1e6; res->item[5].type = T_FLOAT; res->item[5].u.real = func->avg_tot_time * 1e6; res->item[6].type = T_FLOAT; res->item[6].u.real = func->avg_calls; retval.u.vec->item[il].type = T_POINTER; retval.u.vec->item[il].u.vec = res; } } else { retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Unknown format.\n"; } return &retval; #endif } case 29: /* get_avg_response, 29 */ { extern int get_msecs_response(int); extern int msr_point; int sum, num, tmp; if (msr_point >=0) { sum = 0; num = 0; for (il = 0; il < 100; il++) { if ((tmp = 
get_msecs_response(il)) >=0) { sum += tmp; num++; } } retval.type = T_NUMBER; retval.u.number = (num > 0) ? sum / num : 0; return &retval; } break; } case 30: /* destruct, 30 */ case 31: /* destroy, 31 */ { extern void destruct_object(struct object *); if (argc && argv[0].type == T_OBJECT && !(argv[0].u.ob->flags & O_DESTRUCTED)) destruct_object(argv[0].u.ob); break; } case 32: /* update snoops, 31 */ #ifdef SUPER_SNOOP update_snoop_file(); #else retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Only valid if GD compiled with SUPER_SNOOP flag.\n"; #endif break; case 33: /* call_warnings, int 0 = off, 1 = on */ if (argc && (argv[0].type == T_STRING)) { if (strcmp(argv[0].u.string, "on") == 0) call_warnings++; else call_warnings = call_warnings > 0 ? call_warnings - 1 : 0; retval.type = T_NUMBER; retval.u.number = call_warnings; return &retval; } else { retval.type = T_NUMBER; retval.u.number = -1; return &retval; } case 34: /* dump objects */ { FILE *ufile; struct object *ob; if ((ufile = fopen(OBJECT_DUMP_FILE, "w")) == NULL) { retval.type = T_NUMBER; retval.u.number = -1; return &retval; } fputs("Array (size), Mapping (size), String (size), Objs, Ints, Floats, Inventory, Callouts, Environment, Name\n", ufile); ob = obj_list; do { mem_variables(ufile, ob); } while (obj_list != (ob = ob->next_all)); (void)fclose(ufile); break; } case 35: /* query_debug_ob */ if (!argc || argv[0].type != T_OBJECT) break; retval.type = T_NUMBER; retval.u.number = argv[0].u.ob->debug_flags; return &retval; case 36: /* set_debug_ob */ if (!argc || argv[0].type != T_OBJECT || argv[1].type != T_NUMBER) break; retval.type = T_NUMBER; retval.u.number = argv[0].u.ob->debug_flags; argv[0].u.ob->debug_flags = argv[1].u.number; return &retval; case 37: /* set_swap */ retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Obsolete function.\n"; return &retval; case 38: /* query_swap */ retval.type = T_STRING; retval.string_type = 
STRING_CSTRING; retval.u.string = "Obsolete function.\n"; return &retval; case 39: /* query_debug_prog */ if (!argc || argv[0].type != T_OBJECT) break; retval.type = T_NUMBER; retval.u.number = argv[0].u.ob->prog->debug_flags; return &retval; case 40: /* set_debug_prog */ if (argc < 2 || argv[0].type != T_OBJECT || argv[1].type != T_NUMBER) break; retval.type = T_NUMBER; retval.u.number = argv[0].u.ob->prog->debug_flags; argv[0].u.ob->prog->debug_flags = argv[1].u.number; return &retval; #ifdef FUNCDEBUG case 41: dumpfuncs(); retval = const0; return &retval; #endif case 42: /* inhibitcallouts */ if (argc && (argv[0].type == T_STRING)) { extern int inhibitcallouts; int old; old = inhibitcallouts; if (strcmp(argv[0].u.string, "on") == 0) inhibitcallouts = 1; else inhibitcallouts = 0; retval.type = T_NUMBER; retval.u.number = old; return &retval; } else { retval.type = T_NUMBER; retval.u.number = -1; return &retval; } case 43: /* inhibitcallouts */ if (argc && (argv[0].type == T_STRING)) { extern int warnobsoleteflag; int old; old = warnobsoleteflag; if (strcmp(argv[0].u.string, "on") == 0) warnobsoleteflag = 1; else warnobsoleteflag = 0; retval.type = T_NUMBER; retval.u.number = old; return &retval; } else { retval.type = T_NUMBER; retval.u.number = -1; return &retval; } case 44: /* shared_strings */ #ifdef DEBUG dump_sstrings(); retval = const0; #else retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Only valid if GD compiled with DEBUG flag.\n"; #endif return &retval; case 45: /* dump_alarms */ { int c; FILE *ufile; if ((ufile = fopen(ALARM_DUMP_FILE, "w")) == NULL) { retval.type = T_NUMBER; retval.u.number = -1; return &retval; } c = dump_callouts(ufile); fclose(ufile); retval.type = T_NUMBER; retval.u.number = c; return &retval; } #ifdef PROFILE_LPC case 46: /* top_ten_cpu */ { #define NUMBER_OF_TOP_TEN 100 struct program *p[NUMBER_OF_TOP_TEN]; struct program *prog; struct vector *v; int i, j; double now = current_cpu(); for(i = 0; i 
< NUMBER_OF_TOP_TEN; i++) p[i] = (struct program *)0L; prog = prog_list; do { update_prog_profile(prog, now, 0.0, 0.0); for(i = NUMBER_OF_TOP_TEN-1; i >= 0; i--) { if ( p[i] && (prog->cpu_avg <= p[i]->cpu_avg)) break; } if (i < (NUMBER_OF_TOP_TEN - 1)) for (j = 0; j <= i; j++) if (strcmp(p[j]->name,prog->name) == 0) { i = NUMBER_OF_TOP_TEN-1; break; } if (i < (NUMBER_OF_TOP_TEN - 1)) { j = NUMBER_OF_TOP_TEN - 2; while(j > i) { p[j + 1] = p[j]; j--; } p[i + 1] = prog; } } while (prog_list != (prog = prog->next_all)); v = make_cpu_array2(NUMBER_OF_TOP_TEN, p); if (v) { retval.type = T_POINTER; retval.u.vec = v; return &retval; } break; #undef NUMBER_OF_TOP_TEN } #else case 46: retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Only valid if GD compiled with PROFILE_LPC flag.\n"; return &retval; #endif case 47: /* object_cpu_avg object */ { #if defined(PROFILE_LPC) if (argc < 1 || (argv[0].type != T_OBJECT)) break; update_prog_profile(argv[0].u.ob->prog, current_cpu(), 0.0, 0.0); retval.type = T_FLOAT; retval.u.number =argv[0].u.ob->prog->cpu_avg * 1e6; return &retval; #else retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Only valid if GD compiled with PROFILE_LPC flag.\n"; return &retval; #endif } case 48: /* getprofile_avg, 28 object */ { #if defined(PROFILE_LPC) if (argc < 1 || argv[0].type != T_OBJECT) break; retval.type = T_POINTER; retval.u.vec = allocate_array(argv[0].u.ob->prog->num_functions); double now = current_cpu(); struct program *prog = argv[0].u.ob->prog; for (il = 0; il < (int)prog->num_functions; il++) { struct function *func = prog->functions + il; struct vector *res = allocate_array(3); update_func_profile(func, now, 0.0, 0.0, 0); res->item[0].type = T_STRING; res->item[0].string_type = STRING_MSTRING; res->item[0].u.string = make_mstring(func->name); res->item[1].type = T_FLOAT; res->item[1].u.real = func->avg_time * 1e6; res->item[2].type = T_FLOAT; res->item[2].u.real = 
func->avg_calls; retval.u.vec->item[il].type = T_POINTER; retval.u.vec->item[il].u.vec = res; } return &retval; #else retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Only valid if GD compiled with PROFILE_LPC flag.\n"; return &retval; #endif } case 49: /* profile_timebase */ { #if defined(PROFILE_LPC) if (argc < 1) { /* Return current value */ retval.type = T_FLOAT; retval.u.real = get_profile_timebase(); return &retval; } /* Update using old timebase */ double now = current_cpu(); struct program *prog = prog_list; do { update_prog_profile(prog, now, 0.0, 0.0); for (int i = 0; i < prog->num_functions; i++) { struct function *func = prog->functions + i; update_func_profile(func, now, 0.0, 0.0, 0); } } while (prog_list != (prog = prog->next_all)); /* Set the new value */ if (argv[0].type == T_NUMBER && argv[0].u.number > 0) set_profile_timebase(argv[0].u.number); else if (argv[0].type == T_FLOAT && argv[0].u.real > 1e-3) set_profile_timebase(argv[0].u.real); else break; retval = const1; return &retval; #else retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Only valid if GD compiled with PROFILE_LPC flag.\n"; return &retval; #endif } case 50: { #ifdef PROFILE_LPC extern int trace_calls; extern FILE *trace_calls_file; if (argc < 1 || argv[0].type != T_NUMBER) break; if (!trace_calls && argv[0].u.number) { if ((trace_calls_file = fopen(TRACE_CALLS_FILE, "w")) == 0) break; setvbuf(trace_calls_file, 0, _IOFBF, 1<<20); /* Set a 1MB buffer */ trace_calls = 1; } else if (trace_calls && !argv[0].u.number) { fclose(trace_calls_file); trace_calls_file = 0; trace_calls = 0; } retval = const1; return &retval; #else retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Only valid if GD compiled with PROFILE_LPC.\n"; return &retval; #endif } case 51: { #if defined(PROFILE_LPC) long long num_top, criteria, num_items = 0; double now = current_cpu(), crit_val; struct program *prog; struct { struct 
program *prog; double crit_val; unsigned short func_index; } *result; if (argc < 2 || argv[0].type != T_NUMBER || argv[1].type != T_NUMBER) break; num_top = argv[0].u.number; criteria = argv[1].u.number; if (num_top < 0 || num_top > 1000) { retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "The number of itmes must be >= 0 and <= 1000."; break; } if (criteria < 0 || criteria > 9) { retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "The criteria must be >= 0 and <= 9."; break; } if (num_top == 0) { retval.type = T_POINTER; retval.u.vec = allocate_array(0); return &retval; } result = xalloc(sizeof(*result) * (num_top + 1)); memset(result, 0, sizeof(*result) * (num_top + 1)); prog = prog_list; do { update_prog_profile(prog, now, 0.0, 0.0); for (int i = 0; i < prog->num_functions; i++) { struct function *func = prog->functions + i; update_func_profile(func, now, 0.0, 0.0, 0); crit_val = get_top_func_criteria(func, criteria); if (num_items == num_top && result[num_items - 1].crit_val >= crit_val) continue; if (num_items == 0 || (num_items < num_top && result[num_items - 1].crit_val >= crit_val)) { result[num_items].prog = prog; result[num_items].func_index = i; result[num_items].crit_val = crit_val; num_items++; } else { int insert = num_items; while (insert > 0 && result[insert - 1].crit_val < crit_val) insert--; memmove(&result[insert + 1], &result[insert], sizeof(*result) * (num_items - insert)); result[insert].prog = prog; result[insert].func_index = i; result[insert].crit_val = crit_val; if (num_items < num_top) num_items++; } } } while ((prog = prog->next_all) != prog_list); retval.type = T_POINTER; retval.u.vec = allocate_array(num_items); for (int i = 0; i < num_items; i++) { struct vector *val = allocate_array(9); prog = result[i].prog; struct function *func = &prog->functions[result[i].func_index]; crit_val = result[i].crit_val; val->item[0].type = T_STRING; val->item[0].string_type = STRING_MSTRING; 
val->item[0].u.string = make_mstring(prog->name); val->item[1].type = T_STRING; val->item[1].string_type = STRING_SSTRING; val->item[1].u.string = func->name; reference_sstring(func->name); val->item[2].type = T_FLOAT; val->item[2].u.real = crit_val; val->item[3].type = T_FLOAT; val->item[3].u.real = func->time_spent; val->item[4].type = T_FLOAT; val->item[4].u.real = func->avg_time; val->item[5].type = T_FLOAT; val->item[5].u.real = func->tot_time_spent; val->item[6].type = T_FLOAT; val->item[6].u.real = func->avg_tot_time; val->item[7].type = T_FLOAT; val->item[7].u.real = func->num_calls; val->item[8].type = T_FLOAT; val->item[8].u.real = func->avg_calls; retval.u.vec->item[i].type = T_POINTER; retval.u.vec->item[i].u.vec = val; } free(result); return &retval; #else retval.type = T_STRING; retval.string_type = STRING_CSTRING; retval.u.string = "Only valid if GD compiled with PROFILE_LPC.\n"; return &retval; #endif } } retval = const0; return &retval; }
/**
 * Syscall backend: tell the calling thread which CPU it is executing on.
 *
 * The answer is delivered to user land through slot 0 of the register
 * array in the entry frame.  The Thread argument is part of the common
 * backend signature but is not needed here, so it stays unnamed.
 */
static void
getcpu(Thread *, Entry_frame *r)
{
  // current_cpu() yields the id of the CPU this code runs on right now.
  r->r[0] = current_cpu();
}
IMPLEMENTATION [ia32 || amd64]:

#include "gdt.h"
#include "std_macros.h"
#include "x86desc.h"

/**
 * Handle x86-specific task invocations.
 *
 * Currently the only operation dispatched here is Ldt_set_x86, which
 * installs user-supplied LDT entries for this task.  The descriptors are
 * taken verbatim from the sender's UTCB:
 *   values[0] — opcode (Ldt_set_x86)
 *   values[1] — index of the first LDT entry to write
 *   values[2]..— raw descriptor data, Cpu::Ldt_entry_size bytes per entry
 *
 * \param tag   message tag of the invocation; rewritten with the result
 *              (commit_result) when the opcode is handled here.
 * \param utcb  sender UTCB carrying opcode and descriptor payload.
 * \return true if the opcode was consumed by this arch handler (whether it
 *         succeeded or failed), false to let generic code handle it.
 */
PRIVATE inline NEEDS["gdt.h"]
bool
Task::invoke_arch(L4_msg_tag &tag, Utcb *utcb)
{
  switch (utcb->values[0])
    {
    case Ldt_set_x86:
      {
        enum
        {
          // How many UTCB words make up one LDT descriptor.  On ia32 a
          // word is 4 bytes, so one 8-byte descriptor spans two words;
          // on amd64 one word holds a whole descriptor.
          Utcb_values_per_ldt_entry
            = Cpu::Ldt_entry_size / sizeof(utcb->values[0]),
        };

        // Need at least opcode + entry number + one descriptor word, and
        // the payload must be a whole number of descriptors.
        // NOTE(review): the modulo is taken over tag.words() including the
        // two header words — presumably the header size is a multiple of
        // Utcb_values_per_ldt_entry on all configs; verify on ia32.
        if (EXPECT_FALSE(tag.words() < 3
                         || tag.words() % Utcb_values_per_ldt_entry))
          {
            tag = commit_result(-L4_err::EInval);
            return true;
          }

        unsigned entry_number = utcb->values[1];
        // Payload size in bytes: everything after the two header words.
        unsigned size = (tag.words() - 2) * sizeof(utcb->values[0]);

        // Allocate the memory if not yet done
        if (!_ldt.addr())
          _ldt.alloc();

        // The backing store for the LDT is a single page; refuse requests
        // that would write past it.
        if (entry_number * Cpu::Ldt_entry_size + size > Config::PAGE_SIZE)
          {
            WARN("set_ldt: LDT size exceeds one page, not supported.");
            tag = commit_result(-L4_err::EInval);
            return true;
          }

        // Record the new LDT limit (bytes up to and including the last
        // entry written by this call).
        _ldt.size(size + Cpu::Ldt_entry_size * entry_number);

        Address desc_addr = reinterpret_cast<Address>(&utcb->values[2]);
        Gdt_entry desc;
        Gdt_entry *ldtp
          = reinterpret_cast<Gdt_entry *>(_ldt.addr()) + entry_number;

        // Copy descriptors one by one, validating each before it becomes
        // visible in the LDT.  Entries copied before a bad descriptor is
        // found remain installed even though the call fails.
        while (size >= Cpu::Ldt_entry_size)
          {
            desc = *reinterpret_cast<Gdt_entry const *>(desc_addr);
            // Reject descriptors the kernel considers unsafe (checked by
            // Gdt_entry::unsafe(); exact policy lives in x86desc).
            if (desc.unsafe())
              {
                WARN("set_ldt: Bad descriptor.");
                tag = commit_result(-L4_err::EInval);
                return true;
              }

            *ldtp = desc;
            size -= Cpu::Ldt_entry_size;
            desc_addr += Cpu::Ldt_entry_size;
            ldtp++;
          }

        // If the modified task is the one currently running, activate the
        // new LDT on this CPU immediately; otherwise it is picked up on
        // the next switch to this address space.
        if (this == current()->space())
          Cpu::cpus.cpu(current_cpu()).enable_ldt(_ldt.addr(), _ldt.size());

        tag = commit_result(0);
      }
      return true;
    }

  // Unknown opcode: not an arch-specific invocation.
  return false;
}