void minios_do_halt(int reason)
{
    minios_printk("minios: halting, reason=%d\n", reason);
    for ( ;; )
    {
        struct sched_shutdown sched_shutdown = {
            .reason = (reason == MINIOS_HALT_POWEROFF) ? SHUTDOWN_poweroff
                                                       : SHUTDOWN_crash
        };
        HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
    }
}

/*
 * do_exit: This is called whenever an IRET fails in entry.S.
 * This will generally be because an application has got itself into
 * a really bad state (probably a bad CS or SS).  It must be killed.
 * Of course, minimal OS doesn't have applications :-)
 */
void minios_do_exit(void)
{
    minios_printk("Do_exit called!\n");
    stack_walk();
    for ( ;; )
    {
        struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_crash };
        HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
    }
}
void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
    unsigned long addr = read_cr2();
    struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_crash };

    if ((error_code & TRAP_PF_WRITE) && handle_cow(addr))
        return;

    /* If we are already handling a page fault, and got another one
       that means we faulted in pagetable walk. Continuing here would
       cause a recursive fault */
    if (handling_pg_fault == 1)
    {
        printk("Page fault in pagetable walk (access to invalid memory?).\n");
        HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
    }
    handling_pg_fault++;
    barrier();

    printk("Page fault at linear address %p, rip %p, regs %p, sp %p, "
           "our_sp %p, code %lx\n",
           addr, regs->rip, regs, regs->rsp, &addr, error_code);

    dump_regs(regs);
    //do_stack_walk(regs->rbp);
    dump_mem(regs->rsp);
    dump_mem(regs->rbp);
    dump_mem(regs->rip);
    page_walk(addr);
    HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);

    /* We should never get here ... but still */
    handling_pg_fault--;
}
static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
{
    int rc;

    common_cpu_up(cpu, idle);

    xen_setup_runstate_info(cpu);

    /*
     * PV VCPUs are always successfully taken down (see 'while' loop
     * in xen_cpu_die()), so -EBUSY is an error.
     */
    rc = cpu_check_up_prepare(cpu);
    if (rc)
        return rc;

    /* make sure interrupts start blocked */
    per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

    rc = cpu_initialize_context(cpu, idle);
    if (rc)
        return rc;

    xen_pmu_init(cpu);

    rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
    BUG_ON(rc);

    while (cpu_report_state(cpu) != CPU_ONLINE)
        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);

    return 0;
}
static int
privcmd_HYPERVISOR_sched_op(int cmd, void *arg)
{
    int error;
    int size = 0;
    import_export_t op_ie;
    struct sched_remote_shutdown op;

    switch (cmd) {
    case SCHEDOP_remote_shutdown:
        size = sizeof (struct sched_remote_shutdown);
        break;
    default:
#ifdef DEBUG
        printf("unrecognized sched op 0x%x\n", cmd);
#endif
        return (-X_EINVAL);
    }

    error = import_buffer(&op_ie, arg, &op, size, IE_IMPORT);
    if (error == 0)
        error = HYPERVISOR_sched_op(cmd, (arg == NULL) ? NULL : &op);
    export_buffer(&op_ie, &error);

    return (error);
}
static void xen_reboot(int reason)
{
    struct sched_shutdown r = { .reason = reason };

    if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
        BUG();
}
static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
{
    struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
    int rc;

    rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
    BUG_ON(rc);
}
static void xen_power_off(void)
{
    struct sched_shutdown r = { .reason = SHUTDOWN_poweroff };
    int rc;

    rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
    BUG_ON(rc);
}
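/*
 * Editor's sketch, not from any of the trees above: the pattern shared
 * by the three helpers above. With the poweroff/reboot reasons used
 * here, a successful SCHEDOP_shutdown does not return -- the hypervisor
 * tears the domain down -- so any return value at all is treated as a
 * fatal error. The helper name is an assumption.
 */
static void shutdown_with(int reason)
{
    struct sched_shutdown r = { .reason = reason };

    /* Only reached if the hypercall itself failed. */
    BUG_ON(HYPERVISOR_sched_op(SCHEDOP_shutdown, &r));
}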
void block_domain(u32 millisecs)
{
    struct timeval tv;
    gettimeofday(&tv);

    /* Arm a one-shot timer `millisecs` into the future (1 ms = 1e6 ns),
       then block until the timer or any other unmasked event fires. */
    HYPERVISOR_set_timer_op(monotonic_clock() + 1000000LL * (s64) millisecs);
    HYPERVISOR_sched_op(SCHEDOP_block, 0);
}
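/*
 * Hypothetical caller sketch for block_domain() above (the flag name
 * and helper name are assumptions): sleep in 100 ms slices until an
 * event handler elsewhere sets work_pending. Each iteration wakes on
 * the timer or on any unmasked event channel, whichever comes first.
 */
static volatile int work_pending;

static void idle_until_work(void)
{
    while (!work_pending)
        block_domain(100);
}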
CAMLprim value stub_sched_shutdown(value v_reason)
{
    CAMLparam1(v_reason);
    struct sched_shutdown sched_shutdown = {
        .reason = reasons[Int_val(v_reason)]
    };
    HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
    CAMLreturn(Val_unit);
}
void __attribute__ ((noreturn))
grub_reboot (void)
{
  for ( ;; )
    {
      struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_reboot };
      HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
    }
}
static void xen_restart(char str, const char *cmd)
{
    struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
    int rc;

    rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
    if (rc)
        BUG();
}
void domain_poweroff(void)
{
    printk("\nBye\n");
    console_done();     // flushes and restores terminal mode

    struct sched_shutdown op;
    op.reason = SHUTDOWN_poweroff;
    HYPERVISOR_sched_op(SCHEDOP_shutdown, &op);
}
void do_exit(void)
{
    printk("Do_exit called!\n");
    arch_do_exit();
    for ( ;; )
    {
        struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_crash };
        HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
    }
}
static void xen_reboot(int reason)
{
    struct sched_shutdown r = { .reason = reason };

#ifdef CONFIG_SMP
    smp_send_stop();
#endif

    if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
        BUG();
}
void xen_reboot(int reason)
{
    struct sched_shutdown r = { .reason = reason };
    int cpu;

    for_each_online_cpu(cpu)
        xen_pmu_finish(cpu);

    if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
        BUG();
}
CAMLprim value caml_block_domain(value v_timeout)
{
    CAMLparam1(v_timeout);

    block_secs = (s_time_t)(Double_val(v_timeout) * 1000000000);

    set_xen_guest_handle(sched_poll.ports, ports);
    sched_poll.nr_ports = sizeof(ports) / sizeof(evtchn_port_t);
    sched_poll.timeout = NOW() + block_secs;

    HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);

    CAMLreturn(Val_unit);
}
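/*
 * Self-contained sketch of the SCHEDOP_poll pattern used above (the
 * stub keeps sched_poll, ports[] and block_secs at file scope; the
 * helper name here is an assumption). Polls a single event channel
 * until it fires or an absolute deadline in system time passes.
 */
static void poll_one_port(evtchn_port_t port, s_time_t timeout_nsecs)
{
    evtchn_port_t ports[1] = { port };
    struct sched_poll poll;

    set_xen_guest_handle(poll.ports, ports);
    poll.nr_ports = 1;
    poll.timeout = NOW() + timeout_nsecs;   /* absolute, not relative */
    HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
}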
/* Shutdown remote domain that is misbehaving */
int net_accel_shutdown_remote(int domain)
{
    struct sched_remote_shutdown sched_shutdown = {
        .domain_id = domain,
        .reason = SHUTDOWN_crash
    };

    EPRINTK("Crashing domain %d\n", domain);

    return HYPERVISOR_sched_op(SCHEDOP_remote_shutdown, &sched_shutdown);
}
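/*
 * Usage sketch for the helper above (peer_domid and the function name
 * are placeholders): SCHEDOP_remote_shutdown names a target domain, so
 * it returns to the caller and -- unlike SCHEDOP_shutdown on ourselves
 * -- is only expected to succeed from a sufficiently privileged domain.
 */
static void crash_misbehaving_peer(int peer_domid)
{
    int rc = net_accel_shutdown_remote(peer_domid);
    if (rc)
        EPRINTK("remote shutdown of domain %d failed: %d\n", peer_domid, rc);
}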
void _exit(int ret)
{
    printk("main returned %d\n", ret);
    stop_kernel();
    if (!ret) {
        /* No problem, just shutdown.  */
        struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_poweroff };
        HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
    }
    do_exit();
}
void do_general_protection(struct pt_regs *regs, long error_code)
{
    struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_crash };

    printk("GPF rip: %p, error_code=%lx\n", regs->rip, error_code);
    dump_regs(regs);
    //do_stack_walk(regs->rbp);
    dump_mem(regs->rsp);
    dump_mem(regs->rbp);
    dump_mem(regs->rip);
    HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
}
void console_done(void)
{
    // (attempt to) restore the terminal mode
    char modes[] = "\x1b[4l";
    console_write(modes, sizeof(modes) - 1);

    // yield until the backend has consumed everything in the ring
    while (console.intf->out_cons < console.intf->out_prod) {
        HYPERVISOR_sched_op(SCHEDOP_yield, 0);
        mb();
    }
}
int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
                               void *info, int wait)
{
    struct call_data_struct data;
    int cpus, cpu;
    bool yield;

    /* Holding any lock stops cpus from going down. */
    spin_lock(&call_lock);

    cpu_clear(smp_processor_id(), mask);

    cpus = cpus_weight(mask);
    if (!cpus) {
        spin_unlock(&call_lock);
        return 0;
    }

    /* Can deadlock when called with interrupts disabled */
    WARN_ON(irqs_disabled());

    data.func = func;
    data.info = info;
    atomic_set(&data.started, 0);
    data.wait = wait;
    if (wait)
        atomic_set(&data.finished, 0);

    call_data = &data;
    mb();           /* write everything before IPI */

    /* Send a message to other CPUs and wait for them to respond */
    xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

    /* Make sure other vcpus get a chance to run if they need to. */
    yield = false;
    for_each_cpu_mask(cpu, mask)
        if (xen_vcpu_stolen(cpu))
            yield = true;

    if (yield)
        HYPERVISOR_sched_op(SCHEDOP_yield, 0);

    /* Wait for response */
    while (atomic_read(&data.started) != cpus ||
           (wait && atomic_read(&data.finished) != cpus))
        cpu_relax();

    spin_unlock(&call_lock);

    return 0;
}
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
    struct task_struct *idle = idle_task(cpu);
    int rc;

#ifdef CONFIG_X86_64
    /* Allocate node local memory for AP pdas */
    WARN_ON(cpu == 0);
    if (cpu > 0) {
        rc = get_local_pda(cpu);
        if (rc)
            return rc;
    }
#endif

#ifdef CONFIG_X86_32
    init_gdt(cpu);
    per_cpu(current_task, cpu) = idle;
    irq_ctx_init(cpu);
#else
    cpu_pda(cpu)->pcurrent = idle;
    clear_tsk_thread_flag(idle, TIF_FORK);
#endif
    xen_setup_timer(cpu);
    xen_init_lock_cpu(cpu);

    per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

    /* make sure interrupts start blocked */
    per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

    rc = cpu_initialize_context(cpu, idle);
    if (rc)
        return rc;

    if (num_online_cpus() == 1)
        alternatives_smp_switch(1);

    rc = xen_smp_intr_init(cpu);
    if (rc)
        return rc;

    rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
    BUG_ON(rc);

    while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
        HYPERVISOR_sched_op(SCHEDOP_yield, 0);
        barrier();
    }

    return 0;
}
static void xen_smp_send_call_function_ipi(cpumask_t mask)
{
    int cpu;

    xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

    /* Make sure other vcpus get a chance to run if they need to. */
    for_each_cpu_mask_nr(cpu, mask) {
        if (xen_vcpu_stolen(cpu)) {
            HYPERVISOR_sched_op(SCHEDOP_yield, 0);
            break;
        }
    }
}
static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
    int cpu;

    xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

    /* Make sure other vcpus get a chance to run if they need to;
       one yield covers every stolen target, hence the break. */
    for_each_cpu(cpu, mask) {
        if (xen_vcpu_stolen(cpu)) {
            HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
            break;
        }
    }
}
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
    struct task_struct *idle = idle_task(cpu);
    int rc;

    per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
    irq_ctx_init(cpu);
#else
    clear_tsk_thread_flag(idle, TIF_FORK);
    per_cpu(kernel_stack, cpu) =
        (unsigned long)task_stack_page(idle) -
        KERNEL_STACK_OFFSET + THREAD_SIZE;
    per_cpu(kernel_stack8k, cpu) =
        (unsigned long)task_stack_page(idle) -
        KERNEL_STACK_OFFSET + THREAD_SIZE - 8192;
#endif
    xen_setup_runstate_info(cpu);
    xen_setup_timer(cpu);
    xen_init_lock_cpu(cpu);

    per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

    /* make sure interrupts start blocked */
    per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

    rc = cpu_initialize_context(cpu, idle);
    if (rc)
        return rc;

    if (num_online_cpus() == 1)
        alternatives_smp_switch(1);

    rc = xen_smp_intr_init(cpu);
    if (rc)
        return rc;

    rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
    BUG_ON(rc);

    while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
        barrier();
    }

    return 0;
}
CAMLprim value caml_block_domain(value v_timeout)
{
    CAMLparam1(v_timeout);
    s_time_t block_nsecs = (s_time_t)(Double_val(v_timeout) * 1000000000);

    HYPERVISOR_set_timer_op(NOW() + block_nsecs);

    /* xen/common/schedule.c:do_block clears evtchn_upcall_mask to
       re-enable interrupts.  It blocks the domain and immediately
       checks for pending events which otherwise may be missed. */
    HYPERVISOR_sched_op(SCHEDOP_block, 0);

    /* set evtchn_upcall_mask: there's no need to be interrupted when
       we know we have outstanding work to do.  When we next call this
       function, the call to SCHEDOP_block will check for pending
       events. */
    local_irq_disable();

    CAMLreturn(Val_unit);
}
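/*
 * Minimal C restatement of the stub above (a sketch; the helper name is
 * an assumption): events stay masked while the guest runs, and
 * SCHEDOP_block atomically unmasks delivery and re-checks for pending
 * events before descheduling, so a wakeup that arrives just before the
 * hypercall cannot be lost.
 */
static void block_until(s_time_t deadline)
{
    HYPERVISOR_set_timer_op(deadline);
    HYPERVISOR_sched_op(SCHEDOP_block, 0); /* unmask + block + recheck */
    local_irq_disable();                   /* re-mask before returning */
}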
static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
    int rc;

    per_cpu(current_task, cpu) = idle;
    per_cpu(current_tinfo, cpu) = &idle->tinfo;
#ifdef CONFIG_X86_32
    irq_ctx_init(cpu);
#else
    clear_tsk_thread_flag(idle, TIF_FORK);
    per_cpu(kernel_stack, cpu) =
        (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
#endif
    xen_setup_runstate_info(cpu);
    xen_setup_timer(cpu);
    xen_init_lock_cpu(cpu);

    per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

    /* make sure interrupts start blocked */
    per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

    rc = cpu_initialize_context(cpu, idle);
    if (rc)
        return rc;

    if (num_online_cpus() == 1)
        /* Just in case we booted with a single CPU. */
        alternatives_enable_smp();

    rc = xen_smp_intr_init(cpu);
    if (rc)
        return rc;

    rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
    BUG_ON(rc);

    while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
        barrier();
    }

    return 0;
}
void _exit(int ret)
{
    int i;

    /* Run registered destructors, then tear the C library down. */
    for (i = 0; __DTOR_LIST__[i] != 0; i++)
        ((void((*)(void)))__DTOR_LIST__[i])();
    close_all_files();
    __libc_fini_array();

    printk("main returned %d\n", ret);
#if defined(HAVE_LWIP) && defined(CONFIG_NETFRONT)
    stop_networking();
#endif
    stop_kernel();
    if (!ret) {
        /* No problem, just shutdown.  */
        struct sched_shutdown sched_shutdown = { .reason = SHUTDOWN_poweroff };
        HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
    }
    do_exit();
}
void fatal_error(const char *fmt, ...)
{
    char buffer[BUFSIZ];
    va_list ap;

    va_start(ap, fmt);
    vsnprintf(buffer, sizeof(buffer), fmt, ap);
    va_end(ap);

    printk("*** CRASH: %s\r\n", buffer);

    /* Spin yielding rather than shutting down, so the crashed domain
       stays around to be examined. */
    while (1) {
#ifdef LING_DEBUG
        // Provide for attaching the debugger to examine the crash
        gdb_break();
#endif
        HYPERVISOR_sched_op(SCHEDOP_yield, 0);
    }
}
/* Main kernel entry point, called by trampoline */
void start_kernel(start_info_t *start_info)
{
    /* Map the shared info page */
    HYPERVISOR_update_va_mapping((unsigned long) &shared_info,
                                 __pte(start_info->shared_info | 7),
                                 UVMF_INVLPG);

    /* Set the pointer used in the bootstrap for reenabling
     * event delivery after an upcall */
    HYPERVISOR_shared_info = &shared_info;

    /* Set up and unmask events */
    init_events();

    /* Initialise the console */
    console_init(start_info);

    /* Write a message to check that it worked */
    console_write("Hello world!\r\n");

    /* Loop, handling events */
    while (1) {
        HYPERVISOR_sched_op(SCHEDOP_block, 0);
    }
}