/** Halt wrapper * * Set halt flag and halt the CPU. * */ void halt() { #if (defined(CONFIG_DEBUG)) && (defined(CONFIG_KCONSOLE)) bool rundebugger = false; if (!atomic_get(&haltstate)) { atomic_set(&haltstate, 1); rundebugger = true; } #else atomic_set(&haltstate, 1); #endif interrupts_disable(); #if (defined(CONFIG_DEBUG)) && (defined(CONFIG_KCONSOLE)) if ((rundebugger) && (kconsole_check_poll())) kconsole("panic", "\nLast resort kernel console ready.\n", false); #endif if (CPU) printf("cpu%u: halted\n", CPU->id); else printf("cpu: halted\n"); cpu_halt(); }
/*
 * Idle task: runs when no other task is runnable.
 *
 * Spins forever, halting the CPU on each iteration so the core sleeps
 * until the next interrupt.  (Large slabs of commented-out debug code
 * for console output were removed; version control keeps history.)
 */
void task_idle_process(void)
{
    for (;;) {
        Asm("nop");
        cpu_halt();     /* sleep until an interrupt wakes us */
    }
}
/*
 * Handle an interprocessor interrupt: read this CPU's pending-IPI
 * bitmask under c_ipi_lock and act on each request in turn.
 *
 * PANIC and OFFLINE requests halt the CPU and never return; the IPI
 * lock is dropped first so other CPUs are not left spinning on it.
 */
void
interprocessor_interrupt(void)
{
	uint32_t bits;
	int i;

	spinlock_acquire(&curcpu->c_ipi_lock);
	bits = curcpu->c_ipi_pending;

	if (bits & (1U << IPI_PANIC)) {
		/* panic on another cpu - just stop dead */
		spinlock_release(&curcpu->c_ipi_lock);
		cpu_halt();
	}
	if (bits & (1U << IPI_OFFLINE)) {
		/* offline request */
		spinlock_release(&curcpu->c_ipi_lock);
		/* Peek at the idle state only to warn; we halt regardless. */
		spinlock_acquire(&curcpu->c_runqueue_lock);
		if (!curcpu->c_isidle) {
			kprintf("cpu%d: offline: warning: not idle\n",
				curcpu->c_number);
		}
		spinlock_release(&curcpu->c_runqueue_lock);
		kprintf("cpu%d: offline.\n", curcpu->c_number);
		cpu_halt();
	}
	if (bits & (1U << IPI_UNIDLE)) {
		/*
		 * The cpu has already unidled itself to take the
		 * interrupt; don't need to do anything else.
		 */
	}
	if (bits & (1U << IPI_TLBSHOOTDOWN)) {
		/* TLBSHOOTDOWN_ALL is presumably a sentinel meaning
		 * "too many queued entries, flush everything" -- confirm
		 * against the declaration site. */
		if (curcpu->c_numshootdown == TLBSHOOTDOWN_ALL) {
			vm_tlbshootdown_all();
		}
		else {
			for (i=0; i<curcpu->c_numshootdown; i++) {
				vm_tlbshootdown(&curcpu->c_shootdown[i]);
			}
		}
		curcpu->c_numshootdown = 0;
	}

	/* All requests serviced; clear the pending set and drop the lock. */
	curcpu->c_ipi_pending = 0;
	spinlock_release(&curcpu->c_ipi_lock);
}
/*
 * Block until the given IRQ fires.
 *
 * Clears the per-IRQ "happened" flag, then halts the CPU repeatedly
 * until an interrupt handler sets the flag.
 * Returns 0 on success, -EINVAL for an out-of-range IRQ number.
 *
 * NOTE(review): irq_happened[] should be volatile (or atomic) since it
 * is written from interrupt context -- confirm at its declaration.
 */
int irq_wait(irqnum_t irqnum) {
    return_err_if(irqnum >= 16, -EINVAL, "Wrong IRQ number");

    irq_happened[irqnum] = false;
    /* BUG FIX: keep waiting while the IRQ has NOT happened.  The old
     * condition `while (irq_happened[irqnum])` exited as soon as the
     * flag was still false, i.e. before the interrupt arrived. */
    do cpu_halt();
    while (!irq_happened[irqnum]);

    return 0;
}
/*
 * MCU halt-control write handler.  Writing offset 0 passes 1 to
 * cpu_halt() for CPU 1, any other offset passes 0.  (Which value means
 * "run" vs "halt" is defined by cpu_halt() itself.)  `data` is unused;
 * the handler signature is fixed by the memory-write interface.
 */
static void pacland_halt_mcu_w( int offset, int data )
{
	cpu_halt( 1, (offset == 0) ? 1 : 0 );
}
/*
 * Take this CPU permanently out of service: raise XTP (presumably
 * masking external interrupts -- confirm against max_xtp()), disable
 * local interrupts, and halt.
 */
void
cpu_die(void)
{
	max_xtp();
	local_irq_disable();
	cpu_halt();
	/* Should never be here: cpu_halt() is not expected to return. */
	BUG();
	for (;;);
}
/*
 * Take this CPU permanently out of service: raise XTP (presumably
 * masking external interrupts -- confirm against max_xtp()), disable
 * local interrupts, and halt.
 */
void
cpu_die(void)
{
	max_xtp();
	local_irq_disable();
	cpu_halt();
	/* Should never be here */
	BUG();
	for (;;);
}
/*
 * IPI handler: halt this processor on request from another CPU.
 * Flushes this CPU's FPU state (second argument presumably selects
 * save-and-discard -- confirm against fpusave_cpu()), blocks further
 * interrupts via splhigh(), and halts.  Never returns.
 */
void
alpha_ipi_halt(struct cpu_info *ci, struct trapframe *framep)
{
	SCHED_ASSERT_UNLOCKED();

	fpusave_cpu(ci, 1);
	(void)splhigh();
	cpu_halt();
	/* NOTREACHED */
}
/*
 * Interactive timer test: install a key handler and a timer callback,
 * then idle (halting between interrupts) until the key handler sets
 * poll_exit.  Both callbacks are removed before returning.
 */
void test_timer(void) {
    poll_exit = false;
    kbd_set_onpress((kbd_event_f)tt_keypress);

    timer_t tid = timer_push_ontimer(on_timer);

    for (;;) {
        if (poll_exit)
            break;
        cpu_halt();
    }

    timer_pop_ontimer(tid);
    kbd_set_onpress(null);
}
/* Take this CPU offline and halt it; used when stopping all CPUs. */
static void stop_this_cpu(void)
{
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	max_xtp();
	local_irq_disable();
	cpu_halt();
}
/* Take this CPU offline and halt it; used when stopping all CPUs. */
static void stop_this_cpu(void)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	max_xtp();
	local_irq_disable();
	cpu_halt();
}
/* Take this CPU offline and halt it; used when stopping all CPUs. */
static void
stop_this_cpu (void)
{
	extern void cpu_halt (void);
	/*
	 * Remove this CPU:
	 */
	clear_bit(smp_processor_id(), &cpu_online_map);
	max_xtp();
	__cli();
	cpu_halt();
}
/*
 * Demo kernel task B: prints '1' forever, rewinding to the start of
 * the line every 75 characters.  The `> 75` branch is a defensive
 * check -- with the modulo reset it should be unreachable; if it ever
 * fires, log it and halt forever.
 */
void do_task1(void)
{
    int col = 0;
    for (;;) {
        ++col;
        if ((col % 75) == 0) {
            col = 0;
            k_printf("\r");
        }
        if (col > 75) {
            logmsgf("\nB: assert i <= 75 failed, i=0x%x\n", col);
            for (;;)
                cpu_halt();
        }
        k_printf("1");
    }
}
/*
 * If the shutdown was a clean halt, behave accordingly.
 */
static void
shutdown_halt(void *junk, int howto)
{
	if (howto & RB_HALT) {
		kprintf("\n");
		kprintf("The operating system has halted.\n");
#ifdef _KERNEL_VIRTUAL
		/* Virtual kernel: no console to prompt on, just halt. */
		cpu_halt();
#else
		kprintf("Please press any key to reboot.\n\n");
		switch (cngetc()) {
		case -1:		/* No console, just die */
			cpu_halt();
			/* NOTREACHED */
		default:
			/* Any keypress: clear the halt bit so the caller
			 * proceeds with a reboot instead. */
			howto &= ~RB_HALT;
			break;
		}
#endif
	}
}
/*
 * Machine reset for Slap Fight: clear the main-CPU status latches,
 * reset the "getstar" protection sequence state, and hold CPU 1 (the
 * sound CPU) with its interrupts disabled.
 */
void slapfight_init_machine(void)
{
	/* MAIN CPU state */
	slapfight_status_state = 0;
	slapfight_status = 0xc7;

	/* protection sequence + sound-interrupt gate */
	getstar_sequence_index = 0;
	getstar_sh_intenabled = 0;	/* disable sound cpu interrupts */

	/* SOUND CPU: hold it halted */
	cpu_halt(1, 0);
}
/*
 * Demo kernel task A: report the current privilege level once, then
 * print '0' forever, rewinding to the start of the line every 75
 * characters.  The `> 75` branch is a defensive check -- with the
 * modulo reset it should be unreachable; if it fires, log and halt.
 */
void do_task0(void)
{
    k_printf("do_task0: CPL = %d\n", (int)i386_current_privlevel());

    int col = 0;
    for (;;) {
        ++col;
        if ((col % 75) == 0) {
            col = 0;
            k_printf("\r");
        }
        if (col > 75) {
            logmsgf("\nA: assert i <= 75 failed, i=0x%x\n", col);
            for (;;)
                cpu_halt();
        }
        k_printf("0");
    }
}
/*
 * Have the bus controller power the system off.
 */
void
lamebus_poweroff(struct lamebus_softc *lamebus)
{
	/*
	 * Write 0 to the power register to shut the system off.
	 * Interrupts are turned off first so nothing preempts the
	 * final halt.
	 */
	cpu_irqoff();
	write_ctl_register(lamebus, CTLREG_PWR, 0);

	/* The power doesn't go off instantly... so halt the cpu. */
	cpu_halt();
}
/*
 * Emulate the exit() system call: read the exit status from the
 * register gpr[arg0], trace it if OS-emulation tracing is enabled,
 * and stop the simulated cpu with reason was_exited.
 */
static void
do_exit(os_emul_data *emul,
	unsigned call,
	const int arg0,
	cpu *processor,
	unsigned_word cia)
{
	int status = (int)cpu_registers(processor)->gpr[arg0];
	SYS(exit);
	if (WITH_TRACE && ppc_trace[trace_os_emul])
		printf_filtered ("%d)\n", status);

	cpu_halt(processor, cia, was_exited, status);
}
/*
 * Emulate the kill() system call: read pid and signal number from
 * consecutive registers gpr[arg0] and gpr[arg0+1], then stop the
 * simulated cpu with reason was_signalled.  The printed note records
 * that full kill() semantics (delivering to another process) are not
 * implemented.
 */
static void
do_kill(os_emul_data *emul,
	unsigned call,
	const int arg0,
	cpu *processor,
	unsigned_word cia)
{
	pid_t pid = cpu_registers(processor)->gpr[arg0];
	int sig = cpu_registers(processor)->gpr[arg0+1];
	if (WITH_TRACE && ppc_trace[trace_os_emul])
		printf_filtered ("%d, %d", (int)pid, sig);
	SYS(kill);
	printf_filtered("SYS_kill at 0x%lx - more to this than just being killed\n",
			(long)cia);
	cpu_halt(processor, cia, was_signalled, sig);
}
/*
 * Grow the emulated process address space to cover `addr`, attaching
 * raw memory for the newly valid range.  Returns the number of bytes
 * attached, or 0 when the address is invalid (in which case the cpu,
 * if any, is stopped with SIGSEGV).
 */
static unsigned
hw_vm_add_space(device *me,
		unsigned_word addr,
		unsigned nr_bytes,
		cpu *processor,
		unsigned_word cia)
{
  hw_vm_device *vm = (hw_vm_device*)device_data(me);
  unsigned_word block_addr;
  unsigned block_nr_bytes;

  /* an address in the stack area, allocate just down to the addressed
     page */
  if (addr >= vm->stack_base && addr < vm->stack_lower_limit) {
    block_addr = FLOOR_PAGE(addr);
    block_nr_bytes = vm->stack_lower_limit - block_addr;
    vm->stack_lower_limit = block_addr;
  }
  /* an address in the heap area, allocate all of the required heap */
  else if (addr >= vm->heap_upper_limit && addr < vm->heap_bound) {
    block_addr = vm->heap_upper_limit;
    block_nr_bytes = vm->heap_bound - vm->heap_upper_limit;
    vm->heap_upper_limit = vm->heap_bound;
  }
  /* oops - an invalid address - abort the cpu */
  else if (processor != NULL) {
    cpu_halt(processor, cia, was_signalled, SIGSEGV);
    return 0;
  }
  /* 2*oops - an invalid address and no processor */
  else {
    return 0;
  }

  /* got the parameters, allocate the space */
  device_attach_address(device_parent(me),
			attach_raw_memory,
			0 /*address space*/,
			block_addr,
			block_nr_bytes,
			access_read_write,
			me);
  return block_nr_bytes;
}
/*
 * Per-cpu idle loop: when no other threads are runnable, halt the cpu
 * until an interrupt/signal/upcall arrives, then switch away.  If all
 * cpus are down to their system threads, the 'main' thread is put back
 * on the run queue first so the system can make progress.
 */
static void
lwkt_idleloop(void *dummy)
{
    globaldata_t gd = mycpu;

    DBPRINTF(("idlestart cpu %d pri %d (should be < 32) mpcount %d (should be 0)\n",
	gd->gd_cpuid, curthread->td_pri, curthread->td_mpcount));

    gd->gd_pid = getpid();

    for (;;) {
	/*
	 * If only our 'main' thread is left, schedule it.
	 */
	if (gd->gd_num_threads == gd->gd_sys_threads) {
	    int i;
	    globaldata_t tgd;

	    /* Only reschedule main when EVERY cpu is idle. */
	    for (i = 0; i < ncpus; ++i) {
		tgd = globaldata_find(i);
		if (tgd->gd_num_threads != tgd->gd_sys_threads)
		    break;
	    }
	    if (i == ncpus && (main_td.td_flags & TDF_RUNQ) == 0)
		lwkt_schedule(&main_td);
	}

	/*
	 * Wait for an interrupt, aka wait for a signal or an upcall to
	 * occur, then switch away.  TDF_IDLE_NOHLT suppresses the halt
	 * for one pass and is consumed here.
	 */
	crit_enter();
	if (gd->gd_runqmask || (curthread->td_flags & TDF_IDLE_NOHLT)) {
	    curthread->td_flags &= ~TDF_IDLE_NOHLT;
	} else {
	    printf("cpu %d halting\n", gd->gd_cpuid);
	    cpu_halt();
	    printf("cpu %d resuming\n", gd->gd_cpuid);
	}
	crit_exit();
	lwkt_switch();
    }
}
/*
 * Write handler for the PAL device's register space.  The register is
 * decoded from the low address bits; the first source byte is the
 * value written.  A write to the reset register stops the simulated
 * cpu, using that byte as the exit status.  Always reports all
 * nr_bytes as consumed.
 */
static unsigned
hw_pal_io_write_buffer_callback(device *me,
				const void *source,
				int space,
				unsigned_word addr,
				unsigned nr_bytes,
				cpu *processor,
				unsigned_word cia)
{
  hw_pal_device *hw_pal = (hw_pal_device*)device_data(me);
  unsigned_1 *byte = (unsigned_1*)source;

  switch (addr & hw_pal_address_mask) {
  case hw_pal_reset_register:
    cpu_halt(processor, cia, was_exited, byte[0]);
    break;
  case hw_pal_int_register:
    device_interrupt_event(me,
			   byte[0], /*port*/
			   (nr_bytes > 1 ? byte[1] : 0), /* val */
			   processor, cia);
    break;
  case hw_pal_read_fifo:
    hw_pal->input.buffer = byte[0];
    DTRACE(pal, ("write - input-fifo %d\n", byte[0]));
    break;
  case hw_pal_read_status:
    hw_pal->input.status = byte[0];
    DTRACE(pal, ("write - input-status %d\n", byte[0]));
    break;
  case hw_pal_write_fifo:
    write_hw_pal(hw_pal, byte[0]);
    DTRACE(pal, ("write - output-fifo %d\n", byte[0]));
    break;
  case hw_pal_write_status:
    hw_pal->output.status = byte[0];
    DTRACE(pal, ("write - output-status %d\n", byte[0]));
    break;
  default:
    /* Unknown register: ignore the write explicitly rather than by
       silently falling out of the switch (behavior unchanged). */
    break;
  }
  return nr_bytes;
}
void test_serial(const char *arg) { logmsgf("IRQs state = 0x%x\n", (uint)irq_get_mask()); uint8_t saved_color = vcsa_get_attribute(VIDEOMEM_VCSA); k_printf("Use <Esc> to quit, <Del> for register info, <F1> to toggle char/code mode\n"); serial_setup(); //poll_serial(); poll_exit = false; serial_set_on_receive(on_serial_received); kbd_set_onpress(on_press); while (!poll_exit) cpu_halt(); kbd_set_onpress(null); serial_set_on_receive(null); vcsa_set_attribute(0, saved_color); }
/* All cores end up calling this whenever there is nothing left to do or they
 * don't know explicitly what to do.  Non-zero cores call it when they are done
 * booting.  Other cases include after getting a DEATH IPI.
 *
 * All cores attempt to run the context of any owning proc.  Barring that, they
 * halt and wake up when interrupted, do any work on their work queue, then halt
 * again.  In between, the ksched gets a chance to tell it to do something else,
 * or perhaps to halt in another manner. */
static void __attribute__((noreturn)) __smp_idle(void *arg)
{
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];

	pcpui->cur_kthread->flags = KTH_DEFAULT_FLAGS;
	while (1) {
		/* This might wake a kthread (the gp ktask), so be sure to
		 * run PRKM after reporting the quiescent state. */
		rcu_report_qs();
		/* If this runs an RKM, we'll call smp_idle from the top. */
		process_routine_kmsg();
		try_run_proc();
		cpu_bored();		/* call out to the ksched */
		/* cpu_halt() atomically turns on interrupts and halts the
		 * core.  Important to do this, since we could have a RKM come
		 * in via an interrupt right while PRKM is returning, and we
		 * wouldn't catch it.  When it returns, IRQs are back off. */
		__set_cpu_state(pcpui, CPU_STATE_IDLE);
		cpu_halt();
		__set_cpu_state(pcpui, CPU_STATE_KERNEL);
	}
	/* Declared noreturn; the loop above never exits. */
	assert(0);
}
/* All cores end up calling this whenever there is nothing left to do or they
 * don't know explicitly what to do.  Non-zero cores call it when they are done
 * booting.  Other cases include after getting a DEATH IPI.
 *
 * All cores attempt to run the context of any owning proc.  Barring that, they
 * halt and wake up when interrupted, do any work on their work queue, then halt
 * again.  In between, the ksched gets a chance to tell it to do something else,
 * or perhaps to halt in another manner. */
static void __attribute__((noinline, noreturn)) __smp_idle(void)
{
	struct per_cpu_info *pcpui = &per_cpu_info[core_id()];

	disable_irq();	/* might not be needed - need to look at KMSGs closely */
	clear_rkmsg(pcpui);
	pcpui->cur_kthread->flags = KTH_DEFAULT_FLAGS;
	enable_irq();	/* one-shot change to get any IRQs before we halt later */
	while (1) {
		disable_irq();
		process_routine_kmsg();
		try_run_proc();
		cpu_bored();		/* call out to the ksched */
		/* cpu_halt() atomically turns on interrupts and halts the core.
		 * Important to do this, since we could have a RKM come in via an
		 * interrupt right while PRKM is returning, and we wouldn't catch
		 * it. */
		__set_cpu_state(pcpui, CPU_STATE_IDLE);
		cpu_halt();	/* interrupts are back on now (given our current semantics) */
	}
	/* Declared noreturn; the loop above never exits. */
	assert(0);
}
/*
 * "Reset" the CPU.  No actual reset mechanism is available here: halt
 * and spin forever.  NOTE(review): despite the name this never
 * restarts the machine -- confirm that is intended.
 *
 * Fix: `()` (old-style, unspecified parameters) replaced with `(void)`
 * for a proper prototype; callers are unaffected.
 */
void cpu_reset(void)
{
    cpu_halt();
    /* In case cpu_halt() ever returns (e.g. woken by an interrupt),
     * never fall out of this function. */
    while (1);
}
/*
 * IPI handler: halt this CPU on request from another processor.
 * Increments the global halted-CPU counter first (presumably so the
 * requesting CPU can wait for acknowledgement -- confirm at the
 * counter's users), then halts.  `regs` is unused; the signature is
 * dictated by the interrupt-handler interface.
 */
void cpu_handle_ipi_halt(struct registers *regs)
{
	atomic_fetch_add(&num_halted_cpus, 1);
	cpu_halt();
}
/*
 * Multitasking demo: set up two user-mode tasks (task0/task1), install
 * a round-robin scheduler and a key handler, then halt until the first
 * timer tick hands control to do_task0.  Returns after the key handler
 * sets `quit` and control comes back to this (the default) task.
 */
void test_tasks(void) {
    def_task = task_current();

#if 0
    task_kthread_init(&task0, (void *)do_task0,
                      (void *)((ptr_t)task0_stack + TASK_KERNSTACK_SIZE - 0x20));
    task_kthread_init(&task1, (void *)do_task1,
                      (void *)((ptr_t)task1_stack + TASK_KERNSTACK_SIZE - 0x20));
#else
    const segment_selector ucs = { .as.word = SEL_USER_CS };
    const segment_selector uds = { .as.word = SEL_USER_DS };

    /* Top-of-stack addresses for each task's user and kernel stacks.
     * The magic offsets (0x18, 0x14, CONTEXT_SIZE) leave room for the
     * initial context -- presumably matched to task_init()'s layout;
     * confirm there before changing. */
    uint espU0 = ((uint)task0_usr_stack + R3_STACK_SIZE - 0x18);
    uint espK0 = ((uint)task0_stack + TASK_KERNSTACK_SIZE - CONTEXT_SIZE - 0x14);
    uint espU1 = ((ptr_t)task1_usr_stack + R3_STACK_SIZE);
    uint espK1 = ((ptr_t)task1_stack + TASK_KERNSTACK_SIZE - CONTEXT_SIZE - 0x14);

    task_init((task_struct *)&task0, (void *)do_task0,
              (void *)espK0, (void *)espU0, ucs, uds);
    task_init((task_struct *)&task1, (void *)do_task1,
              (void *)espK1, (void *)espU1, ucs, uds);
#endif

    /* allow tasks to update cursor with `outl` */
    task0.tss.eflags |= eflags_iopl(PL_USER);
    task1.tss.eflags |= eflags_iopl(PL_USER);

    quit = false;
    kbd_set_onpress((kbd_event_f)key_press);
    task_set_scheduler(next_task);

    /* wait for first timer tick, when execution will be transferred to do_task0 */
    cpu_halt();

    /* back here once the scheduler returns to the default task */
    task_set_scheduler(null);
    kbd_set_onpress(null);

    k_printf("\nBye.\n");
}

/***********************************************************/

/*
 * Body of the ring-3 demo task: print the current privilege level via
 * the int 0x80 write syscall, then spin forever.
 */
void run_userspace(void) {
    char buf[100];
    snprintf(buf, 100, "Current privilege level = %d\n", i386_current_privlevel());
    size_t nbuf = strlen(buf);

    /* syscall: write(STDOUT_FILENO, buf, nbuf) */
    asm("int $0x80 \n"
        :: "a"(SYS_WRITE), "c"(STDOUT_FILENO), "d"((uint)buf), "b"(nbuf));

    while (1);
}

extern void start_userspace(uint eip3, uint cs3, uint eflags, uint esp3, uint ss3);

/* TSS for the userspace demo task below. */
task_struct task3;

/*
 * Userspace-transition demo: build a TSS and GDT task descriptor for
 * task3, load the task register, and jump to ring 3 at run_userspace.
 * Does not return through the normal path.
 */
void test_userspace(void) {
    /* init task */
    task3.tss.eflags = x86_eflags(); // | eflags_iopl(PL_USER);
    task3.tss.cs = SEL_USER_CS;
    task3.tss.ds = task3.tss.es = task3.tss.fs = task3.tss.gs = SEL_USER_DS;
    task3.tss.ss = SEL_USER_DS;
    task3.tss.esp = (uint)task0_usr_stack + R3_STACK_SIZE - CONTEXT_SIZE - 0x20;
    task3.tss.eip = (uint)run_userspace;
    /* ring-0 stack used on interrupts/syscalls from ring 3 */
    task3.tss.ss0 = SEL_KERN_DS;
    task3.tss.esp0 = (uint)task0_stack + R0_STACK_SIZE - CONTEXT_SIZE - 0x20;

    /* make a GDT task descriptor */
    segment_descriptor taskdescr;
    segdescr_taskstate_init(taskdescr, (uint)&(task3.tss), PL_USER);
    segdescr_taskstate_busy(taskdescr, 0);

    index_t taskdescr_index = gdt_alloc_entry(taskdescr);

    segment_selector tss_sel;
    tss_sel.as.word = make_selector(taskdescr_index, 0, taskdescr.as.strct.dpl);

    kbd_set_onpress((kbd_event_f)key_press);

    /* hardcoded EFLAGS image for the ring-3 entry -- presumably
     * IF|IOPL bits; confirm against x86 EFLAGS layout */
    uint efl = 0x00203202;

    test_eflags();
    logmsgf("efl = 0x%x\n", efl);
    logmsgf("tss_sel = 0x%x\n", (uint)tss_sel.as.word);
    logmsgf("tssd = %x %x\n", taskdescr.as.ints[0], taskdescr.as.ints[1]);
    logmsgf("tssd.base = %x\n",
            taskdescr.as.strct.base_l + (taskdescr.as.strct.base_m << 16)
            + (taskdescr.as.strct.base_h << 24));

    /* load TR and LDT */
    i386_load_task_reg(tss_sel);
    //asm ("lldt %%ax \n\t"::"a"( SEL_DEF_LDT ));

    /* go userspace */
    start_userspace(
            (uint)run_userspace,
            task3.tss.cs,
            //(uint)run_userspace, (uint)tss_sel.as.word,
            efl,
            task3.tss.esp,
            task3.tss.ss);
}
/*
 * Halt the system.
 * On some systems, this would return to the boot monitor.  But we don't
 * have one, so just halt the cpu.
 */
void
mainbus_halt(void)
{
	cpu_halt();
}
/*
 * Low power app example.
 *
 * Demonstrates three power states in sequence: CPU halt (woken by an
 * RTC alarm), SoC sleep (woken by the RTC), and deep sleep (woken by
 * an analog-comparator interrupt, which requires a physical voltage
 * change on the comparator pin).  Pin-mux state is saved before deep
 * sleep and restored afterwards.
 */
int main(void)
{
	/* Setup the RTC to get out of sleep mode. deep sleep will require an */
	/* analog comparator interrupt to wake up the system. */

	/* Variables */
	uint32_t pmux_sel_save[2], pmux_in_en_save, pmux_pullup_save;
	qm_ac_config_t ac_cfg;
	qm_rtc_config_t rtc_cfg;

	QM_PUTS("Low power mode example.");

	clk_periph_enable(CLK_PERIPH_RTC_REGISTER | CLK_PERIPH_CLK);

	/* Initialise RTC configuration: alarm one second after a zeroed
	 * counter, handled by rtc_example_callback. */
	rtc_cfg.init_val = 0;
	rtc_cfg.alarm_en = 1;
	rtc_cfg.alarm_val = QM_RTC_ALARM_SECOND;
	rtc_cfg.callback = rtc_example_callback;
	qm_rtc_set_config(QM_RTC_0, &rtc_cfg);

	qm_irq_request(QM_IRQ_RTC_0, qm_rtc_isr_0);

	QM_PUTS("CPU Halt.");
	/* Halt the CPU, RTC alarm will wake me up. */
	cpu_halt();
	QM_PUTS("CPU Halt wakeup.");

	/* Set another alarm one minute from now. */
	/* NOTE(review): the offset used is QM_RTC_ALARM_SECOND -- either
	 * the comment or the constant looks wrong; confirm intended delay. */
	qm_rtc_set_alarm(QM_RTC_0,
			 QM_RTC[QM_RTC_0].rtc_ccvr + QM_RTC_ALARM_SECOND);
	QM_PUTS("Go to sleep.");

	/* Go to sleep, RTC will wake me up. */
	soc_sleep();
	QM_PUTS("Wake up from sleep.");

	/* Physical step at this stage to raise the V on the comparator pin. */
	/* Go to deep sleep, a comparator should wake me up. */
	QM_PUTS("Go to deep sleep.");

	ac_cfg.reference = BIT(WAKEUP_COMPARATOR_PIN); /* Ref internal voltage */
	ac_cfg.polarity = 0x0; /* Fire if greater than ref (high level) */
	ac_cfg.power = BIT(WAKEUP_COMPARATOR_PIN); /* Normal operation mode */
	ac_cfg.int_en = BIT(WAKEUP_COMPARATOR_PIN); /* Enable comparator */
	ac_cfg.callback = ac_example_callback;
	qm_ac_set_config(&ac_cfg);

	qm_irq_request(QM_IRQ_AC, qm_ac_isr);

	/*
	 * Comparator pin will fire an interrupt when the input voltage is
	 * greater than the reference voltage (0.95V).
	 */

	/*
	 * In order to minimise power, pmux_sel must be set to 0, input enable
	 * must be cleared for any pins not expecting to be used to wake the
	 * SoC from deep sleep mode, in this example we are using AC 6.
	 */
	/* Save current pinmux state so it can be restored after wakeup. */
	pmux_sel_save[0] = QM_SCSS_PMUX->pmux_sel[0];
	pmux_sel_save[1] = QM_SCSS_PMUX->pmux_sel[1];
	pmux_in_en_save = QM_SCSS_PMUX->pmux_in_en[0];
	pmux_pullup_save = QM_SCSS_PMUX->pmux_pullup[0];

	QM_SCSS_PMUX->pmux_sel[0] = QM_SCSS_PMUX->pmux_sel[1] = 0;
	QM_SCSS_PMUX->pmux_in_en[0] = BIT(WAKEUP_COMPARATOR_PIN);
	QM_SCSS_PMUX->pmux_pullup[0] = 0;

	/* Mux out comparator */
	qm_pmux_select(QM_PIN_ID_6, QM_PMUX_FN_1);
	qm_pmux_input_en(QM_PIN_ID_6, true);

	soc_deep_sleep();

	/* Restore previous pinmuxing settings. */
	QM_SCSS_PMUX->pmux_sel[0] = pmux_sel_save[0];
	QM_SCSS_PMUX->pmux_sel[1] = pmux_sel_save[1];
	QM_SCSS_PMUX->pmux_in_en[0] = pmux_in_en_save;
	QM_SCSS_PMUX->pmux_pullup[0] = pmux_pullup_save;

	QM_PUTS("Wake up from deep sleep.");

	return 0;
}