/*
 * do_irq - common interrupt entry point.
 *
 * Dispatches the IRQ identified by context.irqnr to its registered handler
 * (or default_irq_handler), bracketing the call with the hardware
 * ack/end handshake and an IRQ nesting counter.
 *
 * NOTE(review): the nesting counter is read *after* hw_restore_flags, so an
 * interrupt could fire between the restore and the SCHED_PENDING test —
 * presumably the scheduling()/do_signals() tail is designed to tolerate
 * that window; confirm against the scheduler's contract.
 */
void do_irq (context_t context) {
  int flags;

  // Mask interrupts while manipulating the nesting counter and PIC state
  hw_save_flags_and_cli (flags);
#if 1
  irq_nesting_counter ++;
  // Acknowledge at the interrupt controller if this line requires it
  if (hw_irq_ctrl [context.irqnr].ack)
    hw_ack_irq (context.irqnr);
#endif
  // Dispatch to the installed handler, falling back to the default one
  if (irq_handler_table [context.irqnr])
    (*irq_handler_table [context.irqnr]) (&context);
  else
    default_irq_handler (&context);
#if 1
  // Signal end-of-interrupt if this line requires it
  if (hw_irq_ctrl [context.irqnr].end)
    hw_end_irq (context.irqnr);
  irq_nesting_counter --;
#endif
  hw_restore_flags (flags);
  // SCHED_PENDING is presumably a flag value encoded in the nesting counter
  // (set when a reschedule was requested inside the handler) — TODO confirm
  if (irq_nesting_counter == SCHED_PENDING) {
    scheduling ();
  }
  // Deliver any signals raised for the interrupted thread
  do_signals();
}
/*
 * pthread_join_sys - syscall backend for pthread_join().
 *
 * Blocks the calling thread until "thread" reaches FINISHED_THREAD, then
 * stores its exit value through value_ptr (if non-NULL) and reclaims the
 * thread structure.
 *
 * Returns 0 on success, -1 with errno set to:
 *   ESRCH   - invalid/unknown thread handle
 *   EINVAL  - target is (or becomes) detached
 *   EDEADLK - the caller tried to join itself (would deadlock)
 *
 * Changes vs. original: removed the unused local `old_thread` (it was only
 * referenced by commented-out debug printfs, also removed) and added the
 * POSIX-sanctioned self-join deadlock check.
 */
asmlinkage int pthread_join_sys (pthread_t thread, void **value_ptr) {
  int flags, c_state;

  // Validate the handle via the magic number stamped at creation
  if (!thread || thread -> magic_number != PTHREAD_MAGIC_NUMBER) {
    SET_ERRNO (ESRCH);
    return -1;
  }

  // Joining a detached thread is an error
  if (GET_THREAD_DETACH_STATE(thread)) {
    SET_ERRNO (EINVAL);
    return -1;
  }

  // A thread joining itself would block forever waiting on its own exit
  if (thread == current_thread) {
    SET_ERRNO (EDEADLK);
    return -1;
  }

  hw_save_flags_and_cli (flags);
  c_state = GET_THREAD_STATE (thread);
  while (c_state != FINISHED_THREAD) {
    // Register ourselves as the joiner so the target (or pthread_detach)
    // can wake us, then block and yield the CPU
    thread -> joining_thread = current_thread;
    suspend_thread (current_thread);
    scheduling ();
    c_state = GET_THREAD_STATE (thread);
    // The target may have been detached while we slept; give up if so
    if (GET_THREAD_DETACH_STATE(thread)) {
      hw_restore_flags (flags);
      SET_ERRNO (EINVAL);
      return -1;
    }
  }

  if (value_ptr)
    *value_ptr = thread -> exit_value;

  // Target has finished and is joined: reclaim its structure
  delete_pthread_struct (thread);
  hw_restore_flags (flags);
  return 0;
}
// sync_event calls the event handlers void sync_events (void) { unsigned long hw_flags; hw_save_flags_and_cli(&hw_flags); if (is_events_flag_enabled (xm_current_domain) && is_some_bit_set (xm_current_domain -> events -> pending_events)) { if (xm_current_domain == xm_root_domain) sync_root_events (); else sync_domain_events (); // Events flag is automatically enabled (iret emulation) //enable_events_flag (xm_root_domain); } hw_restore_flags (hw_flags); }
/*
 * sync_domain_events - run every unmasked pending event handler of the
 * current (non-root) domain.
 *
 * Snapshots the pending-event bitmap, clears the unmasked pending bits in
 * the domain structure, and invokes each handler with a fake register frame.
 * Each handler runs with hardware interrupts enabled but with the domain's
 * events flag disabled and its own event masked — mirroring how a CPU
 * delivers an interrupt; enable_events_flag afterwards emulates iret.
 */
static void sync_domain_events (void) {
  unsigned long hw_flags, event;
  volatile bitmap_t pending;
  struct pt_regs dummy_regs = FAKE_REGS;

  // Nothing to deliver to a finished domain
  if (get_domain_state (xm_current_domain) == DOMAIN_FINISHED)
    return;

  hw_save_flags_and_cli(&hw_flags);

  // Snapshot pending events, then leave only the masked ones pending in the
  // domain (the unmasked ones are consumed by this pass)
  pending = xm_current_domain -> events -> pending_events;
  xm_current_domain -> events -> pending_events &=
    xm_current_domain -> events -> masked_events;

  // Executing all pending events
  while ((event = get_next_set_bit_and_clear (pending,
          xm_current_domain -> events -> masked_events)) != -1) {
    // An event pending when there is not any handler installed
    // catching it??? it must be an error
    //assert (xm_current_domain -> event_handler [event]);
    if (!xm_current_domain -> events -> event_handler [event])
      continue;

    // Before executing an event handler, events' flag is disabled
    // and the event is masked
    set_bit (xm_current_domain -> events -> masked_events, event);
    disable_events_flag (xm_current_domain);
    SET_PT_REG_IRQ (dummy_regs, event);

    // Run the handler with hardware interrupts enabled
    hw_sti ();
    // Here, the event handler is executed always
    // with the same conditions, that is, event flag disabled
    // and the executed event masked as well
    (*xm_current_domain -> events -> event_handler[event]) (event, &dummy_regs);
    hw_cli ();

    // Events flag is automatically enabled (iret emulation)
    enable_events_flag (xm_current_domain);
  }

  hw_restore_flags (hw_flags);
}
/*
 * pthread_detach_sys - syscall backend for pthread_detach().
 *
 * Marks "thread" as detached so its resources are reclaimed automatically on
 * exit, and wakes any thread currently blocked in pthread_join_sys() on it
 * (that joiner will then observe the detached state and fail with EINVAL).
 *
 * Returns 0 on success, -1 with errno set to ESRCH (bad handle) or
 * EINVAL (already detached).
 *
 * BUG FIX: the original woke current_thread->joining_thread — i.e. whoever
 * was joining the *caller* — instead of thread->joining_thread, the field
 * pthread_join_sys() actually sets on the target being detached. Also guard
 * against activating a null joiner when nobody is waiting.
 */
asmlinkage int pthread_detach_sys (pthread_t thread) {
  int flags;

  // Validate the handle via the magic number stamped at creation
  if (!thread || thread -> magic_number != PTHREAD_MAGIC_NUMBER) {
    SET_ERRNO (ESRCH);
    return -1;
  }

  // Detaching twice is an error
  if (GET_THREAD_DETACH_STATE(thread)) {
    SET_ERRNO (EINVAL);
    return -1;
  }

  hw_save_flags_and_cli (flags);
  SET_THREAD_DETACH_STATE(thread, 1);

  // Wake the thread joined on *this* target (if any); it will re-check the
  // detach state in its join loop and return EINVAL
  if (thread -> joining_thread) {
    activate_thread (thread -> joining_thread);
    thread -> joining_thread = 0;
  }

  hw_restore_flags (flags);
  scheduling ();
  return 0;
}
/*
 * pthread_create_sys - syscall backend for pthread_create().
 *
 * Allocates and configures a thread structure (from attr, or defaults),
 * allocates a stack unless the caller supplied one, builds the initial
 * architecture-dependent stack frame, makes the thread runnable, and
 * invokes the scheduler.
 *
 * Returns 0 on success, -1 with errno set to:
 *   EINVAL - unsupported policy or priority out of range
 *   EAGAIN - thread-structure or stack allocation failed
 *
 * BUG FIX: the original validated attr *after* hw_save_flags_and_cli and
 * returned on EINVAL without hw_restore_flags, leaving interrupts disabled.
 * Validation is now done before entering the critical section. A leftover
 * debug printf (and its temp variable) was also removed.
 */
asmlinkage int pthread_create_sys (pthread_t *thread, const pthread_attr_t *attr,
                                   void *(*startup)(void *),
                                   void *(*start_routine)(void *), void *args) {
  int flags;

  // Check policy & prio (outside the cli region: early returns here must
  // not leave interrupts disabled)
  if (attr) {
    if (attr->policy != SCHED_FIFO) {
      SET_ERRNO(EINVAL);
      return -1;
    }
    // NOTE: priorities grow downwards — MIN_SCHED_PRIORITY is presumably the
    // largest numeric value and MAX_SCHED_PRIORITY the smallest; confirm
    // against the scheduler's priority convention
    if (attr -> sched_param.sched_priority > MIN_SCHED_PRIORITY ||
        attr -> sched_param.sched_priority < MAX_SCHED_PRIORITY) {
      SET_ERRNO(EINVAL);
      return -1;
    }
  }

  hw_save_flags_and_cli (flags);

  // Creating the pthread structure
  if (!(*thread = create_pthread_struct ())) {
    SET_ERRNO (EAGAIN);
    hw_restore_flags (flags);
    return -1;
  }

  /*
   * Configuring the new thread either with attr (if not NULL)
   * or with the default values
   */
  if (attr) {
    (*thread) -> sched_param = attr -> sched_param;
    (*thread) -> stack_info.stack_size = attr -> stack_size;
    (*thread) -> stack_info.stack_bottom = attr -> stack_addr;
    SET_THREAD_DETACH_STATE((*thread), attr -> detachstate);
    SET_THREAD_POLICY ((*thread), attr -> policy);
  } else {
    (*thread) -> sched_param.sched_priority = MIN_SCHED_PRIORITY;
    (*thread) -> stack_info.stack_size = STACK_SIZE;
    (*thread) -> stack_info.stack_bottom = 0;
    SET_THREAD_DETACH_STATE((*thread), 0);
    SET_THREAD_POLICY ((*thread), SCHED_FIFO);
  }

  // Allocate a stack unless the caller supplied one via attr->stack_addr
  if (!((*thread) -> stack_info.stack_bottom)) {
    if (alloc_stack (&(*thread) -> stack_info) < 0) {
      // NOTE(review): the structure from create_pthread_struct() is leaked
      // on this path — consider delete_pthread_struct (*thread) here
      SET_ERRNO (EAGAIN);
      hw_restore_flags (flags);
      return -1;
    }
  }

  // This is architecture dependent: build the initial frame at the stack top.
  // stack_bottom is a word pointer, so the byte size is scaled to words.
  (*thread) -> stack =
    setup_stack ((*thread)->stack_info.stack_bottom +
                 (*thread)->stack_info.stack_size / sizeof (int),
                 startup, start_routine, args);

  activate_thread (*thread);

  // no error at all
  hw_restore_flags (flags);

  // Calling the scheduler so the new thread can preempt us if appropriate
  scheduling ();
  return 0;
}