/* Schedules a thread for deletion. */
void destroy_thread(struct Thread *thread) {
    /* Before entering, make sure the thread isn't awake. */
    lock_interrupts();
    struct Process *process = thread->process;

    /* Unlink this thread from its process (or from the kernel thread list). */
    if (thread->next != 0)
        thread->next->previous = thread->previous;

    if (thread->previous != 0)
        thread->previous->next = thread->next;
    else if (process == 0)
        kernel_threads = thread->next;
    else
        process->threads = thread->next;

    /* Keep the process's thread count in sync regardless of the thread's
       position in the list. */
    if (process != 0)
        process->threads_count--;

    /* Schedule this thread for deletion. */
    thread->next = next_thread_to_clean;
    next_thread_to_clean = thread;

    /* Wake up the thread cleaner. */
    schedule_thread(thread_cleaner_thread);
    unlock_interrupts();
}
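The consumer side of next_thread_to_clean isn't shown above. A minimal sketch of what the cleaner thread's loop might look like, where free_thread() and sleep_current_thread() are assumed names, not part of the source:

/* Hypothetical consumer for the next_thread_to_clean list built by
   destroy_thread(). Everything except lock_interrupts()/unlock_interrupts()
   and next_thread_to_clean is an assumption. */
void thread_cleaner_entry() {
    while (1) {
        /* Atomically take ownership of the whole pending list. */
        lock_interrupts();
        struct Thread *thread = next_thread_to_clean;
        next_thread_to_clean = 0;
        unlock_interrupts();

        /* Release every thread that was queued for deletion. */
        while (thread != 0) {
            struct Thread *next = thread->next;
            free_thread(thread); /* hypothetical: frees stack + struct */
            thread = next;
        }

        sleep_current_thread(); /* hypothetical: block until rescheduled */
    }
}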
/* Blocks the calling thread for at least msecs milliseconds. */
void thread_delay(unsigned msecs) {
    thread_t *prev, *next;
    uint64_t st;
    uint64_t delay;

    if (!msecs)
        return;

    /* record the start tick and convert the delay into ticks */
    st = clock_get_ticks();
    delay = clock_convert_msecs_to_ticks(msecs);

    prev = scheduler_running_thread();
    if (!scheduler_delay_thread(st + delay)) {
        kernel_panic("cannot delay a thread.\n");
        return;
    }

    /* pick the next runnable thread and switch to it */
    schedule_thread();
    next = scheduler_running_thread();
    switch_context(prev->context, next->context);
}
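A minimal usage sketch of thread_delay() in a periodic worker; led_toggle() is an assumed board-support routine, not part of the source above:

/* Hypothetical periodic worker built on thread_delay(). */
void blinker_entry(void *arg) {
    (void)arg;
    for (;;) {
        led_toggle();      /* hypothetical board function */
        thread_delay(500); /* sleep for roughly half a second */
    }
}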
///////////////////////////////////////////////////////////////////////
// create a new thread and schedule it if the initial state is equal
// to pending
thread_id_type create_thread(thread_init_data& data,
    thread_state_enum initial_state, bool run_now, std::size_t num_thread,
    error_code& ec)
{
    if (run_now) {
        mutex_type::scoped_lock lk(mtx_);

        HPX_STD_UNIQUE_PTR<threads::thread_data> thrd(
            new (memory_pool_) threads::thread_data(
                data, memory_pool_, initial_state));

        // add a new entry in the map for this thread
        thread_id_type id = thrd->get_thread_id();
        std::pair<thread_map_type::iterator, bool> p =
            thread_map_.insert(id, thrd.get());

        if (HPX_UNLIKELY(!p.second)) {
            HPX_THROWS_IF(ec, hpx::out_of_memory,
                "threadmanager::register_thread",
                "Couldn't add new thread to the map of threads");
            return invalid_thread_id;
        }

        // push the new thread into the pending queue
        if (initial_state == pending)
            schedule_thread(thrd.get(), num_thread);

        // this thread has to be in the map now
        BOOST_ASSERT(thread_map_.find(id) != thread_map_.end());
        BOOST_ASSERT(thrd->is_created_from(&memory_pool_));

        do_some_work();   // try to execute the new work item
        thrd.release();   // release ownership to the map

        if (&ec != &throws)
            ec = make_success_code();

        // return the thread_id of the newly created thread
        return id;
    }

    // do not execute the work, but register a task description for
    // later thread creation
#if HPX_THREAD_MAINTAIN_QUEUE_WAITTIME
    new_tasks_.enqueue(new task_description(
        boost::move(data), initial_state,
        util::high_resolution_clock::now()));
#else
    new_tasks_.enqueue(new task_description(
        boost::move(data), initial_state));
#endif
    ++new_tasks_count_;

    if (&ec != &throws)
        ec = make_success_code();

    return invalid_thread_id; // thread has not been created yet
}
void callback_sync_handler(size_t status, void *tag) {
    struct CallbackSyncTag *_tag = (struct CallbackSyncTag *)tag;
    _tag->status = status;

    /* wake this thread */
    _tag->response = 1;
    schedule_thread(_tag->thread);
}
/* Detects a filesystem on a storage device and mounts it. */
void scan_for_fs(struct StorageDevice *storage_device) {
    /* create a thread to scan for file systems on this device */
    struct Thread *thread = create_thread(0, (size_t)scan_for_fs_entry,
        (size_t)storage_device);
    if (!thread)
        return;

    schedule_thread(thread);
}
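scan_for_fs_entry itself isn't shown; a plausible shape for it, where probe_filesystem(), mount_filesystem(), and terminate_thread() are assumed names only:

/* Hypothetical worker for scan_for_fs(); every name except
   struct StorageDevice is an assumption. */
void scan_for_fs_entry(size_t param) {
    struct StorageDevice *storage_device = (struct StorageDevice *)param;

    /* Try each registered filesystem driver until one recognizes the media. */
    struct FileSystem *fs = probe_filesystem(storage_device);
    if (fs != 0)
        mount_filesystem(storage_device, fs);

    terminate_thread(); /* hypothetical: this worker is one-shot */
}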
/* Removes the calling thread from the scheduler and never returns. */
void thread_terminate() {
    thread_t *me = scheduler_running_thread();

    (void)scheduler_remove_thread(me->tid);
    schedule_thread();

    /* load whichever thread the scheduler selected next */
    me = scheduler_running_thread();
    load_context(me->context);
}
void callback_sync_param_handler(size_t status, size_t result, void *tag) {
    struct CallbackSyncParamTag *_tag = (struct CallbackSyncParamTag *)tag;
    _tag->status = status;
    _tag->result = result;

    /* wake this thread */
    _tag->response = 1;
    schedule_thread(_tag->thread);
}
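Both handlers above are one half of a pattern that turns an asynchronous call into a blocking one. A sketch of the caller's half, where running_thread(), async_op(), and sleep_until_woken() are assumed primitives:

/* Hypothetical caller that blocks on callback_sync_handler() via a
   CallbackSyncTag; only the tag fields come from the source above. */
size_t do_op_sync(void) {
    struct CallbackSyncTag tag;
    tag.thread = running_thread(); /* hypothetical accessor */
    tag.response = 0;

    async_op(callback_sync_handler, &tag); /* hypothetical async call */

    /* Block until the handler sets response and reschedules us. */
    while (!tag.response)
        sleep_until_woken();

    return tag.status;
}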
void abort_all_suspended_threads(std::size_t num_thread)
{
    mutex_type::scoped_lock lk(mtx_);

    thread_map_type::iterator end = thread_map_.end();
    for (thread_map_type::iterator it = thread_map_.begin(); it != end; ++it)
    {
        // flag every suspended thread for abortion and reschedule it
        if ((*it).second->get_state() == suspended) {
            (*it).second->set_state_ex(wait_abort);
            (*it).second->set_state(pending);
            schedule_thread((*it).second, num_thread);
        }
    }
}
void svc_handler(unsigned int call, void *arg) {
    printk("svc_handler: got call %d with arg (%x)\r\n", call, arg);

    switch (call) {
    case SVC_THREAD_SWITCH:
        printk("SVC call asks for a thread switch\r\n");
        schedule_thread((struct thread *)arg);
        break;
    default:
        printk("Invalid svc call\r\n");
        break;
    }
}
void kernel_run() {
    thread_t *init;
    unsigned i;

    /* run every registered boot-time initializer */
    for (i = 0; i < NELEMENTS(start_array); i++) {
        start_array[i]();
    }

    kprintf("system is running.\n");

    /* pick the first runnable thread and jump into it */
    schedule_thread();
    init = scheduler_running_thread();
    load_context(init->context);
}
void create_kernel_thread(void) {
    irq_state_t irq_state = irq_save();

    thread_t *thread = (thread_t *)kmalloc(sizeof(thread_t));
    if (!thread) {
        /* restore the saved interrupt state even on the failure path */
        irq_restore(irq_state);
        return;
    }
    memset(thread, 0, sizeof(thread_t));

    thread->id = request_thread_id();
    thread->process = 0;
    thread->page_dir = current_directory;

    ++num_threads;
    schedule_thread(thread);
    irq_restore(irq_state);
}
void thread_lock_mutex(mutex_t *mtx) {
    thread_t *prev, *next;
    BOOL is_locked;

    if (!mtx) {
        return;
    }

    prev = scheduler_running_thread();
    is_locked = mutex_is_locked(mtx);

    /* if the mutex was already held, block until it is handed over */
    if (lock_mutex(prev->tid, mtx) && is_locked) {
        scheduler_wait_thread(THREAD_FLAG_WAIT_MUTEX);
        schedule_thread();
        next = scheduler_running_thread();
        switch_context(prev->context, next->context);
    }
}
void thread_suspend() {
    thread_t *prev, *next;

    prev = scheduler_running_thread();
    if (!scheduler_suspend_thread()) {
        kernel_panic("cannot suspend a thread.\n");
        return;
    }

    schedule_thread();
    next = scheduler_running_thread();

    /* No one to wait for */
    if (prev == next) {
        return;
    }

    switch_context(prev->context, next->context);
}
///////////////////////////////////////////////////////////////////////
// add new threads if there is some amount of work available
std::size_t add_new(boost::int64_t add_count, thread_queue* addfrom,
    std::unique_lock<mutex_type>& lk, bool steal = false)
{
    HPX_ASSERT(lk.owns_lock());

    if (HPX_UNLIKELY(0 == add_count))
        return 0;

    std::size_t added = 0;
    task_description* task = 0;
    while (add_count-- && addfrom->new_tasks_.pop(task, steal))
    {
#ifdef HPX_HAVE_THREAD_QUEUE_WAITTIME
        if (maintain_queue_wait_times) {
            addfrom->new_tasks_wait_ +=
                util::high_resolution_clock::now() - util::get<2>(*task);
            ++addfrom->new_tasks_wait_count_;
        }
#endif
        --addfrom->new_tasks_count_;

        // measure thread creation time
        util::block_profiler_wrapper<add_new_tag> bp(add_new_logger_);

        // create the new thread
        threads::thread_init_data& data = util::get<0>(*task);
        thread_state_enum state = util::get<1>(*task);
        threads::thread_id_type thrd;

        create_thread_object(thrd, data, state, lk);

        delete task;

        // add the new entry to the map of all threads
        std::pair<thread_map_type::iterator, bool> p = thread_map_.insert(thrd);
        if (HPX_UNLIKELY(!p.second)) {
            HPX_THROW_EXCEPTION(hpx::out_of_memory,
                "threadmanager::add_new",
                "Couldn't add new thread to the thread map");
            return 0;
        }
        ++thread_map_count_;

        // only insert the thread into the work-items queue if it is in
        // pending state
        if (state == pending) {
            // pushing the new thread into the pending queue of the
            // specified thread_queue
            ++added;
            schedule_thread(thrd.get());
        }

        // this thread has to be in the map now
        HPX_ASSERT(thread_map_.find(thrd.get()) != thread_map_.end());
        HPX_ASSERT(thrd->get_pool() == &memory_pool_);
    }

    if (added) {
        LTM_(debug) << "add_new: added " << added
                    << " tasks to queues"; //-V128
    }
    return added;
}
/* Looks up a thread by element number and schedules it. */
uint8_t schedule_thread(uint8_t el_num) {
    pthread *th = get_thread(el_num);
    assert_raise_return(th, ERR_VALUE, false);

    schedule_thread(th);
    return true;
}
void schedule_thread_last(threads::thread_data* thrd, std::size_t num_thread,
    thread_priority priority = thread_priority_normal)
{
    schedule_thread(thrd, num_thread, priority);
}
uint32_t create_thread(process_t *process, entry_t entry, void *args,
                       uint32_t priority, int user, int vm86) {
    /* create thread */
    thread_t *thread = (thread_t *)kmalloc(sizeof(thread_t));
    if (!thread) {
        return 0;
    }
    memset(thread, 0, sizeof(thread_t));

    /* setup the stack(s); free partial allocations on failure
       (assumes kfree() as kmalloc()'s counterpart) */
    thread->kstack = (uintptr_t)kmalloc(STACK_SIZE);
    if (!thread->kstack) {
        kfree(thread);
        return 0;
    }
    if (user) {
        thread->ustack = (uintptr_t)kmalloc(STACK_SIZE);
        if (!thread->ustack) {
            kfree((void *)thread->kstack);
            kfree(thread);
            return 0;
        }
    }

    uint32_t *kstack = (uint32_t *)stack_top(thread->kstack);
    uint32_t data_segment = user ? 0x20 + 3 : 0x10;
    uint32_t code_segment = user ? 0x18 + 3 : 0x08;

    /* bit 1 always set - bit 9 = interrupt flag (IF) -
     * bits 12/13 = I/O privilege level - bit 17 = virtual-8086 mode (VM) */
    uint32_t eflags = (user ? 0x3202 : 0x0202) | (vm86 ? 1 << 17 : 0);

    if (user) {
        uint32_t *ustack = (uint32_t *)stack_top(thread->ustack);
        PUSH(ustack, (uintptr_t)args);   /* args */
        PUSH(ustack, 0xdeadcaca);        /* return address - the thread should
                                          * finish with a system call, not by
                                          * jumping to this address */
        PUSH(kstack, 0x23);              /* ss */
        PUSH(kstack, (uintptr_t)ustack); /* esp */
    } else {
        PUSH(kstack, (uintptr_t)args);         /* args */
        PUSH(kstack, (uintptr_t)&thread_exit); /* return address */
    }

    PUSH(kstack, eflags);           /* eflags */
    PUSH(kstack, code_segment);     /* cs */
    PUSH(kstack, (uintptr_t)entry); /* eip */
    PUSH(kstack, 0);                /* error code */
    PUSH(kstack, 0);                /* interrupt number */
    PUSH(kstack, 0);                /* eax */
    PUSH(kstack, 0);                /* ebx */
    PUSH(kstack, 0);                /* ecx */
    PUSH(kstack, 0);                /* edx */
    PUSH(kstack, 0);                /* esi */
    PUSH(kstack, 0);                /* edi */
    PUSH(kstack, 0);                /* ebp */
    PUSH(kstack, data_segment);     /* ds */
    PUSH(kstack, data_segment);     /* es */
    PUSH(kstack, data_segment);     /* fs */
    PUSH(kstack, data_segment);     /* gs */

    thread->esp = (uintptr_t)kstack;
    thread->ss = data_segment;
    thread->id = request_thread_id();
    thread->process = process;
    /* a thread's priority can't exceed its parent's priority;
       guard against a NULL process (kernel threads) */
    thread->priority = process ? min(priority, process->priority) : priority;
    thread->page_dir = process ? process->page_dir : kernel_directory;
    thread->runtime = 0;

    irq_state_t irq_state = irq_save();
    ++num_threads;
    /* register this thread */
    schedule_thread(thread);
    irq_restore(irq_state);

    return thread->id;
}
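A usage sketch for the constructor above; worker_entry is illustrative only, and it assumes entry_t is compatible with void (*)(void *):

/* Hypothetical kernel-mode worker spawned with create_thread().
   Passing process == 0 creates a kernel thread mapped onto
   kernel_directory, per the fallbacks in the function above. */
static void worker_entry(void *args) {
    (void)args;
    /* ... do work; returning jumps into thread_exit via the pushed
       return address ... */
}

void spawn_worker(void) {
    uint32_t tid = create_thread(0 /* no parent process */, worker_entry,
                                 0 /* args */, 1 /* priority: illustrative */,
                                 0 /* kernel mode */, 0 /* not vm86 */);
    if (!tid) {
        /* allocation failed; nothing was scheduled */
    }
}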