static int mutex_thread(void *arg) {
    int i;
    const int iterations = 50000;
    static volatile int shared = 0;

    mutex_t *m = (mutex_t *)arg;

    printf("mutex tester thread %p starting up, will go for %d iterations\n",
           get_current_thread(), iterations);

    for (i = 0; i < iterations; i++) {
        mutex_acquire(m);

        if (shared != 0)
            panic("someone else has messed with the shared data\n");

        shared = (intptr_t)get_current_thread();
        thread_yield();
        shared = 0;

        mutex_release(m);
        thread_yield();
    }

    return 0;
}
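A minimal driver sketch for the tester above. This is an assumption-laden sketch, not code from the snippet: it presumes the LK-style thread_create()/thread_resume() API and the DEFAULT_PRIORITY/DEFAULT_STACK_SIZE constants.

static mutex_t test_mutex;

/* sketch: spawn two contenders so the shared-data check in mutex_thread()
 * can actually detect a broken mutex */
static void start_mutex_test(void) {
    mutex_init(&test_mutex);
    for (int i = 0; i < 2; i++) {
        thread_t *t = thread_create("mutex tester", &mutex_thread, &test_mutex,
                                    DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
        thread_resume(t);
    }
}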
static int quantum_tester(void *arg) {
    for (;;) {
        printf("%p: in this thread. rq %d\n",
               get_current_thread(), get_current_thread()->remaining_quantum);
    }
    return 0;
}
static int timer_stress_worker(void* void_arg) {
    timer_stress_args* args = reinterpret_cast<timer_stress_args*>(void_arg);
    while (!atomic_load(&args->timer_stress_done)) {
        timer_t t = TIMER_INITIAL_VALUE(t);
        zx_duration_t timer_duration = rand_duration(ZX_MSEC(5));

        // Set a timer, then switch to a different CPU to ensure we race with it.
        arch_disable_ints();
        uint timer_cpu = arch_curr_cpu_num();
        const Deadline deadline = Deadline::no_slack(current_time() + timer_duration);
        timer_set(&t, deadline, timer_stress_cb, void_arg);
        thread_set_cpu_affinity(get_current_thread(), ~cpu_num_to_mask(timer_cpu));
        DEBUG_ASSERT(arch_curr_cpu_num() != timer_cpu);
        arch_enable_ints();

        // We're now running on something other than timer_cpu.
        atomic_add_u64(&args->num_set, 1);

        // Sleep for the timer duration so that this thread's timer_cancel races with the timer
        // callback. We want to race to ensure there are no synchronization or memory visibility
        // issues.
        thread_sleep_relative(timer_duration);

        timer_cancel(&t);
    }
    return 0;
}
static enum handler_return threadload(struct timer *t, lk_time_t now, void *arg) {
    static struct thread_stats old_stats;
    static lk_bigtime_t last_idle_time;

    lk_bigtime_t idle_time = thread_stats.idle_time;
    if (get_current_thread()->priority == IDLE_PRIORITY) {
        idle_time += current_time_hires() - thread_stats.last_idle_timestamp;
    }

    /* compute busy time over the last 1 s (1,000,000 us) sampling window;
     * busypercent is in hundredths of a percent */
    lk_bigtime_t delta_time = idle_time - last_idle_time;
    lk_bigtime_t busy_time = 1000000ULL - (delta_time > 1000000ULL ? 1000000ULL : delta_time);
    uint busypercent = (busy_time * 10000) / (1000000);

    // printf("idle_time %lld, busytime %lld\n", idle_time - last_idle_time, busy_time);
    printf("LOAD: %d.%02d%%, cs %d, ints %d, timer ints %d, timers %d\n",
           busypercent / 100, busypercent % 100,
           thread_stats.context_switches - old_stats.context_switches,
           thread_stats.interrupts - old_stats.interrupts,
           thread_stats.timer_ints - old_stats.timer_ints,
           thread_stats.timers - old_stats.timers);

    old_stats = thread_stats;
    last_idle_time = idle_time;

    return INT_NO_RESCHEDULE;
}
void isr_exn_ud_bottom(struct x86_exregs *regs) {
    /* see if it's a LOCK NOP. (this is why the "syscall" is so slow.) */

    /* NOTE: could extend the kernel data segment to the full address space,
     * and wrap the user-space pointer. that'd remove the farting around with
     * the mapping database, page tables, and supervisor space.
     */
    struct thread *current = get_current_thread();
    uint8_t buf[2];
    size_t n = space_memcpy_from(current->space, buf, regs->eip, 2);
    if (n < 2) {
        panic("can't read from #UD eip? what.");
    }
    if (buf[0] == 0xf0 && buf[1] == 0x90) {
        /* it is L4_KernelInterface().
         * TODO: proper values
         */
        regs->eip += 2;
        regs->eax = L4_Address(current->space->kip_area);
        /* TODO: replace these with proper KIP accessors */
        regs->ecx = *(L4_Word_t *)(kip_mem + 0x04);  /* API VERSION */
        regs->edx = *(L4_Word_t *)(kip_mem + 0x08);  /* API FLAGS */
        /* id = 23 (because 2 + 3 = 5); subid = 17
         * TODO: get proper values at some point.
         */
        regs->esi = (23 << 24) | (17 << 16);  /* KERNEL ID */
    } else {
        printf("#UD at eip 0x%lx, esp 0x%lx\n", regs->eip, regs->esp);
        /* TODO: pop an "invalid opcode" exception. */
        thread_halt(current);
        assert(current->status == TS_STOPPED);
        return_to_scheduler();
    }
}
static int sleep_thread(void *arg) {
    for (;;) {
        printf("sleeper %p\n", get_current_thread());
        thread_sleep(rand() % 500);
    }
    return 0;
}
static int semaphore_producer(void *unused) {
    printf("semaphore producer %p starting up, running for %d iterations\n",
           get_current_thread(), sem_total_its);

    for (int x = 0; x < sem_total_its; x++) {
        sem_post(&sem);
    }

    return 0;
}
static void
fbsd_thread_activate (void)
{
  fbsd_thread_active = 1;
  init_thread_list ();
  if (fbsd_thread_core == 0)
    enable_thread_event_reporting ();
  fbsd_thread_find_new_threads ();
  get_current_thread ();
}
// Returns a pointer to the ThreadLockState instance for the current thread when
// in thread context, or for the current CPU when in irq context.
ThreadLockState* SystemGetThreadLockState() {
    ThreadLockState* state;
    if (arch_blocking_disallowed())
        state = ToThreadLockState(&get_local_percpu()->lock_state);
    else
        state = ToThreadLockState(&get_current_thread()->lock_state);
    return state;
}
static void
fbsd_thread_activate (void)
{
  fbsd_thread_active = 1;
  init_thread_list ();
  if (target_has_execution)
    enable_thread_event_reporting ();
  fbsd_thread_find_new_threads (NULL);
  get_current_thread ();
}
static int event_waiter(void *arg) {
    int count = (intptr_t)arg;

    printf("event waiter starting\n");

    while (count > 0) {
        printf("%p: waiting on event...\n", get_current_thread());
        if (event_wait(&e) < 0) {
            printf("%p: event_wait() returned error\n", get_current_thread());
            return -1;
        }
        printf("%p: done waiting on event...\n", get_current_thread());
        thread_yield();
        count--;
    }

    return 0;
}
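A companion signaler sketch for the waiter above, assuming the LK event API (event_init()/event_signal()) and that the global event e was initialized elsewhere with EVENT_FLAG_AUTOUNSIGNAL so each signal wakes one waiter:

/* sketch: wake the waiter 'count' times, pausing between signals */
static int event_signaler(void *arg) {
    int count = (intptr_t)arg;
    while (count-- > 0) {
        thread_sleep(100);      /* give the waiter time to block */
        event_signal(&e, true); /* true asks for an immediate reschedule */
    }
    return 0;
}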
/* Handles the timer interrupt.
 * By default, the timer fires at 18.222 Hz */
void timer_handler(struct regs *r) {
    (void)r; /* prevent 'unused' parameter warning */

    g_num_ticks++;

    if (get_current_thread() && get_current_thread()->id == 5) {
        DEBUGF("%s\n", "timer_handler in user!");
    }

    /* if the current thread has outlived the quantum, add it to the
     * run queue and schedule a new thread */
    thread_t* current = get_current_thread();
    if (current) {
        if (++current->num_ticks > THREAD_QUANTUM && preemption_enabled()) {
            /* DEBUGF("preempting thread %d\n", current->id); */
            make_runnable(current);
            g_need_reschedule = true;
        }
    }
}
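The handler only sets g_need_reschedule; something on the interrupt-return path has to consume it. A hypothetical sketch of that consumer (interrupt_exit_hook() and schedule() are assumed names, not part of this snippet):

/* sketch: runs after the handler returns, before resuming the interrupted thread */
void interrupt_exit_hook(void) {
    if (g_need_reschedule) {
        g_need_reschedule = false;
        schedule(); /* pick and switch to the next runnable thread */
    }
}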
static void sys_unmap_wrap(struct x86_exregs *regs) {
    L4_Word_t control = regs->eax;

    /* TODO: pass utcb to sys_unmap()? */
    void *utcb = thread_get_utcb(get_current_thread());
    if ((control & 0x3f) > 0)
        L4_VREG(utcb, L4_TCR_MR(0)) = regs->esi;
    sys_unmap(control);
    regs->esi = L4_VREG(utcb, L4_TCR_MR(0));
}
static int semaphore_consumer(void *unused) {
    unsigned int iterations = 0;

    /* claim a random share of the remaining iterations, capped at
     * sem_thread_max_its per consumer */
    mutex_acquire(&sem_test_mutex);
    if (sem_remaining_its >= sem_thread_max_its) {
        iterations = rand();
        iterations %= sem_thread_max_its;
    } else {
        iterations = sem_remaining_its;
    }
    sem_remaining_its -= iterations;
    mutex_release(&sem_test_mutex);

    printf("semaphore consumer %p starting up, running for %u iterations\n",
           get_current_thread(), iterations);

    for (unsigned int x = 0; x < iterations; x++)
        sem_wait(&sem);

    printf("semaphore consumer %p done\n", get_current_thread());
    atomic_add(&sem_threads, -1);
    return 0;
}
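A setup sketch tying the producer and consumers together. It assumes the globals referenced above (sem, sem_test_mutex, sem_total_its, sem_remaining_its, sem_threads) plus LK-style sem_init()/thread_create(); treat the exact signatures as assumptions:

/* sketch: one producer posting sem_total_its tokens, N consumers sharing them */
static void start_semaphore_test(int consumers) {
    sem_init(&sem, 0);
    mutex_init(&sem_test_mutex);
    sem_remaining_its = sem_total_its;
    atomic_add(&sem_threads, consumers);
    for (int i = 0; i < consumers; i++) {
        thread_resume(thread_create("sem consumer", &semaphore_consumer, NULL,
                                    DEFAULT_PRIORITY, DEFAULT_STACK_SIZE));
    }
    thread_resume(thread_create("sem producer", &semaphore_producer, NULL,
                                DEFAULT_PRIORITY, DEFAULT_STACK_SIZE));
}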
static void report_thread_death() {
    // block signals while reporting the thread death
    sigset_t block_set;
    sigset_t original_set;
    sigfillset(&block_set);
    if (setsigmask(SIG_SETMASK, &block_set, &original_set) == -1) {
        udi_abort();
    }

    udi_log("thread %a dying", get_user_thread_id());

    thread *thr = get_current_thread();
    thr->stack_event_pending = 1;

    int block_result = block_other_threads();
    if (block_result < 0) {
        udi_log("failed to block other threads");
        udi_abort();
        return;
    }

    if (block_result > 0) {
        // the thread should always eventually be the control thread
        udi_abort();
    } else {
        thr->stack_event_pending = 0;

        int death_result = handle_thread_death();
        if (death_result != RESULT_SUCCESS) {
            udi_log("failed to handle thread death");
            udi_abort();
            return;
        }

        udi_errmsg errmsg;
        errmsg.size = ERRMSG_SIZE;
        errmsg.msg[ERRMSG_SIZE-1] = '\0';

        thread *request_thr = NULL;
        int request_result = wait_and_execute_command(&errmsg, &request_thr);
        if (request_result == RESULT_ERROR) {
            udi_log("failed to handle command after thread death");
            udi_abort();
        }

        release_other_threads();
    }

    setsigmask(SIG_SETMASK, &original_set, NULL);
}
static void wait_for_buffer(struct buf *buf) {
#ifdef WIN32
    /* push ourselves onto the buffer's waiter list, then block until signaled */
    struct thread *self = get_current_thread();
    self->next_buffer_waiter = buf->waiters;
    buf->waiters = self;
    ResetEvent(self->ready);
    WaitForSingleObject(self->ready, INFINITE);
#else
    panic("wait_for_buffer() not supported");
#endif
}
static int display_server_thread(void *args) {
    for (;;) {
        // wait for start event
        dprintf(INFO, "%s: IDLE\n", __func__);
        if (event_wait(&e_start_server) < 0) {
            dprintf(INFO, "%p: event_wait() returned error\n", get_current_thread());
            return -1;
        }

        // main worker loop
        dprintf(INFO, "%s: START\n", __func__);
        is_running = 1;

        // ignore first key to prevent unwanted interactions
        getkey();

        int keycode = 0;
        for (;;) {
            // render frame
            if (renderer)
                renderer(keycode);

            // signal refresh
            event_signal(&e_frame_finished, true);

            // poll key
            lk_time_t last_refresh = current_time();
            while (!(keycode = getkey()) && !request_stop && !request_refresh) {
                // refresh every 59s
                if ((current_time() - last_refresh) >= 59000)
                    break;
                thread_yield();
            }

            // stop request
            if (request_stop) {
                request_stop = 0;
                break;
            }

            // refresh request
            if (request_refresh) {
                request_refresh = 0;
            }

            event_wait(&e_continue);
        }

        dprintf(INFO, "%s: EXIT\n", __func__);
        is_running = 0;
    }

    return 0;
}
char *resolve_name(int serviceid) {
    struct directory_resolvename res_cmd;
    struct directory_response dir_res;
    char *service_name = "";
    int id_proc;

    res_cmd.command = DIRECTORY_RESOLVENAME;
    res_cmd.serviceid = serviceid;
    res_cmd.ret_port = dirlib_port;
    res_cmd.thr_id = get_current_thread();

    // we will send an smo with size 1 first.
    // servicename will not fit and directory will return
    // an error specifying the name size.
    res_cmd.name_smo = share_mem(DIRECTORY_TASK, service_name, 1, WRITE_PERM);

    send_msg(DIRECTORY_TASK, DIRECTORY_PORT, &res_cmd);

    while (get_msg_count(dirlib_port) == 0) {
        reschedule();
    }

    get_msg(dirlib_port, &dir_res, &id_proc);

    claim_mem(res_cmd.name_smo);

    if (dir_res.ret == DIRECTORYERR_SMO_TOOSMALL) {
        // now malloc for servicename, using the size the directory
        // reported, and retry
        service_name = (char *)malloc(dir_res.ret_value + 1);

        res_cmd.name_smo = share_mem(DIRECTORY_TASK, service_name,
                                     dir_res.ret_value + 1, WRITE_PERM);

        send_msg(DIRECTORY_TASK, DIRECTORY_PORT, &res_cmd);

        while (get_msg_count(dirlib_port) == 0) {
            reschedule();
        }

        get_msg(dirlib_port, &dir_res, &id_proc);

        claim_mem(res_cmd.name_smo);

        if (dir_res.ret != DIRECTORYERR_OK) {
            free(service_name);
            return NULL; // fail
        }

        return service_name;
    } else {
        return NULL; // fail
    }
}
/*-----------------------------------------------------------------------------
 *
 *---------------------------------------------------------------------------*/
void destroy_proc(void) {
    thread_t* thread;
    process_t* proc;

    /*asm volatile ("cli");*/
    stop();

    thread = get_current_thread();
    proc = thread->process;
    remove_thread(thread);

    /* Free process memory pages */
    kfree(thread->stack);
    thread->stack = NULL;
    free_phys_pages(proc->user_stack_paddr, proc->stack_page_count);
    free_phys_pages(proc->seg_paddr, proc->seg_page_count);
    free_phys_pages(proc->heap_paddr, proc->heap_page_count);
    free_phys_pages(proc->blocks_paddr, proc->blocks_page_count);
    kfree(thread);
    thread = NULL;
    proc->threads_count--;

    while (proc->threads_count > 0) {
        thread = get_thread(proc->thread_id[proc->threads_count - 1]);
        remove_thread(thread);
        thread->process->threads_count--;

        /* Free thread's memory (handler and stack) */
        kfree(thread->stack);
        thread->stack = NULL;
        kfree(thread);
        thread = NULL;
    }

    /* Here we must free memory!!! */
    /*asm volatile ("sti");*/
    start();
    task_switch();
}
void BrwLock::ContendedReadUpgrade() {
    Guard<spin_lock_t, IrqSave> guard{ThreadLock::Get()};

    // Convert our reading into waiting
    uint64_t prev = state_.fetch_add(-kBrwLockReader + kBrwLockWaiter,
                                     fbl::memory_order_relaxed);
    if ((prev & ~kBrwLockWaiterMask) == kBrwLockReader) {
        writer_.store(get_current_thread(), fbl::memory_order_relaxed);
        // There are no writers or readers. There might be waiters, but as we
        // already have some form of lock we still have fairness even if we
        // bypass the queue, so we convert our waiting into writing
        state_.fetch_add(-kBrwLockWaiter + kBrwLockWriter,
                         fbl::memory_order_acquire);
    } else {
        Block(true);
    }
}
static void dump_fault_frame(struct arm_fault_frame *frame) {
    struct thread *current_thread = get_current_thread();

    dprintf(CRITICAL, "current_thread %p, name %s\n",
            current_thread, current_thread ? current_thread->name : "");
    dprintf(CRITICAL, "r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n",
            frame->r[0], frame->r[1], frame->r[2], frame->r[3]);
    dprintf(CRITICAL, "r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
            frame->r[4], frame->r[5], frame->r[6], frame->r[7]);
    dprintf(CRITICAL, "r8 0x%08x r9 0x%08x r10 0x%08x r11 0x%08x\n",
            frame->r[8], frame->r[9], frame->r[10], frame->r[11]);
    dprintf(CRITICAL, "r12 0x%08x usp 0x%08x ulr 0x%08x pc 0x%08x\n",
            frame->r[12], frame->usp, frame->ulr, frame->pc);
    dprintf(CRITICAL, "spsr 0x%08x\n", frame->spsr);

    dump_mode_regs(frame->spsr);
}
// See that timer_trylock_or_cancel acquires the lock when the holder releases it.
static bool trylock_or_cancel_get_lock() {
    BEGIN_TEST;

    // We need 2 or more CPUs for this test.
    if (get_num_cpus_online() < 2) {
        printf("skipping test trylock_or_cancel_get_lock, not enough online cpus\n");
        return true;
    }

    timer_args arg{};
    timer_t t = TIMER_INITIAL_VALUE(t);

    SpinLock lock;
    arg.lock = lock.GetInternal();
    arg.wait = 1;

    arch_disable_ints();

    uint timer_cpu = arch_curr_cpu_num();
    const Deadline deadline = Deadline::no_slack(current_time() + ZX_USEC(100));
    timer_set(&t, deadline, timer_trylock_cb, &arg);

    // The timer is set to run on timer_cpu, switch to a different CPU, acquire the spinlock then
    // signal the callback to proceed.
    thread_set_cpu_affinity(get_current_thread(), ~cpu_num_to_mask(timer_cpu));
    DEBUG_ASSERT(arch_curr_cpu_num() != timer_cpu);

    arch_enable_ints();

    {
        AutoSpinLock guard(&lock);

        while (!atomic_load(&arg.timer_fired)) {
        }

        // Callback should now be running. Tell it to stop waiting and start trylocking.
        atomic_store(&arg.wait, 0);
    }

    // See that timer_cancel returns false indicating that the timer ran.
    ASSERT_FALSE(timer_cancel(&t), "");

    // Note, we cannot assert the value of arg.result. We have both released the lock and canceled
    // the timer, but we don't know which of these events the timer observed first.

    END_TEST;
}
/*
#<pydoc>
def ph_get_operand_info(ea, n):
    """
    Returns the operand information given an ea and operand number.

    @param ea: address
    @param n: operand number
    @return: Returns an idd_opinfo_t as a tuple: (modified, ea, reg_ival, regidx, value_size).
             Please refer to idd_opinfo_t structure in the SDK.
    """
    pass
#</pydoc>
*/
static PyObject *ph_get_operand_info(
    ea_t ea,
    int n)
{
  PYW_GIL_CHECK_LOCKED_SCOPE();

  bool ok = false;
  idd_opinfo_t opinf;
  Py_BEGIN_ALLOW_THREADS;
  do
  {
    if ( dbg == NULL || n == -1 )
      break;

    // Allocate register space
    thid_t tid = get_current_thread();
    regvals_t regvalues;
    regvalues.resize(dbg->registers_size);

    // Read registers
    if ( get_reg_vals(tid, -1, regvalues.begin()) != 1 )
      break;

    // Call the processor module
    if ( ph.notify(ph.get_operand_info, ea, n, tid, _py_getreg, regvalues.begin(), &opinf) != 0 )
    {
      break;
    }
    ok = true;
  } while (false);
  Py_END_ALLOW_THREADS;

  if ( ok )
    return Py_BuildValue("(i" PY_FMT64 "Kii)",
                         opinf.modified,
                         opinf.ea,
                         opinf.value.ival,
                         opinf.debregidx,
                         opinf.value_size);
  else
    Py_RETURN_NONE;
}
static void initial_thread_func(void) {
    int ret;

    // dprintf("initial_thread_func: thread %p calling %p with arg %p\n",
    //         current_thread, current_thread->entry, current_thread->arg);
    // dump_thread(current_thread);

    /* exit the implicit critical section we're within */
    exit_critical_section();

    thread_t *ct = get_current_thread();
    ret = ct->entry(ct->arg);

    // dprintf("initial_thread_func: thread %p exiting with %d\n", current_thread, ret);

    thread_exit(ret);
}
//---------------------------------------------------------------------------
static bool idaapi show_window(void *)
{
  thid_t tid = get_current_thread();

  // Find and refresh existing window
  char title[MAXSTR];
  qsnprintf(title, sizeof(title), "[%04X] - Structured exception handlers list", tid);
  TForm *form = find_tform(title); //lint !e64
  if ( form != NULL )
  {
    switchto_tform(form, true); //lint !e64
    return true;
  }

  x86seh_ctx_t *ch = new x86seh_ctx_t(tid, title);
  if ( !ch->get_sehlist() )
  {
    delete ch;
    return false;
  }

  int code = choose2(CH_NOBTNS,
                     -1, -1, -1, -1,
                     ch,
                     qnumber(x86seh_chooser_cols),
                     widths,
                     ch_sizer,
                     ch_getl,
                     title,
                     144, // icon
                     1,
                     NULL,
                     NULL,
                     ch_update,
                     NULL,
                     ch_enter,
                     ch_destroy,
                     NULL,
                     NULL);
  if ( code != -1 )
    hook_to_notification_point(HT_DBG, dbg_callback, ch);

  //lint -esym(429,ch) custodial pointer has not been freed or returned
  return true;
}
static void initial_thread_func(void) {
    thread_t *ct = get_current_thread();

#if LOCAL_TRACE
    LTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
    dump_thread(ct);
#endif

    /* exit the implicit critical section we're within */
    exit_critical_section();

    int ret = ct->entry(ct->arg);

    LTRACEF("thread %p exiting with %d\n", ct, ret);

    thread_exit(ret);
}
static void initial_thread_func(void) {
    int ret;

    thread_t *current_thread = get_current_thread();

    LTRACEF("initial_thread_func: thread %p calling %p with arg %p\n",
            current_thread, current_thread->entry, current_thread->arg);

    /* release the thread lock that was implicitly held across the reschedule */
    spin_unlock(&thread_lock);
    arch_enable_ints();

    ret = current_thread->entry(current_thread->arg);

    LTRACEF("initial_thread_func: thread %p exiting with %d\n", current_thread, ret);

    thread_exit(ret);
}
static inline void suspend_thread() {
    struct thread *thread;

    thread = get_current_thread();

    /* adjust the thread's scheduling counter and request a reschedule */
    disable_interrupts();
    thread->counter -= COUNTER_SUSPEND;
    thread->need_reschedule = 1;
    enable_interrupts();

    /* mark ourselves not runnable before switching away */
    spin_lock(&schedule_lock);
    thread->state = THREAD_UNREADY;
    spin_unlock(&schedule_lock);

    schedule();
}
static void initial_thread_func(void) {
    thread_t *ct = get_current_thread();

#if LOCAL_TRACE
    LTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
    dump_thread(ct);
#endif

    /* release the thread lock that was implicitly held across the reschedule */
    spin_unlock(&thread_lock);
    arch_enable_ints();

    int ret = ct->entry(ct->arg);

    LTRACEF("thread %p exiting with %d\n", ct, ret);

    thread_exit(ret);
}
void svc_sem_wait(struct semaphore *sem) {
    struct thread *current_thread;

    thread_lock(state);

    if (--sem->count < 0) {
        debug_printk("unable to get sem (%p)(%d)\r\n", sem, sem->count);

        /* no tokens available: block the current thread until a post wakes it */
        current_thread = get_current_thread();
        current_thread->state = THREAD_BLOCKED;
        insert_waiting_thread(sem, current_thread);
        sem->waiting++;

        arch_system_call(SVC_THREAD_SWITCH, NULL, NULL, NULL);
    }

    thread_unlock(state);
}