/*
 * irq_kernel_stack_check()
 *	See if the kernel stack is within STACK_WARN of the end.
 */
static void irq_kernel_stack_check(int irq, struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	unsigned long stack_bottom = sw_ksp[thread_get_self()];
	unsigned long stack_top = stack_bottom + THREAD_SIZE;
	unsigned long cur_sp = current_stack_pointer;

	/*
	 * The stack pointer must lie within (bottom, top]; anything else
	 * means we are running on some other (or a corrupted) stack.
	 */
	if ((cur_sp <= stack_bottom) || (cur_sp > stack_top)) {
		printk(KERN_CRIT "tid[%d]: sp: %lx outside of stack: [%lx:%lx]\n",
		       thread_get_self(), cur_sp, stack_bottom, stack_top);
		THREAD_STALL;
	}

	/*
	 * Warn when fewer than STACK_WARN bytes remain: we may not be able
	 * to actually service this interrupt without overflowing.
	 */
	if (cur_sp < (stack_bottom + STACK_WARN)) {
		printk(KERN_CRIT "tid[%d]: irq: %d, regs: %p, remain: %lx, overflow?\n",
		       thread_get_self(), irq, regs, cur_sp - stack_bottom);
		dump_stack();
		THREAD_STALL;
	}
#endif
}
static void trace_stream(FILE* out_stream, const char* filename, int line_num, const char* function_name, char const* format, va_list ap) { /* Any message we print should be prefixed by: . "<thread ID>: <filename>:<line num>:<function name>: " Even though individual fprintf() calls can be expected to execute atomically, there is no such guarantee across multiple calls. We use a mutex to synchronize access to the console. */ thread_t* this_thread = thread_get_self(); critical_section_t cs(stream_mutex); /* Try to print a meaningful (string) thread ID. If no ID is registered, just use pthread_t returned by pthread_self(). */ if ( this_thread != NULL ) fprintf(out_stream, "%s", this_thread->thread_name().data()); else trace_print_pthread(out_stream, pthread_self()); fprintf(out_stream, ": %s:%d:%s: ", filename, line_num, function_name); vfprintf(out_stream, format, ap); /* No need to flush in a critical section. Worst-case, someone else prints between out print and our flush. Since fflush() is atomic with respect to fprintf(), we end up simply flushing someone else's data along with our own. */ fflush(out_stream); }
/*
 * Obtain a predicate random generator for the given caller tag.
 * Deterministic mode seeds the generator from the tag itself; otherwise
 * the calling thread's own random generator is used.
 */
predicate_randgen_t predicate_randgen_t::acquire(const char* caller_tag)
{
	if (!use_deterministic_predicates()) {
		/* non-deterministic: draw from the thread-local generator */
		return predicate_randgen_t(thread_get_self()->randgen());
	}

	/* deterministic: reproducible stream keyed by the caller tag */
	return predicate_randgen_t(caller_tag);
}
/*
 * ubicom32_build_cpu_th_mask()
 *
 *	Build a lookup table for translation between hardware thread
 *	"ROSR" values and Linux CPU ids.
 *
 *	*** This gets executed on all CPUs at once! ***
 */
static void ubicom32_build_cpu_th_mask(void *mask)
{
	unsigned long *thread_mask = mask;
	thread_t hw_tid = thread_get_self();

	/*
	 * NOTE(review): tid 0 is rejected along with out-of-range ids —
	 * presumably reserved for a non-Linux thread; confirm against the
	 * platform thread map.
	 */
	BUG_ON(hw_tid <= 0 || hw_tid >= THREAD_ARCHITECTURAL_MAX);

	cpu_map[hw_tid] = smp_processor_id();
	set_bit(hw_tid, thread_mask);
}
/*
 * smp_halt_processor()
 *	Halt this hardware thread: take it out of the online map, mask
 *	interrupts, announce the halt, and suspend forever.
 */
static void smp_halt_processor(void)
{
	int cpuid = thread_get_self();

	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();

	/*
	 * Use adjacent string literals instead of a backslash line
	 * continuation: the continuation embedded the next source line's
	 * leading whitespace in the middle of the emitted message.
	 */
	printk(KERN_EMERG "cpu[%d] has halted. It is not OK to turn off power "
	       "until all cpu's are off.\n", cpuid);

	/* Nothing should resume a halted thread; suspend in a loop anyway. */
	for (;;) {
		thread_suspend();
	}
}
short URandShort(const short low, const short high) { thread_t* self = thread_get_self(); assert (self); randgen_t* randgenp = self->randgen(); assert (randgenp); short d = high - low + 1; return (low + (short)randgenp->rand(d)); }
//Zipfian between low and high int ZRand(const int low, const int high) { zipfian myZipf(high-low+2,_g_ZipfS); thread_t* self = thread_get_self(); assert (self); randgen_t* randgenp = self->randgen(); assert (randgenp); double u = (double)randgenp->rand(10000)/double(10000); return (myZipf.next(u)+low-1); }