/* Adaptively tune the pending-interrupt threshold from the interval
 * between consecutive signal deliveries: a long quiet period drops the
 * threshold to zero (deliver immediately), while rapid-fire signals
 * decay it one step at a time. */
static void adjust_intr_threshold(void)
{
    CMCore *self = coremu_get_core_self();
    uint64_t now = read_host_tsc();

    if (self->time_stamp) {
        if (now - self->time_stamp > SIG_HANDLE_INTERVAL)
            self->intr_thresh_hold = 0;
        else if (self->intr_thresh_hold > 0)
            self->intr_thresh_hold--;
    }

    /* Remember when this handler last ran. */
    self->time_stamp = now;
}
/* Drain this core's pending-interrupt queue, dispatching each entry to
 * the registered interrupt handler.  A no-op when no handler has been
 * registered.
 *
 * Fix: declare the parameter list as (void) — an empty () in C leaves
 * the function unprototyped, disabling argument checking at call sites. */
void coremu_receive_intr(void)
{
    CMCore *core = coremu_get_core_self();
    void *intr = NULL;

    if (!event_handler)
        return;

    while ((intr = coremu_get_intr(core)) != NULL) {
        /* call registered interrupt handler */
        event_handler(intr);
    }
}
/* sigaction-style handler run when another core signals us: retune the
 * interrupt threshold, rebalance thread priority, invoke the registered
 * notifier (if any), then clear the pending flag.  The signo/info/context
 * parameters are required by the sigaction interface and unused here. */
void coremu_core_signal_handler(int signo, siginfo_t *info, void *context)
{
    CMCore *self = coremu_get_core_self();

    adjust_intr_threshold();
    coremu_thread_setpriority(PRIO_PROCESS, 0, avg_prio);

    if (event_notifier)
        event_notifier();

    self->sig_pending = 0;
}
/* handle the halted event: mark the core halted, sleep for the maximum
 * halt interval, then mark it running again. */
static inline void sched_halted()
{
    CMCore *self = coremu_get_core_self();
    struct timespec nap = { .tv_sec = 0, .tv_nsec = HALT_SLEEP_MAX_TIME };

    self->state = CM_STATE_HALT;
    nanosleep(&nap, NULL);
    self->state = CM_STATE_RUN;
}
/* handle the pause event */ static inline void sched_pause() { struct timespec pause_interval; CMCore *self = coremu_get_core_self(); pause_interval.tv_sec = 0; pause_interval.tv_nsec = PAUSE_SLEEP_TIME; if (pause_cnt < PAUSE_THRESHOLD) { pause_cnt++; } else if (pause_cnt <= PAUSE_THRESHOLD + 1) { pause_cnt++; pthread_yield(); } else { self->state = CM_STATE_PAUSE; nanosleep(&pause_interval, NULL); self->state = CM_STATE_RUN; pause_cnt = 0; } }
/* Send an interrupt and notify the accept core * Here we use apdaptive signal delay mechanism * But this mechanism seems to be wonderful when number of emulated * cores is more than 128 (test enviroment R900) */ void coremu_send_intr(void *e, int coreid) { cm_assert(e, "interrupt argument is NULL"); CMCore *core = coremu_get_core(coreid); if (!coremu_init_done_p()) return; coremu_put_intr(core, e); /* Call event notifier directly if sending interrupt to self. * Note that we still need to put the interrupt in the queue, otherwise, the * core will lost this interrupt. */ if (!coremu_hw_thr_p()) { if (core == coremu_get_core_self()) { event_notifier(); return; } } coremu_send_signal(core); }
/* Per-core-thread scheduling setup: verify the inherited scheduling
 * policy, set the thread priority, record this core's tid, and report
 * the resulting scheduler attributes.
 *
 * Fix: the pthread_getschedparam() call used to live INSIDE assert();
 * when compiled with NDEBUG the whole assert expression is removed, so
 * the call would never run and 'policy' would be read uninitialized.
 * The call now executes unconditionally and only the check is asserted.
 * Also declared the parameter list as (void). */
void coremu_init_sched_core(void)
{
    int policy;
    int err;
    CMCore *self;
    struct sched_param param;

    err = pthread_getschedparam(pthread_self(), &policy, &param);
    assert(!err);
    assert(policy == CM_SCHED_POLICY);
    (void)err; /* silence set-but-unused warning under NDEBUG */

    coremu_thread_setpriority(PRIO_PROCESS, 0, avg_prio);

    self = coremu_get_core_self();
    self->tid = coremu_gettid();

    /* display the scheduling info */
    display_thread_sched_attr("CORE thread scheduler settings:");

#ifdef CM_ENABLE_BIND_CORE
    /* bind thread to a specific core */
    //topology_bind_core();
#endif
}
static void topology_bind_core() { topo_obj_t obj; topo_cpuset_t cpuset; CMCore *self = coremu_get_core_self(); int index; index = (self->serial % cores); obj = topo_get_obj_by_depth(topology, depth, index); cpuset = obj->cpuset; topo_cpuset_singlify(&cpuset); if (topo_set_cpubind(topology, &cpuset, TOPO_CPUBIND_THREAD)) { char s[TOPO_CPUSET_STRING_LENGTH + 1]; topo_cpuset_snprintf(s, sizeof(s), &obj->cpuset); printf("Couldn't bind to cpuset %s\n", s); exit(-1); } //fprintf(stderr, "core [%u] binds to %d\n", self->serial, index); }