/*
 * Return the index-th thread (on this CPU's runqueue, then -- if
 * DEFERRABLE -- on the server list) that has an invocation in
 * component "target", or 0 if fewer than index+1 such threads exist.
 */
struct sched_thd *
sched_get_thread_in_spd_from_runqueue(spdid_t spdid, spdid_t target, int index)
{
	struct sched_thd *thd;
	int prio, nmatch = 0;

	/* copied from runqueue_print, a better way would use a visitor */
	for (prio = 0 ; prio < NUM_PRIOS ; prio++) {
		for (thd = FIRST_LIST(&PERCPU_GET(fprr_state)->priorities[prio].runnable, prio_next, prio_prev) ;
		     thd != &PERCPU_GET(fprr_state)->priorities[prio].runnable ;
		     thd = FIRST_LIST(thd, prio_next, prio_prev)) {
			/* TODO: do we care to differentiate if the thread is
			 * currently in the spd, versus previously? */
			if (cos_thd_cntl(COS_THD_INV_SPD, thd->id, target, 0) >= 0 &&
			    nmatch++ == index) return thd;
		}
	}
#ifdef DEFERRABLE
	for (thd = FIRST_LIST(&PERCPU_GET(fprr_state)->servers, sched_next, sched_prev) ;
	     thd != &PERCPU_GET(fprr_state)->servers ;
	     thd = FIRST_LIST(thd, sched_next, sched_prev)) {
		if (cos_thd_cntl(COS_THD_INV_SPD, thd->id, target, 0) >= 0 &&
		    nmatch++ == index) return thd;
	}
#endif
	return 0;
}
/*
 * Exercise the "set instruction pointer back" thread-control path on
 * thread "med", then block on a 5-tick timed event so the effect can
 * be observed.  NOTE(review): the cos_thd_cntl call sits inside
 * assert() as in the surrounding code -- it vanishes under NDEBUG.
 */
void
test_iploop(void)
{
	printc("set eip back testing....\n");
	assert(cos_thd_cntl(COS_THD_IP_LFT, med, 0, 0) != -1);
	timed_event_block(cos_spd_id(), 5);
}
int fault_inject(int spd) { int ret = 0; int tid, spdid; entry_cnt++; printc("\nthread %d in SWIFI %ld (%d) ... TARGET_COMPONENT %d\n", cos_get_thd_id(), cos_spd_id(), entry_cnt, spd); if (spd == 0) return 0; struct cos_regs r; for (tid = 1; tid <= MAX_NUM_THREADS; tid++) { spdid = cos_thd_cntl(COS_THD_FIND_SPD_TO_FLIP, tid, spd, 0); if (tid == IDLE_THD || spdid == -1) continue; counter++; printc("<<flip counter %lu>> flip the register in spd %d (thd %d)!!!\n", counter, spd, tid); cos_regs_read(tid, spdid, &r); cos_regs_print(&r); flip_all_regs(&r); /* cos_regs_print(&r); */ } return 0; }
/*
 * Page-fault upcall: dump fault info, pop the faulting component off
 * the thread's invocation stack, redirect the caller's return address
 * to the stub's fault handler (return ip - 8), then hand off to the
 * failure-notification service.
 *
 * Fixes: (1) "®s" was a mojibake-corrupted "&regs" (HTML entity
 * "&reg;"), which did not compile; (2) the invocation-stack
 * manipulations were performed INSIDE assert(), so building with
 * -DNDEBUG would silently delete the entire recovery path -- the
 * side-effecting calls are now hoisted out and only their results are
 * asserted.
 */
int
fault_page_fault_handler(spdid_t spdid, void *fault_addr, int flags, void *ip)
{
	unsigned long r_ip; 	/* the ip to return to */
	int tid = cos_get_thd_id();
	int i, ret;

	/* START UNCOMMENT FOR FAULT INFO */
	if (regs_active) BUG(); /* no reentrant register dumps */
	regs_active = 1;
	cos_regs_save(tid, spdid, fault_addr, &regs);
	printc("Thread %d faults in spd %d @ %p\n", tid, spdid, fault_addr);
	cos_regs_print(&regs);
	regs_active = 0;
	for (i = 0 ; i < 5 ; i++) {
		printc("Frame ip:%lx, sp:%lx\n",
		       cos_thd_cntl(COS_THD_INVFRM_IP, tid, i, 0),
		       cos_thd_cntl(COS_THD_INVFRM_SP, tid, i, 0));
	}
	/* END UNCOMMENT FOR FAULT INFO */

	/* remove from the invocation stack the faulting component! */
	ret = cos_thd_cntl(COS_THD_INV_FRAME_REM, tid, 1, 0);
	assert(!ret);

	/* Manipulate the return address of the component that called
	 * the faulting component... */
	r_ip = cos_thd_cntl(COS_THD_INVFRM_IP, tid, 1, 0);
	assert(r_ip);
	/* ...and set it to its value -8, which is the fault handler
	 * of the stub. */
	ret = cos_thd_cntl(COS_THD_INVFRM_SET_IP, tid, 1, r_ip-8);
	assert(!ret);

	/*
	 * Look at the booter: when recover is happening, the sstub is
	 * set to 0x1, thus we should just wait till recovery is done.
	 */
	if ((int)ip == 1) failure_notif_wait(cos_spd_id(), spdid);
	else              failure_notif_fail(cos_spd_id(), spdid);

	return 0;
}
/*
 * Print a full backtrace for the current thread across every component
 * it has an invocation frame in: first the faulting frame described by
 * *regs, then each earlier frame obtained from the thread's invocation
 * stack via cos_thd_cntl(COS_THD_INV_FRAME, ...).
 *
 * spdid: component the fault occurred in (frame 1 of the inv-stack).
 * regs:  saved register snapshot; bp/ip are used as the starting
 *        frame pointer and instruction pointer.
 */
static void
walk_stack_all(spdid_t spdid, struct cos_regs *regs)
{
	unsigned long *fp, *stack, fp_off;
	int i, tid = cos_get_thd_id();

	printc("Stack trace for thread %d [spdid, instruction pointer]:\n", tid);

	/* Walk the top-most (faulting) frame from the saved registers;
	 * map_stack/unmap_stack bracket access to the foreign stack page. */
	fp = (unsigned long *)regs->regs.bp;
	stack = map_stack(spdid, (vaddr_t)fp);
	printc("\t[%d, %lx]\n", spdid, (unsigned long)regs->regs.ip);
	walk_stack(spdid, fp, stack);
	unmap_stack(spdid, stack);

	/* Sanity: frame 0 is this component, frame 1 the faulting spd.
	 * NOTE(review): these asserts vanish under NDEBUG; the cntl
	 * calls appear to be pure queries, so that should be benign --
	 * confirm. */
	assert(cos_spd_id() == cos_thd_cntl(COS_THD_INV_FRAME, tid, 0, 0));
	assert(spdid == cos_thd_cntl(COS_THD_INV_FRAME, tid, 1, 0));

	/* Walk every remaining invocation frame; COS_THD_INV_FRAME
	 * returns 0 past the bottom of the stack. */
	for (i = 2 ; (spdid = cos_thd_cntl(COS_THD_INV_FRAME, tid, i, 0)) != 0 ; i++) {
		unsigned long sp;
		/* We're ignoring the initial IPs the IP is in the
		 * invocation stubs, and no one cares about the
		 * stubs */
		sp = cos_thd_cntl(COS_THD_INVFRM_SP, tid, i, 0);
		assert(sp);
		stack = map_stack(spdid, sp);
		/* The invocation stubs save ebp last, thus *(esp+16)
		 * = ebp.  This offset corresponds to the number of
		 * registers pushed in
		 * SS_ipc_client_marshal_args...
		 * NOTE(review): the computed fp_off is just the
		 * word offset of sp within its page, with no +4
		 * words added for the pushed registers mentioned
		 * above -- confirm against the stub's push sequence. */
		fp_off = ((sp & (~PAGE_MASK))/sizeof(unsigned long));
		fp = (unsigned long *)&stack[fp_off];
		walk_stack(spdid, fp, stack);
		unmap_stack(spdid, stack);
	}
}