void vcore_entry(void) { uint32_t vcoreid = vcore_id(); if (vcoreid) { mcs_barrier_wait(&b, vcoreid); udelay(5000000); if (vcoreid == 1) printf("Proc %d's vcores are yielding\n", getpid()); sys_yield(0); } else { /* trip the barrier here, all future times are in the loop */ mcs_barrier_wait(&b, vcoreid); while (1) { udelay(15000000); printf("Proc %d requesting its cores again\n", getpid()); begin = read_tsc(); sys_resource_req(RES_CORES, max_vcores(), 1, REQ_SOFT); mcs_barrier_wait(&b, vcoreid); end = read_tsc(); printf("Took %llu usec (%llu nsec) to get my yielded cores back.\n", udiff(begin, end), ndiff(begin, end)); printf("[T]:010:%llu:%llu\n", udiff(begin, end), ndiff(begin, end)); } } printf("We're screwed!\n"); exit(-1); }
int main(int argc, char** argv) { uint32_t vcoreid = vcore_id(); int retval = 0; mcs_barrier_init(&b, max_vcores()); /* begin: stuff userspace needs to do before switching to multi-mode */ vcore_init(); #if 0 /* tell the kernel where and how we want to receive notifications */ struct notif_method *nm; for (int i = 0; i < MAX_NR_NOTIF; i++) { nm = &__procdata.notif_methods[i]; nm->flags |= NOTIF_WANTED | NOTIF_MSG | NOTIF_IPI; nm->vcoreid = i % 2; // vcore0 or 1, keepin' it fresh. } #endif /* Need to save this somewhere that you can find it again when restarting * core0 */ core0_tls = get_tls_desc(0); /* Need to save our floating point state somewhere (like in the * user_thread_tcb so it can be restarted too */ /* end: stuff userspace needs to do before switching to multi-mode */ begin = read_tsc(); retval = vcore_request(max_vcores()); if (retval) printf("F****d!\n"); mcs_barrier_wait(&b, vcoreid); end = read_tsc(); printf("Took %llu usec (%llu nsec) to receive %d cores (cold).\n", udiff(begin, end), ndiff(begin, end), max_vcores()); printf("[T]:001:%llu:%llu:%d:C.\n", udiff(begin, end), ndiff(begin, end), max_vcores()); udelay(5000000); begin = read_tsc(); retval = vcore_request(max_vcores() - 1); if (retval) printf("F****d!\n"); mcs_barrier_wait(&b, vcoreid); end = read_tsc(); printf("Took %llu usec (%llu nsec) to receive %d cores (warm).\n", udiff(begin, end), ndiff(begin, end), max_vcores()); printf("[T]:001:%llu:%llu:%d:W.\n", udiff(begin, end), ndiff(begin, end), max_vcores()); return 0; }
/* Per-vcore entry point.  Vcore0 restores the saved core0 context (TLS then
 * the user context) and never returns here; all other vcores rendezvous,
 * pause, and yield their core back to the kernel. */
void vcore_entry(void)
{
	uint32_t vcoreid = vcore_id();

	/* begin: stuff userspace needs to do to handle notifications */
	/* NOTE(review): vc is computed but not used below in this test. */
	struct vcore *vc = &__procinfo.vcoremap[vcoreid];
	struct preempt_data *vcpd;
	vcpd = &__procdata.vcore_preempt_data[vcoreid];

	/* Lets try to restart vcore0's context. Note this doesn't do anything to
	 * set the appropriate TLS. On x86, this will involve changing the LDT
	 * entry for this vcore to point to the TCB of the new user-thread. */
	if (vcoreid == 0) {
		handle_events(vcoreid);
		/* Switch back to the TLS that was saved before entering multi-mode;
		 * the order matters: TLS must be in place before popping the ctx. */
		set_tls_desc(core0_tls, 0);
		assert(__vcoreid == 0); /* in case anyone uses this */
		/* Load silly state (Floating point) too */
		pop_user_ctx(&vcpd->uthread_ctx, vcoreid);
		/* pop_user_ctx does not return; reaching here is a bug. */
		panic("should never see me!");
	}
	/* end: stuff userspace needs to do to handle notifications */

	/* all other vcores are down here */
	mcs_barrier_wait(&b, vcoreid);
	udelay(1000000);
	if (vcoreid == 1)
		printf("Proc %d's vcores are yielding\n", getpid());
	sys_yield(0);
	/* Should not run again after yielding; spin defensively if we do. */
	while(1);
}
/* Per-CPU idle thread.  Finishes boot-time initialization for this CPU
 * (TLB flush of kernel text on the bootstrap CPU, timestamping, freeing the
 * boot-reserved page range), spawns the per-CPU event-manager thread (plus
 * kvfsd on the I/O cluster's bootstrap CPU), then enters the idle loop:
 * wake the event manager when events are pending, idle, and yield whenever
 * runnable threads exist.  Never returns in practice. */
void* thread_idle(void *arg)
{
	extern uint_t __ktext_start;
	register uint_t id;
	register uint_t cpu_nr;
	register struct thread_s *this;
	register struct cpu_s *cpu;
	struct thread_s *thread;
	register struct page_s *reserved_pg;
	register uint_t reserved;
	kthread_args_t *args;
	bool_t isBSCPU;
	uint_t tm_now;
	uint_t count;
	error_t err;

	this = current_thread;
	cpu = current_cpu;
	id = cpu->gid;
	/* NOTE(review): cpu_nr is set but never used below. */
	cpu_nr = arch_onln_cpu_nr();
	args = (kthread_args_t*) arg;
	/* Bootstrap CPU of this cluster gets extra one-time duties below. */
	isBSCPU = (cpu == cpu->cluster->bscpu);

	cpu_trace_write(cpu, thread_idle_func);

	if(isBSCPU)
		pmm_tlb_flush_vaddr((vma_t)&__ktext_start, PMM_UNKNOWN);

	cpu_set_state(cpu, CPU_ACTIVE);

	/* Record this thread's birth time for accounting. */
	rt_timer_read(&tm_now);
	this->info.tm_born = tm_now;
	this->info.tm_tmp = tm_now;

	//// Reset stats ///
	cpu_time_reset(cpu);
	////////////////////

	/* Wait for all CPUs to reach this point before proceeding. */
	mcs_barrier_wait(&boot_sync);

	printk(INFO, "INFO: Starting Thread Idle On Core %d\tOK\n", cpu->gid);

	/* On the designated CPU (args->val[2]), release the physical pages that
	 * were reserved during boot (range [args->val[0], args->val[1])) back to
	 * the page allocator. */
	if(isBSCPU && (id == args->val[2]))
	{
		for(reserved = args->val[0]; reserved < args->val[1]; reserved += PMM_PAGE_SIZE)
		{
			reserved_pg = ppm_ppn2page(&cpu->cluster->ppm, reserved >> PMM_PAGE_SHIFT);
			page_state_set(reserved_pg, PGINIT);
			ppm_free_pages(reserved_pg);
		}
	}

	/* Create this CPU's event-manager thread and register it with the
	 * scheduler. */
	thread = kthread_create(this->task, &thread_event_manager, NULL, cpu->cluster->id, cpu->lid);

	if(thread == NULL)
		PANIC("Failed to create default events handler Thread for CPU %d\n", id);

	thread->task = this->task;
	cpu->event_mgr = thread;
	wait_queue_init(&thread->info.wait_queue, "Events");

	err = sched_register(thread);
	assert(err == 0);
	sched_add_created(thread);

	if(isBSCPU)
	{
		dqdt_update();
#if 0
		/* Cluster-manager thread creation, currently disabled. */
		thread = kthread_create(this->task, &cluster_manager_thread, cpu->cluster, cpu->cluster->id, cpu->lid);

		if(thread == NULL)
		{
			PANIC("Failed to create cluster manager thread, cid %d, cpu %d\n", cpu->cluster->id, cpu->gid);
		}

		thread->task = this->task;
		cpu->cluster->manager = thread;
		wait_queue_init(&thread->info.wait_queue, "Cluster-Mgr");

		err = sched_register(thread);
		assert(err == 0);
		sched_add_created(thread);
#endif

		/* Only the cluster handling I/O hosts the VFS daemon. */
		if(clusters_tbl[cpu->cluster->id].flags & CLUSTER_IO)
		{
			thread = kthread_create(this->task, &kvfsd, NULL, cpu->cluster->id, cpu->lid);

			if(thread == NULL)
			{
				PANIC("Failed to create KVFSD on cluster %d, cpu %d\n", cpu->cluster->id, cpu->gid);
			}

			thread->task = this->task;
			wait_queue_init(&thread->info.wait_queue, "KVFSD");
			err = sched_register(thread);
			assert(err == 0);
			sched_add_created(thread);
			printk(INFO,"INFO: kvfsd has been created\n");
		}
	}

	cpu_set_state(cpu,CPU_IDLE);

	/* Idle loop.  IRQs are disabled across the pending-event check and
	 * sched_idle so a wakeup cannot slip in between the check and idling;
	 * the order here is deliberate — do not reorder. */
	while (true)
	{
		cpu_disable_all_irq(NULL);

		/* presumably re_listner/le_listner are the remote/local event
		 * queues — TODO confirm naming against the event subsystem. */
		if((event_is_pending(&cpu->re_listner)) || (event_is_pending(&cpu->le_listner)))
		{
			wakeup_one(&cpu->event_mgr->info.wait_queue, WAIT_ANY);
		}

		sched_idle(this);
		count = sched_runnable_count(&cpu->scheduler);
		cpu_enable_all_irq(NULL);

		if(count != 0)
			sched_yield(this);

		//arch_set_power_state(cpu, ARCH_PWR_IDLE);
	}

	/* Unreachable: the idle loop never exits. */
	return NULL;
}