static int allocate_transition_tls(int id) { /* Libc function to initialize TLS-based locale info for ctype functions. */ extern void __ctype_init(void); /* We want to free and then reallocate the tls rather than simply * reinitializing it because its size may have changed. TODO: not sure if * this is right. 0-ing is one thing, but freeing and reallocating can be * expensive, esp if syscalls are involved. Check out glibc's * allocatestack.c for what might work. */ free_transition_tls(id); void *tcb = allocate_tls(); if (!tcb) { errno = ENOMEM; return -1; } /* Setup some intitial TLS data for the newly allocated transition tls. */ void *temp_tcb = get_tls_desc(); set_tls_desc(tcb); begin_safe_access_tls_vars(); __vcoreid = id; __vcore_context = TRUE; __ctype_init(); end_safe_access_tls_vars(); set_tls_desc(temp_tcb); /* Install the new tls into the vcpd. */ set_vcpd_tls_desc(id, tcb); return 0; }
/* Entry point the kernel (re)starts every vcore in.  Vcore 0 restores its
 * saved uthread context from vcpd; every other vcore signals that it is up,
 * spins until released, and then yields back to the kernel. */
void vcore_entry(void)
{
	uint32_t vcoreid = vcore_id();
	/* begin: stuff userspace needs to do to handle notifications */
	struct vcore *vc = &__procinfo.vcoremap[vcoreid];	/* NOTE(review): unused below */
	struct preempt_data *vcpd;
	vcpd = &__procdata.vcore_preempt_data[vcoreid];
	/* Lets try to restart vcore0's context. Note this doesn't do anything to
	 * set the appropriate TLS. On x86, this will involve changing the LDT
	 * entry for this vcore to point to the TCB of the new user-thread. */
	if (vcoreid == 0) {
		handle_events(vcoreid);
		set_tls_desc(core0_tls, 0);
		assert(__vcoreid == 0); /* in case anyone uses this */
		/* Load silly state (Floating point) too */
		pop_user_ctx(&vcpd->uthread_ctx, vcoreid);
		/* pop_user_ctx() never returns on success. */
		panic("should never see me!");
	}
	/* end: stuff userspace needs to do to handle notifications */
	/* all other vcores are down here */
	/* Signal that this vcore came up, then wait for some other thread to
	 * clear core1_up before yielding back to the kernel. */
	core1_up = TRUE;
	while (core1_up)
		cpu_relax();
	printf("Proc %d's vcore %d is yielding\n", getpid(), vcoreid);
	sys_yield(0);
	/* Should not be restarted after yielding; spin forever if we are. */
	while(1);
}
/* regset writer for the TLS area: validate that pos/count describe whole,
 * in-range user_desc slots, fetch the descriptors from kernel or user
 * space, and install them starting at the matching GDT TLS entry.
 *
 * Returns 0 on success, -EINVAL on a misaligned/out-of-range request, or
 * -EFAULT if the user buffer cannot be read. */
int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
	const struct user_desc *info;

	/* Both the offset and the length must cover whole descriptors, and
	 * the offset must fall inside the TLS area. */
	if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
	    (pos % sizeof(struct user_desc)) != 0 ||
	    (count % sizeof(struct user_desc)) != 0)
		return -EINVAL;

	if (kbuf) {
		info = kbuf;
	} else {
		if (__copy_from_user(infobuf, ubuf, count))
			return -EFAULT;
		info = infobuf;
	}

	set_tls_desc(target,
		     GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
		     info, count / sizeof(struct user_desc));

	return 0;
}
/*
 * Set a given TLS descriptor:
 *
 * Copies a user_desc from userspace, validates it, resolves the GDT index
 * (idx == -1 means "use info.entry_number", and if that is also -1 and
 * can_allocate is set, grab a free slot and report it back to the caller),
 * then installs the descriptor for task 'p'.
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -EINVAL on an
 * invalid descriptor or index, or a negative error from get_free_idx().
 */
int do_set_thread_area(struct task_struct *p, int idx,
		       struct user_desc __user *u_info,
		       int can_allocate)
{
	struct user_desc info;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;

	/* Validate the descriptor before installing it, consistent with the
	 * other do_set_thread_area() variants: an unchecked user-supplied
	 * descriptor (e.g. a 16-bit segment) can be used to defeat the
	 * espfix stack-address-leak protection (CVE-2014-8133). */
	if (!tls_desc_okay(&info))
		return -EINVAL;

	if (idx == -1)
		idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1 && can_allocate) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	set_tls_desc(p, idx, &info, 1);

	return 0;
}
/**
 * Switch into vcore mode to run the scheduler code.
 *
 * Disables notifications, switches onto the vcore's own TLS and transition
 * stack, drops the current thread reference, and restarts the vcore via
 * vcore_entry().  Never returns.  NOTE(review): this relies on the function
 * being force-inlined (per the comment below) so that "returning" never
 * unwinds a stack frame — confirm the attribute is present at the
 * declaration, which is outside this view.
 **/
void switch_to_vcore() {

	uint32_t vcoreid = vcore_id();

	/* Disable notifications. Once we do this, we might miss a notif_pending,
	 * so we need to enter vcore entry later. Need to disable notifs so we
	 * don't get in weird loops */
	struct preempt_data *vcpd = &__procdata.vcore_preempt_data[vcoreid];
	vcpd->notif_enabled = FALSE;

	/* Grab a reference to the currently running thread on this vcore */
	thread_t *t = current_thread;

	/* Switch to the vcore's tls region */
	extern void** vcore_thread_control_blocks;
	set_tls_desc(vcore_thread_control_blocks[vcoreid], vcoreid);

	/* Verify that the thread the vcore thinks was running is the same as the thread
	 * that was actually running */
	assert(current_thread == t);

	/* Set the stack pointer to the stack of the vcore.
	 * We know this function is always inlined because of the attribute we set
	 * on it, so there will be no stack unwinding when this function "returns".
	 * After this call, make sure you don't use local variables. */
	set_stack_pointer((void*)vcpd->transition_stack);
	assert(in_vcore_context());

	/* Leave the current vcore completely */
	current_thread = NULL;

	/* Restart the vcore and run the scheduler code */
	vcore_entry();
	assert(0);
}
/** Set thread-local-storage pointer
 *
 * TLS pointer is set in GS register. That means, the GS contains
 * selector, and the descriptor->base is the correct address.
 */
sysarg_t sys_tls_set(uintptr_t addr)
{
	uintptr_t tls_base = addr;

	/* Remember the base for later context switches, then load it now. */
	THREAD->arch.tls = tls_base;
	set_tls_desc(tls_base);

	return EOK;
}
/* Helper, makes VC ctx tracks uthread as its current_uthread in its TLS.
 *
 * Whether or not uthreads have TLS, thread0 has TLS, given to it by glibc.
 * This TLS will get set whenever we use thread0, regardless of whether or not
 * we use TLS for uthreads in general. glibc cares about this TLS and will use
 * it at exit. We can't simply use that TLS for VC0 either, since we don't know
 * where thread0 will be running when the program ends. */
static void uthread_track_thread0(struct uthread *uthread)
{
	/* Switch onto vcore 0's TLS so the assignments below land in the
	 * vcore's TLS variables, not the caller's. */
	set_tls_desc(get_vcpd_tls_desc(0));
	begin_safe_access_tls_vars();
	/* We might have a basic uthread already installed (from a prior call), so
	 * free it before installing the new one. */
	if (current_uthread)
		free(current_uthread);
	current_uthread = uthread;
	/* We may not be an MCP at this point (and thus not really working with
	 * vcores), but there is still the notion of something vcore_context-like
	 * even when running as an SCP (i.e. its more of a scheduler_context than a
	 * vcore_context). Therefore we need to set __vcore_context to TRUE here to
	 * represent this (otherwise we will hit some asserts of not being in
	 * vcore_context when running in scheduler_context for the SCP. */
	__vcore_context = TRUE;
	end_safe_access_tls_vars();
	/* Restore the uthread's own TLS before returning to it. */
	set_tls_desc(uthread->tls_desc);
}
/**
 * Initialize the vcores, including jumping into multicore mode.
 *
 * Sets up event delivery and notifications for vcore 0, stashes the
 * main_thread reference into vcore 0's TLS, and then requests a vcore,
 * which transfers control into vcore_entry().
 **/
void vcore_startup()
{
	/* Initialize the bootstrap code for using the vcores */
	if (vcore_init())
		printf("vcore_init() failed, we're f****d!\n");
	assert(vcore_id() == 0);

	/* Tell the kernel where and how we want to receive events. This is just an
	 * example of what to do to have a notification turned on. We're turning on
	 * USER_IPIs, posting events to vcore 0's vcpd, and telling the kernel to
	 * send to vcore 0. Note sys_self_notify will ignore the vcoreid pref.
	 * Also note that enable_kevent() is just an example, and you probably want
	 * to use parts of event.c to do what you want. */
	enable_kevent(EV_USER_IPI, 0, EVENT_IPI);

	/* Don't forget to enable notifs on vcore0. if you don't, the kernel will
	 * restart your _S with notifs disabled, which is a path to confusion. */
	struct preempt_data *vcpd = &__procdata.vcore_preempt_data[0];
	vcpd->notif_enabled = TRUE;

	/* Grab a reference to the main_thread on the current stack (i.e.
	 * current_thread, since we know this has been set up for us properly by
	 * the fact that the constructor calls main_thread_init() before this
	 * function. We will need this reference below. */
	thread_t *t = current_thread;

	/* Change temporarily to vcore0s tls region so we can save the main_thread
	 * into its thread local current_thread variable. One minor issue is that
	 * vcore0's transition-TLS isn't TLS_INITed yet. Until it is (right before
	 * vcore_entry(), don't try and take the address of any of its TLS vars. */
	extern void** vcore_thread_control_blocks;
	set_tls_desc(vcore_thread_control_blocks[0], 0);
	current_thread = t;
	set_tls_desc(t->context->tls_desc, 0);

	/* Jump into multi-core mode! */
	/* The next line of code that will run is inside vcore_entry(). When this
	 * thread is resumed, it will continue directly after this call to
	 * vcore_request() */
	vcore_request(1);
}
/* Demo vcore entry point: handles one event type (ETHAUD packet arrival),
 * yields if a preemption is pending, restores vcore 0's saved uthread
 * context, and parks all other vcores. */
void vcore_entry(void)
{
	uint32_t vcoreid = vcore_id();
	static bool first_time = TRUE;	/* NOTE(review): set but never read here */

	printf("GIANT WARNING: this is ancient shit\n");
	/* begin: stuff userspace needs to do to handle events/notifications */
	struct vcore *vc = &__procinfo.vcoremap[vcoreid];
	struct preempt_data *vcpd;
	vcpd = &__procdata.vcore_preempt_data[vcoreid];

	/* Ghetto way to get just an event number */
	unsigned int ev_type = get_event_type(&vcpd->ev_mbox_public);

	/* ETHAUD app: process the packet if we got a notif */
	if (ev_type == EV_FREE_APPLE_PIE)
		process_packet();

	if (vc->preempt_pending) {
		printf("Oh crap, vcore %d is being preempted! Yielding\n", vcoreid);
		sys_yield(TRUE);
		printf("After yield on vcore %d. I wasn't being preempted.\n", vcoreid);
	}

	/* Lets try to restart vcore0's context. Note this doesn't do anything to
	 * set the appropriate TLS. On x86, this will involve changing the LDT
	 * entry for this vcore to point to the TCB of the new user-thread. */
	if (vcoreid == 0) {
		handle_events(vcoreid);
		set_tls_desc(core0_tls, 0);
		assert(__vcoreid == 0); /* in case anyone uses this */
		/* Load silly state (Floating point) too */
		pop_user_ctx(&vcpd->uthread_ctx, vcoreid);
		/* pop_user_ctx() never returns on success. */
		printf("should never see me!");
	}
	/* unmask notifications once you can let go of the uthread_ctx and it is
	 * okay to clobber the transition stack.
	 * Check Documentation/processes.txt: 4.2.4. In real code, you should be
	 * popping the tf of whatever user process you want (get off the x-stack) */
	vcpd->notif_disabled = FALSE;
	/* end: stuff userspace needs to do to handle notifications */

	/* The other vcores will hit here. */
	while (1)
		cpu_relax();
}
/** Perform ia32 specific tasks needed before the new thread is scheduled.
 *
 * Points the per-CPU SYSENTER and TSS kernel-stack entries at the top of
 * THREAD's kernel stack and loads the thread's TLS descriptor.
 *
 * THREAD is locked and interrupts are disabled.
 */
void before_thread_runs_arch(void)
{
	/* Top of this thread's kernel stack. */
	uintptr_t kstk = (uintptr_t) &THREAD->kstack[STACK_SIZE];

#ifndef PROCESSOR_i486
	if (CPU->arch.fi.bits.sep) {
		/* Set kernel stack for CPL3 -> CPL0 switch via SYSENTER;
		 * leave room for one saved interrupt state frame. */
		write_msr(IA32_MSR_SYSENTER_ESP, kstk - sizeof(istate_t));
	}
#endif

	/* Set kernel stack for CPL3 -> CPL0 switch via interrupt */
	CPU->arch.tss->esp0 = kstk;
	CPU->arch.tss->ss0 = GDT_SELECTOR(KDATA_DES);

	/* Set up TLS in GS register */
	set_tls_desc(THREAD->arch.tls);
}
/*
 * Set a given TLS descriptor:
 *
 * Copies the descriptor in from userspace, validates it, resolves which
 * GDT TLS slot to use, and installs it for task 'p'.
 */
int do_set_thread_area(struct task_struct *p, int idx,
		       struct user_desc __user *u_info,
		       int can_allocate)
{
	struct user_desc info;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;

	if (!tls_desc_okay(&info))
		return -EINVAL;

	/* idx == -1 defers to the entry number embedded in the payload. */
	if (idx == -1)
		idx = info.entry_number;

	/* Still -1: the caller wants us to find a free slot (if allowed)
	 * and report the chosen index back through u_info. */
	if (idx == -1 && can_allocate) {
		int free_slot = get_free_idx();

		if (free_slot < 0)
			return free_slot;
		if (put_user(free_slot, &u_info->entry_number))
			return -EFAULT;
		idx = free_slot;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

#ifdef CONFIG_PAX_SEGMEXEC
	if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
		return -EINVAL;
#endif

	set_tls_desc(p, idx, &info, 1);

	return 0;
}
/* The lowest level function jumped to by the kernel on every vcore_entry.
 * Currently, this function is only necessary so we can set the tls_desc from
 * the vcpd for non x86_64 architectures. We should consider removing this and
 * making it mandatory to set the tls_desc in the kernel. We wouldn't even
 * need to pass the vcore id to user space at all if we did this. It would
 * already be set in the preinstalled TLS as __vcore_id. */
static void __attribute__((noreturn)) __kernel_vcore_entry(void)
{
	/* The kernel sets the TLS desc for us, based on whatever is in VCPD.
	 *
	 * x86 32-bit TLS is pretty jacked up, so the kernel doesn't set the TLS
	 * desc for us.  it's a little more expensive to do it here, esp for
	 * amd64.  Can remove this when/if we overhaul 32 bit TLS.
	 *
	 * AFAIK, riscv's TLS changes are really cheap, and they don't do it in
	 * the kernel (yet/ever), so they can set their TLS here too. */
	int id = __vcore_id_on_entry;

#ifndef __x86_64__
	/* Install this vcore's TLS ourselves on arches where the kernel won't. */
	set_tls_desc(vcpd_of(id)->vcore_tls_desc);
#endif
	/* Every time the vcore comes up, it must set that it is in vcore context.
	 * uthreads may share the same TLS as their vcore (when uthreads do not have
	 * their own TLS), and if a uthread was preempted, __vcore_context == FALSE,
	 * and that will continue to be true the next time the vcore pops up. */
	__vcore_context = TRUE;
	vcore_entry();
	/* vcore_entry() must never return; die loudly if it does. */
	fprintf(stderr, "vcore_entry() should never return!\n");
	abort();
	__builtin_unreachable();
}
/*
 * Set a given TLS descriptor:
 *
 * Copies the descriptor in from userspace, validates it, resolves which
 * GDT TLS slot to use, installs it for task 'p', and then refreshes any
 * segment register (or saved fs/gs base) that referenced the modified slot.
 */
int do_set_thread_area(struct task_struct *p, int idx,
		       struct user_desc __user *u_info,
		       int can_allocate)
{
	struct user_desc info;
	unsigned short __maybe_unused sel, modified_sel;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;

	/* Reject descriptors that could subvert segment-based protections. */
	if (!tls_desc_okay(&info))
		return -EINVAL;

	/* idx == -1 defers to the entry number embedded in the payload. */
	if (idx == -1)
		idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1 && can_allocate) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	set_tls_desc(p, idx, &info, 1);

	/*
	 * If DS, ES, FS, or GS points to the modified segment, forcibly
	 * refresh it. Only needed on x86_64 because x86_32 reloads them
	 * on return to user mode.
	 */
	modified_sel = (idx << 3) | 3;	/* selector: GDT index, TI=0, RPL=3 */

	if (p == current) {
#ifdef CONFIG_X86_64
		savesegment(ds, sel);
		if (sel == modified_sel)
			loadsegment(ds, sel);

		savesegment(es, sel);
		if (sel == modified_sel)
			loadsegment(es, sel);

		savesegment(fs, sel);
		if (sel == modified_sel)
			loadsegment(fs, sel);

		savesegment(gs, sel);
		if (sel == modified_sel)
			load_gs_index(sel);
#endif

#ifdef CONFIG_X86_32_LAZY_GS
		savesegment(gs, sel);
		if (sel == modified_sel)
			loadsegment(gs, sel);
#endif
	} else {
		/* Task isn't running: just patch its saved fs/gs bases. */
#ifdef CONFIG_X86_64
		if (p->thread.fsindex == modified_sel)
			p->thread.fsbase = info.base_addr;

		if (p->thread.gsindex == modified_sel)
			p->thread.gsbase = info.base_addr;
#endif
	}

	return 0;
}