phantom_thread_t * phantom_create_thread( void (*func)(void *), void *arg, int flags ) { assert(threads_inited); assert( ! (flags & ~CREATION_POSSIBLE_FLAGS) ); #if NEW_SNAP_SYNC // No thread starts in snap, sorry snap_lock(); #endif SHOW_FLOW( 7, "flags = %b", flags, "\020\1USER\2VM\3JIT\4NATIVE\5KERNEL\6?PF\7?PA\10?CH\11TIMEOUT\12UNDEAD\13NOSCHED" ); phantom_thread_t *t = calloc(1, sizeof(phantom_thread_t)); //phantom_thread_t *t = calloc_aligned(1, sizeof(phantom_thread_t),16); // align at 16 bytes for ia32 fxsave // Can't be run yet t->sleep_flags = THREAD_SLEEP_LOCKED; t->tid = find_tid(t); SHOW_FLOW( 7, "tid = %d", t->tid ); #if CONF_NEW_CTTY t_inherit_ctty( t ); #else // inherit ctty t->ctty = GET_CURRENT_THREAD()->ctty; #endif common_thread_init(t, DEF_STACK_SIZE ); //t->priority = THREAD_PRIO_NORM; SHOW_FLOW( 7, "cpu = %d", t->cpu_id ); t->start_func_arg = arg; t->start_func = func; phantom_thread_state_init(t); SHOW_FLOW0( 7, "phantom_thread_state_init done" ); t->thread_flags |= flags; // Let it be elegible to run t->sleep_flags &= ~THREAD_SLEEP_LOCKED; t_enqueue_runq(t); SHOW_FLOW0( 7, "on run q" ); #if NEW_SNAP_SYNC snap_unlock(); #endif return t; }
// Called per each CPU except for boot one. void phantom_import_cpu_thread(int ncpu) { assert(threads_inited); // No malloc on new CPU before thread is imported! Malloc has mutex! physaddr_t pa; // unused phantom_thread_t *t; // = calloc(1, sizeof(phantom_thread_t)); hal_pv_alloc( &pa, (void **)&t, sizeof(phantom_thread_t) ); memset( t, 0, sizeof(phantom_thread_t) ); // Can't be run yet t->sleep_flags = THREAD_SLEEP_LOCKED; t->tid = find_tid(t); t->start_func_arg = 0; t->start_func = 0; common_thread_init(t, DEF_STACK_SIZE ); phantom_thread_state_init(t); t->thread_flags |= THREAD_FLAG_UNDEAD; t->thread_flags |= THREAD_FLAG_NOSCHEDULE; // Let it be elegible to run t->sleep_flags &= ~THREAD_SLEEP_LOCKED; //GET_CURRENT_THREAD() = t; SET_CURRENT_THREAD(t); char *name = calloc(1, 20); snprintf( name, 20, "CPU %d idle", ncpu ); t_current_set_name(name); free(name); t->priority = THREAD_PRIO_IDLE; GET_IDLEST_THREAD() = t; }
/**
 * Dispatch one incoming Kobra IPC message.
 *
 * Message payload layout: first byte is the command, the rest is
 * command-specific (typically a NUL-terminated group name, optionally
 * followed by message data for KOBRA_CMD_SEND).
 *
 * Unregistered senders may only issue KOBRA_CMD_REGISTER; everything
 * else is silently ignored. Malformed payloads are also ignored
 * (error reporting is still a TODO throughout).
 *
 * @param message  raw IPC message; payload follows the header in memory.
 */
void message_handle(kolibri_IPC_message_t *message)
{
    char *msg = (char *)message + sizeof(kolibri_IPC_message_t);
    char cmd = msg[0];
    thread_list_t *thread = find_tid(main_group_list, message->tid);
    group_list_t *group;
    int i;

    if (cmd == KOBRA_CMD_REGISTER && !thread) {
        kobra_register(message->tid);
    } else if (thread) {
        switch (cmd) {
        case KOBRA_CMD_JOIN:
            // Payload must be cmd byte + at least 1 name char + NUL.
            if (message->length < 3 || msg[message->length - 1] != '\0') {
                // Here should be some error handler
                return;
            }
            if (!(group = find_group(msg + 1))) {
                group = create_group(msg + 1);
            }
            add_to_group(group, message->tid);
            break;

        case KOBRA_CMD_UNJOIN:
            if (message->length < 3 || msg[message->length - 1] != '\0') {
                // Here should be some error handler
                return;
            }
            // Only remove if the sender is actually a member of the group.
            if ((group = find_group(msg + 1)) && (thread = find_tid(group, message->tid))) {
                remove_from_group(group, thread);
            }
            break;

        case KOBRA_CMD_SEND:
            if (message->length < 4) {
                // Here should be some error handler
                return;
            }
            // Check if group name is correct: scan for the NUL that
            // separates the group name from the message body.
            for (i = 1; i < message->length - 1 && msg[i]; ++i);
            if (msg[i]) {
                // Name not NUL-terminated within the payload.
                // Here should be some error handler
                return;
            }
            group = find_group(msg + 1);
            if (!group) {
                // Here should be some error handler
                return;
            }
            send_group_message(group, message->tid, msg + i + 1, message->length - i - 1);
            break;

        case KOBRA_CMD_GET_LIST_NAME:
            // This is temporary realisation
            kolibri_IPC_send(message->tid, KOBRA_MEMAREA_NAME, KOBRA_MEMAREA_NAME_LENGTH);
            // BUGFIX: was falling through into default (the error path).
            break;

        default:
            // Here should be some error handler
            return;
        }
    }
}
phantom_thread_t * phantom_create_thread( void (*func)(void *), void *arg, int flags ) { assert( ! (flags & ~CREATION_POSSIBLE_FLAGS) ); #if USE_FORK_LUKE phantom_thread_t *t = calloc(1, sizeof(phantom_thread_t)); // Can't be run yet t->sleep_flags = THREAD_SLEEP_LOCKED; t->thread_flags = 0; t->tid = find_tid(t); t->name = "?"; //t->priority = THREAD_PRIO_NORM; int ssize = 64*1024; t->stack_size = ssize; t->stack = calloc( 1, ssize ); assert(t->stack != 0); t->start_func_arg = arg; t->start_func = func; phantom_thread_state_init(t); // Let it be elegible to run //t->sleep_flags &= ~THREAD_SLEEP_LOCKED; //t_enqueue_runq(t); GET_CURRENT_THREAD()->child_tid = t->tid; GET_CURRENT_THREAD()->thread_flags |= THREAD_FLAG_PARENT; t->thread_flags |= THREAD_FLAG_CHILD; if(t_prepare_fork()) { // child - NEW STACK, no local vars are accessible phantom_thread_t *me = GET_CURRENT_THREAD(); void (*sstart)(void *) = me->start_func; sstart(me->start_func_arg); panic("thread returned"); } // parent return t; #else //phantom_thread_t *t = find_thread(); phantom_thread_t *t = calloc(1, sizeof(phantom_thread_t)); // Can't be run yet t->sleep_flags = THREAD_SLEEP_LOCKED; t->tid = find_tid(t); common_thread_init(t, 64*1024 ); //t->priority = THREAD_PRIO_NORM; t->start_func_arg = arg; t->start_func = func; phantom_thread_state_init(t); t->thread_flags |= flags; // Let it be elegible to run t->sleep_flags &= ~THREAD_SLEEP_LOCKED; t_enqueue_runq(t); return t; #endif }