/*
 * Build the global team list from the newline-separated names in
 * g_server.param.team_names.  Idempotent: returns immediately if the
 * list already exists.  The names buffer is consumed — tokenized in
 * place by strtok() and then freed — so it must be heap-allocated and
 * is invalid after this call.  When no names were supplied, falls back
 * to a single team named DEFAULT_N.
 */
void team_list_init(void)
{
    char *name;

    if (g_server.team_list != NULL)
        return;
    g_server.team_list = list_create();
    /* Guard against a missing buffer: calling strtok(ptr, ...) with a
     * NULL ptr (and no prior successful call) is undefined behavior. */
    if (g_server.param.team_names != NULL) {
        name = strtok(g_server.param.team_names, "\n");
        while (name) {
            /* NOTE(review): assumes team_create() copies the name; the
             * buffer it points into is freed just below — confirm. */
            team_create(++g_server.param.team_max_id, name);
            name = strtok(NULL, "\n");
        }
        free(g_server.param.team_names);
        g_server.param.team_names = NULL; /* don't leave a dangling pointer */
    }
    /* No usable names (empty or missing buffer): create a default team
     * so the server always has at least one. */
    if (g_server.team_list->size < 1) {
        printf_warning("No team names specified, defaulting to \"%s\"",
            DEFAULT_N);
        team_create(++g_server.param.team_max_id, DEFAULT_N);
    }
}
// This test routine is only called by the master thread (thread_num = 0). bool team_example(void) { bool ok = true; size_t num_threads = NUMBER_THREADS; // Check that no memory is in use or avialable at start // (using thread_alloc in sequential mode) size_t thread_num; for(thread_num = 0; thread_num < num_threads; thread_num++) { ok &= thread_alloc::inuse(thread_num) == 0; ok &= thread_alloc::available(thread_num) == 0; } // initialize work_all_ for(thread_num = 0; thread_num < num_threads; thread_num++) { // allocate separate memory for this thread to avoid false sharing size_t min_bytes(sizeof(work_one_t)), cap_bytes; void* v_ptr = thread_alloc::get_memory(min_bytes, cap_bytes); work_all_[thread_num] = static_cast<work_one_t*>(v_ptr); // incase this thread's worker does not get called work_all_[thread_num]->ok = false; // parameter that defines the work for this thread work_all_[thread_num]->x = double(thread_num) + 1.; } ok &= team_create(num_threads); ok &= team_work(worker); ok &= team_destroy(); // go down so that free memrory for other threads before memory for master thread_num = num_threads; while(thread_num--) { // check that this thread was ok with the work it did ok &= work_all_[thread_num]->ok; // delete problem specific information void* v_ptr = static_cast<void*>( work_all_[thread_num] ); thread_alloc::return_memory( v_ptr ); // check that there is no longer any memory inuse by this thread // (for general applications, the master might still be using memory) ok &= thread_alloc::inuse(thread_num) == 0; // return all memory being held for future use by this thread thread_alloc::free_available(thread_num); } return ok; }
/*
 * go_kernel: final stage of kernel bring-up.  Sizes and maps the boot
 * image, creates the kernel team and its scheduler queues, spawns one
 * task per boot-directory code entry plus the built-in service threads
 * (idler, grim reaper, pager), then marks the boot context dead and
 * switches into the scheduler.  Never returns.
 */
void go_kernel(void)
{
	task_t *t;
	int i, len;
	void *ptr, *phys;
	port_t *uberport;
	area_t *uberarea;
	team_t *team;

	/* Total boot-image size: one extra unit plus every boot-directory
	 * entry's be_size, scaled to 0x1000-byte pages below. */
	for (i = 0, len = 1; (bdir->bd_entry[i].be_type != BE_TYPE_NONE); i++) {
		len += bdir->bd_entry[i].be_size;
	}
	len *= 0x1000;

	/* The "uber" port/area span the whole boot image; 0x100000 is
	 * presumably the 1MB physical load address — confirm. */
	uberport = rsrc_find_port(uberportnum = port_create(0, "uberport"));
	uberarea = rsrc_find_area(uberareanum =
		area_create_uber(len, (void *) 0x100000));
	kprintf("uberport allocated with rid = %d", uberportnum);
	kprintf("uberarea allocated with rid = %d", uberareanum);

	/* Kernel team owns the scheduler queues and the uber resources. */
	kernel_team = team_create();
	rsrc_set_name(&kernel_team->rsrc, "kernel team");
	run_queue = rsrc_find_queue(queue_create("run queue", kernel_team));
	reaper_queue = rsrc_find_queue(queue_create("reaper queue", kernel_team));
	timer_queue = rsrc_find_queue(queue_create("timer queue", kernel_team));
	rsrc_set_owner(&uberarea->rsrc, kernel_team);
	rsrc_set_owner(&uberport->rsrc, kernel_team);

	/* One team + thread per CODE entry in the boot directory.  Starts
	 * at entry 3 — entries 0-2 are presumably the directory/kernel
	 * itself, not loadable tasks; verify against the boot layout. */
	for (i = 3; bdir->bd_entry[i].be_type; i++) {
		if (bdir->bd_entry[i].be_type != BE_TYPE_CODE)
			continue;
		team = team_create();
		/* NOTE(review): entry point is hard-coded to 0x1074; the
		 * commented-out expression shows the intended per-entry
		 * value — confirm whether this is a temporary hack. */
		t = new_thread(team, 0x1074 /*bdir->bd_entry[i].be_code_ventr*/, 0);
		current = t;
		/* Map the entry's code at its physical location inside the
		 * boot image, then give the team a 0x2000-byte heap right
		 * after its text. */
		phys = (void *) (bdir->bd_entry[i].be_offset * 0x1000 + 0x100000);
		team->text_area = area_create(team->aspace,
			bdir->bd_entry[i].be_size * 0x1000,
			0x1000, &phys, AREA_PHYSMAP);
		team->heap_id = area_create(team->aspace, 0x2000,
			0x1000 + bdir->bd_entry[i].be_size * 0x1000, &ptr, 0);
		/* make the thread own its address space */
		/* rsrc_set_owner(&a->rsrc, t); */
		/* The "namer" task gets ownership of the uber port. */
		if (!strcmp(bdir->bd_entry[i].be_name, "namer")) {
			rsrc_set_owner(&uberport->rsrc, team);
		}
		rsrc_set_name(&t->rsrc, bdir->bd_entry[i].be_name);
		rsrc_set_name(&team->rsrc, bdir->bd_entry[i].be_name);
		kprintf("task %X @ 0x%x, size = 0x%x (%s)", t->rsrc.id,
			bdir->bd_entry[i].be_offset * 4096 + 0x100000,
			bdir->bd_entry[i].be_size * 4096, t->rsrc.name);
	}

	/* Built-in kernel threads.  Note the idler is NOT put on the run
	 * queue; the reaper and pager are. */
	kprintf("creating idler...");
	idle_task = new_thread(kernel_team, (int) idler, 1);
	rsrc_set_name((resource_t *) idle_task, "idler");

	kprintf("creating grim reaper...");
	current = new_thread(kernel_team, (int) reaper, 1);
	rsrc_set_name((resource_t *) current, "grim reaper");
	reaper_sem = sem_create(0, "death toll");
	rsrc_enqueue(run_queue, current);
	live_tasks++;

	kprintf("creating pager...");
	current = pager_task = new_thread(kernel_team, (int) pager, 1);
	rsrc_set_name((resource_t *) current, "pager");
	pager_port_no = port_create(0, "pager port");
	pager_sem_no = sem_create(0, "pager sem");
	rsrc_enqueue(run_queue, current);
	live_tasks++;

	rsrc_set_name((resource_t *) kernel, "kernel");

#ifdef __SMP__
	smp_init();
#endif

	// DEBUGGER();

	kprintf("starting scheduler...");
#ifdef __SMP__
	/* Release the other processors only once the queues exist. */
	if (smp_configured) {
		smp_final_setup();
		kprintf("smp: signaling other processors");
		smp_begin();
	}
#endif
	/*
	 * when the new vm stuffas are done, we can at this point discard any
	 * complete pages in the .text.init and .data.init sections of the kernel
	 * by zeroing them and adding them to the free physical page pool.
	 */

	/* Mark the boot context dead and switch away; the scheduler should
	 * never come back here. */
	current = kernel;
	current->flags = tDEAD;
	current->waiting_on = NULL;
	swtch();
	kprintf("panic: returned from scheduler?");
	asm("hlt");
}