/*
 * Create a new thread based on an existing one.
 *
 * The new thread has name NAME, and starts executing in function
 * ENTRYPOINT. DATA1 and DATA2 are passed to ENTRYPOINT.
 *
 * The new thread is created in the process PROC. If PROC is null, the
 * process is inherited from the caller. It will start on the same CPU
 * as the caller, unless the scheduler intervenes first.
 */
int
thread_fork(const char *name,
	    struct proc *proc,
	    void (*entrypoint)(void *data1, unsigned long data2),
	    void *data1, unsigned long data2)
{
	struct thread *newthread;
	int result;

	newthread = thread_create(name);
	if (newthread == NULL) {
		return ENOMEM;
	}

	/* Allocate a stack */
	newthread->t_stack = kmalloc(STACK_SIZE);
	if (newthread->t_stack == NULL) {
		thread_destroy(newthread);
		return ENOMEM;
	}
	thread_checkstack_init(newthread);

	/*
	 * Now we clone various fields from the parent thread.
	 */

	/* Thread subsystem fields */
	newthread->t_cpu = curthread->t_cpu;

	/* Attach the new thread to its process */
	if (proc == NULL) {
		proc = curthread->t_proc;
	}
	result = proc_addthread(proc, newthread);
	if (result) {
		/* thread_destroy will clean up the stack */
		thread_destroy(newthread);
		return result;
	}

	/*
	 * Because new threads come out holding the cpu runqueue lock
	 * (see notes at bottom of thread_switch), we need to account
	 * for the spllower() that will be done releasing it.
	 */
	newthread->t_iplhigh_count++;

	spinlock_acquire(&thread_count_lock);
	++thread_count;
	wchan_wakeall(thread_count_wchan, &thread_count_lock);
	spinlock_release(&thread_count_lock);

	/* Set up the switchframe so entrypoint() gets called */
	switchframe_init(newthread, entrypoint, data1, data2);

	/* Lock the current cpu's run queue and make the new thread runnable */
	thread_make_runnable(newthread, false);

	return 0;
}
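/*
 * Usage sketch (an addition, not from the original source): a minimal
 * caller for thread_fork() above. The entry point "hello" and the
 * wrapper "spawn_hello" are hypothetical names; the NULL process
 * argument and the 0/ENOMEM return convention follow the code above.
 */
static void hello(void *data1, unsigned long data2)
{
	(void)data1;
	kprintf("hello from worker %lu\n", data2);
	/* a thread normally finishes by calling thread_exit() */
}

static int spawn_hello(unsigned long n)
{
	int result;

	/* NULL process: the new thread inherits the caller's process. */
	result = thread_fork("hello", NULL, hello, NULL, n);
	if (result) {
		return result; /* typically ENOMEM */
	}
	return 0;
}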
void socket_close(struct socket_t* s) {
    mutex_lock(s->mutex);
    if (s->is_connected) {
        s->is_connected = false;
        close(s->socket);
        s->socket = -1;
        if (s->accept_thread != NULL) {
            /* Drop the lock while destroying the thread, so the thread
               itself can take the mutex on its way out. */
            mutex_unlock(s->mutex);
            thread_destroy(s->accept_thread);
            mutex_lock(s->mutex);
            s->accept_thread = NULL;
        }
        if (s->receive_thread != NULL) {
            mutex_unlock(s->mutex);
            thread_destroy(s->receive_thread);
            mutex_lock(s->mutex);
            s->receive_thread = NULL;
        }
        if (s->callbacks.closed) {
            /* Invoke the user callback without holding our lock. */
            mutex_unlock(s->mutex);
            s->callbacks.closed(s, s->callbacks.ctx.closed);
            mutex_lock(s->mutex);
        }
    }
    mutex_unlock(s->mutex);
}
int bacstack_session_destroy(bacstack_session_t *session)
{
    return 1
        && rwlock_destroy(&session->routetable_lock)
        && bacstack_routetable_destroy(&session->routetable)
        && pool_destroy(&session->message_pool)
        && pool_destroy(&session->proc_pool)
        && bacdl_server_destroy(&session->dl_server)
        && thread_destroy(&session->msg_proc_thread)
        && thread_destroy(&session->dl_accept_thread)
        && thread_destroy(&session->dl_receive_thread);
}
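/*
 * Note on the chain above: && short-circuits, so the first destructor
 * that fails skips every later one. A rework (an alternative, not from
 * the original source) that attempts all destructors and still reports
 * overall success or failure:
 */
int bacstack_session_destroy_all(bacstack_session_t *session)
{
    int ok = 1;

    /* Evaluate every destructor regardless of earlier failures. */
    ok = rwlock_destroy(&session->routetable_lock) && ok;
    ok = bacstack_routetable_destroy(&session->routetable) && ok;
    ok = pool_destroy(&session->message_pool) && ok;
    ok = pool_destroy(&session->proc_pool) && ok;
    ok = bacdl_server_destroy(&session->dl_server) && ok;
    ok = thread_destroy(&session->msg_proc_thread) && ok;
    ok = thread_destroy(&session->dl_accept_thread) && ok;
    ok = thread_destroy(&session->dl_receive_thread) && ok;

    return ok;
}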
void free_threads()
{
	/* destroy all worker threads */
	thread_destroy(&nuauthdatas->tls_pusher);
	thread_destroy(&nuauthdatas->search_and_fill_worker);
	thread_list_destroy(nuauthdatas->tls_auth_servers);
	thread_list_destroy(nuauthdatas->tls_nufw_servers);
	thread_destroy(&nuauthdatas->limited_connections_handler);
	if (nuauthconf->push && nuauthconf->hello_authentication) {
		thread_destroy(&nuauthdatas->localid_auth_thread);
	}
}
/*
 * Last exit routine...
 * Uses the globals *tty_tmp and terms[].
 */
void g00dbye(void)
{
   write_log(0, " g00dbye function called from %d\n", (int)pthread_self());

   term_delete_all_tty();

#ifdef HAS_CURSES
   if (tty_tmp->interactive && (terms->gui_th.id != 0))
      thread_destroy(&terms->gui_th);
#endif

#ifdef HAVE_GTK
   if (tty_tmp->gtk && (terms->gui_gtk_th.id != 0)) {
      thread_destroy(&terms->gui_gtk_th);
   }
#endif

#ifdef HAVE_REMOTE_ADMIN
   if (tty_tmp->daemonize)
      admin_exit();
#endif

   /* Kill Uptime thread... */
   if (terms->uptime_th.id)
      thread_destroy_cancel(terms->uptime_th.id);

   if (tty_tmp && tty_tmp->term)
      free(tty_tmp->term);

   /* Destroy interfaces only if they are initialized!! */
   if (terms)
      interfaces_destroy(&terms->pcap_listen_th);

   protocol_destroy();

   if (terms)
      term_destroy();

   if (!tty_tmp->daemonize) {
      write_log(0, " Showing MOTD..\n");
      show_vty_motd();
   }

   finish_log();

   if (tty_tmp)
      free(tty_tmp);

   exit(0);
}
void grab_blocks() //goes through the routine to pick up the blocks
{
	//starts once near the pipe, with plowed tribbles; ends backed away from
	//the pipe, with the tribbles in front and the tribble arm down
	servo_set(BLOCK_CLAW, BC_CLOSE, .1); //drop the claw
	move_block_arm(BLA_DOWN);
	servo_set(BLOCK_CLAW, BC_OPEN, .1);
	time_drive(50, 50, 1700); //drive up to the blocks
	servo_set(BLOCK_CLAW, BC_CLOSE, .75); //and pick them up
	msleep(250);
	move_block_arm(BLA_LIFT); //get off the ground
	thread hold = thread_create(hold_ba_lift); //holds the block arm at the lift location
	thread_start(hold);
	back(6, 60); //back away from the pipe
	thread_destroy(hold); //don't want to hold it up any more
	servo_set(TRIBBLE_ARM, TA_JUMP, .5); //bring the tribble arm up to keep the blocks in the basket
	servo_set(TRIBBLE_CLAW, TC_CLOSE, .5);
	servo_set(TRIBBLE_ARM, TA_UP, .5);
	move_block_arm(BLA_UP); //drop in the basket
	msleep(500);
	motor(BLOCK_ARM, -50); //stall it into the basket
	servo_set(BLOCK_CLAW, BC_OPEN, .75);
	msleep(300);
	servo_set(BLOCK_CLAW, BC_START, .4); //shake the claw a bit to push the blocks in if they didn't go in the first time
	msleep(100);
	servo_set(BLOCK_CLAW, BC_OPEN, .75);
	msleep(300);
	servo_set(BLOCK_CLAW, BC_START, .4);
	off(BLOCK_ARM); //stop stalling the motor
	servo_set(TRIBBLE_CLAW, TC_PART_OPEN, .4); //put the claw back down
	servo_set(TRIBBLE_ARM, TA_START, .4);
	msleep(200);
	servo_set(TRIBBLE_CLAW, TC_OPEN, .6);
	servo_set(TRIBBLE_ARM, TA_DOWN, .1);
}
static void sys_thread_control(uint32_t *param1, uint32_t *param2)
{
	l4_thread_t dest = param1[REG_R0];
	l4_thread_t space = param1[REG_R1];
	l4_thread_t pager = param1[REG_R3];
	void *utcb = (void *) param2[0];	/* R4 */

	mempool_t *utcb_pool =
	        mempool_getbyid(mempool_search((memptr_t) utcb, UTCB_SIZE));

	if (!utcb_pool || !(utcb_pool->flags & (MP_UR | MP_UW))) {
		/* Incorrect UTCB relocation */
		return;
	}

	if (space != L4_NILTHREAD) {
		/* Creation of thread */
		tcb_t *thr = thread_create(dest, utcb);
		thread_space(thr, space, utcb);
		thr->utcb->t_pager = pager;
		param1[REG_R0] = 1;
	}
	else {
		/* Removal of thread */
		tcb_t *thr = thread_by_globalid(dest);
		thread_free_space(thr);
		thread_destroy(thr);
	}
}
/* Destroy the threadpool */
void thpool_destroy(thpool_* thpool_p){
	volatile int threads_total = thpool_p->num_threads_alive;

	/* End each thread's infinite loop */
	threads_keepalive = 0;

	/* Give one second to kill idle threads */
	double TIMEOUT = 1.0;
	time_t start, end;
	double tpassed = 0.0;
	time (&start);
	while (tpassed < TIMEOUT && thpool_p->num_threads_alive){
		bsem_post_all(thpool_p->jobqueue.has_jobs);
		time (&end);
		tpassed = difftime(end,start);
	}

	/* Poll remaining threads */
	while (thpool_p->num_threads_alive){
		bsem_post_all(thpool_p->jobqueue.has_jobs);
		sleep(1);
	}

	/* Job queue cleanup */
	jobqueue_destroy(&thpool_p->jobqueue);

	/* Deallocs */
	int n;
	for (n=0; n < threads_total; n++){
		thread_destroy(thpool_p->threads[n]);
	}
	free(thpool_p->threads);
	free(thpool_p);
}
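/*
 * Usage sketch for the pool above, assuming the usual companion API
 * (thpool_init and thpool_add_work are assumptions; only
 * thpool_destroy appears in the original): queue some work, then let
 * thpool_destroy() run its timed shutdown.
 */
#include <stdio.h>
#include <stdint.h>

static void task(void *arg)
{
	printf("task %d\n", (int)(intptr_t)arg);
}

int run_pool_once(void)
{
	int i;
	thpool_ *pool = thpool_init(4); /* assumed constructor: 4 workers */

	if (pool == NULL)
		return -1;

	for (i = 0; i < 16; i++)
		thpool_add_work(pool, task, (void *)(intptr_t)i);

	thpool_destroy(pool); /* waits ~1s for idle threads, then polls */
	return 0;
}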
void thread_join(thread_t thread, void **result)
{
	pthread_t *pthread = (pthread_t *)thread;
	CHECK_EQ(pthread_join(*pthread, result), 0);
	thread_destroy(pthread);
}
void rtp_recorder_destroy(struct rtp_recorder_t* rr) {
    mutex_lock(rr->timer_mutex);
    if (rr->synchronization_thread != NULL) {
        /* Wake the timer wait, then join outside the lock so the
           thread can finish without deadlocking on timer_mutex. */
        condition_signal(rr->timer_cond);
        mutex_unlock(rr->timer_mutex);
        thread_join(rr->synchronization_thread);
        mutex_lock(rr->timer_mutex);
        thread_destroy(rr->synchronization_thread);
        rr->synchronization_thread = NULL;
    }
    mutex_unlock(rr->timer_mutex);

    rtp_socket_destroy(rr->streaming_socket);
    rtp_socket_destroy(rr->control_socket);
    rtp_socket_destroy(rr->timing_socket);

    sockaddr_destroy(rr->remote_control_end_point);
    sockaddr_destroy(rr->remote_timing_end_point);

    mutex_destroy(rr->timer_mutex);
    condition_destroy(rr->timer_cond);

    free(rr);
}
/* -------------------------------------------------------------------
 * Any necessary cleanup before Iperf quits. Called at program exit,
 * either by exit() or terminating main().
 * ------------------------------------------------------------------- */
void cleanup( void ) {
#ifdef WIN32
    // Shutdown Winsock
    WSACleanup();
#endif /* WIN32 */

    // clean up the list of clients
    Iperf_destroy ( &clients );

    // shutdown the thread subsystem
    IPERF_DEBUGF( THREAD_DEBUG | IPERF_DBG_TRACE, ( "Deinitializing the thread subsystem.\n" ) );
    thread_destroy( );

    IPERF_DEBUGF( CONDITION_DEBUG | IPERF_DBG_TRACE, ( "Destroying report condition.\n" ) );
    Condition_Destroy( &ReportCond );

    IPERF_DEBUGF( CONDITION_DEBUG | IPERF_DBG_TRACE, ( "Destroying report done condition.\n" ) );
    Condition_Destroy( &ReportDoneCond );

    IPERF_DEBUGF( MUTEX_DEBUG | IPERF_DBG_TRACE, ( "Destroying group condition mutex.\n" ) );
    Mutex_Destroy( &groupCond );

    IPERF_DEBUGF( MUTEX_DEBUG | IPERF_DBG_TRACE, ( "Destroying clients mutex.\n" ) );
    Mutex_Destroy( &clients_mutex );

#ifdef IPERF_DEBUG
    debug_init();
#endif /* IPERF_DEBUG */
} // end cleanup
DECLARE_TEST( atomic, cas )
{
	int num_threads = 32;
	int ithread;
	object_t threads[32];
	cas_value_t cas_values[32];

	for( ithread = 0; ithread < num_threads; ++ithread )
	{
		threads[ithread] = thread_create( cas_thread, "cas", THREAD_PRIORITY_NORMAL, 0 );
		cas_values[ithread].val_32 = ithread;
		cas_values[ithread].val_64 = ithread;
		cas_values[ithread].val_ptr = (void*)(uintptr_t)ithread;
	}
	for( ithread = 0; ithread < num_threads; ++ithread )
		thread_start( threads[ithread], &cas_values[ithread] );

	test_wait_for_threads_startup( threads, num_threads );

	for( ithread = 0; ithread < num_threads; ++ithread )
		thread_destroy( threads[ithread] );

	test_wait_for_threads_exit( threads, num_threads );

	EXPECT_EQ( val_32, 0 );
	EXPECT_EQ( val_64, 0 );
	EXPECT_EQ( val_ptr, 0 );

	return 0;
}
void log_exit_platform(void) {
	if (_named_pipe_running) {
		SetEvent(_named_pipe_stop_event);
		thread_join(&_named_pipe_thread);
		thread_destroy(&_named_pipe_thread);
	}

	_named_pipe_connected = false;

	if (_named_pipe != INVALID_HANDLE_VALUE) {
		CloseHandle(_named_pipe);
	}

	if (_named_pipe_stop_event != NULL) {
		CloseHandle(_named_pipe_stop_event);
	}

	if (_named_pipe_write_event != NULL) {
		CloseHandle(_named_pipe_write_event);
	}

	if (_event_log != NULL) {
		DeregisterEventSource(_event_log);
	}

	mutex_destroy(&_named_pipe_write_event_mutex);
}
DECLARE_TEST( error, thread )
{
	//Launch 32 threads
	object_t thread[32];
	int i;

	for( i = 0; i < 32; ++i )
	{
		thread[i] = thread_create( error_thread, "error", THREAD_PRIORITY_NORMAL, 0 );
		thread_start( thread[i], 0 );
	}

	test_wait_for_threads_startup( thread, 32 );
	test_wait_for_threads_finish( thread, 32 );

	for( i = 0; i < 32; ++i )
	{
		EXPECT_EQ( thread_result( thread[i] ), 0 );
		thread_destroy( thread[i] );
	}

	test_wait_for_threads_exit( thread, 32 );

	return 0;
}
void profile_enable( bool enable )
{
	bool was_enabled = ( _profile_enable > 0 );
	bool is_enabled = enable;

	if( is_enabled && !was_enabled )
	{
		_profile_enable = 1;

		//Start output thread
		_profile_io_thread = thread_create( _profile_io, "profile_io", THREAD_PRIORITY_BELOWNORMAL, 0 );
		thread_start( _profile_io_thread, 0 );

		while( !thread_is_running( _profile_io_thread ) )
			thread_yield();
	}
	else if( !is_enabled && was_enabled )
	{
		//Stop output thread
		thread_terminate( _profile_io_thread );
		thread_destroy( _profile_io_thread );

		while( thread_is_running( _profile_io_thread ) )
			thread_yield();

		_profile_enable = 0;
	}
}
/*
 * Kill attack thread pertaining to "node".
 * If "pid" == 0 then kill *ALL* node attack threads.
 * Return -1 on error. Return 0 if Ok.
 */
int8_t attack_kill_th(struct term_node *node, pthread_t pid)
{
   u_int16_t i, j;

   i = 0;
   while (i < MAX_PROTOCOLS)
   {
      if (protocols[i].visible && node->protocol[i].attacks)
      {
         j = 0;
         while (j < MAX_THREAD_ATTACK)
         {
            if (node->protocol[i].attacks[j].up == 1)
            {
               if (!pid || (node->protocol[i].attacks[j].attack_th.id == pid))
               {
                  thread_destroy(&node->protocol[i].attacks[j].attack_th);
                  pthread_mutex_destroy(&node->protocol[i].attacks[j].attack_th.finished);
                  if (pid)
                     return 0;
               }
            }
            j++;
         }
      }
      i++;
   } /* while protocols... */

   return 0;
}
void CGraphicsBackend_Threaded::StopProcessor()
{
	m_Shutdown = true;
	m_Activity.signal();
	thread_wait(m_pThread);
	thread_destroy(m_pThread);
}
DECLARE_TEST( mutex, sync )
{
	mutex_t* mutex;
	object_t thread[32];
	int ith;

	mutex = mutex_allocate( "test" );
	mutex_lock( mutex );

	for( ith = 0; ith < 32; ++ith )
	{
		thread[ith] = thread_create( mutex_thread, "mutex_thread", THREAD_PRIORITY_NORMAL, 0 );
		thread_start( thread[ith], mutex );
	}

	test_wait_for_threads_startup( thread, 32 );

	for( ith = 0; ith < 32; ++ith )
	{
		thread_terminate( thread[ith] );
		thread_destroy( thread[ith] );
	}

	mutex_unlock( mutex );

	test_wait_for_threads_exit( thread, 32 );

	mutex_deallocate( mutex );

	EXPECT_EQ( thread_counter, 32 * 128 );

	return 0;
}
int main(int argc, char **argv)
{
	thread_t *threads[2];
	list_t *l;

	l = list_new(&g_long_op);

	threads[0] = thread_new(NULL, _routine_insert0, (void *) l);
	threads[1] = thread_new(NULL, _routine_insert1, (void *) l);

	thread_start(threads[0]);
	thread_start(threads[1]);

	thread_destroy(threads[0], 1);
	thread_destroy(threads[1], 1);

	list_destroy(l);

	return EXIT_SUCCESS;
}
static void test_run( void )
{
	unsigned int ig, gsize, ic, csize;
	void* result = 0;
#if !BUILD_MONOLITHIC
	object_t thread_event = 0;
#endif

	log_infof( HASH_TEST, "Running test suite: %s", test_suite.application().short_name );

	_test_failed = false;

#if !BUILD_MONOLITHIC
	thread_event = thread_create( test_event_thread, "event_thread", THREAD_PRIORITY_NORMAL, 0 );
	thread_start( thread_event, 0 );

	while( !thread_is_running( thread_event ) )
		thread_yield();
#endif

	for( ig = 0, gsize = array_size( _test_groups ); ig < gsize; ++ig )
	{
		log_infof( HASH_TEST, "Running tests from group %s", _test_groups[ig]->name );
		for( ic = 0, csize = array_size( _test_groups[ig]->cases ); ic < csize; ++ic )
		{
			log_infof( HASH_TEST, "  Running %s tests", _test_groups[ig]->cases[ic]->name );
			result = _test_groups[ig]->cases[ic]->fn();
			if( result != 0 )
			{
				log_warn( HASH_TEST, WARNING_SUSPICIOUS, "    FAILED" );
				_test_failed = true;
			}
			else
			{
				log_info( HASH_TEST, "    PASSED" );
			}
#if BUILD_MONOLITHIC
			if( _test_should_terminate )
			{
				_test_failed = true;
				goto exit;
			}
#endif
		}
	}

#if !BUILD_MONOLITHIC
	thread_terminate( thread_event );
	thread_destroy( thread_event );

	while( thread_is_running( thread_event ) || thread_is_thread( thread_event ) )
		thread_yield();
#else
exit:
#endif

	log_infof( HASH_TEST, "Finished test suite: %s", test_suite.application().short_name );
}
void event_test(void)
{
	pthread_t threads[2];

	assert(0 == event_create(&ev1));
	assert(0 == event_create(&ev2));

	event_signal(&ev1);
	event_reset(&ev1);

	thread_create(&threads[0], thread0, NULL);
	thread_create(&threads[1], thread1, NULL);

	thread_destroy(threads[0]);
	thread_destroy(threads[1]);

	assert(0 == event_destroy(&ev1));
	assert(0 == event_destroy(&ev2));
}
static int aio_worker_cleanup(void)
{
	size_t i;

	s_running = 0;
	for (i = 0; i < MAX_THREAD; i++)
		thread_destroy(s_threads[i]);

	aio_socket_clean();
	return 0;
}
/*
 * Pass nullptr to l4_getid syscall.
 *
 * This exercise proves that the kernel does not crash
 * and validly sends a page fault to the offending thread's
 * pager.
 */
int test_getid_nullptr(void)
{
	struct l4_thread *thread;
	int err;

	/*
	 * Create a new thread that will attempt
	 * passing a null ptr argument.
	 */
	if ((err = thread_create(thread_getid_nullptr, 0,
	                         TC_SHARE_SPACE, &thread)) < 0) {
		dbg_printf("Thread create failed. err=%d\n", err);
		return err;
	}
	dbg_printf("Thread created successfully. tid=%d\n", thread->ids.tid);

	/*
	 * Listen on the thread for its page fault ipc.
	 * (Recap: Upon illegal access, the kernel sends
	 * a page fault ipc message to the thread's pager.)
	 */
	if ((err = l4_receive(thread->ids.tid)) < 0) {
		dbg_printf("%s: listening on page fault for "
		           "nullptr thread failed. err = %d\n",
		           __FUNCTION__, err);
		return err;
	}

	/* Verify the ipc was a page fault ipc. */
	if (l4_get_tag() != L4_IPC_TAG_PFAULT) {
		dbg_printf("%s: Nullptr thread ipc does not "
		           "have expected page fault tag.\n"
		           "tag=%d, expected=%d\n", __FUNCTION__,
		           l4_get_tag(), L4_IPC_TAG_PFAULT);
		return -1;
	}

	/* Destroy the thread. */
	if ((err = thread_destroy(thread)) < 0) {
		dbg_printf("%s: Failed destroying thread. "
		           "err=%d, tid=%d\n", __FUNCTION__,
		           err, thread->ids.tid);
		return err;
	}

	return 0;
}
thread_t thread_create(thread_fn fn, const char *name, void *data)
{
	pthread_t *pthread = calloc(1, sizeof(pthread_t));
	if (pthread_create(pthread, NULL, fn, data)) {
		thread_destroy(pthread);
		return NULL;
	}
	return (thread_t)pthread;
}
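/*
 * Usage sketch tying the thread_create and thread_join wrappers above
 * together (an addition, not from the original source). "echo" and
 * "run_once" are hypothetical names; thread_fn is assumed to have the
 * pthread-style void *(*)(void *) signature, since the wrapper passes
 * it straight to pthread_create().
 */
#include <stdint.h>

static void *echo(void *data)
{
	return data; /* handed back through thread_join's result */
}

void run_once(void)
{
	void *result = NULL;
	thread_t t = thread_create(echo, "echo", (void *)(intptr_t)42);

	if (t != NULL) {
		/* join; per the wrapper above this also destroys the handle */
		thread_join(t, &result);
	}
}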
DECLARE_TEST( profile, thread )
{
	object_t thread[32];
	int ith;
	int frame;
	error_t err = error();

	_test_profile_offset = 0;
	atomic_store32( &_test_profile_output_counter, 0 );

	profile_initialize( "test_profile", _test_profile_buffer, 30000/*_test_profile_buffer_size*/ );
	profile_enable( true );
	profile_set_output_wait( 1 );

	log_info( HASH_TEST, "This test will intentionally run out of memory in profiling system" );

	for( ith = 0; ith < 32; ++ith )
	{
		thread[ith] = thread_create( _profile_fail_thread, "profile_thread", THREAD_PRIORITY_NORMAL, 0 );
		thread_start( thread[ith], 0 );
	}

	test_wait_for_threads_startup( thread, 32 );

	for( frame = 0; frame < 1000; ++frame )
	{
		thread_sleep( 16 );
		profile_end_frame( frame );
	}

	for( ith = 0; ith < 32; ++ith )
	{
		thread_terminate( thread[ith] );
		thread_destroy( thread[ith] );
		thread_yield();
	}

	test_wait_for_threads_exit( thread, 32 );
	thread_sleep( 1000 );

	profile_enable( false );
	profile_shutdown();

	err = error();

#if BUILD_ENABLE_PROFILE
	EXPECT_GT( atomic_load32( &_test_profile_output_counter ), 0 );
	//TODO: Implement parsing output results
#else
	EXPECT_EQ( atomic_load32( &_test_profile_output_counter ), 0 );
#endif
	EXPECT_EQ( err, ERROR_NONE );

	return 0;
}
void semaphore_test(void)
{
	pthread_t threads[2];

	assert(0 == semaphore_create(&sem1, NULL, 3));
	assert(0 == semaphore_create(&sem2, NULL, 0));

	assert(0 == semaphore_wait(&sem1));
	assert(0 == semaphore_wait(&sem1));
	assert(0 == semaphore_trywait(&sem1));
	assert(0 != semaphore_trywait(&sem1));

	thread_create(&threads[0], thread0, NULL);
	thread_create(&threads[1], thread1, NULL);

	thread_destroy(threads[0]);
	thread_destroy(threads[1]);
	printf("all threads exited\n");

	assert(0 == semaphore_destroy(&sem1));
	assert(0 == semaphore_destroy(&sem2));
}
void cleanup( void ) {
#ifdef WIN32
    // Shutdown Winsock
    WSACleanup();
#endif

    // clean up the list of clients
    Iperf_destroy ( &clients );

    // shutdown the thread subsystem
    thread_destroy( );
} // end cleanup
thread::~thread()
{
	--*refcnt;
	if(*refcnt == 0)
	{
		/* Last owner: join/destroy the underlying thread if it was
		   created joinable, then release the shared state. */
		if(isjoin)
			thread_destroy(handle);
		delete refcnt;
		refcnt = 0;
		delete isdone;
	}
}
void CAStar::OnStateChange(int NewState, int OldState)
{
	if(m_pThread)
	{
		m_ThreadShouldExit = true;
		thread_destroy(m_pThread);
		m_pThread = 0;
		m_ThreadShouldExit = false;
	}

	m_Path.clear();
}
/*
 * Clean up zombies. (Zombies are threads that have exited but still
 * need to have thread_destroy called on them.)
 *
 * The list of zombies is per-cpu.
 */
static void exorcise(void)
{
	struct thread *z;

	while ((z = threadlist_remhead(&curcpu->c_zombies)) != NULL) {
		KASSERT(z != curthread);
		KASSERT(z->t_state == S_ZOMBIE);
		thread_destroy(z);
	}
}
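/*
 * For context, a sketch of the producing side of the zombie list
 * (reconstructed from OS/161 conventions, not from the section above;
 * thread_switch is private to thread.c and its exact arguments vary
 * between versions): an exiting thread detaches from its process,
 * switches away as S_ZOMBIE, and is later reaped by exorcise() above.
 */
void thread_exit(void)
{
	struct thread *cur = curthread;

	/* Detach from the process while the stack is still valid. */
	proc_remthread(cur);

	/* Switch away for the last time; the scheduler places this
	   thread on curcpu->c_zombies once it is off this stack. */
	splhigh();
	thread_switch(S_ZOMBIE, NULL, NULL);
	panic("thread_exit: should not get here\n");
}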