void test_tcp_accept_socket(void) { int i, j; for(i = 0; i < TEST_TCP_SOCKET_SERVER_NUM; i++) { netdata[i].iport = TEST_TCP_SOCKET_SERVER_BEGIN_PORT+i; netdata[i].pbuf = (char*)NON_CACHE(g_RemoteNet_Buf1[i]); cyg_thread_create(THREAD_PRIORITY, &test_tcp_socket_server, (cyg_addrword_t)&netdata[i], NULL, thread_stack[i], STACK_SIZE, &thread_handle[i], &thread[i]); cyg_thread_resume(thread_handle[i]); } j = TEST_TCP_SOCKET_SERVER_NUM; for(i = 0; i < TEST_TCP_SOCKET_SERVER_NUM; i++) { netdata[i + j].iport = TEST_TCP_SOCKET_SERVER_BEGIN_PORT + j + i; netdata[i + j].pbuf = (char*)NON_CACHE(g_RemoteNet_Buf1[i + j]); cyg_thread_create(THREAD_PRIORITY, &test_tcp_socket_client, (cyg_addrword_t)&netdata[i + j], NULL, thread_stack[i + j], STACK_SIZE, &thread_handle[i + j], &thread[i + j]); cyg_thread_resume(thread_handle[i + j]); } j = TEST_TCP_SOCKET_SERVER_NUM; for(i = 0; i < TEST_TCP_SOCKET_SERVER_NUM; i++) { test_tcp_socket_thread_join(thread_handle[i]); cyg_thread_delete(thread_handle[i]); test_tcp_socket_thread_join(thread_handle[i + j]); cyg_thread_delete(thread_handle[i + j]); } }
/*
 * Shut down the JFFS2 garbage-collect thread for the filesystem described
 * by 'c', then release the kernel objects it used.
 *
 * Shutdown handshake (order matters):
 *   1. Set GC_THREAD_FLAG_STOP to ask the GC thread to exit.
 *   2. Wait for the thread to acknowledge by raising GC_THREAD_FLAG_HAS_EXIT
 *      (the wait also clears the flag via CYG_FLAG_WAITMODE_CLR).
 *   3. Only then kill/delete the thread and destroy the mutex and flag —
 *      safe because the HAS_EXIT flag proves the thread is past any use
 *      of those objects.
 */
void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c)
{
    /* OFNI_BS_2SFFJ maps the fs info back to its super_block
       ("JFFS2_SB_INFO" reversed). */
    struct super_block *sb = OFNI_BS_2SFFJ(c);

    CYG_ASSERTC(sb->s_gc_thread_handle);

    D1(printk("jffs2_stop_garbage_collect_thread\n"));
    /* Stop the thread and wait for it if necessary */
    cyg_flag_setbits(&sb->s_gc_thread_flags, GC_THREAD_FLAG_STOP);
    D1(printk("jffs2_stop_garbage_collect_thread wait\n"));
    cyg_flag_wait(&sb->s_gc_thread_flags,
                  GC_THREAD_FLAG_HAS_EXIT,
                  CYG_FLAG_WAITMODE_OR | CYG_FLAG_WAITMODE_CLR);

    // Kill and free the resources ...  this is safe due to the flag
    // from the thread.
    cyg_thread_kill(sb->s_gc_thread_handle);
    cyg_thread_delete(sb->s_gc_thread_handle);

    cyg_mutex_destroy(&sb->s_lock);
    cyg_flag_destroy(&sb->s_gc_thread_flags);
}
/*
 * Reaper thread: sleeps on the cleanup semaphore; each wakeup drains the
 * cleanup mailbox of shell_thread_t records, killing and deleting each
 * recorded thread and freeing its name string and descriptor.
 *
 * Possible future optimization (from the original author): check whether
 * more entries are waiting in the mailbox, and whether a semaphore
 * increment while we are in here can race — it does not appear to, but
 * verify carefully.
 */
static void cleanup_thread(cyg_addrword_t data)
{
    shell_thread_t *victim;

    for (;;) {
        cyg_semaphore_wait(&cleanup.cleanup_sem);
        do {
            victim = cyg_mbox_get(cleanup.mbox_handle);
            if (victim == NULL) {
                SHELL_PRINT("Cleanup received a NULL in mbox?!\n");
            } else {
                cyg_thread_kill(victim->thread_handle);
                cyg_thread_delete(victim->thread_handle);
                free(victim->name);
                free(victim);
            }
        } while (victim != NULL);
    }
}
void test_recv_entry(void) { int iPort = TEST_RECV_SERVER_BEGIN_PORT; int i; for(i = 0; i < TEST_RECV_SERVER_NUM; i++) { netdata[i].iport = iPort+i; netdata[i].pbuf = (char*)NON_CACHE(g_RemoteNet_Buf1[i]); netdata[i].precvbuf = msg[i]; cyg_thread_create(THREAD_PRIORITY, &test_recv_server, (cyg_addrword_t)&netdata[i], NULL, thread_stack[i], STACK_SIZE, &thread_handle[i], &thread[i]); cyg_thread_resume(thread_handle[i]); } tt_msleep(1000); for(; i < TEST_RECV_SERVER_NUM * 2; i++) { netdata[i].iport = iPort+i; netdata[i].pbuf = (char*)NON_CACHE(g_RemoteNet_Buf1[i]); netdata[i].precvbuf = msg[i]; cyg_thread_create(THREAD_PRIORITY, &test_recv_client, (cyg_addrword_t)&netdata[i], NULL, thread_stack[i], STACK_SIZE, &thread_handle[i], &thread[i]); cyg_thread_resume(thread_handle[i]); } for(i = 0; i < TEST_RECV_SERVER_NUM * 2; i++) { test_tcp_socket_thread_join(thread_handle[i]); cyg_thread_delete(thread_handle[i]); } }
/*
 * Cleanup helper: enumerate all threads, find the one named "main", and
 * kill/delete it.  This might seem superfluous, but there needs to be
 * another thread to clean up after 'main' — it can't kill itself.
 *
 * Fixes over the original:
 *   - The original called cyg_thread_get_next() once WITHOUT checking its
 *     return, then read thread info inside a do/while — if enumeration
 *     yielded nothing, cyg_thread_get_info() was called on an invalid
 *     handle.  The loop below only inspects handles get_next reported.
 *   - Guards against a NULL thread name before strcmp().
 */
void main_cleanup()
{
    cyg_handle_t thandle = 0;     /* enumeration cursor */
    cyg_uint16 tid;
    cyg_thread_info tinfo;

    while (cyg_thread_get_next(&thandle, &tid)) {
        cyg_thread_get_info(thandle, tid, &tinfo);
        if (tinfo.name != NULL && strcmp(tinfo.name, "main") == 0) {
            SHELL_DEBUG_PRINT("Found TID for main [%d]\n", tinfo.handle);
            cyg_thread_kill(thandle);
            cyg_thread_delete(thandle);
        }
    }
}
/** terminates sw threads */ void sample_sw_delete(void){ int i; // terminate all sw threads for (i=0; i<sw_number_of_threads_s;i++){ while (!cyg_thread_delete(sw_thread_s_handle[i])) cyg_thread_release(sw_thread_s_handle[i]); } }
/** terminates sw threads */ void observation_sw_delete(void){ int i; // terminate all sw threads for (i=0; i<sw_number_of_threads_o;i++){ while (!cyg_thread_delete(sw_thread_o_handle[i])) cyg_thread_release(sw_thread_o_handle[i]); } }
/*
 * Stop the 802.1X daemon thread.
 *
 * Returns 0 if the daemon was running and has been stopped, -1 if it was
 * not running (interfaces.rtapd == NULL).
 *
 * Fixes over the original: the function is declared to return int but fell
 * off the end without any return statement — undefined behavior if a caller
 * uses the result.  Also removes the unused local 'i'.
 */
int DOT1X_Stop(void)
{
    DBGPRINT(RT_DEBUG_ERROR,"DOT1X_Stop\n");
    if (interfaces.rtapd != NULL) {
        eloop_terminate();       /* ask the daemon's event loop to exit   */
        cyg_thread_delay(300);   /* give the thread time to wind down     */
        cyg_thread_delete(dot1x_thread);
        return 0;
    }

    DBGPRINT(RT_DEBUG_ERROR,"1x daemon not running interfaces.rtapd == NULL\n");
    return -1;
}
// // Destroy a TFTP server, using a previously created server 'id'. // int tftpd_stop(int p) { struct tftp_server *server = (struct tftp_server *)p; // Simple sanity check if (server->tag == TFTP_tag) { cyg_thread_kill(server->thread_handle); cyg_thread_set_priority(server->thread_handle, 0); cyg_thread_delay(1); // Make sure it gets to die... if (cyg_thread_delete(server->thread_handle)) { // Success shutting down the thread free(server); // Give up memory return 1; } } return 0; }
/*
 * Reap Lua threads that have terminated.
 *
 * Walks the 'terminated_threads' singly-linked list under the scheduler
 * lock, popping the head each iteration: the eCos thread is deleted and
 * its stack and descriptor are freed.
 *
 * The walk stops early if the list head is the CALLING thread
 * (thread->handle == cyg_thread_self()), so a thread that is itself on
 * the list never deletes itself or frees its own live stack; that entry
 * and anything after it are left for a later call to reclaim.
 */
void lua_ecosfreeterminatedthreads(void)
{
    cyg_scheduler_lock();
    {
        lua_ecos_thread_t *thread = terminated_threads;
        while (thread != NULL && thread->handle != cyg_thread_self()) {
            cyg_thread_delete(thread->handle);
            /* Unlink the head before freeing it. */
            terminated_threads = thread->next;
            free(thread->stack);
            free(thread);
            thread = terminated_threads;
        }
    }
    cyg_scheduler_unlock();
}
/*
 * Busy-wait until the thread identified by *handle has exited, then delete
 * it.  Polls cyg_thread_get_info() and yields between polls so other
 * threads (including the one being joined) can run.
 *
 * Fixes over the original: the exited state was the magic number 16 in a
 * switch; it is now a named constant, and the two-way switch is a plain
 * if/else.
 *
 * NOTE(review): 16 is assumed to be the eCos thread-state value reported
 * for an exited thread — confirm against the cyg_thread_info documentation
 * for this eCos version.
 */
void thread_join(cyg_handle_t* handle, cyg_thread* thread, cyg_thread_info* info)
{
    enum { THREAD_STATE_EXITED = 16 };  /* was a bare '16' in the original */
    BOOL done = FALSE;

    while ((done == FALSE) && (handle != NULL)) {
        cyg_thread_get_info(*handle, thread->unique_id, info);
        //diag_printf("state is %d\n",info->state);
        if (info->state == THREAD_STATE_EXITED) {
            cyg_thread_delete(*handle);
            done = TRUE;
        } else {
            /* Not exited yet — let other threads make progress. */
            cyg_thread_yield();
        }
    }
}
void test_sendmsg_entry(void) { int iPort = TEST_SENDMSG_SERVER_BEGIN_PORT; int i; for(i = 0; i < TEST_SENDMSG_SERVER_NUM; i++) { netdata[i].iport = iPort+i; netdata[i].pbuf = (char*)NON_CACHE(g_RemoteNet_Buf1[i]); cyg_thread_create(THREAD_PRIORITY, &test_sendmsg_server, (cyg_addrword_t)&netdata[i], NULL, thread_stack[i], STACK_SIZE, &thread_handle[i], &thread[i]); cyg_thread_resume(thread_handle[i]); } for(i = 0; i < TEST_SENDMSG_SERVER_NUM; i++) { test_tcp_socket_thread_join(thread_handle[i]); cyg_thread_delete(thread_handle[i]); } }
// // Destroy a TFTP server, using a previously created server 'id'. // int tftpd_stop(int p) { struct tftp_server *server = (struct tftp_server *)p; // Simple sanity check if (server->tag == TFTP_tag) { cyg_thread_kill(server->thread_handle); cyg_thread_set_priority(server->thread_handle, 0); cyg_thread_delay(1); // Make sure it gets to die... if (cyg_thread_delete(server->thread_handle)) { // Success shutting down the thread. Close all its sockets. int i; for (i = 0 ; i < CYGNUM_NET_MAX_INET_PROTOS; i++) { if (server->s[i]) { close (server->s[i]); } } freeaddrinfo(server->res); free(server); // Give up memory return 1; } } return 0; }
/*
 * SMP scheduler test: verify that timeslicing distributes CPU time.
 *
 * Creates 'nthread' equal-priority threads, lets them run for roughly 100
 * timeslice periods, suspends them, and inspects the tick counts the
 * threads recorded in slicerun[thread][cpu]:
 *   - every thread must have run at least once, and
 *   - every CPU must have run at least two distinct threads,
 * otherwise the file-level 'failed' counter is incremented.
 * A per-thread/per-CPU table is printed via diag_printf along the way.
 */
void run_test_timeslice(int nthread)
{
    int i,j;
    cyg_uint32 cpu_total[CYGNUM_KERNEL_CPU_MAX];    /* ticks per CPU            */
    cyg_uint32 cpu_threads[CYGNUM_KERNEL_CPU_MAX];  /* distinct threads per CPU */
    cyg_uint32 thread_total[NTHREADS_MAX];          /* ticks per thread         */

    CYG_TEST_INFO( "Timeslice Test: Check timeslicing works");

    // Init flags.
    for (i = 0; i < nthread; i++)
        for( j = 0; j < ncpus; j++ )
            slicerun[i][j] = 0;

    // Set my priority higher than any I plan to create
    cyg_thread_set_priority(cyg_thread_self(), 2);

    for (i = 0; i < nthread; i++) {
        cyg_thread_create(10,                    // Priority - just a number
                          test_thread_timeslice, // entry
                          i,                     // index
                          "test_thread",         // Name
                          &stacks[i][0],         // Stack
                          STACK_SIZE,            // Size
                          &threads[i],           // Handle
                          &test_threads[i]       // Thread data structure
            );
        cyg_thread_resume( threads[i]);
    }

    // Just wait a while, until the threads have all run for a bit.
    cyg_thread_delay( CYGNUM_KERNEL_SCHED_TIMESLICE_TICKS*100 );

    // Suspend all the threads
    for (i = 0; i < nthread; i++) {
        cyg_thread_suspend(threads[i]);
    }

    // And check that a thread ran on each CPU, and that each thread
    // ran.
    diag_printf(" Thread ");
    for( j = 0; j < ncpus; j++ ) {
        cpu_total[j] = 0;
        cpu_threads[j] = 0;
        //          " %11d" __123456789ab"
        diag_printf("     CPU %2d",j);
    }
    //          " %11d" __123456789ab"
    diag_printf("       Total\n");

    /* Accumulate and print each thread's ticks across all CPUs. */
    for (i = 0; i < nthread; i++) {
        thread_total[i] = 0;
        diag_printf("     %2d ",i);
        for( j = 0; j < ncpus; j++ ) {
            thread_total[i] += slicerun[i][j];
            cpu_total[j] += slicerun[i][j];
            if( slicerun[i][j] > 0 )
                cpu_threads[j]++;
            diag_printf(" %11d",slicerun[i][j]);
        }
        diag_printf(" %11d\n",thread_total[i]);
        /* A thread that never ran means timeslicing failed. */
        if( thread_total[i] == 0 )
            failed++;
    }

    diag_printf("  Total ");
    for( j = 0; j < ncpus; j++ )
        diag_printf(" %11d",cpu_total[j]);
    diag_printf("\n");

    diag_printf("Threads ");
    for( j = 0; j < ncpus; j++ ) {
        diag_printf(" %11d",cpu_threads[j]);
        /* Each CPU must have timesliced between at least two threads. */
        if( cpu_threads[j] < 2 )
            failed++;
    }
    diag_printf("\n");

    // Delete all the threads
    for (i = 0; i < nthread; i++) {
        cyg_thread_delete(threads[i]);
    }

    CYG_TEST_INFO( "Timeslice Test: done");
}
// Destructor: delete the underlying eCos thread owned by this object.
// NOTE(review): cyg_thread_delete() is called without first signalling the
// thread to exit — presumably the thread has already finished (or forcible
// deletion is acceptable here); confirm against NSFOSThread usage.
NSFOSThread_eCOS::~NSFOSThread_eCOS()
{
    cyg_thread_delete(handle);
}
// Adding --- By Arius 3/15/2000 // This function will begin service of telnet when printer server was started. // It will prepare parameter for telnet connection. void telnetstart(cyg_addrword_t data) { struct sockaddr_in lsocket; struct sockaddr_in sa_client; int s; FILE *network; char Buffer[50]; int16 clen; while( Network_TCPIP_ON == 0 ) ppause(100); if (telnetlink != -1) return; lsocket.sin_family = AF_INET; lsocket.sin_addr.s_addr = htonl(INADDR_ANY); lsocket.sin_port = htons(IPPORT_TELNET); telnetlink = socket(AF_INET,SOCK_STREAM,0); bind(telnetlink,(struct sockaddr *)&lsocket,sizeof(lsocket)); listen(telnetlink,1); for (;;) { clen=sizeof(sa_client); memset(&sa_client, 0, clen); if ((s = accept(telnetlink,(struct sockaddr *) &sa_client, &clen)) == -1) break; /* Service is shutting down */ if (TELNETDUsers >= TELNET_MAX_USER || availmem() != 0) { // open file will prepare to send message to client network = fdopen(s,"r+t"); fputs("\r\n This service will offer one user to use it. \r\n",network); sprintf(Buffer," The Current User is [IP:%s]\r\n",inet_ntoa(CurrentIP)); fputs(Buffer,network); fclose(network); // close network //Jesse shutdown(s,1); //Jesse close_s(s); } else { CurrentIP.s_addr = sa_client.sin_addr.s_addr; // Spawn a child process // newproc("ServicedForTelnet",1024,TServerMainFunction,s,NULL,NULL,0); if( TELNET_SERVER_MAIN_TaskHdl != 0 ) cyg_thread_delete(TELNET_SERVER_MAIN_TaskHdl); //Create TELNET_SERVER_MAIN Thread cyg_thread_create(TELNET_SERVER_MAIN_TASK_PRI, TServerMainFunction, s, "ServicedForTelnet", (void *) (TELNET_SERVER_MAIN_Stack), TELNET_SERVER_MAIN_TASK_STACK_SIZE, &TELNET_SERVER_MAIN_TaskHdl, &TELNET_SERVER_MAIN_Task); //Start TELNET_SERVER_MAIN Thread cyg_thread_resume(TELNET_SERVER_MAIN_TaskHdl); } } return; }