/*
 * Stop the JFFS2 garbage-collect thread for this filesystem and release
 * the synchronization objects it used.
 *
 * Blocks until the GC thread has raised GC_THREAD_FLAG_HAS_EXIT, which
 * is what makes the subsequent kill/delete safe: the thread is known to
 * have left its work loop before its kernel object is destroyed.
 */
void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c)
{
    /* Recover the eCos superblock from the JFFS2 control structure. */
    struct super_block *sb = OFNI_BS_2SFFJ(c);

    CYG_ASSERTC(sb->s_gc_thread_handle);

    D1(printk("jffs2_stop_garbage_collect_thread\n"));
    /* Stop the thread and wait for it if necessary */
    cyg_flag_setbits(&sb->s_gc_thread_flags, GC_THREAD_FLAG_STOP);
    D1(printk("jffs2_stop_garbage_collect_thread wait\n"));
    /* OR mode: wake as soon as HAS_EXIT is set; CLR consumes the bit. */
    cyg_flag_wait(&sb->s_gc_thread_flags,
                  GC_THREAD_FLAG_HAS_EXIT,
                  CYG_FLAG_WAITMODE_OR | CYG_FLAG_WAITMODE_CLR);

    // Kill and free the resources ...  this is safe due to the flag
    // from the thread.
    cyg_thread_kill(sb->s_gc_thread_handle);
    cyg_thread_delete(sb->s_gc_thread_handle);
    cyg_mutex_destroy(&sb->s_lock);
    cyg_flag_destroy(&sb->s_gc_thread_flags);
}
/*
 * Reaper thread for finished shell threads.
 *
 * Sleeps on the cleanup semaphore; each time it is signalled, drains
 * the cleanup mailbox.  Every mailbox entry describes a thread waiting
 * to be torn down: its kernel object is killed and deleted and its
 * context freed.  This runs in a separate thread because a thread
 * cannot delete itself.
 *
 * NOTE(review): cyg_mbox_get() normally blocks until a message is
 * available, so the NULL branch below only fires if a NULL pointer was
 * actually posted to the mailbox -- confirm against the producers.
 * Possible future optimization: drain any additional waiting entries
 * per semaphore wakeup; it does not look like there is a race if the
 * semaphore is incremented while we are in here, but check carefully.
 */
static void cleanup_thread(cyg_addrword_t data)
{
    shell_thread_t *nt;

    for (;;) {
        /* Wait until someone signals that cleanup work is queued. */
        cyg_semaphore_wait(&cleanup.cleanup_sem);

        /* Drain entries until a NULL terminates the batch. */
        for (;;) {
            nt = cyg_mbox_get(cleanup.mbox_handle);
            if (!nt) {
                SHELL_PRINT("Cleanup received a NULL in mbox?!\n");
                break;
            }
            cyg_thread_kill(nt->thread_handle);
            cyg_thread_delete(nt->thread_handle);
            free(nt->name);
            free(nt);
        }
    }
}
/*
 * Find the 'main' thread in the system thread list and destroy it.
 *
 * This might seem superfluous, but there needs to be another thread to
 * clean up after 'main': a thread cannot kill and delete itself.
 *
 * Fixes over the original:
 *  - the result of the initial cyg_thread_get_next() is checked, so an
 *    empty thread list no longer inspects a stale handle;
 *  - threads with a NULL name no longer reach strcmp() (undefined
 *    behavior);
 *  - a failed cyg_thread_get_info() skips the entry instead of reading
 *    stale tinfo contents;
 *  - the scan stops once 'main' has been destroyed.
 */
void main_cleanup()
{
    cyg_handle_t thandle = 0, *thandleptr = &thandle;
    cyg_uint16 tid;
    cyg_thread_info tinfo;

    if (!cyg_thread_get_next(thandleptr, &tid))
        return;  /* no threads to inspect */

    do {
        if (!cyg_thread_get_info(*thandleptr, tid, &tinfo))
            continue;  /* entry vanished under us; move to the next */
        if (tinfo.name && !strcmp(tinfo.name, "main")) {
            SHELL_DEBUG_PRINT("Found TID for main [%d]\n", tinfo.handle);
            cyg_thread_kill(thandle);
            cyg_thread_delete(thandle);
            break;  /* done -- 'main' is gone */
        }
    } while (cyg_thread_get_next(thandleptr, &tid));
}
// exit void reconos_hwsched_destroy() { cyg_thread_kill( reconos_hwsched_thread_handle ); cyg_cond_destroy(&reconos_hwsched_condvar); cyg_mutex_destroy(&reconos_hwsched_mutex); }
// // Destroy a TFTP server, using a previously created server 'id'. // int tftpd_stop(int p) { struct tftp_server *server = (struct tftp_server *)p; // Simple sanity check if (server->tag == TFTP_tag) { cyg_thread_kill(server->thread_handle); cyg_thread_set_priority(server->thread_handle, 0); cyg_thread_delay(1); // Make sure it gets to die... if (cyg_thread_delete(server->thread_handle)) { // Success shutting down the thread free(server); // Give up memory return 1; } } return 0; }
// // Destroy a TFTP server, using a previously created server 'id'. // int tftpd_stop(int p) { struct tftp_server *server = (struct tftp_server *)p; // Simple sanity check if (server->tag == TFTP_tag) { cyg_thread_kill(server->thread_handle); cyg_thread_set_priority(server->thread_handle, 0); cyg_thread_delay(1); // Make sure it gets to die... if (cyg_thread_delete(server->thread_handle)) { // Success shutting down the thread. Close all its sockets. int i; for (i = 0 ; i < CYGNUM_NET_MAX_INET_PROTOS; i++) { if (server->s[i]) { close (server->s[i]); } } freeaddrinfo(server->res); free(server); // Give up memory return 1; } } return 0; }