/*
 * Destroy a thread pool: wake the idle workers, cancel the active ones,
 * wait for every worker to terminate, then free the pool.
 *
 * Assumes: by the time tpool_destroy() is called no one will use this
 * thread pool in any way and no one will try to dispatch entries to it.
 * Calling tpool_destroy() from a job in the pool will cause deadlock.
 */
void
tpool_destroy(tpool_t *tpool)
{
	tpool_active_t *activep;

	pthread_mutex_lock(&tpool->tp_mutex);
	/*
	 * Guarantee tp_mutex is released even if this thread is itself
	 * cancelled while blocked in pthread_cond_wait() below.
	 */
	pthread_cleanup_push((_Voidfp)pthread_mutex_unlock, &tpool->tp_mutex);

	/* mark the pool as being destroyed; wakeup idle workers */
	tpool->tp_flags |= TP_DESTROY;
	tpool->tp_flags &= ~TP_SUSPEND;
	(void) pthread_cond_broadcast(&tpool->tp_workcv);

	/* cancel all active workers */
	for (activep = tpool->tp_active; activep; activep = activep->tpa_next)
		(void) pthread_cancel(activep->tpa_tid);

	/* wait for all active workers to finish */
	while (tpool->tp_active != NULL) {
		tpool->tp_flags |= TP_WAIT;
		(void) pthread_cond_wait(&tpool->tp_waitcv, &tpool->tp_mutex);
	}

	/* the last worker to terminate will wake us up */
	while (tpool->tp_current != 0)
		(void) pthread_cond_wait(&tpool->tp_busycv, &tpool->tp_mutex);

	pthread_cleanup_pop(1);	/* pthread_mutex_unlock(&tpool->tp_mutex); */
	delete_pool(tpool);
}
// //go through m_pools_to_delete bitmap to delete each pool // int NVM_KV_Pool_Del_Manager::start_pool_delete(bool delete_all_pools) { NVM_KV_Store *kv_store = get_store(); uint32_t max_pools = kv_store->get_store_metadata()->max_pools; int ret_code = NVM_SUCCESS; if (delete_all_pools) { if ((ret_code = delete_pool(-1, m_validate_pool_id_on_media)) != NVM_SUCCESS) { return ret_code; } if ((ret_code = kv_store->get_pool_mgr()->clear_pool_bitmaps(m_pools_to_delete)) != NVM_SUCCESS) { return ret_code; } } else { for (uint32_t i = 1; i < max_pools; i++) { if (bitmap_test(m_pools_to_delete, i)) { if ((ret_code = delete_pool(i, m_validate_pool_id_on_media)) != NVM_SUCCESS) { return ret_code; } if ((ret_code = kv_store->get_pool_mgr()->clear_pool_bitmaps(i)) != NVM_SUCCESS) { return ret_code; } } } } return ret_code; }
/*
 * Child-side fork handler for the thread pool package.
 *
 * All of the thread pool workers are gone, except possibly
 * for the current thread, if it is a thread pool worker thread.
 * Retain the thread pools, but make them all empty.  Whatever
 * jobs were queued or running belong to the parent process.
 */
void
postfork1_child_tpool(void)
{
	pthread_t my_tid = pthread_self();
	tpool_t *tpool;
	tpool_job_t *job;

top:
	if ((tpool = thread_pools) == NULL)
		return;

	do {
		tpool_active_t *activep;

		/*
		 * Synchronization objects may have been held across fork();
		 * reinitialize them to a known-unlocked state.
		 */
		(void) mutex_init(&tpool->tp_mutex, USYNC_THREAD, NULL);
		(void) cond_init(&tpool->tp_busycv, USYNC_THREAD, NULL);
		(void) cond_init(&tpool->tp_workcv, USYNC_THREAD, NULL);
		(void) cond_init(&tpool->tp_waitcv, USYNC_THREAD, NULL);

		/* discard every queued job; those belong to the parent */
		for (job = tpool->tp_head; job; job = tpool->tp_head) {
			tpool->tp_head = job->tpj_next;
			lfree(job, sizeof (*job));
		}
		tpool->tp_tail = NULL;
		tpool->tp_njobs = 0;

		/*
		 * If the forking thread is itself a worker in this pool,
		 * keep only its entry on the active list; every other
		 * worker thread no longer exists in the child.
		 */
		for (activep = tpool->tp_active; activep;
		    activep = activep->tpa_next) {
			if (activep->tpa_tid == my_tid) {
				activep->tpa_next = NULL;
				break;
			}
		}
		tpool->tp_idle = 0;
		tpool->tp_current = 0;
		if ((tpool->tp_active = activep) != NULL)
			tpool->tp_current = 1;
		tpool->tp_flags &= ~TP_WAIT;
		if (tpool->tp_flags & (TP_DESTROY | TP_ABANDON)) {
			/*
			 * The thread running tpool_destroy() is gone in the
			 * child; convert the destroy into an abandon.
			 */
			tpool->tp_flags &= ~TP_DESTROY;
			tpool->tp_flags |= TP_ABANDON;
			if (tpool->tp_current == 0) {
				/* deleting unlinks the pool from the list */
				delete_pool(tpool);
				goto top;	/* start over */
			}
		}
	} while ((tpool = tpool->tp_forw) != thread_pools);
}
/*
 * Exit/cancellation cleanup for a worker thread; entered with tp_mutex
 * held and always releases it.  Decrements the live-worker count; when
 * the departing worker is the last one of a pool being torn down, this
 * either frees an abandoned pool outright or wakes the thread blocked
 * in tpool_destroy() on tp_busycv.
 */
static void
worker_cleanup(void *arg)
{
	tpool_t *tpool = arg;

	tpool->tp_current--;
	if (tpool->tp_current == 0 &&
	    (tpool->tp_flags & (TP_DESTROY | TP_ABANDON)) != 0) {
		if (tpool->tp_flags & TP_ABANDON) {
			/*
			 * Nobody else references an abandoned pool; drop
			 * the lock and reclaim it here.
			 */
			pthread_mutex_unlock(&tpool->tp_mutex);
			delete_pool(tpool);
			return;
		}
		/*
		 * TP_ABANDON is clear, so TP_DESTROY must be set:
		 * wake tpool_destroy() waiting on tp_busycv.
		 */
		(void) pthread_cond_broadcast(&tpool->tp_busycv);
	}
	pthread_mutex_unlock(&tpool->tp_mutex);
}
/*
 * Like tpool_destroy(), but don't cancel workers or wait for them to finish.
 * The last worker to terminate will delete the pool.
 */
void
tpool_abandon(tpool_t *tpool)
{
	pthread_mutex_lock(&tpool->tp_mutex);
	if (tpool->tp_current != 0) {
		/*
		 * Workers still exist: flag the pool as abandoned and wake
		 * everyone; the last worker to exit frees the pool.
		 */
		tpool->tp_flags |= TP_ABANDON;
		tpool->tp_flags &= ~TP_SUSPEND;
		(void) pthread_cond_broadcast(&tpool->tp_workcv);
		pthread_mutex_unlock(&tpool->tp_mutex);
		return;
	}
	/* No workers at all: reclaim the pool immediately, lock released. */
	pthread_mutex_unlock(&tpool->tp_mutex);
	delete_pool(tpool);
}
/*
 * Tear-down bookkeeping run as a worker thread terminates; the caller
 * must hold tp_mutex, and this function releases it on every path.
 * When the departing worker is the pool's last, an abandoned pool is
 * freed here and a pool under tpool_destroy() has its waiter signalled.
 */
static void
worker_cleanup(tpool_t *tpool)
{
	ASSERT(MUTEX_HELD(&tpool->tp_mutex));

	tpool->tp_current--;
	if (tpool->tp_current == 0 &&
	    (tpool->tp_flags & (TP_DESTROY | TP_ABANDON)) != 0) {
		if (tpool->tp_flags & TP_ABANDON) {
			/* Abandoned and last out: the pool is ours to free. */
			sig_mutex_unlock(&tpool->tp_mutex);
			delete_pool(tpool);
			return;
		}
		/*
		 * TP_ABANDON is clear, so TP_DESTROY must be set here:
		 * wake the thread blocked in tpool_destroy().
		 */
		(void) cond_broadcast(&tpool->tp_busycv);
	}
	sig_mutex_unlock(&tpool->tp_mutex);
}
/*
 * Like tpool_destroy(), but don't cancel workers or wait for them to finish.
 * The last worker to terminate will delete the pool.
 */
void
tpool_abandon(tpool_t *tpool)
{
	ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));

	sig_mutex_lock(&tpool->tp_mutex);
	if (tpool->tp_current != 0) {
		/*
		 * Workers remain: mark the pool abandoned, clear any
		 * suspension, and wake everyone so the last worker out
		 * deletes the pool.
		 */
		tpool->tp_flags |= TP_ABANDON;
		tpool->tp_flags &= ~TP_SUSPEND;
		(void) cond_broadcast(&tpool->tp_workcv);
		sig_mutex_unlock(&tpool->tp_mutex);
		return;
	}
	/* No workers: nothing will ever run again; delete immediately. */
	sig_mutex_unlock(&tpool->tp_mutex);
	delete_pool(tpool);
}
/*
 * bcm_mpm_delete_heap_pool() - Tear down a memory pool.  Call only after
 * every object allocated from the pool has been returned to it.
 *
 * Parameters:
 *    mgr:   INPUT  Handle to the pools manager
 *    poolp: INPUT  Handle of the pool to delete
 *
 * Returns:
 *    BCME_OK when the pool was deleted; otherwise an error code
 *    indicating why deletion failed.
 */
int BCMATTACHFN(bcm_mpm_delete_heap_pool)(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp)
{
	/* Thin wrapper: all of the work happens in delete_pool(). */
	int status = delete_pool(mgr, poolp);
	return status;
}