/* Release the session's reference on the mplx and block until all other
 * holders (worker threads) have let go, then destroy it.
 * Called from the h2_session (master connection) thread during shutdown.
 * @param m    the mplx to tear down
 * @param wait condition variable the last releasing worker signals on
 * @return result of acquiring m->lock; APR_SUCCESS after full teardown
 */
apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
{
    apr_status_t status;

    /* stop accepting new work for this mplx before joining */
    workers_unregister(m);
    status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        while (!h2_io_set_iter(m->stream_ios, stream_done_iter, m)) {
            /* iterator until all h2_io have been orphaned or destroyed */
        }
        /* drop our own reference; remaining refs belong to workers */
        release(m, 0);
        while (m->refs > 0) {
            /* publish the condvar so the last release() can wake us */
            m->join_wait = wait;
            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
                          "h2_mplx(%ld): release_join, refs=%d, waiting...",
                          m->id, m->refs);
            /* unbounded wait: relies on every worker eventually releasing */
            apr_thread_cond_wait(wait, m->lock);
        }
        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
                      "h2_mplx(%ld): release_join -> destroy, (#ios=%ld)",
                      m->id, (long)h2_io_set_size(m->stream_ios));
        h2_mplx_destroy(m);
        /* all gone */
        /* NOTE(review): unlock is intentionally commented out — destroying
         * the mplx presumably tears down the pool that owns m->lock, so the
         * mutex no longer exists here; confirm against h2_mplx_destroy. */
        /*apr_thread_mutex_unlock(m->lock);*/
    }
    return status;
}
/* Release the session's reference on the mplx and wait (bounded) for all
 * worker-held references to be released, then destroy the mplx.
 * Unlike an unbounded join, this gives up after 6 timed waits of 10s each
 * so a hung worker cannot block connection teardown forever.
 * @param m    the mplx to tear down
 * @param wait condition variable signalled when refs drop
 * @return result of acquiring m->lock; destruction happens on APR_SUCCESS
 */
apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
{
    apr_status_t status;

    workers_unregister(m);
    status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        int attempts = 0;

        /* drop our own reference; workers hold the remainder */
        release(m);
        while (apr_atomic_read32(&m->refs) > 0) {
            m->join_wait = wait;
            /* escalate from DEBUG to INFO once the first wait has timed out */
            ap_log_cerror(APLOG_MARK, (attempts? APLOG_INFO : APLOG_DEBUG),
                          0, m->c,
                          "h2_mplx(%ld): release_join, refs=%d, waiting...",
                          m->id, m->refs);
            apr_thread_cond_timedwait(wait, m->lock, apr_time_from_sec(10));
            if (++attempts >= 6) {
                /* ~60s total: give up rather than hang the session thread.
                 * NOTE(review): proceeding to destroy with refs > 0 risks
                 * use-after-free if a worker returns later — verify callers. */
                ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
                              APLOGNO(02952)
                              "h2_mplx(%ld): join attempts exhausted, refs=%d",
                              m->id, m->refs);
                break;
            }
        }
        if (m->join_wait) {
            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
                          "h2_mplx(%ld): release_join -> destroy", m->id);
        }
        m->join_wait = NULL;
        /* unlock before destroy: the mutex lives in the mplx pool */
        apr_thread_mutex_unlock(m->lock);
        h2_mplx_destroy(m);
    }
    return status;
}
/* Release the mplx and join all outstanding worker activity before
 * destroying it. Orphans/destroys all stream ios first; any ios that
 * remain are held by workers still processing requests, and we wait for
 * each of them to report back, logging a warning every wait interval.
 * @param m    the mplx to tear down
 * @param wait condition variable signalled as workers report done
 * @return result of entering the mutex; on later timeouts, status holds
 *         the last wait result
 */
apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
{
    apr_status_t status;
    int acquired;

    h2_workers_unregister(m->workers, m);
    if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
        int i, wait_secs = 5;

        /* disable WINDOW_UPDATE callbacks */
        h2_mplx_set_consumed_cb(m, NULL, NULL);
        while (!h2_io_set_iter(m->stream_ios, stream_done_iter, m)) {
            /* iterate until all ios have been orphaned or destroyed */
        }

        /* Any remaining ios have handed out requests to workers that are
         * not done yet. Any operation they do on their assigned stream ios will
         * be errored ECONNRESET/ABORTED, so that should find out pretty soon.
         */
        for (i = 0; h2_io_set_size(m->stream_ios) > 0; ++i) {
            m->join_wait = wait;
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
                          "h2_mplx(%ld): release_join, waiting on %d worker to report back",
                          m->id, (int)h2_io_set_size(m->stream_ios));

            status = apr_thread_cond_timedwait(wait, m->lock,
                                               apr_time_from_sec(wait_secs));
            if (APR_STATUS_IS_TIMEUP(status)) {
                if (i > 0) {
                    /* Oh, oh. Still we wait for assigned workers to report that
                     * they are done. Unless we have a bug, a worker seems to be hanging.
                     * If we exit now, all will be deallocated and the worker, once
                     * it does return, will walk all over freed memory...
                     */
                    ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
                                  "h2_mplx(%ld): release, waiting for %d seconds now for "
                                  "all h2_workers to return, have still %d requests outstanding",
                                  m->id, i*wait_secs,
                                  (int)h2_io_set_size(m->stream_ios));
                }
            }
        }
        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
                      "h2_mplx(%ld): release_join -> destroy", m->id);
        leave_mutex(m, acquired);
        h2_mplx_destroy(m);
        /* all gone */
    }
    return status;
}
/** * A h2_mplx needs to be thread-safe *and* if will be called by * the h2_session thread *and* the h2_worker threads. Therefore: * - calls are protected by a mutex lock, m->lock * - the pool needs its own allocator, since apr_allocator_t are * not re-entrant. The separate allocator works without a * separate lock since we already protect h2_mplx itself. * Since HTTP/2 connections can be expected to live longer than * their HTTP/1 cousins, the separate allocator seems to work better * than protecting a shared h2_session one with an own lock. */ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, const h2_config *conf, apr_interval_time_t stream_timeout, h2_workers *workers) { apr_status_t status = APR_SUCCESS; apr_allocator_t *allocator = NULL; h2_mplx *m; AP_DEBUG_ASSERT(conf); status = apr_allocator_create(&allocator); if (status != APR_SUCCESS) { return NULL; } m = apr_pcalloc(parent, sizeof(h2_mplx)); if (m) { m->id = c->id; APR_RING_ELEM_INIT(m, link); m->c = c; apr_pool_create_ex(&m->pool, parent, NULL, allocator); if (!m->pool) { return NULL; } apr_allocator_owner_set(allocator, m->pool); status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT, m->pool); if (status != APR_SUCCESS) { h2_mplx_destroy(m); return NULL; } m->q = h2_tq_create(m->pool, h2_config_geti(conf, H2_CONF_MAX_STREAMS)); m->stream_ios = h2_io_set_create(m->pool); m->ready_ios = h2_io_set_create(m->pool); m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM); m->stream_timeout = stream_timeout; m->workers = workers; m->tx_handles_reserved = 0; m->tx_chunk_size = 4; } return m; }
/**
 * A h2_mplx needs to be thread-safe *and* if will be called by
 * the h2_session thread *and* the h2_worker threads. Therefore:
 * - calls are protected by a mutex lock, m->lock
 * - the pool needs its own allocator, since apr_allocator_t are
 *   not re-entrant. The separate allocator works without a
 *   separate lock since we already protect h2_mplx itself.
 *   Since HTTP/2 connections can be expected to live longer than
 *   their HTTP/1 cousins, the separate allocator seems to work better
 *   than protecting a shared h2_session one with an own lock.
 */
h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, h2_workers *workers)
{
    apr_status_t status = APR_SUCCESS;
    h2_config *conf = h2_config_get(c);
    apr_allocator_t *allocator = NULL;
    h2_mplx *m;
    AP_DEBUG_ASSERT(conf);

    status = apr_allocator_create(&allocator);
    if (status != APR_SUCCESS) {
        return NULL;
    }

    m = apr_pcalloc(parent, sizeof(h2_mplx));
    if (m) {
        m->id = c->id;
        APR_RING_ELEM_INIT(m, link);
        /* the session starts out holding one reference */
        apr_atomic_set32(&m->refs, 1);
        m->c = c;
        apr_pool_create_ex(&m->pool, parent, NULL, allocator);
        if (!m->pool) {
            /* fix: no pool owns the allocator yet — destroy it explicitly
             * to avoid leaking it on this failure path */
            apr_allocator_destroy(allocator);
            return NULL;
        }
        /* from here on, destroying m->pool also destroys the allocator */
        apr_allocator_owner_set(allocator, m->pool);

        status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
                                         m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }

        m->bucket_alloc = apr_bucket_alloc_create(m->pool);

        m->q = h2_tq_create(m->id, m->pool);
        m->stream_ios = h2_io_set_create(m->pool);
        m->ready_ios = h2_io_set_create(m->pool);
        m->closed = h2_stream_set_create(m->pool);
        m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
        m->workers = workers;

        m->file_handles_allowed = h2_config_geti(conf, H2_CONF_SESSION_FILES);
    }
    else {
        /* fix: mplx allocation failed; release the allocator we created */
        apr_allocator_destroy(allocator);
    }
    return m;
}
/** * A h2_mplx needs to be thread-safe *and* if will be called by * the h2_session thread *and* the h2_worker threads. Therefore: * - calls are protected by a mutex lock, m->lock * - the pool needs its own allocator, since apr_allocator_t are * not re-entrant. The separate allocator works without a * separate lock since we already protect h2_mplx itself. * Since HTTP/2 connections can be expected to live longer than * their HTTP/1 cousins, the separate allocator seems to work better * than protecting a shared h2_session one with an own lock. */ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, const h2_config *conf, apr_interval_time_t stream_timeout, h2_workers *workers) { apr_status_t status = APR_SUCCESS; apr_allocator_t *allocator = NULL; h2_mplx *m; AP_DEBUG_ASSERT(conf); status = apr_allocator_create(&allocator); if (status != APR_SUCCESS) { return NULL; } m = apr_pcalloc(parent, sizeof(h2_mplx)); if (m) { m->id = c->id; APR_RING_ELEM_INIT(m, link); m->c = c; apr_pool_create_ex(&m->pool, parent, NULL, allocator); if (!m->pool) { return NULL; } apr_pool_tag(m->pool, "h2_mplx"); apr_allocator_owner_set(allocator, m->pool); status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT, m->pool); if (status != APR_SUCCESS) { h2_mplx_destroy(m); return NULL; } status = apr_thread_cond_create(&m->task_thawed, m->pool); if (status != APR_SUCCESS) { h2_mplx_destroy(m); return NULL; } m->bucket_alloc = apr_bucket_alloc_create(m->pool); m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS); m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM); m->q = h2_iq_create(m->pool, m->max_streams); m->stream_ios = h2_io_set_create(m->pool); m->ready_ios = h2_io_set_create(m->pool); m->stream_timeout = stream_timeout; m->workers = workers; m->workers_max = workers->max_workers; m->workers_def_limit = 4; m->workers_limit = m->workers_def_limit; m->last_limit_change = m->last_idle_block = apr_time_now(); m->limit_change_interval = 
apr_time_from_msec(200); m->tx_handles_reserved = 0; m->tx_chunk_size = 4; m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*)); m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams, m->stream_max_mem); h2_ngn_shed_set_ctx(m->ngn_shed , m); } return m; }
/* Tear down an h2_session: close all remaining streams and zombies,
 * free the nghttp2 session, the mplx, the connection io, the iowait
 * condvar, and finally the session pool and its private allocator.
 * Must be the last operation on the session; everything it owns is
 * invalid afterwards.
 * @param session the session to destroy; must not be NULL
 */
void h2_session_destroy(h2_session *session)
{
    assert(session);

    if (session->streams) {
        if (h2_stream_set_size(session->streams)) {
            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
                          "h2_session(%ld): destroy, %ld streams open",
                          session->id, h2_stream_set_size(session->streams));
            /* destroy all sessions, join all existing tasks */
            h2_stream_set_iter(session->streams, close_active_iter, session);
            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
                          "h2_session(%ld): destroy, %ld streams remain",
                          session->id, h2_stream_set_size(session->streams));
        }
        h2_stream_set_destroy(session->streams);
        session->streams = NULL;
    }
    if (session->zombies) {
        if (h2_stream_set_size(session->zombies)) {
            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
                          "h2_session(%ld): destroy, %ld zombie streams",
                          session->id, h2_stream_set_size(session->zombies));
            /* destroy all zombies, join all existing tasks */
            h2_stream_set_iter(session->zombies, close_zombie_iter, session);
            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
                          "h2_session(%ld): destroy, %ld zombies remain",
                          session->id, h2_stream_set_size(session->zombies));
        }
        h2_stream_set_destroy(session->zombies);
        session->zombies = NULL;
    }
    if (session->ngh2) {
        nghttp2_session_del(session->ngh2);
        session->ngh2 = NULL;
    }
    if (session->mplx) {
        h2_mplx_destroy(session->mplx);
        session->mplx = NULL;
    }
    h2_conn_io_destroy(&session->io);

    if (session->iowait) {
        apr_thread_cond_destroy(session->iowait);
        session->iowait = NULL;
    }

    /* keep a local copy: session memory goes away with the pool below */
    apr_allocator_t *allocator = session->allocator;
    if (session->alock) {
        if (allocator) {
            /* NOTE(review): the lock is handed back to the allocator and then
             * destroyed immediately — presumably to detach it before the pool
             * teardown below; confirm this matches apr_allocator locking
             * requirements. */
            apr_allocator_mutex_set(allocator, session->alock);
        }
        apr_thread_mutex_destroy(session->alock);
        session->alock = NULL;
    }

    if (session->pool) {
        apr_pool_destroy(session->pool);
    }
    /* the allocator outlives the pool it served; free it last */
    if (allocator) {
        apr_allocator_destroy(allocator);
    }
}