static void pool_bucket_destroy(void *data)
{
    apr_bucket_pool *p = data;

    /* If the pool is cleaned up before the last reference goes
     * away, the data is really now on the heap; heap_destroy() takes
     * over.  free() in heap_destroy() thinks it's freeing
     * an apr_bucket_heap, when in reality it's freeing the whole
     * apr_bucket_pool for us.
     */
    if (p->pool) {
        /* the shared resource is still in the pool
         * because the pool has not been cleaned up yet
         */
        if (apr_bucket_shared_destroy(p)) {
            apr_pool_cleanup_kill(p->pool, p, pool_bucket_cleanup);
            apr_bucket_free(p);
        }
    }
    else {
        /* the shared resource is no longer in the pool, it's
         * on the heap, but this reference still thinks it's a pool
         * bucket.  we should just go ahead and pass control to
         * heap_destroy() for it since it doesn't know any better.
         */
        apr_bucket_type_heap.destroy(p);
    }
}
Example #2
static void error_bucket_destroy(void *data)
{
    ap_bucket_error *h = data;

    if (apr_bucket_shared_destroy(h)) {
        apr_bucket_free(h);
    }
}
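All of these destroy callbacks follow the same shared-bucket pattern that error_bucket_destroy() shows in its simplest form: the private data struct starts with an apr_bucket_refcount, apr_bucket_shared_destroy() drops one reference, and only the call that releases the last reference returns non-zero, so only then are the shared resource and the struct freed. A minimal illustrative sketch; my_bucket_data and my_bucket_destroy are made-up names, not part of APR:

#include <stdlib.h>
#include "apr_buckets.h"

typedef struct {
    apr_bucket_refcount refcount;  /* must be the first member so the
                                    * apr_bucket_shared_* helpers can
                                    * find the reference count */
    char *resource;                /* whatever this bucket type shares */
} my_bucket_data;

static void my_bucket_destroy(void *data)
{
    my_bucket_data *d = data;

    if (apr_bucket_shared_destroy(d)) {
        /* last reference gone: release the shared resource first,
         * then the struct that came from the bucket allocator */
        free(d->resource);
        apr_bucket_free(d);
    }
}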
Example #3
static void heap_bucket_destroy(void *data)
{
    apr_bucket_heap *h = data;

    if (apr_bucket_shared_destroy(h)) {
        /* release the heap memory with the free function it was created
         * with, then the private bucket structure itself */
        (*h->free_func)(h->base);
        apr_bucket_free(h);
    }
}
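The free_func that heap_bucket_destroy() invokes is whatever was passed to apr_bucket_heap_create() when the bucket was made: passing free tells APR the buffer was malloc()ed and the bucket takes ownership, while passing NULL makes APR copy the buffer and free the copy itself. A small usage sketch under the same headers as the sketch above (append_heap_bucket is a hypothetical helper; bb is assumed to be an existing brigade):

static void append_heap_bucket(apr_bucket_brigade *bb,
                               char *malloced_buf, apr_size_t len)
{
    /* free() becomes h->free_func and is invoked on h->base by
     * heap_bucket_destroy() when the last reference disappears */
    apr_bucket *b = apr_bucket_heap_create(malloced_buf, len, free,
                                           bb->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);
}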
Example #4
static void file_bucket_destroy(void *data)
{
    apr_bucket_file *f = data;

    if (apr_bucket_shared_destroy(f)) {
        /* no need to close the file here; it will get
         * done automatically when the pool gets cleaned up */
        apr_bucket_free(f);
    }
}
Example #5
static void lob_bucket_destroy(void *data)
{
    apr_bucket_lob *f = data;

    if (apr_bucket_shared_destroy(f)) {
        /* no need to destroy the database objects here; they are
         * released automatically when the pool gets cleaned up */
        apr_bucket_free(f);
    }
}
Example #6
static void bucket_destroy(void *data)
{
    h2_bucket_eos *h = data;

    if (apr_bucket_shared_destroy(h)) {
        h2_stream *stream = h->stream;
        if (stream) {
            h2_stream_eos_destroy(stream);
        }
        apr_bucket_free(h);
    }
}
Example #7
static void nginx_bucket_destroy(void *data)
{
    apr_bucket_nginx *n = data;
    ngx_buf_t *buf = n->buf;

    if (apr_bucket_shared_destroy(n)) {
        /* buf->pos holds memory handed out by the bucket allocator;
         * give it back before freeing the bucket structure */
        if (!ngx_buf_in_memory(buf) && buf->pos != NULL) {
            apr_bucket_free(buf->pos);
            buf->pos = NULL;
        }
        apr_bucket_free(n);
    }
}
Example #8
static void bucket_destroy(void *data)
{
    h2_bucket_eos *h = data;

    if (apr_bucket_shared_destroy(h)) {
        h2_stream *stream = h->stream;
        if (stream && stream->pool) {
            /* the pool cleanup that would have cleared h->stream is no
             * longer needed; the bucket itself is going away now */
            apr_pool_cleanup_kill(stream->pool, &h->stream, bucket_cleanup);
        }
        apr_bucket_free(h);
        if (stream) {
            /* tell the stream that its end-of-stream bucket was sent */
            h2_stream_dispatch(stream, H2_SEV_EOS_SENT);
        }
    }
}
Example #9
static void beam_bucket_destroy(void *data)
{
    h2_beam_proxy *d = data;

    if (apr_bucket_shared_destroy(d)) {
        /* When the beam gets destroyed before this bucket, it will
         * NULL out its reference here. This is not protected by a mutex,
         * so it does not guard against race conditions, but it lets us
         * shut down memory pools that hold circular beam references. */
        if (d->beam) {
            h2_beam_emitted(d->beam, d);
        }
        apr_bucket_free(d);
    }
}
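For the other half of the lifecycle, the creation side initializes the reference count through apr_bucket_shared_make(), which is why the destroy callbacks above fire only for the last surviving reference after splits and copies. A hypothetical counterpart to the my_bucket_destroy() sketch earlier (again, the my_ names are illustrative, not APR API):

static apr_bucket *my_bucket_make(apr_bucket *b, char *resource,
                                  apr_size_t length)
{
    my_bucket_data *d;

    /* allocate the private data from the bucket allocator so that
     * apr_bucket_free() in my_bucket_destroy() is the matching release */
    d = apr_bucket_alloc(sizeof(*d), b->list);
    d->resource = resource;

    /* stores d as b->data, sets b->start and b->length, and initializes
     * the reference count to a single holder */
    return apr_bucket_shared_make(b, d, 0, length);
}

A real bucket type built this way would typically also set b->type to its own apr_bucket_type_t and use apr_bucket_shared_split and apr_bucket_shared_copy, which is where the reference count gets incremented.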