Example #1
/* Create RTSP connection */
static apt_bool_t rtsp_client_connection_create(rtsp_client_t *client, rtsp_client_session_t *session)
{
	rtsp_client_connection_t *rtsp_connection;
	apr_pool_t *pool = apt_pool_create();
	if(!pool) {
		return FALSE;
	}

	rtsp_connection = apr_palloc(pool,sizeof(rtsp_client_connection_t));
	rtsp_connection->pool = pool;
	rtsp_connection->sock = NULL;
	APR_RING_ELEM_INIT(rtsp_connection,link);

	if(rtsp_client_connect(client,rtsp_connection,session->server_ip.buf,session->server_port) == FALSE) {
		apt_log(RTSP_LOG_MARK,APT_PRIO_WARNING,"Failed to Connect to RTSP Server %s:%hu",
			session->server_ip.buf,session->server_port);
		apr_pool_destroy(pool);
		return FALSE;
	}
	rtsp_connection->handle_table = apr_hash_make(pool);
	rtsp_connection->session_table = apr_hash_make(pool);
	rtsp_connection->inprogress_request_queue = apt_list_create(pool);
	apt_text_stream_init(&rtsp_connection->rx_stream,rtsp_connection->rx_buffer,sizeof(rtsp_connection->rx_buffer)-1);
	apt_text_stream_init(&rtsp_connection->tx_stream,rtsp_connection->tx_buffer,sizeof(rtsp_connection->tx_buffer)-1);
	rtsp_connection->parser = rtsp_parser_create(pool);
	rtsp_connection->generator = rtsp_generator_create(pool);
	rtsp_connection->last_cseq = 0;

	rtsp_connection->client = client;
	APR_RING_INSERT_TAIL(&client->connection_list,rtsp_connection,rtsp_client_connection_t,link);
	session->connection = rtsp_connection;
	return TRUE;
}
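Every example on this page follows the same apr_ring.h idiom: declare an intrusive link field with APR_RING_ENTRY, self-initialize it with APR_RING_ELEM_INIT, and only then insert the element into a ring head. The following minimal, self-contained sketch shows that pattern in isolation; the demo_elem_t / demo_ring_t names and the program itself are invented for illustration and are not part of any of the projects quoted here.

#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_ring.h>

typedef struct demo_elem_t demo_elem_t;
struct demo_elem_t {
    APR_RING_ENTRY(demo_elem_t) link;   /* intrusive prev/next pointers */
    int value;
};

typedef struct demo_ring_t demo_ring_t;
APR_RING_HEAD(demo_ring_t, demo_elem_t);

int main(void)
{
    apr_pool_t *pool;
    demo_ring_t ring;
    demo_elem_t *e;
    int i;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    APR_RING_INIT(&ring, demo_elem_t, link);

    for (i = 0; i < 3; i++) {
        e = apr_palloc(pool, sizeof(*e));
        APR_RING_ELEM_INIT(e, link);    /* element points at itself until inserted */
        e->value = i;
        APR_RING_INSERT_TAIL(&ring, e, demo_elem_t, link);
    }

    /* Iterate from the first element up to (but not including) the sentinel. */
    for (e = APR_RING_FIRST(&ring);
         e != APR_RING_SENTINEL(&ring, demo_elem_t, link);
         e = APR_RING_NEXT(e, link)) {
        printf("value = %d\n", e->value);
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}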
Example #2
h2_task *h2_task_create(long session_id,
                        int stream_id,
                        apr_pool_t *stream_pool,
                        h2_mplx *mplx)
{
    h2_task *task = apr_pcalloc(stream_pool, sizeof(h2_task));
    if (task == NULL) {
        ap_log_perror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, stream_pool,
                      "h2_task(%ld-%d): create stream task", 
                      session_id, stream_id);
        h2_mplx_out_close(mplx, stream_id);
        return NULL;
    }
    
    APR_RING_ELEM_INIT(task, link);

    task->id = apr_psprintf(stream_pool, "%ld-%d", session_id, stream_id);
    task->stream_id = stream_id;
    task->mplx = mplx;
    
    /* We would like to have this happening when our task is about
     * to be processed by the worker. But something corrupts our
     * stream pool if we shift this to the worker thread.
     * TODO.
     */
    task->conn = h2_conn_create(mplx->c, stream_pool);
    if (task->conn == NULL) {
        return NULL;
    }

    ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, stream_pool,
                  "h2_task(%s): created", task->id);
    return task;
}
Example #3
h2_task *h2_task_create(long session_id,
                        int stream_id,
                        apr_pool_t *stream_pool,
                        h2_mplx *mplx, conn_rec *c)
{
    h2_task *task = apr_pcalloc(stream_pool, sizeof(h2_task));
    if (task == NULL) {
        ap_log_perror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, stream_pool,
                      "h2_task(%ld-%d): create stream task", 
                      session_id, stream_id);
        h2_mplx_out_close(mplx, stream_id);
        return NULL;
    }
    
    APR_RING_ELEM_INIT(task, link);

    task->id = apr_psprintf(stream_pool, "%ld-%d", session_id, stream_id);
    task->stream_id = stream_id;
    task->mplx = mplx;
    
    task->c = c;
    
    ap_log_perror(APLOG_MARK, APLOG_DEBUG, 0, stream_pool,
                  "h2_task(%s): created", task->id);
    return task;
}
Example #4
/*
 * NOTE: This function is not thread-safe by itself. The caller should hold the lock.
 */
static apr_thread_pool_task_t *task_new(apr_thread_pool_t * me,
                                        apr_thread_start_t func,
                                        void *param, apr_byte_t priority,
                                        void *owner, apr_time_t time)
{
    apr_thread_pool_task_t *t;

    if (APR_RING_EMPTY(me->recycled_tasks, apr_thread_pool_task, link)) {
        t = apr_pcalloc(me->pool, sizeof(*t));
        if (NULL == t) {
            return NULL;
        }
    }
    else {
        t = APR_RING_FIRST(me->recycled_tasks);
        APR_RING_REMOVE(t, link);
    }

    APR_RING_ELEM_INIT(t, link);
    t->func = func;
    t->param = param;
    t->owner = owner;
    if (time > 0) {
        t->dispatch.time = apr_time_now() + time;
    }
    else {
        t->dispatch.priority = priority;
    }
    return t;
}
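task_new() above is the first of several examples on this page that also use a ring as a free list: pop a recycled element if one is available, otherwise allocate a fresh one, and re-initialize the link with APR_RING_ELEM_INIT in either case. A self-contained sketch of just that recycle idiom follows; node_t, free_list_t, node_get and node_put are invented names, not APR API.

#include <apr_pools.h>
#include <apr_ring.h>

typedef struct node_t node_t;
struct node_t {
    APR_RING_ENTRY(node_t) link;
    int payload;
};

typedef struct free_list_t free_list_t;
APR_RING_HEAD(free_list_t, node_t);

/* Pop a node from the free list, or allocate a new one from the pool. */
static node_t *node_get(free_list_t *free_list, apr_pool_t *pool)
{
    node_t *n;
    if (APR_RING_EMPTY(free_list, node_t, link)) {
        n = apr_pcalloc(pool, sizeof(*n));
    }
    else {
        n = APR_RING_FIRST(free_list);
        APR_RING_REMOVE(n, link);
    }
    APR_RING_ELEM_INIT(n, link);   /* safe whether fresh or recycled */
    return n;
}

/* Return a node to the free list for later reuse. */
static void node_put(free_list_t *free_list, node_t *n)
{
    APR_RING_INSERT_TAIL(free_list, n, node_t, link);
}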
Example #5
/* Accept RTSP connection */
static apt_bool_t rtsp_server_connection_accept(rtsp_server_t *server)
{
	rtsp_server_connection_t *rtsp_connection;
	char *local_ip = NULL;
	char *remote_ip = NULL;
	apr_sockaddr_t *l_sockaddr = NULL;
	apr_sockaddr_t *r_sockaddr = NULL;
	apr_pool_t *pool = apt_pool_create();
	if(!pool) {
		return FALSE;
	}

	rtsp_connection = apr_palloc(pool,sizeof(rtsp_server_connection_t));
	rtsp_connection->pool = pool;
	rtsp_connection->sock = NULL;
	rtsp_connection->client_ip = NULL;
	APR_RING_ELEM_INIT(rtsp_connection,link);

	if(apr_socket_accept(&rtsp_connection->sock,server->listen_sock,rtsp_connection->pool) != APR_SUCCESS) {
		apt_log(RTSP_LOG_MARK,APT_PRIO_WARNING,"Failed to Accept RTSP Connection");
		apr_pool_destroy(pool);
		return FALSE;
	}

	if(apr_socket_addr_get(&l_sockaddr,APR_LOCAL,rtsp_connection->sock) != APR_SUCCESS ||
		apr_socket_addr_get(&r_sockaddr,APR_REMOTE,rtsp_connection->sock) != APR_SUCCESS) {
		apt_log(RTSP_LOG_MARK,APT_PRIO_WARNING,"Failed to Get RTSP Socket Address");
		apr_pool_destroy(pool);
		return FALSE;
	}

	apr_sockaddr_ip_get(&local_ip,l_sockaddr);
	apr_sockaddr_ip_get(&remote_ip,r_sockaddr);
	rtsp_connection->client_ip = remote_ip;
	rtsp_connection->id = apr_psprintf(pool,"%s:%hu <-> %s:%hu",
		local_ip,l_sockaddr->port,
		remote_ip,r_sockaddr->port);

	memset(&rtsp_connection->sock_pfd,0,sizeof(apr_pollfd_t));
	rtsp_connection->sock_pfd.desc_type = APR_POLL_SOCKET;
	rtsp_connection->sock_pfd.reqevents = APR_POLLIN;
	rtsp_connection->sock_pfd.desc.s = rtsp_connection->sock;
	rtsp_connection->sock_pfd.client_data = rtsp_connection;
	if(apt_poller_task_descriptor_add(server->task,&rtsp_connection->sock_pfd) != TRUE) {
		apt_log(RTSP_LOG_MARK,APT_PRIO_WARNING,"Failed to Add to Pollset %s",rtsp_connection->id);
		apr_socket_close(rtsp_connection->sock);
		apr_pool_destroy(pool);
		return FALSE;
	}

	apt_log(RTSP_LOG_MARK,APT_PRIO_NOTICE,"Accepted TCP Connection %s",rtsp_connection->id);
	rtsp_connection->session_table = apr_hash_make(rtsp_connection->pool);
	apt_text_stream_init(&rtsp_connection->rx_stream,rtsp_connection->rx_buffer,sizeof(rtsp_connection->rx_buffer)-1);
	apt_text_stream_init(&rtsp_connection->tx_stream,rtsp_connection->tx_buffer,sizeof(rtsp_connection->tx_buffer)-1);
	rtsp_connection->parser = rtsp_parser_create(rtsp_connection->pool);
	rtsp_connection->generator = rtsp_generator_create(rtsp_connection->pool);
	rtsp_connection->server = server;
	APR_RING_INSERT_TAIL(&server->connection_list,rtsp_connection,rtsp_server_connection_t,link);
	return TRUE;
}
Example #6
/** Create timer */
APT_DECLARE(apt_timer_t*) apt_timer_create(apt_timer_queue_t *timer_queue, apt_timer_proc_f proc, void *obj, apr_pool_t *pool)
{
	apt_timer_t *timer = apr_palloc(pool,sizeof(apt_timer_t));
	APR_RING_ELEM_INIT(timer,link);
	timer->queue = timer_queue;
	timer->scheduled_time = 0;
	timer->proc = proc;
	timer->obj = obj;
	return timer;
}
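A hedged usage sketch for the timer above. It assumes the UniMRCP apt_timer_queue.h API shape, i.e. an apt_timer_proc_f(timer, obj) callback typedef, apt_timer_queue_create(pool) and a one-shot apt_timer_set(timer, timeout_msec); session_t and the helper names are hypothetical, so treat this as illustration rather than a verified call sequence.

/* session_t is a hypothetical application type used only for this sketch. */
typedef struct session_t session_t;

static void on_timeout(apt_timer_t *timer, void *obj)
{
    session_t *session = obj;
    /* ... handle the timeout for this session ... */
}

static apt_bool_t session_start_timer(session_t *session, apr_pool_t *pool)
{
    /* Assumed API: apt_timer_queue_create() and apt_timer_set() as declared
     * in UniMRCP's apt_timer_queue.h. */
    apt_timer_queue_t *queue = apt_timer_queue_create(pool);
    apt_timer_t *timer = apt_timer_create(queue, on_timeout, session, pool);
    return apt_timer_set(timer, 5000 /* timeout in msec, assumed unit */);
}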
Example #7
h2_task_queue *h2_tq_create(long id, apr_pool_t *pool)
{
    h2_task_queue *q = apr_pcalloc(pool, sizeof(h2_task_queue));
    if (q) {
        q->id = id;
        APR_RING_ELEM_INIT(q, link);
        APR_RING_INIT(&q->tasks, h2_task, link);
    }
    return q;
}
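h2_task_queue is interesting because it is simultaneously a ring element (its link field lets the queue sit on a list of queues) and a ring head (its tasks field owns the queued tasks). A short sketch of that shape, using invented tq_demo_* names rather than the real mod_http2 types:

#include <apr_pools.h>
#include <apr_ring.h>

typedef struct tq_demo_item_t tq_demo_item_t;
struct tq_demo_item_t {
    APR_RING_ENTRY(tq_demo_item_t) link;   /* membership in a queue's item ring */
    int id;
};

typedef struct tq_demo_queue_t tq_demo_queue_t;
struct tq_demo_queue_t {
    APR_RING_ENTRY(tq_demo_queue_t) link;                   /* membership in a list of queues */
    APR_RING_HEAD(tq_demo_items_t, tq_demo_item_t) items;   /* the queue's own items */
    long id;
};

static tq_demo_queue_t *tq_demo_create(long id, apr_pool_t *pool)
{
    tq_demo_queue_t *q = apr_pcalloc(pool, sizeof(*q));
    if (q) {
        q->id = id;
        APR_RING_ELEM_INIT(q, link);                 /* not yet on any queue list */
        APR_RING_INIT(&q->items, tq_demo_item_t, link);
    }
    return q;
}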
Example #8
static apr_status_t impl_pollset_add(apr_pollset_t *pollset,
                                     const apr_pollfd_t *descriptor)
{
    apr_os_sock_t fd;
    pfd_elem_t *elem;
    apr_status_t rv = APR_SUCCESS;

    pollset_lock_rings();

    if (!APR_RING_EMPTY(&(pollset->p->free_ring), pfd_elem_t, link)) {
        elem = APR_RING_FIRST(&(pollset->p->free_ring));
        APR_RING_REMOVE(elem, link);
    }
    else {
        elem = (pfd_elem_t *) apr_palloc(pollset->pool, sizeof(pfd_elem_t));
        APR_RING_ELEM_INIT(elem, link);
    }
    elem->pfd = *descriptor;

    if (descriptor->desc_type == APR_POLL_SOCKET) {
        fd = descriptor->desc.s->socketdes;
    }
    else {
        fd = descriptor->desc.f->filedes;
    }

    if (descriptor->reqevents & APR_POLLIN) {
        EV_SET(&pollset->p->kevent, fd, EVFILT_READ, EV_ADD, 0, 0, elem);

        if (kevent(pollset->p->kqueue_fd, &pollset->p->kevent, 1, NULL, 0,
                   NULL) == -1) {
            rv = apr_get_netos_error();
        }
    }

    if (descriptor->reqevents & APR_POLLOUT && rv == APR_SUCCESS) {
        EV_SET(&pollset->p->kevent, fd, EVFILT_WRITE, EV_ADD, 0, 0, elem);

        if (kevent(pollset->p->kqueue_fd, &pollset->p->kevent, 1, NULL, 0,
                   NULL) == -1) {
            rv = apr_get_netos_error();
        }
    }

    if (rv == APR_SUCCESS) {
        APR_RING_INSERT_TAIL(&(pollset->p->query_ring), elem, pfd_elem_t, link);
    }
    else {
        APR_RING_INSERT_TAIL(&(pollset->p->free_ring), elem, pfd_elem_t, link);
    }

    pollset_unlock_rings();

    return rv;
}
Example #9
static apr_status_t impl_pollset_add(apr_pollset_t *pollset,
                                     const apr_pollfd_t *descriptor)
{
    struct epoll_event ev = {0};
    int ret = -1;
    pfd_elem_t *elem = NULL;
    apr_status_t rv = APR_SUCCESS;

    ev.events = get_epoll_event(descriptor->reqevents);

    if (pollset->flags & APR_POLLSET_NOCOPY) {
        ev.data.ptr = (void *)descriptor;
    }
    else {
        pollset_lock_rings();

        if (!APR_RING_EMPTY(&(pollset->p->free_ring), pfd_elem_t, link)) {
            elem = APR_RING_FIRST(&(pollset->p->free_ring));
            APR_RING_REMOVE(elem, link);
        }
        else {
            elem = (pfd_elem_t *) apr_palloc(pollset->pool, sizeof(pfd_elem_t));
            APR_RING_ELEM_INIT(elem, link);
        }
        elem->pfd = *descriptor;
        ev.data.ptr = elem;
    }
    if (descriptor->desc_type == APR_POLL_SOCKET) {
        ret = epoll_ctl(pollset->p->epoll_fd, EPOLL_CTL_ADD,
                        descriptor->desc.s->socketdes, &ev);
    }
    else {
        ret = epoll_ctl(pollset->p->epoll_fd, EPOLL_CTL_ADD,
                        descriptor->desc.f->filedes, &ev);
    }

    if (0 != ret) {
        rv = apr_get_netos_error();
    }

    if (!(pollset->flags & APR_POLLSET_NOCOPY)) {
        if (rv != APR_SUCCESS) {
            APR_RING_INSERT_TAIL(&(pollset->p->free_ring), elem, pfd_elem_t, link);
        }
        else {
            APR_RING_INSERT_TAIL(&(pollset->p->query_ring), elem, pfd_elem_t, link);
        }
        pollset_unlock_rings();
    }

    return rv;
}
Example #10
File: port.c Project: Ga-vin/apache
static apr_status_t impl_pollset_add(apr_pollset_t *pollset,
                                     const apr_pollfd_t *descriptor)
{
    apr_os_sock_t fd;
    pfd_elem_t *elem;
    int res;
    apr_status_t rv = APR_SUCCESS;

    pollset_lock_rings();

    if (!APR_RING_EMPTY(&(pollset->p->free_ring), pfd_elem_t, link)) {
        elem = APR_RING_FIRST(&(pollset->p->free_ring));
        APR_RING_REMOVE(elem, link);
    }
    else {
        elem = (pfd_elem_t *) apr_palloc(pollset->pool, sizeof(pfd_elem_t));
        APR_RING_ELEM_INIT(elem, link);
        elem->on_query_ring = 0;
    }
    elem->pfd = *descriptor;

    if (descriptor->desc_type == APR_POLL_SOCKET) {
        fd = descriptor->desc.s->socketdes;
    }
    else {
        fd = descriptor->desc.f->filedes;
    }

    /* If another thread is polling, notify the kernel immediately; otherwise,
     * wait until the next call to apr_pollset_poll().
     */
    if (apr_atomic_read32(&pollset->p->waiting)) {
        res = port_associate(pollset->p->port_fd, PORT_SOURCE_FD, fd, 
                             get_event(descriptor->reqevents), (void *)elem);

        if (res < 0) {
            rv = apr_get_netos_error();
            APR_RING_INSERT_TAIL(&(pollset->p->free_ring), elem, pfd_elem_t, link);
        }
        else {
            elem->on_query_ring = 1;
            APR_RING_INSERT_TAIL(&(pollset->p->query_ring), elem, pfd_elem_t, link);
        }
    } 
    else {
        APR_RING_INSERT_TAIL(&(pollset->p->add_ring), elem, pfd_elem_t, link);
    }

    pollset_unlock_rings();

    return rv;
}
Example #11
static struct waiter_t *make_waiter(apr_pool_t *pool)
{
    struct waiter_t *w = (struct waiter_t*)
                       apr_palloc(pool, sizeof(struct waiter_t));
    if (w == NULL)
        return NULL;
      
    w->sem  = create_sem(0, "apr conditional waiter");
    if (w->sem < 0)
        return NULL;

    APR_RING_ELEM_INIT(w, link);
    
    return w;
}
Example #12
apt_bool_t mpf_buffer_event_write(mpf_buffer_t *buffer, mpf_frame_type_e event_type)
{
	mpf_chunk_t *chunk;
	apt_bool_t status;
	apr_thread_mutex_lock(buffer->guard);

	chunk = apr_palloc(buffer->pool,sizeof(mpf_chunk_t));
	APR_RING_ELEM_INIT(chunk,link);
	chunk->frame.codec_frame.buffer = NULL;
	chunk->frame.codec_frame.size = 0;
	chunk->frame.type = event_type;
	status = mpf_buffer_chunk_write(buffer,chunk);
	
	apr_thread_mutex_unlock(buffer->guard);
	return status;
}
Example #13
/**
 * An h2_mplx needs to be thread-safe, since it will be called by both
 * the h2_session thread *and* the h2_worker threads. Therefore:
 * - calls are protected by a mutex lock, m->lock
 * - the pool needs its own allocator, since an apr_allocator_t is
 *   not re-entrant. The separate allocator works without a
 *   separate lock since we already protect h2_mplx itself.
 *   Since HTTP/2 connections can be expected to live longer than
 *   their HTTP/1 cousins, the separate allocator seems to work better
 *   than protecting a shared h2_session one with its own lock.
 */
h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, 
                        const h2_config *conf,
                        apr_interval_time_t stream_timeout,
                        h2_workers *workers)
{
    apr_status_t status = APR_SUCCESS;
    apr_allocator_t *allocator = NULL;
    h2_mplx *m;
    AP_DEBUG_ASSERT(conf);
    
    status = apr_allocator_create(&allocator);
    if (status != APR_SUCCESS) {
        return NULL;
    }

    m = apr_pcalloc(parent, sizeof(h2_mplx));
    if (m) {
        m->id = c->id;
        APR_RING_ELEM_INIT(m, link);
        m->c = c;
        apr_pool_create_ex(&m->pool, parent, NULL, allocator);
        if (!m->pool) {
            return NULL;
        }
        apr_allocator_owner_set(allocator, m->pool);
        
        status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
                                         m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }
        
        m->q = h2_tq_create(m->pool, h2_config_geti(conf, H2_CONF_MAX_STREAMS));
        m->stream_ios = h2_io_set_create(m->pool);
        m->ready_ios = h2_io_set_create(m->pool);
        m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
        m->stream_timeout = stream_timeout;
        m->workers = workers;
        
        m->tx_handles_reserved = 0;
        m->tx_chunk_size = 4;
    }
    return m;
}
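The comment above motivates giving the mplx pool a private allocator. The core APR idiom, stripped of the h2_mplx specifics, is sketched below using only the public apr_allocator / apr_pool calls; create_private_pool is an invented name.

#include <apr_allocator.h>
#include <apr_pools.h>

/* Sketch: create a child pool with a private allocator so allocations on it
 * never contend on the parent's allocator; the caller must still serialize
 * use of the pool itself, as the h2_mplx comment above notes. */
static apr_pool_t *create_private_pool(apr_pool_t *parent)
{
    apr_allocator_t *allocator = NULL;
    apr_pool_t *pool = NULL;

    if (apr_allocator_create(&allocator) != APR_SUCCESS) {
        return NULL;
    }
    if (apr_pool_create_ex(&pool, parent, NULL, allocator) != APR_SUCCESS) {
        apr_allocator_destroy(allocator);
        return NULL;
    }
    /* Hand ownership of the allocator to the pool: destroying the pool
     * now destroys the allocator as well. */
    apr_allocator_owner_set(allocator, pool);
    return pool;
}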
Example #14
/**
 * An h2_mplx needs to be thread-safe, since it will be called by both
 * the h2_session thread *and* the h2_worker threads. Therefore:
 * - calls are protected by a mutex lock, m->lock
 * - the pool needs its own allocator, since an apr_allocator_t is
 *   not re-entrant. The separate allocator works without a
 *   separate lock since we already protect h2_mplx itself.
 *   Since HTTP/2 connections can be expected to live longer than
 *   their HTTP/1 cousins, the separate allocator seems to work better
 *   than protecting a shared h2_session one with its own lock.
 */
h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, h2_workers *workers)
{
    apr_status_t status = APR_SUCCESS;
    h2_config *conf = h2_config_get(c);
    apr_allocator_t *allocator = NULL;
    h2_mplx *m;
    AP_DEBUG_ASSERT(conf);
    
    status = apr_allocator_create(&allocator);
    if (status != APR_SUCCESS) {
        return NULL;
    }

    m = apr_pcalloc(parent, sizeof(h2_mplx));
    if (m) {
        m->id = c->id;
        APR_RING_ELEM_INIT(m, link);
        apr_atomic_set32(&m->refs, 1);
        m->c = c;
        apr_pool_create_ex(&m->pool, parent, NULL, allocator);
        if (!m->pool) {
            return NULL;
        }
        apr_allocator_owner_set(allocator, m->pool);
        
        status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
                                         m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }
        
        m->bucket_alloc = apr_bucket_alloc_create(m->pool);
        
        m->q = h2_tq_create(m->id, m->pool);
        m->stream_ios = h2_io_set_create(m->pool);
        m->ready_ios = h2_io_set_create(m->pool);
        m->closed = h2_stream_set_create(m->pool);
        m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
        m->workers = workers;
        
        m->file_handles_allowed = h2_config_geti(conf, H2_CONF_SESSION_FILES);
    }
    return m;
}
Example #15
apt_bool_t mpf_buffer_audio_write(mpf_buffer_t *buffer, void *data, apr_size_t size)
{
	mpf_chunk_t *chunk;
	apt_bool_t status;
	apr_thread_mutex_lock(buffer->guard);

	chunk = apr_palloc(buffer->pool,sizeof(mpf_chunk_t));
	APR_RING_ELEM_INIT(chunk,link);
	chunk->frame.codec_frame.buffer = apr_palloc(buffer->pool,size);
	memcpy(chunk->frame.codec_frame.buffer,data,size);
	chunk->frame.codec_frame.size = size;
	chunk->frame.type = MEDIA_FRAME_TYPE_AUDIO;
	status = mpf_buffer_chunk_write(buffer,chunk);
	
	buffer->size += size;
	apr_thread_mutex_unlock(buffer->guard);
	return status;
}
Example #16
APR_DECLARE(apr_status_t) apr_pollset_add(apr_pollset_t *pollset,
                                          const apr_pollfd_t *descriptor)
{
    apr_os_sock_t fd;
    pfd_elem_t *elem;
    int res;
    apr_status_t rv = APR_SUCCESS;

    pollset_lock_rings();

    if (!APR_RING_EMPTY(&(pollset->free_ring), pfd_elem_t, link)) {
        elem = APR_RING_FIRST(&(pollset->free_ring));
        APR_RING_REMOVE(elem, link);
    }
    else {
        elem = (pfd_elem_t *) apr_palloc(pollset->pool, sizeof(pfd_elem_t));
        APR_RING_ELEM_INIT(elem, link);
    }
    elem->pfd = *descriptor;

    if (descriptor->desc_type == APR_POLL_SOCKET) {
        fd = descriptor->desc.s->socketdes;
    }
    else {
        fd = descriptor->desc.f->filedes;
    }

    res = port_associate(pollset->port_fd, PORT_SOURCE_FD, fd, 
                         get_event(descriptor->reqevents), (void *)elem);

    if (res < 0) {
        rv = APR_ENOMEM;
        APR_RING_INSERT_TAIL(&(pollset->free_ring), elem, pfd_elem_t, link);
    }
    else {
        pollset->nelts++;
        APR_RING_INSERT_TAIL(&(pollset->query_ring), elem, pfd_elem_t, link);
    }

    pollset_unlock_rings();

    return rv;
}
Example #17
/*
 * NOTE: This function is not thread-safe by itself. The caller should hold the lock.
 */
static struct apr_thread_list_elt *elt_new(apr_thread_pool_t * me,
                                           apr_thread_t * t)
{
    struct apr_thread_list_elt *elt;

    if (APR_RING_EMPTY(me->recycled_thds, apr_thread_list_elt, link)) {
        elt = apr_pcalloc(me->pool, sizeof(*elt));
        if (NULL == elt) {
            return NULL;
        }
    }
    else {
        elt = APR_RING_FIRST(me->recycled_thds);
        APR_RING_REMOVE(elt, link);
    }

    APR_RING_ELEM_INIT(elt, link);
    elt->thd = t;
    elt->current_owner = NULL;
    elt->state = TH_RUN;
    return elt;
}
Example #18
File: epoll.c Project: ATCP/mtcp
static apr_status_t impl_pollset_add(apr_pollset_t *pollset,
                                     const apr_pollfd_t *descriptor)
{
#ifdef HAVE_MTCP
    struct mtcp_epoll_event ev = {0};
#else
    struct epoll_event ev = {0};
#endif
    int ret = -1;
    pfd_elem_t *elem = NULL;
    apr_status_t rv = APR_SUCCESS;

    ev.events = get_epoll_event(descriptor->reqevents);

    if (pollset->flags & APR_POLLSET_NOCOPY) {
        ev.data.ptr = (void *)descriptor;
    }
    else {
        pollset_lock_rings();

        if (!APR_RING_EMPTY(&(pollset->p->free_ring), pfd_elem_t, link)) {
            elem = APR_RING_FIRST(&(pollset->p->free_ring));
            APR_RING_REMOVE(elem, link);
        }
        else {
            elem = (pfd_elem_t *) apr_palloc(pollset->pool, sizeof(pfd_elem_t));
            APR_RING_ELEM_INIT(elem, link);
        }
        elem->pfd = *descriptor;
        ev.data.ptr = elem;
    }
    if (descriptor->desc_type == APR_POLL_SOCKET) {
#ifdef HAVE_MTCP
        int cpu = sched_getcpu();
        ret = mtcp_epoll_ctl(g_mctx[cpu], pollset->p->epoll_fd, EPOLL_CTL_ADD,
                             descriptor->desc.s->socketdes, &ev);
        if (ret != 0 && errno != EEXIST)
            perror("mtcp_epoll_ctl");
#else
        ret = epoll_ctl(pollset->p->epoll_fd, EPOLL_CTL_ADD,
                        descriptor->desc.s->socketdes, &ev);
#endif
    }
    else {
        /* TODO: Non-socket fd? */
#ifndef HAVE_MTCP
        ret = epoll_ctl(pollset->p->epoll_fd, EPOLL_CTL_ADD,
                        descriptor->desc.f->filedes, &ev);
#endif
    }

    if (0 != ret) {
        rv = apr_get_netos_error();
    }

    if (!(pollset->flags & APR_POLLSET_NOCOPY)) {
        if (rv != APR_SUCCESS) {
            APR_RING_INSERT_TAIL(&(pollset->p->free_ring), elem, pfd_elem_t, link);
        }
        else {
            APR_RING_INSERT_TAIL(&(pollset->p->query_ring), elem, pfd_elem_t, link);
        }
        pollset_unlock_rings();
    }

    return rv;
}
Example #19
/* core's child-status hook
 * tracks number of remaining children per generation and
 * runs the end-generation hook when the last child of
 * a generation exits
 */
void ap_core_child_status(server_rec *s, pid_t pid,
                          ap_generation_t gen, int slot,
                          mpm_child_status status)
{
    mpm_gen_info_t *cur;
    const char *status_msg = "unknown status";

    if (!gen_head_init) { /* where to run this? */
        gen_head_init = 1;
        geninfo = apr_pcalloc(s->process->pool, sizeof *geninfo);
        unused_geninfo = apr_pcalloc(s->process->pool, sizeof *unused_geninfo);
        APR_RING_INIT(geninfo, mpm_gen_info_t, link);
        APR_RING_INIT(unused_geninfo, mpm_gen_info_t, link);
    }

    cur = APR_RING_FIRST(geninfo);
    while (cur != APR_RING_SENTINEL(geninfo, mpm_gen_info_t, link) &&
           cur->gen != gen) {
        cur = APR_RING_NEXT(cur, link);
    }

    switch(status) {
    case MPM_CHILD_STARTED:
        status_msg = "started";
        if (cur == APR_RING_SENTINEL(geninfo, mpm_gen_info_t, link)) {
            /* first child for this generation */
            if (!APR_RING_EMPTY(unused_geninfo, mpm_gen_info_t, link)) {
                cur = APR_RING_FIRST(unused_geninfo);
                APR_RING_REMOVE(cur, link);
                cur->active = cur->done = 0;
            }
            else {
                cur = apr_pcalloc(s->process->pool, sizeof *cur);
            }
            cur->gen = gen;
            APR_RING_ELEM_INIT(cur, link);
            APR_RING_INSERT_HEAD(geninfo, cur, mpm_gen_info_t, link);
        }
        ap_random_parent_after_fork();
        ++cur->active;
        break;
    case MPM_CHILD_EXITED:
        ap_update_global_status();
        status_msg = "exited";
        if (cur == APR_RING_SENTINEL(geninfo, mpm_gen_info_t, link)) {
            ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00546)
                         "no record of generation %d of exiting child %" APR_PID_T_FMT,
                         gen, pid);
        }
        else {
            --cur->active;
            if (!cur->active && cur->done) { /* no children, server has stopped/restarted */
                end_gen(cur);
            }
        }
        break;
    case MPM_CHILD_LOST_SLOT:
        status_msg = "lost slot";
        /* we don't track by slot, so it doesn't matter */
        break;
    }
    ap_log_error(APLOG_MARK, APLOG_TRACE4, 0, s,
                 "mpm child %" APR_PID_T_FMT " (gen %d/slot %d) %s",
                 pid, gen, slot, status_msg);
}
Example #20
/**
 * An h2_mplx needs to be thread-safe, since it will be called by both
 * the h2_session thread *and* the h2_worker threads. Therefore:
 * - calls are protected by a mutex lock, m->lock
 * - the pool needs its own allocator, since an apr_allocator_t is
 *   not re-entrant. The separate allocator works without a
 *   separate lock since we already protect h2_mplx itself.
 *   Since HTTP/2 connections can be expected to live longer than
 *   their HTTP/1 cousins, the separate allocator seems to work better
 *   than protecting a shared h2_session one with its own lock.
 */
h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, 
                        const h2_config *conf, 
                        apr_interval_time_t stream_timeout,
                        h2_workers *workers)
{
    apr_status_t status = APR_SUCCESS;
    apr_allocator_t *allocator = NULL;
    h2_mplx *m;
    AP_DEBUG_ASSERT(conf);
    
    status = apr_allocator_create(&allocator);
    if (status != APR_SUCCESS) {
        return NULL;
    }

    m = apr_pcalloc(parent, sizeof(h2_mplx));
    if (m) {
        m->id = c->id;
        APR_RING_ELEM_INIT(m, link);
        m->c = c;
        apr_pool_create_ex(&m->pool, parent, NULL, allocator);
        if (!m->pool) {
            return NULL;
        }
        apr_pool_tag(m->pool, "h2_mplx");
        apr_allocator_owner_set(allocator, m->pool);
        
        status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
                                         m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }
        
        status = apr_thread_cond_create(&m->task_thawed, m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }
    
        m->bucket_alloc = apr_bucket_alloc_create(m->pool);
        m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS);
        m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
        m->q = h2_iq_create(m->pool, m->max_streams);
        m->stream_ios = h2_io_set_create(m->pool);
        m->ready_ios = h2_io_set_create(m->pool);
        m->stream_timeout = stream_timeout;
        m->workers = workers;
        m->workers_max = workers->max_workers;
        m->workers_def_limit = 4;
        m->workers_limit = m->workers_def_limit;
        m->last_limit_change = m->last_idle_block = apr_time_now();
        m->limit_change_interval = apr_time_from_msec(200);
        
        m->tx_handles_reserved = 0;
        m->tx_chunk_size = 4;
        
        m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
        
        m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams, 
                                         m->stream_max_mem);
        h2_ngn_shed_set_ctx(m->ngn_shed , m);
    }
    return m;
}