APR_DECLARE(apr_status_t) apr_pollset_remove(apr_pollset_t *pollset,
                                             const apr_pollfd_t *descriptor)
{
    apr_os_sock_t fd;
    pfd_elem_t *ep;
    apr_status_t rv = APR_SUCCESS;
    int res;

    pollset_lock_rings();

    if (descriptor->desc_type == APR_POLL_SOCKET) {
        fd = descriptor->desc.s->socketdes;
    }
    else {
        fd = descriptor->desc.f->filedes;
    }

    res = port_dissociate(pollset->port_fd, PORT_SOURCE_FD, fd);

    if (res < 0) {
        rv = APR_NOTFOUND;
    }

    if (!APR_RING_EMPTY(&(pollset->query_ring), pfd_elem_t, link)) {
        for (ep = APR_RING_FIRST(&(pollset->query_ring));
             ep != APR_RING_SENTINEL(&(pollset->query_ring), pfd_elem_t, link);
             ep = APR_RING_NEXT(ep, link)) {

            if (descriptor->desc.s == ep->pfd.desc.s) {
                APR_RING_REMOVE(ep, link);
                APR_RING_INSERT_TAIL(&(pollset->dead_ring),
                                     ep, pfd_elem_t, link);
                break;
            }
        }
    }

    if (!APR_RING_EMPTY(&(pollset->add_ring), pfd_elem_t, link)) {
        for (ep = APR_RING_FIRST(&(pollset->add_ring));
             ep != APR_RING_SENTINEL(&(pollset->add_ring), pfd_elem_t, link);
             ep = APR_RING_NEXT(ep, link)) {

            if (descriptor->desc.s == ep->pfd.desc.s) {
                APR_RING_REMOVE(ep, link);
                APR_RING_INSERT_TAIL(&(pollset->dead_ring),
                                     ep, pfd_elem_t, link);
                break;
            }
        }
    }

    pollset_unlock_rings();

    return rv;
}
static apr_status_t impl_pollset_add(apr_pollset_t *pollset,
                                     const apr_pollfd_t *descriptor)
{
    apr_os_sock_t fd;
    pfd_elem_t *elem;
    apr_status_t rv = APR_SUCCESS;

    pollset_lock_rings();

    if (!APR_RING_EMPTY(&(pollset->p->free_ring), pfd_elem_t, link)) {
        elem = APR_RING_FIRST(&(pollset->p->free_ring));
        APR_RING_REMOVE(elem, link);
    }
    else {
        elem = (pfd_elem_t *) apr_palloc(pollset->pool, sizeof(pfd_elem_t));
        APR_RING_ELEM_INIT(elem, link);
    }
    elem->pfd = *descriptor;

    if (descriptor->desc_type == APR_POLL_SOCKET) {
        fd = descriptor->desc.s->socketdes;
    }
    else {
        fd = descriptor->desc.f->filedes;
    }

    if (descriptor->reqevents & APR_POLLIN) {
        EV_SET(&pollset->p->kevent, fd, EVFILT_READ, EV_ADD, 0, 0, elem);

        if (kevent(pollset->p->kqueue_fd, &pollset->p->kevent, 1, NULL, 0,
                   NULL) == -1) {
            rv = apr_get_netos_error();
        }
    }

    if (descriptor->reqevents & APR_POLLOUT && rv == APR_SUCCESS) {
        EV_SET(&pollset->p->kevent, fd, EVFILT_WRITE, EV_ADD, 0, 0, elem);

        if (kevent(pollset->p->kqueue_fd, &pollset->p->kevent, 1, NULL, 0,
                   NULL) == -1) {
            rv = apr_get_netos_error();
        }
    }

    if (rv == APR_SUCCESS) {
        APR_RING_INSERT_TAIL(&(pollset->p->query_ring), elem, pfd_elem_t, link);
    }
    else {
        APR_RING_INSERT_TAIL(&(pollset->p->free_ring), elem, pfd_elem_t, link);
    }

    pollset_unlock_rings();

    return rv;
}
static apr_status_t impl_pollset_add(apr_pollset_t *pollset,
                                     const apr_pollfd_t *descriptor)
{
    struct epoll_event ev = {0};
    int ret = -1;
    pfd_elem_t *elem = NULL;
    apr_status_t rv = APR_SUCCESS;

    ev.events = get_epoll_event(descriptor->reqevents);

    if (pollset->flags & APR_POLLSET_NOCOPY) {
        ev.data.ptr = (void *)descriptor;
    }
    else {
        pollset_lock_rings();

        if (!APR_RING_EMPTY(&(pollset->p->free_ring), pfd_elem_t, link)) {
            elem = APR_RING_FIRST(&(pollset->p->free_ring));
            APR_RING_REMOVE(elem, link);
        }
        else {
            elem = (pfd_elem_t *) apr_palloc(pollset->pool, sizeof(pfd_elem_t));
            APR_RING_ELEM_INIT(elem, link);
        }
        elem->pfd = *descriptor;
        ev.data.ptr = elem;
    }

    if (descriptor->desc_type == APR_POLL_SOCKET) {
        ret = epoll_ctl(pollset->p->epoll_fd, EPOLL_CTL_ADD,
                        descriptor->desc.s->socketdes, &ev);
    }
    else {
        ret = epoll_ctl(pollset->p->epoll_fd, EPOLL_CTL_ADD,
                        descriptor->desc.f->filedes, &ev);
    }

    if (0 != ret) {
        rv = apr_get_netos_error();
    }

    if (!(pollset->flags & APR_POLLSET_NOCOPY)) {
        if (rv != APR_SUCCESS) {
            APR_RING_INSERT_TAIL(&(pollset->p->free_ring),
                                 elem, pfd_elem_t, link);
        }
        else {
            APR_RING_INSERT_TAIL(&(pollset->p->query_ring),
                                 elem, pfd_elem_t, link);
        }
        pollset_unlock_rings();
    }
    return rv;
}
static apr_status_t impl_pollset_add(apr_pollset_t *pollset,
                                     const apr_pollfd_t *descriptor)
{
    apr_os_sock_t fd;
    pfd_elem_t *elem;
    int res;
    apr_status_t rv = APR_SUCCESS;

    pollset_lock_rings();

    if (!APR_RING_EMPTY(&(pollset->p->free_ring), pfd_elem_t, link)) {
        elem = APR_RING_FIRST(&(pollset->p->free_ring));
        APR_RING_REMOVE(elem, link);
    }
    else {
        elem = (pfd_elem_t *) apr_palloc(pollset->pool, sizeof(pfd_elem_t));
        APR_RING_ELEM_INIT(elem, link);
        elem->on_query_ring = 0;
    }
    elem->pfd = *descriptor;

    if (descriptor->desc_type == APR_POLL_SOCKET) {
        fd = descriptor->desc.s->socketdes;
    }
    else {
        fd = descriptor->desc.f->filedes;
    }

    /* If another thread is polling, notify the kernel immediately; otherwise,
     * wait until the next call to apr_pollset_poll().
     */
    if (apr_atomic_read32(&pollset->p->waiting)) {
        res = port_associate(pollset->p->port_fd, PORT_SOURCE_FD, fd,
                             get_event(descriptor->reqevents), (void *)elem);

        if (res < 0) {
            rv = apr_get_netos_error();
            APR_RING_INSERT_TAIL(&(pollset->p->free_ring),
                                 elem, pfd_elem_t, link);
        }
        else {
            elem->on_query_ring = 1;
            APR_RING_INSERT_TAIL(&(pollset->p->query_ring),
                                 elem, pfd_elem_t, link);
        }
    }
    else {
        APR_RING_INSERT_TAIL(&(pollset->p->add_ring), elem, pfd_elem_t, link);
    }

    pollset_unlock_rings();

    return rv;
}
/** Set one-shot timer */
APT_DECLARE(apt_bool_t) apt_timer_set(apt_timer_t *timer, apr_uint32_t timeout)
{
    apt_timer_queue_t *queue = timer->queue;

    if(timeout <= 0 || !timer->proc) {
        return FALSE;
    }

    if(timer->scheduled_time) {
        /* remove timer first */
        apt_timer_remove(queue,timer);
    }

    /* calculate time to elapse */
    timer->scheduled_time = queue->elapsed_time + timeout;
#ifdef APT_TIMER_DEBUG
    apt_log(APT_LOG_MARK,APT_PRIO_DEBUG,"Set Timer 0x%x [%u]",timer,timer->scheduled_time);
#endif
    if(APR_RING_EMPTY(&queue->head, apt_timer_t, link)) {
        APR_RING_INSERT_TAIL(&queue->head,timer,apt_timer_t,link);
        return TRUE;
    }

    /* insert the new node (timer) into the list sorted by scheduled time */
    return apt_timer_insert(queue,timer);
}
APR_DECLARE(apr_status_t) apr_thread_cond_create(apr_thread_cond_t **cond,
                                                 apr_pool_t *pool)
{
    apr_thread_cond_t *new_cond;
    sem_id rv;
    int i;

    new_cond = (apr_thread_cond_t *)apr_palloc(pool, sizeof(apr_thread_cond_t));

    if (new_cond == NULL)
        return APR_ENOMEM;

    if ((rv = create_sem(1, "apr conditional lock")) < B_OK)
        return rv;
    new_cond->lock = rv;

    new_cond->pool = pool;

    APR_RING_INIT(&new_cond->alist, waiter_t, link);
    APR_RING_INIT(&new_cond->flist, waiter_t, link);

    for (i = 0; i < 10; i++) {
        struct waiter_t *nw = make_waiter(pool);
        APR_RING_INSERT_TAIL(&new_cond->flist, nw, waiter_t, link);
    }

    apr_pool_cleanup_register(new_cond->pool, (void *)new_cond,
                              thread_cond_cleanup, apr_pool_cleanup_null);

    *cond = new_cond;
    return APR_SUCCESS;
}
/* Accept RTSP connection */
static apt_bool_t rtsp_server_connection_accept(rtsp_server_t *server)
{
    rtsp_server_connection_t *rtsp_connection;
    char *local_ip = NULL;
    char *remote_ip = NULL;
    apr_sockaddr_t *l_sockaddr = NULL;
    apr_sockaddr_t *r_sockaddr = NULL;
    apr_pool_t *pool = apt_pool_create();
    if(!pool) {
        return FALSE;
    }

    rtsp_connection = apr_palloc(pool,sizeof(rtsp_server_connection_t));
    rtsp_connection->pool = pool;
    rtsp_connection->sock = NULL;
    rtsp_connection->client_ip = NULL;
    APR_RING_ELEM_INIT(rtsp_connection,link);

    if(apr_socket_accept(&rtsp_connection->sock,server->listen_sock,rtsp_connection->pool) != APR_SUCCESS) {
        apt_log(RTSP_LOG_MARK,APT_PRIO_WARNING,"Failed to Accept RTSP Connection");
        apr_pool_destroy(pool);
        return FALSE;
    }

    if(apr_socket_addr_get(&l_sockaddr,APR_LOCAL,rtsp_connection->sock) != APR_SUCCESS ||
       apr_socket_addr_get(&r_sockaddr,APR_REMOTE,rtsp_connection->sock) != APR_SUCCESS) {
        apt_log(RTSP_LOG_MARK,APT_PRIO_WARNING,"Failed to Get RTSP Socket Address");
        apr_pool_destroy(pool);
        return FALSE;
    }

    apr_sockaddr_ip_get(&local_ip,l_sockaddr);
    apr_sockaddr_ip_get(&remote_ip,r_sockaddr);
    rtsp_connection->client_ip = remote_ip;
    rtsp_connection->id = apr_psprintf(pool,"%s:%hu <-> %s:%hu",
                                       local_ip,l_sockaddr->port,
                                       remote_ip,r_sockaddr->port);

    memset(&rtsp_connection->sock_pfd,0,sizeof(apr_pollfd_t));
    rtsp_connection->sock_pfd.desc_type = APR_POLL_SOCKET;
    rtsp_connection->sock_pfd.reqevents = APR_POLLIN;
    rtsp_connection->sock_pfd.desc.s = rtsp_connection->sock;
    rtsp_connection->sock_pfd.client_data = rtsp_connection;
    if(apt_poller_task_descriptor_add(server->task,&rtsp_connection->sock_pfd) != TRUE) {
        apt_log(RTSP_LOG_MARK,APT_PRIO_WARNING,"Failed to Add to Pollset %s",rtsp_connection->id);
        apr_socket_close(rtsp_connection->sock);
        apr_pool_destroy(pool);
        return FALSE;
    }

    apt_log(RTSP_LOG_MARK,APT_PRIO_NOTICE,"Accepted TCP Connection %s",rtsp_connection->id);

    rtsp_connection->session_table = apr_hash_make(rtsp_connection->pool);
    apt_text_stream_init(&rtsp_connection->rx_stream,rtsp_connection->rx_buffer,sizeof(rtsp_connection->rx_buffer)-1);
    apt_text_stream_init(&rtsp_connection->tx_stream,rtsp_connection->tx_buffer,sizeof(rtsp_connection->tx_buffer)-1);
    rtsp_connection->parser = rtsp_parser_create(rtsp_connection->pool);
    rtsp_connection->generator = rtsp_generator_create(rtsp_connection->pool);
    rtsp_connection->server = server;
    APR_RING_INSERT_TAIL(&server->connection_list,rtsp_connection,rtsp_server_connection_t,link);
    return TRUE;
}
/*
 * Test if the task is the only one within the priority segment.
 * If it is not, return the first element with same or lower priority.
 * Otherwise, add the task into the queue and return NULL.
 *
 * NOTE: This function is not thread safe by itself. The caller should hold the lock.
 */
static apr_thread_pool_task_t *add_if_empty(apr_thread_pool_t * me,
                                            apr_thread_pool_task_t * const t)
{
    int seg;
    int next;
    apr_thread_pool_task_t *t_next;

    seg = TASK_PRIORITY_SEG(t);
    if (me->task_idx[seg]) {
        assert(APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) !=
               me->task_idx[seg]);
        t_next = me->task_idx[seg];
        while (t_next->dispatch.priority > t->dispatch.priority) {
            t_next = APR_RING_NEXT(t_next, link);
            if (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) ==
                t_next) {
                return t_next;
            }
        }
        return t_next;
    }

    for (next = seg - 1; next >= 0; next--) {
        if (me->task_idx[next]) {
            APR_RING_INSERT_BEFORE(me->task_idx[next], t, link);
            break;
        }
    }
    if (0 > next) {
        APR_RING_INSERT_TAIL(me->tasks, t, apr_thread_pool_task, link);
    }
    me->task_idx[seg] = t;
    return NULL;
}
/* Create RTSP connection */
static apt_bool_t rtsp_client_connection_create(rtsp_client_t *client, rtsp_client_session_t *session)
{
    rtsp_client_connection_t *rtsp_connection;
    apr_pool_t *pool = apt_pool_create();
    if(!pool) {
        return FALSE;
    }

    rtsp_connection = apr_palloc(pool,sizeof(rtsp_client_connection_t));
    rtsp_connection->pool = pool;
    rtsp_connection->sock = NULL;
    APR_RING_ELEM_INIT(rtsp_connection,link);

    if(rtsp_client_connect(client,rtsp_connection,session->server_ip.buf,session->server_port) == FALSE) {
        apt_log(RTSP_LOG_MARK,APT_PRIO_WARNING,"Failed to Connect to RTSP Server %s:%hu",
                session->server_ip.buf,session->server_port);
        apr_pool_destroy(pool);
        return FALSE;
    }

    rtsp_connection->handle_table = apr_hash_make(pool);
    rtsp_connection->session_table = apr_hash_make(pool);
    rtsp_connection->inprogress_request_queue = apt_list_create(pool);
    apt_text_stream_init(&rtsp_connection->rx_stream,rtsp_connection->rx_buffer,sizeof(rtsp_connection->rx_buffer)-1);
    apt_text_stream_init(&rtsp_connection->tx_stream,rtsp_connection->tx_buffer,sizeof(rtsp_connection->tx_buffer)-1);
    rtsp_connection->parser = rtsp_parser_create(pool);
    rtsp_connection->generator = rtsp_generator_create(pool);
    rtsp_connection->last_cseq = 0;
    rtsp_connection->client = client;
    APR_RING_INSERT_TAIL(&client->connection_list,rtsp_connection,rtsp_client_connection_t,link);
    session->connection = rtsp_connection;
    return TRUE;
}
MPF_DECLARE(apt_bool_t) mpf_context_termination_add(mpf_context_t *context, mpf_termination_t *termination)
{
    apr_size_t i;
    header_item_t *header_item;
    for(i=0; i<context->capacity; i++) {
        header_item = &context->header[i];
        if(header_item->termination) {
            continue;
        }

        if(!context->count) {
            apt_log(APT_LOG_MARK,APT_PRIO_DEBUG,"Add Context");
            APR_RING_INSERT_TAIL(&context->factory->head,context,mpf_context_t,link);
        }

        apt_log(APT_LOG_MARK,APT_PRIO_DEBUG,"Add Termination");
        header_item->termination = termination;
        header_item->tx_count = 0;
        header_item->rx_count = 0;
        termination->slot = i;
        context->count++;
        return TRUE;
    }
    return FALSE;
}
APT_DECLARE(apt_list_elem_t*) apt_list_push_back(apt_obj_list_t *list, void *obj, apr_pool_t *pool)
{
    apt_list_elem_t *elem = apr_palloc(pool,sizeof(apt_list_elem_t));
    elem->obj = obj;
    APR_RING_INSERT_TAIL(&list->head,elem,apt_list_elem_t,link);
    return elem;
}
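/*
 * Illustrative, self-contained sketch (not part of any project above) of the
 * basic APR_RING pattern used throughout this file: declare a head and an
 * element type, push elements onto the tail, then walk the ring until the
 * sentinel is reached. The type names item_t and item_ring_t are invented for
 * this example; it only assumes the APR headers are on the include path and
 * uses malloc() instead of a pool to stay standalone.
 */
#include <stdio.h>
#include <stdlib.h>
#include <apr_ring.h>

typedef struct item_t item_t;
struct item_t {
    APR_RING_ENTRY(item_t) link;   /* embedded prev/next pointers */
    int value;
};
typedef APR_RING_HEAD(item_ring_t, item_t) item_ring_t;

int main(void)
{
    item_ring_t ring;
    item_t *it;
    int i;

    APR_RING_INIT(&ring, item_t, link);

    for (i = 0; i < 3; i++) {
        it = malloc(sizeof(*it));
        if (!it) {
            return 1;
        }
        APR_RING_ELEM_INIT(it, link);
        it->value = i;
        APR_RING_INSERT_TAIL(&ring, it, item_t, link);
    }

    /* Iteration stops at the sentinel, not at NULL. */
    for (it = APR_RING_FIRST(&ring);
         it != APR_RING_SENTINEL(&ring, item_t, link);
         it = APR_RING_NEXT(it, link)) {
        printf("%d\n", it->value);
    }
    return 0;
}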
static apr_status_t do_wait(apr_thread_cond_t *cond, apr_thread_mutex_t *mutex,
                            apr_interval_time_t timeout)
{
    struct waiter_t *wait;
    thread_id cth = find_thread(NULL);
    apr_status_t rv;
    int flags = B_RELATIVE_TIMEOUT;

    /* We must be the owner of the mutex or we can't do this... */
    if (mutex->owner != cth) {
        /* What should we return??? */
        return APR_EINVAL;
    }

    acquire_sem(cond->lock);
    /* APR_RING_FIRST() returns the sentinel (never NULL) on an empty ring,
     * so test for emptiness before reusing a waiter from the free list.
     */
    if (!APR_RING_EMPTY(&cond->flist, waiter_t, link)) {
        wait = APR_RING_FIRST(&cond->flist);
        APR_RING_REMOVE(wait, link);
    }
    else {
        wait = make_waiter(cond->pool);
    }
    APR_RING_INSERT_TAIL(&cond->alist, wait, waiter_t, link);
    cond->condlock = mutex;
    release_sem(cond->lock);

    apr_thread_mutex_unlock(cond->condlock);

    if (timeout == 0)
        flags = 0;

    rv = acquire_sem_etc(wait->sem, 1, flags, timeout);

    apr_thread_mutex_lock(cond->condlock);

    if (rv != B_OK) {
        if (rv == B_TIMED_OUT)
            return APR_TIMEUP;
        return rv;
    }

    acquire_sem(cond->lock);
    APR_RING_REMOVE(wait, link);
    APR_RING_INSERT_TAIL(&cond->flist, wait, waiter_t, link);
    release_sem(cond->lock);

    return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_pollset_add(apr_pollset_t *pollset,
                                          const apr_pollfd_t *descriptor)
{
    apr_os_sock_t fd;
    pfd_elem_t *elem;
    int res;
    apr_status_t rv = APR_SUCCESS;

    pollset_lock_rings();

    if (!APR_RING_EMPTY(&(pollset->free_ring), pfd_elem_t, link)) {
        elem = APR_RING_FIRST(&(pollset->free_ring));
        APR_RING_REMOVE(elem, link);
    }
    else {
        elem = (pfd_elem_t *) apr_palloc(pollset->pool, sizeof(pfd_elem_t));
        APR_RING_ELEM_INIT(elem, link);
    }
    elem->pfd = *descriptor;

    if (descriptor->desc_type == APR_POLL_SOCKET) {
        fd = descriptor->desc.s->socketdes;
    }
    else {
        fd = descriptor->desc.f->filedes;
    }

    res = port_associate(pollset->port_fd, PORT_SOURCE_FD, fd,
                         get_event(descriptor->reqevents), (void *)elem);

    if (res < 0) {
        rv = APR_ENOMEM;
        APR_RING_INSERT_TAIL(&(pollset->free_ring), elem, pfd_elem_t, link);
    }
    else {
        pollset->nelts++;
        APR_RING_INSERT_TAIL(&(pollset->query_ring), elem, pfd_elem_t, link);
    }

    pollset_unlock_rings();

    return rv;
}
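/*
 * Illustrative sketch (not part of APR) of the "free ring" recycling idiom
 * the pollset add functions above rely on: reuse a spare node from the free
 * ring when one exists, otherwise allocate a fresh one. node_t and
 * node_ring_t are invented stand-ins for pfd_elem_t and its ring head, and
 * the malloc() fallback is a simplification; APR itself allocates such
 * elements from the pollset's pool.
 */
#include <stdlib.h>
#include <apr_ring.h>

typedef struct node_t node_t;
struct node_t {
    APR_RING_ENTRY(node_t) link;
    int payload;
};
typedef APR_RING_HEAD(node_ring_t, node_t) node_ring_t;

node_t *node_get(node_ring_t *free_ring)
{
    node_t *n;

    if (!APR_RING_EMPTY(free_ring, node_t, link)) {
        /* Reuse a previously released node. */
        n = APR_RING_FIRST(free_ring);
        APR_RING_REMOVE(n, link);
    }
    else {
        /* Nothing to recycle: allocate and initialize a fresh one. */
        n = malloc(sizeof(*n));
        if (n == NULL) {
            return NULL;
        }
        APR_RING_ELEM_INIT(n, link);
    }
    return n;
}

void node_put(node_ring_t *free_ring, node_t *n)
{
    /* Return the node to the free ring for later reuse. */
    APR_RING_INSERT_TAIL(free_ring, n, node_t, link);
}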
static apr_status_t impl_pollset_remove(apr_pollset_t *pollset,
                                        const apr_pollfd_t *descriptor)
{
    pfd_elem_t *ep;
    apr_status_t rv = APR_SUCCESS;
#ifdef HAVE_MTCP
    struct mtcp_epoll_event ev = {0};
#else
    struct epoll_event ev = {0}; /* ignored, but must be passed with
                                  * kernel < 2.6.9 */
#endif
    int ret = -1;

    if (descriptor->desc_type == APR_POLL_SOCKET) {
#ifdef HAVE_MTCP
        int cpu = sched_getcpu();
        ret = mtcp_epoll_ctl(g_mctx[cpu], pollset->p->epoll_fd, EPOLL_CTL_DEL,
                             descriptor->desc.s->socketdes, &ev);
#else
        ret = epoll_ctl(pollset->p->epoll_fd, EPOLL_CTL_DEL,
                        descriptor->desc.s->socketdes, &ev);
#endif
    }
    else {
        ret = epoll_ctl(pollset->p->epoll_fd, EPOLL_CTL_DEL,
                        descriptor->desc.f->filedes, &ev);
    }
    if (ret < 0) {
        rv = APR_NOTFOUND;
    }

    if (!(pollset->flags & APR_POLLSET_NOCOPY)) {
        pollset_lock_rings();

        for (ep = APR_RING_FIRST(&(pollset->p->query_ring));
             ep != APR_RING_SENTINEL(&(pollset->p->query_ring),
                                     pfd_elem_t, link);
             ep = APR_RING_NEXT(ep, link)) {

            if (descriptor->desc.s == ep->pfd.desc.s) {
                APR_RING_REMOVE(ep, link);
                APR_RING_INSERT_TAIL(&(pollset->p->dead_ring),
                                     ep, pfd_elem_t, link);
                break;
            }
        }

        pollset_unlock_rings();
    }

    return rv;
}
static apr_status_t impl_pollset_remove(apr_pollset_t *pollset,
                                        const apr_pollfd_t *descriptor)
{
    pfd_elem_t *ep;
    apr_status_t rv;
    apr_os_sock_t fd;

    pollset_lock_rings();

    if (descriptor->desc_type == APR_POLL_SOCKET) {
        fd = descriptor->desc.s->socketdes;
    }
    else {
        fd = descriptor->desc.f->filedes;
    }

    rv = APR_NOTFOUND; /* unless at least one of the specified
                        * conditions is found and removed */

    if (descriptor->reqevents & APR_POLLIN) {
        EV_SET(&pollset->p->kevent, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);

        if (kevent(pollset->p->kqueue_fd, &pollset->p->kevent, 1, NULL, 0,
                   NULL) != -1) {
            rv = APR_SUCCESS;
        }
    }

    if (descriptor->reqevents & APR_POLLOUT) {
        EV_SET(&pollset->p->kevent, fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);

        if (kevent(pollset->p->kqueue_fd, &pollset->p->kevent, 1, NULL, 0,
                   NULL) != -1) {
            rv = APR_SUCCESS;
        }
    }

    for (ep = APR_RING_FIRST(&(pollset->p->query_ring));
         ep != APR_RING_SENTINEL(&(pollset->p->query_ring), pfd_elem_t, link);
         ep = APR_RING_NEXT(ep, link)) {

        if (descriptor->desc.s == ep->pfd.desc.s) {
            APR_RING_REMOVE(ep, link);
            APR_RING_INSERT_TAIL(&(pollset->p->dead_ring),
                                 ep, pfd_elem_t, link);
            break;
        }
    }

    pollset_unlock_rings();

    return rv;
}
/*
 * schedule a task to run in "time" microseconds. Find the spot in the ring where
 * the time fits. Adjust the short_time so the thread wakes up when the time is reached.
 */
static apr_status_t schedule_task(apr_thread_pool_t *me,
                                  apr_thread_start_t func, void *param,
                                  void *owner, apr_interval_time_t time)
{
    apr_thread_pool_task_t *t;
    apr_thread_pool_task_t *t_loc;
    apr_thread_t *thd;
    apr_status_t rv = APR_SUCCESS;

    apr_thread_mutex_lock(me->lock);

    t = task_new(me, func, param, 0, owner, time);
    if (NULL == t) {
        apr_thread_mutex_unlock(me->lock);
        return APR_ENOMEM;
    }
    t_loc = APR_RING_FIRST(me->scheduled_tasks);
    while (NULL != t_loc) {
        /* if the time is less than the entry insert ahead of it */
        if (t->dispatch.time < t_loc->dispatch.time) {
            ++me->scheduled_task_cnt;
            APR_RING_INSERT_BEFORE(t_loc, t, link);
            break;
        }
        else {
            t_loc = APR_RING_NEXT(t_loc, link);
            if (t_loc == APR_RING_SENTINEL(me->scheduled_tasks,
                                           apr_thread_pool_task, link)) {
                ++me->scheduled_task_cnt;
                APR_RING_INSERT_TAIL(me->scheduled_tasks, t,
                                     apr_thread_pool_task, link);
                break;
            }
        }
    }

    /* there should be at least one thread for scheduled tasks */
    if (0 == me->thd_cnt) {
        rv = apr_thread_create(&thd, NULL, thread_pool_func, me, me->pool);
        if (APR_SUCCESS == rv) {
            ++me->thd_cnt;
            if (me->thd_cnt > me->thd_high)
                me->thd_high = me->thd_cnt;
        }
    }
    apr_thread_mutex_unlock(me->lock);
    apr_thread_mutex_lock(me->cond_lock);
    apr_thread_cond_signal(me->cond);
    apr_thread_mutex_unlock(me->cond_lock);

    return rv;
}
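/*
 * Illustrative sketch (not part of APR) of the sorted-insert walk that
 * schedule_task() above performs: scan from the head and insert the new
 * element before the first entry with a larger key, falling back to
 * APR_RING_INSERT_TAIL once the sentinel is reached (which also covers the
 * empty-ring case). task_t and task_ring_t are invented stand-ins for
 * apr_thread_pool_task_t and its ring head.
 */
#include <apr_ring.h>
#include <apr_time.h>

typedef struct task_t task_t;
struct task_t {
    APR_RING_ENTRY(task_t) link;
    apr_interval_time_t when;      /* sort key: dispatch time */
};
typedef APR_RING_HEAD(task_ring_t, task_t) task_ring_t;

void task_insert_sorted(task_ring_t *ring, task_t *t)
{
    task_t *cur;

    for (cur = APR_RING_FIRST(ring);
         cur != APR_RING_SENTINEL(ring, task_t, link);
         cur = APR_RING_NEXT(cur, link)) {
        if (t->when < cur->when) {
            /* Earlier dispatch time: insert ahead of this entry. */
            APR_RING_INSERT_BEFORE(cur, t, link);
            return;
        }
    }
    /* Largest key so far (or empty ring): append at the tail. */
    APR_RING_INSERT_TAIL(ring, t, task_t, link);
}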
APR_DECLARE(apr_status_t) apr_thread_cond_broadcast(apr_thread_cond_t *cond)
{
    struct waiter_t *wake;

    acquire_sem(cond->lock);
    while (!APR_RING_EMPTY(&cond->alist, waiter_t, link)) {
        wake = APR_RING_FIRST(&cond->alist);
        APR_RING_REMOVE(wake, link);
        release_sem(wake->sem);
        APR_RING_INSERT_TAIL(&cond->flist, wake, waiter_t, link);
    }
    release_sem(cond->lock);

    return APR_SUCCESS;
}
/** Open the filter and add the filter object to the list. */
int kdfilter_add(kdfilter *self, struct filter_driver *filter_drv)
{
    DEBUG(_log_filter_, "Registering filter: %s.", filter_drv->filter_name);

    /* Open the filter driver. */
    if ((filter_drv->p_open)(self, &filter_drv->private_data) < 0) {
        filter_drv->private_data = NULL;
        filter_drv = NULL;
        return -1;
    }

    /* Append the filter to the list. */
    APR_RING_INSERT_TAIL(&self->filter_drv_list, filter_drv, filter_driver, link);

    return 0;
}
apr_status_t ap_queue_push_timer(fd_queue_t *queue, timer_event_t *te)
{
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    AP_DEBUG_ASSERT(!queue->terminated);

    APR_RING_INSERT_TAIL(&queue->timers, te, timer_event_t, link);

    apr_thread_cond_signal(queue->not_empty);

    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    return APR_SUCCESS;
}
/** Set one-shot timer */
MPF_DECLARE(apt_bool_t) mpf_timer_set(mpf_timer_t *timer, apr_uint32_t timeout)
{
    mpf_timer_manager_t *manager = timer->manager;

    if(timeout <= 0 || !timer->proc) {
        return FALSE;
    }

    /* calculate time to elapse */
    timer->scheduled_time = manager->elapsed_time + timeout;
    apt_log(APT_LOG_MARK,APT_PRIO_DEBUG,"Set Timer 0x%x [%lu]",timer,timer->scheduled_time);
    if(APR_RING_EMPTY(&timer->manager->head, mpf_timer_t, link)) {
        APR_RING_INSERT_TAIL(&manager->head,timer,mpf_timer_t,link);
        return TRUE;
    }

    /* insert the new node (timer) into the list sorted by scheduled time */
    return mpf_timer_insert(manager,timer);
}
/**
 * Add a new BMX Property to a BMX Bean.
 * @param bean The bean where the property will be added.
 * @param prop The property to add.
 */
void bmx_bean_prop_add(struct bmx_bean *bean, struct bmx_property *prop)
{
    APR_RING_INSERT_TAIL(&(bean->bean_props), prop, bmx_property, link);
}
static apt_bool_t mrcp_server_agent_connection_accept(mrcp_connection_agent_t *agent)
{
    char *local_ip = NULL;
    char *remote_ip = NULL;

    mrcp_connection_t *connection = mrcp_connection_create();

    if(apr_socket_accept(&connection->sock,agent->listen_sock,connection->pool) != APR_SUCCESS) {
        apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Failed to Accept Connection");
        mrcp_connection_destroy(connection);
        return FALSE;
    }

    if(apr_socket_addr_get(&connection->r_sockaddr,APR_REMOTE,connection->sock) != APR_SUCCESS ||
       apr_socket_addr_get(&connection->l_sockaddr,APR_LOCAL,connection->sock) != APR_SUCCESS) {
        apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Failed to Get Socket Address");
        apr_socket_close(connection->sock);
        mrcp_connection_destroy(connection);
        return FALSE;
    }

    apr_sockaddr_ip_get(&local_ip,connection->l_sockaddr);
    apr_sockaddr_ip_get(&remote_ip,connection->r_sockaddr);
    apt_string_set(&connection->remote_ip,remote_ip);
    connection->id = apr_psprintf(connection->pool,"%s:%hu <-> %s:%hu",
                                  local_ip,connection->l_sockaddr->port,
                                  remote_ip,connection->r_sockaddr->port);

    if(apr_hash_count(agent->pending_channel_table) == 0) {
        apt_log(APT_LOG_MARK,APT_PRIO_NOTICE,"Reject Unexpected TCP/MRCPv2 Connection %s",connection->id);
        apr_socket_close(connection->sock);
        mrcp_connection_destroy(connection);
        return FALSE;
    }

    memset(&connection->sock_pfd,0,sizeof(apr_pollfd_t));
    connection->sock_pfd.desc_type = APR_POLL_SOCKET;
    connection->sock_pfd.reqevents = APR_POLLIN;
    connection->sock_pfd.desc.s = connection->sock;
    connection->sock_pfd.client_data = connection;
    if(apt_poller_task_descriptor_add(agent->task, &connection->sock_pfd) != TRUE) {
        apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Failed to Add to Pollset %s",connection->id);
        apr_socket_close(connection->sock);
        mrcp_connection_destroy(connection);
        return FALSE;
    }

    apt_log(APT_LOG_MARK,APT_PRIO_NOTICE,"Accepted TCP/MRCPv2 Connection %s",connection->id);
    connection->agent = agent;
    APR_RING_INSERT_TAIL(&agent->connection_list,connection,mrcp_connection_t,link);

    connection->parser = mrcp_parser_create(agent->resource_factory,connection->pool);
    connection->generator = mrcp_generator_create(agent->resource_factory,connection->pool);

    connection->tx_buffer_size = agent->tx_buffer_size;
    connection->tx_buffer = apr_palloc(connection->pool,connection->tx_buffer_size+1);

    connection->rx_buffer_size = agent->rx_buffer_size;
    connection->rx_buffer = apr_palloc(connection->pool,connection->rx_buffer_size+1);
    apt_text_stream_init(&connection->rx_stream,connection->rx_buffer,connection->rx_buffer_size);

    if(apt_log_masking_get() != APT_LOG_MASKING_NONE) {
        connection->verbose = FALSE;
        mrcp_parser_verbose_set(connection->parser,TRUE);
        mrcp_generator_verbose_set(connection->generator,TRUE);
    }
    return TRUE;
}
/*
 * The worker thread function. Take a task from the queue and perform it if
 * there is any. Otherwise, put itself into the idle thread list and wait for
 * a signal to wake up.
 * The thread terminates directly, by detaching and exiting, when it is asked
 * to stop after finishing a task. Otherwise, the thread should be in the idle
 * thread list and should be joined.
 */
static void *APR_THREAD_FUNC thread_pool_func(apr_thread_t * t, void *param)
{
    apr_thread_pool_t *me = param;
    apr_thread_pool_task_t *task = NULL;
    apr_interval_time_t wait;
    struct apr_thread_list_elt *elt;

    apr_thread_mutex_lock(me->lock);
    --me->spawning_cnt;
    elt = elt_new(me, t);
    if (!elt) {
        apr_thread_mutex_unlock(me->lock);
        apr_thread_exit(t, APR_ENOMEM);
    }

    while (!me->terminated && elt->state != TH_STOP) {
        /* If this is not a new element, it was awakened from the idle list */
        if (APR_RING_NEXT(elt, link) != elt) {
            --me->idle_cnt;
            APR_RING_REMOVE(elt, link);
        }

        APR_RING_INSERT_TAIL(me->busy_thds, elt, apr_thread_list_elt, link);
        task = pop_task(me);
        while (NULL != task && !me->terminated) {
            ++me->tasks_run;
            elt->current_owner = task->owner;
            apr_thread_mutex_unlock(me->lock);
            apr_thread_data_set(task, "apr_thread_pool_task", NULL, t);
            task->func(t, task->param);
            apr_thread_mutex_lock(me->lock);
            APR_RING_INSERT_TAIL(me->recycled_tasks, task,
                                 apr_thread_pool_task, link);
            elt->current_owner = NULL;
            if (TH_STOP == elt->state) {
                break;
            }
            task = pop_task(me);
        }
        assert(NULL == elt->current_owner);
        if (TH_STOP != elt->state)
            APR_RING_REMOVE(elt, link);

        /* Test whether a busy thread has been asked to stop; such a thread is
         * not joinable */
        if ((me->idle_cnt >= me->idle_max
             && !(me->scheduled_task_cnt && 0 >= me->idle_max)
             && !me->idle_wait)
            || me->terminated || elt->state != TH_RUN) {
            --me->thd_cnt;
            if ((TH_PROBATION == elt->state) && me->idle_wait)
                ++me->thd_timed_out;
            APR_RING_INSERT_TAIL(me->recycled_thds, elt,
                                 apr_thread_list_elt, link);
            apr_thread_mutex_unlock(me->lock);
            apr_thread_detach(t);
            apr_thread_exit(t, APR_SUCCESS);
            return NULL;        /* should not get here; safety net */
        }

        /* busy thread becomes idle */
        ++me->idle_cnt;
        APR_RING_INSERT_TAIL(me->idle_thds, elt, apr_thread_list_elt, link);

        /*
         * If there is a scheduled task, always wake up in time to perform it,
         * since there is no guarantee that the current idle threads will be
         * the ones woken for the next scheduled task.
         */
        if (me->scheduled_task_cnt)
            wait = waiting_time(me);
        else if (me->idle_cnt > me->idle_max) {
            wait = me->idle_wait;
            elt->state = TH_PROBATION;
        }
        else
            wait = -1;

        if (wait >= 0) {
            apr_thread_cond_timedwait(me->cond, me->lock, wait);
        }
        else {
            apr_thread_cond_wait(me->cond, me->lock);
        }
    }

    /* idle thread has been asked to stop; it will be joined */
    --me->thd_cnt;
    apr_thread_mutex_unlock(me->lock);
    apr_thread_exit(t, APR_SUCCESS);
    return NULL;                /* should not get here; safety net */
}
/*
 * walk the cache directory tree
 */
static int process_dir(char *path, apr_pool_t *pool)
{
    apr_dir_t *dir;
    apr_pool_t *p;
    apr_hash_t *h;
    apr_hash_index_t *i;
    apr_file_t *fd;
    apr_status_t status;
    apr_finfo_t info;
    apr_size_t len;
    apr_time_t current, deviation;
    char *nextpath, *base, *ext, *orig_basename;
    APR_RING_ENTRY(_direntry) anchor;
    DIRENTRY *d, *t, *n;
    ENTRY *e;
    int skip, retries;
    disk_cache_info_t disk_info;

    APR_RING_INIT(&anchor, _direntry, link);
    apr_pool_create(&p, pool);
    h = apr_hash_make(p);
    fd = NULL;
    skip = 0;
    deviation = MAXDEVIATION * APR_USEC_PER_SEC;

    if (apr_dir_open(&dir, path, p) != APR_SUCCESS) {
        return 1;
    }

    while (apr_dir_read(&info, 0, dir) == APR_SUCCESS && !interrupted) {
        if (!strcmp(info.name, ".") || !strcmp(info.name, "..")) {
            continue;
        }
        d = apr_pcalloc(p, sizeof(DIRENTRY));
        d->basename = apr_pstrcat(p, path, "/", info.name, NULL);
        APR_RING_INSERT_TAIL(&anchor, d, _direntry, link);
    }

    apr_dir_close(dir);

    if (interrupted) {
        return 1;
    }

    skip = baselen + 1;

    for (d = APR_RING_FIRST(&anchor);
         !interrupted && d != APR_RING_SENTINEL(&anchor, _direntry, link);
         d = n) {
        n = APR_RING_NEXT(d, link);
        base = strrchr(d->basename, '/');
        if (!base++) {
            base = d->basename;
        }
        ext = strchr(base, '.');

        /* there may be temporary files which may be gone before
         * processing, always skip these if not in realclean mode
         */
        if (!ext && !realclean) {
            if (!strncasecmp(base, AP_TEMPFILE_BASE, AP_TEMPFILE_BASELEN)
                && strlen(base) == AP_TEMPFILE_NAMELEN) {
                continue;
            }
        }

        /* this may look strange but apr_stat() may return errno which
         * is system dependent and there may be transient failures,
         * so just blindly retry for a short while
         */
        retries = STAT_ATTEMPTS;
        status = APR_SUCCESS;
        do {
            if (status != APR_SUCCESS) {
                apr_sleep(STAT_DELAY);
            }
            status = apr_stat(&info, d->basename, DIRINFO, p);
        } while (status != APR_SUCCESS && !interrupted && --retries);

        /* apache may have created a file which we detected but then deleted
         * it before we could get file information; if we cannot stat the
         * file, simply ignore it in this case
         */
        if (status != APR_SUCCESS) {
            if (!realclean && !interrupted) {
                continue;
            }
            return 1;
        }

        if (info.filetype == APR_DIR) {
            /* Make a copy of the basename, as process_dir modifies it */
            orig_basename = apr_pstrdup(pool, d->basename);
            if (process_dir(d->basename, pool)) {
                return 1;
            }
            /* If asked to delete dirs, do so now. We don't care if it fails.
             * If it fails, it likely means there was something else there.
             */
            if (deldirs && !dryrun) {
                apr_dir_remove(orig_basename, pool);
            }
            continue;
        }

        if (info.filetype != APR_REG) {
            continue;
        }

        if (!ext) {
            if (!strncasecmp(base, AP_TEMPFILE_BASE, AP_TEMPFILE_BASELEN)
                && strlen(base) == AP_TEMPFILE_NAMELEN) {
                d->basename += skip;
                d->type = TEMP;
                d->dsize = info.size;
                apr_hash_set(h, d->basename, APR_HASH_KEY_STRING, d);
            }
            continue;
        }

        if (!strcasecmp(ext, CACHE_HEADER_SUFFIX)) {
            *ext = '\0';
            d->basename += skip;
            /* if a user manually creates a '.header' file */
            if (d->basename[0] == '\0') {
                continue;
            }
            t = apr_hash_get(h, d->basename, APR_HASH_KEY_STRING);
            if (t) {
                d = t;
            }
            d->type |= HEADER;
            d->htime = info.mtime;
            d->hsize = info.size;
            apr_hash_set(h, d->basename, APR_HASH_KEY_STRING, d);
            continue;
        }

        if (!strcasecmp(ext, CACHE_DATA_SUFFIX)) {
            *ext = '\0';
            d->basename += skip;
            /* if a user manually creates a '.data' file */
            if (d->basename[0] == '\0') {
                continue;
            }
            t = apr_hash_get(h, d->basename, APR_HASH_KEY_STRING);
            if (t) {
                d = t;
            }
            d->type |= DATA;
            d->dtime = info.mtime;
            d->dsize = info.size;
            apr_hash_set(h, d->basename, APR_HASH_KEY_STRING, d);
        }
    }

    if (interrupted) {
        return 1;
    }

    path[baselen] = '\0';

    for (i = apr_hash_first(p, h); i && !interrupted; i = apr_hash_next(i)) {
        void *hvalue;
        apr_uint32_t format;

        apr_hash_this(i, NULL, NULL, &hvalue);
        d = hvalue;

        switch (d->type) {
        case HEADERDATA:
            nextpath = apr_pstrcat(p, path, "/", d->basename,
                                   CACHE_HEADER_SUFFIX, NULL);
            if (apr_file_open(&fd, nextpath, APR_FOPEN_READ | APR_FOPEN_BINARY,
                              APR_OS_DEFAULT, p) == APR_SUCCESS) {
                len = sizeof(format);
                if (apr_file_read_full(fd, &format, len, &len) == APR_SUCCESS) {
                    if (format == DISK_FORMAT_VERSION) {
                        apr_off_t offset = 0;

                        apr_file_seek(fd, APR_SET, &offset);

                        len = sizeof(disk_cache_info_t);

                        if (apr_file_read_full(fd, &disk_info, len, &len)
                            == APR_SUCCESS) {
                            apr_file_close(fd);
                            e = apr_palloc(pool, sizeof(ENTRY));
                            APR_RING_INSERT_TAIL(&root, e, _entry, link);
                            e->expire = disk_info.expire;
                            e->response_time = disk_info.response_time;
                            e->htime = d->htime;
                            e->dtime = d->dtime;
                            e->hsize = d->hsize;
                            e->dsize = d->dsize;
                            e->basename = apr_pstrdup(pool, d->basename);
                            break;
                        }
                        else {
                            apr_file_close(fd);
                        }
                    }
                    else if (format == VARY_FORMAT_VERSION) {
                        /* This must be a URL that added Vary headers later,
                         * so kill the orphaned .data file
                         */
                        apr_file_close(fd);
                        apr_file_remove(apr_pstrcat(p, path, "/", d->basename,
                                                    CACHE_DATA_SUFFIX, NULL),
                                        p);
                    }
                }
                else {
                    apr_file_close(fd);
                }
            }

            /* we have a somehow unreadable headers file which is associated
             * with a data file. this may be caused by apache currently
             * rewriting the headers file. thus we may delete the file set
             * either in realclean mode or if the headers file modification
             * timestamp is not within a specified positive or negative offset
             * to the current time.
             */
            current = apr_time_now();
            if (realclean || d->htime < current - deviation
                || d->htime > current + deviation) {
                delete_entry(path, d->basename, p);
                unsolicited += d->hsize;
                unsolicited += d->dsize;
            }
            break;

        /* single data and header files may be deleted either in realclean
         * mode or if their modification timestamp is not within a
         * specified positive or negative offset to the current time.
         *
         * this handling is necessary due to possible race conditions
         * between apache and this process
         */
        case HEADER:
            current = apr_time_now();
            nextpath = apr_pstrcat(p, path, "/", d->basename,
                                   CACHE_HEADER_SUFFIX, NULL);
            if (apr_file_open(&fd, nextpath, APR_FOPEN_READ | APR_FOPEN_BINARY,
                              APR_OS_DEFAULT, p) == APR_SUCCESS) {
                len = sizeof(format);
                if (apr_file_read_full(fd, &format, len, &len) == APR_SUCCESS) {
                    if (format == VARY_FORMAT_VERSION) {
                        apr_time_t expires;

                        len = sizeof(expires);

                        apr_file_read_full(fd, &expires, len, &len);

                        apr_file_close(fd);

                        if (expires < current) {
                            delete_entry(path, d->basename, p);
                        }
                        break;
                    }
                }
                apr_file_close(fd);
            }

            if (realclean || d->htime < current - deviation
                || d->htime > current + deviation) {
                delete_entry(path, d->basename, p);
                unsolicited += d->hsize;
            }
            break;

        case DATA:
            current = apr_time_now();
            if (realclean || d->dtime < current - deviation
                || d->dtime > current + deviation) {
                delete_entry(path, d->basename, p);
                unsolicited += d->dsize;
            }
            break;

        /* temp files may only be deleted in realclean mode which
         * is asserted above if a tempfile is in the hash array
         */
        case TEMP:
            delete_file(path, d->basename, p);
            unsolicited += d->dsize;
            break;
        }
    }

    if (interrupted) {
        return 1;
    }

    apr_pool_destroy(p);

    if (benice) {
        apr_sleep(NICE_DELAY);
    }

    if (interrupted) {
        return 1;
    }

    return 0;
}
static APR_INLINE apt_bool_t mpf_buffer_chunk_write(mpf_buffer_t *buffer, mpf_chunk_t *chunk)
{
    APR_RING_INSERT_TAIL(&buffer->head,chunk,mpf_chunk_t,link);
    return TRUE;
}
static mrcp_connection_t* mrcp_client_agent_connection_create(mrcp_connection_agent_t *agent, mrcp_control_descriptor_t *descriptor)
{
    char *local_ip = NULL;
    char *remote_ip = NULL;
    mrcp_connection_t *connection = mrcp_connection_create();

    apr_sockaddr_info_get(&connection->r_sockaddr,descriptor->ip.buf,APR_INET,descriptor->port,0,connection->pool);
    if(!connection->r_sockaddr) {
        mrcp_connection_destroy(connection);
        return NULL;
    }

    if(apr_socket_create(&connection->sock,connection->r_sockaddr->family,SOCK_STREAM,APR_PROTO_TCP,connection->pool) != APR_SUCCESS) {
        mrcp_connection_destroy(connection);
        return NULL;
    }

    apr_socket_opt_set(connection->sock, APR_SO_NONBLOCK, 0);
    apr_socket_timeout_set(connection->sock, -1);
    apr_socket_opt_set(connection->sock, APR_SO_REUSEADDR, 1);

    if(apr_socket_connect(connection->sock, connection->r_sockaddr) != APR_SUCCESS) {
        apr_socket_close(connection->sock);
        mrcp_connection_destroy(connection);
        return NULL;
    }

    if(apr_socket_addr_get(&connection->l_sockaddr,APR_LOCAL,connection->sock) != APR_SUCCESS) {
        apr_socket_close(connection->sock);
        mrcp_connection_destroy(connection);
        return NULL;
    }

    apr_sockaddr_ip_get(&local_ip,connection->l_sockaddr);
    apr_sockaddr_ip_get(&remote_ip,connection->r_sockaddr);
    connection->id = apr_psprintf(connection->pool,"%s:%hu <-> %s:%hu",
                                  local_ip,connection->l_sockaddr->port,
                                  remote_ip,connection->r_sockaddr->port);

    memset(&connection->sock_pfd,0,sizeof(apr_pollfd_t));
    connection->sock_pfd.desc_type = APR_POLL_SOCKET;
    connection->sock_pfd.reqevents = APR_POLLIN;
    connection->sock_pfd.desc.s = connection->sock;
    connection->sock_pfd.client_data = connection;
    if(apt_poller_task_descriptor_add(agent->task, &connection->sock_pfd) != TRUE) {
        apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Failed to Add to Pollset %s",connection->id);
        apr_socket_close(connection->sock);
        mrcp_connection_destroy(connection);
        return NULL;
    }

    apt_log(APT_LOG_MARK,APT_PRIO_NOTICE,"Established TCP/MRCPv2 Connection %s",connection->id);
    connection->agent = agent;
    APR_RING_INSERT_TAIL(&agent->connection_list,connection,mrcp_connection_t,link);

    connection->parser = mrcp_parser_create(agent->resource_factory,connection->pool);
    connection->generator = mrcp_generator_create(agent->resource_factory,connection->pool);

    connection->tx_buffer_size = agent->tx_buffer_size;
    connection->tx_buffer = apr_palloc(connection->pool,connection->tx_buffer_size+1);

    connection->rx_buffer_size = agent->rx_buffer_size;
    connection->rx_buffer = apr_palloc(connection->pool,connection->rx_buffer_size+1);
    apt_text_stream_init(&connection->rx_stream,connection->rx_buffer,connection->rx_buffer_size);

    if(apt_log_masking_get() != APT_LOG_MASKING_NONE) {
        connection->verbose = FALSE;
        mrcp_parser_verbose_set(connection->parser,TRUE);
        mrcp_generator_verbose_set(connection->generator,TRUE);
    }
    return connection;
}
APR_DECLARE(apr_status_t) apr_pollset_poll(apr_pollset_t *pollset,
                                           apr_interval_time_t timeout,
                                           apr_int32_t *num,
                                           const apr_pollfd_t **descriptors)
{
    apr_os_sock_t fd;
    int ret, i;
    unsigned int nget;
    pfd_elem_t *ep;
    struct timespec tv, *tvptr;
    apr_status_t rv = APR_SUCCESS;

    if (timeout < 0) {
        tvptr = NULL;
    }
    else {
        tv.tv_sec = (long) apr_time_sec(timeout);
        tv.tv_nsec = (long) apr_time_usec(timeout) * 1000;
        tvptr = &tv;
    }

    nget = 1;

    pollset_lock_rings();

    while (!APR_RING_EMPTY(&(pollset->add_ring), pfd_elem_t, link)) {
        ep = APR_RING_FIRST(&(pollset->add_ring));
        APR_RING_REMOVE(ep, link);

        if (ep->pfd.desc_type == APR_POLL_SOCKET) {
            fd = ep->pfd.desc.s->socketdes;
        }
        else {
            fd = ep->pfd.desc.f->filedes;
        }

        port_associate(pollset->port_fd, PORT_SOURCE_FD,
                       fd, get_event(ep->pfd.reqevents), ep);

        APR_RING_INSERT_TAIL(&(pollset->query_ring), ep, pfd_elem_t, link);
    }

    pollset_unlock_rings();

    ret = port_getn(pollset->port_fd, pollset->port_set, pollset->nalloc,
                    &nget, tvptr);

    (*num) = nget;

    if (ret == -1) {
        (*num) = 0;
        if (errno == ETIME || errno == EINTR) {
            rv = APR_TIMEUP;
        }
        else {
            rv = APR_EGENERAL;
        }
    }
    else if (nget == 0) {
        rv = APR_TIMEUP;
    }
    else {
        pollset_lock_rings();

        for (i = 0; i < nget; i++) {
            pollset->result_set[i] =
                (((pfd_elem_t*)(pollset->port_set[i].portev_user))->pfd);
            pollset->result_set[i].rtnevents =
                get_revent(pollset->port_set[i].portev_events);

            APR_RING_REMOVE((pfd_elem_t*)pollset->port_set[i].portev_user,
                            link);

            APR_RING_INSERT_TAIL(&(pollset->add_ring),
                                 (pfd_elem_t*)pollset->port_set[i].portev_user,
                                 pfd_elem_t, link);
        }

        pollset_unlock_rings();

        if (descriptors) {
            *descriptors = pollset->result_set;
        }
    }

    pollset_lock_rings();

    /* Shift all PFDs in the Dead Ring to the Free Ring */
    APR_RING_CONCAT(&(pollset->free_ring), &(pollset->dead_ring),
                    pfd_elem_t, link);

    pollset_unlock_rings();

    return rv;
}
static apr_status_t impl_pollset_poll(apr_pollset_t *pollset,
                                      apr_interval_time_t timeout,
                                      apr_int32_t *num,
                                      const apr_pollfd_t **descriptors)
{
    apr_os_sock_t fd;
    int ret, i, j;
    unsigned int nget;
    pfd_elem_t *ep;
    apr_status_t rv = APR_SUCCESS;
    apr_pollfd_t fp;

    nget = 1;

    pollset_lock_rings();

    apr_atomic_inc32(&pollset->p->waiting);

    while (!APR_RING_EMPTY(&(pollset->p->add_ring), pfd_elem_t, link)) {
        ep = APR_RING_FIRST(&(pollset->p->add_ring));
        APR_RING_REMOVE(ep, link);

        if (ep->pfd.desc_type == APR_POLL_SOCKET) {
            fd = ep->pfd.desc.s->socketdes;
        }
        else {
            fd = ep->pfd.desc.f->filedes;
        }

        ret = port_associate(pollset->p->port_fd, PORT_SOURCE_FD,
                             fd, get_event(ep->pfd.reqevents), ep);
        if (ret < 0) {
            rv = apr_get_netos_error();
            APR_RING_INSERT_TAIL(&(pollset->p->free_ring), ep,
                                 pfd_elem_t, link);
            break;
        }

        ep->on_query_ring = 1;
        APR_RING_INSERT_TAIL(&(pollset->p->query_ring), ep, pfd_elem_t, link);
    }

    pollset_unlock_rings();

    if (rv != APR_SUCCESS) {
        apr_atomic_dec32(&pollset->p->waiting);
        return rv;
    }

    rv = call_port_getn(pollset->p->port_fd, pollset->p->port_set,
                        pollset->nalloc, &nget, timeout);

    /* decrease the waiting ASAP to reduce the window for calling
       port_associate within apr_pollset_add() */
    apr_atomic_dec32(&pollset->p->waiting);

    (*num) = nget;
    if (nget) {
        pollset_lock_rings();

        for (i = 0, j = 0; i < nget; i++) {
            fp = (((pfd_elem_t*)(pollset->p->port_set[i].portev_user))->pfd);
            if ((pollset->flags & APR_POLLSET_WAKEABLE) &&
                fp.desc_type == APR_POLL_FILE &&
                fp.desc.f == pollset->wakeup_pipe[0]) {
                apr_pollset_drain_wakeup_pipe(pollset);
                rv = APR_EINTR;
            }
            else {
                pollset->p->result_set[j] = fp;
                pollset->p->result_set[j].rtnevents =
                    get_revent(pollset->p->port_set[i].portev_events);

                /* If the ring element is still on the query ring, move it
                 * to the add ring for re-association with the event port
                 * later. (It may have already been moved to the dead ring
                 * by a call to pollset_remove on another thread.)
                 */
                ep = (pfd_elem_t *)pollset->p->port_set[i].portev_user;
                if (ep->on_query_ring) {
                    APR_RING_REMOVE(ep, link);
                    ep->on_query_ring = 0;
                    APR_RING_INSERT_TAIL(&(pollset->p->add_ring), ep,
                                         pfd_elem_t, link);
                }
                j++;
            }
        }
        pollset_unlock_rings();
        if ((*num = j)) { /* any event besides wakeup pipe? */
            rv = APR_SUCCESS;
            if (descriptors) {
                *descriptors = pollset->p->result_set;
            }
        }
    }

    pollset_lock_rings();

    /* Shift all PFDs in the Dead Ring to the Free Ring */
    APR_RING_CONCAT(&(pollset->p->free_ring), &(pollset->p->dead_ring),
                    pfd_elem_t, link);

    pollset_unlock_rings();

    return rv;
}
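/*
 * Illustrative sketch (not part of APR) of the APR_RING_CONCAT step both
 * pollers above finish with: splice every element of the "dead" ring onto
 * the tail of the "free" ring in O(1) and leave the dead ring empty, so
 * elements removed while a poll was in flight become reusable. pitem_t and
 * pitem_ring_t are invented stand-ins for pfd_elem_t and its ring head.
 */
#include <apr_ring.h>

typedef struct pitem_t pitem_t;
struct pitem_t {
    APR_RING_ENTRY(pitem_t) link;
};
typedef APR_RING_HEAD(pitem_ring_t, pitem_t) pitem_ring_t;

void recycle_dead(pitem_ring_t *free_ring, pitem_ring_t *dead_ring)
{
    /* After this call dead_ring is empty and its former elements are
     * linked at the tail of free_ring, ready for reuse. */
    APR_RING_CONCAT(free_ring, dead_ring, pitem_t, link);
}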
/**
 * Add a resource to the end of the list, set the time at which
 * it was added to the list.
 * Assumes: that the reslist is locked.
 */
static void push_resource(apr_reslist_t *reslist, apr_res_t *resource)
{
    APR_RING_INSERT_TAIL(&reslist->avail_list, resource, apr_res_t, link);
    resource->freed = apr_time_now();
    reslist->nidle++;
}
/**
 * Free up a resource container by placing it on the free list.
 */
static void free_container(apr_reslist_t *reslist, apr_res_t *container)
{
    APR_RING_INSERT_TAIL(&reslist->free_list, container, apr_res_t, link);
}
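/*
 * Illustrative sketch (not part of apr_reslist) of what a counterpart to
 * push_resource() above might look like. It is a hypothetical helper for this
 * example only: it uses just the members referenced above (avail_list, nidle,
 * and the element's link field) and assumes the caller holds the reslist lock
 * and has already verified that nidle > 0.
 */
static apr_res_t *pop_resource(apr_reslist_t *reslist)
{
    /* The most recently pushed resource sits at the tail of avail_list. */
    apr_res_t *res = APR_RING_LAST(&reslist->avail_list);
    APR_RING_REMOVE(res, link);
    reslist->nidle--;
    return res;
}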