/*
 * Remove from the normal task ring every task that was queued by 'owner',
 * keeping me->task_cnt and the per-priority-segment index me->task_idx[]
 * consistent.
 *
 * NOTE(review): caller presumably holds me->lock -- confirm at call sites;
 * this function takes no lock itself.
 */
static apr_status_t remove_tasks(apr_thread_pool_t *me, void *owner)
{
    apr_thread_pool_task_t *t_loc;
    apr_thread_pool_task_t *next;
    int seg;

    t_loc = APR_RING_FIRST(me->tasks);
    while (t_loc != APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link)) {
        /* Capture the successor before a potential unlink of t_loc. */
        next = APR_RING_NEXT(t_loc, link);
        if (t_loc->owner == owner) {
            --me->task_cnt;
            seg = TASK_PRIORITY_SEG(t_loc);
            /* If this task is the head of its priority segment, advance the
             * segment index to the next task; clear it when the segment
             * becomes empty (next element is the sentinel or belongs to a
             * different segment). */
            if (t_loc == me->task_idx[seg]) {
                me->task_idx[seg] = APR_RING_NEXT(t_loc, link);
                if (me->task_idx[seg] == APR_RING_SENTINEL(me->tasks,
                                                           apr_thread_pool_task,
                                                           link)
                    || TASK_PRIORITY_SEG(me->task_idx[seg]) != seg) {
                    me->task_idx[seg] = NULL;
                }
            }
            APR_RING_REMOVE(t_loc, link);
        }
        t_loc = next;
    }
    return APR_SUCCESS;
}
/*
 * Test if the task is the only one within the priority segment.
 * If it is not, return the first element with same or lower priority.
 * Otherwise, add the task into the queue and return NULL.
 *
 * NOTE: This function is not thread safe by itself. Caller should hold the lock
 */
static apr_thread_pool_task_t *add_if_empty(apr_thread_pool_t * me,
                                            apr_thread_pool_task_t * const t)
{
    int seg;
    int next;
    apr_thread_pool_task_t *t_next;

    seg = TASK_PRIORITY_SEG(t);
    /* Segment already populated: scan forward from its head for the first
     * task whose priority is not higher than t's, and hand that back as the
     * insertion hint for the caller. */
    if (me->task_idx[seg]) {
        assert(APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) !=
               me->task_idx[seg]);
        t_next = me->task_idx[seg];
        while (t_next->dispatch.priority > t->dispatch.priority) {
            t_next = APR_RING_NEXT(t_next, link);
            /* Ran off the end of the ring: insert position is the tail. */
            if (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) ==
                t_next) {
                return t_next;
            }
        }
        return t_next;
    }

    /* Segment is empty: splice t ahead of the first non-empty
     * lower-priority segment, or at the ring tail when none exists. */
    for (next = seg - 1; next >= 0; next--) {
        if (me->task_idx[next]) {
            APR_RING_INSERT_BEFORE(me->task_idx[next], t, link);
            break;
        }
    }
    if (0 > next) {
        APR_RING_INSERT_TAIL(me->tasks, t, apr_thread_pool_task, link);
    }
    /* t is now the head (and only member) of its segment. */
    me->task_idx[seg] = t;
    return NULL;
}
apr_status_t ap_mpm_end_gen_helper(void *unused) /* cleanup on pconf */ { int gen = ap_config_generation - 1; /* differs from MPM generation */ mpm_gen_info_t *cur; if (geninfo == NULL) { /* initial pconf teardown, MPM hasn't run */ return APR_SUCCESS; } cur = APR_RING_FIRST(geninfo); while (cur != APR_RING_SENTINEL(geninfo, mpm_gen_info_t, link) && cur->gen != gen) { cur = APR_RING_NEXT(cur, link); } if (cur == APR_RING_SENTINEL(geninfo, mpm_gen_info_t, link)) { /* last child of generation already exited */ ap_log_error(APLOG_MARK, APLOG_TRACE4, 0, ap_server_conf, "no record of generation %d", gen); } else { cur->done = 1; if (cur->active == 0) { end_gen(cur); } } return APR_SUCCESS; }
/*
 * Remove a descriptor from an event-port (Solaris) backed pollset.
 * Dissociates the fd from the port, then moves the matching bookkeeping
 * element from the query ring (and/or the not-yet-associated add ring) to
 * the dead ring for later recycling.
 *
 * Returns APR_NOTFOUND if port_dissociate() failed (fd was not associated);
 * the ring cleanup still runs in that case.
 */
APR_DECLARE(apr_status_t) apr_pollset_remove(apr_pollset_t *pollset,
                                             const apr_pollfd_t *descriptor)
{
    apr_os_sock_t fd;
    pfd_elem_t *ep;
    apr_status_t rv = APR_SUCCESS;
    int res;

    pollset_lock_rings();

    if (descriptor->desc_type == APR_POLL_SOCKET) {
        fd = descriptor->desc.s->socketdes;
    }
    else {
        fd = descriptor->desc.f->filedes;
    }

    res = port_dissociate(pollset->port_fd, PORT_SOURCE_FD, fd);

    if (res < 0) {
        rv = APR_NOTFOUND;
    }

    /* Retire the element recorded at add time.  The comparison is on the
     * desc.s union member for both socket and file descriptors --
     * presumably relying on the union members sharing storage; confirm
     * against the other backends. */
    if (!APR_RING_EMPTY(&(pollset->query_ring), pfd_elem_t, link)) {
        for (ep = APR_RING_FIRST(&(pollset->query_ring));
             ep != APR_RING_SENTINEL(&(pollset->query_ring), pfd_elem_t, link);
             ep = APR_RING_NEXT(ep, link)) {
            if (descriptor->desc.s == ep->pfd.desc.s) {
                APR_RING_REMOVE(ep, link);
                APR_RING_INSERT_TAIL(&(pollset->dead_ring),
                                     ep, pfd_elem_t, link);
                break;
            }
        }
    }

    /* The descriptor may still be waiting on the add ring, not yet
     * associated with the port. */
    if (!APR_RING_EMPTY(&(pollset->add_ring), pfd_elem_t, link)) {
        for (ep = APR_RING_FIRST(&(pollset->add_ring));
             ep != APR_RING_SENTINEL(&(pollset->add_ring), pfd_elem_t, link);
             ep = APR_RING_NEXT(ep, link)) {
            if (descriptor->desc.s == ep->pfd.desc.s) {
                APR_RING_REMOVE(ep, link);
                APR_RING_INSERT_TAIL(&(pollset->dead_ring),
                                     ep, pfd_elem_t, link);
                break;
            }
        }
    }

    pollset_unlock_rings();

    return rv;
}
/** Get (copy) MRCP header fields.
 *
 *  For every field named in mask_header: if 'header' already has it, leave
 *  it untouched; otherwise copy it from src_header (name and value) when
 *  present there, or copy just the name from the mask when it is not.
 *  Copied fields are added to 'header's header section.
 */
MRCP_DECLARE(apt_bool_t) mrcp_header_fields_get(mrcp_message_header_t *header, const mrcp_message_header_t *src_header, const mrcp_message_header_t *mask_header, apr_pool_t *pool)
{
    apt_header_field_t *header_field;
    const apt_header_field_t *src_header_field;
    const apt_header_field_t *mask_header_field;
    for(mask_header_field = APR_RING_FIRST(&mask_header->header_section.ring);
            mask_header_field != APR_RING_SENTINEL(&mask_header->header_section.ring, apt_header_field_t, link);
                mask_header_field = APR_RING_NEXT(mask_header_field, link)) {
        header_field = apt_header_section_field_get(&header->header_section,mask_header_field->id);
        if(header_field) {
            /* this header field has already been set, skip to the next one */
            continue;
        }

        src_header_field = apt_header_section_field_get(&src_header->header_section,mask_header_field->id);
        if(src_header_field) {
            /* copy the entire header field */
            header_field = apt_header_field_copy(src_header_field,pool);
            /* duplicate the parsed (typed) value alongside the raw text */
            mrcp_header_accessor_value_duplicate(header,header_field,src_header,src_header_field,pool);
        }
        else {
            /* copy only the name of the header field */
            header_field = apt_header_field_copy(mask_header_field,pool);
        }

        /* add the header field to the header section */
        apt_header_section_field_add(&header->header_section,header_field);
    }
    return TRUE;
}
/**
 * Render one "jmx bean" as text/plain on the response: a
 * "Name: <objectname>" line, then one "key: value" line per bean property,
 * terminated by a blank line.  Output errors from ap_rputs are ignored.
 */
static apr_status_t bmx_bean_print_text_plain(request_rec *r,
                                              const struct bmx_bean *bean)
{
    apr_size_t name_len = bmx_objectname_strlen(bean->objectname) + 1;
    char *name_buf = apr_palloc(r->pool, name_len);
    struct bmx_property *prop;
    const char *printed;

    (void)bmx_objectname_str(bean->objectname, name_buf, name_len);

    (void)ap_rputs("Name: ", r);
    (void)ap_rputs(name_buf, r);
    (void)ap_rputs("\n", r);

    /* Emit every property attached to the bean. */
    if (!APR_RING_EMPTY(&(bean->bean_props), bmx_property, link)) {
        for (prop = APR_RING_FIRST(&(bean->bean_props));
             prop != APR_RING_SENTINEL(&(bean->bean_props), bmx_property, link);
             prop = APR_RING_NEXT(prop, link)) {
            (void)ap_rputs(prop->key, r);
            (void)ap_rputs(": ", r);
            printed = property_print(r->pool, prop);
            if (printed)
                (void)ap_rputs(printed, r);
            (void)ap_rputs("\n", r);
        }
    }

    (void)ap_rputs("\n", r);

    return APR_SUCCESS;
}
/*
 * Block until no busy worker thread is still executing a task that was
 * queued by 'owner'.  Polls under me->lock, releasing it for 200ms between
 * checks so the workers can make progress.
 *
 * NOTE(review): elt is re-read after the lock is dropped in the inner loop;
 * presumably list elements stay valid (pool-allocated / recycled, not
 * freed) while the pool lives -- confirm against the element lifecycle.
 */
static void wait_on_busy_threads(apr_thread_pool_t *me, void *owner)
{
#ifndef NDEBUG
    apr_os_thread_t *os_thread;
#endif
    struct apr_thread_list_elt *elt;
    apr_thread_mutex_lock(me->lock);
    elt = APR_RING_FIRST(me->busy_thds);
    while (elt != APR_RING_SENTINEL(me->busy_thds, apr_thread_list_elt, link)) {
        if (elt->current_owner != owner) {
            elt = APR_RING_NEXT(elt, link);
            continue;
        }
#ifndef NDEBUG
        /* make sure the thread is not the one calling tasks_cancel */
        apr_os_thread_get(&os_thread, elt->thd);
#ifdef WIN32
        /* hack for apr win32 bug */
        assert(!apr_os_thread_equal(apr_os_thread_current(), os_thread));
#else
        assert(!apr_os_thread_equal(apr_os_thread_current(), *os_thread));
#endif
#endif
        /* Sleep-poll until this worker finishes the owner's task; the lock
         * must be dropped while sleeping or the worker could never clear
         * current_owner. */
        while (elt->current_owner == owner) {
            apr_thread_mutex_unlock(me->lock);
            apr_sleep(200 * 1000);
            apr_thread_mutex_lock(me->lock);
        }
        /* The ring may have changed while unlocked: restart the scan. */
        elt = APR_RING_FIRST(me->busy_thds);
    }
    apr_thread_mutex_unlock(me->lock);
    return;
}
/** Return the element following 'elem', or NULL when 'elem' is the tail. */
APT_DECLARE(apt_list_elem_t*) apt_list_next_elem_get(apt_obj_list_t *list, apt_list_elem_t *elem)
{
    apt_list_elem_t *candidate = APR_RING_NEXT(elem,link);
    return (candidate == APR_RING_SENTINEL(&list->head,apt_list_elem_t,link))
        ? NULL
        : candidate;
}
/** Return the element preceding 'elem', or NULL when 'elem' is the head. */
APT_DECLARE(apt_list_elem_t*) apt_list_prev_elem_get(apt_obj_list_t *list, apt_list_elem_t *elem)
{
    apt_list_elem_t *candidate = APR_RING_PREV(elem,link);
    return (candidate == APR_RING_SENTINEL(&list->head,apt_list_elem_t,link))
        ? NULL
        : candidate;
}
/*
 * Queue a task at its priority position and wake/spawn a worker.
 *
 * push != 0 queues behind tasks of equal priority (FIFO within priority);
 * push == 0 inserts ahead of them (LIFO) and updates the segment index so
 * the new task becomes the head of its priority segment.
 *
 * Returns APR_ENOMEM if the task cannot be allocated, otherwise the result
 * of any worker-thread creation attempt (APR_SUCCESS when none is needed).
 */
static apr_status_t add_task(apr_thread_pool_t *me, apr_thread_start_t func,
                             void *param, apr_byte_t priority, int push,
                             void *owner)
{
    apr_thread_pool_task_t *t;
    apr_thread_pool_task_t *t_loc;
    apr_thread_t *thd;
    apr_status_t rv = APR_SUCCESS;

    apr_thread_mutex_lock(me->lock);

    t = task_new(me, func, param, priority, owner, 0);
    if (NULL == t) {
        apr_thread_mutex_unlock(me->lock);
        return APR_ENOMEM;
    }

    /* add_if_empty() either inserts t itself (returns NULL) or hands back
     * the insertion hint within t's priority segment. */
    t_loc = add_if_empty(me, t);
    if (NULL == t_loc) {
        goto FINAL_EXIT;
    }

    if (push) {
        /* Skip past all tasks with the same or higher priority. */
        while (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) !=
               t_loc && t_loc->dispatch.priority >= t->dispatch.priority) {
            t_loc = APR_RING_NEXT(t_loc, link);
        }
    }
    APR_RING_INSERT_BEFORE(t_loc, t, link);
    if (!push) {
        /* t displaced the old segment head: repoint the index. */
        if (t_loc == me->task_idx[TASK_PRIORITY_SEG(t)]) {
            me->task_idx[TASK_PRIORITY_SEG(t)] = t;
        }
    }

  FINAL_EXIT:
    me->task_cnt++;
    if (me->task_cnt > me->tasks_high)
        me->tasks_high = me->task_cnt;
    /* Spawn a worker when none exist, or when all are busy, the pool is
     * below its max and the backlog exceeds the threshold. */
    if (0 == me->thd_cnt || (0 == me->idle_cnt && me->thd_cnt < me->thd_max &&
                             me->task_cnt > me->threshold)) {
        rv = apr_thread_create(&thd, NULL, thread_pool_func, me, me->pool);
        if (APR_SUCCESS == rv) {
            ++me->thd_cnt;
            if (me->thd_cnt > me->thd_high)
                me->thd_high = me->thd_cnt;
        }
    }

    apr_thread_mutex_unlock(me->lock);

    /* Wake an idle worker, if any. */
    apr_thread_mutex_lock(me->cond_lock);
    apr_thread_cond_signal(me->cond);
    apr_thread_mutex_unlock(me->cond_lock);

    return rv;
}
/* Subtract the elapsed time from every scheduled timer and reset the
 * manager's elapsed-time accumulator. */
static void mpf_timers_reschedule(mpf_timer_manager_t *manager)
{
    mpf_timer_t *cur = APR_RING_LAST(&manager->head);
    while(cur != APR_RING_SENTINEL(&manager->head, mpf_timer_t, link)) {
        cur->scheduled_time -= manager->elapsed_time;
        cur = APR_RING_PREV(cur, link);
    }
    manager->elapsed_time = 0;
}
/** Generate header section: write every header field to the text stream,
 *  then terminate the section with an empty line. */
APT_DECLARE(apt_bool_t) apt_header_section_generate(const apt_header_section_t *header, apt_text_stream_t *stream)
{
    apt_header_field_t *field = APR_RING_FIRST(&header->ring);
    while(field != APR_RING_SENTINEL(&header->ring, apt_header_field_t, link)) {
        apt_header_field_generate(field,stream);
        field = APR_RING_NEXT(field, link);
    }
    return apt_text_eol_insert(stream);
}
/* Subtract the elapsed time from every queued timer and reset the queue's
 * elapsed-time accumulator. */
static void apt_timers_reschedule(apt_timer_queue_t *queue)
{
    apt_timer_t *cur = APR_RING_LAST(&queue->head);
    while(cur != APR_RING_SENTINEL(&queue->head, apt_timer_t, link)) {
        cur->scheduled_time -= queue->elapsed_time;
        cur = APR_RING_PREV(cur, link);
    }
    queue->elapsed_time = 0;
}
/*
 * Interval until the earliest scheduled task is due; may be negative when
 * the task is already overdue.
 *
 * Precondition: the scheduled-task ring is non-empty -- on an empty ring
 * APR_RING_FIRST yields the sentinel, which the second assert catches only
 * in debug builds.  NOTE(review): presumably callers check
 * scheduled_task_cnt under me->lock first -- confirm at call sites.
 */
static apr_interval_time_t waiting_time(apr_thread_pool_t * me)
{
    apr_thread_pool_task_t *task = NULL;

    task = APR_RING_FIRST(me->scheduled_tasks);
    /* APR_RING_FIRST never returns NULL; this guards against corruption. */
    assert(task != NULL);
    assert(task != APR_RING_SENTINEL(me->scheduled_tasks,
                                     apr_thread_pool_task, link));
    return task->dispatch.time - apr_time_now();
}
/** Run one processing pass over every media context owned by the factory. */
MPF_DECLARE(apt_bool_t) mpf_context_factory_process(mpf_context_factory_t *factory)
{
    mpf_context_t *cur = APR_RING_FIRST(&factory->head);
    while(cur != APR_RING_SENTINEL(&factory->head, mpf_context_t, link)) {
        mpf_context_process(cur);
        cur = APR_RING_NEXT(cur, link);
    }
    return TRUE;
}
/*
 * Remove a descriptor from an epoll (or mTCP epoll) backed pollset.
 * Deregisters the fd from the kernel, then -- unless the pollset runs in
 * NOCOPY mode, where no bookkeeping element exists -- moves the matching
 * query-ring element to the dead ring for recycling.
 *
 * Returns APR_NOTFOUND when the kernel did not know the fd; the ring
 * cleanup still runs in that case.
 */
static apr_status_t impl_pollset_remove(apr_pollset_t *pollset,
                                        const apr_pollfd_t *descriptor)
{
    pfd_elem_t *ep;
    apr_status_t rv = APR_SUCCESS;
#ifdef HAVE_MTCP
    struct mtcp_epoll_event ev = {0};
#else
    struct epoll_event ev = {0}; /* ignored, but must be passed with
                                  * kernel < 2.6.9 */
#endif
    int ret = -1;

    if (descriptor->desc_type == APR_POLL_SOCKET) {
#ifdef HAVE_MTCP
        /* mTCP contexts are per-CPU; pick the one for the current CPU. */
        int cpu = sched_getcpu();
        ret = mtcp_epoll_ctl(g_mctx[cpu], pollset->p->epoll_fd,
                             EPOLL_CTL_DEL, descriptor->desc.s->socketdes,
                             &ev);
#else
        ret = epoll_ctl(pollset->p->epoll_fd, EPOLL_CTL_DEL,
                        descriptor->desc.s->socketdes, &ev);
#endif
    }
    else {
        /* File descriptors always go through the regular epoll fd. */
        ret = epoll_ctl(pollset->p->epoll_fd, EPOLL_CTL_DEL,
                        descriptor->desc.f->filedes, &ev);
    }
    if (ret < 0) {
        rv = APR_NOTFOUND;
    }

    if (!(pollset->flags & APR_POLLSET_NOCOPY)) {
        pollset_lock_rings();

        for (ep = APR_RING_FIRST(&(pollset->p->query_ring));
             ep != APR_RING_SENTINEL(&(pollset->p->query_ring), pfd_elem_t,
                                     link);
             ep = APR_RING_NEXT(ep, link)) {
            if (descriptor->desc.s == ep->pfd.desc.s) {
                APR_RING_REMOVE(ep, link);
                /* Parked for reuse rather than freed. */
                APR_RING_INSERT_TAIL(&(pollset->p->dead_ring),
                                     ep, pfd_elem_t, link);
                break;
            }
        }

        pollset_unlock_rings();
    }

    return rv;
}
/*
 * Pop the next task to run: a scheduled task whose dispatch time has
 * arrived takes precedence; otherwise the head of the priority-ordered
 * normal queue.  Returns NULL when nothing is runnable.
 *
 * NOTE: This function is not thread safe by itself. Caller should hold the lock
 */
static apr_thread_pool_task_t *pop_task(apr_thread_pool_t * me)
{
    apr_thread_pool_task_t *task = NULL;
    int seg;

    /* check for scheduled tasks */
    if (me->scheduled_task_cnt > 0) {
        task = APR_RING_FIRST(me->scheduled_tasks);
        assert(task != NULL);
        assert(task != APR_RING_SENTINEL(me->scheduled_tasks,
                                         apr_thread_pool_task, link));
        /* if it's time */
        if (task->dispatch.time <= apr_time_now()) {
            --me->scheduled_task_cnt;
            APR_RING_REMOVE(task, link);
            return task;
        }
    }
    /* check for normal tasks if we're not returning a scheduled task */
    if (me->task_cnt == 0) {
        return NULL;
    }

    task = APR_RING_FIRST(me->tasks);
    assert(task != NULL);
    assert(task != APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link));
    --me->task_cnt;
    seg = TASK_PRIORITY_SEG(task);
    /* If the popped task headed its priority segment, advance the segment
     * index; clear it when the segment is now empty (successor is the
     * sentinel or belongs to another segment). */
    if (task == me->task_idx[seg]) {
        me->task_idx[seg] = APR_RING_NEXT(task, link);
        if (me->task_idx[seg] == APR_RING_SENTINEL(me->tasks,
                                                   apr_thread_pool_task, link)
            || TASK_PRIORITY_SEG(me->task_idx[seg]) != seg) {
            me->task_idx[seg] = NULL;
        }
    }
    APR_RING_REMOVE(task, link);
    return task;
}
/** Execute the filters.
 *
 *  Runs every registered filter driver over the message in registration
 *  order: first the driver's test phase (p_test), then its scan phase
 *  (p_scan), accumulating the verdict in *res.  Returns -1 on the first
 *  driver failure (error pushed/set on the kerror stack), 0 otherwise.
 *  Filtering stops early -- still returning 0 -- when a driver's rating
 *  sets FILTER_EXEC_DENY or FILTER_EXEC_CHALLENGE, so the main loop can
 *  ask for confirmation.
 */
int kdfilter_exec(kdfilter *self,
                  struct filter_params *params,
                  struct filter_result *res)
{
    struct filter_driver * en;

    /* Start from a clean verdict. */
    memset(res, 0, sizeof(struct filter_result));

    for (en = APR_RING_FIRST(&self->filter_drv_list);
         en != APR_RING_SENTINEL(&self->filter_drv_list, filter_driver, link);
         en = APR_RING_NEXT(en, link)) {
        kerror_reset();

        INFO(_log_filter_, "Filtering message with filter: %s",
             en->filter_name);

        /* Filter test phase. */
        DEBUG(_log_filter_, "Filter %s test phase.", en->filter_name);
        if ((en->p_test)(self, en->private_data) < 0) {
            /* Preserve a driver-provided error if there is one. */
            if (kerror_has_error())
                KERROR_PUSH(_filter_, 0, "filter %s failed test phase",
                            en->filter_name);
            else
                KERROR_SET(_filter_, 0, "filter %s failed test phase",
                           en->filter_name);
            return -1;
        }

        /* Proceed to filtering. */
        DEBUG(_log_filter_, "Filter %s scan phase.", en->filter_name);
        if ((en->p_scan)(self, en->private_data, params, res) < 0) {
            if (kerror_has_error())
                KERROR_PUSH(_filter_, 0, "filter %s failed scan",
                            en->filter_name);
            else
                KERROR_SET(_filter_, 0, "filter %s failed scan",
                           en->filter_name);
            return -1;
        }

        if (res->msg_state)
            INFO(_log_filter_, "Filter %s report: %s",
                 en->filter_name, res->msg);
        else
            INFO(_log_filter_, "Filter %s returned an empty report.",
                 en->filter_name);

        DEBUG(_log_filter_, "Filter %s rating: %d.",
              en->filter_name, res->rating);

        /*
         * If one of those bit is set, that means we need to interrupt the filtering
         * and go back to the main loop for confirmation.
         */
        if ((res->rating & FILTER_EXEC_DENY) != 0 ||
            (res->rating & FILTER_EXEC_CHALLENGE) != 0)
            return 0;
    }

    return 0;
}
/*
 * Remove a descriptor from a kqueue-backed pollset.  kqueue registers read
 * and write interest as separate filters, so each requested event is
 * deleted individually; rv is APR_SUCCESS as soon as at least one delete
 * succeeds, APR_NOTFOUND otherwise.  The matching query-ring element is
 * moved to the dead ring for recycling.
 */
static apr_status_t impl_pollset_remove(apr_pollset_t *pollset,
                                        const apr_pollfd_t *descriptor)
{
    pfd_elem_t *ep;
    apr_status_t rv;
    apr_os_sock_t fd;

    pollset_lock_rings();

    if (descriptor->desc_type == APR_POLL_SOCKET) {
        fd = descriptor->desc.s->socketdes;
    }
    else {
        fd = descriptor->desc.f->filedes;
    }

    rv = APR_NOTFOUND; /* unless at least one of the specified conditions is */

    if (descriptor->reqevents & APR_POLLIN) {
        EV_SET(&pollset->p->kevent, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);

        if (kevent(pollset->p->kqueue_fd, &pollset->p->kevent, 1, NULL, 0,
                   NULL) != -1) {
            rv = APR_SUCCESS;
        }
    }

    if (descriptor->reqevents & APR_POLLOUT) {
        EV_SET(&pollset->p->kevent, fd, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);

        if (kevent(pollset->p->kqueue_fd, &pollset->p->kevent, 1, NULL, 0,
                   NULL) != -1) {
            rv = APR_SUCCESS;
        }
    }

    for (ep = APR_RING_FIRST(&(pollset->p->query_ring));
         ep != APR_RING_SENTINEL(&(pollset->p->query_ring), pfd_elem_t, link);
         ep = APR_RING_NEXT(ep, link)) {
        if (descriptor->desc.s == ep->pfd.desc.s) {
            APR_RING_REMOVE(ep, link);
            /* Parked for reuse rather than freed. */
            APR_RING_INSERT_TAIL(&(pollset->p->dead_ring),
                                 ep, pfd_elem_t, link);
            break;
        }
    }

    pollset_unlock_rings();

    return rv;
}
/*
 * Schedule a task to run in "time" microseconds.  The scheduled-task ring
 * is kept ordered by dispatch time; the new task is inserted before the
 * first task that dispatches later, or at the tail when none does.
 *
 * Fix: the previous loop condition "while (NULL != t_loc)" could never be
 * false (APR_RING_FIRST never yields NULL) and, on an empty ring, the body
 * read t_loc->dispatch.time through the ring sentinel -- an out-of-bounds
 * read.  The loop now terminates at the sentinel; inserting before the
 * sentinel is equivalent to a tail insert, so ordering is unchanged.
 *
 * Returns APR_ENOMEM when the task cannot be allocated, otherwise the
 * result of any worker-thread creation attempt (APR_SUCCESS when no new
 * worker is needed).
 */
static apr_status_t schedule_task(apr_thread_pool_t *me,
                                  apr_thread_start_t func, void *param,
                                  void *owner, apr_interval_time_t time)
{
    apr_thread_pool_task_t *t;
    apr_thread_pool_task_t *t_loc;
    apr_thread_t *thd;
    apr_status_t rv = APR_SUCCESS;

    apr_thread_mutex_lock(me->lock);

    t = task_new(me, func, param, 0, owner, time);
    if (NULL == t) {
        apr_thread_mutex_unlock(me->lock);
        return APR_ENOMEM;
    }

    /* Find the first entry dispatching strictly later than t and insert
     * ahead of it; falling through to the sentinel inserts at the tail. */
    t_loc = APR_RING_FIRST(me->scheduled_tasks);
    while (t_loc != APR_RING_SENTINEL(me->scheduled_tasks,
                                      apr_thread_pool_task, link)
           && t->dispatch.time >= t_loc->dispatch.time) {
        t_loc = APR_RING_NEXT(t_loc, link);
    }
    APR_RING_INSERT_BEFORE(t_loc, t, link);
    ++me->scheduled_task_cnt;

    /* there should be at least one thread for scheduled tasks */
    if (0 == me->thd_cnt) {
        rv = apr_thread_create(&thd, NULL, thread_pool_func, me, me->pool);
        if (APR_SUCCESS == rv) {
            ++me->thd_cnt;
            if (me->thd_cnt > me->thd_high)
                me->thd_high = me->thd_cnt;
        }
    }

    apr_thread_mutex_unlock(me->lock);

    /* Wake a worker so it can recompute its wait interval. */
    apr_thread_mutex_lock(me->cond_lock);
    apr_thread_cond_signal(me->cond);
    apr_thread_mutex_unlock(me->cond_lock);

    return rv;
}
/* Cleanup function. */ static apr_status_t kdfilter_delete(void *data) { kdfilter *self = (kdfilter *)data; struct filter_driver *drv; /* Call the close method of each filters. */ for (drv = APR_RING_FIRST(&self->filter_drv_list); drv != APR_RING_SENTINEL(&self->filter_drv_list, filter_driver, link); drv = APR_RING_NEXT(drv, link)) { DEBUG(_log_filter_, "Unregistering filter: %s.", drv->filter_name); (drv->p_close)(self, drv->private_data); } return APR_SUCCESS; }
/** Parse MRCP channel-identifier.
 *  Scans the header section for the Channel-Identifier field; when found,
 *  splits its "<session-id>@<resource>" value into channel_id, removes the
 *  field from the section and returns TRUE.  Returns FALSE otherwise. */
MRCP_DECLARE(apt_bool_t) mrcp_channel_id_parse(mrcp_channel_id *channel_id, mrcp_message_header_t *header, apr_pool_t *pool)
{
    apt_header_field_t *field;
    for(field = APR_RING_FIRST(&header->header_section.ring);
        field != APR_RING_SENTINEL(&header->header_section.ring, apt_header_field_t, link);
        field = APR_RING_NEXT(field, link)) {
        if(!field->value.length) {
            continue;
        }
        if(strncasecmp(field->name.buf,MRCP_CHANNEL_ID,MRCP_CHANNEL_ID_LENGTH) != 0) {
            continue;
        }
        apt_id_resource_parse(&field->value,'@',&channel_id->session_id,&channel_id->resource_name,pool);
        apt_header_section_field_remove(&header->header_section,field);
        return TRUE;
    }
    return FALSE;
}
/* Insert a timer into the queue, which is kept sorted by scheduled_time.
 * Scans backwards from the tail for the last entry not later than the new
 * timer and splices in after it; an empty queue (or one where every entry
 * fires later) makes the timer the new head. */
static APR_INLINE apt_bool_t apt_timer_insert(apt_timer_queue_t *timer_queue, apt_timer_t *timer)
{
    apt_timer_t *cur = APR_RING_LAST(&timer_queue->head);
    while(cur != APR_RING_SENTINEL(&timer_queue->head, apt_timer_t, link)) {
        if(cur->scheduled_time <= timer->scheduled_time) {
            APR_RING_INSERT_AFTER(cur,timer,link);
            return TRUE;
        }
        cur = APR_RING_PREV(cur, link);
    }
    APR_RING_INSERT_HEAD(&timer_queue->head,timer,apt_timer_t,link);
    return TRUE;
}
/* Insert a timer into the manager's queue, which is kept sorted by
 * scheduled_time.  Scans backwards from the tail for the last entry not
 * later than the new timer and splices in after it; otherwise the timer
 * becomes the new head. */
static APR_INLINE apt_bool_t mpf_timer_insert(mpf_timer_manager_t *manager, mpf_timer_t *timer)
{
    mpf_timer_t *cur = APR_RING_LAST(&manager->head);
    while(cur != APR_RING_SENTINEL(&manager->head, mpf_timer_t, link)) {
        if(cur->scheduled_time <= timer->scheduled_time) {
            APR_RING_INSERT_AFTER(cur,timer,link);
            return TRUE;
        }
        cur = APR_RING_PREV(cur, link);
    }
    APR_RING_INSERT_HEAD(&manager->head,timer,mpf_timer_t,link);
    return TRUE;
}
/* Look up a connection by its remote IP string; NULL when the agent or IP
 * is missing or no connection matches. */
static mrcp_connection_t* mrcp_connection_find(mrcp_connection_agent_t *agent, const apt_str_t *remote_ip)
{
    mrcp_connection_t *conn;

    if(!agent || !remote_ip) {
        return NULL;
    }

    conn = APR_RING_FIRST(&agent->connection_list);
    while(conn != APR_RING_SENTINEL(&agent->connection_list, mrcp_connection_t, link)) {
        if(apt_string_compare(&conn->remote_ip,remote_ip) == TRUE) {
            return conn;
        }
        conn = APR_RING_NEXT(conn, link);
    }
    return NULL;
}
/*
 * Find an established connection whose remote sockaddr matches the
 * descriptor's ip:port.
 *
 * NOTE(review): apr_sockaddr_info_get() allocates from connection->pool on
 * every iteration, so repeated lookups grow the pools of non-matching
 * connections -- presumably acceptable for the expected call frequency;
 * confirm, or consider a scratch pool.
 */
static mrcp_connection_t* mrcp_client_agent_connection_find(mrcp_connection_agent_t *agent, mrcp_control_descriptor_t *descriptor)
{
    apr_sockaddr_t *sockaddr;
    mrcp_connection_t *connection;
    for(connection = APR_RING_FIRST(&agent->connection_list);
            connection != APR_RING_SENTINEL(&agent->connection_list, mrcp_connection_t, link);
                connection = APR_RING_NEXT(connection, link)) {
        /* Resolve the descriptor's address and compare both address and
         * port against the connection's remote endpoint. */
        if(apr_sockaddr_info_get(&sockaddr,descriptor->ip.buf,APR_INET,descriptor->port,0,connection->pool) == APR_SUCCESS) {
            if(apr_sockaddr_equal(sockaddr,connection->r_sockaddr) != 0 &&
                descriptor->port == connection->r_sockaddr->port) {
                return connection;
            }
        }
    }
    return NULL;
}
/* Drop every scheduled task queued by 'owner', keeping the scheduled-task
 * counter in sync.  Caller is expected to serialize access to the pool. */
static apr_status_t remove_scheduled_tasks(apr_thread_pool_t *me,
                                           void *owner)
{
    apr_thread_pool_task_t *cur = APR_RING_FIRST(me->scheduled_tasks);

    while (cur != APR_RING_SENTINEL(me->scheduled_tasks,
                                    apr_thread_pool_task, link)) {
        /* Grab the successor before a potential unlink. */
        apr_thread_pool_task_t *succ = APR_RING_NEXT(cur, link);
        /* if this is the owner remove it */
        if (cur->owner == owner) {
            --me->scheduled_task_cnt;
            APR_RING_REMOVE(cur, link);
        }
        cur = succ;
    }
    return APR_SUCCESS;
}
/** Parse MRCP header fields.
 *
 *  Walks every field in the header section and tries to parse its value
 *  first as a resource-specific header (shifting its id past the generic
 *  range on success), then as a generic header.  Parsed fields are
 *  re-registered in the section; unrecognized fields are left in place and
 *  logged as warnings.  Always returns TRUE.
 */
MRCP_DECLARE(apt_bool_t) mrcp_header_fields_parse(mrcp_message_header_t *header, apr_pool_t *pool)
{
    apt_header_field_t *header_field;
    for(header_field = APR_RING_FIRST(&header->header_section.ring);
            header_field != APR_RING_SENTINEL(&header->header_section.ring, apt_header_field_t, link);
                header_field = APR_RING_NEXT(header_field, link)) {
        if(mrcp_header_field_value_parse(&header->resource_header_accessor,header_field,pool) == TRUE) {
            /* resource-specific ids live above the generic id range */
            header_field->id += GENERIC_HEADER_COUNT;
            apt_header_section_field_set(&header->header_section,header_field);
        }
        else if(mrcp_header_field_value_parse(&header->generic_header_accessor,header_field,pool) == TRUE) {
            apt_header_section_field_set(&header->header_section,header_field);
        }
        else {
            apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Unknown MRCP header field: %s",header_field->name.buf);
        }
    }

    return TRUE;
}
/** Set (copy) MRCP header fields.
 *
 *  For every field in src_header: if 'header' already has a field with the
 *  same id, overwrite its raw value; otherwise copy the whole field and add
 *  it to the section.  In both cases the parsed (typed) value is duplicated
 *  via the header accessor.  Always returns TRUE.
 */
MRCP_DECLARE(apt_bool_t) mrcp_header_fields_set(mrcp_message_header_t *header, const mrcp_message_header_t *src_header, apr_pool_t *pool)
{
    apt_header_field_t *header_field;
    const apt_header_field_t *src_header_field;
    for(src_header_field = APR_RING_FIRST(&src_header->header_section.ring);
            src_header_field != APR_RING_SENTINEL(&src_header->header_section.ring, apt_header_field_t, link);
                src_header_field = APR_RING_NEXT(src_header_field, link)) {
        header_field = apt_header_section_field_get(&header->header_section,src_header_field->id);
        if(header_field) {
            /* this header field has already been set, just copy its value */
            apt_string_copy(&header_field->value,&src_header_field->value,pool);
        }
        else {
            /* copy the entire header field and add it to the header section */
            header_field = apt_header_field_copy(src_header_field,pool);
            apt_header_section_field_add(&header->header_section,header_field);
        }

        /* keep the parsed representation in sync with the raw value */
        mrcp_header_accessor_value_duplicate(header,header_field,src_header,src_header_field,pool);
    }

    return TRUE;
}
/*
 * purge cache entries
 *
 * Shrinks the cache under 'max' bytes in up to three passes over the
 * global entry ring: (1) entries stamped in the future (clock was
 * corrected), (2) expired entries, (3) oldest-first eviction of whatever
 * remains.  Each pass stops as soon as the size target is met or the run
 * was interrupted; statistics are printed on every normal exit.
 */
static void purge(char *path, apr_pool_t *pool, apr_off_t max)
{
    apr_off_t sum, total, entries, etotal;
    ENTRY *e, *n, *oldest;

    sum = 0;
    entries = 0;

    /* First pass: total up current size and entry count. */
    for (e = APR_RING_FIRST(&root);
         e != APR_RING_SENTINEL(&root, _entry, link);
         e = APR_RING_NEXT(e, link)) {
        sum += e->hsize;
        sum += e->dsize;
        entries++;
    }

    total = sum;
    etotal = entries;

    if (sum <= max) {
        printstats(total, sum, max, etotal, entries);
        return;
    }

    /* process all entries with a timestamp in the future, this may
     * happen if a wrong system time is corrected
     */
    for (e = APR_RING_FIRST(&root);
         e != APR_RING_SENTINEL(&root, _entry, link) && !interrupted;) {
        n = APR_RING_NEXT(e, link);
        if (e->response_time > now || e->htime > now || e->dtime > now) {
            delete_entry(path, e->basename, pool);
            sum -= e->hsize;
            sum -= e->dsize;
            entries--;
            APR_RING_REMOVE(e, link);
            if (sum <= max) {
                if (!interrupted) {
                    printstats(total, sum, max, etotal, entries);
                }
                return;
            }
        }
        e = n;
    }

    if (interrupted) {
        return;
    }

    /* process all entries which are expired */
    for (e = APR_RING_FIRST(&root);
         e != APR_RING_SENTINEL(&root, _entry, link) && !interrupted;) {
        n = APR_RING_NEXT(e, link);
        if (e->expire != APR_DATE_BAD && e->expire < now) {
            delete_entry(path, e->basename, pool);
            sum -= e->hsize;
            sum -= e->dsize;
            entries--;
            APR_RING_REMOVE(e, link);
            if (sum <= max) {
                if (!interrupted) {
                    printstats(total, sum, max, etotal, entries);
                }
                return;
            }
        }
        e = n;
    }

    if (interrupted) {
        return;
    }

    /* process remaining entries oldest to newest, the check for an empty
     * ring actually isn't necessary except when the compiler does
     * corrupt 64bit arithmetics which happened to me once, so better safe
     * than sorry
     */
    while (sum > max && !interrupted
           && !APR_RING_EMPTY(&root, _entry, link)) {
        /* Linear scan for the entry with the oldest data time. */
        oldest = APR_RING_FIRST(&root);

        for (e = APR_RING_NEXT(oldest, link);
             e != APR_RING_SENTINEL(&root, _entry, link);
             e = APR_RING_NEXT(e, link)) {
            if (e->dtime < oldest->dtime) {
                oldest = e;
            }
        }

        delete_entry(path, oldest->basename, pool);
        sum -= oldest->hsize;
        sum -= oldest->dsize;
        entries--;
        APR_RING_REMOVE(oldest, link);
    }

    if (!interrupted) {
        printstats(total, sum, max, etotal, entries);
    }
}