void _lru_process_waiters(cache_t *c)
{
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    lru_page_wait_t *pw = NULL;
    cache_cond_t *cache_cond;
    ex_off_t bytes_free, bytes_needed;

    if (stack_size(cp->pending_free_tasks) > 0) {  //** Check on pending free tasks 1st
        while ((cache_cond = (cache_cond_t *)pop(cp->pending_free_tasks)) != NULL) {
            log_printf(15, "waking up pending task cache_cond=%p stack_size left=%d\n",
                       cache_cond, stack_size(cp->pending_free_tasks));
            apr_thread_cond_signal(cache_cond->cond);  //** Wake up the paused thread
        }
//        return;
    }

    if (stack_size(cp->waiting_stack) > 0) {  //** Also handle the tasks waiting for flushes to complete
        bytes_free = _lru_max_bytes(c) - cp->bytes_used;

        move_to_top(cp->waiting_stack);
        pw = get_ele_data(cp->waiting_stack);
        bytes_needed = pw->bytes_needed;
        while ((pw != NULL) && (bytes_needed <= bytes_free)) {
            bytes_free -= bytes_needed;
            delete_current(cp->waiting_stack, 1, 0);
            log_printf(15, "waking up waiting stack pw=%p\n", pw);  //** %p: pw is a pointer
            apr_thread_cond_signal(pw->cond);  //** Wake up the paused thread

            //** Get the next one if available
            pw = get_ele_data(cp->waiting_stack);
            bytes_needed = (pw == NULL) ? bytes_free + 1 : pw->bytes_needed;
        }
    }
}
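/* A hedged sketch (an assumed reconstruction, not code from the source) of the
 * waiter side that _lru_process_waiters() wakes: a thread needing cache space
 * queues an lru_page_wait_t on cp->waiting_stack and blocks on its private
 * condition variable until enough bytes are free. The pw.cond field,
 * cp->mpool, cache_lock()/cache_unlock(), c->lock and the exact wait predicate
 * are all assumptions based on the snippet above. */
static void _lru_wait_for_space(cache_t *c, ex_off_t bytes_needed)
{
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;
    lru_page_wait_t pw;

    pw.bytes_needed = bytes_needed;
    apr_thread_cond_create(&pw.cond, cp->mpool);  /* assumed pool field */

    cache_lock(c);  /* assumed to guard cp->waiting_stack and cp->bytes_used */
    push(cp->waiting_stack, &pw);
    while (_lru_max_bytes(c) - cp->bytes_used < bytes_needed) {
        apr_thread_cond_wait(pw.cond, c->lock);  /* woken by _lru_process_waiters() */
    }
    cache_unlock(c);

    apr_thread_cond_destroy(pw.cond);
}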
h2_stream *h2_mplx_next_submit(h2_mplx *m, h2_stream_set *streams)
{
    apr_status_t status;
    h2_stream *stream = NULL;

    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return NULL;
    }
    status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        h2_io *io = h2_io_set_get_highest_prio(m->ready_ios);
        if (io) {
            h2_response *response = io->response;
            h2_io_set_remove(m->ready_ios, io);

            stream = h2_stream_set_get(streams, response->stream_id);
            if (stream) {
                h2_stream_set_response(stream, response, io->bbout);
                if (io->output_drained) {
                    apr_thread_cond_signal(io->output_drained);
                }
            }
            else {
                ap_log_cerror(APLOG_MARK, APLOG_WARNING, APR_NOTFOUND, m->c,
                              APLOGNO(02953) "h2_mplx(%ld): stream for response %d",
                              m->id, response->stream_id);
            }
        }
        apr_thread_mutex_unlock(m->lock);
    }
    return stream;
}
apr_status_t h2_mplx_out_readx(h2_mplx *m, int stream_id,
                               h2_io_data_cb *cb, void *ctx,
                               apr_size_t *plen, int *peos)
{
    apr_status_t status;
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }
    status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
        if (io) {
            status = h2_io_out_readx(io, cb, ctx, plen, peos);
            if (status == APR_SUCCESS && io->output_drained) {
                apr_thread_cond_signal(io->output_drained);
            }
        }
        else {
            status = APR_ECONNABORTED;
        }
        apr_thread_mutex_unlock(m->lock);
    }
    return status;
}
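/* A hedged sketch (assumed, not shown in the source) of the party that
 * io->output_drained wakes: a stream task whose output brigade is full parks a
 * condition variable in the io and waits under m->lock until a reader such as
 * h2_mplx_out_readx() drains data and signals. Everything except
 * io->output_drained and m->lock is an assumption. */
static void wait_output_drained(h2_mplx *m, h2_io *io, apr_thread_cond_t *cond)
{
    apr_thread_mutex_lock(m->lock);
    io->output_drained = cond;
    apr_thread_cond_wait(cond, m->lock);  /* signalled after a successful read */
    io->output_drained = NULL;
    apr_thread_mutex_unlock(m->lock);
}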
apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
                                    apr_pool_t *pool_to_recycle)
{
    apr_status_t rv;
    apr_int32_t prev_idlers;

    ap_push_pool(queue_info, pool_to_recycle);

    /* Atomically increment the count of idle workers */
    prev_idlers = apr_atomic_inc32(&(queue_info->idlers)) - zero_pt;

    /* If other threads are waiting on a worker, wake one up */
    if (prev_idlers < 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            AP_DEBUG_ASSERT(0);
            return rv;
        }
        rv = apr_thread_cond_signal(queue_info->wait_for_idler);
        if (rv != APR_SUCCESS) {
            apr_thread_mutex_unlock(queue_info->idlers_mutex);
            return rv;
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    return APR_SUCCESS;
}
ZEKE_HIDDEN apr_uint32_t zktool_vxid_get(struct zktool_vxid_pool *vp)
{
    assert(vp != NULL && vp->magic == ZKTOOL_VXID_MAGIC);

    do {
        if (vp->count == 0) {
#ifdef ZEKE_USE_THREADS
            assert(apr_thread_mutex_lock(vp->mutex) == APR_SUCCESS);
            while (vp->busy > 0) {
                vp->waiter++;
                assert(apr_thread_cond_wait(vp->cond, vp->mutex) == APR_SUCCESS);
                vp->waiter--;
            }
            vp->busy++;
            assert(apr_thread_mutex_unlock(vp->mutex) == APR_SUCCESS);
#endif
            vp->next = *VXID(vp).base;
            vp->count = *VXID(vp).chunk;
            *VXID(vp).base += vp->count;  /* advance the shared base past the chunk we claimed */
#ifdef ZEKE_USE_THREADS
            assert(apr_thread_mutex_lock(vp->mutex) == APR_SUCCESS);
            vp->busy--;
            if (vp->waiter)
                assert(apr_thread_cond_signal(vp->cond) == APR_SUCCESS);
            assert(apr_thread_mutex_unlock(vp->mutex) == APR_SUCCESS);
#endif
        }
        vp->count--;
        vp->next++;
    } while (vp->next == 0);

    return vp->next;
}
/* Write to the audio queue. */
int audio_queue_write(audio_queue_t *queue, void *data, apr_size_t *data_len)
{
    int status = 0;

    if ((queue == NULL) || (data == NULL) || (data_len == NULL))
        return -1;

    if (queue->mutex != NULL)
        apr_thread_mutex_lock(queue->mutex);

    if (audio_buffer_write(queue->buffer, data, *data_len) > 0) {
        queue->write_bytes = queue->write_bytes + *data_len;
        if (queue->waiting <= audio_buffer_inuse(queue->buffer)) {
            if (queue->cond != NULL)
                apr_thread_cond_signal(queue->cond);
        }
    }
    else {
        ast_log(LOG_WARNING, "(%s) Audio queue overflow!\n", queue->name);
        *data_len = 0;
        status = -1;
    }

    if (queue->mutex != NULL)
        apr_thread_mutex_unlock(queue->mutex);

    return status;
}
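/* A hedged sketch (assumed, not from the source) of the matching reader that
 * audio_queue_write() signals: it records how many bytes it needs in
 * queue->waiting, blocks on queue->cond until enough data is buffered, then
 * drains the buffer. The audio_buffer_read() call and this exact signature are
 * assumptions; only the field names come from the snippet above. */
int audio_queue_read(audio_queue_t *queue, void *data, apr_size_t *data_len)
{
    if ((queue == NULL) || (data == NULL) || (data_len == NULL))
        return -1;

    if (queue->mutex != NULL)
        apr_thread_mutex_lock(queue->mutex);

    /* Tell the writer how much data we need before it signals us */
    queue->waiting = *data_len;
    while ((queue->cond != NULL) &&
           (audio_buffer_inuse(queue->buffer) < queue->waiting)) {
        apr_thread_cond_wait(queue->cond, queue->mutex);
    }
    queue->waiting = 0;

    *data_len = audio_buffer_read(queue->buffer, data, *data_len);  /* hypothetical */

    if (queue->mutex != NULL)
        apr_thread_mutex_unlock(queue->mutex);

    return 0;
}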
void _gop_dummy_submit_op(void *arg, op_generic_t *op)
{
    int dolock = 0;

    log_printf(15, "gid=%d\n", gop_id(op));

//    if (op->base.cb != NULL) {  //** gop is on a q
    apr_thread_mutex_lock(gd_lock);
    push(gd_stack, op);
    apr_thread_cond_signal(gd_cond);
    apr_thread_mutex_unlock(gd_lock);
    return;
//    }

    //*-------* This isn't executed below -----------

    if (apr_thread_mutex_trylock(op->base.ctl->lock) != APR_SUCCESS) dolock = 1;
    unlock_gop(op);

//log_printf(15, "dolock=%d gid=%d err=%d APR_SUCCESS=%d\n", dolock, gop_id(op), err, APR_SUCCESS);
    op->base.started_execution = 1;
    gop_mark_completed(op, op->base.status);

    if (dolock == 1) { lock_gop(op); }  //** lock_gop is a macro so need the {}

    return;
}
/* Response or event from the server arrived */
static apt_bool_t OnMessageReceive(mrcp_application_t* application,
                                   mrcp_session_t* session,
                                   mrcp_channel_t* channel,
                                   mrcp_message_t* message)
{
    (void) application;
    (void) session;
    (void) channel;

    /* Analyze, update your application state and reply messages here */
    if (message->start_line.message_type == MRCP_MESSAGE_TYPE_RESPONSE) {
        if (message->start_line.status_code != MRCP_STATUS_CODE_SUCCESS)
            return sess_failed("SPEAK request failed");
        if (message->start_line.request_state != MRCP_REQUEST_STATE_INPROGRESS)
            return sess_failed("Failed to start SPEAK processing");
        /* Start writing audio to file */
        stream_started = TRUE;
        return TRUE; /* Does not actually matter */
    }

    if (message->start_line.message_type != MRCP_MESSAGE_TYPE_EVENT)
        return sess_failed("Unexpected message from the server");

    if (message->start_line.method_id == SYNTHESIZER_SPEAK_COMPLETE) {
        mrcp_synth_header_t* hdr = (mrcp_synth_header_t*) mrcp_resource_header_get(message);
        printf("Speak complete: %d %.*s", hdr->completion_cause,
               (int) hdr->completion_reason.length, hdr->completion_reason.buf);
        stream_started = FALSE;
        err = 0;
        apr_thread_cond_signal(cond);
        return TRUE; /* Does not actually matter */
    }

    return sess_failed("Unknown message received");
}
/* Shorthand for graceful failure: print the message, signal the waiting
 * thread's condition variable and return FALSE */
static apt_bool_t sess_failed(char const* msg)
{
    puts(msg);
    err = 1;
    apr_thread_cond_signal(cond);
    return FALSE;
}
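/* A hedged sketch (assumed) of the main thread that OnMessageReceive() and
 * sess_failed() wake: it blocks on the global `cond` until one of the
 * callbacks signals, then inspects `err`. The `mutex` name and the `done`
 * flag (which would be set alongside `err` before each signal) are
 * hypothetical additions for illustration. */
static int wait_for_completion(void)
{
    apr_thread_mutex_lock(mutex);
    while (!done) {  /* looping guards against spurious wakeups */
        apr_thread_cond_wait(cond, mutex);
    }
    apr_thread_mutex_unlock(mutex);
    return err;
}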
apr_status_t h2_mplx_out_rst(h2_mplx *m, int stream_id, int error)
{
    apr_status_t status;
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }
    status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        if (!m->aborted) {
            h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
            if (io && !io->rst_error && !io->orphaned) {
                h2_io_rst(io, error);
                if (!io->response) {
                    h2_io_set_add(m->ready_ios, io);
                }
                H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_rst");

                have_out_data_for(m, stream_id);
                if (io->output_drained) {
                    apr_thread_cond_signal(io->output_drained);
                }
            }
            else {
                status = APR_ECONNABORTED;
            }
        }
        apr_thread_mutex_unlock(m->lock);
    }
    return status;
}
static void pong(toolbox_t *box)
{
    apr_status_t rv;
    abts_case *tc = box->tc;

    rv = apr_thread_mutex_lock(box->mutex);
    ABTS_SUCCESS(rv);

    if (state == TOSS)
        state = PONG;

    do {
        rv = apr_thread_cond_signal(box->cond);
        ABTS_SUCCESS(rv);

        state = PING;

        rv = apr_thread_cond_wait(box->cond, box->mutex);
        ABTS_SUCCESS(rv);
        ABTS_TRUE(tc, state == PONG || state == OVER);
    } while (state != OVER);

    rv = apr_thread_mutex_unlock(box->mutex);
    ABTS_SUCCESS(rv);

    rv = apr_thread_cond_broadcast(box->cond);
    ABTS_SUCCESS(rv);
}
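/* A hedged sketch of the `ping` counterpart implied by the test above: it
 * drives the shared state machine from the other side, flipping the state to
 * PONG and signalling until a controller sets state = OVER. This is an assumed
 * symmetric reconstruction, not the test suite's actual code. */
static void ping(toolbox_t *box)
{
    apr_status_t rv;
    abts_case *tc = box->tc;

    rv = apr_thread_mutex_lock(box->mutex);
    ABTS_SUCCESS(rv);

    if (state == TOSS)
        state = PING;

    do {
        rv = apr_thread_cond_signal(box->cond);
        ABTS_SUCCESS(rv);

        state = PONG;

        rv = apr_thread_cond_wait(box->cond, box->mutex);
        ABTS_SUCCESS(rv);
        ABTS_TRUE(tc, state == PING || state == OVER);
    } while (state != OVER);

    rv = apr_thread_mutex_unlock(box->mutex);
    ABTS_SUCCESS(rv);

    rv = apr_thread_cond_broadcast(box->cond);
    ABTS_SUCCESS(rv);
}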
apr_status_t h2_mplx_out_read_to(h2_mplx *m, int stream_id,
                                 apr_bucket_brigade *bb,
                                 apr_off_t *plen, int *peos,
                                 apr_table_t **ptrailers)
{
    apr_status_t status;
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }
    status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
        if (io && !io->orphaned) {
            H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_read_to_pre");
            status = h2_io_out_read_to(io, bb, plen, peos);
            H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_read_to_post");
            if (status == APR_SUCCESS && io->output_drained) {
                apr_thread_cond_signal(io->output_drained);
            }
        }
        else {
            status = APR_ECONNABORTED;
        }
        /* Guard the io dereference: the lookup above may have returned NULL */
        *ptrailers = (io && *peos && io->response) ? io->response->trailers : NULL;
        apr_thread_mutex_unlock(m->lock);
    }
    return status;
}
apr_status_t h2_mplx_in_close(h2_mplx *m, int stream_id)
{
    apr_status_t status;
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }
    status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
        if (io && !io->orphaned) {
            status = h2_io_in_close(io);
            H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_close");
            if (io->input_arrived) {
                apr_thread_cond_signal(io->input_arrived);
            }
            io_process_events(m, io);
        }
        else {
            status = APR_ECONNABORTED;
        }
        apr_thread_mutex_unlock(m->lock);
    }
    return status;
}
int _rs_simple_refresh(resource_service_fn_t *rs)
{
    rs_simple_priv_t *rss = (rs_simple_priv_t *)rs->priv;
    struct stat sbuf;
    int err;

//log_printf(0, "SKIPPING refresh\n");
//return(0);

    if (stat(rss->fname, &sbuf) != 0) {
        log_printf(1, "RS file missing!!! Using old definition. fname=%s\n", rss->fname);
        return(0);
    }

    if (rss->modify_time != sbuf.st_mtime) {  //** File changed so reload it
        log_printf(5, "RELOADING data\n");
        rss->modify_time = sbuf.st_mtime;

        if (rss->rid_table != NULL) tbx_list_destroy(rss->rid_table);
        if (rss->random_array != NULL) free(rss->random_array);

        err = _rs_simple_load(rs, rss->fname);  //** Load the new file
        _rss_make_check_table(rs);  //** and make the new inquiry table

        apr_thread_cond_signal(rss->cond);  //** Notify the check thread that we made a change

        return(err);
    }

    return(0);
}
rpc_state* add_cb(rpc_state *state)
{
    // Do nothing
    //uint32_t j = apr_atomic_add32(&n_rpc_, 1);
    uint32_t *res = (uint32_t*) state->raw_input;
    uint32_t k = *res;
    n_callbacks_[k] += 1;
    LOG_DEBUG("client callback executed. rpc no: %d", n_rpc_);

    if (n_callbacks_[k] == max_rpc_) {
        if (apr_atomic_dec32(&n_active_cli_) == 0) {
            tm_end_ = apr_time_now();
            apr_thread_mutex_lock(mx_rpc_);
            apr_thread_cond_signal(cd_rpc_);
            apr_thread_mutex_unlock(mx_rpc_);
        }
    }

    if (n_callbacks_[k] % 1000000 == 0) {
        apr_atomic_add32(&n_rpc_, 1000000);
        tm_middle_ = apr_time_now();
        uint64_t p = tm_middle_ - tm_begin_;
        double rate = n_rpc_ * 1.0 / p;
        LOG_INFO("rpc rate: %0.2f million per second", rate);
    }

    // do another rpc.
    if (max_rpc_ < 0 || n_issues_[k] < max_rpc_) {
        n_issues_[k]++;
        call_add(clis_[k], k);
    }
    return NULL;
}
void h2_mplx_request_done(h2_mplx **pm, int stream_id, const h2_request **preq)
{
    h2_mplx *m = *pm;
    int acquired;

    if (enter_mutex(m, &acquired) == APR_SUCCESS) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);

        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
                      "h2_mplx(%ld): request(%d) done", m->id, stream_id);
        if (io) {
            io->worker_done = 1;
            if (io->orphaned) {
                io_destroy(m, io, 0);
                if (m->join_wait) {
                    apr_thread_cond_signal(m->join_wait);
                }
            }
            else {
                /* hang around until the stream deregisters */
            }
        }

        if (preq) {
            /* someone wants another request, if we have one */
            *preq = pop_request(m);
        }
        if (!preq || !*preq) {
            /* No request to hand back to the worker, NULLify reference
             * and decrement count */
            *pm = NULL;
        }
        leave_mutex(m, acquired);
    }
}
rpc_state* add_cb(rpc_state *state)
{
    // Do nothing
    //uint32_t j = apr_atomic_add32(&n_rpc_, 1);
    n_rpc_ += 1;
    LOG_DEBUG("client callback executed. rpc no: %d", n_rpc_);

    if (n_rpc_ == max_rpc_ * n_client_) {
        tm_end_ = apr_time_now();
        apr_thread_mutex_lock(mx_rpc_);
        apr_thread_cond_signal(cd_rpc_);
        apr_thread_mutex_unlock(mx_rpc_);
    }

    if (n_rpc_ % 1000000 == 0) {
        tm_middle_ = apr_time_now();
        uint64_t p = tm_middle_ - tm_begin_;
        double rate = n_rpc_ * 1.0 / p;
        LOG_INFO("rpc rate: %0.2f million per second", rate);
    }

    // do another rpc.
    if (max_rpc_ < 0 || n_issued_ < max_rpc_) {
        n_issued_++;
        call_add(cli_);
    }
    return NULL;
}
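/* A hedged sketch (assumed) of the benchmark's main thread that the add_cb()
 * variants above wake: it primes one outstanding RPC per client, then blocks
 * on cd_rpc_ until the callback chain reports that every client has completed
 * max_rpc_ calls. The priming loop and the wait predicate are assumptions
 * inferred from the two callbacks. */
void run_benchmark(void)
{
    tm_begin_ = apr_time_now();
    for (int k = 0; k < n_client_; k++) {
        call_add(cli_);  /* each completion re-issues until max_rpc_ is reached */
    }

    apr_thread_mutex_lock(mx_rpc_);
    while (n_rpc_ < (uint32_t)(max_rpc_ * n_client_)) {
        apr_thread_cond_wait(cd_rpc_, mx_rpc_);  /* signalled by add_cb() */
    }
    apr_thread_mutex_unlock(mx_rpc_);

    double secs = (tm_end_ - tm_begin_) / 1e6;  /* apr_time_t is in microseconds */
    LOG_INFO("%d rpcs in %.2f s", n_rpc_, secs);
}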
/**
 * Push a new socket onto the queue.
 *
 * precondition: ap_queue_info_wait_for_idler has already been called
 *               to reserve an idle worker thread
 */
apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p)
{
    fd_queue_elem_t *elem;
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    AP_DEBUG_ASSERT(!queue->terminated);
    AP_DEBUG_ASSERT(!ap_queue_full(queue));

    elem = &queue->data[queue->in];
    queue->in++;
    if (queue->in >= queue->bounds)
        queue->in -= queue->bounds;
    elem->sd = sd;
    elem->p = p;
    queue->nelts++;

    apr_thread_cond_signal(queue->not_empty);

    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    return APR_SUCCESS;
}
/**
 * Retrieves the next item from the queue. If there are no
 * items available, return APR_EAGAIN. Once retrieved, the
 * item is placed into the address specified by 'data'.
 */
APU_DECLARE(apr_status_t) apr_queue_trypop(apr_queue_t *queue, void **data)
{
    apr_status_t rv;

    if (queue->terminated) {
        return APR_EOF; /* no more elements ever again */
    }

    rv = apr_thread_mutex_lock(queue->one_big_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    if (apr_queue_empty(queue)) {
        rv = apr_thread_mutex_unlock(queue->one_big_mutex);
        return APR_EAGAIN;
    }

    *data = queue->data[queue->out];
    queue->nelts--;

    queue->out = (queue->out + 1) % queue->bounds;
    if (queue->full_waiters) {
        Q_DBG("signal !full", queue);
        rv = apr_thread_cond_signal(queue->not_full);
        if (rv != APR_SUCCESS) {
            apr_thread_mutex_unlock(queue->one_big_mutex);
            return rv;
        }
    }

    rv = apr_thread_mutex_unlock(queue->one_big_mutex);
    return rv;
}
/**
 * Retrieves the next item from the queue. If there are no
 * items available, return APR_EAGAIN. Once retrieved,
 * the item is placed into the address specified by 'data'.
 */
apr_status_t etch_apr_queue_trypop(etch_apr_queue_t *queue, void **data)
{
    apr_status_t rv;

    if (queue->terminated)
        rv = APR_EOF; /* no more elements ever again */
    else
    if (APR_SUCCESS == (rv = apr_thread_mutex_lock(queue->one_big_mutex))) {
        if (etch_apr_queue_empty(queue))
            rv = APR_EAGAIN;
        else {
            *data = queue->data[queue->out];
            queue->nelts--;

            queue->out = (queue->out + 1) % queue->bounds;
            if (queue->full_waiters) {
                Q_DBG("signal !full", queue);
                rv = apr_thread_cond_signal(queue->not_full);
            }
        }

        apr_thread_mutex_unlock(queue->one_big_mutex);
    }

    return rv;
}
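/* A hedged usage sketch (assumed, not from the source): a consumer that polls
 * with trypop and backs off briefly when the queue is empty, stopping once the
 * queue is terminated. handle_item() is a hypothetical consumer callback. */
void drain_queue(etch_apr_queue_t *q)
{
    void *item;
    for (;;) {
        apr_status_t rv = etch_apr_queue_trypop(q, &item);
        if (rv == APR_SUCCESS) {
            handle_item(item);  /* hypothetical */
        }
        else if (rv == APR_EAGAIN) {
            apr_sleep(1000);    /* empty: back off 1 ms, then retry */
        }
        else {
            break;              /* APR_EOF (terminated) or a real error */
        }
    }
}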
/**
 * Push new data onto the queue. If the queue is full, return
 * APR_EAGAIN. Once the push operation has completed, it signals
 * other threads waiting in apr_queue_pop() that they may continue
 * consuming sockets.
 */
APU_DECLARE(apr_status_t) apr_queue_trypush(apr_queue_t *queue, void *data)
{
    apr_status_t rv;

    if (queue->terminated) {
        return APR_EOF; /* no more elements ever again */
    }

    rv = apr_thread_mutex_lock(queue->one_big_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    if (apr_queue_full(queue)) {
        rv = apr_thread_mutex_unlock(queue->one_big_mutex);
        return APR_EAGAIN;
    }

    queue->data[queue->in] = data;
    queue->in = (queue->in + 1) % queue->bounds;
    queue->nelts++;

    if (queue->empty_waiters) {
        Q_DBG("sig !empty", queue);
        rv = apr_thread_cond_signal(queue->not_empty);
        if (rv != APR_SUCCESS) {
            apr_thread_mutex_unlock(queue->one_big_mutex);
            return rv;
        }
    }

    rv = apr_thread_mutex_unlock(queue->one_big_mutex);
    return rv;
}
/**
 * Push new data onto the queue. If the queue is full, return
 * APR_EAGAIN. Once the push operation has completed, it signals
 * other threads waiting in apr_queue_pop() that they may continue
 * consuming sockets.
 */
apr_status_t etch_apr_queue_trypush(etch_apr_queue_t *queue, void *data)
{
    apr_status_t rv;

    if (queue->terminated)
        rv = APR_EOF;
    else
    if (APR_SUCCESS == (rv = apr_thread_mutex_lock(queue->one_big_mutex))) {
        if (etch_apr_queue_full(queue))
            rv = APR_EAGAIN;
        else {
            queue->data[queue->in] = data;
            queue->in = (queue->in + 1) % queue->bounds;
            queue->nelts++;

            if (queue->empty_waiters) {
                Q_DBG("sig !empty", queue);
                rv = apr_thread_cond_signal(queue->not_empty);
            }
        }

        apr_thread_mutex_unlock(queue->one_big_mutex);
    }

    return rv;
}
void close_server_port(tbx_ns_monitor_t *nm)
{
    apr_status_t dummy;

    //** Trigger a port shutdown
    apr_thread_mutex_lock(nm->lock);
    nm->shutdown_request = 1;
    log_printf(15, "close_server_port: port=%d Before cond_signal\n", nm->port);
    tbx_log_flush();
    apr_thread_cond_signal(nm->cond);
    log_printf(15, "close_server_port: port=%d After cond_signal\n", nm->port);
    tbx_log_flush();
    apr_thread_mutex_unlock(nm->lock);

    log_printf(15, "close_server_port: port=%d After unlock\n", nm->port);
    tbx_log_flush();

    //** Wait until the thread closes
    apr_thread_join(&dummy, nm->thread);

    log_printf(15, "close_server_port: port=%d After join\n", nm->port);
    tbx_log_flush();

    //** Free the actual struct
    free(nm->address);
    apr_thread_mutex_destroy(nm->lock);
    apr_thread_cond_destroy(nm->cond);
    apr_pool_destroy(nm->mpool);

    nm->port = -1;
}
int lru_cache_destroy(cache_t *c)
{
    apr_status_t value;
    cache_lru_t *cp = (cache_lru_t *)c->fn.priv;

    //** Shutdown the dirty thread
    cache_lock(c);
    c->shutdown_request = 1;
    apr_thread_cond_signal(cp->dirty_trigger);
    cache_unlock(c);

    apr_thread_join(&value, cp->dirty_thread);  //** Wait for it to complete

    cache_base_destroy(c);

    free_stack(cp->stack, 0);
    free_stack(cp->waiting_stack, 0);
    free_stack(cp->pending_free_tasks, 0);

    destroy_pigeon_coop(cp->free_pending_tables);
    destroy_pigeon_coop(cp->free_page_tables);

    free(cp);
    free(c);

    return(0);
}
void *monitor_thread(apr_thread_t *th, void *data)
{
    tbx_ns_monitor_t *nm = (tbx_ns_monitor_t *)data;
    tbx_ns_t *ns = nm->ns;
    int i;

    log_printf(15, "monitor_thread: Monitoring port %d\n", nm->port);

    apr_thread_mutex_lock(nm->lock);
    while (nm->shutdown_request == 0) {
        apr_thread_mutex_unlock(nm->lock);

        i = ns->connection_request(ns->sock, 1);

        if (i == 1) {  //** Got a request
            log_printf(15, "monitor_thread: port=%d ns=%d Got a connection request time=" TT "\n",
                       nm->port, tbx_ns_getid(ns), apr_time_now());

            //** Mark that I have a connection pending
            apr_thread_mutex_lock(nm->lock);
            nm->is_pending = 1;
            apr_thread_mutex_unlock(nm->lock);

            //** Wake up the calling thread
            apr_thread_mutex_lock(nm->trigger_lock);
            (*(nm->trigger_count))++;
            apr_thread_cond_signal(nm->trigger_cond);
            apr_thread_mutex_unlock(nm->trigger_lock);

            log_printf(15, "monitor_thread: port=%d ns=%d waiting for accept\n",
                       nm->port, tbx_ns_getid(ns));

            //** Sleep until my connection is accepted
            apr_thread_mutex_lock(nm->lock);
            while ((nm->is_pending == 1) && (nm->shutdown_request == 0)) {
                apr_thread_cond_wait(nm->cond, nm->lock);
                log_printf(15, "monitor_thread: port=%d ns=%d Cond triggered=" TT " trigger_count=%d\n",
                           nm->port, tbx_ns_getid(ns), apr_time_now(), *(nm->trigger_count));
            }
            apr_thread_mutex_unlock(nm->lock);

            log_printf(15, "monitor_thread: port=%d ns=%d Connection accepted time=" TT "\n",
                       nm->port, tbx_ns_getid(ns), apr_time_now());

            //** Update pending count
//            apr_thread_mutex_lock(nm->trigger_lock);
//            *(nm->trigger_count)--;
//            apr_thread_mutex_unlock(nm->trigger_lock);
        }

        apr_thread_mutex_lock(nm->lock);
    }
    apr_thread_mutex_unlock(nm->lock);

    //** Lastly shutdown my socket
    tbx_ns_close(ns);

    log_printf(15, "monitor_thread: Closing port %d\n", nm->port);

    apr_thread_exit(th, 0);
    return(NULL);
}
/**
 * Push new data onto the queue. Blocks if the queue is full. Once
 * the push operation has completed, it signals other threads waiting
 * in apr_queue_pop() that they may continue consuming sockets.
 * @param timeout added by Cisco. now uses apr_thread_cond_timedwait().
 *        interval of time to wait. zero means forever, negative indicates
 *        no wait, otherwise wait time in *microseconds*.
 * @return APR_SUCCESS, APR_EAGAIN, APR_EOF, APR_EINTR, APR_TIMEUP,
 *         or some APR error
 */
apr_status_t etch_apr_queue_push(etch_apr_queue_t *queue,
                                 apr_interval_time_t timeout,
                                 void *data)
{
    apr_status_t rv;

    if (queue->terminated)
        rv = APR_EOF; /* no more elements ever again */
    else
    if (APR_SUCCESS == (rv = apr_thread_mutex_lock(queue->one_big_mutex))) {
        do {
            if (etch_apr_queue_full(queue)) {
                if (!queue->terminated) {
                    if (-1 == timeout) {
                        rv = APR_EAGAIN; /* asked to not wait */
                        break;
                    }

                    queue->full_waiters++;
                    if (0 == timeout)
                        rv = apr_thread_cond_wait(queue->not_full, queue->one_big_mutex);
                    else
                        rv = apr_thread_cond_timedwait(queue->not_full, queue->one_big_mutex, timeout);
                    queue->full_waiters--;

                    if (rv != APR_SUCCESS)
                        break;
                }

                /* If we wake up and it's still full, then we were interrupted */
                if (etch_apr_queue_full(queue)) {
                    Q_DBG("queue full (intr)", queue);
                    rv = queue->terminated ? APR_EOF : APR_EINTR;
                    break;
                }
            }

            queue->data[queue->in] = data;
            queue->in = (queue->in + 1) % queue->bounds;
            queue->nelts++;

            if (queue->empty_waiters) {
                Q_DBG("sig !empty", queue);
                rv = apr_thread_cond_signal(queue->not_empty);
            }
        } while (0);

        apr_thread_mutex_unlock(queue->one_big_mutex);
    }

    return rv;
}
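/* A hedged usage sketch (assumed) showing the three timeout modes the Cisco
 * extension above documents: block forever, fail immediately, or wait a
 * bounded number of microseconds. */
void push_examples(etch_apr_queue_t *q, void *item)
{
    apr_status_t rv;

    rv = etch_apr_queue_push(q, 0, item);       /* 0: block until space frees up */
    rv = etch_apr_queue_push(q, -1, item);      /* negative: APR_EAGAIN if full */
    rv = etch_apr_queue_push(q, 500000, item);  /* wait up to 0.5 s, else APR_TIMEUP */
    (void)rv;
}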
static void have_out_data_for(h2_mplx *m, int stream_id)
{
    (void)stream_id;
    AP_DEBUG_ASSERT(m);
    if (m->added_output) {
        apr_thread_cond_signal(m->added_output);
    }
}
static void release(h2_mplx *m)
{
    if (!apr_atomic_dec32(&m->refs)) {
        if (m->join_wait) {
            apr_thread_cond_signal(m->join_wait);
        }
    }
}
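/* A hedged sketch (assumed) of the join side that release() wakes: the thread
 * tearing down the multiplexer registers a condition variable in m->join_wait
 * and waits under m->lock until the last reference is dropped. The predicate
 * and everything beyond m->refs, m->join_wait and m->lock are assumptions. */
static void wait_for_release(h2_mplx *m, apr_thread_cond_t *wait)
{
    apr_thread_mutex_lock(m->lock);
    m->join_wait = wait;
    while (apr_atomic_read32(&m->refs) > 0) {
        apr_thread_cond_wait(wait, m->lock);  /* signalled by release() */
    }
    m->join_wait = NULL;
    apr_thread_mutex_unlock(m->lock);
}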
apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
                                    apr_pool_t *pool_to_recycle)
{
    apr_status_t rv;
    int prev_idlers;

    /* If we have been given a pool to recycle, atomically link
     * it into the queue_info's list of recycled pools */
    if (pool_to_recycle) {
        struct recycled_pool *new_recycle;
        new_recycle = (struct recycled_pool *)apr_palloc(pool_to_recycle,
                                                         sizeof(*new_recycle));
        new_recycle->pool = pool_to_recycle;
        for (;;) {
            /* Save queue_info->recycled_pool in local variable next because
             * new_recycle->next can be changed after apr_atomic_casptr
             * function call. For gory details see PR 44402.
             */
            struct recycled_pool *next = queue_info->recycled_pools;
            new_recycle->next = next;
            if (apr_atomic_casptr((void *)&(queue_info->recycled_pools),
                                  new_recycle, next) == next) {
                break;
            }
        }
    }

    /* Atomically increment the count of idle workers */
    for (;;) {
        prev_idlers = queue_info->idlers;
        if (apr_atomic_cas32(&(queue_info->idlers), prev_idlers + 1,
                             prev_idlers) == prev_idlers) {
            break;
        }
    }

    /* If this thread just made the idle worker count nonzero,
     * wake up the listener. */
    if (prev_idlers == 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        rv = apr_thread_cond_signal(queue_info->wait_for_idler);
        if (rv != APR_SUCCESS) {
            apr_thread_mutex_unlock(queue_info->idlers_mutex);
            return rv;
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    return APR_SUCCESS;
}
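/* A hedged, simplified sketch (not the MPM's actual code) of the listener-side
 * counterpart, ap_queue_info_wait_for_idler(): it blocks on wait_for_idler
 * until set_idle() above reports at least one idle worker. The real MPM
 * claims the idler with atomics (and, in the zero_pt variant further up, lets
 * the count go "negative"); this version shows only the wait half of the
 * handshake. */
static apr_status_t queue_info_wait_for_idler_sketch(fd_queue_info_t *queue_info)
{
    apr_status_t rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    while (apr_atomic_read32(&queue_info->idlers) == 0) {
        rv = apr_thread_cond_wait(queue_info->wait_for_idler,
                                  queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            break;  /* give up on error; caller decides what to do */
        }
    }
    apr_thread_mutex_unlock(queue_info->idlers_mutex);
    return rv;
}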
/**
 * Retrieves the next item from the queue. If there are no
 * items available, it will block until one becomes available.
 * Once retrieved, the item is placed into the address specified by
 * 'data'.
 */
APU_DECLARE(apr_status_t) apr_queue_pop(apr_queue_t *queue, void **data)
{
    apr_status_t rv;

    if (queue->terminated) {
        return APR_EOF; /* no more elements ever again */
    }

    rv = apr_thread_mutex_lock(queue->one_big_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    /* Keep waiting until we wake up and find that the queue is not empty. */
    if (apr_queue_empty(queue)) {
        if (!queue->terminated) {
            queue->empty_waiters++;
            rv = apr_thread_cond_wait(queue->not_empty, queue->one_big_mutex);
            queue->empty_waiters--;
            if (rv != APR_SUCCESS) {
                apr_thread_mutex_unlock(queue->one_big_mutex);
                return rv;
            }
        }
        /* If we wake up and it's still empty, then we were interrupted */
        if (apr_queue_empty(queue)) {
            Q_DBG("queue empty (intr)", queue);
            rv = apr_thread_mutex_unlock(queue->one_big_mutex);
            if (rv != APR_SUCCESS) {
                return rv;
            }
            if (queue->terminated) {
                return APR_EOF; /* no more elements ever again */
            }
            else {
                return APR_EINTR;
            }
        }
    }

    *data = queue->data[queue->out];
    queue->nelts--;

    queue->out = (queue->out + 1) % queue->bounds;
    if (queue->full_waiters) {
        Q_DBG("signal !full", queue);
        rv = apr_thread_cond_signal(queue->not_full);
        if (rv != APR_SUCCESS) {
            apr_thread_mutex_unlock(queue->one_big_mutex);
            return rv;
        }
    }

    rv = apr_thread_mutex_unlock(queue->one_big_mutex);
    return rv;
}
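/* A hedged usage sketch (assumed, not from the source): a worker loop built on
 * apr_queue_pop(), treating APR_EINTR as a spurious wakeup to retry and
 * APR_EOF as orderly shutdown after apr_queue_term(). process() is a
 * hypothetical task handler. */
void *worker(void *arg)
{
    apr_queue_t *q = arg;
    void *task;

    for (;;) {
        apr_status_t rv = apr_queue_pop(q, &task);
        if (rv == APR_SUCCESS) {
            process(task);  /* hypothetical */
        }
        else if (rv == APR_EINTR) {
            continue;       /* interrupted or spurious wakeup: try again */
        }
        else {
            break;          /* APR_EOF (terminated) or a hard error */
        }
    }
    return NULL;
}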