void signal(int threadid, int *msg, int senderid, int signal_type) { int i = 0; wait_mutex(&working_threads[threadid].waiting_for_signal_mutex); if(working_threads[threadid].signal_senderid != senderid || working_threads[threadid].waiting_for_signal == 0 || working_threads[threadid].expected_signal_type != signal_type) { // unexpected signal, discard it leave_mutex(&working_threads[threadid].waiting_for_signal_mutex); return; } // send signal working_threads[threadid].expected_signal_type = OFS_THREADSIGNAL_NOSIGNAL; working_threads[threadid].signal_type = signal_type; working_threads[threadid].signal_senderid = senderid; if(msg != NULL) { working_threads[threadid].signal_msg[0] = *msg++; working_threads[threadid].signal_msg[1] = *(msg++); working_threads[threadid].signal_msg[2] = *(msg++); working_threads[threadid].signal_msg[3] = *(msg); } leave_mutex(&working_threads[threadid].waiting_for_signal_mutex); working_threads[threadid].waiting_for_signal = 0; }
/*
 * Called by a worker thread when it has finished processing the request
 * of stream `stream_id`.
 * If `preq` is non-NULL, the worker is offering to take another request;
 * the next scheduled one (if any) is popped and handed back. When no new
 * request is handed out, *pm is set to NULL, dropping the worker's
 * reference to the mplx.
 */
void h2_mplx_request_done(h2_mplx **pm, int stream_id, const h2_request **preq)
{
    h2_mplx *m = *pm;
    int acquired;
    if (enter_mutex(m, &acquired) == APR_SUCCESS) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
                      "h2_mplx(%ld): request(%d) done", m->id, stream_id);
        if (io) {
            io->worker_done = 1;
            if (io->orphaned) {
                /* stream already gone on the client side: destroy the io
                 * and wake anyone blocked in release_and_join */
                io_destroy(m, io, 0);
                if (m->join_wait) {
                    apr_thread_cond_signal(m->join_wait);
                }
            }
            else {
                /* hang around until the stream deregisters */
            }
        }
        if (preq) {
            /* someone wants another request, if we have */
            *preq = pop_request(m);
        }
        if (!preq || !*preq) {
            /* No request to hand back to the worker, NULLify reference
             * and decrement count */
            *pm = NULL;
        }
        leave_mutex(m, acquired);
    }
}
/*
 * Mark stream `stream_id` as done from the connection side, optionally
 * with an RST error code. Forwards to io_stream_done() when the stream
 * still has an h2_io.
 * FIX: removed the unused local `apr_thread_mutex_t *holding`.
 */
apr_status_t h2_mplx_stream_done(h2_mplx *m, int stream_id, int rst_error)
{
    apr_status_t status = APR_SUCCESS;
    int acquired;

    /* This maybe called from inside callbacks that already hold the lock.
     * E.g. when we are streaming out DATA and the EOF triggers the stream
     * release. */
    AP_DEBUG_ASSERT(m);
    if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);

        /* there should be an h2_io, once the stream has been scheduled
         * for processing, e.g. when we received all HEADERs. But when
         * a stream is cancelled very early, it will not exist. */
        if (io) {
            io_stream_done(m, io, rst_error);
        }
        leave_mutex(m, acquired);
    }
    return status;
}
/*
 * Read up to *plen bytes of response data for stream `stream_id` into
 * brigade `bb`. *peos is set when the end of the stream's output was
 * reached; *ptrailers receives the response trailers (only when EOS was
 * seen and a response exists), otherwise NULL.
 * Returns APR_ECONNABORTED when the stream is gone or orphaned.
 *
 * FIX: the original computed `*ptrailers = (*peos && io->response)? ...`
 * unconditionally after the if/else, dereferencing `io` even when it was
 * NULL or orphaned. The assignment is now inside the guarded branch and
 * the error branch explicitly yields NULL trailers.
 */
apr_status_t h2_mplx_out_read_to(h2_mplx *m, int stream_id,
                                 apr_bucket_brigade *bb,
                                 apr_off_t *plen, int *peos,
                                 apr_table_t **ptrailers)
{
    apr_status_t status;
    int acquired;

    AP_DEBUG_ASSERT(m);
    if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
        if (io && !io->orphaned) {
            H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_read_to_pre");

            status = h2_io_out_read_to(io, bb, plen, peos);

            H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_read_to_post");
            if (status == APR_SUCCESS) {
                /* wake a writer possibly blocked on output space */
                h2_io_signal(io, H2_IO_WRITE);
            }
            *ptrailers = (*peos && io->response)? io->response->trailers : NULL;
        }
        else {
            status = APR_ECONNABORTED;
            *ptrailers = NULL;
        }
        leave_mutex(m, acquired);
    }
    return status;
}
/*
 * Hand a brigade of response data from a worker to stream `stream_id`,
 * delegating the actual transfer (and any blocking on `iowait`) to
 * out_write(). Returns APR_ECONNABORTED when the stream is gone or
 * orphaned.
 */
apr_status_t h2_mplx_out_write(h2_mplx *m, int stream_id, ap_filter_t* f,
                               apr_bucket_brigade *bb,
                               apr_table_t *trailers,
                               struct apr_thread_cond_t *iowait)
{
    apr_status_t rv;
    int locked;

    AP_DEBUG_ASSERT(m);
    rv = enter_mutex(m, &locked);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
    if (io == NULL || io->orphaned) {
        rv = APR_ECONNABORTED;
    }
    else {
        rv = out_write(m, io, f, bb, trailers, iowait);
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, m->c,
                      "h2_mplx(%ld-%d): write with trailers=%s",
                      m->id, io->id, trailers? "yes" : "no");
        H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_write");

        /* notify the connection side that output is available */
        have_out_data_for(m, stream_id);
    }
    leave_mutex(m, locked);
    return rv;
}
/*
 * Wait up to `timeout` for any stream to produce output, using `iowait`
 * as the condition to be signalled by have_out_data_for().
 */
apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
                                 apr_thread_cond_t *iowait)
{
    apr_status_t rv;
    int locked;

    AP_DEBUG_ASSERT(m);
    if ((rv = enter_mutex(m, &locked)) != APR_SUCCESS) {
        return rv;
    }

    if (m->aborted) {
        rv = APR_ECONNABORTED;
    }
    else {
        /* register the condition so producers can signal us, then wait */
        m->added_output = iowait;
        rv = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
        if (APLOGctrace2(m->c)) {
            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
                          "h2_mplx(%ld): trywait on data for %f ms)",
                          m->id, timeout/1000.0);
        }
        m->added_output = NULL;
    }
    leave_mutex(m, locked);
    return rv;
}
apr_status_t h2_mplx_out_rst(h2_mplx *m, int stream_id, int error) { apr_status_t status; int acquired; AP_DEBUG_ASSERT(m); if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) { h2_io *io = h2_io_set_get(m->stream_ios, stream_id); if (io && !io->rst_error && !io->orphaned) { h2_io_rst(io, error); if (!io->response) { h2_io_set_add(m->ready_ios, io); } H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_rst"); have_out_data_for(m, stream_id); h2_io_signal(io, H2_IO_WRITE); } else { status = APR_ECONNABORTED; } leave_mutex(m, acquired); } return status; }
/*
 * Schedule request `req` of stream `stream_id` for processing: open its
 * h2_io, close input immediately when the request has no body, and add
 * it to the task queue ordered by `cmp`/`ctx`. When the queue was empty
 * before, workers are (re)registered after the lock is released.
 */
apr_status_t h2_mplx_process(h2_mplx *m, int stream_id,
                             const h2_request *req,
                             h2_stream_pri_cmp *cmp, void *ctx)
{
    apr_status_t rv;
    int queue_was_empty = 0;
    int locked;

    AP_DEBUG_ASSERT(m);
    if ((rv = enter_mutex(m, &locked)) == APR_SUCCESS) {
        if (m->aborted) {
            rv = APR_ECONNABORTED;
        }
        else {
            h2_io *io = open_io(m, stream_id);
            io->request = req;

            if (!io->request->body) {
                /* no request body expected: close input right away */
                rv = h2_io_in_close(io);
            }

            queue_was_empty = h2_tq_empty(m->q);
            h2_tq_add(m->q, io->id, cmp, ctx);

            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, m->c,
                          "h2_mplx(%ld-%d): process", m->c->id, stream_id);
            H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_process");
        }
        leave_mutex(m, locked);
    }

    /* outside the lock: kick workers only on empty->non-empty transition */
    if (rv == APR_SUCCESS && queue_was_empty) {
        workers_register(m);
    }
    return rv;
}
/*
 * Iterate all stream ios and emit flow-control window updates where due.
 * Returns APR_SUCCESS when at least one stream was updated, APR_EAGAIN
 * otherwise.
 */
apr_status_t h2_mplx_in_update_windows(h2_mplx *m)
{
    apr_status_t rv;
    int locked;

    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }

    if ((rv = enter_mutex(m, &locked)) == APR_SUCCESS) {
        update_ctx uctx;

        uctx.m = m;
        uctx.streams_updated = 0;
        h2_io_set_iter(m->stream_ios, update_window, &uctx);

        rv = uctx.streams_updated? APR_SUCCESS : APR_EAGAIN;
        leave_mutex(m, locked);
    }
    return rv;
}
// this function should be invoked before sending msgs void set_wait_for_signal(int threadid, int signal_type, int senderid) { wait_mutex(&working_threads[threadid].waiting_for_signal_mutex); working_threads[threadid].expected_signal_type = signal_type; working_threads[threadid].signal_senderid = senderid; working_threads[threadid].waiting_for_signal = 1; leave_mutex(&working_threads[threadid].waiting_for_signal_mutex); }
/*
 * Return non-zero when at least one worker thread is currently idle.
 * The idle counter is read under idle_threads_mutex.
 */
int check_idle()
{
    int any_idle;

    wait_mutex(&idle_threads_mutex);
    any_idle = (idle_threads > 0);
    leave_mutex(&idle_threads_mutex);

    return any_idle;
}
/*
 * Heap free() override: forwards to the allocator's _mem_free().
 * When SAFE is defined, the global malloc_mutex serializes all heap
 * operations (see malloc/calloc below).
 */
void free(void *ptr)
{
#ifdef SAFE
    wait_mutex(&malloc_mutex);
#endif

    _mem_free(ptr);

#ifdef SAFE
    leave_mutex(&malloc_mutex);
#endif
}
/*
 * Return the highest stream id that has been started (scheduled) so far,
 * or 0 when the lock could not be acquired.
 * FIX: the original ignored the enter_mutex() result and read/unlocked
 * unconditionally; every sibling accessor (e.g. h2_mplx_shutdown) checks
 * it, so do the same here.
 */
int h2_mplx_get_max_stream_started(h2_mplx *m)
{
    int stream_id = 0;
    int acquired;

    if (enter_mutex(m, &acquired) == APR_SUCCESS) {
        stream_id = m->max_stream_started;
        leave_mutex(m, acquired);
    }
    return stream_id;
}
/*
 * Heap calloc() override: allocates nelem * elsize bytes, zero-initialized
 * (the second argument to _mem_alloc requests zeroing). When SAFE is
 * defined, the global malloc_mutex serializes all heap operations.
 *
 * FIX: guard the nelem * elsize multiplication against size_t overflow;
 * standard calloc must fail (return NULL) rather than silently allocate a
 * wrapped-around, too-small block.
 */
void *calloc(size_t nelem, size_t elsize)
{
    void *ptr = NULL;

    if (elsize != 0 && nelem > ((size_t)-1) / elsize) {
        return NULL;    /* nelem * elsize would overflow */
    }

#ifdef SAFE
    wait_mutex(&malloc_mutex);
#endif

    ptr = _mem_alloc(nelem * elsize, 1);

#ifdef SAFE
    leave_mutex(&malloc_mutex);
#endif

    return ptr;
}
/*
 * Stop scheduling any new streams: empty the task queue so no queued
 * stream starts processing. Returns the highest stream id started so
 * far (0 when the lock could not be acquired).
 */
apr_uint32_t h2_mplx_shutdown(h2_mplx *m)
{
    int locked;
    int max_started = 0;

    if (enter_mutex(m, &locked) == APR_SUCCESS) {
        max_started = m->max_stream_started;
        /* clearing the schedule queue keeps queued streams from starting */
        h2_iq_clear(m->q);
        leave_mutex(m, locked);
    }
    return max_started;
}
/*
 * Heap malloc() override: forwards to _mem_alloc() without zeroing.
 * When SAFE is defined, the global malloc_mutex serializes all heap
 * operations.
 */
void *malloc(size_t size)
{
    void *result = NULL;

#ifdef SAFE
    wait_mutex(&malloc_mutex);
#endif

    result = _mem_alloc(size, 0);

#ifdef SAFE
    leave_mutex(&malloc_mutex);
#endif

    return result;
}
/*
 * Mark the multiplexer as aborted. The unlocked pre-check only avoids
 * taking the lock when the flag is already set; the actual store happens
 * under the mplx lock.
 */
void h2_mplx_abort(h2_mplx *m)
{
    int locked;

    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return;
    }
    if (enter_mutex(m, &locked) == APR_SUCCESS) {
        m->aborted = 1;
        leave_mutex(m, locked);
    }
}
/*
 * Pop the highest-priority io with a ready response and return the
 * matching stream from `streams` for submission to the client.
 * When the stream no longer exists (e.g. reset by the client), the io is
 * orphaned and either destroyed immediately (worker not running/already
 * done) or left for the worker with its input shut down.
 * Returns NULL when nothing is ready, the mplx is aborted, or the lock
 * could not be acquired.
 */
h2_stream *h2_mplx_next_submit(h2_mplx *m, h2_stream_set *streams)
{
    apr_status_t status;
    h2_stream *stream = NULL;
    int acquired;

    AP_DEBUG_ASSERT(m);
    if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
        h2_io *io = h2_io_set_pop_highest_prio(m->ready_ios);
        if (io && !m->aborted) {
            stream = h2_stream_set_get(streams, io->id);
            if (stream) {
                if (io->rst_error) {
                    /* propagate the pending reset instead of a response */
                    h2_stream_rst(stream, io->rst_error);
                }
                else {
                    AP_DEBUG_ASSERT(io->response);
                    H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_next_submit_pre");
                    h2_stream_set_response(stream, io->response, io->bbout);
                    H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_next_submit_post");
                }
            }
            else {
                /* We have the io ready, but the stream has gone away, maybe
                 * reset by the client. Should no longer happen since such
                 * streams should clear io's from the ready queue. */
                ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
                              "h2_mplx(%ld): stream for response %d closed, "
                              "resetting io to close request processing",
                              m->id, io->id);
                h2_io_make_orphaned(io, H2_ERR_STREAM_CLOSED);
                if (!io->worker_started || io->worker_done) {
                    /* no worker owns the io: safe to destroy now */
                    io_destroy(m, io, 1);
                }
                else {
                    /* hang around until the h2_task is done, but
                     * shutdown input and send out any events (e.g. window
                     * updates) asap. */
                    h2_io_in_shutdown(io);
                    io_process_events(m, io);
                }
            }
            /* wake a worker possibly blocked writing to this io */
            h2_io_signal(io, H2_IO_WRITE);
        }
        leave_mutex(m, acquired);
    }
    return stream;
}
/*
 * Tear down the multiplexer at connection shutdown: unregister from the
 * worker pool, orphan/destroy all stream ios, then wait (in wait_secs
 * chunks on `wait`) until every worker that still holds an io has
 * reported back. Finally destroys the mplx itself.
 */
apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
{
    apr_status_t status;
    int acquired;

    h2_workers_unregister(m->workers, m);

    if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
        int i, wait_secs = 5;

        /* disable WINDOW_UPDATE callbacks */
        h2_mplx_set_consumed_cb(m, NULL, NULL);

        while (!h2_io_set_iter(m->stream_ios, stream_done_iter, m)) {
            /* iterate until all ios have been orphaned or destroyed */
        }

        /* Any remaining ios have handed out requests to workers that are
         * not done yet. Any operation they do on their assigned stream ios will
         * be errored ECONNRESET/ABORTED, so that should find out pretty soon.
         */
        for (i = 0; h2_io_set_size(m->stream_ios) > 0; ++i) {
            m->join_wait = wait;
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
                          "h2_mplx(%ld): release_join, waiting on %d worker to report back",
                          m->id, (int)h2_io_set_size(m->stream_ios));

            /* h2_mplx_request_done() signals join_wait when an orphaned
             * io is destroyed */
            status = apr_thread_cond_timedwait(wait, m->lock,
                                               apr_time_from_sec(wait_secs));
            if (APR_STATUS_IS_TIMEUP(status)) {
                if (i > 0) {
                    /* Oh, oh. Still we wait for assigned workers to report that
                     * they are done. Unless we have a bug, a worker seems to be hanging.
                     * If we exit now, all will be deallocated and the worker, once
                     * it does return, will walk all over freed memory...
                     */
                    ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
                                  "h2_mplx(%ld): release, waiting for %d seconds now for "
                                  "all h2_workers to return, have still %d requests outstanding",
                                  m->id, i*wait_secs, (int)h2_io_set_size(m->stream_ios));
                }
            }
        }
        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
                      "h2_mplx(%ld): release_join -> destroy", m->id);
        leave_mutex(m, acquired);
        h2_mplx_destroy(m);
        /* all gone */
    }
    return status;
}
/*
 * Return non-zero when the input of stream `stream_id` has seen EOS.
 * A missing or orphaned stream counts as EOS; a failed lock acquisition
 * yields 0.
 */
int h2_mplx_in_has_eos_for(h2_mplx *m, int stream_id)
{
    int eos_seen = 0;
    int locked;

    AP_DEBUG_ASSERT(m);
    if (enter_mutex(m, &locked) == APR_SUCCESS) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
        eos_seen = (io && !io->orphaned)? h2_io_in_has_eos_for(io) : 1;
        leave_mutex(m, locked);
    }
    return eos_seen;
}
/*
 * Return non-zero when stream `stream_id` has response data pending.
 * A missing or orphaned stream, or a failed lock acquisition, yields 0.
 */
int h2_mplx_out_has_data_for(h2_mplx *m, int stream_id)
{
    int data_pending = 0;
    int locked;

    AP_DEBUG_ASSERT(m);
    if (enter_mutex(m, &locked) == APR_SUCCESS) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
        data_pending = (io && !io->orphaned)? h2_io_out_has_data(io) : 0;
        leave_mutex(m, locked);
    }
    return data_pending;
}
/*
 * Re-sort the queue of not-yet-started tasks according to `cmp`/`ctx`,
 * e.g. after the client changed stream priorities.
 */
apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
{
    apr_status_t rv;
    int locked;

    AP_DEBUG_ASSERT(m);
    if ((rv = enter_mutex(m, &locked)) != APR_SUCCESS) {
        return rv;
    }

    if (m->aborted) {
        rv = APR_ECONNABORTED;
    }
    else {
        h2_tq_sort(m->q, cmp, ctx);
        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
                      "h2_mplx(%ld): reprioritize tasks", m->id);
    }
    leave_mutex(m, locked);
    return rv;
}
/*
 * Pop the next scheduled request for a worker, or NULL when none exists
 * or the mplx is aborted. *has_more reports whether further tasks remain
 * queued (left untouched when the lock cannot be acquired).
 */
const h2_request *h2_mplx_pop_request(h2_mplx *m, int *has_more)
{
    const h2_request *req = NULL;
    int locked;

    AP_DEBUG_ASSERT(m);
    if (enter_mutex(m, &locked) == APR_SUCCESS) {
        if (m->aborted) {
            *has_more = 0;
        }
        else {
            req = pop_request(m);
            *has_more = !h2_tq_empty(m->q);
        }
        leave_mutex(m, locked);
    }
    return req;
}
/*
 * Close the input side of stream `stream_id` (no more request body data
 * will arrive), wake readers and process pending io events.
 * Returns APR_ECONNABORTED when the stream is gone or orphaned.
 */
apr_status_t h2_mplx_in_close(h2_mplx *m, int stream_id)
{
    apr_status_t rv;
    int locked;

    AP_DEBUG_ASSERT(m);
    if ((rv = enter_mutex(m, &locked)) == APR_SUCCESS) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
        if (io == NULL || io->orphaned) {
            rv = APR_ECONNABORTED;
        }
        else {
            rv = h2_io_in_close(io);
            H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_close");

            /* wake a worker blocked reading this stream's input */
            h2_io_signal(io, H2_IO_READ);
            io_process_events(m, io);
        }
        leave_mutex(m, locked);
    }
    return rv;
}
/*
 * Open the output side of a stream: register `response` (and optional
 * initial data in `bb`) for submission to the client via out_open().
 */
apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_response *response,
                              ap_filter_t* f, apr_bucket_brigade *bb,
                              struct apr_thread_cond_t *iowait)
{
    apr_status_t rv;
    int locked;

    AP_DEBUG_ASSERT(m);
    if ((rv = enter_mutex(m, &locked)) != APR_SUCCESS) {
        return rv;
    }

    if (m->aborted) {
        rv = APR_ECONNABORTED;
    }
    else {
        rv = out_open(m, stream_id, response, f, bb, iowait);
        if (APLOGctrace1(m->c)) {
            h2_util_bb_log(m->c, stream_id, APLOG_TRACE1, "h2_mplx_out_open", bb);
        }
    }
    leave_mutex(m, locked);
    return rv;
}
/*
 * Read request body data for stream `stream_id` into `bb`. With
 * APR_BLOCK_READ, EAGAIN results are retried after waiting on the io
 * signal (bounded by m->stream_timeout via h2_io_signal_init), until
 * data arrives or the mplx is aborted.
 * Returns APR_EOF when the stream is gone or orphaned.
 */
apr_status_t h2_mplx_in_read(h2_mplx *m, apr_read_type_e block,
                             int stream_id, apr_bucket_brigade *bb,
                             apr_table_t *trailers,
                             struct apr_thread_cond_t *iowait)
{
    apr_status_t status;
    int acquired;

    AP_DEBUG_ASSERT(m);
    if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
        if (io && !io->orphaned) {
            H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_read_pre");

            /* register `iowait` so writers can wake us while we block */
            h2_io_signal_init(io, H2_IO_READ, m->stream_timeout, iowait);
            status = h2_io_in_read(io, bb, -1, trailers);
            while (APR_STATUS_IS_EAGAIN(status)
                   && !is_aborted(m, &status)
                   && block == APR_BLOCK_READ) {
                ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
                              "h2_mplx(%ld-%d): wait on in data (BLOCK_READ)",
                              m->id, stream_id);
                /* releases m->lock while waiting; retry the read on wakeup */
                status = h2_io_signal_wait(m, io);
                if (status == APR_SUCCESS) {
                    status = h2_io_in_read(io, bb, -1, trailers);
                }
            }
            H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_read_post");
            h2_io_signal_exit(io);
        }
        else {
            status = APR_EOF;
        }
        leave_mutex(m, acquired);
    }
    return status;
}
/*
 * Close the output side of stream `stream_id`, attaching the given
 * trailers. If the worker produced neither a response nor a reset, a
 * synthetic error response is opened first so the stream can complete
 * cleanly on the client side.
 * Returns APR_ECONNABORTED when the stream is gone or orphaned.
 */
apr_status_t h2_mplx_out_close(h2_mplx *m, int stream_id, apr_table_t *trailers)
{
    apr_status_t status;
    int acquired;

    AP_DEBUG_ASSERT(m);
    if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
        if (io && !io->orphaned) {
            if (!io->response && !io->rst_error) {
                /* In case a close comes before a response was created,
                 * insert an error one so that our streams can properly
                 * reset.
                 */
                h2_response *r = h2_response_die(stream_id, APR_EGENERAL,
                                                 io->request, m->pool);
                status = out_open(m, stream_id, r, NULL, NULL, NULL);
                ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
                              "h2_mplx(%ld-%d): close, no response, no rst",
                              m->id, io->id);
            }
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c,
                          "h2_mplx(%ld-%d): close with trailers=%s",
                          m->id, io->id, trailers? "yes" : "no");
            status = h2_io_out_close(io, trailers);
            H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_close");

            /* let the connection side pick up the final data/EOS */
            have_out_data_for(m, stream_id);
        }
        else {
            status = APR_ECONNABORTED;
        }
        leave_mutex(m, acquired);
    }
    return status;
}
struct stdfss_res *mkdevice(int wpid, struct working_thread *thread, struct stdfss_mkdevice *mkdevice_cmd) { struct stdfss_res *ret = NULL; struct smount_info *minf = NULL; struct sdevice_info *opening_dinf = NULL; char *str = NULL, *strmatched = NULL; int parse_ret, dir_exists; struct gc_node *dir_base_node = NULL, *device_base_node = NULL; unsigned int *block = NULL; unsigned int nodeid; // get device file name from smo str = get_string(mkdevice_cmd->path_smo); ret = check_path(str, thread->command.command); if(ret != NULL) return ret; char *devstr = get_string(mkdevice_cmd->service_name); ret = check_path(devstr, thread->command.command); if(ret != NULL) { free(str); return ret; } // check path is ok if(str[len(str)] == '/') { free(str); free(devstr); return build_response_msg(thread->command.command, STDFSSERR_INVALID_COMMAND_PARAMS); } // get mount info wait_mutex(&mounted_mutex); minf = (struct smount_info *)lpt_getvalue_parcial_matchE(mounted, str, &strmatched); leave_mutex(&mounted_mutex); if(minf == NULL) { free(str); free(devstr); return build_response_msg(mkdevice_cmd->command, STDFSSERR_DEVICE_NOT_MOUNTED); } // check file does not exist parse_ret = parse_directory(TRUE, &dir_base_node, OFS_NODELOCK_EXCLUSIVE | OFS_NODELOCK_BLOCKING, thread->command.command, thread, wpid, minf, str, len(strmatched), NULL, &nodeid, NULL, &dir_exists, &ret); if(ret != NULL) free(ret); ret = NULL; if(parse_ret) { // file exists if(thread->lastdir_parsed_node != NULL) { nfree(minf->dinf, thread->lastdir_parsed_node); thread->lastdir_parsed_node = NULL; } free(strmatched); free(str); free(devstr); unlock_node(wpid, FALSE, OFS_LOCKSTATUS_OK); nfree(minf->dinf, dir_base_node); return build_response_msg(thread->command.command, STDFSSERR_FILE_EXISTS); } if(!dir_exists) { // worng path if(thread->lastdir_parsed_node != NULL) { nfree(minf->dinf, thread->lastdir_parsed_node); thread->lastdir_parsed_node = NULL; } free(strmatched); free(str); free(devstr); return 
build_response_msg(thread->command.command, STDFSSERR_FILE_DOESNOTEXIST); } dir_base_node = nref(minf->dinf, thread->lastdir_parsed_node->nodeid); free(strmatched); // Create device file if(!create_file(dir_base_node, str, last_index_of(str, '/') + 1, OFS_DEVICE_FILE, TRUE, minf, wpid, thread->command.command, &nodeid, &device_base_node, OFS_NOFLAGS , &ret)) { nfree(minf->dinf, thread->lastdir_parsed_node); nfree(minf->dinf, dir_base_node); thread->lastdir_parsed_node = NULL; free(str); free(devstr); return ret; } nfree(minf->dinf, thread->lastdir_parsed_node); nfree(minf->dinf, dir_base_node); thread->lastdir_parsed_node = NULL; // get a free block block = get_free_blocks(1, TRUE, minf, thread->command.command, wpid, &ret); if(ret != NULL) { free(str); free(devstr); unlock_node(wpid, FALSE, OFS_LOCKSTATUS_OK); nfree(minf->dinf, device_base_node); return ret; } free(str); clear_dir_buffer(thread->directory_buffer.buffer); // write buffer /* int LOGIC DEVICE ID: internal ID on the service (4 BYTES) int service name size; (4 BYTES) char SERVICE NAME[]: name of the device driver (zero terminated devstring) */ *((unsigned int *)thread->directory_buffer.buffer) = (unsigned int)mkdevice_cmd->logic_deviceid; *((unsigned int *)(thread->directory_buffer.buffer + 4)) = len(devstr); mem_copy((unsigned char *)devstr, ((unsigned char *)thread->directory_buffer.buffer + 8), len(devstr) + 1); free(devstr); write_buffer((char *)thread->directory_buffer.buffer, OFS_DIR_BUFFERSIZE, *block, thread->command.command, wpid, minf, &ret); if(ret != NULL) { free_block(TRUE, TRUE, *block, minf, thread->command.command, wpid, &ret); free(block); unlock_node(wpid, FALSE, OFS_LOCKSTATUS_OK); nfree(minf->dinf, device_base_node); return ret; } // update node device_base_node->n.file_size = 8 + len(devstr) + 1; device_base_node->n.blocks[0] = *block; if(!write_node(device_base_node, minf, wpid, thread->command.command, &ret) || ret != NULL) { free_block(TRUE, TRUE, *block, minf, 
thread->command.command, wpid, &ret); free(block); unlock_node(wpid, FALSE, OFS_LOCKSTATUS_OK); nfree(minf->dinf, device_base_node); return ret; } nfree(minf->dinf, device_base_node); // unlock device node unlock_node(wpid, FALSE, OFS_LOCKSTATUS_OK); return build_response_msg(thread->command.command, STDFSSERR_OK); }
/*
 * Record that one more worker thread has become idle; the counter is
 * protected by idle_threads_mutex.
 */
void signal_idle()
{
    wait_mutex(&idle_threads_mutex);
    ++idle_threads;
    leave_mutex(&idle_threads_mutex);
}
/*
 * Record that a previously idle worker thread is busy again; the counter
 * is protected by idle_threads_mutex.
 */
void decrement_idle()
{
    wait_mutex(&idle_threads_mutex);
    --idle_threads;
    leave_mutex(&idle_threads_mutex);
}