static apt_bool_t mrcp_sofia_session_terminate_request(mrcp_session_t *session)
{
	mrcp_sofia_session_t *sofia_session = session->obj;
	if(!sofia_session) {
		return FALSE;
	}

	sofia_session->terminate_requested = FALSE;
	apr_thread_mutex_lock(sofia_session->mutex);
	if(sofia_session->nh) {
		sofia_session->terminate_requested = TRUE;
		nua_bye(sofia_session->nh,TAG_END());
	}
	apr_thread_mutex_unlock(sofia_session->mutex);

	if(sofia_session->terminate_requested == FALSE) {
		mrcp_sofia_session_cleanup(sofia_session);
		mrcp_session_terminate_response(session);
	}
	return TRUE;
}
Example #2
apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
                                 apr_thread_cond_t *iowait)
{
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }
    apr_status_t status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        m->added_output = iowait;
        status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
        if (APLOGctrace2(m->c)) {
            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
                          "h2_mplx(%ld): trywait on data for %f ms)",
                          m->id, timeout/1000.0);
        }
        m->added_output = NULL;
        apr_thread_mutex_unlock(m->lock);
    }
    return status;
}
Example #3
static void
demo_exchange_initialise (void)
{

    //  Test for already active before applying any locks; avoids deadlock in
    //  some classes
    if (!s_demo_exchange_active) {

#if (defined (BASE_THREADSAFE))
        //  First make sure the object mutex has been created
        if (!icl_global_mutex) {
            icl_system_panic ("icl_init", "iCL not initialised - call icl_system_initialise()\n");
            abort ();
        }
        apr_thread_mutex_lock (icl_global_mutex);
        if (!s_demo_exchange_mutex)
            s_demo_exchange_mutex = icl_mutex_new ();
        apr_thread_mutex_unlock (icl_global_mutex);

        //  Now lock the object mutex
        icl_mutex_lock   (s_demo_exchange_mutex);

        //  Test again for already active now that we hold the lock
        if (!s_demo_exchange_active) {
#endif
            //  Register the class termination call-back functions
            icl_system_register (NULL, self_terminate);

            demo_exchange_agent_init ();

            s_demo_exchange_table = demo_exchange_table_new ();
            s_demo_exchange_active = TRUE;
#if (defined (BASE_THREADSAFE))
        }
        icl_mutex_unlock (s_demo_exchange_mutex);
#endif

    }
}
Example #4
MPF_DECLARE(apt_bool_t) mpf_dtmf_generator_enqueue(
								struct mpf_dtmf_generator_t *generator,
								const char *digits)
{
	apr_size_t qlen, dlen;
	apt_bool_t ret;

	dlen = strlen(digits);
	apr_thread_mutex_lock(generator->mutex);
	qlen = strlen(generator->queue);
	if (qlen + dlen > MPF_DTMFGEN_QUEUE_LEN) {
		ret = FALSE;
		apt_log(APT_LOG_MARK, APT_PRIO_WARNING, "DTMF queue too short (%d), "
			"cannot add %"APR_SIZE_T_FMT" digit%s, already has %"APR_SIZE_T_FMT,
			MPF_DTMFGEN_QUEUE_LEN, dlen, dlen > 1 ? "s" : "", qlen);
	} else {
		strcpy(generator->queue + qlen, digits);
		ret = TRUE;
	}
	apr_thread_mutex_unlock(generator->mutex);
	return ret;
}
Example #5
apr_status_t h2_mplx_in_close(h2_mplx *m, int stream_id)
{
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }
    apr_status_t status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
        if (io) {
            status = h2_io_in_close(io);
            if (io->input_arrived) {
                apr_thread_cond_signal(io->input_arrived);
            }
        }
        else {
            status = APR_ECONNABORTED;
        }
        apr_thread_mutex_unlock(m->lock);
    }
    return status;
}
Example #6
static apt_bool_t mrcp_sofia_session_offer(mrcp_session_t *session, mrcp_session_descriptor_t *descriptor)
{
	char sdp_str[2048];
	const char *local_sdp_str = NULL;
	apt_bool_t res = FALSE;
	mrcp_sofia_session_t *sofia_session = session->obj;
	if(!sofia_session) {
		return FALSE;
	}

	if(session->signaling_agent) {
		mrcp_sofia_agent_t *sofia_agent = mrcp_sofia_agent_get(session);
		if(sofia_agent) {
			if(sofia_agent->config->origin) {
				apt_string_set(&descriptor->origin,sofia_agent->config->origin);
			}
		}
	}
	if(sdp_string_generate_by_mrcp_descriptor(sdp_str,sizeof(sdp_str),descriptor,TRUE) > 0) {
		local_sdp_str = sdp_str;
		sofia_session->descriptor = descriptor;
		apt_obj_log(APT_LOG_MARK,APT_PRIO_INFO,session->log_obj,"Local SDP "APT_NAMESID_FMT"\n%s", 
			session->name,
			MRCP_SESSION_SID(session), 
			local_sdp_str);
	}

	apr_thread_mutex_lock(sofia_session->mutex);

	if(sofia_session->nh) {
		res = TRUE;
		nua_invite(sofia_session->nh,
				TAG_IF(local_sdp_str,SOATAG_USER_SDP_STR(local_sdp_str)),
				TAG_END());
	}

	apr_thread_mutex_unlock(sofia_session->mutex);
	return res;
}
Example #7
extern apr_status_t napr_threadpool_add(napr_threadpool_t *threadpool, void *data)
{
    char errbuf[128];
    apr_status_t status;

    if (APR_SUCCESS != (status = apr_thread_mutex_lock(threadpool->threadpool_mutex))) {
        DEBUG_ERR("error calling apr_thread_mutex_lock: %s", apr_strerror(status, errbuf, 128));
        return status;
    }
    threadpool->ended &= 0x0;
    napr_list_enqueue(threadpool->list, data);
    if (APR_SUCCESS != (status = apr_thread_mutex_unlock(threadpool->threadpool_mutex))) {
        DEBUG_ERR("error calling apr_thread_mutex_unlock: %s", apr_strerror(status, errbuf, 128));
        return status;
    }
    if (APR_SUCCESS != (status = apr_thread_cond_signal(threadpool->threadpool_update))) {
        DEBUG_ERR("error calling apr_thread_cond_signal: %s", apr_strerror(status, errbuf, 128));
        return status;
    }

    return APR_SUCCESS;
}
Example #8
apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_response *response,
                              ap_filter_t* f, apr_bucket_brigade *bb,
                              struct apr_thread_cond_t *iowait)
{
    apr_status_t status;
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }
    status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        status = out_open(m, stream_id, response, f, bb, iowait);
        if (APLOGctrace1(m->c)) {
            h2_util_bb_log(m->c, stream_id, APLOG_TRACE1, "h2_mplx_out_open", bb);
        }
        if (m->aborted) {
            return APR_ECONNABORTED;
        }
        apr_thread_mutex_unlock(m->lock);
    }
    return status;
}
Example #9
void slayer_server_log_add_entry(slayer_server_log_manager_t *manager, apr_pool_t *mpool,
                                  const char *client_ip,apr_int64_t rtime,
                                  const char *request_line,int response_code,
                                  int nbytes_sent, apr_int64_t time_toservice ) {

	json_value *container = json_object_create(mpool);
	json_object_add(container,"client_ip",json_string_create(mpool,client_ip));
	json_object_add(container,"request_time",json_long_create(mpool,rtime / (1000*1000)));
	json_object_add(container,"request",json_string_create(mpool,request_line));
	json_object_add(container,"response_code",json_long_create(mpool,response_code));
	json_object_add(container,"bytes_sent",json_long_create(mpool,nbytes_sent));
	json_object_add(container,"time_toservice",json_long_create(mpool,time_toservice));
	char *json_entry = strdup(json_serialize(mpool,container)); //we want our own copy of this data

	//keep the critical section under the mutex as small as possible
	apr_thread_mutex_lock(manager->list_mutex);
	manager->offset++;
	if (manager->offset == manager->nentries)  manager->offset = 0;
	free(manager->entries[manager->offset].json_view);
	manager->entries[manager->offset].json_view = json_entry;
	apr_thread_mutex_unlock(manager->list_mutex);
}
Example #10
service_manager_t *clone_service_manager(service_manager_t *sm)
{
    apr_ssize_t klen;
    service_manager_t *clone;
    apr_hash_index_t *his;
    apr_hash_t *section, *clone_section;
    char *key;

    //** Make an empty SM
    clone = create_service_manager(sm);

    //** Now cycle through all the tables and copy them
    apr_thread_mutex_lock(sm->lock);
    for (his = apr_hash_first(NULL, sm->table); his != NULL; his = apr_hash_next(his)) {
        apr_hash_this(his, (const void **)&key, &klen, (void **)&section);
        clone_section = apr_hash_copy(clone->pool, section);
        apr_hash_set(clone->table, apr_pstrdup(clone->pool, key), APR_HASH_KEY_STRING, clone_section);
    }
    apr_thread_mutex_unlock(sm->lock);

    return(clone);
}
Example #11
/* Write synthesized speech / speech to be recognized. */
int speech_channel_write(speech_channel_t *schannel, void *data, apr_size_t *len)
{
	int status = 0;

	if ((schannel != NULL) && (*len > 0)) {
#if SPEECH_CHANNEL_TRACE
		apr_size_t req_len = *len;
#endif

#if SPEECH_CHANNEL_DUMP
		if(schannel->stream_in) {
			fwrite(data, 1, *len, schannel->stream_in);
		}
#endif
		if (schannel->mutex != NULL)
			apr_thread_mutex_lock(schannel->mutex);

		audio_queue_t *queue = schannel->audio_queue;

		if (schannel->state == SPEECH_CHANNEL_PROCESSING)
			status = audio_queue_write(queue, data, len);
		else
			status = -1;

		if (schannel->mutex != NULL)
			apr_thread_mutex_unlock(schannel->mutex);

#if SPEECH_CHANNEL_TRACE
		ast_log(LOG_DEBUG, "(%s) channel_write() status=%d req=%"APR_SIZE_T_FMT" written=%"APR_SIZE_T_FMT"\n", 
				schannel->name, status, req_len, *len);
#endif

	} else {
		ast_log(LOG_ERROR, "Speech channel structure pointer is NULL\n");
		return -1;
	}

	return status;
}
Example #12
txn_info_t *attach_txn_info(roundid_t **rids, size_t sz_rids, 
    mpaxos_req_t *req) {

    txn_info_t *info = NULL;
    txn_info_create(&info, rids, sz_rids, req);
    
    for (size_t i = 0; i < sz_rids; i++) {
        group_info_t *ginfo = NULL;
        group_info_create(&ginfo, info, rids[i]);
        // add group to round
        apr_hash_set(info->ht_ginfo, &ginfo->gid, sizeof(groupid_t), ginfo);
    }

    apr_thread_mutex_lock(mx_txn_info_);
    // txn_info_t *info = apr_hash_get(ht_txn_info_, &(req->id), sizeof(roundid_t));
    // it should be a new round.
    // SAFE_ASSERT(info == NULL);
    // attach txn
    apr_hash_set(ht_txn_info_, &info->tid, sizeof(txnid_t), info);
    apr_thread_mutex_unlock(mx_txn_info_);
    return info;
}
Example #13
/* Creates a new serialization context with the specified handle. If any
 * compilation units are waiting for an SC with this handle, removes it from
 * their to-resolve list after installing itself in the appropriate slot. */
MVMObject * MVM_sc_create(MVMThreadContext *tc, MVMString *handle) {
    MVMObject   *sc;
    MVMCompUnit *cur_cu;

    /* Allocate. */
    MVMROOT(tc, handle, {
        sc = REPR(tc->instance->SCRef)->allocate(tc, STABLE(tc->instance->SCRef));
        MVMROOT(tc, sc, {
            REPR(sc)->initialize(tc, STABLE(sc), sc, OBJECT_BODY(sc));

            /* Set handle. */
            MVM_ASSIGN_REF(tc, sc, ((MVMSerializationContext *)sc)->body->handle, handle);

            /* Add to weak lookup hash. */
            if (apr_thread_mutex_lock(tc->instance->mutex_sc_weakhash) != APR_SUCCESS)
                MVM_exception_throw_adhoc(tc, "Unable to lock SC weakhash");
            MVM_string_flatten(tc, handle);
            MVM_HASH_BIND(tc, tc->instance->sc_weakhash, handle, ((MVMSerializationContext *)sc)->body);
            if (apr_thread_mutex_unlock(tc->instance->mutex_sc_weakhash) != APR_SUCCESS)
                MVM_exception_throw_adhoc(tc, "Unable to unlock SC weakhash");

            /* Visit compilation units that need this SC and resolve it. */
            cur_cu = tc->instance->head_compunit;
            while (cur_cu) {
                if (cur_cu->scs_to_resolve) {
                    MVMuint32 i;
                    for (i = 0; i < cur_cu->num_scs; i++) {
                        MVMString *res = cur_cu->scs_to_resolve[i];
                        if (res && MVM_string_equal(tc, res, handle)) {
                            cur_cu->scs[i] = (MVMSerializationContext *)sc;
                            cur_cu->scs_to_resolve[i] = NULL;
                            break;
                        }
                    }
                }
                cur_cu = cur_cu->next_compunit;
            }
        });
    });

    return sc;
}
Example #14
void ds_ibp_destroy(data_service_fn_t *dsf)
{
    ds_ibp_priv_t *ds = (ds_ibp_priv_t *)dsf->priv;
    apr_status_t value;

    //** Wait for the warmer thread to complete
    apr_thread_mutex_lock(ds->lock);
    ds->warm_stop = 1;
    apr_thread_cond_signal(ds->cond);
    apr_thread_mutex_unlock(ds->lock);
    apr_thread_join(&value, ds->thread);  //** Wait for it to complete

    //** Now we can clean up
    apr_thread_mutex_destroy(ds->lock);
    apr_thread_cond_destroy(ds->cond);
    apr_pool_destroy(ds->pool);

    ibp_destroy_context(ds->ic);

    free(ds);
    free(dsf);
}
Example #15
apr_status_t h2_mplx_out_close(h2_mplx *m, int stream_id)
{
    apr_status_t status;
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }
    status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        if (!m->aborted) {
            h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
            if (io) {
                if (!io->response->ngheader) {
                    /* In case a close comes before a response was created,
                     * insert an error one so that our streams can properly
                     * reset.
                     */
                    h2_response *r = h2_response_create(stream_id, 
                                                        "500", NULL, m->pool);
                    status = out_open(m, stream_id, r, NULL, NULL, NULL);
                }
                status = h2_io_out_close(io);
                have_out_data_for(m, stream_id);
                if (m->aborted) {
                    /* if we were the last output, the whole session might
                     * have gone down in the meantime.
                     */
                    return APR_SUCCESS;
                }
            }
            else {
                status = APR_ECONNABORTED;
            }
        }
        apr_thread_mutex_unlock(m->lock);
    }
    return status;
}
Example #16
void change_all_hportal_conn(portal_context_t *hpc, int min_conn, int max_conn, apr_time_t dt_connect)
{
    apr_hash_index_t *hi;
    host_portal_t *hp;
    void *val;

    apr_thread_mutex_lock(hpc->lock);

    for (hi=apr_hash_first(hpc->pool, hpc->table); hi != NULL; hi = apr_hash_next(hi)) {
        apr_hash_this(hi, NULL, NULL, &val);
        hp = (host_portal_t *)val;

        hportal_lock(hp);
//log_printf(0, "change_all_hportal_conn: hp=%s min=%d max=%d\n", hp->skey, min_conn, max_conn);
        hp->min_conn = min_conn;
        hp->max_conn = max_conn;
        hp->stable_conn = max_conn;
        hp->dt_connect = dt_connect;
        hportal_unlock(hp);
    }

    apr_thread_mutex_unlock(hpc->lock);
}
Example #17
/** \brief Send MRCP request to client stack and wait for async response */
static apt_bool_t uni_recog_mrcp_request_send(uni_speech_t *uni_speech, mrcp_message_t *message)
{
	apt_bool_t res = FALSE;
	apr_thread_mutex_lock(uni_speech->mutex);
	uni_speech->mrcp_request = message;

	/* Send MRCP request */
	ast_log(LOG_DEBUG, "Send MRCP request\n");
	res = mrcp_application_message_send(uni_speech->session,uni_speech->channel,message);

	if(res == TRUE) {
		/* Wait for MRCP response */
		ast_log(LOG_DEBUG, "Wait for MRCP response\n");
		if(apr_thread_cond_timedwait(uni_speech->wait_object,uni_speech->mutex,MRCP_APP_REQUEST_TIMEOUT) != APR_SUCCESS) {
		    ast_log(LOG_ERROR, "Failed to get response, request timed out\n");
		    uni_speech->mrcp_response = NULL;
		}
		ast_log(LOG_DEBUG, "Waked up\n");
	}
	uni_speech->mrcp_request = NULL;
	apr_thread_mutex_unlock(uni_speech->mutex);
	return res;
}
Example #18
void h2_mplx_task_done(h2_mplx *m, int stream_id)
{
    apr_status_t status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        h2_stream *stream = h2_stream_set_get(m->closed, stream_id);
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
        ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
                      "h2_mplx(%ld): task(%d) done", m->id, stream_id);
        if (stream) {
            /* stream was already closed by main connection and is in 
             * zombie state. Now that the task is done with it, we
             * can free its resources. */
            h2_stream_set_remove(m->closed, stream);
            stream_destroy(m, stream, io);
        }
        else if (io) {
            /* main connection has not finished stream. Mark task as done
             * so that eventual cleanup can start immediately. */
            io->task_done = 1;
        }
        apr_thread_mutex_unlock(m->lock);
    }
}
Example #19
void mmlog_log(mm_logger* log,const char* a_format, ...) {
    va_list va;
    //int i=0;
    int size=2048;
    apr_time_t tnow;
    char tbuf[64];
    char buffer[BUFFERSIZE];
    memset(buffer, '\0', BUFFERSIZE);

    apr_thread_mutex_lock(log->mutex);

    if(log->maxLogFileSizeMB>0) mmlog_rotateLogFile(log);

    va_start(va, a_format);
    vsnprintf(buffer, BUFFERSIZE-1, a_format, va);

    tnow=apr_time_now();
    memset(tbuf,'\0',64);
    apr_ctime(tbuf,tnow);
    apr_file_printf(log->file,"%s mm.monitor.pid [%d] %s\r\n",tbuf,getpid(),buffer);
    va_end(va);
    apr_thread_mutex_unlock(log->mutex);
}
Example #20
int submit_hp_que_op(portal_context_t *hpc, op_generic_t *op)
{
    command_op_t *hop = &(op->op->cmd);

    apr_thread_mutex_lock(hpc->lock);

    //** Check if we should do a garbage run **
    if (hpc->next_check < time(NULL)) {
        hpc->next_check = time(NULL) + hpc->compact_interval;

        apr_thread_mutex_unlock(hpc->lock);
        log_printf(15, "submit_hp_op: Calling compact_hportals\n");
        compact_hportals(hpc);
        apr_thread_mutex_lock(hpc->lock);
    }

//log_printf(1, "submit_hp_op: hpc=%p hpc->table=%p\n",hpc, hpc->table);
    host_portal_t *hp = _lookup_hportal(hpc, hop->hostport);
    if (hp == NULL) {
        log_printf(15, "submit_hp_que_op: New host: %s\n", hop->hostport);
        hp = create_hportal(hpc, hop->connect_context, hop->hostport, hpc->min_threads, hpc->max_threads, hpc->dt_connect);
        if (hp == NULL) {
            log_printf(15, "submit_hp_que_op: create_hportal failed!\n");
            return(1);
        }
        log_printf(15, "submit_op: New host.. hp->skey=%s\n", hp->skey);
        apr_hash_set(hpc->table, hp->skey, APR_HASH_KEY_STRING, (const void *)hp);
        host_portal_t *hp2 = _lookup_hportal(hpc, hop->hostport);
        log_printf(15, "submit_hp_que_op: after lookup hp2=%p\n", hp2);
    }

    //** This is done in the submit_hportal() since we have release_master=1
    //** This protects against accidental compaction removal
    //** apr_thread_mutex_unlock(hpc->lock);

    return(submit_hportal(hp, op, 0, 1));
}
Example #21
apr_status_t h2_mplx_out_write(h2_mplx *m, int stream_id, 
                               ap_filter_t* f, apr_bucket_brigade *bb,
                               apr_table_t *trailers,
                               struct apr_thread_cond_t *iowait)
{
    apr_status_t status;
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }
    status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        if (!m->aborted) {
            h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
            if (io && !io->orphaned) {
                status = out_write(m, io, f, bb, trailers, iowait);
                ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c,
                              "h2_mplx(%ld-%d): write with trailers=%s", 
                              m->id, io->id, trailers? "yes" : "no");
                H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_write");
                
                have_out_data_for(m, stream_id);
                if (m->aborted) {
                    return APR_ECONNABORTED;
                }
            }
            else {
                status = APR_ECONNABORTED;
            }
        }
        
        if (m->lock) {
            apr_thread_mutex_unlock(m->lock);
        }
    }
    return status;
}
Example #22
static void *APR_THREAD_FUNC thread_mutex_function(apr_thread_t *thd, void *data)
{
    int exitLoop = 1;

    /* slight delay to allow things to settle */
    apr_sleep (1);
    
    while (1)
    {
        apr_thread_mutex_lock(thread_mutex);
        if (i == MAX_ITER)
            exitLoop = 0;
        else 
        {
            i++;
            x++;
        }
        apr_thread_mutex_unlock(thread_mutex);

        if (!exitLoop)
            break;
    }
    return NULL;
} 
Example #23
static void test_timeoutcond(abts_case *tc, void *data)
{
    apr_status_t s;
    apr_interval_time_t timeout;
    apr_time_t begin, end;
    int i;

    s = apr_thread_mutex_create(&timeout_mutex, APR_THREAD_MUTEX_DEFAULT, p);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, s);
    ABTS_PTR_NOTNULL(tc, timeout_mutex);

    s = apr_thread_cond_create(&timeout_cond, p);
    ABTS_INT_EQUAL(tc, APR_SUCCESS, s);
    ABTS_PTR_NOTNULL(tc, timeout_cond);

    timeout = apr_time_from_sec(5);

    for (i = 0; i < MAX_RETRY; i++) {
        apr_thread_mutex_lock(timeout_mutex);

        begin = apr_time_now();
        s = apr_thread_cond_timedwait(timeout_cond, timeout_mutex, timeout);
        end = apr_time_now();
        apr_thread_mutex_unlock(timeout_mutex);
        
        if (s != APR_SUCCESS && !APR_STATUS_IS_TIMEUP(s)) {
            continue;
        }
        ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_TIMEUP(s));
        ABTS_ASSERT(tc, "Timer returned too late", end - begin - timeout < 500000);
        break;
    }
    ABTS_ASSERT(tc, "Too many retries", i < MAX_RETRY);
    APR_ASSERT_SUCCESS(tc, "Unable to destroy the conditional",
                       apr_thread_cond_destroy(timeout_cond));
}
Example #24
apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
                                    apr_pool_t *pool_to_recycle)
{
    apr_status_t rv;
    rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    AP_DEBUG_ASSERT(queue_info->idlers >= 0);
    AP_DEBUG_ASSERT(queue_info->num_recycled < queue_info->max_idlers);
    if (pool_to_recycle) {
        queue_info->recycled_pools[queue_info->num_recycled++] =
            pool_to_recycle;
    }
    if (queue_info->idlers++ == 0) {
        /* Only signal if we had no idlers before. */
        apr_thread_cond_signal(queue_info->wait_for_idler);
    }
    rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    return APR_SUCCESS;
}
Example #25
static apt_bool_t mrcp_server_agent_control_process(mrcp_connection_agent_t *agent)
{
	apt_bool_t status = TRUE;
	apt_bool_t running = TRUE;
	connection_task_msg_t *msg;

	do {
		apr_thread_mutex_lock(agent->guard);
		msg = apt_cyclic_queue_pop(agent->msg_queue);
		apr_thread_mutex_unlock(agent->guard);
		if(msg) {
			switch(msg->type) {
				case CONNECTION_TASK_MSG_ADD_CHANNEL:
					mrcp_server_agent_channel_add(agent,msg->channel,msg->descriptor);
					break;
				case CONNECTION_TASK_MSG_MODIFY_CHANNEL:
					mrcp_server_agent_channel_modify(agent,msg->channel,msg->descriptor);
					break;
				case CONNECTION_TASK_MSG_REMOVE_CHANNEL:
					mrcp_server_agent_channel_remove(agent,msg->channel);
					break;
				case CONNECTION_TASK_MSG_SEND_MESSAGE:
					mrcp_server_agent_messsage_send(agent,msg->channel->connection,msg->message);
					break;
				case CONNECTION_TASK_MSG_TERMINATE:
					status = FALSE;
					break;
			}
		}
		else {
			running = FALSE;
		}
	}
	while(running == TRUE);
	return status;
}
Example #26
static apt_bool_t mrcp_server_control_message_signal(
								connection_task_msg_type_e type,
								mrcp_connection_agent_t *agent,
								mrcp_control_channel_t *channel,
								mrcp_control_descriptor_t *descriptor,
								mrcp_message_t *message)
{
	apt_bool_t status;
	connection_task_msg_t *msg = apr_palloc(channel->pool,sizeof(connection_task_msg_t));
	msg->type = type;
	msg->agent = agent;
	msg->channel = channel;
	msg->descriptor = descriptor;
	msg->message = message;

	apr_thread_mutex_lock(agent->guard);
	status = apt_cyclic_queue_push(agent->msg_queue,msg);
	apr_thread_mutex_unlock(agent->guard);
	if(apt_pollset_wakeup(agent->pollset) != TRUE) {
		apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Failed to Signal Control Message");
		status = FALSE;
	}
	return status;
}
Example #27
void rsrs_config_send(resource_service_fn_t *rs, mq_frame_t *fid, mq_msg_t *address)
{
    rs_remote_server_priv_t *rsrs = (rs_remote_server_priv_t *)rs->priv;
    mq_msg_t *msg;
    char *config;
    char data[128];

    //** Form the core message
    msg = mq_msg_new();
    mq_msg_append_mem(msg, MQF_VERSION_KEY, MQF_VERSION_SIZE, MQF_MSG_KEEP_DATA);
    mq_msg_append_mem(msg, MQF_RESPONSE_KEY, MQF_RESPONSE_SIZE, MQF_MSG_KEEP_DATA);
    mq_msg_append_frame(msg, fid);

    //** Add the version.. Note the "\n" for the version.  This preserves a NULL term on the receiver
    apr_thread_mutex_lock(rsrs->lock);
    snprintf(data, sizeof(data), "%d %d\n", rsrs->my_map_version.map_version, rsrs->my_map_version.status_version);
    apr_thread_mutex_unlock(rsrs->lock);
    mq_msg_append_mem(msg, strdup(data), strlen(data), MQF_MSG_AUTO_FREE);

    log_printf(5, "version=%s", data);

    //** Add the config
    config = rs_get_rid_config(rsrs->rs_child);
    mq_msg_append_mem(msg, config, strlen(config), MQF_MSG_AUTO_FREE);

    log_printf(5, "rid_config=%s\n", config);

    //** End with an empty frame
    mq_msg_append_mem(msg, NULL, 0, MQF_MSG_KEEP_DATA);

    //** Now address it
    mq_apply_return_address_msg(msg, address, 0);

    //** Lastly send it
    mq_submit(rsrs->server_portal, mq_task_new(rsrs->mqc, msg, NULL, NULL, 30));
}
Example #28
static void pipe_write(toolbox_t *box, char ch)
{
    apr_status_t rv;
    apr_size_t nbytes;
    abts_case *tc = box->tc;
    apr_file_t *in = box->data;

    rv = apr_file_write_full(in, &ch, 1, &nbytes);
    ABTS_SUCCESS(rv);
    ABTS_SIZE_EQUAL(tc, 1, nbytes);

    rv = apr_thread_mutex_lock(box->mutex);
    ABTS_SUCCESS(rv);

    if (!pipe_count) {
        rv = apr_thread_cond_signal(box->cond);
        ABTS_SUCCESS(rv);
    }

    pipe_count++;

    rv = apr_thread_mutex_unlock(box->mutex);
    ABTS_SUCCESS(rv);
}
Example #29
int add_service(service_manager_t *sm, char *service_section, char *service_name, void *service)
{
    char *key;
    apr_hash_t *section;

    apr_thread_mutex_lock(sm->lock);

    log_printf(15, "adding section=%s service=%s\n", service_section, service_name);

    section = apr_hash_get(sm->table, service_section, APR_HASH_KEY_STRING);
    if (section == NULL) {  //** New section so create the table and insert it
        log_printf(15, "Creating section=%s\n", service_section);
        section = apr_hash_make(sm->pool);
        key = apr_pstrdup(sm->pool, service_section);
        apr_hash_set(sm->table, key, APR_HASH_KEY_STRING, section);
    }

    key = apr_pstrdup(sm->pool, service_name);
    apr_hash_set(section, key, APR_HASH_KEY_STRING, service);

    apr_thread_mutex_unlock(sm->lock);

    return(0);
}
Example #30
apr_status_t h2_mplx_in_update_windows(h2_mplx *m)
{
    apr_status_t status;
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }
    status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        update_ctx ctx;
        
        ctx.m               = m;
        ctx.streams_updated = 0;

        status = APR_EAGAIN;
        h2_io_set_iter(m->stream_ios, update_window, &ctx);
        
        if (ctx.streams_updated) {
            status = APR_SUCCESS;
        }
        apr_thread_mutex_unlock(m->lock);
    }
    return status;
}
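
All of the examples above share the same basic shape: create the mutex from an APR pool, take the lock, keep the critical section as small as possible, and release the lock on every exit path. Below is a minimal, self-contained sketch of that pattern written for this page; it is not taken from any of the projects above, and the names counter_mutex, shared_counter and worker are purely illustrative.

/* Minimal illustrative sketch (not from the projects above): two worker
 * threads increment a shared counter under an APR mutex, mirroring the
 * lock / short critical section / unlock pattern used in the examples. */
#include <stdio.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_thread_proc.h>
#include <apr_thread_mutex.h>

static apr_thread_mutex_t *counter_mutex;  /* protects shared_counter */
static long shared_counter;

static void * APR_THREAD_FUNC worker(apr_thread_t *thd, void *data)
{
    int i;
    (void)data;
    for (i = 0; i < 100000; i++) {
        apr_thread_mutex_lock(counter_mutex);
        shared_counter++;                  /* critical section kept small */
        apr_thread_mutex_unlock(counter_mutex);
    }
    apr_thread_exit(thd, APR_SUCCESS);
    return NULL;
}

int main(void)
{
    apr_pool_t *pool;
    apr_thread_t *t1, *t2;
    apr_status_t rv;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    if (apr_thread_mutex_create(&counter_mutex, APR_THREAD_MUTEX_DEFAULT,
                                pool) != APR_SUCCESS) {
        apr_pool_destroy(pool);
        apr_terminate();
        return 1;
    }

    apr_thread_create(&t1, NULL, worker, NULL, pool);
    apr_thread_create(&t2, NULL, worker, NULL, pool);
    apr_thread_join(&rv, t1);
    apr_thread_join(&rv, t2);

    printf("counter = %ld (expected 200000)\n", shared_counter);

    apr_thread_mutex_destroy(counter_mutex);
    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}

It should build against any APR 1.x installation, typically with something like gcc demo.c $(apr-1-config --includes --link-ld), though the exact flags depend on how APR is installed.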