Example #1
File: fdqueue.c Project: AzerTyQsdF/osx
apr_status_t ap_queue_push_timer(fd_queue_t * queue, timer_event_t *te)
{
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    AP_DEBUG_ASSERT(!queue->terminated);

    APR_RING_INSERT_TAIL(&queue->timers, te, timer_event_t, link);

    apr_thread_cond_signal(queue->not_empty);

    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    return APR_SUCCESS;
}
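All of the examples in this listing build on the same small set of APR primitives. As a frame of reference, here is a minimal, self-contained sketch (hypothetical, not taken from any of the projects above) that creates a pool, a mutex and a condition variable and then runs the same lock / signal / unlock sequence as ap_queue_push_timer, checking the return value of every lock call:

#include <apr_general.h>
#include <apr_pools.h>
#include <apr_thread_mutex.h>
#include <apr_thread_cond.h>

int main(void)
{
    apr_pool_t *pool;
    apr_thread_mutex_t *mutex;
    apr_thread_cond_t *not_empty;
    apr_status_t rv;

    apr_initialize();
    apr_pool_create(&pool, NULL);

    /* Both synchronisation objects are allocated from the pool. */
    apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool);
    apr_thread_cond_create(&not_empty, pool);

    /* Same shape as ap_queue_push_timer: lock, mutate shared state,
     * wake one waiter, unlock, and propagate any lock failure. */
    if ((rv = apr_thread_mutex_lock(mutex)) != APR_SUCCESS) {
        apr_terminate();
        return 1;
    }
    /* ... update the shared structure here ... */
    apr_thread_cond_signal(not_empty);
    if ((rv = apr_thread_mutex_unlock(mutex)) != APR_SUCCESS) {
        apr_terminate();
        return 1;
    }

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}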
Example #2
static apt_bool_t apt_poller_task_wakeup_process(apt_poller_task_t *task)
{
	apt_bool_t status = TRUE;
	apt_bool_t running = TRUE;
	apt_task_msg_t *msg;

	do {
		apr_thread_mutex_lock(task->guard);
		msg = apt_cyclic_queue_pop(task->msg_queue);
		apr_thread_mutex_unlock(task->guard);
		if(msg) {
			status = apt_task_msg_process(task->base,msg);
		}
		else {
			running = FALSE;
		}
	}
	while(running == TRUE);
	return status;
}
Example #3
static apt_bool_t mrcp_client_agent_task_terminate(apt_task_t *task)
{
	apt_bool_t status = FALSE;
	mrcp_connection_agent_t *agent = apt_task_object_get(task);
	if(agent->pollset) {
		connection_task_msg_t *msg = apr_pcalloc(agent->pool,sizeof(connection_task_msg_t));
		msg->type = CONNECTION_TASK_MSG_TERMINATE;

		apr_thread_mutex_lock(agent->guard);
		status = apt_cyclic_queue_push(agent->msg_queue,msg);
		apr_thread_mutex_unlock(agent->guard);
		if(apt_pollset_wakeup(agent->pollset) == TRUE) {
			status = TRUE;
		}
		else {
			apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Failed to Signal Control Message");
		}
	}
	return status;
}
Example #4
static void ssl_dyn_lock_function(int mode, struct CRYPTO_dynlock_value *l,
                           const char *file, int line)
{
    apr_status_t rv;

    if (mode & CRYPTO_LOCK) {
        ap_log_perror(file, line, APLOG_MODULE_INDEX, APLOG_TRACE3, 0, l->pool,
                      "Acquiring mutex %s:%d", l->file, l->line);
        rv = apr_thread_mutex_lock(l->mutex);
        ap_log_perror(file, line, APLOG_MODULE_INDEX, APLOG_TRACE3, rv, l->pool,
                      "Mutex %s:%d acquired!", l->file, l->line);
    }
    else {
        ap_log_perror(file, line, APLOG_MODULE_INDEX, APLOG_TRACE3, 0, l->pool,
                      "Releasing mutex %s:%d", l->file, l->line);
        rv = apr_thread_mutex_unlock(l->mutex);
        ap_log_perror(file, line, APLOG_MODULE_INDEX, APLOG_TRACE3, rv, l->pool,
                      "Mutex %s:%d released!", l->file, l->line);
    }
}
Example #5
h2_task *h2_mplx_pop_task(h2_mplx *m, int *has_more)
{
    h2_task *task = NULL;
    apr_status_t status;
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        *has_more = 0;
        return NULL;
    }
    status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        task = h2_tq_pop_first(m->q);
        if (task) {
            h2_task_set_started(task);
        }
        *has_more = !h2_tq_empty(m->q);
        apr_thread_mutex_unlock(m->lock);
    }
    return task;
}
Example #6
apr_status_t h2_mplx_in_update_windows(h2_mplx *m, 
                                       h2_mplx_consumed_cb *cb, void *cb_ctx)
{
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }
    apr_status_t status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        update_ctx ctx = { cb, cb_ctx, 0 };
        status = APR_EAGAIN;
        h2_io_set_iter(m->stream_ios, update_window, &ctx);
        
        if (ctx.streams_updated) {
            status = APR_SUCCESS;
        }
        apr_thread_mutex_unlock(m->lock);
    }
    return status;
}
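Examples #5 and #6, like most of the h2_mplx functions further down, repeat one idiom: touch shared state only if apr_thread_mutex_lock succeeded, unlock on that same branch, and otherwise return the lock status unchanged. A hypothetical helper (not part of mod_http2) that captures the shape of that idiom:

#include <apr_thread_mutex.h>

/* Hypothetical convenience wrapper (not from mod_http2): run fn(ctx)
 * only if the mutex could be taken, always release it afterwards, and
 * surface the lock error unchanged when locking fails. */
static apr_status_t with_lock(apr_thread_mutex_t *lock,
                              apr_status_t (*fn)(void *ctx), void *ctx)
{
    apr_status_t status = apr_thread_mutex_lock(lock);
    if (APR_SUCCESS == status) {
        status = fn(ctx);
        apr_thread_mutex_unlock(lock);
    }
    return status;
}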
Example #7
File: h2_mplx.c Project: AzerTyQsdF/osx
apr_status_t h2_mplx_stream_done(h2_mplx *m, int stream_id, int rst_error)
{
    apr_status_t status;
    
    AP_DEBUG_ASSERT(m);
    status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);

        /* there should be an h2_io, once the stream has been scheduled
         * for processing, e.g. when we received all HEADERs. But when
         * a stream is cancelled very early, it will not exist. */
        if (io) {
            io_stream_done(m, io, rst_error);
        }
        
        apr_thread_mutex_unlock(m->lock);
    }
    return status;
}
Example #8
static void pipe_consumer(toolbox_t *box)
{
    char ch;
    apr_status_t rv;
    apr_size_t nbytes;
    abts_case *tc = box->tc;
    apr_file_t *out = box->data;
    apr_uint32_t consumed = 0;

    do {
        rv = apr_thread_mutex_lock(box->mutex);
        ABTS_SUCCESS(rv);

        while (!pipe_count && !exiting) {
            rv = apr_thread_cond_wait(box->cond, box->mutex);
            ABTS_SUCCESS(rv);
        }

        if (!pipe_count && exiting) {
            rv = apr_thread_mutex_unlock(box->mutex);
            ABTS_SUCCESS(rv);
            break;
        }

        pipe_count--;
        consumed++;

        rv = apr_thread_mutex_unlock(box->mutex);
        ABTS_SUCCESS(rv);

        rv = apr_file_read_full(out, &ch, 1, &nbytes);
        ABTS_SUCCESS(rv);
        ABTS_SIZE_EQUAL(tc, 1, nbytes);
        ABTS_TRUE(tc, ch == '.');
    } while (1);

    /* naive fairness test - it would be good to introduce or solidify
     * a solid test to ensure one thread is not starved.
     * ABTS_INT_EQUAL(tc, 1, !!consumed);
     */
}
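pipe_consumer above waits on box->cond until pipe_count is positive or exiting is set. Its producer counterpart is not included in this listing; the sketch below is a guess at what it could look like, reusing the toolbox_t fields, pipe_count, exiting and the ABTS_* helpers from the consumer (everything else is assumed):

/* Hypothetical producer counterpart (assumes the same toolbox_t,
 * pipe_count and ABTS_* helpers as pipe_consumer above). */
static void pipe_producer(toolbox_t *box)
{
    apr_status_t rv;
    abts_case *tc = box->tc;
    apr_file_t *in = box->data;
    int i;

    for (i = 0; i < 100; i++) {
        /* Write one byte into the pipe first ... */
        rv = apr_file_putc('.', in);
        ABTS_SUCCESS(rv);

        /* ... then publish it under the mutex and wake one consumer. */
        rv = apr_thread_mutex_lock(box->mutex);
        ABTS_SUCCESS(rv);
        pipe_count++;
        rv = apr_thread_cond_signal(box->cond);
        ABTS_SUCCESS(rv);
        rv = apr_thread_mutex_unlock(box->mutex);
        ABTS_SUCCESS(rv);
    }
}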
Example #9
/** \brief Send session management request to client stack and wait for async response */
static apt_bool_t uni_recog_sm_request_send(uni_speech_t *uni_speech, mrcp_sig_command_e sm_request)
{
	apt_bool_t res = FALSE;
	ast_log(LOG_DEBUG, "Send session request type:%d\n",sm_request);
	apr_thread_mutex_lock(uni_speech->mutex);
	uni_speech->is_sm_request = TRUE;
	uni_speech->sm_request = sm_request;
	switch(sm_request) {
		case MRCP_SIG_COMMAND_SESSION_UPDATE:
			res = mrcp_application_session_update(uni_speech->session);
			break;
		case MRCP_SIG_COMMAND_SESSION_TERMINATE:
			res = mrcp_application_session_terminate(uni_speech->session);
			break;
		case MRCP_SIG_COMMAND_CHANNEL_ADD:
			res = mrcp_application_channel_add(uni_speech->session,uni_speech->channel);
			break;
		case MRCP_SIG_COMMAND_CHANNEL_REMOVE:
			res = mrcp_application_channel_remove(uni_speech->session,uni_speech->channel);
			break;
		case MRCP_SIG_COMMAND_RESOURCE_DISCOVER:
			res = mrcp_application_resource_discover(uni_speech->session);
			break;
		default:
			break;
	}

	if(res == TRUE) {
		/* Wait for session response */
		ast_log(LOG_DEBUG, "Wait for session response\n");
		if(apr_thread_cond_timedwait(uni_speech->wait_object,uni_speech->mutex,MRCP_APP_REQUEST_TIMEOUT) != APR_SUCCESS) {
		    ast_log(LOG_ERROR, "Failed to get response, request timed out\n");
		    uni_speech->sm_response = MRCP_SIG_STATUS_CODE_FAILURE;
		}
		ast_log(LOG_DEBUG, "Waked up, status code: %d\n",uni_speech->sm_response);
	}
	
	uni_speech->is_sm_request = FALSE;
	apr_thread_mutex_unlock(uni_speech->mutex);
	return res;
}
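uni_recog_sm_request_send blocks in apr_thread_cond_timedwait until the asynchronous response arrives or MRCP_APP_REQUEST_TIMEOUT expires. For that to work, the response path must store the status code and signal wait_object while holding the same mutex. A hedged sketch of such a handler (the function name and the response plumbing are assumptions; only the uni_speech fields come from the example above):

/* Hypothetical response handler: wakes the thread blocked in
 * uni_recog_sm_request_send. Field names follow the example above;
 * everything else is illustrative. */
static void uni_recog_sm_response_signal(uni_speech_t *uni_speech,
                                         mrcp_sig_status_code_e status)
{
	apr_thread_mutex_lock(uni_speech->mutex);
	if(uni_speech->is_sm_request == TRUE) {
		/* Publish the result, then signal while the mutex is
		 * still held so the wakeup cannot be lost. */
		uni_speech->sm_response = status;
		apr_thread_cond_signal(uni_speech->wait_object);
	}
	apr_thread_mutex_unlock(uni_speech->mutex);
}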
Example #10
static apt_bool_t mrcp_sofia_session_terminate_request(mrcp_session_t *session)
{
	mrcp_sofia_session_t *sofia_session = session->obj;
	if(!sofia_session) {
		return FALSE;
	}

	sofia_session->terminate_requested = FALSE;
	apr_thread_mutex_lock(sofia_session->mutex);
	if(sofia_session->nh) {
		sofia_session->terminate_requested = TRUE;
		nua_bye(sofia_session->nh,TAG_END());
	}
	apr_thread_mutex_unlock(sofia_session->mutex);

	if(sofia_session->terminate_requested == FALSE) {
		mrcp_sofia_session_destroy(sofia_session);
		mrcp_session_terminate_response(session);
	}
	return TRUE;
}
Example #11
void release_pigeon_hole(tbx_ph_t *ph, int slot)
{
    apr_thread_mutex_lock(ph->lock);
    log_printf(15, "release_pigeon_hole: ph=%s nholes=%d start nused=%d slot=%d\n", ph->name, ph->nholes, ph->nused, slot);

    //** Check for valid range
    if ((slot<0) || (slot>=ph->nholes)) {
        log_printf(15, "release_pigeon_hole: ERROR ph=%p slot=%d is invalid\n", ph, slot);
        apr_thread_mutex_unlock(ph->lock);
        return;
    }

    if (ph->hole[slot] == 1) {
        ph->hole[slot] = 0;
        ph->nused--;
    } else {
        log_printf(15, "release_pigeon_hole: ERROR ph=%s nholes=%d nused=%d slot=%d is NOT USED!!!\n", ph->name, ph->nholes, ph->nused, slot);
//abort();
    }
    apr_thread_mutex_unlock(ph->lock);
}
Example #12
void *ds_ibp_cap_auto_warm(data_service_fn_t *arg, data_cap_set_t *dcs)
{
    ds_ibp_priv_t *ds = (ds_ibp_priv_t *)arg->priv;
    ibp_capset_t *cs = (ibp_capset_t *)dcs;
    ibp_capset_t *w;

    log_printf(15, "Adding to auto warm cap: %s\n", cs->manageCap);

    //** Make the new cap
    {w = new_ibp_capset(); assert(w != NULL); }
    if (cs->readCap) w->readCap = strdup(cs->readCap);
    if (cs->writeCap) w->writeCap = strdup(cs->writeCap);
    if (cs->manageCap) w->manageCap = strdup(cs->manageCap);

    //** Add it to the warming list
    apr_thread_mutex_lock(ds->lock);
    apr_hash_set(ds->warm_table, w->manageCap, APR_HASH_KEY_STRING, w);
    apr_thread_mutex_unlock(ds->lock);

    return(w);
}
Example #13
apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
                                 apr_thread_cond_t *iowait)
{
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }
    apr_status_t status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        m->added_output = iowait;
        status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
        if (APLOGctrace2(m->c)) {
            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c,
                          "h2_mplx(%ld): trywait on data for %f ms)",
                          m->id, timeout/1000.0);
        }
        m->added_output = NULL;
        apr_thread_mutex_unlock(m->lock);
    }
    return status;
}
Example #14
static void mpf_suite_session_destroy(mpf_suite_agent_t *agent, mpf_suite_session_t* session)
{
	apt_log(APT_LOG_MARK,APT_PRIO_INFO,"Destroy MPF Context");
	mpf_engine_context_destroy(session->context);
	session->context = NULL;

	if(agent->rx_session == session) {
		agent->rx_session = NULL;
	}
	else if(agent->tx_session == session) {
		agent->tx_session = NULL;
	}

	apr_pool_destroy(session->pool);

	if(!agent->tx_session && !agent->rx_session) {
		apr_thread_mutex_lock(agent->wait_object_mutex);
		apr_thread_cond_signal(agent->wait_object);
		apr_thread_mutex_unlock(agent->wait_object_mutex);
	}
}
Example #15
static void demo_exchange_initialise (void)
{

    //  Test for already active before applying any locks; avoids deadlock in
    //  some classes
    if (!s_demo_exchange_active) {

#if (defined (BASE_THREADSAFE))
        //  First make sure the object mutex has been created
        if (!icl_global_mutex) {
            icl_system_panic ("icl_init", "iCL not initialised - call icl_system_initialise()\n");
            abort ();
        }
        apr_thread_mutex_lock (icl_global_mutex);
        if (!s_demo_exchange_mutex)
            s_demo_exchange_mutex = icl_mutex_new ();
        apr_thread_mutex_unlock (icl_global_mutex);

        //  Now lock the object mutex
        icl_mutex_lock   (s_demo_exchange_mutex);

        //  Test again for already active now that we hold the lock
        if (!s_demo_exchange_active) {
#endif
            //  Register the class termination call-back functions
            icl_system_register (NULL, self_terminate);

            demo_exchange_agent_init ();

            s_demo_exchange_table = demo_exchange_table_new ();
            s_demo_exchange_active = TRUE;
#if (defined (BASE_THREADSAFE))
        }
        icl_mutex_unlock (s_demo_exchange_mutex);
#endif

    }
}
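Example #15 is double-checked initialisation: the active flag is tested once without the lock as a cheap fast path, and again after taking the class mutex, so only one thread ever runs the one-time setup. A stripped-down sketch of the same pattern with plain APR calls (all names below are hypothetical):

#include <apr_thread_mutex.h>

/* Stripped-down double-checked initialisation (hypothetical names).
 * s_global_mutex is assumed to have been created during startup; as in
 * the original, the unlocked fast-path read is a plain int test. */
static int s_active = 0;

static void do_one_time_setup(void)
{
    /* ... real one-time setup work goes here ... */
}

static void safe_initialise(apr_thread_mutex_t *s_global_mutex)
{
    if (!s_active) {                        /* fast path, no lock taken */
        apr_thread_mutex_lock(s_global_mutex);
        if (!s_active) {                    /* re-check under the lock  */
            do_one_time_setup();
            s_active = 1;
        }
        apr_thread_mutex_unlock(s_global_mutex);
    }
}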
Example #16
/**
 * Retrieves the next available socket from the queue. If there are no
 * sockets available, it will block until one becomes available.
 * Once retrieved, the socket is placed into the address specified by
 * 'sd'.
 */
apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p)
{
    fd_queue_elem_t *elem;
    apr_status_t rv;

    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
        return rv;
    }

    /* Keep waiting until we wake up and find that the queue is not empty. */
    if (ap_queue_empty(queue)) {
        if (!queue->terminated) {
            apr_thread_cond_wait(queue->not_empty, queue->one_big_mutex);
        }
        /* If we wake up and it's still empty, then we were interrupted */
        if (ap_queue_empty(queue)) {
            rv = apr_thread_mutex_unlock(queue->one_big_mutex);
            if (rv != APR_SUCCESS) {
                return rv;
            }
            if (queue->terminated) {
                return APR_EOF; /* no more elements ever again */
            }
            else {
                return APR_EINTR;
            }
        }
    } 

    elem = &queue->data[--queue->nelts];
    *sd = elem->sd;
    *p = elem->p;
#ifdef AP_DEBUG
    elem->sd = NULL;
    elem->p = NULL;
#endif /* AP_DEBUG */

    rv = apr_thread_mutex_unlock(queue->one_big_mutex);
    return rv;
}
Example #17
MPF_DECLARE(apt_bool_t) mpf_dtmf_generator_enqueue(
								struct mpf_dtmf_generator_t *generator,
								const char *digits)
{
	apr_size_t qlen, dlen;
	apt_bool_t ret;

	dlen = strlen(digits);
	apr_thread_mutex_lock(generator->mutex);
	qlen = strlen(generator->queue);
	if (qlen + dlen > MPF_DTMFGEN_QUEUE_LEN) {
		ret = FALSE;
		apt_log(APT_LOG_MARK, APT_PRIO_WARNING, "DTMF queue too short (%d), "
			"cannot add %"APR_SIZE_T_FMT" digit%s, already has %"APR_SIZE_T_FMT, MPF_DTMFGEN_QUEUE_LEN,
			dlen, dlen > 1 ? "s" : "", qlen);
	} else {
		strcpy(generator->queue + qlen, digits);
		ret = TRUE;
	}
	apr_thread_mutex_unlock(generator->mutex);
	return ret;
}
Example #18
File: controller.c Project: Abioy/mpaxos
txn_info_t *attach_txn_info(roundid_t **rids, size_t sz_rids, 
    mpaxos_req_t *req) {

    txn_info_t *info = NULL;
    txn_info_create(&info, rids, sz_rids, req);
    
    for (size_t i = 0; i < sz_rids; i++) {
        group_info_t *ginfo = NULL;
        group_info_create(&ginfo, info, rids[i]);
        // add group to round
        apr_hash_set(info->ht_ginfo, &ginfo->gid, sizeof(groupid_t), ginfo);
    }

    apr_thread_mutex_lock(mx_txn_info_);
    // txn_info_t *info = apr_hash_get(ht_txn_info_, &(req->id), sizeof(roundid_t));
    // it should be a new round.
    // SAFE_ASSERT(info == NULL);
    // attach txn
    apr_hash_set(ht_txn_info_, &info->tid, sizeof(txnid_t), info);
    apr_thread_mutex_unlock(mx_txn_info_);
    return info;
}
Example #19
void slayer_server_log_add_entry(slayer_server_log_manager_t *manager, apr_pool_t *mpool,
                                  const char *client_ip,apr_int64_t rtime,
                                  const char *request_line,int response_code,
                                  int nbytes_sent, apr_int64_t time_toservice ) {

	json_value *container = json_object_create(mpool);
	json_object_add(container,"client_ip",json_string_create(mpool,client_ip));
	json_object_add(container,"request_time",json_long_create(mpool,rtime / (1000*1000)));
	json_object_add(container,"request",json_string_create(mpool,request_line));
	json_object_add(container,"response_code",json_long_create(mpool,response_code));
	json_object_add(container,"bytes_sent",json_long_create(mpool,nbytes_sent));
	json_object_add(container,"time_toservice",json_long_create(mpool,time_toservice));
	char *json_entry = strdup(json_serialize(mpool,container)); //we want our own copy of this data

	//smallest chunk in the mutex
	apr_thread_mutex_lock(manager->list_mutex);
	manager->offset++;
	if (manager->offset == manager->nentries)  manager->offset = 0;
	free(manager->entries[manager->offset].json_view);
	manager->entries[manager->offset].json_view = json_entry;
	apr_thread_mutex_unlock(manager->list_mutex);
}
Example #20
File: h2_mplx.c Project: AzerTyQsdF/osx
apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_response *response,
                              ap_filter_t* f, apr_bucket_brigade *bb,
                              struct apr_thread_cond_t *iowait)
{
    apr_status_t status;
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }
    status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        status = out_open(m, stream_id, response, f, bb, iowait);
        if (APLOGctrace1(m->c)) {
            h2_util_bb_log(m->c, stream_id, APLOG_TRACE1, "h2_mplx_out_open", bb);
        }
        if (m->aborted) {
            /* do not leak the lock when bailing out */
            apr_thread_mutex_unlock(m->lock);
            return APR_ECONNABORTED;
        }
        apr_thread_mutex_unlock(m->lock);
    }
    return status;
}
Example #21
void *lookup_service(service_manager_t *sm, char *service_section, char *service_name)
{
    void *s;
    apr_hash_t *section;

    apr_thread_mutex_lock(sm->lock);

    section = apr_hash_get(sm->table, service_section, APR_HASH_KEY_STRING);
    if (section == NULL) {  //** New section so create the table and insert it
        log_printf(10, "No matching section for section=%s name=%s\n", service_section, service_name);
        apr_thread_mutex_unlock(sm->lock);
        return(NULL);
    }

    s = apr_hash_get(section, service_name, APR_HASH_KEY_STRING);
    if (s == NULL) {
        log_printf(10, "No matching object for section=%s name=%s\n", service_section, service_name);
    }
    apr_thread_mutex_unlock(sm->lock);

    return(s);
}
Example #22
service_manager_t *clone_service_manager(service_manager_t *sm)
{
    apr_ssize_t klen;
    service_manager_t *clone;
    apr_hash_index_t *his;
    apr_hash_t *section, *clone_section;
    char *key;

    //** Make an empty SM
    clone = create_service_manager(sm);

    //** Now cycle through all the tables and copy them
    apr_thread_mutex_lock(sm->lock);
    for (his = apr_hash_first(NULL, sm->table); his != NULL; his = apr_hash_next(his)) {
        apr_hash_this(his, (const void **)&key, &klen, (void **)&section);
        clone_section = apr_hash_copy(clone->pool, section);
        apr_hash_set(clone->table, apr_pstrdup(clone->pool, key), APR_HASH_KEY_STRING, clone_section);
    }
    apr_thread_mutex_unlock(sm->lock);

    return(clone);
}
Example #23
File: sc.c Project: ryandietrich/MoarVM
/* Creates a new serialization context with the specified handle. If any
 * compilation units are waiting for an SC with this handle, removes it from
 * their to-resolve list after installing itself in the appropriate slot. */
MVMObject * MVM_sc_create(MVMThreadContext *tc, MVMString *handle) {
    MVMObject   *sc;
    MVMCompUnit *cur_cu;

    /* Allocate. */
    MVMROOT(tc, handle, {
        sc = REPR(tc->instance->SCRef)->allocate(tc, STABLE(tc->instance->SCRef));
        MVMROOT(tc, sc, {
            REPR(sc)->initialize(tc, STABLE(sc), sc, OBJECT_BODY(sc));

            /* Set handle. */
            MVM_ASSIGN_REF(tc, sc, ((MVMSerializationContext *)sc)->body->handle, handle);

            /* Add to weak lookup hash. */
            if (apr_thread_mutex_lock(tc->instance->mutex_sc_weakhash) != APR_SUCCESS)
                MVM_exception_throw_adhoc(tc, "Unable to lock SC weakhash");
            MVM_string_flatten(tc, handle);
            MVM_HASH_BIND(tc, tc->instance->sc_weakhash, handle, ((MVMSerializationContext *)sc)->body);
            if (apr_thread_mutex_unlock(tc->instance->mutex_sc_weakhash) != APR_SUCCESS)
                MVM_exception_throw_adhoc(tc, "Unable to unlock SC weakhash");

            /* Visit compilation units that need this SC and resolve it. */
            cur_cu = tc->instance->head_compunit;
            while (cur_cu) {
                if (cur_cu->scs_to_resolve) {
                    MVMuint32 i;
                    for (i = 0; i < cur_cu->num_scs; i++) {
                        MVMString *res = cur_cu->scs_to_resolve[i];
                        if (res && MVM_string_equal(tc, res, handle)) {
                            cur_cu->scs[i] = (MVMSerializationContext *)sc;
                            cur_cu->scs_to_resolve[i] = NULL;
                            break;
                        }
                    }
                }
                cur_cu = cur_cu->next_compunit;
            }
        });
    });

    return sc;
}
Example #24
void ds_ibp_destroy(data_service_fn_t *dsf)
{
    ds_ibp_priv_t *ds = (ds_ibp_priv_t *)dsf->priv;
    apr_status_t value;

    //** Wait for the warmer thread to complete
    apr_thread_mutex_lock(ds->lock);
    ds->warm_stop = 1;
    apr_thread_cond_signal(ds->cond);
    apr_thread_mutex_unlock(ds->lock);
    apr_thread_join(&value, ds->thread);  //** Wait for it to complete

    //** Now we can clean up
    apr_thread_mutex_destroy(ds->lock);
    apr_thread_cond_destroy(ds->cond);
    apr_pool_destroy(ds->pool);

    ibp_destroy_context(ds->ic);

    free(ds);
    free(dsf);
}
Example #25
apr_status_t h2_mplx_in_close(h2_mplx *m, int stream_id)
{
    AP_DEBUG_ASSERT(m);
    if (m->aborted) {
        return APR_ECONNABORTED;
    }
    apr_status_t status = apr_thread_mutex_lock(m->lock);
    if (APR_SUCCESS == status) {
        h2_io *io = h2_io_set_get(m->stream_ios, stream_id);
        if (io) {
            status = h2_io_in_close(io);
            if (io->input_arrived) {
                apr_thread_cond_signal(io->input_arrived);
            }
        }
        else {
            status = APR_ECONNABORTED;
        }
        apr_thread_mutex_unlock(m->lock);
    }
    return status;
}
Example #26
static apt_bool_t mrcp_sofia_session_offer(mrcp_session_t *session, mrcp_session_descriptor_t *descriptor)
{
	char sdp_str[2048];
	const char *local_sdp_str = NULL;
	apt_bool_t res = FALSE;
	mrcp_sofia_session_t *sofia_session = session->obj;
	if(!sofia_session) {
		return FALSE;
	}

	if(session->signaling_agent) {
		mrcp_sofia_agent_t *sofia_agent = mrcp_sofia_agent_get(session);
		if(sofia_agent) {
			if(sofia_agent->config->origin) {
				apt_string_set(&descriptor->origin,sofia_agent->config->origin);
			}
		}
	}
	if(sdp_string_generate_by_mrcp_descriptor(sdp_str,sizeof(sdp_str),descriptor,TRUE) > 0) {
		local_sdp_str = sdp_str;
		sofia_session->descriptor = descriptor;
		apt_obj_log(APT_LOG_MARK,APT_PRIO_INFO,session->log_obj,"Local SDP "APT_NAMESID_FMT"\n%s", 
			session->name,
			MRCP_SESSION_SID(session), 
			local_sdp_str);
	}

	apr_thread_mutex_lock(sofia_session->mutex);

	if(sofia_session->nh) {
		res = TRUE;
		nua_invite(sofia_session->nh,
				TAG_IF(local_sdp_str,SOATAG_USER_SDP_STR(local_sdp_str)),
				TAG_END());
	}

	apr_thread_mutex_unlock(sofia_session->mutex);
	return res;
}
Example #27
/* Write synthesized speech / speech to be recognized. */
int speech_channel_write(speech_channel_t *schannel, void *data, apr_size_t *len)
{
	int status = 0;

	if ((schannel != NULL) && (*len > 0)) {
#if SPEECH_CHANNEL_TRACE
		apr_size_t req_len = *len;
#endif

#if SPEECH_CHANNEL_DUMP
		if(schannel->stream_in) {
			fwrite(data, 1, *len, schannel->stream_in);
		}
#endif
		if (schannel->mutex != NULL)
			apr_thread_mutex_lock(schannel->mutex);

		audio_queue_t *queue = schannel->audio_queue;

		if (schannel->state == SPEECH_CHANNEL_PROCESSING)
			status = audio_queue_write(queue, data, len);
		else
			status = -1;

		if (schannel->mutex != NULL)
			apr_thread_mutex_unlock(schannel->mutex);

#if SPEECH_CHANNEL_TRACE
		ast_log(LOG_DEBUG, "(%s) channel_write() status=%d req=%"APR_SIZE_T_FMT" written=%"APR_SIZE_T_FMT"\n", 
				schannel->name, status, req_len, *len);
#endif

	} else {
		ast_log(LOG_ERROR, "Speech channel structure pointer is NULL\n");
		return -1;
	}

	return status;
}
Example #28
extern apr_status_t napr_threadpool_add(napr_threadpool_t *threadpool, void *data)
{
    char errbuf[128];
    apr_status_t status;

    if (APR_SUCCESS != (status = apr_thread_mutex_lock(threadpool->threadpool_mutex))) {
	DEBUG_ERR("error calling apr_thread_mutex_lock: %s", apr_strerror(status, errbuf, 128));
	return status;
    }
    threadpool->ended &= 0x0;
    napr_list_enqueue(threadpool->list, data);
    if (APR_SUCCESS != (status = apr_thread_mutex_unlock(threadpool->threadpool_mutex))) {
	DEBUG_ERR("error calling apr_thread_mutex_unlock: %s", apr_strerror(status, errbuf, 128));
	return status;
    }
    if (APR_SUCCESS != (status = apr_thread_cond_signal(threadpool->threadpool_update))) {
	DEBUG_ERR("error calling apr_thread_cond_signal: %s", apr_strerror(status, errbuf, 128));
	return status;
    }

    return APR_SUCCESS;
}
Example #29
apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
                                          apr_pool_t **recycled_pool)
{
    apr_status_t rv;
    *recycled_pool = NULL;
    rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    AP_DEBUG_ASSERT(queue_info->idlers >= 0);
    while ((queue_info->idlers == 0) && (!queue_info->terminated)) {
        rv = apr_thread_cond_wait(queue_info->wait_for_idler,
                                  queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            apr_status_t rv2;
            rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
            if (rv2 != APR_SUCCESS) {
                return rv2;
            }
            return rv;
        }
    }
    queue_info->idlers--; /* Oh, and idler? Let's take 'em! */
    if (queue_info->num_recycled) {
        *recycled_pool =
            queue_info->recycled_pools[--queue_info->num_recycled];
    }
    rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    else if (queue_info->terminated) {
        return APR_EOF;
    }
    else {
        return APR_SUCCESS;
    }
}
Example #30
/** Start message processing loop */
MRCP_DECLARE(apt_bool_t) mrcp_client_start(mrcp_client_t *client)
{
	apt_bool_t sync_start = TRUE;
	apt_task_t *task;
	if(!client || !client->task) {
		apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Invalid Client");
		return FALSE;
	}
	task = apt_consumer_task_base_get(client->task);

	if(client->on_start_complete) {
		sync_start = FALSE;
	}

	if(sync_start == TRUE) {
		/* get prepared to start stack synchronously */
		apr_thread_mutex_create(&client->sync_start_mutex,APR_THREAD_MUTEX_DEFAULT,client->pool);
		apr_thread_cond_create(&client->sync_start_object,client->pool);
		
		apr_thread_mutex_lock(client->sync_start_mutex);
	}

	if(apt_task_start(task) == FALSE) {
		if(sync_start == TRUE) {
			apr_thread_mutex_unlock(client->sync_start_mutex);
		}
		apt_log(APT_LOG_MARK,APT_PRIO_WARNING,"Failed to Start Client Task");
		return FALSE;
	}
	
	if(sync_start == TRUE) {
		/* wait for start complete */
		apr_thread_cond_wait(client->sync_start_object,client->sync_start_mutex);
		apr_thread_mutex_unlock(client->sync_start_mutex);
	}

	return TRUE;
}
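When no on_start_complete callback is installed, mrcp_client_start parks on sync_start_object until the stack has finished starting. The other half of that handshake has to signal the condition while holding sync_start_mutex; a hedged sketch of what such a completion hook could look like (the function name is hypothetical, the fields are the ones used above):

/* Hypothetical completion hook: wakes mrcp_client_start when the task
 * has finished starting. Field names follow the example above. */
static void mrcp_client_sync_start_complete(mrcp_client_t *client)
{
	if(client->sync_start_object) {
		apr_thread_mutex_lock(client->sync_start_mutex);
		apr_thread_cond_signal(client->sync_start_object);
		apr_thread_mutex_unlock(client->sync_start_mutex);
	}
}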