/** \brief Dispatch an MRCP message received on the recognizer channel.
 *
 * Responses are forwarded to the thread blocked in the request/response
 * exchange; events drive the Asterisk speech-state machine. Always
 * returns TRUE to keep the MRCP application callback chain running.
 */
static apt_bool_t on_message_receive(mrcp_application_t *application, mrcp_session_t *session, mrcp_channel_t *channel, mrcp_message_t *message)
{
	uni_speech_t *speech = mrcp_application_channel_object_get(channel);

	ast_log(LOG_DEBUG, "On message receive\n");

	switch(message->start_line.message_type) {
	case MRCP_MESSAGE_TYPE_RESPONSE:
		/* Wake up whoever is waiting on this request's response */
		return uni_recog_mrcp_response_signal(speech,message);
	case MRCP_MESSAGE_TYPE_EVENT:
		switch(message->start_line.method_id) {
		case RECOGNIZER_RECOGNITION_COMPLETE:
			speech->is_inprogress = FALSE;
			if (speech->speech_base->state == AST_SPEECH_STATE_NOT_READY) {
				/* Completion arrived while not ready: discard the event */
				speech->mrcp_event = NULL;
				ast_speech_change_state(speech->speech_base,AST_SPEECH_STATE_NOT_READY);
			}
			else {
				/* Stash the event so results can be extracted later */
				speech->mrcp_event = message;
				ast_speech_change_state(speech->speech_base,AST_SPEECH_STATE_DONE);
			}
			break;
		case RECOGNIZER_START_OF_INPUT:
			/* Caller began speaking: ask the core to quiet any prompt */
			ast_set_flag(speech->speech_base,AST_SPEECH_QUIET);
			break;
		default:
			/* Other recognizer events are intentionally ignored */
			break;
		}
		break;
	default:
		break;
	}
	return TRUE;
}
/*! \brief Stop the in-progress recognition */ static int uni_recog_stop(struct ast_speech *speech) { uni_speech_t *uni_speech = speech->data; mrcp_message_t *mrcp_message; if(!uni_speech->is_inprogress) { return 0; } ast_log(LOG_NOTICE, "Stop recognition '%s'\n",uni_speech_id_get(uni_speech)); mrcp_message = mrcp_application_message_create( uni_speech->session, uni_speech->channel, RECOGNIZER_STOP); if(!mrcp_message) { ast_log(LOG_WARNING, "Failed to create MRCP message\n"); return -1; } /* Reset last event (if any) */ uni_speech->mrcp_event = NULL; /* Send MRCP request and wait for response */ if(uni_recog_mrcp_request_send(uni_speech,mrcp_message) != TRUE) { ast_log(LOG_WARNING, "Failed to send MRCP message\n"); return -1; } /* Check received response */ if(!uni_speech->mrcp_response || uni_speech->mrcp_response->start_line.status_code != MRCP_STATUS_CODE_SUCCESS) { ast_log(LOG_WARNING, "Received failure response\n"); return -1; } /* Reset media buffer */ mpf_frame_buffer_restart(uni_speech->media_buffer); ast_speech_change_state(speech, AST_SPEECH_STATE_NOT_READY); uni_speech->is_inprogress = FALSE; return 0; }
/** \brief Received MRCP message.
 *
 * Callback invoked by the MRCP client stack for every message arriving
 * on the recognizer channel. Responses are signalled to the waiting
 * request sender; RECOGNITION-COMPLETE and START-OF-INPUT events update
 * the Asterisk speech-state machine. Always returns TRUE.
 */
static apt_bool_t on_message_receive(mrcp_application_t *application, mrcp_session_t *session, mrcp_channel_t *channel, mrcp_message_t *message)
{
	uni_speech_t *uni_speech = mrcp_application_channel_object_get(channel);

	if(message->start_line.message_type == MRCP_MESSAGE_TYPE_RESPONSE) {
		ast_log(LOG_DEBUG, "(%s) Received MRCP response method-id: %d status-code: %d req-state: %d\n",
			uni_speech->name,
			(int)message->start_line.method_id,
			message->start_line.status_code,
			(int)message->start_line.request_state);
		/* Unblock the thread waiting in the request/response exchange */
		return uni_recog_mrcp_response_signal(uni_speech,message);
	}

	if(message->start_line.message_type == MRCP_MESSAGE_TYPE_EVENT) {
		if(message->start_line.method_id == RECOGNIZER_RECOGNITION_COMPLETE) {
			ast_log(LOG_DEBUG, "(%s) Recognition complete req-state: %d\n",
				uni_speech->name,
				(int)message->start_line.request_state);
			uni_speech->is_inprogress = FALSE;
			if (uni_speech->speech_base->state != AST_SPEECH_STATE_NOT_READY) {
				/* Keep the event so results can be pulled from it later */
				uni_speech->mrcp_event = message;
				ast_speech_change_state(uni_speech->speech_base,AST_SPEECH_STATE_DONE);
			}
			else {
				/* Completion arrived while not ready; event is dropped */
				ast_log(LOG_DEBUG, "(%s) Unexpected RECOGNITION-COMPLETE event\n",uni_speech->name);
			}
		}
		else if(message->start_line.method_id == RECOGNIZER_START_OF_INPUT) {
			ast_log(LOG_DEBUG, "(%s) Start of input\n",uni_speech->name);
			/* Caller began speaking: quiet any prompt and mark that speech occurred */
			ast_set_flag(uni_speech->speech_base, AST_SPEECH_QUIET | AST_SPEECH_SPOKE);
		}
		else {
			/* Other recognizer events are logged but otherwise ignored */
			ast_log(LOG_DEBUG, "(%s) Received unhandled MRCP event id: %d req-state: %d\n",
				uni_speech->name,
				(int)message->start_line.method_id,
				(int)message->start_line.request_state);
		}
	}
	return TRUE;
}
/*! \brief Stop the in-progress recognition */ static int uni_recog_stop(struct ast_speech *speech) { uni_speech_t *uni_speech = speech->data; mrcp_message_t *mrcp_message; if(!uni_speech->is_inprogress) { return 0; } ast_log(LOG_NOTICE, "(%s) Stop recognition\n",uni_speech->name); mrcp_message = mrcp_application_message_create( uni_speech->session, uni_speech->channel, RECOGNIZER_STOP); if(!mrcp_message) { ast_log(LOG_WARNING, "(%s) Failed to create MRCP message\n",uni_speech->name); return -1; } /* Reset last event (if any) */ uni_speech->mrcp_event = NULL; /* Send MRCP request and wait for response */ if(uni_recog_mrcp_request_send(uni_speech,mrcp_message) != TRUE) { ast_log(LOG_WARNING, "(%s) Failed to stop recognition\n",uni_speech->name); return -1; } /* Reset media buffer */ mpf_frame_buffer_restart(uni_speech->media_buffer); ast_speech_change_state(speech, AST_SPEECH_STATE_NOT_READY); uni_speech->is_inprogress = FALSE; return 0; }
/*! \brief SpeechBackground(Sound File|Timeout) Dialplan Application */ static int speech_background(struct ast_channel *chan, void *data) { unsigned int timeout = 0; int res = 0, done = 0, argc = 0, started = 0, quieted = 0, max_dtmf_len = 0; struct ast_module_user *u = NULL; struct ast_speech *speech = find_speech(chan); struct ast_frame *f = NULL; int oldreadformat = AST_FORMAT_SLINEAR; char dtmf[AST_MAX_EXTENSION] = ""; time_t start, current; struct ast_datastore *datastore = NULL; char *argv[2], *args = NULL, *filename_tmp = NULL, *filename = NULL, tmp[2] = "", dtmf_terminator = '#'; const char *tmp2 = NULL; args = ast_strdupa(data); u = ast_module_user_add(chan); if (speech == NULL) { ast_module_user_remove(u); return -1; } /* If channel is not already answered, then answer it */ if (chan->_state != AST_STATE_UP && ast_answer(chan)) { ast_module_user_remove(u); return -1; } /* Record old read format */ oldreadformat = chan->readformat; /* Change read format to be signed linear */ if (ast_set_read_format(chan, AST_FORMAT_SLINEAR)) { ast_module_user_remove(u); return -1; } /* Parse out options */ argc = ast_app_separate_args(args, '|', argv, sizeof(argv) / sizeof(argv[0])); if (argc > 0) { /* Yay sound file */ filename_tmp = ast_strdupa(argv[0]); if (!ast_strlen_zero(argv[1])) { if ((timeout = atoi(argv[1])) == 0) timeout = -1; } else timeout = 0; } /* See if the maximum DTMF length variable is set... we use a variable in case they want to carry it through their entire dialplan */ if ((tmp2 = pbx_builtin_getvar_helper(chan, "SPEECH_DTMF_MAXLEN")) && !ast_strlen_zero(tmp2)) max_dtmf_len = atoi(tmp2); /* See if a terminator is specified */ if ((tmp2 = pbx_builtin_getvar_helper(chan, "SPEECH_DTMF_TERMINATOR"))) { if (ast_strlen_zero(tmp2)) dtmf_terminator = '\0'; else dtmf_terminator = tmp2[0]; } /* Before we go into waiting for stuff... 
make sure the structure is ready, if not - start it again */ if (speech->state == AST_SPEECH_STATE_NOT_READY || speech->state == AST_SPEECH_STATE_DONE) { ast_speech_change_state(speech, AST_SPEECH_STATE_NOT_READY); ast_speech_start(speech); } /* Ensure no streams are currently running */ ast_stopstream(chan); /* Okay it's streaming so go into a loop grabbing frames! */ while (done == 0) { /* If the filename is null and stream is not running, start up a new sound file */ if (!quieted && (chan->streamid == -1 && chan->timingfunc == NULL) && (filename = strsep(&filename_tmp, "&"))) { /* Discard old stream information */ ast_stopstream(chan); /* Start new stream */ speech_streamfile(chan, filename, chan->language); } /* Run scheduled stuff */ ast_sched_runq(chan->sched); /* Yay scheduling */ res = ast_sched_wait(chan->sched); if (res < 0) { res = 1000; } /* If there is a frame waiting, get it - if not - oh well */ if (ast_waitfor(chan, res) > 0) { f = ast_read(chan); if (f == NULL) { /* The channel has hung up most likely */ done = 3; break; } } /* Do timeout check (shared between audio/dtmf) */ if ((!quieted || strlen(dtmf)) && started == 1) { time(¤t); if ((current-start) >= timeout) { done = 1; if (f) ast_frfree(f); break; } } /* Do checks on speech structure to see if it's changed */ ast_mutex_lock(&speech->lock); if (ast_test_flag(speech, AST_SPEECH_QUIET)) { if (chan->stream) ast_stopstream(chan); ast_clear_flag(speech, AST_SPEECH_QUIET); quieted = 1; } /* Check state so we can see what to do */ switch (speech->state) { case AST_SPEECH_STATE_READY: /* If audio playback has stopped do a check for timeout purposes */ if (chan->streamid == -1 && chan->timingfunc == NULL) ast_stopstream(chan); if (!quieted && chan->stream == NULL && timeout && started == 0 && !filename_tmp) { if (timeout == -1) { done = 1; if (f) ast_frfree(f); break; } time(&start); started = 1; } /* Write audio frame out to speech engine if no DTMF has been received */ if (!strlen(dtmf) && f != 
NULL && f->frametype == AST_FRAME_VOICE) { ast_speech_write(speech, f->data, f->datalen); } break; case AST_SPEECH_STATE_WAIT: /* Cue up waiting sound if not already playing */ if (!strlen(dtmf)) { if (chan->stream == NULL) { if (speech->processing_sound != NULL) { if (strlen(speech->processing_sound) > 0 && strcasecmp(speech->processing_sound,"none")) { speech_streamfile(chan, speech->processing_sound, chan->language); } } } else if (chan->streamid == -1 && chan->timingfunc == NULL) { ast_stopstream(chan); if (speech->processing_sound != NULL) { if (strlen(speech->processing_sound) > 0 && strcasecmp(speech->processing_sound,"none")) { speech_streamfile(chan, speech->processing_sound, chan->language); } } } } break; case AST_SPEECH_STATE_DONE: /* Now that we are done... let's switch back to not ready state */ ast_speech_change_state(speech, AST_SPEECH_STATE_NOT_READY); if (!strlen(dtmf)) { /* Copy to speech structure the results, if available */ speech->results = ast_speech_results_get(speech); /* Break out of our background too */ done = 1; /* Stop audio playback */ if (chan->stream != NULL) { ast_stopstream(chan); } } break; default: break; } ast_mutex_unlock(&speech->lock); /* Deal with other frame types */ if (f != NULL) { /* Free the frame we received */ switch (f->frametype) { case AST_FRAME_DTMF: if (dtmf_terminator != '\0' && f->subclass == dtmf_terminator) { done = 1; } else { if (chan->stream != NULL) { ast_stopstream(chan); } if (!started) { /* Change timeout to be 5 seconds for DTMF input */ timeout = (chan->pbx && chan->pbx->dtimeout) ? 
chan->pbx->dtimeout : 5; started = 1; } time(&start); snprintf(tmp, sizeof(tmp), "%c", f->subclass); strncat(dtmf, tmp, sizeof(dtmf) - strlen(dtmf) - 1); /* If the maximum length of the DTMF has been reached, stop now */ if (max_dtmf_len && strlen(dtmf) == max_dtmf_len) done = 1; } break; case AST_FRAME_CONTROL: switch (f->subclass) { case AST_CONTROL_HANGUP: /* Since they hung up we should destroy the speech structure */ done = 3; default: break; } default: break; } ast_frfree(f); f = NULL; } } if (strlen(dtmf)) { /* We sort of make a results entry */ speech->results = ast_calloc(1, sizeof(*speech->results)); if (speech->results != NULL) { ast_speech_dtmf(speech, dtmf); speech->results->score = 1000; speech->results->text = strdup(dtmf); speech->results->grammar = strdup("dtmf"); } ast_speech_change_state(speech, AST_SPEECH_STATE_NOT_READY); } /* See if it was because they hung up */ if (done == 3) { /* Destroy speech structure */ ast_speech_destroy(speech); datastore = ast_channel_datastore_find(chan, &speech_datastore, NULL); if (datastore != NULL) { ast_channel_datastore_remove(chan, datastore); } } else { /* Channel is okay so restore read format */ ast_set_read_format(chan, oldreadformat); } ast_module_user_remove(u); return 0; }
/** \brief Prepare engine to accept audio.
 *
 * Stops any recognition already in progress, then builds and sends a
 * RECOGNIZER_RECOGNIZE request whose text/uri-list body references every
 * currently-active grammar ("session:<name>" per line). On a success
 * response the media buffer is reset and the speech object is moved to
 * the ready state.
 *
 * \param speech the Asterisk speech structure (data holds uni_speech_t)
 * \return 0 on success, -1 on failure
 */
static int uni_recog_start(struct ast_speech *speech)
{
	uni_speech_t *uni_speech = speech->data;
	mrcp_message_t *mrcp_message;
	mrcp_generic_header_t *generic_header;
	mrcp_recog_header_t *recog_header;

	/* Only one recognition may run at a time on this channel */
	if(uni_speech->is_inprogress) {
		uni_recog_stop(speech);
	}

	ast_log(LOG_NOTICE, "Start audio '%s'\n",uni_speech_id_get(uni_speech));
	mrcp_message = mrcp_application_message_create(
						uni_speech->session,
						uni_speech->channel,
						RECOGNIZER_RECOGNIZE);
	if(!mrcp_message) {
		ast_log(LOG_WARNING, "Failed to create MRCP message\n");
		return -1;
	}

	/* Get/allocate generic header */
	generic_header = mrcp_generic_header_prepare(mrcp_message);
	if(generic_header) {
		apr_hash_index_t *it;
		void *val;
		const char *grammar_name;
		const char *content = NULL;
		/* Set generic header fields */
		apt_string_assign(&generic_header->content_type,"text/uri-list",mrcp_message->pool);
		mrcp_generic_header_property_add(mrcp_message,GENERIC_HEADER_CONTENT_TYPE);

		/* Construct and set message body: one "session:<grammar>" entry per
		 * active grammar, newline-separated (first entry has no leading \n) */
		it = apr_hash_first(mrcp_message->pool,uni_speech->active_grammars);
		if(it) {
			apr_hash_this(it,NULL,NULL,&val);
			grammar_name = val;
			content = apr_pstrcat(mrcp_message->pool,"session:",grammar_name,NULL);
			it = apr_hash_next(it);
		}
		for(; it; it = apr_hash_next(it)) {
			apr_hash_this(it,NULL,NULL,&val);
			grammar_name = val;
			content = apr_pstrcat(mrcp_message->pool,content,"\nsession:",grammar_name,NULL);
		}
		if(content) {
			apt_string_set(&mrcp_message->body,content);
		}
	}

	/* Get/allocate recognizer header */
	recog_header = (mrcp_recog_header_t*) mrcp_resource_header_prepare(mrcp_message);
	if(recog_header) {
		/* Set recognizer header fields */
		if(mrcp_message->start_line.version == MRCP_VERSION_2) {
			/* Cancel-If-Queue exists only in MRCPv2 */
			recog_header->cancel_if_queue = FALSE;
			mrcp_resource_header_property_add(mrcp_message,RECOGNIZER_HEADER_CANCEL_IF_QUEUE);
		}
		recog_header->start_input_timers = TRUE;
		mrcp_resource_header_property_add(mrcp_message,RECOGNIZER_HEADER_START_INPUT_TIMERS);
	}

	/* Reset last event (if any) */
	uni_speech->mrcp_event = NULL;

	/* Send MRCP request and wait for response */
	if(uni_recog_mrcp_request_send(uni_speech,mrcp_message) != TRUE) {
		ast_log(LOG_WARNING, "Failed to send MRCP message\n");
		return -1;
	}

	/* Check received response */
	if(!uni_speech->mrcp_response || uni_speech->mrcp_response->start_line.status_code != MRCP_STATUS_CODE_SUCCESS) {
		ast_log(LOG_WARNING, "Received failure response\n");
		return -1;
	}

	/* Reset media buffer */
	mpf_frame_buffer_restart(uni_speech->media_buffer);

	ast_speech_change_state(speech, AST_SPEECH_STATE_READY);
	uni_speech->is_inprogress = TRUE;
	return 0;
}