Пример #1
0
/*
 * Speak the text buffered in *s via espeak_Synth().
 *
 * In acsint mode the incoming text may contain SSML, so espeakSSML is
 * enabled.  In speakup mode a single-character buffer is wrapped in an
 * SSML <say-as> element so that espeak spells the character out; a
 * space is special-cased using the "tts:char" form with &#32;.
 *
 * Returns the espeak_ERROR status from espeak_Synth().
 */
static espeak_ERROR speak_text(struct synth_t *s)
{
	espeak_ERROR rc;
	int synth_mode = 0;

	if (espeakup_mode == ESPEAKUP_MODE_ACSINT)
		synth_mode |= espeakSSML;

	if (espeakup_mode == ESPEAKUP_MODE_SPEAKUP && (s->len == 1)) {
		char *buf;
		int n;
		if (s->buf[0] == ' ')
			n = asprintf(&buf,
				     "<say-as interpret-as=\"tts:char\">&#32;</say-as>");
		else
			n = asprintf(&buf,
				     "<say-as interpret-as=\"characters\">%c</say-as>",
				     s->buf[0]);
		if (n == -1) {
			/* D'oh.  Not much to do on allocation failure.
			 * Perhaps espeak will happen to say the character */
			rc = espeak_Synth(s->buf, s->len + 1, 0, POS_CHARACTER,
					  0, synth_mode, NULL, NULL);
		} else {
			/* the wrapped text is SSML regardless of mode;
			 * n + 1 includes the terminating NUL in the size */
			rc = espeak_Synth(buf, n + 1, 0, POS_CHARACTER, 0,
					  espeakSSML, NULL, NULL);
			free(buf);
		}
	} else
		rc = espeak_Synth(s->buf, s->len + 1, 0, POS_CHARACTER, 0,
				  synth_mode, NULL, NULL);
	return rc;
}
Пример #2
0
/*
 * Speak `text` with espeak, retrying once after a short delay if the
 * engine reports a full buffer.
 *
 * Returns 0 on success (or when initFlag != 1, in which case the text
 * is silently dropped), -1 when synthesis fails.
 */
int speakText(const char *text) {
  size_t size;
  unsigned int pos = 0;
  espeak_POSITION_TYPE pos_type = POS_CHARACTER;
  unsigned int pos_end = 0;
  unsigned int flags = espeakCHARS_AUTO | espeakENDPAUSE | espeakSSML;
  unsigned int unique_id = 0;
  void *user_data = NULL;
  espeak_ERROR rc;

  if ( 1 == initFlag ) {
    /* +1 so the terminating NUL is included in the size */
    size = strlen(text) + 1;
    rc = espeak_Synth(text, size, pos, pos_type, pos_end, flags, &unique_id, 
		      user_data);
    if ( EE_BUFFER_FULL == rc ) {
      /* give espeak time to drain its queue, then retry once */
      sleep(3);
      rc = espeak_Synth(text, size, pos, pos_type, pos_end, flags, &unique_id, 
			user_data);
      if ( EE_OK != rc ) {
	return -1;
      }
    }
    if ( EE_INTERNAL_ERROR == rc ) {
      return -1;
    }
  }
  return 0;
}
Пример #3
0
        /**
         * Construct the eSpeak reactor: start the espeak engine for direct
         * audio playback and register the Say and Shutdown handlers.
         */
        eSpeak::eSpeak(std::unique_ptr<NUClear::Environment> environment) : Reactor(std::move(environment)) {

            // Initialize espeak, and set it to play out the speakers, and not exit if it can't find its directory
            // (500ms internal buffer; option 1 << 15 is espeak's "don't exit" flag)
            espeak_Initialize(AUDIO_OUTPUT_PLAYBACK, 500, nullptr, 1 << 15);
            espeak_SetVoiceByName("default");
            espeak_SetParameter(espeakVOLUME, 100, 0);
            espeak_SetParameter(espeakCAPITALS, 6, 0);

            // Sync<eSpeak> presumably serializes these reactions so two
            // Say messages cannot drive espeak concurrently -- see NUClear docs.
            on<Trigger<messages::output::Say>, Options<Sync<eSpeak>>>([](const messages::output::Say& message) {
                // Wait to finish the current message (if any)
                // By waiting here this reaction can finish and return to the pool
                // if it does not have to wait for another say message
                espeak_Synchronize();

                // Say the new message
                espeak_Synth(message.c_str(),       // Text
                             message.size() + 1,    // Size (including null at end)
                             0,                     // Start position
                             POS_CHARACTER,         // Position Type (irrelevant since we start at the beginning)
                             0,                     // End position (0 means no end position)
                             espeakCHARS_AUTO,      // Flags (auto encoding)
                             nullptr,               // User identifier for callback
                             nullptr                // Callback
                        );
            });

            on<Trigger<Shutdown>>([](const Shutdown&) {
                // Stop espeak
                espeak_Terminate();
            });
        }
Пример #4
0
// ROS topic callback: synthesize the received string with eSpeak.
// Access to the espeak API is serialized through `mtx`.
void espeak_callback(const std_msgs::String::ConstPtr& line) {
	// espeak calls must not overlap across callbacks
	boost::mutex::scoped_lock guard(mtx);

	const std::string& text = line->data;
	ROS_INFO("%s", text.c_str());

	// +1 so the trailing NUL is counted in the size handed to espeak.
	const int flags = espeakCHARS_AUTO | espeakPHONEMES | espeakENDPAUSE;
	espeak_Synth(text.c_str(), text.length() + 1, 0, POS_CHARACTER, 0,
	             flags, NULL, NULL);

	// Block until the utterance has been fully spoken.
	espeak_Synchronize();
}
/*
 * Speak `inText` on the platform's TTS engine (selected at compile time:
 * SAPI on MSVC, espeak on Linux, SpeakString on Mac).  An alert beep is
 * prepended when severity < AUDIO_SEVERITY_CRITICAL.  Worker state is
 * lazily initialized on the first call.
 */
void QGCAudioWorker::say(QString inText, int severity)
{
	static bool threadInit = false;
	if (!threadInit) {
		threadInit = true;
		init();
	}

    if (!muted)
    {
        QString text = _fixTextMessageForAudio(inText);
        // Prepend high priority text with alert beep
        if (severity < GAudioOutput::AUDIO_SEVERITY_CRITICAL) {
            beep();
        }

#ifdef QGC_NOTIFY_TUNES_ENABLED
        // Wait for the last sound to finish
        while (!sound->isFinished()) {
            QGC::SLEEP::msleep(100);
        }
#endif

#if defined _MSC_VER && defined QGC_SPEECH_ENABLED
        HRESULT hr = pVoice->Speak(text.toStdWString().c_str(), SPF_DEFAULT, NULL);
		if (FAILED(hr)) {
			qDebug() << "Speak failed, HR:" << QString("%1").arg(hr, 0, 16);
		}
#elif defined Q_OS_LINUX && defined QGC_SPEECH_ENABLED
        // Set size of string for espeak: +1 for the null-character
        // NOTE(review): each text.toStdString() call creates a distinct
        // temporary; both c_str() uses stay within their full expression,
        // so no dangling pointer -- but the conversion runs twice.
        unsigned int espeak_size = strlen(text.toStdString().c_str()) + 1;
        espeak_Synth(text.toStdString().c_str(), espeak_size, 0, POS_CHARACTER, 0, espeakCHARS_AUTO, NULL, NULL);

#elif defined Q_OS_MAC && defined QGC_SPEECH_ENABLED
        // Slashes necessary to have the right start to the sentence
        // copying data prevents SpeakString from reading additional chars
        text = "\\" + text;
        std::wstring str = text.toStdWString();
        unsigned char str2[1024] = {};
        memcpy(str2, text.toLatin1().data(), str.length());
        SpeakString(str2);

        // Block the thread while busy
        // because we run in our own thread, this doesn't
        // halt the main application
        while (SpeechBusy()) {
            QGC::SLEEP::msleep(100);
        }

#else
        // Make sure there isn't an unused variable warning when speech output is disabled
        Q_UNUSED(inText);
#endif
    }
}
Пример #6
0
static void
spk_say(volatile SpeechSynthesizer *spk, const unsigned char *buffer, size_t length, size_t count, const unsigned char *attributes)
{
	int result;

	/* add 1 to the length in order to pass along the trailing zero */
	result = espeak_Synth(buffer, length+1, 0, POS_CHARACTER, 0,
			espeakCHARS_UTF8, NULL, (void *)spk);
	if (result != EE_OK)
		logMessage(LOG_ERR, "eSpeak-NG: Synth() returned error %d", result);
}
Пример #7
0
/*
 * Synthesize the buffered text, enabling SSML interpretation when
 * running in acsint mode.  Returns the espeak status code.
 */
static espeak_ERROR speak_text(struct synth_t *s)
{
	/* acsint input may carry SSML markup; plain mode otherwise */
	int synth_mode = (espeakup_mode == ESPEAKUP_MODE_ACSINT) ? espeakSSML : 0;

	/* s->len + 1 counts the terminating NUL in the size */
	return espeak_Synth(s->buf, s->len + 1, 0, POS_CHARACTER, 0,
			synth_mode, NULL, NULL);
}
Пример #8
0
/*
 * Minimal espeak demo: initialize audio playback, select a voice and
 * speak the file-scope `text` buffer, blocking until speech finishes.
 * (Removed the unused locals I, Run and L that the original declared.)
 */
int main(int argc, char* argv[] )
{
    output = AUDIO_OUTPUT_PLAYBACK;
    espeak_Initialize(output, Buflength, path, Options );
    espeak_SetVoiceByName(Voice);
    Size = strlen(text)+1;    /* include the terminating NUL */
    printf("Saying  '%s'",text);
    espeak_Synth( text, Size, position, position_type, end_position, flags,
    unique_identifier, user_data );
    espeak_Synchronize( );    /* wait until speech has completed */
    printf("\n:Done\n");
    return 0;
}
Пример #9
0
/* TTS announcement runs in its own thread; otherwise it would
 * freeze the game until the announcement finishes.
 * `arg` points to a tts_argument (text + interrupt/wait mode).
 * Returns 1 when the utterance has been spoken. */
int tts_thread_func(void *arg)
{
	espeak_POSITION_TYPE position_type = POS_CHARACTER;
	/* copy the argument struct out of the caller-provided pointer */
	tts_argument recived = *((tts_argument*)(arg));
	fprintf(stderr,"\nSpeaking : %s - %d",recived.text,recived.mode);
	/* INTERRUPT cancels current speech; any other mode waits for it */
	if (recived.mode == INTERRUPT)
		T4K_Tts_cancel();
	else
		T4K_Tts_wait();
	
	/* +1 so the terminating NUL is included in the size */
	int Size = strlen(recived.text)+1;
	espeak_Synth(recived.text, Size, 0, position_type, 0,	espeakCHARS_AUTO,0, NULL);	
	espeak_Synchronize();
	return 1;
}
Пример #10
0
/* Speak a notification string via espeak (compile-time optional).
 * No-op when the text is empty or text-to-speech is disabled in config. */
void
ManglerAudio::playText(Glib::ustring text) {/*{{{*/
    if (!text.length() || !Mangler::config["NotificationTextToSpeech"].toBool()) {
        return;
    }
#ifdef HAVE_ESPEAK
    espeak_SetSynthCallback(espeak_synth_cb);
    /* Audio sink for the synthesized samples, passed to espeak as the
     * user-data pointer so espeak_synth_cb receives it with each event.
     * NOTE(review): on success `tts` appears to be released by the
     * callback path (only finish() is called here on failure) -- confirm
     * ownership before touching this. */
    ManglerAudio *tts = new ManglerAudio(AUDIO_NOTIFY, mangler->espeakRate, 1, 0, 0, false);
    if (espeak_Synth(text.c_str(), text.length() + 1, 0, POS_CHARACTER, 0, espeakCHARS_AUTO, NULL, tts) != EE_OK) {
        tts->finish();
        fprintf(stderr, "espeak: synth error\n");
        return;
    }
#endif
}/*}}}*/
Пример #11
0
/*
 * Speak `inText` on the platform's TTS engine (selected at compile time;
 * no-op on Android).  An alert beep is prepended when
 * severity < AUDIO_SEVERITY_CRITICAL.  Worker state is lazily
 * initialized on the first call.
 */
void QGCAudioWorker::say(QString inText, int severity)
{
#ifdef __android__
    Q_UNUSED(inText);
    Q_UNUSED(severity);
#else
    static bool threadInit = false;
    if (!threadInit) {
        threadInit = true;
        init();
    }

    if (!muted)
    {
        QString text = fixTextMessageForAudio(inText);
        // Prepend high priority text with alert beep
        if (severity < GAudioOutput::AUDIO_SEVERITY_CRITICAL) {
            beep();
        }

#ifdef QGC_NOTIFY_TUNES_ENABLED
        // Wait for the last sound to finish
        while (!sound->isFinished()) {
            QGC::SLEEP::msleep(100);
        }
#endif

#if defined _MSC_VER && defined QGC_SPEECH_ENABLED
        HRESULT hr = pVoice->Speak(text.toStdWString().c_str(), SPF_DEFAULT, NULL);
        if (FAILED(hr)) {
            qDebug() << "Speak failed, HR:" << QString("%1").arg(hr, 0, 16);
        }
#elif defined Q_OS_LINUX && defined QGC_SPEECH_ENABLED
        // Set size of string for espeak: +1 for the null-character
        // NOTE(review): text.toStdString() runs twice (two temporaries);
        // each c_str() use stays within its own full expression, so safe.
        unsigned int espeak_size = strlen(text.toStdString().c_str()) + 1;
        espeak_Synth(text.toStdString().c_str(), espeak_size, 0, POS_CHARACTER, 0, espeakCHARS_AUTO, NULL, NULL);

#elif (defined __macos__) && defined QGC_SPEECH_ENABLED
        macSpeech.say(text.toStdString().c_str());
#elif (defined __ios__) && defined QGC_SPEECH_ENABLED
        iOSSpeak(text);
#else
        // Make sure there isn't an unused variable warning when speech output is disabled
        Q_UNUSED(inText);
#endif
    }
#endif // __android__
}
Пример #12
0
  // Synthesize aText through the given espeak synth callback.
  // aCallback is an opaque pointer to a t_espeak_callback function.
  void synth_(const char* aText, void* aCallback) {
    t_espeak_callback* cb = reinterpret_cast<t_espeak_callback*>(aCallback);
    espeak_SetSynthCallback(cb);
    espeak_SetParameter(espeakPITCH, pitch, 0);
    espeak_SetParameter(espeakRATE, rate, 0);

    // Use the configured voice when one is set, otherwise espeak's default.
    if (current_voice)
      espeak_SetVoiceByProperties(current_voice);
    else
      espeak_SetVoiceByName("default");

    // Fix: espeak_Synth() requires `size` to cover the text including its
    // terminating NUL; the previous hard-coded 0 violated that contract.
    // (Length computed inline to avoid adding a new #include here.)
    unsigned int textSize = 0;
    while (aText[textSize] != '\0')
      ++textSize;
    espeak_Synth(aText, textSize + 1, 0, POS_CHARACTER, 0, 0, NULL, NULL);

    // Reset callback so other instances will work too.
    espeak_SetSynthCallback(NULL);
  }
Пример #13
0
// Queue the given string for speech synthesis.
// The UTF-8 buffer is explicitly NUL-terminated so that the size handed
// to espeak (which must include the terminator) is simply its length.
void ESpeak::sayString(QString string)
{
    QByteArray utf8 = string.toUtf8();
    utf8.append((char)0);  // ensure an explicit trailing NUL byte

    uint uid = 0;
    // length() already counts the appended terminator, as espeak expects.
    espeak_Synth(utf8.constData(), utf8.length(), 0, POS_CHARACTER, 0,
                 espeakCHARS_AUTO, &uid, 0);
}
Пример #14
0
/* Speak the single character `ch` immediately, cancelling whatever is
 * currently being spoken first. */
void tts_l(const char ch)
{
	/* neither espeak_Char nor espeak_Key honour the setting of split_caps (say 'capital') if it is OFF, so use synth */
	char pair[2];
	pair[0] =ch;
	pair[1] = 0;
	espeak_ERROR erc;
	//debug_log(logfd,"Called tts_l\n");
	/* stop current audio output, then cancel espeak's queue */
	ilctts_stop_request(st);
	erc = espeak_Cancel();
	//debug_log(logfd, "In tts_l espeak_Cancel returned: %d\n", erc);
	stop_requested = 0;
	/* size 2 = the character plus its terminating NUL */
	erc = espeak_Synth(pair, 2, 0, POS_CHARACTER, 0, 0, NULL, st);
		//debug_log(logfd, "In tts_l espeak_Synth returned: %d\n", erc);
	return;
} /* end tts_l */
Пример #15
0
/* Pump the speech queue once.
 * speaking == 2 apparently marks playback completion: audio is paused
 * and the state returns to 0 (idle).  When idle, the next queued
 * utterance (if any) is popped under speech_mx and synthesis starts.
 * Returns the current `speaking` state. */
uint8_t speech_poll() {
  speech_list_t *p_list;
	if( speaking == 2 ) {
    SDL_PauseAudio( 1 );
    speaking = 0;
	}
	if( speaking == 0 ) {
    SDL_mutexP( speech_mx );
		if( speech_first ) {
  		// Queue pop
      p_list = speech_first;
      speech_first = p_list->next;
      SDL_mutexV( speech_mx );
#ifdef USE_SAM
  		// Set parameters
  		sam_params( SAM_SCALE, SAM_SING, SAM_SPEED, SAM_PITCH, SAM_MOUTH, SAM_THROAT );
      buf_pos = 0;
  		buf_size = sizeof( buf );
  		if( sam_speak( buf, &buf_size, p_list->data ) == 0 ) buf_play();
#else

      printf( "Speech [info]: eSpeak voice return: %i\n", espeak_SetVoiceByName( p_list->voice ) );
      espeak_SetParameter(espeakRATE,130,0);
	    espeak_SetParameter(espeakRANGE,0,0);
//		espeak_SetParameter(espeakVOLUME,volume,0);
//		espeak_SetParameter(espeakPITCH,pitch,0);
//		espeak_SetParameter(espeakCAPITALS,option_capitals,0);
//		espeak_SetParameter(espeakPUNCTUATION,option_punctuation,0);
//		espeak_SetParameter(espeakWORDGAP,wordgap,0);
//		espeak_SetParameter(espeakLINELENGTH,option_linelength,0);
//		espeak_SetPunctuationList(option_punctlist);

      /* strlen+1 so the terminating NUL is included in the size */
      p_buf = buf;
      viseme_count = 0;
      espeak_Synth( p_list->data, strlen( p_list->data ) + 1, 0, POS_CHARACTER, 0, 0, NULL, NULL );
      buf_play();
#endif
  		free( p_list );
		} else {
      SDL_mutexV( speech_mx );
		}
	}
	return( speaking );
}
Пример #16
0
/*
 * Speak `text`, cancelling anything currently being spoken and emptying
 * the pending queue first.  "[*]" markers are stripped from the text
 * before synthesis.  Serialized via queue_guard_mutex.
 */
void tts_say(char *text)
{
	int rc;
	espeak_ERROR erc;
	/* Fix: +1 leaves room for the terminating NUL.  The old size of
	 * strlen(text) was one byte short whenever no "[*]" sequence was
	 * removed, so clean_string could write past the buffer. */
	char *newtext = calloc(1, strlen(text) + 1);
	if (newtext == NULL)
		return; /* allocation failed; nothing sensible to do */
	/* remove all occurrences of [*] from the speech string */
	clean_string(text, newtext, "\\[\\*\\]", " ");
	pthread_mutex_lock(&queue_guard_mutex);
	ilctts_stop_request(st);
	erc = espeak_Cancel();
	rc = empty_queue();
	stop_requested = 0;
	/* NOTE(review): newtext is never freed -- espeak_Synth is documented
	 * to copy its input, so a free() here should be safe; confirm. */
	erc = espeak_Synth(newtext, strlen(newtext)+1, 0, POS_CHARACTER, 0, SYNTH_FLAGS, NULL, st);
	pthread_mutex_unlock(&queue_guard_mutex);
	return;
} /* end tts_say */
Пример #17
0
/** synthesizeText
 *  Synthesizes a text string.
 *  The text string could be annotated with SSML tags.
 *  @text     - text to synthesize
 *  @buffer   - buffer which will receive generated samples (unused here;
 *              samples are delivered through eSpeakCallback instead)
 *  @bufferSize - size of buffer (unused here)
 *  @userdata - pointer to user data which will be passed back to callback function
 *  return tts_result
 *
 *  NOTE(review): both espeak_Synth() and espeak_Synchronize() results are
 *  stored in `err` but never checked -- the function always reports
 *  TTS_SUCCESS.  Also `size` is strlen(text) without the +1 for the
 *  terminator that other espeak callers pass; confirm against speak_lib.h.
 */
tts_result TtsEngine::synthesizeText( const char * text, int8_t * buffer, size_t bufferSize, void * userdata )
{
    espeak_SetSynthCallback(eSpeakCallback);

    unsigned int unique_identifier;
    espeak_ERROR err;

    err = espeak_Synth(text,
                       strlen(text),
                       0,  // position
                       POS_CHARACTER,
                       0,  // end position (0 means no end position)
                       espeakCHARS_UTF8,
                       &unique_identifier,
                       userdata);

    err = espeak_Synchronize();
    return TTS_SUCCESS;
}
Пример #18
0
/*
* Dispatch one chunk of speech to espeak.
* Guarded by queue_guard_mutex in the dispatch thread
*
* Returns 0 on success, -1 when the queue is empty or allocation fails.
*/
int send_speech(void)
{
	espeak_ERROR erc;
	TTS_QUEUE_ENTRY_T *qe;
	ListElmt *element;

	if (queue_size(&tts_queue) < 1)
		return -1;

	if ( (element = malloc(sizeof(ListElmt))) == NULL)
		return -1;

	/* pop the head of the queue into our scratch element */
	queue_pop(&tts_queue, (void*)element);
	qe = (TTS_QUEUE_ENTRY_T*)list_data(element);

	/* length+1 so the terminating NUL is included in the size */
	erc = espeak_Synth(qe->speech, qe->length+1, 0, POS_CHARACTER, 0, SYNTH_FLAGS, NULL, st);

	/* NOTE(review): qe->speech and the scratch element are released, but
	 * the TTS_QUEUE_ENTRY_T that qe points to is not freed here -- confirm
	 * it is owned elsewhere.  `erc` is also never checked. */
	free(qe->speech);
	free(element);
	return 0;
} /* send_speech */
/* JNI entry point: synchronously synthesize a Java string.
 * Audio and events are delivered through SynthCallback with `nat`
 * (the per-object native state) as the user-data context. */
JNIEXPORT jboolean
JNICALL Java_com_googlecode_eyesfree_espeak_SpeechSynthesis_nativeSynthesize(
    JNIEnv *env, jobject object, jstring text) {
  if (DEBUG) LOGV("%s", __FUNCTION__);
  native_data_t *nat = getNativeData(env, object);
  const char *c_text = env->GetStringUTFChars(text, NULL);
  unsigned int unique_identifier;

  /* make the current JNI environment visible to the synth callback */
  nat->env = env;

  espeak_SetSynthCallback(SynthCallback);
  espeak_Synth(c_text, strlen(c_text), 0,  // position
               POS_CHARACTER, 0,  // end position (0 means no end position)
               espeakCHARS_UTF8 | espeakSSML, // use or ignore xml tags
               &unique_identifier, nat);
  espeak_Synchronize();  // block until synthesis has completed

  env->ReleaseStringUTFChars(text, c_text);

  return JNI_TRUE;
}
Пример #20
0
/*
 * Speak `message`, unless espeak is still speaking or the previous
 * message started less than min_time_between_messages seconds ago.
 * The first call initializes the voice for `language`.
 */
void
carmen_voice_send_alert(char *message, int min_time_between_messages, char *language)
{
	static int first = 1;
	static double time_when_stop_speaking = 0;
//	static int started_speaking = 0;
	double current_time;
	unsigned int text_size;

	if (first)
	{
		carmen_voice_initialize(language);
		first = 0;
	}

	/**
	 * The following code is a state machine. While the computer is speaking,
	 * the code do nothing, it just returns. If it ended speaking, we wait some time
	 * before start speaking again.
	 */
	text_size = strlen(message) + 1;	/* include the terminating NUL */
	current_time = time(NULL);

	if (espeak_IsPlaying())
		return;

//	if (started_speaking && !espeak_IsPlaying())
//	{
//		started_speaking = 0;
//	}

	/* rate-limit: skip if we spoke too recently */
	if (fabs(current_time - time_when_stop_speaking) < min_time_between_messages)
		return;

	printf("saying '%s'\n", message);
	espeak_Synth(message, text_size, 0, POS_CHARACTER, 0, espeakCHARS_AUTO, NULL, NULL);
	/* NOTE(review): recorded when synthesis *starts*, despite the name */
	time_when_stop_speaking = time(NULL);
//	started_speaking = 1;
}
Пример #21
0
/* Pd method handler: flatten the incoming atom list to text and speak it.
 * `x` is passed to espeak as the user-data pointer for the synth callback. */
static void espeak_text(t_espeak *x, t_symbol*s, int argc, t_atom*argv) {
  t_binbuf*bb=binbuf_new();
  int size=0;
  char*text=NULL;

  /* render the atoms into a freshly allocated text buffer */
  binbuf_add(bb, argc, argv);
  binbuf_gettext(bb, &text, &size);
  binbuf_free(bb);

  /* NOTE(review): if binbuf_gettext() allocates exactly `size` bytes,
   * writing text[size] is one past the end -- confirm the allocation
   * contract.  `text` also appears to never be freed (freebytes). */
  text[size]=0;

  verbose(1, "speak '%s'", text);

  espeak_Synth(text,
	       strlen(text),
	       0,
	       POS_CHARACTER,
	       0,
	       espeakCHARS_AUTO,
	       NULL,
	       x);
}
Пример #22
0
/*
 * espeak demo: speak the file-scope `text` buffer using a voice selected
 * by properties (Lithuanian "lt", klatt engine) rather than by name.
 * (Removed the unused locals I, Run and L; fixed the comment that
 * wrongly described "lt" as US English.)
 */
int main(int argc, char* argv[] )
{
    output = AUDIO_OUTPUT_PLAYBACK;
    espeak_Initialize(output, Buflength, path, Options );

    /* Select the voice by properties instead of espeak_SetVoiceByName():
     * language "lt" (Lithuanian), klatt synthesis, variant 2, male. */
    const char *langNativeString = "lt";
    espeak_VOICE voice;
    memset(&voice, 0, sizeof(espeak_VOICE)); /* zero all fields first */
    voice.languages = langNativeString;
    voice.name = "klatt";
    voice.variant = 2;
    voice.gender = 1; /* 1 = male in espeak_VOICE */
    espeak_SetVoiceByProperties(&voice);

    Size = strlen(text)+1;    /* include the terminating NUL */
    printf("Saying  '%s'",text);
    espeak_Synth( text, Size, position, position_type, end_position, flags,
    unique_identifier, user_data );
    espeak_Synchronize( );    /* wait until speech has completed */
    printf("\n:Done\n");
    return 0;
}
Пример #23
0
//-----------------------------------------------------------------------------
// RobotSpeak::Speak - speak a string, optionally blocking until finished.
// Lazily initializes the engine on first use; otherwise cancels any
// speech still in progress before starting the new utterance.
//-----------------------------------------------------------------------------
void RobotSpeak::Speak(const char *psz, bool fWait)
{

    // Tell msound to release itself if necessary...
    printf("Speak %s\n", psz);

    // See if we need to initialize.
    if (!_fSpeakInit)
        InitSpeak();
    else
    {
        // if it is still playing cancel any active stuff...
        if (espeak_IsPlaying())
            espeak_Cancel();
    }

    if (psz)
    {
        // strlen+1 includes the terminating NUL in the size given to espeak
        espeak_Synth( psz, strlen(psz)+1, 0, POS_CHARACTER, 0, espeakCHARS_AUTO, &_uSpeakIdentifier, 0 );
        if (fWait)
            espeak_Synchronize();
    }
}
Пример #24
0
/*
 * Speak `inText` on the platform's TTS engine (selected at compile time;
 * no-op on Android).  Worker state is lazily initialized on first call.
 */
void QGCAudioWorker::say(QString inText)
{
#ifdef __android__
    Q_UNUSED(inText);
#else
    static bool threadInit = false;
    if (!threadInit) {
        threadInit = true;
        init();
    }

    if (!muted)
    {
        QString text = fixTextMessageForAudio(inText);

#if defined _MSC_VER && defined QGC_SPEECH_ENABLED
        HRESULT hr = pVoice->Speak(text.toStdWString().c_str(), SPF_DEFAULT, NULL);
        if (FAILED(hr)) {
            qDebug() << "Speak failed, HR:" << QString("%1").arg(hr, 0, 16);
        }
#elif defined Q_OS_LINUX && defined QGC_SPEECH_ENABLED
        // Set size of string for espeak: +1 for the null-character
        // NOTE(review): text.toStdString() runs twice (two temporaries);
        // each c_str() use stays within its own full expression, so safe.
        unsigned int espeak_size = strlen(text.toStdString().c_str()) + 1;
        espeak_Synth(text.toStdString().c_str(), espeak_size, 0, POS_CHARACTER, 0, espeakCHARS_AUTO, NULL, NULL);

#elif (defined __macos__) && defined QGC_SPEECH_ENABLED
        macSpeech.say(text.toStdString().c_str());
#elif (defined __ios__) && defined QGC_SPEECH_ENABLED
        iOSSpeak(text);
#else
        // Make sure there isn't an unused variable warning when speech output is disabled
        Q_UNUSED(inText);
#endif
    }
#endif // __android__
}
/*
 * Start speaking the given utterance.  Fires SpeechError when the engine
 * is unavailable or synthesis fails internally, SpeechStart otherwise.
 */
void PlatformSpeechSynthesisProviderEfl::speak(PassRefPtr<PlatformSpeechSynthesisUtterance> utterance)
{
    if (!engineInit() || !utterance) {
        fireSpeechEvent(SpeechError);
        return;
    }

    m_utterance = utterance;
    String voice = voiceName(m_utterance);
    espeak_SetVoiceByName(voice.utf8().data());
    espeak_SetParameter(espeakRATE, convertRateToEspeakValue(m_utterance->rate()), 0);
    espeak_SetParameter(espeakVOLUME, convertVolumeToEspeakValue(m_utterance->volume()), 0);
    espeak_SetParameter(espeakPITCH, convertPitchToEspeakValue(m_utterance->pitch()), 0);

    // Fix: convert to UTF-8 once and pass the *byte* length of that
    // conversion.  The old code passed String::length(), which counts
    // UTF-16 code units and undercounts multi-byte UTF-8 text, truncating
    // non-ASCII utterances.  +1 includes the NUL terminator for espeak.
    CString utf8Text = m_utterance->text().utf8();
    espeak_ERROR err = espeak_Synth(utf8Text.data(), utf8Text.length() + 1, 0, POS_CHARACTER, 0, espeakCHARS_AUTO, 0, nullptr);
    if (err == EE_INTERNAL_ERROR) {
        fireSpeechEvent(SpeechError);
        m_utterance = nullptr;
        return;
    }

    fireSpeechEvent(SpeechStart);
}
Пример #26
0
/*
 * speech-dispatcher module entry: synthesize one message.
 * `data`/`bytes` hold the payload; `msgtype` selects the wrapping
 * (plain text, sound icon, single character, key name or spelling).
 * Returns FALSE on failure, otherwise `bytes`.
 *
 * Fix: the result != EE_OK path previously returned while still holding
 * espeak_state_mutex, deadlocking every subsequent call to this module.
 */
static int espeak_speak(gchar * data, size_t bytes, SPDMessageType msgtype)
{
	espeak_ERROR result = EE_INTERNAL_ERROR;
	int flags = espeakSSML | espeakCHARS_UTF8;

	log_msg(OTTS_LOG_INFO, "Espeak: module_speak().");

	pthread_mutex_lock(&espeak_state_mutex);
	if (espeak_state != IDLE) {
		log_msg(OTTS_LOG_WARN,
			"Espeak: Warning, module_speak called when not ready.");
		pthread_mutex_unlock(&espeak_state_mutex);
		return FALSE;
	}

	log_msg(OTTS_LOG_DEBUG, "Espeak: Requested data: |%s| %d %ld", data,
		msgtype, bytes);

	espeak_state_reset();
	espeak_state = BEFORE_SYNTH;

	/* Setting speech parameters. */
	UPDATE_STRING_PARAMETER(voice.language, espeak_set_language);
	UPDATE_PARAMETER(voice_type, espeak_set_voice);
	UPDATE_STRING_PARAMETER(voice.name, espeak_set_synthesis_voice);

	UPDATE_PARAMETER(rate, espeak_set_rate);
	UPDATE_PARAMETER(volume, espeak_set_volume);
	UPDATE_PARAMETER(pitch, espeak_set_pitch);
	UPDATE_PARAMETER(punctuation_mode, espeak_set_punctuation_mode);
	UPDATE_PARAMETER(cap_let_recogn, espeak_set_cap_let_recogn);

	/*
	   UPDATE_PARAMETER(spelling_mode, espeak_set_spelling_mode);
	 */
	/* Send data to espeak */
	switch (msgtype) {
	case SPD_MSGTYPE_TEXT:
		/* bytes + 1 passes the terminating NUL along with the text */
		result = espeak_Synth(data, bytes + 1, 0, POS_CHARACTER, 0,
				      flags, NULL, NULL);
		break;
	case SPD_MSGTYPE_SOUND_ICON:
		{
			char *msg =
			    g_strdup_printf("<audio src=\"%s%s\">%s</audio>",
					    EspeakSoundIconFolder, data, data);
			result =
			    espeak_Synth(msg, strlen(msg) + 1, 0, POS_CHARACTER,
					 0, flags, NULL, NULL);
			g_free(msg);
			break;
		}
	case SPD_MSGTYPE_CHAR:
		{
			/* Wrap the character in SSML so espeak spells it out. */
			wchar_t wc = 0;
			if (bytes == 1) {	// ASCII
				wc = (wchar_t) data[0];
			} else if (bytes == 5
				   && (0 == strncmp(data, "space", bytes))) {
				wc = (wchar_t) 0x20;
			} else {
				gsize bytes_out;
				gchar *tmp =
				    g_convert(data, -1, "wchar_t", "utf-8",
					      NULL, &bytes_out, NULL);
				if (tmp != NULL && bytes_out == sizeof(wchar_t)) {
					wchar_t *wc_ptr = (wchar_t *) tmp;
					wc = wc_ptr[0];
				} else {
					log_msg(OTTS_LOG_NOTICE,
						"Espeak: Failed to convert utf-8 to wchar_t, or not exactly one utf-8 character given.");
				}
				g_free(tmp);
			}
			char *msg =
			    g_strdup_printf
			    ("<say-as interpret-as=\"tts:char\">&#%d;</say-as>",
			     wc);
			result =
			    espeak_Synth(msg, strlen(msg) + 1, 0, POS_CHARACTER,
					 0, flags, NULL, NULL);
			g_free(msg);
			break;
		}
	case SPD_MSGTYPE_KEY:
		{
			/* TODO: Convert unspeakable keys to speakable form */
			char *msg =
			    g_strdup_printf
			    ("<say-as interpret-as=\"tts:key\">%s</say-as>",
			     data);
			result =
			    espeak_Synth(msg, strlen(msg) + 1, 0, POS_CHARACTER,
					 0, flags, NULL, NULL);
			g_free(msg);
			break;
		}
	case SPD_MSGTYPE_SPELL:
		/* TODO: Not sure what to do here... */
		break;
	}

	if (result != EE_OK) {
		/* Release the state mutex before bailing out; returning with
		 * it held would deadlock all later calls into this module. */
		pthread_mutex_unlock(&espeak_state_mutex);
		return FALSE;
	}

	pthread_mutex_unlock(&espeak_state_mutex);

	log_msg(OTTS_LOG_DEBUG, "Espeak: Leaving module_speak() normally.");
	return bytes;
}
Пример #27
0
int main(int argc, char **argv)
{
	static struct option long_options[] = {
		{ "help",    no_argument,       0, 'h' },
		{ "stdin",   no_argument,       0, 0x100 },
		{ "compile-debug", optional_argument, 0, 0x101 },
		{ "compile", optional_argument, 0, 0x102 },
		{ "punct",   optional_argument, 0, 0x103 },
		{ "voices",  optional_argument, 0, 0x104 },
		{ "stdout",  no_argument,       0, 0x105 },
		{ "split",   optional_argument, 0, 0x106 },
		{ "path",    required_argument, 0, 0x107 },
		{ "phonout", required_argument, 0, 0x108 },
		{ "pho",     no_argument,       0, 0x109 },
		{ "ipa",     optional_argument, 0, 0x10a },
		{ "version", no_argument,       0, 0x10b },
		{ "sep",     optional_argument, 0, 0x10c },
		{ "tie",     optional_argument, 0, 0x10d },
		{ "compile-mbrola", optional_argument, 0, 0x10e },
		{ "compile-intonations", no_argument, 0, 0x10f },
		{ "compile-phonemes", optional_argument, 0, 0x110 },
		{ 0, 0, 0, 0 }
	};

	FILE *f_text = NULL;
	char *p_text = NULL;
	FILE *f_phonemes_out = stdout;
	char *data_path = NULL; // use default path for espeak-ng-data

	int option_index = 0;
	int c;
	int ix;
	char *optarg2;
	int value;
	int flag_stdin = 0;
	int flag_compile = 0;
	int filesize = 0;
	int synth_flags = espeakCHARS_AUTO | espeakPHONEMES | espeakENDPAUSE;

	int volume = -1;
	int speed = -1;
	int pitch = -1;
	int wordgap = -1;
	int option_capitals = -1;
	int option_punctuation = -1;
	int phonemes_separator = 0;
	int phoneme_options = 0;
	int option_linelength = 0;
	int option_waveout = 0;

	espeak_VOICE voice_select;
	char filename[200];
	char voicename[40];
	char devicename[200];
	#define N_PUNCTLIST 100
	wchar_t option_punctlist[N_PUNCTLIST];

	voicename[0] = 0;
	wavefile[0] = 0;
	filename[0] = 0;
	devicename[0] = 0;
	option_punctlist[0] = 0;

	while (true) {
		c = getopt_long(argc, argv, "a:b:d:f:g:hk:l:mp:qs:v:w:xXz",
		                long_options, &option_index);

		// Detect the end of the options.
		if (c == -1)
			break;
		optarg2 = optarg;

		switch (c)
		{
		case 'b':
			// input character encoding, 8bit, 16bit, UTF8
			if ((sscanf(optarg2, "%d", &value) == 1) && (value <= 4))
				synth_flags |= value;
			else
				synth_flags |= espeakCHARS_8BIT;
			break;
		case 'd':
			strncpy0(devicename, optarg2, sizeof(devicename));
			break;
		case 'h':
			printf("\n");
			PrintVersion();
			printf("%s", help_text);
			return 0;
		case 'k':
			option_capitals = atoi(optarg2);
			break;
		case 'x':
			phoneme_options |= espeakPHONEMES_SHOW;
			break;
		case 'X':
			phoneme_options |= espeakPHONEMES_TRACE;
			break;
		case 'm':
			synth_flags |= espeakSSML;
			break;
		case 'p':
			pitch = atoi(optarg2);
			break;
		case 'q':
			quiet = true;
			break;
		case 'f':
			strncpy0(filename, optarg2, sizeof(filename));
			break;
		case 'l':
			option_linelength = atoi(optarg2);
			break;
		case 'a':
			volume = atoi(optarg2);
			break;
		case 's':
			speed = atoi(optarg2);
			break;
		case 'g':
			wordgap = atoi(optarg2);
			break;
		case 'v':
			strncpy0(voicename, optarg2, sizeof(voicename));
			break;
		case 'w':
			option_waveout = 1;
			strncpy0(wavefile, optarg2, sizeof(filename));
			break;
		case 'z': // remove pause from the end of a sentence
			synth_flags &= ~espeakENDPAUSE;
			break;
		case 0x100: // --stdin
			flag_stdin = 1;
			break;
		case 0x105: // --stdout
			option_waveout = 1;
			strcpy(wavefile, "stdout");
			break;
		case 0x101: // --compile-debug
		case 0x102: // --compile
			if (optarg2 != NULL && *optarg2) {
				strncpy0(voicename, optarg2, sizeof(voicename));
				flag_compile = c;
				quiet = true;
				break;
			} else {
				fprintf(stderr, "Voice name to '%s' not specified.\n", c == 0x101 ? "--compile-debug" : "--compile");
				exit(EXIT_FAILURE);
			}
		case 0x103: // --punct
			option_punctuation = 1;
			if (optarg2 != NULL) {
				ix = 0;
				while ((ix < N_PUNCTLIST) && ((option_punctlist[ix] = optarg2[ix]) != 0)) ix++;
				option_punctlist[N_PUNCTLIST-1] = 0;
				option_punctuation = 2;
			}
			break;
		case 0x104: // --voices
			espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS, 0, data_path, 0);
			DisplayVoices(stdout, optarg2);
			exit(0);
		case 0x106: // -- split
			if (optarg2 == NULL)
				samples_split_seconds = 30 * 60; // default 30 minutes
			else
				samples_split_seconds = atoi(optarg2) * 60;
			break;
		case 0x107: // --path
			data_path = optarg2;
			break;
		case 0x108: // --phonout
			if ((f_phonemes_out = fopen(optarg2, "w")) == NULL)
				fprintf(stderr, "Can't write to: %s\n", optarg2);
			break;
		case 0x109: // --pho
			phoneme_options |= espeakPHONEMES_MBROLA;
			break;
		case 0x10a: // --ipa
			phoneme_options |= espeakPHONEMES_IPA;
			if (optarg2 != NULL) {
				// deprecated and obsolete
				switch (atoi(optarg2))
				{
				case 1:
					phonemes_separator = '_';
					break;
				case 2:
					phonemes_separator = 0x0361;
					phoneme_options |= espeakPHONEMES_TIE;
					break;
				case 3:
					phonemes_separator = 0x200d; // ZWJ
					phoneme_options |= espeakPHONEMES_TIE;
					break;
				}

			}
			break;
		case 0x10b: // --version
			PrintVersion();
			exit(0);
		case 0x10c: // --sep
			phoneme_options |= espeakPHONEMES_SHOW;
			if (optarg2 == 0)
				phonemes_separator = ' ';
			else
				utf8_in(&phonemes_separator, optarg2);
			if (phonemes_separator == 'z')
				phonemes_separator = 0x200c; // ZWNJ
			break;
		case 0x10d: // --tie
			phoneme_options |= (espeakPHONEMES_SHOW | espeakPHONEMES_TIE);
			if (optarg2 == 0)
				phonemes_separator = 0x0361; // default: combining-double-inverted-breve
			else
				utf8_in(&phonemes_separator, optarg2);
			if (phonemes_separator == 'z')
				phonemes_separator = 0x200d; // ZWJ
			break;
		case 0x10e: // --compile-mbrola
		{
			espeak_ng_InitializePath(data_path);
			espeak_ng_ERROR_CONTEXT context = NULL;
			espeak_ng_STATUS result = espeak_ng_CompileMbrolaVoice(optarg2, stdout, &context);
			if (result != ENS_OK) {
				espeak_ng_PrintStatusCodeMessage(result, stderr, context);
				espeak_ng_ClearErrorContext(&context);
				return EXIT_FAILURE;
			}
			return EXIT_SUCCESS;
		}
		case 0x10f: // --compile-intonations
		{
			espeak_ng_InitializePath(data_path);
			espeak_ng_ERROR_CONTEXT context = NULL;
			espeak_ng_STATUS result = espeak_ng_CompileIntonation(stdout, &context);
			if (result != ENS_OK) {
				espeak_ng_PrintStatusCodeMessage(result, stderr, context);
				espeak_ng_ClearErrorContext(&context);
				return EXIT_FAILURE;
			}
			return EXIT_SUCCESS;
		}
		case 0x110: // --compile-phonemes
		{
			espeak_ng_InitializePath(data_path);
			espeak_ng_ERROR_CONTEXT context = NULL;
			espeak_ng_STATUS result;
			if (optarg2) {
				result = espeak_ng_CompilePhonemeDataPath(22050, optarg2, NULL, stdout, &context);
			} else {
				result = espeak_ng_CompilePhonemeData(22050, stdout, &context);
			}
			if (result != ENS_OK) {
				espeak_ng_PrintStatusCodeMessage(result, stderr, context);
				espeak_ng_ClearErrorContext(&context);
				return EXIT_FAILURE;
			}
			return EXIT_SUCCESS;
		}
		default:
			exit(0);
		}
	}

	espeak_ng_InitializePath(data_path);
	espeak_ng_ERROR_CONTEXT context = NULL;
	espeak_ng_STATUS result = espeak_ng_Initialize(&context);
	if (result != ENS_OK) {
		espeak_ng_PrintStatusCodeMessage(result, stderr, context);
		espeak_ng_ClearErrorContext(&context);
		exit(1);
	}

	if (option_waveout || quiet) {
		// writing to a file (or no output), we can use synchronous mode
		result = espeak_ng_InitializeOutput(ENOUTPUT_MODE_SYNCHRONOUS, 0, devicename[0] ? devicename : NULL);
		samplerate = espeak_ng_GetSampleRate();
		samples_split = samplerate * samples_split_seconds;

		espeak_SetSynthCallback(SynthCallback);
		if (samples_split) {
			char *extn;
			extn = strrchr(wavefile, '.');
			if ((extn != NULL) && ((wavefile + strlen(wavefile) - extn) <= 4)) {
				strcpy(filetype, extn);
				*extn = 0;
			}
		}
	} else {
		// play the sound output
		result = espeak_ng_InitializeOutput(PLAYBACK_MODE, 0, devicename[0] ? devicename : NULL);
		samplerate = espeak_ng_GetSampleRate();
	}

	if (result != ENS_OK) {
		espeak_ng_PrintStatusCodeMessage(result, stderr, NULL);
		exit(EXIT_FAILURE);
	}

	if (voicename[0] == 0)
		strcpy(voicename, ESPEAKNG_DEFAULT_VOICE);

	result = espeak_ng_SetVoiceByName(voicename);
	if (result != ENS_OK) {
		memset(&voice_select, 0, sizeof(voice_select));
		voice_select.languages = voicename;
		result = espeak_ng_SetVoiceByProperties(&voice_select);
		if (result != ENS_OK) {
			espeak_ng_PrintStatusCodeMessage(result, stderr, NULL);
			exit(EXIT_FAILURE);
		}
	}

	if (flag_compile) {
		// This must be done after the voice is set
		espeak_ng_ERROR_CONTEXT context = NULL;
		espeak_ng_STATUS result = espeak_ng_CompileDictionary("", NULL, stderr, flag_compile & 0x1, &context);
		if (result != ENS_OK) {
			espeak_ng_PrintStatusCodeMessage(result, stderr, context);
			espeak_ng_ClearErrorContext(&context);
			return EXIT_FAILURE;
		}
		return EXIT_SUCCESS;
	}

	// set any non-default values of parameters. This must be done after espeak_Initialize()
	if (speed > 0)
		espeak_SetParameter(espeakRATE, speed, 0);
	if (volume >= 0)
		espeak_SetParameter(espeakVOLUME, volume, 0);
	if (pitch >= 0)
		espeak_SetParameter(espeakPITCH, pitch, 0);
	if (option_capitals >= 0)
		espeak_SetParameter(espeakCAPITALS, option_capitals, 0);
	if (option_punctuation >= 0)
		espeak_SetParameter(espeakPUNCTUATION, option_punctuation, 0);
	if (wordgap >= 0)
		espeak_SetParameter(espeakWORDGAP, wordgap, 0);
	if (option_linelength > 0)
		espeak_SetParameter(espeakLINELENGTH, option_linelength, 0);
	if (option_punctuation == 2)
		espeak_SetPunctuationList(option_punctlist);

	espeak_SetPhonemeTrace(phoneme_options | (phonemes_separator << 8), f_phonemes_out);

	if (filename[0] == 0) {
		if ((optind < argc) && (flag_stdin == 0)) {
			// there's a non-option parameter, and no -f or --stdin
			// use it as text
			p_text = argv[optind];
		} else {
			f_text = stdin;
			if (flag_stdin == 0)
				flag_stdin = 2;
		}
	} else {
		struct stat st;
		if (stat(filename, &st) != 0) {
			fprintf(stderr, "Failed to stat() file '%s'\n", filename);
			exit(EXIT_FAILURE);
		}
		filesize = GetFileLength(filename);
		f_text = fopen(filename, "r");
		if (f_text == NULL) {
			fprintf(stderr, "Failed to read file '%s'\n", filename);
			exit(EXIT_FAILURE);
		}
		if (S_ISFIFO(st.st_mode)) {
			flag_stdin = 2;
		}
	}

	if (p_text != NULL) {
		int size;
		size = strlen(p_text);
		espeak_Synth(p_text, size+1, 0, POS_CHARACTER, 0, synth_flags, NULL, NULL);
	} else if (flag_stdin) {
		size_t max = 1000;
		if ((p_text = (char *)malloc(max)) == NULL) {
			espeak_ng_PrintStatusCodeMessage(ENOMEM, stderr, NULL);
			exit(EXIT_FAILURE);
		}

		if (flag_stdin == 2) {
			// line by line input on stdin or from FIFO
			while (fgets(p_text, max, f_text) != NULL) {
				p_text[max-1] = 0;
				espeak_Synth(p_text, max, 0, POS_CHARACTER, 0, synth_flags, NULL, NULL);
				// Allow subprocesses to use the audio data through pipes.
				fflush(stdout);
			}
			if (f_text != stdin) {
				fclose(f_text);
			}
		} else {
			// bulk input on stdin
			ix = 0;
			while (true) {
				if ((c = fgetc(stdin)) == EOF)
					break;
				p_text[ix++] = (char)c;
				if (ix >= (max-1)) {
					char *new_text = NULL;
					if (max <= SIZE_MAX - 1000) {
						max += 1000;
						new_text = (char *)realloc(p_text, max);
					}
					if (new_text == NULL) {
						free(p_text);
						espeak_ng_PrintStatusCodeMessage(ENOMEM, stderr, NULL);
						exit(EXIT_FAILURE);
					}
					p_text = new_text;
				}
			}
			if (ix > 0) {
				p_text[ix-1] = 0;
				espeak_Synth(p_text, ix+1, 0, POS_CHARACTER, 0, synth_flags, NULL, NULL);
			}
		}

		free(p_text);
	} else if (f_text != NULL) {
		if ((p_text = (char *)malloc(filesize+1)) == NULL) {
			espeak_ng_PrintStatusCodeMessage(ENOMEM, stderr, NULL);
			exit(EXIT_FAILURE);
		}

		fread(p_text, 1, filesize, f_text);
		p_text[filesize] = 0;
		espeak_Synth(p_text, filesize+1, 0, POS_CHARACTER, 0, synth_flags, NULL, NULL);
		fclose(f_text);

		free(p_text);
	}

	result = espeak_ng_Synchronize();
	if (result != ENS_OK) {
		espeak_ng_PrintStatusCodeMessage(result, stderr, NULL);
		exit(EXIT_FAILURE);
	}

	if (f_phonemes_out != stdout)
		fclose(f_phonemes_out);

	CloseWavFile();
	espeak_ng_Terminate();
	return 0;
}
Пример #28
0
/*
 * Synthesize `text` with espeak and return the audio as a Praat Sound.
 * Optionally also returns a TextGrid of the phoneme/word events (*tg) and
 * the raw event Table (*events); pass nullptr to skip either output.
 * Uses Praat's `my`/`thy` accessor macros; `me` is the synthesizer object.
 * Throws (MelderError) on any espeak or conversion failure.
 */
autoSound SpeechSynthesizer_to_Sound (SpeechSynthesizer me, const char32 *text, autoTextGrid *tg, autoTable *events) {
	try {
		// Synchronous mode: espeak delivers all audio through synthCallback
		// before espeak_Synth returns.  Returns the sample rate, or -1 on error.
		int fsamp = espeak_Initialize (AUDIO_OUTPUT_SYNCHRONOUS, 0, nullptr, // 5000ms
			espeakINITIALIZE_PHONEME_EVENTS|espeakINITIALIZE_PHONEME_IPA);
		if (fsamp == -1) {
			Melder_throw (U"Internal espeak error.");
		}
		int synth_flags = espeakCHARS_WCHAR;
		if (my d_inputTextFormat == SpeechSynthesizer_INPUT_TAGGEDTEXT) {
			// Tagged input: let espeak interpret SSML markup in the text.
			synth_flags |= espeakSSML;
		}
		if (my d_inputTextFormat != SpeechSynthesizer_INPUT_TEXTONLY) {
			synth_flags |= espeakPHONEMES;
		}
		// Writes directly into an espeak-internal global so phoneme events
		// are generated; IPA coding is added when requested.
		option_phoneme_events = espeakINITIALIZE_PHONEME_EVENTS; // extern int option_phoneme_events;
		if (my d_outputPhonemeCoding == SpeechSynthesizer_PHONEMECODINGS_IPA) {
			option_phoneme_events |= espeakINITIALIZE_PHONEME_IPA;
		}

		// Transfer the synthesizer's settings to the espeak engine.
		espeak_SetParameter (espeakRATE, my d_wordsPerMinute, 0);
		espeak_SetParameter (espeakPITCH, my d_pitchAdjustment, 0);
		espeak_SetParameter (espeakRANGE, my d_pitchRange, 0);
		const char32 *voiceLanguageCode = SpeechSynthesizer_getVoiceLanguageCodeFromName (me, my d_voiceLanguageName);
		const char32 *voiceVariantCode = SpeechSynthesizer_getVoiceVariantCodeFromName (me, my d_voiceVariantName);
		espeakdata_SetVoiceByName ((const char *) Melder_peek32to8 (voiceLanguageCode), 
			(const char *) Melder_peek32to8 (voiceVariantCode));

		espeak_SetParameter (espeakWORDGAP, my d_wordgap * 100, 0); // espeak wordgap is in units of 10 ms
		espeak_SetParameter (espeakCAPITALS, 0, 0);
		espeak_SetParameter (espeakPUNCTUATION, espeakPUNCT_NONE, 0);

		espeak_SetSynthCallback (synthCallback);

		// Events are accumulated by synthCallback into this table; one row per
		// espeak event, columns as named here.
		my d_events = Table_createWithColumnNames (0, U"time type type-t t-pos length a-pos sample id uniq");

		// On Windows wchar_t is 16-bit, so the 32-bit text must be converted;
		// elsewhere char32 strings can be passed with espeakCHARS_WCHAR directly.
		#ifdef _WIN32
                wchar_t *textW = Melder_peek32toW (text);
                espeak_Synth (textW, wcslen (textW) + 1, 0, POS_CHARACTER, 0, synth_flags, nullptr, me);
		#else
                espeak_Synth (text, str32len (text) + 1, 0, POS_CHARACTER, 0, synth_flags, nullptr, me);
		#endif
				
		espeak_Terminate ();
		// Wrap the accumulated samples in a Sound at espeak's internal rate,
		// then resample if the user asked for a different output rate.
		autoSound thee = buffer_to_Sound (my d_wav, my d_numberOfSamples, my d_internalSamplingFrequency);

		if (my d_samplingFrequency != my d_internalSamplingFrequency) {
			thee = Sound_resample (thee.get(), my d_samplingFrequency, 50);
		}
		my d_numberOfSamples = 0; // re-use the wav-buffer
		if (tg) {
			// Widen the TextGrid domain so it covers both the event times
			// (column 1 of the events table) and the sound's own time range.
			double xmin = Table_getNumericValue_Assert (my d_events.get(), 1, 1);
			if (xmin > thy xmin) {
				xmin = thy xmin;
			}
			double xmax = Table_getNumericValue_Assert (my d_events.get(), my d_events -> rows.size, 1);
			if (xmax < thy xmax) {
				xmax = thy xmax;
			}
			autoTextGrid tg1 = Table_to_TextGrid (my d_events.get(), text, xmin, xmax);
			*tg = TextGrid_extractPart (tg1.get(), thy xmin, thy xmax, 0);
		}
		if (events) {
			// Hand the event table to the caller (moves ownership).
			Table_setEventTypeString (my d_events.get());
			*events = my d_events.move();
		}
		my d_events.reset();
		return thee;
	} catch (MelderError) {
		// Make sure espeak is shut down even on failure, then rethrow
		// with a user-level message.
		espeak_Terminate ();
		Melder_throw (U"Text not played.");
	}
}
Пример #29
0
int main (int argc, char **argv)
//==============================
{
	static struct option long_options[] =
		{
		/* These options set a flag. */
//		{"verbose", no_argument,       &verbose_flag, 1},
//		{"brief",   no_argument,       &verbose_flag, 0},

		/* These options don't set a flag.
			We distinguish them by their indices. */
		{"help",    no_argument,       0, 'h'},
		{"stdin",   no_argument,       0, 0x100},
		{"compile-debug", optional_argument, 0, 0x101},
		{"compile", optional_argument, 0, 0x102},
		{"punct",   optional_argument, 0, 0x103},
		{"voices",  optional_argument, 0, 0x104},
		{"stdout",  no_argument,       0, 0x105},
		{"split",   optional_argument, 0, 0x106},
		{0, 0, 0, 0}
		};

	static const char* err_load = "Failed to read ";


	FILE *f_text=NULL;
	char *p_text=NULL;

	int option_index = 0;
	int c;
	int ix;
	int flag_stdin = 0;
	int flag_compile = 0;
	int filesize = 0;
	int synth_flags = espeakCHARS_AUTO | espeakPHONEMES | espeakENDPAUSE;

	int volume = -1;
	int speed = -1;
	int pitch = -1;
	int wordgap = -1;
	int option_capitals = -1;
	int option_punctuation = -1;
	int option_phonemes = -1;
	int option_linelength = 0;
	int option_waveout = 0;

	char filename[120];
	char voicename[40];
	char voice_mbrola[20];
	char dictname[40];
#define N_PUNCTLIST  100
	wchar_t option_punctlist[N_PUNCTLIST];

	voicename[0] = 0;
	voice_mbrola[0] = 0;
	dictname[0] = 0;
	wavefile[0] = 0;
	filename[0] = 0;
	option_punctlist[0] = 0;

	while(true)
	{
		c = getopt_long (argc, argv, "a:bf:g:hk:l:mp:qs:v:w:xXz",
					long_options, &option_index);

		/* Detect the end of the options. */
		if (c == -1)
			break;

		switch (c)
		{
		case 'b':
			synth_flags |= espeakCHARS_8BIT;
			break;

		case 'h':
			printf("\n");
			printf("eSpeak text-to-speech: %s\n%s",espeak_Info(NULL),help_text);
			exit(0);
			break;

		case 'k':
			option_capitals = atoi(optarg);
			break;

		case 'x':
			option_phonemes = 1;
			break;

		case 'X':
			option_phonemes = 2;
			break;

		case 'm':
			synth_flags |= espeakSSML;
			break;

		case 'p':
			pitch = atoi(optarg);
			break;

		case 'q':
			quiet = 1;
			break;

		case 'f':
			strncpy0(filename,optarg,sizeof(filename));
			break;

		case 'l':
			option_linelength = atoi(optarg);
			break;

		case 'a':
			volume = atoi(optarg);
			break;

		case 's':
			speed = atoi(optarg);
			break;

		case 'g':
			wordgap = atoi(optarg);
			break;

		case 'v':
			strncpy0(voicename,optarg,sizeof(voicename));
			break;

		case 'w':
			option_waveout = 1;
			strncpy0(wavefile,optarg,sizeof(filename));
			break;

		case 'z':  // remove pause from the end of a sentence
			synth_flags &= ~espeakENDPAUSE;
			break;

		case 0x100:		// --stdin
			flag_stdin = 1;
			break;

		case 0x105:		// --stdout
			option_waveout = 1;
			strcpy(wavefile,"stdout");
			break;

		case 0x101:    // --compile-debug
		case 0x102:		// --compile
			strncpy0(voicename,optarg,sizeof(voicename));
			flag_compile = c;
			quiet = 1;
			break;

		case 0x103:		// --punct
			option_punctuation = 1;
			if(optarg != NULL)
			{
				ix = 0;
				while((ix < N_PUNCTLIST) && ((option_punctlist[ix] = optarg[ix]) != 0)) ix++;
				option_punctlist[N_PUNCTLIST-1] = 0;
				option_punctuation = 2;
			}
			break;

		case 0x104:   // --voices
			espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS,0,NULL,0);
			DisplayVoices(stdout,optarg);
			exit(0);

		case 0x106:   // -- split
			if(optarg == NULL)
				samples_split = 30;  // default 30 minutes
			else
				samples_split = atoi(optarg);
			break;

		default:
			exit(0);
		}
	}


	if(option_waveout || quiet)
	{
		// writing to a file (or no output), we can use synchronous mode
		samplerate = espeak_Initialize(AUDIO_OUTPUT_SYNCHRONOUS,0,NULL,0);
		samples_split = (samplerate * samples_split) * 60;

		espeak_SetSynthCallback(SynthCallback);
		if(samples_split)
		{
			char *extn;
			extn = strrchr(wavefile,'.');
			if((extn != NULL) && ((wavefile + strlen(wavefile) - extn) <= 4))
			{
				strcpy(filetype,extn);
				*extn = 0;
			}
		}
		else
		if(option_waveout)
		{
			if(OpenWavFile(wavefile,samplerate) != 0)
				exit(4);
		}
	}
	else
	{
		// play the sound output
		samplerate = espeak_Initialize(AUDIO_OUTPUT_PLAYBACK,0,NULL,0);
	}
	

	if(voicename[0] == 0)
		strcpy(voicename,"default");

	if(espeak_SetVoiceByName(voicename) != EE_OK)
	{
		fprintf(stderr,"%svoice '%s'\n",err_load,voicename);
		exit(2);
	}

	if(flag_compile)
	{
		// This must be done after the voice is set
		espeak_CompileDictionary("", stderr, flag_compile & 0x1);
		exit(0);
	}

	// set any non-default values of parameters. This must be done after espeak_Initialize()
	if(speed > 0)
		espeak_SetParameter(espeakRATE,speed,0);
	if(volume >= 0)
		espeak_SetParameter(espeakVOLUME,volume,0);
	if(pitch >= 0)
		espeak_SetParameter(espeakPITCH,pitch,0);
	if(option_capitals >= 0)
		espeak_SetParameter(espeakCAPITALS,option_capitals,0);
	if(option_punctuation >= 0)
		espeak_SetParameter(espeakPUNCTUATION,option_punctuation,0);
	if(wordgap >= 0)
		espeak_SetParameter(espeakWORDGAP,wordgap,0);
	if(option_linelength > 0)
		espeak_SetParameter(espeakLINELENGTH,option_linelength,0);
	if(option_punctuation == 2)
		espeak_SetPunctuationList(option_punctlist);
	if(option_phonemes >= 0)
		espeak_SetPhonemeTrace(option_phonemes,stderr);

	if(filename[0]==0)
	{
		if((optind < argc) && (flag_stdin == 0))
		{
			// there's a non-option parameter, and no -f or --stdin
			// use it as text
			p_text = argv[optind];
		}
		else
		{
			f_text = stdin;
			if(flag_stdin == 0)
			{
				flag_stdin = 2;
			}
		}
	}
	else
	{
		filesize = GetFileLength(filename);
		f_text = fopen(filename,"r");
	}

	if((f_text == NULL) && (p_text == NULL))
	{
		fprintf(stderr,"%sfile '%s'\n",err_load,filename);
		exit(1);
	}


	if(p_text != NULL)
	{
		int size;
		size = strlen(p_text);
		espeak_Synth(p_text,size+1,0,POS_CHARACTER,0,synth_flags,NULL,NULL);
	}
	else
	if(flag_stdin)
	{
		int max = 1000;
		p_text = (char *)malloc(max);

		if(flag_stdin == 2)
		{
			// line by line input on stdin
			while(fgets(p_text,max,stdin) != NULL)
			{
				p_text[max-1] = 0;
				espeak_Synth(p_text,max,0,POS_CHARACTER,0,synth_flags,NULL,NULL);

			}
		}
		else
		{
			// bulk input on stdin
			ix = 0;
			while(!feof(stdin))
			{
				p_text[ix++] = fgetc(stdin);
				if(ix >= (max-1))
				{
					max += 1000;
					p_text = (char *)realloc(p_text,max);
				}
			}
			if(ix > 0)
			{
				p_text[ix-1] = 0;
				espeak_Synth(p_text,ix+1,0,POS_CHARACTER,0,synth_flags,NULL,NULL);
			}
		}
	}
	else
	if(f_text != NULL)
	{
		if((p_text = (char *)malloc(filesize+1)) == NULL)
		{
			fprintf(stderr,"Failed to allocate memory %d bytes",filesize);
			exit(3);
		}

		fread(p_text,1,filesize,f_text);
		p_text[filesize]=0;
		espeak_Synth(p_text,filesize+1,0,POS_CHARACTER,0,synth_flags,NULL,NULL);
		fclose(f_text);
	}

	espeak_Synchronize();
	return(0);
}