void VoiceEndpoint::timerEvent(QTimerEvent *event)
{
    if(m_sesTimer > 0 && event && event->timerId() == m_sesTimer) {
        killTimer(m_sesTimer);
        m_sesTimer = 0;
        switch (m_sesPhase) {
        case PhSetupRequest:
            sessionSetupResponse(ResTimeout,QUuid());
            break;
        case PhSetupComplete:
            sessionSetupResponse(ResInvalidMessage,QUuid());
            break;
        case PhAudioStarted:
            stopAudioStream(m_sessId);
            break;
        case PhAudioStopped:
            //transcriptionResponse(ResTimeout,QList<Sentence>(),QUuid()); // We should actually reply like this but let's do...
            transcriptionResponse(ResSuccess,
                            QList<Sentence>({
                                Sentence({8,QList<Word>({{1,2,"No"},{1,3,"one"},{1,5,"dared"},{1,2,"to"},{1,5,"reply"},{1,1,"."},{1,7,"Service"},{1,7,"Timeout"}})})
                            }),
                            m_appUuid); // Example transcription, for reference
            break;
        case PhResultReceived:
            sendDictationResults();
            sessionDestroy();
            break;
        default:
            qDebug() << "Unhandled timer event for phase" << m_sesPhase;
        }
    }
}
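
A side note on the timer handling above: each phase is expected to arm m_sesTimer when it begins, so that timerEvent() can take the matching timeout path. A minimal sketch of that arming side, with a hypothetical enterPhase() helper and illustrative timeout values not taken from the source:

// Hypothetical sketch: arming the session watchdog when a phase begins.
// SessionPhase and the timeout value are illustrative; m_sesTimer and
// m_sesPhase mirror the members used in timerEvent() above.
void VoiceEndpoint::enterPhase(SessionPhase phase, int timeoutMs)
{
    if (m_sesTimer > 0)
        killTimer(m_sesTimer);          // Drop any previous watchdog
    m_sesPhase = phase;
    m_sesTimer = startTimer(timeoutMs); // timerEvent() fires if we stall here
}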
/**
 * @brief VoiceEndpoint::handleFrame
 * @param data
 * Handles a raw unframed packet for the audio endpoint.
 * It handles both the bitstream and the stop packet.
 * When the bitstream stops, it starts a timer to detect a timeout and clean up the session.
 * Important: the response should be sent within a 2000 ms window after the stop.
 */
void VoiceEndpoint::handleFrame(const QByteArray &data)
{
    WatchDataReader reader(data);
    quint8 cmd = reader.read<quint8>();
    quint16 sid = reader.readLE<quint16>();

    if(cmd == FrmDataTransfer) {
        if(sid == m_sessId) {
            AudioStream str;
            str.count = reader.read<quint8>();
            for(int i=0;i<str.count;i++) {
                Frame frm;
                frm.length = reader.read<quint8>();
                frm.data = reader.readBytes(frm.length);
                str.frames.append(frm);
            }
            emit audioFrame(sid,str);
            m_sesPhase = PhAudioStarted;
        } else {
            stopAudioStream(sid);
        }
    } else if(cmd == FrmStopTransfer) {
        if(sid != m_sessId)
            return;
        if(m_sesTimer)
            killTimer(m_sesTimer);
        qDebug() << "Pebble finished sending audio at session" << m_sessId;
        emit audioFrame(m_sessId,AudioStream());
        m_sesPhase = PhAudioStopped;
        m_sesTimer = startTimer(1500);
    } else {
        qWarning() << "Unknown audio frame type" << data.toHex();
    }
}
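
On the consumer side of handleFrame(), the empty AudioStream emitted in the FrmStopTransfer branch serves as the end-of-stream marker. A hypothetical receiver sketch, assuming audioFrame(quint16, AudioStream) is the signal's signature, AudioStream/Frame are accessible types, and decodeFrame() stands in for a real decoder:

// Hypothetical consumer sketch: an empty AudioStream (no frames) marks
// end-of-stream, per the emit in the FrmStopTransfer branch above.
QObject::connect(endpoint, &VoiceEndpoint::audioFrame,
                 [](quint16 sessionId, AudioStream stream) {
    if (stream.frames.isEmpty()) {
        qDebug() << "Session" << sessionId << "finished streaming";
        return;
    }
    for (const Frame &frm : stream.frames)
        decodeFrame(frm.data); // decodeFrame() is a placeholder decoder
});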
void AudioControl::run()
{
    PaError err;

    m_synth = new Synth();

    err = initPortAudio();

    if (err != paNoError)
        terminateAudioStream(err);

    m_startLock.unlock();
    exec();

    stopAudioStream();
    delete m_releaseTimer;
    delete m_synth;

    QApplication::quit();
}
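
The m_startLock.unlock() at the top of run() suggests a ready-handshake with the thread that starts AudioControl. A sketch of what that caller might look like, assuming m_startLock is a QMutex reachable from the starting code (accessed directly here purely for illustration):

// Hypothetical start-up sketch: lock before start(), then block on a
// second lock() until run() unlocks, i.e. until audio init completed.
AudioControl *audio = new AudioControl();
audio->m_startLock.lock();  // Take the lock before starting the thread
audio->start();             // QThread::start() invokes run() on the new thread
audio->m_startLock.lock();  // Blocks until run() unlocks after init
audio->m_startLock.unlock();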
Example #4
int main(void)
{
    //=============================================================================
    //Variables and Data Structures
    //=============================================================================
    USERDATA data;
    AUDIOSTREAM outStream;
    int i;

    //=============================================================================
    //STEP 0 - Preliminary set-up and initialisation
    //=============================================================================

    //Initialise sinusoidal wavetable
    for( i=0; i<TABLE_SIZE; i++ )
    {
        data.sine[i] = (float) sin( ((double)i/(double)TABLE_SIZE) * M_PI * 2. );
    }
    data.left_phase = data.right_phase = 0;

    //Initialise message of data structure
    sprintf( data.message, "No Message" );

    //Set up AUDIOSTREAM structure
    outStream.stream = NULL;            //Will be set by the openDefaultAudioStream call
    outStream.sampleRate = 44100;       //The stream sample rate
    outStream.sampleFormat = paFloat32; //The stream sample format (32-bit float in this case)
    outStream.inChannels = 0;           //Zero input channels indicates an output-only stream
    outStream.outChannels = 2;          //Two channels for stereo output
    outStream.framesPerBuffer = 512;    //Number of frames per buffer in the processing callback

    //=============================================================================
    //STEP 1 - Initialise audio system.
    //=============================================================================
    initialiseAudioSystem();

    //=============================================================================
    //STEP 2 - Open an audio stream
    //=============================================================================
    openDefaultAudioStream(&outStream,processingCallback,&data);

    //Also register a callback function to be called as soon as
    //the stream stops. Handy to have here although it does not
    //do much in this example.
    setAudioStreamFinishedCallback(&outStream,&StreamFinished);

    //=============================================================================
    //STEP 3 - Main control loop
    //=============================================================================

    //Start the audio stream
    startAudioStream(&outStream);

    //Pause for NUM_SECONDS seconds
    printf("Pause for %i seconds\n",NUM_SECONDS);
    pause(NUM_SECONDS*1000);

    //Stop the audio stream
    stopAudioStream(&outStream);

    //=============================================================================
    //STEP 4 - Clean up
    //=============================================================================

    //Close the audio stream
    closeAudioStream(&outStream);

    //Terminate the audio system.
    terminateAudioSystem();

    printf("Example completed.\n");

    return 0;
}
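
The example registers processingCallback and StreamFinished without showing them. A sketch of what the sine-playing callback could look like, modeled on PortAudio's canonical sine example; the USERDATA fields (sine[], left_phase, right_phase) and TABLE_SIZE are assumed from the set-up code above:

//Sketch of a possible processingCallback, modeled on PortAudio's
//canonical sine example; not the example's actual implementation.
static int processingCallback(const void *inputBuffer, void *outputBuffer,
                              unsigned long framesPerBuffer,
                              const PaStreamCallbackTimeInfo *timeInfo,
                              PaStreamCallbackFlags statusFlags,
                              void *userData)
{
    USERDATA *data = (USERDATA*)userData;
    float *out = (float*)outputBuffer;
    unsigned long i;
    (void)inputBuffer;  //Output-only stream: input is unused
    (void)timeInfo;
    (void)statusFlags;

    for (i = 0; i < framesPerBuffer; i++) {
        *out++ = data->sine[data->left_phase];  //Left channel sample
        *out++ = data->sine[data->right_phase]; //Right channel sample
        data->left_phase += 1;                  //Advance the phases;
        data->right_phase += 3;                 //right is detuned for effect
        if (data->left_phase >= TABLE_SIZE) data->left_phase -= TABLE_SIZE;
        if (data->right_phase >= TABLE_SIZE) data->right_phase -= TABLE_SIZE;
    }
    return paContinue;
}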
Example #5
/* Stop the connection by undoing the step at the current stage and those before it */
void LiStopConnection(void) {
    // Disable termination callbacks now
    alreadyTerminated = 1;

    if (stage == STAGE_INPUT_STREAM_START) {
        Limelog("Stopping input stream...");
        stopInputStream();
        stage--;
        Limelog("done\n");
    }
    if (stage == STAGE_AUDIO_STREAM_START) {
        Limelog("Stopping audio stream...");
        stopAudioStream();
        stage--;
        Limelog("done\n");
    }
    if (stage == STAGE_VIDEO_STREAM_START) {
        Limelog("Stopping video stream...");
        stopVideoStream();
        stage--;
        Limelog("done\n");
    }
    if (stage == STAGE_CONTROL_STREAM_START) {
        Limelog("Stopping control stream...");
        stopControlStream();
        stage--;
        Limelog("done\n");
    }
    if (stage == STAGE_INPUT_STREAM_INIT) {
        Limelog("Cleaning up input stream...");
        destroyInputStream();
        stage--;
        Limelog("done\n");
    }
    if (stage == STAGE_AUDIO_STREAM_INIT) {
        Limelog("Cleaning up audio stream...");
        destroyAudioStream();
        stage--;
        Limelog("done\n");
    }
    if (stage == STAGE_VIDEO_STREAM_INIT) {
        Limelog("Cleaning up video stream...");
        destroyVideoStream();
        stage--;
        Limelog("done\n");
    }
    if (stage == STAGE_CONTROL_STREAM_INIT) {
        Limelog("Cleaning up control stream...");
        destroyControlStream();
        stage--;
        Limelog("done\n");
    }
    if (stage == STAGE_RTSP_HANDSHAKE) {
        Limelog("Terminating RTSP handshake...");
        terminateRtspHandshake();
        stage--;
        Limelog("done\n");
    }
    if (stage == STAGE_NAME_RESOLUTION) {
        // Nothing to do
        stage--;
    }
    if (stage == STAGE_PLATFORM_INIT) {
        Limelog("Cleaning up platform...");
        cleanupPlatform();
        stage--;
        Limelog("done\n");
    }
    LC_ASSERT(stage == STAGE_NONE);
}
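
The cascade of if statements works because every block decrements stage, which makes the next check true as well: teardown runs from whatever stage start-up reached back down to STAGE_NONE, in exactly the reverse of the initialisation order. The same idiom in a minimal form, with hypothetical stage names and placeholder calls:

// Hypothetical sketch of the staged-teardown idiom used above: each block
// undoes one stage and decrements, so every earlier stage follows in turn.
enum Stage { ST_NONE, ST_OPEN, ST_HANDSHAKE, ST_STREAMING };
static int st = ST_STREAMING; // Wherever start-up got to

void teardown(void) {
    if (st == ST_STREAMING) { stopStreaming();  st--; } // Placeholder calls
    if (st == ST_HANDSHAKE) { closeHandshake(); st--; }
    if (st == ST_OPEN)      { closeSocket();    st--; }
    // st == ST_NONE here: fully unwound
}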
void VoiceEndpoint::stopAudioStream()
{
    stopAudioStream(m_sessId);
}