OPAL_SOUND_MGR bool SoundManager::playAudio( unsigned int audioID, bool forceRestart )
{
    // Make sure the audio source ident is valid and usable
    if ( audioID >= MAX_AUDIO_SOURCES || !mAudioSourceInUse[audioID] )
        return false;

    int sourceAudioState = 0;

    alGetError();

    // Are we currently playing the audio source?
    alGetSourcei( mAudioSources[audioID], AL_SOURCE_STATE, &sourceAudioState );

    if ( sourceAudioState == AL_PLAYING ) {
        if ( forceRestart )
            stopAudio( audioID );
        else
            return false; // Not forced, so we don't do anything
    }

    alSourcePlay( mAudioSources[ audioID ] );
    if ( checkALError( "playAudio::alSourcePlay: " ) )
        return false;

    return true;
}
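The matching SoundManager::stopAudio is not shown in this listing; below is a minimal sketch of what it might look like, assuming the same mAudioSources/mAudioSourceInUse bookkeeping and checkALError helper used by playAudio and stopAllAudio (everything not appearing in those snippets is an assumption, not the original implementation):

OPAL_SOUND_MGR bool SoundManager::stopAudio( unsigned int audioID )
{
    // Make sure the audio source ident is valid and usable
    if ( audioID >= MAX_AUDIO_SOURCES || !mAudioSourceInUse[audioID] )
        return false;

    alGetError();

    // Halt playback; a later alSourcePlay on this source restarts from the beginning
    alSourceStop( mAudioSources[audioID] );

    if ( checkALError( "stopAudio::alSourceStop: " ) )
        return false;

    return true;
}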
void Sound::Stop() {
    if (_sound) {
        stopAudio(_sound);
    }
}
bool VideoDecoder::seek(const Audio::Timestamp &time) {
    if (!isSeekable())
        return false;

    // Stop all tracks so they can be seeked
    if (isPlaying())
        stopAudio();

    for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
        if (!(*it)->seek(time))
            return false;

    _lastTimeChange = time;

    // Now that we've seeked, start all tracks again
    // Also reset our start time
    if (isPlaying()) {
        startAudio();
        _startTime = g_system->getMillis() - time.msecs();
    }

    resetPauseStartTime();
    _needsUpdate = true;
    return true;
}
bool VideoDecoder::seek(const Audio::Timestamp &time) {
    if (!isSeekable())
        return false;

    // Stop all tracks so they can be seeked
    if (isPlaying())
        stopAudio();

    // Do the actual seeking
    if (!seekIntern(time))
        return false;

    // Seek any external track too
    for (TrackListIterator it = _externalTracks.begin(); it != _externalTracks.end(); it++)
        if (!(*it)->seek(time))
            return false;

    _lastTimeChange = time;

    // Now that we've seeked, start all tracks again
    // Also reset our start time
    if (isPlaying()) {
        startAudio();
        _startTime = g_system->getMillis() - (time.msecs() / _playbackRate).toInt();
    }

    resetPauseStartTime();
    findNextVideoTrack();
    _needsUpdate = true;
    return true;
}
void AudioManager::setGameState(int newState) {
    if ((newState != gameState) && (gameState != GAME_STATE_DRAGON)) {
        switch (newState) {
        case GAME_STATE_NONE:
            stopAudio();
            break;
        case GAME_STATE_COMBAT:
            Print("Setting combat state.");
            setAudioCue(combatMusic);
            break;
        case GAME_STATE_DRAGON:
            Print("Setting dragon state.");
            setAudioCue(dragonMusic);
            break;
        case GAME_STATE_NORMAL:
        default:
            Print("Setting normal state.");
            setAudioCue(normalMusic);
            break;
        }
        gameState = newState;
    }
}
void VideoDecoder::stop() {
    if (!isPlaying())
        return;

    // Stop audio here so we don't have it affect getTime()
    stopAudio();

    // Keep the time marked down in case we start up again
    // We do this before _playbackRate is set so we don't get
    // _lastTimeChange returned, but before _pauseLevel is
    // reset.
    _lastTimeChange = getTime();

    _playbackRate = 0;
    _startTime = 0;
    _palette = 0;
    _dirtyPalette = false;
    _needsUpdate = false;

    // Also reset the pause state.
    _pauseLevel = 0;

    // Reset the pause state of the tracks too
    for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
        (*it)->pause(false);
}
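The stopAudio()/startAudio() helpers that these VideoDecoder methods call are not included in the listing; here is a plausible sketch of them, assuming the track list distinguishes audio tracks and that an AudioTrack exposes start() and stop() (the real implementation may differ in details):

void VideoDecoder::stopAudio() {
    // Stop every audio track so its playback no longer advances the clock
    for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
        if ((*it)->getTrackType() == Track::kTrackTypeAudio)
            ((AudioTrack *)*it)->stop();
}

void VideoDecoder::startAudio() {
    // Restart every audio track from its current position
    for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
        if ((*it)->getTrackType() == Track::kTrackTypeAudio)
            ((AudioTrack *)*it)->start();
}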
void SeekableBinkDecoder::seekToFrame(uint32 frame) {
    assert(frame < _frames.size());

    // Fast path
    if ((int32)frame == _curFrame + 1)
        return;

    // Stop all audio (for now)
    stopAudio();

    // Track down the keyframe
    _curFrame = findKeyFrame(frame) - 1;
    while (_curFrame < (int32)frame - 1)
        skipNextFrame();

    // Map out the starting point
    Common::Rational startTime = frame * 1000 / getFrameRate();
    _startTime = g_system->getMillis() - startTime.toInt();
    resetPauseStartTime();

    // Adjust the audio starting point
    if (_audioTrack < _audioTracks.size()) {
        Common::Rational audioStartTime = (frame + 1) * 1000 / getFrameRate();
        _audioStartOffset = audioStartTime.toInt();
    }

    // Restart the audio
    startAudio();
}
void QuickTimeDecoder::close() {
    stopAudio();

    if (_scaledSurface) {
        _scaledSurface->free();
        delete _scaledSurface;
        _scaledSurface = 0;
    }

    Common::QuickTimeParser::close();
    SeekableVideoDecoder::reset();
}
void QuickTimeDecoder::close() {
    stopAudio();
    freeAllTrackHandlers();

    if (_scaledSurface) {
        _scaledSurface->free();
        delete _scaledSurface;
        _scaledSurface = 0;
    }

    _width = _height = 0;

    Common::QuickTimeParser::close();
    SeekableVideoDecoder::reset();
}
int main() {
    printf("INIT: %d\n", initializeAudio());
    //testBuffer();
    startAudio();
    startRecording("test.ogg");
    sleep(5);
    stopRecording();
    stopAudio();
    printf("DEINIT: %d\n", uninitializeAudio());
    return 0;
}
void SyntroLCamConsole::run() {
#ifndef WIN32
    if (m_daemonMode)
        runDaemon();
    else
#endif
        runConsole();

    stopVideo();
    stopAudio();
    m_client->exitThread();
    SyntroUtils::syntroAppExit();
    QCoreApplication::exit();
}
OPAL_SOUND_MGR bool SoundManager::stopAllAudio( void )
{
    if ( mAudioSourcesInUseCount >= MAX_AUDIO_SOURCES )
        return false;

    alGetError();

    for ( int i = 0; i < mAudioSourcesInUseCount; i++ ) {
        stopAudio( i );
    }

    if ( checkALError( "stopAllAudio::alSourceStop " ) )
        return false;

    return true;
}
void BinkDecoder::close() {
    reset();

    // Stop audio
    stopAudio();

    for (int i = 0; i < 4; i++) {
        delete[] _curPlanes[i]; _curPlanes[i] = 0;
        delete[] _oldPlanes[i]; _oldPlanes[i] = 0;
    }

    deinitBundles();

    for (int i = 0; i < 16; i++) {
        delete _huffman[i];
        _huffman[i] = 0;
    }

    delete _bink;
    _bink = 0;

    _surface.free();

    _audioTrack = 0;

    for (int i = 0; i < kSourceMAX; i++) {
        _bundles[i].countLength = 0;

        _bundles[i].huffman.index = 0;
        for (int j = 0; j < 16; j++)
            _bundles[i].huffman.symbols[j] = j;

        _bundles[i].data = 0;
        _bundles[i].dataEnd = 0;
        _bundles[i].curDec = 0;
        _bundles[i].curPtr = 0;
    }

    for (int i = 0; i < 16; i++) {
        _colHighHuffman[i].index = 0;
        for (int j = 0; j < 16; j++)
            _colHighHuffman[i].symbols[j] = j;
    }

    _audioTracks.clear();
    _frames.clear();
}
void TargetFileComponent::buttonClicked (Button* buttonThatWasClicked)
{
    //[UserbuttonClicked_Pre]
    //[/UserbuttonClicked_Pre]

    if (buttonThatWasClicked == playButton)
    {
        //[UserButtonCode_playButton] -- add your button handler code here..
        playAudio();
        //[/UserButtonCode_playButton]
    }
    else if (buttonThatWasClicked == stopButton)
    {
        //[UserButtonCode_stopButton] -- add your button handler code here..
        stopAudio();
        //[/UserButtonCode_stopButton]
    }
    else if (buttonThatWasClicked == loadFileButton)
    {
        //[UserButtonCode_loadFileButton] -- add your button handler code here..
        FileChooser myChooser ("Please select the file you want to load...");

        if (myChooser.browseForFileToOpen())
        {
            audioTransport.setSource(nullptr); // this fixes memory issue with loading new file
            File selectedFile = myChooser.getResult();
            currentFile->setFile(selectedFile);
            container->setFile(selectedFile);
            audioTransport.setSource(currentFile->getSource());
            isPlayable = true;
            setPlayable(true);
            sendActionMessage("setTargetFile");
        }
        //[/UserButtonCode_loadFileButton]
    }

    //[UserbuttonClicked_Post]
    //[/UserbuttonClicked_Post]
}
void QuickTimeDecoder::seekToTime(const Audio::Timestamp &time) {
    stopAudio();
    _audioStartOffset = time;

    // Sets all tracks to this time
    for (uint32 i = 0; i < _handlers.size(); i++)
        _handlers[i]->seekToTime(time);

    startAudio();

    // Reset our start time
    _startTime = g_system->getMillis() - time.msecs();
    _setStartTime = true;
    resetPauseStartTime();

    // Reset the next video track too
    _nextVideoTrack = findNextVideoTrack();
    _needUpdate = _nextVideoTrack != 0;
}
bool VideoDecoder::rewind() {
    if (!isRewindable())
        return false;

    // Stop all tracks so they can be rewound
    if (isPlaying())
        stopAudio();

    for (TrackList::iterator it = _tracks.begin(); it != _tracks.end(); it++)
        if (!(*it)->rewind())
            return false;

    // Now that we've rewound, start all tracks again
    if (isPlaying())
        startAudio();

    _lastTimeChange = 0;
    _startTime = g_system->getMillis();
    resetPauseStartTime();
    return true;
}
void QuickTimeDecoder::seekToFrame(uint32 frame) {
    assert(_videoTrackIndex >= 0);
    assert(frame < _tracks[_videoTrackIndex]->frameCount);

    // Stop all audio (for now)
    stopAudio();

    // Track down the keyframe
    _curFrame = findKeyFrame(frame) - 1;
    while (_curFrame < (int32)frame - 1)
        decodeNextFrame();

    // Map out the starting point
    _nextFrameStartTime = 0;
    uint32 curFrame = 0;

    for (int32 i = 0; i < _tracks[_videoTrackIndex]->timeToSampleCount && curFrame < frame; i++) {
        for (int32 j = 0; j < _tracks[_videoTrackIndex]->timeToSample[i].count && curFrame < frame; j++) {
            curFrame++;
            _nextFrameStartTime += _tracks[_videoTrackIndex]->timeToSample[i].duration;
        }
    }

    // Adjust the video starting point
    const Audio::Timestamp curVideoTime(0, _nextFrameStartTime, _tracks[_videoTrackIndex]->timeScale);
    _startTime = g_system->getMillis() - curVideoTime.msecs();
    resetPauseStartTime();

    // Adjust the audio starting point
    if (_audioTrackIndex >= 0) {
        _audioStartOffset = curVideoTime;

        // Seek to the new audio location
        setAudioStreamPos(_audioStartOffset);

        // Restart the audio
        startAudio();
    }
}
void VideoDecoder::setEndTime(const Audio::Timestamp &endTime) {
    Audio::Timestamp startTime = 0;

    if (isPlaying()) {
        startTime = getTime();
        stopAudio();
    }

    _endTime = endTime;
    _endTimeSet = true;

    if (startTime > endTime)
        return;

    if (isPlaying()) {
        // We'll assume the audio track is going to start up at the same time it just was
        // and therefore not do any seeking.
        // Might want to set it anyway if we're seekable.
        startAudioLimit(_endTime.msecs() - startTime.msecs());
        _lastTimeChange = startTime;
    }
}
/*******************************************************************************
 * Initializes the top buttons.
 */
QWidget* MainWindow::initButtons(QWidget* parent) {
    QWidget* widget = new QWidget(parent);

    QPushButton* startAudioButton = new QPushButton(tr("(Re)start"), widget);
    QPushButton* stopAudioButton  = new QPushButton(tr("Stop"), widget);
    QPushButton* reloadButton     = new QPushButton(tr("Reload Configuration"), widget);
    QPushButton* analysisButton   = new QPushButton(tr("Analysis"), widget);

    QHBoxLayout* layout = new QHBoxLayout(widget);
    layout->setContentsMargins(0, 0, 0, 0);
    layout->addWidget(startAudioButton);
    layout->addWidget(stopAudioButton);
    layout->addWidget(reloadButton);
    layout->addWidget(analysisButton);

    connect(startAudioButton, SIGNAL(clicked()), this, SLOT(startAudio()));
    connect(stopAudioButton , SIGNAL(clicked()), this, SLOT(stopAudio()));
    connect(reloadButton    , SIGNAL(clicked()), this, SLOT(reload()));
    connect(analysisButton  , SIGNAL(clicked()), this, SLOT(showAnalysisWindow()));

    return widget;
}
void QuickTimeDecoder::seekToFrame(uint32 frame) {
    assert(_videoStreamIndex >= 0);
    assert(frame < _streams[_videoStreamIndex]->nb_frames);

    // Stop all audio (for now)
    stopAudio();

    // Track down the keyframe
    _curFrame = findKeyFrame(frame) - 1;
    while (_curFrame < (int32)frame - 1)
        decodeNextFrame();

    // Map out the starting point
    _nextFrameStartTime = 0;
    uint32 curFrame = 0;

    for (int32 i = 0; i < _streams[_videoStreamIndex]->stts_count && curFrame < frame; i++) {
        for (int32 j = 0; j < _streams[_videoStreamIndex]->stts_data[i].count && curFrame < frame; j++) {
            curFrame++;
            _nextFrameStartTime += _streams[_videoStreamIndex]->stts_data[i].duration;
        }
    }

    // Adjust the video starting point
    const Audio::Timestamp curVideoTime(0, _nextFrameStartTime, _streams[_videoStreamIndex]->time_scale);
    _startTime = g_system->getMillis() - curVideoTime.msecs();
    resetPauseStartTime();

    // Adjust the audio starting point
    if (_audioStreamIndex >= 0) {
        _audioStartOffset = curVideoTime;

        // Re-create the audio stream
        STSDEntry *entry = &_streams[_audioStreamIndex]->stsdEntries[0];
        _audStream = Audio::makeQueuingAudioStream(entry->sampleRate, entry->channels == 2);

        // First, we need to track down what audio sample we need
        Audio::Timestamp curAudioTime(0, _streams[_audioStreamIndex]->time_scale);
        uint sample = 0;
        bool done = false;

        for (int32 i = 0; i < _streams[_audioStreamIndex]->stts_count && !done; i++) {
            for (int32 j = 0; j < _streams[_audioStreamIndex]->stts_data[i].count; j++) {
                curAudioTime = curAudioTime.addFrames(_streams[_audioStreamIndex]->stts_data[i].duration);

                if (curAudioTime > curVideoTime) {
                    done = true;
                    break;
                }

                sample++;
            }
        }

        // Now to track down what chunk it's in
        _curAudioChunk = 0;
        uint32 totalSamples = 0;

        for (uint32 i = 0; i < _streams[_audioStreamIndex]->chunk_count; i++, _curAudioChunk++) {
            int sampleToChunkIndex = -1;

            for (uint32 j = 0; j < _streams[_audioStreamIndex]->sample_to_chunk_sz; j++)
                if (i >= _streams[_audioStreamIndex]->sample_to_chunk[j].first)
                    sampleToChunkIndex = j;

            assert(sampleToChunkIndex >= 0);

            totalSamples += _streams[_audioStreamIndex]->sample_to_chunk[sampleToChunkIndex].count;

            if (sample < totalSamples) {
                totalSamples -= _streams[_audioStreamIndex]->sample_to_chunk[sampleToChunkIndex].count;
                break;
            }
        }

        // Reposition the audio stream
        readNextAudioChunk();

        if (sample != totalSamples) {
            // HACK: Skip a certain amount of samples from the stream
            // (There's got to be a better way to do this!)
            int16 *tempBuffer = new int16[sample - totalSamples];
            _audStream->readBuffer(tempBuffer, sample - totalSamples);
            delete[] tempBuffer;
            debug(3, "Skipping %d audio samples", sample - totalSamples);
        }

        // Restart the audio
        startAudio();
    }
}
// Responds to all POST requests.
// Supported requests:
// - /GetJson        --> Receive Faust code / Compile data / Send back the JSON interface
// - /GetJsonFromKey --> Receive SHA key / Send back the JSON interface
// - /CreateInstance --> Receive factoryIndex / Create instance
// - /StartAudio, /StopAudio --> Receive SHA key / Start or stop audio for that instance
// (/DeleteFactory is currently commented out)
int Server::answer_post(MHD_Connection *connection, const char *url, const char *upload_data, size_t *upload_data_size, void **con_cls) {
    struct connection_info_struct *con_info = (connection_info_struct*)*con_cls;

    if (0 != *upload_data_size) {
        MHD_post_process(con_info->fPostprocessor, upload_data, *upload_data_size);
        *upload_data_size = 0;
        return MHD_YES;
    } else {
        if (strcmp(url, "/GetJson") == 0) {
            if (compile_Data(con_info)) {
                return send_page(connection, con_info->fAnswerstring.c_str(), con_info->fAnswerstring.size(), MHD_HTTP_OK, "application/json");
            } else {
                return send_page(connection, con_info->fAnswerstring.c_str(), con_info->fAnswerstring.size(), MHD_HTTP_BAD_REQUEST, "text/html");
            }
        } else if (strcmp(url, "/GetJsonFromKey") == 0) {
            if (getJsonFromKey(con_info)) {
                return send_page(connection, con_info->fAnswerstring.c_str(), con_info->fAnswerstring.size(), MHD_HTTP_OK, "application/json");
            } else {
                return send_page(connection, con_info->fAnswerstring.c_str(), con_info->fAnswerstring.size(), MHD_HTTP_BAD_REQUEST, "text/html");
            }
        } else if (strcmp(url, "/CreateInstance") == 0) {
            if (createInstance(con_info)) {
                return send_page(connection, "", 0, MHD_HTTP_OK, "text/html");
            } else {
                return send_page(connection, con_info->fAnswerstring.c_str(), con_info->fAnswerstring.size(), MHD_HTTP_BAD_REQUEST, "text/html");
            }
        }
//        else if (strcmp(url, "/DeleteFactory") == 0) {
//            llvm_dsp_factory* toDelete = fAvailableFactories[con_info->fSHAKey];
//
//            if (toDelete) {
//                fAvailableFactories.erase(con_info->fSHAKey);
//                deleteSlaveDSPFactory(toDelete);
//                return send_page(connection, "", 0, MHD_HTTP_OK, "application/html");
//            } else {
//                return send_page(connection, "", 0, MHD_HTTP_BAD_REQUEST, "text/html");
//            }
//        }
        else if (strcmp(url, "/StartAudio") == 0) {
            startAudio(con_info->fSHAKey);
            return send_page(connection, "", 0, MHD_HTTP_OK, "text/html");
        } else if (strcmp(url, "/StopAudio") == 0) {
            stopAudio(con_info->fSHAKey);
            return send_page(connection, "", 0, MHD_HTTP_OK, "text/html");
        } else {
            return send_page(connection, "", 0, MHD_HTTP_BAD_REQUEST, "text/html");
        }
    }
}
void AudioPlayer::stopAllAudio() {
    stopSoundSync();
    stopAudio();
    if (_audioCdStart > 0)
        audioCdStop();
}