// Stop speech/track playback and release all per-track bookkeeping.
// The stream is stopped and the chunk cursors reset while holding the
// speech source's stream mutex; the chunk list and sample are freed
// afterwards, outside the lock.
void StopTrack (void)
{
	LockMutex (soundSource[SPEECH_SOURCE].stream_mutex);
	StopStream (SPEECH_SOURCE);
	track_count = 0;
	tracks_length = 0;
	cur_chunk = NULL;
	cur_sub_chunk = NULL;
	UnlockMutex (soundSource[SPEECH_SOURCE].stream_mutex);
	if (chunks_head)
	{
		chunks_tail = NULL;
		// Frees every chunk (and its decoder) in the list.
		destroy_SoundChunk_list (chunks_head);
		chunks_head = NULL;
		last_sub = NULL;
	}
	if (sound_sample)
	{
		// We delete the decoders ourselves
		// (the sample's decoder pointer is just a borrowed reference to
		// the current chunk's decoder, so detach it before destroying).
		sound_sample->decoder = NULL;
		TFB_DestroySoundSample (sound_sample);
		sound_sample = NULL;
	}
}
// Tear down the OpenAL sound backend: stop all stream sources, free
// their staging buffers and mutexes, then destroy the OpenAL context
// and device and shut down the decoder subsystem.
void openAL_Uninit (void)
{
	int i;

	UninitStreamDecoder ();
	for (i = 0; i < NUM_SOUNDSOURCES; ++i)
	{
		// Only sources with an attached decoding sample are streaming.
		if (soundSource[i].sample && soundSource[i].sample->decoder)
		{
			StopStream (i);
		}
		if (soundSource[i].sbuffer)
		{
			// Null the pointer before freeing so no one observes a
			// dangling sbuffer.
			void *sbuffer = soundSource[i].sbuffer;
			soundSource[i].sbuffer = NULL;
			HFree (sbuffer);
		}
		DestroyMutex (soundSource[i].stream_mutex);
	}

	// Release the context/device last, after all sources are quiet.
	alcMakeContextCurrent (NULL);
	alcDestroyContext (alcContext);
	alcContext = NULL;
	alcCloseDevice (alcDevice);
	alcDevice = NULL;

	SoundDecoder_Uninit ();
}
// Tear down the SDL mixer backend: stop all stream sources, free their
// staging buffers, mutexes and mixer handles, then close SDL audio and
// shut down the mixer, decoder and SDL audio subsystems.
void mixSDL_Uninit (void)
{
	int i;

	UninitStreamDecoder ();
	for (i = 0; i < NUM_SOUNDSOURCES; ++i)
	{
		// Only sources with an attached decoding sample are streaming.
		if (soundSource[i].sample && soundSource[i].sample->decoder)
		{
			StopStream (i);
		}
		if (soundSource[i].sbuffer)
		{
			// Null the pointer before freeing so no one observes a
			// dangling sbuffer.
			void *sbuffer = soundSource[i].sbuffer;
			soundSource[i].sbuffer = NULL;
			HFree (sbuffer);
		}
		DestroyMutex (soundSource[i].stream_mutex);
		soundSource[i].stream_mutex = 0;
		mixSDL_DeleteSources (1, &soundSource[i].handle);
	}

	SDL_CloseAudio ();
	mixer_Uninit ();
	SoundDecoder_Uninit ();
	SDL_QuitSubSystem (SDL_INIT_AUDIO);
}
// Release a music resource: free its decoder (stopping the music
// stream first if this sample is the one currently playing) and then
// destroy the sample and the underlying resource data.
//
// data: pointer to a TFB_SoundSample* as stored by the resource system.
// Returns TRUE on success, FALSE if data is NULL.
BOOLEAN _ReleaseMusicData (void *data)
{
	TFB_SoundSample **pmus = data;
	TFB_SoundSample *sample;

	if (pmus == NULL)
		return (FALSE);

	sample = *pmus;
	assert (sample != 0);

	if (sample->decoder)
	{
		TFB_SoundDecoder *decoder = sample->decoder;

		// Check under the stream mutex whether this sample is the one
		// the music source is playing; if so, stop it before freeing.
		LockMutex (soundSource[MUSIC_SOURCE].stream_mutex);
		if (soundSource[MUSIC_SOURCE].sample == sample)
		{	// Currently playing this sample! Not good.
			StopStream (MUSIC_SOURCE);
		}
		UnlockMutex (soundSource[MUSIC_SOURCE].stream_mutex);

		// Detach before freeing so the sample destructor does not
		// touch the decoder again.
		sample->decoder = NULL;
		SoundDecoder_Free (decoder);
	}

	TFB_DestroySoundSample (sample);
	FreeMusicData (data);

	return (TRUE);
}
/*
 * Abort the stream. This host API has no separate abort path, so the
 * request is forwarded to StopStream().
 */
static PaError AbortStream(PaStream *stream)
{
    PaError result;

    DPR("AbortStream:\n");
    result = StopStream(stream);

    return result;
}
// Destructor: release all FFT working storage, close the PortAudio
// stream if one is open, and terminate PortAudio.
PortAudioSound::~PortAudioSound()
{
	ReadIndex = WriteIndex = 0;
	// Zero numBuffers before freeing so any concurrent reader sees an
	// empty buffer set — NOTE(review): assumes another thread polls
	// numBuffers; confirm against the capture callback.
	int n = numBuffers;
	numBuffers = 0;
	for (int i = 0; i < n; i++){
		fftw_free(ppPowSpect[i]);
		fftw_free(ppShade[i]);
	}
	fftw_free(ppPowSpect);
	fftw_free(ppShade);
	ppPowSpect = NULL;
	if (pStream != NULL){
		// Stop before closing; error() logs any PortAudio failure.
		StopStream();
		error(Pa_CloseStream(pStream));
		logFile.WriteString(L"Stream Closed\n");
	}
	error(Pa_Terminate());
	logFile.WriteString(L"PortAudio terminated\n");
	//fftw_free(Pspect);
	fftw_destroy_plan(FFTplan);
	fftw_free(hamming);
	fftw_free(FFTin);
	fftw_free(FFTout);
}
unsigned long GS_StopStream(unsigned long nPort) { //恢复渲染,停止线程 PSTREAMCONFIG pm = GetStream(nPort); if(pm==NULL) { return S_FALSE; } if(StopStream(pm->pContrlConfig)==S_OK) { if(pm->pAudioConfig) UninitDirectSound(pm->pAudioConfig); if(pm->pVideoConfig->m_bUsingGDIPLUS) { UnInitGDIPlus(pm->pVideoConfig); } else { UnInitDirectDraw(pm->pVideoConfig); } } char str[128]; sprintf(str," Stop GentekPlatformStream %d\n",nPort); OutputDebugStringA(str); return S_OK; }
// // Stop // // Stop playing // void TrackPlayer::Stop() { // Stop the stream StopStream(); // Clear requested status requested = FALSE; }
// Stop the currently playing music if it matches MusicRef.
// A MusicRef of (MUSIC_REF)~0 acts as a wildcard matching any music.
void PLRStop (MUSIC_REF MusicRef)
{
	// Ignore requests for music that is not the one playing.
	if (MusicRef != curMusicRef && MusicRef != (MUSIC_REF)~0)
		return;

	LockMutex (soundSource[MUSIC_SOURCE].stream_mutex);
	StopStream (MUSIC_SOURCE);
	UnlockMutex (soundSource[MUSIC_SOURCE].stream_mutex);

	curMusicRef = 0;
}
// Stop the currently playing speech, if any, and clear the speech
// reference. A no-op when nothing is playing.
void snd_StopSpeech (void)
{
	if (curSpeechRef)
	{
		LockMutex (soundSource[SPEECH_SOURCE].stream_mutex);
		StopStream (SPEECH_SOURCE);
		UnlockMutex (soundSource[SPEECH_SOURCE].stream_mutex);

		curSpeechRef = 0;
	}
}
// This function figures out the chunk that should be playing based on // 'offset' into the total playing time of all tracks. It then sets // the speech source's sample to the necessary decoder and seeks the // decoder to the proper point. // XXX: This means that whatever speech has already been queued on the // source will continue playing, so we may need some small timing // adjustments. It may be simpler to just call PlayStream(). static void seek_track (sint32 offset) { TFB_SoundChunk *cur; TFB_SoundChunk *last_tag = NULL; if (!sound_sample) return; // nothing to recompute if (offset < 0) offset = 0; else if ((uint32)offset > tracks_length) offset = tracks_length + 1; // Adjusting the stream start time is the only way we can arbitrarily // seek the stream right now soundSource[SPEECH_SOURCE].start_time = GetTimeCounter () - offset; // Find the chunk that should be playing at this time offset for (cur = chunks_head; cur && offset >= chunk_end_time (cur); cur = cur->next) { // .. looking for the last callback as we go along // XXX: this effectively set the last point where Fot is looking at. // TODO: this should be somehow changed if we implement more // callbacks, like Melnorme trading, offloading at Starbase, etc. if (cur->tag_me) last_tag = cur; } if (cur) { cur_chunk = cur; SoundDecoder_Seek (cur->decoder, (uint32) (((float)offset / ONE_SECOND - cur->start_time) * 1000)); sound_sample->decoder = cur->decoder; if (cur->tag_me) last_tag = cur; if (last_tag) DoTrackTag (last_tag); } else { // The offset is beyond the length of all tracks StopStream (SPEECH_SOURCE); cur_chunk = NULL; cur_sub_chunk = NULL; } }
void cSoundStreamRenderer::UpdateStreams() { RemoveUnmarkedBindings(); for(unsigned int i = 0; i < m_DataBindings.size(); ++i) { sStreamBinding &str = m_DataBindings[i]; if(!str.m_Source || !str.m_Stream) { ClearBinding(str); continue; } while(OpenAL::Get().GetProcessedBuffersCount(str.m_Source->Get()) > 0) { ALuint buffer = OpenAL::Get().PopBufferQueue(str.m_Source->Get()); str.m_RawBuffers.Return(buffer); } vSoundStreamProperties *props = dynamic_cast<vSoundStreamProperties*>(str.m_Stream->GetRenderingProperties()); if(!props) continue; switch(props->GetWantedState()) { case ESourceState::Playing: PlayStream(str); break; case ESourceState::Stopped: StopStream(str); break; case ESourceState::Paused: PauseStream(str); break; default: break; } } for(unsigned int i = 0; i < m_DataBindings.size(); ++i) { sStreamBinding &str = m_DataBindings[i]; vSoundStreamProperties *props = dynamic_cast<vSoundStreamProperties*>(str.m_Stream->GetRenderingProperties()); props->SetState(OpenAL::Get().GetSourceState(str.m_Source->Get())); str.m_Stream->Synchronize(props); } };
// Close and free a sndio stream: stop it if still running, release the
// record/play staging buffers, close the sndio handle, and tear down
// the PortAudio stream representation and buffer processor.
static PaError CloseStream(PaStream *stream)
{
	PaSndioStream *s = (PaSndioStream *)stream;

	DPR("CloseStream:\n");
	// Make sure the hardware is stopped before freeing anything.
	if (!s->stopped)
		StopStream(stream);
	// Buffers exist only for the directions the stream was opened with.
	if (s->mode & SIO_REC)
		free(s->rbuf);
	if (s->mode & SIO_PLAY)
		free(s->wbuf);
	sio_close(s->hdl);
	PaUtil_TerminateStreamRepresentation(&s->base);
	PaUtil_TerminateBufferProcessor(&s->bufproc);
	PaUtil_FreeMemory(s);
	return paNoError;
}
// Release everything a stream binding holds — stop playback, return
// the source and all buffers to their shared pools — and reset the
// binding to an unbound, unmarked state so the slot can be reused.
void cSoundStreamRenderer::ClearBinding(sStreamBinding &bnd)
{
	StopStream(bnd);

	// Hand the OpenAL source back to the pool before dropping our refs.
	m_Sources.Return(bnd.m_Source);
	bnd.m_Source = NULL;
	bnd.m_Stream = NULL;

	bnd.m_RawBuffers.FreeAll();
	// Queued (filled) buffers go back to the renderer's buffer pool.
	for(unsigned int i = 0; i < bnd.m_Buffers.size(); ++i)
		m_Buffers.Return(bnd.m_Buffers[i]);
	bnd.m_Buffers.clear();
	bnd.m_RawBuffers.Clear();

	bnd.m_Marked = false;
}
// Stop every playing audio clip and every stream managed by this
// audio manager. A no-op when audio was never initialized.
void CAudioManager::KillAll()
{
	if (m_bValidAudio == false)
		return;

	// Cast size() once so the loop index matches the int channel index
	// FMOD expects (avoids signed/unsigned comparison).
	const int clipCount = (int)m_AudioClip.size();

	for (int i = 0; i < clipCount; i++)
	{
		// FSOUND_IsPlaying returns a BOOL-style nonzero value; comparing
		// against 'true' (== 1) would miss other nonzero returns.
		if (FSOUND_IsPlaying(i))
		{
			StopSoundClip(i);
		}
	}
	for (int i = 0; i < clipCount; i++)
	{
		StopStream(i);
	}
	//FSOUND_StopSound(FSOUND_ALL);
	//::Sleep(2000);
}
// // RandomTrack // // Move to a random track // Bool TrackPlayer::RandomTrack() { // Stop the stream StopStream(); // Clear the active track active = NULL; // Do we have any tracks if (tracks.GetCount()) { // Setup an iterator NList<Track>::Iterator i(&tracks); // Pick a random track i.GoTo(Random::nonSync.Integer(tracks.GetCount())); // Set as active active = *i; } // Should we start a track if (requested) { // Did we find a track if (active) { // Start the track Play(); // Success return (TRUE); } // No tracks requested = FALSE; } return (active ? TRUE : FALSE); }
// // PrevTrack // // Move to the previous track // Bool TrackPlayer::PrevTrack() { // Stop the stream StopStream(); // Get the previous track if (active) { active = active->GetPrev(); } // Get the last track if (!active) { active = tracks.GetTail(); } // Should we play it if (requested) { // Did we find a track if (active) { // Play it Play(); // Success return (TRUE); } // No tracks to play requested = FALSE; } // Returns true if found a track return (active ? TRUE : FALSE); }
// // NextTrack // // Move to the next track // Bool TrackPlayer::NextTrack() { // Stop the stream StopStream(); // Get the next track if (active) { active = active->GetNext(); } // Get the first track if (!active) { active = tracks.GetHead(); } // Should we play it if (requested) { // Did we find a track if (active) { // Play it Play(); } else { // No tracks to play requested = FALSE; } } // Returns true if found a track return (active ? TRUE : FALSE); }
/*
 * Abort the stream. Aborting and stopping are handled identically by
 * this host API, so the request is forwarded to StopStream().
 */
static PaError AbortStream(PaStream *stream)
{
    PaError err;

    PA_DEBUG(("AbortStream:\n"));
    err = StopStream(stream);

    return err;
}