void AudioController::OpenAudio(agi::fs::path const& url) {
	if (url.empty())
		throw agi::InternalError("AudioController::OpenAudio() was passed an empty string. This must not happen.", 0);

	// Build the new provider first so the currently open audio survives a
	// failed open; only remove the entry from the MRU list on real errors,
	// not on user cancellation.
	std::unique_ptr<AudioProvider> opened;
	try {
		opened = AudioProviderFactory::GetProvider(url);
		config::path->SetToken("?audio", url);
	}
	catch (agi::UserCancelException const&) {
		throw;
	}
	catch (...) {
		config::mru->Remove("Audio", url);
		throw;
	}

	// Swap the new provider in only once it is known-good.
	CloseAudio();
	provider = std::move(opened);

	// A provider without a player is useless, so roll back on failure.
	try {
		player = AudioPlayerFactory::GetAudioPlayer(provider.get());
	}
	catch (...) {
		provider.reset();
		throw;
	}

	audio_url = url;
	config::mru->Add("Audio", url);

	// Notify listeners; if any of them throws, tear everything down again.
	try {
		AnnounceAudioOpen(provider.get());
	}
	catch (...) {
		CloseAudio();
		throw;
	}
}
unsigned long GS_CloseAudio(unsigned long nPort) { PSTREAMCONFIG pm = GetStream(nPort); if(pm==NULL) { return S_FALSE; } return CloseAudio(pm->pContrlConfig); }
INT ReopenAudioDevice( DWORD Rate, INT OutputMode, INT Latency )
{
	// Close and reopen the audio device with the new parameters, holding
	// the audio lock for the whole transition.
	ALock;
	debugf( NAME_Init, TEXT("Reopening audio device.") );
	CloseAudio();
	INT ReopenResult = OpenAudio( Rate, OutputMode, Latency );
	AUnlock;
	return ReopenResult;
}
void ffplayer::Close() { CloseAudio(); CloseVideo(); // Close the video file if (m_pFormatCtx) avformat_close_input(&m_pFormatCtx); m_pFormatCtx = 0; }
bool ffplayer::OpenAudioCode() { if (m_audioStream == -1) { return false; } m_pAudioCodecCtx = m_pFormatCtx->streams[m_audioStream]->codec; // Find the decoder for the video stream m_pAudioCodec = avcodec_find_decoder(m_pAudioCodecCtx->codec_id); if (m_pAudioCodec == NULL) { // m_errorMsg = "Could not find a video decoder"; CloseAudio(); return false; } // Open the codec if (avcodec_open2(m_pAudioCodecCtx, m_pAudioCodec, 0) < 0) { // m_errorMsg = "Could not open the video decoder"; CloseAudio(); return false; } m_pAudioFrame = av_frame_alloc(); if (!m_pAudioFrame) { CloseAudio(); return false; } m_bHasAudio = true; }
void AudioController::OnComputerResuming(wxPowerEvent &) {
	// Nothing to do unless audio was open before the machine suspended.
	if (!provider) return;

	// Recreate the audio player, which was torn down on suspend; if that
	// fails, close the audio entirely rather than leave a half-open state.
	try {
		player = AudioPlayerFactory::GetAudioPlayer(provider);
	}
	catch (...) {
		CloseAudio();
	}
}
void AudioController::OnAudioPlayerChanged() {
	// If no audio is open there is no player to swap out.
	if (!IsAudioOpen()) return;

	// Stop playback and drop the old player before building the new one.
	Stop();
	delete player;

	// On failure, close the audio and let the caller see the error.
	try {
		player = AudioPlayerFactory::GetAudioPlayer(provider);
	}
	catch (...) {
		CloseAudio();
		throw;
	}
}
// Destructor: release the audio provider/player and notify listeners
// via the normal close path.
AudioController::~AudioController() { CloseAudio(); }
// Open audio from the given URL, replacing any currently open audio.
// Supports several pseudo-URL schemes (documented inline below) in addition
// to plain filenames. Throws on failure; on success, listeners are notified
// via AnnounceAudioOpen.
// NOTE(review): the "video-audio:" and "file:" branches currently leave
// new_provider null, so they fall through to the player-creation step with a
// null provider — presumably unimplemented stubs; verify before relying on them.
void AudioController::OpenAudio(const wxString &url) {
	if (!url) throw agi::InternalError("AudioController::OpenAudio() was passed an empty string. This must not happen.", 0);

	wxString path_part;
	AudioProvider *new_provider = 0;   // raw owning pointer until handed to `provider`

	if (url.StartsWith("dummy-audio:", &path_part)) {
		/*
		 * scheme    ::= "dummy-audio" ":" signal-specifier "?" signal-parameters
		 * signal-specifier ::= "silence" | "noise" | "sine" "/" frequency
		 * frequency ::= integer
		 * signal-parameters ::= signal-parameter [ "&" signal-parameters ]
		 * signal-parameter ::= signal-parameter-name "=" integer
		 * signal-parameter-name ::= "sr" | "bd" | "ch" | "ln"
		 *
		 * Signal types:
		 * "silence", a silent signal is generated.
		 * "noise", a white noise signal is generated.
		 * "sine", a sine wave is generated at the specified frequency.
		 *
		 * Signal parameters:
		 * "sr", sample rate to generate signal at.
		 * "bd", bit depth to generate signal at (usually 16).
		 * "ch", number of channels to generate, usually 1 or 2. The same signal is generated
		 *       in every channel even if one would be LFE.
		 * "ln", length of signal in samples. ln/sr gives signal length in seconds.
		 */
		// NOTE(review): only the "noise"/non-noise distinction and a fixed length
		// are honored here; the other documented parameters are not parsed.
		new_provider = new DummyAudioProvider(5*30*60*1000, path_part.StartsWith("noise"));
	}
	else if (url.StartsWith("video-audio:", &path_part)) {
		/*
		 * scheme ::= "video-audio" ":" stream-type
		 * stream-type ::= "stream" | "cache"
		 *
		 * Stream types:
		 *
		 * "stream", the audio is streamed as required directly from the video provider,
		 * and cannot be used to drive an audio display. Seeking is unreliable.
		 *
		 * "cache", the entire audio is cached to memory or disk. Audio displays can be
		 * driven and seeking is reliable. Opening takes longer because the entire audio
		 * stream has to be decoded and stored.
		 */
	}
	else if (url.StartsWith("file:", &path_part)) {
		/*
		 * scheme ::= "file" ":" "//" file-system-path
		 *
		 * On Unix-like systems, the file system path is regular. On Windows-systems, the
		 * path uses forward slashes instead of back-slashes and the drive letter is
		 * preceded by a slash.
		 *
		 * URL-encoding??
		 */
	}
	else {
		/*
		 * Assume it's not a URI but instead a filename in the platform's native format.
		 */
		try {
			new_provider = AudioProviderFactory::GetProvider(url);
			StandardPaths::SetPathValue("?audio", wxFileName(url).GetPath());
		}
		catch (agi::UserCancelException const&) {
			// User cancellation is not an error: pass through without MRU changes.
			throw;
		}
		catch (...) {
			// Real failure: drop the unopenable file from the recent-files list.
			config::mru->Remove("Audio", STD_STR(url));
			throw;
		}
	}

	// Only close the previously open audio once the new provider exists.
	CloseAudio();
	provider = new_provider;

	// Attempt to create the player; on failure, roll back provider ownership.
	try {
		player = AudioPlayerFactory::GetAudioPlayer(provider);
	}
	catch (...) {
		delete provider;
		provider = 0;
		throw;
	}

	audio_url = url;

	config::mru->Add("Audio", STD_STR(url));

	try {
		// Tell listeners about this.
		AnnounceAudioOpen(provider);
	}
	catch (...) {
		// A listener failed: tear everything back down.
		CloseAudio();
		throw;
	}
}
// Plugin-API callback invoked by the emulator when the ROM is closed:
// shuts down the audio device/buffers.
EXPORT void CALL RomClosed(void) { WriteTrace(TraceAudioInterface, TraceDebug, "Start"); CloseAudio(); WriteTrace(TraceAudioInterface, TraceDebug, "Done"); }
// (Re)initialize the audio output for the given game sample rate.
// Safe to call repeatedly: it tears down any existing state first. Requests
// below 4kHz, repeat requests at the current rate, and calls after a prior
// critical failure are ignored. On Android this also brings up the whole
// OpenSL ES chain (engine -> output mix -> buffer-queue player); any failure
// along that chain closes the audio and latches g_critical_failure.
static void InitializeAudio(uint32_t freq)
{
    WriteTrace(TraceAudioInitShutdown, TraceDebug, "Start (freq: %d)",freq);

    // Guard: some titles request bogus frequencies; ignore them.
    if (freq < 4000)
    {
        WriteTrace(TraceAudioInitShutdown, TraceInfo, "Sometimes a bad frequency is requested so ignore it (freq: %d)",freq);
        WriteTrace(TraceAudioInitShutdown, TraceDebug, "Done");
        return;
    }

    // Guard: already initialized at this frequency, nothing to do.
    if (g_GameFreq == freq && g_primaryBuffer != NULL)
    {
        WriteTrace(TraceAudioInitShutdown, TraceInfo, "we are already using this frequency, so ignore it (freq: %d)",freq);
        WriteTrace(TraceAudioInitShutdown, TraceDebug, "Done");
        return;
    }

    // Guard: a previous attempt failed fatally; do not retry.
    if (g_critical_failure)
    {
        WriteTrace(TraceAudioInitShutdown, TraceInfo, "had a critical failure in setting up plugin, so ignore init");
        WriteTrace(TraceAudioInitShutdown, TraceDebug, "Done");
        return;
    }

    /* This is important for the sync */
    g_GameFreq = freq;

#ifdef ANDROID
    // Pick the nearest standard OpenSL ES sampling rate at or above the
    // requested game frequency.
    SLuint32 sample_rate;
    if((freq/1000) <= 11)
    {
        g_OutputFreq = 11025;
        sample_rate = SL_SAMPLINGRATE_11_025;
    }
    else if((freq/1000) <= 22)
    {
        g_OutputFreq = 22050;
        sample_rate = SL_SAMPLINGRATE_22_05;
    }
    else if((freq/1000) <= 32)
    {
        g_OutputFreq = 32000;
        sample_rate = SL_SAMPLINGRATE_32;
    }
    else
    {
        g_OutputFreq = 44100;
        sample_rate = SL_SAMPLINGRATE_44_1;
    }
#endif

    WriteTrace(TraceAudioInitShutdown, TraceInfo, "Requesting frequency: %iHz.", g_OutputFreq);

    /* reload these because they gets re-assigned from data below, and InitializeAudio can be called more than once */
    g_PrimaryBufferSize = GetSetting(Buffer_PrimarySize);
    g_SecondaryBufferSize = GetSetting(Buffer_SecondarySize);
    g_SecondaryBufferNbr = GetSetting(Buffer_SecondaryNbr);

    /* Close everything because InitializeAudio can be called more than once */
    CloseAudio();

    /* Create primary buffer */
    if(!CreatePrimaryBuffer())
    {
        WriteTrace(TraceAudioInitShutdown, TraceError, "CreatePrimaryBuffer failed");
        CloseAudio();
        g_critical_failure = true;
        WriteTrace(TraceAudioInitShutdown, TraceDebug, "Done");
        return;
    }

    /* Create secondary buffers */
    if(!CreateSecondaryBuffers())
    {
        WriteTrace(TraceAudioInitShutdown, TraceError, "CreateSecondaryBuffers failed");
        CloseAudio();
        g_critical_failure = true;
        WriteTrace(TraceAudioInitShutdown, TraceDebug, "Done");
        return;
    }

#ifdef ANDROID
    /* Create thread Locks to ensure synchronization between callback and processing code */
    if (pthread_mutex_init(&(g_lock.mutex), (pthread_mutexattr_t*) NULL) != 0)
    {
        WriteTrace(TraceAudioInitShutdown, TraceError, "pthread_mutex_init failed");
        CloseAudio();
        g_critical_failure = true;
        WriteTrace(TraceAudioInitShutdown, TraceDebug, "Done");
        return;
    }
    if (pthread_cond_init(&(g_lock.cond), (pthread_condattr_t*) NULL) != 0)
    {
        WriteTrace(TraceAudioInitShutdown, TraceError, "pthread_cond_init failed");
        CloseAudio();
        g_critical_failure = true;
        WriteTrace(TraceAudioInitShutdown, TraceDebug, "Done");
        return;
    }
    // Seed the buffer-availability counter that the queue callback decrements.
    pthread_mutex_lock(&(g_lock.mutex));
    g_lock.value = g_lock.limit = g_SecondaryBufferNbr;
    pthread_mutex_unlock(&(g_lock.mutex));

    /* Engine object */
    // From here on, each step runs only if every previous step succeeded;
    // `result` carries the first failure through to the final check.
    SLresult result = slCreateEngine(&g_engineObject, 0, NULL, 0, NULL, NULL);
    if(result != SL_RESULT_SUCCESS)
    {
        WriteTrace(TraceAudioInitShutdown, TraceError, "slCreateEngine failed (result: %d)",result);
    }
    if(result == SL_RESULT_SUCCESS)
    {
        result = (*g_engineObject)->Realize(g_engineObject, SL_BOOLEAN_FALSE);
        if(result != SL_RESULT_SUCCESS)
        {
            WriteTrace(TraceAudioInitShutdown, TraceError, "slCreateEngine->Realize failed (result: %d)",result);
        }
    }
    if(result == SL_RESULT_SUCCESS)
    {
        result = (*g_engineObject)->GetInterface(g_engineObject, SL_IID_ENGINE, &g_engineEngine);
        if(result != SL_RESULT_SUCCESS)
        {
            WriteTrace(TraceAudioInitShutdown, TraceError, "slCreateEngine->GetInterface failed (result: %d)",result);
        }
    }
    if(result == SL_RESULT_SUCCESS)
    {
        /* Output mix object */
        result = (*g_engineEngine)->CreateOutputMix(g_engineEngine, &g_outputMixObject, 0, NULL, NULL);
        if(result != SL_RESULT_SUCCESS)
        {
            WriteTrace(TraceAudioInitShutdown, TraceError, "slCreateEngine->CreateOutputMix failed (result: %d)",result);
        }
    }
    if(result == SL_RESULT_SUCCESS)
    {
        result = (*g_outputMixObject)->Realize(g_outputMixObject, SL_BOOLEAN_FALSE);
        if(result != SL_RESULT_SUCCESS)
        {
            WriteTrace(TraceAudioInitShutdown, TraceError, "g_outputMixObject->Realize failed (result: %d)",result);
        }
    }
    if(result == SL_RESULT_SUCCESS)
    {
        // Configure the audio source: a buffer queue of 16-bit stereo PCM
        // at the rate selected above.
        SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, g_SecondaryBufferNbr};
        SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM,2, sample_rate, SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16, (SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT), SL_BYTEORDER_LITTLEENDIAN};
        SLDataSource audioSrc = {&loc_bufq, &format_pcm};

        /* Configure audio sink */
        SLDataLocator_OutputMix loc_outmix = {SL_DATALOCATOR_OUTPUTMIX, g_outputMixObject};
        SLDataSink audioSnk = {&loc_outmix, NULL};

        /* Create audio player */
        const SLInterfaceID ids1[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE};
        const SLboolean req1[] = {SL_BOOLEAN_TRUE};
        result = (*g_engineEngine)->CreateAudioPlayer(g_engineEngine, &(g_playerObject), &audioSrc, &audioSnk, 1, ids1, req1);
        if(result != SL_RESULT_SUCCESS)
        {
            WriteTrace(TraceAudioInitShutdown, TraceError, "g_engineEngine->CreateAudioPlayer failed (result: %d)",result);
        }
    }

    /* Realize the player */
    if(result == SL_RESULT_SUCCESS)
    {
        result = (*g_playerObject)->Realize(g_playerObject, SL_BOOLEAN_FALSE);
        if(result != SL_RESULT_SUCCESS)
        {
            WriteTrace(TraceAudioInitShutdown, TraceError, "g_playerObject->Realize failed (result: %d)",result);
        }
    }

    /* Get the play interface */
    if(result == SL_RESULT_SUCCESS)
    {
        result = (*g_playerObject)->GetInterface(g_playerObject, SL_IID_PLAY, &(g_playerPlay));
        if(result != SL_RESULT_SUCCESS)
        {
            WriteTrace(TraceAudioInitShutdown, TraceError, "g_playerObject->GetInterface(SL_IID_PLAY) failed (result: %d)",result);
        }
    }

    /* Get the buffer queue interface */
    if(result == SL_RESULT_SUCCESS)
    {
        result = (*g_playerObject)->GetInterface(g_playerObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &(g_bufferQueue));
        if(result != SL_RESULT_SUCCESS)
        {
            WriteTrace(TraceAudioInitShutdown, TraceError, "g_playerObject->GetInterface(SL_IID_ANDROIDSIMPLEBUFFERQUEUE) failed (result: %d)",result);
        }
    }

    /* register callback on the buffer queue */
    if(result == SL_RESULT_SUCCESS)
    {
        result = (*g_bufferQueue)->RegisterCallback(g_bufferQueue, queueCallback, &g_lock);
        if(result != SL_RESULT_SUCCESS)
        {
            WriteTrace(TraceAudioInitShutdown, TraceError, "bufferQueue->RegisterCallback() failed (result: %d)",result);
        }
    }

    /* set the player's state to playing */
    if(result == SL_RESULT_SUCCESS)
    {
        result = (*g_playerPlay)->SetPlayState(g_playerPlay, SL_PLAYSTATE_PLAYING);
        if(result != SL_RESULT_SUCCESS)
        {
            WriteTrace(TraceAudioInitShutdown, TraceError, "g_playerPlay->SetPlayState(SL_PLAYSTATE_PLAYING) failed (result: %d)",result);
        }
    }

    // Any failure anywhere in the chain ends up here: clean up and latch
    // the critical-failure flag so future init attempts are skipped.
    if(result != SL_RESULT_SUCCESS)
    {
        WriteTrace(TraceAudioInitShutdown, TraceNotice, "Couldn't open OpenSLES audio");
        CloseAudio();
        g_critical_failure = true;
    }
#endif
    WriteTrace(TraceAudioInitShutdown, TraceNotice, "Done");
}
// Load or unload the audio/video/timecodes/keyframes files referenced by a
// script's project properties, honoring the "App/Auto/Load Linked Files"
// option (0 = never, 2 = ask first; any other nonzero value loads silently).
// Files whose paths already match the currently loaded ones are left alone.
void Project::LoadUnloadFiles(ProjectProperties properties) {
	auto load_linked = OPT_GET("App/Auto/Load Linked Files")->GetInt();
	if (!load_linked) return;

	// Resolve all four paths relative to the script's location.
	auto audio     = context->path->MakeAbsolute(properties.audio_file, "?script");
	auto video     = context->path->MakeAbsolute(properties.video_file, "?script");
	auto timecodes = context->path->MakeAbsolute(properties.timecodes_file, "?script");
	auto keyframes = context->path->MakeAbsolute(properties.keyframes_file, "?script");

	// Nothing changed: skip both the prompt and the (un)loading.
	if (video == video_file && audio == audio_file && keyframes == keyframes_file && timecodes == timecodes_file)
		return;

	if (load_linked == 2) {
		// Build a prompt listing each pending load/unload and ask the user.
		wxString str = _("Do you want to load/unload the associated files?");
		str += "\n";

		// Empty path means "unload"; otherwise format the load message with
		// the file path.
		auto append_file = [&](agi::fs::path const& p, wxString const& unload, wxString const& load) {
			if (p.empty())
				str += "\n" + unload;
			else
				str += "\n" + agi::wxformat(load, p);
		};

		if (audio != audio_file)
			append_file(audio, _("Unload audio"), _("Load audio file: %s"));
		if (video != video_file)
			append_file(video, _("Unload video"), _("Load video file: %s"));
		if (timecodes != timecodes_file)
			append_file(timecodes, _("Unload timecodes"), _("Load timecodes file: %s"));
		if (keyframes != keyframes_file)
			append_file(keyframes, _("Unload keyframes"), _("Load keyframes file: %s"));

		if (wxMessageBox(str, _("(Un)Load files?"), wxYES_NO | wxCENTRE, context->parent) != wxYES)
			return;
	}

	bool loaded_video = false;
	if (video != video_file) {
		if (video.empty())
			CloseVideo();
		// Note: assignment inside the condition — loaded_video records
		// whether the video actually opened.
		else if ((loaded_video = DoLoadVideo(video))) {
			// Restore the saved playback position, aspect ratio and zoom.
			auto vc = context->videoController.get();
			vc->JumpToFrame(properties.video_position);

			auto ar_mode = static_cast<AspectRatio>(properties.ar_mode);
			if (ar_mode == AspectRatio::Custom)
				vc->SetAspectRatio(properties.ar_value);
			else
				vc->SetAspectRatio(ar_mode);
			context->videoDisplay->SetZoom(properties.video_zoom);
		}
	}

	if (!timecodes.empty()) LoadTimecodes(timecodes);
	if (!keyframes.empty()) LoadKeyframes(keyframes);

	if (audio != audio_file) {
		if (audio.empty())
			CloseAudio();
		else
			DoLoadAudio(audio, false);
	}
	// No explicit audio change, but a new video was opened: optionally use
	// the video's own audio track.
	else if (loaded_video && OPT_GET("Video/Open Audio")->GetBool() && audio_file != video_file && video_provider->HasAudio())
		DoLoadAudio(video, true);
}
// Audio-interface shutdown entry point: releases the audio device and buffers.
void OpenSLESDriver::AI_Shutdown(void) { CloseAudio(); }
bool wxSoundBackendSDL::Play(wxSoundData *data, unsigned flags,
                             volatile wxSoundPlaybackStatus *WXUNUSED(status))
{
    Stop();

    // Translate the sample's bit depth into an SDL audio format; anything
    // other than 8 or 16 bit is unsupported.
    int sdlFormat;
    switch (data->m_bitsPerSample)
    {
        case 8:
            sdlFormat = AUDIO_U8;
            break;
        case 16:
            sdlFormat = AUDIO_S16LSB;
            break;
        default:
            return false;
    }

    // Keep the device open if its spec already matches this sound; otherwise
    // close it (if open) and reopen with the new spec.
    bool specMatches = m_audioOpen &&
                       sdlFormat == m_spec.format &&
                       m_spec.freq == (int)data->m_samplingRate &&
                       m_spec.channels == data->m_channels;
    if (!specMatches)
    {
        if (m_audioOpen)
            CloseAudio();

        m_spec.format = sdlFormat;
        m_spec.freq = data->m_samplingRate;
        m_spec.channels = data->m_channels;
        if (!OpenAudio())
            return false;
    }

    // Hand the sample to the callback under the audio lock.
    SDL_LockAudio();
    wxLogTrace(wxT("sound"), wxT("playing new sound"));
    m_playing = true;
    m_pos = 0;
    m_loop = (flags & wxSOUND_LOOP);
    m_data = data;
    data->IncRef();
    SDL_UnlockAudio();

    SDL_PauseAudio(0);

    // wait until playback finishes if called in sync mode:
    if (!(flags & wxSOUND_ASYNC))
    {
        wxLogTrace(wxT("sound"), wxT("waiting for sample to finish"));
        while (m_playing && m_data == data)
        {
#if wxUSE_THREADS
            // give the playback thread a chance to add event to pending
            // events queue, release GUI lock temporarily:
            if (wxThread::IsMain())
                wxMutexGuiLeave();
#endif
            wxMilliSleep(10);
#if wxUSE_THREADS
            if (wxThread::IsMain())
                wxMutexGuiEnter();
#endif
        }
        wxLogTrace(wxT("sound"), wxT("sample finished"));
    }

    return true;
}
// Destructor: stop any playback, close the SDL audio device, then destroy
// the event handler (after the device so no callback can still use it).
wxSoundBackendSDL::~wxSoundBackendSDL() { Stop(); CloseAudio(); delete m_evtHandler; }
/*****************************************************************************
 * Open : creates a handle and opens an alsa device
 *****************************************************************************
 * This function opens an alsa device, through the alsa API.
 * Returns 0 on success, -1 on failure. On failure the partially created
 * state is torn down and p_sys is freed.
 *****************************************************************************/
int OpenAudio( vlc_object_t *p_this )
{
    /* BUG FIX: p_aout was declared twice in this scope in the original,
     * which is a redefinition error. */
    aout_instance_t *p_aout = (aout_instance_t *)p_this;
    int i_ret;
    int i_bytes_per_sample;
    int i_nb_channels;
    snd_pcm_channel_info_t pi;
    snd_pcm_channel_params_t pp;

    /* allocate structure */
    p_aout->output.p_sys = malloc( sizeof( aout_sys_t ) );
    if( p_aout->output.p_sys == NULL )
        return -1;

    /* open audio device */
    if( ( i_ret = snd_pcm_open_preferred( &p_aout->output.p_sys->p_pcm_handle,
                                          &p_aout->output.p_sys->i_card,
                                          &p_aout->output.p_sys->i_device,
                                          SND_PCM_OPEN_PLAYBACK ) ) < 0 )
    {
        msg_Err( p_aout, "unable to open audio device (%s)",
                 snd_strerror( i_ret ) );
        free( p_aout->output.p_sys );
        return -1;
    }

    /* disable mmap */
    if( ( i_ret = snd_pcm_plugin_set_disable( p_aout->output.p_sys->p_pcm_handle,
                                              PLUGIN_DISABLE_MMAP ) ) < 0 )
    {
        msg_Err( p_aout, "unable to disable mmap (%s)", snd_strerror(i_ret) );
        CloseAudio( p_this );
        free( p_aout->output.p_sys );
        return -1;
    }

    /* Silence buffer used to pad underruns; the original did not check this
     * allocation. NOTE(review): later error paths do not free this buffer
     * here — presumably CloseAudio() releases it; verify against that
     * function before changing ownership. */
    p_aout->output.p_sys->p_silent_buffer = malloc( DEFAULT_FRAME_SIZE * 4 );
    if( p_aout->output.p_sys->p_silent_buffer == NULL )
    {
        msg_Err( p_aout, "unable to allocate silence buffer" );
        CloseAudio( p_this );
        free( p_aout->output.p_sys );
        return -1;
    }
    p_aout->output.pf_play = Play;
    aout_VolumeSoftInit( p_aout );

    memset( &pi, 0, sizeof(pi) );
    memset( &pp, 0, sizeof(pp) );

    pi.channel = SND_PCM_CHANNEL_PLAYBACK;
    if( ( i_ret = snd_pcm_plugin_info( p_aout->output.p_sys->p_pcm_handle,
                                       &pi ) ) < 0 )
    {
        msg_Err( p_aout, "unable to get plugin info (%s)",
                 snd_strerror( i_ret ) );
        CloseAudio( p_this );
        free( p_aout->output.p_sys );
        return -1;
    }

    /* Configure blocking playback with a small number of fragments. */
    pp.mode       = SND_PCM_MODE_BLOCK;
    pp.channel    = SND_PCM_CHANNEL_PLAYBACK;
    pp.start_mode = SND_PCM_START_FULL;
    pp.stop_mode  = SND_PCM_STOP_STOP;

    pp.buf.block.frags_max = 3;
    pp.buf.block.frags_min = 1;

    pp.format.interleave = 1;
    pp.format.rate = p_aout->output.output.i_rate;

    i_nb_channels = aout_FormatNbChannels( &p_aout->output.output );
    if ( i_nb_channels > 2 )
    {
        /* I don't know if QNX supports more than two channels. */
        i_nb_channels = 2;
        p_aout->output.output.i_channels = AOUT_CHAN_STEREO;
    }
    pp.format.voices = i_nb_channels;

    /* Output is always native-endian signed 16-bit PCM. */
    p_aout->output.output.i_format = VLC_CODEC_S16N;
    p_aout->output.i_nb_samples = DEFAULT_FRAME_SIZE;
    pp.format.format = SND_PCM_SFMT_S16;
    i_bytes_per_sample = 2;

    pp.buf.block.frag_size = p_aout->output.i_nb_samples *
                             p_aout->output.output.i_channels *
                             i_bytes_per_sample;

    /* set parameters */
    if( ( i_ret = snd_pcm_plugin_params( p_aout->output.p_sys->p_pcm_handle,
                                         &pp ) ) < 0 )
    {
        msg_Err( p_aout, "unable to set parameters (%s)",
                 snd_strerror(i_ret) );
        CloseAudio( p_this );
        free( p_aout->output.p_sys );
        return -1;
    }

    /* prepare channel */
    if( ( i_ret = snd_pcm_plugin_prepare( p_aout->output.p_sys->p_pcm_handle,
                                          SND_PCM_CHANNEL_PLAYBACK ) ) < 0 )
    {
        msg_Err( p_aout, "unable to prepare channel (%s)",
                 snd_strerror( i_ret ) );
        CloseAudio( p_this );
        free( p_aout->output.p_sys );
        return -1;
    }

    /* Create audio thread and wait for its readiness. */
    if( vlc_thread_create( p_aout, "aout", QNXaoutThread,
                           VLC_THREAD_PRIORITY_OUTPUT ) )
    {
        msg_Err( p_aout, "cannot create QNX audio thread (%m)" );
        CloseAudio( p_this );
        free( p_aout->output.p_sys );
        return -1;
    }

    return( 0 );
}