/// @brief Save a portion of the decoded audio as an uncompressed PCM WAV file
/// @param provider   Audio to sample from
/// @param path       Output file to write
/// @param start_time Start of the clip, in milliseconds
/// @param end_time   End of the clip, in milliseconds
void SaveAudioClip(AudioProvider const& provider, fs::path const& path, int start_time, int end_time) {
	const auto max_samples = provider.GetNumSamples();
	const auto start_sample = std::min(max_samples, ((int64_t)start_time * provider.GetSampleRate() + 999) / 1000);
	const auto end_sample = util::mid(start_sample, ((int64_t)end_time * provider.GetSampleRate() + 999) / 1000, max_samples);

	const size_t bytes_per_sample = provider.GetBytesPerSample() * provider.GetChannels();
	const size_t bufsize = (end_sample - start_sample) * bytes_per_sample;

	writer out{path};
	out.write("RIFF");
	out.write<int32_t>(bufsize + 36); // Size of the rest of the file: 36 header bytes + sample data

	out.write("WAVEfmt ");            // "WAVE" form type followed by the "fmt " chunk ID
	out.write<int32_t>(16);           // Size of chunk
	out.write<int16_t>(1);            // compression format (PCM)
	out.write<int16_t>(provider.GetChannels());
	out.write<int32_t>(provider.GetSampleRate());
	out.write<int32_t>(provider.GetSampleRate() * provider.GetChannels() * provider.GetBytesPerSample());
	out.write<int16_t>(provider.GetChannels() * provider.GetBytesPerSample());
	out.write<int16_t>(provider.GetBytesPerSample() * 8);

	out.write("data");
	out.write<int32_t>(bufsize);

	// samples per read
	size_t spr = 65536 / bytes_per_sample;
	std::vector<char> buf;
	for (int64_t i = start_sample; i < end_sample; i += spr) {
		spr = std::min<size_t>(spr, end_sample - i);
		buf.resize(spr * bytes_per_sample);
		provider.GetAudio(&buf[0], i, spr);
		out.write(buf);
	}
}
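// The `writer` type used above is defined elsewhere in the codebase. As a rough
// orientation, a minimal sketch of such a helper could look like the following,
// assuming fs::path is std::filesystem::path and that only raw chunk IDs,
// little-endian integers and byte buffers need to be emitted; the real class may differ.
#include <cstring>
#include <filesystem>
#include <fstream>
#include <vector>

struct writer_sketch {
	std::ofstream out;

	writer_sketch(std::filesystem::path const& path) : out(path, std::ios::binary) { }

	// Chunk IDs such as "RIFF" or "data", written without a terminating null
	void write(const char *str) { out.write(str, std::strlen(str)); }

	// Integral header fields, written in their in-memory (little-endian on x86) layout
	template<typename T>
	void write(T value) { out.write(reinterpret_cast<const char *>(&value), sizeof value); }

	// Raw sample data
	void write(std::vector<char> const& buf) { out.write(buf.data(), static_cast<std::streamsize>(buf.size())); }
};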
/// @brief Check whether the audio needs format conversion or downmixing and wrap it if so
/// @param source_provider Provider to check and possibly wrap
/// @return source_provider itself, or a wrapper chain delivering 16-bit, native-endian, mono audio
AudioProvider *CreateConvertAudioProvider(AudioProvider *source_provider) {
	AudioProvider *provider = source_provider;

	// Aegisub requires 16-bit samples,
	// some audio players break with low samplerates,
	// everything breaks with wrong-ended samples.
	if (provider->GetBytesPerSample() != 2 || provider->GetSampleRate() < 32000 || !provider->AreSamplesNativeEndian()) {
		// @todo add support for more bitdepths (i.e. 24- and 32-bit audio)
		if (provider->GetBytesPerSample() > 2)
			throw AudioOpenError("Audio format converter: audio with bitdepths greater than 16 bits/sample is currently unsupported");

		provider = new ConvertAudioProvider(provider);
	}

	// We also require mono audio for historical reasons
	if (provider->GetChannels() != 1) {
		provider = new DownmixingAudioProvider(provider);
	}

	return provider;
}
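// ConvertAudioProvider and DownmixingAudioProvider are implemented in other files.
// The core of the downmixing step can be sketched as a standalone helper, assuming
// the samples have already been converted to native-endian 16-bit PCM by that point
// (hypothetical function, not part of the original source):
#include <cstdint>

static void DownmixToMono(int16_t const *src, int16_t *dst, int64_t frames, int channels) {
	for (int64_t i = 0; i < frames; ++i) {
		// Average all source channels into a single output sample
		int32_t sum = 0;
		for (int c = 0; c < channels; ++c)
			sum += src[i * channels + c];
		dst[i] = static_cast<int16_t>(sum / channels);
	}
}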
AudioProvider *AudioProviderFactory::GetProvider(wxString const& filename) {
	provider_creator creator;
	AudioProvider *provider = creator.try_create("Dummy audio provider", [&]() { return new DummyAudioProvider(filename); });

	// Try a PCM provider first
	if (!provider && !OPT_GET("Provider/Audio/PCM/Disable")->GetBool())
		provider = creator.try_create("PCM audio provider", [&]() { return CreatePCMAudioProvider(filename); });

	if (!provider) {
		std::vector<std::string> list = GetClasses(OPT_GET("Audio/Provider")->GetString());
		if (list.empty()) throw agi::NoAudioProvidersError("No audio providers are available.", 0);

		for (auto const& name : list) {
			provider = creator.try_create(name, [&]() { return Create(name, filename); });
			if (provider) break;
		}
	}

	if (!provider) {
		if (creator.found_audio)
			throw agi::AudioProviderOpenError(creator.msg, 0);
		if (creator.found_file)
			throw agi::AudioDataNotFoundError(creator.msg, 0);
		throw agi::FileNotFoundError(from_wx(filename));
	}

	bool needsCache = provider->NeedsCache();

	// Give it a converter if needed
	if (provider->GetBytesPerSample() != 2 || provider->GetSampleRate() < 32000 || provider->GetChannels() != 1)
		provider = CreateConvertAudioProvider(provider);

	// Change provider to RAM/HD cache if needed
	int cache = OPT_GET("Audio/Cache/Type")->GetInt();
	if (!cache || !needsCache)
		return new LockAudioProvider(provider);

	DialogProgress progress(wxGetApp().frame, _("Load audio"));

	// Convert to RAM
	if (cache == 1) return new RAMAudioProvider(provider, &progress);

	// Convert to HD
	if (cache == 2) return new HDAudioProvider(provider, &progress);

	throw agi::AudioCacheOpenError("Unknown caching method", 0);
}
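// The provider_creator helper used by GetProvider() above is defined elsewhere;
// only try_create(), found_file, found_audio and msg are relied on here. A minimal
// sketch, modelled on the error accumulation done inline in the older overload
// below, assuming it simply runs a factory callback and records why each attempt failed:
struct provider_creator_sketch {
	bool found_file = false;
	bool found_audio = false;
	std::string msg;

	template<typename Factory>
	AudioProvider *try_create(std::string const& name, Factory&& create) {
		try {
			AudioProvider *provider = create();
			if (provider)
				LOG_D("audio_provider") << "Using audio provider: " << name;
			return provider;
		}
		catch (agi::FileNotFoundError const& err) {
			msg += name + ": " + err.GetMessage() + " not found.\n";
		}
		catch (agi::AudioDataNotFoundError const& err) {
			found_file = true;
			msg += name + ": " + err.GetChainedMessage() + "\n";
		}
		catch (agi::AudioOpenError const& err) {
			found_file = true;
			found_audio = true;
			msg += name + ": " + err.GetChainedMessage() + "\n";
		}
		return nullptr;
	}
};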
/// @brief Open stream
///
void DirectSoundPlayer::OpenStream() {
	// Get provider
	AudioProvider *provider = GetProvider();

	// Initialize the DirectSound object
	HRESULT res;
	res = DirectSoundCreate8(&DSDEVID_DefaultPlayback, &directSound, NULL); // TODO: support selecting audio device
	if (FAILED(res)) throw _T("Failed initializing DirectSound");

	// Set DirectSound parameters
	AegisubApp *app = (AegisubApp*) wxTheApp;
	directSound->SetCooperativeLevel((HWND)app->frame->GetHandle(), DSSCL_PRIORITY);

	// Create the wave format structure
	WAVEFORMATEX waveFormat;
	waveFormat.wFormatTag = WAVE_FORMAT_PCM;
	waveFormat.nSamplesPerSec = provider->GetSampleRate();
	waveFormat.nChannels = provider->GetChannels();
	waveFormat.wBitsPerSample = provider->GetBytesPerSample() * 8;
	waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
	waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
	waveFormat.cbSize = sizeof(waveFormat);

	// Create the buffer initializer
	int aim = waveFormat.nAvgBytesPerSec * 15/100; // 150 ms buffer
	int min = DSBSIZE_MIN;
	int max = DSBSIZE_MAX;
	bufSize = std::min(std::max(min, aim), max);
	DSBUFFERDESC desc;
	desc.dwSize = sizeof(DSBUFFERDESC);
	desc.dwFlags = DSBCAPS_GETCURRENTPOSITION2 | DSBCAPS_GLOBALFOCUS;
	desc.dwBufferBytes = bufSize;
	desc.dwReserved = 0;
	desc.lpwfxFormat = &waveFormat;
	desc.guid3DAlgorithm = GUID_NULL;

	// Create the buffer
	IDirectSoundBuffer *buf;
	res = directSound->CreateSoundBuffer(&desc, &buf, NULL);
	if (res != DS_OK) throw _T("Failed creating DirectSound buffer");

	// Copy interface to buffer
	res = buf->QueryInterface(IID_IDirectSoundBuffer8, (LPVOID*) &buffer);
	if (res != S_OK) throw _T("Failed casting interface to IDirectSoundBuffer8");

	// Set data
	offset = 0;
}
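// Worked example of the buffer sizing in OpenStream() above (not from the original
// source), assuming the provider has been through CreateConvertAudioProvider() and
// therefore delivers 16-bit mono audio; for a 48 kHz clip:
//   nBlockAlign     = 1 channel * 16 bits / 8  = 2 bytes per frame
//   nAvgBytesPerSec = 48000 * 2                = 96000 bytes/s
//   aim             = 96000 * 15 / 100         = 14400 bytes (~150 ms)
// which is then clamped to DirectSound's [DSBSIZE_MIN, DSBSIZE_MAX] range.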
AudioProvider *AudioProviderFactory::GetProvider(wxString const& filename, int cache) {
	AudioProvider *provider = 0;
	bool found_file = false;
	bool found_audio = false;
	std::string msg;

	if (!OPT_GET("Provider/Audio/PCM/Disable")->GetBool()) {
		// Try a PCM provider first
		try {
			provider = CreatePCMAudioProvider(filename);
			LOG_D("audio_provider") << "Using PCM provider";
		}
		catch (agi::FileNotFoundError const& err) {
			msg = "PCM audio provider: " + err.GetMessage() + " not found.\n";
		}
		catch (agi::AudioOpenError const& err) {
			found_file = true;
			msg += err.GetChainedMessage() + "\n";
		}
	}

	if (!provider) {
		std::vector<std::string> list = GetClasses(OPT_GET("Audio/Provider")->GetString());
		if (list.empty()) throw agi::NoAudioProvidersError("No audio providers are available.", 0);

		for (size_t i = 0; i < list.size(); ++i) {
			try {
				provider = Create(list[i], filename);
				if (provider) {
					LOG_D("audio_provider") << "Using audio provider: " << list[i];
					break;
				}
			}
			catch (agi::FileNotFoundError const& err) {
				msg += list[i] + ": " + err.GetMessage() + " not found.\n";
			}
			catch (agi::AudioDataNotFoundError const& err) {
				found_file = true;
				msg += list[i] + ": " + err.GetChainedMessage() + "\n";
			}
			catch (agi::AudioOpenError const& err) {
				found_audio = true;
				found_file = true;
				msg += list[i] + ": " + err.GetChainedMessage() + "\n";
			}
		}
	}

	if (!provider) {
		if (found_audio)
			throw agi::AudioProviderOpenError(msg, 0);
		if (found_file)
			throw agi::AudioDataNotFoundError(msg, 0);
		throw agi::FileNotFoundError(STD_STR(filename));
	}

	bool needsCache = provider->NeedsCache();

	// Give it a converter if needed
	if (provider->GetBytesPerSample() != 2 || provider->GetSampleRate() < 32000 || provider->GetChannels() != 1)
		provider = CreateConvertAudioProvider(provider);

	// Change provider to RAM/HD cache if needed
	if (cache == -1) cache = OPT_GET("Audio/Cache/Type")->GetInt();
	if (!cache || !needsCache) {
		return new LockAudioProvider(provider);
	}

	DialogProgress progress(wxGetApp().frame, _("Load audio"));

	// Convert to RAM
	if (cache == 1) return new RAMAudioProvider(provider, &progress);

	// Convert to HD
	if (cache == 2) return new HDAudioProvider(provider, &progress);

	throw agi::AudioCacheOpenError("Unknown caching method", 0);
}