Example No. 1
/// @brief Wrap an audio provider with format conversion and downmixing as needed
/// @param source_provider Provider to wrap; returned unchanged if it is already suitable
/// @return The provider to use, possibly wrapped in ConvertAudioProvider and/or DownmixingAudioProvider
AudioProvider *CreateConvertAudioProvider(AudioProvider *source_provider) {
	AudioProvider *provider = source_provider;

	// Aegisub requires 16-bit samples,
	// some audio players break with low sample rates,
	// and everything breaks with wrong-endian samples.
	if (provider->GetBytesPerSample() != 2 ||
		provider->GetSampleRate() < 32000 ||
		!provider->AreSamplesNativeEndian())
	{
		// @todo add support for more bit depths (e.g. 24- and 32-bit audio)
		if (provider->GetBytesPerSample() > 2)
			throw AudioOpenError("Audio format converter: audio with bitdepths greater than 16 bits/sample is currently unsupported");

		provider = new ConvertAudioProvider(provider);
	}

	// We also require mono audio for historical reasons
	if (provider->GetChannels() != 1)
	{
		provider = new DownmixingAudioProvider(provider);
	}

	return provider;
}
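The conversion wrapper is applied before the downmixing wrapper, so by the time DownmixingAudioProvider runs it can presumably assume 16-bit, native-endian input. As a rough sketch of what the downmixing step has to do (illustrative only, not the actual DownmixingAudioProvider implementation), averaging the interleaved channels of each frame into one mono sample looks like this:

#include <cstdint>
#include <vector>

// Illustrative downmix: average the interleaved channels of each frame
// into a single 16-bit mono sample. Averaging keeps the result in range.
static std::vector<int16_t> downmix_to_mono(const int16_t *src, size_t frames, int channels) {
	std::vector<int16_t> mono(frames);
	for (size_t f = 0; f < frames; ++f) {
		int32_t sum = 0;
		for (int c = 0; c < channels; ++c)
			sum += src[f * channels + c]; // samples are interleaved: L R L R ...
		mono[f] = static_cast<int16_t>(sum / channels);
	}
	return mono;
}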
Example No. 2
void SaveAudioClip(AudioProvider const& provider, fs::path const& path, int start_time, int end_time) {
	const auto max_samples = provider.GetNumSamples();
	// Convert the millisecond start/end times to sample indices, rounding up and clamping to the clip length
	const auto start_sample = std::min(max_samples, ((int64_t)start_time * provider.GetSampleRate() + 999) / 1000);
	const auto end_sample = util::mid(start_sample, ((int64_t)end_time * provider.GetSampleRate() + 999) / 1000, max_samples);

	const size_t bytes_per_sample = provider.GetBytesPerSample() * provider.GetChannels();
	const size_t bufsize = (end_sample - start_sample) * bytes_per_sample;

	writer out{path};
	out.write("RIFF");
	out.write<int32_t>(bufsize + 36); // RIFF chunk size: the 36 remaining header bytes plus the data payload

	out.write("WAVEfmt ");
	out.write<int32_t>(16); // fmt chunk size
	out.write<int16_t>(1);  // compression format (PCM)
	out.write<int16_t>(provider.GetChannels());
	out.write<int32_t>(provider.GetSampleRate());
	out.write<int32_t>(provider.GetSampleRate() * provider.GetChannels() * provider.GetBytesPerSample()); // byte rate
	out.write<int16_t>(provider.GetChannels() * provider.GetBytesPerSample()); // block align
	out.write<int16_t>(provider.GetBytesPerSample() * 8); // bits per sample

	out.write("data");
	out.write<int32_t>(bufsize);

	// samples per read
	size_t spr = 65536 / bytes_per_sample;
	std::vector<char> buf;
	for (int64_t i = start_sample; i < end_sample; i += spr) {
		spr = std::min<size_t>(spr, end_sample - i);
		buf.resize(spr * bytes_per_sample);
		provider.GetAudio(&buf[0], i, spr);
		out.write(buf);
	}
}
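What gets written above is the canonical 44-byte PCM WAV header: the int32 after "RIFF" is the remaining file size (36 header bytes plus the data payload), and the fmt fields follow directly from the provider's channel count, sample rate and bytes per sample. A hypothetical call exporting the audio between 1.0 s and 2.5 s might look like this (here "provider" stands in for whatever AudioProvider the caller already holds):

// Hypothetical usage; times are in milliseconds.
SaveAudioClip(*provider, "clip.wav", 1000, 2500);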
Example No. 3
AudioProvider *AudioProviderFactory::GetProvider(wxString const& filename) {
	provider_creator creator;
	AudioProvider *provider = nullptr;

	provider = creator.try_create("Dummy audio provider", [&]() { return new DummyAudioProvider(filename); });

	// Try a PCM provider first
	if (!provider && !OPT_GET("Provider/Audio/PCM/Disable")->GetBool())
		provider = creator.try_create("PCM audio provider", [&]() { return CreatePCMAudioProvider(filename); });

	if (!provider) {
		std::vector<std::string> list = GetClasses(OPT_GET("Audio/Provider")->GetString());
		if (list.empty()) throw agi::NoAudioProvidersError("No audio providers are available.", 0);

		for (auto const& name : list) {
			provider = creator.try_create(name, [&]() { return Create(name, filename); });
			if (provider) break;
		}
	}

	if (!provider) {
		if (creator.found_audio)
			throw agi::AudioProviderOpenError(creator.msg, 0);
		if (creator.found_file)
			throw agi::AudioDataNotFoundError(creator.msg, 0);
		throw agi::FileNotFoundError(from_wx(filename));
	}

	bool needsCache = provider->NeedsCache();

	// Give it a converter if needed
	if (provider->GetBytesPerSample() != 2 || provider->GetSampleRate() < 32000 || provider->GetChannels() != 1)
		provider = CreateConvertAudioProvider(provider);

	// Change provider to RAM/HD cache if needed
	int cache = OPT_GET("Audio/Cache/Type")->GetInt();
	if (!cache || !needsCache)
		return new LockAudioProvider(provider);

	DialogProgress progress(wxGetApp().frame, _("Load audio"));

	// Convert to RAM
	if (cache == 1) return new RAMAudioProvider(provider, &progress);

	// Convert to HD
	if (cache == 2) return new HDAudioProvider(provider, &progress);

	throw agi::AudioCacheOpenError("Unknown caching method", 0);
}
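Example No. 3 leans on a provider_creator helper that turns each attempt's exceptions into the found_file / found_audio flags and the accumulated msg string (Example No. 5 below is the same flow with the try/catch blocks written out by hand). The helper itself is not shown here; based on how it is used, it plausibly looks something like the following sketch, where the exact exception types and logging are assumptions inferred from Examples 3 and 5:

struct provider_creator {
	std::string msg;
	bool found_file = false;
	bool found_audio = false;

	// Run a factory, logging success and recording why it failed otherwise
	template<typename Factory>
	AudioProvider *try_create(std::string const& name, Factory&& create) {
		try {
			AudioProvider *provider = create();
			if (provider)
				LOG_D("audio_provider") << "Using audio provider: " << name;
			return provider;
		}
		catch (agi::FileNotFoundError const& err) {
			msg += name + ": " + err.GetMessage() + " not found.\n";
		}
		catch (agi::AudioDataNotFoundError const& err) {
			found_file = true;
			msg += name + ": " + err.GetChainedMessage() + "\n";
		}
		catch (agi::AudioOpenError const& err) {
			found_file = found_audio = true;
			msg += name + ": " + err.GetChainedMessage() + "\n";
		}
		return nullptr;
	}
};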
Example No. 4
/// @brief Open the DirectSound playback stream for the current audio provider
///
void DirectSoundPlayer::OpenStream() {
	// Get provider
	AudioProvider *provider = GetProvider();

	// Initialize the DirectSound object
	HRESULT res;
	res = DirectSoundCreate8(&DSDEVID_DefaultPlayback,&directSound,NULL); // TODO: support selecting audio device
	if (FAILED(res)) throw _T("Failed initializing DirectSound");

	// Set DirectSound parameters
	AegisubApp *app = (AegisubApp*) wxTheApp;
	directSound->SetCooperativeLevel((HWND)app->frame->GetHandle(),DSSCL_PRIORITY);

	// Create the wave format structure
	WAVEFORMATEX waveFormat;
	waveFormat.wFormatTag = WAVE_FORMAT_PCM;
	waveFormat.nSamplesPerSec = provider->GetSampleRate();
	waveFormat.nChannels = provider->GetChannels();
	waveFormat.wBitsPerSample = provider->GetBytesPerSample() * 8;
	waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
	waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
	waveFormat.cbSize = sizeof(waveFormat);

	// Create the buffer initializer
	int aim = waveFormat.nAvgBytesPerSec * 15/100; // 150 ms buffer
	int min = DSBSIZE_MIN;
	int max = DSBSIZE_MAX;
	bufSize = std::min(std::max(min,aim),max);
	DSBUFFERDESC desc;
	desc.dwSize = sizeof(DSBUFFERDESC);
	desc.dwFlags = DSBCAPS_GETCURRENTPOSITION2 | DSBCAPS_GLOBALFOCUS;
	desc.dwBufferBytes = bufSize;
	desc.dwReserved = 0;
	desc.lpwfxFormat = &waveFormat;
	desc.guid3DAlgorithm = GUID_NULL;

	// Create the buffer
	IDirectSoundBuffer *buf;
	res = directSound->CreateSoundBuffer(&desc,&buf,NULL);
	if (res != DS_OK) throw _T("Failed creating DirectSound buffer");

	// Copy interface to buffer
	res = buf->QueryInterface(IID_IDirectSoundBuffer8,(LPVOID*) &buffer);
	if (res != S_OK) throw _T("Failed casting interface to IDirectSoundBuffer8");

	// Reset the write offset into the playback buffer
	offset = 0;
}
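The buffer sizing above aims at 15/100 of the average byte rate, i.e. roughly 150 ms of audio, clamped to DirectSound's legal range. For the mono, 16-bit audio the provider chain guarantees upstream, the numbers work out as in this sketch (the 48 kHz rate is just an assumed example):

// Assumed example: mono, 16-bit, 48 kHz audio after conversion.
//   nBlockAlign     = 1 channel * 16 bits / 8  = 2 bytes per frame
//   nAvgBytesPerSec = 48000 * 2                = 96000 bytes/s
//   aim             = 96000 * 15 / 100         = 14400 bytes (~150 ms of audio)
//   bufSize         = clamp(aim, DSBSIZE_MIN, DSBSIZE_MAX)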
Example No. 5
AudioProvider *AudioProviderFactory::GetProvider(wxString const& filename, int cache) {
	AudioProvider *provider = 0;
	bool found_file = false;
	bool found_audio = false;
	std::string msg;

	if (!OPT_GET("Provider/Audio/PCM/Disable")->GetBool()) {
		// Try a PCM provider first
		try {
			provider = CreatePCMAudioProvider(filename);
			LOG_D("audio_provider") << "Using PCM provider";
		}
		catch (agi::FileNotFoundError const& err) {
			msg = "PCM audio provider: " + err.GetMessage() + " not found.\n";
		}
		catch (agi::AudioOpenError const& err) {
			found_file = true;
			msg += err.GetChainedMessage() + "\n";
		}
	}

	if (!provider) {
		std::vector<std::string> list = GetClasses(OPT_GET("Audio/Provider")->GetString());
		if (list.empty()) throw agi::NoAudioProvidersError("No audio providers are available.", 0);

		for (size_t i = 0; i < list.size(); ++i) {
			try {
				provider = Create(list[i], filename);
				if (provider) {
					LOG_D("audio_provider") << "Using audio provider: " << list[i];
					break;
				}
			}
			catch (agi::FileNotFoundError const& err) {
				msg += list[i] + ": " + err.GetMessage() + " not found.\n";
			}
			catch (agi::AudioDataNotFoundError const& err) {
				found_file = true;
				msg += list[i] + ": " + err.GetChainedMessage() + "\n";
			}
			catch (agi::AudioOpenError const& err) {
				found_audio = true;
				found_file = true;
				msg += list[i] + ": " + err.GetChainedMessage() + "\n";
			}
		}
	}

	if (!provider) {
		if (found_audio)
			throw agi::AudioProviderOpenError(msg, 0);
		if (found_file)
			throw agi::AudioDataNotFoundError(msg, 0);
		throw agi::FileNotFoundError(STD_STR(filename));
	}

	bool needsCache = provider->NeedsCache();

	// Give it a converter if needed
	if (provider->GetBytesPerSample() != 2 || provider->GetSampleRate() < 32000 || provider->GetChannels() != 1)
		provider = CreateConvertAudioProvider(provider);

	// Change provider to RAM/HD cache if needed
	if (cache == -1) cache = OPT_GET("Audio/Cache/Type")->GetInt();
	if (!cache || !needsCache) {
		return new LockAudioProvider(provider);
	}

	DialogProgress progress(wxGetApp().frame, _("Load audio"));

	// Convert to RAM
	if (cache == 1) return new RAMAudioProvider(provider, &progress);

	// Convert to HD
	if (cache == 2) return new HDAudioProvider(provider, &progress);

	throw agi::AudioCacheOpenError("Unknown caching method", 0);
}
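Both GetProvider variants branch on the same "Audio/Cache/Type" option. The names below are illustrative only (the option is stored as a plain int), but the values follow directly from the branches above:

enum AudioCacheType {
	AudioCache_None = 0, // no caching: just wrap the provider in LockAudioProvider
	AudioCache_RAM  = 1, // decode everything into memory via RAMAudioProvider
	AudioCache_HD   = 2  // decode to a temporary file via HDAudioProvider
};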
Example No. 6
/// @brief Fill the DirectSound buffer with audio from the provider
/// @param fill True to fill the entire buffer (used when starting playback); false to top up only the part already played
/// @return True if there is more audio left to play
///
bool DirectSoundPlayer::FillBuffer(bool fill) {
	if (playPos >= endPos) return false;

	// Variables
	HRESULT res;
	void *ptr1, *ptr2;
	unsigned long int size1, size2;
	AudioProvider *provider = GetProvider();
	int bytesps = provider->GetBytesPerSample();

	// To write length
	int toWrite = 0;
	if (fill) {
		toWrite = bufSize;
	}
	else {
		DWORD bufplay;
		res = buffer->GetCurrentPosition(&bufplay, NULL);
		if (FAILED(res)) return false;
		toWrite = (int)bufplay - (int)offset;
		if (toWrite < 0) toWrite += bufSize;
	}
	if (toWrite == 0) return true;

	// Make sure we only get as many samples as are available
	if (playPos + toWrite/bytesps > endPos) {
		toWrite = (endPos - playPos) * bytesps;
	}

	// If we're going to fill the entire buffer (i.e. at the start of playback), start by zeroing it out.
	// If it's not zeroed out we might have a playback selection shorter than the buffer,
	// and then everything after the selection would be junk, which we don't want played.
	if (fill) {
RetryClear:
		res = buffer->Lock(0, bufSize, &ptr1, &size1, &ptr2, &size2, 0);
		if (res == DSERR_BUFFERLOST) {
			buffer->Restore();
			goto RetryClear;
		}
		memset(ptr1, 0, size1);
		memset(ptr2, 0, size2);
		buffer->Unlock(ptr1, size1, ptr2, size2);
	}

	// Lock the region to write; the fill and top-up paths lock the same way
	// (DSBLOCK_FROMWRITECURSOR is left disabled)
RetryLock:
	res = buffer->Lock(offset, toWrite, &ptr1, &size1, &ptr2, &size2, 0);

	// Buffer lost?
	if (res == DSERR_BUFFERLOST) {
		LOG_D("audio/player/dsound1") << "lost dsound buffer";
		buffer->Restore();
		goto RetryLock;
	}

	// Error
	if (FAILED(res)) return false;

	// Convert size to number of samples
	unsigned long int count1 = size1 / bytesps;
	unsigned long int count2 = size2 / bytesps;

	LOG_D_IF(count1, "audio/player/dsound1") << "DS fill: " << (unsigned long)playPos << " -> " << (unsigned long)playPos+count1;
	LOG_D_IF(count2, "audio/player/dsound1") << "DS fill: " << (unsigned long)playPos+count1 << " -> " << (unsigned long)playPos+count1+count2;
	LOG_D_IF(!count1 && !count2, "audio/player/dsound1") << "DS fill: nothing";

	// Get source wave
	if (count1) provider->GetAudioWithVolume(ptr1, playPos, count1, volume);
	if (count2) provider->GetAudioWithVolume(ptr2, playPos+count1, count2, volume);
	playPos += count1+count2;

	// Unlock
	buffer->Unlock(ptr1,count1*bytesps,ptr2,count2*bytesps);

	// Update offset
	offset = (offset + count1*bytesps + count2*bytesps) % bufSize;

	return playPos < endPos;
}
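The top-up path computes how many bytes are safe to write as the distance from the last write offset to DirectSound's play cursor, wrapping around the end of the ring buffer. In isolation the computation is just this (a minimal sketch, not taken verbatim from the player):

// Bytes writable in a circular buffer of buf_size bytes, given the last
// write offset and the current play cursor position.
static int writable_bytes(int offset, int play_cursor, int buf_size) {
	int to_write = play_cursor - offset;
	if (to_write < 0) to_write += buf_size; // the play cursor has wrapped past the buffer end
	return to_write;
}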