Example #1
void cmdMusic(const std::string& args) {
	// terrible hack for testing audio
	// decode the entire file into a big buffer all at once

	OggOpusFile* opusFile;
	if (args.length() == 0) {
		opusFile = op_open_memory(
		    static_cast<const uint8_t*>(EMBED_DATA(Who_Likes_to_Party_Kevin_MacLeod_incompetech_opus)),
		    EMBED_SIZE(Who_Likes_to_Party_Kevin_MacLeod_incompetech_opus),
		    NULL);
	} else {
		opusFile = op_open_file(args.c_str(), NULL);
	}

	if (!opusFile) {
		narf::console->println("Failed to open music file " + args);
		return;
	}

	auto newMusicSize = op_pcm_total(opusFile, -1) * 2; // stereo
	auto newMusicSamples = new float[newMusicSize];

	size_t decoded = 0;
	while (decoded < newMusicSize) {
		auto rc = op_read_float_stereo(opusFile, newMusicSamples + decoded, newMusicSize - decoded);
		if (rc < 0) {
			narf::console->println("opusfile decode failed");
			decoded = 0;
			break;
		}

		if (rc == 0) {
			// hit end of stream before the expected sample count; stop and let
			// the size check below report the short read
			break;
		}

		decoded += rc * 2; // op_read_float_stereo() returns samples per channel, and we store interleaved stereo
	}

	if (decoded != newMusicSize) {
		narf::console->println("opusfile decode returned wrong number of samples (got " + std::to_string(decoded) + ", expected " + std::to_string(newMusicSize) + ")");
		delete[] newMusicSamples;
		newMusicSamples = nullptr;
		newMusicSize = 0;
	}

	SDL_LockMutex(musicMutex);

	if (musicSamples) {
		delete[] musicSamples;
	}

	musicSamples = newMusicSamples;
	musicSize = newMusicSize;
	musicCursor = 0;

	SDL_UnlockMutex(musicMutex);

	op_free(opusFile);
}
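The decoded buffer above is presumably drained by an SDL audio callback that shares musicMutex, musicSamples, musicSize and musicCursor. That callback is not part of the example, so the following is only a minimal sketch of one plausible shape for it, assuming the audio device was opened as 48 kHz interleaved stereo float (AUDIO_F32SYS); the callback name, the extern declarations and the integer types are assumptions, not taken from the original code.

#include <SDL.h>
#include <cstring>

// Shared with cmdMusic() above; the names come from the example, the declarations are assumed.
extern SDL_mutex* musicMutex;
extern float*     musicSamples;
extern size_t     musicSize;    // total number of floats in musicSamples
extern size_t     musicCursor;  // next float to hand to the device

// Hypothetical SDL2 audio callback: copy as many decoded floats as fit into the
// device buffer and zero-fill the remainder once the track runs out.
static void musicCallback(void* /*userdata*/, Uint8* stream, int len) {
	SDL_LockMutex(musicMutex);
	auto* out = reinterpret_cast<float*>(stream);
	size_t want = static_cast<size_t>(len) / sizeof(float);
	size_t avail = musicSamples ? (musicSize - musicCursor) : 0;
	size_t n = (want < avail) ? want : avail;
	if (n) {
		std::memcpy(out, musicSamples + musicCursor, n * sizeof(float));
		musicCursor += n;
	}
	std::memset(out + n, 0, (want - n) * sizeof(float));
	SDL_UnlockMutex(musicMutex);
}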
Example #2
int CSound::DecodeOpus(int SampleID, const void *pData, unsigned DataSize)
{
	if(SampleID == -1 || SampleID >= NUM_SAMPLES)
		return -1;

	CSample *pSample = &m_aSamples[SampleID];

	OggOpusFile *OpusFile = op_open_memory((const unsigned char *) pData, DataSize, NULL);
	if (OpusFile)
	{
		int NumChannels = op_channel_count(OpusFile, -1);
		int NumSamples = op_pcm_total(OpusFile, -1); // per channel!

		pSample->m_Channels = NumChannels;

		if(pSample->m_Channels > 2)
		{
			dbg_msg("sound/opus", "file is not mono or stereo.");
			op_free(OpusFile);
			return -1;
		}

		pSample->m_pData = (short *)mem_alloc(NumSamples * sizeof(short) * NumChannels, 1);

		int Read;
		int Pos = 0;
		while (Pos < NumSamples)
		{
			// pass the space left in the destination buffer, not its total size
			Read = op_read(OpusFile, pSample->m_pData + Pos*NumChannels, (NumSamples - Pos)*NumChannels, NULL);
			if (Read == OP_HOLE) // gap in the stream; skip it and keep decoding
				continue;
			if (Read <= 0) // end of stream or a fatal error
				break;
			Pos += Read;
		}

		pSample->m_NumFrames = NumSamples; // op_pcm_total() already counts per-channel samples, i.e. frames
		pSample->m_Rate = 48000;
		pSample->m_LoopStart = -1;
		pSample->m_LoopEnd = -1;
		pSample->m_PausedAt = 0;

		op_free(OpusFile);
	}
	else
	{
		dbg_msg("sound/opus", "failed to decode sample");
		return -1;
	}

	return SampleID;
}
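DecodeOpus() only consumes an in-memory buffer, so a caller first has to load the file contents itself. The sketch below does that with plain standard C++ rather than the engine's own file I/O; the function name, the CSound pointer parameter and the assumption that DecodeOpus() is callable from outside the class are illustrative only.

#include <fstream>
#include <iterator>
#include <vector>

// Hypothetical caller: slurp an .opus file into memory and hand it to CSound::DecodeOpus().
int LoadOpusFromDisk(CSound *pSound, int SampleID, const char *pFilename)
{
	std::ifstream File(pFilename, std::ios::binary);
	if(!File)
		return -1;
	std::vector<char> Data((std::istreambuf_iterator<char>(File)), std::istreambuf_iterator<char>());
	if(Data.empty())
		return -1;
	return pSound->DecodeOpus(SampleID, Data.data(), (unsigned)Data.size());
}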
Example #3
bool CSoundFile::ReadOpusSample(SAMPLEINDEX sample, FileReader &file)
{
	file.Rewind();

#if defined(MPT_WITH_OPUSFILE)

	int rate = 0;
	int channels = 0;
	std::vector<int16> raw_sample_data;

	FileReader initial = file.GetChunk(65536); // probe well more than the ~512 bytes libopusfile recommends for op_test()
	if(op_test(NULL, initial.GetRawData<unsigned char>(), initial.GetLength()) != 0)
	{
		return false;
	}

	OggOpusFile *of = op_open_memory(file.GetRawData<unsigned char>(), file.GetLength(), NULL);
	if(!of)
	{
		return false;
	}

	rate = 48000; // libopusfile always decodes to 48 kHz
	channels = op_channel_count(of, -1);
	if(rate <= 0 || channels <= 0)
	{
		op_free(of);
		of = NULL;
		return false;
	}
	if(channels > 2 || op_link_count(of) != 1)
	{
		// We downmix multichannel to stereo as recommended by Opus specification in
		// case we are not able to handle > 2 channels.
		// We also decode chained files as stereo even if they start with a mono
		// stream, which simplifies handling of link boundaries for us.
		channels = 2;
	}

	std::vector<int16> decodeBuf(120 * 48000 / 1000); // 120ms (max Opus packet), 48kHz
	bool eof = false;
	while(!eof)
	{
		int framesRead = 0;
		if(channels == 2)
		{
			framesRead = op_read_stereo(of, &(decodeBuf[0]), static_cast<int>(decodeBuf.size()));
		} else if(channels == 1)
		{
			framesRead = op_read(of, &(decodeBuf[0]), static_cast<int>(decodeBuf.size()), NULL);
		}
		if(framesRead > 0)
		{
			raw_sample_data.insert(raw_sample_data.end(), decodeBuf.begin(), decodeBuf.begin() + (framesRead * channels));
		} else if(framesRead == 0)
		{
			eof = true;
		} else if(framesRead == OP_HOLE)
		{
			// continue
		} else
		{
			// other errors are fatal, stop decoding
			eof = true;
		}
	}

	op_free(of);
	of = NULL;

	if(raw_sample_data.empty())
	{
		return false;
	}

	DestroySampleThreadsafe(sample);
	strcpy(m_szNames[sample], "");
	Samples[sample].Initialize();
	Samples[sample].nC5Speed = rate;
	Samples[sample].nLength = raw_sample_data.size() / channels;

	Samples[sample].uFlags.set(CHN_16BIT);
	Samples[sample].uFlags.set(CHN_STEREO, channels == 2);
	Samples[sample].AllocateSample();

	std::copy(raw_sample_data.begin(), raw_sample_data.end(), Samples[sample].pSample16);

	Samples[sample].Convert(MOD_TYPE_IT, GetType());
	Samples[sample].PrecomputeLoops(*this, false);

	return Samples[sample].pSample != nullptr;

#else // !MPT_WITH_OPUSFILE

	MPT_UNREFERENCED_PARAMETER(sample);
	MPT_UNREFERENCED_PARAMETER(file);

	return false;

#endif // MPT_WITH_OPUSFILE

}
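One detail worth spelling out about the loop above: op_read() and op_read_stereo() take the buffer size as a total count of int16 values but return a frame count (samples per channel), and libopusfile simply returns less data per call when the buffer is smaller than a full packet. The constants below only restate the arithmetic behind decodeBuf; the names are made up for illustration.

// Arithmetic behind the 5760-entry decodeBuf above (names are illustrative only):
constexpr int kOpusRate        = 48000;                           // libopusfile always outputs 48 kHz
constexpr int kMaxPacketMs     = 120;                             // longest possible Opus packet
constexpr int kDecodeBufValues = kMaxPacketMs * kOpusRate / 1000; // 5760 int16 values
constexpr int kMaxStereoFrames = kDecodeBufValues / 2;            // at most 2880 frames per op_read_stereo() call
static_assert(kMaxStereoFrames * 2 == kDecodeBufValues, "a stereo frame is two values");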
Example #4
void VoiceMessagesLoader::onLoad(AudioData *audio) {
	bool started = false;
	int32 audioindex = -1;
	Loader *l = 0;
	Loaders::iterator j = _loaders.end();
	{
		QMutexLocker lock(&voicemsgsMutex);
		VoiceMessages *voice = audioVoice();
		if (!voice) return;

		for (int32 i = 0; i < AudioVoiceMsgSimultaneously; ++i) {
			VoiceMessages::Msg &m(voice->_data[i]);
			if (m.audio != audio || !m.loading) continue;

			audioindex = i;
			j = _loaders.find(audio);
			if (j != _loaders.end() && (j.value()->fname != m.fname || j.value()->data.size() != m.data.size())) {
				delete j.value();
				_loaders.erase(j);
				j = _loaders.end();
			}
			if (j == _loaders.end()) {
				l = (j = _loaders.insert(audio, new Loader())).value();
				l->fname = m.fname;
				l->data = m.data;
				
				int ret;
				if (m.data.isEmpty()) {
					l->file = op_open_file(m.fname.toUtf8().constData(), &ret);
				} else {
					l->file = op_open_memory((const unsigned char*)m.data.constData(), m.data.size(), &ret);
				}
				if (!l->file) {
					LOG(("Audio Error: op_open_file failed for '%1', data size '%2', error code %3").arg(m.fname).arg(m.data.size()).arg(ret));
					m.state = VoiceMessageStopped;
					return loadError(j);
				}
				ogg_int64_t duration = op_pcm_total(l->file, -1);
				if (duration < 0) {
					LOG(("Audio Error: op_pcm_total failed to get full duration for '%1', data size '%2', error code %3").arg(m.fname).arg(m.data.size()).arg(duration));
					m.state = VoiceMessageStopped;
					return loadError(j);
				}
				m.duration = duration;
				m.skipStart = 0;
				m.skipEnd = duration;
				m.position = 0;
				m.started = 0;
				started = true;
			} else {
				if (!m.skipEnd) continue;
				l = j.value();
			}
			break;
		}
	}

	if (j == _loaders.end()) {
		LOG(("Audio Error: trying to load part of audio, that is not playing at the moment"));
		emit error(audio);
		return;
	}
	if (started) {
		l->pcm_offset = op_pcm_tell(l->file);
		l->pcm_print_offset = l->pcm_offset - AudioVoiceMsgFrequency;
	}

	bool finished = false;
    DEBUG_LOG(("Audio Info: reading buffer for file '%1', data size '%2', current pcm_offset %3").arg(l->fname).arg(l->data.size()).arg(l->pcm_offset));

	QByteArray result;
	int64 samplesAdded = 0;
	while (result.size() < AudioVoiceMsgBufferSize) {
		opus_int16 pcm[AudioVoiceMsgFrequency * AudioVoiceMsgChannels];

		int ret = op_read_stereo(l->file, pcm, sizeof(pcm) / sizeof(*pcm));
		if (ret < 0) {
			{
				QMutexLocker lock(&voicemsgsMutex);
				VoiceMessages *voice = audioVoice();
				if (voice) {
					VoiceMessages::Msg &m(voice->_data[audioindex]);
					if (m.audio == audio) {
						m.state = VoiceMessageStopped;
					}
				}
			}
			LOG(("Audio Error: op_read_stereo failed, error code %1").arg(ret));
			return loadError(j);
		}

		int li = op_current_link(l->file);
		if (li != l->prev_li) {
			const OpusHead *head = op_head(l->file, li);
			const OpusTags *tags = op_tags(l->file, li);
			for (int32 ci = 0; ci < tags->comments; ++ci) {
				const char *comment = tags->user_comments[ci];
				if (opus_tagncompare("METADATA_BLOCK_PICTURE", 22, comment) == 0) {
					OpusPictureTag pic;
					int err = opus_picture_tag_parse(&pic, comment);
					if (err >= 0) {
						opus_picture_tag_clear(&pic);
					}
				}
			}
			if (!op_seekable(l->file)) {
				l->pcm_offset = op_pcm_tell(l->file) - ret;
			}
		}
		if (li != l->prev_li || l->pcm_offset >= l->pcm_print_offset + AudioVoiceMsgFrequency) {
			l->pcm_print_offset = l->pcm_offset;
		}
		l->pcm_offset = op_pcm_tell(l->file);

		if (!ret) {
			DEBUG_LOG(("Audio Info: read completed"));
			finished = true;
			break;
		}
		result.append((const char*)pcm, sizeof(*pcm) * ret * AudioVoiceMsgChannels);
		l->prev_li = li;
		samplesAdded += ret;

		{
			QMutexLocker lock(&voicemsgsMutex);
			VoiceMessages *voice = audioVoice();
			if (!voice) return;

			VoiceMessages::Msg &m(voice->_data[audioindex]);
			if (m.audio != audio || !m.loading || m.fname != l->fname || m.data.size() != l->data.size()) {
				LOG(("Audio Error: playing changed while loading"));
				m.state = VoiceMessageStopped;
				return loadError(j);
			}
		}
	}

	QMutexLocker lock(&voicemsgsMutex);
	VoiceMessages *voice = audioVoice();
	if (!voice) return;

	VoiceMessages::Msg &m(voice->_data[audioindex]);
	if (m.audio != audio || !m.loading || m.fname != l->fname || m.data.size() != l->data.size()) {
		LOG(("Audio Error: playing changed while loading"));
		m.state = VoiceMessageStopped;
		return loadError(j);
	}

	if (started) {
		if (m.source) {
			alSourceStop(m.source);
			for (int32 i = 0; i < 3; ++i) {
				if (m.samplesCount[i]) {
					alSourceUnqueueBuffers(m.source, 1, m.buffers + i);
					m.samplesCount[i] = 0;
				}
			}
			m.nextBuffer = 0;
		}
	}
	if (samplesAdded) {
		if (!m.source) {
			alGenSources(1, &m.source);
			alSourcef(m.source, AL_PITCH, 1.f);
			alSourcef(m.source, AL_GAIN, 1.f);
			alSource3f(m.source, AL_POSITION, 0, 0, 0);
			alSource3f(m.source, AL_VELOCITY, 0, 0, 0);
			alSourcei(m.source, AL_LOOPING, 0);
		}
		if (!m.buffers[m.nextBuffer]) alGenBuffers(3, m.buffers);
		if (!_checkALError()) {
			m.state = VoiceMessageStopped;
			return loadError(j);
		}

		if (m.samplesCount[m.nextBuffer]) {
			alSourceUnqueueBuffers(m.source, 1, m.buffers + m.nextBuffer);
			m.skipStart += m.samplesCount[m.nextBuffer];
		}

		m.samplesCount[m.nextBuffer] = samplesAdded;
		alBufferData(m.buffers[m.nextBuffer], AL_FORMAT_STEREO16, result.constData(), result.size(), AudioVoiceMsgFrequency);
		alSourceQueueBuffers(m.source, 1, m.buffers + m.nextBuffer);
		m.skipEnd -= samplesAdded;

		m.nextBuffer = (m.nextBuffer + 1) % 3;

		if (!_checkALError()) {
			m.state = VoiceMessageStopped;
			return loadError(j);
		}
	} else {
		finished = true;
	}
	if (finished) {
		m.skipEnd = 0;
		m.duration = m.skipStart + m.samplesCount[0] + m.samplesCount[1] + m.samplesCount[2];
	}
	m.loading = false;
	if (m.state == VoiceMessageResuming || m.state == VoiceMessagePlaying || m.state == VoiceMessageStarting) {
		ALint state = AL_INITIAL;
		alGetSourcei(m.source, AL_SOURCE_STATE, &state);
		if (_checkALError()) {
			if (state != AL_PLAYING) {
				alSourcePlay(m.source);
				emit needToCheck();
			}
		}
	}
}
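_checkALError() is referenced throughout the example but never shown. Below is a minimal sketch of what such a helper typically looks like; the implementation is an assumption, not the original code, and it reuses the example's LOG macro.

#include <AL/al.h>

// Assumed helper: report and clear any pending OpenAL error.
// Returns true when it is safe to continue, matching how the example uses it.
static bool _checkALError() {
	ALenum err = alGetError();
	if (err == AL_NO_ERROR) return true;
	LOG(("Audio Error: OpenAL error, code %1").arg(int(err)));
	return false;
}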