Example #1: OpusDecoder::GetTicks() converts the op_pcm_tell() position into whole seconds.
int OpusDecoder::GetTicks() const {
	if (!oof) {
		return 0;
	}

	// op_pcm_tell() returns the PCM offset as a number of samples at 48 kHz,
	// so dividing by 48000 yields whole seconds
	return op_pcm_tell(oof) / 48000;
}
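
The 48 kHz unit is fixed by opusfile regardless of the source material's original sample rate, so finer-grained positions follow from the same offset. Below is a minimal sketch of a millisecond variant, assuming a valid OggOpusFile handle; the function name GetPositionMs is illustrative and not part of the example above.

#include <opusfile.h>

// Sketch only: millisecond-precision position derived from the 48 kHz PCM offset.
ogg_int64_t GetPositionMs(OggOpusFile *oof) {
	if (!oof) {
		return 0;
	}
	return op_pcm_tell(oof) * 1000 / 48000; // op_pcm_tell() counts samples at 48 kHz
}
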
Example #2: SoundSourceOpus::seekSampleFrame() falls back to op_pcm_tell() to resynchronize the cached frame index when op_pcm_seek() fails.
SINT SoundSourceOpus::seekSampleFrame(SINT frameIndex) {
    DEBUG_ASSERT(isValidFrameIndex(m_curFrameIndex));
    DEBUG_ASSERT(isValidFrameIndex(frameIndex));

    int seekResult = op_pcm_seek(m_pOggOpusFile, frameIndex);
    if (0 == seekResult) {
        m_curFrameIndex = frameIndex;
    } else {
        qWarning() << "Failed to seek OggOpus file:" << seekResult;
        const ogg_int64_t pcmOffset = op_pcm_tell(m_pOggOpusFile);
        if (0 <= pcmOffset) {
            m_curFrameIndex = pcmOffset;
        } else {
            // Reset to EOF
            m_curFrameIndex = getMaxFrameIndex();
        }
    }

    DEBUG_ASSERT(isValidFrameIndex(m_curFrameIndex));
    return m_curFrameIndex;
}
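
The pattern above, attempting op_pcm_seek() and falling back to op_pcm_tell() to resynchronize the cached position, also works outside a SoundSource wrapper. Here is a standalone sketch under that assumption; the function name seekOrTell and the end-of-stream fallback through op_pcm_total() are illustrative choices, not taken from the example.

#include <opusfile.h>

// Sketch only: seek to a target PCM frame and return the frame the decoder
// actually landed on, using op_pcm_tell() when the seek fails.
ogg_int64_t seekOrTell(OggOpusFile *of, ogg_int64_t frameIndex) {
    if (op_pcm_seek(of, frameIndex) == 0) {
        return frameIndex; // the stream is now positioned exactly at frameIndex
    }
    ogg_int64_t pcmOffset = op_pcm_tell(of);
    if (pcmOffset >= 0) {
        return pcmOffset; // the decoder reports where it really is
    }
    return op_pcm_total(of, -1); // treat an unreadable offset as end of stream
}
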
Example #3: opusdec_read() uses op_pcm_tell() to clamp reads at a sub-track boundary and to report the current playback position.
static int
opusdec_read (DB_fileinfo_t *_info, char *bytes, int size) {
    opusdec_info_t *info = (opusdec_info_t *)_info;

    // Work round some streamer limitations and infobar issue #22
    if (info->new_track && is_playing_track(info->new_track)) {
        info->new_track = NULL;
        send_event(info->it, DB_EV_TRACKINFOCHANGED);
        info->next_update = -2;
    }

    // Don't read past the end of a sub-track
    int samples_to_read = size / sizeof(float) / _info->fmt.channels;
    int64_t endsample = deadbeef->pl_item_get_endsample (info->it);
    if (endsample > 0) {
        opus_int64 samples_left = endsample - op_pcm_tell (info->opusfile);
        if (samples_left < samples_to_read) {
            samples_to_read = (int)samples_left;
            size = samples_to_read * sizeof(float) * _info->fmt.channels;
        }
    }

    // Read until we have enough bytes to satisfy streamer, or there are none left
    int bytes_read = 0;
    int ret = OP_HOLE;

    int samples_read = 0;
    while (samples_read < samples_to_read && (ret > 0 || ret == OP_HOLE))
    {
        int nframes = samples_to_read-samples_read;
        float pcm[nframes * _info->fmt.channels];
        int new_link = -1;
        ret = op_read_float(info->opusfile, pcm, nframes * _info->fmt.channels, &new_link);

        if (ret < 0) {
            // Decode error: OP_HOLE is retried by the loop condition above;
            // any other negative return ends the loop.
        }
        else if (new_link != info->cur_bit_stream && !op_seekable (info->opusfile) && new_streaming_link(info, new_link)) {
            samples_read = samples_to_read;
            break;
        }
        else if (ret > 0) {
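            // Copy the decoded interleaved samples into the output buffer,
            // remapping channels through info->channelmap when it is set.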
            for (int channel = 0; channel < _info->fmt.channels; channel++) {
                const float *pcm_channel = &pcm[info->channelmap ? info->channelmap[channel] : channel];
                float *ptr = ((float *)bytes + samples_read*_info->fmt.channels) + channel;
                for (int sample = 0; sample < ret; sample ++, pcm_channel += _info->fmt.channels) {
                    *ptr = *pcm_channel;
                    ptr += _info->fmt.channels;
                }
            }
            samples_read += ret;
        }
    }
    bytes_read = samples_read * sizeof(float) * _info->fmt.channels;
    info->currentsample += bytes_read / (sizeof (float) * _info->fmt.channels);


    int64_t startsample = deadbeef->pl_item_get_startsample (info->it);
    _info->readpos = (float)(op_pcm_tell(info->opusfile) - startsample) / _info->fmt.samplerate;
    if (info->set_bitrate && _info->readpos > info->next_update) {
        const int rate = (int)op_bitrate_instant(info->opusfile) / 1000;
        if (rate > 0) {
            deadbeef->streamer_set_bitrate(rate);
            info->next_update = info->next_update <= 0 ? info->next_update + 1 : _info->readpos + 5;
        }
    }

    return bytes_read;
}
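
Example #3 relies on op_pcm_tell() twice: to clamp each read so it never crosses a sub-track boundary, and to compute readpos for the player. Below is a reduced sketch of just the clamping step, with the plugin machinery stripped away; the function name frames_until_end and its parameters are illustrative.

#include <opusfile.h>

// Sketch only: how many sample frames may still be read before reaching a
// sub-track boundary at endsample (expressed in 48 kHz frames).
static int frames_until_end(OggOpusFile *of, ogg_int64_t endsample, int wanted) {
    if (endsample <= 0) {
        return wanted; // no boundary configured for this track
    }
    ogg_int64_t left = endsample - op_pcm_tell(of);
    if (left <= 0) {
        return 0; // already at or past the boundary
    }
    return left < wanted ? (int)left : wanted;
}
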
Example #4: VoiceMessagesLoader::onLoad() tracks the decoder's PCM offset with op_pcm_tell() while streaming a voice message into OpenAL buffers.
void VoiceMessagesLoader::onLoad(AudioData *audio) {
	bool started = false;
	int32 audioindex = -1;
	Loader *l = 0;
	Loaders::iterator j = _loaders.end();
	{
		QMutexLocker lock(&voicemsgsMutex);
		VoiceMessages *voice = audioVoice();
		if (!voice) return;

		for (int32 i = 0; i < AudioVoiceMsgSimultaneously; ++i) {
			VoiceMessages::Msg &m(voice->_data[i]);
			if (m.audio != audio || !m.loading) continue;

			audioindex = i;
			j = _loaders.find(audio);
			if (j != _loaders.end() && (j.value()->fname != m.fname || j.value()->data.size() != m.data.size())) {
				delete j.value();
				_loaders.erase(j);
				j = _loaders.end();
			}
			if (j == _loaders.end()) {
				l = (j = _loaders.insert(audio, new Loader())).value();
				l->fname = m.fname;
				l->data = m.data;
				
				int ret;
				if (m.data.isEmpty()) {
					l->file = op_open_file(m.fname.toUtf8().constData(), &ret);
				} else {
					l->file = op_open_memory((const unsigned char*)m.data.constData(), m.data.size(), &ret);
				}
				if (!l->file) {
					LOG(("Audio Error: op_open_file failed for '%1', data size '%2', error code %3").arg(m.fname).arg(m.data.size()).arg(ret));
					m.state = VoiceMessageStopped;
					return loadError(j);
				}
				ogg_int64_t duration = op_pcm_total(l->file, -1);
				if (duration < 0) {
					LOG(("Audio Error: op_pcm_total failed to get full duration for '%1', data size '%2', error code %3").arg(m.fname).arg(m.data.size()).arg(duration));
					m.state = VoiceMessageStopped;
					return loadError(j);
				}
				m.duration = duration;
				m.skipStart = 0;
				m.skipEnd = duration;
				m.position = 0;
				m.started = 0;
				started = true;
			} else {
				if (!m.skipEnd) continue;
				l = j.value();
			}
			break;
		}
	}

	if (j == _loaders.end()) {
		LOG(("Audio Error: trying to load part of audio, that is not playing at the moment"));
		emit error(audio);
		return;
	}
	if (started) {
		l->pcm_offset = op_pcm_tell(l->file);
		l->pcm_print_offset = l->pcm_offset - AudioVoiceMsgFrequency;
	}

	bool finished = false;
    DEBUG_LOG(("Audio Info: reading buffer for file '%1', data size '%2', current pcm_offset %3").arg(l->fname).arg(l->data.size()).arg(l->pcm_offset));

	QByteArray result;
	int64 samplesAdded = 0;
	while (result.size() < AudioVoiceMsgBufferSize) {
		opus_int16 pcm[AudioVoiceMsgFrequency * AudioVoiceMsgChannels];

		int ret = op_read_stereo(l->file, pcm, sizeof(pcm) / sizeof(*pcm));
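		// ret is the number of samples read per channel; 0 means end of
		// stream and a negative value is an opusfile error code.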
		if (ret < 0) {
			{
				QMutexLocker lock(&voicemsgsMutex);
				VoiceMessages *voice = audioVoice();
				if (voice) {
					VoiceMessages::Msg &m(voice->_data[audioindex]);
					if (m.audio == audio) {
						m.state = VoiceMessageStopped;
					}
				}
			}
			LOG(("Audio Error: op_read_stereo failed, error code %1").arg(ret));
			return loadError(j);
		}

		int li = op_current_link(l->file);
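		// In a chained stream the link index changes when a new logical
		// bitstream begins; re-read its header and tags when that happens.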
		if (li != l->prev_li) {
			const OpusHead *head = op_head(l->file, li);
			const OpusTags *tags = op_tags(l->file, li);
			for (int32 ci = 0; ci < tags->comments; ++ci) {
				const char *comment = tags->user_comments[ci];
				if (opus_tagncompare("METADATA_BLOCK_PICTURE", 22, comment) == 0) {
					OpusPictureTag pic;
					int err = opus_picture_tag_parse(&pic, comment);
					if (err >= 0) {
						opus_picture_tag_clear(&pic);
					}
				}
			}
			if (!op_seekable(l->file)) {
				l->pcm_offset = op_pcm_tell(l->file) - ret;
			}
		}
		if (li != l->prev_li || l->pcm_offset >= l->pcm_print_offset + AudioVoiceMsgFrequency) {
			l->pcm_print_offset = l->pcm_offset;
		}
		l->pcm_offset = op_pcm_tell(l->file);

		if (!ret) {
			DEBUG_LOG(("Audio Info: read completed"));
			finished = true;
			break;
		}
		result.append((const char*)pcm, sizeof(*pcm) * ret * AudioVoiceMsgChannels);
		l->prev_li = li;
		samplesAdded += ret;

		{
			QMutexLocker lock(&voicemsgsMutex);
			VoiceMessages *voice = audioVoice();
			if (!voice) return;

			VoiceMessages::Msg &m(voice->_data[audioindex]);
			if (m.audio != audio || !m.loading || m.fname != l->fname || m.data.size() != l->data.size()) {
				LOG(("Audio Error: playing changed while loading"));
				m.state = VoiceMessageStopped;
				return loadError(j);
			}
		}
	}

	QMutexLocker lock(&voicemsgsMutex);
	VoiceMessages *voice = audioVoice();
	if (!voice) return;

	VoiceMessages::Msg &m(voice->_data[audioindex]);
	if (m.audio != audio || !m.loading || m.fname != l->fname || m.data.size() != l->data.size()) {
		LOG(("Audio Error: playing changed while loading"));
		m.state = VoiceMessageStopped;
		return loadError(j);
	}

	if (started) {
		if (m.source) {
			alSourceStop(m.source);
			for (int32 i = 0; i < 3; ++i) {
				if (m.samplesCount[i]) {
					alSourceUnqueueBuffers(m.source, 1, m.buffers + i);
					m.samplesCount[i] = 0;
				}
			}
			m.nextBuffer = 0;
		}
	}
	if (samplesAdded) {
		if (!m.source) {
			alGenSources(1, &m.source);
			alSourcef(m.source, AL_PITCH, 1.f);
			alSourcef(m.source, AL_GAIN, 1.f);
			alSource3f(m.source, AL_POSITION, 0, 0, 0);
			alSource3f(m.source, AL_VELOCITY, 0, 0, 0);
			alSourcei(m.source, AL_LOOPING, 0);
		}
		if (!m.buffers[m.nextBuffer]) alGenBuffers(3, m.buffers);
		if (!_checkALError()) {
			m.state = VoiceMessageStopped;
			return loadError(j);
		}

		if (m.samplesCount[m.nextBuffer]) {
			alSourceUnqueueBuffers(m.source, 1, m.buffers + m.nextBuffer);
			m.skipStart += m.samplesCount[m.nextBuffer];
		}

		m.samplesCount[m.nextBuffer] = samplesAdded;
		alBufferData(m.buffers[m.nextBuffer], AL_FORMAT_STEREO16, result.constData(), result.size(), AudioVoiceMsgFrequency);
		alSourceQueueBuffers(m.source, 1, m.buffers + m.nextBuffer);
		m.skipEnd -= samplesAdded;

		m.nextBuffer = (m.nextBuffer + 1) % 3;

		if (!_checkALError()) {
			m.state = VoiceMessageStopped;
			return loadError(j);
		}
	} else {
		finished = true;
	}
	if (finished) {
		m.skipEnd = 0;
		m.duration = m.skipStart + m.samplesCount[0] + m.samplesCount[1] + m.samplesCount[2];
	}
	m.loading = false;
	if (m.state == VoiceMessageResuming || m.state == VoiceMessagePlaying || m.state == VoiceMessageStarting) {
		ALint state = AL_INITIAL;
		alGetSourcei(m.source, AL_SOURCE_STATE, &state);
		if (_checkALError()) {
			if (state != AL_PLAYING) {
				alSourcePlay(m.source);
				emit needToCheck();
			}
		}
	}
}
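
Example #4 calls op_pcm_tell() after every op_read_stereo() to keep a running pcm_offset for logging and progress tracking. Below is a reduced sketch of just that bookkeeping, without the OpenAL queueing and the mutex handling; the 48000 * 2 buffer stands in for AudioVoiceMsgFrequency * AudioVoiceMsgChannels under the assumption of a one-second 48 kHz stereo buffer, and the function name is illustrative.

#include <opusfile.h>
#include <vector>

// Sketch only: decode a stereo Opus stream while tracking the PCM offset
// that op_pcm_tell() reports after each read.
std::vector<opus_int16> decodeTrackingOffset(OggOpusFile *file) {
	std::vector<opus_int16> out;
	opus_int16 pcm[48000 * 2]; // one second of interleaved stereo at 48 kHz
	for (;;) {
		int ret = op_read_stereo(file, pcm, sizeof(pcm) / sizeof(*pcm));
		if (ret < 0) {
			break; // decode error; the original logs it and aborts the load
		}
		ogg_int64_t pcmOffset = op_pcm_tell(file); // frames decoded so far
		(void)pcmOffset; // the original stores and logs this value
		if (ret == 0) {
			break; // end of stream
		}
		out.insert(out.end(), pcm, pcm + ret * 2); // ret frames x 2 channels
	}
	return out;
}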