Code Example #1
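Here updateAudioBuffer() is used as a Qt slot: it is connected to the mixer's nextAudioBuffer() signal while the widget is active and disconnected when it is not, with a final update() to erase the last drawn waves once the periodic updates stop.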
void visualizationWidget::setActive( bool _active )
{
	m_active = _active;
	if( m_active )
	{
		connect( engine::mainWindow(),
					SIGNAL( periodicUpdate() ),
					this, SLOT( update() ) );
		connect( engine::mixer(),
					SIGNAL( nextAudioBuffer() ),
					this, SLOT( updateAudioBuffer() ) );
	}
	else
	{
		disconnect( engine::mainWindow(),
					SIGNAL( periodicUpdate() ),
					this, SLOT( update() ) );
		disconnect( engine::mixer(),
					SIGNAL( nextAudioBuffer() ),
					this, SLOT( updateAudioBuffer() ) );
		// we have to update (remove last waves),
		// because timer doesn't do that anymore
		update();
	}
}
Code Example #2
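In this decodeNextFrame() variant, updateAudioBuffer() is called after the next video track has been determined (as the comment notes), presumably so the audio queue keeps pace with video decoding; the decoded frame is then scaled on demand when the scale factors differ from 1.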
const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
	if (!_nextVideoTrack)
		return 0;

	const Graphics::Surface *frame = _nextVideoTrack->decodeNextFrame();

	if (!_setStartTime) {
		_startTime = g_system->getMillis();
		_setStartTime = true;
	}

	_nextVideoTrack = findNextVideoTrack();
	_needUpdate = false;

	// Update audio buffers too
	// (needs to be done after we find the next track)
	updateAudioBuffer();

	// We have to initialize the scaled surface
	if (frame && (_scaleFactorX != 1 || _scaleFactorY != 1)) {
		if (!_scaledSurface) {
			_scaledSurface = new Graphics::Surface();
			_scaledSurface->create(_width, _height, getPixelFormat());
		}

		scaleSurface(frame, _scaledSurface, _scaleFactorX, _scaleFactorY);
		return _scaledSurface;
	}

	return frame;
}
Code Example #3
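A close variant of the previous example: the audio buffers are again refreshed after findNextVideoTrack(), but here the scaled surface is assumed to have been created elsewhere and is only used when it already exists.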
const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
	if (!_nextVideoTrack)
		return 0;

	const Graphics::Surface *frame = _nextVideoTrack->decodeNextFrame();

	if (!_setStartTime) {
		_startTime = g_system->getMillis();
		_setStartTime = true;
	}

	_nextVideoTrack = findNextVideoTrack();
	_needUpdate = false;

	// Update audio buffers too
	// (needs to be done after we find the next track)
	updateAudioBuffer();

	if (_scaledSurface) {
		scaleSurface(frame, _scaledSurface, _scaleFactorX, _scaleFactorY);
		return _scaledSurface;
	}

	return frame;
}
Code Example #4
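In this Qt event handler, updateAudioBuffer() is driven by a timer: custom events start, resume, suspend, stop, or reset the audio output under a shared mutex, and the periodic QEvent::Timer with the matching timer id triggers the buffer update.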
bool QSpotifyAudioThreadWorker::event(QEvent *e)
{
    // Don't log timer events, to avoid spamming the log
    if (e->type() != QEvent::Timer)
        qDebug() << "QSpotifyAudioThreadWorker::event" << e->type();
    if (e->type() == StreamingStartedEventType) {
        QMutexLocker lock(&g_mutex);
        QSpotifyStreamingStartedEvent *ev = static_cast<QSpotifyStreamingStartedEvent *>(e);
        startStreaming(ev->channels(), ev->sampleRate());
        e->accept();
        return true;
    } else if (e->type() == ResumeEventType) {
        QMutexLocker lock(&g_mutex);
        if (m_audioOutput) {
            m_audioOutput->resume();
            m_audioTimerID = startTimer(AUDIOSTREAM_UPDATE_INTERVAL);
        }
        e->accept();
        return true;
    } else if (e->type() == SuspendEventType) {
        QMutexLocker lock(&g_mutex);
        if (m_audioOutput) {
            killTimer(m_audioTimerID);
            m_audioOutput->suspend();
        }
        e->accept();
        return true;
    } else if (e->type() == AudioStopEventType) {
        QMutexLocker lock(&g_mutex);
        killTimer(m_audioTimerID);
        g_buffer.close();
        if (m_audioOutput) {
            m_audioOutput->stop();
            m_audioOutput->deleteLater();
            m_audioOutput = nullptr;
            m_iodevice = nullptr;
        }
        e->accept();
        return true;
    } else if (e->type() == ResetBufferEventType) {
        QMutexLocker lock(&g_mutex);
        if (m_audioOutput) {
            killTimer(m_audioTimerID);
            m_audioOutput->reset();
            g_buffer.reset();
            startAudioOutput();
        }
        e->accept();
        return true;
    } else if (e->type() == QEvent::Timer) {
        QTimerEvent *te = static_cast<QTimerEvent *>(e);
        if (te->timerId() == m_audioTimerID) {
            updateAudioBuffer();
            e->accept();
            return true;
        }
    }
    return QObject::event(e);
}
Code Example #5
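Before playback begins, updateAudioBuffer() pre-fills the audio data, after which every audio track is handed to the mixer; each handle is paused again immediately if the decoder itself is still paused.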
void QuickTimeDecoder::startAudio() {
	updateAudioBuffer();

	for (uint32 i = 0; i < _audioTracks.size(); i++) {
		g_system->getMixer()->playStream(Audio::Mixer::kPlainSoundType, &_audioHandles[i], _audioTracks[i], -1, Audio::Mixer::kMaxChannelVolume, 0, DisposeAfterUse::NO);

		// Pause the audio again if we're still paused
		if (isPaused())
			g_system->getMixer()->pauseHandle(_audioHandles[i], true);
	}
}
Code Example #6
File: qt_decoder.cpp  Project: jweinberg/scummvm
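An older decodeNextFrame() implementation: updateAudioBuffer() is called right after the frame counter and next-frame time are advanced, before the next packet is fetched, decoded, and checked for palette changes.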
const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
	if (_videoStreamIndex < 0 || _curFrame >= (int32)getFrameCount() - 1)
		return 0;

	if (_startTime == 0)
		_startTime = g_system->getMillis();

	_curFrame++;
	_nextFrameStartTime += getFrameDuration();

	// Update the audio while we're at it
	updateAudioBuffer();

	// Get the next packet
	uint32 descId;
	Common::SeekableReadStream *frameData = getNextFramePacket(descId);

	if (!frameData || !descId || descId > _streams[_videoStreamIndex]->stsdEntryCount)
		return 0;

	// Find which video description entry we want
	STSDEntry *entry = &_streams[_videoStreamIndex]->stsdEntries[descId - 1];

	if (!entry->videoCodec)
		return 0;

	const Graphics::Surface *frame = entry->videoCodec->decodeImage(frameData);
	delete frameData;

	// Update the palette
	if (entry->videoCodec->containsPalette()) {
		// The codec itself contains a palette
		if (entry->videoCodec->hasDirtyPalette()) {
			_palette = entry->videoCodec->getPalette();
			_dirtyPalette = true;
		}
	} else {
		// Check if the video description has been updated
		byte *palette = entry->palette;

		if (palette != _palette) {
			_palette = palette;
			_dirtyPalette = true;
		}
	}

	return scaleSurface(frame);
}
Code Example #7
File: qt_decoder.cpp  Project: Fyre91/scummvm
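Here the actual frame decoding is delegated to VideoDecoder::decodeNextFrame(); updateAudioBuffer() follows it, and the optional scaling step mirrors example #2.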
const Graphics::Surface *QuickTimeDecoder::decodeNextFrame() {
	const Graphics::Surface *frame = VideoDecoder::decodeNextFrame();

	// Update audio buffers too
	// (needs to be done after we find the next track)
	updateAudioBuffer();

	// We have to initialize the scaled surface
	if (frame && (_scaleFactorX != 1 || _scaleFactorY != 1)) {
		if (!_scaledSurface) {
			_scaledSurface = new Graphics::Surface();
			_scaledSurface->create(_width, _height, getPixelFormat());
		}

		scaleSurface(frame, _scaledSurface, _scaleFactorX, _scaleFactorY);
		return _scaledSurface;
	}

	return frame;
}
Code Example #8
File: quicktime.cpp  Project: Hellzed/xoreos
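In this processData() implementation, updateAudioBuffer() is again called while advancing to the next frame, before the frame packet is decoded into the shared surface by the track's video codec.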
void QuickTimeDecoder::processData() {
	if (_curFrame >= (int32)_tracks[_videoTrackIndex]->frameCount - 1) {
		finish();
		return;
	}

	if (getTimeToNextFrame() > 0)
		return;

	_curFrame++;
	_nextFrameStartTime += getFrameDuration();

	// Update the audio while we're at it
	updateAudioBuffer();

	// Get the next packet
	uint32 descId;
	Common::SeekableReadStream *frameData = getNextFramePacket(descId);

	if (!frameData || !descId || descId > _tracks[_videoTrackIndex]->sampleDescs.size()) {
		delete frameData;
		return;
	}

	// Find which video description entry we want
	VideoSampleDesc *entry = (VideoSampleDesc *)_tracks[_videoTrackIndex]->sampleDescs[descId - 1];

	if (entry->_videoCodec) {
		assert(_surface);

		entry->_videoCodec->decodeFrame(*_surface, *frameData);
		_needCopy = true;
	}

	delete frameData;
}
Code Example #9
File: qt_decoder.cpp  Project: jweinberg/scummvm
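A minimal startAudio(): the audio buffer is filled once and the single stream handed to the mixer, guarded by a check that an audio stream exists at all (i.e. there is audio and its format is supported).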
void QuickTimeDecoder::startAudio() {
	if (_audStream) { // No audio/audio not supported
		updateAudioBuffer();
		g_system->getMixer()->playStream(Audio::Mixer::kPlainSoundType, &_audHandle, _audStream);
	}
}
Code Example #10
File: qt_decoder.cpp  Project: TomFrost/scummvm
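The same pattern as the previous example, except that playStream() is called with explicit volume and balance arguments and DisposeAfterUse::NO, so the decoder keeps ownership of the stream.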
void QuickTimeDecoder::startAudio() {
	if (_audStream) {
		updateAudioBuffer();
		g_system->getMixer()->playStream(Audio::Mixer::kPlainSoundType, &_audHandle, _audStream, -1, Audio::Mixer::kMaxChannelVolume, 0, DisposeAfterUse::NO);
	} // else no audio or the audio compression is not supported
}