bool MoviePlayerDXA::processFrame() { Graphics::Surface *screen = _vm->_system->lockScreen(); copyFrameToBuffer((byte *)screen->pixels, (_vm->_screenWidth - getWidth()) / 2, (_vm->_screenHeight - getHeight()) / 2, _vm->_screenWidth); _vm->_system->unlockScreen(); Common::Rational soundTime(_mixer->getSoundElapsedTime(_bgSound), 1000); if ((_bgSoundStream == NULL) || ((soundTime * getFrameRate()).toInt() / 1000 < getCurFrame() + 1)) { if (_bgSoundStream && _mixer->isSoundHandleActive(_bgSound)) { while (_mixer->isSoundHandleActive(_bgSound) && (soundTime * getFrameRate()).toInt() < getCurFrame()) { _vm->_system->delayMillis(10); soundTime = Common::Rational(_mixer->getSoundElapsedTime(_bgSound), 1000); } // In case the background sound ends prematurely, update // _ticks so that we can still fall back on the no-sound // sync case for the subsequent frames. _ticks = _vm->_system->getMillis(); } else { _ticks += getTimeToNextFrame(); while (_vm->_system->getMillis() < _ticks) _vm->_system->delayMillis(10); } return true; } warning("dropped frame %i", getCurFrame()); return false; }
bool MoviePlayerDXA::processFrame() { Graphics::Surface *screen = _vm->_system->lockScreen(); copyFrameToBuffer((byte *)screen->getPixels(), (_vm->_screenWidth - getWidth()) / 2, (_vm->_screenHeight - getHeight()) / 2, screen->pitch); _vm->_system->unlockScreen(); uint32 soundTime = _mixer->getSoundElapsedTime(_bgSound); uint32 nextFrameStartTime = ((Video::VideoDecoder::VideoTrack *)getTrack(0))->getNextFrameStartTime(); if ((_bgSoundStream == NULL) || soundTime < nextFrameStartTime) { if (_bgSoundStream && _mixer->isSoundHandleActive(_bgSound)) { while (_mixer->isSoundHandleActive(_bgSound) && soundTime < nextFrameStartTime) { _vm->_system->delayMillis(10); soundTime = _mixer->getSoundElapsedTime(_bgSound); } // In case the background sound ends prematurely, update // _ticks so that we can still fall back on the no-sound // sync case for the subsequent frames. _ticks = _vm->_system->getMillis(); } else { _ticks += getTimeToNextFrame(); while (_vm->_system->getMillis() < _ticks) _vm->_system->delayMillis(10); } return true; } warning("dropped frame %i", getCurFrame()); return false; }
void QuickTimeDecoder::updateAudioBuffer() { if (!_audStream) return; uint32 numberOfChunksNeeded = 0; if (_videoTrackIndex < 0 || _curFrame == (int32)_tracks[_videoTrackIndex]->frameCount - 1) { // If we have no video, there's nothing to base our buffer against // However, one must ask why a QuickTimeDecoder is being used instead of the nice makeQuickTimeStream() function // If we're on the last frame, make sure all audio remaining is buffered numberOfChunksNeeded = _tracks[_audioTrackIndex]->chunkCount; } else { Audio::QuickTimeAudioDecoder::AudioSampleDesc *entry = (Audio::QuickTimeAudioDecoder::AudioSampleDesc *)_tracks[_audioTrackIndex]->sampleDescs[0]; // Calculate the amount of chunks we need in memory until the next frame uint32 timeToNextFrame = getTimeToNextFrame(); uint32 timeFilled = 0; uint32 curAudioChunk = _curAudioChunk - _audStream->numQueuedStreams(); for (; timeFilled < timeToNextFrame && curAudioChunk < _tracks[_audioTrackIndex]->chunkCount; numberOfChunksNeeded++, curAudioChunk++) { uint32 sampleCount = entry->getAudioChunkSampleCount(curAudioChunk); assert(sampleCount); timeFilled += sampleCount * 1000 / entry->_sampleRate; } // Add a couple extra to ensure we don't underrun numberOfChunksNeeded += 3; } // Keep three streams in buffer so that if/when the first two end, it goes right into the next while (_audStream->numQueuedStreams() < numberOfChunksNeeded && _curAudioChunk < _tracks[_audioTrackIndex]->chunkCount) queueNextAudioChunk(); }
void VideoDecoder::update() {
	// Decode and copy out a frame only once its display time has arrived.
	if (!(getTimeToNextFrame() > 0)) {
		processData();
		copyData();
	}
}
// Queue enough audio chunks to keep the mixer fed until the next video frame.
void QuickTimeDecoder::updateAudioBuffer() {
	// No audio track: nothing to buffer.
	if (_audioTrackIndex < 0)
		return;

	uint32 numberOfChunksNeeded = 0;

	// NOTE(review): _videoTrackIndex is used unchecked below — presumably a
	// valid video track is guaranteed before this is called; verify against
	// the callers.
	if (_curFrame == (int32)_tracks[_videoTrackIndex]->frameCount - 1) {
		// If we're on the last frame, make sure all audio remaining is buffered
		numberOfChunksNeeded = _tracks[_audioTrackIndex]->chunkCount;
	} else {
		AudioSampleDesc *entry = (AudioSampleDesc *)_tracks[_audioTrackIndex]->sampleDescs[0];

		// Calculate the amount of chunks we need in memory until the next frame
		uint32 timeToNextFrame = getTimeToNextFrame();
		uint32 timeFilled = 0;
		// First chunk not yet handed to the audio stream.
		uint32 curAudioChunk = _curAudioChunk - getNumQueuedStreams();

		for (; timeFilled < timeToNextFrame && curAudioChunk < _tracks[_audioTrackIndex]->chunkCount; numberOfChunksNeeded++, curAudioChunk++) {
			uint32 sampleCount = entry->getAudioChunkSampleCount(curAudioChunk);
			assert(sampleCount);

			// Duration of this chunk in milliseconds at the track sample rate.
			timeFilled += sampleCount * 1000 / entry->_sampleRate;
		}

		// Add a couple extra to ensure we don't underrun
		numberOfChunksNeeded += 3;
	}

	// Keep three streams in buffer so that if/when the first two end, it goes right into the next
	while (getNumQueuedStreams() < numberOfChunksNeeded && _curAudioChunk < _tracks[_audioTrackIndex]->chunkCount)
		queueNextAudioChunk();
}
void VideoDecoder::update() { if (getTimeToNextFrame() > 0) return; debugC(Common::kDebugVideo, 9, "New video frame"); processData(); copyData(); }
bool MoviePlayerSMK::processFrame() { Graphics::Surface *screen = _vm->_system->lockScreen(); copyFrameToBuffer((byte *)screen->getPixels(), (_vm->_screenWidth - getWidth()) / 2, (_vm->_screenHeight - getHeight()) / 2, screen->pitch); _vm->_system->unlockScreen(); uint32 waitTime = getTimeToNextFrame(); if (!waitTime && !endOfVideoTracks()) { warning("dropped frame %i", getCurFrame()); return false; } _vm->_system->updateScreen(); // Wait before showing the next frame _vm->_system->delayMillis(waitTime); return true; }
// Advance to and decode the next video frame once it is due, topping up the
// audio buffer along the way.
void QuickTimeDecoder::processData() {
	// Reached (or passed) the last frame: end playback.
	if (_curFrame >= (int32)_tracks[_videoTrackIndex]->frameCount - 1) {
		finish();
		return;
	}

	// Not yet time to show the next frame.
	if (getTimeToNextFrame() > 0)
		return;

	_curFrame++;
	_nextFrameStartTime += getFrameDuration();

	// Update the audio while we're at it
	updateAudioBuffer();

	// Get the next packet
	uint32 descId;
	Common::SeekableReadStream *frameData = getNextFramePacket(descId);

	// Bail out on a missing packet or an out-of-range sample description id.
	// (descId is 1-based; 0 is invalid.)
	if (!frameData || !descId || descId > _tracks[_videoTrackIndex]->sampleDescs.size()) {
		delete frameData;
		return;
	}

	// Find which video description entry we want
	VideoSampleDesc *entry = (VideoSampleDesc *)_tracks[_videoTrackIndex]->sampleDescs[descId - 1];

	if (entry->_videoCodec) {
		assert(_surface);
		// Decode the packet into the backing surface and mark it dirty so
		// the frame gets copied out.
		entry->_videoCodec->decodeFrame(*_surface, *frameData);
		_needCopy = true;
	}

	delete frameData;
}
bool VideoDecoder::needsUpdate() const {
	// No update is needed once there are no frames left to show.
	if (!hasFramesLeft())
		return false;
	// Otherwise, update as soon as the next frame is due.
	return getTimeToNextFrame() == 0;
}
// Decode the next Bink frame: demux the per-track audio packets that prefix
// the frame, play the selected audio track, then decode the video packet.
void Bink::processData() {
	// Not yet time for the next frame.
	if (getTimeToNextFrame() > 0)
		return;

	// All frames consumed: end playback.
	if (_curFrame >= _frames.size()) {
		finish();
		return;
	}

	VideoFrame &frame = _frames[_curFrame];

	if (!_bink->seek(frame.offset))
		throw Common::Exception(Common::kSeekError);

	// Bytes remaining in this frame; whatever is left after the audio
	// packets is the video packet.
	uint32 frameSize = frame.size;

	// Each frame starts with one length-prefixed audio packet per track.
	for (uint32 i = 0; i < _audioTracks.size(); i++) {
		AudioTrack &audio = _audioTracks[i];

		uint32 audioPacketLength = _bink->readUint32LE();

		// Account for the 4-byte length field itself.
		frameSize -= 4;

		if (frameSize < audioPacketLength)
			throw Common::Exception("Audio packet too big for the frame");

		if (audioPacketLength >= 4) {
			uint32 audioPacketStart = _bink->pos();
			uint32 audioPacketEnd = _bink->pos() + audioPacketLength;

			if (i == _audioTrack) {
				// Only play one audio track

				// Number of samples in bytes
				audio.sampleCount = _bink->readUint32LE() / (2 * audio.channels);

				// Wrap the payload after the sample-count dword in a
				// little-endian bit stream (substream is disposed by the
				// BitStream, per the 'true' flag).
				audio.bits = new Common::BitStream32LELSB(new Common::SeekableSubReadStream(_bink, audioPacketStart + 4, audioPacketEnd), true);

				audioPacket(audio);

				delete audio.bits;
				audio.bits = 0;
			}

			// Skip (the rest of) this packet, whether it was played or not.
			_bink->seek(audioPacketEnd);

			frameSize -= audioPacketLength;
		}
		// NOTE(review): a packet with 0 < audioPacketLength < 4 is neither
		// skipped nor subtracted from frameSize here — presumably such
		// lengths cannot occur in well-formed files; verify against the
		// Bink container format.
	}

	// The remainder of the frame is the video packet.
	uint32 videoPacketStart = _bink->pos();
	uint32 videoPacketEnd = _bink->pos() + frameSize;

	frame.bits = new Common::BitStream32LELSB(new Common::SeekableSubReadStream(_bink, videoPacketStart, videoPacketEnd), true);

	videoPacket(frame);

	delete frame.bits;
	frame.bits = 0;

	// A new frame was decoded; flag it for copying out.
	_needCopy = true;

	_curFrame++;
}
bool VideoDecoder::needsUpdate() const {
	// Once the video has ended there is nothing left to update.
	if (endOfVideo())
		return false;
	// Otherwise an update is due exactly when the next frame's time arrives.
	return getTimeToNextFrame() == 0;
}