void SeekableBinkDecoder::seekToFrame(uint32 frame) {
    assert(frame < _frames.size());

    // Fast path
    if ((int32)frame == _curFrame + 1)
        return;

    // Stop all audio (for now)
    stopAudio();

    // Track down the keyframe
    _curFrame = findKeyFrame(frame) - 1;
    while (_curFrame < (int32)frame - 1)
        skipNextFrame();

    // Map out the starting point
    Common::Rational startTime = frame * 1000 / getFrameRate();
    _startTime = g_system->getMillis() - startTime.toInt();
    resetPauseStartTime();

    // Adjust the audio starting point
    if (_audioTrack < _audioTracks.size()) {
        Common::Rational audioStartTime = (frame + 1) * 1000 / getFrameRate();
        _audioStartOffset = audioStartTime.toInt();
    }

    // Restart the audio
    startAudio();
}
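The Bink seek above, and the QuickTime handlers below, all rely on the same idea: only keyframes can be decoded independently, so a seek jumps back to the nearest keyframe at or before the target frame and then decodes (and discards) frames forward until just before the target, leaving the next decode to produce the target itself. The following is a minimal standalone sketch of that pattern under stated assumptions; findPrecedingKeyframe, rollForwardTo, and decodeAndDiscard are hypothetical names for illustration, not ScummVM API.

// Minimal standalone sketch of the "jump to keyframe, roll forward" pattern.
// All names here are illustrative, not ScummVM API.
#include <cstdint>
#include <functional>
#include <vector>

// Return the highest keyframe index <= target, assuming `keyframes` is sorted
// ascending and contains frame 0.
static uint32_t findPrecedingKeyframe(const std::vector<uint32_t> &keyframes, uint32_t target) {
    uint32_t best = 0;
    for (uint32_t k : keyframes) {
        if (k > target)
            break;
        best = k;
    }
    return best;
}

// Position a decoder so the *next* decoded frame is `target`: decode and
// discard every frame from the preceding keyframe up to target - 1.
static void rollForwardTo(uint32_t target, const std::vector<uint32_t> &keyframes,
                          const std::function<void(uint32_t)> &decodeAndDiscard) {
    for (uint32_t f = findPrecedingKeyframe(keyframes, target); f < target; f++)
        decodeAndDiscard(f);
}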
void QuickTimeDecoder::VideoTrackHandler::enterNewEditList(bool bufferFrames) {
    // Bypass all empty edit lists first
    while (!atLastEdit() && _parent->editList[_curEdit].mediaTime == -1)
        _curEdit++;

    if (atLastEdit())
        return;

    uint32 frameNum = 0;
    bool done = false;
    uint32 totalDuration = 0;
    uint32 prevDuration = 0;

    // Track down where the mediaTime is in the media
    // This is basically time -> frame mapping
    // Note that this code uses first frame = 0
    for (int32 i = 0; i < _parent->timeToSampleCount && !done; i++) {
        for (int32 j = 0; j < _parent->timeToSample[i].count; j++) {
            if (totalDuration == (uint32)_parent->editList[_curEdit].mediaTime) {
                done = true;
                prevDuration = totalDuration;
                break;
            } else if (totalDuration > (uint32)_parent->editList[_curEdit].mediaTime) {
                done = true;
                frameNum--;
                break;
            }

            prevDuration = totalDuration;
            totalDuration += _parent->timeToSample[i].duration;
            frameNum++;
        }
    }

    if (bufferFrames) {
        // Track down the keyframe
        // Then decode until the frame before target
        _curFrame = findKeyFrame(frameNum) - 1;
        while (_curFrame < (int32)frameNum - 1)
            bufferNextFrame();
    } else {
        // Since frameNum is the frame that needs to be displayed,
        // we'll set _curFrame to be the "last frame displayed"
        _curFrame = frameNum - 1;
    }

    _nextFrameStartTime = getCurEditTimeOffset();

    // Set an override for the duration since we came up in-between two frames
    if (prevDuration != totalDuration)
        _durationOverride = totalDuration - prevDuration;
    else
        _durationOverride = -1;
}
void QuickTimeDecoder::VideoTrackHandler::enterNewEditList(bool bufferFrames) {
    // Bypass all empty edit lists first
    while (!endOfTrack() && _parent->editList[_curEdit].mediaTime == -1)
        _curEdit++;

    if (endOfTrack())
        return;

    uint32 frameNum = 0;
    bool done = false;
    uint32 totalDuration = 0;
    uint32 prevDuration = 0;

    // Track down where the mediaTime is in the media
    for (int32 i = 0; i < _parent->timeToSampleCount && !done; i++) {
        for (int32 j = 0; j < _parent->timeToSample[i].count; j++) {
            if (totalDuration == (uint32)_parent->editList[_curEdit].mediaTime) {
                done = true;
                prevDuration = totalDuration;
                break;
            } else if (totalDuration > (uint32)_parent->editList[_curEdit].mediaTime) {
                done = true;
                frameNum--;
                break;
            }

            prevDuration = totalDuration;
            totalDuration += _parent->timeToSample[i].duration;
            frameNum++;
        }
    }

    if (bufferFrames) {
        // Track down the keyframe
        _curFrame = findKeyFrame(frameNum) - 1;
        while (_curFrame < (int32)frameNum - 1)
            bufferNextFrame();
    } else {
        _curFrame = frameNum - 1;
    }

    _nextFrameStartTime = getCurEditTimeOffset();

    // Set an override for the duration since we came up in-between two frames
    if (prevDuration != totalDuration)
        _durationOverride = totalDuration - prevDuration;
}
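Both versions of enterNewEditList() walk the QuickTime time-to-sample (stts) table to convert the edit's mediaTime into a frame number. The table is a run-length encoding: each entry says "the next count frames each last duration ticks", so the walk accumulates durations until the media time is reached or passed. Below is a standalone sketch of that mapping; TimeToSampleEntry and mediaTimeToFrame are illustrative names only, not the decoder's own structures.

// Standalone sketch: map a media time (in track timescale ticks) to a
// zero-based frame number using a QuickTime-style time-to-sample table.
// Names are illustrative only.
#include <cstdint>
#include <vector>

struct TimeToSampleEntry {
    uint32_t count;    // how many consecutive frames...
    uint32_t duration; // ...each last this many timescale ticks
};

static uint32_t mediaTimeToFrame(uint32_t mediaTime, const std::vector<TimeToSampleEntry> &stts) {
    uint32_t frame = 0;
    uint32_t total = 0;

    for (const TimeToSampleEntry &e : stts) {
        for (uint32_t j = 0; j < e.count; j++) {
            // mediaTime falls inside [total, total + duration) -> this frame
            if (total + e.duration > mediaTime)
                return frame;
            total += e.duration;
            frame++;
        }
    }

    return frame; // mediaTime is at or past the end of the table
}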
void QuickTimeDecoder::VideoTrackHandler::seekToTime(Audio::Timestamp time) {
    // First, figure out what edit we're in
    time = time.convertToFramerate(_parent->timeScale);

    // Continue until we get to where we need to be
    for (_curEdit = 0; !endOfTrack(); _curEdit++)
        if ((uint32)time.totalNumberOfFrames() >= getCurEditTimeOffset() && (uint32)time.totalNumberOfFrames() < getCurEditTimeOffset() + getCurEditTrackDuration())
            break;

    // This track is done
    if (endOfTrack())
        return;

    enterNewEditList(false);

    // One extra check for the end of a track
    if (endOfTrack())
        return;

    // Now we're in the edit and need to figure out what frame we need
    while (getRateAdjustedFrameTime() < (uint32)time.totalNumberOfFrames()) {
        _curFrame++;
        if (_durationOverride >= 0) {
            _nextFrameStartTime += _durationOverride;
            _durationOverride = -1;
        } else {
            _nextFrameStartTime += getFrameDuration();
        }
    }

    // All that's left is to figure out what our starting time is going to be
    // Compare the starting point for the frame to where we need to be
    _holdNextFrameStartTime = getRateAdjustedFrameTime() != (uint32)time.totalNumberOfFrames();

    // If we went past the time, go back a frame
    if (_holdNextFrameStartTime)
        _curFrame--;

    // Handle the keyframe here
    int32 destinationFrame = _curFrame + 1;

    assert(destinationFrame < (int32)_parent->frameCount);
    _curFrame = findKeyFrame(destinationFrame) - 1;
    while (_curFrame < destinationFrame - 1)
        bufferNextFrame();
}
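seekToTime() first scans the edit list for the edit whose [timeOffset, timeOffset + trackDuration) range contains the requested time, after converting that time into the track's timescale. A standalone sketch of that scan follows; EditEntry and findEditForTime are hypothetical names used for illustration only.

// Standalone sketch: locate the edit that covers a given track time,
// mirroring the edit-list scan above. Names are illustrative only.
#include <cstdint>
#include <vector>

struct EditEntry {
    uint32_t timeOffset;    // where this edit starts on the track timeline
    uint32_t trackDuration; // how long this edit lasts, in timescale ticks
    int32_t mediaTime;      // -1 marks an empty edit (nothing to display)
};

static int32_t findEditForTime(uint32_t time, const std::vector<EditEntry> &edits) {
    for (uint32_t i = 0; i < (uint32_t)edits.size(); i++)
        if (time >= edits[i].timeOffset && time < edits[i].timeOffset + edits[i].trackDuration)
            return (int32_t)i;

    return -1; // time lies beyond the end of the track
}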
void QuickTimeDecoder::seekToFrame(uint32 frame) {
    assert(_videoTrackIndex >= 0);
    assert(frame < _tracks[_videoTrackIndex]->frameCount);

    // Stop all audio (for now)
    stopAudio();

    // Track down the keyframe
    _curFrame = findKeyFrame(frame) - 1;
    while (_curFrame < (int32)frame - 1)
        decodeNextFrame();

    // Map out the starting point
    _nextFrameStartTime = 0;
    uint32 curFrame = 0;

    for (int32 i = 0; i < _tracks[_videoTrackIndex]->timeToSampleCount && curFrame < frame; i++) {
        for (int32 j = 0; j < _tracks[_videoTrackIndex]->timeToSample[i].count && curFrame < frame; j++) {
            curFrame++;
            _nextFrameStartTime += _tracks[_videoTrackIndex]->timeToSample[i].duration;
        }
    }

    // Adjust the video starting point
    const Audio::Timestamp curVideoTime(0, _nextFrameStartTime, _tracks[_videoTrackIndex]->timeScale);
    _startTime = g_system->getMillis() - curVideoTime.msecs();
    resetPauseStartTime();

    // Adjust the audio starting point
    if (_audioTrackIndex >= 0) {
        _audioStartOffset = curVideoTime;

        // Seek to the new audio location
        setAudioStreamPos(_audioStartOffset);

        // Restart the audio
        startAudio();
    }
}
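The "map out the starting point" step sums the durations of every frame before the target, giving the new frame's start time in track timescale ticks, and the Audio::Timestamp constructed from that sum converts it to milliseconds so _startTime can be back-dated. The conversion itself is just ticks * 1000 / timeScale; a tiny sketch, with ticksToMsecs as a hypothetical helper rather than ScummVM API:

// Tiny standalone sketch of the tick -> millisecond conversion implied by
// Audio::Timestamp(0, ticks, timeScale).msecs(). Illustrative helper only.
#include <cstdint>

static uint32_t ticksToMsecs(uint32_t ticks, uint32_t timeScale) {
    // e.g. 1800 ticks at a 600-ticks-per-second timescale -> 3000 ms
    return (uint32_t)(((uint64_t)ticks * 1000) / timeScale);
}

Back-dating _startTime by this amount makes the decoder behave as if playback had started that many milliseconds ago, so frame timing after the seek stays consistent with the new position.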
void QuickTimeDecoder::seekToFrame(uint32 frame) {
    assert(_videoStreamIndex >= 0);
    assert(frame < _streams[_videoStreamIndex]->nb_frames);

    // Stop all audio (for now)
    stopAudio();

    // Track down the keyframe
    _curFrame = findKeyFrame(frame) - 1;
    while (_curFrame < (int32)frame - 1)
        decodeNextFrame();

    // Map out the starting point
    _nextFrameStartTime = 0;
    uint32 curFrame = 0;

    for (int32 i = 0; i < _streams[_videoStreamIndex]->stts_count && curFrame < frame; i++) {
        for (int32 j = 0; j < _streams[_videoStreamIndex]->stts_data[i].count && curFrame < frame; j++) {
            curFrame++;
            _nextFrameStartTime += _streams[_videoStreamIndex]->stts_data[i].duration;
        }
    }

    // Adjust the video starting point
    const Audio::Timestamp curVideoTime(0, _nextFrameStartTime, _streams[_videoStreamIndex]->time_scale);
    _startTime = g_system->getMillis() - curVideoTime.msecs();
    resetPauseStartTime();

    // Adjust the audio starting point
    if (_audioStreamIndex >= 0) {
        _audioStartOffset = curVideoTime;

        // Re-create the audio stream
        STSDEntry *entry = &_streams[_audioStreamIndex]->stsdEntries[0];
        _audStream = Audio::makeQueuingAudioStream(entry->sampleRate, entry->channels == 2);

        // First, we need to track down what audio sample we need
        Audio::Timestamp curAudioTime(0, _streams[_audioStreamIndex]->time_scale);
        uint sample = 0;
        bool done = false;
        for (int32 i = 0; i < _streams[_audioStreamIndex]->stts_count && !done; i++) {
            for (int32 j = 0; j < _streams[_audioStreamIndex]->stts_data[i].count; j++) {
                curAudioTime = curAudioTime.addFrames(_streams[_audioStreamIndex]->stts_data[i].duration);

                if (curAudioTime > curVideoTime) {
                    done = true;
                    break;
                }

                sample++;
            }
        }

        // Now to track down what chunk it's in
        _curAudioChunk = 0;
        uint32 totalSamples = 0;
        for (uint32 i = 0; i < _streams[_audioStreamIndex]->chunk_count; i++, _curAudioChunk++) {
            int sampleToChunkIndex = -1;

            for (uint32 j = 0; j < _streams[_audioStreamIndex]->sample_to_chunk_sz; j++)
                if (i >= _streams[_audioStreamIndex]->sample_to_chunk[j].first)
                    sampleToChunkIndex = j;

            assert(sampleToChunkIndex >= 0);

            totalSamples += _streams[_audioStreamIndex]->sample_to_chunk[sampleToChunkIndex].count;

            if (sample < totalSamples) {
                totalSamples -= _streams[_audioStreamIndex]->sample_to_chunk[sampleToChunkIndex].count;
                break;
            }
        }

        // Reposition the audio stream
        readNextAudioChunk();
        if (sample != totalSamples) {
            // HACK: Skip a certain amount of samples from the stream
            // (There's got to be a better way to do this!)
            int16 *tempBuffer = new int16[sample - totalSamples];
            _audStream->readBuffer(tempBuffer, sample - totalSamples);
            delete[] tempBuffer;
            debug(3, "Skipping %d audio samples", sample - totalSamples);
        }

        // Restart the audio
        startAudio();
    }
}
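The audio repositioning in this older version has to find which chunk contains the target sample. The sample-to-chunk table gives, for each run of chunks starting at a given index, how many samples each chunk in the run holds; the loop above accumulates those counts per chunk until the target sample falls inside the current chunk, then backs the total off so the remainder can be skipped from the decoded stream. Below is a standalone sketch of the same lookup; SampleToChunkEntry, ChunkPos, and findChunkForSample are hypothetical names used only for illustration.

// Standalone sketch: walk the chunks, using sample-to-chunk runs to know how
// many samples each chunk holds, and stop at the chunk containing `sample`.
// Names are illustrative only.
#include <cstdint>
#include <vector>

struct SampleToChunkEntry {
    uint32_t first;           // first chunk index this run applies to
    uint32_t samplesPerChunk; // samples held by each chunk in the run
};

struct ChunkPos {
    uint32_t chunk;           // chunk containing the sample
    uint32_t samplesToSkip;   // samples to discard within that chunk
};

static ChunkPos findChunkForSample(uint32_t sample, uint32_t chunkCount,
                                   const std::vector<SampleToChunkEntry> &stsc) {
    uint32_t samplesBefore = 0;

    for (uint32_t i = 0; i < chunkCount; i++) {
        // The applicable run is the last one starting at or before this chunk
        uint32_t samplesInChunk = 0;
        for (const SampleToChunkEntry &e : stsc)
            if (i >= e.first)
                samplesInChunk = e.samplesPerChunk;

        if (sample < samplesBefore + samplesInChunk)
            return { i, sample - samplesBefore };

        samplesBefore += samplesInChunk;
    }

    return { chunkCount, 0 }; // sample is past the end
}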
bool QuickTimeDecoder::VideoTrackHandler::setReverse(bool reverse) {
    _reversed = reverse;

    if (_reversed) {
        if (_parent->editCount != 1) {
            // TODO: Myst's holo.mov needs this :(
            warning("Can only set reverse without edits");
            return false;
        }

        if (atLastEdit()) {
            // If we're at the end of the video, go to the penultimate edit.
            // The current frame is set to one beyond the last frame here;
            // one "past" the currently displayed frame.
            _curEdit = _parent->editCount - 1;
            _curFrame = _parent->frameCount;
            _nextFrameStartTime = _parent->editList[_curEdit].trackDuration + _parent->editList[_curEdit].timeOffset;
        } else if (_holdNextFrameStartTime) {
            // We just seeked, so "pivot" around the frame that should be displayed
            _curFrame++;
            _nextFrameStartTime -= getFrameDuration();
            _curFrame++;
        } else {
            // We need to put _curFrame to be the one after the one that should be displayed.
            // Since we're on the frame that should be displaying right now, add one.
            _curFrame++;
        }
    } else {
        // Update the edit list, if applicable
        // HACK: We're also accepting the time minus one because edit lists
        // aren't as accurate as one would hope.
        if (!atLastEdit() && getRateAdjustedFrameTime() >= getCurEditTimeOffset() + getCurEditTrackDuration() - 1) {
            _curEdit++;

            if (atLastEdit())
                return true;
        }

        if (_holdNextFrameStartTime) {
            // We just seeked, so "pivot" around the frame that should be displayed
            _curFrame--;
            _nextFrameStartTime += getFrameDuration();
        }

        // We need to put _curFrame to be the one before the one that should be displayed.
        // Since we're on the frame that should be displaying right now, subtract one.
        // (As long as the current frame isn't -1, of course)
        if (_curFrame > 0) {
            // We then need to handle the keyframe situation
            int targetFrame = _curFrame - 1;
            _curFrame = findKeyFrame(targetFrame) - 1;
            while (_curFrame < targetFrame)
                bufferNextFrame();
        } else if (_curFrame == 0) {
            // Make us start at the first frame (no keyframe needed)
            _curFrame--;
        }
    }

    return true;
}
const Graphics::Surface *QuickTimeDecoder::VideoTrackHandler::decodeNextFrame() {
    if (endOfTrack())
        return 0;

    if (_reversed) {
        // Subtract one to place us on the frame before the current displayed frame.
        _curFrame--;

        // We have one "dummy" frame at the end so that the last frame is displayed
        // for the right amount of time.
        if (_curFrame < 0)
            return 0;

        // Decode from the last key frame to the frame before the one we need.
        // TODO: Probably would be wise to do some caching
        int targetFrame = _curFrame;
        _curFrame = findKeyFrame(targetFrame) - 1;
        while (_curFrame != targetFrame - 1)
            bufferNextFrame();
    }

    const Graphics::Surface *frame = bufferNextFrame();

    if (_reversed) {
        if (_holdNextFrameStartTime) {
            // Don't set the next frame start time here; we just did a seek
            _holdNextFrameStartTime = false;
        } else {
            // Just need to subtract the time
            _nextFrameStartTime -= getFrameDuration();
        }
    } else {
        if (_holdNextFrameStartTime) {
            // Don't set the next frame start time here; we just did a seek
            _holdNextFrameStartTime = false;
        } else if (_durationOverride >= 0) {
            // Use our own duration from the edit list calculation
            _nextFrameStartTime += _durationOverride;
            _durationOverride = -1;
        } else {
            _nextFrameStartTime += getFrameDuration();
        }

        // Update the edit list, if applicable
        // HACK: We're also accepting the time minus one because edit lists
        // aren't as accurate as one would hope.
        if (!atLastEdit() && getRateAdjustedFrameTime() >= getCurEditTimeOffset() + getCurEditTrackDuration() - 1) {
            _curEdit++;

            if (!atLastEdit())
                enterNewEditList(true);
        }
    }

    if (frame && (_parent->scaleFactorX != 1 || _parent->scaleFactorY != 1)) {
        if (!_scaledSurface) {
            _scaledSurface = new Graphics::Surface();
            _scaledSurface->create(getScaledWidth().toInt(), getScaledHeight().toInt(), getPixelFormat());
        }

        _decoder->scaleSurface(frame, _scaledSurface, _parent->scaleFactorX, _parent->scaleFactorY);
        return _scaledSurface;
    }

    return frame;
}
bool QuickTimeDecoder::VideoTrackHandler::seek(const Audio::Timestamp &requestedTime) {
    uint32 convertedFrames = requestedTime.convertToFramerate(_decoder->_timeScale).totalNumberOfFrames();
    for (_curEdit = 0; !atLastEdit(); _curEdit++)
        if (convertedFrames >= _parent->editList[_curEdit].timeOffset && convertedFrames < _parent->editList[_curEdit].timeOffset + _parent->editList[_curEdit].trackDuration)
            break;

    // If we did reach the end of the track, break out
    if (atLastEdit())
        return true;

    // If this track is in an empty edit, position us at the next non-empty
    // edit. There's nothing else to do after this.
    if (_parent->editList[_curEdit].mediaTime == -1) {
        while (!atLastEdit() && _parent->editList[_curEdit].mediaTime == -1)
            _curEdit++;

        if (!atLastEdit())
            enterNewEditList(true);

        return true;
    }

    enterNewEditList(false);

    // One extra check for the end of a track
    if (atLastEdit())
        return true;

    // Now we're in the edit and need to figure out what frame we need
    Audio::Timestamp time = requestedTime.convertToFramerate(_parent->timeScale);
    while (getRateAdjustedFrameTime() < (uint32)time.totalNumberOfFrames()) {
        _curFrame++;
        if (_durationOverride >= 0) {
            _nextFrameStartTime += _durationOverride;
            _durationOverride = -1;
        } else {
            _nextFrameStartTime += getFrameDuration();
        }
    }

    // All that's left is to figure out what our starting time is going to be
    // Compare the starting point for the frame to where we need to be
    _holdNextFrameStartTime = getRateAdjustedFrameTime() != (uint32)time.totalNumberOfFrames();

    // If we went past the time, go back a frame. _curFrame before this point is at the frame
    // that should be displayed. This adjustment ensures it is on the frame before the one that
    // should be displayed.
    if (_holdNextFrameStartTime)
        _curFrame--;

    if (_reversed) {
        // Call setReverse again to update
        setReverse(true);
    } else {
        // Handle the keyframe here
        int32 destinationFrame = _curFrame + 1;

        assert(destinationFrame < (int32)_parent->frameCount);
        _curFrame = findKeyFrame(destinationFrame) - 1;
        while (_curFrame < destinationFrame - 1)
            bufferNextFrame();
    }

    return true;
}