void WebVTTParser::parseBytes(const char* data, unsigned length) { // 4.8.10.13.3 WHATWG WebVTT Parser algorithm. // 1-3 - Initial setup. unsigned position = 0; while (position < length) { String line = collectNextLine(data, length, &position); switch (m_state) { case Initial: // Buffer up at least 9 bytes before proceeding with checking for the file identifier. m_identifierData.append(data, length); if (m_identifierData.size() < bomLength + fileIdentifierLength) return; // 4-12 - Collect the first line and check for "WEBVTT". if (!hasRequiredFileIdentifier()) { if (m_client) m_client->fileFailedToParse(); return; } m_state = Header; m_identifierData.clear(); break; case Header: // 13-18 - Allow a header (comment area) under the WEBVTT line. if (line.isEmpty()) m_state = Id; break; case Id: // 19-29 - Allow any number of line terminators, then initialize new cue values. if (line.isEmpty()) break; resetCueValues(); // 30-39 - Check if this line contains an optional identifier or timing data. m_state = collectCueId(line); break; case TimingsAndSettings: // 40 - Collect cue timings and settings. m_state = collectTimingsAndSettings(line); break; case CueText: // 41-53 - Collect the cue text, create a cue, and add it to the output. m_state = collectCueText(line, length, position); break; case BadCue: // 54-62 - Collect and discard the remaining cue. m_state = ignoreBadCue(line); break; } } }
void InbandTextTrackPrivateAVF::processCue(CFArrayRef attributedStrings, double time)
{
    if (!m_player)
        return;

    // A cue carries no explicit duration: it is displayed until the next "cue"
    // (possibly empty) arrives, so any pending cue is closed at this delivery time.
    if (m_havePartialCue) {
        m_currentCueEndTime = time;
        LOG(Media, "InbandTextTrackPrivateAVF::processCue flushing cue: start=%.2f, end=%.2f, settings=\"%s\", content=\"%s\" \n",
            m_currentCueStartTime, m_currentCueEndTime,
            m_currentCueSettings.toString().utf8().data(),
            m_currentCueContent.toString().utf8().data());
        m_player->flushCurrentCue(this);
        resetCueValues();
    }

    // Accumulate the content and settings of every non-empty attributed string
    // into the current (partial) cue, stamped with this delivery time.
    CFIndex stringCount = CFArrayGetCount(attributedStrings);
    for (CFIndex stringIndex = 0; stringIndex < stringCount; ++stringIndex) {
        CFAttributedStringRef attributedString = static_cast<CFAttributedStringRef>(CFArrayGetValueAtIndex(attributedStrings, stringIndex));

        if (!attributedString || !CFAttributedStringGetLength(attributedString))
            continue;

        processCueAttributes(attributedString, m_currentCueContent, m_currentCueSettings);
        m_currentCueStartTime = time;
        m_havePartialCue = true;
    }
}
void InbandTextTrackPrivateAVF::beginSeeking() { // Forget any partially accumulated cue data as the seek could be to a time outside of the cue's // range, which will mean that the next cue delivered will result in the current cue getting the // incorrect duration. resetCueValues(); m_seeking = true; }
VTTParser::ParseState VTTParser::recoverCue(const String& line)
{
    // Steps 17 and 21 - discard the in-progress cue state.
    resetCueValues();

    // Step 22 - reinterpret this line as a timings-and-settings line.
    return collectTimingsAndSettings(line);
}
void InbandTextTrackPrivateAVF::processCue(CFArrayRef attributedStrings, double time)
{
    if (!client())
        return;

    // Cues do not have an explicit duration, they are displayed until the next "cue"
    // (which might be empty) is emitted, so a pending cue is closed at this delivery time.
    if (m_havePartialCue) {
        m_currentCueEndTime = time;
        if (m_currentCueEndTime >= m_currentCueStartTime) {
            for (size_t i = 0; i < m_cues.size(); i++) {
                GenericCueData* cueData = m_cues[i].get();

                LOG(Media, "InbandTextTrackPrivateAVF::processCue flushing cue: start=%.2f, end=%.2f, content=\"%s\" \n",
                    m_currentCueStartTime, m_currentCueEndTime, cueData->content().utf8().data());

                if (!cueData->content().length())
                    continue;

                cueData->setStartTime(m_currentCueStartTime);
                cueData->setEndTime(m_currentCueEndTime);

                // AVFoundation cue "position" is to the center of the text so adjust relative to the edge because we will use it to
                // set CSS "left".
                if (cueData->position() >= 0 && cueData->size() > 0)
                    cueData->setPosition(cueData->position() - cueData->size() / 2);

                LOG(Media, "InbandTextTrackPrivateAVF::processCue(%p) - adding cue for time = %.2f, position = %.2f, line = %.2f", this, cueData->startTime(), cueData->position(), cueData->line());

                client()->addGenericCue(this, cueData);
            }
        } else
            LOG(Media, "InbandTextTrackPrivateAVF::processCue negative length cue(s) ignored: start=%.2f, end=%.2f\n", m_currentCueStartTime, m_currentCueEndTime);

        resetCueValues();
    }

    if (!attributedStrings)
        return;

    CFIndex count = CFArrayGetCount(attributedStrings);
    if (!count)
        return;

    for (CFIndex i = 0; i < count; i++) {
        CFAttributedStringRef attributedString = static_cast<CFAttributedStringRef>(CFArrayGetValueAtIndex(attributedStrings, i));

        if (!attributedString || !CFAttributedStringGetLength(attributedString))
            continue;

        m_cues.append(adoptPtr(new GenericCueData));
        // FIX: the original indexed m_cues with the CFArray loop index (m_cues[i]).
        // When an earlier attributed string is skipped by the 'continue' above (or
        // m_cues already holds entries), 'i' runs ahead of m_cues.size() - 1, so the
        // access was out of bounds or hit the wrong cue. Always use the cue that was
        // just appended.
        processCueAttributes(attributedString, m_cues.last().get());
        m_currentCueStartTime = time;
        m_havePartialCue = true;
    }
}
void InbandTextTrackPrivateAVF::processCue(CFArrayRef attributedStrings, double time)
{
    if (!client())
        return;

    LOG(Media, "InbandTextTrackPrivateAVF::processCue - %li cues at time %.2f\n", attributedStrings ? CFArrayGetCount(attributedStrings) : 0, time);

    // Cues do not have an explicit duration, they are displayed until the next "cue"
    // (which might be empty) is emitted. Close out any pending cues at this delivery time.
    if (m_pendingCueStatus != None) {
        m_currentCueEndTime = time;

        if (m_currentCueEndTime >= m_currentCueStartTime) {
            for (size_t cueIndex = 0; cueIndex < m_cues.size(); cueIndex++) {
                GenericCueData* pendingCue = m_cues[cueIndex].get();

                if (m_pendingCueStatus == Valid) {
                    pendingCue->setEndTime(m_currentCueEndTime);
                    pendingCue->setStatus(GenericCueData::Complete);

                    LOG(Media, "InbandTextTrackPrivateAVF::processCue(%p) - updating cue: start=%.2f, end=%.2f, content=\"%s\"", this, pendingCue->startTime(), m_currentCueEndTime, pendingCue->content().utf8().data());
                    client()->updateGenericCue(this, pendingCue);
                } else {
                    // We have to assume that the implicit duration is invalid for cues delivered during a seek because the AVF decode pipeline may not
                    // see every cue, so DO NOT update cue duration while seeking.
                    LOG(Media, "InbandTextTrackPrivateAVF::processCue(%p) - ignoring cue delivered during seek: start=%.2f, end=%.2f, content=\"%s\"", this, pendingCue->startTime(), m_currentCueEndTime, pendingCue->content().utf8().data());
                }
            }
        } else
            LOG(Media, "InbandTextTrackPrivateAVF::processCue negative length cue(s) ignored: start=%.2f, end=%.2f\n", m_currentCueStartTime, m_currentCueEndTime);

        resetCueValues();
    }

    if (!attributedStrings)
        return;

    CFIndex stringCount = CFArrayGetCount(attributedStrings);
    if (!stringCount)
        return;

    // Start a new partial cue for every non-empty attributed string in this delivery.
    for (CFIndex stringIndex = 0; stringIndex < stringCount; stringIndex++) {
        CFAttributedStringRef attributedString = static_cast<CFAttributedStringRef>(CFArrayGetValueAtIndex(attributedStrings, stringIndex));

        if (!attributedString || !CFAttributedStringGetLength(attributedString))
            continue;

        RefPtr<GenericCueData> newCue = GenericCueData::create();
        processCueAttributes(attributedString, newCue.get());
        if (!newCue->content().length())
            continue;

        m_cues.append(newCue);

        m_currentCueStartTime = time;
        newCue->setStartTime(m_currentCueStartTime);
        // The end time is unknown until the next delivery closes this cue.
        newCue->setEndTime(numeric_limits<double>::infinity());

        // AVFoundation cue "position" is to the center of the text so adjust relative to the edge because we will use it to
        // set CSS "left".
        if (newCue->position() >= 0 && newCue->size() > 0)
            newCue->setPosition(newCue->position() - newCue->size() / 2);

        LOG(Media, "InbandTextTrackPrivateAVF::processCue(%p) - adding cue for time = %.2f, position = %.2f, line = %.2f", this, newCue->startTime(), newCue->position(), newCue->line());

        newCue->setStatus(GenericCueData::Partial);
        client()->addGenericCue(this, newCue.release());

        // Implicit durations are untrustworthy while seeking (see above).
        m_pendingCueStatus = seeking() ? DeliveredDuringSeek : Valid;
    }
}
void VTTParser::parse() { // WebVTT parser algorithm. (5.1 WebVTT file parsing.) // Steps 1 - 3 - Initial setup. String line; while (m_lineReader.getLine(line)) { switch (m_state) { case Initial: // Steps 4 - 9 - Check for a valid WebVTT signature. if (!hasRequiredFileIdentifier(line)) { if (m_client) m_client->fileFailedToParse(); return; } m_state = Header; break; case Header: // Steps 10 - 14 - Allow a header (comment area) under the WEBVTT line. collectMetadataHeader(line); if (line.isEmpty()) { if (m_client && m_regionList.size()) m_client->newRegionsParsed(); m_state = Id; break; } // Step 15 - Break out of header loop if the line could be a timestamp line. if (line.contains("-->")) m_state = recoverCue(line); // Step 16 - Line is not the empty string and does not contain "-->". break; case Id: // Steps 17 - 20 - Allow any number of line terminators, then initialize new cue values. if (line.isEmpty()) break; // Step 21 - Cue creation (start a new cue). resetCueValues(); // Steps 22 - 25 - Check if this line contains an optional identifier or timing data. m_state = collectCueId(line); break; case TimingsAndSettings: // Steps 26 - 27 - Discard current cue if the line is empty. if (line.isEmpty()) { m_state = Id; break; } // Steps 28 - 29 - Collect cue timings and settings. m_state = collectTimingsAndSettings(line); break; case CueText: // Steps 31 - 41 - Collect the cue text, create a cue, and add it to the output. m_state = collectCueText(line); break; case BadCue: // Steps 42 - 48 - Discard lines until an empty line or a potential timing line is seen. m_state = ignoreBadCue(line); break; } } }