/**
 * Seek callback for the WavPack demuxer.
 *
 * Strategy: first consult the stream's index; if the timestamp is found
 * there, seek the byte stream directly to the indexed position. Otherwise,
 * fall back to a linear scan: read and discard packets until one with
 * pts >= timestamp has been consumed.
 *
 * @param s            demuxer context
 * @param stream_index stream to seek in
 * @param timestamp    target timestamp in stream time base units
 * @param flags        AVSEEK_FLAG_* passed through to the index search
 * @return 0 on success, -1 on failure (out-of-range timestamp or read error)
 */
static int wv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    AVStream *st = s->streams[stream_index];
    WVContext *wc = s->priv_data;
    AVPacket pkt1, *pkt = &pkt1;
    int ret;
    int index = av_index_search_timestamp(st, timestamp, flags);
    int64_t pos, pts;

    /* if found, seek there */
    if (index >= 0){
        /* mark the current block as consumed so the next read starts fresh
           at the seeked position */
        wc->block_parsed = 1;
        url_fseek(s->pb, st->index_entries[index].pos, SEEK_SET);
        return 0;
    }
    /* if timestamp is out of bounds, return error */
    if(timestamp < 0 || timestamp >= s->duration)
        return -1;

    /* remember the current position so we can restore it if the scan fails */
    pos = url_ftell(s->pb);
    do{
        ret = av_read_frame(s, pkt);
        if (ret < 0){
            /* scan failed: rewind to where we started */
            url_fseek(s->pb, pos, SEEK_SET);
            return -1;
        }
        pts = pkt->pts;
        av_free_packet(pkt);
    }while(pts < timestamp); /* stop once we've read past the target */
    return 0;
}
/**
 * Seek callback for the APE (Monkey's Audio) demuxer.
 *
 * Resolves the requested timestamp to a frame index via the stream index
 * and records it as the next frame to be read.
 *
 * @return 0 on success, -1 if no matching index entry exists.
 */
static int ape_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    APEContext *ape = s->priv_data;
    int frame = av_index_search_timestamp(s->streams[stream_index],
                                          timestamp, flags);

    if (frame < 0)
        return -1;

    ape->currentframe = frame;
    return 0;
}
/**
 * Seek callback for the TTA (True Audio) demuxer.
 *
 * Looks up the target timestamp in the stream index, remembers the frame
 * number for the reader, and repositions the byte stream at the indexed
 * file offset.
 *
 * @return 0 on success, -1 if the timestamp is not in the index.
 */
static int tta_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    AVStream *stream = s->streams[stream_index];
    TTAContext *ctx = s->priv_data;
    int frame_idx = av_index_search_timestamp(stream, timestamp, flags);

    if (frame_idx < 0)
        return -1;

    ctx->currentframe = frame_idx;
    url_fseek(s->pb, stream->index_entries[frame_idx].pos, SEEK_SET);
    return 0;
}
/**
 * Find the index-entry position for a wanted timestamp in the wrapped
 * AVStream's index.
 *
 * @param wantedTimeStamp timestamp to look up, in stream time base units
 * @param flags           AVSEEK_FLAG_* search modifiers
 * @return the index entry number, or -1 when there is no underlying stream
 *         or no matching entry.
 */
int32_t
Stream :: findTimeStampPositionInIndex(int64_t wantedTimeStamp, int32_t flags)
{
    // Guard: without a wrapped stream there is nothing to search.
    if (!mStream)
        return -1;

    return av_index_search_timestamp(mStream, wantedTimeStamp, flags);
}
/**
 * Seek callback for the Sega FILM/CPK demuxer.
 *
 * Resolves the timestamp through the stream index, seeks the I/O context
 * to the matching file offset, and updates the demuxer's sample cursor.
 *
 * @return 0 on success, a negative value from the index search or
 *         avio_seek() on failure.
 */
static int film_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    FilmDemuxContext *film = s->priv_data;
    AVStream *st = s->streams[stream_index];
    int idx;
    int64_t seek_result;

    idx = av_index_search_timestamp(st, timestamp, flags);
    if (idx < 0)
        return idx;

    seek_result = avio_seek(s->pb, st->index_entries[idx].pos, SEEK_SET);
    if (seek_result < 0)
        return seek_result;

    /* only commit the new position once the byte seek succeeded */
    film->current_sample = idx;
    return 0;
}
long GetNumberVideoFrames(const char *file) { long nb_frames = 0L; AVFormatContext *pFormatCtx = NULL; av_log_set_level(AV_LOG_QUIET); av_register_all(); // Open video file if (avformat_open_input(&pFormatCtx, file, NULL, NULL)) return -1 ; // Couldn't open file // Retrieve stream information if(avformat_find_stream_info(pFormatCtx, NULL)<0) return -1; // Couldn't find stream information // Find the first video stream int videoStream=-1; for(unsigned int i=0; i<pFormatCtx->nb_streams; i++) { if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) { videoStream=i; break; } } if(videoStream==-1) return -1; // Didn't find a video stream AVStream *str = pFormatCtx->streams[videoStream]; nb_frames = str->nb_frames; if (nb_frames > 0) { //the easy way if value is already contained in struct avformat_close_input(&pFormatCtx); return nb_frames; } else { // frames must be counted AVPacket packet; nb_frames = (long)av_index_search_timestamp(str,str->duration, AVSEEK_FLAG_ANY|AVSEEK_FLAG_BACKWARD); // Close the video file int timebase = str->time_base.den / str->time_base.num; if (nb_frames <= 0) nb_frames = str->duration/timebase; avformat_close_input(&pFormatCtx); return nb_frames; } }
/**
 * Seek callback for the image2 (image sequence) demuxer.
 *
 * Two modes:
 *  - when timestamps come from file metadata (ts_from_file), seek via
 *    the stream index; the index entry's pos field holds the image number;
 *  - otherwise, treat the timestamp as a frame number within the
 *    [img_first, img_last] sequence (wrapping when looping is enabled).
 *
 * @return 0 on success, -1 on an unresolvable or out-of-range timestamp.
 */
static int img_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    VideoDemuxData *s1 = s->priv_data;
    AVStream *st = s->streams[0];

    if (s1->ts_from_file) {
        /* index-based lookup; pos stores the image number, not a byte offset */
        int entry = av_index_search_timestamp(st, timestamp, flags);
        if (entry < 0)
            return -1;
        s1->img_number = st->index_entries[entry].pos;
        return 0;
    }

    /* sequence-number based seeking */
    int64_t range = s1->img_last - s1->img_first;
    if (timestamp < 0)
        return -1;
    if (!s1->loop && timestamp > range)
        return -1;

    s1->img_number = timestamp % (range + 1) + s1->img_first;
    s1->pts = timestamp;
    return 0;
}
/**
 * Seek callback for the Silicon Graphics Movie (MV) demuxer.
 *
 * Only timestamp-based seeking on a seekable input is supported. The
 * resolved frame number is applied to every stream so that audio and
 * video stay in lockstep.
 *
 * @return 0 on success, AVERROR(ENOSYS) for unsupported seek modes,
 *         AVERROR(EIO) for non-seekable input, AVERROR_INVALIDDATA when
 *         the timestamp is not found in the index.
 */
static int mv_read_seek(AVFormatContext *avctx, int stream_index, int64_t timestamp, int flags)
{
    MvContext *mv = avctx->priv_data;
    int target, n;

    /* frame- and byte-based seeking are not implemented */
    if (flags & (AVSEEK_FLAG_FRAME | AVSEEK_FLAG_BYTE))
        return AVERROR(ENOSYS);
    if (!(avctx->pb->seekable & AVIO_SEEKABLE_NORMAL))
        return AVERROR(EIO);

    target = av_index_search_timestamp(avctx->streams[stream_index],
                                       timestamp, flags);
    if (target < 0)
        return AVERROR_INVALIDDATA;

    /* keep all streams aligned on the same frame */
    for (n = 0; n < avctx->nb_streams; n++)
        mv->frame[n] = target;
    return 0;
}
/**
 * Seek callback for the MP3 demuxer (Xing TOC based).
 *
 * Requires a Xing/Info TOC, which populates the stream index: resolve the
 * timestamp to an index entry, reposition the input at its byte offset,
 * and update the current DTS to the entry's timestamp.
 *
 * @return 0 on success, AVERROR(ENOSYS) without a TOC, or a negative
 *         error from the index search / reposition.
 */
static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    MP3DecContext *mp3 = s->priv_data;
    AVStream *st = s->streams[0];
    int64_t idx = av_index_search_timestamp(st, timestamp, flags);
    AVIndexEntry *entry;
    int64_t err;

    /* seeking here relies entirely on the Xing TOC index */
    if (!mp3->xing_toc)
        return AVERROR(ENOSYS);
    if (idx < 0)
        return idx;

    entry = &st->index_entries[idx];
    err = reposition(s, entry->pos);
    if (err < 0)
        return err;

    ff_update_cur_dts(s, st, entry->timestamp);
    return 0;
}
/**
 * Seek callback for the DHAV (Dahua) demuxer.
 *
 * Resolves the timestamp via the stream index, seeks the byte stream to
 * the matching offset, then resets the per-stream timing state of every
 * stream to the indexed timestamp so subsequent packets are re-stamped
 * consistently.
 *
 * @return 0 on success, -1 on lookup or seek failure.
 */
static int dhav_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    /* renamed from 'st' to avoid shadowing by the loop-local stream below */
    AVStream *seek_st = s->streams[stream_index];
    int entry = av_index_search_timestamp(seek_st, timestamp, flags);
    int64_t new_pts;

    if (entry < 0)
        return -1;
    if (avio_seek(s->pb, seek_st->index_entries[entry].pos, SEEK_SET) < 0)
        return -1;

    new_pts = seek_st->index_entries[entry].timestamp;

    /* reset timing state on every stream, not just the seeked one */
    for (int n = 0; n < s->nb_streams; n++) {
        DHAVStream *dst = s->streams[n]->priv_data;
        dst->pts = new_pts;
        dst->last_timestamp = AV_NOPTS_VALUE;
    }
    return 0;
}
/**
 * Seek the stream to a target frame or time.
 *
 * @param flags  B_MEDIA_SEEK_* flags selecting frame- vs time-based seeking
 *               and the closest-backward/forward preference.
 * @param frame  in/out: target frame when B_MEDIA_SEEK_TO_FRAME is set;
 *               updated to the frame actually reached.
 * @param time   in/out: target time in microseconds; updated to the time
 *               actually reached.
 * @return B_OK on success; B_NO_INIT if the stream is not set up;
 *         B_NOT_SUPPORTED / B_ERROR on failure.
 *
 * Two strategies are used: for byte-based containers (fSeekByBytes) an
 * iterative bisection over byte positions, otherwise a single
 * avformat_seek_file() guided by an index lookup, with fallbacks.
 */
status_t
StreamBase::Seek(uint32 flags, int64* frame, bigtime_t* time)
{
	BAutolock _(fStreamLock);

	if (fContext == NULL || fStream == NULL)
		return B_NO_INIT;

	TRACE_SEEK("StreamBase::Seek(%ld,%s%s%s%s, %lld, "
		"%lld)\n", VirtualIndex(),
		(flags & B_MEDIA_SEEK_TO_FRAME) ? " B_MEDIA_SEEK_TO_FRAME" : "",
		(flags & B_MEDIA_SEEK_TO_TIME) ? " B_MEDIA_SEEK_TO_TIME" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_BACKWARD)
			? " B_MEDIA_SEEK_CLOSEST_BACKWARD" : "",
		(flags & B_MEDIA_SEEK_CLOSEST_FORWARD)
			? " B_MEDIA_SEEK_CLOSEST_FORWARD" : "",
		*frame, *time);

	double frameRate = FrameRate();
	if ((flags & B_MEDIA_SEEK_TO_FRAME) != 0) {
		// Seeking is always based on time, initialize it when client seeks
		// based on frame.
		*time = (bigtime_t)(*frame * 1000000.0 / frameRate + 0.5);
	}

	int64_t timeStamp = *time;

	// Default to seeking to the nearest keyframe at or before the target;
	// only search forward when the client explicitly asks for it.
	int searchFlags = AVSEEK_FLAG_BACKWARD;
	if ((flags & B_MEDIA_SEEK_CLOSEST_FORWARD) != 0)
		searchFlags = 0;

	if (fSeekByBytes) {
		// Byte-based seeking: estimate a byte position from the time/duration
		// ratio, seek there, then iteratively bisect using the PTS of the
		// first packet read after each seek.
		searchFlags |= AVSEEK_FLAG_BYTE;

		BAutolock _(fSourceLock);
		int64_t fileSize;
		if (fSource->GetSize(&fileSize) != B_OK)
			return B_NOT_SUPPORTED;
		int64_t duration = Duration();
		if (duration == 0)
			return B_NOT_SUPPORTED;

		// Linear estimate of the byte offset for the requested time.
		timeStamp = int64_t(fileSize * ((double)timeStamp / duration));
		if ((flags & B_MEDIA_SEEK_CLOSEST_BACKWARD) != 0) {
			// Back off a little so we land before the target.
			timeStamp -= 65536;
			if (timeStamp < 0)
				timeStamp = 0;
		}

		bool seekAgain = true;
		bool seekForward = true;
		bigtime_t lastFoundTime = -1;
		int64_t closestTimeStampBackwards = -1;
		while (seekAgain) {
			if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp,
					INT64_MAX, searchFlags) < 0) {
				TRACE("  avformat_seek_file() (by bytes) failed.\n");
				return B_ERROR;
			}
			seekAgain = false;

			// Our last packet is toast in any case. Read the next one so we
			// know where we really seeked.
			fReusePacket = false;
			if (_NextPacket(true) == B_OK) {
				// Skip packets without a PTS; we need one to know where we are.
				while (fPacket.pts == kNoPTSValue) {
					fReusePacket = false;
					if (_NextPacket(true) != B_OK)
						return B_ERROR;
				}
				if (fPacket.pos >= 0)
					timeStamp = fPacket.pos;
				bigtime_t foundTime
					= _ConvertFromStreamTimeBase(fPacket.pts);
				if (foundTime != lastFoundTime) {
					lastFoundTime = foundTime;
					if (foundTime > *time) {
						// Overshot the target.
						if (closestTimeStampBackwards >= 0) {
							// We already know a good position before the
							// target from a previous iteration; go there.
							timeStamp = closestTimeStampBackwards;
							seekAgain = true;
							seekForward = false;
							continue;
						}
						// Step back proportionally to how far we overshot.
						int64_t diff = int64_t(fileSize
							* ((double)(foundTime - *time) / (2 * duration)));
						if (diff < 8192)
							break;
						timeStamp -= diff;
						TRACE_SEEK("  need to seek back (%lld) (time: %.2f "
							"-> %.2f)\n", timeStamp, *time / 1000000.0,
							foundTime / 1000000.0);
						if (timeStamp < 0)
							foundTime = 0;
						else {
							seekAgain = true;
							continue;
						}
					} else if (seekForward && foundTime < *time - 100000) {
						// Undershot by more than 100ms; remember this position
						// as the best known pre-target spot, then step forward.
						closestTimeStampBackwards = timeStamp;
						int64_t diff = int64_t(fileSize
							* ((double)(*time - foundTime) / (2 * duration)));
						if (diff < 8192)
							break;
						timeStamp += diff;
						TRACE_SEEK("  need to seek forward (%lld) (time: "
							"%.2f -> %.2f)\n", timeStamp, *time / 1000000.0,
							foundTime / 1000000.0);
						if (timeStamp > duration)
							foundTime = duration;
						else {
							seekAgain = true;
							continue;
						}
					}
				}
				TRACE_SEEK("  found time: %lld -> %lld (%.2f)\n", *time,
					foundTime, foundTime / 1000000.0);
				// Report back where we actually landed.
				*time = foundTime;
				*frame = (uint64)(*time * frameRate / 1000000LL + 0.5);
				TRACE_SEEK("  seeked frame: %lld\n", *frame);
			} else {
				TRACE_SEEK("  _NextPacket() failed!\n");
				return B_ERROR;
			}
		}
	} else {
		// We may not get a PTS from the next packet after seeking, so
		// we try to get an expected time from the index.
		int64_t streamTimeStamp = _ConvertToStreamTimeBase(*time);
		int index = av_index_search_timestamp(fStream, streamTimeStamp,
			searchFlags);
		if (index < 0) {
			TRACE("  av_index_search_timestamp() failed\n");
		} else {
			if (index > 0) {
				const AVIndexEntry& entry = fStream->index_entries[index];
				streamTimeStamp = entry.timestamp;
			} else {
				// Some demuxers use the first index entry to store some
				// other information, like the total playing time for example.
				// Assume the timeStamp of the first entry is always 0.
				// TODO: Handle start-time offset?
				streamTimeStamp = 0;
			}
			bigtime_t foundTime = _ConvertFromStreamTimeBase(streamTimeStamp);
			bigtime_t timeDiff = foundTime > *time
				? foundTime - *time : *time - foundTime;
			if (timeDiff > 1000000
				&& (fStreamBuildsIndexWhileReading
					|| index == fStream->nb_index_entries - 1)) {
				// If the stream is building the index on the fly while parsing
				// it, we only have entries in the index for positions already
				// decoded, i.e. we cannot seek into the future. In that case,
				// just assume that we can seek where we want and leave
				// time/frame unmodified. Since successfully seeking one time
				// will generate index entries for the seeked to position, we
				// need to remember this in fStreamBuildsIndexWhileReading,
				// since when seeking back there will be later index entries,
				// but we still want to ignore the found entry.
				fStreamBuildsIndexWhileReading = true;
				TRACE_SEEK("  Not trusting generic index entry. "
					"(Current count: %d)\n", fStream->nb_index_entries);
			} else {
				// If we found a reasonably time, write it into *time.
				// After seeking, we will try to read the sought time from
				// the next packet. If the packet has no PTS value, we may
				// still have a more accurate time from the index lookup.
				*time = foundTime;
			}
		}

		if (avformat_seek_file(fContext, -1, INT64_MIN, timeStamp, INT64_MAX,
				searchFlags) < 0) {
			TRACE("  avformat_seek_file() failed.\n");
			// Try to fall back to av_seek_frame()
			timeStamp = _ConvertToStreamTimeBase(timeStamp);
			if (av_seek_frame(fContext, fStream->index, timeStamp,
					searchFlags) < 0) {
				TRACE("  avformat_seek_frame() failed as well.\n");
				// Fall back to seeking to the beginning by bytes
				timeStamp = 0;
				if (av_seek_frame(fContext, fStream->index, timeStamp,
						AVSEEK_FLAG_BYTE) < 0) {
					TRACE("  avformat_seek_frame() by bytes failed as "
						"well.\n");
					// Do not propagate error in any case. We fail if we can't
					// read another packet.
				} else
					*time = 0;
			}
		}

		// Our last packet is toast in any case. Read the next one so
		// we know where we really sought.
		bigtime_t foundTime = *time;
		fReusePacket = false;
		if (_NextPacket(true) == B_OK) {
			if (fPacket.pts != kNoPTSValue)
				foundTime = _ConvertFromStreamTimeBase(fPacket.pts);
			else
				TRACE_SEEK("  no PTS in packet after seeking\n");
		} else
			TRACE_SEEK("  _NextPacket() failed!\n");

		// Report the position actually reached back to the caller.
		*time = foundTime;
		TRACE_SEEK("  sought time: %.2fs\n", *time / 1000000.0);
		*frame = (uint64)(*time * frameRate / 1000000.0 + 0.5);
		TRACE_SEEK("  sought frame: %lld\n", *frame);
	}

	return B_OK;
}
/**
 * Demux one packet from a Bink file.
 *
 * A Bink frame consists of one length-prefixed chunk per audio track
 * followed by the video data. Each call returns either one audio packet
 * (advancing current_track) or, once all audio tracks of the frame are
 * consumed, the video packet for the frame. current_track == -1 means a
 * new frame must be located first (via the index on stream 0).
 *
 * @return 0 on success, AVERROR(EIO) on EOF/corruption, or another
 *         negative error from av_get_packet().
 *
 * Fix: the audio-size bounds check previously computed
 * remain_packet_size - 4 without verifying remain_packet_size >= 4;
 * with an unsigned remainder this underflows to a huge value and lets
 * an oversized audio_size slip through, corrupting the remaining-size
 * accounting. The check now rejects frames with fewer than 4 bytes left.
 */
static int read_packet(AVFormatContext *s, AVPacket *pkt)
{
    BinkDemuxContext *bink = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret;

    if (bink->current_track < 0) {
        /* start of a new frame: locate it through the video stream index */
        int index_entry;
        AVStream *st = s->streams[0]; // stream 0 is video stream with index

        if (bink->video_pts >= st->duration)
            return AVERROR(EIO);

        index_entry = av_index_search_timestamp(st, bink->video_pts,
                                                AVSEEK_FLAG_ANY);
        if (index_entry < 0) {
            av_log(s, AV_LOG_ERROR,
                   "could not find index entry for frame %"PRId64"\n",
                   bink->video_pts);
            return AVERROR(EIO);
        }

        bink->remain_packet_size = st->index_entries[index_entry].size;
        bink->current_track = 0;
    }

    while (bink->current_track < bink->num_audio_tracks) {
        uint32_t audio_size = avio_rl32(pb);
        /* Guard remain_packet_size < 4 explicitly: subtracting 4 first
         * would underflow and defeat the bounds check (fix). */
        if (bink->remain_packet_size < 4 ||
            audio_size > bink->remain_packet_size - 4) {
            av_log(s, AV_LOG_ERROR,
                   "frame %"PRId64": audio size in header (%u) > size of packet left (%u)\n",
                   bink->video_pts, audio_size, bink->remain_packet_size);
            return AVERROR(EIO);
        }
        bink->remain_packet_size -= 4 + audio_size;
        bink->current_track++;
        if (audio_size >= 4) { /* get one audio packet per track */
            if ((ret = av_get_packet(pb, pkt, audio_size)) < 0)
                return ret;
            pkt->stream_index = bink->current_track;
            pkt->pts = bink->audio_pts[bink->current_track - 1];

            /* Each audio packet reports the number of decompressed samples
               (in bytes). We use this value to calculate the audio PTS */
            if (pkt->size >= 4)
                bink->audio_pts[bink->current_track - 1] +=
                    AV_RL32(pkt->data)
                    / (2 * s->streams[bink->current_track]->codec->channels);
            return 0;
        } else {
            /* empty/undersized audio chunk: skip its payload */
            avio_seek(pb, audio_size, SEEK_CUR);
        }
    }

    /* get video packet */
    if ((ret = av_get_packet(pb, pkt, bink->remain_packet_size)) < 0)
        return ret;
    pkt->stream_index = 0;
    pkt->pts = bink->video_pts++;
    pkt->flags |= AV_PKT_FLAG_KEY;

    /* -1 instructs the next call to read_packet() to read the next frame */
    bink->current_track = -1;
    return 0;
}