/*****************************************************************************
 * DecodeAudio: Called to decode one frame
 *
 * Decodes compressed audio from *pp_block using the legacy
 * avcodec_decode_audio2() API into the decoder's persistent output buffer
 * (p_sys->p_output), then hands the PCM out in slices via SplitBuffer().
 * Returns one aout buffer per call, or NULL when no output is available.
 *****************************************************************************/
aout_buffer_t * DecodeAudio ( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    int i_used, i_output;
    aout_buffer_t *p_buffer;
    block_t *p_block;

    if( !pp_block || !*pp_block ) return NULL;
    p_block = *pp_block;

    /* On discontinuity/corruption: drop the input, reset the codec and the
     * output clock.  MP2/MP3 decoders emit garbage right after a flush, so
     * schedule the next few frames to be silenced (i_reject_count). */
    if( p_block->i_flags & (BLOCK_FLAG_DISCONTINUITY|BLOCK_FLAG_CORRUPTED) )
    {
        block_Release( p_block );
        avcodec_flush_buffers( p_sys->p_context );
        p_sys->i_samples = 0;
        date_Set( &p_sys->end_date, 0 );
        if( p_sys->i_codec_id == CODEC_ID_MP2 || p_sys->i_codec_id == CODEC_ID_MP3 )
            p_sys->i_reject_count = 3;
        return NULL;
    }

    if( p_sys->i_samples > 0 )
    {
        /* More data: previously decoded samples are still pending; emit the
         * next slice before consuming new input. */
        p_buffer = SplitBuffer( p_dec );
        if( !p_buffer ) block_Release( p_block );
        return p_buffer;
    }

    if( !date_Get( &p_sys->end_date ) && !p_block->i_pts )
    {
        /* We've just started the stream, wait for the first PTS. */
        block_Release( p_block );
        return NULL;
    }

    if( p_block->i_buffer <= 0 )
    {
        block_Release( p_block );
        return NULL;
    }

    /* libavcodec requires FF_INPUT_BUFFER_PADDING_SIZE zeroed bytes past the
     * end of the input; reallocate once and flag the block so we don't pad
     * again on re-entry with the same block. */
    if( (p_block->i_flags & BLOCK_FLAG_PRIVATE_REALLOCATED) == 0 )
    {
        *pp_block = p_block = block_Realloc( p_block, 0, p_block->i_buffer + FF_INPUT_BUFFER_PADDING_SIZE );
        if( !p_block )
            return NULL;
        p_block->i_buffer -= FF_INPUT_BUFFER_PADDING_SIZE;
        memset( &p_block->p_buffer[p_block->i_buffer], 0, FF_INPUT_BUFFER_PADDING_SIZE );
        p_block->i_flags |= BLOCK_FLAG_PRIVATE_REALLOCATED;
    }

    /* Feed the decoder until it produces output or the input is exhausted. */
    do
    {
        i_output = __MAX( p_block->i_buffer, p_sys->i_output_max );
        if( i_output > p_sys->i_output_max )
        {
            /* Grow output buffer if necessary (eg. for PCM data)
             * NOTE(review): i_output_max is not updated here, so this
             * realloc may repeat on every call — confirm against the
             * decoder_sys_t bookkeeping elsewhere in this file. */
            p_sys->p_output = av_realloc( p_sys->p_output, i_output );
        }

        /* i_output is in/out: in = capacity of p_output, out = bytes of PCM
         * actually produced.  i_used = compressed bytes consumed. */
        i_used = avcodec_decode_audio2( p_sys->p_context, (int16_t*)p_sys->p_output, &i_output, p_block->p_buffer, p_block->i_buffer );

        if( i_used < 0 || i_output < 0 )
        {
            if( i_used < 0 )
                msg_Warn( p_dec, "cannot decode one frame (%zu bytes)", p_block->i_buffer );
            block_Release( p_block );
            return NULL;
        }
        else if( (size_t)i_used > p_block->i_buffer )
        {
            /* Defensive clamp: never consume more than we actually have. */
            i_used = p_block->i_buffer;
        }

        p_block->i_buffer -= i_used;
        p_block->p_buffer += i_used;

    } while( p_block->i_buffer > 0 && i_output <= 0 );

    /* Sanity-check what the decoder reported before trusting it. */
    if( p_sys->p_context->channels <= 0 || p_sys->p_context->channels > 8 || p_sys->p_context->sample_rate <= 0 )
    {
        msg_Warn( p_dec, "invalid audio properties channels count %d, sample rate %d", p_sys->p_context->channels, p_sys->p_context->sample_rate );
        block_Release( p_block );
        return NULL;
    }

    /* Sample rate changed (or first frame): restart the output clock. */
    if( p_dec->fmt_out.audio.i_rate != (unsigned int)p_sys->p_context->sample_rate )
    {
        date_Init( &p_sys->end_date, p_sys->p_context->sample_rate, 1 );
        date_Set( &p_sys->end_date, p_block->i_pts );
    }

    /* **** Set audio output parameters **** */
    SetupOutputFormat( p_dec, true );

    /* Resynchronize on the container PTS when it disagrees with our clock. */
    if( p_block->i_pts != 0 && p_block->i_pts != date_Get( &p_sys->end_date ) )
    {
        date_Set( &p_sys->end_date, p_block->i_pts );
    }
    /* Consume the PTS so re-entry with the remainder doesn't reuse it. */
    p_block->i_pts = 0;

    /* **** Now we can output these samples **** */
    p_sys->i_samples = i_output / (p_dec->fmt_out.audio.i_bitspersample / 8) / p_sys->p_context->channels;
    p_sys->p_samples = p_sys->p_output;

    /* Silent unwanted samples (post-flush garbage, see i_reject_count). */
    if( p_sys->i_reject_count > 0 )
    {
        memset( p_sys->p_output, 0, i_output );
        p_sys->i_reject_count--;
    }

    p_buffer = SplitBuffer( p_dec );
    if( !p_buffer ) block_Release( p_block );
    return p_buffer;
}
/*
 * VideoThread::run: video decoding thread body.
 *
 * Reads compressed packets from _is->videoq, decodes them into a YUV frame,
 * converts to the output pixel format (CONV_FORMAT) via sws_scale, and
 * pushes the converted frame (with its PTS, in seconds) onto _is->pictq.
 * Exits when the packet queue signals quit or the picture queue rejects a put.
 */
void VideoThread::run(){
    /* allocate the YUV and RGB frames */
    pFrame = avcodec_alloc_frame();
    pFrameRGB = avcodec_alloc_frame();

    /* from this point on, allow the window to refresh */
    _is->window->startdisplay();

    //Calculate the size in bytes that a picture of the given width and height would occupy if stored in the given picture format.
    bytes = avpicture_get_size(CONV_FORMAT, _is->video_st->codec->width, _is->video_st->codec->height);

    uint8_t *video_buffer = (uint8_t*)av_malloc( bytes * sizeof(uint8_t) );

    /* point pFrameRGB's data planes into video_buffer */
    avpicture_fill((AVPicture *)pFrameRGB, video_buffer, CONV_FORMAT, _is->video_st->codec->width, _is->video_st->codec->height);

    /* frame reading loop:
       - take a packet from the packet queue
       - decode the YUV frame
       - convert the frame to RGB
       - append the RGB frame to the picture queue */
    while(1) {

        /* NOTE(review): this is a busy-wait while paused (the usleep is
         * commented out) — burns a full core until unpaused; confirm
         * whether re-enabling the sleep is intended. */
        if(_is->ut.getPauseValue() && !_is->ut.getStopValue()){
            continue;
            //this->usleep(10000);
        };

        // read packets from the queue (blocking get)
        if(_is->videoq.Get(packet, 1) < 0){
            // means we quit getting packets
            //qDebug() << "quitting getting packets - videothread";
            break;
        }

        // check whether we read a FLUSH packet (seek happened):
        // reset codec state, picture queue and timing bookkeeping
        if(packet->data == _is->flush_pkt->data){
            //qDebug() << "VideoThread - read FLUSH PKT";
            avcodec_flush_buffers(_is->video_st->codec);
            _is->pictq.Flush();
            _is->frame_last_pts = AV_NOPTS_VALUE;
            _is->frame_last_delay = 0;
            _is->frame_timer = (double)av_gettime() / 1000000.0;
            continue;
        }

        pts = 0; // reset pts to 0, i.e. "not found"

        //Save global pts to be stored in pFrame in first call
        _is->global_video_pkt_pts = packet->pts;
        // Decode video frame
        avcodec_decode_video2(_is->video_st->codec, pFrame, &frameFinished, packet);

        // note: opaque is an internal pFrame field left free
        // for the user to stash auxiliary data
        /* case: could NOT get a DTS, but the buffer-allocation callback
         * stored the first packet's PTS in pFrame->opaque */
        if (packet->dts == (int64_t)AV_NOPTS_VALUE && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
            // fetch the PTS of the first packet, placed in opaque by our
            // buffer allocation function
            pts = *(uint64_t *) pFrame->opaque;
        }
        /* case: DTS is available — use it as the PTS estimate */
        else if (packet->dts != (int64_t)AV_NOPTS_VALUE) {
            pts = packet->dts;
        }
        else {
            pts = 0;
        }
        /** PTS = PTS * (time_base converted to double)
            this yields the PTS in seconds */
        pts *= av_q2d(_is->video_st->time_base);

        // Did we get a video frame?
        if(frameFinished) {
            synchronize_video(); // PTS synchronization

            /* convert pFrame -> pFrameRGB */
            sws_scale(_is->sws_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize, 0, _is->video_st->codec->height, pFrameRGB->data, pFrameRGB->linesize);

            /* throttle: wait while the picture queue is full (unless stopping) */
            while(_is->pictq.getSize() > VIDEO_PICTURE_QUEUE_SIZE && (_is->ut.getStopValue() == false)){
                this->usleep(1000);
            }

            /* append the RGB frame to the picture queue.
             * NOTE(review): pFrameRGB's pixel buffer is reused on the next
             * iteration — this relies on Put() copying the data; confirm. */
            if(_is->pictq.Put(pFrameRGB, pts) < 0) {
                //qDebug() << "quitting putting frame - videothread";
                break;
            }
        }
        av_free_packet(packet);
    }
    av_free(pFrame);
    av_free(pFrameRGB);
    return;
}
static pixmap_t * fa_image_from_video2(const char *url0, const image_meta_t *im, const char *cacheid) { pixmap_t *pm = NULL; char *url = mystrdupa(url0); char *tim = strchr(url, '#'); *tim++ = 0; if(ifv_url == NULL || strcmp(url, ifv_url)) { // Need to open int i; AVFormatContext *fctx; AVIOContext *avio; if((avio = fa_libav_open(url, 65536, NULL, 0, 0)) == NULL) return NULL; if((fctx = fa_libav_open_format(avio, url, NULL, 0, NULL)) == NULL) { fa_libav_close(avio); return NULL; } if(!strcmp(fctx->iformat->name, "avi")) fctx->flags |= AVFMT_FLAG_GENPTS; AVCodecContext *ctx = NULL; for(i = 0; i < fctx->nb_streams; i++) { if(fctx->streams[i]->codec != NULL && fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { ctx = fctx->streams[i]->codec; break; } } if(ctx == NULL) { fa_libav_close_format(fctx); return NULL; } AVCodec *codec = avcodec_find_decoder(ctx->codec_id); if(codec == NULL) { fa_libav_close_format(fctx); return NULL; } if(avcodec_open(ctx, codec) < 0) { fa_libav_close_format(fctx); return NULL; } ifv_close(); ifv_stream = i; ifv_url = strdup(url); ifv_fctx = fctx; ifv_ctx = ctx; } AVPacket pkt; AVFrame *frame = avcodec_alloc_frame(); int got_pic; int secs = atoi(tim); AVStream *st = ifv_fctx->streams[ifv_stream]; int64_t ts = av_rescale(secs, st->time_base.den, st->time_base.num); if(av_seek_frame(ifv_fctx, ifv_stream, ts, AVSEEK_FLAG_BACKWARD) < 0) { ifv_close(); return NULL; } avcodec_flush_buffers(ifv_ctx); int cnt = 500; while(1) { int r; r = av_read_frame(ifv_fctx, &pkt); if(r == AVERROR(EAGAIN)) continue; if(r == AVERROR_EOF) break; if(r != 0) { ifv_close(); break; } if(pkt.stream_index != ifv_stream) { av_free_packet(&pkt); continue; } cnt--; int want_pic = pkt.pts >= ts || cnt <= 0; ifv_ctx->skip_frame = want_pic ? 
AVDISCARD_DEFAULT : AVDISCARD_NONREF; avcodec_decode_video2(ifv_ctx, frame, &got_pic, &pkt); if(got_pic == 0 || !want_pic) continue; int w,h; if(im->req_width != -1 && im->req_height != -1) { w = im->req_width; h = im->req_height; } else if(im->req_width != -1) { w = im->req_width; h = im->req_width * ifv_ctx->height / ifv_ctx->width; } else if(im->req_height != -1) { w = im->req_height * ifv_ctx->width / ifv_ctx->height; h = im->req_height; } else { w = im->req_width; h = im->req_height; } pm = pixmap_create(w, h, PIX_FMT_RGB24); struct SwsContext *sws; sws = sws_getContext(ifv_ctx->width, ifv_ctx->height, ifv_ctx->pix_fmt, w, h, PIX_FMT_RGB24, SWS_LANCZOS, NULL, NULL, NULL); if(sws == NULL) { ifv_close(); return NULL; } uint8_t *ptr[4] = {0,0,0,0}; int strides[4] = {0,0,0,0}; ptr[0] = pm->pm_pixels; strides[0] = pm->pm_linesize; sws_scale(sws, (const uint8_t **)frame->data, frame->linesize, 0, ifv_ctx->height, ptr, strides); sws_freeContext(sws); if(pngencoder != NULL) { AVFrame *oframe = avcodec_alloc_frame(); memset(&frame, 0, sizeof(frame)); oframe->data[0] = pm->pm_pixels; oframe->linesize[0] = pm->pm_linesize; size_t outputsize = pm->pm_linesize * h; void *output = malloc(outputsize); pngencoder->width = w; pngencoder->height = h; pngencoder->pix_fmt = PIX_FMT_RGB24; r = avcodec_encode_video(pngencoder, output, outputsize, oframe); if(r > 0) blobcache_put(cacheid, "videothumb", output, outputsize, 86400 * 5); free(output); av_free(oframe); } break; } av_free(frame); return pm; }
/*****************************************************************************
 * DecodeAudio: Called to decode one frame
 *
 * Modern (avcodec_decode_audio4 / AVFrame) variant: decodes one audio frame
 * from *pp_block, interleaves planar output when needed, optionally extracts
 * a channel subset, stamps PTS/length from the running end_date clock and
 * returns the decoded block.  On the `end` path the INPUT block is consumed
 * (*pp_block cleared); on `drop`, whatever p_block currently points to
 * (input or decoded data) is released.
 *****************************************************************************/
static block_t *DecodeAudio( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    AVCodecContext *ctx = p_sys->p_context;

    if( !pp_block || !*pp_block )
        return NULL;
    block_t *p_block = *pp_block;

    /* Late codec open: extradata only became available with the stream */
    if( !ctx->extradata_size && p_dec->fmt_in.i_extra && p_sys->b_delayed_open)
    {
        InitDecoderConfig( p_dec, ctx );
        OpenAudioCodec( p_dec );
    }

    if( p_sys->b_delayed_open )
        goto end;

    if( p_block->i_flags & (BLOCK_FLAG_DISCONTINUITY|BLOCK_FLAG_CORRUPTED) )
    {
        avcodec_flush_buffers( ctx );
        date_Set( &p_sys->end_date, VLC_TS_INVALID );
        /* MP2/MP3 emit garbage right after a flush: silence the next frames */
        if( ctx->codec_id == AV_CODEC_ID_MP2 || ctx->codec_id == AV_CODEC_ID_MP3 )
            p_sys->i_reject_count = 3;
        goto end;
    }

    /* We've just started the stream, wait for the first PTS. */
    if( !date_Get( &p_sys->end_date ) && p_block->i_pts <= VLC_TS_INVALID )
        goto end;

    if( p_block->i_buffer <= 0 )
        goto end;

    /* libavcodec needs zeroed padding past the input; pad once per block */
    if( (p_block->i_flags & BLOCK_FLAG_PRIVATE_REALLOCATED) == 0 )
    {
        p_block = block_Realloc( p_block, 0, p_block->i_buffer + FF_INPUT_BUFFER_PADDING_SIZE );
        if( !p_block )
            return NULL;
        *pp_block = p_block;
        p_block->i_buffer -= FF_INPUT_BUFFER_PADDING_SIZE;
        memset( &p_block->p_buffer[p_block->i_buffer], 0, FF_INPUT_BUFFER_PADDING_SIZE );
        p_block->i_flags |= BLOCK_FLAG_PRIVATE_REALLOCATED;
    }

    AVFrame *frame = av_frame_alloc();
    if (unlikely(frame == NULL))
        goto end;

    /* Feed the decoder until it yields a frame or input runs out.
     * NOTE(review): the goto end paths inside this loop do not free
     * `frame` — looks like a leak on those paths; confirm. */
    for( int got_frame = 0; !got_frame; )
    {
        if( p_block->i_buffer == 0 )
            goto end;

        AVPacket pkt;
        av_init_packet( &pkt );
        pkt.data = p_block->p_buffer;
        pkt.size = p_block->i_buffer;

        int used = avcodec_decode_audio4( ctx, frame, &got_frame, &pkt );
        if( used < 0 )
        {
            msg_Warn( p_dec, "cannot decode one frame (%zu bytes)", p_block->i_buffer );
            goto end;
        }

        assert( p_block->i_buffer >= (unsigned)used );
        if( used > p_block->i_buffer )
            used = p_block->i_buffer;

        /* Advance past the consumed compressed bytes */
        p_block->p_buffer += used;
        p_block->i_buffer -= used;
    }

    /* Sanity-check what the decoder reported before trusting it */
    if( ctx->channels <= 0 || ctx->channels > 8 || ctx->sample_rate <= 0 )
    {
        msg_Warn( p_dec, "invalid audio properties channels count %d, sample rate %d", ctx->channels, ctx->sample_rate );
        goto end;
    }

    /* Sample rate changed (or first frame): restart the output clock */
    if( p_dec->fmt_out.audio.i_rate != (unsigned int)ctx->sample_rate )
        date_Init( &p_sys->end_date, ctx->sample_rate, 1 );

    if( p_block->i_pts > date_Get( &p_sys->end_date ) )
    {
        date_Set( &p_sys->end_date, p_block->i_pts );
    }

    if( p_block->i_buffer == 0 )
    {   /* Done with this buffer */
        block_Release( p_block );
        p_block = NULL;
        *pp_block = NULL;
    }

    /* NOTE WELL: Beyond this point, p_block refers to the DECODED block! */
    SetupOutputFormat( p_dec, true );
    if( decoder_UpdateAudioFormat( p_dec ) )
        goto drop;

    /* Interleave audio if required */
    if( av_sample_fmt_is_planar( ctx->sample_fmt ) )
    {
        p_block = block_Alloc(frame->linesize[0] * ctx->channels);
        if (unlikely(p_block == NULL))
            goto drop;

        const void *planes[ctx->channels];
        for (int i = 0; i < ctx->channels; i++)
            planes[i] = frame->extended_data[i];

        aout_Interleave(p_block->p_buffer, planes, frame->nb_samples, ctx->channels, p_dec->fmt_out.audio.i_format);
        p_block->i_nb_samples = frame->nb_samples;
        av_frame_free(&frame);
    }
    else
    {
        /* Already interleaved: wrap the AVFrame's buffer zero-copy */
        p_block = vlc_av_frame_Wrap(frame);
        if (unlikely(p_block == NULL))
            goto drop;
    }

    if (p_sys->b_extract)
    {   /* TODO: do not drop channels... at least not here */
        block_t *p_buffer = block_Alloc( p_dec->fmt_out.audio.i_bytes_per_frame * p_block->i_nb_samples );
        if( unlikely(p_buffer == NULL) )
            goto drop;
        aout_ChannelExtract( p_buffer->p_buffer, p_dec->fmt_out.audio.i_channels, p_block->p_buffer, ctx->channels, p_block->i_nb_samples, p_sys->pi_extraction, p_dec->fmt_out.audio.i_bitspersample );
        p_buffer->i_nb_samples = p_block->i_nb_samples;
        block_Release( p_block );
        p_block = p_buffer;
    }

    /* Silent unwanted samples (post-flush garbage, see i_reject_count) */
    if( p_sys->i_reject_count > 0 )
    {
        memset( p_block->p_buffer, 0, p_block->i_buffer );
        p_sys->i_reject_count--;
    }

    /* Stamp size/PTS/duration from the running audio clock */
    p_block->i_buffer = p_block->i_nb_samples * p_dec->fmt_out.audio.i_bytes_per_frame;
    p_block->i_pts = date_Get( &p_sys->end_date );
    p_block->i_length = date_Increment( &p_sys->end_date, p_block->i_nb_samples ) - p_block->i_pts;
    return p_block;

end:
    *pp_block = NULL;
drop:
    if( p_block != NULL )
        block_Release(p_block);
    return NULL;
}
/*
 * fa_image_from_video2: extract one video frame as a BGR32 pixmap thumbnail.
 *
 * Seeks to `sec` seconds in `url`, scans up to MAX_FRAME_SCAN packets for a
 * decodable frame at or past the target PTS, scales it to the requested size
 * and caches an encoded copy via write_thumb().  The last-opened
 * demuxer/decoder is kept in the ifv_* globals for reuse; on failure an
 * explanation is written to errbuf.  Returns NULL on failure.
 */
static pixmap_t * fa_image_from_video2(const char *url, const image_meta_t *im, const char *cacheid, char *errbuf, size_t errlen, int sec, time_t mtime, cancellable_t *c)
{
    pixmap_t *pm = NULL;

    if(ifv_url == NULL || strcmp(url, ifv_url)) {
        // Need to open
        int i;
        AVFormatContext *fctx;
        fa_handle_t *fh = fa_open_ex(url, errbuf, errlen, FA_BUFFERED_BIG, NULL);
        if(fh == NULL)
            return NULL;

        AVIOContext *avio = fa_libav_reopen(fh, 0);

        if((fctx = fa_libav_open_format(avio, url, NULL, 0, NULL, 0, 0)) == NULL) {
            fa_libav_close(avio);
            snprintf(errbuf, errlen, "Unable to open format");
            return NULL;
        }

        /* AVI lacks reliable PTS; ask lavf to generate them */
        if(!strcmp(fctx->iformat->name, "avi"))
            fctx->flags |= AVFMT_FLAG_GENPTS;

        AVCodecContext *ctx = NULL;
        for(i = 0; i < fctx->nb_streams; i++) {
            if(fctx->streams[i]->codec != NULL && fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                ctx = fctx->streams[i]->codec;
                break;
            }
        }
        if(ctx == NULL) {
            fa_libav_close_format(fctx);
            return NULL;
        }

        AVCodec *codec = avcodec_find_decoder(ctx->codec_id);
        if(codec == NULL) {
            fa_libav_close_format(fctx);
            snprintf(errbuf, errlen, "Unable to find codec");
            return NULL;
        }

        if(avcodec_open2(ctx, codec, NULL) < 0) {
            fa_libav_close_format(fctx);
            snprintf(errbuf, errlen, "Unable to open codec");
            return NULL;
        }

        /* Release the previously cached instance only now that the new
         * one opened successfully */
        ifv_close();

        ifv_stream = i;
        ifv_url = strdup(url);
        ifv_fctx = fctx;
        ifv_ctx = ctx;
    }

    AVPacket pkt;
    AVFrame *frame = avcodec_alloc_frame();
    int got_pic;

    AVStream *st = ifv_fctx->streams[ifv_stream];
    int64_t ts = av_rescale(sec, st->time_base.den, st->time_base.num);

    if(av_seek_frame(ifv_fctx, ifv_stream, ts, AVSEEK_FLAG_BACKWARD) < 0) {
        ifv_close();
        snprintf(errbuf, errlen, "Unable to seek to %"PRId64, ts);
        /* NOTE(review): `frame` appears to be leaked on this path — the
         * other early returns below do av_free(frame); confirm. */
        return NULL;
    }
    avcodec_flush_buffers(ifv_ctx);

#define MAX_FRAME_SCAN 500
    int cnt = MAX_FRAME_SCAN;
    while(1) {
        int r;

        r = av_read_frame(ifv_fctx, &pkt);

        if(r == AVERROR(EAGAIN))
            continue;

        if(r == AVERROR_EOF)
            break;

        if(cancellable_is_cancelled(c)) {
            snprintf(errbuf, errlen, "Cancelled");
            av_free_packet(&pkt);
            break;
        }

        if(r != 0) {
            /* Hard read error: drop the cached instance.
             * NOTE(review): ifv_ctx is used again after the loop
             * (avcodec_flush_buffers) — verify ifv_close() leaves it in a
             * state that makes that safe. */
            ifv_close();
            break;
        }

        if(pkt.stream_index != ifv_stream) {
            av_free_packet(&pkt);
            continue;
        }
        cnt--;

        /* Decode cheaply (non-ref frames discarded) until the target PTS is
         * reached; give up and take whatever decodes after MAX_FRAME_SCAN */
        int want_pic = pkt.pts >= ts || cnt <= 0;

        ifv_ctx->skip_frame = want_pic ? AVDISCARD_DEFAULT : AVDISCARD_NONREF;

        avcodec_decode_video2(ifv_ctx, frame, &got_pic, &pkt);
        av_free_packet(&pkt);
        if(got_pic == 0 || !want_pic) {
            continue;
        }

        /* Resolve output size; -1 means "derive from the other dimension
         * keeping aspect ratio" */
        int w,h;
        if(im->im_req_width != -1 && im->im_req_height != -1) {
            w = im->im_req_width;
            h = im->im_req_height;
        } else if(im->im_req_width != -1) {
            w = im->im_req_width;
            h = im->im_req_width * ifv_ctx->height / ifv_ctx->width;
        } else if(im->im_req_height != -1) {
            w = im->im_req_height * ifv_ctx->width / ifv_ctx->height;
            h = im->im_req_height;
        } else {
            /* NOTE(review): both requests are -1 in this branch, so this
             * passes -1 x -1 to pixmap_create — verify callers always set
             * at least one dimension. */
            w = im->im_req_width;
            h = im->im_req_height;
        }

        pm = pixmap_create(w, h, PIXMAP_BGR32, 0);
        if(pm == NULL) {
            ifv_close();
            snprintf(errbuf, errlen, "Out of memory");
            av_free(frame);
            return NULL;
        }

        struct SwsContext *sws;
        sws = sws_getContext(ifv_ctx->width, ifv_ctx->height, ifv_ctx->pix_fmt, w, h, AV_PIX_FMT_BGR32, SWS_BILINEAR, NULL, NULL, NULL);
        if(sws == NULL) {
            ifv_close();
            snprintf(errbuf, errlen, "Scaling failed");
            pixmap_release(pm);
            av_free(frame);
            return NULL;
        }

        uint8_t *ptr[4] = {0,0,0,0};
        int strides[4] = {0,0,0,0};

        ptr[0] = pm->pm_pixels;
        strides[0] = pm->pm_linesize;

        sws_scale(sws, (const uint8_t **)frame->data, frame->linesize, 0, ifv_ctx->height, ptr, strides);

        sws_freeContext(sws);
        write_thumb(ifv_ctx, frame, w, h, cacheid, mtime);
        break;
    }

    av_free(frame);
    if(pm == NULL)
        snprintf(errbuf, errlen, "Frame not found (scanned %d)", MAX_FRAME_SCAN - cnt);

    /* Reset decoder state and schedule auto-close of the cached instance */
    avcodec_flush_buffers(ifv_ctx);
    callout_arm(&thumb_flush_callout, ifv_autoclose, NULL, 5);
    return pm;
}
/*
 * player_decode: demux/decode thread entry point.
 *
 * Loops until abort is requested: propagates pause/resume to the demuxer,
 * services pending seek requests (flushing the audio codec and notifying
 * MEDIA_SEEK_COMPLETE), then reads and decodes packets.  On end-of-file it
 * exits the loop and fires MEDIA_PLAYBACK_COMPLETE.
 */
void player_decode(void *data)
{
    State *state = (State *) data;
    int ret;
    int eof = 0;

    for (;;) {
        if (state->abort_request) {
            break;
        }

        /* Pause state changed since last iteration: tell the demuxer */
        if (state->paused != state->last_paused) {
            state->last_paused = state->paused;
            if (state->paused) {
                state->read_pause_return = av_read_pause(state->pFormatCtx);
            } else {
                av_read_play(state->pFormatCtx);
            }
        }

        if (state->seek_req) {
            int64_t seek_target = state->seek_pos;
            /* Seek window: bias min/max by the relative seek distance so a
             * relative seek does not land on the frame we started from */
            int64_t seek_min = state->seek_rel > 0 ? seek_target - state->seek_rel + 2: INT64_MIN;
            int64_t seek_max = state->seek_rel < 0 ? seek_target - state->seek_rel - 2: INT64_MAX;

            ret = avformat_seek_file(state->pFormatCtx, -1, seek_min, seek_target, seek_max, state->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", state->pFormatCtx->filename);
            } else {
                if (state->audio_stream >= 0) {
                    avcodec_flush_buffers(state->audio_st->codec);
                }
                state->notify_callback(state->clazz, MEDIA_SEEK_COMPLETE, 0, 0, FROM_THREAD);
            }
            state->seek_req = 0;
            eof = 0;
        }

        if (state->paused) {
            goto sleep;
        }

        AVPacket packet;
        memset(&packet, 0, sizeof(packet)); //make sure we can safely free it

        /* Read one packet per stream per outer iteration.
         * NOTE(review): on a read error that is not EOF, execution still
         * falls through to decode_frame_from_packet with the previous/zeroed
         * packet — verify this is intentional. */
        int i;
        for (i = 0; i < state->pFormatCtx->nb_streams; ++i) {
            //av_init_packet(&packet);
            ret = av_read_frame(state->pFormatCtx, &packet);

            if (ret < 0) {
                if (ret == AVERROR_EOF || url_feof(state->pFormatCtx->pb)) {
                    eof = 1;
                    break;
                }
            }

            int frame_size_ptr;
            ret = decode_frame_from_packet(state, &packet, &frame_size_ptr, FROM_THREAD);
            av_free_packet(&packet);

            if (ret != 0) { //an error or a frame decoded
                // TODO add this bacl=k
            }
        }

        if (eof) {
            break;
        }

        sleep:
            usleep(100);
    }

    if (eof) {
        state->notify_callback(state->clazz, MEDIA_PLAYBACK_COMPLETE, 0, 0, FROM_THREAD);
    }
}
bool seekInternal(double t, int depth) { ResetRetries(); emptyFrameQueue(); audioHandler->clearQueue(); int64_t firstTs = getFirstSeekTs(); double backSeek = (double)depth * 2.0f + 1.0f; int64_t minTs = tsFromTime(t - backSeek - 2.5) + firstTs; int64_t ts = tsFromTime(t - backSeek) + firstTs; int64_t maxTs = tsFromTime(t - backSeek) + firstTs; // There is no discernible way to determine if negative timestamps are allowed // (or even required) to seek to low timestamps. // On some files you must seek to negative timestamps to be able to seek to 0 // but on other files you get weird results from seeking to below 0. // So, every other try, we will allow seeking to negative timestamps. if((depth % 2) == 1){ minTs = std::max((int64_t)0, minTs); ts = std::max((int64_t)0, minTs); maxTs = std::max((int64_t)0, minTs); } FlogD("Trying to seek to minTs: " << minTs << " ts: " << ts << " maxTs: " << maxTs << " with firsTs: " << firstTs); int flags = 0; if(ts < pFormatCtx->streams[videoStream]->cur_dts) flags |= AVSEEK_FLAG_BACKWARD; int seekRet = avformat_seek_file(pFormatCtx, videoStream, minTs, ts, maxTs, flags); if(seekRet > 0){ FlogD("avformat_seek_file failed, returned " << seekRet); return false; } avcodec_flush_buffers(pCodecCtx); double newTime = t + timeFromTs(firstPts); double actualTime = skipToTs(newTime); // consider the seek failed and try again if the actual time diffs more than .5 seconds // from the desired new time. FlogD("wanted to seek to " << newTime << " and ended up at " << actualTime); bool ret = true; if(fabsf(newTime - actualTime) > .5){ if(depth < 5){ FlogD("not good enough, trying again"); return seekInternal(t, depth + 1); } else{ ret = false; FlogW("seek failed, wanted to seek to " << newTime << " and ended up at " << actualTime); } } timeHandler->SetTime(actualTime); stepIntoQueue = true; audioHandler->onSeek(); return ret; }
/**
 * Decode frames from fmt_ctx and print/record an Adler-32 CRC per frame.
 *
 * In recording mode (no_seeking != 0) it decodes the whole stream and stores
 * each CRC via add_crc_to_array(); otherwise it seeks to ts_start first and
 * compares each CRC (for frames with pts < ts_end) via compare_crc_in_array().
 *
 * @return 0 on success, a negative error code on failure.
 *
 * FIX: all early error returns previously leaked byte_buffer (and the
 * in-flight AVPacket); they now route through a common cleanup path.
 */
static int compute_crc_of_packets(AVFormatContext *fmt_ctx, int video_stream, AVCodecContext *ctx, AVFrame *fr, uint64_t ts_start, uint64_t ts_end, int no_seeking)
{
    int number_of_written_bytes;
    int got_frame = 0;
    int result;
    int end_of_stream = 0;
    int byte_buffer_size;
    uint8_t *byte_buffer;
    int64_t crc;
    AVPacket pkt;

    byte_buffer_size = av_image_get_buffer_size(ctx->pix_fmt, ctx->width, ctx->height, 16);
    byte_buffer = av_malloc(byte_buffer_size);
    if (!byte_buffer) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate buffer\n");
        return AVERROR(ENOMEM);
    }

    if (!no_seeking) {
        result = av_seek_frame(fmt_ctx, video_stream, ts_start, AVSEEK_FLAG_ANY);
        printf("Seeking to %"PRId64", computing crc for frames with pts < %"PRId64"\n", ts_start, ts_end);
        if (result < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error in seeking\n");
            av_freep(&byte_buffer); /* was leaked here */
            return result;
        }
        avcodec_flush_buffers(ctx);
    }

    av_init_packet(&pkt);
    do {
        /* Keep draining the decoder with empty packets after EOF */
        if (!end_of_stream)
            if (av_read_frame(fmt_ctx, &pkt) < 0)
                end_of_stream = 1;
        if (end_of_stream) {
            pkt.data = NULL;
            pkt.size = 0;
        }
        if (pkt.stream_index == video_stream || end_of_stream) {
            got_frame = 0;
            /* CRC comparison is keyed on pts, so it must be present */
            if ((pkt.pts == AV_NOPTS_VALUE) && (!end_of_stream)) {
                av_log(NULL, AV_LOG_ERROR, "Error: frames doesn't have pts values\n");
                result = -1;
                goto end;
            }
            result = avcodec_decode_video2(ctx, fr, &got_frame, &pkt);
            if (result < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding frame\n");
                goto end;
            }
            if (got_frame) {
                number_of_written_bytes = av_image_copy_to_buffer(byte_buffer, byte_buffer_size, (const uint8_t* const *)fr->data, (const int*) fr->linesize, ctx->pix_fmt, ctx->width, ctx->height, 1);
                if (number_of_written_bytes < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Can't copy image to buffer\n");
                    result = number_of_written_bytes;
                    goto end;
                }
                /* Past the comparison window: stop (success) */
                if ((fr->pts > ts_end) && (!no_seeking))
                    break;
                crc = av_adler32_update(0, (const uint8_t*)byte_buffer, number_of_written_bytes);
                printf("%10"PRId64", 0x%08lx\n", fr->pts, crc);
                if (no_seeking) {
                    if (add_crc_to_array(crc, fr->pts) < 0) {
                        result = -1;
                        goto end;
                    }
                }
                else {
                    if (compare_crc_in_array(crc, fr->pts) < 0) {
                        result = -1;
                        goto end;
                    }
                }
            }
        }
        av_packet_unref(&pkt);
        av_init_packet(&pkt);
    } while ((!end_of_stream || got_frame) && (no_seeking || (fr->pts + av_frame_get_pkt_duration(fr) <= ts_end)));

    result = 0; /* result may hold a positive byte count from the decoder */

end:
    /* Common cleanup: releases the pending packet and the pixel buffer on
     * both success and error paths */
    av_packet_unref(&pkt);
    av_freep(&byte_buffer);
    return result;
}
/*
 * CFfmpegDec::Decoder: main audio playback loop.
 *
 * Opens the best audio stream's codec, sets up a libswresample context that
 * converts whatever the codec produces to interleaved stereo S16 at the
 * codec's sample rate, then reads/decodes/converts packets and writes PCM to
 * audioDecoder until STOP_REQ or an error.  Also implements FF/REV and
 * skip-seconds seeking via av_seek_frame, pause busy-waiting, and elapsed
 * time reporting through *time_played.
 */
CBaseDec::RetCode CFfmpegDec::Decoder(FILE *_in, int /*OutputFd*/, State* state, CAudioMetaData* _meta_data, time_t* time_played, unsigned int* secondsToSkip)
{
    in = _in;
    RetCode Status=OK;
    /* fseek fails (non-zero) on unseekable input => treat as a live stream */
    is_stream = fseek((FILE *)in, 0, SEEK_SET);

    if (!SetMetaData((FILE *)in, _meta_data, true)) {
        DeInit();
        Status=DATA_ERR;
        return Status;
    }

    AVCodecContext *c = avc->streams[best_stream]->codec;

    /* codec open/close is serialized with other threads via mutex */
    mutex.lock();
    int r = avcodec_open2(c, codec, NULL);
    mutex.unlock();
    if (r)
    {
        DeInit();
        Status=DATA_ERR;
        return Status;
    }

    SwrContext *swr = swr_alloc();
    if (!swr) {
        mutex.lock();
        avcodec_close(c);
        mutex.unlock();
        DeInit();
        Status=DATA_ERR;
        return Status;
    }

    mSampleRate = samplerate;
    mChannels = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);
    audioDecoder->PrepareClipPlay(mChannels, mSampleRate, 16, 1);

    AVFrame *frame = NULL;
    AVPacket rpacket;
    av_init_packet(&rpacket);

    /* Some codecs leave channel_layout unset: default to stereo */
    c->channel_layout = c->channel_layout ? c->channel_layout : AV_CH_LAYOUT_STEREO;

    /* Resample to stereo S16, keeping the codec's sample rate */
    av_opt_set_int(swr, "in_channel_layout", c->channel_layout, 0);
    //av_opt_set_int(swr, "out_channel_layout", c->channel_layout, 0);
    av_opt_set_int(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
    av_opt_set_int(swr, "in_sample_rate", c->sample_rate, 0);
    av_opt_set_int(swr, "out_sample_rate", c->sample_rate, 0);
    av_opt_set_sample_fmt(swr, "in_sample_fmt", c->sample_fmt, 0);
    av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);

    /* NOTE(review): this early return skips the codec/swr cleanup done on
     * the other error paths — verify intentional. */
    if (( swr_init(swr)) < 0) {
        Status=DATA_ERR;
        return Status;
    }

    uint8_t *outbuf = NULL;
    int outsamples = 0;
    int outsamples_max = 0;

    int64_t pts = 0, start_pts = 0, next_skip_pts = 0;
    uint64_t skip = 0;
    int seek_flags = 0;

    do
    {
        int actSecsToSkip = *secondsToSkip;
        /* FF/REV or explicit skip: compute the next seek target in stream
         * time-base units and jump there */
        if (!is_stream && (actSecsToSkip || *state==FF || *state==REV) && avc->streams[best_stream]->time_base.num) {
            if (!next_skip_pts || pts >= next_skip_pts) {
                /* skip = one second expressed in time-base ticks */
                skip = avc->streams[best_stream]->time_base.den / avc->streams[best_stream]->time_base.num;
                if (actSecsToSkip)
                    skip *= actSecsToSkip;
                if (*state == REV) {
                    next_skip_pts = pts - skip;
                    pts = next_skip_pts - skip/4;
                    seek_flags = AVSEEK_FLAG_BACKWARD;
                    if (pts < start_pts) {
                        /* Hit the beginning while rewinding: stop there */
                        pts = start_pts;
                        *state = PAUSE;
                    }
                } else {
                    pts += skip;
                    next_skip_pts = pts + skip/4;
                    seek_flags = 0;
                }
                av_seek_frame(avc, best_stream, pts, seek_flags);
                // if a custom value was set we only jump once
                if (actSecsToSkip != 0) {
                    *state=PLAY;
                    *secondsToSkip = 0;
                }
            }
        }

        /* Busy-wait while paused (file playback only) */
        while(*state==PAUSE && !is_stream)
            usleep(10000);

        if (av_read_frame(avc, &rpacket)) {
            Status=DATA_ERR;
            break;
        }

        if (rpacket.stream_index != best_stream) {
            av_packet_unref(&rpacket);
            continue;
        }

        /* Work on a shallow copy so rpacket can still be unreffed intact */
        AVPacket packet = rpacket;
        while (packet.size > 0) {
            int got_frame = 0;
            if (!frame) {
                if (!(frame = av_frame_alloc())) {
                    Status=DATA_ERR;
                    break;
                }
            } else
                av_frame_unref(frame);

            int len = avcodec_decode_audio4(c, frame, &got_frame, &packet);
            if (len < 0) {
                // skip frame: discard the rest of the packet and hard-reset
                // the codec (flush + close + reopen)
                packet.size = 0;
                avcodec_flush_buffers(c);
                mutex.lock();
                avcodec_close(c);
                avcodec_open2(c, codec, NULL);
                mutex.unlock();
                continue;
            }
            if (got_frame && *state!=PAUSE) {
                int out_samples;
                /* Account for samples buffered inside the resampler */
                outsamples = av_rescale_rnd(swr_get_delay(swr, c->sample_rate) + frame->nb_samples, c->sample_rate, c->sample_rate, AV_ROUND_UP);
                if (outsamples > outsamples_max) {
                    /* Grow the conversion buffer */
                    av_free(outbuf);
                    if (av_samples_alloc(&outbuf, &out_samples, mChannels, //c->channels,
                            frame->nb_samples, AV_SAMPLE_FMT_S16, 1) < 0) {
                        Status=WRITE_ERR;
                        packet.size = 0;
                        break;
                    }
                    outsamples_max = outsamples;
                }
                outsamples = swr_convert(swr, &outbuf, outsamples, (const uint8_t **) &frame->data[0], frame->nb_samples);
                int outbuf_size = av_samples_get_buffer_size(&out_samples, mChannels, //c->channels,
                        outsamples, AV_SAMPLE_FMT_S16, 1);

                if(audioDecoder->WriteClip((unsigned char*) outbuf, outbuf_size) != outbuf_size) {
                    fprintf(stderr,"%s: PCM write error (%s).\n", ProgName, strerror(errno));
                    Status=WRITE_ERR;
                }
                pts = av_frame_get_best_effort_timestamp(frame);
                if (!start_pts)
                    start_pts = pts;
            }
            /* Advance past the compressed bytes the decoder consumed */
            packet.size -= len;
            packet.data += len;
        }
        if (time_played && avc->streams[best_stream]->time_base.den)
            *time_played = (pts - start_pts) * avc->streams[best_stream]->time_base.num / avc->streams[best_stream]->time_base.den;
        av_packet_unref(&rpacket);
    } while (*state!=STOP_REQ && Status==OK);

    audioDecoder->StopClip();
    meta_data_valid = false;

    swr_free(&swr);
    av_free(outbuf);
    av_packet_unref(&rpacket);
    av_frame_free(&frame);
    avcodec_close(c);
    //av_free(avcc);

    DeInit();
    /* Remove any temporary cover-art file extracted for this track */
    if (_meta_data->cover_temporary && !_meta_data->cover.empty()) {
        _meta_data->cover_temporary = false;
        unlink(_meta_data->cover.c_str());
    }
    return Status;
}
/*
 * FFMVWrapper_ProcessReceive: DirectShow transform-filter receive handler.
 *
 * Decodes the compressed video payload of pSampIn with the (legacy)
 * avcodec_decode_video API — possibly several frames per input sample —
 * converts each decoded picture to RGB24/RGBA32 and pushes it downstream.
 * Late frames may be skipped (up to MAX_SKIP in a row) based on the
 * reference clock.  This->m_cs is held around all codec/context access and
 * must be released before any return.
 */
static HRESULT FFMVWrapper_ProcessReceive( CTransformBaseImpl* pImpl, IMediaSample* pSampIn )
{
    CFFMVWrapperImpl* This = pImpl->m_pUserData;
    BYTE* pDataIn = NULL;
    LONG lDataInLen;
    IMediaSample* pSampOut = NULL;
    BYTE* pOutBuf;
    HRESULT hr;
    AVFrame tmp_pic;
    AVPicture dst_pic;
    int nOut, got_pic;
    LONG width, height;
    REFERENCE_TIME rtStart, rtStop, rtNow;
    BOOL skip;

    TRACE("(%p)\n",This);

    if ( This == NULL || !This->ctx.codec || This->m_pbiIn == NULL || This->m_pbiOut == NULL )
        return E_UNEXPECTED;

    hr = IMediaSample_GetPointer( pSampIn, &pDataIn );
    if ( FAILED(hr) )
        return hr;
    lDataInLen = IMediaSample_GetActualDataLength( pSampIn );
    if ( lDataInLen < 0 )
        return E_FAIL;

    EnterCriticalSection( &This->m_cs );

    /* Re-check under the lock: the codec may have been torn down */
    if ( !This->ctx.codec )
    {
        hr = E_UNEXPECTED;
        goto failed;
    }

    if ( IMediaSample_IsDiscontinuity( pSampIn ) == S_OK )
        avcodec_flush_buffers( &This->ctx );

    width = This->m_pbiIn->bmiHeader.biWidth;
    /* biHeight < 0 means a top-down DIB; work with the absolute height */
    height = (This->m_pbiIn->bmiHeader.biHeight < 0) ? -This->m_pbiIn->bmiHeader.biHeight : This->m_pbiIn->bmiHeader.biHeight;

    /* Consume the input buffer, decoding as many frames as it contains */
    while ( TRUE )
    {
        nOut = avcodec_decode_video( &This->ctx, &tmp_pic, &got_pic, (void*)pDataIn, lDataInLen );
        if ( nOut < 0 )
        {
            TRACE("decoding error\n");
            goto fail;
        }
        TRACE("used %d of %d bytes\n", nOut, lDataInLen);
        if ( nOut > lDataInLen )
        {
            WARN("arrgh - FFmpeg read too much\n");
            nOut = lDataInLen;
        }
        pDataIn += nOut;
        lDataInLen -= nOut;
        if (!got_pic)
        {
            TRACE("no frame decoded\n");
            if (lDataInLen) continue;
            LeaveCriticalSection( &This->m_cs );
            return S_OK;
        }
        TRACE("frame decoded\n");
        This->rtInternal ++;
        hr = IMediaSample_GetTime( pSampIn, &rtStart, &rtStop );
        if ( hr == S_OK )
        {
            /* if the parser gives us a timestamp, the data
             * we got from it should be a single frame */
            if ( lDataInLen )
            {
                ERR("excessive data in compressed frame\n");
                lDataInLen = 0;
            }
        }
        else
        {
            /* compute our own timestamp from the frame counter and rate */
            rtStart = This->rtCur;
            This->rtCur = This->rtInternal * (REFERENCE_TIME)QUARTZ_TIMEUNITS * This->ctx.frame_rate_base / This->ctx.frame_rate;
            rtStop = This->rtCur;
        }

        TRACE("frame start=%lld, stop=%lld\n", rtStart, rtStop);
        skip = FALSE;
        hr = IReferenceClock_GetTime(pImpl->basefilter.pClock, &rtNow);
        if (SUCCEEDED(hr))
        {
            rtNow -= pImpl->basefilter.rtStart;
            TRACE("time=%lld\n", rtNow);
            /* Frame is already late: skip it, unless we have skipped
             * MAX_SKIP frames in a row already */
            if (rtStart < rtNow + SKIP_TIME)
            {
                skip = TRUE;
                if ( ++This->skipFrames >= MAX_SKIP )
                {
                    This->skipFrames = 0;
                    TRACE("frame late, but max skip exceeded\n");
                    skip = FALSE;
                }
            }
        }

        if (skip)
        {
            TRACE("skipping late frame\n");
            if ( lDataInLen == 0 )
            {
                LeaveCriticalSection( &This->m_cs );
                return S_OK;
            }
        }
        else
        {
            /* process frame */
            hr = IMemAllocator_GetBuffer( pImpl->m_pOutPinAllocator, &pSampOut, &rtStart, &rtStop, 0 );
            if ( FAILED(hr) )
                goto failed;
            hr = IMediaSample_GetPointer( pSampOut, &pOutBuf );
            if ( FAILED(hr) )
                goto failed;
            /* Convert into the staging buffer when one exists, otherwise
             * straight into the output sample */
            dst_pic.data[0] = ( This->m_pOutBuf != NULL ) ? This->m_pOutBuf : pOutBuf;
            dst_pic.linesize[0] = DIBWIDTHBYTES(This->m_pbiOut->bmiHeader);

            /* convert to RGB (or BGR) */
            switch (This->m_pbiOut->bmiHeader.biBitCount)
            {
            case 24:
                img_convert( &dst_pic, PIX_FMT_BGR24, (AVPicture*)&tmp_pic, This->ctx.pix_fmt, width, height );
                break;
            case 32:
                /* RGBA32 is misnamed (is actually cpu-endian ARGB, which means BGRA on x86),
                 * might get renamed in future ffmpeg snapshots */
                img_convert( &dst_pic, PIX_FMT_RGBA32, (AVPicture*)&tmp_pic, This->ctx.pix_fmt, width, height );
                break;
            default:
                TRACE("bad bpp\n");
                goto fail;
            }

            if ( This->m_pOutBuf != NULL )
                memcpy( pOutBuf, This->m_pOutBuf, This->m_pbiOut->bmiHeader.biSizeImage );

            IMediaSample_SetActualDataLength( pSampOut, This->m_pbiOut->bmiHeader.biSizeImage );

            /* FIXME: discontinuity and sync point */

            /* Drop the lock while sending downstream (SendSample may block) */
            LeaveCriticalSection( &This->m_cs );
            hr = CPinBaseImpl_SendSample( &pImpl->pOutPin->pin, pSampOut );
            if ( FAILED(hr) )
                return hr;
            IMediaSample_Release( pSampOut );
            pSampOut = NULL;
            if ( lDataInLen == 0 )
                return S_OK;
            /* More data left in this sample: re-acquire and re-validate */
            EnterCriticalSection( &This->m_cs );
            if ( !This->ctx.codec )
            {
                hr = E_UNEXPECTED;
                goto failed;
            }
        }
    }

fail:
    hr = E_FAIL;
failed:
    LeaveCriticalSection( &This->m_cs );
    return hr;
}
/* Fetch (and decode if necessary) the frame at 'position' from an ffmpeg-backed
 * anim, optionally using a timecode index ('tc') for accurate seeking.
 *
 * Strategy, in order of preference:
 *   1. return the cached last_frame if its PTS range covers the request;
 *   2. scan forward without seeking when the target is within the preseek
 *      window (decided either heuristically or by the index);
 *   3. otherwise av_seek_frame() to a keyframe before the target and scan;
 *   4. special-case the very first frame (no seek needed).
 *
 * Returns a new reference to an ImBuf (caller must release), or 0 if anim
 * is NULL.  NOTE(review): the IMB_allocImBuf() result is dereferenced
 * without a NULL check — presumably allocation failure aborts elsewhere;
 * confirm before relying on this in low-memory paths. */
static ImBuf *ffmpeg_fetchibuf(struct anim *anim, int position, IMB_Timecode_Type tc)
{
	int64_t pts_to_search = 0;
	double frame_rate;
	double pts_time_base;
	long long st_time;
	struct anim_index *tc_index = 0;
	AVStream *v_st;
	int new_frame_index = 0; /* To quiet gcc barking... */
	int old_frame_index = 0; /* To quiet gcc barking... */

	if (anim == NULL) return (0);

	av_log(anim->pFormatCtx, AV_LOG_DEBUG, "FETCH: pos=%d\n", position);

	if (tc != IMB_TC_NONE) {
		tc_index = IMB_anim_open_index(anim, tc);
	}

	v_st = anim->pFormatCtx->streams[anim->videoStream];

	frame_rate = av_q2d(av_get_r_frame_rate_compat(anim->pFormatCtx, v_st));

	st_time = anim->pFormatCtx->start_time;
	pts_time_base = av_q2d(v_st->time_base);

	if (tc_index) {
		/* index maps frame numbers to exact PTS values */
		new_frame_index = IMB_indexer_get_frame_index(
		        tc_index, position);
		old_frame_index = IMB_indexer_get_frame_index(
		        tc_index, anim->curposition);
		pts_to_search = IMB_indexer_get_pts(
		        tc_index, new_frame_index);
	}
	else {
		/* no index: estimate PTS from frame number and rate,
		 * rounding to the nearest tick */
		pts_to_search = (long long)
		        floor(((double) position) /
		              pts_time_base / frame_rate + 0.5);

		if (st_time != AV_NOPTS_VALUE) {
			/* shift by the container start time (st_time is in
			 * AV_TIME_BASE units, hence the extra division) */
			pts_to_search += st_time / pts_time_base / AV_TIME_BASE;
		}
	}

	av_log(anim->pFormatCtx, AV_LOG_DEBUG,
	       "FETCH: looking for PTS=%lld "
	       "(pts_timebase=%g, frame_rate=%g, st_time=%lld)\n",
	       (long long int)pts_to_search, pts_time_base, frame_rate, st_time);

	/* fast path: requested frame is the one we already decoded */
	if (anim->last_frame &&
	    anim->last_pts <= pts_to_search && anim->next_pts > pts_to_search)
	{
		av_log(anim->pFormatCtx, AV_LOG_DEBUG,
		       "FETCH: frame repeat: last: %lld next: %lld\n",
		       (long long int)anim->last_pts,
		       (long long int)anim->next_pts);
		IMB_refImBuf(anim->last_frame);
		anim->curposition = position;
		return anim->last_frame;
	}

	if (position > anim->curposition + 1 &&
	    anim->preseek &&
	    !tc_index &&
	    position - (anim->curposition + 1) < anim->preseek)
	{
		/* close enough ahead: cheaper to decode forward than to seek */
		av_log(anim->pFormatCtx, AV_LOG_DEBUG,
		       "FETCH: within preseek interval (no index)\n");

		ffmpeg_decode_video_frame_scan(anim, pts_to_search);
	}
	else if (tc_index &&
	         IMB_indexer_can_scan(tc_index, old_frame_index,
	                              new_frame_index))
	{
		av_log(anim->pFormatCtx, AV_LOG_DEBUG,
		       "FETCH: within preseek interval "
		       "(index tells us)\n");

		ffmpeg_decode_video_frame_scan(anim, pts_to_search);
	}
	else if (position != anim->curposition + 1) {
		/* a real seek is required */
		long long pos;
		int ret;

		if (tc_index) {
			unsigned long long dts;

			pos = IMB_indexer_get_seek_pos(
			        tc_index, new_frame_index);
			dts = IMB_indexer_get_seek_pos_dts(
			        tc_index, new_frame_index);

			av_log(anim->pFormatCtx, AV_LOG_DEBUG,
			       "TC INDEX seek pos = %lld\n", pos);
			av_log(anim->pFormatCtx, AV_LOG_DEBUG,
			       "TC INDEX seek dts = %llu\n", dts);

			if (ffmpeg_seek_by_byte(anim->pFormatCtx)) {
				/* formats without reliable timestamps are
				 * seeked by byte offset instead */
				av_log(anim->pFormatCtx, AV_LOG_DEBUG,
				       "... using BYTE pos\n");

				ret = av_seek_frame(anim->pFormatCtx, -1,
				                    pos, AVSEEK_FLAG_BYTE);
				av_update_cur_dts(anim->pFormatCtx, v_st, dts);
			}
			else {
				av_log(anim->pFormatCtx, AV_LOG_DEBUG,
				       "... using DTS pos\n");
				ret = av_seek_frame(anim->pFormatCtx,
				                    anim->videoStream, dts,
				                    AVSEEK_FLAG_BACKWARD);
			}
		}
		else {
			/* aim a few frames (preseek) before the target so the
			 * decoder can warm up before we reach it */
			pos = (long long) (position - anim->preseek) *
			      AV_TIME_BASE / frame_rate;

			av_log(anim->pFormatCtx, AV_LOG_DEBUG,
			       "NO INDEX seek pos = %lld, st_time = %lld\n",
			       pos, (st_time != AV_NOPTS_VALUE) ? st_time : 0);

			if (pos < 0) {
				pos = 0;
			}

			if (st_time != AV_NOPTS_VALUE) {
				pos += st_time;
			}

			av_log(anim->pFormatCtx, AV_LOG_DEBUG,
			       "NO INDEX final seek pos = %lld\n", pos);

			ret = av_seek_frame(anim->pFormatCtx, -1,
			                    pos, AVSEEK_FLAG_BACKWARD);
		}

		if (ret < 0) {
			av_log(anim->pFormatCtx, AV_LOG_ERROR,
			       "FETCH: "
			       "error while seeking to DTS = %lld "
			       "(frameno = %d, PTS = %lld): errcode = %d\n",
			       pos, position, (long long int)pts_to_search, ret);
		}

		/* discard decoder state and the pending packet; both belong
		 * to the pre-seek stream position */
		avcodec_flush_buffers(anim->pCodecCtx);

		anim->next_pts = -1;

		if (anim->next_packet.stream_index == anim->videoStream) {
			av_free_packet(&anim->next_packet);
			anim->next_packet.stream_index = -1;
		}

		/* memset(anim->pFrame, ...) ?? */

		if (ret >= 0) {
			ffmpeg_decode_video_frame_scan(anim, pts_to_search);
		}
	}
	else if (position == 0 && anim->curposition == -1) {
		/* first frame without seeking special case... */
		ffmpeg_decode_video_frame(anim);
	}
	else {
		av_log(anim->pFormatCtx, AV_LOG_DEBUG,
		       "FETCH: no seek necessary, just continue...\n");
	}

	IMB_freeImBuf(anim->last_frame);
	anim->last_frame = IMB_allocImBuf(anim->x, anim->y, 32, IB_rect);
	anim->last_frame->rect_colorspace = colormanage_colorspace_get_named(anim->colorspace);

	/* convert/copy the current decoded frame into last_frame */
	ffmpeg_postprocess(anim);

	anim->last_pts = anim->next_pts;

	/* pre-decode the following frame so next_pts is valid for the
	 * frame-repeat fast path above */
	ffmpeg_decode_video_frame(anim);

	anim->curposition = position;

	IMB_refImBuf(anim->last_frame);

	return anim->last_frame;
}
// Worker-thread entry point for the audio decoder: drains the job queue and
// dispatches on task type (start/end sequence, decode one access unit, close).
// Decoded PCM frames are pushed to 'frames' and reported via cbFunc; the loop
// exits when the emulator stops, the decoder is closed, or the queue closes.
virtual void cpu_task() override
{
	while (true)
	{
		// stop promptly on emulator shutdown or decoder close
		if (Emu.IsStopped() || is_closed)
		{
			break;
		}

		// blocking pop; returns false when the queue is closed
		if (!job.pop(task, &is_closed))
		{
			break;
		}

		switch (task.type)
		{
		case adecStartSeq:
		{
			// TODO: reset data
			cellAdec.warning("adecStartSeq:");

			// reset the bitstream reader; actual codec (re)open is
			// deferred until the first adecDecodeAu (see below)
			reader.addr = 0;
			reader.size = 0;
			reader.init = false;
			reader.has_ats = false;
			just_started = true;

			if (adecIsAtracX(type))
			{
				// ATRAC3plus carries its stream parameters in the
				// start-sequence task rather than in the bitstream
				ch_cfg = task.at3p.channel_config;
				ch_out = task.at3p.channels;
				frame_size = task.at3p.frame_size;
				sample_rate = task.at3p.sample_rate;
				use_ats_headers = task.at3p.ats_header == 1;
			}
			break;
		}

		case adecEndSeq:
		{
			// TODO: finalize
			cellAdec.warning("adecEndSeq:");
			cbFunc(*this, id, CELL_ADEC_MSG_TYPE_SEQDONE, CELL_OK, cbArg);

			just_finished = true;
			break;
		}

		case adecDecodeAu:
		{
			int err = 0;

			// point the reader at this access unit's guest memory
			reader.addr = task.au.addr;
			reader.size = task.au.size;
			reader.has_ats = use_ats_headers;
			//LOG_NOTICE(HLE, "Audio AU: size = 0x%x, pts = 0x%llx", task.au.size, task.au.pts);

			if (just_started)
			{
				first_pts = task.au.pts;
				last_pts = task.au.pts;
				if (adecIsAtracX(type)) last_pts -= 0x10000; // hack
			}

			// RAII wrapper: owns the packet's padded data buffer.
			// NOTE(review): in the else-branch "size = 0" assigns the
			// constructor PARAMETER (it shadows AVPacket::size); benign
			// here because av_init_packet() already zeroed the member.
			struct AVPacketHolder : AVPacket
			{
				AVPacketHolder(u32 size)
				{
					av_init_packet(this);

					if (size)
					{
						data = (u8*)av_calloc(1, size + FF_INPUT_BUFFER_PADDING_SIZE);
						this->size = size + FF_INPUT_BUFFER_PADDING_SIZE;
					}
					else
					{
						data = NULL;
						size = 0;
					}
				}

				~AVPacketHolder()
				{
					av_free(data);
				}

			} au(0);

			if (just_started && just_finished)
			{
				// restart after an end-of-sequence: just flush
				avcodec_flush_buffers(ctx);

				reader.init = true; // wrong
				just_finished = false;
				just_started = false;
			}
			else if (just_started) // deferred initialization
			{
				AVDictionary* opts = nullptr;
				// small probe: the custom reader feeds data on demand
				av_dict_set(&opts, "probesize", "96", 0);
				err = avformat_open_input(&fmt, NULL, input_format, &opts);
				if (err || opts)
				{
					// leftover opts means an option was not consumed
					fmt::throw_exception("avformat_open_input() failed (err=0x%x, opts=%d)" HERE, err, opts ? 1 : 0);
				}
				//err = avformat_find_stream_info(fmt, NULL);
				//if (err || !fmt->nb_streams)
				//{
				//	ADEC_ERROR("adecDecodeAu: avformat_find_stream_info() failed (err=0x%x, nb_streams=%d)", err, fmt->nb_streams);
				//}
				if (!avformat_new_stream(fmt, codec))
				{
					fmt::throw_exception("avformat_new_stream() failed" HERE);
				}
				ctx = fmt->streams[0]->codec; // TODO: check data

				opts = nullptr;
				av_dict_set(&opts, "refcounted_frames", "1", 0);
				{
					std::lock_guard<std::mutex> lock(g_mutex_avcodec_open2);
					// not multithread-safe (???)
					err = avcodec_open2(ctx, codec, &opts);
				}
				if (err || opts)
				{
					fmt::throw_exception("avcodec_open2() failed (err=0x%x, opts=%d)" HERE, err, opts ? 1 : 0);
				}
				just_started = false;
			}

			bool last_frame = false;

			// decode every frame contained in this access unit
			while (true)
			{
				if (Emu.IsStopped() || is_closed)
				{
					if (Emu.IsStopped()) cellAdec.warning("adecDecodeAu: aborted");
					break;
				}

				last_frame = av_read_frame(fmt, &au) < 0;
				if (last_frame)
				{
					//break;
					// feed an empty packet to drain the decoder
					av_free(au.data);
					au.data = NULL;
					au.size = 0;
				}

				// RAII wrapper: owns the decoded AVFrame
				struct AdecFrameHolder : AdecFrame
				{
					AdecFrameHolder()
					{
						data = av_frame_alloc();
					}

					~AdecFrameHolder()
					{
						if (data)
						{
							av_frame_unref(data);
							av_frame_free(&data);
						}
					}

				} frame;

				if (!frame.data)
				{
					fmt::throw_exception("av_frame_alloc() failed" HERE);
				}

				int got_frame = 0;

				int decode = avcodec_decode_audio4(ctx, frame.data, &got_frame, &au);

				if (decode <= 0)
				{
					if (decode < 0)
					{
						cellAdec.error("adecDecodeAu: AU decoding error(0x%x)", decode);
					}
					// nothing decoded and no input left: AU done
					if (!got_frame && reader.size == 0) break;
				}

				if (got_frame)
				{
					//u64 ts = av_frame_get_best_effort_timestamp(frame.data);
					//if (ts != AV_NOPTS_VALUE)
					//{
					//	frame.pts = ts/* - first_pts*/;
					//	last_pts = frame.pts;
					//}
					// synthesize PTS in 90 kHz units from sample count
					last_pts += ((u64)frame.data->nb_samples) * 90000 / frame.data->sample_rate;
					frame.pts = last_pts;

					s32 nbps = av_get_bytes_per_sample((AVSampleFormat)frame.data->format);
					// only planar float / planar s16 output is supported
					switch (frame.data->format)
					{
					case AV_SAMPLE_FMT_FLTP: break;
					case AV_SAMPLE_FMT_S16P: break;
					default:
					{
						fmt::throw_exception("Unsupported frame format(%d)" HERE, frame.data->format);
					}
					}
					frame.auAddr = task.au.addr;
					frame.auSize = task.au.size;
					frame.userdata = task.au.userdata;
					frame.size = frame.data->nb_samples * frame.data->channels * nbps;

					//LOG_NOTICE(HLE, "got audio frame (pts=0x%llx, nb_samples=%d, ch=%d, sample_rate=%d, nbps=%d)",
					//frame.pts, frame.data->nb_samples, frame.data->channels, frame.data->sample_rate, nbps);

					if (frames.push(frame, &is_closed))
					{
						// ownership moved into the queue
						frame.data = nullptr; // to prevent destruction
						cbFunc(*this, id, CELL_ADEC_MSG_TYPE_PCMOUT, CELL_OK, cbArg);
					}
				}
			}

			// report this access unit fully consumed
			cbFunc(*this, id, CELL_ADEC_MSG_TYPE_AUDONE, task.au.auInfo_addr, cbArg);
			break;
		}

		case adecClose:
		{
			break;
		}

		default:
		{
			fmt::throw_exception("Unknown task(%d)" HERE, (u32)task.type);
		}
		}
	}

	is_finished = true;
}
int video_thread(void *arg){ VideoState *is = (VideoState *) arg; AVPacket pkt1, *packet = &pkt1; int frameFinished; AVFrame *pFrame; AVFrame *pFrameRGB; double pts; int numBytes; uint8_t *buffer; // Allocate an AVFrame structure pFrameRGB = avcodec_alloc_frame(); if (pFrameRGB == NULL){ return -1; } // Determine required buffer size and allocate buffer numBytes = avpicture_get_size(PIX_FMT_RGB24, is->video_st->codec->width, is->video_st->codec->height); buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t)); // Assign appropriate parts of buffer to image planes in pFrameRGB // Note that pFrameRGB is an AVFrame, but AVFrame is a superset // of AVPicture avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB24, is->video_st->codec->width, is->video_st->codec->height); pFrame = avcodec_alloc_frame(); for (;;){ if (packet_queue_get(&is->videoq, packet, 1, is) < 0){ // means we quit getting packets break; } if (packet->data == f_pkt.data){ avcodec_flush_buffers(is->video_st->codec); continue; } pts = 0; // Save global pts to be stored in pFrame in first call global_video_pkt_pts = packet->pts; // Decode video frame //len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,packet); if (packet->dts == AV_NOPTS_VALUE && pFrame->opaque && *(uint64_t*) pFrame->opaque != AV_NOPTS_VALUE){ pts = *(uint64_t *) pFrame->opaque; }else if (packet->dts != AV_NOPTS_VALUE){ pts = packet->dts; }else{ pts = 0; } pts *= av_q2d(is->video_st->time_base); // Did we get a video frame? if (frameFinished){ pts = synchronize_video(is, pFrame,pts); if (queue_picture(is, pFrame,pFrameRGB, pts) < 0){ break; } } av_free_packet(packet); } av_free(pFrame); return 0; }
int get_frame_at_time(State **ps, int64_t timeUs, int option, AVPacket *pkt) { printf("get_frame_at_time\n"); int got_packet = 0; int64_t desired_frame_number = -1; State *state = *ps; Options opt = option; if (!state || !state->pFormatCtx || state->video_stream < 0) { return FAILURE; } if (timeUs != -1) { int stream_index = state->video_stream; int64_t seek_time = av_rescale_q(timeUs, AV_TIME_BASE_Q, state->pFormatCtx->streams[stream_index]->time_base); int64_t seek_stream_duration = state->pFormatCtx->streams[stream_index]->duration; int flags = 0; int ret = -1; // For some reason the seek_stream_duration is sometimes a negative value, // make sure to check that it is greater than 0 before adjusting the // seek_time if (seek_stream_duration > 0 && seek_time > seek_stream_duration) { seek_time = seek_stream_duration; } if (seek_time < 0) { return FAILURE; } if (opt == OPTION_CLOSEST) { desired_frame_number = seek_time; flags = AVSEEK_FLAG_BACKWARD; } else if (opt == OPTION_CLOSEST_SYNC) { flags = 0; } else if (opt == OPTION_NEXT_SYNC) { flags = 0; } else if (opt == OPTION_PREVIOUS_SYNC) { flags = AVSEEK_FLAG_BACKWARD; } ret = av_seek_frame(state->pFormatCtx, stream_index, seek_time, flags); if (ret < 0) { return FAILURE; } else { if (state->audio_stream >= 0) { avcodec_flush_buffers(state->audio_st->codec); } if (state->video_stream >= 0) { avcodec_flush_buffers(state->video_st->codec); } } } decode_frame(state, pkt, &got_packet, desired_frame_number); if (got_packet) { //const char *filename = "/Users/wseemann/Desktop/one.png"; //FILE *picture = fopen(filename, "wb"); //fwrite(pkt->data, pkt->size, 1, picture); //fclose(picture); } if (got_packet) { return SUCCESS; } else { return FAILURE; } }
// Copy Count decoded samples starting at sample Start into Buf.
//
// Samples are served from the decoded-block cache when possible; otherwise
// the source seeks (if the stream is seekable) and decodes forward until the
// requested range is cached.  The audio delay is applied by shifting Start,
// with any samples before the real beginning of the stream zero-filled.
//
// Throws FFMS_Exception on out-of-range requests, on unseekable streams when
// a backwards request arrives, or when seeking misbehaves.
void FFMS_AudioSource::GetAudio(void *Buf, int64_t Start, int64_t Count) {
	if (Start < 0 || Start + Count > AP.NumSamples || Count < 0)
		throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_INVALID_ARGUMENT,
			"Out of bounds audio samples requested");

	CacheBeginning();

	uint8_t *Dst = static_cast<uint8_t*>(Buf);

	// Apply audio delay (if any) and fill any samples before the start time with zero
	Start -= Delay;
	if (Start < 0) {
		size_t Bytes = static_cast<size_t>(BytesPerSample * FFMIN(-Start, Count));
		memset(Dst, 0, Bytes);

		Count += Start;
		// Entire request was before the start of the audio
		if (Count <= 0) return;

		Start = 0;
		Dst += Bytes;
	}

	CacheIterator it = Cache.begin();

	while (Count > 0) {
		// Find first useful cache block
		while (it != Cache.end() && it->Start + it->Samples <= Start) ++it;

		// Cache has the next block we want
		if (it != Cache.end() && it->Start <= Start) {
			// SrcOffset: skip samples at the head of the block that are
			// before Start; DstOffset is always 0 here since the block
			// covers Start (kept for symmetry with the general formula).
			int64_t SrcOffset = FFMAX(0, Start - it->Start);
			int64_t DstOffset = FFMAX(0, it->Start - Start);
			int64_t CopySamples = FFMIN(it->Samples - SrcOffset, Count - DstOffset);
			size_t Bytes = static_cast<size_t>(CopySamples * BytesPerSample);

			memcpy(Dst + DstOffset * BytesPerSample, &it->Data[SrcOffset * BytesPerSample], Bytes);
			Start += CopySamples;
			Count -= CopySamples;
			Dst += Bytes;
			++it;
		}
		// Decode another block
		else {
			// Going backwards requires a seekable stream
			if (Start < CurrentSample && SeekOffset == -1)
				throw FFMS_Exception(FFMS_ERROR_SEEKING, FFMS_ERROR_CODEC,
					"Audio stream is not seekable");

			// Seek if the target is behind us or far enough ahead that
			// decoding forward would be slower than seeking
			if (SeekOffset >= 0 && (Start < CurrentSample || Start > CurrentSample + DecodeFrame->nb_samples * 5)) {
				TFrameInfo f;
				f.SampleStart = Start;
				int NewPacketNumber = std::distance(
					Frames.begin(),
					std::lower_bound(Frames.begin(), Frames.end(), f, SampleStartComp));
				// Back off by the codec's seek preroll plus a safety
				// margin, then snap to the preceding keyframe
				NewPacketNumber = FFMAX(0, NewPacketNumber - SeekOffset - 15);
				while (NewPacketNumber > 0 && !Frames[NewPacketNumber].KeyFrame) --NewPacketNumber;

				// Only seek forward if it'll actually result in moving forward
				if (Start < CurrentSample || static_cast<size_t>(NewPacketNumber) > PacketNumber) {
					PacketNumber = NewPacketNumber;
					CurrentSample = -1;
					DecodeFrame.reset();
					avcodec_flush_buffers(CodecContext);
					Seek();
				}
			}

			// Decode until we hit the block we want
			if (PacketNumber >= Frames.size())
				throw FFMS_Exception(FFMS_ERROR_SEEKING, FFMS_ERROR_CODEC,
					"Seeking is severely broken");
			while (CurrentSample + DecodeFrame->nb_samples <= Start && PacketNumber < Frames.size())
				DecodeNextBlock(&it);
			if (CurrentSample > Start)
				throw FFMS_Exception(FFMS_ERROR_SEEKING, FFMS_ERROR_CODEC,
					"Seeking is severely broken");

			// The block we want is now in the cache immediate before it
			--it;
		}
	}
}
void COMXAudioCodecOMX::Reset() { if (m_pCodecContext) avcodec_flush_buffers(m_pCodecContext); m_bGotFrame = false; m_iBufferOutputUsed = 0; }
void lavc_conv_reset(struct lavc_conv *priv) { avcodec_flush_buffers(priv->avctx); }
int audio_decode_frame(VideoState *is, double *pts_ptr) { int len1, data_size = 0, n; AVPacket *pkt = &is->audio_pkt; double pts; for(;;) { while(is->audio_pkt_size > 0) { int got_frame = 0; len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt); if(len1 < 0) { /* if error, skip frame */ is->audio_pkt_size = 0; break; } if (got_frame) { if (is->audio_frame.format != AV_SAMPLE_FMT_S16) { data_size = decode_frame_from_packet(is, is->audio_frame); } else { data_size = av_samples_get_buffer_size ( NULL, is->audio_st->codec->channels, is->audio_frame.nb_samples, is->audio_st->codec->sample_fmt, 1 ); memcpy(is->audio_buf, is->audio_frame.data[0], data_size); } } is->audio_pkt_data += len1; is->audio_pkt_size -= len1; if(data_size <= 0) { /* No data yet, get more frames */ continue; } pts = is->audio_clock; *pts_ptr = pts; n = 2 * is->audio_st->codec->channels; is->audio_clock += (double)data_size / (double)(n * is->audio_st->codec->sample_rate); /* We have data, return it and come back for more later */ return data_size; } if(pkt->data) av_free_packet(pkt); if(is->quit) { return -1; } /* next packet */ if(packet_queue_get(is, &is->audioq, pkt, 1) < 0) { return -1; } if(pkt->data == is->flush_pkt.data) { avcodec_flush_buffers(is->audio_st->codec); continue; } is->audio_pkt_data = pkt->data; is->audio_pkt_size = pkt->size; /* if update, update the audio clock w/pts */ if(pkt->pts != AV_NOPTS_VALUE) { is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts; } } }