/**
 * Unlink the provided picture and ensure that the decoder
 * does not own it anymore.
 */
static void DpbUnlinkPicture( decoder_t *p_dec, picture_t *p_picture )
{
    picture_dpb_t *p = DpbFindPicture( p_dec, p_picture );

    /* Be defensive instead of asserting: the sibling implementation of this
     * function documents that libmpeg2 can hand us a picture that is unknown
     * or already unlinked, which would have tripped the former
     * assert( p && p->b_linked ) here. Handle it the same way. */
    if( !p || !p->b_linked )
    {
        msg_Err( p_dec, "DpbUnlinkPicture called on an invalid picture" );
        return;
    }

    /* Drop the decoder's reference */
    decoder_UnlinkPicture( p_dec, p->p_picture );
    p->b_linked = false;

    /* A picture never handed to the vout must be destroyed here */
    if( !p->b_displayed )
        decoder_DeletePicture( p_dec, p->p_picture );
    p->p_picture = NULL;
}
/**
 * Empty and reset the current DPB
 */
static void DpbClean( decoder_t *p_dec )
{
    decoder_sys_t *p_sys = p_dec->p_sys;

    for( int i_slot = 0; i_slot < DPB_COUNT; i_slot++ )
    {
        picture_dpb_t *p_entry = &p_sys->p_dpb[i_slot];

        if( p_entry->p_picture )
        {
            /* Give back the decoder reference if we still hold one */
            if( p_entry->b_linked )
                decoder_UnlinkPicture( p_dec, p_entry->p_picture );
            /* Pictures never sent to the vout must be destroyed here */
            if( !p_entry->b_displayed )
                decoder_DeletePicture( p_dec, p_entry->p_picture );
            p_entry->p_picture = NULL;
        }
    }
}
/**
 * Unlink the provided picture and ensure that the decoder
 * does not own it anymore.
 */
static void DpbUnlinkPicture( decoder_t *p_dec, picture_t *p_picture )
{
    picture_dpb_t *p = DpbFindPicture( p_dec, p_picture );

    /* XXX it is needed to workaround libmpeg2 bugs */
    if( !p || !p->b_linked )
    {
        msg_Err( p_dec, "DpbUnlinkPicture called on an invalid picture" );
        return;
    }

    /* The guard above already returned on !p || !p->b_linked, so the former
     * assert( p && p->b_linked ) at this point could never fire and has been
     * removed as dead code. */

    /* Drop the decoder's reference */
    decoder_UnlinkPicture( p_dec, p->p_picture );
    p->b_linked = false;

    /* A picture never handed to the vout must be destroyed here */
    if( !p->b_displayed )
        decoder_DeletePicture( p_dec, p->p_picture );
    p->p_picture = NULL;
}
/*****************************************************************************
 * DecodeVideo: Called to decode one or more frames
 *
 * Takes ownership of *pp_block (released or reallocated here); returns one
 * decoded picture per call, or NULL when nothing is ready.
 *****************************************************************************/
picture_t *DecodeVideo( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    AVCodecContext *p_context = p_sys->p_context;
    int b_drawpicture;
    int b_null_size = false;
    block_t *p_block;

    if( !pp_block || !*pp_block )
        return NULL;

    /* Late codec open: extradata may only arrive with the stream, so retry
     * initializing/opening the codec on each call until it succeeds. */
    if( !p_context->extradata_size && p_dec->fmt_in.i_extra )
    {
        ffmpeg_InitCodec( p_dec );
        if( p_sys->b_delayed_open )
        {
            if( ffmpeg_OpenCodec( p_dec ) )
                msg_Err( p_dec, "cannot open codec (%s)", p_sys->psz_namecodec );
        }
    }

    p_block = *pp_block;
    /* Codec still not open: we cannot decode anything */
    if( p_sys->b_delayed_open )
    {
        block_Release( p_block );
        return NULL;
    }

    if( p_block->i_flags & (BLOCK_FLAG_DISCONTINUITY|BLOCK_FLAG_CORRUPTED) )
    {
        p_sys->i_pts = VLC_TS_INVALID; /* To make sure we recover properly */

        p_sys->i_late_frames = 0;
        if( p_block->i_flags & BLOCK_FLAG_DISCONTINUITY )
            avcodec_flush_buffers( p_context );

        block_Release( p_block );
        return NULL;
    }

    if( p_block->i_flags & BLOCK_FLAG_PREROLL )
    {
        /* Do not care about late frames when prerolling
         * TODO avoid decoding of non reference frame
         * (ie all B except for H264 where it depends only on nal_ref_idc) */
        p_sys->i_late_frames = 0;
    }

    /* Over 5 seconds consistently late and input is not paced: drop blocks */
    if( !p_dec->b_pace_control && (p_sys->i_late_frames > 0) &&
        (mdate() - p_sys->i_late_frames_start > INT64_C(5000000)) )
    {
        if( p_sys->i_pts > VLC_TS_INVALID )
        {
            msg_Err( p_dec, "more than 5 seconds of late video -> "
                     "dropping frame (computer too slow ?)" );
            p_sys->i_pts = VLC_TS_INVALID; /* To make sure we recover properly */
        }
        block_Release( p_block );
        p_sys->i_late_frames--;
        return NULL;
    }

    /* A good idea could be to decode all I pictures and see for the other */
    if( !p_dec->b_pace_control &&
        p_sys->b_hurry_up &&
        (p_sys->i_late_frames > 4) )
    {
        b_drawpicture = 0;
        if( p_sys->i_late_frames < 12 )
        {
            /* Skip at least B frames while catching up */
            p_context->skip_frame =
                    (p_sys->i_skip_frame <= AVDISCARD_BIDIR) ?
                    AVDISCARD_BIDIR : p_sys->i_skip_frame;
        }
        else
        {
            /* picture too late, won't decode
             * but break picture until a new I, and for mpeg4 ...*/
            p_sys->i_late_frames--; /* needed else it will never be decrease */
            block_Release( p_block );
            return NULL;
        }
    }
    else
    {
        if( p_sys->b_hurry_up )
            p_context->skip_frame = p_sys->i_skip_frame;
        /* Preroll frames are decoded but never drawn */
        if( !(p_block->i_flags & BLOCK_FLAG_PREROLL) )
            b_drawpicture = 1;
        else
            b_drawpicture = 0;
    }

    if( p_context->width <= 0 || p_context->height <= 0 )
    {
        /* Dimensions not known yet: remember so the first frame that
         * establishes them gets reparsed below instead of dropped */
        if( p_sys->b_hurry_up )
            p_context->skip_frame = p_sys->i_skip_frame;
        b_null_size = true;
    }
    else if( !b_drawpicture )
    {
        /* It creates broken picture
         * FIXME either our parser or ffmpeg is broken */
#if 0
        if( p_sys->b_hurry_up )
            p_context->skip_frame = __MAX( p_context->skip_frame,
                                                  AVDISCARD_NONREF );
#endif
    }

    /*
     * Do the actual decoding now
     */

    /* Don't forget that ffmpeg requires a little more bytes
     * that the real frame size: pad the block with zeroed bytes */
    if( p_block->i_buffer > 0 )
    {
        p_sys->b_flush = ( p_block->i_flags & BLOCK_FLAG_END_OF_SEQUENCE ) != 0;

        p_block = block_Realloc( p_block, 0,
                            p_block->i_buffer + FF_INPUT_BUFFER_PADDING_SIZE );
        if( !p_block )
            return NULL;
        p_block->i_buffer -= FF_INPUT_BUFFER_PADDING_SIZE;
        *pp_block = p_block;
        memset( p_block->p_buffer + p_block->i_buffer, 0,
                FF_INPUT_BUFFER_PADDING_SIZE );
    }

    while( p_block->i_buffer > 0 || p_sys->b_flush )
    {
        int i_used, b_gotpicture;
        picture_t *p_pic;

        /* Set the PTS/DTS in the context reordered_opaque field; the low bit
         * tags whether the stored timestamp is a PTS (0) or a DTS (1). */
        if( p_block->i_pts > VLC_TS_INVALID )
            p_context->reordered_opaque = (p_block->i_pts << 1) | 0;
        else if( p_block->i_dts > VLC_TS_INVALID )
            p_context->reordered_opaque = (p_block->i_dts << 1) | 1;
        else
            p_context->reordered_opaque = INT64_MIN;
        p_sys->p_ff_pic->reordered_opaque = p_context->reordered_opaque;

        /* Make sure we don't reuse the same timestamps twice */
        p_block->i_pts =
        p_block->i_dts = VLC_TS_INVALID;

        post_mt( p_sys );
        /* NULL input when flushing drains the codec's delayed frames */
        i_used = avcodec_decode_video( p_context, p_sys->p_ff_pic,
                                       &b_gotpicture,
                                       p_block->i_buffer <= 0 && p_sys->b_flush ?
                                       NULL : p_block->p_buffer,
                                       p_block->i_buffer );
        if( b_null_size && !p_sys->b_flush &&
            p_context->width > 0 && p_context->height > 0 )
        {
            /* Reparse it to not drop the I frame */
            b_null_size = false;
            if( p_sys->b_hurry_up )
                p_context->skip_frame = p_sys->i_skip_frame;
            i_used = avcodec_decode_video( p_context, p_sys->p_ff_pic,
                                           &b_gotpicture, p_block->p_buffer,
                                           p_block->i_buffer );
        }
        wait_mt( p_sys );

        if( p_sys->b_flush )
            p_sys->b_first_frame = true;

        if( p_block->i_buffer <= 0 )
            p_sys->b_flush = false;

        if( i_used < 0 )
        {
            if( b_drawpicture )
                msg_Warn( p_dec, "cannot decode one frame (%zu bytes)",
                          p_block->i_buffer );
            block_Release( p_block );
            return NULL;
        }
        else if( i_used > p_block->i_buffer ||
                 p_context->thread_count > 1 )
        {
            /* With frame threading the reported byte count is unreliable;
             * treat the whole buffer as consumed */
            i_used = p_block->i_buffer;
        }

        /* Consumed bytes */
        p_block->i_buffer -= i_used;
        p_block->p_buffer += i_used;

        /* Nothing to display */
        if( !b_gotpicture )
        {
            if( i_used == 0 ) break;
            continue;
        }

        /* Compute the PTS from the round-tripped reordered_opaque value */
        mtime_t i_pts = VLC_TS_INVALID;
        if( p_sys->p_ff_pic->reordered_opaque != INT64_MIN )
        {
            mtime_t i_ts = p_sys->p_ff_pic->reordered_opaque >> 1;
            bool b_dts = p_sys->p_ff_pic->reordered_opaque & 1;
            if( b_dts )
            {
                /* A DTS can only stand in for the PTS when frame
                 * reordering cannot occur */
                if( !p_context->has_b_frames ||
                    !p_sys->b_has_b_frames ||
                    !p_sys->p_ff_pic->reference ||
                    p_sys->i_pts <= VLC_TS_INVALID )
                    i_pts = i_ts;
            }
            else
            {
                i_pts = i_ts;
            }
        }
        if( i_pts <= VLC_TS_INVALID )
            i_pts = p_sys->i_pts;

        /* Interpolate the next PTS */
        if( i_pts > VLC_TS_INVALID )
            p_sys->i_pts = i_pts;
        if( p_sys->i_pts > VLC_TS_INVALID )
        {
            /* interpolate the next PTS from the frame rate (input format
             * first, codec time base as fallback) */
            if( p_dec->fmt_in.video.i_frame_rate > 0 &&
                p_dec->fmt_in.video.i_frame_rate_base > 0 )
            {
                p_sys->i_pts += INT64_C(1000000) *
                    (2 + p_sys->p_ff_pic->repeat_pict) *
                    p_dec->fmt_in.video.i_frame_rate_base /
                    (2 * p_dec->fmt_in.video.i_frame_rate);
            }
            else if( p_context->time_base.den > 0 )
            {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(52,20,0)
                int i_tick = p_context->ticks_per_frame;
                if( i_tick <= 0 )
                    i_tick = 1;
#else
                int i_tick = 1;
#endif
                p_sys->i_pts += INT64_C(1000000) *
                    (2 + p_sys->p_ff_pic->repeat_pict) *
                    i_tick * p_context->time_base.num /
                    (2 * p_context->time_base.den);
            }
        }

        /* Update frame late count (except when doing preroll) */
        mtime_t i_display_date = 0;
        if( !(p_block->i_flags & BLOCK_FLAG_PREROLL) )
            i_display_date = decoder_GetDisplayDate( p_dec, i_pts );

        if( i_display_date > 0 && i_display_date <= mdate() )
        {
            p_sys->i_late_frames++;
            if( p_sys->i_late_frames == 1 )
                p_sys->i_late_frames_start = mdate();
        }
        else
        {
            p_sys->i_late_frames = 0;
        }

        if( !b_drawpicture || ( !p_sys->p_va && !p_sys->p_ff_pic->linesize[0] ) )
            continue;

        if( !p_sys->p_ff_pic->opaque )
        {
            /* Get a new picture */
            p_pic = ffmpeg_NewPictBuf( p_dec, p_context );
            if( !p_pic )
            {
                block_Release( p_block );
                return NULL;
            }

            /* Fill p_picture_t from AVVideoFrame and do chroma conversion
             * if needed */
            ffmpeg_CopyPicture( p_dec, p_pic, p_sys->p_ff_pic );
        }
        else
        {
            /* Direct rendering: the AVFrame already wraps a VLC picture */
            p_pic = (picture_t *)p_sys->p_ff_pic->opaque;
            decoder_LinkPicture( p_dec, p_pic );
        }

        /* Sanity check (seems to be needed for some streams) */
        if( p_sys->p_ff_pic->pict_type == FF_B_TYPE )
        {
            p_sys->b_has_b_frames = true;
        }

        if( !p_dec->fmt_in.video.i_sar_num || !p_dec->fmt_in.video.i_sar_den )
        {
            /* Fetch again the aspect ratio in case it changed */
            p_dec->fmt_out.video.i_sar_num = p_context->sample_aspect_ratio.num;
            p_dec->fmt_out.video.i_sar_den = p_context->sample_aspect_ratio.den;

            if( !p_dec->fmt_out.video.i_sar_num ||
                !p_dec->fmt_out.video.i_sar_den )
            {
                p_dec->fmt_out.video.i_sar_num = 1;
                p_dec->fmt_out.video.i_sar_den = 1;
            }
        }

        /* Send decoded frame to vout */
        if( i_pts > VLC_TS_INVALID)
        {
            p_pic->date = i_pts;

            if( p_sys->b_first_frame )
            {
                /* Hack to force display of still pictures */
                p_sys->b_first_frame = false;
                p_pic->b_force = true;
            }

            p_pic->i_nb_fields = 2 + p_sys->p_ff_pic->repeat_pict;
            p_pic->b_progressive = !p_sys->p_ff_pic->interlaced_frame;
            p_pic->b_top_field_first = p_sys->p_ff_pic->top_field_first;

            /* Copy the quantization table for deblocking/statistics */
            p_pic->i_qstride = p_sys->p_ff_pic->qstride;
            int i_mb_h = ( p_pic->format.i_height + 15 ) / 16;
            /* NOTE(review): malloc result is not checked before the memcpy
             * below — an allocation failure here would dereference NULL */
            p_pic->p_q = malloc( p_pic->i_qstride * i_mb_h );
            memcpy( p_pic->p_q, p_sys->p_ff_pic->qscale_table,
                    p_pic->i_qstride * i_mb_h );
            switch( p_sys->p_ff_pic->qscale_type )
            {
            case FF_QSCALE_TYPE_MPEG1:
                p_pic->i_qtype = QTYPE_MPEG1;
                break;
            case FF_QSCALE_TYPE_MPEG2:
                p_pic->i_qtype = QTYPE_MPEG2;
                break;
            case FF_QSCALE_TYPE_H264:
                p_pic->i_qtype = QTYPE_H264;
                break;
            }

            return p_pic;
        }
        else
        {
            decoder_DeletePicture( p_dec, p_pic );
        }
    }
    /* NOTE(review): this chunk appears truncated — the function's trailing
     * cleanup and closing brace are not present in the visible source */
/**
 * Direct-rendering buffer allocator (AVCodecContext.get_buffer2 style):
 * wraps the planes of a VLC picture in AVBufferRefs so libavcodec decodes
 * straight into vout-owned memory.
 *
 * Returns the picture on success, NULL when direct rendering cannot be used
 * for the current format/alignment (the caller is expected to fall back to
 * avcodec's own allocator).
 */
static picture_t *lavc_dr_GetFrame(struct AVCodecContext *ctx,
                                   AVFrame *frame, int flags)
{
    decoder_t *dec = (decoder_t *)ctx->opaque;
    decoder_sys_t *sys = dec->p_sys;

    if (GetVlcChroma(&dec->fmt_out.video, ctx->pix_fmt) != VLC_SUCCESS)
        return NULL;
    dec->fmt_out.i_codec = dec->fmt_out.video.i_chroma;
    /* Paletted output is not handled by this direct-rendering path */
    if (ctx->pix_fmt == PIX_FMT_PAL8)
        return NULL;

    /* Ask libavcodec for the padded dimensions and per-plane alignment
     * requirements of this codec/pixel format */
    int width = frame->width;
    int height = frame->height;
    int aligns[AV_NUM_DATA_POINTERS];
    avcodec_align_dimensions2(ctx, &width, &height, aligns);

    picture_t *pic = ffmpeg_NewPictBuf(dec, ctx);
    if (pic == NULL)
        return NULL;

    /* Check that the picture is suitable for libavcodec */
    if (pic->p[0].i_pitch < width * pic->p[0].i_pixel_pitch)
    {
        if (sys->i_direct_rendering_used != 0)
            msg_Dbg(dec, "plane 0: pitch too small (%d/%d*%d)",
                    pic->p[0].i_pitch, width, pic->p[0].i_pixel_pitch);
        goto no_dr;
    }

    if (pic->p[0].i_lines < height)
    {
        if (sys->i_direct_rendering_used != 0)
            msg_Dbg(dec, "plane 0: lines too few (%d/%d)",
                    pic->p[0].i_lines, height);
        goto no_dr;
    }

    /* Every plane must satisfy the codec's pitch and address alignment */
    for (int i = 0; i < pic->i_planes; i++)
    {
        if (pic->p[i].i_pitch % aligns[i])
        {
            if (sys->i_direct_rendering_used != 0)
                msg_Dbg(dec, "plane %d: pitch not aligned (%d%%%d)",
                        i, pic->p[i].i_pitch, aligns[i]);
            goto no_dr;
        }
        if (((uintptr_t)pic->p[i].p_pixels) % aligns[i])
        {
            if (sys->i_direct_rendering_used != 0)
                msg_Warn(dec, "plane %d not aligned", i);
            goto no_dr;
        }
    }

    /* Allocate buffer references: one AVBufferRef per plane, each holding
     * its own decoder link on the picture, released via
     * lavc_dr_ReleaseFrame when avcodec frees the buffer */
    for (int i = 0; i < pic->i_planes; i++)
    {
        lavc_pic_ref_t *ref = malloc(sizeof (*ref));
        if (ref == NULL)
            goto error;
        ref->decoder = dec;
        ref->picture = pic;
        decoder_LinkPicture(dec, pic);

        uint8_t *data = pic->p[i].p_pixels;
        int size = pic->p[i].i_pitch * pic->p[i].i_lines;
        frame->buf[i] = av_buffer_create(data, size, lavc_dr_ReleaseFrame,
                                         ref, 0);
        if (unlikely(frame->buf[i] == NULL))
        {
            /* av_buffer_create() failed: undo this plane's link manually */
            lavc_dr_ReleaseFrame(ref, data);
            goto error;
        }
    }
    /* Drop the creation reference: the per-plane links keep the picture
     * alive for as long as avcodec uses it */
    decoder_UnlinkPicture(dec, pic);
    (void) flags;
    return pic;
error:
    /* Unref the planes wrapped so far; each unref triggers
     * lavc_dr_ReleaseFrame and drops one picture link */
    for (unsigned i = 0; frame->buf[i] != NULL; i++)
        av_buffer_unref(&frame->buf[i]);
no_dr:
    decoder_DeletePicture(dec, pic);
    return NULL;
}
/*****************************************************************************
 * DecodeVideo: Called to decode one or more frames
 *
 * pp_block may point to NULL: a NULL block drains delayed frames from
 * codecs with CODEC_CAP_DELAY. Takes ownership of *pp_block; returns one
 * decoded picture per call, or NULL when nothing is ready.
 *****************************************************************************/
picture_t *DecodeVideo( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    AVCodecContext *p_context = p_sys->p_context;
    int b_drawpicture;
    block_t *p_block;

    if( !pp_block )
        return NULL;

    /* Late codec open: extradata may only arrive with the stream, so retry
     * initializing/opening the codec on each call until it succeeds. */
    if( !p_context->extradata_size && p_dec->fmt_in.i_extra )
    {
        ffmpeg_InitCodec( p_dec );
        if( p_sys->b_delayed_open )
        {
            if( ffmpeg_OpenCodec( p_dec ) )
                msg_Err( p_dec, "cannot open codec (%s)", p_sys->psz_namecodec );
        }
    }

    p_block = *pp_block;
    /* A NULL block is only meaningful for codecs that buffer frames */
    if(!p_block && !(p_sys->p_codec->capabilities & CODEC_CAP_DELAY) )
        return NULL;

    /* Codec still not open: we cannot decode anything */
    if( p_sys->b_delayed_open )
    {
        if( p_block )
            block_Release( p_block );
        return NULL;
    }

    if( p_block)
    {
        if( p_block->i_flags & (BLOCK_FLAG_DISCONTINUITY|BLOCK_FLAG_CORRUPTED) )
        {
            p_sys->i_pts = VLC_TS_INVALID; /* To make sure we recover properly */
            p_sys->i_late_frames = 0;

            /* Flush must not race the decoding threads */
            post_mt( p_sys );
            if( p_block->i_flags & BLOCK_FLAG_DISCONTINUITY )
                avcodec_flush_buffers( p_context );
            wait_mt( p_sys );

            block_Release( p_block );
            return NULL;
        }

        if( p_block->i_flags & BLOCK_FLAG_PREROLL )
        {
            /* Do not care about late frames when prerolling
             * TODO avoid decoding of non reference frame
             * (ie all B except for H264 where it depends only on nal_ref_idc) */
            p_sys->i_late_frames = 0;
        }
    }

    /* Over 5 seconds consistently late and input is not paced: drop blocks */
    if( !p_dec->b_pace_control && (p_sys->i_late_frames > 0) &&
        (mdate() - p_sys->i_late_frames_start > INT64_C(5000000)) )
    {
        if( p_sys->i_pts > VLC_TS_INVALID )
        {
            p_sys->i_pts = VLC_TS_INVALID; /* To make sure we recover properly */
        }
        if( p_block )
            block_Release( p_block );
        p_sys->i_late_frames--;
        msg_Err( p_dec, "more than 5 seconds of late video -> "
                 "dropping frame (computer too slow ?)" );
        return NULL;
    }

    /* A good idea could be to decode all I pictures and see for the other */
    if( !p_dec->b_pace_control &&
        p_sys->b_hurry_up &&
        (p_sys->i_late_frames > 4) )
    {
        b_drawpicture = 0;
        if( p_sys->i_late_frames < 12 )
        {
            /* Skip non-reference frames while catching up */
            p_context->skip_frame =
                    (p_sys->i_skip_frame <= AVDISCARD_NONREF) ?
                    AVDISCARD_NONREF : p_sys->i_skip_frame;
        }
        else
        {
            /* picture too late, won't decode
             * but break picture until a new I, and for mpeg4 ...*/
            p_sys->i_late_frames--; /* needed else it will never be decrease */
            if( p_block )
                block_Release( p_block );
            msg_Warn( p_dec, "More than 4 late frames, dropping frame" );
            return NULL;
        }
    }
    else
    {
        if( p_sys->b_hurry_up )
            p_context->skip_frame = p_sys->i_skip_frame;
        /* Preroll frames are decoded but never drawn */
        if( !p_block || !(p_block->i_flags & BLOCK_FLAG_PREROLL) )
            b_drawpicture = 1;
        else
            b_drawpicture = 0;
    }

    if( p_context->width <= 0 || p_context->height <= 0 )
    {
        if( p_sys->b_hurry_up )
            p_context->skip_frame = p_sys->i_skip_frame;
    }
    else if( !b_drawpicture )
    {
        /* It creates broken picture
         * FIXME either our parser or ffmpeg is broken */
#if 0
        if( p_sys->b_hurry_up )
            p_context->skip_frame = __MAX( p_context->skip_frame,
                                                  AVDISCARD_NONREF );
#endif
    }

    /*
     * Do the actual decoding now
     */

    /* Don't forget that libavcodec requires a little more bytes
     * that the real frame size: pad the block with zeroed bytes */
    if( p_block && p_block->i_buffer > 0 )
    {
        p_sys->b_flush = ( p_block->i_flags & BLOCK_FLAG_END_OF_SEQUENCE ) != 0;

        p_block = block_Realloc( p_block, 0,
                            p_block->i_buffer + FF_INPUT_BUFFER_PADDING_SIZE );
        if( !p_block )
            return NULL;
        p_block->i_buffer -= FF_INPUT_BUFFER_PADDING_SIZE;
        *pp_block = p_block;
        memset( p_block->p_buffer + p_block->i_buffer, 0,
                FF_INPUT_BUFFER_PADDING_SIZE );
    }

    while( !p_block || p_block->i_buffer > 0 || p_sys->b_flush )
    {
        int i_used, b_gotpicture;
        picture_t *p_pic;
        AVPacket pkt;

        post_mt( p_sys );

        av_init_packet( &pkt );
        if( p_block )
        {
            pkt.data = p_block->p_buffer;
            pkt.size = p_block->i_buffer;
            pkt.pts = p_block->i_pts;
            pkt.dts = p_block->i_dts;
        }
        else
        {
            /* Return delayed frames if codec has CODEC_CAP_DELAY */
            pkt.data = NULL;
            pkt.size = 0;
        }

#if LIBAVCODEC_VERSION_MAJOR >= 54
        /* Attach the palette once as packet side data.
         * NOTE(review): p_dec->fmt_in.video.p_palette is dereferenced
         * without a visible NULL check here — presumably palette_sent is
         * initialized true for non-paletted codecs; verify at the
         * initialization site. */
        if( !p_sys->palette_sent )
        {
            uint8_t *pal = av_packet_new_side_data(&pkt, AV_PKT_DATA_PALETTE,
                                                   AVPALETTE_SIZE);
            if (pal) {
                memcpy(pal, p_dec->fmt_in.video.p_palette->palette,
                       AVPALETTE_SIZE);
                p_sys->palette_sent = true;
            }
        }
#endif

        /* Make sure we don't reuse the same timestamps twice */
        if( p_block )
        {
            p_block->i_pts =
            p_block->i_dts = VLC_TS_INVALID;
        }

        i_used = avcodec_decode_video2( p_context, p_sys->p_ff_pic,
                                        &b_gotpicture, &pkt );
        wait_mt( p_sys );

        if( p_sys->b_flush )
            p_sys->b_first_frame = true;

        if( p_block )
        {
            if( p_block->i_buffer <= 0 )
                p_sys->b_flush = false;

            if( i_used < 0 )
            {
                if( b_drawpicture )
                    msg_Warn( p_dec, "cannot decode one frame (%zu bytes)",
                              p_block->i_buffer );
                block_Release( p_block );
                return NULL;
            }
            else if( (unsigned)i_used > p_block->i_buffer ||
                     p_context->thread_count > 1 )
            {
                /* With frame threading the reported byte count is
                 * unreliable; treat the whole buffer as consumed */
                i_used = p_block->i_buffer;
            }

            /* Consumed bytes */
            p_block->i_buffer -= i_used;
            p_block->p_buffer += i_used;
        }

        /* Nothing to display */
        if( !b_gotpicture )
        {
            if( i_used == 0 ) break;
            continue;
        }

        /* Sanity check (seems to be needed for some streams) */
        if( p_sys->p_ff_pic->pict_type == AV_PICTURE_TYPE_B)
        {
            p_sys->b_has_b_frames = true;
        }

        /* Compute the PTS: frame PTS, falling back to DTS, then to the
         * last interpolated value */
        mtime_t i_pts = p_sys->p_ff_pic->pkt_pts;
        if (i_pts <= VLC_TS_INVALID)
            i_pts = p_sys->p_ff_pic->pkt_dts;

        if( i_pts <= VLC_TS_INVALID )
            i_pts = p_sys->i_pts;

        /* Interpolate the next PTS */
        if( i_pts > VLC_TS_INVALID )
            p_sys->i_pts = i_pts;
        if( p_sys->i_pts > VLC_TS_INVALID )
        {
            /* interpolate the next PTS from the frame rate (input format
             * first, codec time base as fallback) */
            if( p_dec->fmt_in.video.i_frame_rate > 0 &&
                p_dec->fmt_in.video.i_frame_rate_base > 0 )
            {
                p_sys->i_pts += INT64_C(1000000) *
                    (2 + p_sys->p_ff_pic->repeat_pict) *
                    p_dec->fmt_in.video.i_frame_rate_base /
                    (2 * p_dec->fmt_in.video.i_frame_rate);
            }
            else if( p_context->time_base.den > 0 )
            {
                int i_tick = p_context->ticks_per_frame;
                if( i_tick <= 0 )
                    i_tick = 1;

                p_sys->i_pts += INT64_C(1000000) *
                    (2 + p_sys->p_ff_pic->repeat_pict) *
                    i_tick * p_context->time_base.num /
                    (2 * p_context->time_base.den);
            }
        }

        /* Update frame late count (except when doing preroll) */
        mtime_t i_display_date = 0;
        if( !p_block || !(p_block->i_flags & BLOCK_FLAG_PREROLL) )
            i_display_date = decoder_GetDisplayDate( p_dec, i_pts );

        if( i_display_date > 0 && i_display_date <= mdate() )
        {
            p_sys->i_late_frames++;
            if( p_sys->i_late_frames == 1 )
                p_sys->i_late_frames_start = mdate();
        }
        else
        {
            p_sys->i_late_frames = 0;
        }

        if( !b_drawpicture || ( !p_sys->p_va && !p_sys->p_ff_pic->linesize[0] ) )
            continue;

        if( p_sys->p_va != NULL || p_sys->p_ff_pic->opaque == NULL )
        {
            /* Get a new picture */
            p_pic = ffmpeg_NewPictBuf( p_dec, p_context );
            if( !p_pic )
            {
                if( p_block )
                    block_Release( p_block );
                return NULL;
            }

            /* Fill p_picture_t from AVVideoFrame and do chroma conversion
             * if needed */
            ffmpeg_CopyPicture( p_dec, p_pic, p_sys->p_ff_pic );
        }
        else
        {
            /* Direct rendering: the AVFrame already wraps a VLC picture */
            p_pic = (picture_t *)p_sys->p_ff_pic->opaque;
            decoder_LinkPicture( p_dec, p_pic );
        }

        if( !p_dec->fmt_in.video.i_sar_num || !p_dec->fmt_in.video.i_sar_den )
        {
            /* Fetch again the aspect ratio in case it changed */
            p_dec->fmt_out.video.i_sar_num = p_context->sample_aspect_ratio.num;
            p_dec->fmt_out.video.i_sar_den = p_context->sample_aspect_ratio.den;

            if( !p_dec->fmt_out.video.i_sar_num ||
                !p_dec->fmt_out.video.i_sar_den )
            {
                p_dec->fmt_out.video.i_sar_num = 1;
                p_dec->fmt_out.video.i_sar_den = 1;
            }
        }

        /* Send decoded frame to vout */
        if( i_pts > VLC_TS_INVALID)
        {
            p_pic->date = i_pts;

            if( p_sys->b_first_frame )
            {
                /* Hack to force display of still pictures */
                p_sys->b_first_frame = false;
                p_pic->b_force = true;
            }

            p_pic->i_nb_fields = 2 + p_sys->p_ff_pic->repeat_pict;
            p_pic->b_progressive = !p_sys->p_ff_pic->interlaced_frame;
            p_pic->b_top_field_first = p_sys->p_ff_pic->top_field_first;

            return p_pic;
        }
        else
        {
            decoder_DeletePicture( p_dec, p_pic );
        }
    }

    if( p_block )
        block_Release( p_block );
    return NULL;
}
static picture_t *ffmpeg_dr_GetFrameBuf(struct AVCodecContext *p_context) { decoder_t *p_dec = (decoder_t *)p_context->opaque; decoder_sys_t *p_sys = p_dec->p_sys; int i_width = p_context->width; int i_height = p_context->height; avcodec_align_dimensions( p_context, &i_width, &i_height ); picture_t *p_pic = NULL; if (GetVlcChroma(&p_dec->fmt_out.video, p_context->pix_fmt) != VLC_SUCCESS) goto no_dr; if (p_context->pix_fmt == PIX_FMT_PAL8) goto no_dr; p_dec->fmt_out.i_codec = p_dec->fmt_out.video.i_chroma; p_pic = ffmpeg_NewPictBuf( p_dec, p_context ); if( !p_pic ) goto no_dr; if( p_pic->p[0].i_pitch / p_pic->p[0].i_pixel_pitch < i_width || p_pic->p[0].i_lines < i_height ) goto no_dr; for( int i = 0; i < p_pic->i_planes; i++ ) { unsigned i_align; switch( p_sys->i_codec_id ) { case AV_CODEC_ID_SVQ1: case AV_CODEC_ID_VP5: case AV_CODEC_ID_VP6: case AV_CODEC_ID_VP6F: case AV_CODEC_ID_VP6A: i_align = 16; break; default: i_align = i == 0 ? 16 : 8; break; } if( p_pic->p[i].i_pitch % i_align ) goto no_dr; if( (intptr_t)p_pic->p[i].p_pixels % i_align ) goto no_dr; } if( p_context->pix_fmt == PIX_FMT_YUV422P ) { if( 2 * p_pic->p[1].i_pitch != p_pic->p[0].i_pitch || 2 * p_pic->p[2].i_pitch != p_pic->p[0].i_pitch ) goto no_dr; } return p_pic; no_dr: if (p_pic) decoder_DeletePicture( p_dec, p_pic ); return NULL; }
/**
 * AVCodecContext.get_buffer callback: hand libavcodec a frame buffer.
 *
 * Three paths: hardware acceleration (vlc_va), direct rendering into a
 * vout picture when the format/alignment allows it, or libavcodec's
 * default allocator as fallback. Returns 0 on success, -1 on error.
 */
static int ffmpeg_GetFrameBuf( struct AVCodecContext *p_context,
                               AVFrame *p_ff_pic )
{
    decoder_t *p_dec = (decoder_t *)p_context->opaque;
    decoder_sys_t *p_sys = p_dec->p_sys;
    picture_t *p_pic;

    /* Set picture PTS */
    ffmpeg_SetFrameBufferPts( p_dec, p_ff_pic );

    /* */
    p_ff_pic->opaque = NULL;

    if( p_sys->p_va )
    {
#ifdef HAVE_AVCODEC_VA
        /* hwaccel_context is not present in old fffmpeg version */
        if( vlc_va_Setup( p_sys->p_va,
                          &p_sys->p_context->hwaccel_context,
                          &p_dec->fmt_out.video.i_chroma,
                          p_sys->p_context->width, p_sys->p_context->height ) )
        {
            msg_Err( p_dec, "vlc_va_Setup failed" );
            return -1;
        }
#else
        assert(0);
#endif

        /* */
        p_ff_pic->type = FF_BUFFER_TYPE_USER;
        /* FIXME what is that, should give good value */
        p_ff_pic->age = 256*256*256*64; // FIXME FIXME from ffmpeg

        if( vlc_va_Get( p_sys->p_va, p_ff_pic ) )
        {
            msg_Err( p_dec, "VaGrabSurface failed" );
            return -1;
        }
        return 0;
    }
    else if( !p_sys->b_direct_rendering )
    {
        /* Not much to do in indirect rendering mode. */
        return avcodec_default_get_buffer( p_context, p_ff_pic );
    }

    /* Some codecs set pix_fmt only after the 1st frame has been decoded,
     * so we need to check for direct rendering again. */

    int i_width = p_sys->p_context->width;
    int i_height = p_sys->p_context->height;
    avcodec_align_dimensions( p_sys->p_context, &i_width, &i_height );

    if( GetVlcChroma( &p_dec->fmt_out.video, p_context->pix_fmt ) != VLC_SUCCESS ||
        p_context->pix_fmt == PIX_FMT_PAL8 )
        goto no_dr;

    p_dec->fmt_out.i_codec = p_dec->fmt_out.video.i_chroma;

    /* Get a new picture */
    p_pic = ffmpeg_NewPictBuf( p_dec, p_sys->p_context );
    if( !p_pic )
        goto no_dr;

    /* Validate the picture against the codec's size and alignment needs */
    bool b_compatible = true;
    if( p_pic->p[0].i_pitch / p_pic->p[0].i_pixel_pitch < i_width ||
        p_pic->p[0].i_lines < i_height )
        b_compatible = false;
    for( int i = 0; i < p_pic->i_planes && b_compatible; i++ )
    {
        unsigned i_align;
        /* A few codecs require 16-byte alignment on every plane */
        switch( p_sys->i_codec_id )
        {
        case CODEC_ID_SVQ1:
        case CODEC_ID_VP5:
        case CODEC_ID_VP6:
        case CODEC_ID_VP6F:
        case CODEC_ID_VP6A:
            i_align = 16;
            break;
        default:
            i_align = i == 0 ? 16 : 8;
            break;
        }
        if( p_pic->p[i].i_pitch % i_align )
            b_compatible = false;
        if( (intptr_t)p_pic->p[i].p_pixels % i_align )
            b_compatible = false;
    }
    /* 4:2:2 planar: chroma pitches must be exactly half of luma */
    if( p_context->pix_fmt == PIX_FMT_YUV422P && b_compatible )
    {
        if( 2 * p_pic->p[1].i_pitch != p_pic->p[0].i_pitch ||
            2 * p_pic->p[2].i_pitch != p_pic->p[0].i_pitch )
            b_compatible = false;
    }
    if( !b_compatible )
    {
        decoder_DeletePicture( p_dec, p_pic );
        goto no_dr;
    }

    if( p_sys->i_direct_rendering_used != 1 )
    {
        msg_Dbg( p_dec, "using direct rendering" );
        p_sys->i_direct_rendering_used = 1;
    }

    /* Wire the AVFrame directly onto the vout picture planes */
    p_sys->p_context->draw_horiz_band = NULL;
    p_ff_pic->opaque = (void*)p_pic;
    p_ff_pic->type = FF_BUFFER_TYPE_USER;
    p_ff_pic->data[0] = p_pic->p[0].p_pixels;
    p_ff_pic->data[1] = p_pic->p[1].p_pixels;
    p_ff_pic->data[2] = p_pic->p[2].p_pixels;
    p_ff_pic->data[3] = NULL; /* alpha channel but I'm not sure */

    p_ff_pic->linesize[0] = p_pic->p[0].i_pitch;
    p_ff_pic->linesize[1] = p_pic->p[1].i_pitch;
    p_ff_pic->linesize[2] = p_pic->p[2].i_pitch;
    p_ff_pic->linesize[3] = 0;

    /* Keep the picture alive while avcodec holds it */
    decoder_LinkPicture( p_dec, p_pic );

    /* FIXME what is that, should give good value */
    p_ff_pic->age = 256*256*256*64; // FIXME FIXME from ffmpeg

    return 0;

no_dr:
    if( p_sys->i_direct_rendering_used != 0 )
    {
        msg_Warn( p_dec, "disabling direct rendering" );
        p_sys->i_direct_rendering_used = 0;
    }
    return avcodec_default_get_buffer( p_context, p_ff_pic );
}
/****************************************************************************
 * DecodeBlock: the whole thing
 *
 * Feeds the input block to the CrystalHD hardware decoder and polls it for
 * a finished picture. Returns one picture per call or NULL.
 ****************************************************************************/
static picture_t *DecodeBlock( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;

    block_t *p_block;

    BC_DTS_PROC_OUT proc_out;
    BC_DTS_STATUS driver_stat;

    /* First check the status of the decode to produce pictures */
    if( BC_FUNC_PSYS(DtsGetDriverStatus)( p_sys->bcm_handle, &driver_stat ) !=
            BC_STS_SUCCESS )
        return NULL;

    p_block = *pp_block;
    if( p_block )
    {
        if( ( p_block->i_flags&(BLOCK_FLAG_DISCONTINUITY|BLOCK_FLAG_CORRUPTED) ) == 0 )
        {
            /* Valid input block, so we can send to HW to decode */
            BC_STATUS status = BC_FUNC_PSYS(DtsProcInput)( p_sys->bcm_handle,
                                            p_block->p_buffer,
                                            p_block->i_buffer,
                                            p_block->i_pts >= VLC_TS_INVALID ?
                                                TO_BC_PTS(p_block->i_pts) : 0,
                                            false );

            block_Release( p_block );
            *pp_block = NULL;

            if( status != BC_STS_SUCCESS )
                return NULL;
        }
    }
#ifdef DEBUG_CRYSTALHD
    else
    {
        if( driver_stat.ReadyListCount != 0 )
            msg_Err( p_dec, " Input NULL but have pictures %u",
                     driver_stat.ReadyListCount );
    }
#endif

    /* No decoded picture ready in the hardware yet */
    if( driver_stat.ReadyListCount == 0 )
        return NULL;

    /* Prepare the Output structure */
    /* We always expect and use YUY2 */
    memset( &proc_out, 0, sizeof(BC_DTS_PROC_OUT) );
    proc_out.PicInfo.width  = p_dec->fmt_out.video.i_width;
    proc_out.PicInfo.height = p_dec->fmt_out.video.i_height;
    proc_out.PoutFlags      = BC_POUT_FLAGS_SIZE;
    proc_out.AppCallBack    = ourCallback;
    proc_out.hnd            = p_dec;
    p_sys->proc_out         = &proc_out;

    /* Retrieve the output (128 ms timeout) */
    BC_STATUS sts = BC_FUNC_PSYS(DtsProcOutput)( p_sys->bcm_handle, 128,
                                                 &proc_out );
#ifdef DEBUG_CRYSTALHD
    if( sts != BC_STS_SUCCESS )
        msg_Err( p_dec, "DtsProcOutput returned %i", sts );
#endif

    uint8_t b_eos;
    picture_t *p_pic = p_sys->p_pic;
    switch( sts )
    {
        case BC_STS_SUCCESS:
            if( !(proc_out.PoutFlags & BC_POUT_FLAGS_PIB_VALID) )
            {
                msg_Dbg( p_dec, "Invalid PIB" );
                break;
            }

            if( !p_pic )
                break;

            /* In interlaced mode, do not push the first field in the
             * pipeline */
            if( (proc_out.PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) &&
               !(proc_out.PicInfo.flags & VDEC_FLAG_FIELDPAIR) )
                return NULL;

            //  crystal_CopyPicture( p_pic, &proc_out );
            p_pic->date = proc_out.PicInfo.timeStamp > 0 ?
                          FROM_BC_PTS(proc_out.PicInfo.timeStamp) :
                          VLC_TS_INVALID;
            //p_pic->date += 100 * 1000;
#ifdef DEBUG_CRYSTALHD
            msg_Dbg( p_dec, "TS Output is %"PRIu64, p_pic->date);
#endif
            return p_pic;

        case BC_STS_DEC_NOT_OPEN:
        case BC_STS_DEC_NOT_STARTED:
            msg_Err( p_dec, "Decoder not opened or started" );
            break;

        case BC_STS_INV_ARG:
            msg_Warn( p_dec, "Invalid arguments. Please report" );
            break;

        case BC_STS_FMT_CHANGE:    /* Format change */
            /* if( !(proc_out.PoutFlags & BC_POUT_FLAGS_PIB_VALID) )
                break; */
            p_dec->fmt_out.video.i_width  = proc_out.PicInfo.width;
            p_dec->fmt_out.video.i_height = proc_out.PicInfo.height;
            /* The hardware reports 1088 lines for 1080p content */
            if( proc_out.PicInfo.height == 1088 )
                p_dec->fmt_out.video.i_height = 1080;
            /* Translate the hardware aspect-ratio enum into a SAR */
#define setAR( a, b, c ) case a: p_dec->fmt_out.video.i_sar_num = b; \
                                 p_dec->fmt_out.video.i_sar_den = c; break;
            switch( proc_out.PicInfo.aspect_ratio )
            {
                setAR( vdecAspectRatioSquare, 1, 1 )
                setAR( vdecAspectRatio12_11, 12, 11 )
                setAR( vdecAspectRatio10_11, 10, 11 )
                setAR( vdecAspectRatio16_11, 16, 11 )
                setAR( vdecAspectRatio40_33, 40, 33 )
                setAR( vdecAspectRatio24_11, 24, 11 )
                setAR( vdecAspectRatio20_11, 20, 11 )
                setAR( vdecAspectRatio32_11, 32, 11 )
                setAR( vdecAspectRatio80_33, 80, 33 )
                setAR( vdecAspectRatio18_11, 18, 11 )
                setAR( vdecAspectRatio15_11, 15, 11 )
                setAR( vdecAspectRatio64_33, 64, 33 )
                setAR( vdecAspectRatio160_99, 160, 99 )
                setAR( vdecAspectRatio4_3, 4, 3 )
                setAR( vdecAspectRatio16_9, 16, 9 )
                setAR( vdecAspectRatio221_1, 221, 1 )
                default: break;
            }
#undef setAR
            msg_Dbg( p_dec, "Format Change Detected [%i, %i], AR: %i/%i",
                     proc_out.PicInfo.width, proc_out.PicInfo.height,
                     p_dec->fmt_out.video.i_sar_num,
                     p_dec->fmt_out.video.i_sar_den );
            break;

        /* Nothing is documented here... */
        case BC_STS_NO_DATA:
            if( BC_FUNC_PSYS(DtsIsEndOfStream)( p_sys->bcm_handle, &b_eos )
                    == BC_STS_SUCCESS )
                if( b_eos )
                    msg_Dbg( p_dec, "End of Stream" );
            break;
        case BC_STS_TIMEOUT:       /* Timeout */
            msg_Err( p_dec, "ProcOutput timeout" );
            break;
        case BC_STS_IO_XFR_ERROR:
        case BC_STS_IO_USER_ABORT:
        case BC_STS_IO_ERROR:
            msg_Err( p_dec, "ProcOutput return mode not implemented. Please report" );
            break;
        default:
            msg_Err( p_dec, "Unknown return status. Please report %i", sts );
            break;
    }
    if( p_pic )
        decoder_DeletePicture( p_dec, p_pic );
    return NULL;
}