/* [DIRAC_STD] 10. Sequence Header. sequence_header() */
int avpriv_dirac_parse_sequence_header(AVCodecContext *avctx, GetBitContext *gb,
                                       dirac_source_params *source)
{
    unsigned version_major;
    unsigned video_format, picture_coding_mode;
    int ret;

    /* [DIRAC_SPEC] 10.1 Parse Parameters. parse_parameters() */
    version_major  = svq3_get_ue_golomb(gb);
    svq3_get_ue_golomb(gb); /* version_minor */
    avctx->profile = svq3_get_ue_golomb(gb);
    avctx->level   = svq3_get_ue_golomb(gb);
    /* [DIRAC_SPEC] sequence_header() -> base_video_format as defined in
     * 10.2 Base Video Format, table 10.1 Dirac predefined video formats */
    video_format = svq3_get_ue_golomb(gb);

    if (version_major < 2)
        av_log(avctx, AV_LOG_WARNING, "Stream is old and may not work\n");
    else if (version_major > 2)
        av_log(avctx, AV_LOG_WARNING, "Stream may have unhandled features\n");

    if (video_format > 20U)
        return AVERROR_INVALIDDATA;

    /* Fill in defaults for the source parameters. */
    *source = dirac_source_parameters_defaults[video_format];

    /* [DIRAC_STD] 10.3 Source Parameters
     * Override the defaults. */
    if (ret = parse_source_parameters(avctx, gb, source))
        return ret;

    ret = ff_set_dimensions(avctx, source->width, source->height);
    if (ret < 0)
        return ret;

    ff_set_sar(avctx, avctx->sample_aspect_ratio);

    /* [DIRAC_STD] picture_coding_mode shall be 0 for fields and 1 for frames
     * currently only used to signal field coding */
    picture_coding_mode = svq3_get_ue_golomb(gb);
    if (picture_coding_mode != 0) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported picture coding mode %d\n",
               picture_coding_mode);
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT* format)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;
    AVHWFramesContext *hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    CUVIDDECODECREATEINFO cuinfo;

    av_log(avctx, AV_LOG_TRACE, "pfnSequenceCallback, progressive_sequence=%d\n", format->progressive_sequence);

    ctx->internal_error = 0;

    avctx->width = format->display_area.right;
    avctx->height = format->display_area.bottom;

    ff_set_sar(avctx, av_div_q(
        (AVRational){ format->display_aspect_ratio.x, format->display_aspect_ratio.y },
        (AVRational){ avctx->width, avctx->height }));

    if (!format->progressive_sequence && ctx->deint_mode == cudaVideoDeinterlaceMode_Weave)
        avctx->flags |= AV_CODEC_FLAG_INTERLACED_DCT;
    else
        avctx->flags &= ~AV_CODEC_FLAG_INTERLACED_DCT;

    if (format->video_signal_description.video_full_range_flag)
        avctx->color_range = AVCOL_RANGE_JPEG;
    else
        avctx->color_range = AVCOL_RANGE_MPEG;

    avctx->color_primaries = format->video_signal_description.color_primaries;
    avctx->color_trc = format->video_signal_description.transfer_characteristics;
    avctx->colorspace = format->video_signal_description.matrix_coefficients;

    if (format->bitrate)
        avctx->bit_rate = format->bitrate;

    if (format->frame_rate.numerator && format->frame_rate.denominator) {
        avctx->framerate.num = format->frame_rate.numerator;
        avctx->framerate.den = format->frame_rate.denominator;
    }

    if (ctx->cudecoder
            && avctx->coded_width == format->coded_width
            && avctx->coded_height == format->coded_height
            && ctx->chroma_format == format->chroma_format
            && ctx->codec_type == format->codec)
        return 1;

    if (ctx->cudecoder) {
        av_log(avctx, AV_LOG_TRACE, "Re-initializing decoder\n");
        ctx->internal_error = CHECK_CU(cuvidDestroyDecoder(ctx->cudecoder));
        if (ctx->internal_error < 0)
            return 0;
        ctx->cudecoder = NULL;
    }

    if (hwframe_ctx->pool && (
            hwframe_ctx->width < avctx->width ||
            hwframe_ctx->height < avctx->height ||
            hwframe_ctx->format != AV_PIX_FMT_CUDA ||
            hwframe_ctx->sw_format != AV_PIX_FMT_NV12)) {
        av_log(avctx, AV_LOG_ERROR, "AVHWFramesContext is already initialized with incompatible parameters\n");
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    if (format->chroma_format != cudaVideoChromaFormat_420) {
        av_log(avctx, AV_LOG_ERROR, "Chroma formats other than 420 are not supported\n");
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    avctx->coded_width = format->coded_width;
    avctx->coded_height = format->coded_height;

    ctx->chroma_format = format->chroma_format;

    memset(&cuinfo, 0, sizeof(cuinfo));

    cuinfo.CodecType = ctx->codec_type = format->codec;
    cuinfo.ChromaFormat = format->chroma_format;
    cuinfo.OutputFormat = cudaVideoSurfaceFormat_NV12;

    cuinfo.ulWidth = avctx->coded_width;
    cuinfo.ulHeight = avctx->coded_height;
    cuinfo.ulTargetWidth = cuinfo.ulWidth;
    cuinfo.ulTargetHeight = cuinfo.ulHeight;

    cuinfo.target_rect.left = 0;
    cuinfo.target_rect.top = 0;
    cuinfo.target_rect.right = cuinfo.ulWidth;
    cuinfo.target_rect.bottom = cuinfo.ulHeight;

    cuinfo.ulNumDecodeSurfaces = MAX_FRAME_COUNT;
    cuinfo.ulNumOutputSurfaces = 1;
    cuinfo.ulCreationFlags = cudaVideoCreate_PreferCUVID;
    cuinfo.bitDepthMinus8 = format->bit_depth_luma_minus8;

    if (format->progressive_sequence) {
        ctx->deint_mode = cuinfo.DeinterlaceMode = cudaVideoDeinterlaceMode_Weave;
    } else {
        cuinfo.DeinterlaceMode = ctx->deint_mode;
    }

    if (ctx->deint_mode != cudaVideoDeinterlaceMode_Weave)
        avctx->framerate = av_mul_q(avctx->framerate, (AVRational){2, 1});

    ctx->internal_error = CHECK_CU(cuvidCreateDecoder(&ctx->cudecoder, &cuinfo));
    if (ctx->internal_error < 0)
        return 0;

    if (!hwframe_ctx->pool) {
        hwframe_ctx->format = AV_PIX_FMT_CUDA;
        hwframe_ctx->sw_format = AV_PIX_FMT_NV12;
        hwframe_ctx->width = avctx->width;
        hwframe_ctx->height = avctx->height;

        if ((ctx->internal_error = av_hwframe_ctx_init(ctx->hwframe)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_init failed\n");
            return 0;
        }
    }

    return 1;
}
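/*
 * The cuvid callback above (and the second variant further below) derives the
 * sample aspect ratio by dividing the signaled display aspect ratio by the
 * storage dimensions before handing it to ff_set_sar(). A minimal,
 * self-contained sketch of that relation using libavutil rationals; the 16:9
 * DAR and the 1440x1080 storage size are hypothetical example values, not
 * taken from the code above.
 */
#include <stdio.h>
#include <libavutil/rational.h>

static AVRational sar_from_dar(AVRational dar, int width, int height)
{
    /* SAR = DAR / (width / height) */
    return av_div_q(dar, (AVRational){ width, height });
}

int main(void)
{
    AVRational sar = sar_from_dar((AVRational){ 16, 9 }, 1440, 1080);
    printf("SAR = %d:%d\n", sar.num, sar.den); /* prints 4:3 for these inputs */
    return 0;
}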
/* Returns the number of bytes consumed from the bytestream, or a negative
 * error code if there was an error while decoding the header. */
static int truemotion1_decode_header(TrueMotion1Context *s)
{
    int i, ret;
    int width_shift = 0;
    int new_pix_fmt;
    struct frame_header header;
    uint8_t header_buffer[128] = { 0 };  /* logical maximum size of the header */
    const uint8_t *sel_vector_table;

    header.header_size = ((s->buf[0] >> 5) | (s->buf[0] << 3)) & 0x7f;
    if (s->buf[0] < 0x10) {
        av_log(s->avctx, AV_LOG_ERROR, "invalid header size (%d)\n", s->buf[0]);
        return AVERROR_INVALIDDATA;
    }

    if (header.header_size + 1 > s->size) {
        av_log(s->avctx, AV_LOG_ERROR, "Input packet too small.\n");
        return AVERROR_INVALIDDATA;
    }

    /* unscramble the header bytes with a XOR operation */
    for (i = 1; i < header.header_size; i++)
        header_buffer[i - 1] = s->buf[i] ^ s->buf[i + 1];

    header.compression = header_buffer[0];
    header.deltaset    = header_buffer[1];
    header.vectable    = header_buffer[2];
    header.ysize       = AV_RL16(&header_buffer[3]);
    header.xsize       = AV_RL16(&header_buffer[5]);
    header.checksum    = AV_RL16(&header_buffer[7]);
    header.version     = header_buffer[9];
    header.header_type = header_buffer[10];
    header.flags       = header_buffer[11];
    header.control     = header_buffer[12];

    /* Version 2 */
    if (header.version >= 2) {
        if (header.header_type > 3) {
            av_log(s->avctx, AV_LOG_ERROR, "invalid header type (%d)\n", header.header_type);
            return AVERROR_INVALIDDATA;
        } else if ((header.header_type == 2) || (header.header_type == 3)) {
            s->flags = header.flags;
            if (!(s->flags & FLAG_INTERFRAME))
                s->flags |= FLAG_KEYFRAME;
        } else
            s->flags = FLAG_KEYFRAME;
    } else /* Version 1 */
        s->flags = FLAG_KEYFRAME;

    if (s->flags & FLAG_SPRITE) {
        avpriv_request_sample(s->avctx, "Frame with sprite");
        /* FIXME header.width, height, xoffset and yoffset aren't initialized */
        return AVERROR_PATCHWELCOME;
    } else {
        s->w = header.xsize;
        s->h = header.ysize;
        if (header.header_type < 2) {
            if ((s->w < 213) && (s->h >= 176)) {
                s->flags |= FLAG_INTERPOLATED;
                avpriv_request_sample(s->avctx, "Interpolated frame");
            }
        }
    }

    if (header.compression >= 17) {
        av_log(s->avctx, AV_LOG_ERROR, "invalid compression type (%d)\n", header.compression);
        return AVERROR_INVALIDDATA;
    }

    if ((header.deltaset != s->last_deltaset) ||
        (header.vectable != s->last_vectable))
        select_delta_tables(s, header.deltaset);

    if ((header.compression & 1) && header.header_type)
        sel_vector_table = pc_tbl2;
    else {
        if (header.vectable > 0 && header.vectable < 4)
            sel_vector_table = tables[header.vectable - 1];
        else {
            av_log(s->avctx, AV_LOG_ERROR, "invalid vector table id (%d)\n", header.vectable);
            return AVERROR_INVALIDDATA;
        }
    }

    if (compression_types[header.compression].algorithm == ALGO_RGB24H) {
        new_pix_fmt = AV_PIX_FMT_RGB32;
        width_shift = 1;
    } else
        new_pix_fmt = AV_PIX_FMT_RGB555; // RGB565 is supported as well

    s->w >>= width_shift;

    if (s->w != s->avctx->width || s->h != s->avctx->height ||
        new_pix_fmt != s->avctx->pix_fmt) {
        av_frame_unref(s->frame);
        s->avctx->sample_aspect_ratio = (AVRational){ 1 << width_shift, 1 };
        s->avctx->pix_fmt = new_pix_fmt;

        if ((ret = ff_set_dimensions(s->avctx, s->w, s->h)) < 0)
            return ret;

        ff_set_sar(s->avctx, s->avctx->sample_aspect_ratio);

        av_fast_malloc(&s->vert_pred, &s->vert_pred_size, s->avctx->width * sizeof(unsigned int));
        if (!s->vert_pred)
            return AVERROR(ENOMEM);
    }

    /* There is 1 change bit per 4 pixels, so each change byte represents
     * 32 pixels; divide width by 4 to obtain the number of change bits and
     * then round up to the nearest byte. */
    s->mb_change_bits_row_size = ((s->avctx->width >> (2 - width_shift)) + 7) >> 3;

    if ((header.deltaset != s->last_deltaset) || (header.vectable != s->last_vectable)) {
        if (compression_types[header.compression].algorithm == ALGO_RGB24H)
            gen_vector_table24(s, sel_vector_table);
        else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB555)
            gen_vector_table15(s, sel_vector_table);
        else
            gen_vector_table16(s, sel_vector_table);
    }

    /* set up pointers to the other key data chunks */
    s->mb_change_bits = s->buf + header.header_size;
    if (s->flags & FLAG_KEYFRAME) {
        /* no change bits specified for a keyframe; only index bytes */
        s->index_stream = s->mb_change_bits;
    } else {
        /* one change bit per 4x4 block */
        s->index_stream = s->mb_change_bits +
            (s->mb_change_bits_row_size * (s->avctx->height >> 2));
    }
    s->index_stream_size = s->size - (s->index_stream - s->buf);

    s->last_deltaset = header.deltaset;
    s->last_vectable = header.vectable;
    s->compression   = header.compression;
    s->block_width   = compression_types[header.compression].block_width;
    s->block_height  = compression_types[header.compression].block_height;
    s->block_type    = compression_types[header.compression].block_type;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_INFO, "tables: %d / %d c:%d %dx%d t:%d %s%s%s%s\n",
            s->last_deltaset, s->last_vectable, s->compression, s->block_width,
            s->block_height, s->block_type,
            s->flags & FLAG_KEYFRAME ? " KEY" : "",
            s->flags & FLAG_INTERFRAME ? " INTER" : "",
            s->flags & FLAG_SPRITE ? " SPRITE" : "",
            s->flags & FLAG_INTERPOLATED ? " INTERPOL" : "");

    return header.header_size;
}
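/*
 * Worked example for the change-bit bookkeeping in truemotion1_decode_header()
 * above: with 1 change bit per 4 pixels of width, a hypothetical 320-pixel-wide
 * RGB555 frame (width_shift == 0) needs 320 >> 2 = 80 change bits per row,
 * rounded up to (80 + 7) >> 3 = 10 bytes per row. A small standalone check of
 * the same expression, with made-up input widths:
 */
#include <assert.h>

static int change_bits_row_size(int width, int width_shift)
{
    /* Same expression as s->mb_change_bits_row_size in the decoder. */
    return ((width >> (2 - width_shift)) + 7) >> 3;
}

int main(void)
{
    assert(change_bits_row_size(320, 0) == 10); /* 80 bits  -> 10 bytes */
    assert(change_bits_row_size(320, 1) == 20); /* width_shift 1 (ALGO_RGB24H): 160 bits -> 20 bytes */
    return 0;
}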
static int mediacodec_dec_parse_format(AVCodecContext *avctx, MediaCodecDecContext *s)
{
    int ret = 0;
    int width = 0;
    int height = 0;
    char *format = NULL;

    if (!s->format) {
        av_log(avctx, AV_LOG_ERROR, "Output MediaFormat is not set\n");
        return AVERROR(EINVAL);
    }

    format = ff_AMediaFormat_toString(s->format);
    if (!format) {
        return AVERROR_EXTERNAL;
    }
    av_log(avctx, AV_LOG_DEBUG, "Parsing MediaFormat %s\n", format);

    /* Mandatory fields */
    AMEDIAFORMAT_GET_INT32(s->width,  "width",  1);
    AMEDIAFORMAT_GET_INT32(s->height, "height", 1);

    AMEDIAFORMAT_GET_INT32(s->stride, "stride", 1);
    s->stride = s->stride > 0 ? s->stride : s->width;

    AMEDIAFORMAT_GET_INT32(s->slice_height, "slice-height", 1);
    s->slice_height = s->slice_height > 0 ? s->slice_height : s->height;

    if (strstr(s->codec_name, "OMX.Nvidia.")) {
        s->slice_height = FFALIGN(s->height, 16);
    } else if (strstr(s->codec_name, "OMX.SEC.avc.dec")) {
        s->slice_height = avctx->height;
        s->stride = avctx->width;
    }

    AMEDIAFORMAT_GET_INT32(s->color_format, "color-format", 1);
    avctx->pix_fmt = mcdec_map_color_format(avctx, s, s->color_format);
    if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(avctx, AV_LOG_ERROR, "Output color format is not supported\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    /* Optional fields */
    AMEDIAFORMAT_GET_INT32(s->crop_top,    "crop-top",    0);
    AMEDIAFORMAT_GET_INT32(s->crop_bottom, "crop-bottom", 0);
    AMEDIAFORMAT_GET_INT32(s->crop_left,   "crop-left",   0);
    AMEDIAFORMAT_GET_INT32(s->crop_right,  "crop-right",  0);

    width = s->crop_right + 1 - s->crop_left;
    height = s->crop_bottom + 1 - s->crop_top;

    AMEDIAFORMAT_GET_INT32(s->display_width,  "display-width",  0);
    AMEDIAFORMAT_GET_INT32(s->display_height, "display-height", 0);

    if (s->display_width && s->display_height) {
        AVRational sar = av_div_q(
            (AVRational){ s->display_width, s->display_height },
            (AVRational){ width, height });
        ff_set_sar(avctx, sar);
    }

    av_log(avctx, AV_LOG_INFO,
        "Output crop parameters top=%d bottom=%d left=%d right=%d, "
        "resulting dimensions width=%d height=%d\n",
        s->crop_top, s->crop_bottom, s->crop_left, s->crop_right,
        width, height);

    av_freep(&format);
    return ff_set_dimensions(avctx, width, height);
fail:
    av_freep(&format);
    return ret;
}
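/*
 * Worked example for the MediaCodec crop/SAR derivation above, using
 * hypothetical field values: crop-left=0, crop-right=1919, crop-top=0,
 * crop-bottom=1079 give a 1920x1080 output, and display-width=1440 with
 * display-height=1080 then yields SAR = (1440/1080) / (1920/1080) = 3:4.
 */
#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    int crop_left = 0, crop_right = 1919, crop_top = 0, crop_bottom = 1079;
    int width  = crop_right  + 1 - crop_left;   /* 1920 */
    int height = crop_bottom + 1 - crop_top;    /* 1080 */

    /* Same derivation as mediacodec_dec_parse_format(): display size over crop size. */
    AVRational sar = av_div_q((AVRational){ 1440, 1080 },
                              (AVRational){ width, height });
    printf("width=%d height=%d SAR=%d:%d\n", width, height, sar.num, sar.den);
    return 0;
}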
static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT* format)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;
    AVHWFramesContext *hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    CUVIDDECODECAPS *caps = NULL;
    CUVIDDECODECREATEINFO cuinfo;
    int surface_fmt;

    int old_width = avctx->width;
    int old_height = avctx->height;

    enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_CUDA,
                                       AV_PIX_FMT_NONE,  // Will be updated below
                                       AV_PIX_FMT_NONE };

    av_log(avctx, AV_LOG_TRACE, "pfnSequenceCallback, progressive_sequence=%d\n", format->progressive_sequence);

    memset(&cuinfo, 0, sizeof(cuinfo));

    ctx->internal_error = 0;

    avctx->coded_width = cuinfo.ulWidth = format->coded_width;
    avctx->coded_height = cuinfo.ulHeight = format->coded_height;

    // apply cropping
    cuinfo.display_area.left = format->display_area.left + ctx->crop.left;
    cuinfo.display_area.top = format->display_area.top + ctx->crop.top;
    cuinfo.display_area.right = format->display_area.right - ctx->crop.right;
    cuinfo.display_area.bottom = format->display_area.bottom - ctx->crop.bottom;

    // width and height need to be set before calling ff_get_format
    if (ctx->resize_expr) {
        avctx->width = ctx->resize.width;
        avctx->height = ctx->resize.height;
    } else {
        avctx->width = cuinfo.display_area.right - cuinfo.display_area.left;
        avctx->height = cuinfo.display_area.bottom - cuinfo.display_area.top;
    }

    // target width/height need to be multiples of two
    cuinfo.ulTargetWidth = avctx->width = (avctx->width + 1) & ~1;
    cuinfo.ulTargetHeight = avctx->height = (avctx->height + 1) & ~1;

    // aspect ratio conversion, 1:1, depends on scaled resolution
    cuinfo.target_rect.left = 0;
    cuinfo.target_rect.top = 0;
    cuinfo.target_rect.right = cuinfo.ulTargetWidth;
    cuinfo.target_rect.bottom = cuinfo.ulTargetHeight;

    switch (format->bit_depth_luma_minus8) {
    case 0: // 8-bit
        pix_fmts[1] = AV_PIX_FMT_NV12;
        caps = &ctx->caps8;
        break;
    case 2: // 10-bit
        pix_fmts[1] = AV_PIX_FMT_P010;
        caps = &ctx->caps10;
        break;
    case 4: // 12-bit
        pix_fmts[1] = AV_PIX_FMT_P016;
        caps = &ctx->caps12;
        break;
    default:
        break;
    }

    if (!caps || !caps->bIsSupported) {
        av_log(avctx, AV_LOG_ERROR, "unsupported bit depth: %d\n",
               format->bit_depth_luma_minus8 + 8);
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    surface_fmt = ff_get_format(avctx, pix_fmts);
    if (surface_fmt < 0) {
        av_log(avctx, AV_LOG_ERROR, "ff_get_format failed: %d\n", surface_fmt);
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    av_log(avctx, AV_LOG_VERBOSE, "Formats: Original: %s | HW: %s | SW: %s\n",
           av_get_pix_fmt_name(avctx->pix_fmt),
           av_get_pix_fmt_name(surface_fmt),
           av_get_pix_fmt_name(avctx->sw_pix_fmt));

    avctx->pix_fmt = surface_fmt;

    // Update our hwframe ctx, as the get_format callback might have refreshed it!
    if (avctx->hw_frames_ctx) {
        av_buffer_unref(&ctx->hwframe);

        ctx->hwframe = av_buffer_ref(avctx->hw_frames_ctx);
        if (!ctx->hwframe) {
            ctx->internal_error = AVERROR(ENOMEM);
            return 0;
        }

        hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    }

    ff_set_sar(avctx, av_div_q(
        (AVRational){ format->display_aspect_ratio.x, format->display_aspect_ratio.y },
        (AVRational){ avctx->width, avctx->height }));

    ctx->deint_mode_current = format->progressive_sequence
                              ? cudaVideoDeinterlaceMode_Weave
                              : ctx->deint_mode;

    if (!format->progressive_sequence && ctx->deint_mode_current == cudaVideoDeinterlaceMode_Weave)
        avctx->flags |= AV_CODEC_FLAG_INTERLACED_DCT;
    else
        avctx->flags &= ~AV_CODEC_FLAG_INTERLACED_DCT;

    if (format->video_signal_description.video_full_range_flag)
        avctx->color_range = AVCOL_RANGE_JPEG;
    else
        avctx->color_range = AVCOL_RANGE_MPEG;

    avctx->color_primaries = format->video_signal_description.color_primaries;
    avctx->color_trc = format->video_signal_description.transfer_characteristics;
    avctx->colorspace = format->video_signal_description.matrix_coefficients;

    if (format->bitrate)
        avctx->bit_rate = format->bitrate;

    if (format->frame_rate.numerator && format->frame_rate.denominator) {
        avctx->framerate.num = format->frame_rate.numerator;
        avctx->framerate.den = format->frame_rate.denominator;
    }

    if (ctx->cudecoder
            && avctx->coded_width == format->coded_width
            && avctx->coded_height == format->coded_height
            && avctx->width == old_width
            && avctx->height == old_height
            && ctx->chroma_format == format->chroma_format
            && ctx->codec_type == format->codec)
        return 1;

    if (ctx->cudecoder) {
        av_log(avctx, AV_LOG_TRACE, "Re-initializing decoder\n");
        ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder));
        if (ctx->internal_error < 0)
            return 0;
        ctx->cudecoder = NULL;
    }

    if (hwframe_ctx->pool && (
            hwframe_ctx->width < avctx->width ||
            hwframe_ctx->height < avctx->height ||
            hwframe_ctx->format != AV_PIX_FMT_CUDA ||
            hwframe_ctx->sw_format != avctx->sw_pix_fmt)) {
        av_log(avctx, AV_LOG_ERROR, "AVHWFramesContext is already initialized with incompatible parameters\n");
        av_log(avctx, AV_LOG_DEBUG, "width: %d <-> %d\n", hwframe_ctx->width, avctx->width);
        av_log(avctx, AV_LOG_DEBUG, "height: %d <-> %d\n", hwframe_ctx->height, avctx->height);
        av_log(avctx, AV_LOG_DEBUG, "format: %s <-> cuda\n", av_get_pix_fmt_name(hwframe_ctx->format));
        av_log(avctx, AV_LOG_DEBUG, "sw_format: %s <-> %s\n",
               av_get_pix_fmt_name(hwframe_ctx->sw_format), av_get_pix_fmt_name(avctx->sw_pix_fmt));
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    if (format->chroma_format != cudaVideoChromaFormat_420) {
        av_log(avctx, AV_LOG_ERROR, "Chroma formats other than 420 are not supported\n");
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    ctx->chroma_format = format->chroma_format;

    cuinfo.CodecType = ctx->codec_type = format->codec;
    cuinfo.ChromaFormat = format->chroma_format;

    switch (avctx->sw_pix_fmt) {
    case AV_PIX_FMT_NV12:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_NV12;
        break;
    case AV_PIX_FMT_P010:
    case AV_PIX_FMT_P016:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_P016;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Output formats other than NV12, P010 or P016 are not supported\n");
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    cuinfo.ulNumDecodeSurfaces = ctx->nb_surfaces;
    cuinfo.ulNumOutputSurfaces = 1;
    cuinfo.ulCreationFlags = cudaVideoCreate_PreferCUVID;
    cuinfo.bitDepthMinus8 = format->bit_depth_luma_minus8;
    cuinfo.DeinterlaceMode = ctx->deint_mode_current;

    if (ctx->deint_mode_current != cudaVideoDeinterlaceMode_Weave && !ctx->drop_second_field)
        avctx->framerate = av_mul_q(avctx->framerate, (AVRational){2, 1});

    ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidCreateDecoder(&ctx->cudecoder, &cuinfo));
    if (ctx->internal_error < 0)
        return 0;

    if (!hwframe_ctx->pool) {
        hwframe_ctx->format = AV_PIX_FMT_CUDA;
        hwframe_ctx->sw_format = avctx->sw_pix_fmt;
        hwframe_ctx->width = avctx->width;
        hwframe_ctx->height = avctx->height;

        if ((ctx->internal_error = av_hwframe_ctx_init(ctx->hwframe)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_init failed\n");
            return 0;
        }
    }

    return 1;
}
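/*
 * Two small expressions from the second cuvid_handle_video_sequence() above,
 * shown in isolation: rounding the target size up to a multiple of two, and
 * doubling the frame rate when the deinterlacer emits one frame per field.
 * The helper name and the 1919x1079 / 30000:1001 inputs are hypothetical,
 * chosen only to illustrate the arithmetic.
 */
#include <stdio.h>
#include <libavutil/rational.h>

static int round_up_even(int v)
{
    return (v + 1) & ~1; /* same rounding as cuinfo.ulTargetWidth/ulTargetHeight */
}

int main(void)
{
    AVRational fr = { 30000, 1001 }; /* NTSC field-pair rate */

    printf("%d -> %d, %d -> %d\n",
           1919, round_up_even(1919), 1079, round_up_even(1079)); /* 1920, 1080 */

    fr = av_mul_q(fr, (AVRational){ 2, 1 }); /* one output frame per field */
    printf("doubled framerate: %d/%d\n", fr.num, fr.den); /* 60000/1001 */
    return 0;
}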