/**
 * Prepare one scaling stage: derive per-plane dimensions from the chroma
 * subsampling, allocate a CUDA hwframes context for the output, and grab
 * an output frame buffer.
 *
 * On success the new frames context replaces s->frames_ctx (ownership is
 * transferred); on failure everything allocated here is released.
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static av_cold int init_stage(CUDAScaleContext *s, AVBufferRef *device_ctx)
{
    int hshift_in, vshift_in, hshift_out, vshift_out;
    int plane, err;
    AVHWFramesContext *frames_ctx;
    AVBufferRef *frames_ref;

    av_pix_fmt_get_chroma_sub_sample(s->in_fmt, &hshift_in, &vshift_in);
    av_pix_fmt_get_chroma_sub_sample(s->out_fmt, &hshift_out, &vshift_out);

    /* Default the output size to the input size when none was requested. */
    if (!s->planes_out[0].width) {
        s->planes_out[0].width  = s->planes_in[0].width;
        s->planes_out[0].height = s->planes_in[0].height;
    }

    /* Chroma plane geometry follows the luma plane and the subsampling. */
    for (plane = 1; plane < FF_ARRAY_ELEMS(s->planes_in); plane++) {
        s->planes_in[plane].width   = s->planes_in[0].width   >> hshift_in;
        s->planes_in[plane].height  = s->planes_in[0].height  >> vshift_in;
        s->planes_out[plane].width  = s->planes_out[0].width  >> hshift_out;
        s->planes_out[plane].height = s->planes_out[0].height >> vshift_out;
    }

    frames_ref = av_hwframe_ctx_alloc(device_ctx);
    if (!frames_ref)
        return AVERROR(ENOMEM);

    frames_ctx            = (AVHWFramesContext*)frames_ref->data;
    frames_ctx->format    = AV_PIX_FMT_CUDA;
    frames_ctx->sw_format = s->out_fmt;
    frames_ctx->width     = FFALIGN(s->planes_out[0].width,  32);
    frames_ctx->height    = FFALIGN(s->planes_out[0].height, 32);

    err = av_hwframe_ctx_init(frames_ref);
    if (err < 0) {
        av_buffer_unref(&frames_ref);
        return err;
    }

    av_frame_unref(s->frame);
    err = av_hwframe_get_buffer(frames_ref, s->frame, 0);
    if (err < 0) {
        av_buffer_unref(&frames_ref);
        return err;
    }

    /* The buffer was allocated with the aligned size; report the real one. */
    s->frame->width  = s->planes_out[0].width;
    s->frame->height = s->planes_out[0].height;

    /* Hand ownership of the new frames context over to the filter context. */
    av_buffer_unref(&s->frames_ctx);
    s->frames_ctx = frames_ref;

    return 0;
}
/**
 * Fill in the JPEG horizontal/vertical sampling factors for the three
 * components, based on the codec's pixel format.
 *
 * RGB lossless JPEG uses 1x1 sampling everywhere; planar 4:4:4 uses a
 * special 1x2 layout; all other formats derive the chroma factors from
 * the pixel format's chroma subsampling shifts.
 */
void ff_mjpeg_init_hvsample(AVCodecContext *avctx, int hsample[3], int vsample[3])
{
    int hshift, vshift;
    int comp;

    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &hshift, &vshift);

    if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
        (avctx->pix_fmt == AV_PIX_FMT_BGR0 ||
         avctx->pix_fmt == AV_PIX_FMT_BGRA ||
         avctx->pix_fmt == AV_PIX_FMT_BGR24)) {
        /* RGB lossless JPEG: no subsampling on any component. */
        for (comp = 0; comp < 3; comp++)
            hsample[comp] = vsample[comp] = 1;
    } else if (avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
               avctx->pix_fmt == AV_PIX_FMT_YUVJ444P) {
        for (comp = 0; comp < 3; comp++) {
            vsample[comp] = 2;
            hsample[comp] = 1;
        }
    } else {
        /* Luma is always 2x2; chroma is scaled down by the format shifts. */
        hsample[0] = vsample[0] = 2;
        for (comp = 1; comp < 3; comp++) {
            hsample[comp] = 2 >> hshift;
            vsample[comp] = 2 >> vshift;
        }
    }
}
/**
 * Copy the pixel data of an encoder frame into an AVFrame, plane by plane.
 *
 * Each row copies only as many bytes as both the source and destination
 * strides can hold; chroma rows are reduced by the format's vertical
 * subsampling. Planes with a NULL source pointer are skipped.
 *
 * NOTE(review): the vertical shift is applied to every plane > 0 — this
 * assumes all non-first planes are chroma; confirm for alpha formats.
 */
static inline void copy_data(AVFrame *pic, const struct encoder_frame *frame,
                             int height, enum AVPixelFormat format)
{
    int h_shift, v_shift;

    av_pix_fmt_get_chroma_sub_sample(format, &h_shift, &v_shift);

    for (int plane = 0; plane < MAX_AV_PLANES; plane++) {
        if (!frame->data[plane])
            continue;

        const int src_stride = (int)frame->linesize[plane];
        const int dst_stride = pic->linesize[plane];

        /* Never read or write past the narrower of the two row sizes. */
        const int row_bytes = src_stride < dst_stride ? src_stride : dst_stride;

        /* Luma keeps the full height; other planes are subsampled. */
        const int rows = plane ? height >> v_shift : height;

        for (int y = 0; y < rows; y++)
            memcpy(pic->data[plane] + y * dst_stride,
                   frame->data[plane] + y * src_stride,
                   row_bytes);
    }
}
/**
 * Translate a pixel format's chroma subsampling into the matching
 * cudaVideoChromaFormat value.
 *
 * Returns cudaVideoChromaFormat_420/422/444, or -1 for any subsampling
 * pattern NVDEC does not support.
 */
static int map_chroma_format(enum AVPixelFormat pix_fmt)
{
    int hshift = 0, vshift = 0;

    av_pix_fmt_get_chroma_sub_sample(pix_fmt, &hshift, &vshift);

    if (hshift == 1)
        return vshift == 1 ? cudaVideoChromaFormat_420 :
               vshift == 0 ? cudaVideoChromaFormat_422 : -1;

    if (hshift == 0 && vshift == 0)
        return cudaVideoChromaFormat_444;

    return -1; /* unsupported subsampling pattern */
}
/**
 * Initialise a CUDA hwframes context: verify the software format is
 * supported, record its chroma shifts, and (when the caller supplied no
 * pool) create an internal buffer pool sized for the format's layout.
 *
 * Returns 0 on success, AVERROR(ENOSYS) for unsupported formats,
 * AVERROR(ENOMEM) on pool allocation failure, AVERROR_BUG if a supported
 * format is missing from the size table.
 */
static int cuda_frames_init(AVHWFramesContext *ctx)
{
    CUDAFramesContext *priv = ctx->internal->priv;
    int aligned_width = FFALIGN(ctx->width, CUDA_FRAME_ALIGNMENT);
    int found = 0;

    for (int i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) {
        if (supported_formats[i] == ctx->sw_format) {
            found = 1;
            break;
        }
    }
    if (!found) {
        av_log(ctx, AV_LOG_ERROR, "Pixel format '%s' is not supported\n",
               av_get_pix_fmt_name(ctx->sw_format));
        return AVERROR(ENOSYS);
    }

    av_pix_fmt_get_chroma_sub_sample(ctx->sw_format,
                                     &priv->shift_width, &priv->shift_height);

    if (!ctx->pool) {
        int size;

        /* Bytes per frame, from the aligned luma stride and the format's
         * bits-per-pixel layout. */
        if (ctx->sw_format == AV_PIX_FMT_NV12 ||
            ctx->sw_format == AV_PIX_FMT_YUV420P) {
            size = aligned_width * ctx->height * 3 / 2;
        } else if (ctx->sw_format == AV_PIX_FMT_YUV444P ||
                   ctx->sw_format == AV_PIX_FMT_P010 ||
                   ctx->sw_format == AV_PIX_FMT_P016) {
            size = aligned_width * ctx->height * 3;
        } else if (ctx->sw_format == AV_PIX_FMT_YUV444P16) {
            size = aligned_width * ctx->height * 6;
        } else {
            av_log(ctx, AV_LOG_ERROR,
                   "BUG: Pixel format missing from size calculation.");
            return AVERROR_BUG;
        }

        ctx->internal->pool_internal =
            av_buffer_pool_init2(size, ctx, cuda_pool_alloc, NULL);
        if (!ctx->internal->pool_internal)
            return AVERROR(ENOMEM);
    }

    return 0;
}
/**
 * Map a pixel format (and chroma siting) to the WebM/VPx chroma
 * subsampling enumeration.
 *
 * 4:2:0 additionally distinguishes left-sited chroma (vertical) from
 * chroma collocated with luma. Returns -1 and logs an error for any
 * unsupported pixel format.
 */
static int get_vpx_chroma_subsampling(AVFormatContext *s,
                                      enum AVPixelFormat pixel_format,
                                      enum AVChromaLocation chroma_location)
{
    int hshift, vshift;

    if (!av_pix_fmt_get_chroma_sub_sample(pixel_format, &hshift, &vshift)) {
        if (hshift == 0 && vshift == 0)
            return VPX_SUBSAMPLING_444;
        if (hshift == 1 && vshift == 0)
            return VPX_SUBSAMPLING_422;
        if (hshift == 1 && vshift == 1)
            return chroma_location == AVCHROMA_LOC_LEFT
                       ? VPX_SUBSAMPLING_420_VERTICAL
                       : VPX_SUBSAMPLING_420_COLLOCATED_WITH_LUMA;
    }

    av_log(s, AV_LOG_ERROR, "Unsupported pixel format (%d)\n", pixel_format);
    return -1;
}
/* Darken everything outside the title-safe area (roughly a 10% border on
 * every side) of one raw frame, so the region safe for captions stands
 * out in the preview. Luma and chroma planes are dimmed towards their
 * respective bias values. */
void video_effect_show_title_safe(struct raw_frame_ref dest)
{
    int chroma_shift_horiz, chroma_shift_vert;
    av_pix_fmt_get_chroma_sub_sample(dest.pix_fmt,
                                     &chroma_shift_horiz, &chroma_shift_vert);
    unsigned width = FRAME_WIDTH;
    unsigned height = dest.height;
    /* Border thickness: one tenth of each dimension, rounded to nearest. */
    unsigned border_horiz = (FRAME_WIDTH + 5) / 10;
    unsigned border_vert = (dest.height + 5) / 10;
    unsigned bias = luma_bias;

    // Darken the non-title-safe area
    for (int plane = 0; plane != 3; ++plane)
    {
        if (plane == 1)
        {
            /* Entering the first chroma plane: rescale the geometry for
             * chroma subsampling and switch to the chroma bias. Plane 2
             * reuses these already-shifted values. */
            width >>= chroma_shift_horiz;
            border_horiz >>= chroma_shift_horiz;
            height >>= chroma_shift_vert;
            border_vert >>= chroma_shift_vert;
            bias = chroma_bias;
        }
        for (unsigned y = 0; y != height; ++y)
        {
            uint8_t * p, * end;

            // Do left border
            p = dest.planes.data[plane] + dest.planes.linesize[plane] * y;
            end = p + border_horiz;
            /* Averaging each sample with the bias dims it towards the
             * bias level. */
            while (p != end)
                *p = (*p + bias) / 2, ++p;

            end = p + width - border_horiz;
            if (y >= border_vert && y < height - border_vert)
                // Skip to right border
                p += width - 2 * border_horiz;
            // else continue across top border or bottom border
            while (p != end)
                *p = (*p + bias) / 2, ++p;
        }
    }
    /* NOTE(review): the function's closing brace is not present in this
     * chunk of the file -- it appears truncated by extraction; verify
     * against the full source. */
/* AMV-specific encode entry point: validates the frame geometry and
 * submits the picture flipped upside-down (AMV stores frames bottom-up),
 * then delegates to the generic MPV encoder.
 * Returns the encoder's result, or a negative AVERROR on validation or
 * allocation failure. */
static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pic_arg, int *got_packet)
{
    MpegEncContext *s = avctx->priv_data;
    AVFrame *pic;
    int i, ret;
    int chroma_h_shift, chroma_v_shift;

    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
                                     &chroma_v_shift);

    //CODEC_FLAG_EMU_EDGE have to be cleared
    if (s->avctx->flags & CODEC_FLAG_EMU_EDGE)
        return AVERROR(EINVAL);

    /* Heights not divisible by 16 are only allowed with relaxed
     * compliance, since some decoders reject them. */
    if ((avctx->height & 15) &&
        avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
        av_log(avctx, AV_LOG_ERROR,
               "Heights which are not a multiple of 16 might fail with some decoders, "
               "use vstrict=-1 / -strict -1 to use %d anyway.\n", avctx->height);
        av_log(avctx, AV_LOG_WARNING,
               "If you have a device that plays AMV videos, please test if videos "
               "with such heights work with it and report your findings to [email protected]\n");
        return AVERROR_EXPERIMENTAL;
    }

    /* Clone so the caller's frame is left untouched by the flip below. */
    pic = av_frame_clone(pic_arg);
    if (!pic)
        return AVERROR(ENOMEM);

    //picture should be flipped upside-down
    for (i = 0; i < 3; i++) {
        /* Vertical sampling factor: 2 for luma, 2 >> shift for chroma. */
        int vsample = i ? 2 >> chroma_v_shift : 2;

        /* Point each plane at its last row and negate the stride so the
         * encoder walks the image bottom-up. */
        pic->data[i]     += pic->linesize[i] * (vsample * s->height / V_MAX - 1);
        pic->linesize[i] *= -1;
    }
    ret = ff_mpv_encode_picture(avctx, pkt, pic, got_packet);

    av_frame_free(&pic);
    return ret;
}
/* Initialise the libtheora encoder: translate AVCodecContext settings
 * into a th_info, allocate the encoder state, configure the GOP size and
 * optional two-pass mode, and collect the Theora header packets into
 * extradata. Returns 0 on success, -1 on failure.
 *
 * Note: the call order matters — 2-pass setup must happen before the
 * header packets are flushed. */
static av_cold int encode_init(AVCodecContext* avc_context)
{
    th_info t_info;
    th_comment t_comment;
    ogg_packet o_packet;
    unsigned int offset;
    TheoraContext *h = avc_context->priv_data;
    uint32_t gop_size = avc_context->gop_size;

    /* Set up the theora_info struct */
    th_info_init(&t_info);
    /* Coded frame size is padded to a multiple of 16; pic_* carries the
     * visible picture size and offset. */
    t_info.frame_width  = FFALIGN(avc_context->width,  16);
    t_info.frame_height = FFALIGN(avc_context->height, 16);
    t_info.pic_width    = avc_context->width;
    t_info.pic_height   = avc_context->height;
    t_info.pic_x        = 0;
    t_info.pic_y        = 0;
    /* Swap numerator and denominator as time_base in AVCodecContext gives the
     * time period between frames, but theora_info needs the framerate. */
    t_info.fps_numerator   = avc_context->time_base.den;
    t_info.fps_denominator = avc_context->time_base.num;
    if (avc_context->sample_aspect_ratio.num) {
        t_info.aspect_numerator   = avc_context->sample_aspect_ratio.num;
        t_info.aspect_denominator = avc_context->sample_aspect_ratio.den;
    } else {
        /* Unknown sample aspect ratio: assume square pixels. */
        t_info.aspect_numerator   = 1;
        t_info.aspect_denominator = 1;
    }

    if (avc_context->color_primaries == AVCOL_PRI_BT470M)
        t_info.colorspace = TH_CS_ITU_REC_470M;
    else if (avc_context->color_primaries == AVCOL_PRI_BT470BG)
        t_info.colorspace = TH_CS_ITU_REC_470BG;
    else
        t_info.colorspace = TH_CS_UNSPECIFIED;

    /* Theora only supports planar 4:2:0, 4:2:2 and 4:4:4. */
    if (avc_context->pix_fmt == AV_PIX_FMT_YUV420P)
        t_info.pixel_fmt = TH_PF_420;
    else if (avc_context->pix_fmt == AV_PIX_FMT_YUV422P)
        t_info.pixel_fmt = TH_PF_422;
    else if (avc_context->pix_fmt == AV_PIX_FMT_YUV444P)
        t_info.pixel_fmt = TH_PF_444;
    else {
        av_log(avc_context, AV_LOG_ERROR, "Unsupported pix_fmt\n");
        return -1;
    }
    av_pix_fmt_get_chroma_sub_sample(avc_context->pix_fmt,
                                     &h->uv_hshift, &h->uv_vshift);

    if (avc_context->flags & CODEC_FLAG_QSCALE) {
        /* to be constant with the libvorbis implementation, clip global_quality
           to 0 - 10. Theora accepts a quality parameter p, which is:
                * 0 <= p <= 63
                * an int value */
        t_info.quality        = av_clipf(avc_context->global_quality
                                         / (float)FF_QP2LAMBDA, 0, 10) * 6.3;
        t_info.target_bitrate = 0;
    } else {
        t_info.target_bitrate = avc_context->bit_rate;
        t_info.quality        = 0;
    }

    /* Now initialise libtheora */
    h->t_state = th_encode_alloc(&t_info);
    if (!h->t_state) {
        av_log(avc_context, AV_LOG_ERROR, "theora_encode_init failed\n");
        return -1;
    }

    /* Mask used to split granulepos into keyframe/offset parts later. */
    h->keyframe_mask = (1 << t_info.keyframe_granule_shift) - 1;
    /* Clear up theora_info struct */
    th_info_clear(&t_info);

    if (th_encode_ctl(h->t_state, TH_ENCCTL_SET_KEYFRAME_FREQUENCY_FORCE,
                      &gop_size, sizeof(gop_size))) {
        av_log(avc_context, AV_LOG_ERROR, "Error setting GOP size\n");
        return -1;
    }

    // need to enable 2 pass (via TH_ENCCTL_2PASS_) before encoding headers
    if (avc_context->flags & CODEC_FLAG_PASS1) {
        if (get_stats(avc_context, 0))
            return -1;
    } else if (avc_context->flags & CODEC_FLAG_PASS2) {
        if (submit_stats(avc_context))
            return -1;
    }

    /* Output first header packet consisting of theora header, comment, and
       tables. Each one is prefixed with a 16bit size, then they are
       concatenated together into libavcodec's extradata. */
    offset = 0;

    /* Headers */
    th_comment_init(&t_comment);

    while (th_encode_flushheader(h->t_state, &t_comment, &o_packet))
        if (concatenate_packet(&offset, avc_context, &o_packet))
            return -1;

    th_comment_clear(&t_comment);

    /* Set up the output AVFrame */
    avc_context->coded_frame = av_frame_alloc();

    return 0;
}
/* Write one raw video frame as a YUV4MPEG FRAME: emit the stream header
 * before the first frame, then the FRAME magic line, then the luma plane
 * followed by the (possibly subsampled) Cb and Cr planes.
 * Returns 0 on success or a negative AVERROR code. */
static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st = s->streams[pkt->stream_index];
    AVIOContext *pb = s->pb;
    AVFrame *frame;
    int* first_pkt = s->priv_data;
    int width, height, h_chroma_shift, v_chroma_shift;
    int i;
    char buf2[Y4M_LINE_MAX + 1];
    uint8_t *ptr, *ptr1, *ptr2;

    /* NOTE(review): the packet payload is interpreted as an AVFrame,
     * not raw pixel bytes — a convention specific to this muxer. */
    frame = (AVFrame *)pkt->data;

    /* for the first packet we have to output the header as well */
    if (*first_pkt) {
        *first_pkt = 0;
        if (yuv4_generate_header(s, buf2) < 0) {
            av_log(s, AV_LOG_ERROR,
                   "Error. YUV4MPEG stream header write failed.\n");
            return AVERROR(EIO);
        } else {
            avio_write(pb, buf2, strlen(buf2));
        }
    }

    /* construct frame header */
    avio_printf(s->pb, "%s\n", Y4M_FRAME_MAGIC);

    width  = st->codec->width;
    height = st->codec->height;

    ptr = frame->data[0];

    switch (st->codec->pix_fmt) {
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV444P:
        break;
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV444P9:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV420P12:
    case AV_PIX_FMT_YUV422P12:
    case AV_PIX_FMT_YUV444P12:
    case AV_PIX_FMT_YUV420P14:
    case AV_PIX_FMT_YUV422P14:
    case AV_PIX_FMT_YUV444P14:
    case AV_PIX_FMT_YUV420P16:
    case AV_PIX_FMT_YUV422P16:
    case AV_PIX_FMT_YUV444P16:
        /* High-bit-depth formats store 2 bytes per sample, so double the
         * number of bytes written per row. */
        width *= 2;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "The pixel format '%s' is not supported.\n",
               av_get_pix_fmt_name(st->codec->pix_fmt));
        return AVERROR(EINVAL);
    }

    /* Luma plane, one visible-width row at a time (linesize may be
     * larger than the visible width). */
    for (i = 0; i < height; i++) {
        avio_write(pb, ptr, width);
        ptr += frame->linesize[0];
    }

    if (st->codec->pix_fmt != AV_PIX_FMT_GRAY8 &&
        st->codec->pix_fmt != AV_PIX_FMT_GRAY16) {
        // Adjust for smaller Cb and Cr planes
        av_pix_fmt_get_chroma_sub_sample(st->codec->pix_fmt, &h_chroma_shift,
                                         &v_chroma_shift);
        width  = FF_CEIL_RSHIFT(width,  h_chroma_shift);
        height = FF_CEIL_RSHIFT(height, v_chroma_shift);

        ptr1 = frame->data[1];
        ptr2 = frame->data[2];
        for (i = 0; i < height; i++) { /* Cb */
            avio_write(pb, ptr1, width);
            ptr1 += frame->linesize[1];
        }
        for (i = 0; i < height; i++) { /* Cr */
            avio_write(pb, ptr2, width);
            ptr2 += frame->linesize[2];
        }
    }
    return 0;
}
/**
 * Write one raw video frame as a YUV4MPEG FRAME section.
 *
 * On the first packet the stream header is emitted first. Each frame is
 * introduced by the FRAME magic line, followed by the luma plane and, for
 * non-grayscale formats, the subsampled Cb and Cr planes.
 *
 * Returns 0 on success or AVERROR(EIO) if the header cannot be generated.
 */
static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st = s->streams[pkt->stream_index];
    AVIOContext *pb = s->pb;
    int *first_pkt = s->priv_data;
    AVFrame *frame = (AVFrame *)pkt->data;
    char header_buf[Y4M_LINE_MAX + 1];
    char magic_buf[20];
    int luma_width, luma_height;
    int chroma_width, chroma_height;
    int h_shift, v_shift;
    int row;
    uint8_t *line;

    /* The stream header goes out once, ahead of the first frame. */
    if (*first_pkt) {
        *first_pkt = 0;
        if (yuv4_generate_header(s, header_buf) < 0) {
            av_log(s, AV_LOG_ERROR,
                   "Error. YUV4MPEG stream header write failed.\n");
            return AVERROR(EIO);
        }
        avio_write(pb, header_buf, strlen(header_buf));
    }

    /* Every frame is introduced by the FRAME magic line. */
    snprintf(magic_buf, sizeof(magic_buf), "%s\n", Y4M_FRAME_MAGIC);
    avio_write(pb, magic_buf, strlen(magic_buf));

    luma_width  = st->codec->width;
    luma_height = st->codec->height;

    /* Luma plane, row by row (linesize may exceed the visible width). */
    line = frame->data[0];
    for (row = 0; row < luma_height; row++) {
        avio_write(pb, line, luma_width);
        line += frame->linesize[0];
    }

    if (st->codec->pix_fmt != AV_PIX_FMT_GRAY8) {
        /* Cb and Cr are subsampled: shift right, rounding up. */
        av_pix_fmt_get_chroma_sub_sample(st->codec->pix_fmt,
                                         &h_shift, &v_shift);
        chroma_width  = AV_CEIL_RSHIFT(luma_width, h_shift);
        chroma_height = AV_CEIL_RSHIFT(luma_height, v_shift);

        line = frame->data[1];
        for (row = 0; row < chroma_height; row++) { /* Cb */
            avio_write(pb, line, chroma_width);
            line += frame->linesize[1];
        }

        line = frame->data[2];
        for (row = 0; row < chroma_height; row++) { /* Cr */
            avio_write(pb, line, chroma_width);
            line += frame->linesize[2];
        }
    }

    return 0;
}