SwsContext *GetSwsContext(int SrcW, int SrcH, AVPixelFormat SrcFormat, int SrcColorSpace, int SrcColorRange,
                          int DstW, int DstH, AVPixelFormat DstFormat, int DstColorSpace, int DstColorRange,
                          int64_t Flags)
{
    Flags |= SWS_FULL_CHR_H_INT | SWS_FULL_CHR_H_INP | SWS_ACCURATE_RND;
    SwsContext *Context = sws_alloc_context();
    if (!Context)
        return nullptr;

    // 0 = limited range, 1 = full range
    int SrcRange = SrcColorRange == AVCOL_RANGE_JPEG;
    int DstRange = DstColorRange == AVCOL_RANGE_JPEG;

    av_opt_set_int(Context, "sws_flags",  Flags,     0);
    av_opt_set_int(Context, "srcw",       SrcW,      0);
    av_opt_set_int(Context, "srch",       SrcH,      0);
    av_opt_set_int(Context, "dstw",       DstW,      0);
    av_opt_set_int(Context, "dsth",       DstH,      0);
    av_opt_set_int(Context, "src_range",  SrcRange,  0);
    av_opt_set_int(Context, "dst_range",  DstRange,  0);
    av_opt_set_int(Context, "src_format", SrcFormat, 0);
    av_opt_set_int(Context, "dst_format", DstFormat, 0);

    sws_setColorspaceDetails(Context,
                             sws_getCoefficients(SrcColorSpace), SrcRange,
                             sws_getCoefficients(DstColorSpace), DstRange,
                             0, 1 << 16, 1 << 16);

    if (sws_init_context(Context, nullptr, nullptr) < 0) {
        sws_freeContext(Context);
        return nullptr;
    }

    return Context;
}
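A minimal usage sketch for the helper above (not part of the original source): the frame, destination buffers, and dimensions are placeholders, and the colorspace/range constants are one plausible choice for HD content.

// Hypothetical caller: convert a 1080p BT.709 limited-range YUV420P frame
// to full-range BGRA at the same size. "Frame", "DstData" and "DstStride"
// are assumed to exist in the surrounding code.
SwsContext *Ctx = GetSwsContext(
    1920, 1080, AV_PIX_FMT_YUV420P, AVCOL_SPC_BT709, AVCOL_RANGE_MPEG,
    1920, 1080, AV_PIX_FMT_BGRA,    AVCOL_SPC_BT709, AVCOL_RANGE_JPEG,
    SWS_BICUBIC);
if (!Ctx)
    return; // allocation or initialization failed

sws_scale(Ctx, Frame->data, Frame->linesize, 0, 1080, DstData, DstStride);
sws_freeContext(Ctx);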
void MovieDecoder::convertAndScaleFrame(PixelFormat format, int scaledSize, bool maintainAspectRatio, int& scaledWidth, int& scaledHeight)
{
    calculateDimensions(scaledSize, maintainAspectRatio, scaledWidth, scaledHeight);

#ifdef LATEST_GREATEST_FFMPEG
    // Enable this when it hits the released ffmpeg version
    SwsContext* scaleContext = sws_alloc_context();
    if (scaleContext == nullptr)
    {
        throw std::logic_error("Failed to allocate scale context");
    }

    av_set_int(scaleContext, "srcw", m_pVideoCodecContext->width);
    av_set_int(scaleContext, "srch", m_pVideoCodecContext->height);
    av_set_int(scaleContext, "src_format", m_pVideoCodecContext->pix_fmt);
    av_set_int(scaleContext, "dstw", scaledWidth);
    av_set_int(scaleContext, "dsth", scaledHeight);
    av_set_int(scaleContext, "dst_format", format);
    av_set_int(scaleContext, "sws_flags", SWS_BICUBIC);

    const int* coeff = sws_getCoefficients(SWS_CS_DEFAULT);
    // sws_setColorspaceDetails() takes range flags (0 = limited, 1 = full)
    // after each coefficient table; the original mistakenly passed the
    // pixel formats here.
    if (sws_setColorspaceDetails(scaleContext, coeff, 0, coeff, 0, 0, 1 << 16, 1 << 16) < 0)
    {
        sws_freeContext(scaleContext);
        throw std::logic_error("Failed to set colorspace details");
    }

    if (sws_init_context(scaleContext, nullptr, nullptr) < 0)
    {
        sws_freeContext(scaleContext);
        throw std::logic_error("Failed to initialise scale context");
    }
#else
    // The fallback path; guarded with #else so the two scaleContext
    // definitions cannot clash when the macro above is enabled.
    SwsContext* scaleContext = sws_getContext(m_pVideoCodecContext->width, m_pVideoCodecContext->height,
                                              m_pVideoCodecContext->pix_fmt, scaledWidth, scaledHeight,
                                              format, SWS_BICUBIC, nullptr, nullptr, nullptr);
    if (scaleContext == nullptr)
    {
        throw std::logic_error("Failed to create resize context");
    }
#endif

    AVFrame* convertedFrame = nullptr;
    uint8_t* convertedFrameBuffer = nullptr;

    createAVFrame(&convertedFrame, &convertedFrameBuffer, scaledWidth, scaledHeight, format);

    sws_scale(scaleContext, m_pFrame->data, m_pFrame->linesize, 0, m_pVideoCodecContext->height,
              convertedFrame->data, convertedFrame->linesize);
    sws_freeContext(scaleContext);

    av_free(m_pFrame);
    av_free(m_pFrameBuffer);

    m_pFrame = convertedFrame;
    m_pFrameBuffer = convertedFrameBuffer;
}
SwsContext *FFGetSwsContext(int SrcW, int SrcH, PixelFormat SrcFormat,
                            int DstW, int DstH, PixelFormat DstFormat,
                            int64_t Flags, int ColorSpace, int ColorRange)
{
    Flags |= SWS_FULL_CHR_H_INT | SWS_FULL_CHR_H_INP;
#if LIBSWSCALE_VERSION_INT < AV_VERSION_INT(0, 12, 0)
    return sws_getContext(SrcW, SrcH, SrcFormat, DstW, DstH, DstFormat, Flags, 0, 0, 0);
#else
    SwsContext *Context = sws_alloc_context();
    if (!Context)
        return 0;

    // The intention here is to never change the color range.
    int Range; // 0 = limited range, 1 = full range
    if (ColorRange == AVCOL_RANGE_JPEG)
        Range = 1;
    else // explicit limited range, or unspecified
        Range = 0;

    av_opt_set_int(Context, "sws_flags",  Flags,     0);
    av_opt_set_int(Context, "srcw",       SrcW,      0);
    av_opt_set_int(Context, "srch",       SrcH,      0);
    av_opt_set_int(Context, "dstw",       DstW,      0);
    av_opt_set_int(Context, "dsth",       DstH,      0);
    av_opt_set_int(Context, "src_range",  Range,     0);
    av_opt_set_int(Context, "dst_range",  Range,     0);
    av_opt_set_int(Context, "src_format", SrcFormat, 0);
    av_opt_set_int(Context, "dst_format", DstFormat, 0);

    sws_setColorspaceDetails(Context,
                             sws_getCoefficients(ColorSpace), Range,
                             sws_getCoefficients(ColorSpace), Range,
                             0, 1 << 16, 1 << 16);

    if (sws_init_context(Context, 0, 0) < 0) {
        sws_freeContext(Context);
        return 0;
    }

    return Context;
#endif
}
struct SwsContext *update_scaler_configuration
(
    struct SwsContext *sws_ctx,
    int                flags,
    int                width,
    int                height,
    enum AVPixelFormat input_pixel_format,
    enum AVPixelFormat output_pixel_format,
    enum AVColorSpace  colorspace,
    int                yuv_range
)
{
    if( sws_ctx )
        sws_freeContext( sws_ctx );
    sws_ctx = sws_alloc_context();
    if( !sws_ctx )
        return NULL;
    av_opt_set_int( sws_ctx, "sws_flags",  flags,               0 );
    av_opt_set_int( sws_ctx, "srcw",       width,               0 );
    av_opt_set_int( sws_ctx, "srch",       height,              0 );
    av_opt_set_int( sws_ctx, "dstw",       width,               0 );
    av_opt_set_int( sws_ctx, "dsth",       height,              0 );
    av_opt_set_int( sws_ctx, "src_format", input_pixel_format,  0 );
    av_opt_set_int( sws_ctx, "dst_format", output_pixel_format, 0 );
    const int *yuv2rgb_coeffs = sws_getCoefficients( colorspace );
    sws_setColorspaceDetails( sws_ctx,
                              yuv2rgb_coeffs, yuv_range,
                              yuv2rgb_coeffs, yuv_range,
                              0, 1 << 16, 1 << 16 );
    if( sws_init_context( sws_ctx, NULL, NULL ) < 0 )
    {
        sws_freeContext( sws_ctx );
        return NULL;
    }
    return sws_ctx;
}
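A sketch of how a caller might drive the function above (hypothetical names: frame, out_data, out_linesize). Since srcw and dstw are both set to the same width, this helper converts pixel format only, at a fixed size.

/* Rebuild the scaler whenever the source geometry might have changed,
 * then convert the decoded frame to YUV420P at the same size. */
sws_ctx = update_scaler_configuration( sws_ctx, SWS_FAST_BILINEAR,
                                       frame->width, frame->height,
                                       frame->format, AV_PIX_FMT_YUV420P,
                                       frame->colorspace,
                                       frame->color_range == AVCOL_RANGE_JPEG );
if( !sws_ctx )
    return -1;
sws_scale( sws_ctx, (const uint8_t * const *)frame->data, frame->linesize,
           0, frame->height, out_data, out_linesize );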
SwsContext *GetSwsContext(int SrcW, int SrcH, PixelFormat SrcFormat, int SrcColorSpace, int SrcColorRange,
                          int DstW, int DstH, PixelFormat DstFormat, int DstColorSpace, int DstColorRange,
                          int64_t Flags)
{
    Flags |= SWS_FULL_CHR_H_INT | SWS_FULL_CHR_H_INP;
#if LIBSWSCALE_VERSION_INT < AV_VERSION_INT(0, 12, 0)
    return sws_getContext(SrcW, SrcH, SrcFormat, DstW, DstH, DstFormat, Flags, 0, 0, 0);
#else
    SwsContext *Context = sws_alloc_context();
    if (!Context)
        return 0;

    // 0 = limited range, 1 = full range
    int SrcRange = SrcColorRange == AVCOL_RANGE_JPEG;
    int DstRange = DstColorRange == AVCOL_RANGE_JPEG;

    av_opt_set_int(Context, "sws_flags",  Flags,     0);
    av_opt_set_int(Context, "srcw",       SrcW,      0);
    av_opt_set_int(Context, "srch",       SrcH,      0);
    av_opt_set_int(Context, "dstw",       DstW,      0);
    av_opt_set_int(Context, "dsth",       DstH,      0);
    av_opt_set_int(Context, "src_range",  SrcRange,  0);
    av_opt_set_int(Context, "dst_range",  DstRange,  0);
    av_opt_set_int(Context, "src_format", SrcFormat, 0);
    av_opt_set_int(Context, "dst_format", DstFormat, 0);

    sws_setColorspaceDetails(Context,
                             sws_getCoefficients(SrcColorSpace), SrcRange,
                             sws_getCoefficients(DstColorSpace), DstRange,
                             0, 1 << 16, 1 << 16);

    if (sws_init_context(Context, 0, 0) < 0) {
        sws_freeContext(Context);
        return 0;
    }

    return Context;
#endif
}
int yuv420P2rgb24(unsigned char *yuv_data, unsigned char *rgb24_data, int width, int height)
{
    //Parameters
    // FILE *src_file = fopen("pre.yuv", "rb");
    // if (src_file == NULL)
    // {
    //     perror("open pre.yuv error\n");
    // }
    const int src_w = 1328, src_h = height; // source is hard-coded to 1328 wide (1328*720); "width" only affects the destination
    enum AVPixelFormat src_pixfmt = AV_PIX_FMT_YUV420P;
    int src_bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(src_pixfmt));

    // FILE *dst_file = fopen("sintel_1280x720_rgb24.rgb", "wb");
    // if (dst_file == NULL)
    // {
    //     perror("open sintel_1280x720_rgb24.rgb error\n");
    // }
    const int dst_w = width, dst_h = height;
    enum AVPixelFormat dst_pixfmt = AV_PIX_FMT_RGB24;
    int dst_bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(dst_pixfmt));

    //Structures
    uint8_t *src_data[4];
    int src_linesize[4];
    uint8_t *dst_data[4];
    int dst_linesize[4];

    int rescale_method = SWS_BICUBIC;
    struct SwsContext *img_convert_ctx;
    uint8_t *temp_buffer = (uint8_t *)malloc(src_w * src_h * src_bpp / 8);

    int frame_idx = 0;
    int ret = 0;
    ret = av_image_alloc(src_data, src_linesize, src_w, src_h, src_pixfmt, 1);
    if (ret < 0)
    {
        printf("Could not allocate source image\n");
        return -1;
    }
    ret = av_image_alloc(dst_data, dst_linesize, dst_w, dst_h, dst_pixfmt, 1);
    if (ret < 0)
    {
        printf("Could not allocate destination image\n");
        return -1;
    }

    //-----------------------------
    //Init Method 1
    img_convert_ctx = sws_alloc_context();
    //Show AVOption
    //av_opt_show2(img_convert_ctx, stdout, AV_OPT_FLAG_VIDEO_PARAM, 0);
    //Set Value
    av_opt_set_int(img_convert_ctx, "sws_flags", SWS_BICUBIC | SWS_PRINT_INFO, 0);
    av_opt_set_int(img_convert_ctx, "srcw", src_w, 0);
    av_opt_set_int(img_convert_ctx, "srch", src_h, 0);
    av_opt_set_int(img_convert_ctx, "src_format", src_pixfmt, 0);
    //'0' for MPEG (Y:0-235); '1' for JPEG (Y:0-255)
    av_opt_set_int(img_convert_ctx, "src_range", 1, 0);
    av_opt_set_int(img_convert_ctx, "dstw", dst_w, 0);
    av_opt_set_int(img_convert_ctx, "dsth", dst_h, 0);
    av_opt_set_int(img_convert_ctx, "dst_format", dst_pixfmt, 0);
    av_opt_set_int(img_convert_ctx, "dst_range", 1, 0);
    sws_init_context(img_convert_ctx, NULL, NULL);

    //Init Method 2
    //img_convert_ctx = sws_getContext(src_w, src_h, src_pixfmt, dst_w, dst_h, dst_pixfmt,
    //                                 rescale_method, NULL, NULL, NULL);
    //-----------------------------

    /*
    //Colorspace
    ret = sws_setColorspaceDetails(img_convert_ctx, sws_getCoefficients(SWS_CS_ITU601), 0,
                                   sws_getCoefficients(SWS_CS_ITU709), 0,
                                   0, 1 << 16, 1 << 16);
    if (ret == -1)
    {
        printf("Colorspace not support.\n");
        return -1;
    }
    */

    //Copy the caller's frame into the temporary buffer. (This was commented
    //out along with the original file-reading loop, which left temp_buffer
    //uninitialized; the loop is kept below for reference.)
    memcpy(temp_buffer, yuv_data, src_w * src_h * src_bpp / 8);
    // while (1)
    // {
    //     if (fread(temp_buffer, 1, src_w*src_h*src_bpp/8, src_file) != src_w*src_h*src_bpp/8) {
    //         break;
    //     }

    switch (src_pixfmt) {
    case AV_PIX_FMT_GRAY8: {
        memcpy(src_data[0], temp_buffer, src_w * src_h);
        break;
    }
    case AV_PIX_FMT_YUV420P: {
        memcpy(src_data[0], temp_buffer, src_w * src_h);                              //Y
        memcpy(src_data[1], temp_buffer + src_w * src_h, src_w * src_h / 4);          //U
        memcpy(src_data[2], temp_buffer + src_w * src_h * 5 / 4, src_w * src_h / 4);  //V
        break;
    }
    case AV_PIX_FMT_YUV422P: {
        memcpy(src_data[0], temp_buffer, src_w * src_h);                              //Y
        memcpy(src_data[1], temp_buffer + src_w * src_h, src_w * src_h / 2);          //U
        memcpy(src_data[2], temp_buffer + src_w * src_h * 3 / 2, src_w * src_h / 2);  //V
        break;
    }
    case AV_PIX_FMT_YUV444P: {
        memcpy(src_data[0], temp_buffer, src_w * src_h);                      //Y
        memcpy(src_data[1], temp_buffer + src_w * src_h, src_w * src_h);      //U
        memcpy(src_data[2], temp_buffer + src_w * src_h * 2, src_w * src_h);  //V
        break;
    }
    case AV_PIX_FMT_YUYV422: {
        memcpy(src_data[0], temp_buffer, src_w * src_h * 2); //Packed
        break;
    }
    case AV_PIX_FMT_RGB24: {
        memcpy(src_data[0], temp_buffer, src_w * src_h * 3); //Packed
        break;
    }
    default: {
        printf("Not Support Input Pixel Format.\n");
        break;
    }
    }
    sws_scale(img_convert_ctx, src_data, src_linesize, 0, src_h, dst_data, dst_linesize);
    frame_idx++;

    switch (dst_pixfmt) {
    case AV_PIX_FMT_GRAY8: {
        memcpy(rgb24_data, dst_data[0], dst_w * dst_h);
        break;
    }
    case AV_PIX_FMT_YUV420P: {
        memcpy(rgb24_data, dst_data[0], dst_w * dst_h);     rgb24_data += dst_w * dst_h;     //Y
        memcpy(rgb24_data, dst_data[1], dst_w * dst_h / 4); rgb24_data += dst_w * dst_h / 4; //U
        memcpy(rgb24_data, dst_data[2], dst_w * dst_h / 4); rgb24_data += dst_w * dst_h / 4; //V
        break;
    }
    case AV_PIX_FMT_YUV422P: {
        // fwrite(dst_data[0], 1, dst_w*dst_h,   dst_file); //Y
        // fwrite(dst_data[1], 1, dst_w*dst_h/2, dst_file); //U
        // fwrite(dst_data[2], 1, dst_w*dst_h/2, dst_file); //V
        break;
    }
    case AV_PIX_FMT_YUV444P: {
        // fwrite(dst_data[0], 1, dst_w*dst_h, dst_file); //Y
        // fwrite(dst_data[1], 1, dst_w*dst_h, dst_file); //U
        // fwrite(dst_data[2], 1, dst_w*dst_h, dst_file); //V
        break;
    }
    case AV_PIX_FMT_YUYV422: {
        // fwrite(dst_data[0], 1, dst_w*dst_h*2, dst_file); //Packed
        break;
    }
    case AV_PIX_FMT_RGB24: {
        memcpy(rgb24_data, dst_data[0], dst_w * dst_h * 3); //Packed
        // fwrite(dst_data[0], 1, dst_w*dst_h*3, dst_file);
        break;
    }
    default: {
        printf("Not Support Output Pixel Format.\n");
        break;
    }
    }
    // } // end of the original read loop

    sws_freeContext(img_convert_ctx);
    free(temp_buffer);
    // fclose(dst_file);
    av_freep(&src_data[0]);
    av_freep(&dst_data[0]);
    return 0;
}
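A hypothetical call site for yuv420P2rgb24() (not from the original source). Note that the function reads the source as 1328 pixels wide no matter what width is passed, so the input buffer is sized for 1328x720 here.

int w = 1328, h = 720;
unsigned char *yuv = (unsigned char *)malloc(w * h * 3 / 2); /* YUV420P: 12 bits per pixel */
unsigned char *rgb = (unsigned char *)malloc(w * h * 3);     /* RGB24:   24 bits per pixel */
if (yuv && rgb) {
    /* ... fill "yuv" with one decoded frame ... */
    if (yuv420P2rgb24(yuv, rgb, w, h) == 0) {
        /* "rgb" now holds the packed RGB24 image */
    }
}
free(yuv);
free(rgb);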
unsigned long* ffmpegsimple_readfirstframe_impl(
        struct ffmpeg_simple_data *d,
        const char* filename,
        int *width,
        int *height)
{
    if (avformat_open_input(&d->pFormatCtx, filename, NULL, NULL) != 0) {
        fprintf(stderr, "av_open_input_file failed\n");
        return NULL;
    }

    if (avformat_find_stream_info(d->pFormatCtx, NULL) < 0) {
        fprintf(stderr, "avformat_find_stream_info failed\n");
        return NULL;
    }

    int i;
    for (i = 0; i < d->pFormatCtx->nb_streams; i++) {
        AVStream *r = d->pFormatCtx->streams[i];
        if (r->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (!d->videoStream) {
                d->videoStream = r;
                d->videoStreamIndex = i;
            } else {
                fprintf(stderr, "Warning: ignoring other video streams\n");
            }
        }
    }

    if (!d->videoStream) {
        fprintf(stderr, "No video streams found\n");
        return NULL;
    }

    d->pCodec = avcodec_find_decoder(d->videoStream->codec->codec_id);
    if (d->pCodec == NULL) {
        fprintf(stderr, "Can't find the codec\n");
        return NULL;
    }

    if (avcodec_open2(d->videoStream->codec, d->pCodec, NULL) < 0) {
        fprintf(stderr, "Can't open the codec\n");
        return NULL;
    }

    int packet_limit = 256;
    int no_more_frames_flag = 0;
    int read_at_least_one_packet = 0;
    int read_at_least_one_our_packet = 0;

    // Allocate the frames once, outside the loop (the original allocated
    // them on every iteration, leaking the previous pair).
    d->frame    = avcodec_alloc_frame();
    d->frameRGB = avcodec_alloc_frame();

    for (;;) {
        if (!no_more_frames_flag) {
            for (;;) {
                int ret = av_read_frame(d->pFormatCtx, &d->packet);
                if (ret < 0) {
                    no_more_frames_flag = 1;
                    break;
                }
                read_at_least_one_packet = 1;

                if (d->packet.stream_index == d->videoStream->index) {
                    read_at_least_one_our_packet = 1;
                    break; // OK, it's our packet
                }

                if (!--packet_limit) {
                    fprintf(stderr, "Packet limit expired while reading more packets\n");
                    return NULL;
                }
            }
        }

        if (no_more_frames_flag && !read_at_least_one_our_packet) {
            if (read_at_least_one_packet) {
                fprintf(stderr, "Can't read any packet for our track\n");
            } else {
                fprintf(stderr, "Can't read any packets\n");
            }
            return NULL;
        }

        int isFrameAvailable;
        int ret = avcodec_decode_video2(d->videoStream->codec, d->frame,
                                        &isFrameAvailable, &d->packet);
        if (ret < 0) {
            fprintf(stderr, "avcodec_decode_video2 failed\n");
            return NULL;
        }

        if (isFrameAvailable) {
            break;
        }

        if (!--packet_limit) {
            fprintf(stderr, "Decoding tries limit expired\n");
            return NULL;
        }
    }

    int w = d->frame->width;
    int h = d->frame->height;
    if (width)  *width  = w;
    if (height) *height = h;

    int numBytes = avpicture_get_size(PIX_FMT_BGRA, w, h);

    // sws_getCachedContext() expects NULL or a context it returned earlier;
    // the original pre-allocated one with sws_alloc_context(), which is
    // unnecessary and incorrect.
    d->img_convert_ctx = sws_getCachedContext(NULL,
            w, h, d->videoStream->codec->pix_fmt,
            w, h, PIX_FMT_BGRA, SWS_BICUBIC,
            NULL, NULL, NULL);
    if (d->img_convert_ctx == NULL) {
        fprintf(stderr, "Cannot initialize the conversion context\n");
        return NULL;
    }

    unsigned char* buffer = malloc(numBytes);
    if (!buffer) {
        fprintf(stderr, "Cannot allocate picture buffer\n");
        return NULL;
    }

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    avpicture_fill((AVPicture *)d->frameRGB, buffer, PIX_FMT_BGRA, w, h);

    sws_scale(d->img_convert_ctx,
              (const unsigned char* const*)d->frame->data, d->frame->linesize,
              0, d->videoStream->codec->height,
              d->frameRGB->data, d->frameRGB->linesize);

    return (unsigned long*) buffer;
}
void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *img_convert_ctx;

    //printf("Here0 \n");
    c = st->codec;

    if (c->pix_fmt != PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
           to the codec pixel format if needed */
        if (img_convert_ctx == NULL) {
#if (LIBSWSCALE_VERSION_INT < AV_VERSION_INT(0,12,0))
            img_convert_ctx = sws_getContext(c->width, c->height, PIX_FMT_YUV420P,
                                             c->width, c->height, c->pix_fmt,
                                             sws_flags, NULL, NULL, NULL);
#else
            img_convert_ctx = sws_alloc_context();
            if (img_convert_ctx == NULL) {
                fprintf(stderr, "Cannot initialize the conversion context\n");
                exit(1);
            }
            /* see http://permalink.gmane.org/gmane.comp.video.ffmpeg.devel/118362 */
            /* see http://ffmpeg-users.933282.n4.nabble.com/Documentation-for-sws-init-context-td2956723.html */
            av_set_int(img_convert_ctx, "srcw", c->width);
            av_set_int(img_convert_ctx, "srch", c->height);
            av_set_int(img_convert_ctx, "dstw", c->width);
            av_set_int(img_convert_ctx, "dsth", c->height);
            av_set_int(img_convert_ctx, "src_format", PIX_FMT_YUV420P);
            av_set_int(img_convert_ctx, "dst_format", c->pix_fmt);
            av_set_int(img_convert_ctx, "param0", 0);
            av_set_int(img_convert_ctx, "param1", 0);
            av_set_int(img_convert_ctx, "flags", sws_flags);
            sws_init_context(img_convert_ctx, NULL, NULL);
#endif
        }
        sws_scale(img_convert_ctx,
                  (const uint8_t* const *)tmp_picture->data, tmp_picture->linesize,
                  0, c->height,
                  picture_to_encode->data, picture_to_encode->linesize);
    } else {
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           future for that. */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = st->index;
        pkt.data = (uint8_t *)picture_to_encode;
        pkt.size = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        //printf("Here1 \n");
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture_to_encode);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
            if (c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index = st->index;
            pkt.data = video_outbuf;
            pkt.size = out_size;

            /* write the compressed frame in the media file */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }

    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
static int output_single_frame(AVFilterContext *ctx, AVFrame *in, double *var_values, int i,
                               double *zoom, double *dx, double *dy)
{
    ZPContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int64_t pts = s->frame_count;
    int k, x, y, w, h, ret = 0;
    uint8_t *input[4];
    int px[4], py[4];
    AVFrame *out;

    var_values[VAR_PX]        = s->x;
    var_values[VAR_PY]        = s->y;
    var_values[VAR_PZOOM]     = s->prev_zoom;
    var_values[VAR_PDURATION] = s->prev_nb_frames;
    var_values[VAR_TIME]      = pts * av_q2d(outlink->time_base);
    var_values[VAR_FRAME]     = i;
    var_values[VAR_ON]        = outlink->frame_count + 1;

    if ((ret = av_expr_parse_and_eval(zoom, s->zoom_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;

    *zoom = av_clipd(*zoom, 1, 10);
    var_values[VAR_ZOOM] = *zoom;
    w = in->width  * (1.0 / *zoom);
    h = in->height * (1.0 / *zoom);

    if ((ret = av_expr_parse_and_eval(dx, s->x_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;
    x = *dx = av_clipd(*dx, 0, FFMAX(in->width - w, 0));
    var_values[VAR_X] = *dx;
    x &= ~((1 << s->desc->log2_chroma_w) - 1);

    if ((ret = av_expr_parse_and_eval(dy, s->y_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return ret;
    y = *dy = av_clipd(*dy, 0, FFMAX(in->height - h, 0));
    var_values[VAR_Y] = *dy;
    y &= ~((1 << s->desc->log2_chroma_h) - 1);

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        ret = AVERROR(ENOMEM);
        return ret;
    }

    px[1] = px[2] = AV_CEIL_RSHIFT(x, s->desc->log2_chroma_w);
    px[0] = px[3] = x;
    py[1] = py[2] = AV_CEIL_RSHIFT(y, s->desc->log2_chroma_h);
    py[0] = py[3] = y;

    s->sws = sws_alloc_context();
    if (!s->sws) {
        ret = AVERROR(ENOMEM);
        return ret;
    }

    for (k = 0; in->data[k]; k++)
        input[k] = in->data[k] + py[k] * in->linesize[k] + px[k];

    av_opt_set_int(s->sws, "srcw", w, 0);
    av_opt_set_int(s->sws, "srch", h, 0);
    av_opt_set_int(s->sws, "src_format", in->format, 0);
    av_opt_set_int(s->sws, "dstw", outlink->w, 0);
    av_opt_set_int(s->sws, "dsth", outlink->h, 0);
    av_opt_set_int(s->sws, "dst_format", outlink->format, 0);
    av_opt_set_int(s->sws, "sws_flags", SWS_BICUBIC, 0);

    if ((ret = sws_init_context(s->sws, NULL, NULL)) < 0)
        return ret;

    sws_scale(s->sws, (const uint8_t *const *)&input, in->linesize, 0, h,
              out->data, out->linesize);

    out->pts = pts;
    s->frame_count++;

    ret = ff_filter_frame(outlink, out);
    sws_freeContext(s->sws);
    s->sws = NULL;
    s->current_frame++;

    return ret;
}
int FrameScaler::SetResize(int srcWidth, int srcHeight, int srcLineWidth, int dstWidth, int dstHeight, int dstLineWidth)
{
    //Check size
    if (!srcWidth || !srcHeight || !srcLineWidth || !dstWidth || !dstHeight || !dstLineWidth)
    {
        //If we got a context
        if (resizeCtx)
            //Close context
            sws_freeContext(resizeCtx);
        //No valid context
        resizeCtx = NULL;
        //Exit
        return 0;
    }

    //Check if we already have a scaler for this
    if (resizeCtx && (resizeWidth==srcWidth) && (srcHeight==resizeHeight) && (dstWidth==resizeDstWidth) && (dstHeight==resizeDstHeight))
        //Done
        return 1;

    //If we already got a context
    if (resizeCtx)
        //Close context
        sws_freeContext(resizeCtx);

    //Create new context
    if (!(resizeCtx = sws_alloc_context()))
        //Exit
        return 0;

    //Set properties of context
    av_opt_set_defaults(resizeCtx);
    av_opt_set_int(resizeCtx, "srcw",       srcWidth,        AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(resizeCtx, "srch",       srcHeight,       AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(resizeCtx, "src_format", PIX_FMT_YUV420P, AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(resizeCtx, "dstw",       dstWidth,        AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(resizeCtx, "dsth",       dstHeight,       AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(resizeCtx, "dst_format", PIX_FMT_YUV420P, AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(resizeCtx, "sws_flags",  resizeFlags,     AV_OPT_SEARCH_CHILDREN);

    //Init context
    if (sws_init_context(resizeCtx, NULL, NULL) < 0)
    {
        //Free context
        sws_freeContext(resizeCtx);
        //Nullify it
        resizeCtx = NULL;
        //Exit
        return Error("Couldn't init sws context");
    }

    //Set values
    resizeWidth        = srcWidth;
    resizeHeight       = srcHeight;
    resizeDstWidth     = dstWidth;
    resizeDstHeight    = dstHeight;
    resizeDstLineWidth = dstLineWidth;

    //To use MMX2 we need the width and height to be multiples of 32
    tmpWidth  = (resizeDstWidth/32 + 1)*32;
    tmpHeight = (resizeDstHeight/32 + 1)*32;

    //Get tmp buffer size
    tmpBufferSize = tmpWidth*tmpHeight*3/2 + FF_INPUT_BUFFER_PADDING_SIZE + 32;

    //Check if we had it already
    if (tmpBuffer)
        //Free it
        free(tmpBuffer);
    //Allocate it
    tmpBuffer = (BYTE*)malloc(tmpBufferSize);

    //Set values for line sizes
    resizeSrc[0] = srcLineWidth;
    resizeSrc[1] = srcLineWidth/2;
    resizeSrc[2] = srcLineWidth/2;

    /*resizeDst[0] = dstLineWidth;
    resizeDst[1] = dstLineWidth/2;
    resizeDst[2] = dstLineWidth/2;*/

    resizeDst[0] = tmpWidth;
    resizeDst[1] = tmpWidth/2;
    resizeDst[2] = tmpWidth/2;

    //Get tmp planes
    tmpY = ALIGNTO32(tmpBuffer);
    tmpU = tmpY + tmpWidth*tmpHeight;
    tmpV = tmpU + tmpWidth*tmpHeight/4;

    //Exit
    return 1;
}
/*
 * Initialize the video decode parameters
 * platform: 0: A5s66  1: 3516A (HiSilicon)
 * encode:   0: H.264  1: H.265  2: JPEG
 */
void QDecodeStream::ipcam_decode_init(int platform, int encode)
{
    int pps_code_size;
    AVDictionary *opts = NULL;

    avcodec_register_all(); // register all formats and codecs; must only be called once

    if(encode == H264)
        decode_h264.codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    else if(encode == H265)
        decode_h264.codec = avcodec_find_decoder(AV_CODEC_ID_H265);
    else if(encode == JPEG)
        decode_h264.codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);

    decode_h264.codecCtx = avcodec_alloc_context3(decode_h264.codec); // decoder context for the chosen decoder
    //decode_h264.codecCtx->flags |= CODEC_FLAG_TRUNCATED;

    // CODEC_FLAG_LOW_DELAY lives in "flags", not "flags2" (the original
    // tested the wrong field)
    int frame_thread_supported =
        (decode_h264.codecCtx->codec->capabilities & CODEC_CAP_FRAME_THREADS) &&
        !(decode_h264.codecCtx->flags  & CODEC_FLAG_TRUNCATED) &&
        !(decode_h264.codecCtx->flags  & CODEC_FLAG_LOW_DELAY) &&
        !(decode_h264.codecCtx->flags2 & CODEC_FLAG2_CHUNKS);

    decode_h264.codecCtx->delay = 0;
    if(decode_h264.codecCtx->thread_count == 1) {
        decode_h264.codecCtx->active_thread_type = 0;
    } else if(frame_thread_supported && (decode_h264.codecCtx->thread_type & FF_THREAD_FRAME)) {
        decode_h264.codecCtx->active_thread_type = FF_THREAD_FRAME;
    } else if((decode_h264.codecCtx->codec->capabilities & CODEC_CAP_SLICE_THREADS) &&
              (decode_h264.codecCtx->thread_type & FF_THREAD_SLICE)) {
        decode_h264.codecCtx->active_thread_type = FF_THREAD_SLICE;
    } else if(!(decode_h264.codecCtx->codec->capabilities & CODEC_CAP_AUTO_THREADS)) {
        decode_h264.codecCtx->thread_count = 1;
        decode_h264.codecCtx->active_thread_type = 0;
    }

    if(decode_h264.codecCtx->thread_count > MAX_AUTO_THREADS) {
        av_log(decode_h264.codecCtx, AV_LOG_WARNING,
               "Application has requested %d threads. Using a thread count greater than %d is not recommended.\n",
               decode_h264.codecCtx->thread_count, MAX_AUTO_THREADS);
    }

    // choose the PPS/PSP header data according to the platform and encoding
    pps_code_size = decoder_pps_psp(platform, encode);
    if(pps_code_size == 0)
        printf("get pps & psp head failed!\n");

    /*
    // decoder tuning options
    if(encode == H265) {
        av_opt_set(decode_h264.codecCtx->priv_data, "x265-params", "qp=0", 0);
        av_opt_set(decode_h264.codecCtx->priv_data, "preset", "veryfast", 0);
        av_opt_set(decode_h264.codecCtx->priv_data, "x265-params", "crf=10", 0);
    }
    decode_h264.codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    decode_h264.codecCtx->time_base.den = 25;
    decode_h264.codecCtx->global_quality = 1;
    av_opt_set(decode_h264.codecCtx->priv_data, "tune", "zero-latency", 0);
    decode_h264.codecCtx->active_thread_type |= FF_THREAD_FRAME;
    */

    // open the codec
    avcodec_open2(decode_h264.codecCtx, decode_h264.codec, &opts);

    decode_h264.frame = av_frame_alloc();
    // use the parser that matches the selected codec (the original always
    // initialized an H.264 parser, even for H.265/JPEG streams)
    decode_h264.parser = av_parser_init(decode_h264.codecCtx->codec_id);

    // NOTE: sws_init_context() fails on an unconfigured context
    // (width/height/formats default to 0); see the sketch below.
    decode_h264.img_convert_ctx = sws_alloc_context();
    sws_init_context(decode_h264.img_convert_ctx, NULL, NULL);

    // allocate storage for one decoded frame
    one_frame_buffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_RGB24,
                                            play_win->image.width, play_win->image.height));
}
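sws_init_context() rejects a context whose width/height are still zero, so the bare alloc/init pair above can only succeed once the scaler options are filled in. A sketch of the missing configuration, assuming the stream is decoded to YUV420P at the display window's size (both are assumptions, since the real dimensions are only known after the first decoded frame):

av_opt_set_int(decode_h264.img_convert_ctx, "srcw", play_win->image.width,  0);
av_opt_set_int(decode_h264.img_convert_ctx, "srch", play_win->image.height, 0);
av_opt_set_int(decode_h264.img_convert_ctx, "src_format", AV_PIX_FMT_YUV420P, 0);
av_opt_set_int(decode_h264.img_convert_ctx, "dstw", play_win->image.width,  0);
av_opt_set_int(decode_h264.img_convert_ctx, "dsth", play_win->image.height, 0);
av_opt_set_int(decode_h264.img_convert_ctx, "dst_format", AV_PIX_FMT_RGB24, 0);
av_opt_set_int(decode_h264.img_convert_ctx, "sws_flags", SWS_BICUBIC, 0);
if (sws_init_context(decode_h264.img_convert_ctx, NULL, NULL) < 0)
    printf("sws_init_context failed!\n");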
void VideoPlayer::start()
{
    LOG_START();

    if (! mStop) {
        return;
    }
    mStop = false;
    mTimeScale = 1.0;

    av_register_all();

    mFilePath = CCFileUtils::sharedFileUtils()->fullPathForFilename(mPath);
    vLOGE("file path: %s\n", mFilePath.c_str());

    if (avformat_open_input(&mFormatCtx, mFilePath.c_str(), nullptr, nullptr) != 0) {
        vLOGE("avformat_open_input failed.\n");
        return;
    }

    if (avformat_find_stream_info(mFormatCtx, nullptr) < 0) {
        vLOGE("avformat_find_stream_info failed.\n");
        return;
    }

    for (int i = 0; i < mFormatCtx->nb_streams; i++) {
        if (mVideoStreamIndex == -1 && mFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            mVideoStreamIndex = i;
            break;
        }
    }

    vLOGE("video stream index: %d\n", mVideoStreamIndex);
    if (mVideoStreamIndex == -1) {
        return;
    }

    if (mWidth <= 0) {
        mWidth = mFormatCtx->streams[mVideoStreamIndex]->codec->width;
    }
    if (mHeight <= 0) {
        mHeight = mFormatCtx->streams[mVideoStreamIndex]->codec->height;
    }
    vLOGE("width: %d height: %d\n", mWidth, mHeight);

    mCodecCtx = mFormatCtx->streams[mVideoStreamIndex]->codec;
    mCodecCtx->thread_count = 4;

    AVCodec *codec = avcodec_find_decoder(mCodecCtx->codec_id);
    if (codec == nullptr) {
        vLOGE("avcodec_find_decoder failed.\n");
        return;
    }

    if (avcodec_open2(mCodecCtx, codec, nullptr) != 0) {
        vLOGE("avcodec_open2 failed.\n");
        return;
    }

    if ((mFrame = av_frame_alloc()) == nullptr) {
        vLOGE("av_frame_alloc failed.\n");
    }

    // sws_getContext() allocates and initializes the context itself; the
    // original also called sws_alloc_context()/sws_init_context() first,
    // which only leaked an unconfigured context.
    mImageConvertCtx = sws_getContext(mCodecCtx->width, mCodecCtx->height, mCodecCtx->pix_fmt,
                                      mWidth, mHeight, PIX_FMT_RGB24,
                                      SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (mImageConvertCtx == nullptr) {
        vLOGE("sws_getContext failed.\n");
        return;
    }

    const CCSize& size = CCDirector::getInstance()->getWinSize();
    setPosition(Vec2(size.width, size.height));

    pthread_create(&mDcoderThread, nullptr, doProcessVideo, this);
    pthread_create(&mRenderThread, nullptr, doTimeCounter, this);

    LOG_END();
}
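The scaler created in start() is never released in this snippet; a matching teardown might look like this (hypothetical, assuming a stop() counterpart exists):

// e.g. in VideoPlayer::stop(): release the scaler allocated in start()
if (mImageConvertCtx != nullptr) {
    sws_freeContext(mImageConvertCtx);
    mImageConvertCtx = nullptr;
}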
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ZPContext *s = ctx->priv;
    double var_values[VARS_NB], nb_frames, zoom, dx, dy;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(in->format);
    AVFrame *out;
    int i, k, x, y, w, h, ret = 0;

    var_values[VAR_IN_W]  = var_values[VAR_IW] = in->width;
    var_values[VAR_IN_H]  = var_values[VAR_IH] = in->height;
    var_values[VAR_OUT_W] = var_values[VAR_OW] = s->w;
    var_values[VAR_OUT_H] = var_values[VAR_OH] = s->h;
    var_values[VAR_IN]    = inlink->frame_count + 1;
    var_values[VAR_ON]    = outlink->frame_count + 1;
    var_values[VAR_PX]    = s->x;
    var_values[VAR_PY]    = s->y;
    var_values[VAR_X]     = 0;
    var_values[VAR_Y]     = 0;
    var_values[VAR_PZOOM] = s->prev_zoom;
    var_values[VAR_ZOOM]  = 1;
    var_values[VAR_PDURATION] = s->prev_nb_frames;
    var_values[VAR_A]     = (double) in->width / in->height;
    var_values[VAR_SAR]   = inlink->sample_aspect_ratio.num ?
        (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
    var_values[VAR_DAR]   = var_values[VAR_A] * var_values[VAR_SAR];
    var_values[VAR_HSUB]  = 1 << desc->log2_chroma_w;
    var_values[VAR_VSUB]  = 1 << desc->log2_chroma_h;

    if ((ret = av_expr_parse_and_eval(&nb_frames, s->duration_expr_str,
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;

    var_values[VAR_DURATION] = nb_frames;
    for (i = 0; i < nb_frames; i++) {
        int px[4];
        int py[4];
        uint8_t *input[4];
        int64_t pts = av_rescale_q(in->pts, inlink->time_base,
                                   outlink->time_base) + s->frame_count;

        var_values[VAR_TIME]  = pts * av_q2d(outlink->time_base);
        var_values[VAR_FRAME] = i;
        var_values[VAR_ON]    = outlink->frame_count + 1;
        if ((ret = av_expr_parse_and_eval(&zoom, s->zoom_expr_str,
                                          var_names, var_values,
                                          NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
            goto fail;

        zoom = av_clipd(zoom, 1, 10);
        var_values[VAR_ZOOM] = zoom;
        w = in->width  * (1.0 / zoom);
        h = in->height * (1.0 / zoom);

        if ((ret = av_expr_parse_and_eval(&dx, s->x_expr_str,
                                          var_names, var_values,
                                          NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
            goto fail;
        x = dx = av_clipd(dx, 0, FFMAX(in->width - w, 0));
        var_values[VAR_X] = dx;
        x &= ~((1 << desc->log2_chroma_w) - 1);

        if ((ret = av_expr_parse_and_eval(&dy, s->y_expr_str,
                                          var_names, var_values,
                                          NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
            goto fail;
        y = dy = av_clipd(dy, 0, FFMAX(in->height - h, 0));
        var_values[VAR_Y] = dy;
        y &= ~((1 << desc->log2_chroma_h) - 1);

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        px[1] = px[2] = FF_CEIL_RSHIFT(x, desc->log2_chroma_w);
        px[0] = px[3] = x;
        py[1] = py[2] = FF_CEIL_RSHIFT(y, desc->log2_chroma_h);
        py[0] = py[3] = y;

        s->sws = sws_alloc_context();
        if (!s->sws) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        for (k = 0; in->data[k]; k++)
            input[k] = in->data[k] + py[k] * in->linesize[k] + px[k];

        av_opt_set_int(s->sws, "srcw", w, 0);
        av_opt_set_int(s->sws, "srch", h, 0);
        av_opt_set_int(s->sws, "src_format", in->format, 0);
        av_opt_set_int(s->sws, "dstw", outlink->w, 0);
        av_opt_set_int(s->sws, "dsth", outlink->h, 0);
        av_opt_set_int(s->sws, "dst_format", outlink->format, 0);
        av_opt_set_int(s->sws, "sws_flags", SWS_BICUBIC, 0);

        if ((ret = sws_init_context(s->sws, NULL, NULL)) < 0)
            goto fail;

        sws_scale(s->sws, (const uint8_t *const *)&input, in->linesize, 0, h,
                  out->data, out->linesize);

        out->pts = pts;
        s->frame_count++;

        ret = ff_filter_frame(outlink, out);
        if (ret < 0)
            break;

        sws_freeContext(s->sws);
        s->sws = NULL;
    }

    s->x = dx;
    s->y = dy;
    s->prev_zoom = zoom;
    s->prev_nb_frames = nb_frames;

fail:
    sws_freeContext(s->sws);
    s->sws = NULL;
    av_frame_free(&in);
    return ret;
}
MediaRet MediaRecorder::setup_video_stream(const char *fname, int w, int h, int d)
{
    AVCodecContext *ctx;
    vid_st = av_new_stream(oc, 0);
    if(!vid_st) {
        avformat_free_context(oc);
        oc = NULL;
        return MRET_ERR_NOMEM;
    }
    ctx = vid_st->codec;
    ctx->codec_id = oc->oformat->video_codec;
    ctx->codec_type = AVMEDIA_TYPE_VIDEO;
    ctx->width = w;
    ctx->height = h;
    ctx->time_base.den = 60;
    ctx->time_base.num = 1;
    // dunno if any of these help; some output just looks plain crappy
    // will have to investigate further
    ctx->bit_rate = 400000;
    ctx->gop_size = 12;
    ctx->max_b_frames = 2;
    switch(d) {
    case 16:
        // FIXME: test & make endian-neutral
        pixfmt = PIX_FMT_RGB565LE;
        break;
    case 24:
        pixfmt = PIX_FMT_RGB24;
        break;
    case 32:
    default: // should never be anything else
        pixfmt = PIX_FMT_RGBA;
        break;
    }
    ctx->pix_fmt = pixfmt;
    pixsize = d >> 3;
    linesize = pixsize * w;
    ctx->max_b_frames = 2;
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
        ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    AVCodec *codec = avcodec_find_encoder(oc->oformat->video_codec);
    // bail out before the codec is dereferenced below (the original only
    // checked for a NULL codec after reading codec->pix_fmts)
    if(!codec) {
        avformat_free_context(oc);
        oc = NULL;
        return MRET_ERR_NOCODEC;
    }

    // make sure RGB is supported (mostly not)
    if(codec->pix_fmts) {
        const enum PixelFormat *p;
        int64_t mask = 0;
        for(p = codec->pix_fmts; *p != -1; p++) {
            // may get complaints about 1LL; thus the cast
            mask |= ((int64_t)1) << *p;
            if(*p == pixfmt)
                break;
        }
        if(*p == -1) {
            // if not supported, use a converter to the next best format
            // this is swscale, the converter used by the output demo
            enum PixelFormat dp = (PixelFormat)avcodec_find_best_pix_fmt(mask, pixfmt, 0, NULL);
            if(dp == -1)
                dp = codec->pix_fmts[0];
            if(!(convpic = avcodec_alloc_frame()) ||
               avpicture_alloc((AVPicture *)convpic, dp, w, h) < 0) {
                avformat_free_context(oc);
                oc = NULL;
                return MRET_ERR_NOMEM;
            }
#if LIBSWSCALE_VERSION_INT < AV_VERSION_INT(0, 12, 0)
            converter = sws_getContext(w, h, pixfmt, w, h, dp, SWS_BICUBIC,
                                       NULL, NULL, NULL);
#else
            converter = sws_alloc_context();
            // what a convoluted, inefficient way to set options
            av_set_int(converter, "sws_flags", SWS_BICUBIC);
            av_set_int(converter, "srcw", w);
            av_set_int(converter, "srch", h);
            av_set_int(converter, "dstw", w);
            av_set_int(converter, "dsth", h);
            av_set_int(converter, "src_format", pixfmt);
            av_set_int(converter, "dst_format", dp);
            sws_init_context(converter, NULL, NULL);
#endif
            ctx->pix_fmt = dp;
        }
    }
    if(avcodec_open(ctx, codec)) {
        avformat_free_context(oc);
        oc = NULL;
        return MRET_ERR_NOCODEC;
    }
    return MRET_OK;
}
MediaRet MediaRecorder::setup_sound_stream(const char *fname, AVOutputFormat *fmt)
{
    oc = avformat_alloc_context();
    if(!oc)
        return MRET_ERR_NOMEM;
    oc->oformat = fmt;
    strncpy(oc->filename, fname, sizeof(oc->filename) - 1);
    oc->filename[sizeof(oc->filename) - 1] = 0;
    if(fmt->audio_codec == CODEC_ID_NONE)
        return MRET_OK;

    AVCodecContext *ctx;
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,10,0)
    aud_st = av_new_stream(oc, 1);
#else
    aud_st = avformat_new_stream(oc, NULL);
#endif
    if(!aud_st) {
        avformat_free_context(oc);
        oc = NULL;
        return MRET_ERR_NOMEM;
    }

    AVCodec *codec = avcodec_find_encoder(fmt->audio_codec);
    // check for the codec up front; the original read codec->sample_fmts
    // before its NULL check further down
    if(!codec) {
        avformat_free_context(oc);
        oc = NULL;
        return MRET_ERR_NOCODEC;
    }

    ctx = aud_st->codec;
    ctx->codec_id = fmt->audio_codec;
    ctx->codec_type = AVMEDIA_TYPE_AUDIO;
    // Some encoders don't like int16_t (SAMPLE_FMT_S16)
    ctx->sample_fmt = codec->sample_fmts[0];
    // This was changed in the initial ffmpeg 3.0 update,
    // but shouldn't (as far as I'm aware) cause problems with older versions
    ctx->bit_rate = 128000; // arbitrary; in case we're generating mp3
    ctx->sample_rate = soundGetSampleRate();
    ctx->channels = 2;
    ctx->time_base.den = 60;
    ctx->time_base.num = 1;
    if(fmt->flags & AVFMT_GLOBALHEADER)
        ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,6,0)
    if(avcodec_open(ctx, codec)) {
#else
    if(avcodec_open2(ctx, codec, NULL)) {
#endif
        avformat_free_context(oc);
        oc = NULL;
        return MRET_ERR_NOCODEC;
    }

    return MRET_OK;
}

MediaRet MediaRecorder::setup_video_stream(const char *fname, int w, int h, int d)
{
    AVCodecContext *ctx;
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,10,0)
    vid_st = av_new_stream(oc, 0);
#else
    vid_st = avformat_new_stream(oc, NULL);
#endif
    if(!vid_st) {
        avformat_free_context(oc);
        oc = NULL;
        return MRET_ERR_NOMEM;
    }
    ctx = vid_st->codec;
    ctx->codec_id = oc->oformat->video_codec;
    ctx->codec_type = AVMEDIA_TYPE_VIDEO;
    ctx->width = w;
    ctx->height = h;
    ctx->time_base.den = 60;
    ctx->time_base.num = 1;
    // dunno if any of these help; some output just looks plain crappy
    // will have to investigate further
    ctx->bit_rate = 400000;
    ctx->gop_size = 12;
    ctx->max_b_frames = 2;
    switch(d) {
    case 16:
        // FIXME: test & make endian-neutral
        pixfmt = PIX_FMT_RGB565LE;
        break;
    case 24:
        pixfmt = PIX_FMT_RGB24;
        break;
    case 32:
    default: // should never be anything else
        pixfmt = PIX_FMT_RGBA;
        break;
    }
    ctx->pix_fmt = pixfmt;
    pixsize = d >> 3;
    linesize = pixsize * w;
    ctx->max_b_frames = 2;
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
        ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    AVCodec *codec = avcodec_find_encoder(oc->oformat->video_codec);
    // as above, check for the codec before dereferencing it
    if(!codec) {
        avformat_free_context(oc);
        oc = NULL;
        return MRET_ERR_NOCODEC;
    }

    // make sure RGB is supported (mostly not)
    if(codec->pix_fmts) {
        const enum PixelFormat *p;
#if LIBAVCODEC_VERSION_MAJOR < 55
        int64_t mask = 0;
#endif
        for(p = codec->pix_fmts; *p != -1; p++) {
            // may get complaints about 1LL; thus the cast
#if LIBAVCODEC_VERSION_MAJOR < 55
            mask |= ((int64_t)1) << *p;
#endif
            if(*p == pixfmt)
                break;
        }
        if(*p == -1) {
            // if not supported, use a converter to the next best format
            // this is swscale, the converter used by the output demo
#if LIBAVCODEC_VERSION_MAJOR < 55
            enum PixelFormat dp = (PixelFormat)avcodec_find_best_pix_fmt(mask, pixfmt, 0, NULL);
#else
#if LIBAVCODEC_VERSION_MICRO >= 100
            // FFmpeg
            enum AVPixelFormat dp = avcodec_find_best_pix_fmt_of_list(codec->pix_fmts, pixfmt, 0, NULL);
#else
            // Libav
            enum AVPixelFormat dp = avcodec_find_best_pix_fmt2(codec->pix_fmts, pixfmt, 0, NULL);
#endif
#endif
            if(dp == -1)
                dp = codec->pix_fmts[0];
            if(!(convpic = avcodec_alloc_frame()) ||
               avpicture_alloc((AVPicture *)convpic, dp, w, h) < 0) {
                avformat_free_context(oc);
                oc = NULL;
                return MRET_ERR_NOMEM;
            }
#if LIBSWSCALE_VERSION_INT < AV_VERSION_INT(0, 12, 0)
            converter = sws_getContext(w, h, pixfmt, w, h, dp, SWS_BICUBIC,
                                       NULL, NULL, NULL);
#else
            converter = sws_alloc_context();
            // what a convoluted, inefficient way to set options
            av_opt_set_int(converter, "sws_flags", SWS_BICUBIC, 0);
            av_opt_set_int(converter, "srcw", w, 0);
            av_opt_set_int(converter, "srch", h, 0);
            av_opt_set_int(converter, "dstw", w, 0);
            av_opt_set_int(converter, "dsth", h, 0);
            av_opt_set_int(converter, "src_format", pixfmt, 0);
            av_opt_set_int(converter, "dst_format", dp, 0);
            sws_init_context(converter, NULL, NULL);
#endif
            ctx->pix_fmt = dp;
        }
    }
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,6,0)
    if(avcodec_open(ctx, codec)) {
#else
    if(avcodec_open2(ctx, codec, NULL)) {
#endif
        avformat_free_context(oc);
        oc = NULL;
        return MRET_ERR_NOCODEC;
    }
    return MRET_OK;
}

MediaRet MediaRecorder::finish_setup(const char *fname)
{
    if(audio_buf)
        free(audio_buf);
    if(audio_buf2)
        free(audio_buf2);
    audio_buf2 = NULL;
    in_audio_buf2 = 0;
    if(aud_st) {
        frame_len = aud_st->codec->frame_size * 4;
        sample_len = soundGetSampleRate() * 4 / 60;
        switch(aud_st->codec->codec_id) {
        case CODEC_ID_PCM_S16LE:
        case CODEC_ID_PCM_S16BE:
        case CODEC_ID_PCM_U16LE:
        case CODEC_ID_PCM_U16BE:
            frame_len = sample_len;
        }
        audio_buf = (uint8_t *)malloc(AUDIO_BUF_LEN);
        if(!audio_buf) {
            avformat_free_context(oc);
            oc = NULL;
            return MRET_ERR_NOMEM;
        }
        if(frame_len != sample_len && (frame_len > sample_len || sample_len % frame_len)) {
            audio_buf2 = (uint16_t *)malloc(frame_len);
            if(!audio_buf2) {
                avformat_free_context(oc);
                oc = NULL;
                return MRET_ERR_NOMEM;
            }
        }
    } else
        audio_buf = NULL;
    if(video_buf)
        free(video_buf);
    if(vid_st) {
        video_buf = (uint8_t *)malloc(VIDEO_BUF_LEN);
        if(!video_buf) {
            avformat_free_context(oc);
            oc = NULL;
            return MRET_ERR_NOMEM;
        }
    } else {
        video_buf = NULL;
    }
    if(!(oc->oformat->flags & AVFMT_NOFILE)) {
        if(avio_open(&oc->pb, fname, AVIO_FLAG_WRITE) < 0) {
            avformat_free_context(oc);
            oc = NULL;
            return MRET_ERR_FERR;
        }
    }
    avformat_write_header(oc, NULL);
    return MRET_OK;
}

MediaRet MediaRecorder::Record(const char *fname, int width, int height, int depth)
{
    if(oc)
        return MRET_ERR_RECORDING;
    aud_st = vid_st = NULL;
    AVOutputFormat *fmt = av_guess_format(NULL, fname, NULL);
    if(!fmt)
        fmt = av_guess_format("avi", NULL, NULL);
    if(!fmt || fmt->video_codec == CODEC_ID_NONE)
        return MRET_ERR_FMTGUESS;
    MediaRet ret;
    if((ret = setup_sound_stream(fname, fmt)) == MRET_OK &&
       (ret = setup_video_stream(fname, width, height, depth)) == MRET_OK)
        ret = finish_setup(fname);
    return ret;
}

MediaRet MediaRecorder::Record(const char *fname)
{
    if(oc)
        return MRET_ERR_RECORDING;
    aud_st = vid_st = NULL;
    AVOutputFormat *fmt = av_guess_format(NULL, fname, NULL);
    if(!fmt)
        fmt = av_guess_format("wav", NULL, NULL);
    if(!fmt || fmt->audio_codec == CODEC_ID_NONE)
        return MRET_ERR_FMTGUESS;
    MediaRet ret;
    if((ret = setup_sound_stream(fname, fmt)) == MRET_OK)
        ret = finish_setup(fname);
    return ret;
}

void MediaRecorder::Stop()
{
    if(oc) {
        if(in_audio_buf2)
            AddFrame((uint16_t *)0);
        av_write_trailer(oc);
        avformat_free_context(oc);
        oc = NULL;
    }
    if(audio_buf) {
        free(audio_buf);
        audio_buf = NULL;
    }
    if(video_buf) {
        free(video_buf);
        video_buf = NULL;
    }
    if(audio_buf2) {
        free(audio_buf2);
        audio_buf2 = NULL;
    }
    if(convpic) {
        avpicture_free((AVPicture *)convpic);
        av_free(convpic);
        convpic = NULL;
    }
    if(converter) {
        sws_freeContext(converter);
        converter = NULL;
    }
}

MediaRecorder::~MediaRecorder()
{
    Stop();
}

// Still needs updating for avcodec_encode_video2
MediaRet
MediaRecorder::AddFrame(const uint8_t *vid)
{
    if(!oc || !vid_st)
        return MRET_OK;

    AVCodecContext *ctx = vid_st->codec;
    AVPacket pkt;
#if LIBAVCODEC_VERSION_MAJOR > 56
    int ret, got_packet = 0;
#endif

    // strip borders. inconsistent between depths for some reason
    // but fortunately consistent between gb/gba.
    int tbord, rbord;
    switch(pixsize) {
    case 2:
        // 16-bit: 2 @ right, 1 @ top
        tbord = 1; rbord = 2;
        break;
    case 3:
        // 24-bit: no border
        tbord = rbord = 0;
        break;
    case 4:
        // 32-bit: 1 @ right, 1 @ top
        tbord = 1; rbord = 1;
        break;
    }
    avpicture_fill((AVPicture *)pic, (uint8_t *)vid + tbord * (linesize + pixsize * rbord),
                   (PixelFormat)pixfmt, ctx->width + rbord, ctx->height);

    // satisfy stupid sws_scale()'s integrity check
    pic->data[1] = pic->data[2] = pic->data[3] = pic->data[0];
    pic->linesize[1] = pic->linesize[2] = pic->linesize[3] = pic->linesize[0];

    AVFrame *f = pic;

    if(converter) {
        sws_scale(converter, pic->data, pic->linesize, 0, ctx->height,
                  convpic->data, convpic->linesize);
        f = convpic;
    }
    av_init_packet(&pkt);
    pkt.stream_index = vid_st->index;
    if(oc->oformat->flags & AVFMT_RAWPICTURE) {
        // this won't work due to border
        // not sure what formats set this, anyway
        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.data = f->data[0];
        pkt.size = linesize * ctx->height;
    } else {
#if LIBAVCODEC_VERSION_MAJOR > 56
        pkt.data = video_buf;
        pkt.size = VIDEO_BUF_LEN;
        f->format = ctx->pix_fmt;
        f->width = ctx->width;
        f->height = ctx->height;
        ret = avcodec_encode_video2(ctx, &pkt, f, &got_packet);
        if(!ret && got_packet && ctx->coded_frame) {
            ctx->coded_frame->pts = pkt.pts;
            ctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
        }
#else
        pkt.size = avcodec_encode_video(ctx, video_buf, VIDEO_BUF_LEN, f);
#endif
        if(!pkt.size)
            return MRET_OK;
        if(ctx->coded_frame && ctx->coded_frame->pts != AV_NOPTS_VALUE)
            pkt.pts = av_rescale_q(ctx->coded_frame->pts, ctx->time_base, vid_st->time_base);
        if(pkt.size > VIDEO_BUF_LEN) {
            avformat_free_context(oc);
            oc = NULL;
            return MRET_ERR_BUFSIZE;
        }
        if(ctx->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.data = video_buf;
    }
    if(av_interleaved_write_frame(oc, &pkt) < 0) {
        avformat_free_context(oc);
        oc = NULL;
        // yeah, err might not be a file error, but if it isn't, it's a
        // coding error rather than a user-controllable error
        // and better resolved using debugging
        return MRET_ERR_FERR;
    }
    return MRET_OK;
}

#if LIBAVCODEC_VERSION_MAJOR > 56
/* FFmpeg deprecated avcodec_encode_audio.
 * It was removed completely in 3.0.
 * This will at least get audio recording *working* */
static inline int MediaRecorderEncodeAudio(AVCodecContext *ctx,
                                           AVPacket *pkt,
                                           uint8_t *buf, int buf_size,
                                           const short *samples)
{
    AVFrame *frame;
    av_init_packet(pkt);
    int ret, samples_size, got_packet = 0;

    pkt->data = buf;
    pkt->size = buf_size;

    if (samples) {
        frame = av_frame_alloc(); // the original had a stray double assignment here
        if (ctx->frame_size) {
            frame->nb_samples = ctx->frame_size;
        } else {
            frame->nb_samples = (int64_t)buf_size * 8 /
                                (av_get_bits_per_sample(ctx->codec_id) * ctx->channels);
        }
        frame->format = ctx->sample_fmt;
        frame->channel_layout = ctx->channel_layout;
        samples_size = av_samples_get_buffer_size(NULL, ctx->channels,
                                                  frame->nb_samples,
                                                  ctx->sample_fmt, 1);
        avcodec_fill_audio_frame(frame, ctx->channels, ctx->sample_fmt,
                                 (const uint8_t *)samples, samples_size, 1);
        //frame->pts = AV_NOPTS_VALUE;
    } else {
        frame = NULL;
    }

    ret = avcodec_encode_audio2(ctx, pkt, frame, &got_packet);

    if (!ret && got_packet && ctx->coded_frame) {
        ctx->coded_frame->pts = pkt->pts;
        ctx->coded_frame->key_frame = !!(pkt->flags & AV_PKT_FLAG_KEY);
    }

    if (frame && frame->extended_data != frame->data)
        av_freep(&frame->extended_data);

    return ret;
}
int Logo::Load(const char* fileName)
{
    AVFormatContext *fctx = NULL;
    AVCodecContext *ctx = NULL;
    AVCodec *codec = NULL;
    AVFrame *logoRGB = NULL;
    AVFrame *logo = NULL;
    SwsContext *sws = NULL;
    AVPacket packet;
    int res = 0;
    int gotLogo = 0;
    int numpixels = 0;
    int size = 0;

    //Create context from file
    if (avformat_open_input(&fctx, fileName, NULL, NULL)<0)
        return Error("Couldn't open the logo image file [%s]\n", fileName);

    //Check it's ok
    if (avformat_find_stream_info(fctx, NULL)<0)
    {
        //Set error
        res = Error("Couldn't find stream information for the logo image file...\n");
        //Free resources
        goto end;
    }

    //Get codec from file format
    if (!(ctx = fctx->streams[0]->codec))
    {
        //Set error
        res = Error("Context codec not valid\n");
        //Free resources
        goto end;
    }

    //Get decoder for format
    if (!(codec = avcodec_find_decoder(ctx->codec_id)))
    {
        //Set error
        res = Error("Couldn't find codec for the logo image file...\n");
        //Free resources
        goto end;
    }

    //Only one thread
    ctx->thread_count = 1;

    //Open codec
    if (avcodec_open2(ctx, codec, NULL)<0)
    {
        //Set error
        res = Error("Couldn't open codec for the logo image file...\n");
        //Free resources
        goto end;
    }

    //Read logo frame
    if (av_read_frame(fctx, &packet)<0)
    {
        //Set error
        res = Error("Couldn't read frame from the image file...\n");
        //Free resources
        goto end;
    }

    //Alloc frame
    if (!(logoRGB = av_frame_alloc()))
    {
        //Set error
        res = Error("Couldn't alloc frame\n");
        //Free resources
        goto end;
    }

    //Use only one thread to avoid decoding on background and logo not displayed
    ctx->thread_count = 1;

    //Decode logo
    if (avcodec_decode_video2(ctx, logoRGB, &gotLogo, &packet)<0)
    {
        //Set error
        res = Error("Couldn't decode logo\n");
        //Free resources
        av_free_packet(&packet);
        goto end;
    }

    av_free_packet(&packet);

    //If we don't have a logo
    if (!gotLogo)
    {
        //Set error
        res = Error("No logo on file\n");
        //Free resources
        goto end;
    }

    //Allocate new one
    if (!(logo = av_frame_alloc()))
    {
        //Set error
        res = Error("Couldn't alloc frame\n");
        //Free resources
        goto end;
    }

    //Get frame sizes
    width = ctx->width;
    height = ctx->height;

    //Create YUV rescaler context
    if (!(sws = sws_alloc_context()))
    {
        //Set error
        res = Error("Couldn't alloc sws context\n");
        //Exit
        goto end;
    }

    //Set properties of YUV rescaler context
    av_opt_set_defaults(sws);
    av_opt_set_int(sws, "srcw",       width,              AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(sws, "srch",       height,             AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(sws, "src_format", ctx->pix_fmt,       AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(sws, "dstw",       width,              AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(sws, "dsth",       height,             AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(sws, "dst_format", AV_PIX_FMT_YUV420P, AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(sws, "sws_flags",  SWS_FAST_BILINEAR,  AV_OPT_SEARCH_CHILDREN);

    //Init YUV rescaler context
    if (sws_init_context(sws, NULL, NULL) < 0)
    {
        //Set error
        res = Error("Couldn't init sws context\n");
        //Exit
        goto end;
    }

    //Check if we already had one
    if (frame)
        //Free memory
        free(frame);
    //Check if we already had one
    if (frameRGBA)
        //Free memory
        free(frameRGBA);

    //Get size with padding
    size = (((width/32+1)*32)*((height/32+1)*32)*3)/2 + FF_INPUT_BUFFER_PADDING_SIZE + 32;
    //And number of pixels
    numpixels = width*height;

    //Allocate frames
    frame = (BYTE*)malloc32(size); /* size for YUV 420 */
    frameRGBA = (BYTE*)malloc32(numpixels*4);

    //Alloc data
    logo->data[0] = frame;
    logo->data[1] = logo->data[0] + numpixels;
    logo->data[2] = logo->data[1] + numpixels/4;

    //Set size for planes
    logo->linesize[0] = width;
    logo->linesize[1] = width/2;
    logo->linesize[2] = width/2;

    //Convert
    sws_scale(sws, logoRGB->data, logoRGB->linesize, 0, height, logo->data, logo->linesize);

    //Copy the decoded RGB logo into the RGBA buffer, pixel by pixel
    for (int j=0; j<height; j++)
        for (int i=0; i<width; i++)
            memcpy(frameRGBA+(width*j+i)*4, logoRGB->data[0]+logoRGB->linesize[0]*j+i*3, 3);

    //Everything was ok
    res = 1;

end:
    if (logo)
        av_free(logo);
    if (logoRGB)
        av_free(logoRGB);
    if (ctx)
        avcodec_close(ctx);
    if (sws)
        sws_freeContext(sws);
    if (fctx)
        avformat_close_input(&fctx);

    //Exit
    return res;
}