Example #1
int hardsubx_process_data(struct lib_hardsubx_ctx *ctx)
{
	// Get the required media attributes and initialize structures
	av_register_all();
	
	if(avformat_open_input(&ctx->format_ctx, ctx->inputfile[0], NULL, NULL)!=0)
	{
		fatal (EXIT_READ_ERROR, "Error reading input file!\n");
	}

	if(avformat_find_stream_info(ctx->format_ctx, NULL)<0)
	{
		fatal (EXIT_READ_ERROR, "Error reading input stream!\n");
	}

	// Important call in order to determine media information using ffmpeg
	// TODO: Handle multiple inputs
	av_dump_format(ctx->format_ctx, 0, ctx->inputfile[0], 0);
	

	ctx->video_stream_id = -1;
	for(int i = 0; i < ctx->format_ctx->nb_streams; i++)
	{
		if(ctx->format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			ctx->video_stream_id = i;
			break;
		}
	}
	if(ctx->video_stream_id == -1)
	{
		fatal (EXIT_READ_ERROR, "Video Stream not found!\n");
	}

	ctx->codec_ctx = ctx->format_ctx->streams[ctx->video_stream_id]->codec;
	ctx->codec = avcodec_find_decoder(ctx->codec_ctx->codec_id);
	if(ctx->codec == NULL)
	{
		fatal (EXIT_READ_ERROR, "Input codec is not supported!\n");
	}

	if(avcodec_open2(ctx->codec_ctx, ctx->codec, &ctx->options_dict) < 0)
	{
		fatal (EXIT_READ_ERROR, "Error opening input codec!\n");
	}

	ctx->frame = av_frame_alloc();
	ctx->rgb_frame = av_frame_alloc();
	if(!ctx->frame || !ctx->rgb_frame)
	{
		fatal(EXIT_NOT_ENOUGH_MEMORY, "Not enough memory to initialize frame!");
	}

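	// Size the RGB buffer with 16-byte row alignment; av_image_fill_arrays()
	// below uses align 1, so the 16-aligned size is always large enough.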
	int frame_bytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, ctx->codec_ctx->width, ctx->codec_ctx->height, 16);
	ctx->rgb_buffer = (uint8_t *)av_malloc(frame_bytes*sizeof(uint8_t));
	if(!ctx->rgb_buffer)
	{
		fatal(EXIT_NOT_ENOUGH_MEMORY, "Not enough memory to initialize RGB buffer!");
	}
	
	ctx->sws_ctx = sws_getContext(
			ctx->codec_ctx->width,
			ctx->codec_ctx->height,
			ctx->codec_ctx->pix_fmt,
			ctx->codec_ctx->width,
			ctx->codec_ctx->height,
			AV_PIX_FMT_RGB24,
			SWS_BILINEAR,
			NULL,NULL,NULL
		);

	av_image_fill_arrays(ctx->rgb_frame->data, ctx->rgb_frame->linesize, ctx->rgb_buffer, AV_PIX_FMT_RGB24, ctx->codec_ctx->width, ctx->codec_ctx->height, 1);

	// Pass on the processing context to the appropriate functions
	struct encoder_ctx *enc_ctx;
	enc_ctx = init_encoder(&ccx_options.enc_cfg);
	
	mprint("Beginning burned-in subtitle detection...\n");
	hardsubx_process_frames_linear(ctx, enc_ctx);

	dinit_encoder(&enc_ctx, 0); //TODO: Replace 0 with end timestamp

	// Free the allocated memory for frame processing
	av_free(ctx->rgb_buffer);
	av_frame_free(&ctx->rgb_frame);
	av_frame_free(&ctx->frame);
	sws_freeContext(ctx->sws_ctx);
	avcodec_close(ctx->codec_ctx);
	avformat_close_input(&ctx->format_ctx);

	return 0;
}
Example #2
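/* Decodes frames in [ts_start, ts_end] and computes an Adler-32 CRC of each
 * raw picture; with no_seeking the CRCs are recorded, otherwise they are
 * compared against the previously recorded values. */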
static int compute_crc_of_packets(AVFormatContext *fmt_ctx, int video_stream,
                                  AVCodecContext *ctx, AVFrame *fr, uint64_t ts_start, uint64_t ts_end, int no_seeking)
{
    int number_of_written_bytes;
    int got_frame = 0;
    int result;
    int end_of_stream = 0;
    int byte_buffer_size;
    uint8_t *byte_buffer;
    int64_t crc;
    AVPacket pkt;

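    /* Worst-case size of one decoded frame with 16-byte row alignment. */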
    byte_buffer_size = av_image_get_buffer_size(ctx->pix_fmt, ctx->width, ctx->height, 16);
    byte_buffer = av_malloc(byte_buffer_size);
    if (!byte_buffer) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate buffer\n");
        return AVERROR(ENOMEM);
    }

    if (!no_seeking) {
        result = av_seek_frame(fmt_ctx, video_stream, ts_start, AVSEEK_FLAG_ANY);
        printf("Seeking to %"PRId64", computing crc for frames with pts < %"PRId64"\n", ts_start, ts_end);
        if (result < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error in seeking\n");
            return result;
        }
        avcodec_flush_buffers(ctx);
    }

    av_init_packet(&pkt);
    do {
        if (!end_of_stream)
            if (av_read_frame(fmt_ctx, &pkt) < 0)
                end_of_stream = 1;
        if (end_of_stream) {
            pkt.data = NULL;
            pkt.size = 0;
        }
        if (pkt.stream_index == video_stream || end_of_stream) {
            got_frame = 0;
            if ((pkt.pts == AV_NOPTS_VALUE) && (!end_of_stream)) {
                av_log(NULL, AV_LOG_ERROR, "Error: frame doesn't have a pts value\n");
                return -1;
            }
            result = avcodec_decode_video2(ctx, fr, &got_frame, &pkt);
            if (result < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding frame\n");
                return result;
            }
            if (got_frame) {
                number_of_written_bytes = av_image_copy_to_buffer(byte_buffer, byte_buffer_size,
                                          (const uint8_t* const *)fr->data, (const int*) fr->linesize,
                                          ctx->pix_fmt, ctx->width, ctx->height, 1);
                if (number_of_written_bytes < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Can't copy image to buffer\n");
                    return number_of_written_bytes;
                }
                if ((fr->pts > ts_end) && (!no_seeking))
                    break;
                crc = av_adler32_update(0, (const uint8_t*)byte_buffer, number_of_written_bytes);
                printf("%10"PRId64", 0x%08lx\n", fr->pts, crc);
                if (no_seeking) {
                    if (add_crc_to_array(crc, fr->pts) < 0)
                        return -1;
                }
                else {
                    if (compare_crc_in_array(crc, fr->pts) < 0)
                        return -1;
                }
            }
        }
        av_packet_unref(&pkt);
        av_init_packet(&pkt);
    } while ((!end_of_stream || got_frame) && (no_seeking || (fr->pts + av_frame_get_pkt_duration(fr) <= ts_end)));

    av_packet_unref(&pkt);
    av_freep(&byte_buffer);

    return 0;
}
Example #3
QByteArray AVDecoder::WriteJPEG(AVCodecContext *pCodecCtx, AVFrame *pFrame, int width, int height)
{
    AVCodecContext *pOCodecCtx;
    AVCodec        *pOCodec;

    QByteArray data;

    pOCodec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);

    if (!pOCodec) {
        return data;
    }

    SwsContext *sws_ctx = sws_getContext(
                pCodecCtx->width, pCodecCtx->height,
                pCodecCtx->pix_fmt,
                width, height,
                AV_PIX_FMT_YUV420P, SWS_BICUBIC,
                NULL, NULL, NULL);

    if(!sws_ctx) {
        return data;
    }

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
    AVFrame *pFrameRGB = av_frame_alloc();
#else
    AVFrame *pFrameRGB = avcodec_alloc_frame();
#endif

    if(pFrameRGB == NULL) {
        sws_freeContext(sws_ctx);
        return data;
    }

    // detect ffmpeg (>= 100) or libav (< 100)
#if (LIBAVUTIL_VERSION_MICRO >= 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,63,100)) || \
    (LIBAVUTIL_VERSION_MICRO < 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54,6,0))
    int numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 16);
#else
    int numBytes = avpicture_get_size(PIX_FMT_YUVJ420P, width, height);
#endif

    uint8_t *buffer = (uint8_t *)av_malloc(numBytes);

    if(!buffer) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        avcodec_free_frame(&pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    // detect ffmpeg (>= 100) or libav (< 100)
#if (LIBAVUTIL_VERSION_MICRO >= 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,63,100)) || \
    (LIBAVUTIL_VERSION_MICRO < 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54,6,0))
    av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, buffer, AV_PIX_FMT_YUV420P, width, height, 1);
#else
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_YUVJ420P, width, height);
#endif

    sws_scale(
        sws_ctx,
        pFrame->data,
        pFrame->linesize,
        0,
        pCodecCtx->height,
        pFrameRGB->data,
        pFrameRGB->linesize
    );

    pOCodecCtx = avcodec_alloc_context3(pOCodec);

    if(pOCodecCtx == NULL) {
        av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        avcodec_free_frame(&pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    pOCodecCtx->bit_rate      = pCodecCtx->bit_rate;
    pOCodecCtx->width         = width;
    pOCodecCtx->height        = height;
    pOCodecCtx->pix_fmt       = AV_PIX_FMT_YUVJ420P;
    pOCodecCtx->color_range   = AVCOL_RANGE_JPEG;
    pOCodecCtx->codec_id      = AV_CODEC_ID_MJPEG;
    pOCodecCtx->codec_type    = AVMEDIA_TYPE_VIDEO;
    pOCodecCtx->time_base.num = pCodecCtx->time_base.num;
    pOCodecCtx->time_base.den = pCodecCtx->time_base.den;

    AVDictionary *opts = NULL;
    if(avcodec_open2(pOCodecCtx, pOCodec, &opts) < 0) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0)
        avcodec_free_context(&pOCodecCtx);
#else
        avcodec_close(pOCodecCtx);
        av_free(pOCodecCtx);
#endif
        av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        avcodec_free_frame(&pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

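    // Constant-quality MJPEG: derive the lambda range and global_quality from
    // qmin/qmax, used together with the QSCALE flag set below.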
    av_opt_set_int(pOCodecCtx, "lmin", pOCodecCtx->qmin * FF_QP2LAMBDA, 0);
    av_opt_set_int(pOCodecCtx, "lmax", pOCodecCtx->qmax * FF_QP2LAMBDA, 0);

    pOCodecCtx->mb_lmin        = pOCodecCtx->qmin * FF_QP2LAMBDA;
    pOCodecCtx->mb_lmax        = pOCodecCtx->qmax * FF_QP2LAMBDA;
    pOCodecCtx->flags          = CODEC_FLAG_QSCALE;
    pOCodecCtx->global_quality = pOCodecCtx->qmin * FF_QP2LAMBDA;

    pFrameRGB->pts     = 1;
    pFrameRGB->quality = pOCodecCtx->global_quality;

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    int gotPacket;

    int encode_ret = avcodec_encode_video2(pOCodecCtx, &pkt, pFrameRGB, &gotPacket);

    QByteArray buffer2;
    if (encode_ret >= 0 && gotPacket)
        buffer2 = QByteArray(reinterpret_cast<char *>(pkt.data), pkt.size);
    av_packet_unref(&pkt);

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0)
    avcodec_free_context(&pOCodecCtx);
#else
    avcodec_close(pOCodecCtx);
    av_free(pOCodecCtx);
#endif
    av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
    av_frame_free(&pFrameRGB);
#else
    avcodec_free_frame(&pFrameRGB);
#endif
    sws_freeContext(sws_ctx);

    return buffer2;
}
Example #4
static int yuv4_read_header(AVFormatContext *s)
{
    char header[MAX_YUV4_HEADER + 10];  // Include headroom for
                                        // the longest option
    char *tokstart, *tokend, *header_end;
    int i;
    AVIOContext *pb = s->pb;
    int width = -1, height  = -1, raten   = 0,
        rated =  0, aspectn =  0, aspectd = 0;
    enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE, alt_pix_fmt = AV_PIX_FMT_NONE;
    enum AVChromaLocation chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;
    enum AVFieldOrder field_order = AV_FIELD_UNKNOWN;
    AVStream *st;

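    /* Read the header one byte at a time until the first newline. */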
    for (i = 0; i < MAX_YUV4_HEADER; i++) {
        header[i] = avio_r8(pb);
        if (header[i] == '\n') {
            header[i + 1] = 0x20;  // Add a space after last option.
                                   // Makes parsing "444" vs "444alpha" easier.
            header[i + 2] = 0;
            break;
        }
    }
    if (i == MAX_YUV4_HEADER)
        return -1;
    if (strncmp(header, Y4M_MAGIC, strlen(Y4M_MAGIC)))
        return -1;

    header_end = &header[i + 1]; // Include space
    for (tokstart = &header[strlen(Y4M_MAGIC) + 1];
         tokstart < header_end; tokstart++) {
        if (*tokstart == 0x20)
            continue;
        switch (*tokstart++) {
        case 'W': // Width. Required.
            width    = strtol(tokstart, &tokend, 10);
            tokstart = tokend;
            break;
        case 'H': // Height. Required.
            height   = strtol(tokstart, &tokend, 10);
            tokstart = tokend;
            break;
        case 'C': // Color space
            if (strncmp("420jpeg", tokstart, 7) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P;
                chroma_sample_location = AVCHROMA_LOC_CENTER;
            } else if (strncmp("420mpeg2", tokstart, 8) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P;
                chroma_sample_location = AVCHROMA_LOC_LEFT;
            } else if (strncmp("420paldv", tokstart, 8) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P;
                chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
            } else if (strncmp("420p16", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P16;
            } else if (strncmp("422p16", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV422P16;
            } else if (strncmp("444p16", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV444P16;
            } else if (strncmp("420p14", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P14;
            } else if (strncmp("422p14", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV422P14;
            } else if (strncmp("444p14", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV444P14;
            } else if (strncmp("420p12", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P12;
            } else if (strncmp("422p12", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV422P12;
            } else if (strncmp("444p12", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV444P12;
            } else if (strncmp("420p10", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P10;
            } else if (strncmp("422p10", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV422P10;
            } else if (strncmp("444p10", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_YUV444P10;
            } else if (strncmp("420p9", tokstart, 5) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P9;
            } else if (strncmp("422p9", tokstart, 5) == 0) {
                pix_fmt = AV_PIX_FMT_YUV422P9;
            } else if (strncmp("444p9", tokstart, 5) == 0) {
                pix_fmt = AV_PIX_FMT_YUV444P9;
            } else if (strncmp("420", tokstart, 3) == 0) {
                pix_fmt = AV_PIX_FMT_YUV420P;
                chroma_sample_location = AVCHROMA_LOC_CENTER;
            } else if (strncmp("411", tokstart, 3) == 0) {
                pix_fmt = AV_PIX_FMT_YUV411P;
            } else if (strncmp("422", tokstart, 3) == 0) {
                pix_fmt = AV_PIX_FMT_YUV422P;
            } else if (strncmp("444alpha", tokstart, 8) == 0 ) {
                av_log(s, AV_LOG_ERROR, "Cannot handle 4:4:4:4 "
                       "YUV4MPEG stream.\n");
                return -1;
            } else if (strncmp("444", tokstart, 3) == 0) {
                pix_fmt = AV_PIX_FMT_YUV444P;
            } else if (strncmp("mono16", tokstart, 6) == 0) {
                pix_fmt = AV_PIX_FMT_GRAY16;
            } else if (strncmp("mono", tokstart, 4) == 0) {
                pix_fmt = AV_PIX_FMT_GRAY8;
            } else {
                av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains an unknown "
                       "pixel format.\n");
                return -1;
            }
            while (tokstart < header_end && *tokstart != 0x20)
                tokstart++;
            break;
        case 'I': // Interlace type
            switch (*tokstart++){
            case '?':
                field_order = AV_FIELD_UNKNOWN;
                break;
            case 'p':
                field_order = AV_FIELD_PROGRESSIVE;
                break;
            case 't':
                field_order = AV_FIELD_TT;
                break;
            case 'b':
                field_order = AV_FIELD_BB;
                break;
            case 'm':
                av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains mixed "
                       "interlaced and non-interlaced frames.\n");
                /* fall through */
            default:
                av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n");
                return AVERROR(EINVAL);
            }
            break;
        case 'F': // Frame rate
            sscanf(tokstart, "%d:%d", &raten, &rated); // 0:0 if unknown
            while (tokstart < header_end && *tokstart != 0x20)
                tokstart++;
            break;
        case 'A': // Pixel aspect
            sscanf(tokstart, "%d:%d", &aspectn, &aspectd); // 0:0 if unknown
            while (tokstart < header_end && *tokstart != 0x20)
                tokstart++;
            break;
        case 'X': // Vendor extensions
            if (strncmp("YSCSS=", tokstart, 6) == 0) {
                // Older nonstandard pixel format representation
                tokstart += 6;
                if (strncmp("420JPEG", tokstart, 7) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P;
                else if (strncmp("420MPEG2", tokstart, 8) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P;
                else if (strncmp("420PALDV", tokstart, 8) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P;
                else if (strncmp("420P9", tokstart, 5) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P9;
                else if (strncmp("422P9", tokstart, 5) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV422P9;
                else if (strncmp("444P9", tokstart, 5) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV444P9;
                else if (strncmp("420P10", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P10;
                else if (strncmp("422P10", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV422P10;
                else if (strncmp("444P10", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV444P10;
                else if (strncmp("420P12", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P12;
                else if (strncmp("422P12", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV422P12;
                else if (strncmp("444P12", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV444P12;
                else if (strncmp("420P14", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P14;
                else if (strncmp("422P14", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV422P14;
                else if (strncmp("444P14", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV444P14;
                else if (strncmp("420P16", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV420P16;
                else if (strncmp("422P16", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV422P16;
                else if (strncmp("444P16", tokstart, 6) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV444P16;
                else if (strncmp("411", tokstart, 3) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV411P;
                else if (strncmp("422", tokstart, 3) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV422P;
                else if (strncmp("444", tokstart, 3) == 0)
                    alt_pix_fmt = AV_PIX_FMT_YUV444P;
            }
            while (tokstart < header_end && *tokstart != 0x20)
                tokstart++;
            break;
        }
    }

    if (width == -1 || height == -1) {
        av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n");
        return -1;
    }

    if (pix_fmt == AV_PIX_FMT_NONE) {
        if (alt_pix_fmt == AV_PIX_FMT_NONE)
            pix_fmt = AV_PIX_FMT_YUV420P;
        else
            pix_fmt = alt_pix_fmt;
    }

    if (raten <= 0 || rated <= 0) {
        // Frame rate unknown
        raten = 25;
        rated = 1;
    }

    if (aspectn == 0 && aspectd == 0) {
        // Pixel aspect unknown
        aspectd = 1;
    }

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codecpar->width  = width;
    st->codecpar->height = height;
    av_reduce(&raten, &rated, raten, rated, (1UL << 31) - 1);
    avpriv_set_pts_info(st, 64, rated, raten);
    st->avg_frame_rate                = av_inv_q(st->time_base);
    st->codecpar->format              = pix_fmt;
    st->codecpar->codec_type          = AVMEDIA_TYPE_VIDEO;
    st->codecpar->codec_id            = AV_CODEC_ID_RAWVIDEO;
    st->sample_aspect_ratio           = (AVRational){ aspectn, aspectd };
    st->codecpar->chroma_location     = chroma_sample_location;
    st->codecpar->field_order         = field_order;
    s->packet_size = av_image_get_buffer_size(st->codecpar->format, width, height, 1) + Y4M_FRAME_MAGIC_LEN;
    if ((int) s->packet_size < 0)
        return s->packet_size;
    s->internal->data_offset = avio_tell(pb);

    st->duration = (avio_size(pb) - avio_tell(pb)) / s->packet_size;

    return 0;
}
Example #5
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int             i, videoStream, audioStream;
  AVCodecContext  *pCodecCtx = NULL;
  AVCodec         *pCodec = NULL;
  AVFrame         *pFrame = NULL; 
  AVPacket        packet;
  int             frameFinished;
  //float           aspect_ratio;
  
  AVCodecContext  *aCodecCtx = NULL;
  AVCodec         *aCodec = NULL;

    //SDL_Overlay     *bmp = NULL;
    //SDL_Surface     *screen = NULL;
    SDL_Window *m_pWindow = NULL;
    SDL_Renderer *m_pRenderer = NULL;
    
  SDL_Rect        rect;
  SDL_Event       event;
  SDL_AudioSpec   wanted_spec, spec;

  //struct SwsContext   *sws_ctx            = NULL;
  AVDictionary        *videoOptionsDict   = NULL;
  AVDictionary        *audioOptionsDict   = NULL;

  if(argc < 2) {
    fprintf(stderr, "Usage: test <file>\n");
    exit(1);
  }
  // Register all formats and codecs
  av_register_all();
  
  if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))
  {
    fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
    exit(1);
  }

  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
    return -1; // Couldn't open file
  
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream=-1;
  audioStream=-1;
  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       videoStream < 0) {
      videoStream=i;
       // printf("video stream:%d",i);
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audioStream < 0) {
      audioStream=i;
       // printf("audio stream:%d",i);
    }
  }
    
  if(videoStream==-1)
    return -1; // Didn't find a video stream
  if(audioStream==-1)
    return -1;
   
  aCodecCtx=pFormatCtx->streams[audioStream]->codec;
    
    
    int count = SDL_GetNumAudioDevices(0);
    
    for (int i = 0; i < count; ++i) {
        SDL_Log("Audio device %d: %s", i, SDL_GetAudioDeviceName(i, 0));
    }
    
    
  // Set audio settings from codec info
  wanted_spec.freq = aCodecCtx->sample_rate;
  wanted_spec.format = AUDIO_S16SYS;
  wanted_spec.channels = aCodecCtx->channels;
  wanted_spec.silence = 0;
  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
  wanted_spec.callback = audio_callback;
  wanted_spec.userdata = aCodecCtx;
  
    SDL_AudioDeviceID dev;
    dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FORMAT_CHANGE);
    if(dev == 0)
    {
         fprintf(stderr, "Failed to open audio: %s\n", SDL_GetError());
    }
    else
    {
        if(wanted_spec.format != spec.format){
               fprintf(stderr, "We didn't get AUDIO_S16SYS audio format.\n");
               return -1;
        }
    }
    
    
  aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
  if(!aCodec)
  {
    fprintf(stderr, "Unsupported codec!\n");
    return -1;
  }
  avcodec_open2(aCodecCtx, aCodec, &audioOptionsDict);

  // audio_st = pFormatCtx->streams[index]
  packet_queue_init(&audioq);
  //SDL_PauseAudio(0);
    SDL_PauseAudioDevice(dev,0);
    
  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
  if(pCodec==NULL)
  {
    fprintf(stderr, "Unsupported codec!\n");
    return -1; // Codec not found
  }
  // Open codec
  if(avcodec_open2(pCodecCtx, pCodec, &videoOptionsDict)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
    pFrame=av_frame_alloc();
    AVFrame*   m_pFrameYUV = av_frame_alloc();
    uint8_t *  out_buffer = (uint8_t *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
    av_image_fill_arrays(m_pFrameYUV->data , m_pFrameYUV->linesize, out_buffer,AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height,1);
    
    
    struct SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,pCodecCtx->pix_fmt,
                                                        pCodecCtx->width, pCodecCtx->height,AV_PIX_FMT_YUV420P,
                                                        SWS_BICUBIC,
                                                        NULL, NULL, NULL);
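    // Conversion context: decoded frames are converted to YUV420P before
    // being uploaded to the SDL streaming texture.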

  // Make a screen to put our video

    m_pWindow = SDL_CreateWindow("test windows", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,pCodecCtx->width, pCodecCtx->height,SDL_WINDOW_SHOWN);
    if(!m_pWindow)
    {
        printf("SDL: could not create window - exiting:%s\n",SDL_GetError());
        return -1;
    }
    
    m_pRenderer = SDL_CreateRenderer(m_pWindow, -1, 0);
    SDL_RenderClear(m_pRenderer);
    SDL_Texture *m_pSdlTexture = SDL_CreateTexture(m_pRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
                                                   pCodecCtx->width, pCodecCtx->height);
    rect.x = 0;
    rect.y = 0;
    rect.w = pCodecCtx->width;
    rect.h = pCodecCtx->height;
    
    
    
    
  // Read frames and save first five frames to disk
  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
      
      // Did we get a video frame?
      if(frameFinished)
      {
    
	// Convert the image into YUV format that SDL uses
      sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                m_pFrameYUV->data, m_pFrameYUV->linesize);
          
          
      SDL_UpdateYUVTexture(m_pSdlTexture, &rect,
                           m_pFrameYUV->data[0], m_pFrameYUV->linesize[0],
                           m_pFrameYUV->data[1], m_pFrameYUV->linesize[1],
                           m_pFrameYUV->data[2], m_pFrameYUV->linesize[2]);
      SDL_RenderClear( m_pRenderer ); // this call appears to have no effect here
      SDL_RenderCopy( m_pRenderer, m_pSdlTexture,  NULL, &rect);
      
      SDL_RenderPresent(m_pRenderer);
      
      SDL_Delay(38);
      }
      // Unreference the packet whether or not a complete frame was produced
      av_packet_unref(&packet);
    }
    else if(packet.stream_index==audioStream)
    {
      packet_queue_put(&audioq, &packet);
    }
    else
    {
      av_packet_unref(&packet);
    }
    SDL_PollEvent(&event);
    switch(event.type)
    {
    case SDL_QUIT:
      quit = 1;
      SDL_Quit();
      exit(0);
      break;
    default:
      break;
    }

  }

  // Free the YUV frames and the conversion buffer
  av_free(out_buffer);
  av_frame_free(&m_pFrameYUV);
  av_frame_free(&pFrame);
  sws_freeContext(img_convert_ctx);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}
Example #6
static int decode_write(AVCodecContext *avctx, AVPacket *packet)
{
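    /* Send one packet to the decoder, then drain every available frame;
     * hardware frames are transferred back to system memory before the raw
     * picture is written to output_file. */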
    AVFrame *frame = NULL, *sw_frame = NULL;
    AVFrame *tmp_frame = NULL;
    uint8_t *buffer = NULL;
    int size;
    int ret = 0;

    ret = avcodec_send_packet(avctx, packet);
    if (ret < 0) {
        fprintf(stderr, "Error during decoding\n");
        return ret;
    }

    while (ret >= 0) {
        if (!(frame = av_frame_alloc()) || !(sw_frame = av_frame_alloc())) {
            fprintf(stderr, "Can not alloc frame\n");
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            av_frame_free(&frame);
            av_frame_free(&sw_frame);
            return 0;
        } else if (ret < 0) {
            fprintf(stderr, "Error while decoding\n");
            goto fail;
        }

        if (frame->format == hw_pix_fmt) {
            /* retrieve data from GPU to CPU */
            if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0) {
                fprintf(stderr, "Error transferring the data to system memory\n");
                goto fail;
            }
            tmp_frame = sw_frame;
        } else
            tmp_frame = frame;

        size = av_image_get_buffer_size(tmp_frame->format, tmp_frame->width,
                                        tmp_frame->height, 1);
        buffer = av_malloc(size);
        if (!buffer) {
            fprintf(stderr, "Can not alloc buffer\n");
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        ret = av_image_copy_to_buffer(buffer, size,
                                      (const uint8_t * const *)tmp_frame->data,
                                      (const int *)tmp_frame->linesize, tmp_frame->format,
                                      tmp_frame->width, tmp_frame->height, 1);
        if (ret < 0) {
            fprintf(stderr, "Can not copy image to buffer\n");
            goto fail;
        }

        if ((ret = fwrite(buffer, 1, size, output_file)) < 0) {
            fprintf(stderr, "Failed to dump raw data.\n");
            goto fail;
        }

    fail:
        av_frame_free(&frame);
        av_frame_free(&sw_frame);
        if (buffer)
            av_freep(&buffer);
        if (ret < 0)
            return ret;
    }

    return 0;
}
Example #7
int cellVdecGetPicItem(u32 handle, mem32_t picItem_ptr)
{
    cellVdec->Log("cellVdecGetPicItem(handle=%d, picItem_ptr_addr=0x%x)", handle, picItem_ptr.GetAddr());

    VideoDecoder* vdec;
    if (!Emu.GetIdManager().GetIDData(handle, vdec))
    {
        return CELL_VDEC_ERROR_ARG;
    }

    if (!picItem_ptr.IsGood())
    {
        return CELL_VDEC_ERROR_FATAL;
    }

    if (vdec->frames.IsEmpty())
    {
        Sleep(1);
        return CELL_VDEC_ERROR_EMPTY;
    }

    VdecFrame& vf = vdec->frames.Peek();

    AVFrame& frame = *vf.data;

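    // Picture items are written into the decoder-owned memory block, used as
    // a simple ring advanced 512 bytes at a time.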
    mem_ptr_t<CellVdecPicItem> info(vdec->memAddr + vdec->memBias);

    vdec->memBias += 512;
    if (vdec->memBias + 512 > vdec->memSize)
    {
        vdec->memBias = 0;
    }

    info->codecType = vdec->type;
    info->startAddr = 0x00000123; // invalid value (no address for picture)
    info->size = a128(av_image_get_buffer_size(vdec->ctx->pix_fmt, vdec->ctx->width, vdec->ctx->height, 1));
    info->auNum = 1;
    info->auPts[0].lower = vf.pts;
    info->auPts[0].upper = vf.pts >> 32;
    info->auPts[1].lower = 0xffffffff;
    info->auPts[1].upper = 0xffffffff;
    info->auDts[0].lower = vf.dts;
    info->auDts[0].upper = vf.dts >> 32;
    info->auDts[1].lower = 0xffffffff;
    info->auDts[1].upper = 0xffffffff;
    info->auUserData[0] = vf.userdata;
    info->auUserData[1] = 0;
    info->status = CELL_OK;
    info->attr = CELL_VDEC_PICITEM_ATTR_NORMAL;
    info->picInfo_addr = info.GetAddr() + sizeof(CellVdecPicItem);

    mem_ptr_t<CellVdecAvcInfo> avc(info.GetAddr() + sizeof(CellVdecPicItem));

    avc->horizontalSize = frame.width;
    avc->verticalSize = frame.height;
    switch (frame.pict_type)
    {
    case AV_PICTURE_TYPE_I:
        avc->pictureType[0] = CELL_VDEC_AVC_PCT_I;
        break;
    case AV_PICTURE_TYPE_P:
        avc->pictureType[0] = CELL_VDEC_AVC_PCT_P;
        break;
    case AV_PICTURE_TYPE_B:
        avc->pictureType[0] = CELL_VDEC_AVC_PCT_B;
        break;
    default:
        avc->pictureType[0] = CELL_VDEC_AVC_PCT_UNKNOWN;
        break; // ???
    }
    avc->pictureType[1] = CELL_VDEC_AVC_PCT_UNKNOWN; // ???
    avc->idrPictureFlag = false; // ???
    avc->aspect_ratio_idc = CELL_VDEC_AVC_ARI_SAR_UNSPECIFIED; // ???
    avc->sar_height = 0;
    avc->sar_width = 0;
    avc->pic_struct = CELL_VDEC_AVC_PSTR_FRAME; // ???
    avc->picOrderCount[0] = 0; // ???
    avc->picOrderCount[1] = 0;
    avc->vui_parameters_present_flag = true; // ???
    avc->frame_mbs_only_flag = true; // ??? progressive
    avc->video_signal_type_present_flag = true; // ???
    avc->video_format = CELL_VDEC_AVC_VF_COMPONENT; // ???
    avc->video_full_range_flag = false; // ???
    avc->colour_description_present_flag = true;
    avc->colour_primaries = CELL_VDEC_AVC_CP_ITU_R_BT_709_5; // ???
    avc->transfer_characteristics = CELL_VDEC_AVC_TC_ITU_R_BT_709_5;
    avc->matrix_coefficients = CELL_VDEC_AVC_MXC_ITU_R_BT_709_5; // important
    avc->timing_info_present_flag = true;
    if (vdec->ctx->time_base.num == 1001)
    {
        if (vdec->ctx->time_base.den == 48000 && vdec->ctx->ticks_per_frame == 2)
        {
            avc->frameRateCode = CELL_VDEC_AVC_FRC_24000DIV1001;
        }
        else if (vdec->ctx->time_base.den == 60000 && vdec->ctx->ticks_per_frame == 2)
        {
            avc->frameRateCode = CELL_VDEC_AVC_FRC_30000DIV1001;
        }
        else
        {
            LOG_ERROR(HLE, "cellVdecGetPicItem: unsupported time_base.den (%d)", vdec->ctx->time_base.den);
            Emu.Pause();
        }
    }
    else
    {
        LOG_ERROR(HLE, "cellVdecGetPicItem: unsupported time_base.num (%d)", vdec->ctx->time_base.num);
        Emu.Pause();
    }
    avc->fixed_frame_rate_flag = true;
    avc->low_delay_hrd_flag = true; // ???
    avc->entropy_coding_mode_flag = true; // ???
    avc->nalUnitPresentFlags = 0; // ???
    avc->ccDataLength[0] = 0;
    avc->ccDataLength[1] = 0;
    avc->reserved[0] = 0;
    avc->reserved[1] = 0;

    picItem_ptr = info.GetAddr();

    return CELL_OK;
}
Example #8
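/* Compatibility shim: the deprecated avpicture_get_size() is exactly
 * av_image_get_buffer_size() with an alignment of 1. */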
int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height)
{
    return av_image_get_buffer_size(pix_fmt, width, height, 1);
}
Example #9
static av_cold int utvideo_decode_init(AVCodecContext *avctx)
{
    UtVideoContext *utv = (UtVideoContext *)avctx->priv_data;
    UtVideoExtra info;
    int format;
    int begin_ret;

    if (avctx->extradata_size != 16 && avctx->extradata_size != 8 ) {
        av_log(avctx, AV_LOG_ERROR, "Extradata size (%d) mismatch.\n", avctx->extradata_size);
        return -1;
    }

    /* Read extradata */
    info.version = AV_RL32(avctx->extradata);
    info.original_format = AV_RL32(avctx->extradata + 4);
    info.frameinfo_size = AV_RL32(avctx->extradata + 8);
    info.flags = AV_RL32(avctx->extradata + 12);

    /* Pick format based on FOURCC */
    switch (avctx->codec_tag) {
#ifdef UTV_BT709
    case MKTAG('U', 'L', 'H', '0'):
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        avctx->colorspace = AVCOL_SPC_BT709;
        format = UTVF_YV12;
        break;
    case MKTAG('U', 'L', 'H', '2'):
        avctx->pix_fmt = AV_PIX_FMT_YUYV422;
        avctx->colorspace = AVCOL_SPC_BT709;
        format = UTVF_YUY2;
        break;
#endif
    case MKTAG('U', 'L', 'Y', '0'):
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        format = UTVF_YV12;
        break;
    case MKTAG('U', 'L', 'Y', '2'):
        avctx->pix_fmt = AV_PIX_FMT_YUYV422;
        format = UTVF_YUY2;
        break;
    case MKTAG('U', 'L', 'R', 'G'):
        avctx->pix_fmt = AV_PIX_FMT_BGR24;
        format = UTVF_NFCC_BGR_BU;
        break;
    case MKTAG('U', 'L', 'R', 'A'):
        avctx->pix_fmt = AV_PIX_FMT_RGB32;
        format = UTVF_NFCC_BGRA_BU;
        break;
#ifdef UTVF_UQY2
    case MKTAG('U', 'Q', 'Y', '2'):
        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
        format = UTVF_v210;
        break;
#endif
    default:
        av_log(avctx, AV_LOG_ERROR,
              "Not a Ut Video FOURCC: %X\n", avctx->codec_tag);
        return -1;
    }

    /* Only allocate the buffer once */
    utv->buf_size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width, avctx->height, 1);
#ifdef UTVF_UQY2
    if (format == UTVF_v210)
        utv->buf_size += avctx->height * ((avctx->width + 47) / 48) * 128; // the linesize used by the decoder, this does not seem to be exported
#endif
    utv->buffer = (uint8_t *)av_malloc(utv->buf_size * sizeof(uint8_t));

    if (utv->buffer == NULL) {
        av_log(avctx, AV_LOG_ERROR, "Unable to allocate output buffer.\n");
        return -1;
    }

    /* Allocate the output frame */
    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        av_log(avctx, AV_LOG_ERROR, "Unable to allocate output frame.\n");
        av_freep(&utv->buffer);
        return -1;
    }

    /* Ut Video only supports 8-bit */
    avctx->bits_per_raw_sample = 8;

    /* Is it interlaced? */
    avctx->coded_frame->interlaced_frame = info.flags & 0x800 ? 1 : 0;

    /* Apparently Ut Video doesn't store this info... */
    avctx->coded_frame->top_field_first = 1;

    /*
     * Create a Ut Video instance. Since the function wants
     * an "interface name" string, pass it the name of the lib.
     */
    utv->codec = CCodec::CreateInstance(UNFCC(avctx->codec_tag), "libavcodec");

    /* Initialize Decoding */
    begin_ret = utv->codec->DecodeBegin(format, avctx->width, avctx->height,
                            CBGROSSWIDTH_WINDOWS, &info, sizeof(UtVideoExtra));

    /* Check to see if the decoder initlized properly */
    if (begin_ret != 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Could not initialize decoder: %d\n", begin_ret);
        return -1;
    }

    return 0;
}
Example #10
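/* JNI binding that exposes av_image_get_buffer_size() (align 1) to Java. */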
JNIEXPORT jint JNICALL Java_bits_jav_codec_JavFrame_nComputeVideoBufferSize
(JNIEnv* env, jclass clazz, jint w, jint h, jint pixelFormat)
{
    return av_image_get_buffer_size((enum AVPixelFormat)pixelFormat, w, h, 1);
}
Example #11
void * thread_routine(void *arg)
{
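	// Per-stream worker: decodes one RTSP stream and renders it into this
	// thread's sub-rectangle of the shared SDL window.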
	struct mypara *recv_para = (struct mypara *)arg;  // received parameter data
	AVFormatContext	*pFormatCtx;
	int				i, videoindex;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;
	AVFrame	*pFrame, *pFrameYUV;
	unsigned char *out_buffer;
	AVPacket *packet;
	int y_size;
	int ret, got_picture;
	struct SwsContext *img_convert_ctx;

	//char filepath[]="bigbuckbunny_480x272.h265";
	char filepath[] = "rtsp://192.168.131.4/0";
	//SDL---------------------------
	int screen_w = 0, screen_h = 0;
	SDL_Window *screen;
	SDL_Renderer* sdlRenderer;
	SDL_Texture* sdlTexture;
	SDL_Rect sdlRect, sdlRect_tmp;

	FILE *fp_yuv;

	//av_register_all();
	//avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0){
		printf("Couldn't open input stream.\n");
		return NULL;
	}
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0){
		printf("Couldn't find stream information.\n");
		return NULL;
	}
	videoindex = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO){
			videoindex = i;
			break;
		}
	if (videoindex == -1){
		printf("Didn't find a video stream.\n");
		return NULL;
	}

	pCodecCtx = pFormatCtx->streams[videoindex]->codec;
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL){
		printf("Codec not found.\n");
		return NULL;
	}
	//pthread_mutex_lock(&mutex);
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0){
		printf("Could not open codec.\n");
		return NULL;
	}
	//pthread_mutex_unlock(&mutex);

	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
	av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer,
		AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);

	packet = (AVPacket *)av_malloc(sizeof(AVPacket));
	//Output Info-----------------------------
	printf("--------------- File Information ----------------\n");
	av_dump_format(pFormatCtx, 0, filepath, 0);
	printf("-------------------------------------------------\n");
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

#if OUTPUT_YUV420P 
	fp_yuv = fopen("output.yuv", "wb+");
#endif  

	//if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
	//	printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
	//	return -1;
	//} 

	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	//SDL 2.0 Support for multiple windows
	//screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
	//	screen_w*2, screen_h,
	//	SDL_WINDOW_OPENGL);

	screen = (*recv_para).screen; //get the screen
	if (!screen) {
		printf("SDL: could not create window - exiting:%s\n", SDL_GetError());
		return NULL;
	}

	//sdlRenderer = SDL_CreateRenderer(screen, -1, 0);  
	sdlRenderer = (*recv_para).sdlRenderer;//get the sdlRenderer
	//IYUV: Y + U + V  (3 planes)
	//YV12: Y + V + U  (3 planes)
	pthread_mutex_lock(&mutex);
	sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, pCodecCtx->width, pCodecCtx->height);
	pthread_mutex_unlock(&mutex);


	//temp sdlRect for render copy
	sdlRect_tmp.x = 0;
	sdlRect_tmp.y = 0;
	sdlRect_tmp.w = screen_w;
	sdlRect_tmp.h = screen_h;

	// four rects per row, 4 * 4 = 16 rects in total
	sdlRect.x = 0 + screen_w / 2 * ((*recv_para).id % 4);
	sdlRect.y = 0 + screen_h / 2 * ((*recv_para).id / 4);
	sdlRect.w = screen_w / 2;
	sdlRect.h = screen_h / 2;


	//SDL End----------------------
	while (thread_exit && av_read_frame(pFormatCtx, packet) >= 0){
		if (packet->stream_index == videoindex){
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
			if (ret < 0){
				printf("Decode Error.\n");
				return NULL;
			}
			if (got_picture){
				//printf("id:%d\n",(*recv_para).id); // print thread id
				//printf("x_pos:%d   y_pos:%d\n",sdlRect.x,sdlRect.y); // print rect position
				sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
					pFrameYUV->data, pFrameYUV->linesize);

#if OUTPUT_YUV420P
				y_size = pCodecCtx->width*pCodecCtx->height;
				fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y 
				fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
				fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
#endif
				//SDL---------------------------
#if 0
				SDL_UpdateTexture(sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0]);
#else
				pthread_mutex_lock(&mutex);  //mutex or SEGFAULT
				SDL_UpdateYUVTexture(sdlTexture, &sdlRect_tmp,//sdl tmp
					pFrameYUV->data[0], pFrameYUV->linesize[0],
					pFrameYUV->data[1], pFrameYUV->linesize[1],
					pFrameYUV->data[2], pFrameYUV->linesize[2]);
#endif	
				//SDL_RenderClear( sdlRenderer );  
				SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, &sdlRect);
				//SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect1);  
				SDL_RenderPresent(sdlRenderer);
				pthread_mutex_unlock(&mutex);
				//SDL End-----------------------
				//Delay 40ms
				//SDL_Delay(40);
			}
		}
		av_packet_unref(packet);
	}
	// flush decoder: drain the frames remaining in the codec
	packet->data = NULL;  // an empty packet signals end of stream
	packet->size = 0;
	while (1) {
		ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
		if (ret < 0)
			break;
		if (!got_picture)
			break;
		sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
			pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
		int y_size = pCodecCtx->width*pCodecCtx->height;
		fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y 
		fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
		fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
#endif
		//SDL---------------------------

		SDL_UpdateTexture(sdlTexture, &sdlRect, pFrameYUV->data[0], pFrameYUV->linesize[0]);
		SDL_RenderClear(sdlRenderer);
		SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, &sdlRect);
		SDL_RenderPresent(sdlRenderer);
		//SDL End-----------------------
		//Delay 40ms
		//SDL_Delay(40);
	}

	sws_freeContext(img_convert_ctx);

#if OUTPUT_YUV420P 
	fclose(fp_yuv);
#endif 

	SDL_RenderClear(sdlRenderer);
	SDL_Quit();

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	av_free(packet);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return NULL;
}
Example #12
bool QVideoEncoder::convertImage_sws(const QImage &image, QString* errorString/*=0*/)
{
	// Check if the image matches the size
	if (image.width() != m_width || image.height() != m_height)
	{
		if (errorString)
			*errorString = "Wrong image size";
		return false;
	}
	
	QImage::Format format = image.format();
	if (	format != QImage::Format_RGB32
		&&	format != QImage::Format_ARGB32
		&&	format != QImage::Format_ARGB32_Premultiplied )
	{
		if (errorString)
			*errorString = "Wrong image format";
		return false;
	}

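	// Note: QImage RGB32/ARGB32 pixels are stored as BGRA bytes on
	// little-endian hosts, hence AV_PIX_FMT_BGRA as the source format below.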
	//Check if context can be reused, otherwise reallocate a new one
	m_ff->swsContext = sws_getCachedContext(m_ff->swsContext,
											m_width,
											m_height,
											AV_PIX_FMT_BGRA,
											m_width,
											m_height,
											AV_PIX_FMT_YUV420P,
											SWS_BICUBIC,
											NULL,
											NULL,
											NULL);

	if (m_ff->swsContext == NULL)
	{
		if (errorString)
			*errorString = "[SWS] Cannot initialize the conversion context";
		return false;
	}

	int num_bytes = av_image_get_buffer_size(AV_PIX_FMT_BGRA, m_width, m_height, 1);
	if (num_bytes != image.byteCount())
	{
		if (errorString)
			*errorString = "[SWS] Number of bytes mismatch";
		return false;
	}

	uint8_t *srcSlice[3] = { (uint8_t*)image.bits(), 0, 0 };
	int srcStride[3] = { image.bytesPerLine(), 0, 0 };

	sws_scale(	m_ff->swsContext,
				srcSlice,
				srcStride,
				0,
				m_height,
				m_ff->frame->data,
				m_ff->frame->linesize );

	return true;
}
Example #13
// Initialize the video decoder and player
int open_input(JNIEnv * env, const char* file_name, jobject surface){
    LOGI("open file:%s\n", file_name);
    // register all components
    av_register_all();
    // allocate the format context
    pFormatCtx = avformat_alloc_context();
    // open the video file
    if(avformat_open_input(&pFormatCtx, file_name, NULL, NULL)!=0) {
        LOGE("Couldn't open file:%s\n", file_name);
        return -1;
    }
    // retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL)<0) {
        LOGE("Couldn't find stream information.");
        return -1;
    }
    // find the first video stream
    int i;
    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO
            && video_stream_index < 0) {
            video_stream_index = i;
        }
    }
    if(video_stream_index == -1) {
        LOGE("couldn't find a video stream.");
        return -1;
    }

    // get the codec context pointer
    pCodecCtx = pFormatCtx->streams[video_stream_index]->codec;
    // find the decoder for the video stream
    AVCodec * pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
        LOGE("couldn't find Codec.");
        return -1;
    }
    if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        LOGE("Couldn't open codec.");
        return -1;
    }
    // get the native window
    nativeWindow = ANativeWindow_fromSurface(env, surface);

    // set the native window buffer size; it scales automatically
    ANativeWindow_setBuffersGeometry(nativeWindow,  pCodecCtx->width, pCodecCtx->height, WINDOW_FORMAT_RGBA_8888);
    // allocate frames
    pFrame = av_frame_alloc();
    pFrameRGBA = av_frame_alloc();
    if(pFrameRGBA == NULL || pFrame == NULL) {
        LOGE("Couldn't allocate video frame.");
        return -1;
    }
    // the buffer holds the RGBA data used for rendering
    int numBytes=av_image_get_buffer_size(AV_PIX_FMT_RGBA, pCodecCtx->width, pCodecCtx->height, 1);

    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
    av_image_fill_arrays(pFrameRGBA->data, pFrameRGBA->linesize, buffer, AV_PIX_FMT_RGBA,
                         pCodecCtx->width, pCodecCtx->height, 1);
    // decoded frames are not RGBA, so convert them before rendering
    sws_ctx = sws_getContext(pCodecCtx->width,
                                                pCodecCtx->height,
                                                pCodecCtx->pix_fmt,
                                                pCodecCtx->width,
                                                pCodecCtx->height,
                                                AV_PIX_FMT_RGBA,
                                                SWS_BILINEAR,
                                                NULL,
                                                NULL,
                                                NULL);

    return 0;
}
Example #14
filter_wrapper::filter_wrapper(int width, int height, int width_out, int height_out, int fps, const std::string& filter_descr)
{
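	// Build a libavfilter graph: buffer source -> user filter chain -> buffer
	// sink, with preallocated YUV420P input and output frames. Note that
	// "ffbuffersink" is the legacy FFmpeg sink name; newer releases use
	// "buffersink".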
	if (once)
	{
		once = false;
		avfilter_register_all();
	}

	this->width_ = width;
	this->height_ = height;
	this->width_out = width_out;
	this->height_out = height_out;
	this->fps_ = fps;
	this->filter_descr_ = filter_descr;

	outputs = avfilter_inout_alloc();
	inputs = avfilter_inout_alloc();


	char args[512] = { 0 };
	AVFilter *buffersrc = avfilter_get_by_name("buffer");
	AVFilter *buffersink = avfilter_get_by_name("ffbuffersink");
	AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
	AVBufferSinkParams *buffersink_params;

	filter_graph = avfilter_graph_alloc();

	/* buffer video source: the decoded frames from the decoder will be inserted here. */
	snprintf(args, sizeof(args),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		width, height, AV_PIX_FMT_YUV420P, 1, fps, 1, 1);

	int ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
		args, NULL, filter_graph);
	if (ret < 0) {
		printf("Cannot create buffer source\n");
		return;
	}

	/* buffer video sink: to terminate the filter chain. */
	buffersink_params = av_buffersink_params_alloc();
	buffersink_params->pixel_fmts = pix_fmts;
	ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
		NULL, buffersink_params, filter_graph);
	av_free(buffersink_params);
	if (ret < 0) {
		printf("Cannot create buffer sink\n");
		return;
	}

	/* Endpoints for the filter graph. */
	outputs->name = av_strdup("in");
	outputs->filter_ctx = buffersrc_ctx;
	outputs->pad_idx = 0;
	outputs->next = NULL;

	inputs->name = av_strdup("out");
	inputs->filter_ctx = buffersink_ctx;
	inputs->pad_idx = 0;
	inputs->next = NULL;

	if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr.c_str(), &inputs, &outputs, NULL)) < 0)
		return;

	if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
		return;

	frame_in = av_frame_alloc();
	frame_buffer_in = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 1));
	av_image_fill_arrays(frame_in->data, frame_in->linesize, frame_buffer_in,
		AV_PIX_FMT_YUV420P, width, height, 1);

	frame_out = av_frame_alloc();
	frame_buffer_out = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width_out, height_out, 1));
	av_image_fill_arrays(frame_out->data, frame_out->linesize, frame_buffer_out,
		AV_PIX_FMT_YUV420P, width_out, height_out, 1);

	frame_in->width = width;
	frame_in->height = height;
	frame_in->format = AV_PIX_FMT_YUV420P;
}
Example #15
bool CFFmpegImage::CreateThumbnailFromSurface(unsigned char* bufferin, unsigned int width,
                                             unsigned int height, unsigned int format,
                                             unsigned int pitch,
                                             const std::string& destFile,
                                             unsigned char* &bufferout,
                                             unsigned int &bufferoutSize)
{
  // It seems XB_FMT_A8R8G8B8 means RGBA and not ARGB
  if (format != XB_FMT_A8R8G8B8)
  {
    CLog::Log(LOGERROR, "Supplied format: %d is not supported.", format);
    return false;
  }

  bool jpg_output = false;
  if (m_strMimeType == "image/jpeg" || m_strMimeType == "image/jpg")
    jpg_output = true;
  else if (m_strMimeType == "image/png")
    jpg_output = false;
  else
  {
    CLog::Log(LOGERROR, "Output format for %s is not supported.", destFile.c_str());
    return false;
  }

  ThumbDataManagement tdm;

  tdm.codec = avcodec_find_encoder(jpg_output ? AV_CODEC_ID_MJPEG : AV_CODEC_ID_PNG);
  if (!tdm.codec)
  {
    CLog::Log(LOGERROR, "You are missing a working encoder for format: %s", jpg_output ? "JPEG" : "PNG");
    return false;
  }

  tdm.avOutctx = avcodec_alloc_context3(tdm.codec);
  if (!tdm.avOutctx)
  {
    CLog::Log(LOGERROR, "Could not allocate context for thumbnail: %s", destFile.c_str());
    return false;
  }

  tdm.avOutctx->height = height;
  tdm.avOutctx->width = width;
  tdm.avOutctx->time_base.num = 1;
  tdm.avOutctx->time_base.den = 1;
  tdm.avOutctx->pix_fmt = jpg_output ? AV_PIX_FMT_YUVJ420P : AV_PIX_FMT_RGBA;
  tdm.avOutctx->flags = AV_CODEC_FLAG_QSCALE;
  tdm.avOutctx->mb_lmin = tdm.avOutctx->qmin * FF_QP2LAMBDA;
  tdm.avOutctx->mb_lmax = tdm.avOutctx->qmax * FF_QP2LAMBDA;
  tdm.avOutctx->global_quality = tdm.avOutctx->qmin * FF_QP2LAMBDA;

  unsigned int internalBufOutSize = 0;

  int size = av_image_get_buffer_size(tdm.avOutctx->pix_fmt, tdm.avOutctx->width, tdm.avOutctx->height, 16);
  if (size < 0)
  {
    CLog::Log(LOGERROR, "Could not compute picture size for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }
  internalBufOutSize = (unsigned int) size;

  tdm.intermediateBuffer = (uint8_t*) av_malloc(internalBufOutSize);
  if (!tdm.intermediateBuffer)
  {
    CLog::Log(LOGERROR, "Could not allocate memory for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  if (avcodec_open2(tdm.avOutctx, tdm.codec, NULL) < 0)
  {
    CLog::Log(LOGERROR, "Could not open avcodec context thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  tdm.frame_input = av_frame_alloc();
  if (!tdm.frame_input)
  {
    CLog::Log(LOGERROR, "Could not allocate frame for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  // convert the RGB32 frame to AV_PIX_FMT_YUV420P - we use this later on as AV_PIX_FMT_YUVJ420P
  tdm.frame_temporary = av_frame_alloc();
  if (!tdm.frame_temporary)
  {
    CLog::Log(LOGERROR, "Could not allocate frame for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  if (av_image_fill_arrays(tdm.frame_temporary->data, tdm.frame_temporary->linesize, tdm.intermediateBuffer, jpg_output ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_RGBA, width, height, 16) < 0)
  {
    CLog::Log(LOGERROR, "Could not fill picture for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  uint8_t* src[] = { bufferin, NULL, NULL, NULL };
  int srcStride[] = { (int) pitch, 0, 0, 0};

  //input size == output size which means only pix_fmt conversion
  tdm.sws = sws_getContext(width, height, AV_PIX_FMT_RGB32, width, height, jpg_output ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_RGBA, 0, 0, 0, 0);
  if (!tdm.sws)
  {
    CLog::Log(LOGERROR, "Could not setup scaling context for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  // Setup jpeg range for sws
  if (jpg_output)
  {
    int* inv_table = nullptr;
    int* table = nullptr;
    int srcRange, dstRange, brightness, contrast, saturation;

    if (sws_getColorspaceDetails(tdm.sws, &inv_table, &srcRange, &table, &dstRange, &brightness, &contrast, &saturation) < 0)
    {
      CLog::Log(LOGERROR, "SWS_SCALE failed to get ColorSpaceDetails for thumbnail: %s", destFile.c_str());
      CleanupLocalOutputBuffer();
      return false;
    }
    dstRange = 1; // jpeg full range yuv420p output
    srcRange = 0; // full range RGB32 input
    if (sws_setColorspaceDetails(tdm.sws, inv_table, srcRange, table, dstRange, brightness, contrast, saturation) < 0)
    {
      CLog::Log(LOGERROR, "SWS_SCALE failed to set ColorSpace Details for thumbnail: %s", destFile.c_str());
      CleanupLocalOutputBuffer();
      return false;
    }
  }

  if (sws_scale(tdm.sws, src, srcStride, 0, height, tdm.frame_temporary->data, tdm.frame_temporary->linesize) < 0)
  {
    CLog::Log(LOGERROR, "SWS_SCALE failed for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }
  tdm.frame_input->pts = 1;
  tdm.frame_input->quality = tdm.avOutctx->global_quality;
  tdm.frame_input->data[0] = tdm.frame_temporary->data[0];
  tdm.frame_input->data[1] = tdm.frame_temporary->data[1];
  tdm.frame_input->data[2] = tdm.frame_temporary->data[2];
  tdm.frame_input->height = height;
  tdm.frame_input->width = width;
  tdm.frame_input->linesize[0] = tdm.frame_temporary->linesize[0];
  tdm.frame_input->linesize[1] = tdm.frame_temporary->linesize[1];
  tdm.frame_input->linesize[2] = tdm.frame_temporary->linesize[2];
  // this is deprecated but mjpeg is not yet transitioned
  tdm.frame_input->format = jpg_output ? AV_PIX_FMT_YUVJ420P : AV_PIX_FMT_RGBA;

  int got_package = 0;
  AVPacket avpkt;
  av_init_packet(&avpkt);
  // encoder will allocate memory
  avpkt.data = nullptr;
  avpkt.size = 0;

  int ret = EncodeFFmpegFrame(tdm.avOutctx, &avpkt, &got_package, tdm.frame_input);

  if ((ret < 0) || (got_package == 0))
  {
    CLog::Log(LOGERROR, "Could not encode thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  bufferoutSize = avpkt.size;
  m_outputBuffer = (uint8_t*) av_malloc(bufferoutSize);
  if (!m_outputBuffer)
  {
    CLog::Log(LOGERROR, "Could not generate allocate memory for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    av_packet_unref(&avpkt);
    return false;
  }
  // update buffer ptr for caller
  bufferout = m_outputBuffer;

  // copy avpkt data into outputbuffer
  memcpy(m_outputBuffer, avpkt.data, avpkt.size);
  av_packet_unref(&avpkt);

  return true;
}
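EncodeFFmpegFrame above hides the actual encode call behind a small wrapper. As a point of reference, here is a minimal sketch of such a wrapper written against the send/receive API available since FFmpeg 3.1; the function name matches the call site above, but the body is an assumption, not Kodi's actual implementation:

static int EncodeFFmpegFrame(AVCodecContext* avctx, AVPacket* avpkt, int* got_package, AVFrame* frame)
{
  // Submit the frame; passing NULL here would begin flushing the encoder.
  int ret = avcodec_send_frame(avctx, frame);
  if (ret < 0)
    return ret;

  // Fetch one encoded packet if the encoder has produced one.
  ret = avcodec_receive_packet(avctx, avpkt);
  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
  {
    *got_package = 0;
    return 0;
  }
  if (ret < 0)
    return ret;

  *got_package = 1;
  return 0;
}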
int main(int argc, char* argv[])
{
	int ret;
	AVFrame *frame_in;
	AVFrame *frame_out;
	unsigned char *frame_buffer_in;
	unsigned char *frame_buffer_out;

	AVFilterContext *buffersink_ctx;
	AVFilterContext *buffersrc_ctx;
	AVFilterGraph *filter_graph;
	static int video_stream_index = -1;

	//Input YUV
	FILE *fp_in=fopen("sintel_480x272_yuv420p.yuv","rb+");
	if(fp_in==NULL){
		printf("Error open input file.\n");
		return -1;
	}
	int in_width=480;
	int in_height=272;

	//Output YUV
	FILE *fp_out=fopen("output.yuv","wb+");
	if(fp_out==NULL){
		printf("Error open output file.\n");
		return -1;
	}

	//const char *filter_descr = "lutyuv='u=128:v=128'";
	const char *filter_descr = "boxblur";
	//const char *filter_descr = "hflip";
	//const char *filter_descr = "hue='h=60:s=-3'";
	//const char *filter_descr = "crop=2/3*in_w:2/3*in_h";
	//const char *filter_descr = "drawbox=x=100:y=100:w=100:h=100:[email protected]";
	//const char *filter_descr = "drawtext=fontfile=arial.ttf:fontcolor=green:fontsize=30:text='Lei Xiaohua'";
	
	avfilter_register_all();

	char args[512];
	AVFilter *buffersrc  = avfilter_get_by_name("buffer");
	AVFilter *buffersink = avfilter_get_by_name("buffersink");
	AVFilterInOut *outputs = avfilter_inout_alloc();
	AVFilterInOut *inputs  = avfilter_inout_alloc();
	enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
	AVBufferSinkParams *buffersink_params;

	filter_graph = avfilter_graph_alloc();

	/* buffer video source: the decoded frames from the decoder will be inserted here. */
	snprintf(args, sizeof(args),
		"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		in_width,in_height,AV_PIX_FMT_YUV420P,
		1, 25,1,1);

	ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
		args, NULL, filter_graph);
	if (ret < 0) {
		printf("Cannot create buffer source\n");
		return ret;
	}

	/* buffer video sink: to terminate the filter chain. */
	buffersink_params = av_buffersink_params_alloc();
	buffersink_params->pixel_fmts = pix_fmts;
	ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
		NULL, buffersink_params, filter_graph);
	av_free(buffersink_params);
	if (ret < 0) {
		printf("Cannot create buffer sink\n");
		return ret;
	}

	/* Endpoints for the filter graph. */
	outputs->name       = av_strdup("in");
	outputs->filter_ctx = buffersrc_ctx;
	outputs->pad_idx    = 0;
	outputs->next       = NULL;

	inputs->name       = av_strdup("out");
	inputs->filter_ctx = buffersink_ctx;
	inputs->pad_idx    = 0;
	inputs->next       = NULL;

	if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
		&inputs, &outputs, NULL)) < 0)
		return ret;

	if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
		return ret;

	frame_in=av_frame_alloc();
	frame_buffer_in=(unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width,in_height,1));
	av_image_fill_arrays(frame_in->data, frame_in->linesize,frame_buffer_in,
		AV_PIX_FMT_YUV420P,in_width, in_height,1);

	frame_out=av_frame_alloc();
	frame_buffer_out=(unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width,in_height,1));
	av_image_fill_arrays(frame_out->data, frame_out->linesize,frame_buffer_out,
		AV_PIX_FMT_YUV420P,in_width, in_height,1);

	frame_in->width=in_width;
	frame_in->height=in_height;
	frame_in->format=AV_PIX_FMT_YUV420P;
	
    while (1) {

		if(fread(frame_buffer_in, 1, in_width*in_height*3/2, fp_in)!= in_width*in_height*3/2){
			break;
		}
		//input Y,U,V
		frame_in->data[0]=frame_buffer_in;
		frame_in->data[1]=frame_buffer_in+in_width*in_height;
		frame_in->data[2]=frame_buffer_in+in_width*in_height*5/4;

        if (av_buffersrc_add_frame(buffersrc_ctx, frame_in) < 0) {
            printf( "Error while add frame.\n");
            break;
        }

        /* pull filtered pictures from the filtergraph */
		ret = av_buffersink_get_frame(buffersink_ctx, frame_out);
        if (ret < 0)
            break;

		//output Y,U,V
		if(frame_out->format==AV_PIX_FMT_YUV420P){
			for(int i=0;i<frame_out->height;i++){
				fwrite(frame_out->data[0]+frame_out->linesize[0]*i,1,frame_out->width,fp_out);
			}
			for(int i=0;i<frame_out->height/2;i++){
				fwrite(frame_out->data[1]+frame_out->linesize[1]*i,1,frame_out->width/2,fp_out);
			}
			for(int i=0;i<frame_out->height/2;i++){
				fwrite(frame_out->data[2]+frame_out->linesize[2]*i,1,frame_out->width/2,fp_out);
			}
		}
		printf("Process 1 frame!\n");
		av_frame_unref(frame_out);
    }

	fclose(fp_in);
	fclose(fp_out);

	av_frame_free(&frame_in);
	av_frame_free(&frame_out);
    avfilter_graph_free(&filter_graph);

    return 0;
}
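The read loop above carves the Y, U and V plane pointers out of one contiguous buffer by hand. For a YUV420P frame of in_width x in_height pixels, Y occupies width*height bytes and U and V occupy width*height/4 bytes each, which is why one frame is read as exactly width*height*3/2 bytes. A self-contained sketch of the same arithmetic (hypothetical helper; this matches what av_image_fill_arrays computes for this format with align=1):

#include <stdint.h>

// Derive YUV420P plane pointers from one packed buffer of w*h*3/2 bytes.
static void yuv420p_planes(uint8_t *buf, int w, int h, uint8_t *data[3])
{
	data[0] = buf;                 // Y plane: w*h bytes
	data[1] = buf + w * h;         // U plane: (w/2)*(h/2) = w*h/4 bytes
	data[2] = buf + w * h * 5 / 4; // V plane: w*h/4 bytes, after Y and U
}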
int main(int argc, char* argv[])
{
    AVFormatContext *pFormatCtx;
    int             i, videoindex;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame *pFrame, *pFrameYUV;
    unsigned char *out_buffer;
    AVPacket *packet;
    int y_size;
    int ret, got_picture;
    struct SwsContext *img_convert_ctx;

    char filepath[] = "F:\\codes\\simplest_ffmpeg_player\\simplest_ffmpeg_player\\bigbuckbunny_480x272.h265";
    //SDL---------------------------
    int screen_w = 0, screen_h = 0;
//  SDL_Window *screen;
//  SDL_Renderer* sdlRenderer;
//  SDL_Texture* sdlTexture;
//  SDL_Rect sdlRect;

    FILE *fp_yuv;

    av_register_all();
    avformat_network_init();
    pFormatCtx = avformat_alloc_context();

    /* filepath is expected to be an absolute path */
    if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
        printf("Couldn't open input stream.\n");
        return -1;
    }
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        printf("Couldn't find stream information.\n");
        return -1;
    }
    videoindex = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoindex = i;
            break;
        }
    if (videoindex == -1) {
        printf("Didn't find a video stream.\n");
        return -1;
    }

    pCodecCtx = pFormatCtx->streams[videoindex]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        printf("Codec not found.\n");
        return -1;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open codec.\n");
        return -1;
    }

    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
    av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer,
                         AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);

    packet = (AVPacket *)av_malloc(sizeof(AVPacket));
    //Output Info-----------------------------
    printf("--------------- File Information ----------------\n");
    av_dump_format(pFormatCtx, 0, filepath, 0);
    printf("-------------------------------------------------\n");
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P,
                                     SWS_BICUBIC, NULL, NULL, NULL);

#if OUTPUT_YUV420P
    fp_yuv = fopen("output.yuv", "wb+");
#endif

//  if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
//      printf( "Could not initialize SDL - %s\n", SDL_GetError());
//      return -1;
//  }

    screen_w = pCodecCtx->width;
    screen_h = pCodecCtx->height;
    //SDL 2.0 Support for multiple windows
//  screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
//      screen_w, screen_h,
//      SDL_WINDOW_OPENGL);

//  if(!screen) {
//      printf("SDL: could not create window - exiting:%s\n",SDL_GetError());
//      return -1;
//  }

//  sdlRenderer = SDL_CreateRenderer(screen, -1, 0);
//  //IYUV: Y + U + V  (3 planes)
//  //YV12: Y + V + U  (3 planes)
//  sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);

//  sdlRect.x=0;
//  sdlRect.y=0;
//  sdlRect.w=screen_w;
//  sdlRect.h=screen_h;

    //SDL End----------------------
    while (av_read_frame(pFormatCtx, packet) >= 0) {
        if (packet->stream_index == videoindex) {
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if (ret < 0) {
                printf("Decode Error.\n");
                return -1;
            }
            if (got_picture) {
                /* scale operation */
                sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pFrameYUV->data, pFrameYUV->linesize);

#if OUTPUT_YUV420P
                y_size = pCodecCtx->width * pCodecCtx->height;
                fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
                fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
                fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
#endif
                //SDL---------------------------
#if 0
                SDL_UpdateTexture(sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0]);
#else
//              SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
//              pFrameYUV->data[0], pFrameYUV->linesize[0],
//              pFrameYUV->data[1], pFrameYUV->linesize[1],
//              pFrameYUV->data[2], pFrameYUV->linesize[2]);
#endif

//              SDL_RenderClear( sdlRenderer );
//              SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);
//              SDL_RenderPresent( sdlRenderer );
//              //SDL End-----------------------
//              //Delay 40ms
//              SDL_Delay(40);
            }
        }
        av_packet_unref(packet);
    }
    //flush decoder
    //FIX: flush frames remaining in the codec
    while (1) {
        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
        if (ret < 0)
            break;
        if (!got_picture)
            break;
        sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data,
                  pFrame->linesize, 0, pCodecCtx->height,
                  pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
        int y_size = pCodecCtx->width * pCodecCtx->height;
        fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
        fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
        fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
#endif
        //SDL---------------------------
//      SDL_UpdateTexture( sdlTexture, &sdlRect, pFrameYUV->data[0], pFrameYUV->linesize[0] );
//      SDL_RenderClear( sdlRenderer );
//      SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);
//      SDL_RenderPresent( sdlRenderer );
//      //SDL End-----------------------
//      //Delay 40ms
//      SDL_Delay(40);
    }

    sws_freeContext(img_convert_ctx);

#if OUTPUT_YUV420P
    fclose(fp_yuv);
#endif

//  SDL_Quit();

    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    return 0;
}
void MP4Encoder::EncodeStart() {
    //1. Register all components
    av_register_all();
    //2. Initialize the AVFormatContext for the output stream
    avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, this->mp4Path);

    //3. Open the output video file
    if (avio_open(&pFormatCtx->pb, this->mp4Path, AVIO_FLAG_READ_WRITE)) {
        LOGE("open output file failed");
        return;
    }
    //4. Initialize the video stream
    pStream = avformat_new_stream(pFormatCtx, NULL);
    if (pStream == NULL) {
        LOGE("allocating output stream failed");
        return;
    }
    //5. Find the encoder (it is opened in step 7)
    pCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
    if (!pCodec) {
        LOGE("could not find encoder");
        return;
    }

    //6. Allocate the encoder context and set its parameters
    pCodecCtx = avcodec_alloc_context3(pCodec);
    pCodecCtx->codec_id = pCodec->id;
    pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
    pCodecCtx->width = height;
    pCodecCtx->height = width;
    pCodecCtx->time_base.num = 1;
    pCodecCtx->time_base.den = 25;
    pCodecCtx->bit_rate = 400000;
    pCodecCtx->gop_size = 12;


    // Copy the AVCodecContext fields into the stream's AVCodecParameters
    avcodec_parameters_from_context(pStream->codecpar, pCodecCtx);
    av_stream_set_r_frame_rate(pStream, {25, 1});

    //7. Open the encoder
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        LOGE("open encoder fail!");
        return;
    }

    // Dump the output format info
    av_dump_format(pFormatCtx, 0, this->mp4Path, 1);

    // Initialize the frame
    pFrame = av_frame_alloc();
    pFrame->width = pCodecCtx->width;
    pFrame->height = pCodecCtx->height;
    pFrame->format = pCodecCtx->pix_fmt;
    int bufferSize = av_image_get_buffer_size(pCodecCtx->pix_fmt, pCodecCtx->width,
                                              pCodecCtx->height, 1);
    pFrameBuffer = (uint8_t *) av_malloc(bufferSize);
    av_image_fill_arrays(pFrame->data, pFrame->linesize, pFrameBuffer, pCodecCtx->pix_fmt,
                         pCodecCtx->width, pCodecCtx->height, 1);

    AVDictionary *opt = 0;
    //H.264
    if (pCodecCtx->codec_id == AV_CODEC_ID_H264) {
        av_dict_set_int(&opt, "video_track_timescale", 25, 0);
        av_dict_set(&opt, "preset", "slow", 0);
        av_dict_set(&opt, "tune", "zerolatency", 0);
    }
    //8. Write the file header
    avformat_write_header(pFormatCtx, &opt);

    // Allocate the packet that will hold encoded frames
    av_new_packet(&avPacket, bufferSize * 3);

    // Mark that encoding is in progress
    this->transform = true;
}
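EncodeStart only opens the encoder and writes the header; the per-frame encoding and the trailer must follow elsewhere. A sketch of what the matching steps could look like, using the same deprecated avcodec_encode_video2 API as the rest of this example; the method names and the frameIndex member are assumptions, not part of the original class:

// Hypothetical continuation of MP4Encoder: encode one already-filled pFrame.
void MP4Encoder::EncodeFrame() {
    pFrame->pts = frameIndex++; // frameIndex: assumed int64_t member, starting at 0

    int got_packet = 0;
    if (avcodec_encode_video2(pCodecCtx, &avPacket, pFrame, &got_packet) < 0)
        return;
    if (got_packet) {
        avPacket.stream_index = pStream->index;
        // Rescale timestamps from the encoder time base (1/25) to the stream time base.
        av_packet_rescale_ts(&avPacket, pCodecCtx->time_base, pStream->time_base);
        av_write_frame(pFormatCtx, &avPacket);
        av_packet_unref(&avPacket);
    }
}

// Hypothetical shutdown; flushing delayed frames is omitted for brevity.
void MP4Encoder::EncodeStop() {
    av_write_trailer(pFormatCtx); // finalizes the MP4 index
    avio_close(pFormatCtx->pb);
    this->transform = false;
}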
Exemple #19
0
int cellVdecGetPicture(u32 handle, const mem_ptr_t<CellVdecPicFormat> format, u32 out_addr)
{
    cellVdec->Log("cellVdecGetPicture(handle=%d, format_addr=0x%x, out_addr=0x%x)", handle, format.GetAddr(), out_addr);

    VideoDecoder* vdec;
    if (!Emu.GetIdManager().GetIDData(handle, vdec))
    {
        return CELL_VDEC_ERROR_ARG;
    }

    if (!format.IsGood())
    {
        return CELL_VDEC_ERROR_FATAL;
    }

    if (vdec->frames.IsEmpty())
    {
        return CELL_VDEC_ERROR_EMPTY;
    }

    if (out_addr)
    {
        u32 buf_size = a128(av_image_get_buffer_size(vdec->ctx->pix_fmt, vdec->ctx->width, vdec->ctx->height, 1));

        if (!Memory.IsGoodAddr(out_addr, buf_size))
        {
            return CELL_VDEC_ERROR_FATAL;
        }

        if (format->formatType != CELL_VDEC_PICFMT_YUV420_PLANAR)
        {
            cellVdec->Error("cellVdecGetPicture: TODO: unknown formatType(%d)", (u32)format->formatType);
            return CELL_OK;
        }

        if (format->colorMatrixType != CELL_VDEC_COLOR_MATRIX_TYPE_BT709)
        {
            cellVdec->Error("cellVdecGetPicture: TODO: unknown colorMatrixType(%d)", (u32)format->colorMatrixType);
            return CELL_OK;
        }

        VdecFrame vf;

        vdec->frames.Pop(vf);

        AVFrame& frame = *vf.data;

        u8* buf = (u8*)malloc(buf_size);

        // TODO: zero padding bytes

        int err = av_image_copy_to_buffer(buf, buf_size, frame.data, frame.linesize, vdec->ctx->pix_fmt, frame.width, frame.height, 1);
        if (err < 0)
        {
            cellVdec->Error("cellVdecGetPicture: av_image_copy_to_buffer failed(%d)", err);
            Emu.Pause();
        }

        if (!Memory.CopyFromReal(out_addr, buf, buf_size))
        {
            cellVdec->Error("cellVdecGetPicture: data copying failed");
            Emu.Pause();
        }

        av_frame_unref(vf.data);
        av_frame_free(&vf.data);
        free(buf);
    }

    return CELL_OK;
}
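a128 is not shown in this snippet; judging from its use on the buffer size, it presumably rounds a value up to a 128-byte boundary, something like:

// Assumed definition: round x up to the next multiple of 128.
static u32 a128(u32 x)
{
    return (x + 127) & ~127u;
}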
Exemple #20
0
// adapted from [libav-user] list
// https://lists.libav.org/pipermail/libav-user/2010-August/005159.html
int WriteJPEG (AVCodecContext *pCodecCtx, AVFrame *pFrame, int FrameNo)
{
	AVCodecContext         *pOCodecCtx;
	AVCodec                *pOCodec;
	uint8_t                *Buffer;
	int                     BufSiz;
	enum AVPixelFormat      ImgFmt = AV_PIX_FMT_YUVJ420P;
	FILE                   *JPEGFile;
	char                    JPEGFName[256];

	BufSiz = av_image_get_buffer_size(ImgFmt, pCodecCtx->width, pCodecCtx->height, 1);

	Buffer = (uint8_t *)malloc ( BufSiz );
	if ( Buffer == NULL )
	{
		return 0; 
	}

	memset ( Buffer, 0, BufSiz );

	pOCodecCtx = avcodec_alloc_context3 ( NULL );
	if ( !pOCodecCtx ) 
	{
		printf("no pOCodecCtx");
		free ( Buffer );
		return ( 0 );
	}

	pOCodecCtx->bit_rate      = pCodecCtx->bit_rate;
	pOCodecCtx->width         = pCodecCtx->width;
	pOCodecCtx->height        = pCodecCtx->height;
	pOCodecCtx->pix_fmt       = ImgFmt;
	pOCodecCtx->codec_id      = AV_CODEC_ID_MJPEG;
	pOCodecCtx->codec_type    = AVMEDIA_TYPE_VIDEO;
	pOCodecCtx->time_base.num = pCodecCtx->time_base.num;
	pOCodecCtx->time_base.den = pCodecCtx->time_base.den;

	pOCodec = avcodec_find_encoder ( pOCodecCtx->codec_id );
	if ( !pOCodec ) 
	{
		printf("no pOCodec");
		free ( Buffer );
		return ( 0 );
	}
	// Set the quality/qscale options before opening the codec so they take effect.
	pOCodecCtx->mb_lmin = pOCodecCtx->qmin * FF_QP2LAMBDA;
	pOCodecCtx->mb_lmax = pOCodecCtx->qmax * FF_QP2LAMBDA;
	pOCodecCtx->flags = AV_CODEC_FLAG_QSCALE;
	pOCodecCtx->global_quality = pOCodecCtx->qmin * FF_QP2LAMBDA;

	if ( avcodec_open2 ( pOCodecCtx, pOCodec, NULL ) < 0 ) 
	{
		printf("avcodec_open2 failed");
		free ( Buffer );
		return ( 0 );
	}

	pFrame->pts = 1;
	pFrame->quality = pOCodecCtx->global_quality;

	AVPacket pOutPacket;
	av_init_packet(&pOutPacket);
	pOutPacket.data = Buffer;
	pOutPacket.size = BufSiz;

	int got_packet_ptr = 0;
	int encode_err = avcodec_encode_video2( pOCodecCtx, &pOutPacket, pFrame, &got_packet_ptr);

	if (encode_err == 0 && got_packet_ptr)
	{
		sprintf ( JPEGFName, "%06d.jpg", FrameNo );
		JPEGFile = fopen ( JPEGFName, "wb" );
		if ( JPEGFile )
		{
			// Write only the encoded bytes, not the whole buffer.
			fwrite ( Buffer, 1, pOutPacket.size, JPEGFile );
			fclose ( JPEGFile );
		}
	}

	avcodec_close ( pOCodecCtx );
	free ( Buffer );
	return ( BufSiz );
}
Exemple #21
0
AkPacket ConvertVideo::convert(const AkPacket &packet)
{
    AkVideoPacket videoPacket(packet);

    // Convert input format.
    QString format = AkVideoCaps::pixelFormatToString(videoPacket.caps().format());
    AVPixelFormat iFormat = av_get_pix_fmt(format.toStdString().c_str());

    // Initialize rescaling context.
    this->m_scaleContext = sws_getCachedContext(this->m_scaleContext,
                                                videoPacket.caps().width(),
                                                videoPacket.caps().height(),
                                                iFormat,
                                                videoPacket.caps().width(),
                                                videoPacket.caps().height(),
                                                AV_PIX_FMT_BGRA,
                                                SWS_FAST_BILINEAR,
                                                NULL,
                                                NULL,
                                                NULL);

    if (!this->m_scaleContext)
        return AkPacket();

    // Create iPicture.
    AVFrame iFrame;
    memset(&iFrame, 0, sizeof(AVFrame));

    if (av_image_fill_arrays((uint8_t **) iFrame.data,
                         iFrame.linesize,
                         (const uint8_t *) videoPacket.buffer().constData(),
                         iFormat,
                         videoPacket.caps().width(),
                         videoPacket.caps().height(),
                         1) < 0)
        return AkPacket();

    // Create oPicture
    int frameSize = av_image_get_buffer_size(AV_PIX_FMT_BGRA,
                                             videoPacket.caps().width(),
                                             videoPacket.caps().height(),
                                             1);

    QByteArray oBuffer(frameSize, Qt::Uninitialized);
    AVFrame oFrame;
    memset(&oFrame, 0, sizeof(AVFrame));

    if (av_image_fill_arrays((uint8_t **) oFrame.data,
                         oFrame.linesize,
                         (const uint8_t *) oBuffer.constData(),
                         AV_PIX_FMT_BGRA,
                         videoPacket.caps().width(),
                         videoPacket.caps().height(),
                         1) < 0)
        return AkPacket();

    // Convert picture format
    sws_scale(this->m_scaleContext,
              iFrame.data,
              iFrame.linesize,
              0,
              videoPacket.caps().height(),
              oFrame.data,
              oFrame.linesize);

    // Create packet
    AkVideoPacket oPacket(packet);
    oPacket.caps().format() = AkVideoCaps::Format_bgra;
    oPacket.buffer() = oBuffer;

    return oPacket.toPacket();
}
Exemple #22
0
int main(int argc, char *argv[]) {
	// Initializing these to NULL prevents segfaults!
	AVFormatContext   *pFormatCtx = NULL;
	int               i, videoStream;
	AVCodecContext    *pCodecCtxOrig = NULL;
	AVCodecContext    *pCodecCtx = NULL;
	AVCodec           *pCodec = NULL;
	AVFrame           *pFrame = NULL;
	AVFrame           *pFrameRGB = NULL;
	AVPacket          packet;
	int               frameFinished;
	int               numBytes;
	uint8_t           *buffer = NULL;
	struct SwsContext *sws_ctx = NULL;

	if(argc < 2) {
		printf("Please provide a movie file\n");
		return -1;
	}
	// Register all formats and codecs
	av_register_all();

	// Open video file
	if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
	{
		return -1; // Couldn't open file
	}

	// Retrieve stream information
	if(avformat_find_stream_info(pFormatCtx, NULL)<0)
	{
		return -1; // Couldn't find stream information
	}

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, argv[1], 0);

	// Find the first video stream
	videoStream=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++)
	{
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
			videoStream=i;
			break;
		}
	}
	if(videoStream==-1)
	{
		return -1; // Didn't find a video stream
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtxOrig=pFormatCtx->streams[videoStream]->codec;
	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtxOrig->codec_id);
	if(pCodec==NULL) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; // Codec not found
	}
	// Copy context
	pCodecCtx = avcodec_alloc_context3(pCodec);
	if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
		fprintf(stderr, "Couldn't copy codec context");
		return -1; // Error copying codec context
	}

	// Open codec
	if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
	{
		return -1; // Could not open codec
	}

	// Allocate video frame
	pFrame=av_frame_alloc();

	// Allocate an AVFrame structure
	pFrameRGB=av_frame_alloc();
	if(pFrameRGB==NULL)
	{
		return -1;
	}

	// Determine required buffer size and allocate buffer
	numBytes=av_image_get_buffer_size(AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height, 1);
	buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

	// Assign appropriate parts of buffer to the image planes in pFrameRGB;
	// av_image_fill_arrays() points pFrameRGB->data and pFrameRGB->linesize
	// into buffer.
	av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, buffer, 
		AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height, 1);

	// initialize SWS context for software scaling
	sws_ctx = sws_getContext(pCodecCtx->width,
		pCodecCtx->height,
		pCodecCtx->pix_fmt,
		pCodecCtx->width,
		pCodecCtx->height,
		AV_PIX_FMT_RGB24,
		SWS_BILINEAR,
		NULL,
		NULL,
		NULL);

	// Read frames and save first five frames to disk
	i=0;
	while(av_read_frame(pFormatCtx, &packet)>=0) {
		// Is this a packet from the video stream?
		if(packet.stream_index==videoStream) {
			// Decode video frame
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

			// Did we get a video frame?
			if(frameFinished) {
				// Convert the image from its native format to RGB
				sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
				pFrame->linesize, 0, pCodecCtx->height,
				pFrameRGB->data, pFrameRGB->linesize);

				// Save the frame to disk
				if(++i<=5)
				{
					WriteJPEG(pCodecCtx, pFrame, i);
				}
				//SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
			}
		}

		// Free the packet that was allocated by av_read_frame
		av_packet_unref(&packet);
	}

	// Free the RGB image
	av_free(buffer);
	av_frame_free(&pFrameRGB);

	// Free the YUV frame
	av_frame_free(&pFrame);

	// Close the codecs
	avcodec_close(pCodecCtx);
	avcodec_close(pCodecCtxOrig);

	// Close the video file
	avformat_close_input(&pFormatCtx);

	return 0;
}
Exemple #23
0
static int video_decode_example(const char *input_filename)
{
    AVCodec *codec = NULL;
    AVCodecContext *ctx= NULL;
    AVCodecParameters *origin_par = NULL;
    AVFrame *fr = NULL;
    uint8_t *byte_buffer = NULL;
    AVPacket pkt;
    AVFormatContext *fmt_ctx = NULL;
    int number_of_written_bytes;
    int video_stream;
    int got_frame = 0;
    int byte_buffer_size;
    int i = 0;
    int result;
    int end_of_stream = 0;

    result = avformat_open_input(&fmt_ctx, input_filename, NULL, NULL);
    if (result < 0) {
        av_log(NULL, AV_LOG_ERROR, "Can't open file\n");
        return result;
    }

    result = avformat_find_stream_info(fmt_ctx, NULL);
    if (result < 0) {
        av_log(NULL, AV_LOG_ERROR, "Can't get stream info\n");
        return result;
    }

    video_stream = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (video_stream < 0) {
      av_log(NULL, AV_LOG_ERROR, "Can't find video stream in input file\n");
      return -1;
    }

    origin_par = fmt_ctx->streams[video_stream]->codecpar;

    codec = avcodec_find_decoder(origin_par->codec_id);
    if (!codec) {
        av_log(NULL, AV_LOG_ERROR, "Can't find decoder\n");
        return -1;
    }

    ctx = avcodec_alloc_context3(codec);
    if (!ctx) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate decoder context\n");
        return AVERROR(ENOMEM);
    }

    result = avcodec_parameters_to_context(ctx, origin_par);
    if (result) {
        av_log(NULL, AV_LOG_ERROR, "Can't copy decoder context\n");
        return result;
    }

    result = avcodec_open2(ctx, codec, NULL);
    if (result < 0) {
        av_log(ctx, AV_LOG_ERROR, "Can't open decoder\n");
        return result;
    }

    fr = av_frame_alloc();
    if (!fr) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate frame\n");
        return AVERROR(ENOMEM);
    }

    byte_buffer_size = av_image_get_buffer_size(ctx->pix_fmt, ctx->width, ctx->height, 16);
    byte_buffer = av_malloc(byte_buffer_size);
    if (!byte_buffer) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate buffer\n");
        return AVERROR(ENOMEM);
    }

    printf("#tb %d: %d/%d\n", video_stream, fmt_ctx->streams[video_stream]->time_base.num, fmt_ctx->streams[video_stream]->time_base.den);
    i = 0;
    av_init_packet(&pkt);
    do {
        if (!end_of_stream)
            if (av_read_frame(fmt_ctx, &pkt) < 0)
                end_of_stream = 1;
        if (end_of_stream) {
            pkt.data = NULL;
            pkt.size = 0;
        }
        if (pkt.stream_index == video_stream || end_of_stream) {
            got_frame = 0;
            if (pkt.pts == AV_NOPTS_VALUE)
                pkt.pts = pkt.dts = i;
            result = avcodec_decode_video2(ctx, fr, &got_frame, &pkt);
            if (result < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding frame\n");
                return result;
            }
            if (got_frame) {
                number_of_written_bytes = av_image_copy_to_buffer(byte_buffer, byte_buffer_size,
                                        (const uint8_t* const *)fr->data, (const int*) fr->linesize,
                                        ctx->pix_fmt, ctx->width, ctx->height, 1);
                if (number_of_written_bytes < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Can't copy image to buffer\n");
                    return number_of_written_bytes;
                }
                printf("%d, %10"PRId64", %10"PRId64", %8"PRId64", %8d, 0x%08lx\n", video_stream,
                        fr->pts, fr->pkt_dts, fr->pkt_duration,
                        number_of_written_bytes, av_adler32_update(0, (const uint8_t*)byte_buffer, number_of_written_bytes));
            }
            av_packet_unref(&pkt);
            av_init_packet(&pkt);
        }
        i++;
    } while (!end_of_stream || got_frame);

    av_packet_unref(&pkt);
    av_frame_free(&fr);
    avcodec_close(ctx);
    avformat_close_input(&fmt_ctx);
    avcodec_free_context(&ctx);
    av_freep(&byte_buffer);
    return 0;
}
  /**
   * Decode the given input file to YUV at the destination path.
   */
  JNIEXPORT jint JNICALL Java_com_linecloud_composition_jni_LYCompositionKit_decodeForYUV
    (JNIEnv *env, jclass clazz, jstring input_jstr, jstring output_jstr) {

    	AVFormatContext *pFormatCtx;
    	int i;
    	int videoindex;

    	AVCodecContext *pCodecCtx;
    	AVCodec *pCodec;

    	AVFrame *pFrame;
    	AVFrame *pFrameYUV;
    	uint8_t *out_buffer;
    	AVPacket *packet;
    	int y_size;
    	int ret;
    	int got_picture;
    	struct SwsContext *img_convert_ctx;
    	FILE *fp_yuv;
    	int frame_cnt;
    	clock_t time_start;
    	clock_t time_finish;
    	double time_duration = 0.0;

    	char input_str[500] = {0};
    	char output_str[500] = {0};
    	char info[1000] = {0};
    	sprintf(input_str, "%s", (*env)->GetStringUTFChars(env, input_jstr, NULL));
    	sprintf(output_str, "%s", (*env)->GetStringUTFChars(env, output_jstr, NULL));

        //FFmpeg av_log() callback
        av_log_set_callback(custom_log);

    	// Register components and initialize networking
    	av_register_all();
    	avformat_network_init();

    	pFormatCtx = avformat_alloc_context();
    	if(avformat_open_input(&pFormatCtx, input_str, NULL, NULL) != 0) {
            LOGE("Couldn't open input stream.\n");
    	    return -1;
    	}

    	if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
    	    LOGE("Couldn't find stream information.\n");
    	    return -1;
    	}

    	videoindex = -1;

    	for(i = 0; i < pFormatCtx->nb_streams; i++) {
            if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                videoindex = i;
                break;
            }
    	}

    	if(videoindex == -1) {
    	    LOGE("Couldn't find a video stream.\n");
    	    return -1;
    	}
        pCodecCtx = pFormatCtx->streams[videoindex]->codec;
        pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        if(pCodec == NULL) {
            LOGE("Couldn't open codec.\n");
            return -1;
        }

        if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
            LOGE("Couldn't open codec.\n");
            return -1;
        }

        pFrame = av_frame_alloc();
        pFrameYUV = av_frame_alloc();
        out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
        av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);

        packet = (AVPacket *) av_malloc(sizeof(AVPacket));
        img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
            pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

        sprintf(info, "[Input   ]%s\n", input_str);
        sprintf(info, "%s[Output  ]%s\n", info, output_str);
        sprintf(info, "%s[Format  ]%s\n", info, pFormatCtx->iformat->name);
        sprintf(info, "%s[Codec   ]%s\n", info, pCodecCtx->codec->name);
        sprintf(info, "%s[Resolution]%dx%d\n", info, pCodecCtx->width, pCodecCtx->height);

        fp_yuv = fopen(output_str, "wb+");
        if (fp_yuv == NULL) {
            LOGE("Couldn't open output file.\n");
            return -1;
        }

        frame_cnt = 0;
        time_start = clock();
        while(av_read_frame(pFormatCtx, packet) >= 0) {

            if(packet->stream_index == videoindex) {
                ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
                if(ret < 0) {
                    LOGE("Decode error.\n");
                    return -1;
                }

                if(got_picture) {
                    // Convert the decoded picture to YUV420P
                    sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);

                    y_size = pCodecCtx->width * pCodecCtx->height;
                    fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);// Y
                    fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);// U
                    fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);// V

                    // output info
                    char picture_str[10] = {0};
                    switch(pFrame->pict_type) {
                        case AV_PICTURE_TYPE_I:
                            sprintf(picture_str, "I");
                            break;
                        case AV_PICTURE_TYPE_B:
                            sprintf(picture_str, "B");
                            break;
                        default:
                            sprintf(picture_str, "Other");
                            break;
                    }
                    LOGI("Frame Index: %5d. Type:%s", frame_cnt, picture_str);
                    frame_cnt++;
                }
            }
            av_packet_unref(packet);
        }

        // flush decoder
        // FIX: flush frames remaining in the decoder
        while(1) {
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if(ret < 0) {
                break;
            }
            if(!got_picture) {
                break;
            }
            sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
            int y_size = pCodecCtx->width * pCodecCtx->height;
            fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);// Y
            fwrite(pFrameYUV->data[1], 1, y_size, fp_yuv);// U
            fwrite(pFrameYUV->data[2], 1, y_size, fp_yuv);// V

            // output info
            char picture_str[10] = {0};
            switch(pFrame->pict_type) {
                case AV_PICTURE_TYPE_I:
                    sprintf(picture_str, "I");
                    break;
                case AV_PICTURE_TYPE_P:
                    sprintf(picture_str, "P");
                    break;
                case AV_PICTURE_TYPE_B:
                    sprintf(picture_str, "B");
                    break;
                default:
                    sprintf(picture_str, "Other");
                    break;
            }
            LOGI("Frame Index: %5d. Type:%s", frame_cnt, picture_str);
            frame_cnt++;
        }

        time_finish = clock();
        time_duration = (double)(time_finish - time_start) * 1000.0 / CLOCKS_PER_SEC;

        sprintf(info + strlen(info), "[Time    ]%f ms\n", time_duration);
        sprintf(info + strlen(info), "[Count   ]%d\n", frame_cnt);

        sws_freeContext(img_convert_ctx);

        fclose(fp_yuv);

        av_frame_free(&pFrameYUV);
        av_frame_free(&pFrame);
        avcodec_close(pCodecCtx);
        avformat_close_input(&pFormatCtx);

    	return 0;
    }
Exemple #25
0
void ffplayer::VideoDecoderThreadRun()
{
	bool bFirst = true;
	while (1)
	{
	//	SDL_Delay(1);
		if (m_bStop){
			break;
		}
		
		PInfo pFrameInfo = m_videoPacketBuf.getInfoFromList();
		if (!pFrameInfo) // nothing queued yet; wait and retry
		{
			SDL_Delay(1);
			continue;
		}

		AVPacket packet;
		av_init_packet(&packet);
		packet.data = pFrameInfo->Data;
		packet.size = pFrameInfo->DataLen;
		packet.pts = pFrameInfo->frameInfo.iTimestampObsolute;

		int got = 0;	
		decode(packet, got, pFrameInfo->frameInfo.bNewPos);

		if (got)
		{	
			int size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, m_pCodecCtx->width, m_pCodecCtx->height, 1);
			PInfo pYuvInfo = NULL;
			int count = m_freeRenderBuf.getCurCount(); // keep the last frame alive so a snapshot taken while paused still has valid data
			if (count == 1)
			{
			//	pYuvInfo = CBuffer::createBuf(m_pCodecCtx->width* m_pCodecCtx->height * 3 / 2);
			//	pYuvInfo = CBuffer::createBuf(m_pFrame->linesize[0]* m_pCodecCtx->height * 3 / 2);
				pYuvInfo = CBuffer::createBuf(size);
			}
			else
			{
				//pYuvInfo = m_freeRenderBuf.getFreeInfoFromList(m_pCodecCtx->width* m_pCodecCtx->height * 3 / 2);
				//pYuvInfo = m_freeRenderBuf.getFreeInfoFromList(m_pFrame->linesize[0] * m_pCodecCtx->height * 3 / 2);;
				pYuvInfo = m_freeRenderBuf.getFreeInfoFromList(size);
			}
				

			AVFrame dstFrame;
			av_image_fill_arrays(dstFrame.data, dstFrame.linesize, (uint8_t*)pYuvInfo->Data, AV_PIX_FMT_YUV420P, m_pCodecCtx->width, m_pCodecCtx->height, 1);
			
			if (m_pFrame->format != AV_PIX_FMT_YUV420P)
			{			
				struct SwsContext* img_convert_ctx = NULL;
				img_convert_ctx = sws_getContext(m_pCodecCtx->width, m_pCodecCtx->height, (AVPixelFormat)m_pFrame->format,
					m_pCodecCtx->width, m_pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);


				int n = sws_scale(img_convert_ctx, m_pFrame->data, m_pFrame->linesize,
					0, m_pCodecCtx->height, dstFrame.data, dstFrame.linesize);
				sws_freeContext(img_convert_ctx);
			}
			else
			{
				av_image_copy(dstFrame.data, dstFrame.linesize, (const uint8_t **)m_pFrame->data, m_pFrame->linesize, AV_PIX_FMT_YUV420P, m_pCodecCtx->width, m_pCodecCtx->height);
			}
					
			pYuvInfo->width = m_pCodecCtx->width;
			pYuvInfo->height = m_pCodecCtx->height;
			m_iWidth = m_pCodecCtx->width;
			m_iHeight = m_pCodecCtx->height;
			double pts = av_frame_get_best_effort_timestamp(m_pFrame);
			
			pYuvInfo->frameInfo.iTimestampObsolute = pts;
			pYuvInfo->frameInfo.bNewPos = pFrameInfo->frameInfo.bNewPos;
			pYuvInfo->frameInfo.serial = pFrameInfo->frameInfo.serial;

			while (m_renderBuf.getCurCount() >= MAX_YUV_SIZE)
			{
				SDL_Delay(10);
				if (m_bStop){
					CBuffer::freeBuf(&pYuvInfo);
					CBuffer::freeBuf(&pFrameInfo);
					return;
				}
			}

			m_renderBuf.insertList(pYuvInfo);
			
		}
		
		CBuffer::freeBuf(&pFrameInfo);
	}//while
}
Exemple #26
0
static av_cold int utvideo_encode_init(AVCodecContext *avctx)
{
    UtVideoContext *utv = (UtVideoContext *)avctx->priv_data;
    UtVideoExtra *info;
    uint32_t flags, in_format;
    int ret;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
        in_format = UTVF_YV12;
        avctx->bits_per_coded_sample = 12;
        if (avctx->colorspace == AVCOL_SPC_BT709)
            avctx->codec_tag = MKTAG('U', 'L', 'H', '0');
        else
            avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
        break;
    case AV_PIX_FMT_YUYV422:
        in_format = UTVF_YUYV;
        avctx->bits_per_coded_sample = 16;
        if (avctx->colorspace == AVCOL_SPC_BT709)
            avctx->codec_tag = MKTAG('U', 'L', 'H', '2');
        else
            avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
        break;
    case AV_PIX_FMT_BGR24:
        in_format = UTVF_NFCC_BGR_BU;
        avctx->bits_per_coded_sample = 24;
        avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
        break;
    case AV_PIX_FMT_RGB32:
        in_format = UTVF_NFCC_BGRA_BU;
        avctx->bits_per_coded_sample = 32;
        avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
        break;
    default:
        return AVERROR(EINVAL);
    }

#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->prediction_method)
        utv->pred = avctx->prediction_method;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    /* Check before we alloc anything */
    if (utv->pred != 0 && utv->pred != 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid prediction method.\n");
        return AVERROR(EINVAL);
    }

    flags = ((utv->pred + 1) << 8) | (avctx->thread_count - 1);

    avctx->priv_data = utv;

    /* Alloc extradata buffer */
    info = (UtVideoExtra *)av_malloc(sizeof(*info));

    if (!info) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata buffer.\n");
        return AVERROR(ENOMEM);
    }

    /*
     * We use this buffer to hold the data that Ut Video returns,
     * since we cannot decode planes separately with it.
     */
    ret = av_image_get_buffer_size(avctx->pix_fmt, avctx->width, avctx->height, 1);
    if (ret < 0) {
        av_free(info);
        return ret;
    }
    utv->buf_size = ret;

    utv->buffer = (uint8_t *)av_malloc(utv->buf_size);

    if (utv->buffer == NULL) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate output buffer.\n");
        av_free(info);
        return AVERROR(ENOMEM);
    }

    /*
     * Create a Ut Video instance. Since the function wants
     * an "interface name" string, pass it the name of the lib.
     */
    utv->codec = CCodec::CreateInstance(UNFCC(avctx->codec_tag), "libavcodec");

    /* Initialize encoder */
    utv->codec->EncodeBegin(in_format, avctx->width, avctx->height,
                            CBGROSSWIDTH_WINDOWS);

    /* Get extradata from encoder */
    avctx->extradata_size = utv->codec->EncodeGetExtraDataSize();
    utv->codec->EncodeGetExtraData(info, avctx->extradata_size, in_format,
                                   avctx->width, avctx->height,
                                   CBGROSSWIDTH_WINDOWS);
    avctx->extradata = (uint8_t *)info;

    /* Set flags */
    utv->codec->SetState(&flags, sizeof(flags));

    return 0;
}