static av_cold int libdirac_encode_init(AVCodecContext *avccontext)
{

    DiracEncoderParams* p_dirac_params = avccontext->priv_data;
    int no_local = 1;
    int verbose  = avccontext->debug;
    VideoFormat preset;

    /* get Dirac preset */
    preset = GetDiracVideoFormatPreset(avccontext);

    /* initialize the encoder context */
    dirac_encoder_context_init(&(p_dirac_params->enc_ctx), preset);

    p_dirac_params->enc_ctx.src_params.chroma = GetDiracChromaFormat(avccontext->pix_fmt);

    if (p_dirac_params->enc_ctx.src_params.chroma == formatNK) {
        av_log(avccontext, AV_LOG_ERROR,
               "Unsupported pixel format %d. This codec supports only "
               "Planar YUV formats (yuv420p, yuv422p, yuv444p\n",
               avccontext->pix_fmt);
        return -1;
    }

    p_dirac_params->enc_ctx.src_params.frame_rate.numerator   = avccontext->time_base.den;
    p_dirac_params->enc_ctx.src_params.frame_rate.denominator = avccontext->time_base.num;

    p_dirac_params->enc_ctx.src_params.width  = avccontext->width;
    p_dirac_params->enc_ctx.src_params.height = avccontext->height;

    p_dirac_params->frame_size = avpicture_get_size(avccontext->pix_fmt,
                                 avccontext->width,
                                 avccontext->height);

    avccontext->coded_frame = &p_dirac_params->picture;

    if (no_local) {
        p_dirac_params->enc_ctx.decode_flag = 0;
        p_dirac_params->enc_ctx.instr_flag  = 0;
    } else {
        p_dirac_params->enc_ctx.decode_flag = 1;
        p_dirac_params->enc_ctx.instr_flag  = 1;
    }

    /* Intra-only sequence */
    if (!avccontext->gop_size) {
        p_dirac_params->enc_ctx.enc_params.num_L1 = 0;
        if (avccontext->coder_type == FF_CODER_TYPE_VLC)
            p_dirac_params->enc_ctx.enc_params.using_ac = 0;
    } else
        avccontext->has_b_frames = 1;

    if (avccontext->flags & CODEC_FLAG_QSCALE) {
        if (avccontext->global_quality) {
            p_dirac_params->enc_ctx.enc_params.qf = avccontext->global_quality
                                                    / (FF_QP2LAMBDA * 10.0);
            /* if it is not default bitrate then send target rate. */
            if (avccontext->bit_rate >= 1000 &&
                    avccontext->bit_rate != 200000)
                p_dirac_params->enc_ctx.enc_params.trate = avccontext->bit_rate
                        / 1000;
        } else
            p_dirac_params->enc_ctx.enc_params.lossless = 1;
    } else if (avccontext->bit_rate >= 1000)
        p_dirac_params->enc_ctx.enc_params.trate = avccontext->bit_rate / 1000;

    if ((preset > VIDEO_FORMAT_QCIF || preset < VIDEO_FORMAT_QSIF525) &&
            avccontext->bit_rate == 200000)
        p_dirac_params->enc_ctx.enc_params.trate = 0;

    if (avccontext->flags & CODEC_FLAG_INTERLACED_ME)
        /* all material can be coded as interlaced or progressive
         * irrespective of the type of source material */
        p_dirac_params->enc_ctx.enc_params.picture_coding_mode = 1;

    p_dirac_params->p_encoder = dirac_encoder_init(&(p_dirac_params->enc_ctx),
                                verbose);

    if (!p_dirac_params->p_encoder) {
        av_log(avccontext, AV_LOG_ERROR,
               "Unrecoverable Error: dirac_encoder_init failed.\n");
        return -1;
    }

    /* allocate enough memory for the incoming data */
    p_dirac_params->p_in_frame_buf = av_malloc(p_dirac_params->frame_size);
    if (!p_dirac_params->p_in_frame_buf)
        return -1;

    /* initialize the encoded frame queue */
    ff_dirac_schro_queue_init(&p_dirac_params->enc_frame_queue);

    return 0;
}
int FfmpegCamera::OpenFfmpeg() {

    Debug ( 2, "OpenFfmpeg called." );

    mOpenStart = time(NULL);
    mIsOpening = true;

    // Open the input, not necessarily a file
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_open_input_file" );
    if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
#else
    Debug ( 1, "Calling avformat_open_input" );

    mFormatContext = avformat_alloc_context( );
    mFormatContext->interrupt_callback.callback = FfmpegInterruptCallback;
    mFormatContext->interrupt_callback.opaque = this;

    if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, NULL ) !=0 )
#endif
    {
        mIsOpening = false;
        Error( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );
        return -1;
    }

    mIsOpening = false;
    Debug ( 1, "Opened input" );

    // Locate stream info from avformat_open_input
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_find_stream_info" );
    if ( av_find_stream_info( mFormatContext ) < 0 )
#else
    Debug ( 1, "Calling avformat_find_stream_info" );
    if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
#endif
        Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
    
    Debug ( 1, "Got stream info" );

    // Find first video stream present
    mVideoStreamId = -1;
    for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
    {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
        if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
        if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
        {
            mVideoStreamId = i;
            break;
        }
    }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mPath.c_str() );

    Debug ( 1, "Found video stream" );

    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Try and get the codec from the codec context
    if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Found decoder" );

    // Open the codec
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 7, 0)
    Debug ( 1, "Calling avcodec_open" );
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    Debug ( 1, "Calling avcodec_open2" );
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Opened codec" );

    // Allocate space for the native video frame
    mRawFrame = avcodec_alloc_frame();

    // Allocate space for the converted video frame
    mFrame = avcodec_alloc_frame();
    
    if(mRawFrame == NULL || mFrame == NULL)
        Fatal( "Unable to allocate frame for %s", mPath.c_str() );

    Debug ( 1, "Allocated frames" );
    
    int pSize = avpicture_get_size( imagePixFormat, width, height );
    if( (unsigned int)pSize != imagesize) {
        Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
    }

    Debug ( 1, "Validated imagesize" );
    
#if HAVE_LIBSWSCALE
    Debug ( 1, "Calling sws_isSupportedInput" );
    if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
        Fatal("swscale does not support the codec pixel format: %d", mCodecContext->pix_fmt);
    }
Example #3
QByteArray AVDecoder::WriteJPEG(AVCodecContext *pCodecCtx, AVFrame *pFrame, int width, int height)
{
    AVCodecContext *pOCodecCtx;
    AVCodec        *pOCodec;

    QByteArray data;

    pOCodec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);

    if (!pOCodec) {
        return data;
    }

    SwsContext *sws_ctx = sws_getContext(
                pCodecCtx->width, pCodecCtx->height,
                pCodecCtx->pix_fmt,
                width, height,
                AV_PIX_FMT_YUV420P, SWS_BICUBIC,
                NULL, NULL, NULL);

    if(!sws_ctx) {
        return data;
    }

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
    AVFrame *pFrameRGB = av_frame_alloc();
#else
    AVFrame *pFrameRGB = avcodec_alloc_frame();
#endif

    if(pFrameRGB == NULL) {
        sws_freeContext(sws_ctx);
        return data;
    }

    // detect ffmpeg (>= 100) or libav (< 100)
#if (LIBAVUTIL_VERSION_MICRO >= 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,63,100)) || \
    (LIBAVUTIL_VERSION_MICRO < 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54,6,0))
    int numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 16);
#else
    int numBytes = avpicture_get_size(PIX_FMT_YUVJ420P, width, height);
#endif

    uint8_t *buffer = (uint8_t *)av_malloc(numBytes);

    if(!buffer) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        avcodec_free_frame(&pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    // detect ffmpeg (>= 100) or libav (< 100)
#if (LIBAVUTIL_VERSION_MICRO >= 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,63,100)) || \
    (LIBAVUTIL_VERSION_MICRO < 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54,6,0))
    av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, buffer, AV_PIX_FMT_YUV420P, width, height, 1);
#else
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_YUVJ420P, width, height);
#endif

    sws_scale(
        sws_ctx,
        pFrame->data,
        pFrame->linesize,
        0,
        pCodecCtx->height,
        pFrameRGB->data,
        pFrameRGB->linesize
    );

    pOCodecCtx = avcodec_alloc_context3(pOCodec);

    if(pOCodecCtx == NULL) {
        av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        avcodec_free_frame(&pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    pOCodecCtx->bit_rate      = pCodecCtx->bit_rate;
    pOCodecCtx->width         = width;
    pOCodecCtx->height        = height;
    pOCodecCtx->pix_fmt       = AV_PIX_FMT_YUVJ420P;
    pOCodecCtx->color_range   = AVCOL_RANGE_JPEG;
    pOCodecCtx->codec_id      = AV_CODEC_ID_MJPEG;
    pOCodecCtx->codec_type    = AVMEDIA_TYPE_VIDEO;
    pOCodecCtx->time_base.num = pCodecCtx->time_base.num;
    pOCodecCtx->time_base.den = pCodecCtx->time_base.den;

    AVDictionary *opts = NULL;
    if(avcodec_open2(pOCodecCtx, pOCodec, &opts) < 0) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0)
        avcodec_free_context(&pOCodecCtx);
#else
        avcodec_close(pOCodecCtx);
        av_free(pOCodecCtx);
#endif
        av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        avcodec_free_frame(&pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    av_opt_set_int(pOCodecCtx, "lmin", pOCodecCtx->qmin * FF_QP2LAMBDA, 0);
    av_opt_set_int(pOCodecCtx, "lmax", pOCodecCtx->qmax * FF_QP2LAMBDA, 0);

    pOCodecCtx->mb_lmin        = pOCodecCtx->qmin * FF_QP2LAMBDA;
    pOCodecCtx->mb_lmax        = pOCodecCtx->qmax * FF_QP2LAMBDA;
    pOCodecCtx->flags         |= CODEC_FLAG_QSCALE;
    pOCodecCtx->global_quality = pOCodecCtx->qmin * FF_QP2LAMBDA;

    pFrameRGB->pts     = 1;
    pFrameRGB->quality = pOCodecCtx->global_quality;

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    int gotPacket = 0;

    if (avcodec_encode_video2(pOCodecCtx, &pkt, pFrameRGB, &gotPacket) >= 0 && gotPacket)
        data = QByteArray(reinterpret_cast<char *>(pkt.data), pkt.size);

    av_free_packet(&pkt);

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0)
    avcodec_free_context(&pOCodecCtx);
#else
    avcodec_close(pOCodecCtx);
    av_free(pOCodecCtx);
#endif
    av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
    av_frame_free(&pFrameRGB);
#else
    avcodec_free_frame(&pFrameRGB);
#endif
    sws_freeContext(sws_ctx);

    return data;
}
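
A minimal call-site sketch for WriteJPEG (hypothetical names: decoder is an AVDecoder instance; ctx and frame come from an existing decode loop):

#include <QFile>

// Hedged usage sketch: persist one decoded frame as a JPEG file.
QByteArray jpeg = decoder.WriteJPEG(ctx, frame, ctx->width, ctx->height);
if (!jpeg.isEmpty()) {
    QFile out("snapshot.jpg");
    if (out.open(QIODevice::WriteOnly))
        out.write(jpeg);
}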
Example #4
std::unique_ptr<ImgBuf>
VideoConverterFfmpeg::convert(const ImgBuf& src)
{
    std::unique_ptr<ImgBuf> ret;    
    
    const int width = src.width;
    const int height = src.height;

    PixelFormat dst_pixFmt = fourcc_to_ffmpeg(_dst_fmt);
    assert(dst_pixFmt != PIX_FMT_NONE);
    PixelFormat src_pixFmt = PIX_FMT_RGB24;
    
#ifdef HAVE_SWSCALE_H

    if (!_swsContext.get()) {

        _swsContext.reset(new SwsContextWrapper(sws_getContext(width, height,
            src_pixFmt, width, height, dst_pixFmt, SWS_BILINEAR, nullptr, nullptr,
            nullptr)));

        if (!_swsContext->getContext()) {

            // This means we will try to assign the 
            // context again next time.
            _swsContext.reset();
            
            return ret;
        }
    }
#endif


    AVPicture srcpicture = {{src.data, nullptr, nullptr, nullptr},
        {static_cast<int>(src.stride[0]), 0, 0, 0}};
    
    
    int bufsize = avpicture_get_size(dst_pixFmt, width, height);
    if (bufsize == -1) {
        return ret;
    }

    std::uint8_t* dstbuffer = new std::uint8_t[bufsize];

    AVPicture dstpicture;
    avpicture_fill(&dstpicture, dstbuffer, dst_pixFmt, width, height);
    
 
#ifndef HAVE_SWSCALE_H
    img_convert(&dstpicture, dst_pixFmt, &srcpicture, src_pixFmt, width,
                height);
#else

    int rv = sws_scale(_swsContext->getContext(), srcpicture.data,
                       srcpicture.linesize, 0, height, dstpicture.data,
                       dstpicture.linesize);

    if (rv == -1) {
        delete [] dstbuffer;
        return ret;
    }
#endif    
    ret.reset(new ImgBuf(_dst_fmt, dstbuffer, bufsize, src.width,
                         src.height));
    std::copy(dstpicture.linesize, dstpicture.linesize+4, ret->stride.begin()); 
 
    return ret;
}
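
A hedged call-site sketch; the ImgBuf constructor arguments mirror the ret.reset(...) call inside convert() above, and the RGB24 input follows src_pixFmt (rgb24_fourcc, rgbData, rgbSize, w, h and converter are hypothetical placeholders):

// Wrap an existing packed-RGB24 buffer and convert it to _dst_fmt.
ImgBuf src(rgb24_fourcc, rgbData, rgbSize, w, h);
src.stride[0] = w * 3;  // packed RGB24 rows, as convert() assumes
std::unique_ptr<ImgBuf> dst = converter.convert(src);
if (dst) {
    // dst->data holds the converted image; dst->stride was copied from dstpicture.linesize
}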
int RemoteCameraRtsp::PrimeCapture()
{
    Debug( 2, "Waiting for sources" );
    for ( int i = 0; i < 100 && !rtspThread->hasSources(); i++ )
    {
        usleep( 100000 );
    }
    if ( !rtspThread->hasSources() )
        Fatal( "No RTSP sources" );

    Debug( 2, "Got sources" );

    mFormatContext = rtspThread->getFormatContext();

    // Find first video stream present
    mVideoStreamId = -1;
    
    for ( unsigned int i = 0; i < mFormatContext->nb_streams; i++ )
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
	if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
	if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
        {
            mVideoStreamId = i;
            break;
        }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream" );

    // Get a pointer to the codec context for the video stream
    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Find the decoder for the video stream
    mCodec = avcodec_find_decoder( mCodecContext->codec_id );
    if ( mCodec == NULL )
        Panic( "Unable to locate codec %d decoder", mCodecContext->codec_id );

    // Open codec
#if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0)
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Panic( "Can't open codec" );

    // Allocate space for the native video frame
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
    mRawFrame = av_frame_alloc();
#else
    mRawFrame = avcodec_alloc_frame();
#endif

    // Allocate space for the converted video frame
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
    mFrame = av_frame_alloc();
#else
    mFrame = avcodec_alloc_frame();
#endif

	if(mRawFrame == NULL || mFrame == NULL)
		Fatal( "Unable to allocate frame(s)");
	
	int pSize = avpicture_get_size( imagePixFormat, width, height );
	if( (unsigned int)pSize != imagesize) {
		Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
	}
/*	
#if HAVE_LIBSWSCALE
	if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
		Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
	}

	if(!sws_isSupportedOutput(imagePixFormat)) {
		Fatal("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff));
	}
	
#else // HAVE_LIBSWSCALE
    Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
#endif // HAVE_LIBSWSCALE
*/

    return( 0 );
}
Example #6
static int libschroedinger_encode_init(AVCodecContext *avccontext)
{
    FfmpegSchroEncoderParams* p_schro_params = avccontext->priv_data;
    SchroVideoFormatEnum preset;

    /* Initialize the libraries that libschroedinger depends on. */
    schro_init();

    /* Create an encoder object. */
    p_schro_params->encoder = schro_encoder_new();

    if (!p_schro_params->encoder) {
        av_log(avccontext, AV_LOG_ERROR,
               "Unrecoverable Error: schro_encoder_new failed. ");
        return -1;
    }

    /* Initialize the format. */
    preset = ff_get_schro_video_format_preset(avccontext);
    p_schro_params->format =
                    schro_encoder_get_video_format(p_schro_params->encoder);
    schro_video_format_set_std_video_format(p_schro_params->format, preset);
    p_schro_params->format->width  = avccontext->width;
    p_schro_params->format->height = avccontext->height;

    if (SetSchroChromaFormat(avccontext) == -1)
        return -1;

    if (avccontext->color_primaries == AVCOL_PRI_BT709) {
        p_schro_params->format->colour_primaries = SCHRO_COLOUR_PRIMARY_HDTV;
    } else if (avccontext->color_primaries == AVCOL_PRI_BT470BG) {
        p_schro_params->format->colour_primaries = SCHRO_COLOUR_PRIMARY_SDTV_625;
    } else if (avccontext->color_primaries == AVCOL_PRI_SMPTE170M) {
        p_schro_params->format->colour_primaries = SCHRO_COLOUR_PRIMARY_SDTV_525;
    }

    if (avccontext->colorspace == AVCOL_SPC_BT709) {
        p_schro_params->format->colour_matrix = SCHRO_COLOUR_MATRIX_HDTV;
    } else if (avccontext->colorspace == AVCOL_SPC_BT470BG) {
        p_schro_params->format->colour_matrix = SCHRO_COLOUR_MATRIX_SDTV;
    }

    if (avccontext->color_trc == AVCOL_TRC_BT709) {
        p_schro_params->format->transfer_function = SCHRO_TRANSFER_CHAR_TV_GAMMA;
    }

    if (ff_get_schro_frame_format(p_schro_params->format->chroma_format,
                                  &p_schro_params->frame_format) == -1) {
        av_log(avccontext, AV_LOG_ERROR,
               "This codec currently supports only planar YUV 4:2:0, 4:2:2"
               " and 4:4:4 formats.\n");
        return -1;
    }

    p_schro_params->format->frame_rate_numerator   = avccontext->time_base.den;
    p_schro_params->format->frame_rate_denominator = avccontext->time_base.num;

    p_schro_params->frame_size = avpicture_get_size(avccontext->pix_fmt,
                                                    avccontext->width,
                                                    avccontext->height);

    avccontext->coded_frame = &p_schro_params->picture;

    if (!avccontext->gop_size) {
        schro_encoder_setting_set_double(p_schro_params->encoder,
                                         "gop_structure",
                                         SCHRO_ENCODER_GOP_INTRA_ONLY);

        if (avccontext->coder_type == FF_CODER_TYPE_VLC)
            schro_encoder_setting_set_double(p_schro_params->encoder,
                                             "enable_noarith", 1);
    } else {
        schro_encoder_setting_set_double(p_schro_params->encoder,
                                         "au_distance", avccontext->gop_size);
        avccontext->has_b_frames = 1;
    }

    /* FIXME - Need to handle SCHRO_ENCODER_RATE_CONTROL_LOW_DELAY. */
    if (avccontext->flags & CODEC_FLAG_QSCALE) {
        if (!avccontext->global_quality) {
            /* lossless coding */
            schro_encoder_setting_set_double(p_schro_params->encoder,
                                             "rate_control",
                                             SCHRO_ENCODER_RATE_CONTROL_LOSSLESS);
        } else {
            int quality;
            schro_encoder_setting_set_double(p_schro_params->encoder,
                                             "rate_control",
                                             SCHRO_ENCODER_RATE_CONTROL_CONSTANT_QUALITY);

            quality = avccontext->global_quality / FF_QP2LAMBDA;
            if (quality > 10)
                quality = 10;
            schro_encoder_setting_set_double(p_schro_params->encoder,
                                             "quality", quality);
        }
    } else {
        schro_encoder_setting_set_double(p_schro_params->encoder,
                                         "rate_control",
                                         SCHRO_ENCODER_RATE_CONTROL_CONSTANT_BITRATE);

        schro_encoder_setting_set_double(p_schro_params->encoder,
                                         "bitrate",
                                         avccontext->bit_rate);

    }

    if (avccontext->flags & CODEC_FLAG_INTERLACED_ME)
        /* All material can be coded as interlaced or progressive
           irrespective of the type of source material. */
        schro_encoder_setting_set_double(p_schro_params->encoder,
                                         "interlaced_coding", 1);

    schro_encoder_setting_set_double(p_schro_params->encoder, "open_gop",
                                     !(avccontext->flags & CODEC_FLAG_CLOSED_GOP));

    /* FIXME: Signal range hardcoded to 8-bit data until both libschroedinger
     * and libdirac support other bit-depth data. */
    schro_video_format_set_std_signal_range(p_schro_params->format,
                                            SCHRO_SIGNAL_RANGE_8BIT_VIDEO);

    /* Set the encoder format. */
    schro_encoder_set_video_format(p_schro_params->encoder,
                                   p_schro_params->format);

    /* Set the debug level. */
    schro_debug_set_level(avccontext->debug);

    schro_encoder_start(p_schro_params->encoder);

    /* Initialize the encoded frame queue. */
    ff_dirac_schro_queue_init(&p_schro_params->enc_frame_queue);
    return 0;
}
Example #7
/* Init video source 
 * file: path to open
 * width: destination frame width in pixels - use 0 for source
 * height: destination frame height in pixels - use 0 for source
 * format: PIX_FMT_GRAY8 or PIX_FMT_RGB24
 * Returns video context on success, NULL otherwise
 */
video *video_init(char *file, int width, int height, int format)
{
    int i = 0;
	
    video *ret = (video*)calloc(1, sizeof(video));
    if(!ret)
	return NULL;
    ret->format = format;
	
    /* Init ffmpeg */
    av_register_all();
	
    /* Open file, check usability */
    if(av_open_input_file(&ret->pFormatCtx, file, NULL, 0, NULL) ||
       av_find_stream_info(ret->pFormatCtx) < 0)
	return video_quit(ret);
	
    /* Find the first video stream */
    ret->videoStream = -1;
    for(i = 0; i < ret->pFormatCtx->nb_streams; i++)
	if(ret->pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
	    ret->videoStream = i;
	    break;
	}
	
    if(ret->videoStream == -1)
	return video_quit(ret);
	
    /* Get context for codec, pin down target width/height, find codec */
    ret->pCtx = ret->pFormatCtx->streams[ret->videoStream]->codec;
    ret->width = width? width: ret->pCtx->width;
    ret->height = height? height: ret->pCtx->height;
    ret->pCodec = avcodec_find_decoder(ret->pCtx->codec_id);
	
    if(!ret->pCodec ||
       avcodec_open(ret->pCtx, ret->pCodec) < 0)
	return video_quit(ret);
	
    /* Frame rate fix for some codecs */
    if(ret->pCtx->time_base.num > 1000 && ret->pCtx->time_base.den == 1)
	ret->pCtx->time_base.den = 1000;
	
    /* Get framebuffers */
    ret->pRaw = avcodec_alloc_frame();
    ret->pDat = avcodec_alloc_frame();
	
    if(!ret->pRaw || !ret->pDat)
	return video_quit(ret);
	
    /* Create data buffer */
    ret->buffer = (uint8_t*)malloc(avpicture_get_size(ret->format,
					    ret->pCtx->width, ret->pCtx->height));
    if(!ret->buffer)
	return video_quit(ret);
	
    /* Init buffers */
    avpicture_fill((AVPicture *) ret->pDat, ret->buffer, ret->format, 
		   ret->pCtx->width, ret->pCtx->height);
	
    /* Init scale & convert */
    ret->Sctx = sws_getContext(ret->pCtx->width, ret->pCtx->height, ret->pCtx->pix_fmt,
			       ret->width, ret->height, (PixelFormat)ret->format, SWS_BICUBIC, NULL, NULL, NULL);
	
    if(!ret->Sctx)
	return video_quit(ret);
	
    /* Give some info on stderr about the file & stream */
    //dump_format(ret->pFormatCtx, 0, file, 0);
	
    return ret;
}
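
A hedged usage sketch, assuming the companion video_quit() referenced above (the frame-reading API from the rest of the file is elided):

video *v = video_init((char *)"clip.avi", 0, 0, PIX_FMT_RGB24); /* 0,0 keeps source dimensions */
if (v) {
    /* ... decode loop: read packets and scale into v->pDat via v->Sctx ... */
    video_quit(v); /* assumed to tear down contexts and return NULL */
}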
Example #8
void djvFFmpegSave::open(const djvFileInfo & fileInfo, const djvImageIoInfo & info)
    throw (djvError)
{
    //DJV_DEBUG("djvFFmpegSave::open");
    //DJV_DEBUG_PRINT("fileInfo = " << fileInfo);
    //DJV_DEBUG_PRINT("info = " << info);
    
    close();
    
    _frame = 0;
        
    // Open the file.
    
    djvPixel::PIXEL pixel         = static_cast<djvPixel::PIXEL>(0);
    bool            bgr           = false;
    
    QString         avFormatName;
    AVCodecID       avCodecId     = static_cast<AVCodecID>(0);
    AVPixelFormat   avPixel       = static_cast<AVPixelFormat>(0);
    double          avQScale      = -1.0;
    
    _avFrameRgbPixel = static_cast<AVPixelFormat>(0);

    djvFFmpeg::Dictionary dictionary;
    QString               value;

    switch (_options.format)
    {
        /*case djvFFmpeg::H264:

            pixel            = djvPixel::RGBA_U8;
        
            avFormatName     = "mov";
            avCodecId        = AV_CODEC_ID_H264;
            
            switch (_options.quality)
            {
                case djvFFmpeg::LOW:    value = "fast";   break;
                case djvFFmpeg::MEDIUM: value = "medium"; break;
                case djvFFmpeg::HIGH:   value = "slow";   break;

                default: break;
            }

            av_dict_set(
                dictionary(),
                "preset",
                value.toLatin1().data(),
                0);

            break;*/
        
        case djvFFmpeg::MPEG4:

            pixel            = djvPixel::RGBA_U8;
            bgr              = info.bgr;

            avFormatName     = "mp4";
            avCodecId        = AV_CODEC_ID_MPEG4;
            avPixel          = AV_PIX_FMT_YUV420P;
            _avFrameRgbPixel = bgr ? AV_PIX_FMT_BGRA : AV_PIX_FMT_RGBA;

            switch (_options.quality)
            {
                case djvFFmpeg::LOW:    avQScale = 9.0; break;
                case djvFFmpeg::MEDIUM: avQScale = 3.0; break;
                case djvFFmpeg::HIGH:   avQScale = 1.0; break;

                default: break;
            }

            break;
        
        case djvFFmpeg::PRO_RES:

            pixel            = djvPixel::RGB_U16;
            bgr              = info.bgr;

            avFormatName     = "mov";
            avCodecId        = AV_CODEC_ID_PRORES;
            avPixel          = AV_PIX_FMT_YUV422P10;
            _avFrameRgbPixel = bgr ? AV_PIX_FMT_BGR48 : AV_PIX_FMT_RGB48;
         
            switch (_options.quality)
            {
                case djvFFmpeg::LOW:    value = "1"; break;
                case djvFFmpeg::MEDIUM: value = "2"; break;
                case djvFFmpeg::HIGH:   value = "3"; break;

                default: break;
            }

            av_dict_set(
                dictionary(),
                "profile",
                value.toLatin1().data(),
                0);

            break;
        
        case djvFFmpeg::MJPEG:

            pixel            = djvPixel::RGBA_U8;
            bgr              = info.bgr;

            avFormatName     = "mov";
            avCodecId        = AV_CODEC_ID_MJPEG;
            avPixel          = AV_PIX_FMT_YUVJ422P;
            _avFrameRgbPixel = bgr ? AV_PIX_FMT_BGRA : AV_PIX_FMT_RGBA;

            switch (_options.quality)
            {
                case djvFFmpeg::LOW:    avQScale = 9.0; break;
                case djvFFmpeg::MEDIUM: avQScale = 3.0; break;
                case djvFFmpeg::HIGH:   avQScale = 1.0; break;

                default: break;
            }

            break;
        
        default: break;
    }
    
    //DJV_DEBUG_PRINT("pixel = " << pixel);

    //DJV_DEBUGBUG_PRINT("av format name = " << avFormatName);
    //DJV_DEBUGBUG_PRINT("av codec id = " << avCodecId);
    //DJV_DEBUGBUG_PRINT("av pixel = " << avPixel);
    //DJV_DEBUGBUG_PRINT("av rgb pixel = " << _avFrameRgbPixel);
    //DJV_DEBUGBUG_PRINT("av qscale = " << avQScale);
    
    AVOutputFormat * avFormat = av_guess_format(
        avFormatName.toLatin1().data(),
        0, //fileInfo.fileName().toLatin1().data(),
        0);
    
    if (! avFormat)
    {
        throw djvError(
            djvFFmpeg::staticName,
            qApp->translate("djvFFmpegSave", "Cannot find format: %1").
                arg(djvFFmpeg::formatLabels()[_options.format]));
    }
    
    //DJV_DEBUGBUG_PRINT("av format extensions = " << avFormat->extensions);
    
    _avFormatContext = avformat_alloc_context();
    _avFormatContext->oformat = avFormat;

    AVCodec * avCodec = avcodec_find_encoder(avCodecId);

    if (! avCodec)
    {
        throw djvError(
            djvFFmpeg::staticName,
            qApp->translate("djvFFmpegSave", "Cannot find encoder: %1").
                arg(djvFFmpeg::formatLabels()[_options.format]));
    }

    AVCodecContext * avCodecContext = avcodec_alloc_context3(avCodec);
    
    avcodec_get_context_defaults3(avCodecContext, avCodec);
    
    //DJV_DEBUGBUG_PRINT("default bit rate = " << avCodecContext->bit_rate);
    //DJV_DEBUGBUG_PRINT("default gop = " << avCodecContext->gop_size);
    
    avCodecContext->pix_fmt       = avPixel;
    avCodecContext->width         = info.size.x;
    avCodecContext->height        = info.size.y;
    avCodecContext->time_base.den = info.sequence.speed.scale();
    avCodecContext->time_base.num = info.sequence.speed.duration();
    
    if (avFormat->flags & AVFMT_GLOBALHEADER)
        avCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if (avQScale >= 0.0)
    {
        avCodecContext->flags |= CODEC_FLAG_QSCALE;
        avCodecContext->global_quality = FF_QP2LAMBDA * avQScale;
    }
    
    int r = avcodec_open2(avCodecContext, avCodec, dictionary());
    
    if (r < 0)
    {
        throw djvError(
            djvFFmpeg::staticName,
            djvFFmpeg::toString(r));
    }

    _avStream = avformat_new_stream(_avFormatContext, avCodecContext->codec);

    if (! _avStream)
    {
        throw djvError(
            djvFFmpeg::staticName,
            qApp->translate("djvFFmpegSave", "Cannot create stream"));
    }
    
    _avStream->codec         = avCodecContext;
    _avStream->time_base.den = info.sequence.speed.scale();
    _avStream->time_base.num = info.sequence.speed.duration();
    
    r = avio_open2(
        &_avIoContext,
        fileInfo.fileName().toLatin1().data(),
        AVIO_FLAG_READ_WRITE,
        0,
        0);
    
    if (r < 0)
    {
        throw djvError(
            djvFFmpeg::staticName,
            djvFFmpeg::toString(r));
    }
    
    _avFormatContext->pb = _avIoContext;

    r = avformat_write_header(_avFormatContext, 0);
    
    if (r < 0)
    {
        throw djvError(
            djvFFmpeg::staticName,
            djvFFmpeg::toString(r));
    }
    
    _info          = djvPixelDataInfo();
    _info.fileName = fileInfo;
    _info.size     = info.size;
    _info.pixel    = pixel;
    _info.bgr      = info.bgr;

    // Initialize the buffers.
    
    _image.set(_info);
    
    _avFrame         = av_frame_alloc();
    _avFrame->width  = info.size.x;
    _avFrame->height = info.size.y;
    _avFrame->format = avCodecContext->pix_fmt;

    _avFrameBuf = (uint8_t *)av_malloc(
        avpicture_get_size(
            avCodecContext->pix_fmt,
            avCodecContext->width,
            avCodecContext->height));

    avpicture_fill(
        (AVPicture *)_avFrame,
        _avFrameBuf,
        avCodecContext->pix_fmt,
        avCodecContext->width,
        avCodecContext->height);

    _avFrameRgb = av_frame_alloc();
    
    // Initialize the software scaler.

    _swsContext = sws_getContext(
        info.size.x,
        info.size.y,
        _avFrameRgbPixel,
        avCodecContext->width,
        avCodecContext->height,
        avCodecContext->pix_fmt,
        SWS_BILINEAR,
        0,
        0,
        0);

    if (! _swsContext)
    {
        throw djvError(
            djvFFmpeg::staticName,
            qApp->translate("djvFFmpegSave", "Cannot create software scaler"));
    }
}
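
For orientation, a hedged sketch of the per-frame step this open() prepares for (the real write routine lives elsewhere in the class; imageData is a hypothetical pointer to the image's RGB(A) pixels):

// Wrap the source pixels, scale/convert into the codec's format, then encode.
avpicture_fill((AVPicture *)_avFrameRgb, imageData, _avFrameRgbPixel,
               _info.size.x, _info.size.y);
sws_scale(_swsContext, _avFrameRgb->data, _avFrameRgb->linesize,
          0, _info.size.y, _avFrame->data, _avFrame->linesize);
_avFrame->pts = _frame++;
// ... encode _avFrame and av_interleaved_write_frame() into _avFormatContext ...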
Example #9
static int icvOpenAVI_FFMPEG( CvCaptureAVI_FFMPEG* capture, const char* filename )
{
    int err, valid = 0, video_index = -1, i;
    AVFormatContext *ic;

    capture->ic = NULL;
    capture->video_stream = -1;
    capture->video_st = NULL;
    /* register all codecs, demux and protocols */
    av_register_all();

    err = av_open_input_file(&ic, filename, NULL, 0, NULL);
    if (err < 0) {
	    CV_WARN("Error opening file");
	    goto exit_func;
    }
    capture->ic = ic;
    err = av_find_stream_info(ic);
    if (err < 0) {
	    CV_WARN("Could not find codec parameters");
	    goto exit_func;
    }
    for(i = 0; i < ic->nb_streams; i++) {
#if LIBAVFORMAT_BUILD > 4628
        AVCodecContext *enc = ic->streams[i]->codec;
#else
        AVCodecContext *enc = &ic->streams[i]->codec;
#endif
        AVCodec *codec;
        if( CODEC_TYPE_VIDEO == enc->codec_type && video_index < 0) {
            video_index = i;
            codec = avcodec_find_decoder(enc->codec_id);
            if (!codec || avcodec_open(enc, codec) < 0)
                goto exit_func;
            capture->video_stream = i;
            capture->video_st = ic->streams[i];
            capture->picture = avcodec_alloc_frame();

            capture->rgb_picture.data[0] = (uchar*)cvAlloc(
                                    avpicture_get_size( PIX_FMT_BGR24,
                                                        enc->width, enc->height ));
            avpicture_fill( (AVPicture*)&capture->rgb_picture, capture->rgb_picture.data[0],
                            PIX_FMT_BGR24, enc->width, enc->height );

            cvInitImageHeader( &capture->frame, cvSize( enc->width, enc->height ),
                               8, 3, 0, 4 );
            cvSetData( &capture->frame, capture->rgb_picture.data[0],
                       capture->rgb_picture.linesize[0] );
            break;
        }
    }

    if(video_index >= 0)
        valid = 1;

exit_func:

    if( !valid )
        icvCloseAVI_FFMPEG( capture );

    return valid;
}
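
A hedged driver sketch for this internal capture API, assuming the icvCloseAVI_FFMPEG() used in the error path above and the module's grab/retrieve functions (not shown):

CvCaptureAVI_FFMPEG capture;
memset(&capture, 0, sizeof(capture));
if (icvOpenAVI_FFMPEG(&capture, "input.avi")) {
    /* ... the module's grab/retrieve calls fill capture.frame ... */
    icvCloseAVI_FFMPEG(&capture);
}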
Example #10
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int             i, videoStream;
  AVCodecContext  *pCodecCtx = NULL;
  AVCodec         *pCodec = NULL;
  AVFrame         *pFrame = NULL; 
  AVFrame         *pFrameRGB = NULL;
  AVPacket        packet;
  int             frameFinished;
  int             numBytes;
  uint8_t         *buffer = NULL;

  AVDictionary    *optionsDict = NULL;
  struct SwsContext      *sws_ctx = NULL;
  
  if(argc < 2) {
    printf("Please provide a movie file\n");
    return -1;
  }
  // Register all formats and codecs
  av_register_all();
  
  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
    return -1; // Couldn't open file
  
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream=-1;
  for(i=0; i<pFormatCtx->nb_streams; i++)
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
      videoStream=i;
      break;
    }
  if(videoStream==-1)
    return -1; // Didn't find a video stream
  
  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
  if(pCodec==NULL) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1; // Codec not found
  }
  // Open codec
  if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
  pFrame=av_frame_alloc();
  
  // Allocate an AVFrame structure
  pFrameRGB=av_frame_alloc();
  if(pFrameRGB==NULL)
    return -1;
  
  // Determine required buffer size and allocate buffer
  numBytes=avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
			      pCodecCtx->height);
  buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

  sws_ctx =
    sws_getContext
    (
        pCodecCtx->width,
        pCodecCtx->height,
        pCodecCtx->pix_fmt,
        pCodecCtx->width,
        pCodecCtx->height,
        AV_PIX_FMT_RGB24,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
    );
  
  // Assign appropriate parts of buffer to image planes in pFrameRGB
  // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
  // of AVPicture
  avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24,
		 pCodecCtx->width, pCodecCtx->height);
  
  // Read frames and save first five frames to disk
  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 
			   &packet);
      
      // Did we get a video frame?
      if(frameFinished) {
	// Convert the image from its native format to RGB
        sws_scale
        (
            sws_ctx,
            (uint8_t const * const *)pFrame->data,
            pFrame->linesize,
            0,
            pCodecCtx->height,
            pFrameRGB->data,
            pFrameRGB->linesize
        );
	
	// Save the frame to disk
	if(++i<=5)
	  SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, 
		    i);
      }
    }
    
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);
  }
  
  // Free the RGB image
  av_free(buffer);
  av_frame_free(&pFrameRGB);
  
  // Free the YUV frame
  av_frame_free(&pFrame);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}
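
main() above relies on a SaveFrame() helper that is not shown; a minimal sketch consistent with the RGB24 frames produced by sws_scale (each frame written as a binary PPM) might look like this:

#include <stdio.h>

// Hedged sketch of the missing helper: dump an RGB24 AVFrame to frameN.ppm.
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {
  char szFilename[32];
  FILE *pFile;

  sprintf(szFilename, "frame%d.ppm", iFrame);
  pFile = fopen(szFilename, "wb");
  if (pFile == NULL)
    return;

  // Binary PPM header: magic number, dimensions, max channel value.
  fprintf(pFile, "P6\n%d %d\n255\n", width, height);

  // Write one row at a time, honoring the frame's stride (linesize).
  for (int y = 0; y < height; y++)
    fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);

  fclose(pFile);
}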
Example #11
File: pamenc.c  Project: AVbin/libav
static int pam_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *pict, int *got_packet)
{
    PNMContext *s     = avctx->priv_data;
    AVFrame * const p = &s->picture;
    int i, h, w, n, linesize, depth, maxval, ret;
    const char *tuple_type;
    uint8_t *ptr;

    if ((ret = ff_alloc_packet(pkt, avpicture_get_size(avctx->pix_fmt,
                                                       avctx->width,
                                                       avctx->height) + 200)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return ret;
    }

    *p           = *pict;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    s->bytestream_start =
    s->bytestream       = pkt->data;
    s->bytestream_end   = pkt->data + pkt->size;

    h = avctx->height;
    w = avctx->width;
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_MONOWHITE:
        n          = (w + 7) >> 3;
        depth      = 1;
        maxval     = 1;
        tuple_type = "BLACKANDWHITE";
        break;
    case AV_PIX_FMT_GRAY8:
        n          = w;
        depth      = 1;
        maxval     = 255;
        tuple_type = "GRAYSCALE";
        break;
    case AV_PIX_FMT_RGB24:
        n          = w * 3;
        depth      = 3;
        maxval     = 255;
        tuple_type = "RGB";
        break;
    case AV_PIX_FMT_RGB32:
        n          = w * 4;
        depth      = 4;
        maxval     = 255;
        tuple_type = "RGB_ALPHA";
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n",
             w, h, depth, maxval, tuple_type);
    s->bytestream += strlen(s->bytestream);

    ptr      = p->data[0];
    linesize = p->linesize[0];

    if (avctx->pix_fmt == AV_PIX_FMT_RGB32) {
        int j;
        unsigned int v;

        for (i = 0; i < h; i++) {
            for (j = 0; j < w; j++) {
                v = ((uint32_t *)ptr)[j];
                bytestream_put_be24(&s->bytestream, v);
                *s->bytestream++ = v >> 24;
            }
            ptr += linesize;
        }
    } else {
Example #12
int ff_picture_bytesize(int render_fmt, int w, int h) {
  const int bs = avpicture_get_size(render_fmt, w, h);
  if (bs < 0) return 0;
  return bs;
}
Example #13
static DecoderContext *init_decoder(const char *filename)
{
    DecoderContext *dc = (DecoderContext *)calloc(1, sizeof(DecoderContext));
    AVCodecContext *codecCtx;

    // Open the stream
    if(avformat_open_input(&(dc->formatCtx), filename, NULL, NULL) != 0) {
        fprintf(stderr, "Couldn't open file");
        exit(1);
    }

    // Retrieve stream information
    if(avformat_find_stream_info(dc->formatCtx, NULL) < 0) {
        fprintf(stderr, "Couldn't find stream information");
        exit(1);
    }

    // Dump information about file onto standard error
    av_dump_format(dc->formatCtx, 0, filename, 0);


    // Get video Stream
    dc->videoStream = get_video_stream(dc->formatCtx);
    if (dc->videoStream == -1) {
        fprintf(stderr, "Couldn't find video stream");
        exit(1);
    }

    codecCtx = dc->formatCtx->streams[dc->videoStream]->codec;

    /* find the decoder */
    dc->codec = avcodec_find_decoder(codecCtx->codec_id);
    if (!dc->codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    /* Allocate codec context */
    dc->codecCtx = avcodec_alloc_context3(dc->codec);
    if (!dc->codecCtx) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    if(avcodec_copy_context(dc->codecCtx, codecCtx) != 0) {
        fprintf(stderr, "Couldn't copy codec context");
        exit(1); // Error copying codec context
    }

    if(dc->codec->capabilities & CODEC_CAP_TRUNCATED)
        dc->codecCtx->flags |= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open2(dc->codecCtx, dc->codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    dc->frame = av_frame_alloc();
    if (!dc->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    // Allocate input buffer
    dc->numBytes = avpicture_get_size(dc->codecCtx->pix_fmt,
                                      dc->codecCtx->width,
                                      dc->codecCtx->height);

    dc->inbuf = calloc(1, dc->numBytes + FF_INPUT_BUFFER_PADDING_SIZE);

    if (!dc->inbuf) {
        fprintf(stderr, "Could not allocate buffer");
        exit(1);
    }

    memset(dc->inbuf + dc->numBytes, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    dc->frame_count = 0;

    return dc;
}
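
A hedged driver sketch using the context fields initialized above (a matching teardown routine is assumed to exist elsewhere in the file):

DecoderContext *dc = init_decoder("input.mp4");
AVPacket pkt;

while (av_read_frame(dc->formatCtx, &pkt) >= 0) {
    if (pkt.stream_index == dc->videoStream) {
        int got_frame = 0;
        if (avcodec_decode_video2(dc->codecCtx, dc->frame, &got_frame, &pkt) >= 0 && got_frame)
            dc->frame_count++; /* hand dc->frame to the caller here */
    }
    av_free_packet(&pkt);
}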
Example #14
void *encode_video_thread(void *arg)
{
    codec_state *cs = (codec_state *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int p = 0;
    int err;
    int got_packet;
    rtp_msg_t *s_video_msg;
    int video_frame_finished;
    AVFrame *s_video_frame;
    AVFrame *webcam_frame;
    s_video_frame = avcodec_alloc_frame();
    webcam_frame = avcodec_alloc_frame();
    AVPacket enc_video_packet;

    uint8_t *buffer;
    int numBytes;
    /* Determine required buffer size and allocate buffer */
    numBytes = avpicture_get_size(PIX_FMT_YUV420P, cs->webcam_decoder_ctx->width, cs->webcam_decoder_ctx->height);
    buffer = (uint8_t *)av_calloc(numBytes * sizeof(uint8_t),1);
    avpicture_fill((AVPicture *)s_video_frame, buffer, PIX_FMT_YUV420P, cs->webcam_decoder_ctx->width,
                   cs->webcam_decoder_ctx->height);
    cs->sws_ctx = sws_getContext(cs->webcam_decoder_ctx->width, cs->webcam_decoder_ctx->height,
                                 cs->webcam_decoder_ctx->pix_fmt, cs->webcam_decoder_ctx->width, cs->webcam_decoder_ctx->height, PIX_FMT_YUV420P,
                                 SWS_BILINEAR, NULL, NULL, NULL);

    while (!cs->quit && cs->send_video) {

        if (av_read_frame(cs->video_format_ctx, packet) < 0) {
            printf("error reading frame\n");

            if (cs->video_format_ctx->pb->error != 0)
                break;

            continue;
        }

        if (packet->stream_index == cs->video_stream) {
            if (avcodec_decode_video2(cs->webcam_decoder_ctx, webcam_frame, &video_frame_finished, packet) < 0) {
                printf("couldn't decode\n");
                av_free_packet(packet);
                continue;
            }

            av_free_packet(packet);
            sws_scale(cs->sws_ctx, (uint8_t const * const *)webcam_frame->data, webcam_frame->linesize, 0,
                      cs->webcam_decoder_ctx->height, s_video_frame->data, s_video_frame->linesize);
            /* create a new I-frame every 60 frames */
            ++p;

            if (p == 60) {

                s_video_frame->pict_type = AV_PICTURE_TYPE_BI ;
            } else if (p == 61) {
                s_video_frame->pict_type = AV_PICTURE_TYPE_I ;
                p = 0;
            } else {
                s_video_frame->pict_type = AV_PICTURE_TYPE_P ;
            }

            if (video_frame_finished) {
                err = avcodec_encode_video2(cs->video_encoder_ctx, &enc_video_packet, s_video_frame, &got_packet);

                if (err < 0) {
                    printf("could not encode video frame\n");
                    continue;
                }

                if (!got_packet) {
                    continue;
                }

                pthread_mutex_lock(&cs->rtp_msg_mutex_lock);
                THREADLOCK()

                if (!enc_video_packet.data) fprintf(stderr, "video packet data is NULL\n");

                s_video_msg = rtp_msg_new ( cs->_rtp_video, enc_video_packet.data, enc_video_packet.size ) ;

                if (!s_video_msg) {
                    printf("invalid message\n");
                }

                rtp_send_msg ( cs->_rtp_video, s_video_msg, cs->_networking );
                THREADUNLOCK()
                pthread_mutex_unlock(&cs->rtp_msg_mutex_lock);
                av_free_packet(&enc_video_packet);
            }
        } else {
            av_free_packet(packet);
        }
    }
Example #15
/*!
  Allocates and initializes the parameters depending on the video and the desired color type.
  Once the stream is opened, it is possible to get the video encoding framerate with getFramerate(),
  and the dimensions of the images using getWidth() and getHeight().
  
  \param filename : Path to the video which has to be read.
  \param colortype : Desired color map used to open the video.
  The parameter can take two values : COLORED and GRAY_SCALED.
  
  \return It returns true if the parameters could be initialized, false otherwise.
*/
bool vpFFMPEG::openStream(const char *filename, vpFFMPEGColorType colortype)
{
  this->color_type = colortype;
  
  av_register_all();
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,0,0) // libavformat 52.84.0
  if (av_open_input_file (&pFormatCtx, filename, NULL, 0, NULL) != 0)
#else
  if (avformat_open_input (&pFormatCtx, filename, NULL, NULL) != 0) // libavformat 53.4.0
#endif
  {
    vpTRACE("Couldn't open file ");
    return false;
  }

#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,21,0) // libavformat 53.21.0
  if (av_find_stream_info (pFormatCtx) < 0)
#else 
  if (avformat_find_stream_info (pFormatCtx, NULL) < 0)
#endif
      return false;
  
  videoStream = 0;
  bool found_codec = false;
  
  /*
  * Detect streams types
  */
  for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++)
  {
#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(51,0,0)
    if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) // avutil 50.33.0
#else
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) // avutil 51.9.1
#endif
    {
      videoStream = i;
      //std::cout << "rate: " << pFormatCtx->streams[i]->r_frame_rate.num << " " << pFormatCtx->streams[i]->r_frame_rate.den << std::endl;
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(55,12,0)
      framerate_stream =  pFormatCtx->streams[i]->r_frame_rate.num;
      framerate_stream /= pFormatCtx->streams[i]->r_frame_rate.den;
#else
      framerate_stream =  pFormatCtx->streams[i]->avg_frame_rate.num;
      framerate_stream /= pFormatCtx->streams[i]->avg_frame_rate.den;
#endif
      found_codec= true;
      break;
    }
  }

  if (found_codec)
  {
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

    if (pCodec == NULL)
    {
      vpTRACE("unsuported codec");
      return false;		// Codec not found
    }
    
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,35,0) // libavcodec 53.35.0
    if (avcodec_open (pCodecCtx, pCodec) < 0)
#else
    if (avcodec_open2 (pCodecCtx, pCodec, NULL) < 0)
#endif
    {
      vpTRACE("Could not open codec");
      return false;		// Could not open codec
    }

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
    pFrame = avcodec_alloc_frame();
#else
    pFrame = av_frame_alloc(); // libavcodec 55.34.1
#endif

    if (color_type == vpFFMPEG::COLORED)
    {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
      pFrameRGB=avcodec_alloc_frame();
#else
      pFrameRGB=av_frame_alloc(); // libavcodec 55.34.1
#endif
    
      if (pFrameRGB == NULL)
        return false;
      
      numBytes = avpicture_get_size (PIX_FMT_RGB24,pCodecCtx->width,pCodecCtx->height);
    }
    
    else if (color_type == vpFFMPEG::GRAY_SCALED)
    {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
      pFrameGRAY=avcodec_alloc_frame();
#else
      pFrameGRAY=av_frame_alloc(); // libavcodec 55.34.1
#endif
    
      if (pFrameGRAY == NULL)
        return false;
      
      numBytes = avpicture_get_size (PIX_FMT_GRAY8,pCodecCtx->width,pCodecCtx->height);
    }  

    /*
     * Determine required buffer size and allocate buffer
     */
    width = pCodecCtx->width ;
    height = pCodecCtx->height ;
    buffer = (uint8_t *) malloc ((unsigned int)(sizeof (uint8_t)) * (unsigned int)numBytes);
  }
  else
  {
    vpTRACE("Didn't find a video stream");
    return false;
  }
  
  if (color_type == vpFFMPEG::COLORED)
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
  
  else if (color_type == vpFFMPEG::GRAY_SCALED)
    avpicture_fill((AVPicture *)pFrameGRAY, buffer, PIX_FMT_GRAY8, pCodecCtx->width, pCodecCtx->height);
  
  streamWasOpen = true;

  return true;
}
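
A hedged usage sketch built on the accessors named in the comment block above (frame acquisition itself is handled by other methods of the class):

vpFFMPEG reader;
if (reader.openStream("video.mp4", vpFFMPEG::COLORED)) {
    double fps     = reader.getFramerate(); // available once the stream is open
    unsigned int w = reader.getWidth();     // per the documentation above
    unsigned int h = reader.getHeight();
    /* ... acquire frames via the class's read/acquire methods ... */
}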
Example #16
QList<QbPacket> VideoStream::readPackets(AVPacket *packet)
{
    QList<QbPacket> packets;

    if (!this->isValid())
        return packets;

    AVFrame iFrame;
    avcodec_get_frame_defaults(&iFrame);

    int gotFrame;

    avcodec_decode_video2(this->codecContext(),
                          &iFrame,
                          &gotFrame,
                          packet);

    if (!gotFrame)
        return packets;

    int frameSize = avpicture_get_size(this->codecContext()->pix_fmt,
                                       this->codecContext()->width,
                                       this->codecContext()->height);

    // An array deleter is needed so the buffer is released with delete [].
    QSharedPointer<uchar> oBuffer(new uchar[frameSize],
                                  [] (uchar *buffer) { delete [] buffer; });

    if (!oBuffer)
        return packets;

    static bool sync;

    if (this->m_fst)
    {
        sync = av_frame_get_best_effort_timestamp(&iFrame)? false: true;
        this->m_pts = 0;
        this->m_duration = this->fps().invert().value() * this->timeBase().invert().value();
        this->m_fst = false;
    }
    else
        this->m_pts += this->m_duration;

    avpicture_layout((AVPicture *) &iFrame,
                     this->codecContext()->pix_fmt,
                     this->codecContext()->width,
                     this->codecContext()->height,
                     (uint8_t *) oBuffer.data(),
                     frameSize);

    QbCaps caps = this->caps();
    caps.setProperty("sync", sync);

    QbPacket oPacket(caps,
                     oBuffer,
                     frameSize);

    oPacket.setPts(this->m_pts);
    oPacket.setDuration(this->m_duration);
    oPacket.setTimeBase(this->timeBase());
    oPacket.setIndex(this->index());

    packets << oPacket;

    return packets;
}
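
A hedged caller sketch: demuxed packets belonging to this stream are handed to readPackets(), and the resulting QbPacket list is forwarded downstream (formatContext and emitFrame are hypothetical):

AVPacket packet;
while (av_read_frame(formatContext, &packet) >= 0) {
    if (packet.stream_index == videoStream.index())
        for (const QbPacket &p: videoStream.readPackets(&packet))
            emitFrame(p); // hypothetical downstream handler
    av_free_packet(&packet);
}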
Example #17
bool VideoDecoder::InitCodec(const uint32_t width, const uint32_t height)
{
  if (codec_initialized_)
  {
    // TODO(mani-monaj): Maybe re-initialize
    return true;
  }

  try
  {
    ThrowOnCondition(width == 0 || height == 0, std::string("Invalid frame size:") +
                     boost::lexical_cast<std::string>(width) + " x " + boost::lexical_cast<std::string>(height));

    // Very first init
    avcodec_register_all();
    av_register_all();
    av_log_set_level(AV_LOG_QUIET);

    codec_ptr_ = avcodec_find_decoder(CODEC_ID_H264);
    ThrowOnCondition(codec_ptr_ == NULL, "Codec H264 not found!");


    codec_ctx_ptr_ = avcodec_alloc_context3(codec_ptr_);
    codec_ctx_ptr_->pix_fmt = AV_PIX_FMT_YUV420P;
    codec_ctx_ptr_->skip_frame = AVDISCARD_DEFAULT;
    codec_ctx_ptr_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
    codec_ctx_ptr_->skip_loop_filter = AVDISCARD_DEFAULT;
    codec_ctx_ptr_->workaround_bugs = FF_BUG_AUTODETECT;
    codec_ctx_ptr_->codec_id = AV_CODEC_ID_H264;
    codec_ctx_ptr_->skip_idct = AVDISCARD_DEFAULT;
    codec_ctx_ptr_->width = width;
    codec_ctx_ptr_->height = height;

    ThrowOnCondition(
          avcodec_open2(codec_ctx_ptr_, codec_ptr_, NULL) < 0,
          "Can not open the decoder!");



    const uint32_t num_bytes = avpicture_get_size(PIX_FMT_RGB24, codec_ctx_ptr_->width, codec_ctx_ptr_->height);
    {
       frame_ptr_ = avcodec_alloc_frame();
       frame_rgb_ptr_ = avcodec_alloc_frame();

       ThrowOnCondition(!frame_ptr_ || !frame_rgb_ptr_, "Can not allocate memory for frames!");

       frame_rgb_raw_ptr_ = reinterpret_cast<uint8_t*>(av_malloc(num_bytes * sizeof(uint8_t)));
       ThrowOnCondition(frame_rgb_raw_ptr_ == NULL,
                        std::string("Can not allocate memory for the buffer: ") +
                        boost::lexical_cast<std::string>(num_bytes));
       ThrowOnCondition(0 == avpicture_fill(
                          reinterpret_cast<AVPicture*>(frame_rgb_ptr_), frame_rgb_raw_ptr_, PIX_FMT_RGB24,
                          codec_ctx_ptr_->width, codec_ctx_ptr_->height),
                        "Failed to initialize the picture data structure.");
    }
    av_init_packet(&packet_);
  }
  catch (const std::runtime_error& e)
  {
    ARSAL_PRINT(ARSAL_PRINT_ERROR, LOG_TAG, "%s", e.what());
    Cleanup();
    return false;
  }

  codec_initialized_ = true;
  first_iframe_recv_ = false;
  ARSAL_PRINT(ARSAL_PRINT_INFO, LOG_TAG, "H264 Codec is initialized!");
  return true;
}
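
For orientation, a hedged sketch of the decode step this initialization prepares for (buffer and length would come from the drone's video callback; the swscale conversion into frame_rgb_ptr_ is elided):

packet_.data = const_cast<uint8_t *>(buffer);
packet_.size = length;

int frame_finished = 0;
if (avcodec_decode_video2(codec_ctx_ptr_, frame_ptr_, &frame_finished, &packet_) >= 0 &&
    frame_finished) {
    // frame_ptr_ now holds a decoded YUV420P picture; convert it into
    // frame_rgb_ptr_ (backed by frame_rgb_raw_ptr_ above) with swscale.
}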
Example #18
static int targa_encode_frame(AVCodecContext *avctx,
                              unsigned char *outbuf,
                              int buf_size, void *data){
    AVFrame *p = data;
    int bpp, picsize, datasize = -1;
    uint8_t *out;

    if(avctx->width > 0xffff || avctx->height > 0xffff) {
        av_log(avctx, AV_LOG_ERROR, "image dimensions too large\n");
        return AVERROR(EINVAL);
    }
    picsize = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
    if(buf_size < picsize + 45) {
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return AVERROR(EINVAL);
    }

    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;

    /* zero out the header and only set applicable fields */
    memset(outbuf, 0, 12);
    AV_WL16(outbuf+12, avctx->width);
    AV_WL16(outbuf+14, avctx->height);
    /* image descriptor byte: origin is always top-left, bits 0-3 specify alpha */
    outbuf[17] = 0x20 | (avctx->pix_fmt == PIX_FMT_BGRA ? 8 : 0);

    switch(avctx->pix_fmt) {
    case PIX_FMT_GRAY8:
        outbuf[2] = TGA_BW;      /* uncompressed grayscale image */
        outbuf[16] = 8;          /* bpp */
        break;
    case PIX_FMT_RGB555LE:
        outbuf[2] = TGA_RGB;     /* uncompressed true-color image */
        outbuf[16] = 16;         /* bpp */
        break;
    case PIX_FMT_BGR24:
        outbuf[2] = TGA_RGB;     /* uncompressed true-color image */
        outbuf[16] = 24;         /* bpp */
        break;
    case PIX_FMT_BGRA:
        outbuf[2] = TGA_RGB;     /* uncompressed true-color image */
        outbuf[16] = 32;         /* bpp */
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Pixel format '%s' not supported.\n",
               avcodec_get_pix_fmt_name(avctx->pix_fmt));
        return AVERROR(EINVAL);
    }
    bpp = outbuf[16] >> 3;

    out = outbuf + 18;  /* skip past the header we just output */

    /* try RLE compression */
    if (avctx->coder_type != FF_CODER_TYPE_RAW)
        datasize = targa_encode_rle(out, picsize, p, bpp, avctx->width, avctx->height);

    /* if that worked well, mark the picture as RLE compressed */
    if(datasize >= 0)
        outbuf[2] |= 8;

    /* if RLE didn't make it smaller, go back to no compression */
    else datasize = targa_encode_normal(out, p, bpp, avctx->width, avctx->height);

    out += datasize;

    /* The standard recommends including this section, even if we don't use
     * any of the features it affords. TODO: take advantage of the pixel
     * aspect ratio and encoder ID fields available? */
    memcpy(out, "\0\0\0\0\0\0\0\0TRUEVISION-XFILE.", 26);

    return out + 26 - outbuf;
}
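For context, a hypothetical caller of this old buffer-based encode path would size the output from avpicture_get_size() plus the fixed TGA overhead (18-byte header + 26-byte footer; the check above uses picsize + 45 for a little slack). A sketch, where ctx, frame and outfile are assumptions, not names from the source:

/* Hypothetical caller sketch for the pre-AVPacket encode callback above. */
int cap = avpicture_get_size(ctx->pix_fmt, ctx->width, ctx->height) + 45;
uint8_t *buf = av_malloc(cap);
int n = targa_encode_frame(ctx, buf, cap, frame);  /* total bytes, or AVERROR */
if (n >= 0)
    fwrite(buf, 1, n, outfile);                    /* one complete .tga image */
av_free(buf);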
Example #19
	VideoStream::VideoStream(const std::string& filename, unsigned int numFrameBuffered, GLenum minFilter, GLenum magFilter, GLenum sWrapping, GLenum tWrapping, int maxLevel)
	 : __ReadOnly_ComponentLayout(declareLayout(numFrameBuffered)), InputDevice(declareLayout(numFrameBuffered), "Reader"), idVideoStream(0), readFrameCount(0), timeStampFrameRate(1.0f), timeStampOffset(0), timeStampOfLastFrameRead(0), endReached(false),
	   pFormatCtx(NULL), pCodecCtx(NULL), pCodec(NULL), pFrame(NULL), pFrameRGB(NULL), buffer(NULL), pSWSCtx(NULL), idCurrentBufferForWritting(0)
	{
		#ifdef __USE_PBO__
			#ifdef __VIDEO_STREAM_VERBOSE__
				std::cout << "VideoStream::VideoStream - Using PBO for uploading data to the GPU." << std::endl;
			#endif
			pbo = NULL;
		#else
			#ifdef __VIDEO_STREAM_VERBOSE__
				std::cout << "VideoStream::VideoStream - Using standard method HdlTexture::write for uploading data to the GPU." << std::endl;
			#endif
		#endif

		int retCode = 0;

		// Open stream :
		//DEPRECATED IN libavformat : retCode = av_open_input_file(&pFormatCtx, filename.c_str(), NULL, 0, NULL)!=0);
		retCode = avformat_open_input(&pFormatCtx, filename.c_str(), NULL, NULL);

		if(retCode!=0)
			throw Exception("VideoStream::VideoStream - Failed to open stream (at av_open_input_file).", __FILE__, __LINE__);

		// Find stream information :
		//DEPRECATED : retCode = av_find_stream_info(pFormatCtx);
		retCode = avformat_find_stream_info(pFormatCtx, NULL);

		if(retCode<0)
			throw Exception("VideoStream::VideoStream - Failed to open stream (at av_find_stream_info).", __FILE__, __LINE__);

		// Walk through pFormatCtx->nb_streams to find a/the first video stream :
		for(idVideoStream=0; idVideoStream<pFormatCtx->nb_streams; idVideoStream++)
			//DEPRECATED : if(pFormatCtx->streams[idVideoStream]->codec->codec_type==CODEC_TYPE_VIDEO)
			if(pFormatCtx->streams[idVideoStream]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
				break;

		if(idVideoStream>=pFormatCtx->nb_streams)
			throw Exception("VideoStream::VideoStream - Failed to find video stream (at streams[idVideoStream]->codec->codec_type==CODEC_TYPE_VIDEO).", __FILE__, __LINE__);

		// Get a pointer to the codec context for the video stream :
		pCodecCtx = pFormatCtx->streams[idVideoStream]->codec;

		// Find the decoder for the video stream :
		pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

		if(pCodec==NULL)
			throw Exception("VideoStream::VideoStream - No suitable codec found (at avcodec_find_decoder).", __FILE__, __LINE__);

		// Open codec :
		//DEPRECATED : retCode = avcodec_open(pCodecCtx, pCodec);
		retCode = avcodec_open2(pCodecCtx, pCodec, NULL);

		if(retCode<0)
			throw Exception("VideoStream::VideoStream - Could not open codec (at avcodec_open).", __FILE__, __LINE__);

		// Get the framerate :
		/*float timeUnit_sec = static_cast<float>(pCodecCtx->time_base.num)/static_cast<float>(pCodecCtx->time_base.den);
		frameRate = 1.0f/(pCodecCtx->timeUnit_sec;*/

		timeStampFrameRate = static_cast<float>(pFormatCtx->streams[idVideoStream]->time_base.den)/static_cast<float>(pFormatCtx->streams[idVideoStream]->time_base.num);

		// Get the duration :
		duration_sec = pFormatCtx->duration / AV_TIME_BASE;

		#ifdef __VIDEO_STREAM_VERBOSE__
			std::cout << "VideoStream::VideoStream" << std::endl;
			std::cout << "                         - Frame rate : " << timeStampFrameRate << " frames per second (for time stamps)" << std::endl;
			std::cout << "                         - Duration   : " << duration_sec << " second(s)" << std::endl;
		#endif

		// Allocate video frame :
		pFrame = avcodec_alloc_frame();

		// Allocate an AVFrame structure :
		pFrameRGB = avcodec_alloc_frame();

		if(pFrame==NULL || pFrameRGB==NULL)
			throw Exception("VideoStream::VideoStream - Failed to allocate frames (at avcodec_alloc_frame).", __FILE__, __LINE__);

		// Determine required buffer size and allocate buffer :
		bufferSizeBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
		buffer = (uint8_t *)av_malloc(bufferSizeBytes*sizeof(uint8_t));

		#ifdef __VIDEO_STREAM_VERBOSE__
			std::cout << "VideoStream::VideoStream - Frame size : " << pCodecCtx->width << "x" << pCodecCtx->height << std::endl;
		#endif

		if(buffer==NULL)
			throw Exception("VideoStream::VideoStream - Unable to allocate video frame buffer.", __FILE__, __LINE__);

		// Assign appropriate parts of buffer to image planes in pFrameRGB (Note that pFrameRGB is an AVFrame, but AVFrame is a superset of AVPicture) :
		avpicture_fill( (AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

		// Initialize libswscale :
		pSWSCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24, SWS_POINT, NULL, NULL, NULL);

		// Create format :
		HdlTextureFormat frameFormat(pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_BYTE, minFilter, magFilter, sWrapping, tWrapping, 0, maxLevel);

		// Create the texture :
		for(unsigned int i=0; i<numFrameBuffered; i++)
		{
			//old : addOutputPort("output" + to_string(i));
			textureBuffers.push_back( new HdlTexture(frameFormat) );

			// YOU MUST WRITE ONCE IN THE TEXTURE BEFORE USING PBO::copyToTexture ON IT.
			// We are also doing this to prevent reading from an empty (not-yet-allocated) texture.
			textureBuffers.back()->fill(0);

			// Set links :
			setTextureLink(textureBuffers.back(), i);
		}

		#ifdef __USE_PBO__
			// Create PBO for uploading data to the GPU :
			pbo = new HdlPBO(frameFormat, GL_PIXEL_UNPACK_BUFFER_ARB,GL_STREAM_DRAW_ARB);
		#endif

		// Finish by forcing read of first frame :
		readNextFrame();
	}
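Since timeStampFrameRate above is just the stream time base inverted (ticks per second), turning a frame's PTS into seconds is a single division; a two-line sketch with a hypothetical pts value:

/* Hypothetical conversion; pts would come from the decoded packet or frame. */
int64_t pts = 0; /* e.g. packet.pts */
float seconds = (float)pts / timeStampFrameRate;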
Example #20
bool VideoDecoder::Load()
{
  unsigned int i;
  int numBytes;
  uint8_t *tmp;

  if ( avformat_open_input(&mFormatContext, mFilename.c_str(), NULL, NULL) != 0 )
  {
    fprintf(stderr, "VideoDecoder::Load - av_open_input_file failed\n");
    return false;
  }

  if ( avformat_find_stream_info(mFormatContext, 0) < 0 )
  {
    fprintf(stderr, "VideoDecoder::Load - av_find_stream_info failed\n");
    return false;
  }

  /* Some debug info */
  av_dump_format(mFormatContext, 0, mFilename.c_str(), false);

  for (i = 0; i < mFormatContext->nb_streams; i++)
  {
    if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
    {
      mVideoStream = i;
      break;
    }
  }

  if ( mVideoStream == -1 )
  {
    fprintf(stderr, "VideoDecoder::Load - No video stream found.\n");
    return false;
  }

  mCodecContext = mFormatContext->streams[mVideoStream]->codec;
  mCodec = avcodec_find_decoder(mCodecContext->codec_id);
  if ( !mCodec )
  {
    fprintf(stderr, "VideoDecoder::Load - avcodec_find_decoder failed\n");
    return false;
  }

  if ( avcodec_open2(mCodecContext, mCodec, 0) < 0 )
  {
    fprintf(stderr, "VideoDecoder::Load - avcodec_open failed\n");
    return false;
  }

  mFrame = avcodec_alloc_frame();
  if ( !mFrame )
  {
    fprintf(stderr, "VideoDecoder::Load - Failed allocating frame.\n");
    return false;
  }

  mFrameRGB = avcodec_alloc_frame();
  if ( !mFrameRGB )
  {
    fprintf(stderr, "VideoDecoder::Load - Failed allocating RGB frame.\n");
    return false;
  }

  /* Determine required buffer size and allocate buffer */
  numBytes = avpicture_get_size(PIX_FMT_RGB24, mCodecContext->width, mCodecContext->height);
  tmp = (uint8_t *)realloc(mBuffer, numBytes * sizeof(uint8_t));
  if ( !tmp )
  {
    fprintf(stderr, "VideoDecoder::Load - Failed allocating buffer.\n");
    return false;
  }
  mBuffer = tmp;

  avpicture_fill((AVPicture *)mFrameRGB, mBuffer, PIX_FMT_RGB24, mCodecContext->width, mCodecContext->height);

  mSwsContext = sws_getContext(mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt,
                               mCodecContext->width, mCodecContext->height, PIX_FMT_RGB24,
                               SWS_BICUBIC, NULL, NULL, NULL);
  if ( !mSwsContext )
  {
    fprintf(stderr, "VideoDecoder::Load - sws_getContext failed.\n");
    return false;
  }

  mDoneReading = false;

  return true;
}
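A plausible next step, not shown in this excerpt, is the read/decode/convert loop that Load() prepares for; a sketch reusing the member names above and the same era's decode API:

/* Hypothetical decode loop for the members initialized in Load(). */
AVPacket packet;
int gotFrame = 0;
while ( av_read_frame(mFormatContext, &packet) >= 0 )
{
  if ( packet.stream_index == mVideoStream )
  {
    avcodec_decode_video2(mCodecContext, mFrame, &gotFrame, &packet);
    if ( gotFrame )
    {
      /* After this call mFrameRGB->data[0] holds packed RGB24 rows. */
      sws_scale(mSwsContext, (const uint8_t * const *)mFrame->data, mFrame->linesize,
                0, mCodecContext->height, mFrameRGB->data, mFrameRGB->linesize);
    }
  }
  av_free_packet(&packet);
}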
Example #21
void init_encode(uint8_t *imgbuffer, char *palette)
{
  /* register all the codecs */
  //avcodec_register_all();
  REGISTER_ENCODER(MJPEG, mjpeg);
  REGISTER_PARSER(MJPEG, mjpeg);

  //set the buffer with the captured frame
  inbuffer = imgbuffer;

  //set pixel format (note: strcmp, not ==, since palette is a C string)
  if (strcmp(palette, "BGR32") == 0) {
    raw_pix_fmt = AV_PIX_FMT_BGR32;
  } else if (strcmp(palette, "RGB24") == 0) {
    raw_pix_fmt = AV_PIX_FMT_RGB24;
  } else if (strcmp(palette, "RGB32") == 0) {
    raw_pix_fmt = AV_PIX_FMT_RGB32;
  } else if (strcmp(palette, "YUYV") == 0) {
    raw_pix_fmt = AV_PIX_FMT_YUYV422;
  } else if (strcmp(palette, "YUV420") == 0) {
    raw_pix_fmt = AV_PIX_FMT_YUV420P;
  } else if (strcmp(palette, "GREY") == 0) {
    raw_pix_fmt = AV_PIX_FMT_GRAY8;
  } else {
    raw_pix_fmt = AV_PIX_FMT_BGR24;  // default!
  }

  //calculate the bytes needed for the output image
  int nbytes = avpicture_get_size(YUV_PIX_FMT, out_width, out_height);

  //create buffer for the output image
  outbuffer = (uint8_t*)av_malloc(nbytes);

  //create ffmpeg frame structures.  These do not allocate space for image data,
  //just the pointers and other information about the image.
  inpic  = avcodec_alloc_frame();
  outpic = avcodec_alloc_frame();

  //this will set the pointers in the frame structures to the right points in
  //the input and output buffers.
  avpicture_fill((AVPicture*)inpic,  inbuffer,  raw_pix_fmt, in_width,  in_height);
  avpicture_fill((AVPicture*)outpic, outbuffer, YUV_PIX_FMT, out_width, out_height);

  //create the conversion context
  sws_ctx = sws_getContext(in_width,  in_height,  raw_pix_fmt,
                              out_width, out_height, YUV_PIX_FMT,
                              SWS_FAST_BILINEAR, NULL, NULL, NULL);

  /* find the mjpeg video encoder */
  codec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
  if (!codec) {
      fprintf(stderr, "encode.c: codec not found\n");
      exit(1);
  }

  //  Allocate/init a context
  c = avcodec_alloc_context3(codec);
  if (!c) {
      fprintf(stderr, "encode.c: could not allocate video codec context\n");
      exit(1);
  }

  /* put sample parameters */
  c->bit_rate = 400000;
  /* resolution must be a multiple of two */
  c->width = 320;
  c->height = 240;
  /* frames per second */
  c->time_base = (AVRational){1,25};
  c->pix_fmt = JPG_PIX_FMT;

  init_ok = 1;
}
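Note that init_encode() never calls avcodec_open2(), so the context is configured but not yet usable, and the hard-coded 320x240 must agree with out_width/out_height for the scaler output to match. A hedged sketch of the missing open plus one encode pass, assuming the avcodec_encode_video2() era API and the project's own in_height macro:

/* Hypothetical continuation: open the codec, then convert and encode one frame. */
if (avcodec_open2(c, codec, NULL) < 0) {
    fprintf(stderr, "encode.c: could not open codec\n");
    exit(1);
}

sws_scale(sws_ctx, (const uint8_t * const *)inpic->data, inpic->linesize,
          0, in_height, outpic->data, outpic->linesize);

AVPacket pkt;
av_init_packet(&pkt);
pkt.data = NULL;   /* the encoder allocates the payload */
pkt.size = 0;

int got_output = 0;
if (avcodec_encode_video2(c, &pkt, outpic, &got_output) == 0 && got_output) {
    /* pkt.data / pkt.size now hold one complete JPEG image */
    av_free_packet(&pkt);
}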
Example #22
static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, h, h1, c, n, linesize;
    uint8_t *ptr, *ptr1, *ptr2;

    if(buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200){
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    s->bytestream_start=
    s->bytestream= outbuf;
    s->bytestream_end= outbuf+buf_size;

    h = avctx->height;
    h1 = h;
    switch(avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        c = '4';
        n = (avctx->width + 7) >> 3;
        break;
    case PIX_FMT_GRAY8:
        c = '5';
        n = avctx->width;
        break;
    case PIX_FMT_RGB24:
        c = '6';
        n = avctx->width * 3;
        break;
    case PIX_FMT_YUV420P:
        c = '5';
        n = avctx->width;
        h1 = (h * 3) / 2;
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P%c\n%d %d\n",
             c, avctx->width, h1);
    s->bytestream += strlen(s->bytestream);
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        snprintf(s->bytestream, s->bytestream_end - s->bytestream,
                 "%d\n", 255);
        s->bytestream += strlen(s->bytestream);
    }

    ptr = p->data[0];
    linesize = p->linesize[0];
    for(i=0;i<h;i++) {
        memcpy(s->bytestream, ptr, n);
        s->bytestream += n;
        ptr += linesize;
    }

    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        h >>= 1;
        n >>= 1;
        ptr1 = p->data[1];
        ptr2 = p->data[2];
        for(i=0;i<h;i++) {
            memcpy(s->bytestream, ptr1, n);
            s->bytestream += n;
            memcpy(s->bytestream, ptr2, n);
            s->bytestream += n;
            ptr1 += p->linesize[1];
            ptr2 += p->linesize[2];
        }
    }

    return s->bytestream - s->bytestream_start;
}
Example #23
bool VideoReaderUnit::OpenStreams(StreamSet* set) {
  // Setup FFMPEG.
  if (!ffmpeg_initialized_) {
    ffmpeg_initialized_ = true;
    av_register_all();
  }

  // Open video file.
  AVFormatContext* format_context = nullptr;
  if (avformat_open_input (&format_context, video_file_.c_str(), nullptr, nullptr) != 0) {
    LOG(ERROR) << "Could not open file: " << video_file_;
    return false;
  }

  if (avformat_find_stream_info(format_context, nullptr) < 0) {
    LOG(ERROR) << video_file_ << " is not a valid movie file.";
    return false;
  }

  // Get video stream index.
  video_stream_idx_ = -1;

  for (uint i = 0; i < format_context->nb_streams; ++i) {
    if (format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
      video_stream_idx_ = i;
      break;
    }
  }

  if (video_stream_idx_ < 0) {
    LOG(ERROR) << "Could not find video stream in " << video_file_;
    return false;
  }

  AVCodecContext* codec_context = format_context->streams[video_stream_idx_]->codec;
  AVCodec* codec = avcodec_find_decoder (codec_context->codec_id);

  if (!codec) {
    LOG(ERROR) << "Unsupported codec for file " << video_file_;
    return false;
  }

  if (avcodec_open2(codec_context, codec, nullptr) < 0) {
    LOG(ERROR) << "Could not open codec";
    return false;
  }

  AVStream* av_stream = format_context->streams[video_stream_idx_];
  fps_ = av_q2d(av_stream->avg_frame_rate);
  LOG(INFO) << "Frame rate: " << fps_;

  // if av_q2d wasn't able to figure out the frame rate, set it 24
  if (fps_ != fps_) {
    LOG(WARNING) << "Can't figure out frame rate - Defaulting to 24";
    fps_ = 24;
  }

  // Limit to meaningful values. Sometimes avg_frame_rate.* holds garbage.
  if (fps_ < 5) {
    LOG(WARNING) << "Capping video fps_ of " << fps_ << " to " << 5;
    fps_ = 5;
  }

  if (fps_ > 60) {
    LOG(WARNING) << "Capping video fps_ of " << fps_ << " to " << 60;
    fps_ = 60;
  }

  bytes_per_pixel_ = PixelFormatToNumChannels(options_.pixel_format);
  frame_width_ = codec_context->width;
  frame_height_ = codec_context->height;

  switch (options_.downscale) {
    case VideoReaderOptions::DOWNSCALE_NONE:
      output_width_ = frame_width_;
      output_height_ = frame_height_;
      downscale_factor_ = 1.0f;
      break;

    case VideoReaderOptions::DOWNSCALE_BY_FACTOR:
      if (options_.downscale_factor > 1.0f) {
        LOG(ERROR) << "Only downscaling is supported.";
        return false;
      }

      downscale_factor_ = options_.downscale_factor;
      output_width_ = std::ceil(frame_width_ * downscale_factor_);
      output_height_ = std::ceil(frame_height_ * downscale_factor_);
      break;

    case VideoReaderOptions::DOWNSCALE_TO_MIN_SIZE:
      downscale_factor_ = std::max(options_.downscale_size * (1.0f / frame_width_),
                                   options_.downscale_size * (1.0f / frame_height_));
      // Cap to downscaling.
      downscale_factor_ = std::min(1.0f, downscale_factor_);
      output_width_ = std::ceil(frame_width_ * downscale_factor_);
      output_height_ = std::ceil(frame_height_ * downscale_factor_);
      break;

    case VideoReaderOptions::DOWNSCALE_TO_MAX_SIZE:
      downscale_factor_ = std::min(options_.downscale_size * (1.0f / frame_width_),
                                   options_.downscale_size * (1.0f / frame_height_));
      // Cap to downscaling.
      downscale_factor_ = std::min(1.0f, downscale_factor_);
      output_width_ = std::ceil(frame_width_ * downscale_factor_);
      output_height_ = std::ceil(frame_height_ * downscale_factor_);
      break;
  }

  if (downscale_factor_ != 1.0) {
    LOG(INFO) << "Downscaling by factor " << downscale_factor_
              << " from " << frame_width_ << ", " << frame_height_
              << " to " << output_width_ << ", " << output_height_;
  }

  // Force even resolutions.
  output_width_ += output_width_ % 2;
  output_width_step_ = output_width_ * bytes_per_pixel_;

  // Pad width_step to be a multiple of 4.
  if (output_width_step_ % 4 != 0) {
    output_width_step_ += 4 - output_width_step_ % 4;
    DCHECK_EQ(output_width_step_ % 4, 0);
  }

  // Save some infos for later use.
  codec_ = codec;
  codec_context_ = codec_context;
  format_context_ = format_context;

  // Allocate temporary structures.
  frame_yuv_ = av_frame_alloc();
  frame_bgr_ = av_frame_alloc();

  if (!frame_yuv_ || !frame_bgr_) {
    LOG(ERROR) << "Could not allocate AVFrames.";
    return false;
  }

  int pix_fmt;
  switch (options_.pixel_format) {
    case PIXEL_FORMAT_RGB24:
      pix_fmt = PIX_FMT_RGB24;
      break;
    case PIXEL_FORMAT_BGR24:
      pix_fmt = PIX_FMT_BGR24;
      break;
    case PIXEL_FORMAT_ARGB32:
      pix_fmt = PIX_FMT_ARGB;
      break;
    case PIXEL_FORMAT_ABGR32:
      pix_fmt = PIX_FMT_ABGR;
      break;
    case PIXEL_FORMAT_RGBA32:
      pix_fmt = PIX_FMT_RGBA;
      break;
    case PIXEL_FORMAT_BGRA32:
      pix_fmt = PIX_FMT_BGRA;
      break;
    case PIXEL_FORMAT_YUV422:
      pix_fmt = PIX_FMT_YUYV422;
      break;
    case PIXEL_FORMAT_LUMINANCE:
      pix_fmt = PIX_FMT_GRAY8;
      break;
    default:
      // Guard against reading pix_fmt uninitialized below.
      LOG(ERROR) << "Unsupported pixel format requested.";
      return false;
  }

  uint8_t* bgr_buffer = (uint8_t*)av_malloc(avpicture_get_size((::PixelFormat)pix_fmt,
                                                               output_width_,
                                                               output_height_));

  avpicture_fill((AVPicture*)frame_bgr_,
                 bgr_buffer,
                 (::PixelFormat)pix_fmt,
                 output_width_,
                 output_height_);

  // Setup SwsContext for color conversion.
  sws_context_ = sws_getContext(frame_width_,
                                frame_height_,
                                codec_context_->pix_fmt,
                                output_width_,
                                output_height_,
                                (::PixelFormat)pix_fmt,
                                SWS_BICUBIC,
                                nullptr,
                                nullptr,
                                nullptr);
  if(!sws_context_) {
    LOG(ERROR) << "Could not setup SwsContext for color conversion.";
    return false;
  }

  current_pos_ = 0;
  used_as_root_ = set->empty();
  VideoStream* vid_stream = new VideoStream(output_width_,
                                            output_height_,
                                            output_width_step_,
                                            fps_,
                                            options_.pixel_format,
                                            options_.stream_name);

  vid_stream->set_original_width(frame_width_);
  vid_stream->set_original_height(frame_height_);

  set->push_back(shared_ptr<VideoStream>(vid_stream));
  frame_num_ = 0;
  return true;
}
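The even-width and 4-byte width_step padding above is easiest to see with concrete numbers; a worked sketch (the 633-pixel width is purely illustrative):

/* e.g. a 633-pixel-wide output at 3 bytes per pixel: */
int output_width = 633;
output_width += output_width % 2;      /* 633 -> 634 (force even)         */
int width_step = output_width * 3;     /* 634 * 3 = 1902 bytes per row    */
if (width_step % 4 != 0)
    width_step += 4 - width_step % 4;  /* 1902 -> 1904 (4-byte alignment) */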
Example #24
int FfmpegCamera::OpenFfmpeg() {

    Debug ( 2, "OpenFfmpeg called." );

    mOpenStart = time(NULL);
    mIsOpening = true;

    // Open the input, not necessarily a file
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_open_input_file" );
    if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
#else
    // Handle options
    AVDictionary *opts = 0;
    StringVector opVect = split(Options(), ",");
    
    // Set transport method as specified by method field, rtpUni is default
    if ( Method() == "rtpMulti" )
    	opVect.push_back("rtsp_transport=udp_multicast");
    else if ( Method() == "rtpRtsp" )
        opVect.push_back("rtsp_transport=tcp");
    else if ( Method() == "rtpRtspHttp" )
        opVect.push_back("rtsp_transport=http");
    
  	Debug(2, "Number of Options: %d",opVect.size());
    for (size_t i=0; i<opVect.size(); i++)
    {
    	StringVector parts = split(opVect[i],"=");
    	if (parts.size() > 1) {
    		parts[0] = trimSpaces(parts[0]);
    		parts[1] = trimSpaces(parts[1]);
    	    if ( av_dict_set(&opts, parts[0].c_str(), parts[1].c_str(), 0) == 0 ) {
    	        Debug(2, "set option %d '%s' to '%s'", i,  parts[0].c_str(), parts[1].c_str());
    	    }
    	    else
    	    {
    	        Warning( "Error trying to set option %d '%s' to '%s'", i, parts[0].c_str(), parts[1].c_str() );
    	    }
    		  
    	}
    }    
	Debug ( 1, "Calling avformat_open_input" );

    mFormatContext = avformat_alloc_context( );
    mFormatContext->interrupt_callback.callback = FfmpegInterruptCallback;
    mFormatContext->interrupt_callback.opaque = this;

    if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, &opts ) !=0 )
#endif
    {
        mIsOpening = false;
        Error( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );
        return -1;
    }

    mIsOpening = false;
    Debug ( 1, "Opened input" );

    Info( "Stream open %s", mPath.c_str() );
    startTime=av_gettime();//FIXME here or after find_Stream_info
    
    //FIXME can speed up initial analysis but need sensible parameters...
    //mFormatContext->probesize = 32;
    //mFormatContext->max_analyze_duration = 32;
    // Locate stream info from avformat_open_input
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_find_stream_info" );
    if ( av_find_stream_info( mFormatContext ) < 0 )
#else
    Debug ( 1, "Calling avformat_find_stream_info" );
    if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
#endif
        Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
    
    Info( "Find stream info complete %s", mPath.c_str() );
    Debug ( 1, "Got stream info" );

    // Find first video stream present
    mVideoStreamId = -1;
    for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
    {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
        if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
        if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
		{
			mVideoStreamId = i;
			break;
		}
        if(mAudioStreamId == -1) //FIXME best way to copy all other streams?
        {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
		    if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
#else
		    if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO )
#endif
		    {
                mAudioStreamId = i;
		    }
        }
    }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mPath.c_str() );

    Debug ( 1, "Found video stream" );

    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Try and get the codec from the codec context
    if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Found decoder" );

    // Open the codec
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 7, 0)
    Debug ( 1, "Calling avcodec_open" );
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    Debug ( 1, "Calling avcodec_open2" );
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Opened codec" );

    // Allocate space for the native video frame
    mRawFrame = avcodec_alloc_frame();

    // Allocate space for the converted video frame
    mFrame = avcodec_alloc_frame();
    
    if(mRawFrame == NULL || mFrame == NULL)
        Fatal( "Unable to allocate frame for %s", mPath.c_str() );

    Debug ( 1, "Allocated frames" );
    
    int pSize = avpicture_get_size( imagePixFormat, width, height );
    if( (unsigned int)pSize != imagesize) {
        Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
    }

    Debug ( 1, "Validated imagesize" );
    
#if HAVE_LIBSWSCALE
    Debug ( 1, "Calling sws_isSupportedInput" );
    if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
        Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
    }
Example #25
// --------------------------------------------------------------------------
// ARDrone::initVideo()
// Description  : Initialize video.
// Return value : SUCCESS: 1  FAILURE: 0
// --------------------------------------------------------------------------
int ARDrone::initVideo(void)
{
    // AR.Drone 2.0
    if (version.major == ARDRONE_VERSION_2) {
        // Open the IP address and port
        char filename[256];
        sprintf(filename, "tcp://%s:%d", ip, ARDRONE_VIDEO_PORT);
        if (avformat_open_input(&pFormatCtx, filename, NULL, NULL) < 0) {
            CVDRONE_ERROR("avformat_open_input() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Retrieve and dump stream information
        avformat_find_stream_info(pFormatCtx, NULL);
        av_dump_format(pFormatCtx, 0, filename, 0);

        // Find the decoder for the video stream
        pCodecCtx = pFormatCtx->streams[0]->codec;
        AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        if (pCodec == NULL) {
            CVDRONE_ERROR("avcodec_find_decoder() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Open codec
        if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
            CVDRONE_ERROR("avcodec_open2() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Allocate video frames and a buffer
        pFrame = avcodec_alloc_frame();
        pFrameBGR = avcodec_alloc_frame();
        bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));

        // Assign appropriate parts of buffer to image planes in pFrameBGR
        avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);

        // Convert it to BGR
        pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
    }
    // AR.Drone 1.0
    else {
        // Open the IP address and port
        if (!sockVideo.open(ip, ARDRONE_VIDEO_PORT)) {
            CVDRONE_ERROR("UDPSocket::open(port=%d) was failed. (%s, %d)\n", ARDRONE_VIDEO_PORT, __FILE__, __LINE__);
            return 0;
        }

        // Set codec
        //pCodecCtx = avcodec_alloc_context();
        pCodecCtx=avcodec_alloc_context3(NULL);
        pCodecCtx->width = 320;
        pCodecCtx->height = 240;

        // Allocate a buffer
        bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height));
    }

    // Allocate an IplImage
    img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
    if (!img) {
        CVDRONE_ERROR("cvCreateImage() was failed. (%s, %d)\n", __FILE__, __LINE__);
        return 0;
    }

    // Clear the image
    cvZero(img);

    // Create a mutex
    mutexVideo = new pthread_mutex_t;
    pthread_mutex_init(mutexVideo, NULL);

    // Create a thread
    threadVideo = new pthread_t;
    if (pthread_create(threadVideo, NULL, runVideo, this) != 0) {
        CVDRONE_ERROR("pthread_create() was failed. (%s, %d)\n", __FILE__, __LINE__);
        return 0;
    }

    return 1;
}
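A note on the 368-vs-360 check in the cvCreateImage() call above: H.264 pads the coded frame height up to a multiple of 16, so a 360-line stream is reported by the decoder as 368 lines, and the extra eight rows are simply dropped for display.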
Example #26
File: v4l2.c Project: AWilco/xbmc
static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    struct video_data *s = s1->priv_data;
    AVStream *st;
    int res;
    uint32_t desired_format, capabilities;
    enum CodecID codec_id;

    st = av_new_stream(s1, 0);
    if (!st) {
        return AVERROR(ENOMEM);
    }
    av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    s->width  = ap->width;
    s->height = ap->height;

    capabilities = 0;
    s->fd = device_open(s1, &capabilities);
    if (s->fd < 0) {
        return AVERROR(EIO);
    }
    av_log(s1, AV_LOG_VERBOSE, "[%d]Capabilities: %x\n", s->fd, capabilities);

    if (!s->width && !s->height) {
        struct v4l2_format fmt;

        av_log(s1, AV_LOG_VERBOSE, "Querying the device for the current frame size\n");
        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        if (ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
            av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n", strerror(errno));
            return AVERROR(errno);
        }
        s->width  = fmt.fmt.pix.width;
        s->height = fmt.fmt.pix.height;
        av_log(s1, AV_LOG_VERBOSE, "Setting frame size to %dx%d\n", s->width, s->height);
    }

    desired_format = device_try_init(s1, ap, &s->width, &s->height, &codec_id);
    if (desired_format == 0) {
        av_log(s1, AV_LOG_ERROR, "Cannot find a proper format for "
               "codec_id %d, pix_fmt %d.\n", s1->video_codec_id, ap->pix_fmt);
        close(s->fd);

        return AVERROR(EIO);
    }
    if (av_image_check_size(s->width, s->height, 0, s1) < 0)
        return AVERROR(EINVAL);
    s->frame_format = desired_format;

    if (v4l2_set_parameters(s1, ap) < 0)
        return AVERROR(EIO);

    st->codec->pix_fmt = fmt_v4l2ff(desired_format, codec_id);
    s->frame_size = avpicture_get_size(st->codec->pix_fmt, s->width, s->height);
    if (capabilities & V4L2_CAP_STREAMING) {
        s->io_method = io_mmap;
        res = mmap_init(s1);
        if (res == 0) {
            res = mmap_start(s1);
        }
    } else {
        s->io_method = io_read;
        res = read_init(s1);
    }
    if (res < 0) {
        close(s->fd);

        return AVERROR(EIO);
    }
    s->top_field_first = first_field(s->fd);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = codec_id;
    st->codec->width = s->width;
    st->codec->height = s->height;
    st->codec->time_base.den = ap->time_base.den;
    st->codec->time_base.num = ap->time_base.num;
    st->codec->bit_rate = s->frame_size * 1/av_q2d(st->codec->time_base) * 8;

    return 0;
}
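The bit_rate assignment above is simply bytes-per-frame x frames-per-second x 8; a worked example with illustrative capture parameters:

/* e.g. 640x480 YUYV 4:2:2 captured at 25 fps (time_base = 1/25): */
int frame_size = avpicture_get_size(PIX_FMT_YUYV422, 640, 480); /* 640*480*2 = 614400 bytes */
int bit_rate   = frame_size * 25 * 8;  /* 614400 * 25 * 8 = 122880000 bit/s of raw video */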
Example #27
int main(int argc, char *argv[])
{

    AVFormatContext *pFormatCtx = NULL;
    int i, videoStream;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVFrame *pFrameCropped;
    AVFrame *pFrameRGB;
    struct SwsContext * pSwsCtx;
    AVPacket packet;
    int frameFinished;
    int numBytes;
    int numBytesCroped;
    uint8_t *buffer;

    AVDictionary * p_options = NULL;
    AVInputFormat * p_in_fmt = NULL;

    pFile = fopen("screencap.out", "wb");
    if (pFile == NULL)
        return 0;

    // Register all formats and codecs
    av_register_all();
    avcodec_register_all();
    avdevice_register_all();

    av_dict_set(&p_options, "framerate", "60", 0);
    av_dict_set(&p_options, "video_size", "1920x1080", 0);
    av_dict_set(&p_options, "qscale", "1", 0);
    p_in_fmt = av_find_input_format("x11grab");

    // Open video file
    if (avformat_open_input(&pFormatCtx, ":0.0", p_in_fmt, &p_options) != 0)
    {
        printf("cannot open input file!\n");
        return -1; // Couldn't open file
    }

    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, ":0.0", 0);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, 0) < 0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = avcodec_alloc_frame();

    int crop_x = 0, crop_y = 0, crop_h = 1080, crop_w = 1920;
    pFrameCropped = avcodec_alloc_frame();

    if (pFrameCropped == NULL)
        return -1;

    // Allocate an AVFrame structure
    pFrameRGB = avcodec_alloc_frame();
    if (pFrameRGB == NULL)
        return -1;

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(AV_PIX_FMT_YUV420P, crop_w, crop_h);
    buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *) pFrameRGB, buffer, AV_PIX_FMT_YUV420P, crop_w, crop_h);

    pSwsCtx = sws_getContext(crop_w, crop_h, pCodecCtx->pix_fmt, crop_w, crop_h, AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR,
        NULL, NULL, NULL);

    if (pSwsCtx == NULL)
    {
        fprintf(stderr, "Cannot initialize the sws context\n");
        return -1;
    }

    // Read frames and save first five frames to disk
    i = 0;
    FILE* fp = fopen("encodec.mpg", "wb");
    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream)
        { // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if (frameFinished)
            {
                sws_scale(pSwsCtx, (const uint8_t * const *) pFrame->data, pFrame->linesize, 0, crop_h, pFrameRGB->data,
                    pFrameRGB->linesize);
                int y, x;
                /* Y */
                for (y = 0; y < crop_h; y++)
                {
                    for (x = 0; x < crop_w; x++)
                    {
                        //fwrite(pFrameRGB->data[0] + y * pFrameRGB->linesize[0] + x, sizeof(uint8_t), 1, fp);
                    }
                }
                /* Cb and Cr */
                for (y = 0; y < crop_h / 2; y++)
                {
                    for (x = 0; x < crop_w / 2; x++)
                    {
                        //fwrite(pFrameRGB->data[1] + y * pFrameRGB->linesize[1] + x, sizeof(uint8_t), 1, fp);
                        //fwrite(pFrameRGB->data[2] + y * pFrameRGB->linesize[2] + x, sizeof(uint8_t), 1, fp);
                    }
                }

                video_encode_example(pFrameRGB, fp);

                // Save the frame to disk
                if (++i >= 100)
                    break;
            }
        }

        av_free_packet(&packet);
    }

    fclose(fp);
    printf("Frames read %d\n", i);
    // Free the RGB image
    av_free(buffer);
    av_free(pFrameRGB);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    // Close file
    fclose(pFile);
    return 0;
}
Example #28
bool vpFFMPEG::openEncoder(const char *filename, unsigned int w, unsigned int h, AVCodecID codec)
{
  av_register_all();

  /* find the mpeg1 video encoder */
  pCodec = avcodec_find_encoder(codec);
  if (pCodec == NULL) {
    fprintf(stderr, "codec not found\n");
    return false;
  }

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,5,0) // libavcodec 53.5.0
  pCodecCtx = avcodec_alloc_context();
#else
  pCodecCtx = avcodec_alloc_context3(NULL);
#endif

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
  pFrame = avcodec_alloc_frame();
  pFrameRGB = avcodec_alloc_frame();
#else
  pFrame = av_frame_alloc(); // libavcodec 55.34.1
  pFrameRGB = av_frame_alloc(); // libavcodec 55.34.1
#endif

  /* put sample parameters */
  pCodecCtx->bit_rate = (int)bit_rate;
  /* resolution must be a multiple of two */
  pCodecCtx->width = (int)w;
  pCodecCtx->height = (int)h;
  this->width = (int)w;
  this->height = (int)h;
  /* frames per second */
  pCodecCtx->time_base.num = 1;
  pCodecCtx->time_base.den = framerate_encoder;
  pCodecCtx->gop_size = 10; /* emit one intra frame every ten frames */
  pCodecCtx->max_b_frames=1;
  pCodecCtx->pix_fmt = PIX_FMT_YUV420P;

  /* open it */
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,35,0) // libavcodec 53.35.0
  if (avcodec_open (pCodecCtx, pCodec) < 0) {
#else
  if (avcodec_open2 (pCodecCtx, pCodec, NULL) < 0) {
#endif
    fprintf(stderr, "could not open codec\n");
    return false;
  }

  /* open the output file */
  f = fopen(filename, "wb");
  if (!f) {
    fprintf(stderr, "could not open %s\n", filename);
    return false;
  }

  outbuf_size = 100000;
  outbuf = new uint8_t[outbuf_size];

  numBytes = avpicture_get_size (PIX_FMT_YUV420P,pCodecCtx->width,pCodecCtx->height);
  picture_buf = new uint8_t[numBytes];
  avpicture_fill((AVPicture *)pFrame, picture_buf, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

  numBytes = avpicture_get_size (PIX_FMT_RGB24,pCodecCtx->width,pCodecCtx->height);
  buffer = new uint8_t[numBytes];
  avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

  img_convert_ctx= sws_getContext(pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24, pCodecCtx->width,pCodecCtx->height,PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
  
  encoderWasOpened = true;

  return true;
}


/*!
  Saves the image I as frame of the video.
  
  \param I : the image to save.
  
  \return It returns true if the image could be saved.
*/
bool vpFFMPEG::saveFrame(vpImage<vpRGBa> &I)
{
  if (encoderWasOpened == false)
  {
    vpTRACE("Couldn't save a frame. The parameters have to be initialized before ");
    return false;
  }
  
  writeBitmap(I);
  sws_scale(img_convert_ctx, pFrameRGB->data, pFrameRGB->linesize, 0, pCodecCtx->height, pFrame->data, pFrame->linesize);
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54,2,100) // libavcodec 54.2.100
  out_size = avcodec_encode_video(pCodecCtx, outbuf, outbuf_size, pFrame);
  fwrite(outbuf, 1, (size_t)out_size, f);
#else
  AVPacket pkt;
  av_init_packet(&pkt);
  pkt.data = NULL;    // packet data will be allocated by the encoder
  pkt.size = 0;

  int got_output;
  int ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_output);
  if (ret < 0) {
    std::cerr << "Error encoding frame in " << __FILE__ << " " << __LINE__ << " " << __FUNCTION__ << std::endl;
    return false;
  }
  if (got_output) {
    fwrite(pkt.data, 1, pkt.size, f);
    av_free_packet(&pkt);
  }
#endif
  fflush(stdout);
  return true;
}


/*!
  Saves the image I as frame of the video.
  
  \param I : the image to save.
  
  \return It returns true if the image could be saved.
*/
bool vpFFMPEG::saveFrame(vpImage<unsigned char> &I)
{
  if (encoderWasOpened == false)
  {
    vpTRACE("Couldn't save a frame. The parameters have to be initialized before ");
    return false;
  }
  
  writeBitmap(I);
  sws_scale(img_convert_ctx, pFrameRGB->data, pFrameRGB->linesize, 0, pCodecCtx->height, pFrame->data, pFrame->linesize);  
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54,2,100) // libavcodec 54.2.100
  out_size = avcodec_encode_video(pCodecCtx, outbuf, outbuf_size, pFrame);
  fwrite(outbuf, 1, (size_t)out_size, f);
#else
  AVPacket pkt;
  av_init_packet(&pkt);
  pkt.data = NULL;    // packet data will be allocated by the encoder
  pkt.size = 0;

  int got_output;
  int ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_output);
  if (ret < 0) {
    std::cerr << "Error encoding frame in " << __FILE__ << " " << __LINE__ << " " << __FUNCTION__ << std::endl;
    return false;
  }
  if (got_output) {
    fwrite(pkt.data, 1, pkt.size, f);
    av_free_packet(&pkt);
  }
#endif

  fflush(stdout);
  return true;
}

/*!
  Ends the writing of the video and close the file.
  
  \return It returns true if the file was closed without problem
*/
bool vpFFMPEG::endWrite()
{
  if (encoderWasOpened == false)
  {
    vpTRACE("Couldn't save a frame. The parameters have to be initialized before ");
    return false;
  }
  
  int ret = 1;
  while (ret != 0)
  {

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54,2,100) // libavcodec 54.2.100
    ret = avcodec_encode_video(pCodecCtx, outbuf, outbuf_size, NULL);
    fwrite(outbuf, 1, (size_t)ret, f);
#else
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;    // packet data will be allocated by the encoder
    pkt.size = 0;
    int got_output;
    ret = avcodec_encode_video2(pCodecCtx, &pkt, NULL, &got_output);
    if (ret < 0) {
      std::cerr << "Error encoding frame in " << __FILE__ << " " << __LINE__ << " " << __FUNCTION__ << std::endl;
      return false;
    }
    if (got_output) {
      fwrite(pkt.data, 1, pkt.size, f);
      av_free_packet(&pkt);
    }
    /* avcodec_encode_video2() itself returns 0 on success, so drive the
       flush loop from got_output until no delayed frames remain. */
    ret = got_output;
#endif
  }

  /*The end of a mpeg file*/
  outbuf[0] = 0x00;
  outbuf[1] = 0x00;
  outbuf[2] = 0x01;
  outbuf[3] = 0xb7;
  fwrite(outbuf, 1, 4, f);
  fclose(f);
  f = NULL;
  return true;
}
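For reference, the four bytes written above (00 00 01 B7) are the MPEG-1/2 sequence_end_code, the start code that formally terminates the elementary video stream.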

/*!
  This method enables to fill the frame bitmap thanks to the vpImage bitmap.
*/
void vpFFMPEG::writeBitmap(vpImage<vpRGBa> &I)
{
  unsigned char* beginInput = (unsigned char*)I.bitmap;
  unsigned char* input = NULL;
  unsigned char* beginOutput = (unsigned char*)pFrameRGB->data[0];
  int widthStep = pFrameRGB->linesize[0];
  
  for(int i=0 ; i < height ; i++)
  {
    input = beginInput + 4 * i * width;
    unsigned char *output = beginOutput + i * widthStep;
    for(int j=0 ; j < width ; j++)
    {
      *(output++) = *(input);
      *(output++) = *(input+1);
      *(output++) = *(input+2);

      input+=4;
    }
  }
}
Example #29
status_t MediaPlayer::prepareVideo()
{
	__android_log_print(ANDROID_LOG_INFO, TAG, "prepareVideo\n");
	// Find the first video stream
	mVideoStreamIndex = -1;
	for (int i = 0; i < mMovieFile->nb_streams; i++) {
		if (mMovieFile->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			mVideoStreamIndex = i;
			break;
		}
	}
	
	if (mVideoStreamIndex == -1) {
		return INVALID_OPERATION;
	}
	
	AVStream* stream = mMovieFile->streams[mVideoStreamIndex];
	// Get a pointer to the codec context for the video stream
	AVCodecContext* codec_ctx = stream->codec;
	AVCodec* codec = avcodec_find_decoder(codec_ctx->codec_id);
	if (codec == NULL) {
		return INVALID_OPERATION;
	}
	
    AVDictionary * opts = NULL;
    av_dict_set(&opts, "threads", "auto", 0);
	// Open codec
	//if (avcodec_open(codec_ctx, codec) < 0) {
	if (avcodec_open2(codec_ctx, codec, &opts) < 0) {
		return INVALID_OPERATION;
	}
	
	mVideoWidth = codec_ctx->width;
	mVideoHeight = codec_ctx->height;
	mDuration =  mMovieFile->duration;
	
	mConvertCtx = sws_getContext(stream->codec->width,
								 stream->codec->height,
								 stream->codec->pix_fmt,
								 stream->codec->width,
								 stream->codec->height,
								 PIX_FMT_RGB565,
								 SWS_POINT,
								 NULL,
								 NULL,
								 NULL);

	if (mConvertCtx == NULL) {
		return INVALID_OPERATION;
	}

	void*		pixels;
    int         size; 
#if 1

    // &pixels points to a native bitmap.
	if (Output::VideoDriver_getPixels(stream->codec->width,
									  stream->codec->height,
									  &pixels) != 0) {
		return INVALID_OPERATION;
	}
#else

    /* create temporary picture */
    size = avpicture_get_size(PIX_FMT_RGB565, stream->codec->width, stream->codec->height);
    pixels  = av_malloc(size);
    if (!pixels)
        return INVALID_OPERATION;

#endif

	mFrame = avcodec_alloc_frame();
	if (mFrame == NULL) {
		return INVALID_OPERATION;
	}
	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *) mFrame,
				   (uint8_t *) pixels,
				   PIX_FMT_RGB565,
				   stream->codec->width,
				   stream->codec->height);



	__android_log_print(ANDROID_LOG_INFO, TAG, "prepareVideo  DONE \n ");

	return NO_ERROR;
}
Example #30
static int video_open(video_t *video, const char *filename) {
    video->format = PIX_FMT_RGB24;
    if (avformat_open_input(&video->format_context, filename, NULL, NULL) ||
            avformat_find_stream_info(video->format_context, NULL) < 0) {
        fprintf(stderr, ERROR("cannot open video stream %s\n"), filename);
        goto failed;
    }

    video->stream_idx = -1;
    for (int i = 0; i < video->format_context->nb_streams; i++) {
        if (video->format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video->stream_idx = i;
            break;
        }
    }

    if (video->stream_idx == -1) {
        fprintf(stderr, ERROR("cannot find video stream\n"));
        goto failed;
    }

    AVStream *stream = video->format_context->streams[video->stream_idx];
    video->codec_context = stream->codec;
    video->codec = avcodec_find_decoder(video->codec_context->codec_id);

    /* Save Width/Height */
    video->width = video->codec_context->width;
    video->height = video->codec_context->height;

    if (!video->codec || avcodec_open2(video->codec_context, video->codec, NULL) < 0) {
        fprintf(stderr, ERROR("cannot open codec\n"));
        goto failed;
    }

    video->buffer_width = video->codec_context->width;
    video->buffer_height = video->codec_context->height;

    fprintf(stderr, INFO("pixel aspect ratio: %d/%d, size: %dx%d buffer size: %dx%d\n"), 
        video->codec_context->sample_aspect_ratio.num,
        video->codec_context->sample_aspect_ratio.den,
        video->width,
        video->height,
        video->buffer_width,
        video->buffer_height
    );

    video->par = (float)video->codec_context->sample_aspect_ratio.num / video->codec_context->sample_aspect_ratio.den;
    if (video->par == 0)
        video->par = 1;

    /* Frame rate fix for some codecs */
    if (video->codec_context->time_base.num > 1000 && video->codec_context->time_base.den == 1)
        video->codec_context->time_base.den = 1000;

    /* Get FPS */
    // http://libav-users.943685.n4.nabble.com/Retrieving-Frames-Per-Second-FPS-td946533.html
    if ((stream->time_base.den != stream->r_frame_rate.num) ||
            (stream->time_base.num != stream->r_frame_rate.den)) {
        video->fps = 1.0 / stream->r_frame_rate.den * stream->r_frame_rate.num;
    } else {
        video->fps = 1.0 / stream->time_base.num * stream->time_base.den;
    }
    fprintf(stderr, INFO("fps: %lf\n"), video->fps);

    /* Get framebuffers */
    video->raw_frame = avcodec_alloc_frame();
    video->scaled_frame = avcodec_alloc_frame();

    if (!video->raw_frame || !video->scaled_frame) {
        fprintf(stderr, ERROR("cannot preallocate frames\n"));
        goto failed;
    }

    /* Create data buffer */
    video->buffer = av_malloc(avpicture_get_size(
        video->format, 
        video->buffer_width, 
        video->buffer_height
    ));

    /* Init buffers */
    avpicture_fill(
        (AVPicture *) video->scaled_frame, 
        video->buffer, 
        video->format, 
        video->buffer_width, 
        video->buffer_height
    );

    /* Init scale & convert */
    video->scaler = sws_getContext(
        video->buffer_width,
        video->buffer_height,
        video->codec_context->pix_fmt,
        video->buffer_width, 
        video->buffer_height, 
        video->format, 
        SWS_BICUBIC, 
        NULL, 
        NULL, 
        NULL
    );

    if (!video->scaler) {
        fprintf(stderr, ERROR("scale context init failed\n"));
        goto failed;
    }

    /* Give some info on stderr about the file & stream */
    av_dump_format(video->format_context, 0, filename, 0);
    return 1;
failed:
    video_free(video);
    return 0;
}
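A hypothetical caller, assuming a video_t aggregate with the fields referenced above and a video_free() defined elsewhere in the project (neither is part of this excerpt); the input path is illustrative:

/* Hypothetical usage sketch for video_open(). */
video_t video;
memset(&video, 0, sizeof(video));
if (video_open(&video, "input.avi")) {
    /* After each decoded frame passes through video.scaler, video.buffer
       holds buffer_width x buffer_height rows of packed RGB24 pixels. */
    video_free(&video);
}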