Example #1
JNIEXPORT jint JNICALL Java_bits_jav_codec_JavFrame_nFillVideoFrame
(JNIEnv *env, jclass clazz, jlong pointer, jint w, jint h, jint pixFmt, jobject buf, jint bufOff, jint bufLen )
{
	AVFrame *pic = *(AVFrame**)&pointer;
	AVBufferRef *ref = NULL;

	int err;

	if( buf ) {
		ref = jav_buffer_wrap_bytebuffer( env, buf, bufOff, bufLen, 0 );
		//err = avpicture_fill( (AVPicture*)pic, ref->data, pixFmt, w, h );
		err = av_image_fill_arrays( pic->data, pic->linesize, ref->data, pixFmt, w, h, 1 );
	} else {
		//err = avpicture_fill( (AVPicture*)pic, NULL, pixFmt, w, h );
		err = av_image_fill_arrays( pic->data, pic->linesize, NULL, pixFmt, w, h, 1 );
	}

	if( err >= 0 ) {
		pic->buf[0] = ref;
		pic->width  = w;
		pic->height = h;
		pic->format = pixFmt;
	} else {
		av_buffer_unref( &ref );
	}

	return err;
}
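
The native wrapper above trusts bufLen; a hedged sketch of the size check a caller (or the function itself) would typically perform first, reusing the example's variables (the check itself is an assumption, not part of the original binding):

	/* Reject buffers that cannot hold the image: av_image_get_buffer_size()
	   returns the exact byte count for this format/size at align = 1. */
	int need = av_image_get_buffer_size( pixFmt, w, h, 1 );
	if( need < 0 || bufLen < need )
		return AVERROR( EINVAL );  /* invalid format or buffer too small */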
Example #2
// Initialize the input/output frame objects and their pixel buffers
void Init_video_frame_in_out(AVFrame **frameIn, AVFrame **frameOut, unsigned char **frame_buffer_in, unsigned char **frame_buffer_out, int frameWidth, int frameHeight)
{
	*frameIn = av_frame_alloc();
	*frame_buffer_in = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, frameWidth, frameHeight, 1));
	av_image_fill_arrays((*frameIn)->data, (*frameIn)->linesize, *frame_buffer_in, AV_PIX_FMT_YUV420P, frameWidth, frameHeight, 1);

	*frameOut = av_frame_alloc();
	*frame_buffer_out = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, frameWidth, frameHeight, 1));
	av_image_fill_arrays((*frameOut)->data, (*frameOut)->linesize, *frame_buffer_out, AV_PIX_FMT_YUV420P, frameWidth, frameHeight, 1);

	(*frameIn)->width = frameWidth;
	(*frameIn)->height = frameHeight;
	(*frameIn)->format = AV_PIX_FMT_YUV420P;
}
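
A matching teardown for the allocations above might look like this minimal sketch (the name Free_video_frame_in_out is hypothetical; it assumes the buffers were allocated exactly as in Init_video_frame_in_out):

// Hypothetical counterpart to Init_video_frame_in_out. av_frame_free()
// releases only the AVFrame structs; the external pixel buffers from
// av_malloc() must be freed separately.
void Free_video_frame_in_out(AVFrame **frameIn, AVFrame **frameOut, unsigned char **frame_buffer_in, unsigned char **frame_buffer_out)
{
	av_frame_free(frameIn);
	av_frame_free(frameOut);
	av_freep(frame_buffer_in);
	av_freep(frame_buffer_out);
}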
Example #3
inline int avpicture_fill(AVPicture *picture, uint8_t *ptr,
                          enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *frame = reinterpret_cast<AVFrame *>(picture);
    return av_image_fill_arrays(frame->data, frame->linesize,
                                ptr, pix_fmt, width, height, 1);
}
Example #4
FF_DISABLE_DEPRECATION_WARNINGS
int avpicture_fill(AVPicture *picture, const uint8_t *ptr,
                   enum AVPixelFormat pix_fmt, int width, int height)
{
    return av_image_fill_arrays(picture->data, picture->linesize,
                                ptr, pix_fmt, width, height, 1);
}
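
For call sites migrating off such shims, the modern replacement is direct; a minimal hedged sketch (frame, buffer, and the 640x480 YUV420P geometry are illustrative assumptions):

// Attach a packed buffer to an AVFrame without the AVPicture cast.
// align = 1 reproduces avpicture_fill()'s unpadded layout.
AVFrame *frame = av_frame_alloc();
int required = av_image_fill_arrays(frame->data, frame->linesize,
                                    buffer, AV_PIX_FMT_YUV420P, 640, 480, 1);
if (required < 0) {
    /* invalid format or dimensions */
}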
Example #5
AVFrame * RtspStreamFrameFormatter::scaleFrame(AVFrame* avFrame)
{
    Q_ASSERT(avFrame->width != 0);
    Q_ASSERT(avFrame->height != 0);
    m_width = avFrame->width;
    m_height = avFrame->height;

    updateSWSContext();

    if (!m_sws_context)
        return NULL;

    int bufSize  = av_image_get_buffer_size(m_pixelFormat, m_width, m_height, 1);
    uint8_t *buf = (uint8_t*) av_malloc(bufSize);

    AVFrame *result = av_frame_alloc();

    av_image_fill_arrays(result->data, result->linesize, buf, m_pixelFormat, m_width, m_height, 1);
    sws_scale(m_sws_context, (const uint8_t**)avFrame->data, avFrame->linesize, 0, m_height,
              result->data, result->linesize);

    result->width = m_width;
    result->height = m_height;
    result->pts = avFrame->pts;

    return result;
}
Example #6
/**
 * @brief copy frame data from buffer to AVFrame, handling stride.
 * @param f destination AVFrame
 * @param src source buffer, does not use any line-stride
 * @param width width of the video frame
 * @param height height of the video frame
 */
static void copy_frame(AVFrame *f, const uint8_t *src, int width, int height)
{
    uint8_t *src_data[4];
    int src_linesize[4];
    av_image_fill_arrays(src_data, src_linesize, src,
                         f->format, width, height, 1);
    av_image_copy(f->data, f->linesize, (const uint8_t **)src_data, src_linesize,
                  f->format, width, height);
}
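
For context, a minimal hedged sketch of how a helper like copy_frame might be driven; the packed_rgb buffer and the 640x480 RGB24 geometry are assumptions for illustration:

// Copy a tightly packed RGB24 buffer into a freshly allocated AVFrame.
AVFrame *f = av_frame_alloc();
f->format = AV_PIX_FMT_RGB24;
f->width  = 640;
f->height = 480;
av_frame_get_buffer(f, 0);                       /* FFmpeg picks the linesizes */
copy_frame(f, packed_rgb, f->width, f->height);  /* bridges the stride mismatch */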
Example #7
bool VideoFrame::convertToYUV420()
{
    QMutexLocker locker(&biglock);

    if (frameYUV420)
        return true;

    AVFrame* sourceFrame;
    if (frameOther)
    {
        sourceFrame = frameOther;
    }
    else if (frameRGB24)
    {
        sourceFrame = frameRGB24;
    }
    else
    {
        qCritical() << "None of the frames are valid! Did someone release us?";
        return false;
    }
    //std::cout << "converting to YUV420" << std::endl;

    frameYUV420=av_frame_alloc();
    if (!frameYUV420)
    {
        qCritical() << "av_frame_alloc failed";
        return false;
    }

    int imgBufferSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 1); // must match the YUV420P layout filled below
    uint8_t* buf = (uint8_t*)av_malloc(imgBufferSize);
    if (!buf)
    {
        qCritical() << "av_malloc failed";
        av_frame_free(&frameYUV420);
        return false;
    }
    frameYUV420->opaque = buf;

    uint8_t** data = frameYUV420->data;
    int* linesize = frameYUV420->linesize;
    av_image_fill_arrays(data, linesize, buf, AV_PIX_FMT_YUV420P, width, height, 1);

    SwsContext *swsCtx =  sws_getContext(width, height, (AVPixelFormat)pixFmt,
                                          width, height, AV_PIX_FMT_YUV420P,
                                          SWS_BILINEAR, nullptr, nullptr, nullptr);
    sws_scale(swsCtx, (uint8_t const * const *)sourceFrame->data,
                sourceFrame->linesize, 0, height,
                frameYUV420->data, frameYUV420->linesize);
    sws_freeContext(swsCtx);

    return true;
}
Example #8
int movie_picture_buffer::write(AVFrame* pFrame, double dPts)
{
    movie_picture* pMoviePicture = nullptr;
    std::unique_lock<std::mutex> picBufLock(mutex);
    while(unsafe_full() && !aborting)
    {
        cond.wait(picBufLock);
    }
    picBufLock.unlock();

    if(aborting) { return -1; }

    pMoviePicture = &picture_queue[write_index];
    std::unique_lock<std::mutex> pictureLock(pMoviePicture->mutex);

    if(pMoviePicture->buffer)
    {
        sws_context = sws_getCachedContext(sws_context, pFrame->width, pFrame->height, (AVPixelFormat)pFrame->format, pMoviePicture->width, pMoviePicture->height, pMoviePicture->pixel_format, SWS_BICUBIC, nullptr, nullptr, nullptr);
        if(sws_context == nullptr)
        {
            std::cerr << "Failed to initialize SwsContext\n";
            return 1;
        }

        /* Allocate a new frame and buffer for the destination RGB24 data. */
        AVFrame *pFrameRGB = av_frame_alloc();
#if (defined(CORSIX_TH_USE_LIBAV) && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54, 6, 0)) || \
    (defined(CORSIX_TH_USE_FFMPEG) && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 63, 100))
        av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, pMoviePicture->buffer, pMoviePicture->pixel_format, pMoviePicture->width, pMoviePicture->height, 1);
#else
        avpicture_fill((AVPicture *)pFrameRGB, pMoviePicture->buffer, pMoviePicture->pixel_format, pMoviePicture->width, pMoviePicture->height);
#endif

        /* Rescale the frame data and convert it to RGB24. */
        sws_scale(sws_context, pFrame->data, pFrame->linesize, 0, pFrame->height, pFrameRGB->data, pFrameRGB->linesize);

        av_frame_free(&pFrameRGB);

        pMoviePicture->pts = dPts;

        pictureLock.unlock();
        write_index++;
        if(write_index == picture_buffer_size)
        {
            write_index = 0;
        }
        picBufLock.lock();
        picture_count++;
        picBufLock.unlock();
    }

    return 0;
}
Example #9
int THMoviePictureBuffer::write(AVFrame* pFrame, double dPts)
{
    THMoviePicture* pMoviePicture = nullptr;
    SDL_LockMutex(m_pMutex);
    while(full() && !m_fAborting)
    {
        SDL_CondWait(m_pCond, m_pMutex);
    }
    SDL_UnlockMutex(m_pMutex);
    if(m_fAborting) { return -1; }

    pMoviePicture = &m_aPictureQueue[m_iWriteIndex];
    SDL_LockMutex(pMoviePicture->m_pMutex);

    if(pMoviePicture->m_pBuffer)
    {
        m_pSwsContext = sws_getCachedContext(m_pSwsContext, pFrame->width, pFrame->height, (AVPixelFormat)pFrame->format, pMoviePicture->m_iWidth, pMoviePicture->m_iHeight, pMoviePicture->m_pixelFormat, SWS_BICUBIC, nullptr, nullptr, nullptr);
        if(m_pSwsContext == nullptr)
        {
            SDL_UnlockMutex(m_aPictureQueue[m_iWriteIndex].m_pMutex);
            std::cerr << "Failed to initialize SwsContext\n";
            return 1;
        }

        /* Allocate a new frame and buffer for the destination RGB24 data. */
        AVFrame *pFrameRGB = av_frame_alloc();
#if (defined(CORSIX_TH_USE_LIBAV) && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54, 6, 0)) || \
    (defined(CORSIX_TH_USE_FFMPEG) && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51, 63, 100))
        av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, pMoviePicture->m_pBuffer, pMoviePicture->m_pixelFormat, pMoviePicture->m_iWidth, pMoviePicture->m_iHeight, 1);
#else
        avpicture_fill((AVPicture *)pFrameRGB, pMoviePicture->m_pBuffer, pMoviePicture->m_pixelFormat, pMoviePicture->m_iWidth, pMoviePicture->m_iHeight);
#endif

        /* Rescale the frame data and convert it to RGB24. */
        sws_scale(m_pSwsContext, pFrame->data, pFrame->linesize, 0, pFrame->height, pFrameRGB->data, pFrameRGB->linesize);

        av_frame_free(&pFrameRGB);

        pMoviePicture->m_dPts = dPts;

        SDL_UnlockMutex(m_aPictureQueue[m_iWriteIndex].m_pMutex);
        m_iWriteIndex++;
        if(m_iWriteIndex == ms_pictureBufferSize)
        {
            m_iWriteIndex = 0;
        }
        SDL_LockMutex(m_pMutex);
        m_iCount++;
        SDL_UnlockMutex(m_pMutex);
    }

    return 0;
}
Example #10
VideoFrame::VideoFrame(const uint8_t *data, size_t size, PixelFormat pixelFormat, int width, int height, int align)
    : VideoFrame(pixelFormat, width, height, align)
{
    size_t calcSize = av_image_get_buffer_size(pixelFormat, width, height, align);
    if (calcSize != size)
        throw length_error("Data size and required buffer for this format/width/height/align not equal");

    uint8_t *buf[4];
    int      linesize[4];
    av_image_fill_arrays(buf, linesize, data, pixelFormat, width, height, align);

    // copy data plane by plane; linesize[i] covers only one row, so copy
    // whole planes with av_image_copy() rather than a single-row std::copy
    av_image_copy(m_raw->data, m_raw->linesize,
                  const_cast<const uint8_t **>(buf), linesize,
                  pixelFormat, width, height);
}
Example #11
static int ffmal_copy_frame(AVCodecContext *avctx,  AVFrame *frame,
                            MMAL_BUFFER_HEADER_T *buffer)
{
    MMALDecodeContext *ctx = avctx->priv_data;
    int ret = 0;

    if (avctx->pix_fmt == AV_PIX_FMT_MMAL) {
        if (!ctx->pool_out)
            return AVERROR_UNKNOWN; // format change code failed with OOM previously

        if ((ret = ff_decode_frame_props(avctx, frame)) < 0)
            goto done;

        if ((ret = ffmmal_set_ref(frame, ctx->pool_out, buffer)) < 0)
            goto done;
    } else {
        int w = FFALIGN(avctx->width, 32);
        int h = FFALIGN(avctx->height, 16);
        uint8_t *src[4];
        int linesize[4];

        if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
            goto done;

        av_image_fill_arrays(src, linesize,
                             buffer->data + buffer->type->video.offset[0],
                             avctx->pix_fmt, w, h, 1);
        av_image_copy(frame->data, frame->linesize, src, linesize,
                      avctx->pix_fmt, avctx->width, avctx->height);
    }

    frame->pts = buffer->pts == MMAL_TIME_UNKNOWN ? AV_NOPTS_VALUE : buffer->pts;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_pts = frame->pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    frame->pkt_dts = AV_NOPTS_VALUE;

done:
    return ret;
}
Example #12
 unsigned int Encode(unsigned char* pFrame, bool bKeyframe)
 {
     uint8_t * pImage[1] = { pFrame }; // RGB32 have one plane
     int nLinesize[1] = { m_pAVCodecContext->width * 4 };
     if (sws_scale(m_Context, pImage, nLinesize, 0, m_X264CodecContext.GetHeight(), m_pYuvPlanes, m_nYuvStrides) != m_X264CodecContext.GetHeight())
     {
         throw std::runtime_error("sws_scale failed");
     }
     m_pFrame->pict_type = bKeyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_NONE;
     CHECK_FFMPEG(av_image_fill_arrays(m_pFrame->data, m_pFrame->linesize, m_pYuvFrame.get(), static_cast<AVPixelFormat>(m_pFrame->format), m_X264CodecContext.GetWidth(), m_X264CodecContext.GetHeight(), 1));
     int nGotPacketPtr;
     CHECK_FFMPEG((avcodec_encode_video2(m_pAVCodecContext.get(), &m_Packet, m_pFrame, &nGotPacketPtr) == 0 && nGotPacketPtr));
     unsigned int nSize = m_Packet.size;
     if (m_X264CodecContext.GetSaveOutputToFile())
     {
         fwrite(m_Packet.data, 1, m_Packet.size, m_pOutputFile);
     }
     m_Packet.size = 2 * 1024 * 1024;
     return nSize;
 }
Example #13
BOOL  ffplayer::CapturePic(char *pSaveFile, int iType)
{
	if (m_iWidth == 0 || m_iHeight == 0 || !m_picSnopshot)
	{
		return false;
	}

	unsigned char *buf = (unsigned char*)malloc(m_iWidth * m_iHeight * 3);

	AVFrame dstFrame;

	if (NULL != buf)
	{
		av_image_fill_arrays(dstFrame.data, dstFrame.linesize, (uint8_t*)m_picSnopshot->Data, AV_PIX_FMT_YUV420P, m_pCodecCtx->width, m_pCodecCtx->height, 1);
		YUV420_RGB_KP(&dstFrame, buf, m_iWidth, m_iHeight); // YUV420_RGB_KP is an internal function
		
	}

	if (strstr(pSaveFile, ".jpg") || strstr(pSaveFile, ".JPG") || strstr(pSaveFile, ".jpeg"))
	{
		writeJPGFile(pSaveFile, (char*)buf, m_iWidth, m_iHeight);
	}
	else
	{
		writeBMPFile(pSaveFile, (char*)buf, m_iWidth, m_iHeight);
	}

	if (buf)
	{
		free(buf);
		buf = NULL;
	}
	return true;
}
Example #14
int VideoFrame::allocate()
{
    Q_D(VideoFrame);
    if (pixelFormatFFmpeg() == QTAV_PIX_FMT_C(NONE) || width() <=0 || height() <= 0) {
        qWarning("Not valid format(%s) or size(%dx%d)", qPrintable(format().name()), width(), height());
        return 0;
    }
#if 0
    const int align = 16;
    int bytes = av_image_get_buffer_size((AVPixelFormat)d->format.pixelFormatFFmpeg(), width(), height(), align);
    d->data.resize(bytes);
    av_image_fill_arrays(d->planes.data(), d->line_sizes.data()
                         , (const uint8_t*)d->data.constData()
                         , (AVPixelFormat)d->format.pixelFormatFFmpeg()
                         , width(), height(), align);
    return bytes;
#endif
    int bytes = avpicture_get_size((AVPixelFormat)pixelFormatFFmpeg(), width(), height());
    if (d->data.size() < bytes) {
        d->data = QByteArray(bytes, 0);
    }
    init();
    return bytes;
}
Example #15
int MP4Decoder::InitDecoder(const char *mp4Path) {
    // 1. Register all components
    av_register_all();
    // 2. Allocate the AVFormatContext
    pFormatCtx = avformat_alloc_context();

    // 3. Open the input file
    if (avformat_open_input(&pFormatCtx, mp4Path, NULL, NULL) != 0) {
        LOGE("could not open input stream");
        return -1;
    }
    // 4. Read the stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        LOGE("could not find stream information");
        return -1;
    }
    // Find the index of the video stream
    int videoIndex = -1;
    for (int i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoIndex = i;
            break;
        }
    if (videoIndex == -1) {
        LOGE("could not find a video stream");
        return -1;
    }
    // 5. Find the decoder
    pCodec = avcodec_find_decoder(pFormatCtx->streams[videoIndex]->codecpar->codec_id);
    if (pCodec == NULL) {
        LOGE("could not find Codec");
        return -1;
    }

    // 6. Configure and open the decoder
    pCodecCtx = avcodec_alloc_context3(pCodec);
    avcodec_parameters_to_context(pCodecCtx, pFormatCtx->streams[videoIndex]->codecpar);
    pCodecCtx->thread_count = 1;

    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        LOGE("could not open codec");
        return -1;
    }

    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    int bufferSize = av_image_get_buffer_size(AV_PIX_FMT_YUV420P,
                                              pCodecCtx->width,
                                              pCodecCtx->height, 1);
    uint8_t *out_buffer = (unsigned char *) av_malloc(bufferSize);
    av_image_fill_arrays(pFrameYUV->data,
                         pFrameYUV->linesize,
                         out_buffer,
                         AV_PIX_FMT_YUV420P,
                         pCodecCtx->width,
                         pCodecCtx->height, 1);

    pAvPacket = (AVPacket *) av_malloc(sizeof(AVPacket));

    pSwsContext = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                 pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P,
                                 SWS_BICUBIC, NULL, NULL, NULL);
    return 0;
}
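
Given the members InitDecoder sets up, the decode loop that typically follows might look like this hedged sketch built on the send/receive API; OnFrame() is a hypothetical consumer, and videoIndex is assumed to be kept by the class:

// Sketch of a decode loop driving the state initialized above.
while (av_read_frame(pFormatCtx, pAvPacket) >= 0) {
    if (pAvPacket->stream_index == videoIndex &&
        avcodec_send_packet(pCodecCtx, pAvPacket) == 0) {
        while (avcodec_receive_frame(pCodecCtx, pFrame) == 0) {
            // convert the decoder's native format into the YUV420P buffer
            sws_scale(pSwsContext, (const uint8_t *const *)pFrame->data,
                      pFrame->linesize, 0, pCodecCtx->height,
                      pFrameYUV->data, pFrameYUV->linesize);
            OnFrame(pFrameYUV); // hypothetical sink
        }
    }
    av_packet_unref(pAvPacket);
}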
Example #16
bool ThumbFinder::getFrameImage(bool needKeyFrame, int64_t requiredPTS)
{
    AVPacket pkt;
    AVFrame orig;
    AVFrame retbuf;
    memset(&orig, 0, sizeof(AVFrame));
    memset(&retbuf, 0, sizeof(AVFrame));

    av_init_packet(&pkt);

    int frameFinished = 0;
    int keyFrame;
    int frameCount = 0;
    bool gotKeyFrame = false;

    while (av_read_frame(m_inputFC, &pkt) >= 0 && !frameFinished)
    {
        if (pkt.stream_index == m_videostream)
        {
            frameCount++;

            keyFrame = pkt.flags & AV_PKT_FLAG_KEY;

            if (m_startPTS == -1 && pkt.dts != AV_NOPTS_VALUE)
            {
                m_startPTS = pkt.dts;
                m_frameTime = pkt.duration;
            }

            if (keyFrame)
                gotKeyFrame = true;

            if (!gotKeyFrame && needKeyFrame)
            {
                av_packet_unref(&pkt);
                continue;
            }

            if (m_firstIFramePTS == -1)
                m_firstIFramePTS = pkt.dts;

            av_frame_unref(m_frame);
            frameFinished = 0;
            int ret = avcodec_receive_frame(m_codecCtx, m_frame);
            if (ret == 0)
                frameFinished = 1;
            if (ret == 0 || ret == AVERROR(EAGAIN))
                ret = avcodec_send_packet(m_codecCtx, &pkt);
            if (requiredPTS != -1 && pkt.dts != AV_NOPTS_VALUE && pkt.dts < requiredPTS)
                frameFinished = false;

            m_currentPTS = pkt.dts;
        }

        av_packet_unref(&pkt);
    }

    if (frameFinished)
    {
        av_image_fill_arrays(retbuf.data, retbuf.linesize, m_outputbuf,
            AV_PIX_FMT_RGB32, m_frameWidth, m_frameHeight, IMAGE_ALIGN);
        AVFrame *tmp = m_frame;

        m_deinterlacer->DeinterlaceSingle(tmp, tmp);

        m_copy.Copy(&retbuf, AV_PIX_FMT_RGB32, tmp, m_codecCtx->pix_fmt,
                    m_frameWidth, m_frameHeight);

        QImage img(m_outputbuf, m_frameWidth, m_frameHeight,
                   QImage::Format_RGB32);

        QByteArray ffile = m_frameFile.toLocal8Bit();
        if (!img.save(ffile.constData(), "JPEG"))
        {
            LOG(VB_GENERAL, LOG_ERR, "Failed to save thumb: " + m_frameFile);
        }

        if (m_updateFrame)
        {
            MythImage *mimage =
                GetMythMainWindow()->GetCurrentPainter()->GetFormatImage();
            mimage->Assign(img);
            m_frameImage->SetImage(mimage);
            mimage->DecrRef();
        }

        updateCurrentPos();
    }

    return true;
}
Example #17
//http://blog.csdn.net/leixiaohua1020/article/details/39770947
int main(int argc, char **argv){
    
    AVFormatContext *fmt_ctx = NULL;
    AVCodec * c;
    AVOutputFormat* fmt;
    AVStream * vstream;
    AVFrame *frame = NULL;
    AVCodecContext *video_enc_ctx = NULL;
    AVPacket pkt;
    unsigned char * frame_buf; 
    int ret, size, y_size;
    int got_frame=0;

    if(argc != 2){
        fprintf(stderr, "usage as:%s filename\n", argv[0]);
        exit(1);
    }
    av_register_all();

    inityuv();
    //Method 1: compose several calls by hand
    fmt_ctx = avformat_alloc_context();
    //Guess the output format from the file name
    fmt = av_guess_format(NULL, argv[1], NULL);
    fmt_ctx->oformat = fmt;
    
    //Method 2: a more automated alternative
    //avformat_alloc_output_context2(&fmt_ctx, NULL, NULL, argv[1]);
    //fmt = fmt_ctx->oformat;
    
    
    c = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!c){
        printf("Can not find encoder! \n");
        exit(4);
    }
    vstream = avformat_new_stream(fmt_ctx, c);
    if(vstream == NULL){
        printf("avformat_new_stream fail\n");
        exit(3);
    }
    vstream->time_base = (AVRational){1,25};
    vstream->codec->codec_id = AV_CODEC_ID_H264;
    vstream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    vstream->codec->pix_fmt = AV_PIX_FMT_YUV420P;
    vstream->codec->width = 128;
    vstream->codec->height = 128;
    vstream->codec->time_base = (AVRational){1,25};
    vstream->codec->gop_size = 5; // short clips do not need a large GOP
    vstream->codec->max_b_frames = 0; // B-frames can drop the final pictures of very short clips
    vstream->codec->qmin = 15;
    vstream->codec->qmax = 35;
    if(vstream->codec->codec_id == AV_CODEC_ID_H264){
        printf("set priv_data\n");
       av_opt_set(vstream->codec->priv_data, "preset", "slow", 0);
    }
    av_dump_format(fmt_ctx, 0, argv[1], 1);

    if (avcodec_open2(vstream->codec, c, NULL) < 0){
        printf("Failed to open encoder! \n");
        exit(5);
     }  
    video_enc_ctx = vstream->codec;

    if(avio_open(&fmt_ctx->pb, argv[1], AVIO_FLAG_READ_WRITE) < 0){
        printf("avio_open my.pm4 fail\n");
        exit(3);
    }
    if(avformat_write_header(fmt_ctx, NULL)<0){
        printf("avformat_write_header fail\n");
        exit(3);
    }

    frame = av_frame_alloc();
    //avframe_get_size no longer exists
    size = av_image_get_buffer_size(video_enc_ctx->pix_fmt, video_enc_ctx->width, video_enc_ctx->height, 1);
    frame_buf = (uint8_t *)av_malloc(size);
    //avframe_fill and avpicture_fill are both deprecated
    av_image_fill_arrays(frame->data, frame->linesize, frame_buf, video_enc_ctx->pix_fmt, video_enc_ctx->width, video_enc_ctx->height, 1);
    
    y_size = video_enc_ctx->width * video_enc_ctx->height;

    //av_new_packet(&pkt,y_size*3);
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    int i = 0;
    //the raw YUV file must be read by hand; there is no av_read_frame for it
    while(fread(frame_buf, 1, y_size * 3 / 2, vfile)>0){
        frame->data[0] = frame_buf;               // luma (Y)
        frame->data[1] = frame_buf + y_size;      // U
        frame->data[2] = frame_buf + y_size*5/4;  // V
        //PTS
        frame->pts = i++ * 90000 / 25; // advance the per-frame timestamp for the mp4 mux
        got_frame=0;
        //Encode
        ret = avcodec_encode_video2(video_enc_ctx, &pkt,frame, &got_frame);
        if(ret < 0){
            printf("Failed to encode! 编码错误!\n");
            return -1;
        }
        if (got_frame==1){
            printf("Succeed to encode 1 frame! 编码成功1帧!\n");
            pkt.stream_index = vstream->index;
            ret = av_write_frame(fmt_ctx, &pkt);
            // av_free_packet(&pkt);
            av_packet_unref(&pkt);
        }

    }
    if(!feof(vfile)){
        printf("fread error:%s\n", strerror(errno));
        exit(1);
    }

    ret = avcodec_encode_video2(video_enc_ctx, &pkt, NULL, &got_frame); //frame is null to flush
    av_write_trailer(fmt_ctx);


    avcodec_close(video_enc_ctx);
    avformat_close_input(&fmt_ctx);
    if (vfile)
        fclose(vfile);
    av_frame_free(&frame);
    av_packet_unref(&pkt); //h264toyuv.c needs this too; not added there yet
}
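
Note that the single flush call near the end of this example never writes the delayed packets it produces; a fuller drain, sketched here with the example's own variables (placement before av_write_trailer assumed), loops until the encoder runs dry:

// Drain the encoder: keep passing NULL frames until got_frame stays 0,
// writing each delayed packet before the trailer.
for (;;) {
    got_frame = 0;
    ret = avcodec_encode_video2(video_enc_ctx, &pkt, NULL, &got_frame);
    if (ret < 0 || !got_frame)
        break;
    pkt.stream_index = vstream->index;
    av_write_frame(fmt_ctx, &pkt);
    av_packet_unref(&pkt);
}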
Example #18
int hardsubx_process_data(struct lib_hardsubx_ctx *ctx)
{
	// Get the required media attributes and initialize structures
	av_register_all();
	
	if(avformat_open_input(&ctx->format_ctx, ctx->inputfile[0], NULL, NULL)!=0)
	{
		fatal (EXIT_READ_ERROR, "Error reading input file!\n");
	}

	if(avformat_find_stream_info(ctx->format_ctx, NULL)<0)
	{
		fatal (EXIT_READ_ERROR, "Error reading input stream!\n");
	}

	// Important call in order to determine media information using ffmpeg
	// TODO: Handle multiple inputs
	av_dump_format(ctx->format_ctx, 0, ctx->inputfile[0], 0);
	

	ctx->video_stream_id = -1;
	for(int i = 0; i < ctx->format_ctx->nb_streams; i++)
	{
		if(ctx->format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			ctx->video_stream_id = i;
			break;
		}
	}
	if(ctx->video_stream_id == -1)
	{
		fatal (EXIT_READ_ERROR, "Video Stream not found!\n");
	}

	ctx->codec_ctx = ctx->format_ctx->streams[ctx->video_stream_id]->codec;
	ctx->codec = avcodec_find_decoder(ctx->codec_ctx->codec_id);
	if(ctx->codec == NULL)
	{
		fatal (EXIT_READ_ERROR, "Input codec is not supported!\n");
	}

	if(avcodec_open2(ctx->codec_ctx, ctx->codec, &ctx->options_dict) < 0)
	{
		fatal (EXIT_READ_ERROR, "Error opening input codec!\n");
	}

	ctx->frame = av_frame_alloc();
	ctx->rgb_frame = av_frame_alloc();
	if(!ctx->frame || !ctx->rgb_frame)
	{
		fatal(EXIT_NOT_ENOUGH_MEMORY, "Not enough memory to initialize frame!");
	}

	int frame_bytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, ctx->codec_ctx->width, ctx->codec_ctx->height, 16);
	ctx->rgb_buffer = (uint8_t *)av_malloc(frame_bytes*sizeof(uint8_t));
	
	ctx->sws_ctx = sws_getContext(
			ctx->codec_ctx->width,
			ctx->codec_ctx->height,
			ctx->codec_ctx->pix_fmt,
			ctx->codec_ctx->width,
			ctx->codec_ctx->height,
			AV_PIX_FMT_RGB24,
			SWS_BILINEAR,
			NULL,NULL,NULL
		);

	av_image_fill_arrays(ctx->rgb_frame->data, ctx->rgb_frame->linesize, ctx->rgb_buffer, AV_PIX_FMT_RGB24, ctx->codec_ctx->width, ctx->codec_ctx->height, 1);

	// int frame_bytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, 1280, 720, 16);
	// ctx->rgb_buffer = (uint8_t *)av_malloc(frame_bytes*sizeof(uint8_t));
	
	// ctx->sws_ctx = sws_getContext(
	// 		ctx->codec_ctx->width,
	// 		ctx->codec_ctx->height,
	// 		ctx->codec_ctx->pix_fmt,
	// 		1280,
	// 		720,
	// 		AV_PIX_FMT_RGB24,
	// 		SWS_BILINEAR,
	// 		NULL,NULL,NULL
	// 	);
	// avpicture_fill((AVPicture*)ctx->rgb_frame, ctx->rgb_buffer, AV_PIX_FMT_RGB24, 1280, 720);
	// av_image_fill_arrays(ctx->rgb_frame->data, ctx->rgb_frame->linesize, ctx->rgb_buffer, AV_PIX_FMT_RGB24, 1280, 720, 1);

	// Pass on the processing context to the appropriate functions
	struct encoder_ctx *enc_ctx;
	enc_ctx = init_encoder(&ccx_options.enc_cfg);
	
	mprint("Beginning burned-in subtitle detection...\n");
	hardsubx_process_frames_linear(ctx, enc_ctx);

	dinit_encoder(&enc_ctx, 0); //TODO: Replace 0 with end timestamp

	// Free the allocated memory for frame processing
	av_free(ctx->rgb_buffer);
	av_free(ctx->rgb_frame);
	av_free(ctx->frame);
	avcodec_close(ctx->codec_ctx);
	avformat_close_input(&ctx->format_ctx);

	return 0;
}
Example #19
QByteArray AVDecoder::WriteJPEG(AVCodecContext *pCodecCtx, AVFrame *pFrame, int width, int height)
{
    AVCodecContext *pOCodecCtx;
    AVCodec        *pOCodec;

    QByteArray data;

    pOCodec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);

    if (!pOCodec) {
        return data;
    }

    SwsContext *sws_ctx = sws_getContext(
                pCodecCtx->width, pCodecCtx->height,
                pCodecCtx->pix_fmt,
                width, height,
                AV_PIX_FMT_YUV420P, SWS_BICUBIC,
                NULL, NULL, NULL);

    if(!sws_ctx) {
        return data;
    }

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
    AVFrame *pFrameRGB = av_frame_alloc();
#else
    AVFrame *pFrameRGB = avcodec_alloc_frame();
#endif

    if(pFrameRGB == NULL) {
        sws_freeContext(sws_ctx);
        return data;
    }

    // detect ffmpeg (>= 100) or libav (< 100)
#if (LIBAVUTIL_VERSION_MICRO >= 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,63,100)) || \
    (LIBAVUTIL_VERSION_MICRO < 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54,6,0))
    int numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 16);
#else
    int numBytes = avpicture_get_size(PIX_FMT_YUVJ420P, width, height);
#endif

    uint8_t *buffer = (uint8_t *)av_malloc(numBytes);

    if(!buffer) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        avcodec_free_frame(&pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    // detect ffmpeg (>= 100) or libav (< 100)
#if (LIBAVUTIL_VERSION_MICRO >= 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,63,100)) || \
    (LIBAVUTIL_VERSION_MICRO < 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54,6,0))
    av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, buffer, AV_PIX_FMT_YUV420P, width, height, 1);
#else
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_YUVJ420P, width, height);
#endif

    sws_scale(
        sws_ctx,
        pFrame->data,
        pFrame->linesize,
        0,
        pCodecCtx->height,
        pFrameRGB->data,
        pFrameRGB->linesize
    );

    pOCodecCtx = avcodec_alloc_context3(pOCodec);

    if(pOCodecCtx == NULL) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0)
        avcodec_free_context(&pOCodecCtx);
#else
        avcodec_close(pOCodecCtx);
        av_free(pOCodecCtx);
#endif
        av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        avcodec_free_frame(&pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    pOCodecCtx->bit_rate      = pCodecCtx->bit_rate;
    pOCodecCtx->width         = width;
    pOCodecCtx->height        = height;
    pOCodecCtx->pix_fmt       = AV_PIX_FMT_YUVJ420P;
    pOCodecCtx->color_range   = AVCOL_RANGE_JPEG;
    pOCodecCtx->codec_id      = AV_CODEC_ID_MJPEG;
    pOCodecCtx->codec_type    = AVMEDIA_TYPE_VIDEO;
    pOCodecCtx->time_base.num = pCodecCtx->time_base.num;
    pOCodecCtx->time_base.den = pCodecCtx->time_base.den;

    AVDictionary *opts = NULL;
    if(avcodec_open2(pOCodecCtx, pOCodec, &opts) < 0) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0)
        avcodec_free_context(&pOCodecCtx);
#else
        avcodec_close(pOCodecCtx);
        av_free(pOCodecCtx);
#endif
        av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        avcodec_free_frame(&pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    av_opt_set_int(pOCodecCtx, "lmin", pOCodecCtx->qmin * FF_QP2LAMBDA, 0);
    av_opt_set_int(pOCodecCtx, "lmax", pOCodecCtx->qmax * FF_QP2LAMBDA, 0);

    pOCodecCtx->mb_lmin        = pOCodecCtx->qmin * FF_QP2LAMBDA;
    pOCodecCtx->mb_lmax        = pOCodecCtx->qmax * FF_QP2LAMBDA;
    pOCodecCtx->flags          = CODEC_FLAG_QSCALE;
    pOCodecCtx->global_quality = pOCodecCtx->qmin * FF_QP2LAMBDA;

    pFrame->pts     = 1;
    pFrame->quality = pOCodecCtx->global_quality;

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    int gotPacket;

    avcodec_encode_video2(pOCodecCtx, &pkt, pFrameRGB, &gotPacket);

    QByteArray buffer2(reinterpret_cast<char *>(pkt.data), pkt.size);

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0)
        avcodec_free_context(&pOCodecCtx);
#else
        avcodec_close(pOCodecCtx);
        av_free(pOCodecCtx);
#endif
    av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
    av_frame_free(&pFrameRGB);
#else
    avcodec_free_frame(&pFrameRGB);
#endif
    sws_freeContext(sws_ctx);

    return buffer2;
}
Example #20
int decode_thread(void *arg) {

  VideoState *is = (VideoState *)arg;
  AVFormatContext *pFormatCtx = NULL;
  AVPacket pkt1, *packet = &pkt1;

  AVDictionary *io_dict = NULL;
  AVIOInterruptCB callback;

  int video_index = -1;
  int audio_index = -1;
  int i;

  is->videoStream=-1;
  is->audioStream=-1;

  global_video_state = is;
  // will interrupt blocking functions if we quit!
  callback.callback = decode_interrupt_cb;
  callback.opaque = is;
  if (avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict))
  {
    fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
    return -1;
  }

  // Open video file
  if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL)!=0)
    return -1; // Couldn't open file

  is->pFormatCtx = pFormatCtx;
  
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, is->filename, 0);
  
  // Find the first video stream

  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       video_index < 0) {
      video_index=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audio_index < 0) {
      audio_index=i;
    }
  }
  if(audio_index >= 0) {
    stream_component_open(is, audio_index);
  }
  if(video_index >= 0) {
    stream_component_open(is, video_index);
  }   

  if(is->videoStream < 0 || is->audioStream < 0) {
    fprintf(stderr, "%s: could not open codecs\n", is->filename);
    goto fail;
  }
    
    AVCodecContext  *pCodecCtx = NULL;
    pCodecCtx =pFormatCtx->streams[video_index]->codec;
    m_pSdlTexture = SDL_CreateTexture(m_pRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
                                      pCodecCtx->width, pCodecCtx->height);
    
    m_pFrameYUV = av_frame_alloc();
    uint8_t *out_buffer = (uint8_t *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
    av_image_fill_arrays(m_pFrameYUV->data, m_pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);

  // main decode loop

  for(;;) {
    if(is->quit) {
      break;
    }
    // seek stuff goes here
    if(is->audioq.size > MAX_AUDIOQ_SIZE ||
       is->videoq.size > MAX_VIDEOQ_SIZE) {
      SDL_Delay(10);
      continue;
    }
    if(av_read_frame(is->pFormatCtx, packet) < 0) {
      if(is->pFormatCtx->pb->error == 0) {
	SDL_Delay(100); /* no error; wait for user input */
	continue;
      } else {
	break;
      }
    }
    // Is this a packet from the video stream?
    if(packet->stream_index == is->videoStream) {
      packet_queue_put(&is->videoq, packet);
    } else if(packet->stream_index == is->audioStream) {
      packet_queue_put(&is->audioq, packet);
    } else {
      av_free_packet(packet);
    }
  }
  /* all done - wait for it */
  while(!is->quit) {
    SDL_Delay(100);
  }

 fail:
  {
    SDL_Event event;
    event.type = FF_QUIT_EVENT;
    event.user.data1 = is;
    SDL_PushEvent(&event);
  }
  return 0;
}
Example #21
int main(int argc, char* argv[])
{

	AVFormatContext	*pFormatCtx;
	int				i, videoindex;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;
	AVFrame	*pFrame,*pFrameYUV;
	unsigned char *out_buffer;
	AVPacket *packet;
	int ret, got_picture;

	//------------SDL----------------
	int screen_w,screen_h;
	SDL_Window *screen; 
	SDL_Renderer* sdlRenderer;
	SDL_Texture* sdlTexture;
	SDL_Rect sdlRect;
	SDL_Thread *video_tid;
	SDL_Event event;

	struct SwsContext *img_convert_ctx;

	//char filepath[]="bigbuckbunny_480x272.h265";
	char filepath[]="Titanic.ts";

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
	if(avformat_find_stream_info(pFormatCtx,NULL)<0){
		printf("Couldn't find stream information.\n");
		return -1;
	}
	videoindex=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++) 
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}
	if(videoindex==-1){
		printf("Didn't find a video stream.\n");
		return -1;
	}
	pCodecCtx=pFormatCtx->streams[videoindex]->codec;
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL){
		printf("Codec not found.\n");
		return -1;
	}
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
		printf("Could not open codec.\n");
		return -1;
	}
	pFrame=av_frame_alloc();
	pFrameYUV=av_frame_alloc();

	out_buffer=(unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P,  pCodecCtx->width, pCodecCtx->height,1));
	av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize,out_buffer,
		AV_PIX_FMT_YUV420P,pCodecCtx->width, pCodecCtx->height,1);

	//Output Info-----------------------------
	printf("---------------- File Information ---------------\n");
	av_dump_format(pFormatCtx,0,filepath,0);
	printf("-------------------------------------------------\n");
	
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 
		pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 
	

	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
		printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
		return -1;
	} 
	//SDL 2.0 Support for multiple windows
	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
		screen_w, screen_h,SDL_WINDOW_OPENGL);

	if(!screen) {  
		printf("SDL: could not create window - exiting:%s\n",SDL_GetError());  
		return -1;
	}
	sdlRenderer = SDL_CreateRenderer(screen, -1, 0);  
	//IYUV: Y + U + V  (3 planes)
	//YV12: Y + V + U  (3 planes)
	sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);  

	sdlRect.x=0;
	sdlRect.y=0;
	sdlRect.w=screen_w;
	sdlRect.h=screen_h;

	packet=(AVPacket *)av_malloc(sizeof(AVPacket));

	video_tid = SDL_CreateThread(sfp_refresh_thread,NULL,NULL);
	//------------SDL End------------
	//Event Loop
	
	for (;;) {
		//Wait
		SDL_WaitEvent(&event);
		if(event.type==SFM_REFRESH_EVENT){
			while(1){
				if(av_read_frame(pFormatCtx, packet)<0)
					thread_exit=1;

				if(packet->stream_index==videoindex)
					break;
			}
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
			if(ret < 0){
				printf("Decode Error.\n");
				return -1;
			}
			if(got_picture){
				sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
				//SDL---------------------------
				SDL_UpdateTexture( sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0] );  
				SDL_RenderClear( sdlRenderer );  
				//SDL_RenderCopy( sdlRenderer, sdlTexture, &sdlRect, &sdlRect );  
				SDL_RenderCopy( sdlRenderer, sdlTexture, NULL, NULL);  
				SDL_RenderPresent( sdlRenderer );  
				//SDL End-----------------------
			}
			av_free_packet(packet);
		}else if(event.type==SDL_KEYDOWN){
			//Pause
			if(event.key.keysym.sym==SDLK_SPACE)
				thread_pause=!thread_pause;
		}else if(event.type==SDL_QUIT){
			thread_exit=1;
		}else if(event.type==SFM_BREAK_EVENT){
			break;
		}

	}

	sws_freeContext(img_convert_ctx);

	SDL_Quit();
	//--------------
	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}
Example #22
bool CFFmpegImage::CreateThumbnailFromSurface(unsigned char* bufferin, unsigned int width,
                                             unsigned int height, unsigned int format,
                                             unsigned int pitch,
                                             const std::string& destFile,
                                             unsigned char* &bufferout,
                                             unsigned int &bufferoutSize)
{
  // It seems XB_FMT_A8R8G8B8 means RGBA and not ARGB
  if (format != XB_FMT_A8R8G8B8)
  {
    CLog::Log(LOGERROR, "Supplied format: %d is not supported.", format);
    return false;
  }

  bool jpg_output = false;
  if (m_strMimeType == "image/jpeg" || m_strMimeType == "image/jpg")
    jpg_output = true;
  else if (m_strMimeType == "image/png")
    jpg_output = false;
  else
  {
    CLog::Log(LOGERROR, "Output Format is not supported: %s is not supported.", destFile.c_str());
    return false;
  }

  ThumbDataManagement tdm;

  tdm.codec = avcodec_find_encoder(jpg_output ? AV_CODEC_ID_MJPEG : AV_CODEC_ID_PNG);
  if (!tdm.codec)
  {
    CLog::Log(LOGERROR, "You are missing a working encoder for format: %s", jpg_output ? "JPEG" : "PNG");
    return false;
  }

  tdm.avOutctx = avcodec_alloc_context3(tdm.codec);
  if (!tdm.avOutctx)
  {
    CLog::Log(LOGERROR, "Could not allocate context for thumbnail: %s", destFile.c_str());
    return false;
  }

  tdm.avOutctx->height = height;
  tdm.avOutctx->width = width;
  tdm.avOutctx->time_base.num = 1;
  tdm.avOutctx->time_base.den = 1;
  tdm.avOutctx->pix_fmt = jpg_output ? AV_PIX_FMT_YUVJ420P : AV_PIX_FMT_RGBA;
  tdm.avOutctx->flags = AV_CODEC_FLAG_QSCALE;
  tdm.avOutctx->mb_lmin = tdm.avOutctx->qmin * FF_QP2LAMBDA;
  tdm.avOutctx->mb_lmax = tdm.avOutctx->qmax * FF_QP2LAMBDA;
  tdm.avOutctx->global_quality = tdm.avOutctx->qmin * FF_QP2LAMBDA;

  unsigned int internalBufOutSize = 0;

  int size = av_image_get_buffer_size(tdm.avOutctx->pix_fmt, tdm.avOutctx->width, tdm.avOutctx->height, 16);
  if (size < 0)
  {
    CLog::Log(LOGERROR, "Could not compute picture size for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }
  internalBufOutSize = (unsigned int) size;

  tdm.intermediateBuffer = (uint8_t*) av_malloc(internalBufOutSize);
  if (!tdm.intermediateBuffer)
  {
    CLog::Log(LOGERROR, "Could not allocate memory for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  if (avcodec_open2(tdm.avOutctx, tdm.codec, NULL) < 0)
  {
    CLog::Log(LOGERROR, "Could not open avcodec context thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  tdm.frame_input = av_frame_alloc();
  if (!tdm.frame_input)
  {
    CLog::Log(LOGERROR, "Could not allocate frame for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  // convert the RGB32 frame to AV_PIX_FMT_YUV420P - we use this later on as AV_PIX_FMT_YUVJ420P
  tdm.frame_temporary = av_frame_alloc();
  if (!tdm.frame_temporary)
  {
    CLog::Log(LOGERROR, "Could not allocate frame for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  if (av_image_fill_arrays(tdm.frame_temporary->data, tdm.frame_temporary->linesize, tdm.intermediateBuffer, jpg_output ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_RGBA, width, height, 16) < 0)
  {
    CLog::Log(LOGERROR, "Could not fill picture for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  uint8_t* src[] = { bufferin, NULL, NULL, NULL };
  int srcStride[] = { (int) pitch, 0, 0, 0};

  //input size == output size which means only pix_fmt conversion
  tdm.sws = sws_getContext(width, height, AV_PIX_FMT_RGB32, width, height, jpg_output ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_RGBA, 0, 0, 0, 0);
  if (!tdm.sws)
  {
    CLog::Log(LOGERROR, "Could not setup scaling context for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  // Setup jpeg range for sws
  if (jpg_output)
  {
    int* inv_table = nullptr;
    int* table = nullptr;
    int srcRange, dstRange, brightness, contrast, saturation;

    if (sws_getColorspaceDetails(tdm.sws, &inv_table, &srcRange, &table, &dstRange, &brightness, &contrast, &saturation) < 0)
    {
      CLog::Log(LOGERROR, "SWS_SCALE failed to get ColorSpaceDetails for thumbnail: %s", destFile.c_str());
      CleanupLocalOutputBuffer();
      return false;
    }
    dstRange = 1; // jpeg full range yuv420p output
    srcRange = 0; // full range RGB32 input
    if (sws_setColorspaceDetails(tdm.sws, inv_table, srcRange, table, dstRange, brightness, contrast, saturation) < 0)
    {
      CLog::Log(LOGERROR, "SWS_SCALE failed to set ColorSpace Details for thumbnail: %s", destFile.c_str());
      CleanupLocalOutputBuffer();
      return false;
    }
  }

  if (sws_scale(tdm.sws, src, srcStride, 0, height, tdm.frame_temporary->data, tdm.frame_temporary->linesize) < 0)
  {
    CLog::Log(LOGERROR, "SWS_SCALE failed for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }
  tdm.frame_input->pts = 1;
  tdm.frame_input->quality = tdm.avOutctx->global_quality;
  tdm.frame_input->data[0] = tdm.frame_temporary->data[0];
  tdm.frame_input->data[1] = tdm.frame_temporary->data[1];
  tdm.frame_input->data[2] = tdm.frame_temporary->data[2];
  tdm.frame_input->height = height;
  tdm.frame_input->width = width;
  tdm.frame_input->linesize[0] = tdm.frame_temporary->linesize[0];
  tdm.frame_input->linesize[1] = tdm.frame_temporary->linesize[1];
  tdm.frame_input->linesize[2] = tdm.frame_temporary->linesize[2];
  // this is deprecated but mjpeg is not yet transitioned
  tdm.frame_input->format = jpg_output ? AV_PIX_FMT_YUVJ420P : AV_PIX_FMT_RGBA;

  int got_package = 0;
  AVPacket avpkt;
  av_init_packet(&avpkt);
  // encoder will allocate memory
  avpkt.data = nullptr;
  avpkt.size = 0;

  int ret = EncodeFFmpegFrame(tdm.avOutctx, &avpkt, &got_package, tdm.frame_input);

  if ((ret < 0) || (got_package == 0))
  {
    CLog::Log(LOGERROR, "Could not encode thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  bufferoutSize = avpkt.size;
  m_outputBuffer = (uint8_t*) av_malloc(bufferoutSize);
  if (!m_outputBuffer)
  {
    CLog::Log(LOGERROR, "Could not generate allocate memory for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    av_packet_unref(&avpkt);
    return false;
  }
  // update buffer ptr for caller
  bufferout = m_outputBuffer;

  // copy avpkt data into outputbuffer
  memcpy(m_outputBuffer, avpkt.data, avpkt.size);
  av_packet_unref(&avpkt);

  return true;
}
Example #23
bool CFFmpegImage::DecodeFrame(AVFrame* frame, unsigned int width, unsigned int height, unsigned int pitch, unsigned char * const pixels)
{
  if (pixels == nullptr)
  {
    CLog::Log(LOGERROR, "%s - No valid buffer pointer (nullptr) passed", __FUNCTION__);
    return false;
  }

  AVFrame* pictureRGB = av_frame_alloc();
  if (!pictureRGB)
  {
    CLog::LogF(LOGERROR, "AVFrame could not be allocated");
    return false;
  }

  // we align on 16 as the input provided by the Texture also aligns the buffer size to 16
  int size = av_image_fill_arrays(pictureRGB->data, pictureRGB->linesize, NULL, AV_PIX_FMT_RGB32, width, height, 16);
  if (size < 0)
  {
    CLog::LogF(LOGERROR, "Could not allocate AVFrame member with %i x %i pixes", width, height);
    av_frame_free(&pictureRGB);
    return false;
  }

  bool needsCopy = false;
  int pixelsSize = pitch * height;
  bool aligned = (((uintptr_t)(const void *)(pixels)) % (32) == 0);
  if (!aligned)
    CLog::Log(LOGDEBUG, "Alignment of external buffer is not suitable for ffmpeg intrinsics - please fix your malloc");

  if (aligned && size == pixelsSize && (int)pitch == pictureRGB->linesize[0])
  {
    // We can use the pixels buffer directly
    pictureRGB->data[0] = pixels;
  }
  else
  {
    // We need an extra buffer and copy it manually afterwards
    pictureRGB->format = AV_PIX_FMT_RGB32;
    pictureRGB->width = width;
    pictureRGB->height = height;
    // we copy the data manually later so give a chance to intrinsics (e.g. mmx, neon)
    if (av_frame_get_buffer(pictureRGB, 32) < 0)
    {
      CLog::LogF(LOGERROR, "Could not allocate temp buffer of size %i bytes", size);
      av_frame_free(&pictureRGB);
      return false;
    }
    needsCopy = true;
  }

  // Especially jpeg formats are full range this we need to take care here
  // Input Formats like RGBA are handled correctly automatically
  AVColorRange range = frame->color_range;
  AVPixelFormat pixFormat = ConvertFormats(frame);

  // assumption quadratic maximums e.g. 2048x2048
  float ratio = m_width / (float)m_height;
  unsigned int nHeight = m_originalHeight;
  unsigned int nWidth = m_originalWidth;
  if (nHeight > height)
  {
    nHeight = height;
    nWidth = (unsigned int)(nHeight * ratio + 0.5f);
  }
  if (nWidth > width)
  {
    nWidth = width;
    nHeight = (unsigned int)(nWidth / ratio + 0.5f);
  }

  struct SwsContext* context = sws_getContext(m_originalWidth, m_originalHeight, pixFormat,
    nWidth, nHeight, AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);

  if (range == AVCOL_RANGE_JPEG)
  {
    int* inv_table = nullptr;
    int* table = nullptr;
    int srcRange, dstRange, brightness, contrast, saturation;
    sws_getColorspaceDetails(context, &inv_table, &srcRange, &table, &dstRange, &brightness, &contrast, &saturation);
    srcRange = 1;
    sws_setColorspaceDetails(context, inv_table, srcRange, table, dstRange, brightness, contrast, saturation);
  }

  sws_scale(context, frame->data, frame->linesize, 0, m_originalHeight,
    pictureRGB->data, pictureRGB->linesize);
  sws_freeContext(context);

  if (needsCopy)
  {
    int minPitch = std::min((int)pitch, pictureRGB->linesize[0]);
    if (minPitch < 0)
    {
      CLog::LogF(LOGERROR, "negative pitch or height");
      av_frame_free(&pictureRGB);
      return false;
    }
    const unsigned char *src = pictureRGB->data[0];
    unsigned char* dst = pixels;

    for (unsigned int y = 0; y < nHeight; y++)
    {
      memcpy(dst, src, minPitch);
      src += pictureRGB->linesize[0];
      dst += pitch;
    }
    av_frame_free(&pictureRGB);
  }
  else
  {
    // we only lent the data, so don't let it get deleted
    pictureRGB->data[0] = nullptr;
    av_frame_free(&pictureRGB);
  }

  // update width and height original dimensions are kept
  m_height = nHeight;
  m_width = nWidth;

  return true;
}
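
The NULL-source call at the top of DecodeFrame exploits a useful property: when src is NULL, av_image_fill_arrays() still computes linesizes and returns the required buffer size without attaching any memory. A minimal hedged sketch of the query pattern in isolation:

// Query-only call: the dst pointers become offsets from NULL (never
// dereference them); the return value is the byte count a real buffer needs.
uint8_t *data[4];
int linesize[4];
int required = av_image_fill_arrays(data, linesize, NULL,
                                    AV_PIX_FMT_RGB32, 1920, 1080, 16);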
Example #24
int main(int argc, char *argv[]) {
	// Initializing these to NULL prevents segfaults!
	AVFormatContext   *pFormatCtx = nullptr;
	int               i, videoStream,audioStream;
	AVCodecContext    *pCodecCtxOrig = nullptr;
	AVCodecContext    *pCodecCtx = nullptr;
	AVCodec           *pCodec = nullptr;
	AVFrame           *pFrame = nullptr;
	AVFrame           *pFrameYUV = nullptr;
	AVPacket          packet;
	int               frameFinished;
	int               numBytes;
	uint8_t           *buffer = nullptr;
	struct SwsContext *sws_ctx = nullptr;
	HANDLE hAudioPlay = nullptr;
	const char *inputFile = "rtmp://live.hkstv.hk.lxdns.com/live/hks";

	//SDL---------------------------  
	int screen_w = 0, screen_h = 0;
	SDL_Window *screen;
	SDL_Renderer* sdlRenderer;
	SDL_Texture* sdlTexture;
	SDL_Rect sdlRect;

	// Register all formats and codecs
	av_register_all();
	avformat_network_init();

	// Open video file
	if (avformat_open_input(&pFormatCtx, inputFile, NULL, NULL) != 0)
		return -1; // Couldn't open file

				   // Retrieve stream information
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
		return -1; // Couldn't find stream information

				   // Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, inputFile, 0);

	// Find the first video stream
	videoStream = -1;
	audioStream = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
	{
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
			videoStream = i;
		else if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
			audioStream = i;
	}
		
		
	if (videoStream == -1)
		return -1; // Didn't find a video stream

	if (audioStream == -1)
		return -1; // Didn't find an audio stream

	// Get a pointer to the codec context for the video stream
	pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec;
	// Find the decoder for the video stream
	pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id);
	if (pCodec == NULL) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; // Codec not found
	}
	// Copy context
	pCodecCtx = avcodec_alloc_context3(pCodec);
	if (avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
		fprintf(stderr, "Couldn't copy codec context");
		return -1; // Error copying codec context
	}

	// Open codec
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
		return -1; // Could not open codec

				   // Allocate video frame
	pFrame = av_frame_alloc();

	// Allocate an AVFrame structure
	pFrameYUV = av_frame_alloc();
	if (pFrameYUV == NULL)
		return -1;

	// Determine required buffer size and allocate buffer
	numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width,
		pCodecCtx->height, 1);
	buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

	// Assign appropriate parts of buffer to image planes in pFrameYUV
	// Note that pFrameYUV is an AVFrame, but AVFrame is a superset
	// of AVPicture
	av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, buffer, AV_PIX_FMT_YUV420P,
		pCodecCtx->width, pCodecCtx->height, 1);

	// initialize SWS context for software scaling
	sws_ctx = sws_getContext(pCodecCtx->width,
		pCodecCtx->height,
		pCodecCtx->pix_fmt,
		pCodecCtx->width,
		pCodecCtx->height,
		AV_PIX_FMT_YUV420P,
		SWS_BILINEAR,
		NULL,
		NULL,
		NULL
		);

	//SDL
	if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		printf("Could not initialize SDL - %s\n", SDL_GetError());
		return -1;
	}
	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	//SDL 2.0 Support for multiple windows  
	screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
		screen_w, screen_h,
		SDL_WINDOW_OPENGL);

	if (!screen) {
		printf("SDL: could not create window - exiting:%s\n", SDL_GetError());
		return -1;
	}

	sdlRenderer = SDL_CreateRenderer(screen, -1, 0);
	sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, pCodecCtx->width, pCodecCtx->height);

	sdlRect.x = 0;
	sdlRect.y = 0;
	sdlRect.w = screen_w;
	sdlRect.h = screen_h;

	// Read frames and save first five frames to disk
	i = 0;
	while (av_read_frame(pFormatCtx, &packet) >= 0) {
		// Is this a packet from the video stream?
		if (packet.stream_index == videoStream) {
			// Decode video frame
			int iret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

			// Did we get a video frame?
			if (frameFinished) {
				// Convert the image from its native format to YUV
				sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
					pFrame->linesize, 0, pCodecCtx->height,
					pFrameYUV->data, pFrameYUV->linesize);

				// SDL
				SDL_UpdateTexture(sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0]);
				SDL_RenderClear(sdlRenderer);
				SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, &sdlRect);
				SDL_RenderPresent(sdlRenderer);
				//SDL End-----------------------  
				double time = (double)packet.duration * 1000 / (((*(pFormatCtx->streams))->time_base.den) / ((*(pFormatCtx->streams))->time_base.num));
				SDL_Delay(time);

			}
		}

		// Free the packet that was allocated by av_read_frame
		av_packet_unref(&packet);
	}

	SDL_Quit();
	// Free the YUV image
	//av_free(buffer);
	av_frame_free(&pFrameYUV);

	// Free the YUV frame
	av_frame_free(&pFrame);

	// Close the codecs
	avcodec_close(pCodecCtx);
	avcodec_close(pCodecCtxOrig);

	// Close the video file
	avformat_close_input(&pFormatCtx);

	return 0;
}
Example #25
static int omx_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *frame, int *got_packet)
{
    OMXCodecContext *s = avctx->priv_data;
    int ret = 0;
    OMX_BUFFERHEADERTYPE* buffer;
    OMX_ERRORTYPE err;

    if (frame) {
        uint8_t *dst[4];
        int linesize[4];
        int need_copy;
        buffer = get_buffer(&s->input_mutex, &s->input_cond,
                            &s->num_free_in_buffers, s->free_in_buffers, 1);

        buffer->nFilledLen = av_image_fill_arrays(dst, linesize, buffer->pBuffer, avctx->pix_fmt, s->stride, s->plane_size, 1);

        if (s->input_zerocopy) {
            uint8_t *src[4] = { NULL };
            int src_linesize[4];
            av_image_fill_arrays(src, src_linesize, frame->data[0], avctx->pix_fmt, s->stride, s->plane_size, 1);
            if (frame->linesize[0] == src_linesize[0] &&
                frame->linesize[1] == src_linesize[1] &&
                frame->linesize[2] == src_linesize[2] &&
                frame->data[1] == src[1] &&
                frame->data[2] == src[2]) {
                // If the input frame happens to have all planes stored contiguously,
                // with the right strides, just clone the frame and set the OMX
                // buffer header to point to it
                AVFrame *local = av_frame_clone(frame);
                if (!local) {
                    // Return the buffer to the queue so it's not lost
                    append_buffer(&s->input_mutex, &s->input_cond, &s->num_free_in_buffers, s->free_in_buffers, buffer);
                    return AVERROR(ENOMEM);
                } else {
                    buffer->pAppPrivate = local;
                    buffer->pOutputPortPrivate = NULL;
                    buffer->pBuffer = local->data[0];
                    need_copy = 0;
                }
            } else {
                // If not, we need to allocate a new buffer with the right
                // size and copy the input frame into it.
                uint8_t *buf = NULL;
                int image_buffer_size = av_image_get_buffer_size(avctx->pix_fmt, s->stride, s->plane_size, 1);
                if (image_buffer_size >= 0)
                    buf = av_malloc(image_buffer_size);
                if (!buf) {
                    // Return the buffer to the queue so it's not lost
                    append_buffer(&s->input_mutex, &s->input_cond, &s->num_free_in_buffers, s->free_in_buffers, buffer);
                    return AVERROR(ENOMEM);
                } else {
                    buffer->pAppPrivate = buf;
                    // Mark that pAppPrivate is an av_malloc'ed buffer, not an AVFrame
                    buffer->pOutputPortPrivate = (void*) 1;
                    buffer->pBuffer = buf;
                    need_copy = 1;
                    buffer->nFilledLen = av_image_fill_arrays(dst, linesize, buffer->pBuffer, avctx->pix_fmt, s->stride, s->plane_size, 1);
                }
            }
        } else {
            need_copy = 1;
        }
        if (need_copy)
            av_image_copy(dst, linesize, (const uint8_t**) frame->data, frame->linesize, avctx->pix_fmt, avctx->width, avctx->height);
        buffer->nFlags = OMX_BUFFERFLAG_ENDOFFRAME;
        buffer->nOffset = 0;
        // Convert the timestamps to microseconds; some encoders can ignore
        // the framerate and do VFR bit allocation based on timestamps.
        buffer->nTimeStamp = to_omx_ticks(av_rescale_q(frame->pts, avctx->time_base, AV_TIME_BASE_Q));
        err = OMX_EmptyThisBuffer(s->handle, buffer);
        if (err != OMX_ErrorNone) {
            append_buffer(&s->input_mutex, &s->input_cond, &s->num_free_in_buffers, s->free_in_buffers, buffer);
            av_log(avctx, AV_LOG_ERROR, "OMX_EmptyThisBuffer failed: %x\n", err);
            return AVERROR_UNKNOWN;
        }
        s->num_in_frames++;
    }

    while (!*got_packet && ret == 0) {
        // Only wait for output if flushing and not all frames have been output
        buffer = get_buffer(&s->output_mutex, &s->output_cond,
                            &s->num_done_out_buffers, s->done_out_buffers,
                            !frame && s->num_out_frames < s->num_in_frames);
        if (!buffer)
            break;

        if (buffer->nFlags & OMX_BUFFERFLAG_CODECCONFIG && avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
            if ((ret = av_reallocp(&avctx->extradata, avctx->extradata_size + buffer->nFilledLen + AV_INPUT_BUFFER_PADDING_SIZE)) < 0) {
                avctx->extradata_size = 0;
                goto end;
            }
            memcpy(avctx->extradata + avctx->extradata_size, buffer->pBuffer + buffer->nOffset, buffer->nFilledLen);
            avctx->extradata_size += buffer->nFilledLen;
            memset(avctx->extradata + avctx->extradata_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        } else {
            if (buffer->nFlags & OMX_BUFFERFLAG_ENDOFFRAME)
                s->num_out_frames++;
            if (!(buffer->nFlags & OMX_BUFFERFLAG_ENDOFFRAME) || !pkt->data) {
                // If the output packet isn't preallocated, just concatenate everything in our
                // own buffer
                int newsize = s->output_buf_size + buffer->nFilledLen + AV_INPUT_BUFFER_PADDING_SIZE;
                if ((ret = av_reallocp(&s->output_buf, newsize)) < 0) {
                    s->output_buf_size = 0;
                    goto end;
                }
                memcpy(s->output_buf + s->output_buf_size, buffer->pBuffer + buffer->nOffset, buffer->nFilledLen);
                s->output_buf_size += buffer->nFilledLen;
                if (buffer->nFlags & OMX_BUFFERFLAG_ENDOFFRAME) {
                    if ((ret = av_packet_from_data(pkt, s->output_buf, s->output_buf_size)) < 0) {
                        av_freep(&s->output_buf);
                        s->output_buf_size = 0;
                        goto end;
                    }
                    s->output_buf = NULL;
                    s->output_buf_size = 0;
                }
            } else {
                // End of frame, and the caller provided a preallocated frame
                if ((ret = ff_alloc_packet2(avctx, pkt, s->output_buf_size + buffer->nFilledLen, 0)) < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n",
                           (int)(s->output_buf_size + buffer->nFilledLen));
                    goto end;
                }
                memcpy(pkt->data, s->output_buf, s->output_buf_size);
                memcpy(pkt->data + s->output_buf_size, buffer->pBuffer + buffer->nOffset, buffer->nFilledLen);
                av_freep(&s->output_buf);
                s->output_buf_size = 0;
            }
            if (buffer->nFlags & OMX_BUFFERFLAG_ENDOFFRAME) {
                pkt->pts = av_rescale_q(from_omx_ticks(buffer->nTimeStamp), AV_TIME_BASE_Q, avctx->time_base);
                // We don't currently enable B-frames for the encoders, so set
                // pkt->dts = pkt->pts. (The calling code behaves worse if the encoder
                // doesn't set the dts).
                pkt->dts = pkt->pts;
                if (buffer->nFlags & OMX_BUFFERFLAG_SYNCFRAME)
                    pkt->flags |= AV_PKT_FLAG_KEY;
                *got_packet = 1;
            }
        }
end:
        err = OMX_FillThisBuffer(s->handle, buffer);
        if (err != OMX_ErrorNone) {
            append_buffer(&s->output_mutex, &s->output_cond, &s->num_done_out_buffers, s->done_out_buffers, buffer);
            av_log(avctx, AV_LOG_ERROR, "OMX_FillThisBuffer failed: %x\n", err);
            ret = AVERROR_UNKNOWN;
        }
    }
    return ret;
}
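The zerocopy branch above relies on av_image_fill_arrays() working as a pure layout calculator: given a base pointer it computes the plane pointers and strides a contiguous image would have, and returns the total byte size. A minimal sketch (format and dimensions are illustrative):

uint8_t *data[4];
int      linesize[4];
// With a NULL base pointer the call still returns the required buffer size
// (negative AVERROR on failure); the data[] entries are then plain offsets.
int required = av_image_fill_arrays(data, linesize, NULL,
                                    AV_PIX_FMT_YUV420P, 1920, 1080, 1);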
Пример #26
int main(int argc, char* argv[])
{
    AVFormatContext *pFormatCtx;
    int             i, videoindex;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame *pFrame, *pFrameYUV;
    unsigned char *out_buffer;
    AVPacket *packet;
    int y_size;
    int ret, got_picture;
    struct SwsContext *img_convert_ctx;

    char filepath[] = "F:\\codes\\simplest_ffmpeg_player\\simplest_ffmpeg_player\\bigbuckbunny_480x272.h265";
    //SDL---------------------------
    int screen_w = 0, screen_h = 0;
//  SDL_Window *screen;
//  SDL_Renderer* sdlRenderer;
//  SDL_Texture* sdlTexture;
//  SDL_Rect sdlRect;

    FILE *fp_yuv;

    av_register_all();
    avformat_network_init();
    pFormatCtx = avformat_alloc_context();

    /* Note: on some setups the file path may need to be absolute */
    if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
        printf("Couldn't open input stream.\n");
        return -1;
    }
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        printf("Couldn't find stream information.\n");
        return -1;
    }
    videoindex = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoindex = i;
            break;
        }
    if (videoindex == -1) {
        printf("Didn't find a video stream.\n");
        return -1;
    }

    pCodecCtx = pFormatCtx->streams[videoindex]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        printf("Codec not found.\n");
        return -1;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open codec.\n");
        return -1;
    }

    pFrame = av_frame_alloc();
    pFrameYUV = av_frame_alloc();
    out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
    av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer,
                         AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);

    packet = (AVPacket *)av_malloc(sizeof(AVPacket));
    //Output Info-----------------------------
    printf("--------------- File Information ----------------\n");
    av_dump_format(pFormatCtx, 0, filepath, 0);
    printf("-------------------------------------------------\n");
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P,
                                     SWS_BICUBIC, NULL, NULL, NULL);

#if OUTPUT_YUV420P
    fp_yuv = fopen("output.yuv", "wb+");
#endif

//  if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
//      printf( "Could not initialize SDL - %s\n", SDL_GetError());
//      return -1;
//  }

    screen_w = pCodecCtx->width;
    screen_h = pCodecCtx->height;
    //SDL 2.0 Support for multiple windows
//  screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
//      screen_w, screen_h,
//      SDL_WINDOW_OPENGL);

//  if(!screen) {
//      printf("SDL: could not create window - exiting:%s\n",SDL_GetError());
//      return -1;
//  }

//  sdlRenderer = SDL_CreateRenderer(screen, -1, 0);
//  //IYUV: Y + U + V  (3 planes)
//  //YV12: Y + V + U  (3 planes)
//  sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);

//  sdlRect.x=0;
//  sdlRect.y=0;
//  sdlRect.w=screen_w;
//  sdlRect.h=screen_h;

    //SDL End----------------------
    while (av_read_frame(pFormatCtx, packet) >= 0) {
        if (packet->stream_index == videoindex) {
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if (ret < 0) {
                printf("Decode Error.\n");
                return -1;
            }
            if (got_picture) {
                /* scale operation */
                sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pFrameYUV->data, pFrameYUV->linesize);

#if OUTPUT_YUV420P
                y_size = pCodecCtx->width * pCodecCtx->height;
                fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
                fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
                fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
#endif
                //SDL---------------------------
#if 0
                SDL_UpdateTexture(sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0]);
#else
//              SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
//              pFrameYUV->data[0], pFrameYUV->linesize[0],
//              pFrameYUV->data[1], pFrameYUV->linesize[1],
//              pFrameYUV->data[2], pFrameYUV->linesize[2]);
#endif

//              SDL_RenderClear( sdlRenderer );
//              SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);
//              SDL_RenderPresent( sdlRenderer );
//              //SDL End-----------------------
//              //Delay 40ms
//              SDL_Delay(40);
            }
        }
        av_packet_unref(packet);
    }
    // Flush frames remaining in the decoder by feeding it an empty packet
    packet->data = NULL;
    packet->size = 0;
    while (1) {
        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
        if (ret < 0)
            break;
        if (!got_picture)
            break;
        sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data,
                  pFrame->linesize, 0, pCodecCtx->height,
                  pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
        int y_size = pCodecCtx->width * pCodecCtx->height;
        fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
        fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
        fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
#endif
        //SDL---------------------------
//      SDL_UpdateTexture( sdlTexture, &sdlRect, pFrameYUV->data[0], pFrameYUV->linesize[0] );
//      SDL_RenderClear( sdlRenderer );
//      SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);
//      SDL_RenderPresent( sdlRenderer );
//      //SDL End-----------------------
//      //Delay 40ms
//      SDL_Delay(40);
    }

    sws_freeContext(img_convert_ctx);

#if OUTPUT_YUV420P
    fclose(fp_yuv);
#endif

//  SDL_Quit();

    av_free(out_buffer);
    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    return 0;
}
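The fwrite calls above work because the destination buffer was filled with align=1, so each linesize equals the plane width. For frames whose linesize includes padding, a stride-aware dump is safer; a sketch of a hypothetical helper built on av_image_copy_to_buffer():

static int write_yuv420p(FILE *fp, const AVFrame *f)
{
    int size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, f->width, f->height, 1);
    uint8_t *tmp;
    if (size < 0 || !(tmp = av_malloc(size)))
        return AVERROR(ENOMEM);
    // Pack the (possibly padded) planes into one contiguous buffer
    av_image_copy_to_buffer(tmp, size, (const uint8_t * const *)f->data,
                            f->linesize, AV_PIX_FMT_YUV420P, f->width, f->height, 1);
    fwrite(tmp, 1, size, fp);
    av_free(tmp);
    return 0;
}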
Пример #27
AkPacket ConvertVideo::convert(const AkPacket &packet)
{
    AkVideoPacket videoPacket(packet);

    // Convert input format.
    QString format = AkVideoCaps::pixelFormatToString(videoPacket.caps().format());
    AVPixelFormat iFormat = av_get_pix_fmt(format.toStdString().c_str());

    // Initialize rescaling context.
    this->m_scaleContext = sws_getCachedContext(this->m_scaleContext,
                                                videoPacket.caps().width(),
                                                videoPacket.caps().height(),
                                                iFormat,
                                                videoPacket.caps().width(),
                                                videoPacket.caps().height(),
                                                AV_PIX_FMT_BGRA,
                                                SWS_FAST_BILINEAR,
                                                NULL,
                                                NULL,
                                                NULL);

    if (!this->m_scaleContext)
        return AkPacket();

    // Create iPicture.
    AVFrame iFrame;
    memset(&iFrame, 0, sizeof(AVFrame));

    if (av_image_fill_arrays((uint8_t **) iFrame.data,
                         iFrame.linesize,
                         (const uint8_t *) videoPacket.buffer().constData(),
                         iFormat,
                         videoPacket.caps().width(),
                         videoPacket.caps().height(),
                         1) < 0)
        return AkPacket();

    // Create oPicture
    int frameSize = av_image_get_buffer_size(AV_PIX_FMT_BGRA,
                                             videoPacket.caps().width(),
                                             videoPacket.caps().height(),
                                             1);

    QByteArray oBuffer(frameSize, Qt::Uninitialized);
    AVFrame oFrame;
    memset(&oFrame, 0, sizeof(AVFrame));

    if (av_image_fill_arrays((uint8_t **) oFrame.data,
                         oFrame.linesize,
                         (const uint8_t *) oBuffer.constData(),
                         AV_PIX_FMT_BGRA,
                         videoPacket.caps().width(),
                         videoPacket.caps().height(),
                         1) < 0)
        return AkPacket();

    // Convert picture format
    sws_scale(this->m_scaleContext,
              iFrame.data,
              iFrame.linesize,
              0,
              videoPacket.caps().height(),
              oFrame.data,
              oFrame.linesize);

    // Create packet
    AkVideoPacket oPacket(packet);
    oPacket.caps().format() = AkVideoCaps::Format_bgra;
    oPacket.buffer() = oBuffer;

    return oPacket.toPacket();
}
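A defensive check that could be added before wiring iFrame to the packet buffer (a sketch, not part of the original, reusing names from the function above): verify the incoming buffer is at least as large as the layout av_image_fill_arrays() describes.

    int needed = av_image_get_buffer_size(iFormat,
                                          videoPacket.caps().width(),
                                          videoPacket.caps().height(),
                                          1);
    if (needed < 0 || videoPacket.buffer().size() < needed)
        return AkPacket();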
Пример #28
bool VideoFrame::convertToRGB24(QSize size)
{
    QMutexLocker locker(&biglock);

    AVFrame* sourceFrame;
    if (frameOther)
    {
        sourceFrame = frameOther;
    }
    else if (frameYUV420)
    {
        sourceFrame = frameYUV420;
    }
    else
    {
        qWarning() << "None of the frames are valid! Did someone release us?";
        return false;
    }
    //std::cout << "converting to RGB24" << std::endl;

    if (size.isEmpty())
    {
        size.setWidth(sourceFrame->width);
        size.setHeight(sourceFrame->height);
    }

    if (frameRGB24)
    {
        if (frameRGB24->width == size.width() && frameRGB24->height == size.height())
            return true;

        av_free(frameRGB24->opaque);
        av_frame_unref(frameRGB24);
        av_frame_free(&frameRGB24);
    }

    frameRGB24=av_frame_alloc();
    if (!frameRGB24)
    {
        qCritical() << "av_frame_alloc failed";
        return false;
    }

    int imgBufferSize = av_image_get_buffer_size(AV_PIX_FMT_RGB24, size.width(), size.height(), 1);
    uint8_t* buf = (uint8_t*)av_malloc(imgBufferSize);
    if (!buf)
    {
        qCritical() << "av_malloc failed";
        av_frame_free(&frameRGB24);
        return false;
    }
    frameRGB24->opaque = buf;

    uint8_t** data = frameRGB24->data;
    int* linesize = frameRGB24->linesize;
    av_image_fill_arrays(data, linesize, buf, AV_PIX_FMT_RGB24, size.width(), size.height(), 1);
    frameRGB24->width = size.width();
    frameRGB24->height = size.height();

    // Bilinear is better for shrinking, bicubic better for upscaling
    int resizeAlgo = size.width()<=width ? SWS_BILINEAR : SWS_BICUBIC;

    SwsContext *swsCtx = sws_getContext(width, height, (AVPixelFormat)pixFmt,
                                        size.width(), size.height(), AV_PIX_FMT_RGB24,
                                        resizeAlgo, nullptr, nullptr, nullptr);
    if (!swsCtx)
    {
        qCritical() << "sws_getContext failed";
        av_free(buf);
        av_frame_free(&frameRGB24);
        return false;
    }
    sws_scale(swsCtx, (uint8_t const * const *)sourceFrame->data,
              sourceFrame->linesize, 0, height,
              frameRGB24->data, frameRGB24->linesize);
    sws_freeContext(swsCtx);

    return true;
}
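Unlike Пример #27, this function builds and frees a fresh SwsContext on every call; when frames arrive continuously, caching it is cheaper. A sketch, assuming a hypothetical m_swsCtx member:

    m_swsCtx = sws_getCachedContext(m_swsCtx,
                                    width, height, (AVPixelFormat)pixFmt,
                                    size.width(), size.height(), AV_PIX_FMT_RGB24,
                                    resizeAlgo, nullptr, nullptr, nullptr);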
Пример #29
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int             i, videoStream, audioStream;
  AVCodecContext  *pCodecCtx = NULL;
  AVCodec         *pCodec = NULL;
  AVFrame         *pFrame = NULL; 
  AVPacket        packet;
  int             frameFinished;
  //float           aspect_ratio;
  
  AVCodecContext  *aCodecCtx = NULL;
  AVCodec         *aCodec = NULL;

    //SDL_Overlay     *bmp = NULL;
    //SDL_Surface     *screen = NULL;
    SDL_Window *m_pWindow = NULL;
    SDL_Renderer *m_pRenderer = NULL;
    
  SDL_Rect        rect;
  SDL_Event       event;
  SDL_AudioSpec   wanted_spec, spec;

  //struct SwsContext   *sws_ctx            = NULL;
  AVDictionary        *videoOptionsDict   = NULL;
  AVDictionary        *audioOptionsDict   = NULL;

  if(argc < 2) {
    fprintf(stderr, "Usage: test <file>\n");
    exit(1);
  }
  // Register all formats and codecs
  av_register_all();
  
  if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))
  {
    fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
    exit(1);
  }

  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
    return -1; // Couldn't open file
  
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream=-1;
  audioStream=-1;
  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       videoStream < 0) {
      videoStream=i;
       // printf("video stream:%d",i);
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audioStream < 0) {
      audioStream=i;
       // printf("audio stream:%d",i);
    }
  }
    
//    for(i=0; i<pFormatCtx->nb_streams; i++) {
//        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
//            printf("video stream:%d\n",i);
//        }
//        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO ) {
//            printf("audio stream:%d\n",i);
//        }
//    }
  if(videoStream==-1)
    return -1; // Didn't find a video stream
  if(audioStream==-1)
    return -1;
   
  aCodecCtx=pFormatCtx->streams[audioStream]->codec;
    
    
    int count = SDL_GetNumAudioDevices(0);
    
    for (int i = 0; i < count; ++i) {
        SDL_Log("Audio device %d: %s", i, SDL_GetAudioDeviceName(i, 0));
    }
    
    
  // Set audio settings from codec info
  wanted_spec.freq = aCodecCtx->sample_rate;
  wanted_spec.format = AUDIO_S16SYS;
  wanted_spec.channels = aCodecCtx->channels;
  wanted_spec.silence = 0;
  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
  wanted_spec.callback = audio_callback;
  wanted_spec.userdata = aCodecCtx;
  
//  if(SDL_OpenAudio(&wanted_spec, &spec) < 0)
//  {
//    fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
//    return -1;
//  }
    SDL_AudioDeviceID dev;
    dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FORMAT_CHANGE);
    if(dev == 0)
    {
         fprintf(stderr, "Failed to open audio: %s\n", SDL_GetError());
    }
    else
    {
        if(wanted_spec.format != spec.format){
               fprintf(stderr, "We didn't get AUDIO_S16SYS audio format.\n");
               return -1;
        }
    }
    
    
  aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
  if(!aCodec)
  {
    fprintf(stderr, "Unsupported codec!\n");
    return -1;
  }
  avcodec_open2(aCodecCtx, aCodec, &audioOptionsDict);

  // audio_st = pFormatCtx->streams[index]
  packet_queue_init(&audioq);
  //SDL_PauseAudio(0);
    SDL_PauseAudioDevice(dev,0);
    
  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
  if(pCodec==NULL)
  {
    fprintf(stderr, "Unsupported codec!\n");
    return -1; // Codec not found
  }
  // Open codec
  if(avcodec_open2(pCodecCtx, pCodec, &videoOptionsDict)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
    pFrame=av_frame_alloc();
    AVFrame*   m_pFrameYUV = av_frame_alloc();
    //int t_alloc_ret = av_image_alloc(m_pFrameYUV->data,m_pFrameYUV->linesize,pCodecCtx->width,pCodecCtx->height,AV_PIX_FMT_YUV420P,1);
//    int t_size0 = avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->coded_width, pCodecCtx->coded_height);
//    int t_size1 = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->coded_width, pCodecCtx->coded_height,1);
    //uint8_t *  out_buffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->coded_width, pCodecCtx->coded_height));
    uint8_t *out_buffer = (uint8_t *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
    //avpicture_fill((AVPicture *)m_pFrameYUV , out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->coded_width, pCodecCtx->coded_height);
    av_image_fill_arrays(m_pFrameYUV->data, m_pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
    
    
    // pix_fmt is the decoder's output format; sw_pix_fmt is only meaningful
    // for hardware-accelerated decoding
    struct SwsContext *img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                                        pCodecCtx->width, pCodecCtx->height,AV_PIX_FMT_YUV420P,
                                                        SWS_BICUBIC,
                                                        NULL, NULL, NULL);

  // Make a screen to put our video

//#ifndef __DARWIN__
//        screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
//#else
//        screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
//#endif
//    
//    
//  if(!screen) {
//    fprintf(stderr, "SDL: could not set video mode - exiting\n");
//    exit(1);
//  }
  
  // Allocate a place to put our YUV image on that screen
//  bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
//				 pCodecCtx->height,
//				 SDL_YV12_OVERLAY,
//				 screen);
    // Make a screen to put our video
    m_pWindow = SDL_CreateWindow("test windows", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,pCodecCtx->width, pCodecCtx->height,SDL_WINDOW_SHOWN);
    if(!m_pWindow)
    {
        printf("SDL: could not create window - exiting:%s\n",SDL_GetError());
        return -1;
    }
    
    m_pRenderer = SDL_CreateRenderer(m_pWindow, -1, 0);
    SDL_RenderClear(m_pRenderer);
    SDL_Texture *m_pSdlTexture = SDL_CreateTexture(m_pRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,
                                                   pCodecCtx->width, pCodecCtx->height);
    rect.x = 0;
    rect.y = 0;
    rect.w = pCodecCtx->width;
    rect.h = pCodecCtx->height;
    
    
    
    
  // Read frames and save first five frames to disk
  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 
			   &packet);
      
      // Did we get a video frame?
      if(frameFinished)
      {
    
//SDL_LockTexture(m_pSdlTexture, &rect, m_pFrameYUV->data, m_pFrameYUV->linesize);
          
	//SDL_LockYUVOverlay(bmp);

//	AVPicture pict;
//	pict.data[0] = bmp->pixels[0];
//	pict.data[1] = bmp->pixels[2];
//	pict.data[2] = bmp->pixels[1];
//
//	pict.linesize[0] = bmp->pitches[0];
//	pict.linesize[1] = bmp->pitches[2];
//	pict.linesize[2] = bmp->pitches[1];

	// Convert the image into YUV format that SDL uses
//    sws_scale
//    (
//        sws_ctx, 
//        (uint8_t const * const *)pFrame->data, 
//        pFrame->linesize, 
//        0,
//        pCodecCtx->height,
//        pict.data,
//        pict.linesize
//    );
      sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                m_pFrameYUV->data, m_pFrameYUV->linesize);
          
	//SDL_UnlockYUVOverlay(bmp);
     // SDL_UnlockTexture(m_pSdlTexture);
          
      SDL_UpdateYUVTexture(m_pSdlTexture, &rect,
                           m_pFrameYUV->data[0], m_pFrameYUV->linesize[0],
                           m_pFrameYUV->data[1], m_pFrameYUV->linesize[1],
                           m_pFrameYUV->data[2], m_pFrameYUV->linesize[2]);
//	rect.x = 0;
//	rect.y = 0;
//	rect.w = pCodecCtx->width;
//	rect.h = pCodecCtx->height;
	//SDL_DisplayYUVOverlay(bmp, &rect);
      SDL_RenderClear( m_pRenderer ); // clear the backbuffer before copying the new frame
      SDL_RenderCopy( m_pRenderer, m_pSdlTexture,  NULL, &rect);
      
      SDL_RenderPresent(m_pRenderer);
      
      SDL_Delay(38);
      }
      // Unref the packet even when no frame was finished, so it isn't leaked
      av_packet_unref(&packet);
    }
    else if(packet.stream_index==audioStream)
    {
      packet_queue_put(&audioq, &packet);
    }
    else
    {
//      av_free_packet(&packet);
      av_packet_unref(&packet);
    }
    // Handle pending SDL events (e.g., quit)
    SDL_PollEvent(&event);
    switch(event.type)
    {
    case SDL_QUIT:
      quit = 1;
      SDL_Quit();
      exit(0);
      break;
    default:
      break;
    }

  }

  // Free the frames, pixel buffer, and scaling context
  av_frame_free(&pFrame);
  av_frame_free(&m_pFrameYUV);
  av_free(out_buffer);
  sws_freeContext(img_convert_ctx);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}
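Note that SDL_AUDIO_ALLOW_FORMAT_CHANGE above lets SDL hand back a different sample format, which the code then rejects. If AUDIO_S16SYS is strictly required, passing 0 for allowed_changes makes SDL convert internally instead (a sketch):

    dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, 0);
    if (dev == 0)
        fprintf(stderr, "Failed to open audio: %s\n", SDL_GetError());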
Пример #30
void ffplayer::VideoDecoderThreadRun()
{
	bool bFirst = true;
	while (1)
	{
	//	SDL_Delay(1);
		if (m_bStop){
			break;
		}
		
		PInfo pFrameInfo = m_videoPacketBuf.getInfoFromList();
		if (!pFrameInfo) // no data yet; could block on an event instead of polling
		{
			SDL_Delay(1);
			continue;
		}

		AVPacket packet;
		av_init_packet(&packet);
		packet.data = pFrameInfo->Data;
		packet.size = pFrameInfo->DataLen;
		packet.pts = pFrameInfo->frameInfo.iTimestampObsolute;

		int got = 0;	
		decode(packet, got, pFrameInfo->frameInfo.bNewPos);

		if (got)
		{	
			int size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, m_pCodecCtx->width, m_pCodecCtx->height, 1);
			PInfo pYuvInfo = NULL;
			int count = m_freeRenderBuf.getCurCount(); // keep the last frame: its pInfo must not be overwritten so a snapshot can still be grabbed while paused
			if (count == 1)
			{
			//	pYuvInfo = CBuffer::createBuf(m_pCodecCtx->width* m_pCodecCtx->height * 3 / 2);
			//	pYuvInfo = CBuffer::createBuf(m_pFrame->linesize[0]* m_pCodecCtx->height * 3 / 2);
				pYuvInfo = CBuffer::createBuf(size);
			}
			else
			{
				//pYuvInfo = m_freeRenderBuf.getFreeInfoFromList(m_pCodecCtx->width* m_pCodecCtx->height * 3 / 2);
				//pYuvInfo = m_freeRenderBuf.getFreeInfoFromList(m_pFrame->linesize[0] * m_pCodecCtx->height * 3 / 2);;
				pYuvInfo = m_freeRenderBuf.getFreeInfoFromList(size);
			}
				

			AVFrame dstFrame;
			memset(&dstFrame, 0, sizeof(AVFrame)); // fill_arrays only sets data/linesize
			av_image_fill_arrays(dstFrame.data, dstFrame.linesize, (uint8_t*)pYuvInfo->Data, AV_PIX_FMT_YUV420P, m_pCodecCtx->width, m_pCodecCtx->height, 1);
			
			if (m_pFrame->format != AV_PIX_FMT_YUV420P)
			{			
				struct SwsContext* img_convert_ctx = NULL;
				img_convert_ctx = sws_getContext(m_pCodecCtx->width, m_pCodecCtx->height, (AVPixelFormat)m_pFrame->format,
					m_pCodecCtx->width, m_pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);


				int n = sws_scale(img_convert_ctx, m_pFrame->data, m_pFrame->linesize,
					0, m_pCodecCtx->height, dstFrame.data, dstFrame.linesize);
				sws_freeContext(img_convert_ctx);
			}
			else
			{
				av_image_copy(dstFrame.data, dstFrame.linesize, (const uint8_t **)m_pFrame->data, m_pFrame->linesize, AV_PIX_FMT_YUV420P, m_pCodecCtx->width, m_pCodecCtx->height);
			}
					
			pYuvInfo->width = m_pCodecCtx->width;
			pYuvInfo->height = m_pCodecCtx->height;
			m_iWidth = m_pCodecCtx->width;
			m_iHeight = m_pCodecCtx->height;
			double pts = av_frame_get_best_effort_timestamp(m_pFrame);
			
			pYuvInfo->frameInfo.iTimestampObsolute = pts;
			pYuvInfo->frameInfo.bNewPos = pFrameInfo->frameInfo.bNewPos;
			pYuvInfo->frameInfo.serial = pFrameInfo->frameInfo.serial;

			while (m_renderBuf.getCurCount() >= MAX_YUV_SIZE)
			{
				SDL_Delay(10);
				if (m_bStop){
					CBuffer::freeBuf(&pYuvInfo);
					CBuffer::freeBuf(&pFrameInfo);
					return;
				}
			}

			m_renderBuf.insertList(pYuvInfo);
			
		}
		
		CBuffer::freeBuf(&pFrameInfo);
	}//while
}
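The pts stored above is still in stream time_base units. To convert it to seconds, one would scale by the stream's time_base (a sketch; m_pStream is an assumed member holding the decoded AVStream*):

	int64_t ts = av_frame_get_best_effort_timestamp(m_pFrame);
	double seconds = (ts == AV_NOPTS_VALUE) ? 0.0 : ts * av_q2d(m_pStream->time_base);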