Example #1
static void output_ffmpeg_set_uri(const char *uri,
				     output_update_meta_cb_t meta_cb) {
	Log_error("ffmpeg", "Set uri to '%s'", uri);
	free(gsuri_);

	gsuri_ = (uri && *uri) ? strdup(uri) : NULL;
	meta_update_callback_ = meta_cb;
	SongMetaData_clear(&song_meta_);
    if (s_pthread_id)
    {
        s_Exit_Flag = TRUE;
        // if playback is currently paused, wake it up so the thread can exit
        if (get_current_player_state() == FFMPEG_STATE_PAUSED)
        {
            wakeup_pause_status();
        }
        Log_error("ffmpeg", " get_current_player_state '%d'", get_current_player_state());
        pthread_join(s_pthread_id, NULL);
        s_pthread_id = 0;
    }
    Log_error("ffmpeg", "Set uri to 2 '%s'", uri);
    //char * pMp3 = "http://cdn.sh.gg/icam/canyou.mp3";
	int ret = 0;
    if ((ret = avformat_open_input(&s_pFormatCtx, gsuri_, NULL, NULL)) != 0)
    {
        Log_error("ffmpeg", "avformat_open_input %s = %d output_ffmpeg_set_uri", gsuri_, ret);
        // do not exit the whole process on a bad URI; just report and bail out
        goto end;
    }
    if ((ret = avformat_find_stream_info(s_pFormatCtx, NULL)) < 0)
    {
    	goto end;
    }
    gboolean flag = FALSE;
    for (unsigned int i = 0; i < s_pFormatCtx->nb_streams; ++i)
    {
        if (s_pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            s_audioStreamIndex = i;
            flag = TRUE;
            break;
        }
    }
    if (!flag)
    {
        ret = -3;
        goto end;
    }
    
    s_pCodecCtx = s_pFormatCtx->streams[s_audioStreamIndex]->codec;

    AVCodec *pCodec = avcodec_find_decoder(s_pCodecCtx->codec_id);
    if (NULL == pCodec)
    {
    	ret = -4;
        goto end;
    }

    if (avcodec_open2(s_pCodecCtx, pCodec, NULL) < 0)
    {
        ret = -5;
        goto end;
    }
end:
    Log_error("ffmpeg", "set uri ret = %d ", ret);
    if (ret != 0)
    {
        // opening failed: s_pFormatCtx may be NULL here, so do not touch it
        return;
    }
    last_known_time_.duration = s_pFormatCtx->duration / AV_TIME_BASE * s_one_sec_unit;
    Log_error("ffmpeg", "duration = %d ", last_known_time_.duration);

}
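Note: the function above interleaves thread shutdown, demuxer open, and decoder open. Below is a minimal, self-contained sketch of just the open/probe/open-decoder sequence with cleanup on every failure path, assuming the same pre-3.x FFmpeg API these examples use (AVStream::codec); open_audio_decoder is a hypothetical helper name, not part of the original project.

extern "C" {
#include <libavformat/avformat.h>
}

static int open_audio_decoder(const char *uri, AVFormatContext **out_fmt, int *out_stream)
{
    AVFormatContext *fmt = NULL;
    int ret = avformat_open_input(&fmt, uri, NULL, NULL);
    if (ret != 0)
        return ret; // fmt stays NULL on failure, nothing to clean up

    if ((ret = avformat_find_stream_info(fmt, NULL)) < 0)
        goto fail;

    for (unsigned int i = 0; i < fmt->nb_streams; ++i) {
        AVCodecContext *cc = fmt->streams[i]->codec;
        if (cc->codec_type != AVMEDIA_TYPE_AUDIO)
            continue;
        AVCodec *dec = avcodec_find_decoder(cc->codec_id);
        if (!dec) {
            ret = AVERROR_DECODER_NOT_FOUND;
            goto fail;
        }
        if ((ret = avcodec_open2(cc, dec, NULL)) < 0)
            goto fail;
        *out_fmt = fmt;
        *out_stream = (int) i;
        return 0;
    }
    ret = AVERROR_STREAM_NOT_FOUND;

fail:
    avformat_close_input(&fmt); // frees the demuxer and its streams
    return ret;
}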
Example #2
bool COMXAudioCodecOMX::Open(CDVDStreamInfo &hints)
{
  AVCodec* pCodec = NULL;
  m_bOpenedCodec = false;

  if (hints.codec == AV_CODEC_ID_DTS && CSettings::Get().GetBool("audiooutput.supportdtshdcpudecoding"))
    pCodec = avcodec_find_decoder_by_name("libdcadec");

  if (!pCodec)
    pCodec = avcodec_find_decoder(hints.codec);

  if (!pCodec)
  {
    CLog::Log(LOGDEBUG,"COMXAudioCodecOMX::Open() Unable to find codec %d", hints.codec);
    return false;
  }

  m_bFirstFrame = true;
  m_pCodecContext = avcodec_alloc_context3(pCodec);
  m_pCodecContext->debug_mv = 0;
  m_pCodecContext->debug = 0;
  m_pCodecContext->workaround_bugs = 1;

  if (pCodec->capabilities & CODEC_CAP_TRUNCATED)
    m_pCodecContext->flags |= CODEC_FLAG_TRUNCATED;

  m_channels = 0;
  m_pCodecContext->channels = hints.channels;
  m_pCodecContext->sample_rate = hints.samplerate;
  m_pCodecContext->block_align = hints.blockalign;
  m_pCodecContext->bit_rate = hints.bitrate;
  m_pCodecContext->bits_per_coded_sample = hints.bitspersample;
  if (hints.codec == AV_CODEC_ID_TRUEHD)
  {
    if (CAEFactory::HasStereoAudioChannelCount())
      m_pCodecContext->request_channel_layout = AV_CH_LAYOUT_STEREO;
    else if (!CAEFactory::HasHDAudioChannelCount())
      m_pCodecContext->request_channel_layout = AV_CH_LAYOUT_5POINT1;
  }
  if (m_pCodecContext->request_channel_layout)
    CLog::Log(LOGNOTICE,"COMXAudioCodecOMX::Open() Requesting channel layout of %x", (unsigned)m_pCodecContext->request_channel_layout);

  // vorbis and wma2v2 have variable sized planar output, so skip concatenation
  if (hints.codec == AV_CODEC_ID_VORBIS || hints.codec == AV_CODEC_ID_WMAV2)
    m_bNoConcatenate = true;

  if(m_pCodecContext->bits_per_coded_sample == 0)
    m_pCodecContext->bits_per_coded_sample = 16;

  if( hints.extradata && hints.extrasize > 0 )
  {
    m_pCodecContext->extradata = (uint8_t*)av_mallocz(hints.extrasize + FF_INPUT_BUFFER_PADDING_SIZE);
    if (m_pCodecContext->extradata)
    {
      m_pCodecContext->extradata_size = hints.extrasize;
      memcpy(m_pCodecContext->extradata, hints.extradata, hints.extrasize);
    }
  }

  if (avcodec_open2(m_pCodecContext, pCodec, NULL) < 0)
  {
    CLog::Log(LOGDEBUG,"COMXAudioCodecOMX::Open() Unable to open codec");
    Dispose();
    return false;
  }

  m_pFrame1 = av_frame_alloc();
  m_bOpenedCodec = true;
  m_iSampleFormat = AV_SAMPLE_FMT_NONE;
  m_desiredSampleFormat = m_pCodecContext->sample_fmt == AV_SAMPLE_FMT_S16 ? AV_SAMPLE_FMT_S16 : AV_SAMPLE_FMT_FLTP;
  return true;
}
Example #3
bool CDVDAudioCodecFFmpeg::Open(CDVDStreamInfo &hints, CDVDCodecOptions &options)
{
  AVCodec* pCodec = NULL;
  m_bOpenedCodec = false;

  bool allow_dtshd_decoding = true;
#if defined(TARGET_RASPBERRY_PI) || defined(HAS_IMXVPU)
  allow_dtshd_decoding = CSettings::Get().GetBool("audiooutput.supportdtshdcpudecoding");
#endif
  if (hints.codec == AV_CODEC_ID_DTS && allow_dtshd_decoding)
    pCodec = avcodec_find_decoder_by_name("libdcadec");

  if (!pCodec)
    pCodec = avcodec_find_decoder(hints.codec);

  if (!pCodec)
  {
    CLog::Log(LOGDEBUG,"CDVDAudioCodecFFmpeg::Open() Unable to find codec %d", hints.codec);
    return false;
  }

  m_pCodecContext = avcodec_alloc_context3(pCodec);
  m_pCodecContext->debug_mv = 0;
  m_pCodecContext->debug = 0;
  m_pCodecContext->workaround_bugs = 1;

  if (pCodec->capabilities & CODEC_CAP_TRUNCATED)
    m_pCodecContext->flags |= CODEC_FLAG_TRUNCATED;

  m_matrixEncoding = AV_MATRIX_ENCODING_NONE;
  m_channels = 0;
  m_pCodecContext->channels = hints.channels;
  m_pCodecContext->sample_rate = hints.samplerate;
  m_pCodecContext->block_align = hints.blockalign;
  m_pCodecContext->bit_rate = hints.bitrate;
  m_pCodecContext->bits_per_coded_sample = hints.bitspersample;

  if(m_pCodecContext->bits_per_coded_sample == 0)
    m_pCodecContext->bits_per_coded_sample = 16;

  if( hints.extradata && hints.extrasize > 0 )
  {
    m_pCodecContext->extradata = (uint8_t*)av_mallocz(hints.extrasize + FF_INPUT_BUFFER_PADDING_SIZE);
    if(m_pCodecContext->extradata)
    {
      m_pCodecContext->extradata_size = hints.extrasize;
      memcpy(m_pCodecContext->extradata, hints.extradata, hints.extrasize);
    }
  }

  if (g_advancedSettings.m_audioApplyDrc >= 0.0)
    av_opt_set_double(m_pCodecContext, "drc_scale", g_advancedSettings.m_audioApplyDrc, AV_OPT_SEARCH_CHILDREN);

  if (avcodec_open2(m_pCodecContext, pCodec, NULL) < 0)
  {
    CLog::Log(LOGDEBUG,"CDVDAudioCodecFFmpeg::Open() Unable to open codec");
    Dispose();
    return false;
  }

  m_pFrame1 = av_frame_alloc();
  m_bOpenedCodec = true;
  m_iSampleFormat = AV_SAMPLE_FMT_NONE;
  m_matrixEncoding = AV_MATRIX_ENCODING_NONE;

  return true;
}
Example #4
void convert_image(AVCodecContext *pCodecCtx, AVFrame *pFrame, AVPacket *avpkt, int *got_packet_ptr) {
	AVCodecContext *codecCtx = NULL;
	AVCodec *codec;
	AVFrame *pFrameRGB = NULL;
	struct SwsContext *scalerCtx = NULL;
	int ret = -1;

	*got_packet_ptr = 0;

	codec = avcodec_find_encoder(TARGET_IMAGE_CODEC);
	if (!codec) {
		printf("avcodec_find_encoder() failed to find encoder\n");
		goto fail;
	}

    codecCtx = avcodec_alloc_context3(codec);
	if (!codecCtx) {
		printf("avcodec_alloc_context3 failed\n");
		goto fail;
	}

	codecCtx->bit_rate = pCodecCtx->bit_rate;
	codecCtx->width = pCodecCtx->width;
	codecCtx->height = pCodecCtx->height;
	codecCtx->pix_fmt = TARGET_IMAGE_FORMAT;
	codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
	codecCtx->time_base.num = pCodecCtx->time_base.num;
	codecCtx->time_base.den = pCodecCtx->time_base.den;

	if (avcodec_open2(codecCtx, codec, NULL) < 0) {
		printf("avcodec_open2() failed\n");
		goto fail;
	}

	pFrameRGB = avcodec_alloc_frame();
	
	if (!pFrameRGB) {
		goto fail;
	}

    avpicture_alloc((AVPicture *) pFrameRGB,
    		TARGET_IMAGE_FORMAT,
    		codecCtx->width,
    		codecCtx->height);
    
	scalerCtx = sws_getContext(pCodecCtx->width,
			pCodecCtx->height,
			pCodecCtx->pix_fmt,
			pCodecCtx->width,
			pCodecCtx->height,
			TARGET_IMAGE_FORMAT,
			SWS_FAST_BILINEAR, 0, 0, 0);
	
	if (!scalerCtx) {
		printf("sws_getContext() failed\n");
		goto fail;
	}
    
    sws_scale(scalerCtx,
    		(const uint8_t * const *) pFrame->data,
    		pFrame->linesize,
    		0,
    		pFrame->height,
    		pFrameRGB->data,
    		pFrameRGB->linesize);
	
	ret = avcodec_encode_video2(codecCtx, avpkt, pFrameRGB, got_packet_ptr);
	
	if (ret < 0) {
		*got_packet_ptr = 0;
	}
	
	// cleanup runs on every path; safe now that pFrameRGB, scalerCtx and ret
	// are initialized up front (av_free(NULL) and sws_freeContext(NULL) guard)
	fail:
	if (pFrameRGB) {
		avpicture_free((AVPicture *) pFrameRGB); // release the avpicture_alloc'd planes
		av_free(pFrameRGB);
	}
	
	if (codecCtx) {
		avcodec_close(codecCtx);
	    av_free(codecCtx);
	}
	
	if (scalerCtx) {
		sws_freeContext(scalerCtx);
	}
	
	if (ret < 0 || !*got_packet_ptr) {
		av_free_packet(avpkt);
	}
}
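Note: convert_image() above scales a decoded frame to TARGET_IMAGE_FORMAT and encodes it into one image packet, freeing the packet itself only on failure. A hedged usage sketch follows; save_snapshot is a hypothetical caller, and it assumes the project's TARGET_IMAGE_CODEC/TARGET_IMAGE_FORMAT macros map to an image codec such as MJPEG, with ctx/frame coming from a decode loop like the one in Example #5.

extern "C" {
#include <libavcodec/avcodec.h>
}
#include <cstdio>

static int save_snapshot(AVCodecContext *ctx, AVFrame *frame, const char *path)
{
    AVPacket pkt;
    int got_packet = 0;

    av_init_packet(&pkt);
    pkt.data = NULL; // the image encoder allocates the payload
    pkt.size = 0;

    convert_image(ctx, frame, &pkt, &got_packet);
    if (!got_packet)
        return -1;   // convert_image() already freed the packet on failure

    FILE *out = fopen(path, "wb");
    if (out) {
        fwrite(pkt.data, 1, pkt.size, out);
        fclose(out);
    }
    av_free_packet(&pkt); // on success the caller owns the packet
    return 0;
}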
Example #5
int main(int argc, char *argv[])
{
    AVCodecContext* video_dec_ctx = NULL;
    AVCodec* video_dec = NULL;
    AVPacket pkt;
    AVFrame *frame = NULL;
    int read_eos = 0;
    int decode_count = 0;
    int render_count = 0;
    int video_stream_index = -1, i;
    uint8_t *frame_copy = NULL;
    FILE *dump_yuv = NULL;

    // parse command line parameters
    process_cmdline(argc, argv);
    if (!input_file) {
        ERROR("no input file specified\n");
        return -1;
    }

    // libav* init
    av_register_all();

    // open input file
    AVFormatContext* pFormat = NULL;
    if (avformat_open_input(&pFormat, input_file, NULL, NULL) < 0) {
        ERROR("fail to open input file: %s by avformat\n", input_file);
        return -1;
    }
    if (avformat_find_stream_info(pFormat, NULL) < 0) {
        ERROR("fail to find out stream info\n");
        return -1;
    }
    av_dump_format(pFormat,0,input_file,0);

    // find out video stream
    for (i = 0; i < pFormat->nb_streams; i++) {
        if (pFormat->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_dec_ctx = pFormat->streams[i]->codec;
            video_stream_index = i;
            break;
        }
    }
    ASSERT(video_dec_ctx && video_stream_index>=0);

    // open video codec
    video_dec = avcodec_find_decoder(video_dec_ctx->codec_id);
    video_dec_ctx->coder_type = render_mode ? render_mode -1 : render_mode; // specify output frame type
    if (avcodec_open2(video_dec_ctx, video_dec, NULL) < 0) {
        ERROR("fail to open codec\n");
        return -1;
    }

    // decode frames one by one
    av_init_packet(&pkt);
    frame = av_frame_alloc(); // allocate once and reuse; the decoder unrefs it per call
    while (1) {
        if(read_eos == 0 && av_read_frame(pFormat, &pkt) < 0) {
            read_eos = 1;
        }
        if (read_eos) {
            pkt.data = NULL;
            pkt.size = 0;
        }

        if (pkt.stream_index == video_stream_index) {
            int got_picture = 0, ret = 0;
            ret = avcodec_decode_video2(video_dec_ctx, frame, &got_picture, &pkt);
            if (ret < 0) { // decode fail (or decode finished)
                DEBUG("exit ...\n");
                break;
            }

            if (read_eos && ret>=0 && !got_picture) {
                DEBUG("ret=%d, exit ...\n", ret);
                break; // eos has been processed
            }

            decode_count++;
            if (got_picture) {
                switch (render_mode) {
                case 0: // dump raw video frame to disk file
                case 1: { // draw raw frame data as texture
                    // assumed I420 format
                    int height[3] = {video_dec_ctx->height, video_dec_ctx->height/2, video_dec_ctx->height/2};
                    int width[3] = {video_dec_ctx->width, video_dec_ctx->width/2, video_dec_ctx->width/2};
                    int plane, row;

                    if (render_mode == 0) {
                        if (!dump_yuv) {
                            char out_file[256];
                            sprintf(out_file, "./dump_%dx%d.I420", video_dec_ctx->width, video_dec_ctx->height);
                            dump_yuv = fopen(out_file, "ab");
                            if (!dump_yuv) {
                                ERROR("fail to create file for dumped yuv data\n");
                                return -1;
                            }
                        }
                        for (plane=0; plane<3; plane++) {
                            for (row = 0; row<height[plane]; row++)
                                fwrite(frame->data[plane]+ row*frame->linesize[plane], width[plane], 1, dump_yuv);
                        }
                    } else {
                        // glTexImage2D doesn't handle pitch, make a copy of video data
                        // (allocate once; the same buffer is reused for every frame)
                        if (!frame_copy)
                            frame_copy = malloc(video_dec_ctx->height * video_dec_ctx->width * 3 / 2);
                        unsigned char* ptr = frame_copy;

                        for (plane=0; plane<3; plane++) {
                            for (row=0; row<height[plane]; row++) {
                                memcpy(ptr, frame->data[plane]+row*frame->linesize[plane], width[plane]);
                                ptr += width[plane];
                            }
                        }

                        drawVideo((uintptr_t)frame_copy, 0, video_dec_ctx->width, video_dec_ctx->height, 0);
                    }
                }
                    break;
                case 2: // draw video frame as texture with drm handle
                case 3: // draw video frame as texture with dma_buf handle
                    drawVideo((uintptr_t)frame->data[0], render_mode -1, video_dec_ctx->width, video_dec_ctx->height, (uintptr_t)frame->data[1]);
                    break;
                default:
                    break;
                }
                render_count++;
            }
        }
        av_free_packet(&pkt); // release the demuxed packet before the next read
    }

    if (frame)
        av_frame_free(&frame);
    if (frame_copy)
        free(frame_copy);
    if (dump_yuv)
        fclose(dump_yuv);
    deinit_egl();
    PRINTF("decode %s ok, decode_count=%d, render_count=%d\n", input_file, decode_count, render_count);

    return 0;
}
Example #6
File: aplayer.cpp Project: joamag/aplayer
int open_aplayer(const char *filename, struct aplayer_t *player) {
    // creates a new format (file container) context to be
    // used to detect the kind of file in use
    AVFormatContext *container = avformat_alloc_context();
    if(avformat_open_input(&container, filename, NULL, NULL) < 0) {
        WARN("Could not open file");
    }
    if(avformat_find_stream_info(container, NULL) < 0) {
        WARN("Could not find file info");
    }

#ifdef _DEBUG
    // dumps the format information to the standard output
    // this should print information on the container file
    av_dump_format(container, 0, filename, false);
#endif

    // starts the (audio) stream id with an invalid value and then
    // iterates over the complete set of stream to find one that complies
    // with the audio "interface"
    int stream_id = -1;
    for(unsigned int index = 0; index < container->nb_streams; index++) {
        // retrieves the current stream and checks if the
        // codec type is audio; in case it's not, the loop
        // continues (nothing to be done)
        AVStream *stream = container->streams[index];
        if(stream->codec->codec_type != AVMEDIA_TYPE_AUDIO) { continue; }

        // sets the current index as the stream identifier
        // to be used from now on
        stream_id = index;
        break;
    }
    if(stream_id == -1) { WARN("Could not find Audio Stream"); }

    // retrieves the codec context associated with the audio stream
    // that was just discovered
    AVCodecContext *codec_ctx = container->streams[stream_id]->codec;

    // tries to find the codec for the current codec context and
    // opens it for the current execution
    AVCodec *codec = avcodec_find_decoder(codec_ctx->codec_id);
    if(codec == NULL) { WARN("Cannot find codec"); }
    if(avcodec_open2(codec_ctx, codec, NULL) < 0) { WARN("Could not open codec"); }

    // initializes the ao structure creating the device associated
    // with the created structures this is going to be used
    ao_device *device = init_ao(codec_ctx);

    // allocates the buffer to be used in the packet for the
    // unpacking of the various packets; it must be heap allocated
    // because the packet (and the buffer) outlives this function
    uint8_t *buffer = (uint8_t *) av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);

    // creates the packet structure to be used and initializes
    // it, this is going to be the payload for each iteration
    // then sets its data and size
    av_init_packet(&player->packet);
    player->packet.data = buffer;
    player->packet.size = AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE;

    // allocates a new frame structure to be used for the audio
    // frames in iteration
    AVFrame *frame = avcodec_alloc_frame();

    // updates the player structure with all the attributes that
    // were retrieved for the current context
    player->device = device;
    player->stream_id = stream_id;
    player->frame = frame;
    player->container = container;
    player->codec_ctx = codec_ctx;

    return 0;
}
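Note: open_aplayer() only primes the structures; it never reads or plays anything. A possible decode/playback loop over the filled struct is sketched below. It is a sketch, not project code: it assumes the struct aplayer_t definition from the project header, and a packed sample format such as AV_SAMPLE_FMT_S16 that libao can play directly from data[0].

extern "C" {
#include <ao/ao.h>
#include <libavformat/avformat.h>
#include <libavutil/samplefmt.h>
}

void play_loop(struct aplayer_t *player)
{
    while (av_read_frame(player->container, &player->packet) >= 0) {
        if (player->packet.stream_index == player->stream_id) {
            int got_frame = 0;
            int len = avcodec_decode_audio4(player->codec_ctx, player->frame,
                                            &got_frame, &player->packet);
            if (len >= 0 && got_frame) {
                // size of the decoded (interleaved) audio in bytes
                int size = av_samples_get_buffer_size(NULL,
                        player->codec_ctx->channels, player->frame->nb_samples,
                        player->codec_ctx->sample_fmt, 1);
                ao_play(player->device, (char *) player->frame->data[0], size);
            }
        }
        av_free_packet(&player->packet); // release the demuxed payload
    }
}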
Example #7
static void video_encode_example(AVFrame* in_frame, FILE* fp)

{
    AVCodec *codec;
    AVCodecContext *c = NULL;
    int i = 0, ret, got_output;
    FILE *f;
    AVFrame * frame = in_frame;
    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    /* find the mpeg1 video encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_MPEG2VIDEO);
    if (!codec)
    {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c)
    {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 1920;
    c->height = 1080;
    /* frames per second */
    c->time_base = (AVRational ) { 1, 60 };
    c->gop_size = 10; /* emit one intra frame every ten frames */
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    /* open it */
    if (avcodec_open2(c, codec, 0) < 0)
    {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fp;
    if (!f)
    {
        fprintf(stderr, "Could not open\n");
        exit(1);
    }

    {
        av_init_packet(&pkt);
        pkt.data = NULL;    // packet data will be allocated by the encoder
        pkt.size = 0;

        fflush(stdout);

        frame->pts = i;

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0)
        {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output)
        {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++)
    {
        fflush(stdout);

        ret = avcodec_encode_video2(c, &pkt, 0, &got_output);
        if (ret < 0)
        {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output)
        {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* add sequence end code to have a real mpeg file */
    fwrite(endcode, 1, sizeof(endcode), f);

    avcodec_close(c);
    av_free(c);

    printf("\n");

}
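Note: video_encode_example() expects a caller-prepared in_frame matching its hard-coded 1920x1080 YUV420P context. A sketch of building such a frame is below; make_black_frame is a hypothetical helper, not part of the original snippet.

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
}
#include <cstring>

AVFrame *make_black_frame(void)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return NULL;
    frame->format = AV_PIX_FMT_YUV420P;
    frame->width  = 1920;
    frame->height = 1080;
    if (av_image_alloc(frame->data, frame->linesize, frame->width,
                       frame->height, AV_PIX_FMT_YUV420P, 32) < 0) {
        av_frame_free(&frame);
        return NULL;
    }
    // black in YUV: luma 16, chroma 128 (chroma planes are half height)
    memset(frame->data[0], 16,  (size_t)frame->linesize[0] * frame->height);
    memset(frame->data[1], 128, (size_t)frame->linesize[1] * frame->height / 2);
    memset(frame->data[2], 128, (size_t)frame->linesize[2] * frame->height / 2);
    return frame;
}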
Example #8
File: vpFFMPEG.cpp Project: 976717326/visp
/*!
  Allocates and initializes the parameters depending on the video and the desired color type.
  Once the stream is opened, it is possible to get the video encoding framerate with getFramerate(),
  and the dimension of the images using getWidth() and getHeight().
  
  \param filename : Path to the video which has to be read.
  \param colortype : Desired color map used to open the video.
  The parameter can take two values : COLORED and GRAY_SCALED.
  
  \return It returns true if the parameters could be initialized, false otherwise.
*/
bool vpFFMPEG::openStream(const char *filename, vpFFMPEGColorType colortype)
{
  this->color_type = colortype;
  
  av_register_all();
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,0,0) // libavformat 52.84.0
  if (av_open_input_file (&pFormatCtx, filename, NULL, 0, NULL) != 0)
#else
  if (avformat_open_input (&pFormatCtx, filename, NULL, NULL) != 0) // libavformat 53.4.0
#endif
  {
    vpTRACE("Couldn't open file ");
    return false;
  }

#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,21,0) // libavformat 53.21.0
  if (av_find_stream_info (pFormatCtx) < 0)
#else 
  if (avformat_find_stream_info (pFormatCtx, NULL) < 0)
#endif
      return false;
  
  videoStream = 0;
  bool found_codec = false;
  
  /*
  * Detect streams types
  */
  for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++)
  {
#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(51,0,0)
    if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) // avutil 50.33.0
#else
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) // avutil 51.9.1
#endif
    {
      videoStream = i;
      //std::cout << "rate: " << pFormatCtx->streams[i]->r_frame_rate.num << " " << pFormatCtx->streams[i]->r_frame_rate.den << std::endl;
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(55,12,0)
      framerate_stream =  pFormatCtx->streams[i]->r_frame_rate.num;
      framerate_stream /= pFormatCtx->streams[i]->r_frame_rate.den;
#else
      framerate_stream =  pFormatCtx->streams[i]->avg_frame_rate.num;
      framerate_stream /= pFormatCtx->streams[i]->avg_frame_rate.den;
#endif
      found_codec= true;
      break;
    }
  }

  if (found_codec)
  {
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

    if (pCodec == NULL)
    {
      vpTRACE("unsuported codec");
      return false;		// Codec not found
    }
    
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,35,0) // libavcodec 53.35.0
    if (avcodec_open (pCodecCtx, pCodec) < 0)
#else
    if (avcodec_open2 (pCodecCtx, pCodec, NULL) < 0)
#endif
    {
      vpTRACE("Could not open codec");
      return false;		// Could not open codec
    }

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
    pFrame = avcodec_alloc_frame();
#else
    pFrame = av_frame_alloc(); // libavcodec 55.34.1
#endif

    if (color_type == vpFFMPEG::COLORED)
    {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
      pFrameRGB=avcodec_alloc_frame();
#else
      pFrameRGB=av_frame_alloc(); // libavcodec 55.34.1
#endif
    
      if (pFrameRGB == NULL)
        return false;
      
      numBytes = avpicture_get_size (PIX_FMT_RGB24,pCodecCtx->width,pCodecCtx->height);
    }
    
    else if (color_type == vpFFMPEG::GRAY_SCALED)
    {
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
      pFrameGRAY=avcodec_alloc_frame();
#else
      pFrameGRAY=av_frame_alloc(); // libavcodec 55.34.1
#endif
    
      if (pFrameGRAY == NULL)
        return false;
      
      numBytes = avpicture_get_size (PIX_FMT_GRAY8,pCodecCtx->width,pCodecCtx->height);
    }  

    /*
     * Determine required buffer size and allocate buffer
     */
    width = pCodecCtx->width ;
    height = pCodecCtx->height ;
    buffer = (uint8_t *) malloc ((unsigned int)(sizeof (uint8_t)) * (unsigned int)numBytes);
  }
  else
  {
    vpTRACE("Didn't find a video stream");
    return false;
  }
  
  if (color_type == vpFFMPEG::COLORED)
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
  
  else if (color_type == vpFFMPEG::GRAY_SCALED)
    avpicture_fill((AVPicture *)pFrameGRAY, buffer, PIX_FMT_GRAY8, pCodecCtx->width, pCodecCtx->height);
  
  streamWasOpen = true;

  return true;
}
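Note: per the documentation above, a caller only needs openStream() plus the getters. A short usage sketch, using only the methods the doc comment names (the <visp/vpFFMPEG.h> header path is an assumption about the ViSP layout):

#include <iostream>
#include <visp/vpFFMPEG.h>

void probe_video(const char *path)
{
    vpFFMPEG reader;
    if (!reader.openStream(path, vpFFMPEG::COLORED))
        return; // could not open or no video stream
    std::cout << "framerate: " << reader.getFramerate()
              << ", size: " << reader.getWidth() << "x" << reader.getHeight()
              << std::endl;
}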
Example #9
File: av_sub.c Project: C3MA/fc_mplayer
/**
 * Decode a subtitle packet via libavcodec.
 * \return < 0 on error, > 0 if further processing is needed
 */
int decode_avsub(struct sh_sub *sh, uint8_t **data, int *size,
                 double *pts, double *endpts)
{
    AVCodecContext *ctx = sh->context;
    enum AVCodecID cid = AV_CODEC_ID_NONE;
    int new_type = 0;
    int res;
    int got_sub;
    AVSubtitle sub;
    AVPacket pkt;

    switch (sh->type) {
    case 'b':
        cid = AV_CODEC_ID_DVB_SUBTITLE; break;
    case 'p':
        cid = AV_CODEC_ID_HDMV_PGS_SUBTITLE; break;
    case 'x':
        cid = AV_CODEC_ID_XSUB; break;
    }

    av_init_packet(&pkt);
    pkt.data = *data;
    pkt.size = *size;
    pkt.pts = *pts * 1000;
    if (*pts != MP_NOPTS_VALUE && *endpts != MP_NOPTS_VALUE)
        pkt.convergence_duration = (*endpts - *pts) * 1000;
    if (!ctx) {
        AVCodec *sub_codec;
        init_avcodec();
        ctx = avcodec_alloc_context3(NULL);
        if (!ctx) {
            mp_msg(MSGT_SUBREADER, MSGL_FATAL,
                   "Could not allocate subtitle decoder context\n");
            return -1;
        }
        ctx->extradata_size = sh->extradata_len;
        ctx->extradata = sh->extradata;
        sub_codec = avcodec_find_decoder(cid);
        if (!sub_codec || avcodec_open2(ctx, sub_codec, NULL) < 0) {
            mp_msg(MSGT_SUBREADER, MSGL_FATAL,
                   "Could not open subtitle decoder\n");
            av_freep(&ctx);
            return -1;
        }
        sh->context = ctx;
    }
    res = avcodec_decode_subtitle2(ctx, &sub, &got_sub, &pkt);
    if (res < 0)
        return res;
    if (*pts != MP_NOPTS_VALUE) {
        if (sub.end_display_time > sub.start_display_time)
            *endpts = *pts + sub.end_display_time / 1000.0;
        *pts += sub.start_display_time / 1000.0;
    }
    if (got_sub && vo_spudec && sub.num_rects == 0)
        spudec_set_paletted(vo_spudec, NULL, 0, NULL, 0, 0, 0, 0, *pts, *endpts);
    if (got_sub && sub.num_rects > 0) {
        switch (sub.rects[0]->type) {
        case SUBTITLE_BITMAP:
            if (!vo_spudec)
                vo_spudec = spudec_new_scaled(NULL, ctx->width, ctx->height, NULL, 0);
            avsub_to_spudec(sub.rects, sub.num_rects, *pts, *endpts);
            vo_osd_changed(OSDTYPE_SPU);
            break;
        case SUBTITLE_TEXT:
            *data = strdup(sub.rects[0]->text);
            *size = strlen(*data);
            new_type = 't';
            break;
        case SUBTITLE_ASS:
            *data = strdup(sub.rects[0]->ass);
            *size = strlen(*data);
            new_type = 'a';
            break;
        }
    }
    if (got_sub)
        avsubtitle_free(&sub);
    return new_type;
}
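Note: decode_avsub() mixes the libavcodec call pattern with MPlayer-specific plumbing (sh_sub, spudec, OSD). Stripped to the library calls alone, the pattern looks like this sketch (simplified: no timing, lazy codec open, or rect dispatch; decode_one_subtitle is a hypothetical helper):

extern "C" {
#include <libavcodec/avcodec.h>
}

int decode_one_subtitle(AVCodecContext *ctx, uint8_t *data, int size)
{
    AVSubtitle sub;
    AVPacket pkt;
    int got_sub = 0;

    av_init_packet(&pkt);
    pkt.data = data;
    pkt.size = size;

    int res = avcodec_decode_subtitle2(ctx, &sub, &got_sub, &pkt);
    if (res < 0)
        return res;
    if (got_sub) {
        // sub.rects[0]->type is SUBTITLE_BITMAP, SUBTITLE_TEXT or SUBTITLE_ASS;
        // a real caller would dispatch on it here, as decode_avsub() does
        avsubtitle_free(&sub);
    }
    return got_sub;
}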
Example #10
bool FFmpegImportFileHandle::InitCodecs()
{
   // Allocate the array of pointers to hold stream contexts pointers
   // Some of the allocated space may be unused (corresponds to video, subtitle, or undecodeable audio streams)
   mScs = std::make_shared<Scs>(mFormatContext->nb_streams);
   // Fill the stream contexts
   for (unsigned int i = 0; i < mFormatContext->nb_streams; i++)
   {
      if (mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
      {
         //Create a context
         auto sc = std::make_unique<streamContext>();

         sc->m_stream = mFormatContext->streams[i];
         sc->m_codecCtx = sc->m_stream->codec;

         AVCodec *codec = avcodec_find_decoder(sc->m_codecCtx->codec_id);
         if (codec == NULL)
         {
            wxLogError(wxT("FFmpeg : avcodec_find_decoder() failed. Index[%02d], Codec[%02x - %s]"),i,sc->m_codecCtx->codec_id,sc->m_codecCtx->codec_name);
            //FFmpeg can't decode this stream, skip it
            continue;
         }
         if (codec->type != sc->m_codecCtx->codec_type)
         {
            wxLogError(wxT("FFmpeg : Codec type mismatch, skipping. Index[%02d], Codec[%02x - %s]"),i,sc->m_codecCtx->codec_id,sc->m_codecCtx->codec_name);
            //Non-audio codec reported as audio? Nevertheless, we don't need THIS.
            continue;
         }

         if (avcodec_open2(sc->m_codecCtx, codec, NULL) < 0)
         {
            wxLogError(wxT("FFmpeg : avcodec_open() failed. Index[%02d], Codec[%02x - %s]"),i,sc->m_codecCtx->codec_id,sc->m_codecCtx->codec_name);
            //Can't open decoder - skip this stream
            continue;
         }

         // Stream is decodeable and it is audio. Add it and its description to the arrays
         wxString strinfo;
         int duration = 0;
         if (sc->m_stream->duration > 0)
            duration = sc->m_stream->duration * sc->m_stream->time_base.num / sc->m_stream->time_base.den;
         else
            duration = mFormatContext->duration / AV_TIME_BASE;
         wxString bitrate = wxT("");
         if (sc->m_codecCtx->bit_rate > 0)
            bitrate.Printf(wxT("%d"),sc->m_codecCtx->bit_rate);
         else
            bitrate.Printf(wxT("?"));

         AVDictionaryEntry *tag = av_dict_get(sc->m_stream->metadata, "language", NULL, 0);
         wxString lang;
         if (tag)
         {
            lang.FromUTF8(tag->value);
         }
         strinfo.Printf(_("Index[%02x] Codec[%s], Language[%s], Bitrate[%s], Channels[%d], Duration[%d]"),sc->m_stream->id,codec->name,lang.c_str(),bitrate.c_str(),sc->m_stream->codec->channels, duration);
         mStreamInfo.Add(strinfo);
         mScs->get()[mNumStreams++] = std::move(sc);
      }
      //for video and unknown streams do nothing
   }
   //It doesn't really return false, but GetStreamCount() will return 0 if the file is composed entirely of unreadable streams
   return true;
}
Example #11
File: vpFFMPEG.cpp Project: 976717326/visp
bool vpFFMPEG::openEncoder(const char *filename, unsigned int w, unsigned int h, AVCodecID codec)
{
  av_register_all();

  /* find the mpeg1 video encoder */
  pCodec = avcodec_find_encoder(codec);
  if (pCodec == NULL) {
    fprintf(stderr, "codec not found\n");
    return false;
  }

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,5,0) // libavcodec 53.5.0
  pCodecCtx = avcodec_alloc_context();
#else
  pCodecCtx = avcodec_alloc_context3(NULL);
#endif

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,34,0)
  pFrame = avcodec_alloc_frame();
  pFrameRGB = avcodec_alloc_frame();
#else
  pFrame = av_frame_alloc(); // libavcodec 55.34.1
  pFrameRGB = av_frame_alloc(); // libavcodec 55.34.1
#endif

  /* put sample parameters */
  pCodecCtx->bit_rate = (int)bit_rate;
  /* resolution must be a multiple of two */
  pCodecCtx->width = (int)w;
  pCodecCtx->height = (int)h;
  this->width = (int)w;
  this->height = (int)h;
  /* frames per second */
  pCodecCtx->time_base.num = 1;
  pCodecCtx->time_base.den = framerate_encoder;
  pCodecCtx->gop_size = 10; /* emit one intra frame every ten frames */
  pCodecCtx->max_b_frames=1;
  pCodecCtx->pix_fmt = PIX_FMT_YUV420P;

  /* open it */
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,35,0) // libavcodec 53.35.0
  if (avcodec_open (pCodecCtx, pCodec) < 0) {
#else
  if (avcodec_open2 (pCodecCtx, pCodec, NULL) < 0) {
#endif
    fprintf(stderr, "could not open codec\n");
    return false;
  }

  /* the codec gives us the frame size, in samples */

  f = fopen(filename, "wb");
  if (!f) {
    fprintf(stderr, "could not open %s\n", filename);
    return false;
  }

  outbuf_size = 100000;
  outbuf = new uint8_t[outbuf_size];

  numBytes = avpicture_get_size (PIX_FMT_YUV420P,pCodecCtx->width,pCodecCtx->height);
  picture_buf = new uint8_t[numBytes];
  avpicture_fill((AVPicture *)pFrame, picture_buf, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

  numBytes = avpicture_get_size (PIX_FMT_RGB24,pCodecCtx->width,pCodecCtx->height);
  buffer = new uint8_t[numBytes];
  avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

  img_convert_ctx= sws_getContext(pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24, pCodecCtx->width,pCodecCtx->height,PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
  
  encoderWasOpened = true;

  return true;
}


/*!
  Saves the image I as frame of the video.
  
  \param I : the image to save.
  
  \return It returns true if the image could be saved.
*/
bool vpFFMPEG::saveFrame(vpImage<vpRGBa> &I)
{
  if (encoderWasOpened == false)
  {
    vpTRACE("Couldn't save a frame. The parameters have to be initialized before ");
    return false;
  }
  
  writeBitmap(I);
  sws_scale(img_convert_ctx, pFrameRGB->data, pFrameRGB->linesize, 0, pCodecCtx->height, pFrame->data, pFrame->linesize);
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54,2,100) // libavcodec 54.2.100
  out_size = avcodec_encode_video(pCodecCtx, outbuf, outbuf_size, pFrame);
  fwrite(outbuf, 1, (size_t)out_size, f);
#else
  AVPacket pkt;
  av_init_packet(&pkt);
  pkt.data = NULL;    // packet data will be allocated by the encoder
  pkt.size = 0;

  int got_output;
  int ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_output);
  if (ret < 0) {
    std::cerr << "Error encoding frame in " << __FILE__ << " " << __LINE__ << " " << __FUNCTION__ << std::endl;
    return false;
  }
  if (got_output) {
    fwrite(pkt.data, 1, pkt.size, f);
    av_free_packet(&pkt);
  }
#endif
  fflush(stdout);
  return true;
}


/*!
  Saves the image I as frame of the video.
  
  \param I : the image to save.
  
  \return It returns true if the image could be saved.
*/
bool vpFFMPEG::saveFrame(vpImage<unsigned char> &I)
{
  if (encoderWasOpened == false)
  {
    vpTRACE("Couldn't save a frame. The parameters have to be initialized before ");
    return false;
  }
  
  writeBitmap(I);
  sws_scale(img_convert_ctx, pFrameRGB->data, pFrameRGB->linesize, 0, pCodecCtx->height, pFrame->data, pFrame->linesize);  
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54,2,100) // libavcodec 54.2.100
  out_size = avcodec_encode_video(pCodecCtx, outbuf, outbuf_size, pFrame);
  fwrite(outbuf, 1, (size_t)out_size, f);
#else
  AVPacket pkt;
  av_init_packet(&pkt);
  pkt.data = NULL;    // packet data will be allocated by the encoder
  pkt.size = 0;

  int got_output;
  int ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_output);
  if (ret < 0) {
    std::cerr << "Error encoding frame in " << __FILE__ << " " << __LINE__ << " " << __FUNCTION__ << std::endl;
    return false;
  }
  if (got_output) {
    fwrite(pkt.data, 1, pkt.size, f);
    av_free_packet(&pkt);
  }
#endif

  fflush(stdout);
  return true;
}

/*!
  Ends the writing of the video and closes the file.
  
  \return It returns true if the file was closed without problem.
*/
bool vpFFMPEG::endWrite()
{
  if (encoderWasOpened == false)
  {
    vpTRACE("Couldn't save a frame. The parameters have to be initialized before ");
    return false;
  }
  
  int ret = 1;
  while (ret != 0)
  {

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54,2,100) // libavcodec 54.2.100
    ret = avcodec_encode_video(pCodecCtx, outbuf, outbuf_size, NULL);
    fwrite(outbuf, 1, (size_t)ret, f); // ret, not the stale out_size, holds this frame's size
#else
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;    // packet data will be allocated by the encoder
    pkt.size = 0;
    int got_output;
    ret = avcodec_encode_video2(pCodecCtx, &pkt, NULL, &got_output);
    if (ret < 0) {
      std::cerr << "Error encoding frame in " << __FILE__ << " " << __LINE__ << " " << __FUNCTION__ << std::endl;
      return false;
    }
    if (got_output) {
      fwrite(pkt.data, 1, pkt.size, f);
      av_free_packet(&pkt);
    }
    ret = got_output; // keep looping until the encoder has flushed all delayed frames
#endif
  }

  /*The end of a mpeg file*/
  outbuf[0] = 0x00;
  outbuf[1] = 0x00;
  outbuf[2] = 0x01;
  outbuf[3] = 0xb7;
  fwrite(outbuf, 1, 4, f);
  fclose(f);
  f = NULL;
  return true;
}
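Note: taken together, the methods above define the write workflow: openEncoder(), saveFrame() once per image, endWrite(). A hedged usage sketch (record_clip is hypothetical; the codec constant and image size are illustrative assumptions, and on pre-54.x libavcodec the id would be CODEC_ID_MPEG1VIDEO):

#include <visp/vpFFMPEG.h>

void record_clip(const char *path)
{
    vpFFMPEG writer;
    if (!writer.openEncoder(path, 640, 480, AV_CODEC_ID_MPEG1VIDEO))
        return;
    vpImage<vpRGBa> I(480, 640); // rows, cols
    for (int f = 0; f < 100; ++f) {
        // ... fill I with the current frame ...
        if (!writer.saveFrame(I))
            break;
    }
    writer.endWrite(); // flushes delayed frames and writes the MPEG end code
}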

/*!
  This method enables to fill the frame bitmap thanks to the vpImage bitmap.
*/
void vpFFMPEG::writeBitmap(vpImage<vpRGBa> &I)
{
  unsigned char* beginInput = (unsigned char*)I.bitmap;
  unsigned char* input = NULL;
  unsigned char* beginOutput = (unsigned char*)pFrameRGB->data[0];
  int widthStep = pFrameRGB->linesize[0];
  
  for(int i=0 ; i < height ; i++)
  {
    input = beginInput + 4 * i * width;
    unsigned char *output = beginOutput + i * widthStep;
    for(int j=0 ; j < width ; j++)
    {
      *(output++) = *(input);
      *(output++) = *(input+1);
      *(output++) = *(input+2);

      input+=4;
    }
  }
}
Example #12
int _tmain(int argc, _TCHAR* argv[])
{
	if (argc != 3)
	{
		printf("usage: %s in_file out_file\n", argv[0]);
		return -1;
	}

	AVCodec *p_codec = NULL;
	AVCodecContext *p_codec_ctx = NULL;
	int i = 0, ret = 0, got_output = 0, y_size = 0, frame_cnt = 0;
	FILE *p_fin = NULL, *p_fout = NULL;
	AVFrame *p_frame = NULL;
	AVPacket pkt;

	const char *p_in_fpath = argv[1], *p_out_fpath = argv[2];
	AVCodecID codec_id = AV_CODEC_ID_NONE;

#if TEST_HEVC
	codec_id = AV_CODEC_ID_HEVC;
#else
	codec_id = AV_CODEC_ID_H264;
#endif

	int in_w = 480, in_h = 272;
	int frame_num = 100;

	avcodec_register_all();

	if (NULL == (p_codec = avcodec_find_encoder(codec_id)))
	{
		printf("Codec not found\n");
		return -1;
	}
	if (NULL == (p_codec_ctx = avcodec_alloc_context3(p_codec)))
	{
		printf("Could not allocate video codec context\n");
		return -1;
	}
	p_codec_ctx->bit_rate = 40000;
	p_codec_ctx->width = in_w;
	p_codec_ctx->height = in_h;
	p_codec_ctx->time_base.num = 1;
	p_codec_ctx->time_base.den = 25;
	p_codec_ctx->gop_size = 10;
	p_codec_ctx->max_b_frames = 1;
	p_codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;

	if (AV_CODEC_ID_H264 == codec_id)
	{
		av_opt_set(p_codec_ctx->priv_data, "preset", "slow", 0);
	}

	if (avcodec_open2(p_codec_ctx, p_codec, NULL) < 0)
	{
		printf("Could not open codec\n");
		return -1;
	}

	if (NULL == (p_frame = av_frame_alloc()))
	{
		printf("Could not allocate video frame\n");
		return -1;
	}
	p_frame->format = p_codec_ctx->pix_fmt;
	p_frame->width = p_codec_ctx->width;
	p_frame->height = p_codec_ctx->height;

	if ((ret = av_image_alloc(p_frame->data, p_frame->linesize, p_codec_ctx->width,
		p_codec_ctx->height, p_codec_ctx->pix_fmt, 16)) < 0)
	{
		printf("Could not allocate raw picture buffer\n");
		return -1;
	}

	///< Input raw data
	if (NULL == (p_fin = fopen(p_in_fpath, "rb")))
	{
		printf("Could not open %s\n", p_in_fpath);
		return -1;
	}
	///< Output bitstream
	if (NULL == (p_fout = fopen(p_out_fpath, "wb")))
	{
		printf("Could not open %s\n");
		return -1;
	}

	y_size = p_codec_ctx->width * p_codec_ctx->height;

	///< Encode
	for (i = 0; i < frame_num; ++i)
	{
		av_init_packet(&pkt);
		pkt.data = NULL; ///< packet data will be allocate by encoder
		pkt.size = 0;
		///< read raw yuv data
		if (fread(p_frame->data[0], 1, y_size, p_fin) <= 0 || ///< Y
			fread(p_frame->data[1], 1, y_size / 4, p_fin) <= 0 || ///< U
			fread(p_frame->data[2], 1, y_size / 4, p_fin) <= 0) ///< V
		{
			return -1;
		}
		else if (feof(p_fin))
		{
			break;
		}

		p_frame->pts = i;
		
		///< encode the image
		if ((ret = avcodec_encode_video2(p_codec_ctx, &pkt, p_frame, &got_output)) < 0)
		{
			printf("Error encoding frame\n");
			return -1;
		}
		if (got_output)
		{
			printf("Succeed to encode frame: %5d\tsize: %5d\n", frame_cnt, pkt.size);
			++frame_cnt;
			fwrite(pkt.data, 1, pkt.size, p_fout);
			av_free_packet(&pkt);
		}
	}

	///< Flush Encoder
	for (got_output = 1; got_output; ++i)
	{
		if ((ret = avcodec_encode_video2(p_codec_ctx, &pkt, NULL, &got_output)) < 0)
		{
			printf("Error encoding frame\n");
			return -1;
		}
		if (got_output)
		{
			printf("Flush encoder: Succeed to encode 1 frame!\tsize: %5d\n", pkt.size);
			fwrite(pkt.data, 1, pkt.size, p_fout);
			av_free_packet(&pkt);
		}
	}

	fclose(p_fout);
	avcodec_close(p_codec_ctx);
	av_free(p_codec_ctx);
	av_freep(&p_frame->data[0]);
	av_frame_free(&p_frame);

	return 0;
}
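Note: the encode loop above can set p_frame->pts = i only because time_base is 1/25, i.e. one tick per frame. A small sketch of the general index-to-pts conversion for arbitrary frame rates and time bases (frame_index_to_pts is a hypothetical helper):

extern "C" {
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>
}

// One frame lasts 1/frame_rate seconds, so rescale the frame index from
// units of (1/frame_rate) into the codec's time_base units.
int64_t frame_index_to_pts(int64_t idx, AVRational frame_rate, AVRational time_base)
{
    return av_rescale_q(idx, av_inv_q(frame_rate), time_base);
}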
Example #13
int FfmpegCamera::OpenFfmpeg() {

    Debug ( 2, "OpenFfmpeg called." );

    mOpenStart = time(NULL);
    mIsOpening = true;

    // Open the input, not necessarily a file
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_open_input_file" );
    if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
#else
    // Handle options
    AVDictionary *opts = 0;
    StringVector opVect = split(Options(), ",");
    
    // Set transport method as specified by method field, rtpUni is default
    if ( Method() == "rtpMulti" )
    	opVect.push_back("rtsp_transport=udp_multicast");
    else if ( Method() == "rtpRtsp" )
        opVect.push_back("rtsp_transport=tcp");
    else if ( Method() == "rtpRtspHttp" )
        opVect.push_back("rtsp_transport=http");
    
  	Debug(2, "Number of Options: %d",opVect.size());
    for (size_t i=0; i<opVect.size(); i++)
    {
    	StringVector parts = split(opVect[i],"=");
    	if (parts.size() > 1) {
    		parts[0] = trimSpaces(parts[0]);
    		parts[1] = trimSpaces(parts[1]);
    	    if ( av_dict_set(&opts, parts[0].c_str(), parts[1].c_str(), 0) == 0 ) {
    	        Debug(2, "set option %d '%s' to '%s'", i,  parts[0].c_str(), parts[1].c_str());
    	    }
    	    else
    	    {
    	        Warning( "Error trying to set option %d '%s' to '%s'", i, parts[0].c_str(), parts[1].c_str() );
    	    }
    		  
    	}
    }    
	Debug ( 1, "Calling avformat_open_input" );

    mFormatContext = avformat_alloc_context( );
    mFormatContext->interrupt_callback.callback = FfmpegInterruptCallback;
    mFormatContext->interrupt_callback.opaque = this;

    if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, &opts ) !=0 )
#endif
    {
        mIsOpening = false;
        Error( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );
        return -1;
    }

    mIsOpening = false;
    Debug ( 1, "Opened input" );

    Info( "Stream open %s", mPath.c_str() );
    startTime=av_gettime();//FIXME here or after find_Stream_info
    
    //FIXME can speed up initial analysis but need sensible parameters...
    //mFormatContext->probesize = 32;
    //mFormatContext->max_analyze_duration = 32;
    // Locate stream info from avformat_open_input
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_find_stream_info" );
    if ( av_find_stream_info( mFormatContext ) < 0 )
#else
    Debug ( 1, "Calling avformat_find_stream_info" );
    if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
#endif
        Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
    
    Info( "Find stream info complete %s", mPath.c_str() );
    Debug ( 1, "Got stream info" );

    // Find first video stream present
    mVideoStreamId = -1;
    for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
    {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
        if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
        if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
		{
			mVideoStreamId = i;
			break;
		}
        if(mAudioStreamId == -1) //FIXME best way to copy all other streams?
        {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
		    if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
#else
		    if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO )
#endif
		    {
                mAudioStreamId = i;
		    }
        }
    }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mPath.c_str() );

    Debug ( 1, "Found video stream" );

    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Try and get the codec from the codec context
    if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Found decoder" );

    // Open the codec
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 7, 0)
    Debug ( 1, "Calling avcodec_open" );
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    Debug ( 1, "Calling avcodec_open2" );
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Opened codec" );

    // Allocate space for the native video frame
    mRawFrame = avcodec_alloc_frame();

    // Allocate space for the converted video frame
    mFrame = avcodec_alloc_frame();
    
    if(mRawFrame == NULL || mFrame == NULL)
        Fatal( "Unable to allocate frame for %s", mPath.c_str() );

    Debug ( 1, "Allocated frames" );
    
    int pSize = avpicture_get_size( imagePixFormat, width, height );
    if( (unsigned int)pSize != imagesize) {
        Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
    }

    Debug ( 1, "Validated imagesize" );
    
#if HAVE_LIBSWSCALE
    Debug ( 1, "Calling sws_isSupportedInput" );
    if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
        Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
    }
Example #14
bool VideoDecoder::Load()
{
  unsigned int i;
  int numBytes;
  uint8_t *tmp;

  if ( avformat_open_input(&mFormatContext, mFilename.c_str(), NULL, NULL) != 0 )
  {
    fprintf(stderr, "VideoDecoder::Load - av_open_input_file failed\n");
    return false;
  }

  if ( avformat_find_stream_info(mFormatContext, 0) < 0 )
  {
    fprintf(stderr, "VideoDecoder::Load - av_find_stream_info failed\n");
    return false;
  }

  /* Some debug info */
  av_dump_format(mFormatContext, 0, mFilename.c_str(), false);

  for (i = 0; i < mFormatContext->nb_streams; i++)
  {
    if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
    {
      mVideoStream = i;
      break;
    }
  }

  if ( mVideoStream == -1 )
  {
    fprintf(stderr, "VideoDecoder::Load - No video stream found.\n");
    return false;
  }

  mCodecContext = mFormatContext->streams[mVideoStream]->codec;
  mCodec = avcodec_find_decoder(mCodecContext->codec_id);
  if ( !mCodec )
  {
    fprintf(stderr, "VideoDecoder::Load - avcodec_find_decoder failed\n");
    return false;
  }

  if ( avcodec_open2(mCodecContext, mCodec, 0) < 0 )
  {
    fprintf(stderr, "VideoDecoder::Load - avcodec_open failed\n");
    return false;
  }

  mFrame = avcodec_alloc_frame();
  if ( !mFrame )
  {
    fprintf(stderr, "VideoDecoder::Load - Failed allocating frame.\n");
    return false;
  }

  mFrameRGB = avcodec_alloc_frame();
  if ( !mFrameRGB )
  {
    fprintf(stderr, "VideoDecoder::Load - Failed allocating RGB frame.\n");
    return false;
  }

  /* Determine required buffer size and allocate buffer */
  numBytes = avpicture_get_size(PIX_FMT_RGB24, mCodecContext->width, mCodecContext->height);
  tmp = (uint8_t *)realloc(mBuffer, numBytes * sizeof(uint8_t));
  if ( !tmp )
  {
    fprintf(stderr, "VideoDecoder::Load - Failed allocating buffer.\n");
    return false;
  }
  mBuffer = tmp;

  avpicture_fill((AVPicture *)mFrameRGB, mBuffer, PIX_FMT_RGB24, mCodecContext->width, mCodecContext->height);

  mSwsContext = sws_getContext(mCodecContext->width, mCodecContext->height, mCodecContext->pix_fmt,
                               mCodecContext->width, mCodecContext->height, PIX_FMT_RGB24,
                               SWS_BICUBIC, NULL, NULL, NULL);
  if ( !mSwsContext )
  {
    fprintf(stderr, "VideoDecoder::Load - sws_getContext failed.\n");
    return false;
  }

  mDoneReading = false;

  return true;
}
Example #15
int RemoteCameraRtsp::PrimeCapture()
{
    Debug( 2, "Waiting for sources" );
    for ( int i = 0; i < 100 && !rtspThread->hasSources(); i++ )
    {
        usleep( 100000 );
    }
    if ( !rtspThread->hasSources() )
        Fatal( "No RTSP sources" );

    Debug( 2, "Got sources" );

    mFormatContext = rtspThread->getFormatContext();

    // Find first video stream present
    mVideoStreamId = -1;
    
    for ( unsigned int i = 0; i < mFormatContext->nb_streams; i++ )
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
	if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
	if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
        {
            mVideoStreamId = i;
            break;
        }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream" );

    // Get a pointer to the codec context for the video stream
    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Find the decoder for the video stream
    mCodec = avcodec_find_decoder( mCodecContext->codec_id );
    if ( mCodec == NULL )
        Panic( "Unable to locate codec %d decoder", mCodecContext->codec_id );

    // Open codec
#if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0)
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Panic( "Can't open codec" );

    // Allocate space for the native video frame
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
    mRawFrame = av_frame_alloc();
#else
    mRawFrame = avcodec_alloc_frame();
#endif

    // Allocate space for the converted video frame
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
    mFrame = av_frame_alloc();
#else
    mFrame = avcodec_alloc_frame();
#endif

	if(mRawFrame == NULL || mFrame == NULL)
		Fatal( "Unable to allocate frame(s)");
	
	int pSize = avpicture_get_size( imagePixFormat, width, height );
	if( (unsigned int)pSize != imagesize) {
		Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
	}
/*	
#if HAVE_LIBSWSCALE
	if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
		Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
	}

	if(!sws_isSupportedOutput(imagePixFormat)) {
		Fatal("swscale does not support the target format: %c%c%c%c",(imagePixFormat)&0xff,((imagePixFormat>>8)&0xff),((imagePixFormat>>16)&0xff),((imagePixFormat>>24)&0xff));
	}
	
#else // HAVE_LIBSWSCALE
    Fatal( "You must compile ffmpeg with the --enable-swscale option to use RTSP cameras" );
#endif // HAVE_LIBSWSCALE
*/

    return( 0 );
}
Example #16
File: muxing.c Project: BtbN/FFmpeg
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c;
    int nb_samples;
    int ret;
    AVDictionary *opt = NULL;

    c = ost->enc;

    /* open it */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    ost->t     = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;

    ost->frame     = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                       c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
                                       c->sample_rate, nb_samples);

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }

    /* create resampler context */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }

    /* set options */
    av_opt_set_int       (ost->swr_ctx, "in_channel_count",   c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "in_sample_rate",     c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",      AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int       (ost->swr_ctx, "out_channel_count",  c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "out_sample_rate",    c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",     c->sample_fmt,     0);

    /* initialize the resampling context */
    if ((ret = swr_init(ost->swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}
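Note: open_audio() above only configures the resampler. A short sketch of actually driving it when writing a frame (convert_to_encoder_fmt is a hypothetical helper; it assumes equal input/output sample rates, as configured above, so no delay/buffering handling is needed):

extern "C" {
#include <libswresample/swresample.h>
}

// Convert the S16 tmp_frame into the encoder's sample format in ost->frame.
// Returns the number of samples converted per channel, or a negative error.
static int convert_to_encoder_fmt(OutputStream *ost)
{
    return swr_convert(ost->swr_ctx,
                       ost->frame->data, ost->frame->nb_samples,
                       (const uint8_t **) ost->tmp_frame->data,
                       ost->tmp_frame->nb_samples);
}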
Example #17
// --------------------------------------------------------------------------
// ARDrone::initVideo()
// Description  : Initialize video.
// Return value : SUCCESS: 1  FAILURE: 0
// --------------------------------------------------------------------------
int ARDrone::initVideo(void)
{
    // AR.Drone 2.0
    if (version.major == ARDRONE_VERSION_2) {
        // Open the IP address and port
        char filename[256];
        sprintf(filename, "tcp://%s:%d", ip, ARDRONE_VIDEO_PORT);
        if (avformat_open_input(&pFormatCtx, filename, NULL, NULL) < 0) {
            CVDRONE_ERROR("avformat_open_input() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Retrieve and dump stream information
        avformat_find_stream_info(pFormatCtx, NULL);
        av_dump_format(pFormatCtx, 0, filename, 0);

        // Find the decoder for the video stream
        pCodecCtx = pFormatCtx->streams[0]->codec;
        AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        if (pCodec == NULL) {
            CVDRONE_ERROR("avcodec_find_decoder() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Open codec
        if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
            CVDRONE_ERROR("avcodec_open2() was failed. (%s, %d)\n", __FILE__, __LINE__);
            return 0;
        }

        // Allocate video frames and a buffer
        pFrame = avcodec_alloc_frame();
        pFrameBGR = avcodec_alloc_frame();
        bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height) * sizeof(uint8_t));

        // Assign appropriate parts of buffer to image planes in pFrameBGR
        avpicture_fill((AVPicture*)pFrameBGR, bufferBGR, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);

        // Convert it to BGR
        pConvertCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_BGR24, SWS_SPLINE, NULL, NULL, NULL);
    }
    // AR.Drone 1.0
    else {
        // Open the IP address and port
        if (!sockVideo.open(ip, ARDRONE_VIDEO_PORT)) {
            CVDRONE_ERROR("UDPSocket::open(port=%d) was failed. (%s, %d)\n", ARDRONE_VIDEO_PORT, __FILE__, __LINE__);
            return 0;
        }

        // Set codec
        //pCodecCtx = avcodec_alloc_context();
        pCodecCtx=avcodec_alloc_context3(NULL);
        pCodecCtx->width = 320;
        pCodecCtx->height = 240;

        // Allocate a buffer
        bufferBGR = (uint8_t*)av_mallocz(avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height));
    }

    // Allocate an IplImage
    img = cvCreateImage(cvSize(pCodecCtx->width, (pCodecCtx->height == 368) ? 360 : pCodecCtx->height), IPL_DEPTH_8U, 3);
    if (!img) {
        CVDRONE_ERROR("cvCreateImage() was failed. (%s, %d)\n", __FILE__, __LINE__);
        return 0;
    }

    // Clear the image
    cvZero(img);

    // Create a mutex
    mutexVideo = new pthread_mutex_t;
    pthread_mutex_init(mutexVideo, NULL);

    // Create a thread
    threadVideo = new pthread_t;
    if (pthread_create(threadVideo, NULL, runVideo, this) != 0) {
        CVDRONE_ERROR("pthread_create() was failed. (%s, %d)\n", __FILE__, __LINE__);
        return 0;
    }

    return 1;
}
Example #18
/***********************
* DecodePacket
*	Decodes a packet
************************/
int H263Decoder1996::DecodePacket(BYTE *in,DWORD len,int lost,int last)
{
	int ret = 1;

	//Check not empty last packet
	if (len)
	{
		// Check that the minimum header fits
		if(len<4)
			return Error("Received short packet [%d]\n",len);

		//Get header
		H263HeadersBasic* headers = H263HeadersBasic::CreateHeaders(in[0]);

		//Parse it
		DWORD parsed = headers->Parse(in,len);

		//Check
		if (!parsed)
			return Error("Error parsing H263 RFC 2190 packet\n");

		// If the source format has changed
		if (!src)
		{
			// Store the format
			src = headers->src;
		} else if(src!=headers->src) {
			Log("-Source format changed, reopening codec [%d,%d]\n",src,headers->src);

			// Close the codec
			avcodec_close(ctx);

			// And reopen it
			avcodec_open2(ctx, codec, NULL);

			// Store the new format
			src = headers->src;

			// Drop the partial buffer
			bufLen = 0;
		}

		// Advance the pointer past the header
		in+=parsed;
		len-=parsed;

		// Zero the leading bits that belong to the previous fragment
		in[0] &= 0xff >> headers->sbits;

		// And the trailing bits that belong to the next one
		if (len>0)
			in[len-1] &= 0xff << headers->ebits;

		// If the first byte straddles a fragment boundary
		if(headers->sbits!=0 && bufLen>0)
		{
			// OR the missing bits into the last buffered byte
			buffer[bufLen-1] |= in[0];

			// And advance
			in++;
			len--;
		}

		//Free headers
		delete(headers);

		//Make sure it fits
		if (len<0 || bufLen+len+FF_INPUT_BUFFER_PADDING_SIZE>bufSize)
			return Error("Wrong size of packet [%d,%d]\n",bufLen,len);

		//Copy
		memcpy(buffer+bufLen,in,len);

		//Increase the length
		bufLen+=len;
	}

	//If it is the last packet of the frame
	if(last)
	{
		//Zero the padding at the end
		memset(buffer+bufLen,0,FF_INPUT_BUFFER_PADDING_SIZE);

		//Decode
		ret = Decode(buffer,bufLen);
		//And reset the buffer
		bufLen=0;
	}
	}
	//Return
	return ret;
}
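The sbits/ebits masking above is the core of RFC 2190 depacketization: an H.263 frame may be fragmented mid-byte, so the first partial byte of a fragment must be OR-ed into the last byte of the previous one before decoding. A standalone sketch of that stitching step (the function and parameter names here are hypothetical):

#include <stdint.h>
#include <string.h>

// Append a byte-misaligned fragment to an assembly buffer. The first
// 'sbits' bits of in[0] belong to the previous fragment's last byte.
static void stitch_fragment(uint8_t *buf, int *bufLen,
                            uint8_t *in, int len, int sbits)
{
    in[0] &= 0xff >> sbits;            // keep only this fragment's bits
    if (sbits != 0 && *bufLen > 0) {
        buf[*bufLen - 1] |= in[0];     // merge the shared byte
        in++;
        len--;
    }
    memcpy(buf + *bufLen, in, len);
    *bufLen += len;
}

Example #19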
int FfmpegCamera::OpenFfmpeg() {

    Debug ( 2, "OpenFfmpeg called." );

    mOpenStart = time(NULL);
    mIsOpening = true;

    // Open the input, not necessarily a file
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_open_input_file" );
    if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
#else
    Debug ( 1, "Calling avformat_open_input" );

    mFormatContext = avformat_alloc_context( );
    mFormatContext->interrupt_callback.callback = FfmpegInterruptCallback;
    mFormatContext->interrupt_callback.opaque = this;

    if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, NULL ) !=0 )
#endif
    {
        mIsOpening = false;
        Error( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );
        return -1;
    }

    mIsOpening = false;
    Debug ( 1, "Opened input" );

    // Locate stream info from avformat_open_input
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_find_stream_info" );
    if ( av_find_stream_info( mFormatContext ) < 0 )
#else
    Debug ( 1, "Calling avformat_find_stream_info" );
    if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
#endif
        Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
    
    Debug ( 1, "Got stream info" );

    // Find first video stream present
    mVideoStreamId = -1;
    for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
    {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
        if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
        if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
        {
            mVideoStreamId = i;
            break;
        }
    }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mPath.c_str() );

    Debug ( 1, "Found video stream" );

    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Try and get the codec from the codec context
    if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Found decoder" );

    // Open the codec
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 7, 0)
    Debug ( 1, "Calling avcodec_open" );
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    Debug ( 1, "Calling avcodec_open2" );
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Opened codec" );

    // Allocate space for the native video frame
    mRawFrame = avcodec_alloc_frame();

    // Allocate space for the converted video frame
    mFrame = avcodec_alloc_frame();
    
    if(mRawFrame == NULL || mFrame == NULL)
        Fatal( "Unable to allocate frame for %s", mPath.c_str() );

    Debug ( 1, "Allocated frames" );
    
    int pSize = avpicture_get_size( imagePixFormat, width, height );
    if( (unsigned int)pSize != imagesize) {
        Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
    }

    Debug ( 1, "Validated imagesize" );
    
#if HAVE_LIBSWSCALE
    Debug ( 1, "Calling sws_isSupportedInput" );
    if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
        Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
    }
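    // NOTE: the remainder of OpenFfmpeg() (the swscale context setup and the
    // final return) is truncated in this listing; a minimal close is assumed.
#endif // HAVE_LIBSWSCALE

    return 0;
}

Example #20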
bool VideoDecoder::InitCodec(const uint32_t width, const uint32_t height)
{
  if (codec_initialized_)
  {
    // TODO(mani-monaj): Maybe re-initialize
    return true;
  }

  try
  {
    ThrowOnCondition(width == 0 || height == 0, std::string("Invalid frame size:") +
                     boost::lexical_cast<std::string>(width) + " x " + boost::lexical_cast<std::string>(height));

    // Very first init
    avcodec_register_all();
    av_register_all();
    av_log_set_level(AV_LOG_QUIET);

    codec_ptr_ = avcodec_find_decoder(AV_CODEC_ID_H264);
    ThrowOnCondition(codec_ptr_ == NULL, "Codec H264 not found!");


    codec_ctx_ptr_ = avcodec_alloc_context3(codec_ptr_);
    codec_ctx_ptr_->pix_fmt = AV_PIX_FMT_YUV420P;
    codec_ctx_ptr_->skip_frame = AVDISCARD_DEFAULT;
    codec_ctx_ptr_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
    codec_ctx_ptr_->skip_loop_filter = AVDISCARD_DEFAULT;
    codec_ctx_ptr_->workaround_bugs = FF_BUG_AUTODETECT;
    codec_ctx_ptr_->codec_id = AV_CODEC_ID_H264;
    codec_ctx_ptr_->skip_idct = AVDISCARD_DEFAULT;
    codec_ctx_ptr_->width = width;
    codec_ctx_ptr_->height = height;

    ThrowOnCondition(
          avcodec_open2(codec_ctx_ptr_, codec_ptr_, NULL) < 0,
          "Can not open the decoder!");



    const uint32_t num_bytes = avpicture_get_size(PIX_FMT_RGB24, codec_ctx_ptr_->width, codec_ctx_ptr_->height);
    {
       frame_ptr_ = avcodec_alloc_frame();
       frame_rgb_ptr_ = avcodec_alloc_frame();

       ThrowOnCondition(!frame_ptr_ || !frame_rgb_ptr_, "Can not allocate memory for frames!");

       frame_rgb_raw_ptr_ = reinterpret_cast<uint8_t*>(av_malloc(num_bytes * sizeof(uint8_t)));
       ThrowOnCondition(frame_rgb_raw_ptr_ == NULL,
                        std::string("Can not allocate memory for the buffer: ") +
                        boost::lexical_cast<std::string>(num_bytes));
       ThrowOnCondition(0 == avpicture_fill(
                          reinterpret_cast<AVPicture*>(frame_rgb_ptr_), frame_rgb_raw_ptr_, PIX_FMT_RGB24,
                          codec_ctx_ptr_->width, codec_ctx_ptr_->height),
                        "Failed to initialize the picture data structure.");
    }
    av_init_packet(&packet_);
  }
  catch (const std::runtime_error& e)
  {
    ARSAL_PRINT(ARSAL_PRINT_ERROR, LOG_TAG, "%s", e.what());
    Cleanup();
    return false;
  }

  codec_initialized_ = true;
  first_iframe_recv_ = false;
  ARSAL_PRINT(ARSAL_PRINT_INFO, LOG_TAG, "H264 Codec is initialized!");
  return true;
}
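A decoder initialized this way would then be fed H.264 data roughly as follows, using the same pre-send/receive decode API as the rest of this example (encoded_buf and encoded_size are hypothetical inputs):

// Sketch: decode one encoded buffer into frame_ptr_.
packet_.data = encoded_buf;
packet_.size = encoded_size;

int got_frame = 0;
int used = avcodec_decode_video2(codec_ctx_ptr_, frame_ptr_, &got_frame, &packet_);
if (used >= 0 && got_frame)
{
  // sws_scale() would convert the YUV420P output into frame_rgb_ptr_ here.
}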
Example #21
int main(int argc, char *argv[])
{

    AVFormatContext *pFormatCtx = NULL;
    int i, videoStream;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVFrame *pFrameCropped;
    AVFrame *pFrameRGB;
    struct SwsContext * pSwsCtx;
    AVPacket packet;
    int frameFinished;
    int numBytes;
    int numBytesCroped;
    uint8_t *buffer;

    AVDictionary * p_options = NULL;
    AVInputFormat * p_in_fmt = NULL;

    pFile = fopen("screencap.out", "wb");
    if (pFile == NULL)
        return 0;

    // Register all formats and codecs
    av_register_all();
    avcodec_register_all();
    avdevice_register_all();

    av_dict_set(&p_options, "framerate", "60", 0);
    av_dict_set(&p_options, "video_size", "1920x1080", 0);
    av_dict_set(&p_options, "qscale", "1", 0);
    p_in_fmt = av_find_input_format("x11grab");

    // Open video file
    if (avformat_open_input(&pFormatCtx, ":0.0", p_in_fmt, &p_options) != 0)
    {
        printf("cannot open input file!\n");
        return -1; // Couldn't open file
    }

    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, ":0.0", 0);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
    {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, 0) < 0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = avcodec_alloc_frame();

    int crop_x = 0, crop_y = 0, crop_h = 1080, crop_w = 1920;
    pFrameCropped = avcodec_alloc_frame();

    if (pFrameCropped == NULL)
        return -1;

    // Allocate an AVFrame structure
    pFrameRGB = avcodec_alloc_frame();
    if (pFrameRGB == NULL)
        return -1;

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(AV_PIX_FMT_YUV420P, crop_w, crop_h);
    buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *) pFrameRGB, buffer, AV_PIX_FMT_YUV420P, crop_w, crop_h);

    pSwsCtx = sws_getContext(crop_w, crop_h, pCodecCtx->pix_fmt, crop_w, crop_h, AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR,
        NULL, NULL, NULL);

    if (pSwsCtx == NULL)
    {
        fprintf(stderr, "Cannot initialize the sws context\n");
        return -1;
    }

    // Read frames and save first five frames to disk
    i = 0;
    FILE* fp = fopen("encodec.mpg", "wb");
    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream)
        { // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if (frameFinished)
            {
                sws_scale(pSwsCtx, (const uint8_t * const *) pFrame->data, pFrame->linesize, 0, crop_h, pFrameRGB->data,
                    pFrameRGB->linesize);
                int y, x;
                /* Y */
                for (y = 0; y < crop_h; y++)
                {
                    for (x = 0; x < crop_w; x++)
                    {
                        //fwrite(pFrameRGB->data[0] + y * pFrameRGB->linesize[0] + x, sizeof(uint8_t), 1, fp);
                    }
                }
                /* Cb and Cr */
                for (y = 0; y < crop_h / 2; y++)
                {
                    for (x = 0; x < crop_w / 2; x++)
                    {
                        //fwrite(pFrameRGB->data[1] + y * pFrameRGB->linesize[1] + x, sizeof(uint8_t), 1, fp);
                        //fwrite(pFrameRGB->data[2] + y * pFrameRGB->linesize[2] + x, sizeof(uint8_t), 1, fp);
                    }
                }

                video_encode_example(pFrameRGB, fp);

                // Save the frame to disk
                if (++i >= 100)
                    break;
            }
        }

        av_free_packet(&packet);
    }

    fclose(fp);
    printf("Frames read %d\n", i);
    // Free the scaled frame and its buffer (named "RGB", but it holds YUV420P here)
    av_free(buffer);
    av_free(pFrameRGB);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    // Close file
    fclose(pFile);
    return 0;
}
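video_encode_example() is not shown in this listing. A plausible minimal implementation, modeled on FFmpeg's classic encoding example, would be the following (the MPEG-1 encoder choice and the fixed bitrate/framerate are assumptions, not taken from the original):

static void video_encode_example(AVFrame *frame, FILE *out)
{
    static AVCodecContext *c = NULL;
    static int pts = 0;

    // Lazily create the encoder on first use.
    if (!c)
    {
        AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_MPEG1VIDEO);
        c = avcodec_alloc_context3(codec);
        c->bit_rate = 4000000;                 // assumed target bitrate
        c->width = 1920;
        c->height = 1080;
        c->time_base = (AVRational){1, 60};    // matches the 60 fps grab above
        c->gop_size = 10;
        c->pix_fmt = AV_PIX_FMT_YUV420P;
        if (avcodec_open2(c, codec, NULL) < 0)
            return;
    }

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL; // the encoder allocates the packet payload
    pkt.size = 0;

    int got_output = 0;
    frame->pts = pts++;
    if (avcodec_encode_video2(c, &pkt, frame, &got_output) == 0 && got_output)
    {
        fwrite(pkt.data, 1, pkt.size, out);
        av_free_packet(&pkt);
    }
}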
Example #22
	VideoStream::VideoStream(const std::string& filename, unsigned int numFrameBuffered, GLenum minFilter, GLenum magFilter, GLenum sWrapping, GLenum tWrapping, int maxLevel)
	 : __ReadOnly_ComponentLayout(declareLayout(numFrameBuffered)), InputDevice(declareLayout(numFrameBuffered), "Reader"), idVideoStream(0), readFrameCount(0), timeStampFrameRate(1.0f), timeStampOffset(0), timeStampOfLastFrameRead(0), endReached(false),
	   pFormatCtx(NULL), pCodecCtx(NULL), pCodec(NULL), pFrame(NULL), pFrameRGB(NULL), buffer(NULL), pSWSCtx(NULL), idCurrentBufferForWritting(0)
	{
		#ifdef __USE_PBO__
			#ifdef __VIDEO_STREAM_VERBOSE__
				std::cout << "VideoStream::VideoStream - Using PBO for uploading data to the GPU." << std::endl;
			#endif
			pbo = NULL;
		#else
			#ifdef __VIDEO_STREAM_VERBOSE__
				std::cout << "VideoStream::VideoStream - Using standard method HdlTexture::write for uploading data to the GPU." << std::endl;
			#endif
		#endif

		int retCode = 0;

		// Open stream :
		//DEPRECATED IN libavformat : retCode = av_open_input_file(&pFormatCtx, filename.c_str(), NULL, 0, NULL)!=0);
		retCode = avformat_open_input(&pFormatCtx, filename.c_str(), NULL, NULL);

		if(retCode!=0)
			throw Exception("VideoStream::VideoStream - Failed to open stream (at av_open_input_file).", __FILE__, __LINE__);

		// Find stream information :
		//DEPRECATED : retCode = av_find_stream_info(pFormatCtx);
		retCode = avformat_find_stream_info(pFormatCtx, NULL);

		if(retCode<0)
			throw Exception("VideoStream::VideoStream - Failed to open stream (at avformat_find_stream_info).", __FILE__, __LINE__);

		// Walk through pFormatCtx->nb_streams to find a/the first video stream :
		for(idVideoStream=0; idVideoStream<pFormatCtx->nb_streams; idVideoStream++)
			//DEPRECATED : if(pFormatCtx->streams[idVideoStream]->codec->codec_type==CODEC_TYPE_VIDEO)
			if(pFormatCtx->streams[idVideoStream]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
				break;

		if(idVideoStream>=pFormatCtx->nb_streams)
			throw Exception("VideoStream::VideoStream - Failed to find video stream (at streams[idVideoStream]->codec->codec_type==CODEC_TYPE_VIDEO).", __FILE__, __LINE__);

		// Get a pointer to the codec context for the video stream :
		pCodecCtx = pFormatCtx->streams[idVideoStream]->codec;

		// Find the decoder for the video stream :
		pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

		if(pCodec==NULL)
			throw Exception("VideoStream::VideoStream - No suitable codec found (at avcodec_find_decoder).", __FILE__, __LINE__);

		// Open codec :
		//DEPRECATED : retCode = avcodec_open(pCodecCtx, pCodec);
		retCode = avcodec_open2(pCodecCtx, pCodec, NULL);

		if(retCode<0)
			throw Exception("VideoStream::VideoStream - Could not open codec (at avcodec_open2).", __FILE__, __LINE__);

		// Get the framerate :
		/*float timeUnit_sec = static_cast<float>(pCodecCtx->time_base.num)/static_cast<float>(pCodecCtx->time_base.den);
		frameRate = 1.0f/(pCodecCtx->timeUnit_sec;*/

		timeStampFrameRate = static_cast<float>(pFormatCtx->streams[idVideoStream]->time_base.den)/static_cast<float>(pFormatCtx->streams[idVideoStream]->time_base.num);

		// Get the duration :
		duration_sec = pFormatCtx->duration / AV_TIME_BASE;

		#ifdef __VIDEO_STREAM_VERBOSE__
			std::cout << "VideoStream::VideoStream" << std::endl;
			std::cout << "                         - Frame rate : " << timeStampFrameRate << " frames per second (for time stamps)" << std::endl;
			std::cout << "                         - Duration   : " << duration_sec << " second(s)" << std::endl;
		#endif

		// Allocate video frame :
		pFrame = avcodec_alloc_frame();

		// Allocate an AVFrame structure :
		pFrameRGB = avcodec_alloc_frame();

		if(pFrameRGB==NULL)
			throw Exception("VideoStream::VideoStream - Failed to open stream (at avcodec_alloc_frame).", __FILE__, __LINE__);

		// Determine required buffer size and allocate buffer :
		bufferSizeBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
		buffer = (uint8_t *)av_malloc(bufferSizeBytes*sizeof(uint8_t));

		#ifdef __VIDEO_STREAM_VERBOSE__
			std::cout << "VideoStream::VideoStream - Frame size : " << pCodecCtx->width << "x" << pCodecCtx->height << std::endl;
		#endif

		if(buffer==NULL)
			throw Exception("VideoStream::VideoStream - Unable to allocate video frame buffer.", __FILE__, __LINE__);

		// Assign appropriate parts of buffer to image planes in pFrameRGB (Note that pFrameRGB is an AVFrame, but AVFrame is a superset of AVPicture) :
		avpicture_fill( (AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);

		// Initialize libswscale :
		pSWSCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24, SWS_POINT, NULL, NULL, NULL);

		// Create format :
		HdlTextureFormat frameFormat(pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_BYTE, minFilter, magFilter, sWrapping, tWrapping, 0, maxLevel);

		// Create the texture :
		for(unsigned int i=0; i<numFrameBuffered; i++)
		{
			//old : addOutputPort("output" + to_string(i));
			textureBuffers.push_back( new HdlTexture(frameFormat) );

			// YOU MUST WRITE ONCE IN THE TEXTURE BEFORE USING PBO::copyToTexture ON IT.
			// We are also doing this to prevent reading from an empty (not-yet-allocated) texture.
			textureBuffers.back()->fill(0);

			// Set links :
			setTextureLink(textureBuffers.back(), i);
		}

		#ifdef __USE_PBO__
			// Create PBO for uploading data to the GPU :
			pbo = new HdlPBO(frameFormat, GL_PIXEL_UNPACK_BUFFER_ARB,GL_STREAM_DRAW_ARB);
		#endif

		// Finish by forcing read of first frame :
		readNextFrame();
	}
Example #23
QByteArray AVDecoder::WriteJPEG(AVCodecContext *pCodecCtx, AVFrame *pFrame, int width, int height)
{
    AVCodecContext *pOCodecCtx;
    AVCodec        *pOCodec;

    QByteArray data;

    pOCodec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);

    if (!pOCodec) {
        return data;
    }

    SwsContext *sws_ctx = sws_getContext(
                pCodecCtx->width, pCodecCtx->height,
                pCodecCtx->pix_fmt,
                width, height,
                AV_PIX_FMT_YUV420P, SWS_BICUBIC,
                NULL, NULL, NULL);

    if(!sws_ctx) {
        return data;
    }

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
    AVFrame *pFrameRGB = av_frame_alloc();
#else
    AVFrame *pFrameRGB = avcodec_alloc_frame();
#endif

    if(pFrameRGB == NULL) {
        sws_freeContext(sws_ctx);
        return data;
    }

    // detect ffmpeg (>= 100) or libav (< 100)
#if (LIBAVUTIL_VERSION_MICRO >= 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,63,100)) || \
    (LIBAVUTIL_VERSION_MICRO < 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54,6,0))
    int numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, width, height, 16);
#else
    int numBytes = avpicture_get_size(PIX_FMT_YUVJ420P, width, height);
#endif

    uint8_t *buffer = (uint8_t *)av_malloc(numBytes);

    if(!buffer) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        avcodec_free_frame(&pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    // detect ffmpeg (>= 100) or libav (< 100)
#if (LIBAVUTIL_VERSION_MICRO >= 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,63,100)) || \
    (LIBAVUTIL_VERSION_MICRO < 100 && LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(54,6,0))
    av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, buffer, AV_PIX_FMT_YUV420P, width, height, 1);
#else
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_YUVJ420P, width, height);
#endif

    sws_scale(
        sws_ctx,
        pFrame->data,
        pFrame->linesize,
        0,
        pCodecCtx->height,
        pFrameRGB->data,
        pFrameRGB->linesize
    );

    pOCodecCtx = avcodec_alloc_context3(pOCodec);

    if(pOCodecCtx == NULL) {
        av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        avcodec_free_frame(&pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    pOCodecCtx->bit_rate      = pCodecCtx->bit_rate;
    pOCodecCtx->width         = width;
    pOCodecCtx->height        = height;
    pOCodecCtx->pix_fmt       = AV_PIX_FMT_YUVJ420P;
    pOCodecCtx->color_range   = AVCOL_RANGE_JPEG;
    pOCodecCtx->codec_id      = AV_CODEC_ID_MJPEG;
    pOCodecCtx->codec_type    = AVMEDIA_TYPE_VIDEO;
    pOCodecCtx->time_base.num = pCodecCtx->time_base.num;
    pOCodecCtx->time_base.den = pCodecCtx->time_base.den;

    AVDictionary *opts = NULL;
    if(avcodec_open2(pOCodecCtx, pOCodec, &opts) < 0) {
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0)
        avcodec_free_context(&pOCodecCtx);
#else
        avcodec_close(pOCodecCtx);
        av_free(pOCodecCtx);
#endif
        av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
        av_frame_free(&pFrameRGB);
#else
        avcodec_free_frame(&pFrameRGB);
#endif
        sws_freeContext(sws_ctx);
        return data;
    }

    av_opt_set_int(pOCodecCtx, "lmin", pOCodecCtx->qmin * FF_QP2LAMBDA, 0);
    av_opt_set_int(pOCodecCtx, "lmax", pOCodecCtx->qmax * FF_QP2LAMBDA, 0);

    pOCodecCtx->mb_lmin        = pOCodecCtx->qmin * FF_QP2LAMBDA;
    pOCodecCtx->mb_lmax        = pOCodecCtx->qmax * FF_QP2LAMBDA;
    pOCodecCtx->flags          = CODEC_FLAG_QSCALE;
    pOCodecCtx->global_quality = pOCodecCtx->qmin * FF_QP2LAMBDA;

    pFrame->pts     = 1;
    pFrame->quality = pOCodecCtx->global_quality;

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    int gotPacket = 0;

    if (avcodec_encode_video2(pOCodecCtx, &pkt, pFrameRGB, &gotPacket) < 0 || !gotPacket)
        pkt.size = 0;

    QByteArray buffer2(reinterpret_cast<char *>(pkt.data), pkt.size);

#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,52,0)
        avcodec_free_context(&pOCodecCtx);
#else
        avcodec_close(pOCodecCtx);
        av_free(pOCodecCtx);
#endif
    av_free(buffer);
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55,28,1)
    av_frame_free(&pFrameRGB);
#else
    avcodec_free_frame(&pFrameRGB);
#endif
    av_free_packet(&pkt);
    sws_freeContext(sws_ctx);

    return buffer2;
}
Example #24
/**
    \fn saveAsJpg
    \brief save current image into filename, into jpg format
*/
bool  ADMImage::saveAsJpgInternal(const char *filename)
{

AVCodecContext   *context=NULL;   
AVFrame          *frame=NULL;
bool             result=false;
AVCodec          *codec=NULL;
int              r=0;
ADM_byteBuffer   byteBuffer;

    frame=av_frame_alloc();
    if(!frame)
    {
        printf("[saveAsJpg] Cannot allocate frame\n");
        goto  jpgCleanup;
    }

    codec=avcodec_find_encoder(AV_CODEC_ID_MJPEG);
    if(!codec)
    {
        printf("[saveAsJpg] Cannot allocate codec\n");
        goto  jpgCleanup;
    }

    context=avcodec_alloc_context3(codec);
    if(!context) 
    {
        printf("[saveAsJpg] Cannot allocate context\n");
        goto  jpgCleanup;
    }

    context->pix_fmt =AV_PIX_FMT_YUV420P;
    context->strict_std_compliance = -1;
    context->time_base.den=1;
    context->time_base.num=1;
    context->width=_width;
    context->height=_height;
    context->flags |= CODEC_FLAG_QSCALE;
    r=avcodec_open2(context, codec, NULL);
    if(r<0)
    {
        printf("[saveAsJpg] Cannot open codec\n");
        goto  jpgCleanup;
    }
    // Setup our image & stuff....
        frame->width=_width;
        frame->height=_height;
        frame->format=AV_PIX_FMT_YUV420P;

        // Note: the U and V planes are intentionally swapped here
        frame->linesize[0] = GetPitch(PLANAR_Y);
        frame->linesize[2] = GetPitch(PLANAR_U);
        frame->linesize[1] = GetPitch(PLANAR_V);

        frame->data[0] = GetWritePtr(PLANAR_Y);
        frame->data[2] = GetWritePtr(PLANAR_U);
        frame->data[1] = GetWritePtr(PLANAR_V);
    // Set the target quality and grab a temp buffer, then encode
      frame->quality = (int) floor (FF_QP2LAMBDA * 2 + 0.5);

      byteBuffer.setSize(_width*_height*4);

      AVPacket pkt;
      av_init_packet(&pkt);
      int gotSomething;
      pkt.size=_width*_height*4;
      pkt.data=byteBuffer.at(0);
      r=avcodec_encode_video2(context,&pkt,frame,&gotSomething);
      if(r || !gotSomething)
      {
            ADM_error("[jpeg] Error %d encoding video\n",r);
            goto  jpgCleanup;
      }
      
        // Ok now write our file...
        {
            FILE *f=ADM_fopen(filename,"wb");
            if(f)
            {
                fwrite(byteBuffer.at(0),pkt.size,1,f);
                fclose(f);
                result=true;
            }else
            {
                printf("[saveAsJpeg] Cannot open %s for writing!\n",filename);

            }
       }

// Cleanup
jpgCleanup:
    if(context)
    {
        avcodec_close (context);
        av_free (context);
        context=NULL;
    }

    if(frame)
    {
        av_frame_free(&frame);
        frame=NULL;
    }

    return result;
}
Example #25
File: image_writer.c Project: zeffy/mpv
static bool write_lavc(struct image_writer_ctx *ctx, mp_image_t *image, FILE *fp)
{
    bool success = false;
    AVFrame *pic = NULL;
    AVPacket pkt = {0};
    int got_output = 0;

    av_init_packet(&pkt);

    struct AVCodec *codec = avcodec_find_encoder(ctx->writer->lavc_codec);
    AVCodecContext *avctx = NULL;
    if (!codec)
        goto print_open_fail;
    avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        goto print_open_fail;

    avctx->time_base = AV_TIME_BASE_Q;
    avctx->width = image->w;
    avctx->height = image->h;
    avctx->pix_fmt = imgfmt2pixfmt(image->imgfmt);
    if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
        MP_ERR(ctx, "Image format %s not supported by lavc.\n",
               mp_imgfmt_to_name(image->imgfmt));
        goto error_exit;
    }
    if (ctx->writer->lavc_codec == AV_CODEC_ID_PNG) {
        avctx->compression_level = ctx->opts->png_compression;
        avctx->prediction_method = ctx->opts->png_filter;
    }

    if (avcodec_open2(avctx, codec, NULL) < 0) {
     print_open_fail:
        MP_ERR(ctx, "Could not open libavcodec encoder for saving images\n");
        goto error_exit;
    }

    pic = av_frame_alloc();
    if (!pic)
        goto error_exit;
    for (int n = 0; n < 4; n++) {
        pic->data[n] = image->planes[n];
        pic->linesize[n] = image->stride[n];
    }
    pic->format = avctx->pix_fmt;
    pic->width = avctx->width;
    pic->height = avctx->height;
    if (ctx->opts->tag_csp) {
        pic->color_primaries = mp_csp_prim_to_avcol_pri(image->params.primaries);
        pic->color_trc = mp_csp_trc_to_avcol_trc(image->params.gamma);
    }
    int ret = avcodec_encode_video2(avctx, &pkt, pic, &got_output);
    if (ret < 0)
        goto error_exit;

    fwrite(pkt.data, pkt.size, 1, fp);

    success = !!got_output;
error_exit:
    if (avctx)
        avcodec_close(avctx);
    av_free(avctx);
    av_frame_free(&pic);
    av_packet_unref(&pkt);
    return success;
}
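avcodec_encode_video2() as used here was later deprecated in favor of the send/receive API. Under newer FFmpeg the encode step would look roughly like this sketch (not mpv's actual code; the variable names reuse those above):

// Modern replacement for the avcodec_encode_video2() call.
int ret = avcodec_send_frame(avctx, pic);
if (ret >= 0) {
    ret = avcodec_receive_packet(avctx, &pkt);
    if (ret >= 0) {
        fwrite(pkt.data, pkt.size, 1, fp);
        got_output = 1;
    }
}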
Example #26
/**
    \fn saveAsPngInternal
    \brief save current image into filename in PNG format
*/
bool ADMImage::saveAsPngInternal(const char *filename)
{
    AVCodecContext *context=NULL;
    AVFrame *frame=NULL;
    AVCodec *codec=NULL;
    bool result=false;
    int sz=_width*_height*3, r=0;
    uint8_t *out=NULL;
    int xss[3], xds[3];
    uint8_t *xsp[3], *xdp[3];
    ADMColorScalerSimple converter(_width, _height, ADM_COLOR_YV12, ADM_COLOR_RGB24);
    ADM_byteBuffer byteBuffer;

    frame=av_frame_alloc();
    if(!frame)
    {
        ADM_error("Cannot allocate frame\n");
        goto __cleanup;
    }

    codec=avcodec_find_encoder(AV_CODEC_ID_PNG);
    if(!codec)
    {
        ADM_error("Cannot allocate codec\n");
        goto __cleanup;
    }

    context=avcodec_alloc_context3(codec);
    if(!context)
    {
        ADM_error("Cannot allocate context\n");
        goto __cleanup;
    }

    context->pix_fmt=AV_PIX_FMT_RGB24;
    context->strict_std_compliance = -1;
    context->time_base.den=1;
    context->time_base.num=1;
    context->width=_width;
    context->height=_height;

    r=avcodec_open2(context, codec, NULL);
    if(r<0)
    {
        ADM_error("Cannot combine codec and context\n");
        goto __cleanup;
    }

    // swap U & V planes first
    out=(uint8_t *)ADM_alloc(sz);
    xss[0] = this->GetPitch(PLANAR_Y);
    xss[1] = this->GetPitch(PLANAR_V);
    xss[2] = this->GetPitch(PLANAR_U);
    xsp[0] = this->GetReadPtr(PLANAR_Y);
    xsp[1] = this->GetReadPtr(PLANAR_V);
    xsp[2] = this->GetReadPtr(PLANAR_U);
    xds[0] = _width*3;
    xds[1] = xds[2] = 0;
    xdp[0] = out;
    xdp[1] = xdp[2] = NULL;

    // convert colorspace
    converter.convertPlanes(xss,xds,xsp,xdp);

    // setup AVFrame
    frame->width = _width;
    frame->height = _height;
    frame->format = AV_PIX_FMT_RGB24;

    frame->linesize[0] = _width*3;
    frame->linesize[1] = 0;
    frame->linesize[2] = 0;

    frame->data[0] = out;
    frame->data[1] = NULL;
    frame->data[2] = NULL;

    // Grab a temp buffer
    byteBuffer.setSize(sz);

    // Encode
    AVPacket pkt;
    av_init_packet(&pkt);
    int gotSomething;
    pkt.size=sz;
    pkt.data=byteBuffer.at(0);
    r=avcodec_encode_video2(context,&pkt,frame,&gotSomething);
    if(r || !gotSomething)
    {
        ADM_error("Error %d encoding image\n",r);
        goto __cleanup;
    }

    // Ok now write our file...
    {
        FILE *f=ADM_fopen(filename,"wb");
        if(f)
        {
            fwrite(byteBuffer.at(0),pkt.size,1,f);
            fclose(f);
            result=true;
        }else
        {
            ADM_error("Cannot open %s for writing!\n",filename);
        }
    }

__cleanup:
    // Cleanup
    if(out)
    {
        ADM_dealloc(out);
        out=NULL;
    }

    if(context)
    {
        avcodec_close(context);
        av_free(context);
        context=NULL;
    }

    if(frame)
    {
        av_frame_free(&frame);
        frame=NULL;
    }

    return result;
}
Example #27
status_t MediaPlayer::prepareVideo()
{
	__android_log_print(ANDROID_LOG_INFO, TAG, "prepareVideo\n");
	// Find the first video stream
	mVideoStreamIndex = -1;
	for (int i = 0; i < mMovieFile->nb_streams; i++) {
		if (mMovieFile->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			mVideoStreamIndex = i;
			break;
		}
	}
	
	if (mVideoStreamIndex == -1) {
		return INVALID_OPERATION;
	}
	
	AVStream* stream = mMovieFile->streams[mVideoStreamIndex];
	// Get a pointer to the codec context for the video stream
	AVCodecContext* codec_ctx = stream->codec;
	AVCodec* codec = avcodec_find_decoder(codec_ctx->codec_id);
	if (codec == NULL) {
		return INVALID_OPERATION;
	}
	
    AVDictionary * opts = NULL;
    av_dict_set(&opts, "threads", "auto", 0);
	// Open codec
	//if (avcodec_open(codec_ctx, codec) < 0) {
	if (avcodec_open2(codec_ctx, codec, &opts) < 0) {
		return INVALID_OPERATION;
	}
	
	mVideoWidth = codec_ctx->width;
	mVideoHeight = codec_ctx->height;
	mDuration =  mMovieFile->duration;
	
	mConvertCtx = sws_getContext(stream->codec->width,
								 stream->codec->height,
								 stream->codec->pix_fmt,
								 stream->codec->width,
								 stream->codec->height,
								 PIX_FMT_RGB565,
								 SWS_POINT,
								 NULL,
								 NULL,
								 NULL);

	if (mConvertCtx == NULL) {
		return INVALID_OPERATION;
	}

	void*		pixels;
    int         size; 
#if 1

    ///// &pixels is pointed to a Native bitmap. 
	if (Output::VideoDriver_getPixels(stream->codec->width,
									  stream->codec->height,
									  &pixels) != 0) {
		return INVALID_OPERATION;
	}
#else

    /* create temporary picture */
    size = avpicture_get_size(PIX_FMT_RGB565, stream->codec->width, stream->codec->height);
    pixels  = av_malloc(size);
    if (!pixels)
        return INVALID_OPERATION;

#endif

	mFrame = avcodec_alloc_frame();
	if (mFrame == NULL) {
		return INVALID_OPERATION;
	}
	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *) mFrame,
				   (uint8_t *) pixels,
				   PIX_FMT_RGB565,
				   stream->codec->width,
				   stream->codec->height);



	__android_log_print(ANDROID_LOG_INFO, TAG, "prepareVideo  DONE \n ");

	return NO_ERROR;
}
Example #28
bool VideoReaderUnit::OpenStreams(StreamSet* set) {
  // Setup FFMPEG.
  if (!ffmpeg_initialized_) {
    ffmpeg_initialized_ = true;
    av_register_all();
  }

  // Open video file.
  AVFormatContext* format_context = nullptr;
  if (avformat_open_input (&format_context, video_file_.c_str(), nullptr, nullptr) != 0) {
    LOG(ERROR) << "Could not open file: " << video_file_;
    return false;
  }

  if (avformat_find_stream_info(format_context, nullptr) < 0) {
    LOG(ERROR) << video_file_ << " is not a valid movie file.";
    return false;
  }

  // Get video stream index.
  video_stream_idx_ = -1;

  for (uint i = 0; i < format_context->nb_streams; ++i) {
    if (format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
      video_stream_idx_ = i;
      break;
    }
  }

  if (video_stream_idx_ < 0) {
    LOG(ERROR) << "Could not find video stream in " << video_file_;
    return false;
  }

  AVCodecContext* codec_context = format_context->streams[video_stream_idx_]->codec;
  AVCodec* codec = avcodec_find_decoder (codec_context->codec_id);

  if (!codec) {
    LOG(ERROR) << "Unsupported codec for file " << video_file_;
    return false;
  }

  if (avcodec_open2(codec_context, codec, nullptr) < 0) {
    LOG(ERROR) << "Could not open codec";
    return false;
  }

  AVStream* av_stream = format_context->streams[video_stream_idx_];
  fps_ = av_q2d(av_stream->avg_frame_rate);
  LOG(INFO) << "Frame rate: " << fps_;

  // fps_ != fps_ is true only when fps_ is NaN, i.e. av_q2d could not
  // determine the frame rate; default to 24 in that case.
  if (fps_ != fps_) {
    LOG(WARNING) << "Can't figure out frame rate - Defaulting to 24";
    fps_ = 24;
  }

  // Limit to meaningful values. Sometimes avg_frame_rate.* holds garbage.
  if (fps_ < 5) {
    LOG(WARNING) << "Capping video fps_ of " << fps_ << " to " << 5;
    fps_ = 5;
  }

  if (fps_ > 60) {
    LOG(WARNING) << "Capping video fps_ of " << fps_ << " to " << 60;
    fps_ = 60;
  }

  bytes_per_pixel_ = PixelFormatToNumChannels(options_.pixel_format);
  frame_width_ = codec_context->width;
  frame_height_ = codec_context->height;

  switch (options_.downscale) {
    case VideoReaderOptions::DOWNSCALE_NONE:
      output_width_ = frame_width_;
      output_height_ = frame_height_;
      downscale_factor_ = 1.0f;
      break;

    case VideoReaderOptions::DOWNSCALE_BY_FACTOR:
      if (options_.downscale_factor > 1.0f) {
        LOG(ERROR) << "Only downscaling is supported.";
        return false;
      }

      downscale_factor_ = options_.downscale_factor;
      output_width_ = std::ceil(frame_width_ * downscale_factor_);
      output_height_ = std::ceil(frame_height_ * downscale_factor_);
      break;

    case VideoReaderOptions::DOWNSCALE_TO_MIN_SIZE:
      downscale_factor_ = std::max(options_.downscale_size * (1.0f / frame_width_),
                                   options_.downscale_size * (1.0f / frame_height_));
      // Cap to downscaling.
      downscale_factor_ = std::min(1.0f, downscale_factor_);
      output_width_ = std::ceil(frame_width_ * downscale_factor_);
      output_height_ = std::ceil(frame_height_ * downscale_factor_);
      break;

    case VideoReaderOptions::DOWNSCALE_TO_MAX_SIZE:
      downscale_factor_ = std::min(options_.downscale_size * (1.0f / frame_width_),
                                   options_.downscale_size * (1.0f / frame_height_));
      // Cap to downscaling.
      downscale_factor_ = std::min(1.0f, downscale_factor_);
      output_width_ = std::ceil(frame_width_ * downscale_factor_);
      output_height_ = std::ceil(frame_height_ * downscale_factor_);
      break;
  }

  if (downscale_factor_ != 1.0) {
    LOG(INFO) << "Downscaling by factor " << downscale_factor_
              << " from " << frame_width_ << ", " << frame_height_
              << " to " << output_width_ << ", " << output_height_;
  }

  // Force even resolutions.
  output_width_ += output_width_ % 2;
  output_width_step_ = output_width_ * bytes_per_pixel_;

  // Pad width_step to be a multiple of 4.
  if (output_width_step_ % 4 != 0) {
    output_width_step_ += 4 - output_width_step_ % 4;
    DCHECK_EQ(output_width_step_ % 4, 0);
  }

  // Save some infos for later use.
  codec_ = codec;
  codec_context_ = codec_context;
  format_context_ = format_context;

  // Allocate temporary structures.
  frame_yuv_ = av_frame_alloc();
  frame_bgr_ = av_frame_alloc();

  if (!frame_yuv_ || !frame_bgr_) {
    LOG(ERROR) << "Could not allocate AVFrames.";
    return false;
  }

  int pix_fmt;
  switch (options_.pixel_format) {
    case PIXEL_FORMAT_RGB24:
      pix_fmt = PIX_FMT_RGB24;
      break;
    case PIXEL_FORMAT_BGR24:
      pix_fmt = PIX_FMT_BGR24;
      break;
    case PIXEL_FORMAT_ARGB32:
      pix_fmt = PIX_FMT_ARGB;
      break;
    case PIXEL_FORMAT_ABGR32:
      pix_fmt = PIX_FMT_ABGR;
      break;
    case PIXEL_FORMAT_RGBA32:
      pix_fmt = PIX_FMT_RGBA;
      break;
    case PIXEL_FORMAT_BGRA32:
      pix_fmt = PIX_FMT_BGRA;
      break;
    case PIXEL_FORMAT_YUV422:
      pix_fmt = PIX_FMT_YUYV422;
      break;
    case PIXEL_FORMAT_LUMINANCE:
      pix_fmt = PIX_FMT_GRAY8;
      break;
    default:
      LOG(ERROR) << "Unsupported pixel format.";
      return false;
  }

  uint8_t* bgr_buffer = (uint8_t*)av_malloc(avpicture_get_size((::PixelFormat)pix_fmt,
                                                               output_width_,
                                                               output_height_));

  avpicture_fill((AVPicture*)frame_bgr_,
                 bgr_buffer,
                 (::PixelFormat)pix_fmt,
                 output_width_,
                 output_height_);

  // Setup SwsContext for color conversion.
  sws_context_ = sws_getContext(frame_width_,
                                frame_height_,
                                codec_context_->pix_fmt,
                                output_width_,
                                output_height_,
                                (::PixelFormat)pix_fmt,
                                SWS_BICUBIC,
                                nullptr,
                                nullptr,
                                nullptr);
  if(!sws_context_) {
    LOG(ERROR) << "Could not setup SwsContext for color conversion.";
    return false;
  }

  current_pos_ = 0;
  used_as_root_ = set->empty();
  VideoStream* vid_stream = new VideoStream(output_width_,
                                            output_height_,
                                            output_width_step_,
                                            fps_,
                                            options_.pixel_format,
                                            options_.stream_name);

  vid_stream->set_original_width(frame_width_);
  vid_stream->set_original_height(frame_height_);

  set->push_back(shared_ptr<VideoStream>(vid_stream));
  frame_num_ = 0;
  return true;
}
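The width adjustments near the end of OpenStreams() encode two invariants: an even output width and a row stride padded to a multiple of 4 bytes. The same arithmetic as a small standalone helper (hypothetical, not part of the original unit):

// Round width up to even, then pad the byte stride to a 4-byte boundary,
// mirroring the adjustments made in OpenStreams() above.
static int AlignedWidthStep(int width, int bytes_per_pixel) {
  width += width % 2;
  int step = width * bytes_per_pixel;
  if (step % 4 != 0)
    step += 4 - step % 4;
  return step;
}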
Example #29
File: probe.c Project: erkl-old/probe
int main(int argc, const char *argv[]) {
  char buf[256];
  unsigned int i;
  int r;

  AVFormatContext *format_ctx;
  AVStream *stream;
  AVCodecContext *codec_ctx;
  AVCodec *codec;

  /* Initialize libav. */
  av_register_all();
  av_log_set_level(AV_LOG_QUIET);

  /* Open stdin. */
  format_ctx = avformat_alloc_context();
  if (format_ctx == NULL)
    return 1;

  r = avformat_open_input(&format_ctx, "pipe:0", NULL, NULL);
  if (r < 0)
    return 1;

  r = avformat_find_stream_info(format_ctx, NULL);
  if (r < 0)
    return 1;

  printf("{\n  \"streams\": [");

  /* Dump information for each stream. */
  for (i = 0; i < format_ctx->nb_streams; i++) {
    stream = format_ctx->streams[i];
    codec_ctx = stream->codec;

    codec = avcodec_find_decoder(codec_ctx->codec_id);
    if (codec == NULL)
      return -1;

    r = avcodec_open2(codec_ctx, codec, NULL);
    if (r < 0)
      return -1;

    /* Open the stream's JSON object. */
    printf(i == 0 ? "\n    {" : ",\n    {");
    printf("\n      \"index\": %d", stream->index);

    switch (codec_ctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:    printf(",\n      \"type\": \"video\"");    break;
    case AVMEDIA_TYPE_AUDIO:    printf(",\n      \"type\": \"audio\"");    break;
    case AVMEDIA_TYPE_SUBTITLE: printf(",\n      \"type\": \"subtitle\""); break;
    default:                    printf(",\n      \"type\": \"unknown\"");
    }

    printf(",\n      \"codec\": \"%s\"", codec->name);
    printf(",\n      \"start_time\": %f", stream->start_time > 0 ? stream->start_time * av_q2d(stream->time_base) : 0.0);
    printf(",\n      \"duration\": %f", stream->duration > 0 ? stream->duration * av_q2d(stream->time_base) : 0.0);

    /* Video-specific fields. */
    if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
      printf(",\n      \"width\": %d", codec_ctx->width);
      printf(",\n      \"height\": %d", codec_ctx->height);
      printf(",\n      \"bit_rate\": %d", codec_ctx->bit_rate);
      printf(",\n      \"frames\": %lld", stream->nb_frames);
      printf(",\n      \"frame_rate\": %f", stream->nb_frames > 0 ? av_q2d(stream->avg_frame_rate) : 0.0);

      if (codec_ctx->pix_fmt != -1) {
        printf(",\n      \"pixel_format\": \"%s\"", av_get_pix_fmt_name(codec_ctx->pix_fmt));
      } else {
        printf(",\n      \"pixel_format\": \"unknown\"");
      }
    }

    /* Audio-specific fields. */
    if (codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
      printf(",\n      \"channels\": %d", codec_ctx->channels);

      if (codec_ctx->channel_layout != -1) {
        av_get_channel_layout_string(&buf[0], 256, codec_ctx->channels, codec_ctx->channel_layout);
        printf(",\n      \"channel_layout\": \"%s\"", buf);
      } else {
        printf(",\n      \"channel_layout\": \"unknown\"");
      }

      printf(",\n      \"bit_rate\": %d", codec_ctx->bit_rate);
      printf(",\n      \"sample_rate\": %d", codec_ctx->sample_rate);

      if (codec_ctx->sample_fmt != -1) {
        printf(",\n      \"sample_format\": \"%s\"", av_get_sample_fmt_name(codec_ctx->sample_fmt));
      }
    }

    /* Close the stream's JSON object. */
    printf("\n    }");
  }

  printf("\n  ]\n}\n");

  /* Close the input. */
  avformat_close_input(&format_ctx);

  return 0;
}
Example #30
File: audio_muxer.c Project: jeljeli/gpac
int dc_ffmpeg_audio_muxer_open(AudioOutputFile *audio_output_file, char *filename)
{
	AVStream *audio_stream;
	AVOutputFormat *output_fmt;
	AVDictionary *opts = NULL;

	AVCodecContext *audio_codec_ctx = audio_output_file->codec_ctx;
	audio_output_file->av_fmt_ctx = NULL;

//	strcpy(audio_output_file->filename, audio_data_conf->filename);
//	audio_output_file->abr = audio_data_conf->bitrate;
//	audio_output_file->asr = audio_data_conf->samplerate;
//	audio_output_file->ach = audio_data_conf->channels;
//	strcpy(audio_output_file->codec, audio_data_conf->codec);

	/* Find output format */
	output_fmt = av_guess_format(NULL, filename, NULL);
	if (!output_fmt) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot find suitable output format\n"));
		return -1;
	}

	audio_output_file->av_fmt_ctx = avformat_alloc_context();
	if (!audio_output_file->av_fmt_ctx) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot allocate memory for pOutVideoFormatCtx\n"));
		return -1;
	}

	audio_output_file->av_fmt_ctx->oformat = output_fmt;
	strcpy(audio_output_file->av_fmt_ctx->filename, filename);

	/* Open the output file */
	if (!(output_fmt->flags & AVFMT_NOFILE)) {
		if (avio_open(&audio_output_file->av_fmt_ctx->pb, filename, URL_WRONLY) < 0) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot open '%s'\n", filename));
			return -1;
		}
	}

	audio_stream = avformat_new_stream(audio_output_file->av_fmt_ctx, audio_output_file->codec);
	if (!audio_stream) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot create output audio stream\n"));
		return -1;
	}

	audio_stream->codec->codec_id = audio_output_file->codec->id;
	audio_stream->codec->codec_type = AVMEDIA_TYPE_AUDIO;
	audio_stream->codec->bit_rate = audio_codec_ctx->bit_rate;//audio_output_file->audio_data_conf->bitrate;
	audio_stream->codec->sample_rate = audio_codec_ctx->sample_rate;//audio_output_file->audio_data_conf->samplerate;
	audio_stream->codec->channels = audio_codec_ctx->channels;//audio_output_file->audio_data_conf->channels;
	assert(audio_codec_ctx->codec->sample_fmts);
	audio_stream->codec->sample_fmt = audio_codec_ctx->codec->sample_fmts[0];

//	if (audio_output_file->av_fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
//		audio_output_file->codec_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

	//video_stream->codec = video_output_file->codec_ctx;

	/* open the audio codec */
	av_dict_set(&opts, "strict", "experimental", 0);
	if (avcodec_open2(audio_stream->codec, audio_output_file->codec, &opts) < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Cannot open output audio codec\n"));
		av_dict_free(&opts);
		return -1;
	}
	av_dict_free(&opts);

	avformat_write_header(audio_output_file->av_fmt_ctx, NULL);

	return 0;
}