Code Example #1
File: ffmpeg.c Project: Crashtown/flussonic-old
void loop() {
  int64_t dts_shift = AV_NOPTS_VALUE;

  uint32_t buf_size = 10240;
  char *buf = (char *)malloc(buf_size);
  while(1) {
    uint32_t len;
    int idx = 0;
    int read_bytes = 0;
    if((read_bytes = read1(in_fd, &len, 4)) != 4) {
      if(read_bytes == 0) {
        _exit(0);
      }
      error("Can't read input length: %d", read_bytes);
    }
    len = ntohl(len);
    if(len > buf_size) {
      buf_size = len;
      free(buf);
      buf = (char *)malloc(buf_size);
    }

    if((read_bytes = read1(in_fd, buf, len)) != len) error("Can't read %d bytes from input: %d", len, read_bytes);
    int version = 0;
    ei_decode_version(buf, &idx, &version);
    int command_idx = idx;

    int arity = 0;
    if(ei_decode_tuple_header(buf, &idx, &arity) == -1) error("must pass tuple");


    int t = 0;
    int size = 0;
    ei_get_type(buf, &idx, &t, &size);
    if(t != ERL_ATOM_EXT) error("first element must be atom");
    char command[MAXATOMLEN+1];
    ei_decode_atom(buf, &idx, command); arity--;


    if(!strcmp(command, "ping")) {
      pong();
      continue;
    }
    if(!strcmp(command, "exit")) {
      return;
    }
    if(!strcmp(command, "init_input")) {
      if(arity != 3) error("Must provide 3 arguments to init_input command");
      char content[1024];
      char codec[1024];
      if(ei_decode_atom(buf, &idx, content) == -1) error("Must provide content as an atom");
      if(ei_decode_atom(buf, &idx, codec) == -1) error("Must provide codec as an atom");

      int decoder_config_len = 0;
      ei_get_type(buf, &idx, &t, &decoder_config_len);
      if(t != ERL_BINARY_EXT) error("decoder config must be a binary");
      uint8_t *decoder_config = av_mallocz(decoder_config_len + FF_INPUT_BUFFER_PADDING_SIZE);
      long bin_len = 0;
      ei_decode_binary(buf, &idx, decoder_config, &bin_len);

      Track *t = NULL;
      if(!strcmp(content, "video")) {
        t = &input_video;
      } else if(!strcmp(content, "audio")) {
        t = &input_audio;
      } else {
        error("Unknown media content: '%s'", content);
      }
      if(t->codec) error("Double initialization of media '%s'", content);

      t->codec = avcodec_find_decoder_by_name(codec);
      t->ctx = avcodec_alloc_context3(t->codec);
      if(!t->codec || !t->ctx) 
        error("Unknown %s decoder '%s'", content, codec);
      t->ctx->time_base = (AVRational){1, 90};
      t->ctx->extradata_size = decoder_config_len;
      t->ctx->extradata = decoder_config;
      if(avcodec_open2(t->ctx, t->codec, NULL) < 0) 
        error("failed to allocate %s decoder", content);

      reply_atom("ready");
      continue;
    }

    if(!strcmp(command, "init_output")) {
      if(arity != 4) error("Must provide 4 arguments to init_output command");
      char content[1024];
      char codec[1024];
      if(ei_decode_atom(buf, &idx, content) == -1) error("Must provide content as an atom");
      if(ei_decode_atom(buf, &idx, codec) == -1) error("Must provide codec as an atom");

      long track_id = -1;
      if(ei_decode_long(buf, &idx, &track_id) == -1) error("track_id must be integer");
      if(track_id < 1 || track_id > MAX_OUTPUT_TRACKS+1) error("track_id must be from 1 to %d", MAX_OUTPUT_TRACKS+1);
      track_id--;

      Track *t = NULL;
      if(!strcmp(content, "audio")) {
        t = &output_audio[out_audio_count++];
      } else if(!strcmp(content, "video")) {
        t = &output_video[out_video_count++];
      } else {
        error("invalid_content '%s'", content);
      }
      t->track_id = track_id;

      t->codec = avcodec_find_encoder_by_name(codec);
      t->ctx = avcodec_alloc_context3(t->codec);
      if(!t->codec || !t->ctx) error("Unknown encoder '%s'", codec);

      AVCodecContext* ctx = t->ctx;
      AVDictionary *opts = NULL;


      int options_count = 0;
      if(ei_decode_list_header(buf, &idx, &options_count) < 0) error("options must be a proplist");
      while(options_count > 0) {
        int arity1 = 0;

        int t,s;
        ei_get_type(buf, &idx, &t, &s);
        if(t == ERL_NIL_EXT) {
          ei_skip_term(buf, &idx);
          break;
        }

        if(ei_decode_tuple_header(buf, &idx, &arity1) < 0) error("options must be a proper proplist");
        if(arity1 != 2) error("tuples in options proplist must be arity 2");

        char key[MAXATOMLEN];
        if(ei_decode_atom(buf, &idx, key) == 0) {

          if(!strcmp(key, "width")) {
            long w = 0;
            if(ei_decode_long(buf, &idx, &w) < 0) error("width must be integer");
            ctx->width = w;
            continue;
          }

          if(!strcmp(key, "height")) {
            long h = 0;
            if(ei_decode_long(buf, &idx, &h) < 0) error("height must be integer");
            ctx->height = h;
            continue;
          }

          if(!strcmp(key, "bitrate")) {
            long b = 0;
            if(ei_decode_long(buf, &idx, &b) < 0) error("bitrate must be integer");
            ctx->bit_rate = b;
            continue;
          }

          if(!strcmp(key, "sample_rate")) {
            long sr = 0;
            if(ei_decode_long(buf, &idx, &sr) < 0) error("sample_rate must be integer");
            ctx->sample_rate = sr;
            continue;
          }

          if(!strcmp(key, "channels")) {
            long ch = 0;
            if(ei_decode_long(buf, &idx, &ch) < 0) error("channels must be integer");
            ctx->channels = ch;
            continue;
          }

          fprintf(stderr, "Unknown key: '%s'\r\n", key);
          ei_skip_term(buf, &idx);
          continue;
        } else if(ei_decode_string(buf, &idx, key) == 0) {
          char value[MAXATOMLEN];
          if(ei_decode_string(buf, &idx, value) < 0) error("key-value must be strings");
          av_dict_set(&opts, key, value, 0);
        } else {
          error("Invalid options proplist");
        }
      }

      if(!strcmp(content, "video")) {
        ctx->pix_fmt = AV_PIX_FMT_YUV420P;
      }
      if(!strcmp(content, "audio")) {
        ctx->sample_fmt = AV_SAMPLE_FMT_S16;
        ctx->profile = FF_PROFILE_AAC_MAIN;
      }
      ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
      ctx->time_base = (AVRational){1,90};

      if(avcodec_open2(ctx, t->codec, &opts) < 0) error("failed to allocate video encoder");

      AVPacket config;
      av_init_packet(&config);  // initialize all packet fields before use
      config.dts = config.pts = 0;
      config.flags = CODEC_FLAG_GLOBAL_HEADER;
      config.data = ctx->extradata;
      config.size = ctx->extradata_size;
      reply_avframe(&config, t->codec);      
      continue;
    }

    if(!strcmp(command, "video_frame")) {
      idx = command_idx;
      struct video_frame *fr = read_video_frame(buf, &idx);

      AVPacket packet;
      av_new_packet(&packet, fr->body.size);
      memcpy(packet.data, fr->body.data, fr->body.size);
      packet.size = fr->body.size;
      packet.dts = fr->dts*90;
      packet.pts = fr->pts*90;
      packet.stream_index = fr->track_id;

      // if(packet_size != pkt_size) error("internal error in reading frame body");

      if(fr->content == frame_content_audio) {
        if(!input_audio.ctx) error("input audio uninitialized");

        AVFrame *decoded_frame = avcodec_alloc_frame();
        int got_output = 0;
        int ret = avcodec_decode_audio4(input_audio.ctx, decoded_frame, &got_output, &packet);
        if(got_output) {
          reply_atom("ok");
        } else {
          error("Got: %d, %d\r\n", ret, got_output);
        }
        free(fr);
        continue;
      }

      if(fr->content == frame_content_video) {
        if(!input_video.ctx) error("input video uninitialized");
        AVFrame *decoded_frame = avcodec_alloc_frame();
        int could_decode = 0;
        int ret = avcodec_decode_video2(input_video.ctx, decoded_frame, &could_decode, &packet);
        if(ret < 0) {
          error("failed to decode video");
        }
        if(could_decode) {
          decoded_frame->pts = av_frame_get_best_effort_timestamp(decoded_frame);
          int sent_config = 0;

          AVPacket pkt;
          av_init_packet(&pkt);
          pkt.data = NULL;
          pkt.size = 0;

          int could_encode = 0;

          if(out_video_count <= 0) error("trying to transcode uninitialized video");
          if(avcodec_encode_video2(output_video[0].ctx, &pkt, decoded_frame, &could_encode) != 0) 
            error("Failed to encode h264");

          if(could_encode) {
            if(dts_shift == AV_NOPTS_VALUE) {
              dts_shift = -pkt.dts;
            }
            pkt.dts += dts_shift;
            reply_avframe(&pkt, output_video[0].codec);
          } else if(!sent_config) {
            reply_atom("ok");
          }
          free(fr);
          continue;
        } else {
          reply_atom("ok");
          free(fr);
          continue;
        }
      }

      error("Unknown content");
    }

    // AVCodecContext
    // AVPacket
    // AVFrame



    char *s = (char *)malloc(1024);
    ei_s_print_term(&s, buf, &command_idx);
    error("Unknown command: %s", s);
  }
}
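Stripped of the Erlang ei plumbing, the video_frame branch above boils down to a decode-then-encode hop. A minimal sketch of that hop, with hypothetical in_ctx, out_ctx and in_pkt standing in for the example's track contexts and packet:

/* Hedged sketch of example #1's transcode hop, not the project's literal code */
AVFrame *frame = avcodec_alloc_frame();
int got_frame = 0, got_pkt = 0;

if(avcodec_decode_video2(in_ctx, frame, &got_frame, &in_pkt) < 0)
  error("failed to decode video");

if(got_frame) {
  AVPacket out_pkt;
  av_init_packet(&out_pkt);
  out_pkt.data = NULL;   /* let the encoder allocate the payload */
  out_pkt.size = 0;

  frame->pts = av_frame_get_best_effort_timestamp(frame);
  if(avcodec_encode_video2(out_ctx, &out_pkt, frame, &got_pkt) == 0 && got_pkt) {
    /* hand out_pkt to the caller, then release it */
    av_free_packet(&out_pkt);
  }
}
av_free(frame);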
Code Example #2
File: logo.cpp Project: crubia/wt
int Logo::Load(const char* fileName)
{
	AVFormatContext *fctx = NULL;
	AVCodecContext *ctx = NULL;
	AVCodec *codec = NULL;
	AVFrame *logoRGB = NULL;
	AVFrame* logo = NULL;
	SwsContext *sws = NULL;
	AVPacket packet;
	int res = 0;
	int gotLogo = 0;
	int numpixels = 0;
	int size = 0;

	//Create context from file
	if(avformat_open_input(&fctx, fileName, NULL, NULL)<0)
		return Error("Couldn't open the logo image file [%s]\n",fileName);

	//Check it's ok
	if(avformat_find_stream_info(fctx,NULL)<0)
	{
		//Set error
		res = Error("Couldn't find stream information for the logo image file...\n");
		//Free resources
		goto end;
	}

	//Get codec from file format
	if (!(ctx = fctx->streams[0]->codec))
	{
		//Set error
		res = Error("Context codec not valid\n");
		//Free resources
		goto end;
	}

	//Get decoder for format
	if (!(codec = avcodec_find_decoder(ctx->codec_id)))
	{
		//Set error
		res = Error("Couldn't find codec for the logo image file...\n");
		//Free resources
		goto end;
	}
	//Use a single decoding thread so decoding is synchronous
	//and the logo frame is available right after the decode call
	ctx->thread_count	= 1;
	
	//Open codec
	if (avcodec_open2(ctx, codec, NULL)<0)
	{
		//Set error
		res = Error("Couldn't open codec for the logo image file...\n");
		//Free resources
		goto end;
	}

	//Read logo frame
	if (av_read_frame(fctx, &packet)<0)
	{
		//Set error
		res = Error("Couldn't read frame from the image file...\n");
		//Free resources
		goto end;
	}

	//Alloc frame
	if (!(logoRGB = av_frame_alloc()))
	{
		//Set error
		res = Error("Couldn't alloc frame\n");
		//Free resources
		goto end;
	}


	//Decode logo
	if (avcodec_decode_video2(ctx, logoRGB, &gotLogo, &packet)<0)
	{
		//Set error
		res = Error("Couldn't decode logo\n");
		//Free resources
		av_free_packet(&packet);
		goto end;
	}

	av_free_packet(&packet);

	//If we don't have a logo
	if (!gotLogo)
	{
		//Set error
		res = Error("No logo on file\n");
		//Free resources
		goto end;
	}

	//Allocate new one
	if (!(logo = av_frame_alloc()))
	{
		//Set error
		res = Error("Couldn't alloc frame\n");
		//Free resources
		goto end;
	}

	//Get frame sizes
	width = ctx->width;
	height = ctx->height;

	// Create YUV rescaler context
	if (!(sws = sws_alloc_context()))
	{
		//Set error
		res = Error("Couldn't alloc sws context\n");
		// Exit
		goto end;
	}

	// Set properties of the YUV rescaler context
	av_opt_set_defaults(sws);
	av_opt_set_int(sws, "srcw",       width			,AV_OPT_SEARCH_CHILDREN);
	av_opt_set_int(sws, "srch",       height		,AV_OPT_SEARCH_CHILDREN);
	av_opt_set_int(sws, "src_format", ctx->pix_fmt		,AV_OPT_SEARCH_CHILDREN);
	av_opt_set_int(sws, "dstw",       width			,AV_OPT_SEARCH_CHILDREN);
	av_opt_set_int(sws, "dsth",       height		,AV_OPT_SEARCH_CHILDREN);
	av_opt_set_int(sws, "dst_format", AV_PIX_FMT_YUV420P	,AV_OPT_SEARCH_CHILDREN);
	av_opt_set_int(sws, "sws_flags",  SWS_FAST_BILINEAR	,AV_OPT_SEARCH_CHILDREN);
	
	// Init YUV rescaller context
	if (sws_init_context(sws, NULL, NULL) < 0)
	{
		//Set error
		res = Error("Couldn't init sws context\n");
		// Exit
		goto end;
	}

	//Check if we already had one
	if (frame)
		//Free memory
		free(frame);
	//Check if we already had one
	if (frameRGBA)
		//Free memory
		free(frameRGBA);

	//Get size with padding
	size = (((width/32+1)*32)*((height/32+1)*32)*3)/2+FF_INPUT_BUFFER_PADDING_SIZE+32;

	//And number of pixels
	numpixels = width*height;

	//Allocate frame
	frame = (BYTE*)malloc32(size); /* size for YUV 420 */
	frameRGBA = (BYTE*)malloc32(numpixels*4);

	//Alloc data
	logo->data[0] = frame;
	logo->data[1] = logo->data[0] + numpixels;
	logo->data[2] = logo->data[1] + numpixels / 4;

	//Set size for planes
	logo->linesize[0] = width;
	logo->linesize[1] = width/2;
	logo->linesize[2] = width/2;

	//Convert
	sws_scale(sws, logoRGB->data, logoRGB->linesize, 0, height, logo->data, logo->linesize);

	//Copy the decoded RGB24 logo into the RGBA buffer, pixel by pixel
	//(three bytes copied per pixel; the alpha byte is left untouched)
	for (int j=0;j<height;j++)
		for (int i=0;i<width;i++)
			memcpy(frameRGBA+(width*j+i)*4,logoRGB->data[0]+logoRGB->linesize[0]*j+i*3,3);
	
	//Everything was ok
	res = 1;

end:
	if (logo)
		av_free(logo);

	if (logoRGB)
		av_free(logoRGB);

	if (ctx)
		avcodec_close(ctx);

	if (sws)
		sws_freeContext(sws);

	if (fctx)
		avformat_close_input(&fctx);

	//Exit
	return res;	
}
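The sws_alloc_context() / av_opt_set_int() / sws_init_context() sequence above is the verbose way to build the rescaler; the same context can normally be obtained in a single call. A sketch using the sizes and formats from the example:

// Equivalent, more compact rescaler setup (same source/destination as above)
SwsContext *sws = sws_getContext(width, height, ctx->pix_fmt,
                                 width, height, AV_PIX_FMT_YUV420P,
                                 SWS_FAST_BILINEAR, NULL, NULL, NULL);
if (!sws)
	return Error("Couldn't alloc sws context\n");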
Code Example #3
File: ffmpeg_codecs.c Project: RockHardJim/idphone
/*
 * Decode frame.
 */
static pj_status_t ffmpeg_codec_decode_whole(pjmedia_vid_codec *codec,
					     const pjmedia_frame *input,
					     unsigned output_buf_len,
					     pjmedia_frame *output)
{
    ffmpeg_private *ff = (ffmpeg_private*)codec->codec_data;
    AVFrame avframe;
    AVPacket avpacket;
    int err, got_picture;

    /* Check if decoder has been opened */
    PJ_ASSERT_RETURN(ff->dec_ctx, PJ_EINVALIDOP);

    /* Reset output frame bit info */
    output->bit_info = 0;

    /* Validate output buffer size */
    // Do this validation later after getting decoding result, where the real
    // decoded size will be assured.
    //if (ff->dec_vafp.framebytes > output_buf_len)
	//return PJ_ETOOSMALL;

    /* Init frame to receive the decoded data, the ffmpeg codec context will
     * automatically provide the decoded buffer (single buffer used for the
     * whole decoding session, and seems to be freed when the codec context
     * is closed).
     */
    avcodec_get_frame_defaults(&avframe);

    /* Init packet, the container of the encoded data */
    av_init_packet(&avpacket);
    avpacket.data = (pj_uint8_t*)input->buf;
    avpacket.size = input->size;

    /* ffmpeg warns:
     * - input buffer padding, at least FF_INPUT_BUFFER_PADDING_SIZE
     * - null terminated
     * Normally, the encoded buffer is allocated larger than needed, so let's
     * just bzero the input buffer end/pad and hope it will be just fine.
     */
    pj_bzero(avpacket.data+avpacket.size, FF_INPUT_BUFFER_PADDING_SIZE);

    output->bit_info = 0;
    output->timestamp = input->timestamp;

#if LIBAVCODEC_VER_AT_LEAST(52,72)
    //avpacket.flags = AV_PKT_FLAG_KEY;
#else
    avpacket.flags = 0;
#endif

#if LIBAVCODEC_VER_AT_LEAST(52,72)
    err = avcodec_decode_video2(ff->dec_ctx, &avframe, 
                                &got_picture, &avpacket);
#else
    err = avcodec_decode_video(ff->dec_ctx, &avframe,
                               &got_picture, avpacket.data, avpacket.size);
#endif
    if (err < 0) {
	pjmedia_event event;

	output->type = PJMEDIA_FRAME_TYPE_NONE;
	output->size = 0;
        print_ffmpeg_err(err);

	/* Broadcast missing keyframe event */
	pjmedia_event_init(&event, PJMEDIA_EVENT_KEYFRAME_MISSING,
			   &input->timestamp, codec);
	pjmedia_event_publish(NULL, codec, &event, 0);

	return PJMEDIA_CODEC_EBADBITSTREAM;
    } else if (got_picture) {
        pjmedia_video_apply_fmt_param *vafp = &ff->dec_vafp;
        pj_uint8_t *q = (pj_uint8_t*)output->buf;
	unsigned i;
	pj_status_t status;

	/* Check decoding result, e.g: see if the format got changed,
	 * keyframe found/missing.
	 */
	status = check_decode_result(codec, &input->timestamp,
				     avframe.key_frame);
	if (status != PJ_SUCCESS)
	    return status;

	/* Check provided buffer size */
	if (vafp->framebytes > output_buf_len)
	    return PJ_ETOOSMALL;

	/* Get the decoded data */
	for (i = 0; i < ff->dec_vfi->plane_cnt; ++i) {
	    pj_uint8_t *p = avframe.data[i];

	    /* The decoded data may contain padding */
	    if (avframe.linesize[i]!=vafp->strides[i]) {
		/* Padding exists, copy line by line */
		pj_uint8_t *q_end;
                    
		q_end = q+vafp->plane_bytes[i];
		while(q < q_end) {
		    pj_memcpy(q, p, vafp->strides[i]);
		    q += vafp->strides[i];
		    p += avframe.linesize[i];
		}
	    } else {
		/* No padding, copy the whole plane */
		pj_memcpy(q, p, vafp->plane_bytes[i]);
		q += vafp->plane_bytes[i];
	    }
	}

	output->type = PJMEDIA_FRAME_TYPE_VIDEO;
        output->size = vafp->framebytes;
    } else {
	output->type = PJMEDIA_FRAME_TYPE_NONE;
	output->size = 0;
    }
    
    return PJ_SUCCESS;
}
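The stride-aware plane copy in the middle of this function is common enough that libavutil provides a one-call equivalent, av_image_copy_to_buffer(). A sketch under the assumption that the linked libavutil is new enough to have it:

#include <libavutil/imgutils.h>

/* Copies all planes into a contiguous buffer, stripping line padding */
int copied = av_image_copy_to_buffer((uint8_t*)output->buf, (int)output_buf_len,
                                     (const uint8_t* const*)avframe.data,
                                     avframe.linesize,
                                     ff->dec_ctx->pix_fmt,
                                     ff->dec_ctx->width, ff->dec_ctx->height, 1);
if (copied < 0)
    return PJ_ETOOSMALL;   /* destination buffer too small (or bad arguments) */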
Code Example #4
File: FFmpegImage.cpp Project: louis89/xbmc
bool CFFmpegImage::LoadImageFromMemory(unsigned char* buffer, unsigned int bufSize,
                                      unsigned int width, unsigned int height)
{
  
  uint8_t* fbuffer = (uint8_t*)av_malloc(FFMPEG_FILE_BUFFER_SIZE);
  MemBuffer buf;
  buf.data = buffer;
  buf.size = bufSize;
  buf.pos = 0;

  AVIOContext* ioctx = avio_alloc_context(fbuffer, FFMPEG_FILE_BUFFER_SIZE, 0, &buf,
                                          mem_file_read, NULL, mem_file_seek);

  if (!ioctx)
  {
    av_free(fbuffer);
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVIOContext");
    return false;
  }

  AVFormatContext* fctx = avformat_alloc_context();
  if (!fctx)
  {
    av_free(ioctx->buffer);
    av_free(ioctx);
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVFormatContext");
    return false;
  }

  fctx->pb = ioctx;
  ioctx->max_packet_size = FFMPEG_FILE_BUFFER_SIZE;

  if (avformat_open_input(&fctx, "", NULL, NULL) < 0)
  {
    avformat_close_input(&fctx);
    FreeIOCtx(ioctx);
    return false;
  }

  AVCodecContext* codec_ctx = fctx->streams[0]->codec;
  AVCodec* codec = avcodec_find_decoder(codec_ctx->codec_id);
  if (avcodec_open2(codec_ctx, codec, NULL) < 0)
  {
    avformat_close_input(&fctx);
    FreeIOCtx(ioctx);
    return false;
  }

  AVPacket pkt;
  AVFrame* frame = av_frame_alloc();
  av_read_frame(fctx, &pkt);
  int frame_decoded;
  int ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt);
  if (ret < 0)
    CLog::Log(LOGDEBUG, "Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret)));

  if (frame_decoded != 0)
  {
    av_frame_free(&m_pFrame);
    m_pFrame = av_frame_clone(frame);

    if (m_pFrame)
    {
      m_height = m_pFrame->height;
      m_width = m_pFrame->width;
    }    
    else
    {
      CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate a picture data buffer");
      frame_decoded = 0;
    }
  }
  else
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not decode a frame");

  av_frame_free(&frame);
  av_free_packet(&pkt);
  avcodec_close(codec_ctx);
  avformat_close_input(&fctx);
  FreeIOCtx(ioctx);

  return (frame_decoded != 0);
}
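mem_file_read and mem_file_seek are the project's own callbacks and are not shown on this page. A plausible shape for the read callback, matching the MemBuffer fields used above (an assumed illustration, not the project's actual code):

static int mem_file_read(void *opaque, uint8_t *buf, int buf_size)
{
  MemBuffer *mem = static_cast<MemBuffer*>(opaque);
  int64_t remaining = mem->size - mem->pos;

  if (remaining <= 0)
    return AVERROR_EOF;            // signal end of stream to avio
  if (buf_size > remaining)
    buf_size = (int)remaining;

  memcpy(buf, mem->data + mem->pos, buf_size);
  mem->pos += buf_size;
  return buf_size;                 // number of bytes actually delivered
}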
Code Example #5
File: avcodec.cpp Project: anonmangod/LAVFilters
STDMETHODIMP CDecAvcodec::Decode(const BYTE *buffer, int buflen, REFERENCE_TIME rtStartIn, REFERENCE_TIME rtStopIn, BOOL bSyncPoint, BOOL bDiscontinuity)
{
  int     got_picture = 0;
  int     used_bytes  = 0;
  BOOL    bParserFrame = FALSE;
  BOOL    bFlush = (buffer == NULL);
  BOOL    bEndOfSequence = FALSE;

  AVPacket avpkt;
  av_init_packet(&avpkt);

  if (m_pAVCtx->active_thread_type & FF_THREAD_FRAME) {
    if (!m_bFFReordering) {
      m_tcThreadBuffer[m_CurrentThread].rtStart = rtStartIn;
      m_tcThreadBuffer[m_CurrentThread].rtStop  = rtStopIn;
    }

    m_CurrentThread = (m_CurrentThread + 1) % m_pAVCtx->thread_count;
  } else if (m_bBFrameDelay) {
    m_tcBFrameDelay[m_nBFramePos].rtStart = rtStartIn;
    m_tcBFrameDelay[m_nBFramePos].rtStop = rtStopIn;
    m_nBFramePos = !m_nBFramePos;
  }

  uint8_t *pDataBuffer = NULL;
  if (!bFlush && buflen > 0) {
    if (!m_bInputPadded && (!(m_pAVCtx->active_thread_type & FF_THREAD_FRAME) || m_pParser)) {
      // Copy bitstream into temporary buffer to ensure overread protection
      // Verify buffer size
      if (buflen > m_nFFBufferSize) {
        m_nFFBufferSize	= buflen;
        m_pFFBuffer = (BYTE *)av_realloc_f(m_pFFBuffer, m_nFFBufferSize + FF_INPUT_BUFFER_PADDING_SIZE, 1);
        if (!m_pFFBuffer) {
          m_nFFBufferSize = 0;
          return E_OUTOFMEMORY;
        }
      }
      
      memcpy(m_pFFBuffer, buffer, buflen);
      memset(m_pFFBuffer+buflen, 0, FF_INPUT_BUFFER_PADDING_SIZE);
      pDataBuffer = m_pFFBuffer;
    } else {
      pDataBuffer = (uint8_t *)buffer;
    }

    if (m_nCodecId == AV_CODEC_ID_H264) {
      BOOL bRecovered = m_h264RandomAccess.searchRecoveryPoint(pDataBuffer, buflen);
      if (!bRecovered) {
        return S_OK;
      }
    } else if (m_nCodecId == AV_CODEC_ID_VP8 && m_bWaitingForKeyFrame) {
      if (!(pDataBuffer[0] & 1)) {
        DbgLog((LOG_TRACE, 10, L"::Decode(): Found VP8 key-frame, resuming decoding"));
        m_bWaitingForKeyFrame = FALSE;
      } else {
        return S_OK;
      }
    }
  }

  while (buflen > 0 || bFlush) {
    REFERENCE_TIME rtStart = rtStartIn, rtStop = rtStopIn;

    if (!bFlush) {
      avpkt.data = pDataBuffer;
      avpkt.size = buflen;
      avpkt.pts = rtStartIn;
      if (rtStartIn != AV_NOPTS_VALUE && rtStopIn != AV_NOPTS_VALUE)
        avpkt.duration = (int)(rtStopIn - rtStartIn);
      else
        avpkt.duration = 0;
      avpkt.flags = AV_PKT_FLAG_KEY;

      if (m_bHasPalette) {
        m_bHasPalette = FALSE;
        uint32_t *pal = (uint32_t *)av_packet_new_side_data(&avpkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
        int pal_size = FFMIN((1 << m_pAVCtx->bits_per_coded_sample) << 2, m_pAVCtx->extradata_size);
        uint8_t *pal_src = m_pAVCtx->extradata + m_pAVCtx->extradata_size - pal_size;

        for (int i = 0; i < pal_size/4; i++)
          pal[i] = 0xFF<<24 | AV_RL32(pal_src+4*i);
      }
    } else {
      avpkt.data = NULL;
      avpkt.size = 0;
    }

    // Parse the data if a parser is present
    // This is mandatory for MPEG-1/2
    if (m_pParser) {
      BYTE *pOut = NULL;
      int pOut_size = 0;

      used_bytes = av_parser_parse2(m_pParser, m_pAVCtx, &pOut, &pOut_size, avpkt.data, avpkt.size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);

      if (used_bytes == 0 && pOut_size == 0 && !bFlush) {
        DbgLog((LOG_TRACE, 50, L"::Decode() - could not process buffer, starving?"));
        break;
      }

      // Update start time cache
      // If more data was read then output, update the cache (incomplete frame)
      // If output is bigger, a frame was completed, update the actual rtStart with the cached value, and then overwrite the cache
      if (used_bytes > pOut_size) {
        if (rtStartIn != AV_NOPTS_VALUE)
          m_rtStartCache = rtStartIn;
      } else if (used_bytes == pOut_size || ((used_bytes + 9) == pOut_size)) {
        // Why +9 above?
        // Well, apparently there are some broken MKV muxers that like to mux the MPEG-2 PICTURE_START_CODE block (which is 9 bytes) in the package with the previous frame
        // This would cause the frame timestamps to be delayed by one frame exactly, and cause timestamp reordering to go wrong.
        // So instead of failing on those samples, lets just assume that 9 bytes are that case exactly.
        m_rtStartCache = rtStartIn = AV_NOPTS_VALUE;
      } else if (pOut_size > used_bytes) {
        rtStart = m_rtStartCache;
        m_rtStartCache = rtStartIn;
        // The value was used once, don't use it for multiple frames, that ends up in weird timings
        rtStartIn = AV_NOPTS_VALUE;
      }

      bParserFrame = (pOut_size > 0);

      if (pOut_size > 0 || bFlush) {

        if (pOut && pOut_size > 0) {
          if (pOut_size > m_nFFBufferSize2) {
            m_nFFBufferSize2	= pOut_size;
            m_pFFBuffer2 = (BYTE *)av_realloc_f(m_pFFBuffer2, m_nFFBufferSize2 + FF_INPUT_BUFFER_PADDING_SIZE, 1);
            if (!m_pFFBuffer2) {
              m_nFFBufferSize2 = 0;
              return E_OUTOFMEMORY;
            }
          }
          memcpy(m_pFFBuffer2, pOut, pOut_size);
          memset(m_pFFBuffer2+pOut_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

          avpkt.data = m_pFFBuffer2;
          avpkt.size = pOut_size;
          avpkt.pts = rtStart;
          avpkt.duration = 0;

          const uint8_t *eosmarker = CheckForEndOfSequence(m_nCodecId, avpkt.data, avpkt.size, &m_MpegParserState);
          if (eosmarker) {
            bEndOfSequence = TRUE;
          }
        } else {
          avpkt.data = NULL;
          avpkt.size = 0;
        }

        int ret2 = avcodec_decode_video2 (m_pAVCtx, m_pFrame, &got_picture, &avpkt);
        if (ret2 < 0) {
          DbgLog((LOG_TRACE, 50, L"::Decode() - decoding failed despite successfull parsing"));
          got_picture = 0;
        }
      } else {
        got_picture = 0;
      }
    } else {
      used_bytes = avcodec_decode_video2 (m_pAVCtx, m_pFrame, &got_picture, &avpkt);
    }

    if (FAILED(PostDecode())) {
      av_frame_unref(m_pFrame);
      return E_FAIL;
    }

    // Decoding of this frame failed ... oh well!
    if (used_bytes < 0) {
      av_frame_unref(m_pFrame);
      return S_OK;
    }

    // When frame threading is active, we won't know how much data has been consumed, so by default it eats everything.
    // In addition, if no data got consumed and no picture was extracted, the frame probably isn't all that useful.
    // The MJPEG decoder is somewhat buggy and doesn't really let us know how much data was consumed...
    if ((!m_pParser && (m_pAVCtx->active_thread_type & FF_THREAD_FRAME || (!got_picture && used_bytes == 0))) || m_bNoBufferConsumption || bFlush) {
      buflen = 0;
    } else {
      buflen -= used_bytes;
      pDataBuffer += used_bytes;
    }

    // Judge frame usability
    // This determines if a frame is artifact free and can be delivered
    // For H264 this does some wicked magic hidden away in the H264RandomAccess class
    // MPEG-2 and VC-1 just wait for a keyframe..
    if (m_nCodecId == AV_CODEC_ID_H264 && (bParserFrame || !m_pParser || got_picture)) {
      m_h264RandomAccess.judgeFrameUsability(m_pFrame, &got_picture);
    } else if (m_bResumeAtKeyFrame) {
      if (m_bWaitingForKeyFrame && got_picture) {
        if (m_pFrame->key_frame) {
          DbgLog((LOG_TRACE, 50, L"::Decode() - Found Key-Frame, resuming decoding at %I64d", m_pFrame->pkt_pts));
          m_bWaitingForKeyFrame = FALSE;
        } else {
          got_picture = 0;
        }
      }
    }

    // Handle B-frame delay for frame threading codecs
    if ((m_pAVCtx->active_thread_type & FF_THREAD_FRAME) && m_bBFrameDelay) {
      m_tcBFrameDelay[m_nBFramePos] = m_tcThreadBuffer[m_CurrentThread];
      m_nBFramePos = !m_nBFramePos;
    }

    if (!got_picture || !m_pFrame->data[0]) {
      if (!avpkt.size)
        bFlush = FALSE; // End flushing, no more frames
      av_frame_unref(m_pFrame);
      continue;
    }

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // Determine the proper timestamps for the frame, based on different possible flags.
    ///////////////////////////////////////////////////////////////////////////////////////////////
    if (m_bFFReordering) {
      rtStart = m_pFrame->pkt_pts;
      if (m_pFrame->pkt_duration)
        rtStop = m_pFrame->pkt_pts + m_pFrame->pkt_duration;
      else
        rtStop = AV_NOPTS_VALUE;
    } else if (m_bBFrameDelay && m_pAVCtx->has_b_frames) {
      rtStart = m_tcBFrameDelay[m_nBFramePos].rtStart;
      rtStop  = m_tcBFrameDelay[m_nBFramePos].rtStop;
    } else if (m_pAVCtx->active_thread_type & FF_THREAD_FRAME) {
      unsigned index = m_CurrentThread;
      rtStart = m_tcThreadBuffer[index].rtStart;
      rtStop  = m_tcThreadBuffer[index].rtStop;
    }

    if (m_bRVDropBFrameTimings && m_pFrame->pict_type == AV_PICTURE_TYPE_B) {
      rtStart = AV_NOPTS_VALUE;
    }

    if (m_bCalculateStopTime)
      rtStop = AV_NOPTS_VALUE;

    ///////////////////////////////////////////////////////////////////////////////////////////////
    // All required values collected, deliver the frame
    ///////////////////////////////////////////////////////////////////////////////////////////////
    LAVFrame *pOutFrame = NULL;
    AllocateFrame(&pOutFrame);

    AVRational display_aspect_ratio;
    int64_t num = (int64_t)m_pFrame->sample_aspect_ratio.num * m_pFrame->width;
    int64_t den = (int64_t)m_pFrame->sample_aspect_ratio.den * m_pFrame->height;
    av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, num, den, 1 << 30);

    pOutFrame->width        = m_pFrame->width;
    pOutFrame->height       = m_pFrame->height;
    pOutFrame->aspect_ratio = display_aspect_ratio;
    pOutFrame->repeat       = m_pFrame->repeat_pict;
    pOutFrame->key_frame    = m_pFrame->key_frame;
    pOutFrame->frame_type   = av_get_picture_type_char(m_pFrame->pict_type);
    pOutFrame->ext_format   = GetDXVA2ExtendedFlags(m_pAVCtx, m_pFrame);

    if (m_pFrame->interlaced_frame || (!m_pAVCtx->progressive_sequence && (m_nCodecId == AV_CODEC_ID_H264 || m_nCodecId == AV_CODEC_ID_MPEG2VIDEO)))
      m_iInterlaced = 1;
    else if (m_pAVCtx->progressive_sequence)
      m_iInterlaced = 0;

    pOutFrame->interlaced   = (m_pFrame->interlaced_frame || (m_iInterlaced == 1 && m_pSettings->GetDeinterlacingMode() == DeintMode_Aggressive) || m_pSettings->GetDeinterlacingMode() == DeintMode_Force) && !(m_pSettings->GetDeinterlacingMode() == DeintMode_Disable);

    LAVDeintFieldOrder fo   = m_pSettings->GetDeintFieldOrder();
    pOutFrame->tff          = (fo == DeintFieldOrder_Auto) ? m_pFrame->top_field_first : (fo == DeintFieldOrder_TopFieldFirst);

    pOutFrame->rtStart      = rtStart;
    pOutFrame->rtStop       = rtStop;

    PixelFormatMapping map  = getPixFmtMapping((AVPixelFormat)m_pFrame->format);
    pOutFrame->format       = map.lavpixfmt;
    pOutFrame->bpp          = map.bpp;

    if (m_nCodecId == AV_CODEC_ID_MPEG2VIDEO || m_nCodecId == AV_CODEC_ID_MPEG1VIDEO)
      pOutFrame->avgFrameDuration = GetFrameDuration();

    if (map.conversion) {
      ConvertPixFmt(m_pFrame, pOutFrame);
    } else {
      for (int i = 0; i < 4; i++) {
        pOutFrame->data[i]   = m_pFrame->data[i];
        pOutFrame->stride[i] = m_pFrame->linesize[i];
      }

      pOutFrame->priv_data = av_frame_alloc();
      av_frame_ref((AVFrame *)pOutFrame->priv_data, m_pFrame);
      pOutFrame->destruct  = lav_avframe_free;
    }

    if (bEndOfSequence)
      pOutFrame->flags |= LAV_FRAME_FLAG_END_OF_SEQUENCE;

    if (pOutFrame->format == LAVPixFmt_DXVA2) {
      pOutFrame->data[0] = m_pFrame->data[4];
      HandleDXVA2Frame(pOutFrame);
    } else {
      Deliver(pOutFrame);
    }

    if (bEndOfSequence) {
      bEndOfSequence = FALSE;
      if (pOutFrame->format == LAVPixFmt_DXVA2) {
        HandleDXVA2Frame(m_pCallback->GetFlushFrame());
      } else {
        Deliver(m_pCallback->GetFlushFrame());
      }
    }

    if (bFlush) {
      m_CurrentThread = (m_CurrentThread + 1) % m_pAVCtx->thread_count;
    }
    av_frame_unref(m_pFrame);
  }

  return S_OK;
}
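The av_parser_parse2() handling above is the subtle part: the parser consumes raw bytes and only occasionally emits a complete frame. Reduced to its essentials, a parser-fed decode loop looks like the sketch below (parser, ctx, frame, data and size are hypothetical, already-initialized variables):

uint8_t *out = NULL;
int out_size = 0, got_picture = 0;

while (size > 0) {
  // The parser eats input bytes and emits whole frames through 'out'
  int used = av_parser_parse2(parser, ctx, &out, &out_size,
                              data, size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
  data += used;
  size -= used;

  if (out_size > 0) {              // a complete frame is ready to decode
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = out;
    pkt.size = out_size;
    avcodec_decode_video2(ctx, frame, &got_picture, &pkt);
  }
}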
Code Example #6
JNIEXPORT jint JNICALL Java_com_frank_ffmpeg_VideoPlayer_filter
        (JNIEnv * env, jclass clazz, jstring filePath, jobject surface, jstring filterDescr){

    int ret;
    const char * file_name = (*env)->GetStringUTFChars(env, filePath, JNI_FALSE);
    const char *filter_descr = (*env)->GetStringUTFChars(env, filterDescr, JNI_FALSE);
    //Open the input file
    if(!is_playing){
        LOGI("open_input...");
        if((ret = open_input(env, file_name, surface)) < 0){
            LOGE("Couldn't allocate video frame.");
            goto end;
        }
        //Register all filters
        avfilter_register_all();
        filter_frame = av_frame_alloc();
        if(filter_frame == NULL) {
            LOGE("Couldn't allocate filter frame.");
            ret = -1;
            goto end;
        }
        //Initialize the audio decoder
        if ((ret = init_audio(env, clazz)) < 0){
            LOGE("Couldn't init_audio.");
            goto end;
        }

    }

    //Initialize the filter graph
    if ((ret = init_filters(filter_descr)) < 0){
        LOGE("init_filter error, ret=%d\n", ret);
        goto end;
    }

    is_playing = 1;
    int frameFinished;
    AVPacket packet;

    while(av_read_frame(pFormatCtx, &packet)>=0 && !release) {
        //A filter switch was requested: leave the current playback loop
        if(again){
            goto again;
        }
        //Check whether the packet belongs to the video stream
        if(packet.stream_index == video_stream_index) {
            //Decode the frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            if (frameFinished) {
                //Feed the decoded video frame into the filter_graph
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, pFrame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    LOGE("Error while feeding the filter_graph\n");
                    break;
                }
                //Pull the filtered video frame out of the filter graph
                ret = av_buffersink_get_frame(buffersink_ctx, filter_frame);
                if (ret >= 0){
                    // lock native window
                    ANativeWindow_lock(nativeWindow, &windowBuffer, 0);
                    // Convert the pixel format
                    sws_scale(sws_ctx, (uint8_t const * const *)filter_frame->data,
                              filter_frame->linesize, 0, pCodecCtx->height,
                              pFrameRGBA->data, pFrameRGBA->linesize);
                    // Get the stride
                    uint8_t * dst = windowBuffer.bits;
                    int dstStride = windowBuffer.stride * 4;
                    uint8_t * src = pFrameRGBA->data[0];
                    int srcStride = pFrameRGBA->linesize[0];
                    // The window stride and the frame stride differ, so copy row by row
                    int h;
                    for (h = 0; h < pCodecCtx->height; h++) {
                        memcpy(dst + h * dstStride, src + h * srcStride, (size_t) srcStride);
                    }
                    ANativeWindow_unlockAndPost(nativeWindow);
                }
                av_frame_unref(filter_frame);
            }
            //Delay to pace playback
            if (!playAudio){
                usleep((unsigned long) (1000 * 40));//1000 * 40
            }
        } else if(packet.stream_index == audio_stream_index){//audio frame
            if (playAudio){
                play_audio(env, &packet, pFrame);
            }
        }
        av_packet_unref(&packet);
    }
    end:
    is_playing = 0;
    //Free memory and close the file
    av_free(buffer);
    av_free(pFrameRGBA);
    av_frame_free(&filter_frame);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);
    //buffersrc_ctx and buffersink_ctx are owned by the graph and freed with it
    avfilter_graph_free(&filter_graph);
    avcodec_close(audioCodecCtx);
    sws_freeContext(sws_ctx);   //sws contexts need their own destructor, not free()
    swr_free(&audio_swr_ctx);   //likewise for the resampler context
    av_free(out_buffer);
    //windowBuffer is a plain struct and audio_track/audio_track_write_mid are
    //JNI references; none of them may be passed to free()
    ANativeWindow_release(nativeWindow);
    (*env)->ReleaseStringUTFChars(env, filePath, file_name);
    (*env)->ReleaseStringUTFChars(env, filterDescr, filter_descr);
    LOGE("do release...");
    again:
    again = 0;
    LOGE("play again...");
    return ret;
}
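One detail worth noting: av_buffersink_get_frame() legitimately returns AVERROR(EAGAIN) when the graph simply needs more input, and the loop above lumps that in with real errors by skipping the frame. The usual drain pattern pulls until EAGAIN or EOF, as in this sketch (render_frame is a hypothetical consumer):

int ret;
while ((ret = av_buffersink_get_frame(buffersink_ctx, filter_frame)) >= 0) {
    render_frame(filter_frame);   // hypothetical consumer of the filtered frame
    av_frame_unref(filter_frame);
}
// ret == AVERROR(EAGAIN): the graph needs more input frames
// ret == AVERROR_EOF:     the graph is fully drained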
Code Example #7
File: fa_imageloader.c Project: Hr-/showtime
static pixmap_t *
fa_image_from_video2(const char *url0, const image_meta_t *im, 
		     const char *cacheid)
{
  pixmap_t *pm = NULL;
  char *url = mystrdupa(url0);
  char *tim = strchr(url, '#');

  if(tim == NULL)
    return NULL;
  *tim++ = 0;

  if(ifv_url == NULL || strcmp(url, ifv_url)) {
    // Need to open
    int i;
    AVFormatContext *fctx;
    AVIOContext *avio;
    
    if((avio = fa_libav_open(url, 65536, NULL, 0, 0)) == NULL)
      return NULL;

    if((fctx = fa_libav_open_format(avio, url, NULL, 0, NULL)) == NULL) {
      fa_libav_close(avio);
      return NULL;
    }

    if(!strcmp(fctx->iformat->name, "avi"))
      fctx->flags |= AVFMT_FLAG_GENPTS;

    AVCodecContext *ctx = NULL;
    for(i = 0; i < fctx->nb_streams; i++) {
      if(fctx->streams[i]->codec != NULL && 
	 fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
	ctx = fctx->streams[i]->codec;
	break;
      }
    }
    if(ctx == NULL) {
      fa_libav_close_format(fctx);
      return NULL;
    }

    AVCodec *codec = avcodec_find_decoder(ctx->codec_id);
    if(codec == NULL) {
      fa_libav_close_format(fctx);
      return NULL;
    }

    if(avcodec_open(ctx, codec) < 0) {
      fa_libav_close_format(fctx);
      return NULL;
    }

    ifv_close();

    ifv_stream = i;
    ifv_url = strdup(url);
    ifv_fctx = fctx;
    ifv_ctx = ctx;
  }

  AVPacket pkt;
  AVFrame *frame = avcodec_alloc_frame();
  int got_pic;


  int secs = atoi(tim);

  AVStream *st = ifv_fctx->streams[ifv_stream];
  int64_t ts = av_rescale(secs, st->time_base.den, st->time_base.num);

  if(av_seek_frame(ifv_fctx, ifv_stream, ts, AVSEEK_FLAG_BACKWARD) < 0) {
    ifv_close();
    return NULL;
  }
  
  avcodec_flush_buffers(ifv_ctx);


  
  int cnt = 500;
  while(1) {
    int r;

    r = av_read_frame(ifv_fctx, &pkt);
    
    if(r == AVERROR(EAGAIN))
      continue;
    
    if(r == AVERROR_EOF)
      break;

    if(r != 0) {
      ifv_close();
      break;
    }

    if(pkt.stream_index != ifv_stream) {
      av_free_packet(&pkt);
      continue;
    }
    cnt--;
    int want_pic = pkt.pts >= ts || cnt <= 0;

    ifv_ctx->skip_frame = want_pic ? AVDISCARD_DEFAULT : AVDISCARD_NONREF;
    
    avcodec_decode_video2(ifv_ctx, frame, &got_pic, &pkt);
    if(got_pic == 0 || !want_pic)
      continue;

    int w,h;

    if(im->req_width != -1 && im->req_height != -1) {
      w = im->req_width;
      h = im->req_height;
    } else if(im->req_width != -1) {
      w = im->req_width;
      h = im->req_width * ifv_ctx->height / ifv_ctx->width;

    } else if(im->req_height != -1) {
      w = im->req_height * ifv_ctx->width / ifv_ctx->height;
      h = im->req_height;
    } else {
      // Neither dimension requested; fall back to the source size
      w = ifv_ctx->width;
      h = ifv_ctx->height;
    }

    pm = pixmap_create(w, h, PIX_FMT_RGB24);

    struct SwsContext *sws;
    sws = sws_getContext(ifv_ctx->width, ifv_ctx->height, ifv_ctx->pix_fmt,
			 w, h, PIX_FMT_RGB24, SWS_LANCZOS, NULL, NULL, NULL);
    if(sws == NULL) {
      ifv_close();
      return NULL;
    }
    
    uint8_t *ptr[4] = {0,0,0,0};
    int strides[4] = {0,0,0,0};

    ptr[0] = pm->pm_pixels;
    strides[0] = pm->pm_linesize;

    sws_scale(sws, (const uint8_t **)frame->data, frame->linesize,
	      0, ifv_ctx->height, ptr, strides);

    sws_freeContext(sws);

    if(pngencoder != NULL) {
      AVFrame *oframe = avcodec_alloc_frame();

      oframe->data[0] = pm->pm_pixels;
      oframe->linesize[0] = pm->pm_linesize;
      
      size_t outputsize = pm->pm_linesize * h;
      void *output = malloc(outputsize);
      pngencoder->width = w;
      pngencoder->height = h;
      pngencoder->pix_fmt = PIX_FMT_RGB24;

      r = avcodec_encode_video(pngencoder, output, outputsize, oframe);
    
      if(r > 0) 
	blobcache_put(cacheid, "videothumb", output, outputsize, 86400 * 5);
      free(output);
      av_free(oframe);
    }
    break;
  }

  av_free(frame);
  return pm;
}
Code Example #8
File: qplayvid.c Project: CNCBASHER/openlase
size_t decode_video(PlayerCtx *ctx, AVPacket *packet, int new_packet, int32_t seekid)
{
	int decoded;
	int got_frame;

	if (!new_packet)
		fprintf(stderr, "warn: multi-frame video packets, pts might be inaccurate\n");

	ctx->v_pkt_pts = packet->pts;

	decoded = avcodec_decode_video2(ctx->v_codec_ctx, ctx->v_frame, &got_frame, packet);
	if (decoded < 0) {
		fprintf(stderr, "Error while decoding video frame\n");
		return packet->size;
	}
	if (!got_frame)
		return decoded;

	// The pts magic guesswork
	int64_t pts = AV_NOPTS_VALUE;
	int64_t *p_pts = (int64_t*)ctx->v_frame->opaque;
	int64_t frame_pts = AV_NOPTS_VALUE;
	if (p_pts)
		frame_pts = *p_pts;
	if (packet->dts != AV_NOPTS_VALUE) {
		ctx->v_faulty_dts += packet->dts <= ctx->v_last_dts;
		ctx->v_last_dts = packet->dts;
	}
	if (frame_pts != AV_NOPTS_VALUE) {
		ctx->v_faulty_pts += frame_pts <= ctx->v_last_pts;
		ctx->v_last_pts = frame_pts;
	}
	if ((ctx->v_faulty_pts <= ctx->v_faulty_dts || packet->dts == AV_NOPTS_VALUE)
		&& frame_pts != AV_NOPTS_VALUE)
		pts = frame_pts;
	else
		pts = packet->dts;

	if (pts == AV_NOPTS_VALUE) {
		if (ctx->v_last_pts != AV_NOPTS_VALUE) {
			pts = ctx->v_last_pts++;
		} else if (ctx->v_last_dts != AV_NOPTS_VALUE) {
			pts = ctx->v_last_dts++;
		} else {
			pts = 0;
		}
	}

	pthread_mutex_lock(&ctx->v_buf_mutex);
	while (((ctx->v_buf_put + 1) % ctx->v_buf_len) == ctx->v_buf_get) {
		printf("Wait for space in video buffer\n");
		pthread_cond_wait(&ctx->v_buf_not_full, &ctx->v_buf_mutex);
	}
	pthread_mutex_unlock(&ctx->v_buf_mutex);

	VideoFrame *frame = ctx->v_bufs[ctx->v_buf_put];
	if (!frame) {
		frame = malloc(sizeof(VideoFrame));
		frame->stride = ctx->v_frame->linesize[0];
		frame->data_size = frame->stride * ctx->height;
		frame->data = malloc(frame->data_size);
		ctx->v_bufs[ctx->v_buf_put] = frame;
	}

	if (frame->stride != ctx->v_frame->linesize[0]) {
		fprintf(stderr, "stride mismatch: %d != %d\n", (int)frame->stride, ctx->v_frame->linesize[0]);
		return decoded;
	}

	if (!ctx->v_sws_ctx) {
		ctx->v_sws_ctx = sws_getContext(ctx->width, ctx->height, ctx->v_codec_ctx->pix_fmt,
										ctx->width, ctx->height, PIX_FMT_GRAY8, SWS_BICUBIC,
										NULL, NULL, NULL);
	}

	AVPicture pict;
	pict.data[0] = frame->data;
	pict.linesize[0] = frame->stride;

	sws_scale(ctx->v_sws_ctx, ctx->v_frame->data, ctx->v_frame->linesize, 0, ctx->height, pict.data, pict.linesize);

	frame->pts = av_q2d(ctx->v_stream->time_base) * pts;
	frame->seekid = seekid;

	printf("Put frame %d (pts:%f seekid:%d)\n", ctx->v_buf_put, frame->pts, seekid);
	pthread_mutex_lock(&ctx->v_buf_mutex);
	if (++ctx->v_buf_put == ctx->v_buf_len)
		ctx->v_buf_put = 0;
	pthread_cond_signal(&ctx->v_buf_not_empty);
	pthread_mutex_unlock(&ctx->v_buf_mutex);

	return decoded;
}
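The faulty-pts/faulty-dts voting above reimplements a heuristic FFmpeg also ships built in: av_frame_get_best_effort_timestamp(), which example #1 uses. A sketch of how it collapses the guessing block:

// One call replaces the pts/dts vote; fall back to 0 as the code above does
int64_t pts = av_frame_get_best_effort_timestamp(ctx->v_frame);
if (pts == AV_NOPTS_VALUE)
	pts = 0;
frame->pts = av_q2d(ctx->v_stream->time_base) * pts;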
Code Example #9
void Video::WriteNextFrame()
{
    int frameFinished = 0;
    AVPacket packet;

    if (texturesRecreatedCount != gTexturesRecreatedCount)
    {
        texturesRecreatedCount = gTexturesRecreatedCount;

        SDL_DestroyTexture(pTexture);
        pTexture =
            SDL_CreateTexture(
                gpRenderer,
                IsYUVFormat(pCodecContext->pix_fmt) ? SDL_PIXELFORMAT_YV12 : SDL_PIXELFORMAT_ARGB8888,
                SDL_TEXTUREACCESS_STREAMING,
                width,
                height);
        SDL_SetTextureBlendMode(pTexture, SDL_BLENDMODE_BLEND);

        unsigned char *pPixels = NULL;
        int pitch = 0;

        SDL_LockTexture(pTexture, NULL, reinterpret_cast<void **>(&pPixels), &pitch);
        memcpy(pPixels, pCachedTexturePixels, pitch * height);
        SDL_UnlockTexture(pTexture);
    }

    while (!frameFinished)
    {
        if (av_read_frame(pFormatContext, &packet) != 0)
        {
            break;
        }

        if (packet.stream_index == videoStream && avcodec_decode_video2(pCodecContext, pFrame, &frameFinished, &packet) >= 0)
        {
            if (frameFinished)
            {
                AVPicture picture;
                unsigned char *pPixels = NULL;
                int pitch = 0;

                SDL_LockTexture(pTexture, NULL, reinterpret_cast<void **>(&pPixels), &pitch);

                if (IsYUVFormat(pCodecContext->pix_fmt))
                {
                    picture.data[0] = pFrame->data[0];
                    picture.data[1] = pFrame->data[1];
                    picture.data[2] = pFrame->data[2];
                    picture.linesize[0] = pFrame->linesize[0];
                    picture.linesize[1] = pFrame->linesize[1];
                    picture.linesize[2] = pFrame->linesize[2];
                }
                else
                {
                    picture.data[0] = pPixels;
                    picture.linesize[0] = pitch;
                }

                sws_scale(pImageConvertContext, pFrame->data, pFrame->linesize, 0, pFrame->height, picture.data, picture.linesize);

                if (IsYUVFormat(pCodecContext->pix_fmt))
                {
                    if (pitch == picture.linesize[0])
                    {
                        int size = pitch * pFrame->height;

                        memcpy(pPixels, picture.data[0], size);
                        memcpy(pPixels + size, picture.data[2], size / 4);
                        memcpy(pPixels + size * 5 / 4, picture.data[1], size / 4);
                    }
                    else
                    {
                        unsigned char *y1, *y2, *y3, *i1, *i2, *i3;
                        y1 = pPixels;
                        y3 = pPixels + pitch * pFrame->height;
                        y2 = pPixels + pitch * pFrame->height * 5 / 4;

                        i1 = picture.data[0];
                        i2 = picture.data[1];
                        i3 = picture.data[2];

                        for (int i = 0; i < pFrame->height / 2; i++)
                        {
                            memcpy(y1, i1, pitch);
                            i1 += picture.linesize[0];
                            y1 += pitch;
                            memcpy(y1, i1, pitch);

                            memcpy(y2, i2, pitch / 2);
                            memcpy(y3, i3, pitch / 2);

                            y1 += pitch;
                            y2 += pitch / 2;
                            y3 += pitch / 2;
                            i1 += picture.linesize[0];
                            i2 += picture.linesize[1];
                            i3 += picture.linesize[2];
                        }
                    }
                }

                memcpy(pCachedTexturePixels, pPixels, pitch * height);
                SDL_UnlockTexture(pTexture);
            }
        }

        av_free_packet(&packet);
    }
}
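The manual plane shuffling exists because the texture is YV12 (Y, V, U order) while FFmpeg delivers planes in Y, U, V order. SDL 2.0.1 and later can do the per-plane, per-pitch upload itself, which is usually simpler; a sketch for the YUV path:

// SDL maps the U/V planes correctly for both IYUV and YV12 textures
SDL_UpdateYUVTexture(pTexture, NULL,
                     pFrame->data[0], pFrame->linesize[0],
                     pFrame->data[1], pFrame->linesize[1],
                     pFrame->data[2], pFrame->linesize[2]);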
Code Example #10
int main(int argc, char* argv[])
{
	AVFormatContext	*pFormatCtx;
	int				i, videoindex;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;
	AVFrame	*pFrame,*pFrameYUV;
	uint8_t *out_buffer;
	AVPacket *packet;
	int y_size;
	int ret, got_picture;
	struct SwsContext *img_convert_ctx;

	char filepath[]="bigbuckbunny_480x272.h265";
	//SDL---------------------------
	int screen_w=0,screen_h=0;
	SDL_Window *screen; 
	SDL_Renderer* sdlRenderer;
	SDL_Texture* sdlTexture;
	SDL_Rect sdlRect;

	FILE *fp_yuv;

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
	if(avformat_find_stream_info(pFormatCtx,NULL)<0){
		printf("Couldn't find stream information.\n");
		return -1;
	}
	videoindex=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++) 
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}
	if(videoindex==-1){
		printf("Didn't find a video stream.\n");
		return -1;
	}
	pCodecCtx=pFormatCtx->streams[videoindex]->codec;
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL){
		printf("Codec not found.\n");
		return -1;
	}
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
		printf("Could not open codec.\n");
		return -1;
	}
	
	pFrame=av_frame_alloc();
	pFrameYUV=av_frame_alloc();
	out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
	packet=(AVPacket *)av_malloc(sizeof(AVPacket));
	//Output Info-----------------------------
	printf("--------------- File Information ----------------\n");
	av_dump_format(pFormatCtx,0,filepath,0);
	printf("-------------------------------------------------\n");
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 
		pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 

#if OUTPUT_YUV420P 
    fp_yuv=fopen("output.yuv","wb+");  
#endif  
	
	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
		printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
		return -1;
	} 

	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	//SDL 2.0 Support for multiple windows
	screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
		screen_w, screen_h,
		SDL_WINDOW_OPENGL);

	if(!screen) {  
		printf("SDL: could not create window - exiting:%s\n",SDL_GetError());  
		return -1;
	}

	sdlRenderer = SDL_CreateRenderer(screen, -1, 0);  
	//IYUV: Y + U + V  (3 planes)
	//YV12: Y + V + U  (3 planes)
	sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);  

	sdlRect.x=0;
	sdlRect.y=0;
	sdlRect.w=screen_w;
	sdlRect.h=screen_h;

	//SDL End----------------------
	while(av_read_frame(pFormatCtx, packet)>=0){
		if(packet->stream_index==videoindex){
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
			if(ret < 0){
				printf("Decode Error.\n");
				return -1;
			}
			if(got_picture){
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
					pFrameYUV->data, pFrameYUV->linesize);
				
#if OUTPUT_YUV420P
				y_size=pCodecCtx->width*pCodecCtx->height;  
				fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);    //Y 
				fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv);  //U
				fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv);  //V
#endif
				//SDL---------------------------
#if 0
				SDL_UpdateTexture( sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0] );  
#else
				SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
				pFrameYUV->data[0], pFrameYUV->linesize[0],
				pFrameYUV->data[1], pFrameYUV->linesize[1],
				pFrameYUV->data[2], pFrameYUV->linesize[2]);
#endif	
				
				SDL_RenderClear( sdlRenderer );  
				SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);  
				SDL_RenderPresent( sdlRenderer );  
				//SDL End-----------------------
				//Delay 40ms
				SDL_Delay(40);
			}
		}
		av_free_packet(packet);
	}
	//flush decoder
	//FIX: flush frames remaining in the codec
	while (1) {
		ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
		if (ret < 0)
			break;
		if (!got_picture)
			break;
		sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
			pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
		int y_size=pCodecCtx->width*pCodecCtx->height;  
		fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);    //Y 
		fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv);  //U
		fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv);  //V
#endif
		//SDL---------------------------
		SDL_UpdateTexture( sdlTexture, &sdlRect, pFrameYUV->data[0], pFrameYUV->linesize[0] );  
		SDL_RenderClear( sdlRenderer );  
		SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);  
		SDL_RenderPresent( sdlRenderer );  
		//SDL End-----------------------
		//Delay 40ms
		SDL_Delay(40);
	}

	sws_freeContext(img_convert_ctx);

#if OUTPUT_YUV420P 
    fclose(fp_yuv);
#endif 

	SDL_Quit();

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}
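The trailing flush loop works because av_free_packet() leaves packet->data = NULL and packet->size = 0, so the recycled packet already acts as an empty drain packet. Written out explicitly, the canonical flush looks like this sketch:

AVPacket flush_pkt;
av_init_packet(&flush_pkt);
flush_pkt.data = NULL;   // an empty packet switches the decoder into drain mode
flush_pkt.size = 0;

while (avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &flush_pkt) >= 0
       && got_picture) {
	/* scale and display pFrame exactly as in the main loop */
}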
Code Example #11
File: libavcodec-test.cpp Project: rookies/videolib
int main (int argc, char **argv)
{
	/*
	 * Variable definitions:
	*/
	/*
	 * videofile_format_context
	 * video_stream
	 * videofile_codec_context_video
	 * sws_ctx
	 * 
	 * videofile_packet
	 * videofile_video_frame_finished
	 * videofile_video_frame
	 * videofile_video_frame_rgb
	 */
	AVFormatContext *videofile_format_context = NULL;
	AVCodecContext *videofile_codec_context_video = NULL;
	AVCodec *videofile_codec_video = NULL;
	AVDictionary *options_dict = NULL;
	int video_stream = -1;
	int i;
	AVFrame *videofile_video_frame = NULL;
	AVFrame *videofile_video_frame_rgb = NULL;
	sf::Uint8 *videofile_buffer;
	AVPacket videofile_packet;
	int videofile_video_frame_finished;
	sf::RenderWindow window;
	sf::Event event;
	sf::Texture texture;
	sf::Sprite sprite;
	struct SwsContext *sws_ctx = NULL;
	/*
	 * Check arguments:
	*/
	if (argc < 2)
	{
		std::cerr << "Usage: " << argv[0] << " file" << std::endl;
		return 1;
	};
	/*
	 * Register formats & codecs:
	*/
	av_register_all();
	/*
	 * Open video file:
	*/
	if (avformat_open_input(&videofile_format_context, argv[1], NULL, NULL) !=0)
	{
		std::cerr << "Error: avformat_open_input() failed! Maybe the file doesn't exist?" << std::endl;
		return 1;
	};
	/*
	 * Get stream information:
	*/
	if (avformat_find_stream_info(videofile_format_context, NULL) < 0)
	{
		std::cerr << "Error: avformat_find_stream_info() failed!" << std::endl;
		return 1;
	};
	/*
	 * Dump file information:
	*/
	av_dump_format(videofile_format_context, 0, argv[1], 0);
	/*
	 * Find first video stream:
	*/
	for (i=0; i < videofile_format_context->nb_streams; i++)
	{
		if (videofile_format_context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			video_stream = i;
			break;
		};
	}
	if (video_stream == -1)
	{
		std::cerr << "Error: Couldn't find a video stream!" << std::endl;
		return 1;
	};
	videofile_codec_context_video = videofile_format_context->streams[video_stream]->codec;
	/*
	 * Find a decoder:
	*/
	videofile_codec_video = avcodec_find_decoder(videofile_codec_context_video->codec_id);
	if (videofile_codec_video == NULL)
	{
		std::cerr << "Error: Unsupported video codec!" << std::endl;
		return 1;
	};
	/*
	 * Open codec:
	*/
	if (avcodec_open2(videofile_codec_context_video, videofile_codec_video, &options_dict) < 0)
	{
		std::cerr << "Error: Couldn't open video codec!" << std::endl;
		return 1;
	};
	/*
	 * Create window & texture:
	*/
	window.create(
		sf::VideoMode(
			videofile_codec_context_video->width,
			videofile_codec_context_video->height,
			24
		),
		"FLOATING"
	);
	texture.create(
		videofile_codec_context_video->width,
		videofile_codec_context_video->height
	);
	/*
	 * Allocate video frames:
	*/
	videofile_video_frame = avcodec_alloc_frame();
	videofile_video_frame_rgb = avcodec_alloc_frame();
	/*
	 * Init sw_scale:
	*/
	int numBytes = avpicture_get_size(PIX_FMT_RGBA, videofile_codec_context_video->width, videofile_codec_context_video->height);
	videofile_buffer = (sf::Uint8 *)av_malloc(numBytes * sizeof(sf::Uint8));
	avpicture_fill(
		(AVPicture *)videofile_video_frame_rgb,
		videofile_buffer,
		PIX_FMT_RGBA,
		videofile_codec_context_video->width,
		videofile_codec_context_video->height
	);
	sws_ctx = sws_getContext(
		videofile_codec_context_video->width,
		videofile_codec_context_video->height,
		videofile_codec_context_video->pix_fmt,
		videofile_codec_context_video->width,
		videofile_codec_context_video->height,
		PIX_FMT_RGBA,
		SWS_BILINEAR,
		NULL,
		NULL,
		NULL
	);
	/*
	 * Read frames:
	*/
	while (av_read_frame(videofile_format_context, &videofile_packet) >= 0)
	{
		if (videofile_packet.stream_index == video_stream)
		{
			avcodec_decode_video2(videofile_codec_context_video, videofile_video_frame, &videofile_video_frame_finished, &videofile_packet);
			if (videofile_video_frame_finished)
			{
				sws_scale(
					sws_ctx,
					videofile_video_frame->data,
					videofile_video_frame->linesize,
					0,
					videofile_codec_context_video->height,
					videofile_video_frame_rgb->data,
					videofile_video_frame_rgb->linesize
				);
				texture.update(videofile_buffer);
				sprite.setTexture(texture);
				window.draw(sprite);
				window.display();
			};
		};
		av_free_packet(&videofile_packet);
		while (window.pollEvent(event))
		{
			switch (event.type)
			{
				case sf::Event::Closed:
					return 1; // exit
					break;
			}
		}
	}
	return 0;
}
Code Example #12
File: api-example.c Project: Arcen/libav
static void video_decode_example(const char *outfilename, const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int frame, got_picture, len;
    FILE *f;
    AVFrame *picture;
    uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
    char buf[1024];
    AVPacket avpkt;

    av_init_packet(&avpkt);

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
    memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    printf("Video decoding\n");

    /* find the mpeg1 video decoder */
    codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    picture= avcodec_alloc_frame();

    if(codec->capabilities&CODEC_CAP_TRUNCATED)
        c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* the codec gives us the frame size, in samples */

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }

    frame = 0;
    for(;;) {
        avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
        if (avpkt.size == 0)
            break;

        /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
           and this is the only method to use them because you cannot
           know the compressed data size before analysing it.

           BUT some other codecs (msmpeg4, mpeg4) are inherently frame
           based, so you must call them with all the data for one
           frame exactly. You must also initialize 'width' and
           'height' before opening such a decoder. */

        /* NOTE2: some codecs allow the raw parameters (frame size,
           sample rate) to be changed at any frame. We handle this, so
           you should also take care of it */

        /* here, we use a stream based decoder (mpeg1video), so we
           feed decoder and see if it could decode a frame */
        avpkt.data = inbuf;
        while (avpkt.size > 0) {
            len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
            if (len < 0) {
                fprintf(stderr, "Error while decoding frame %d\n", frame);
                exit(1);
            }
            if (got_picture) {
                printf("saving frame %3d\n", frame);
                fflush(stdout);

                /* the picture is allocated by the decoder. no need to
                   free it */
                snprintf(buf, sizeof(buf), outfilename, frame);
                pgm_save(picture->data[0], picture->linesize[0],
                         c->width, c->height, buf);
                frame++;
            }
            avpkt.size -= len;
            avpkt.data += len;
        }
    }

    /* some codecs, such as MPEG, transmit the I and P frame with a
       latency of one frame. You must do the following to have a
       chance to get the last frame of the video */
    avpkt.data = NULL;
    avpkt.size = 0;
    len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
    if (got_picture) {
        printf("saving last frame %3d\n", frame);
        fflush(stdout);

        /* the picture is allocated by the decoder. no need to
           free it */
        snprintf(buf, sizeof(buf), outfilename, frame);
        pgm_save(picture->data[0], picture->linesize[0],
                 c->width, c->height, buf);
        frame++;
    }

    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_free(picture);
    printf("\n");
}
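
Note: avcodec_decode_video2() and avcodec_alloc_frame() above belong to the old decode API. On FFmpeg 3.1 and later the same loop is usually written with the send/receive API; a minimal sketch, reusing the names from the example above (an opened AVCodecContext *c, an AVFrame *picture, a filled AVPacket *pkt):

    /* sketch only: feed one packet, then drain every frame it produced */
    int ret = avcodec_send_packet(c, pkt);
    while (ret >= 0) {
        ret = avcodec_receive_frame(c, picture);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;                  /* needs more input / fully drained */
        if (ret < 0)
            exit(1);                /* decoding error */
        /* use 'picture' here, e.g. pgm_save() as in the example */
    }
    /* sending a NULL packet enters drain mode, replacing the
       empty-packet flush at the end of the example */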
Code example #13
void Utility::VideoLoader::loadP() {
	isRunning_=true;
	target_->setIsComplete(false);
	// Contains information about the stream
	AVFormatContext *formatContext = NULL;

	// Contains information about the codec
	AVCodecContext *codecContext = NULL;

	// The codec with which to decode the video
	AVCodec *codec = NULL;

	// Open video file
	// avformat_open_input(context, path, format, options)
	// format = NULL means autodetect
	if(!path_.isEmpty()
	        && avformat_open_input(&formatContext, path_.toUtf8(), NULL, NULL)!=0) {
		target_->setIsComplete(true);
		return;
	}

	// Retrieve stream information
	if(avformat_find_stream_info(formatContext, NULL)<0) {
		target_->setIsComplete(true);
		return;
	}

	// Print stream information
	// av_dump_format(formatContext, 0, path_.toUtf8(), 0);


	// Find the best video stream in context
	int videoStreamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
	if(videoStreamIndex < 0) { // av_find_best_stream returns a negative AVERROR, not just -1, on failure
		target_->setIsComplete(true);
		return;
	}

	// Get a pointer to the codec context for the video stream
	codecContext = formatContext->streams[videoStreamIndex]->codec;

	// Find the decoder for the video stream
	codec = avcodec_find_decoder(codecContext->codec_id);
	if(codec == NULL) {
		target_->setIsComplete(true);
		return;
	}

	// Open codec
	if(avcodec_open2(codecContext, codec, &dict_) < 0) {
		target_->setIsComplete(true);
		return;
	}

	struct SwsContext      *sws_ctx = NULL;

	averageBitrate_=codecContext->bit_rate;
	codec_=QString(av_codec_get_codec_descriptor(codecContext)->name);
	if(codec_=="")
		codec_="N/A";

	sws_ctx =
	    sws_getContext
	    (
	        codecContext->width,
	        codecContext->height,
	        codecContext->pix_fmt,
	        codecContext->width,
	        codecContext->height,
	        AV_PIX_FMT_RGB24,
	        0,
	        0,
	        0,
	        0
	    );

	AVPacket packet;
	AVFrame *frame = NULL;
	frame = av_frame_alloc();

	AVFrame* rgbframe=NULL;
	uint8_t* buffer = NULL;
	int numbytes=avpicture_get_size(AV_PIX_FMT_RGB24, codecContext->width,codecContext->height);

	target_->setFps(codecContext->framerate.num);
	av_init_packet(&packet);
	packet.data = NULL;
	packet.size = 0;
	int gotPicture = 0;
	while(av_read_frame(formatContext, &packet) >= 0&&isRunning_) {
		avcodec_decode_video2(codecContext, frame, &gotPicture, &packet);

		if(gotPicture != 0) {
			rgbframe=av_frame_alloc();

			buffer=(uint8_t *)av_malloc(numbytes*sizeof(uint8_t));
			avpicture_fill((AVPicture *)rgbframe, buffer, AV_PIX_FMT_RGB24,codecContext->width,
			               codecContext->height);
			rgbframe->width=codecContext->width;
			rgbframe->height=codecContext->height;
			rgbframe->format=AV_PIX_FMT_RGB24;
			rgbframe->pkt_size=frame->pkt_size;

			sws_scale
			(
			    sws_ctx,
			    frame->data,
			    frame->linesize,
			    0,
			    codecContext->height,
			    rgbframe->data,
			    rgbframe->linesize
			);

			target_->appendFrame(rgbframe);
		}
		av_free_packet(&packet); // release each demuxed packet to avoid leaking
	}

	packet.data=NULL;
	packet.size=0;

	while(isRunning_) {
		avcodec_decode_video2(codecContext, frame, &gotPicture, &packet);

		if(gotPicture == 0)
			break;

		rgbframe=av_frame_alloc();

		buffer=(uint8_t *)av_malloc(numbytes*sizeof(uint8_t));
		avpicture_fill((AVPicture *)rgbframe, buffer, AV_PIX_FMT_RGB24,codecContext->width,
		               codecContext->height);
		rgbframe->width=codecContext->width;
		rgbframe->height=codecContext->height;
		rgbframe->format=AV_PIX_FMT_RGB24;
		rgbframe->pkt_size=frame->pkt_size;

		sws_scale
		(
		    sws_ctx,
		    frame->data,
		    frame->linesize,
		    0,
		    codecContext->height,
		    rgbframe->data,
		    rgbframe->linesize
		);

		target_->appendFrame(rgbframe);
	}
	av_frame_unref(frame);
	av_frame_free(&frame);
	avcodec_close(codecContext);
	avformat_close_input(&formatContext);
	isRunning_=false;
	if(dict_) {
		av_dict_free(&dict_); // an AVDictionary must be freed with av_dict_free(), not free()
	}
	target_->setIsComplete(true);
}
Code example #14
File: api-seek-test.c  Project: kevleyski/FFmpeg
static int compute_crc_of_packets(AVFormatContext *fmt_ctx, int video_stream,
                                  AVCodecContext *ctx, AVFrame *fr, uint64_t ts_start, uint64_t ts_end, int no_seeking)
{
    int number_of_written_bytes;
    int got_frame = 0;
    int result;
    int end_of_stream = 0;
    int byte_buffer_size;
    uint8_t *byte_buffer;
    int64_t crc;
    AVPacket pkt;

    byte_buffer_size = av_image_get_buffer_size(ctx->pix_fmt, ctx->width, ctx->height, 16);
    byte_buffer = av_malloc(byte_buffer_size);
    if (!byte_buffer) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate buffer\n");
        return AVERROR(ENOMEM);
    }

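    /* AVSEEK_FLAG_ANY lets the demuxer land on a non-keyframe, so the first
     * pictures after the seek may be undecodable until the next keyframe;
     * avcodec_flush_buffers() below discards decoder state left over from
     * before the seek. */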
    if (!no_seeking) {
        result = av_seek_frame(fmt_ctx, video_stream, ts_start, AVSEEK_FLAG_ANY);
        printf("Seeking to %"PRId64", computing crc for frames with pts < %"PRId64"\n", ts_start, ts_end);
        if (result < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error in seeking\n");
            return result;
        }
        avcodec_flush_buffers(ctx);
    }

    av_init_packet(&pkt);
    do {
        if (!end_of_stream)
            if (av_read_frame(fmt_ctx, &pkt) < 0)
                end_of_stream = 1;
        if (end_of_stream) {
            pkt.data = NULL;
            pkt.size = 0;
        }
        if (pkt.stream_index == video_stream || end_of_stream) {
            got_frame = 0;
            if ((pkt.pts == AV_NOPTS_VALUE) && (!end_of_stream)) {
                av_log(NULL, AV_LOG_ERROR, "Error: frames doesn't have pts values\n");
                return -1;
            }
            result = avcodec_decode_video2(ctx, fr, &got_frame, &pkt);
            if (result < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding frame\n");
                return result;
            }
            if (got_frame) {
                number_of_written_bytes = av_image_copy_to_buffer(byte_buffer, byte_buffer_size,
                                          (const uint8_t* const *)fr->data, (const int*) fr->linesize,
                                          ctx->pix_fmt, ctx->width, ctx->height, 1);
                if (number_of_written_bytes < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Can't copy image to buffer\n");
                    return number_of_written_bytes;
                }
                if ((fr->pts > ts_end) && (!no_seeking))
                    break;
                crc = av_adler32_update(0, (const uint8_t*)byte_buffer, number_of_written_bytes);
                printf("%10"PRId64", 0x%08lx\n", fr->pts, crc);
                if (no_seeking) {
                    if (add_crc_to_array(crc, fr->pts) < 0)
                        return -1;
                }
                else {
                    if (compare_crc_in_array(crc, fr->pts) < 0)
                        return -1;
                }
            }
        }
        av_packet_unref(&pkt);
        av_init_packet(&pkt);
    } while ((!end_of_stream || got_frame) && (no_seeking || (fr->pts + av_frame_get_pkt_duration(fr) <= ts_end)));

    av_packet_unref(&pkt);
    av_freep(&byte_buffer);

    return 0;
}
Code example #15
void VideoThread::run(){

	/* allocate the YUV and RGB frames */
	pFrame = avcodec_alloc_frame();
	pFrameRGB = avcodec_alloc_frame();

	/* from this point on, allow the window to refresh */
	_is->window->startdisplay();

	//Calculate the size in bytes that a picture of the given width and height would occupy if stored in the given picture format.
	bytes = avpicture_get_size(CONV_FORMAT, _is->video_st->codec->width, _is->video_st->codec->height);

	uint8_t *video_buffer = (uint8_t*)av_malloc( bytes * sizeof(uint8_t) );
	
	avpicture_fill((AVPicture *)pFrameRGB, video_buffer, CONV_FORMAT, _is->video_st->codec->width, _is->video_st->codec->height);

	/*
	frame-reading loop:
	take a packet from the packet queue
	decode the YUV frame
	convert the frame to RGB
	push the RGB frame onto the new queue
	*/
	while(1) {

		if(_is->ut.getPauseValue() && !_is->ut.getStopValue()){
			this->usleep(10000); // sleep while paused instead of busy-spinning
			continue;
		}

		// read packets from the queue
		if(_is->videoq.Get(packet, 1) < 0){
			// means we quit getting packets
			
			//qDebug() << "quitting getting packets - videothread";
			break;
		}

		//check whether we read the FLUSH packet
		if(packet->data == _is->flush_pkt->data){
			
			//qDebug() << "VideoThread - read FLUSH PKT";
			
			avcodec_flush_buffers(_is->video_st->codec);

			_is->pictq.Flush();

			_is->frame_last_pts = AV_NOPTS_VALUE;
			_is->frame_last_delay = 0;
			_is->frame_timer = (double)av_gettime() / 1000000.0;

			continue;
		}

		pts = 0;									//reset pts to 0, i.e. not found

		//Save global pts to be stored in pFrame in first call
		_is->global_video_pkt_pts = packet->pts;
		
		// Decode video frame
		avcodec_decode_video2(_is->video_st->codec, pFrame, &frameFinished, packet);

		//note: opaque is an internal pFrame field left free
		//for the user to stash auxiliary data in
		
		/* case where we could NOT retrieve the DTS, but the buffer was allocated */
		if (packet->dts == (int64_t)AV_NOPTS_VALUE && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE)
        {
			//fetch the PTS of the first packet, stored in opaque by our
			//buffer-allocation callback
            pts = *(uint64_t *) pFrame->opaque;	
        }
		/* case where we CAN retrieve the DTS */
        else if (packet->dts != (int64_t)AV_NOPTS_VALUE)
        {
            pts = packet->dts;
        }
        else
        {
            pts = 0;
        }

		/**
		PTS = PTS * (time_base converted to double),
		which yields the PTS in seconds
		*/
        pts *= av_q2d(_is->video_st->time_base);

		// Did we get a video frame?
		if(frameFinished) {

			synchronize_video();								//PTS synchronization

			/* convert pFrame -> pFrameRGB */
			sws_scale(_is->sws_ctx, (uint8_t const * const *)pFrame->data,
				pFrame->linesize, 0, _is->video_st->codec->height, pFrameRGB->data, 
						pFrameRGB->linesize);

			while(_is->pictq.getSize() > VIDEO_PICTURE_QUEUE_SIZE && (_is->ut.getStopValue() == false)){
				this->usleep(1000);
			}

			/* push the RGB frame onto the new queue */
			if(_is->pictq.Put(pFrameRGB, pts) < 0) {
				
				//qDebug() << "quitting putting frame - videothread";
				
				break;
			}

		}
		av_free_packet(packet);
	}

	av_free(pFrame);
	av_free(pFrameRGB);

	return;
}
Code example #16
File: ffbbdec.cpp  Project: BurtKim/FFCameraSample
void* decoding_thread(void* arg)
{
    ffdec_context *ffd_context = (ffdec_context*) arg;
    ffdec_reserved *ffd_reserved = (ffdec_reserved*) ffd_context->reserved;
    AVCodecContext *codec_context = ffd_context->codec_context;

    AVPacket packet;
    av_init_packet(&packet);
    packet.size = 0; // keep size defined even when no read_callback is set
    int got_frame;

    int decode_buffer_length = 4096;
    uint8_t decode_buffer[decode_buffer_length + FF_INPUT_BUFFER_PADDING_SIZE];
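    /* zero the padding: FFmpeg's bitstream readers may read a few bytes past
     * the end of the buffer, so the FF_INPUT_BUFFER_PADDING_SIZE tail must be
     * zeroed to avoid overreads on damaged streams */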
    memset(decode_buffer + decode_buffer_length, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    AVFrame *frame = avcodec_alloc_frame();

    while (ffd_reserved->running)
    {
        if (ffd_reserved->read_callback) packet.size = ffd_reserved->read_callback(ffd_context,
                decode_buffer, decode_buffer_length, ffd_reserved->read_callback_arg);

        if (packet.size <= 0) break;

        packet.data = decode_buffer;

        while (ffd_reserved->running && packet.size > 0)
        {
            // re-init packet metadata; av_init_packet() leaves data and size untouched
            av_init_packet(&packet);

            got_frame = 0;
            int decode_result = avcodec_decode_video2(codec_context, frame, &got_frame, &packet);

            if (decode_result < 0)
            {
                fprintf(stderr, "Error while decoding video\n");
                ffd_reserved->running = false;
                break;
            }

            if (got_frame)
            {
                if (ffd_reserved->frame_callback) ffd_reserved->frame_callback(
                        ffd_context, frame, ffd_reserved->frame_callback_arg);

                display_frame(ffd_context, frame);
            }

            packet.size -= decode_result;
            packet.data += decode_result;
        }
    }

    if (ffd_reserved->running)
    {
        // reset the AVPacket
        av_init_packet(&packet);
        packet.data = NULL;
        packet.size = 0;

        got_frame = 0;
        avcodec_decode_video2(codec_context, frame, &got_frame, &packet);

        if (got_frame)
        {
            if (ffd_reserved->frame_callback) ffd_reserved->frame_callback(
                    ffd_context, frame, ffd_reserved->frame_callback_arg);

            display_frame(ffd_context, frame);
        }
    }

    av_free(frame);
    frame = NULL;

    if (ffd_reserved->close_callback) ffd_reserved->close_callback(
            ffd_context, ffd_reserved->close_callback_arg);

    return 0;
}
Code example #17
C_RESULT ffmpeg_stage_decoding_transform(ffmpeg_stage_decoding_config_t *cfg, vp_api_io_data_t *in, vp_api_io_data_t *out)
{
  static const int        sws_flags = SWS_FAST_BILINEAR;
  AVCodecContext  *pCodecCtxMP4 = cfg->pCodecCtxMP4;
  AVCodecContext  *pCodecCtxH264 = cfg->pCodecCtxH264;
  AVFrame         *pFrame = cfg->pFrame;
  AVFrame	  *pFrameOutput = cfg->pFrameOutput;
  static AVPacket packet;
  int	frameFinished = 0;
    
  bool_t frameDimChanged = FALSE;
  static parrot_video_encapsulation_t PaVE, prevPaVE;
    
#if WAIT_FOR_I_FRAME
  static bool_t waitForIFrame = TRUE;
#endif
    
#ifdef NUM_SAMPLES
  static struct timeval start_time, start_time2;
  static int numsamples = 0;
#endif	
    
  if (0 == in->size) // No frame
    {
      FFMPEG_DEBUG ("in->size is zero, don't do anything");
      return C_OK;
    }
  
  vp_os_mutex_lock( &out->lock );
  
  if(out->status == VP_API_STATUS_INIT) // Init only code
    {		
      out->numBuffers   = 1;
      out->buffers      = cfg->bufferArray;
      out->buffers[0]   = NULL;
      out->indexBuffer  = 0;
      out->lineSize     = 0;
        
      av_init_packet(&packet);
 
        
#if __FFMPEG_DEBUG_ENABLED
#else
      av_log_set_callback (&empty_av_log_callback);
#endif
    }
 
  if (! check_and_copy_PaVE(&PaVE, in, &prevPaVE, &frameDimChanged))
    {
      FFMPEG_DEBUG("Received a frame without PaVE informations");
      vp_os_mutex_unlock( &out->lock );
      return C_FAIL;
    }
    
  if ((out->status == VP_API_STATUS_INIT) || frameDimChanged) // Init and "new frame dimensions" code
    {
      pCodecCtxMP4->width = PaVE.encoded_stream_width;
      pCodecCtxMP4->height = PaVE.encoded_stream_height;
      pCodecCtxH264->width = PaVE.encoded_stream_width;
      pCodecCtxH264->height = PaVE.encoded_stream_height;
		
      cfg->src_picture.width = PaVE.display_width;
      cfg->src_picture.height = PaVE.display_height;
      cfg->src_picture.format = pCodecCtxH264->pix_fmt;
      cfg->dst_picture.width = PaVE.display_width;
      cfg->dst_picture.height = PaVE.display_height;
		
      out->size = avpicture_get_size(cfg->dst_picture.format, cfg->dst_picture.width, cfg->dst_picture.height);
      cfg->buffer = (uint8_t *)av_realloc(cfg->buffer, out->size * sizeof(uint8_t));
      out->buffers[0] = cfg->buffer;
		
      avpicture_fill((AVPicture *)pFrameOutput, (uint8_t*)out->buffers[out->indexBuffer], cfg->dst_picture.format,
                     cfg->dst_picture.width, cfg->dst_picture.height);
		
        
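      /* sws_getCachedContext() returns the existing context unchanged when
         the parameters match and only reallocates it when the frame
         dimensions actually changed, so it is cheap to call here on every
         (re)initialization. */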
      cfg->img_convert_ctx = sws_getCachedContext(cfg->img_convert_ctx, PaVE.display_width, PaVE.display_height,
                                             pCodecCtxH264->pix_fmt, PaVE.display_width, PaVE.display_height,
                                             cfg->dst_picture.format, sws_flags, NULL, NULL, NULL);

      if (out->status == VP_API_STATUS_INIT)
        {
#ifdef NUM_SAMPLES
          gettimeofday(&start_time, NULL);
#endif		
          out->status = VP_API_STATUS_PROCESSING;
          FFMPEG_DEBUG("End of init");
        }
    }

#if	WAIT_FOR_I_FRAME
  if ( (PaVE.frame_number != (prevPaVE.frame_number +1)) 
        && 
        ( PaVE.frame_number != prevPaVE.frame_number || PaVE.slice_index != (prevPaVE.slice_index+1) )   )
    {
      FFMPEG_DEBUG ("Missed a frame :\nPrevious was %d of type %d\nNew is %d of type %d", prevPaVE.frame_number, prevPaVE.frame_type,
                    PaVE.frame_number, PaVE.frame_type);
      waitForIFrame = TRUE;  
    }
    
#if DISPLAY_DROPPED_FRAMES
  if (waitForIFrame && PaVE.frame_type == FRAME_TYPE_P_FRAME)
    {
      FFMPEG_DEBUG ("Dropped a P frame\n");
      dropped_frames++;
    }
#endif
    
  if(out->status == VP_API_STATUS_PROCESSING && (!waitForIFrame || (PaVE.frame_type == FRAME_TYPE_IDR_FRAME) || (PaVE.frame_type == FRAME_TYPE_I_FRAME))) // Processing code
    {
      waitForIFrame = FALSE;
#else
      if(out->status == VP_API_STATUS_PROCESSING) // Processing code  
        {
#endif
          /* The 'check_and_copy_PaVE' function already removed the PaVE from the 'in' buffer */
          packet.data = ((unsigned char*)in->buffers[in->indexBuffer]);
          packet.size = in->size;
          FFMPEG_DEBUG("Size : %d", packet.size);
        
#ifdef NUM_SAMPLES
          struct timeval end_time;
          static float32_t frame_decoded_time = 0;

          gettimeofday(&start_time2, NULL);
#endif
          // Decode video frame
          if (PaVE.video_codec == CODEC_MPEG4_VISUAL)
            {
              avcodec_decode_video2 (pCodecCtxMP4, pFrame, &frameFinished, &packet);
            }
          else if (PaVE.video_codec == CODEC_MPEG4_AVC)
            {
              avcodec_decode_video2 (pCodecCtxH264, pFrame, &frameFinished, &packet);
            }
        
          // Did we get a video frame?
          if(frameFinished)
            {
              pFrameOutput->data[0] = (uint8_t*)out->buffers[out->indexBuffer];
              sws_scale(cfg->img_convert_ctx, (const uint8_t *const*)pFrame->data, 
                        pFrame->linesize, 0, 
                        PaVE.display_height,
                        pFrameOutput->data, pFrameOutput->linesize);
				
              cfg->num_picture_decoded++;

#ifdef NUM_SAMPLES
              gettimeofday(&end_time, NULL);
              frame_decoded_time += ((end_time.tv_sec * 1000.0 + end_time.tv_usec / 1000.0) - (start_time2.tv_sec * 1000.0 + start_time2.tv_usec / 1000.0));

              if(numsamples++ > NUM_SAMPLES)
                {
                  float32_t value = ((end_time.tv_sec * 1000.0 + end_time.tv_usec / 1000.0) - (start_time.tv_sec * 1000.0 + start_time.tv_usec / 1000.0));
					
                  printf("Frames decoded in average %f fps, received and decoded in average %f fps\n", (1000.0 / (frame_decoded_time / (float32_t)NUM_SAMPLES)), 1000.0 / (value / (float32_t)NUM_SAMPLES));
                  gettimeofday(&start_time, NULL);
                  frame_decoded_time = 0;
                  numsamples = 0;
                }					
#endif
            }
          else
            {
              printf ("Decoding failed for a %s\n", (PaVE.frame_type == FRAME_TYPE_P_FRAME) ? "P Frame" : "I Frame");
            }
        
#if DISPLAY_DROPPED_FRAMES
          if ((PaVE.frame_type == FRAME_TYPE_IDR_FRAME) || (PaVE.frame_type == FRAME_TYPE_I_FRAME))
            {
              if (previous_ok_frame != 0)
                {
                  static int globalMiss = 0, globalDrop = 0, globalFrames = 0;
                  globalMiss += missed_frames;
                  globalDrop += dropped_frames;
                  int globalMissDrop = globalMiss + globalDrop;
                  int total_miss = missed_frames + dropped_frames;
                  int total_frames = PaVE.frame_number - previous_ok_frame;
                  globalFrames += total_frames;
                  float missPercent = (100.0 * missed_frames) / (1.0 * total_frames);
                  float dropPercent = (100.0 * dropped_frames) / (1.0 * total_frames);
                  float totalPercent = (100.0 * total_miss) / (1.0 * total_frames);
                  float missMean = (100.0 * globalMiss) / (1.0 * globalFrames);
                  float dropMean = (100.0 * globalDrop) / (1.0 * globalFrames);
                  float totalMean = (100.0 * globalMissDrop) / (1.0 * globalFrames);
                  printf ("LAST %4d F => M %4d (%4.1f%%) / D %4d (%4.1f%%) / T %4d (%4.1f%%) <=> ALL %4d F => M %4d (%4.1f%%) / D %4d (%4.1f%%) / T %4d (%4.1f%%)\n", total_frames, missed_frames, missPercent, dropped_frames, dropPercent, total_miss, totalPercent, globalFrames, globalMiss, missMean, globalDrop, dropMean, globalMissDrop, totalMean);
                }
              missed_frames = 0; dropped_frames = 0;
              previous_ok_frame = PaVE.frame_number;
            }
#endif
        
	}
	
      vp_os_mutex_unlock( &out->lock );
	
      return C_OK;
    }

#define FFMPEG_CHECK_AND_FREE(pointer, freeFunc)        \
  do                                                    \
    {                                                   \
      if (NULL != pointer)                              \
        {                                               \
          freeFunc (pointer);                           \
          pointer = NULL;                               \
        }                                               \
    } while (0)

#define FFMPEG_CHECK_AND_FREE_WITH_CALL(pointer, func, freeFunc)        \
  do                                                                    \
    {                                                                   \
      if (NULL != pointer)                                              \
        {                                                               \
          func (pointer);                                               \
          freeFunc (pointer);                                           \
          pointer = NULL;                                               \
        }                                                               \
    } while (0)
  

  C_RESULT ffmpeg_stage_decoding_close(ffmpeg_stage_decoding_config_t *cfg)
  {
    FFMPEG_CHECK_AND_FREE_WITH_CALL(cfg->pCodecCtxMP4, avcodec_close, av_free);
    FFMPEG_CHECK_AND_FREE_WITH_CALL(cfg->pCodecCtxH264, avcodec_close, av_free);
    FFMPEG_CHECK_AND_FREE(cfg->pFrame, av_free);
    FFMPEG_CHECK_AND_FREE(cfg->pFrameOutput, av_free);
    FFMPEG_CHECK_AND_FREE(cfg->bufferArray, vp_os_free);
    FFMPEG_CHECK_AND_FREE(cfg->buffer, av_free);
    FFMPEG_CHECK_AND_FREE(cfg->img_convert_ctx, sws_freeContext);
    return C_OK;
  }
Code example #18
File: AvVideoDecoder.cpp  Project: rawern/DirectLook
	void AvVideoDecoder::run()
	{
		//reads the actual frame depending on the FormatContext and stores the information inside the packet
		if(av_read_frame(m_pFormatCtx, &m_pPacket) >= 0)
		{
			//checks if the packet actually uses the declared video stream
			if (m_pPacket.stream_index == m_pVideostream)
			{
				//the actual decoding is done here. 
				//-using the CodecContext for the decoding, 
				//-storing the data inside pFrame, 
				//-sets the frameFinished variable to mark if the Frame has been completely loaded and decoded.
				//	if not the frame will be marked as not finished and the function has to be called until the frame
				//  is actually finished.
				//-stores the data inside the packet
				avcodec_decode_video2(m_pCodecCtx, m_pFrame, &m_pFrameFinished, &m_pPacket);
				//checks if the frame is completely decoded
				if (m_pFrameFinished){
					static struct SwsContext *img_convert_ctx;

					//if the context hasn't been initialized
					if (img_convert_ctx == NULL){						
						int w = m_pCodecCtx->width;
						int h = m_pCodecCtx->height;

						//context will be set with the ffmpeg function sws_getContext

						//parameters for sws_getContext:
						//width of the source, height of the source,
						//pixel format of the source (primarily known by the CodecContext), destination width, destination height,
						//pixel format of the destination (RGB24 is similar to GL_RGB), some flags for the conversion,
						//source Filter, destination Filter, parameter for the filter

						img_convert_ctx = sws_getContext(w, h,
														m_pCodecCtx->pix_fmt, w, h, 
														PIX_FMT_RGB24, SWS_BICUBIC,
														NULL, NULL, NULL);

						//if the image conversion failed the error handling can do something
						if (img_convert_ctx == NULL)
						{
							handle_error(7);
						}						
					}
					//depending on the context the scale function actually converts the frame
					//parameter: SWSContext holding the conversion information, data of the source, linesize of the source,
					//slizeY, slizeH (height), destination data, destination linesize
					int ret = sws_scale(img_convert_ctx, m_pFrame->data, m_pFrame->linesize, 
										0, m_pCodecCtx->height, m_pFrameRGB->data, m_pFrameRGB->linesize);

					/*
					*the result is stored in a separate pointer so the ret value can be checked and
					*errors detected without overwriting the last frame's data in case something failed
					*BEWARE: this structure needs to be modified if used in multiple threads
					*/
					m_pResultFrame = m_pFrameRGB;
				}
				else
				{
					//keep calling this function until a full frame has finished decoding
					run();
				}
			}
			//frees the allocated packet
			av_free_packet(&m_pPacket);
		}
		else
		{
			//this block simply seeks to position 0 (which is defined as the start of the video)
			//in case the playback time has reached the end (the video has been fully played back)
			int64_t pos = 0;
			int64_t seek_target = 0;
		
			AVRational myAVTIMEBASEQ = {1, AV_TIME_BASE};

			seek_target = av_rescale_q(seek_target, (AVRational) myAVTIMEBASEQ, m_pFormatCtx->streams[m_pVideostream]->time_base);

			if (av_seek_frame(m_pFormatCtx, m_pVideostream, seek_target, AVSEEK_FLAG_ANY))
			{
				handle_error(10);
			}
		}
	
	}
Code example #19
File: h264dec.c  Project: darcyg/h264decoder
//h264 stream sequence: 0x67 0x68 0x65 0x61 0x61 ... 0x67 0x68 0x65 0x61 0x61 ...
//                      SPS  PPS   I    P    P  ...  SPS  PPS   I    P    P  ...
//the data in ph264buf is SPS+PPS+I or a single P frame
//parameters: decObjIdx  -- index of the decoder object
//            ph264buf   -- h264 input buffer
//            h264buflen -- length of the h264 input buffer
//            isfinished -- whether this frame has finished decoding
//            pyuvbuf    -- yuv output buffer
//            yuvbuflen  -- length of the yuv output buffer
//the various input parameters are packed into one struct
int h264dec(int decObjIdx, SH264decParams* pSH264decParams)
{
	printf("h264dec_yuv()--here!\n");
	if (decObjIdx < 0 || decObjIdx > max_channel)
	{
		return -1;
	}
	if (NULL == pSH264decParams->ph264buf || pSH264decParams->h264buflen <= 0 )
	{
		return -2;
	}

	uint8_t *p = pSH264decParams->ph264buf;
	CH264Decode *pCH264Decode = &g_CH264Decodes[decObjIdx];
	//if the decoder object is not initialized, fail
	if (pCH264Decode->bInitOK == 0 || pCH264Decode->bUsed == 0)
	{
		return -3;
	}

	//parse the width and height out of the h264 buffer
	if (pCH264Decode->nImageHeight == 0 || pCH264Decode->nImageWidth == 0)
	{
		if (1 != h264dec_getInfo(decObjIdx, p, pSH264decParams->h264buflen, &pCH264Decode->nImageWidth, &pCH264Decode->nImageHeight))
			return -4;
	}

	pSH264decParams->decwidth = pCH264Decode->nImageWidth;
	pSH264decParams->decheight = pCH264Decode->nImageHeight;

	if (pSH264decParams->pyuvbuf == NULL && pSH264decParams->prgbbuf == NULL)
	{
		return 1;
	}

	pCH264Decode->packet.data = p;
	pCH264Decode->packet.size = pSH264decParams->h264buflen;
	//decode
	int ret = avcodec_decode_video2(pCH264Decode->pCodecCtx, pCH264Decode->pFrameYUV, &pSH264decParams->isfinished, &pCH264Decode->packet);
	if (ret < 0)
	{
		//decoding failed
		return -5;
	}
	//a full frame has not been decoded yet
	if(pSH264decParams->isfinished == 0)
	{
		return -6;
	}
	//copy the decoded yuv data into the caller's buffer
	if (pSH264decParams->pyuvbuf != NULL && pSH264decParams->yuvbuflen > 0)
	{
		int i = 0;
		int j = 0;

		int yuvwidth = 0;
		int yuvheight = 0;
		int k = 0;
		for (; i < 3; ++i)
		{
			if (i == 0)//Y
			{
				yuvwidth = pCH264Decode->pFrameYUV->width;
				yuvheight = pCH264Decode->pFrameYUV->height;
			}
			else//U V
			{
				yuvwidth = pCH264Decode->pFrameYUV->width/2;
				yuvheight = pCH264Decode->pFrameYUV->height/2;
			}
			//copy rows of yuvwidth valid bytes from data[i], stepping by linesize[i], into pyuvbuf
			int blocksize = yuvheight * pCH264Decode->pFrameYUV->linesize[i];
			for (k = 0;k < blocksize;)
			{
				memcpy(&pSH264decParams->pyuvbuf[j], &pCH264Decode->pFrameYUV->data[i][k], yuvwidth);
				k+=pCH264Decode->pFrameYUV->linesize[i];
				j+=yuvwidth;
			}
		}
		pSH264decParams->yuvbuflen = j;
	}

	if (pSH264decParams->prgbbuf == NULL || pSH264decParams->rgbbuflen <= 0)
	{
		return 2;
	}

    int src_w, src_h, dst_w, dst_h;
    src_w = dst_w = pCH264Decode->nImageWidth;
    src_h = dst_h = pCH264Decode->nImageHeight;

   if (NULL == pCH264Decode->prgbBuf || 0 == pCH264Decode->rgbbuflen)
   {
	   pCH264Decode->rgbbuflen = avpicture_get_size(AV_PIX_FMT_BGR24, pCH264Decode->pCodecCtx->width,
	          		pCH264Decode->pCodecCtx->height);
	   pCH264Decode->prgbBuf = av_malloc(pCH264Decode->rgbbuflen);

	   ret = avpicture_fill((AVPicture *)(pCH264Decode->pFrameRGB), pCH264Decode->prgbBuf, AV_PIX_FMT_BGR24,
			pCH264Decode->pCodecCtx->width, pCH264Decode->pCodecCtx->height);
   }

   /* create scaling context */
    if (NULL == pCH264Decode->sws_ctx)
    {
    	pCH264Decode->sws_ctx = sws_getCachedContext(pCH264Decode->sws_ctx, src_w, src_h, pCH264Decode->pCodecCtx->pix_fmt,
    		                             dst_w, dst_h, AV_PIX_FMT_BGR24,
    		                             SWS_BICUBIC, NULL, NULL, NULL);
    	if (NULL == pCH264Decode->sws_ctx)
    	{
    		printf("sws_getCachedContext failed!\n");
    		return -7;
    	}
    }

    //flip the YUV planes (start address moved to the last row, negative linesize)
    //so that the converted RGB image does not come out upside-down
    pCH264Decode->pFrameYUV->data[0] += pCH264Decode->pFrameYUV->linesize[0] * (pCH264Decode->pCodecCtx->height-1);
    pCH264Decode->pFrameYUV->linesize[0] *= -1;
    pCH264Decode->pFrameYUV->data[1] += pCH264Decode->pFrameYUV->linesize[1] * (pCH264Decode->pCodecCtx->height/2 - 1);
    pCH264Decode->pFrameYUV->linesize[1] *= -1;
    pCH264Decode->pFrameYUV->data[2] += pCH264Decode->pFrameYUV->linesize[2] * (pCH264Decode->pCodecCtx->height/2 - 1);
    pCH264Decode->pFrameYUV->linesize[2] *= -1;


    /* convert to destination format */
    sws_scale(pCH264Decode->sws_ctx, (const uint8_t * const*)pCH264Decode->pFrameYUV->data,
    		pCH264Decode->pFrameYUV->linesize, 0, src_h, pCH264Decode->pFrameRGB->data, pCH264Decode->pFrameRGB->linesize);

    //copy out the rgb buffer
	memcpy(pSH264decParams->prgbbuf, pCH264Decode->prgbBuf, pCH264Decode->rgbbuflen);
	pSH264decParams->rgbbuflen = pCH264Decode->rgbbuflen;

	return 3;
}
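
Note: the per-plane memcpy loop above can be written with av_image_copy_to_buffer() from libavutil/imgutils.h, which performs the stride-to-packed copy for all planes in one call; a minimal sketch, reusing the example's names and its decoder pixel format:

	#include <libavutil/imgutils.h>

	int copied = av_image_copy_to_buffer(
		pSH264decParams->pyuvbuf, pSH264decParams->yuvbuflen,
		(const uint8_t * const *)pCH264Decode->pFrameYUV->data,
		pCH264Decode->pFrameYUV->linesize,
		pCH264Decode->pCodecCtx->pix_fmt,
		pCH264Decode->pFrameYUV->width,
		pCH264Decode->pFrameYUV->height,
		1);	/* align = 1: tightly packed output */
	if (copied < 0)
		return -8;	/* hypothetical: extends the example's error codes */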
Code example #20
File: vf_watermark.c  Project: silicontrip/lavtools
static int config_props(AVFilterLink *outlink)
{

	AVFilterContext *ctx = outlink->src;
	OverlayContext *ovl = ctx->priv;

    AVFilterLink *inlink = outlink->src->inputs[0];

    AVFormatContext *pFormatCtx;
	AVInputFormat *avif = NULL;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
	AVPacket        packet;
	AVFrame *overlay, *tempMask;

	int avStream = -1;
	int frameFinished;

	struct SwsContext *sws;

	uint8_t *data;
	uint8_t *maskData;
	uint8_t *tempData;

    av_log(ctx, AV_LOG_DEBUG, ">>> config_props().\n");



	// make sure Chroma planes align.
	avcodec_get_chroma_sub_sample(outlink->format, &ovl->hsub, &ovl->vsub);

//	av_log(ctx,AV_LOG_INFO,"hsub: %d vsub: %d iformat: %d oformat %d\n",ovl->hsub,ovl->vsub,inlink->format,outlink->format);

	if ((ovl->printX % (1<<ovl->hsub) && ovl->hsub!=1)||(ovl->printY % (1<<ovl->vsub) && ovl->vsub!=1)) {
			av_log(ctx, AV_LOG_ERROR, "Cannot use this position with this chroma subsampling. Chroma plane will not align. (continuing with unaligned chroma planes, your watermark may look distorted)\n");
	}

    av_log(ctx, AV_LOG_DEBUG, "    config_props() avformat_open_input(%s).\n",ovl->imageName);

    pFormatCtx = avformat_alloc_context();

	// open overlay image
	// avformat_open_input
	if(avformat_open_input(&pFormatCtx, ovl->imageName, avif, NULL)!=0) {
		av_log(ctx, AV_LOG_FATAL, "Cannot open overlay image (%s).\n",ovl->imageName);
		return -1;

	}

    av_log(ctx, AV_LOG_DEBUG, "    config_props() avformat_find_stream_info.\n");


	if(avformat_find_stream_info(pFormatCtx,NULL)<0) {
		av_log(ctx, AV_LOG_FATAL, "Cannot find stream in overlay image.\n");
		return -1;

	}

    av_log(ctx, AV_LOG_DEBUG, "    config_props() pFormatCtx->streams.\n");


	for(int i=0; i<pFormatCtx->nb_streams; i++)
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
			avStream=i;
			break;
		}

	if(avStream==-1) {
		av_log (ctx,AV_LOG_FATAL,"could not find an image stream in overlay image\n");
		return -1;
	}

    av_log(ctx, AV_LOG_DEBUG, "    config_props() avcodec_find_decoder.\n");


	pCodecCtx=pFormatCtx->streams[avStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);

	if(pCodec==NULL) {
		av_log(ctx, AV_LOG_FATAL ,"could not find codec for overlay image\n");
		return -1;

	}

    av_log(ctx, AV_LOG_DEBUG, "    config_props() avcodec_open2.\n");


	// Open codec
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0) {
		av_log(ctx, AV_LOG_FATAL,"could not open codec for overlay image\n");
		return -1;

	}

	// check for appropriate format.
	if (pCodecCtx->pix_fmt != PIX_FMT_ARGB &&
		pCodecCtx->pix_fmt != PIX_FMT_RGBA &&
		pCodecCtx->pix_fmt != PIX_FMT_ABGR &&
		pCodecCtx->pix_fmt != PIX_FMT_BGRA)
	{
		// warn if no alpha channel
		av_log(ctx,AV_LOG_WARNING, "overlay image has no alpha channel (assuming completely opaque)");

	}

    av_log(ctx, AV_LOG_DEBUG, "    config_props() avcodec_alloc_frame.\n");


	overlay = avcodec_alloc_frame();

	// read overlay file into overlay AVFrame

	av_read_frame(pFormatCtx, &packet);
	avcodec_decode_video2(pCodecCtx, overlay, &frameFinished, &packet);

	// the mask will always be GRAY8

	// alpha should be all or nothing, so there is no real need to test both;
	// testing both in case one was missed.
	if (ovl->printW == -1 || ovl->printH == -1)
	{
		ovl->printW = pCodecCtx->width;
		ovl->printH = pCodecCtx->height;
	}

	// Allocate AVFrames and image buffers
	ovl->pFrame=avcodec_alloc_frame();
	ovl->maskFrame=avcodec_alloc_frame();
	tempMask = avcodec_alloc_frame();

	data = (uint8_t *) av_malloc(avpicture_get_size(inlink->format, ovl->printW, ovl->printH));
	maskData = (uint8_t *) av_malloc(avpicture_get_size(PIX_FMT_GRAY8, ovl->printW, ovl->printH));
	tempData = (uint8_t *) av_malloc(avpicture_get_size(PIX_FMT_GRAY8, pCodecCtx->width, pCodecCtx->height));

	avpicture_fill((AVPicture *)tempMask, tempData, PIX_FMT_GRAY8, pCodecCtx->width, pCodecCtx->height);
	avpicture_fill((AVPicture *)ovl->maskFrame, maskData, PIX_FMT_GRAY8, ovl->printW, ovl->printH);
	avpicture_fill((AVPicture *)ovl->pFrame, data, inlink->format, ovl->printW, ovl->printH);


	av_log(ctx,AV_LOG_DEBUG,"mask linesize %d\n",ovl->maskFrame->linesize[0]);

	// copy the alpha mask, it appears to be getting lost during sws_scale:
	// copy the alpha if it exists and then scale it.
	ovl->mask=0;
	if (pCodecCtx->pix_fmt == PIX_FMT_ARGB ||
		pCodecCtx->pix_fmt == PIX_FMT_RGBA ||
		pCodecCtx->pix_fmt == PIX_FMT_ABGR ||
		pCodecCtx->pix_fmt == PIX_FMT_BGRA)
	{

		// copy the alpha if it exists and then scale it.
		int alpha = 0;
		if (pCodecCtx->pix_fmt == PIX_FMT_RGBA || pCodecCtx->pix_fmt == PIX_FMT_BGRA) { alpha = 3; }

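		// In these packed 32-bit formats each pixel is 4 bytes; the alpha
		// byte is at offset 3 for RGBA/BGRA and offset 0 for ARGB/ABGR,
		// hence the 'alpha' offset above. The loop below copies just that
		// byte of every pixel into the GRAY8 mask plane.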
		for (int y=0; y < pCodecCtx->height; y++) {
			// memcpy((tempMask->data[0] + y * tempMask->linesize[0]),
			for (int x=0; x < pCodecCtx->width; x++) {
				*(tempMask->data[0] + y * tempMask->linesize[0] + x ) = *(overlay->data[0] + y * overlay->linesize[0] + x* 4 + alpha);
			}
		}
		// scale and copy

		av_log(ctx,AV_LOG_DEBUG," in: %dx%d, out %dx%d\n",pCodecCtx->width, pCodecCtx->height,ovl->printW, ovl->printH);

		// scale & copy, even if we don't scale, we still need to copy

		sws=sws_getContext(pCodecCtx->width, pCodecCtx->height, PIX_FMT_GRAY8,
						   ovl->printW, ovl->printH, PIX_FMT_GRAY8,
						   SWS_BILINEAR, NULL, NULL, NULL);

		sws_scale(sws, (const uint8_t * const *)tempMask->data, tempMask->linesize, 0, pCodecCtx->height,
				  ovl->maskFrame->data, ovl->maskFrame->linesize);

		sws_freeContext(sws); // release the mask-scaling context before 'sws' is reused below
		ovl->mask = 1;

	}

	av_log(ctx,AV_LOG_DEBUG, "config_props() sws_getContext\n");


	av_log(ctx,AV_LOG_DEBUG,"inlink format %d, png format %d\n",inlink->format,pCodecCtx->pix_fmt);

	// convert to output frame format.


	sws=sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
					   ovl->printW, ovl->printH, inlink->format,
					   SWS_BILINEAR, NULL, NULL, NULL);

	// set the output filter frame size to the input frame size.

	outlink->w = inlink->w;
    outlink->h = inlink->h;

	// convert the image

	sws_scale(sws, (const uint8_t * const *)overlay->data, overlay->linesize, 0, pCodecCtx->height,
				ovl->pFrame->data, ovl->pFrame->linesize);


	av_free(tempMask);
	av_free(overlay);
	sws_freeContext(sws);
    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    av_log(ctx, AV_LOG_DEBUG, "<<< config_props().\n");


    return 0;


}
Code example #21
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int             i, videoStream, audioStream;
  AVCodecContext  *pCodecCtx = NULL;
  AVCodec         *pCodec = NULL;
  AVFrame         *pFrame = NULL; 
  AVPacket        packet;
  int             frameFinished;
  //float           aspect_ratio;
  
  AVCodecContext  *aCodecCtx = NULL;
  AVCodec         *aCodec = NULL;

  SDL_Overlay     *bmp = NULL;
  SDL_Surface     *screen = NULL;
  SDL_Rect        rect;
  SDL_Event       event;
  SDL_AudioSpec   wanted_spec, spec;

  struct SwsContext   *sws_ctx            = NULL;
  AVDictionary        *videoOptionsDict   = NULL;
  AVDictionary        *audioOptionsDict   = NULL;

  if(argc < 2) {
    fprintf(stderr, "Usage: test <file>\n");
    exit(1);
  }
  // Register all formats and codecs
  av_register_all();
  
  if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
    fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
    exit(1);
  }

  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
    return -1; // Couldn't open file
  
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream=-1;
  audioStream=-1;
  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       videoStream < 0) {
      videoStream=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audioStream < 0) {
      audioStream=i;
    }
  }
  if(videoStream==-1)
    return -1; // Didn't find a video stream
  if(audioStream==-1)
    return -1;
   
  aCodecCtx=pFormatCtx->streams[audioStream]->codec;
  // Set audio settings from codec info
  wanted_spec.freq = aCodecCtx->sample_rate;
  wanted_spec.format = AUDIO_S16SYS;
  wanted_spec.channels = aCodecCtx->channels;
  wanted_spec.silence = 0;
  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
  wanted_spec.callback = audio_callback;
  wanted_spec.userdata = aCodecCtx;
  
  if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
    fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
    return -1;
  }
  aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
  if(!aCodec) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1;
  }
  avcodec_open2(aCodecCtx, aCodec, &audioOptionsDict);

  // audio_st = pFormatCtx->streams[index]
  packet_queue_init(&audioq);
  SDL_PauseAudio(0);

  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
  if(pCodec==NULL) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1; // Codec not found
  }
  // Open codec
  if(avcodec_open2(pCodecCtx, pCodec, &videoOptionsDict)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
  pFrame=avcodec_alloc_frame();

  // Make a screen to put our video

#ifndef __DARWIN__
        screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
        screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
  if(!screen) {
    fprintf(stderr, "SDL: could not set video mode - exiting\n");
    exit(1);
  }
  
  // Allocate a place to put our YUV image on that screen
  bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
				 pCodecCtx->height,
				 SDL_YV12_OVERLAY,
				 screen);
  sws_ctx =
    sws_getContext
    (
        pCodecCtx->width,
        pCodecCtx->height,
        pCodecCtx->pix_fmt,
        pCodecCtx->width,
        pCodecCtx->height,
        PIX_FMT_YUV420P,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
    );


  // Read frames and save first five frames to disk
  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 
			   &packet);
      
      // Did we get a video frame?
      if(frameFinished) {
	SDL_LockYUVOverlay(bmp);

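	// SDL's YV12 overlay stores the V plane before the U plane, while
	// FFmpeg's YUV420P frames are ordered Y, U, V; that is why planes 1
	// and 2 are swapped when wiring pict to the overlay below.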
	AVPicture pict;
	pict.data[0] = bmp->pixels[0];
	pict.data[1] = bmp->pixels[2];
	pict.data[2] = bmp->pixels[1];

	pict.linesize[0] = bmp->pitches[0];
	pict.linesize[1] = bmp->pitches[2];
	pict.linesize[2] = bmp->pitches[1];

	// Convert the image into YUV format that SDL uses
    sws_scale
    (
        sws_ctx, 
        (uint8_t const * const *)pFrame->data, 
        pFrame->linesize, 
        0,
        pCodecCtx->height,
        pict.data,
        pict.linesize
    );
	
	SDL_UnlockYUVOverlay(bmp);
	
	rect.x = 0;
	rect.y = 0;
	rect.w = pCodecCtx->width;
	rect.h = pCodecCtx->height;
	SDL_DisplayYUVOverlay(bmp, &rect);
      }
      // Free the packet that was allocated by av_read_frame; done outside the
      // frameFinished check so undisplayed video packets are not leaked
      av_free_packet(&packet);
    } else if(packet.stream_index==audioStream) {
      packet_queue_put(&audioq, &packet);
    } else {
      av_free_packet(&packet);
    }
    SDL_PollEvent(&event);
    switch(event.type) {
    case SDL_QUIT:
      quit = 1;
      SDL_Quit();
      exit(0);
      break;
    default:
      break;
    }

  }

  // Free the YUV frame
  av_free(pFrame);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}
Code example #22
File: VideoPlay.cpp  Project: forbe/VideoPlay
void VideoPlay::Decode()
{
	AVFrame *pFrameRGB = av_frame_alloc();
	AVFrame *pFrame = av_frame_alloc();
	AVPacket *packet = (AVPacket *) av_malloc(sizeof(AVPacket));
	int viedeoBuffer_size = avpicture_get_size(PIX_FMT_RGB24, m_nWidth,
			m_height);
	uint8_t * viedeoBuffer = (uint8_t *) av_malloc(viedeoBuffer_size);
	avpicture_fill((AVPicture *) pFrameRGB, viedeoBuffer, PIX_FMT_RGB24,
			m_nWidth, m_height);
	SwsContext* img_convert_ctx = sws_getContext(m_nWidth, m_height,
			m_pVideoCodecCtx->pix_fmt, m_nWidth, m_height, PIX_FMT_RGB24,
			SWS_BICUBIC, NULL, NULL, NULL);
	int ret = 0;
	LOGI("pCodecCtx->width=%d,pCodecCtx->height=%d", m_nWidth, m_height);
	LOGI("m_audioindex=%d,m_videoindex=%d", m_audioindex, m_videoindex);
	LOGI("m_audioCodeID=%d,m_videoCodeID=%d", m_audioCodeID, m_videoCodeID);

	//Out Audio Param
	uint64_t out_channel_layout = AV_CH_FRONT_CENTER;
	//nb_samples: AAC-1024 MP3-1152
	int out_nb_samples = m_pAudioCodecCtx->frame_size;
	AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
	int out_sample_rate = m_pAudioCodecCtx->sample_rate;
	int out_channels = av_get_channel_layout_nb_channels(out_channel_layout);
	//Out Buffer Size
	int Audiobuffer_size = av_samples_get_buffer_size(NULL, out_channels,
			out_nb_samples, out_sample_fmt, 1);
	uint8_t*Audiobuffer = (uint8_t *) av_malloc(MAX_AUDIO_FRAME_SIZE * 2);
	//FIX:Some Codec's Context Information is missing
	int64_t in_channel_layout;
	struct SwrContext *au_convert_ctx;
	in_channel_layout = av_get_default_channel_layout(
			m_pAudioCodecCtx->channels);
	//Swr
	au_convert_ctx = swr_alloc();
	au_convert_ctx = swr_alloc_set_opts(au_convert_ctx, out_channel_layout,
			out_sample_fmt, out_sample_rate, in_channel_layout,
			m_pAudioCodecCtx->sample_fmt, m_pAudioCodecCtx->sample_rate, 0,
			NULL);
	swr_init(au_convert_ctx);
	AVFrame *pAudioFrame;
	pAudioFrame = av_frame_alloc();

	m_audioLen = Audiobuffer_size;
	LOGI("m_pFormatCtx->duration=%lld", m_pFormatCtx->duration);
	LOGI("den=%d,num=%d", m_pVideoCodecCtx->time_base.den,
			m_pVideoCodecCtx->time_base.num);
	m_ptm = 40000;	//av_q2d(m_pVideoCodecCtx->time_base)*1000000;
	int flag_start = 0;
	LOGI(" bit_rate = %d ", m_pAudioCodecCtx->bit_rate);
	LOGI(" sample_rate = %d ", m_pAudioCodecCtx->sample_rate);
	LOGI(" channels = %d ", m_pAudioCodecCtx->channels);
	LOGI(" code_name = %s ", m_pAudioCodecCtx->codec->name);
	LOGI(" block_align = %d", m_pAudioCodecCtx->block_align);
	m_audioPlay.init();
	m_audioPlay.createBufferQueueAudioPlayer(out_sample_rate, out_channels,
	SL_PCMSAMPLEFORMAT_FIXED_16, bqPlayerCallback);
	while (av_read_frame(m_pFormatCtx, packet) >= 0)
	{
		if (m_eState == State_Stop)
		{
			LOGI("STOP");
			break;
		}
		int got_picture = 0;
		if (packet->stream_index == m_videoindex)
		{
			ret = avcodec_decode_video2(m_pVideoCodecCtx, pFrame, &got_picture,
					packet);
			if (ret < 0)
			{
				LOGE("Decode Error.\n");
				return;
			}
			if (got_picture)
			{
				sws_scale(img_convert_ctx,
						(const uint8_t* const *) pFrame->data, pFrame->linesize,
						0, m_height, pFrameRGB->data, pFrameRGB->linesize);

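				// Bounded producer/consumer handoff: wait for a free slot,
				// push a heap copy of the RGB frame under the mutex, then
				// signal the render thread that a frame is ready.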
				sem_wait(&semVideoEmpty);
				pthread_mutex_lock(&mutexVideo);
				unsigned char* tmp = new unsigned char[m_nWidth * m_height * 3];
				memcpy(tmp, pFrameRGB->data[0], m_nWidth * m_height * 3);
				m_videoBuff.push((int) tmp); // NOTE: casting a pointer to int truncates on 64-bit builds; the queue element type should be intptr_t
				pthread_mutex_unlock(&mutexVideo);
				sem_post(&semVideoFull);
			}
		}
		int AudioFinished = 0;
		if (packet->stream_index == m_audioindex)
		{
			ret = avcodec_decode_audio4(m_pAudioCodecCtx, pAudioFrame,
					&AudioFinished, packet);
			if (ret > 0 && AudioFinished)
			{
				swr_convert(au_convert_ctx, &Audiobuffer, MAX_AUDIO_FRAME_SIZE,
						(const uint8_t **) pAudioFrame->data,
						pAudioFrame->nb_samples);
				if (flag_start == 0)
				{
					flag_start = 1;
					m_audioPlay.PlayBuff(Audiobuffer, Audiobuffer_size);
				}
				uint8_t *tmp = (uint8_t *) av_malloc(
				MAX_AUDIO_FRAME_SIZE * 2);
				memcpy(tmp, Audiobuffer, Audiobuffer_size);
				m_audioBuff.push((int) tmp);
				tmp = NULL;
			}
		}
		av_free_packet(packet); // release each demuxed packet to avoid leaking
	}
	av_free(viedeoBuffer);
	av_free(Audiobuffer);
	sws_freeContext(img_convert_ctx);
	swr_free(&au_convert_ctx);
	av_frame_free(&pAudioFrame);
	av_frame_free(&pFrameRGB);
	av_frame_free(&pFrame);
	av_free_packet(packet);
	m_bDecodeFinish = true;
	LOGI("Decode File Finish!");
}
Code example #23
HRESULT DeckLinkCaptureDelegate::VideoInputFrameArrived( IDeckLinkVideoInputFrame *videoframe, IDeckLinkAudioInputPacket *audioframe )
{
    decklink_ctx_t *decklink_ctx = &decklink_opts_->decklink_ctx;
    obe_raw_frame_t *raw_frame = NULL;
    AVPacket pkt;
    AVFrame *frame = NULL;
    void *frame_bytes, *anc_line;
    obe_t *h = decklink_ctx->h;
    int finished = 0, ret, num_anc_lines = 0, anc_line_stride,
    lines_read = 0, first_line = 0, last_line = 0, line, num_vbi_lines, vii_line;
    uint32_t *frame_ptr;
    uint16_t *anc_buf, *anc_buf_pos;
    uint8_t *vbi_buf;
    int anc_lines[DECKLINK_VANC_LINES];
    IDeckLinkVideoFrameAncillary *ancillary;
    BMDTimeValue stream_time, frame_duration;

    if( decklink_opts_->probe_success )
        return S_OK;

    av_init_packet( &pkt );

    if( videoframe )
    {
        if( videoframe->GetFlags() & bmdFrameHasNoInputSource )
        {
            syslog( LOG_ERR, "Decklink card index %i: No input signal detected", decklink_opts_->card_idx );
            return S_OK;
        }
        else if( decklink_opts_->probe )
            decklink_opts_->probe_success = 1;

        /* use SDI ticks as clock source */
        videoframe->GetStreamTime( &stream_time, &frame_duration, OBE_CLOCK );
        obe_clock_tick( h, (int64_t)stream_time );

        if( decklink_ctx->last_frame_time == -1 )
            decklink_ctx->last_frame_time = obe_mdate();
        else
        {
            int64_t cur_frame_time = obe_mdate();
            if( cur_frame_time - decklink_ctx->last_frame_time >= SDI_MAX_DELAY )
            {
                syslog( LOG_WARNING, "Decklink card index %i: No frame received for %"PRIi64" ms", decklink_opts_->card_idx,
                       (cur_frame_time - decklink_ctx->last_frame_time) / 1000 );
                pthread_mutex_lock( &h->drop_mutex );
                h->encoder_drop = h->mux_drop = 1;
                pthread_mutex_unlock( &h->drop_mutex );
            }

            decklink_ctx->last_frame_time = cur_frame_time;
        }

        const int width = videoframe->GetWidth();
        const int height = videoframe->GetHeight();
        const int stride = videoframe->GetRowBytes();

        videoframe->GetBytes( &frame_bytes );

        /* TODO: support format switching (rare in SDI) */
        int j;
        for( j = 0; first_active_line[j].format != -1; j++ )
        {
            if( decklink_opts_->video_format == first_active_line[j].format )
                break;
        }

        videoframe->GetAncillaryData( &ancillary );

        /* NTSC starts on line 4 */
        line = decklink_opts_->video_format == INPUT_VIDEO_FORMAT_NTSC ? 4 : 1;
        anc_line_stride = FFALIGN( (width * 2 * sizeof(uint16_t)), 16 );
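        /* Each 10-bit VANC sample is unpacked into one 16-bit word, two
         * samples (luma + chroma) per pixel, and the per-line stride is
         * aligned to 16 bytes, a common SIMD-friendly alignment. */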

        /* Overallocate slightly for VANC buffer
         * Some VBI services stray into the active picture so allocate some extra space */
        anc_buf = anc_buf_pos = (uint16_t*)av_malloc( DECKLINK_VANC_LINES * anc_line_stride );
        if( !anc_buf )
        {
            syslog( LOG_ERR, "Malloc failed\n" );
            goto end;
        }

        while( 1 )
        {
            /* Some cards have restrictions on what lines can be accessed so try them all
             * Some buggy decklink cards will randomly refuse access to a particular line so
             * work around this issue by blanking the line */
            if( ancillary->GetBufferForVerticalBlankingLine( line, &anc_line ) == S_OK )
                decklink_ctx->unpack_line( (uint32_t*)anc_line, anc_buf_pos, width );
            else
                decklink_ctx->blank_line( anc_buf_pos, width );

            anc_buf_pos += anc_line_stride / 2;
            anc_lines[num_anc_lines++] = line;

            if( !first_line )
                first_line = line;
            last_line = line;

            lines_read++;
            line = sdi_next_line( decklink_opts_->video_format, line );

            if( line == first_active_line[j].line )
                break;
        }

        ancillary->Release();

        if( !decklink_opts_->probe )
        {
            raw_frame = new_raw_frame();
            if( !raw_frame )
            {
                syslog( LOG_ERR, "Malloc failed\n" );
                goto end;
            }
        }

        anc_buf_pos = anc_buf;
        for( int i = 0; i < num_anc_lines; i++ )
        {
            parse_vanc_line( h, &decklink_ctx->non_display_parser, raw_frame, anc_buf_pos, width, anc_lines[i] );
            anc_buf_pos += anc_line_stride / 2;
        }

        if( IS_SD( decklink_opts_->video_format ) && first_line != last_line )
        {
            /* Add some VBI lines to the ancillary buffer */
            frame_ptr = (uint32_t*)frame_bytes;

            /* NTSC starts from line 283 so add an extra line */
            num_vbi_lines = NUM_ACTIVE_VBI_LINES + ( decklink_opts_->video_format == INPUT_VIDEO_FORMAT_NTSC );
            for( int i = 0; i < num_vbi_lines; i++ )
            {
                decklink_ctx->unpack_line( frame_ptr, anc_buf_pos, width );
                anc_buf_pos += anc_line_stride / 2;
                frame_ptr += stride / 4;
                last_line = sdi_next_line( decklink_opts_->video_format, last_line );
            }
            num_anc_lines += num_vbi_lines;

            vbi_buf = (uint8_t*)av_malloc( width * 2 * num_anc_lines );
            if( !vbi_buf )
            {
                syslog( LOG_ERR, "Malloc failed\n" );
                goto end;
            }

            /* Scale the lines from 10-bit to 8-bit */
            decklink_ctx->downscale_line( anc_buf, vbi_buf, num_anc_lines );
            anc_buf_pos = anc_buf;

            /* Handle Video Index information */
            int tmp_line = first_line;
            vii_line = decklink_opts_->video_format == INPUT_VIDEO_FORMAT_NTSC ? NTSC_VIDEO_INDEX_LINE : PAL_VIDEO_INDEX_LINE;
            while( tmp_line < vii_line )
            {
                anc_buf_pos += anc_line_stride / 2;
                tmp_line++;
            }

            if( decode_video_index_information( h, &decklink_ctx->non_display_parser, anc_buf_pos, raw_frame, vii_line ) < 0 )
                goto fail;

            if( !decklink_ctx->has_setup_vbi )
            {
                vbi_raw_decoder_init( &decklink_ctx->non_display_parser.vbi_decoder );

                decklink_ctx->non_display_parser.ntsc = decklink_opts_->video_format == INPUT_VIDEO_FORMAT_NTSC;
                decklink_ctx->non_display_parser.vbi_decoder.start[0] = first_line;
                decklink_ctx->non_display_parser.vbi_decoder.start[1] = sdi_next_line( decklink_opts_->video_format, first_line );
                decklink_ctx->non_display_parser.vbi_decoder.count[0] = last_line - decklink_ctx->non_display_parser.vbi_decoder.start[1] + 1;
                decklink_ctx->non_display_parser.vbi_decoder.count[1] = decklink_ctx->non_display_parser.vbi_decoder.count[0];

                if( setup_vbi_parser( &decklink_ctx->non_display_parser ) < 0 )
                    goto fail;

                decklink_ctx->has_setup_vbi = 1;
            }

            if( decode_vbi( h, &decklink_ctx->non_display_parser, vbi_buf, raw_frame ) < 0 )
                goto fail;

            av_free( vbi_buf );
        }

        av_free( anc_buf );

        if( !decklink_opts_->probe )
        {
            frame = avcodec_alloc_frame();
            if( !frame )
            {
                syslog( LOG_ERR, "[decklink]: Could not allocate video frame\n" );
                goto end;
            }
            decklink_ctx->codec->width = width;
            decklink_ctx->codec->height = height;

            pkt.data = (uint8_t*)frame_bytes;
            pkt.size = stride * height;

            ret = avcodec_decode_video2( decklink_ctx->codec, frame, &finished, &pkt );
            if( ret < 0 || !finished )
            {
                syslog( LOG_ERR, "[decklink]: Could not decode video frame\n" );
                goto end;
            }

            raw_frame->release_data = obe_release_video_data;
            raw_frame->release_frame = obe_release_frame;

            memcpy( raw_frame->alloc_img.stride, frame->linesize, sizeof(raw_frame->alloc_img.stride) );
            memcpy( raw_frame->alloc_img.plane, frame->data, sizeof(raw_frame->alloc_img.plane) );
            avcodec_free_frame( &frame );
            raw_frame->alloc_img.csp = (int)decklink_ctx->codec->pix_fmt;
            raw_frame->alloc_img.planes = av_pix_fmt_descriptors[raw_frame->alloc_img.csp].nb_components;
            raw_frame->alloc_img.width = width;
            raw_frame->alloc_img.height = height;
            raw_frame->alloc_img.format = decklink_opts_->video_format;
            raw_frame->timebase_num = decklink_opts_->timebase_num;
            raw_frame->timebase_den = decklink_opts_->timebase_den;

            memcpy( &raw_frame->img, &raw_frame->alloc_img, sizeof(raw_frame->alloc_img) );
            if( IS_SD( decklink_opts_->video_format ) )
            {
                if( raw_frame->alloc_img.height == 486 )
                    raw_frame->img.height = 480;

                raw_frame->img.first_line = first_active_line[j].line;
            }

            /* If AFD is present and the stream is SD this will be changed in the video filter */
            raw_frame->sar_width = raw_frame->sar_height = 1;
            raw_frame->pts = stream_time;

            for( int i = 0; i < decklink_ctx->device->num_input_streams; i++ )
            {
                if( decklink_ctx->device->streams[i]->stream_format == VIDEO_UNCOMPRESSED )
                    raw_frame->input_stream_id = decklink_ctx->device->streams[i]->input_stream_id;
            }

            if( add_to_filter_queue( h, raw_frame ) < 0 )
                goto fail;

            if( send_vbi_and_ttx( h, &decklink_ctx->non_display_parser, raw_frame->pts ) < 0 )
                goto fail;

            decklink_ctx->non_display_parser.num_vbi = 0;
            decklink_ctx->non_display_parser.num_anc_vbi = 0;
        }
    }

    /* TODO: probe SMPTE 337M audio */

    if( audioframe && !decklink_opts_->probe )
    {
        audioframe->GetBytes( &frame_bytes );
        raw_frame = new_raw_frame();
        if( !raw_frame )
        {
            syslog( LOG_ERR, "Malloc failed\n" );
            goto end;
        }

        raw_frame->audio_frame.num_samples = audioframe->GetSampleFrameCount();
        raw_frame->audio_frame.num_channels = decklink_opts_->num_channels;
        raw_frame->audio_frame.sample_fmt = AV_SAMPLE_FMT_S32P;

        if( av_samples_alloc( raw_frame->audio_frame.audio_data, &raw_frame->audio_frame.linesize, decklink_opts_->num_channels,
                              raw_frame->audio_frame.num_samples, (AVSampleFormat)raw_frame->audio_frame.sample_fmt, 0 ) < 0 )
        {
            syslog( LOG_ERR, "Malloc failed\n" );
            return -1;
        }

        if( avresample_convert( decklink_ctx->avr, raw_frame->audio_frame.audio_data, raw_frame->audio_frame.linesize,
                                raw_frame->audio_frame.num_samples, (uint8_t**)&frame_bytes, 0, raw_frame->audio_frame.num_samples ) < 0 )
        {
            syslog( LOG_ERR, "[decklink] Sample format conversion failed\n" );
            return -1;
        }

        BMDTimeValue packet_time;
        audioframe->GetPacketTime( &packet_time, OBE_CLOCK );
        raw_frame->pts = packet_time;
        raw_frame->release_data = obe_release_audio_data;
        raw_frame->release_frame = obe_release_frame;
        for( int i = 0; i < decklink_ctx->device->num_input_streams; i++ )
        {
            if( decklink_ctx->device->streams[i]->stream_format == AUDIO_PCM )
                raw_frame->input_stream_id = decklink_ctx->device->streams[i]->input_stream_id;
        }

        if( add_to_filter_queue( decklink_ctx->h, raw_frame ) < 0 )
            goto fail;
    }

end:
    if( frame )
        avcodec_free_frame( &frame );

    av_free_packet( &pkt );

    return S_OK;

fail:

    if( raw_frame )
    {
        raw_frame->release_data( raw_frame );
        raw_frame->release_frame( raw_frame );
    }

    return S_OK;
}
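The 10-bit to 8-bit conversion step above (decklink_ctx->downscale_line) is compact enough to spell out. A minimal sketch, assuming the unpacked ancillary buffer holds one 10-bit sample per uint16_t with two samples (luma plus chroma) per pixel; the function below is illustrative, not the project's actual implementation:

static void downscale_line_c( const uint16_t *src, uint8_t *dst, int width )
{
    /* two 10-bit samples per pixel; drop the two low bits of each */
    for( int i = 0; i < width * 2; i++ )
        dst[i] = (uint8_t)(src[i] >> 2);
}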
Code example #24
0
int ReplayPlayoutLavfSource::run_lavc( ) {
    AVPacket packet;
    int frame_finished = 0;
    int audio_finished = 0;

    /* 
     * read stream until we get a video frame, 
     * possibly also decoding some audio along the way
     */
    while (frame_finished == 0 && audio_finished == 0 &&
            av_read_frame(format_ctx, &packet) >= 0) {
        if (packet.stream_index == video_stream) {
            avcodec_decode_video2(video_codecctx, lavc_frame, 
                    &frame_finished, &packet);
        } else if (packet.stream_index == audio_stream) {
            avcodec_decode_audio4(audio_codecctx, audio_frame, 
                    &audio_finished, &packet);
        }

        av_free_packet(&packet);
    }

    if (frame_finished) {
        /* make a RawFrame out of lavc_frame */
        RawFrame *fr = new RawFrame(1920, 1080, RawFrame::CbYCrY8422);
        switch (lavc_frame->format) {
            case AV_PIX_FMT_YUVJ422P:
            case AV_PIX_FMT_YUV422P:
                fr->pack->YCbCr8P422(
                    lavc_frame->data[0], 
                    lavc_frame->data[1],
                    lavc_frame->data[2],
                    lavc_frame->linesize[0],
                    lavc_frame->linesize[1],
                    lavc_frame->linesize[2]
                );
                break;

            case AV_PIX_FMT_UYVY422:
                /* copy stuff */
                memcpy(fr->data( ), lavc_frame->data[0], fr->size( ));
                break;

            case AV_PIX_FMT_YUV422P10LE:
                fr->pack->YCbCr10P422(
                    (uint16_t *)lavc_frame->data[0],
                    (uint16_t *)lavc_frame->data[1],
                    (uint16_t *)lavc_frame->data[2],
                    lavc_frame->linesize[0] / 2,
                    lavc_frame->linesize[1] / 2,
                    lavc_frame->linesize[2] / 2
                );
                break;

            case AV_PIX_FMT_YUV420P:
                fr->pack->YCbCr8P420(
                    lavc_frame->data[0],
                    lavc_frame->data[1],
                    lavc_frame->data[2],
                    lavc_frame->linesize[0],
                    lavc_frame->linesize[1],
                    lavc_frame->linesize[2]
                );
                break;

            default:
                fprintf(stderr, "ReplayPlayoutLavfSource doesn't know how "
                    "to handle AVPixelFormat %d\n", lavc_frame->format);
                memset(fr->data( ), 128, fr->size( ));
                break;
        }

        pending_video_frames.push_back(fr);
        return 1;
    } else if (audio_finished) {
        PackedAudioPacket<int16_t> apkt(
            audio_frame->nb_samples,
            audio_codecctx->channels
        );

        if (audio_codecctx->sample_fmt == AV_SAMPLE_FMT_S16) {
            memcpy(apkt.data( ), audio_frame->data[0], apkt.size_bytes( ));
        } else if (audio_codecctx->sample_fmt == AV_SAMPLE_FMT_FLTP) {
            /* convert planar float (from AAC) to signed 16-bit */
            copy_fltp(audio_frame, apkt);
        } else {
            fprintf(stderr, "sample_fmt=%d\n", audio_codecctx->sample_fmt);
            throw std::runtime_error("don't understand sample format");
        }
        if (audio_codecctx->sample_rate != 48000) {
            throw std::runtime_error("need 48khz");
        }

        if (audio_codecctx->channels != 2) {
            /* mix down to 2 channels if needed */
            PackedAudioPacket<int16_t> *twoch = apkt.change_channels(2);
            pending_audio.add_packet(twoch);
            delete twoch;
        } else {
            pending_audio.add_packet(&apkt);
        }

        return 1;
    } else {
        return 0;
    }
}
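Note that run_lavc() is built on avcodec_decode_video2()/avcodec_decode_audio4(), which were removed from libavcodec after the 3.x series. A sketch of the same read-until-video-frame loop on the send/receive API, reusing the member names from the example above (error paths abbreviated):

int got_video = 0;
AVPacket pkt;
while (!got_video && av_read_frame(format_ctx, &pkt) >= 0) {
    if (pkt.stream_index == video_stream &&
            avcodec_send_packet(video_codecctx, &pkt) >= 0) {
        /* returns 0 on success, AVERROR(EAGAIN) if more input is needed */
        got_video = avcodec_receive_frame(video_codecctx, lavc_frame) >= 0;
    } else if (pkt.stream_index == audio_stream &&
            avcodec_send_packet(audio_codecctx, &pkt) >= 0) {
        while (avcodec_receive_frame(audio_codecctx, audio_frame) >= 0) {
            /* queue audio exactly as run_lavc() does */
        }
    }
    av_packet_unref(&pkt);
}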
Code example #25
0
File: tutorial_01.cpp Project: zchen24/tutorial
int main(int argc, char *argv[])
{
    std::cout << "hello\n";

    av_register_all();

    AVFormatContext *pFormatCtx = nullptr;
    if (avformat_open_input(&pFormatCtx, argv[1], nullptr, nullptr) != 0) {
        std::cerr << "Failed to open file\n";
        return -1;
    }

    if (avformat_find_stream_info(pFormatCtx, nullptr) < 0) {
        std::cerr << "Failed to find stream info\n";
        return -1;
    }

    AVCodecContext *pCodecCtx = nullptr;
    AVCodecContext *pCodecCtxOriginal = nullptr;
    int videoStream = -1;
    for (int i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    }
    if (videoStream == -1) {
        std::cerr << "Failed to find a VIDEO stream\n";
        return -1;
    } else {
        std::cout << "Video stream = " << videoStream << "\n";
    }
    pCodecCtxOriginal = pFormatCtx->streams[videoStream]->codec;

    AVCodec *pCodec = nullptr;
    pCodec = avcodec_find_decoder(pCodecCtxOriginal->codec_id);
    if (pCodec == nullptr) {
        std::cerr << "Unsupported codec!\n";
        return -1;
    }

    // Copy Context
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (avcodec_copy_context(pCodecCtx, pCodecCtxOriginal) != 0) {
        std::cerr << "Failed to copy Codec Context\n";
        return -1;
    }

    // Open Codec
    if (avcodec_open2(pCodecCtx, pCodec, nullptr) < 0) {
        std::cerr << "Failed to open Codec\n";
        return -1;
    }

    AVFrame* pFrame;
    AVFrame* pFrameBGR;
    pFrame = av_frame_alloc();
    pFrameBGR = av_frame_alloc();
    if (pFrame == nullptr || pFrameBGR == nullptr) {
        std::cerr << "Failed to allocate frame\n";
        return -1;
    }

    int numFrameBytes;
    numFrameBytes = avpicture_get_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
    avpicture_fill((AVPicture*)pFrameBGR,
                   (uint8_t*)av_malloc(numFrameBytes * sizeof(uint8_t)),
                   AV_PIX_FMT_BGR24,
                   pCodecCtx->width,
                   pCodecCtx->height);
    std::cout << "Picture width = " << pCodecCtx->width << " height = " << pCodecCtx->height << "\n";

    // Format Conversion
    struct SwsContext * pSwsCtx = nullptr;
    pSwsCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                             pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_BGR24,
                             SWS_BILINEAR,  nullptr, nullptr, nullptr);
    if (pSwsCtx == nullptr) {
        std::cerr << "Failed to get the conversion context\n";
    }

    // -----------------------------
    // Loop through the video file
    // -----------------------------

    int packet_index = 0;
    AVPacket packet;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        if (packet.stream_index == videoStream) {
            packet_index++;

            // convert to AVFrame
            int got_picture;
            if (avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet) < 0) {
                std::cerr << "Failed to decode video AVPacket\n";
            }

            if (got_picture) {
                std::cout << "Successfully decoded a frame " << packet_index << "\n";

                // convert to BGR24 format
                sws_scale(pSwsCtx,
                          pFrame->data,
                          pFrame->linesize,
                          0,
                          pFrame->height,
                          pFrameBGR->data,
                          pFrameBGR->linesize);

                cv::Mat mat(pCodecCtx->height, pCodecCtx->width, CV_8UC3, pFrameBGR->data[0]);
                char filename[100];
                sprintf(filename, "frame%02d.jpg", packet_index);
                cv::imwrite(filename, mat);
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);

        if (packet_index == 12) {
            break;
        }
    }

    // Free allocated resources
    av_frame_free(&pFrame);
    av_frame_free(&pFrameBGR);

    // Close the codec
    avcodec_close(pCodecCtx);
    avcodec_close(pCodecCtxOriginal);

    // Close the video file
    avformat_close_input(&pFormatCtx);
    return 0;
}
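The avpicture_get_size()/avpicture_fill() pair used in this tutorial is deprecated; on current FFmpeg the equivalents live in libavutil/imgutils.h. A sketch of the replacement, with align=1 matching the old avpicture behaviour:

#include <libavutil/imgutils.h>

int numFrameBytes = av_image_get_buffer_size(AV_PIX_FMT_BGR24,
                                             pCodecCtx->width, pCodecCtx->height, 1);
uint8_t *buffer = (uint8_t *)av_malloc(numFrameBytes);
av_image_fill_arrays(pFrameBGR->data, pFrameBGR->linesize, buffer,
                     AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height, 1);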
Code example #26
0
File: MediaEngine.cpp Project: BlueSplash/ppsspp
bool MediaEngine::stepVideo(int videoPixelMode, bool skipFrame) {
#ifdef USE_FFMPEG
	auto codecIter = m_pCodecCtxs.find(m_videoStream);
	AVCodecContext *m_pCodecCtx = codecIter == m_pCodecCtxs.end() ? 0 : codecIter->second;

	if (!m_pFormatCtx)
		return false;
	if (!m_pCodecCtx)
		return false;
	if ((!m_pFrame)||(!m_pFrameRGB))
		return false;

	updateSwsFormat(videoPixelMode);
	// TODO: Technically we could set this to frameWidth instead of m_desWidth for better perf.
	// Update the linesize for the new format too.  We started with the largest size, so it should fit.
	m_pFrameRGB->linesize[0] = getPixelFormatBytes(videoPixelMode) * m_desWidth;

	AVPacket packet;
	av_init_packet(&packet);
	int frameFinished;
	bool bGetFrame = false;
	while (!bGetFrame) {
		bool dataEnd = av_read_frame(m_pFormatCtx, &packet) < 0;
		// Even if we've read all frames, some may have been re-ordered frames at the end.
		// Still need to decode those, so keep calling avcodec_decode_video2().
		if (dataEnd || packet.stream_index == m_videoStream) {
			// avcodec_decode_video2() gives us the re-ordered frames with a NULL packet.
			if (dataEnd)
				av_free_packet(&packet);

			int result = avcodec_decode_video2(m_pCodecCtx, m_pFrame, &frameFinished, &packet);
			if (frameFinished) {
				if (!skipFrame) {
					sws_scale(m_sws_ctx, m_pFrame->data, m_pFrame->linesize, 0,
						m_pCodecCtx->height, m_pFrameRGB->data, m_pFrameRGB->linesize);
				}

				if (av_frame_get_best_effort_timestamp(m_pFrame) != AV_NOPTS_VALUE)
					m_videopts = av_frame_get_best_effort_timestamp(m_pFrame) + av_frame_get_pkt_duration(m_pFrame) - m_firstTimeStamp;
				else
					m_videopts += av_frame_get_pkt_duration(m_pFrame);
				bGetFrame = true;
			}
			if (result <= 0 && dataEnd) {
				// Sometimes, m_readSize is less than m_streamSize at the end, but not by much.
				// This is kinda a hack, but the ringbuffer would have to be prematurely empty too.
				m_isVideoEnd = !bGetFrame && (m_pdata->getQueueSize() == 0);
				if (m_isVideoEnd)
					m_decodingsize = 0;
				break;
			}
		}
		av_free_packet(&packet);
	}
	return bGetFrame;
#else
	// If video engine is not available, just add to the timestamp at least.
	m_videopts += 3003;
	return true;
#endif // USE_FFMPEG
}
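The re-ordered-frames-at-end handling above (decoding with a freed, effectively empty packet) is the old API's implicit drain. On the send/receive API the drain is explicit; a sketch, with names matching the example:

	// entering draining mode: a NULL packet signals end of stream
	avcodec_send_packet(m_pCodecCtx, NULL);
	// the decoder now returns its re-ordered tail frames, then AVERROR_EOF
	while (avcodec_receive_frame(m_pCodecCtx, m_pFrame) >= 0) {
		// handle the remaining frames as above
	}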
Code example #27
0
File: main.c Project: elmagroud00/Experiments
int main(int argc ,char **argv)
{
	av_register_all();
	AVFormatContext *pFormatCtx = NULL;
	AVInputFormat *file_iformat = NULL;
	
	//avio_set_interrupt_cb(decode_interrupt_cb);	
	//Open video file
	printf("open video file:%s\n", argv[1]);
	if(avformat_open_input(&pFormatCtx, argv[1], file_iformat, NULL) < 0)
	{
		printf("canot open input file: %s\n", argv[1]);
		return -1; //Cannot open file
	}
	printf("open input file: %s OK\n", argv[1]);
	//Retrieve stream information
	if(av_find_stream_info(pFormatCtx) < 0)
		return -1;//cannot find stream information
	//Dump information about the file onto standard error
	av_dump_format(pFormatCtx, 0, argv[1], 0);

	int i;
	int videoStream;
	int audioStream;
	videoStream = -1;
	audioStream = -1;
	AVCodecContext *vCodecCtx;
	AVCodecContext *aCodecCtx;
	//Find the first video stream
	for(i = 0; i < pFormatCtx->nb_streams; i++)
	{
		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0) 
		{
			videoStream = i;
		}
		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0)
		{
			audioStream = i;	
		}
	}
	if(videoStream == -1)
	{
		printf("no video stream\n");
		return -1;//Did not find a video stream
	}
	if(audioStream == -1)
	{
		printf("no audio stream\n");
		return -1;//Did not find an audio stream
	}
	printf("find video strean: %d\n", videoStream);
	printf("find audio strean: %d\n", audioStream);

	//Get a pointer to the codec context for the video stream
	vCodecCtx = pFormatCtx->streams[videoStream]->codec;
	//set vCodecCtx for vdpau
	vCodecCtx->get_format = decoder_get_format;
	vCodecCtx->get_buffer = decoder_get_buffer;
	vCodecCtx->release_buffer = decoder_release_buffer;
	vCodecCtx->draw_horiz_band = decoder_draw_horiz_band;
	vCodecCtx->reget_buffer = decoder_get_buffer;
	vCodecCtx->slice_flags = SLICE_FLAG_CODEC_ORDER | SLICE_FLAG_ALLOW_FIELD;

	AVCodec *vCodec;
	vCodec = avcodec_find_decoder(vCodecCtx->codec_id);
	if(vCodec == NULL)
	{
		fprintf(stderr, "Unsupported video codec\n");
		return -1;//codec not find
	}
	//Open video codec
	if(avcodec_open(vCodecCtx, vCodec) < 0)
	{
		fprintf(stderr, "open video codec error\n");
		return -1;//Could not open codec
	}

	

	//Get a pointer to the codec context for the audio stream
	aCodecCtx = pFormatCtx->streams[audioStream]->codec;
	static SDL_AudioSpec wanted_spec, spec;
	wanted_spec.freq = aCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = aCodecCtx->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = audio_callback;
	wanted_spec.userdata = aCodecCtx;
	if(SDL_OpenAudio(&wanted_spec, &spec) < 0)
	{	
		fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
		return -1;
	}
	AVCodec *aCodec;
	aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
	if(aCodec == NULL)
	{
		fprintf(stderr, "Unsupport audio codec\n");
		return -1;//codec not found
	}
	if(avcodec_open(aCodecCtx, aCodec) < 0)
	{
		fprintf(stderr, "open avcodec error\n");
		return -1;
	}
	packet_queue_init(&audioq);
	SDL_PauseAudio(0);

	AVFrame *pFrame;
	//Allocate video frame
	pFrame = avcodec_alloc_frame();
	AVFrame *pFrameRGB;
	//Allocate an AVFrame structure
	pFrameRGB = avcodec_alloc_frame();
	if(pFrameRGB == NULL)
		return -1;
	uint8_t *buffer;
	int numBytes;
	//Determine required buffer size and allocate buffer
	numBytes = avpicture_get_size(PIX_FMT_RGB24, vCodecCtx->width, vCodecCtx->height);
	buffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t));
	//Assign appropriate parts of buffer to image planes in pFrameRGB
	//Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	//of AVPicture
	avpicture_fill((AVPicture*)pFrameRGB, buffer, PIX_FMT_RGB24, vCodecCtx->width, vCodecCtx->height);
	
	if((SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)))
	{
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		exit(1);
	}
	SDL_Surface *screen;
	screen = SDL_SetVideoMode(vCodecCtx->width, vCodecCtx->height, 0, 0);
	if(!screen)
	{
		fprintf(stderr, "SDL: could not set video mode\n");
		exit(1);
	}
	SDL_Overlay *bmp;
	bmp = SDL_CreateYUVOverlay(vCodecCtx->width, vCodecCtx->height, SDL_YV12_OVERLAY, screen);

	int frameFinished;
	AVPacket packet;
	SDL_Rect rect;
	i = 0;
	while(av_read_frame(pFormatCtx, &packet) >=0)
	{
		//is this a packet from video stream?
		if(packet.stream_index == videoStream)
		{
			//Decode video frame
			avcodec_decode_video2(vCodecCtx, pFrame, &frameFinished, &packet);
			//Did we get a video frame?
			if(frameFinished)
			{
				//Convert the decoded image to RGB32 (e.g. for OpenGL upload)
				AVPicture pict;
				//allocate the destination picture buffers before scaling into them
				avpicture_alloc(&pict, PIX_FMT_RGB32, vCodecCtx->width, vCodecCtx->height);
				static struct SwsContext *img_convert_ctx;
				img_convert_ctx = sws_getCachedContext(img_convert_ctx,
		                   vCodecCtx->width, vCodecCtx->height, vCodecCtx->pix_fmt,
			               vCodecCtx->width, vCodecCtx->height, PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
		        sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
		                   0, pFrame->height, pict.data, pict.linesize);
				avpicture_free(&pict);

				/*
				usleep(40 * 1000);
				SDL_LockYUVOverlay(bmp);
				AVPicture pict;
				pict.data[0] = bmp->pixels[0];
				pict.data[1] = bmp->pixels[2];
				pict.data[2] = bmp->pixels[1];
				pict.linesize[0] = bmp->pitches[0];
				pict.linesize[1] = bmp->pitches[2];
				pict.linesize[2] = bmp->pitches[1];
				//Convert the image into YUV format that SDL uses
				static struct SwsContext *img_convert_ctx;
				img_convert_ctx = sws_getCachedContext(img_convert_ctx,
		                   vCodecCtx->width, vCodecCtx->height, vCodecCtx->pix_fmt,
			               vCodecCtx->width, vCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
		        sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
		                   0, pFrame->height, pict.data, pict.linesize);
				SDL_UnlockYUVOverlay(bmp); 
				rect.x = 0;
				rect.y = 0;
				rect.w = vCodecCtx->width;
				rect.h = vCodecCtx->height;
				SDL_DisplayYUVOverlay(bmp, &rect);
				*/
			}
			//Free the packet that was allocated by av_read_frame
			av_free_packet(&packet);	
			/*
			SDL_Event event;
			SDL_PollEvent(&event);
			switch(event.type)
			{
				case SDL_QUIT:
					quit = 1;
					SDL_Quit();
					exit(0);
					break;
				default:
					break;
			}
			*/
		}
		else if(packet.stream_index == audioStream)
		{
			packet_queue_put(&audioq, &packet);
		}
		else
		{
			av_free_packet(&packet);
		}
	}
	//Free the RGB image
	av_free(buffer);
	av_free(pFrameRGB);
	//Free the YUV frame
	av_free(pFrame);
	//Close the codec
	avcodec_close(vCodecCtx);
	//Close the video file
	avformat_close_input(&pFormatCtx);
}
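The hand-written stream scan and the deprecated av_find_stream_info()/avcodec_open() calls above have direct modern replacements. A sketch using av_find_best_stream(), which also returns the decoder (on very recent FFmpeg the decoder pointer is const AVCodec *):

AVCodec *vCodec = NULL, *aCodec = NULL;

if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
	return -1;
int videoStream = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO,
                                      -1, -1, &vCodec, 0);
int audioStream = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_AUDIO,
                                      -1, -1, &aCodec, 0);
if (videoStream < 0 || audioStream < 0)
	return -1; //stream or decoder not found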
Code example #28
0
File: fa_imageloader.c Project: suzuke/showtime
static pixmap_t *
fa_image_from_video2(const char *url, const image_meta_t *im, 
		     const char *cacheid, char *errbuf, size_t errlen,
		     int sec, time_t mtime, cancellable_t *c)
{
  pixmap_t *pm = NULL;

  if(ifv_url == NULL || strcmp(url, ifv_url)) {
    // Need to open
    int i;
    AVFormatContext *fctx;
    fa_handle_t *fh = fa_open_ex(url, errbuf, errlen, FA_BUFFERED_BIG, NULL);

    if(fh == NULL)
      return NULL;

    AVIOContext *avio = fa_libav_reopen(fh, 0);

    if((fctx = fa_libav_open_format(avio, url, NULL, 0, NULL, 0, 0)) == NULL) {
      fa_libav_close(avio);
      snprintf(errbuf, errlen, "Unable to open format");
      return NULL;
    }

    if(!strcmp(fctx->iformat->name, "avi"))
      fctx->flags |= AVFMT_FLAG_GENPTS;

    AVCodecContext *ctx = NULL;
    for(i = 0; i < fctx->nb_streams; i++) {
      if(fctx->streams[i]->codec != NULL && 
	 fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
	ctx = fctx->streams[i]->codec;
	break;
      }
    }
    if(ctx == NULL) {
      fa_libav_close_format(fctx);
      return NULL;
    }

    AVCodec *codec = avcodec_find_decoder(ctx->codec_id);
    if(codec == NULL) {
      fa_libav_close_format(fctx);
      snprintf(errbuf, errlen, "Unable to find codec");
      return NULL;
    }

    if(avcodec_open2(ctx, codec, NULL) < 0) {
      fa_libav_close_format(fctx);
      snprintf(errbuf, errlen, "Unable to open codec");
      return NULL;
    }

    ifv_close();

    ifv_stream = i;
    ifv_url = strdup(url);
    ifv_fctx = fctx;
    ifv_ctx = ctx;
  }

  AVPacket pkt;
  AVFrame *frame = avcodec_alloc_frame();
  int got_pic;


  AVStream *st = ifv_fctx->streams[ifv_stream];
  int64_t ts = av_rescale(sec, st->time_base.den, st->time_base.num);

  if(av_seek_frame(ifv_fctx, ifv_stream, ts, AVSEEK_FLAG_BACKWARD) < 0) {
    ifv_close();
    snprintf(errbuf, errlen, "Unable to seek to %"PRId64, ts);
    return NULL;
  }
  
  avcodec_flush_buffers(ifv_ctx);

#define MAX_FRAME_SCAN 500
  
  int cnt = MAX_FRAME_SCAN;
  while(1) {
    int r;

    r = av_read_frame(ifv_fctx, &pkt);

    if(r == AVERROR(EAGAIN))
      continue;

    if(r == AVERROR_EOF)
      break;

    if(cancellable_is_cancelled(c)) {
      snprintf(errbuf, errlen, "Cancelled");
      av_free_packet(&pkt);
      break;
    }

    if(r != 0) {
      ifv_close();
      break;
    }

    if(pkt.stream_index != ifv_stream) {
      av_free_packet(&pkt);
      continue;
    }
    cnt--;
    int want_pic = pkt.pts >= ts || cnt <= 0;

    ifv_ctx->skip_frame = want_pic ? AVDISCARD_DEFAULT : AVDISCARD_NONREF;
    
    avcodec_decode_video2(ifv_ctx, frame, &got_pic, &pkt);
    av_free_packet(&pkt);
    if(got_pic == 0 || !want_pic) {
      continue;
    }
    int w,h;

    if(im->im_req_width != -1 && im->im_req_height != -1) {
      w = im->im_req_width;
      h = im->im_req_height;
    } else if(im->im_req_width != -1) {
      w = im->im_req_width;
      h = im->im_req_width * ifv_ctx->height / ifv_ctx->width;

    } else if(im->im_req_height != -1) {
      w = im->im_req_height * ifv_ctx->width / ifv_ctx->height;
      h = im->im_req_height;
    } else {
      /* neither dimension was requested; fall back to the source size */
      w = ifv_ctx->width;
      h = ifv_ctx->height;
    }

    pm = pixmap_create(w, h, PIXMAP_BGR32, 0);

    if(pm == NULL) {
      ifv_close();
      snprintf(errbuf, errlen, "Out of memory");
      av_free(frame);
      return NULL;
    }

    struct SwsContext *sws;
    sws = sws_getContext(ifv_ctx->width, ifv_ctx->height, ifv_ctx->pix_fmt,
			 w, h, AV_PIX_FMT_BGR32, SWS_BILINEAR,
                         NULL, NULL, NULL);
    if(sws == NULL) {
      ifv_close();
      snprintf(errbuf, errlen, "Scaling failed");
      pixmap_release(pm);
      av_free(frame);
      return NULL;
    }
    
    uint8_t *ptr[4] = {0,0,0,0};
    int strides[4] = {0,0,0,0};

    ptr[0] = pm->pm_pixels;
    strides[0] = pm->pm_linesize;

    sws_scale(sws, (const uint8_t **)frame->data, frame->linesize,
	      0, ifv_ctx->height, ptr, strides);

    sws_freeContext(sws);

    write_thumb(ifv_ctx, frame, w, h, cacheid, mtime);

    break;
  }

  av_free(frame);
  if(pm == NULL)
    snprintf(errbuf, errlen, "Frame not found (scanned %d)", 
	     MAX_FRAME_SCAN - cnt);

  avcodec_flush_buffers(ifv_ctx);
  callout_arm(&thumb_flush_callout, ifv_autoclose, NULL, 5);
  return pm;
}
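The seek target above converts seconds to stream time-base units with av_rescale(sec, den, num). The same conversion written with av_rescale_q(), which generalizes to arbitrary source time bases:

int64_t ts = av_rescale_q(sec, (AVRational){1, 1}, st->time_base);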
Code example #29
0
File: video.c Project: brendonjustin/vlc
/*****************************************************************************
 * DecodeVideo: Called to decode one or more frames
 *****************************************************************************/
picture_t *DecodeVideo( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    AVCodecContext *p_context = p_sys->p_context;
    int b_drawpicture;
    int b_null_size = false;
    block_t *p_block;

    if( !pp_block || !*pp_block )
        return NULL;

    if( !p_context->extradata_size && p_dec->fmt_in.i_extra )
    {
        ffmpeg_InitCodec( p_dec );
        if( p_sys->b_delayed_open )
        {
            if( ffmpeg_OpenCodec( p_dec ) )
                msg_Err( p_dec, "cannot open codec (%s)", p_sys->psz_namecodec );
        }
    }

    p_block = *pp_block;
    if( p_sys->b_delayed_open )
    {
        block_Release( p_block );
        return NULL;
    }

    if( p_block->i_flags & (BLOCK_FLAG_DISCONTINUITY|BLOCK_FLAG_CORRUPTED) )
    {
        p_sys->i_pts = VLC_TS_INVALID; /* To make sure we recover properly */

        p_sys->i_late_frames = 0;

        if( p_block->i_flags & BLOCK_FLAG_DISCONTINUITY )
            avcodec_flush_buffers( p_context );

        block_Release( p_block );
        return NULL;
    }

    if( p_block->i_flags & BLOCK_FLAG_PREROLL )
    {
        /* Do not care about late frames when prerolling
         * TODO avoid decoding of non reference frame
         * (ie all B except for H264 where it depends only on nal_ref_idc) */
        p_sys->i_late_frames = 0;
    }

    if( !p_dec->b_pace_control && (p_sys->i_late_frames > 0) &&
        (mdate() - p_sys->i_late_frames_start > INT64_C(5000000)) )
    {
        if( p_sys->i_pts > VLC_TS_INVALID )
        {
            msg_Err( p_dec, "more than 5 seconds of late video -> "
                     "dropping frame (computer too slow ?)" );
            p_sys->i_pts = VLC_TS_INVALID; /* To make sure we recover properly */
        }
        block_Release( p_block );
        p_sys->i_late_frames--;
        return NULL;
    }

    /* A good idea could be to decode all I pictures and see for the other */
    if( !p_dec->b_pace_control &&
        p_sys->b_hurry_up &&
        (p_sys->i_late_frames > 4) )
    {
        b_drawpicture = 0;
        if( p_sys->i_late_frames < 12 )
        {
            p_context->skip_frame =
                    (p_sys->i_skip_frame <= AVDISCARD_NONREF) ?
                    AVDISCARD_NONREF : p_sys->i_skip_frame;
        }
        else
        {
            /* picture too late, won't decode
             * but break picture until a new I, and for mpeg4 ...*/
            p_sys->i_late_frames--; /* needed, else it would never decrease */
            block_Release( p_block );
            return NULL;
        }
    }
    else
    {
        if( p_sys->b_hurry_up )
            p_context->skip_frame = p_sys->i_skip_frame;
        if( !(p_block->i_flags & BLOCK_FLAG_PREROLL) )
            b_drawpicture = 1;
        else
            b_drawpicture = 0;
    }

    if( p_context->width <= 0 || p_context->height <= 0 )
    {
        if( p_sys->b_hurry_up )
            p_context->skip_frame = p_sys->i_skip_frame;
        b_null_size = true;
    }
    else if( !b_drawpicture )
    {
        /* It creates broken picture
         * FIXME either our parser or ffmpeg is broken */
#if 0
        if( p_sys->b_hurry_up )
            p_context->skip_frame = __MAX( p_context->skip_frame,
                                                  AVDISCARD_NONREF );
#endif
    }

    /*
     * Do the actual decoding now */

    /* Don't forget that ffmpeg requires a few more bytes
     * than the real frame size */
    if( p_block->i_buffer > 0 )
    {
        p_sys->b_flush = ( p_block->i_flags & BLOCK_FLAG_END_OF_SEQUENCE ) != 0;

        p_block = block_Realloc( p_block, 0,
                            p_block->i_buffer + FF_INPUT_BUFFER_PADDING_SIZE );
        if( !p_block )
            return NULL;
        p_block->i_buffer -= FF_INPUT_BUFFER_PADDING_SIZE;
        *pp_block = p_block;
        memset( p_block->p_buffer + p_block->i_buffer, 0,
                FF_INPUT_BUFFER_PADDING_SIZE );
    }

    while( p_block->i_buffer > 0 || p_sys->b_flush )
    {
        int i_used, b_gotpicture;
        picture_t *p_pic;
        AVPacket pkt;

        /* Set the PTS/DTS in the context reordered_opaque field */
        if( p_block->i_pts > VLC_TS_INVALID  )
            p_context->reordered_opaque = (p_block->i_pts << 1) | 0;
        else if( p_block->i_dts > VLC_TS_INVALID )
            p_context->reordered_opaque = (p_block->i_dts << 1) | 1;
        else
            p_context->reordered_opaque = INT64_MIN;
        p_sys->p_ff_pic->reordered_opaque = p_context->reordered_opaque;

        /* Make sure we don't reuse the same timestamps twice */
        p_block->i_pts =
        p_block->i_dts = VLC_TS_INVALID;

        post_mt( p_sys );

        av_init_packet( &pkt );
        pkt.data = p_block->p_buffer;
        pkt.size = p_block->i_buffer;
        i_used = avcodec_decode_video2( p_context, p_sys->p_ff_pic,
                                       &b_gotpicture, &pkt );

        if( b_null_size && !p_sys->b_flush &&
            p_context->width > 0 && p_context->height > 0 )
        {
            /* Reparse it to not drop the I frame */
            b_null_size = false;
            if( p_sys->b_hurry_up )
                p_context->skip_frame = p_sys->i_skip_frame;
            i_used = avcodec_decode_video2( p_context, p_sys->p_ff_pic,
                                           &b_gotpicture, &pkt );
        }
        wait_mt( p_sys );

        if( p_sys->b_flush )
            p_sys->b_first_frame = true;

        if( p_block->i_buffer <= 0 )
            p_sys->b_flush = false;

        if( i_used < 0 )
        {
            if( b_drawpicture )
                msg_Warn( p_dec, "cannot decode one frame (%zu bytes)",
                          p_block->i_buffer );
            block_Release( p_block );
            return NULL;
        }
        else if( i_used > p_block->i_buffer ||
                 p_context->thread_count > 1 )
        {
            i_used = p_block->i_buffer;
        }

        /* Consumed bytes */
        p_block->i_buffer -= i_used;
        p_block->p_buffer += i_used;

        /* Nothing to display */
        if( !b_gotpicture )
        {
            if( i_used == 0 ) break;
            continue;
        }

        /* Sanity check (seems to be needed for some streams) */
        if( p_sys->p_ff_pic->pict_type == AV_PICTURE_TYPE_B)
        {
            p_sys->b_has_b_frames = true;
        }

        /* Compute the PTS */
        mtime_t i_pts = VLC_TS_INVALID;
        if( p_sys->p_ff_pic->reordered_opaque != INT64_MIN )
        {
            mtime_t i_ts = p_sys->p_ff_pic->reordered_opaque >> 1;
            bool    b_dts = p_sys->p_ff_pic->reordered_opaque & 1;
            if( b_dts )
            {
                if( !p_context->has_b_frames ||
                    !p_sys->b_has_b_frames ||
                    !p_sys->p_ff_pic->reference ||
                    p_sys->i_pts <= VLC_TS_INVALID )
                    i_pts = i_ts;

                /* Guess what ? The rules are different for Real Video :( */
                if( (p_dec->fmt_in.i_codec == VLC_CODEC_RV30 ||
                     p_dec->fmt_in.i_codec == VLC_CODEC_RV40) &&
                    p_sys->b_has_b_frames )
                {
                    i_pts = VLC_TS_INVALID;
                    if(p_sys->p_ff_pic->reference) i_pts = i_ts;
                }
            }
            else
            {
                i_pts = i_ts;
            }
        }
        if( i_pts <= VLC_TS_INVALID )
            i_pts = p_sys->i_pts;

        /* Interpolate the next PTS */
        if( i_pts > VLC_TS_INVALID )
            p_sys->i_pts = i_pts;
        if( p_sys->i_pts > VLC_TS_INVALID )
        {
            /* interpolate the next PTS */
            if( p_dec->fmt_in.video.i_frame_rate > 0 &&
                p_dec->fmt_in.video.i_frame_rate_base > 0 )
            {
                p_sys->i_pts += INT64_C(1000000) *
                    (2 + p_sys->p_ff_pic->repeat_pict) *
                    p_dec->fmt_in.video.i_frame_rate_base /
                    (2 * p_dec->fmt_in.video.i_frame_rate);
            }
            else if( p_context->time_base.den > 0 )
            {
                int i_tick = p_context->ticks_per_frame;
                if( i_tick <= 0 )
                    i_tick = 1;

                p_sys->i_pts += INT64_C(1000000) *
                    (2 + p_sys->p_ff_pic->repeat_pict) *
                    i_tick * p_context->time_base.num /
                    (2 * p_context->time_base.den);
            }
        }

        /* Update frame late count (except when doing preroll) */
        mtime_t i_display_date = 0;
        if( !(p_block->i_flags & BLOCK_FLAG_PREROLL) )
            i_display_date = decoder_GetDisplayDate( p_dec, i_pts );

        if( i_display_date > 0 && i_display_date <= mdate() )
        {
            p_sys->i_late_frames++;
            if( p_sys->i_late_frames == 1 )
                p_sys->i_late_frames_start = mdate();
        }
        else
        {
            p_sys->i_late_frames = 0;
        }

        if( !b_drawpicture || ( !p_sys->p_va && !p_sys->p_ff_pic->linesize[0] ) )
            continue;

        if( !p_sys->p_ff_pic->opaque )
        {
            /* Get a new picture */
            p_pic = ffmpeg_NewPictBuf( p_dec, p_context );
            if( !p_pic )
            {
                block_Release( p_block );
                return NULL;
            }

            /* Fill p_picture_t from AVVideoFrame and do chroma conversion
             * if needed */
            ffmpeg_CopyPicture( p_dec, p_pic, p_sys->p_ff_pic );
        }
        else
        {
            p_pic = (picture_t *)p_sys->p_ff_pic->opaque;
            decoder_LinkPicture( p_dec, p_pic );
        }

        if( !p_dec->fmt_in.video.i_sar_num || !p_dec->fmt_in.video.i_sar_den )
        {
            /* Fetch again the aspect ratio in case it changed */
            p_dec->fmt_out.video.i_sar_num
                = p_context->sample_aspect_ratio.num;
            p_dec->fmt_out.video.i_sar_den
                = p_context->sample_aspect_ratio.den;

            if( !p_dec->fmt_out.video.i_sar_num || !p_dec->fmt_out.video.i_sar_den )
            {
                p_dec->fmt_out.video.i_sar_num = 1;
                p_dec->fmt_out.video.i_sar_den = 1;
            }
        }

        /* Send decoded frame to vout */
        if( i_pts > VLC_TS_INVALID)
        {
            p_pic->date = i_pts;

            if( p_sys->b_first_frame )
            {
                /* Hack to force display of still pictures */
                p_sys->b_first_frame = false;
                p_pic->b_force = true;
            }

            p_pic->i_nb_fields = 2 + p_sys->p_ff_pic->repeat_pict;
            p_pic->b_progressive = !p_sys->p_ff_pic->interlaced_frame;
            p_pic->b_top_field_first = p_sys->p_ff_pic->top_field_first;

            p_pic->i_qstride = p_sys->p_ff_pic->qstride;
            int i_mb_h = ( p_pic->format.i_height + 15 ) / 16;
            p_pic->p_q = malloc( p_pic->i_qstride * i_mb_h );
            memcpy( p_pic->p_q, p_sys->p_ff_pic->qscale_table,
                    p_pic->i_qstride * i_mb_h );
            switch( p_sys->p_ff_pic->qscale_type )
            {
                case FF_QSCALE_TYPE_MPEG1:
                    p_pic->i_qtype = QTYPE_MPEG1;
                    break;
                case FF_QSCALE_TYPE_MPEG2:
                    p_pic->i_qtype = QTYPE_MPEG2;
                    break;
                case FF_QSCALE_TYPE_H264:
                    p_pic->i_qtype = QTYPE_H264;
                    break;
            }

            return p_pic;
        }
        else
        {
            decoder_DeletePicture( p_dec, p_pic );
        }
    }

    block_Release( p_block );
    *pp_block = NULL;
    return NULL;
}
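The reordered_opaque trick above is worth isolating: VLC packs either a PTS or a DTS into the 64-bit field, tagging the low bit so the decoded frame reveals which kind of timestamp travelled with it. A sketch of the pack/unpack pair (helper names are illustrative, not VLC's own):

static inline int64_t ts_pack( mtime_t i_ts, bool b_dts )
{
    return ((int64_t)i_ts << 1) | (b_dts ? 1 : 0);
}

static inline mtime_t ts_unpack( int64_t i_opaque, bool *pb_dts )
{
    *pb_dts = i_opaque & 1;
    return i_opaque >> 1;
}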
Code example #30
0
File: i_ffmpeg.c Project: dotfloat/strife-ve
static void I_AVProcessNextVideoFrame(void)
{
    int frameDone = 0;
    AVFrame *frame;
    AVPacket *packet;
    double pts = 0;
    boolean behind = false;

    if(hasAudio)
    {
        if(videoClock > audioClock || audioClock <= 0)
        {
            // don't process if the audio clock hasn't started,
            // or if the video clock is ahead; this shouldn't
            // be needed, but just in case...
            return;
        }
    }
    
    frame = av_frame_alloc();

    if(hasAudio)
    {
        behind = audioFinished ? true : I_AVVideoClockBehind();
    }

    // collect packets until we have a frame
    while(!frameDone || behind)
    {
        if(!I_AVPopPacketFromQueue(videoPacketQueue, &packet))
        {
            videoFinished = true;
            break;
        }
        
        if(packet == NULL)
        {
            break;
        }

        // get presentation timestamp
        pts = 0;
        globalPts = packet->pts;
        
        avcodec_decode_video2(videoCodecCtx, frame, &frameDone, packet);

        // get the decompression timestamp from this packet
        if(packet->dts == AV_NOPTS_VALUE && frame->opaque &&
            *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
        {
            pts = *(uint64_t*)frame->opaque;
        }
        else if(packet->dts != AV_NOPTS_VALUE)
        {
            pts = packet->dts;
        }
        else
        {
            pts = 0;
        }

        // approximate the timestamp
        pts *= av_q2d(videoCodecCtx->time_base);

        if(frameDone)
        {
            // update the video clock and frame time
            pts = I_AVUpdateVideoClock(frame, pts);
            frameTime = (pts - lastFrameTime) * 1000.0;
            lastFrameTime = pts;
            currentPts = av_gettime();
            
            if(hasAudio)
            {
                // need to keep processing if we're behind
                // some frames may be skipped
                behind = I_AVVideoClockBehind();
            }
        }

        av_free_packet(packet);
    }
    
    if(frameDone)
    {
        // convert the decoded data to color data
        sws_scale(swsCtx,
                  (uint8_t const*const*)frame->data,
                  frame->linesize,
                  0,
                  videoCodecCtx->height,
                  videoFrame->data,
                  videoFrame->linesize);
        
        I_AVYUVToBuffer(frame);

        RB_BindTexture(&texture);
        RB_UpdateTexture(&texture, videoBuffer);
    }
    
    av_free(frame);
}