Example #1
    AVFrame *LibAVVideoWriter::Data::get_video_frame(const ImgBase *src, OutputStream *ost)
    {
        AVCodecContext *c = ost->st->codec;
        int sw = src->getSize().width;
        int sh = src->getSize().height;
        //check whether the input's format or size differs from the codec's
        if (c->pix_fmt != INPUT_FORMAT || c->width != sw || c->height != sh) {
            //check if the scale context needs to be updated
            if (!ost->sws_ctx || ost->sws_ctx_width != sw || ost->sws_ctx_height != sh) {
                //update context
                if (ost->sws_ctx) sws_freeContext(ost->sws_ctx);
                ost->sws_ctx = sws_getContext(sw, sh,
                                              INPUT_FORMAT,
                                              c->width, c->height,
                                              c->pix_fmt,
                                              SWS_BICUBIC, 0, 0, 0);
                if (!ost->sws_ctx) throw ICLException("Cannot initialize the conversion context");
                //remember the size the context was built for, so the
                //cache check above can detect the next size change
                ost->sws_ctx_width = sw;
                ost->sws_ctx_height = sh;
            }
            fill_rgb_image(src,&(ost->tmp_frame));
            sws_scale(ost->sws_ctx, ost->tmp_frame->data, ost->tmp_frame->linesize,
                      0, ost->tmp_frame->height, ost->frame->data, ost->frame->linesize);
        } else {
            fill_rgb_image(src,&(ost->frame));
        }

        ost->frame->pts = ost->next_pts++;

        return ost->frame;
    }
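
Example #1 tracks the input size by hand (sws_ctx_width/sws_ctx_height) to decide when the scaler must be rebuilt. libswscale already offers this reuse-or-recreate bookkeeping via sws_getCachedContext(), which returns the passed context unchanged while the parameters still match and otherwise frees it and allocates a fresh one. A minimal sketch against Example #1's own variables (ost, sw, sh, c and the INPUT_FORMAT macro are taken from that example):

    //sketch: let libswscale manage the cache instead of tracking
    //sws_ctx_width/sws_ctx_height by hand; failure handling as in Example #1
    ost->sws_ctx = sws_getCachedContext(ost->sws_ctx,
                                        sw, sh, INPUT_FORMAT,
                                        c->width, c->height, c->pix_fmt,
                                        SWS_BICUBIC, NULL, NULL, NULL);
    if (!ost->sws_ctx) throw ICLException("Cannot initialize the conversion context");
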
Example #2
static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->st->codec;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    if (c->pix_fmt != AV_PIX_FMT_RGB24) {
        /* we only generate an RGB24 picture, so we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(1024, 1024,
                                          AV_PIX_FMT_RGB24,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Cannot initialize the conversion context\n");
                exit(1);
            }
        }
        fill_rgb_image(ost->tmp_frame, ost->next_pts, 1024, 1024);
        sws_scale(ost->sws_ctx, ost->tmp_frame->data, ost->tmp_frame->linesize,
                  0, 1024, ost->frame->data, ost->frame->linesize);
    } else {
        /* apparently unreachable in this program: the encoder is
         * never opened with an RGB24 pixel format */
        assert(0);
        fill_rgb_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}
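
Both Example #1 and Example #2 read the encoder settings through ost->st->codec. That field was deprecated in FFmpeg 3.1 and removed in later major releases; current code keeps its own AVCodecContext and exports the settings to the stream through AVStream.codecpar. A hedged sketch of the modern setup (codec is an AVCodec* chosen elsewhere, st is the output stream, and the concrete parameter values are placeholders):

/* sketch: a private encoder context instead of the removed st->codec */
AVCodecContext *enc = avcodec_alloc_context3(codec);
enc->width     = 1024;                      /* placeholder values */
enc->height    = 1024;
enc->pix_fmt   = AV_PIX_FMT_YUV420P;
enc->time_base = (AVRational){ 1, 25 };
if (avcodec_open2(enc, codec, NULL) < 0)
    exit(1);
/* copy the settings onto the stream so the muxer writes correct headers */
if (avcodec_parameters_from_context(st->codecpar, enc) < 0)
    exit(1);
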
Example #3
static void
write_video_frame (GeglProperties *o,
                   AVFormatContext *oc, AVStream *st)
{
  Priv           *p = (Priv*)o->user_data;
  int             out_size, ret;
  AVCodecContext *c;
  AVFrame        *picture_ptr;

  c = st->codec;

  if (c->pix_fmt != AV_PIX_FMT_RGB24)
    {
      struct SwsContext *img_convert_ctx;
      fill_rgb_image (o, p->tmp_picture, p->frame_count, c->width,
                      c->height);

      img_convert_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGB24,
                                       c->width, c->height, c->pix_fmt,
                                       SWS_BICUBIC, NULL, NULL, NULL);

      if (img_convert_ctx == NULL)
        {
          fprintf(stderr, "ff_save: Cannot initialize conversion context.");
        }
      else
        {
          sws_scale(img_convert_ctx,
                    (const uint8_t * const *) p->tmp_picture->data,
                    p->tmp_picture->linesize,
                    0,
                    c->height,
                    p->picture->data,
                    p->picture->linesize);
          p->picture->format = c->pix_fmt;
          p->picture->width  = c->width;
          p->picture->height = c->height;
          /* the context is recreated on every call above, so release it
             here to avoid leaking one SwsContext per encoded frame */
          sws_freeContext (img_convert_ctx);
        }
    }
  else
    {
      fill_rgb_image (o, p->picture, p->frame_count, c->width, c->height);
    }

  picture_ptr      = p->picture;
  picture_ptr->pts = p->frame_count;

  if (oc->oformat->flags & AVFMT_RAWPICTURE)
    {
      /* raw video case. The API will change slightly in the near
         future for that */
      AVPacket  pkt;
      av_init_packet (&pkt);

      pkt.flags |= AV_PKT_FLAG_KEY;
      pkt.stream_index = st->index;
      pkt.data = (uint8_t *) picture_ptr;
      pkt.size = sizeof (AVPicture);
      pkt.pts = picture_ptr->pts;
      av_packet_rescale_ts (&pkt, c->time_base, st->time_base);

      ret = av_write_frame (oc, &pkt);
    }
  else
    {
      /* encode the image */
      out_size =
        avcodec_encode_video (c,
                              p->video_outbuf,
                              p->video_outbuf_size, picture_ptr);

      /* if zero size, it means the image was buffered */
      if (out_size != 0)
        {
          AVPacket  pkt;
          av_init_packet (&pkt);
          if (c->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
          pkt.stream_index = st->index;
          pkt.data = p->video_outbuf;
          pkt.size = out_size;
          pkt.pts = picture_ptr->pts;
          av_packet_rescale_ts (&pkt, c->time_base, st->time_base);
          /* write the compressed frame in the media file */
          ret = av_write_frame (oc, &pkt);
        }
      else
        {
          ret = 0;
        }
    }
  if (ret != 0)
    {
      fprintf (stderr, "Error while writing video frame\n");
      exit (1);
    }
  p->frame_count++;
}
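
Example #3 still relies on avcodec_encode_video() and the AVFMT_RAWPICTURE branch, both of which have since been removed from FFmpeg. Encoding now goes through the send/receive pair introduced in FFmpeg 3.1, which also covers the "image was buffered" case naturally: avcodec_receive_packet() simply returns AVERROR(EAGAIN) until output is available. A sketch of what the encode-and-write part would look like today (c, oc and st are the same objects as in Example #3; error handling is trimmed to the essentials):

/* sketch: send/receive encoding in place of avcodec_encode_video() */
static int
encode_and_write (AVCodecContext *c, AVFormatContext *oc,
                  AVStream *st, AVFrame *frame)
{
  AVPacket *pkt = av_packet_alloc ();
  int       ret;

  if (!pkt)
    return AVERROR(ENOMEM);

  ret = avcodec_send_frame (c, frame);        /* frame == NULL flushes */
  while (ret >= 0)
    {
      ret = avcodec_receive_packet (c, pkt);
      if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        {
          ret = 0;            /* needs more input, or fully drained */
          break;
        }
      if (ret < 0)
        break;                /* genuine encoding error */
      pkt->stream_index = st->index;
      av_packet_rescale_ts (pkt, c->time_base, st->time_base);
      ret = av_write_frame (oc, pkt);
      av_packet_unref (pkt);
    }
  av_packet_free (&pkt);
  return ret;
}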