Code example #1
File: yav.c  Project: paumard/yorick-av
/* Yorick built-in av_codec_opt_set(obj, name, val): set the codec
   option NAME to VAL on the encoder attached to OBJ, and return the
   av_opt_set() status code to the interpreter. */
void
Y_av_codec_opt_set(int argc)
{
  yav_ctxt *obj = yget_obj(argc-1, &yav_ops);
  AVCodecContext *c = obj->video_st->codec;

  char *name = ygets_q(argc-2);
  char *val = ygets_q(argc-3);

  ypush_long(av_opt_set(c, name, val, AV_OPT_SEARCH_CHILDREN));
}
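In the new-style Yorick plugin API, a built-in receives only argc, the number of arguments on the interpreter stack; the first argument sits at iarg = argc-1, the second at argc-2, and so on, which is why all the examples on this page index from the top of the stack down. A minimal sketch of that convention (Y_my_add is a hypothetical built-in, not part of yorick-av):

#include "yapi.h"

/* Hypothetical built-in my_add(a, b): returns a + b.
   Stack arguments are addressed from the top, so the first argument
   of an argc-argument call is at iarg = argc-1. */
void
Y_my_add(int argc)
{
  long a = ygets_l(argc-1);   /* first argument */
  long b = ygets_l(argc-2);   /* second argument */
  ypush_long(a + b);          /* push the result for the interpreter */
}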
Code example #2
File: yusb.c  Project: emmt/yusb
/* Get a USB device from the stack. */
static ydev_instance_t* get_device(int iarg)
{
  return (ydev_instance_t*)yget_obj(iarg, &ydev_class);
}
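yget_obj() only makes sense together with the y_userobj_t class descriptor it is given: instances are created with ypush_obj() for the same descriptor, and yget_obj() type-checks the stack slot against it. yusb's real ydev_instance_t layout and callbacks are not shown here, so the following is only a sketch of how such a class is typically declared (all field choices are assumptions):

#include "yapi.h"

typedef struct {           /* hypothetical instance layout */
  void *handle;            /* e.g. a libusb device handle */
} ydev_instance_t;

static void
ydev_free(void *addr)
{
  /* release whatever the instance owns; called by the interpreter
     when the last reference to the object goes away */
}

static y_userobj_t ydev_class = {
  "usb_device",            /* type_name, as reported by typeof() */
  ydev_free,               /* on_free */
  NULL,                    /* on_print: default printing */
  NULL,                    /* on_eval: object is not callable */
  NULL,                    /* on_extract: no members */
  NULL                     /* uo_ops, reserved for Yorick */
};

/* Constructor side: allocate a new instance on the stack; the same
   descriptor is later passed to yget_obj() to retrieve it. */
static ydev_instance_t *
new_device(void)
{
  return (ydev_instance_t *)ypush_obj(&ydev_class, sizeof(ydev_instance_t));
}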
Code example #3
File: yav.c  Project: paumard/yorick-av
/* Yorick built-in av_write(obj, data): encode one RGB24 frame held in
   DATA, an array(char, 3, width, height), and write it to the output
   file attached to OBJ. */
void
Y_av_write(int argc)
{
  yav_ctxt * obj = yget_obj(argc-1, &yav_ops);
  AVCodecContext *c = obj->video_st->codec;

  long ntot=0;
  long dims[Y_DIMSIZE]={0,0};
  uint8_t *data = (uint8_t *)ygeta_c(argc-2, &ntot, dims);

  if (!c->width)
    yav_opencodec(obj, dims[2], dims[3]);

  if (dims[0]!=3 || dims[1]!=3 || 
      dims[2]!=c->width || dims[3]!=c->height)
    y_error("DATA should be an array(char, 3, width, height)");

  long npix=dims[2]*dims[3];

  const uint8_t *src[4] = {data, 0, 0, 0};
  int src_linesizes[4] = {3*c->width,0,0,0};

  if (c->pix_fmt != AV_PIX_FMT_RGB24) {
    /* we only generate RGB24 pictures, so convert to the codec's
       pixel format whenever it differs */
    obj->img_convert_ctx = sws_getCachedContext(obj->img_convert_ctx,
						c->width, c->height,
						AV_PIX_FMT_RGB24,
						c->width, c->height,
						c->pix_fmt,
						SWS_BICUBIC, NULL, NULL, NULL);
    if (obj->img_convert_ctx == NULL)
      y_error("Cannot initialize the conversion context");

    av_image_copy(obj->tmp_picture->data, obj->tmp_picture->linesize,
		  src, src_linesizes, AV_PIX_FMT_RGB24, c->width, c->height);
    sws_scale(obj->img_convert_ctx,
	      (const uint8_t * const*)obj->tmp_picture->data,
	      obj->tmp_picture->linesize,
	      0, c->height, obj->picture->data, obj->picture->linesize);
  } else {
    av_image_copy(obj->picture->data, obj->picture->linesize,
		  src, src_linesizes, AV_PIX_FMT_RGB24, c->width, c->height);
  }

  /* encode the image; raw, uncompressed output is rejected up front
     (which makes the AVFMT_RAWPICTURE branch below unreachable) */
  if (obj->oc->oformat->flags & AVFMT_RAWPICTURE)
    y_error("RAW picture not supported");

  if (obj->oc->oformat->video_codec == AV_CODEC_ID_H264 ||
      obj->oc->oformat->video_codec == AV_CODEC_ID_THEORA)
    ++obj->picture->pts;

  int ret=0;

#if (LIBAVCODEC_VERSION_MAJOR > 53)
  if (obj->oc->oformat->flags & AVFMT_RAWPICTURE) {
    /* Raw video case - directly store the picture in the packet */
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.flags |= AV_PKT_FLAG_KEY;
    pkt.stream_index = obj->video_st->index;
    pkt.data = obj->video_outbuf;
    pkt.size = sizeof(AVPicture);
    ret = av_interleaved_write_frame(obj->oc, &pkt);
  } else {
    AVPacket pkt = { 0 };
    int got_packet;
    av_init_packet(&pkt);
    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, obj->picture, &got_packet);
    if (ret < 0) {
      y_errorn("Error encoding video frame: %d", ret);
    }
    /* If size is zero, it means the image was buffered. */
    if (!ret && got_packet && pkt.size) {
      pkt.stream_index = obj->video_st->index;
      /* Write the compressed frame to the media file. */
      ret = av_interleaved_write_frame(obj->oc, &pkt);
    } else {
      ret = 0;
    }
  }
#else
  int out_size
    = avcodec_encode_video(c, obj->video_outbuf, obj->video_outbuf_size,
			   obj->picture);
  /* if zero size, it means the image was buffered */
  if (out_size > 0) {
    AVPacket pkt;
    av_init_packet(&pkt);

    if (c->coded_frame->pts != AV_NOPTS_VALUE)
      pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base,
  			    obj->video_st->time_base);
    if(c->coded_frame->key_frame)
      pkt.flags |= AV_PKT_FLAG_KEY;
    pkt.stream_index= obj->video_st->index;
    pkt.data= obj->video_outbuf;
    pkt.size= out_size;
    /* write the compressed frame in the media file */
    ret = av_interleaved_write_frame(obj->oc, &pkt);
  }
#endif

  if (ret != 0)
    y_errorn("Error while writing video frame: %d", ret);

  /* return [] */
  ypush_nil();
}
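Example #3 keeps two encode paths behind a version check: avcodec_encode_video2() for libavcodec newer than major version 53, and the older avcodec_encode_video() otherwise. Both have since been removed from FFmpeg in favor of the send/receive API. Below is a minimal sketch of the equivalent encode-and-write step under that newer API (encode_and_write is a hypothetical helper, not part of yorick-av):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Hypothetical helper: feed one frame to the encoder and interleave
   every packet it produces; pass frame == NULL to flush at the end.
   Returns 0 on success or a negative AVERROR code. */
static int
encode_and_write(AVFormatContext *oc, AVStream *st,
                 AVCodecContext *c, const AVFrame *frame)
{
  AVPacket *pkt = av_packet_alloc();
  int ret;

  if (!pkt) return AVERROR(ENOMEM);
  ret = avcodec_send_frame(c, frame);
  while (ret >= 0) {
    ret = avcodec_receive_packet(c, pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
      ret = 0;                /* encoder wants more input or is drained */
      break;
    }
    if (ret < 0) break;       /* genuine encoding error */
    av_packet_rescale_ts(pkt, c->time_base, st->time_base);
    pkt->stream_index = st->index;
    ret = av_interleaved_write_frame(oc, pkt);  /* takes the reference */
  }
  av_packet_free(&pkt);
  return ret;
}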