Example #1
0
static GstFlowReturn
theora_enc_chain (GstPad * pad, GstBuffer * buffer)
{
  GstTheoraEnc *enc;
  ogg_packet op;
  GstClockTime timestamp, duration, running_time;
  GstFlowReturn ret;

  enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));

  /* we keep track of two timelines.
   * - The timestamps from the incoming buffers, which we copy to the outgoing
   *   encoded buffers as-is. We need to do this as we simply forward the
   *   newsegment events.
   * - The running_time of the buffers, which we use to construct the granulepos
   *   in the packets.
   */
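  /* As a rough illustration (assuming a plain GST_FORMAT_TIME segment with
   * rate 1.0 and no accumulated running time), gst_segment_to_running_time()
   * below reduces to running_time = timestamp - segment.start: a buffer
   * stamped 5s into a segment that starts at 2s gets a running_time of 3s. */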
  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  duration = GST_BUFFER_DURATION (buffer);
  running_time =
      gst_segment_to_running_time (&enc->segment, GST_FORMAT_TIME, timestamp);

  /* make sure we copy the discont flag to the next outgoing buffer when it's
   * set on the incoming buffer */
  if (GST_BUFFER_IS_DISCONT (buffer)) {
    enc->next_discont = TRUE;
  }

  if (enc->packetno == 0) {
    /* no packets written yet, setup headers */
    GstCaps *caps;
    GstBuffer *buf1, *buf2, *buf3;

    enc->granulepos_offset = 0;
    enc->timestamp_offset = 0;

    GST_DEBUG_OBJECT (enc, "output headers");
    /* Theora streams begin with three headers; the initial header (with
       most of the codec setup parameters) which is mandated by the Ogg
       bitstream spec.  The second header holds any comment fields.  The
       third header holds the bitstream codebook.  We merely need to
       make the headers, then pass them to libtheora one at a time;
       libtheora handles the additional Ogg bitstream constraints */

    /* first packet will get its own page automatically */
    if (theora_encode_header (&enc->state, &op) != 0)
      goto encoder_disabled;

    ret =
        theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
        GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf1);
    if (ret != GST_FLOW_OK) {
      goto header_buffer_alloc;
    }

    /* create the remaining theora headers */
    theora_comment_clear (&enc->comment);
    theora_comment_init (&enc->comment);

    if (theora_encode_comment (&enc->comment, &op) != 0)
      goto encoder_disabled;

    ret =
        theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
        GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf2);
    /* Theora expects us to put this packet buffer into an ogg page,
     * in which case it becomes the ogg library's responsibility to
     * free it. Since we're copying and outputting a gst_buffer,
     * we need to free it ourselves. */
    if (op.packet)
      free (op.packet);
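    /* Only the comment packet needs this explicit free: the header and tables
     * packets produced by theora_encode_header()/theora_encode_tables() point
     * into buffers owned by the encoder state and are released later when
     * theora_clear() is called. */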

    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buf1);
      goto header_buffer_alloc;
    }

    if (theora_encode_tables (&enc->state, &op) != 0)
      goto encoder_disabled;

    ret =
        theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
        GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf3);
    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buf1);
      gst_buffer_unref (buf2);
      goto header_buffer_alloc;
    }

    /* mark buffers and put on caps */
    caps = gst_pad_get_caps (enc->srcpad);
    caps = theora_set_header_on_caps (caps, buf1, buf2, buf3);
    GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, caps);
    gst_pad_set_caps (enc->srcpad, caps);

    gst_buffer_set_caps (buf1, caps);
    gst_buffer_set_caps (buf2, caps);
    gst_buffer_set_caps (buf3, caps);

    gst_caps_unref (caps);

    /* push out the header buffers */
    if ((ret = theora_push_buffer (enc, buf1)) != GST_FLOW_OK) {
      gst_buffer_unref (buf2);
      gst_buffer_unref (buf3);
      goto header_push;
    }
    if ((ret = theora_push_buffer (enc, buf2)) != GST_FLOW_OK) {
      gst_buffer_unref (buf3);
      goto header_push;
    }
    if ((ret = theora_push_buffer (enc, buf3)) != GST_FLOW_OK) {
      goto header_push;
    }

    enc->granulepos_offset =
        gst_util_uint64_scale (running_time, enc->fps_n,
        GST_SECOND * enc->fps_d);
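    /* e.g. with fps_n/fps_d = 30/1 and running_time = 2 * GST_SECOND this
     * yields granulepos_offset = 60, i.e. granulepos counting starts 60
     * frames into the running time */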
    enc->timestamp_offset = running_time;
    enc->next_ts = 0;
  }

  {
    yuv_buffer yuv;
    gint res;
    gint y_size;
    guint8 *pixels;

    yuv.y_width = enc->info_width;
    yuv.y_height = enc->info_height;
    yuv.y_stride = enc->info_width;

    yuv.uv_width = enc->info_width / 2;
    yuv.uv_height = enc->info_height / 2;
    yuv.uv_stride = yuv.uv_width;

    y_size = enc->info_width * enc->info_height;

    if (enc->width == enc->info_width && enc->height == enc->info_height) {
      GST_LOG_OBJECT (enc, "no cropping/conversion needed");
      /* easy case, no cropping/conversion needed */
      pixels = GST_BUFFER_DATA (buffer);

      yuv.y = pixels;
      yuv.u = yuv.y + y_size;
      yuv.v = yuv.u + y_size / 4;
    } else {
      GstBuffer *newbuf;
      gint i;
      guchar *dest_y, *src_y;
      guchar *dest_u, *src_u;
      guchar *dest_v, *src_v;
      gint src_y_stride, src_uv_stride;
      gint dst_y_stride, dst_uv_stride;
      gint width, height;
      gint cwidth, cheight;
      gint offset_x, right_x, right_border;

      GST_LOG_OBJECT (enc, "cropping/conversion needed for strides");
      /* source width/height */
      width = enc->width;
      height = enc->height;
      /* source chroma width/height */
      cwidth = width / 2;
      cheight = height / 2;

      /* source strides as defined in videotestsrc */
      src_y_stride = GST_ROUND_UP_4 (width);
      src_uv_stride = GST_ROUND_UP_8 (width) / 2;
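      /* e.g. for a 177 pixel wide I420 frame the packed source has
       * src_y_stride  = GST_ROUND_UP_4 (177)     = 180 bytes per luma row and
       * src_uv_stride = GST_ROUND_UP_8 (177) / 2 =  92 bytes per chroma row,
       * while the destination rows below are exactly info_width wide */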

      /* destination strides from the real picture width */
      dst_y_stride = enc->info_width;
      dst_uv_stride = enc->info_width / 2;

      newbuf = gst_buffer_new_and_alloc (y_size * 3 / 2);
      if (!newbuf) {
        ret = GST_FLOW_ERROR;
        goto no_buffer;
      }
      GST_BUFFER_OFFSET (newbuf) = GST_BUFFER_OFFSET_NONE;
      gst_buffer_set_caps (newbuf, GST_PAD_CAPS (enc->srcpad));

      dest_y = yuv.y = GST_BUFFER_DATA (newbuf);
      dest_u = yuv.u = yuv.y + y_size;
      dest_v = yuv.v = yuv.u + y_size / 4;

      src_y = GST_BUFFER_DATA (buffer);
      src_u = src_y + src_y_stride * GST_ROUND_UP_2 (height);
      src_v = src_u + src_uv_stride * GST_ROUND_UP_2 (height) / 2;

      if (enc->border != BORDER_NONE) {
        /* fill top border */
        for (i = 0; i < enc->offset_y; i++) {
          memset (dest_y, 0, dst_y_stride);
          dest_y += dst_y_stride;
        }
      } else {
        dest_y += dst_y_stride * enc->offset_y;
      }

      offset_x = enc->offset_x;
      right_x = width + enc->offset_x;
      right_border = dst_y_stride - right_x;
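      /* offset_x + width + right_border adds up to dst_y_stride, so each
       * source row lands inside the destination row with (black) filler on
       * either side whenever a border is requested */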

      /* copy Y plane */
      for (i = 0; i < height; i++) {
        memcpy (dest_y + offset_x, src_y, width);
        if (enc->border != BORDER_NONE) {
          memset (dest_y, 0, offset_x);
          memset (dest_y + right_x, 0, right_border);
        }

        dest_y += dst_y_stride;
        src_y += src_y_stride;
      }

      if (enc->border != BORDER_NONE) {
        /* fill bottom border */
        for (i = height + enc->offset_y; i < enc->info.height; i++) {
          memset (dest_y, 0, dst_y_stride);
          dest_y += dst_y_stride;
        }

        /* fill top border chroma */
        for (i = 0; i < enc->offset_y / 2; i++) {
          memset (dest_u, 128, dst_uv_stride);
          memset (dest_v, 128, dst_uv_stride);
          dest_u += dst_uv_stride;
          dest_v += dst_uv_stride;
        }
      } else {
        dest_u += dst_uv_stride * enc->offset_y / 2;
        dest_v += dst_uv_stride * enc->offset_y / 2;
      }

      offset_x = enc->offset_x / 2;
      right_x = cwidth + offset_x;
      right_border = dst_uv_stride - right_x;

      /* copy UV planes */
      for (i = 0; i < cheight; i++) {
        memcpy (dest_v + offset_x, src_v, cwidth);
        memcpy (dest_u + offset_x, src_u, cwidth);

        if (enc->border != BORDER_NONE) {
          memset (dest_u, 128, offset_x);
          memset (dest_u + right_x, 128, right_border);
          memset (dest_v, 128, offset_x);
          memset (dest_v + right_x, 128, right_border);
        }

        dest_u += dst_uv_stride;
        dest_v += dst_uv_stride;
        src_u += src_uv_stride;
        src_v += src_uv_stride;
      }

      if (enc->border != BORDER_NONE) {
        /* fill bottom border */
        for (i = cheight + enc->offset_y / 2; i < enc->info_height / 2; i++) {
          memset (dest_u, 128, dst_uv_stride);
          memset (dest_v, 128, dst_uv_stride);
          dest_u += dst_uv_stride;
          dest_v += dst_uv_stride;
        }
      }

      gst_buffer_unref (buffer);
      buffer = newbuf;
    }

    if (theora_enc_is_discontinuous (enc, running_time, duration)) {
      theora_enc_reset (enc);
      enc->granulepos_offset =
          gst_util_uint64_scale (running_time, enc->fps_n,
          GST_SECOND * enc->fps_d);
      enc->timestamp_offset = running_time;
      enc->next_ts = 0;
      enc->next_discont = TRUE;
    }

    res = theora_encode_YUVin (&enc->state, &yuv);

    ret = GST_FLOW_OK;
    while (theora_encode_packetout (&enc->state, 0, &op)) {
      GstClockTime next_time;

      next_time = theora_enc_get_ogg_packet_end_time (enc, &op);

      ret =
          theora_push_packet (enc, &op, timestamp, enc->next_ts,
          next_time - enc->next_ts);

      enc->next_ts = next_time;
      if (ret != GST_FLOW_OK)
        goto data_push;
    }
    gst_buffer_unref (buffer);
  }

  return ret;

  /* ERRORS */
header_buffer_alloc:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
header_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
no_buffer:
  {
    return ret;
  }
data_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
encoder_disabled:
  {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("libtheora has been compiled with the encoder disabled"));
    gst_buffer_unref (buffer);
    return GST_FLOW_ERROR;
  }
}
Example #2
0
void rmdInitEncoder(ProgData *pdata,EncData *enc_data_t,int buffer_ready){
    
    int y0,
        y1,
        y2,
        fname_length;
    ogg_stream_state m_ogg_skel;
    ogg_page skel_og_pg;
    fisbone_packet skel_fbv,    //video fisbone packet
                   skel_fba;    //audio fisbone packet

    (pdata)->enc_data=enc_data_t;

    fname_length=strlen(pdata->args.filename);
    if(!(fname_length>4 &&
       pdata->args.filename[fname_length-4] == '.' &&
       (pdata->args.filename[fname_length-3] == 'o' ||
        pdata->args.filename[fname_length-3] == 'O') &&
       (pdata->args.filename[fname_length-2] == 'g' ||
        pdata->args.filename[fname_length-2] == 'G') &&
       (pdata->args.filename[fname_length-1] == 'v' ||
        pdata->args.filename[fname_length-1] == 'V'))){
    
        char *new_name=malloc(fname_length+5);
        strcpy(new_name,pdata->args.filename);
        strcat(new_name,".ogv");
        
        free(pdata->args.filename);
        pdata->args.filename=new_name;


    }
        
    if (!pdata->args.overwrite) {
        rmdIncrementalNaming(&(pdata)->args.filename);
        fprintf(stderr, "Output file: %s\n", pdata->args.filename);
    }
        
    enc_data_t->fp=fopen((pdata)->args.filename,"w");
    if(enc_data_t->fp==NULL){
        fprintf(stderr,"Cannot open file %s for writting!\n",
                       (pdata)->args.filename);
        exit(13);
    }

    //each stream must have a unique serial number
    srand(time(NULL));
    y0=rand()+1;
    y1=rand()+1;
    y2=rand()+1;
    y2+=(y1==y2);
    y0=(((y0==y1)||(y0==y2))?(y1+y2):y0);
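    //rand()+1 keeps every serial non-zero; y2 is bumped by one if it collides
    //with y1, and y0 falls back to y1+y2 if it collides with either, so the
    //skeleton, theora and vorbis streams get three distinct serial numbers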

    //init ogg streams
    //skeleton first
    ogg_stream_init(&m_ogg_skel,y0);
    m_add_fishead_packet(&m_ogg_skel);
    if(ogg_stream_pageout(&m_ogg_skel,&skel_og_pg)!=1){
        fprintf (stderr, "Internal Ogg library error.\n");
        exit (2);
    }
    fwrite(skel_og_pg.header,1,skel_og_pg.header_len,enc_data_t->fp);
    fwrite(skel_og_pg.body,1,skel_og_pg.body_len,enc_data_t->fp);
    


    ogg_stream_init(&enc_data_t->m_ogg_ts,y1);
    if(!pdata->args.nosound)
        ogg_stream_init(&enc_data_t->m_ogg_vs,y2);


    theora_info_init(&enc_data_t->m_th_inf);
    enc_data_t->m_th_inf.frame_width                  = pdata->brwin.rrect.width;
    enc_data_t->m_th_inf.frame_height                 = pdata->brwin.rrect.height;
    enc_data_t->m_th_inf.width                        = ((enc_data_t->m_th_inf.frame_width + 15) >> 4) << 4;
    enc_data_t->m_th_inf.height                       = ((enc_data_t->m_th_inf.frame_height + 15) >> 4) << 4;
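    //theora requires the encoded surface to be a multiple of 16 in each
    //dimension; e.g. a 1366x768 capture is padded to a 1376x768 encode
    //surface while frame_width/frame_height keep the visible 1366x768 size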
    enc_data_t->m_th_inf.offset_x                     = 0;
    enc_data_t->m_th_inf.offset_y                     = 0;

    enc_data_t->m_th_inf.fps_numerator                = pdata->args.fps * 100.0;
    enc_data_t->m_th_inf.fps_denominator              = 100;
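    //the framerate is stored as a rational with a fixed denominator of 100,
    //so e.g. an fps argument of 15 ends up as 1500/100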
    enc_data_t->m_th_inf.aspect_numerator             = 1;
    enc_data_t->m_th_inf.aspect_denominator           = 1;

    enc_data_t->m_th_inf.colorspace                   = OC_CS_UNSPECIFIED;
    enc_data_t->m_th_inf.pixelformat                  = OC_PF_420;

    enc_data_t->m_th_inf.target_bitrate               = pdata->args.v_bitrate;
    enc_data_t->m_th_inf.quality                      = pdata->args.v_quality;
    enc_data_t->m_th_inf.dropframes_p                 = 0;
    enc_data_t->m_th_inf.quick_p                      = 1;
    enc_data_t->m_th_inf.keyframe_auto_p              = 1;
    enc_data_t->m_th_inf.keyframe_frequency           = 64;
    enc_data_t->m_th_inf.keyframe_frequency_force     = 64;
    enc_data_t->m_th_inf.keyframe_data_target_bitrate = enc_data_t->m_th_inf.quality * 1.5;
    enc_data_t->m_th_inf.keyframe_auto_threshold      = 80;
    enc_data_t->m_th_inf.keyframe_mindistance         = 8;
    enc_data_t->m_th_inf.noise_sensitivity            = 1;
    enc_data_t->m_th_inf.sharpness                    = 2;

    theora_encode_init(&enc_data_t->m_th_st,&enc_data_t->m_th_inf);


    if(!pdata->args.nosound){
        int ret;
        vorbis_info_init(&enc_data_t->m_vo_inf);
        ret = vorbis_encode_init_vbr(&enc_data_t->m_vo_inf,
                                     pdata->args.channels,
                                     pdata->args.frequency,
                                     (float)pdata->args.s_quality*0.1);
        if(ret){
            fprintf(stderr,"Error while setting up vorbis stream quality!\n");
            exit(2);
        }
        vorbis_comment_init(&enc_data_t->m_vo_cmmnt);
        vorbis_analysis_init(&enc_data_t->m_vo_dsp,&enc_data_t->m_vo_inf);
        vorbis_block_init(&enc_data_t->m_vo_dsp,&enc_data_t->m_vo_block);
    }


    theora_encode_header(&enc_data_t->m_th_st,&enc_data_t->m_ogg_pckt1);
    ogg_stream_packetin(&enc_data_t->m_ogg_ts,&enc_data_t->m_ogg_pckt1);
    if(ogg_stream_pageout(&enc_data_t->m_ogg_ts,&enc_data_t->m_ogg_pg)!=1){
        fprintf(stderr,"Internal Ogg library error.\n");
        exit(2);
    }
    fwrite(enc_data_t->m_ogg_pg.header,1,
           enc_data_t->m_ogg_pg.header_len,
           enc_data_t->fp);
    fwrite(enc_data_t->m_ogg_pg.body,1,
           enc_data_t->m_ogg_pg.body_len,
           enc_data_t->fp);

    theora_comment_init(&enc_data_t->m_th_cmmnt);
    theora_comment_add_tag(&enc_data_t->m_th_cmmnt,"recordMyDesktop",VERSION);
    theora_encode_comment(&enc_data_t->m_th_cmmnt,&enc_data_t->m_ogg_pckt1);
    ogg_stream_packetin(&enc_data_t->m_ogg_ts,&enc_data_t->m_ogg_pckt1);
    //theora_encode_comment() allocates the packet data itself and libogg
    //copies it on packetin, so free it here to avoid a small leak
    free(enc_data_t->m_ogg_pckt1.packet);
    theora_encode_tables(&enc_data_t->m_th_st,&enc_data_t->m_ogg_pckt1);
    ogg_stream_packetin(&enc_data_t->m_ogg_ts,&enc_data_t->m_ogg_pckt1);


    if(!pdata->args.nosound){
        ogg_packet header;
        ogg_packet header_comm;
        ogg_packet header_code;

        vorbis_analysis_headerout(&enc_data_t->m_vo_dsp,
                                  &enc_data_t->m_vo_cmmnt,
                                  &header,&header_comm,
                                  &header_code);
        ogg_stream_packetin(&enc_data_t->m_ogg_vs,&header);
        if(ogg_stream_pageout(&enc_data_t->m_ogg_vs,&enc_data_t->m_ogg_pg)!=1){
            fprintf(stderr,"Internal Ogg library error.\n");
            exit(2);
        }
        fwrite(enc_data_t->m_ogg_pg.header,1,
               enc_data_t->m_ogg_pg.header_len,
               enc_data_t->fp);
        fwrite(enc_data_t->m_ogg_pg.body,1,
               enc_data_t->m_ogg_pg.body_len,
               enc_data_t->fp);

        ogg_stream_packetin(&enc_data_t->m_ogg_vs,&header_comm);
        ogg_stream_packetin(&enc_data_t->m_ogg_vs,&header_code);
    }

    //fisbone packets go here
    memset(&skel_fbv,0,sizeof(skel_fbv));
    skel_fbv.serial_no=enc_data_t->m_ogg_ts.serialno;
    skel_fbv.nr_header_packet=3;
    skel_fbv.granule_rate_n=enc_data_t->m_th_inf.fps_numerator;
    skel_fbv.granule_rate_d=enc_data_t->m_th_inf.fps_denominator;
    skel_fbv.start_granule=0;
    skel_fbv.preroll=0;
    skel_fbv.granule_shift=theora_granule_shift(&enc_data_t->m_th_inf);
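    //granule_shift tells demuxers how a theora granulepos is split: the high
    //bits hold the last keyframe number and the low granule_shift bits hold
    //the frame offset since that keyframe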
    add_message_header_field(&skel_fbv,
                             "Content-Type",
                             "video/theora");

    add_fisbone_to_stream(&m_ogg_skel,&skel_fbv);

    if(!pdata->args.nosound){

        memset(&skel_fba,0,sizeof(skel_fba));
        skel_fba.serial_no=enc_data_t->m_ogg_vs.serialno;
        skel_fba.nr_header_packet=3;
        skel_fba.granule_rate_n=pdata->args.frequency;
        skel_fba.granule_rate_d=(ogg_int64_t)1;
        skel_fba.start_granule=0;
        skel_fba.preroll=2;
        skel_fba.granule_shift=0;
        add_message_header_field(&skel_fba,
                                 "Content-Type",
                                 "audio/vorbis");

        add_fisbone_to_stream(&m_ogg_skel,&skel_fba);
    
    }

    while(1){
        int result = ogg_stream_flush(&m_ogg_skel, &skel_og_pg);
        if(result<0){
            fprintf (stderr, "Internal Ogg library error.\n");
            exit(2);
        }
        if(result==0)
            break;
        fwrite(skel_og_pg.header,1,skel_og_pg.header_len,enc_data_t->fp);
        fwrite(skel_og_pg.body,1,skel_og_pg.body_len,enc_data_t->fp);
    }



    while(1){
        int result = ogg_stream_flush(&enc_data_t->m_ogg_ts,
                                      &enc_data_t->m_ogg_pg);
        if(result<0){
            fprintf(stderr,"Internal Ogg library error.\n");
            exit(2);
        }
        if(result==0)break;
        fwrite(enc_data_t->m_ogg_pg.header,1,
               enc_data_t->m_ogg_pg.header_len,
               enc_data_t->fp);
        fwrite(enc_data_t->m_ogg_pg.body,1,
               enc_data_t->m_ogg_pg.body_len,
               enc_data_t->fp);
    }

    if(!pdata->args.nosound){
        while(1){
            int result=ogg_stream_flush(&enc_data_t->m_ogg_vs,
                                        &enc_data_t->m_ogg_pg);
            if(result<0){
                fprintf(stderr,"Internal Ogg library error.\n");
                exit(2);
            }
            if(result==0)break;
            fwrite(enc_data_t->m_ogg_pg.header,1,
                   enc_data_t->m_ogg_pg.header_len,
                   enc_data_t->fp);
            fwrite(enc_data_t->m_ogg_pg.body,1,
                   enc_data_t->m_ogg_pg.body_len,
                   enc_data_t->fp);
        }
    }
    
    //skeleton eos
    add_eos_packet_to_stream(&m_ogg_skel);
    if(ogg_stream_flush(&m_ogg_skel,&skel_og_pg)<0){
        fprintf(stderr,"Internal Ogg library error.\n");
        exit(2);
    }
    fwrite(skel_og_pg.header,1,skel_og_pg.header_len,enc_data_t->fp);
    fwrite(skel_og_pg.body,1,skel_og_pg.body_len,enc_data_t->fp);
    //the skeleton stream is finished at this point, so release its buffers
    ogg_stream_clear(&m_ogg_skel);


    //theora buffer allocation, if any
    if(!buffer_ready){
        enc_data_t->yuv.y=(unsigned char *)malloc(enc_data_t->m_th_inf.height*
                          enc_data_t->m_th_inf.width);
        enc_data_t->yuv.u=(unsigned char *)malloc(enc_data_t->m_th_inf.height*
                          enc_data_t->m_th_inf.width/4);
        enc_data_t->yuv.v=(unsigned char *)malloc(enc_data_t->m_th_inf.height*
                          enc_data_t->m_th_inf.width/4);
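        //planar 4:2:0 sizes: Y is width*height bytes, U and V a quarter of
        //that each, e.g. 1376x768 -> 1056768 + 2*264192 bytes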
        enc_data_t->yuv.y_width=enc_data_t->m_th_inf.width;
        enc_data_t->yuv.y_height=enc_data_t->m_th_inf.height;
        enc_data_t->yuv.y_stride=enc_data_t->m_th_inf.width;

        enc_data_t->yuv.uv_width=enc_data_t->m_th_inf.width/2;
        enc_data_t->yuv.uv_height=enc_data_t->m_th_inf.height/2;
        enc_data_t->yuv.uv_stride=enc_data_t->m_th_inf.width/2;
        enc_data_t->x_offset=enc_data_t->m_th_inf.offset_x;
        enc_data_t->y_offset=enc_data_t->m_th_inf.offset_y;
    }
    theora_info_clear(&enc_data_t->m_th_inf);

}
Example #3
0
static int encode_init(AVCodecContext* avc_context)
{
    theora_info t_info;
    theora_comment t_comment;
    ogg_packet o_packet;
    unsigned int offset;
    TheoraContext *h = avc_context->priv_data;

    /* Set up the theora_info struct */
    theora_info_init( &t_info );
    t_info.width = avc_context->width;
    t_info.height = avc_context->height;
    t_info.frame_width = avc_context->width;
    t_info.frame_height = avc_context->height;
    t_info.offset_x = 0;
    t_info.offset_y = 0;
    /* Swap numerator and denominator as time_base in AVCodecContext gives the
     * time period between frames, but theora_info needs the framerate.  */
    t_info.fps_numerator = avc_context->time_base.den;
    t_info.fps_denominator = avc_context->time_base.num;
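    /* e.g. a time_base of 1/25 (one frame every 1/25 s) becomes a framerate
     * of 25/1 here */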
    if (avc_context->sample_aspect_ratio.num != 0) {
        t_info.aspect_numerator = avc_context->sample_aspect_ratio.num;
        t_info.aspect_denominator = avc_context->sample_aspect_ratio.den;
    } else {
        t_info.aspect_numerator = 1;
        t_info.aspect_denominator = 1;
    }
    t_info.colorspace = OC_CS_UNSPECIFIED;
    t_info.pixelformat = OC_PF_420;
    t_info.target_bitrate = avc_context->bit_rate;
    t_info.keyframe_frequency = avc_context->gop_size;
    t_info.keyframe_frequency_force = avc_context->gop_size;
    t_info.keyframe_mindistance = avc_context->keyint_min;
    t_info.quality = 0;

    t_info.quick_p = 1;
    t_info.dropframes_p = 0;
    t_info.keyframe_auto_p = 1;
    t_info.keyframe_data_target_bitrate = t_info.target_bitrate * 1.5;
    t_info.keyframe_auto_threshold = 80;
    t_info.noise_sensitivity = 1;
    t_info.sharpness = 0;

    /* Now initialise libtheora */
    if (theora_encode_init( &(h->t_state), &t_info ) != 0) {
        av_log(avc_context, AV_LOG_ERROR, "theora_encode_init failed\n");
        return -1;
    }

    /* Clear up theora_info struct */
    theora_info_clear( &t_info );

    /*
        Output first header packet consisting of theora
        header, comment, and tables.

        Each one is prefixed with a 16bit size, then they
        are concatenated together into ffmpeg's extradata.
    */
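    /*
        The resulting layout (concatenate_packet() itself is not shown here)
        is therefore assumed to be:
          [16-bit size][header][16-bit size][comment][16-bit size][tables]
        which a demuxer/decoder can walk to recover the three packets.
    */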
    offset = 0;

    /* Header */
    theora_encode_header( &(h->t_state), &o_packet );
    if (concatenate_packet( &offset, avc_context, &o_packet ) != 0) {
        return -1;
    }

    /* Comment */
    theora_comment_init( &t_comment );
    theora_encode_comment( &t_comment, &o_packet );
    if (concatenate_packet( &offset, avc_context, &o_packet ) != 0) {
        return -1;
    }
    /* theora_encode_comment() allocates the packet data itself; it has been
       copied into extradata above, so free it here to avoid a small leak */
    free(o_packet.packet);

    /* Tables */
    theora_encode_tables( &(h->t_state), &o_packet );
    if (concatenate_packet( &offset, avc_context, &o_packet ) != 0) {
        return -1;
    }

    /* Clear up theora_comment struct */
    theora_comment_clear( &t_comment );

    /* Set up the output AVFrame */
    avc_context->coded_frame= avcodec_alloc_frame();

    return 0;
}
Example #4
0
int main(int argc,char *argv[]){
  int c,long_option_index,ret;

  ogg_stream_state to; /* take physical pages, weld into a logical
                           stream of packets */
  ogg_stream_state vo; /* take physical pages, weld into a logical
                           stream of packets */
  ogg_page         og; /* one Ogg bitstream page.  Vorbis packets are inside */
  ogg_packet       op; /* one raw packet of data for decode */

  theora_state     td;
  theora_info      ti;
  theora_comment   tc;

  vorbis_info      vi; /* struct that stores all the static vorbis bitstream
                          settings */
  vorbis_comment   vc; /* struct that stores all the user comments */

  vorbis_dsp_state vd; /* central working state for the packet->PCM decoder */
  vorbis_block     vb; /* local working space for packet->PCM decode */

  int audioflag=0;
  int videoflag=0;
  int akbps=0;
  int vkbps=0;

  ogg_int64_t audio_bytesout=0;
  ogg_int64_t video_bytesout=0;
  double timebase;


  FILE* outfile = stdout;

#ifdef _WIN32 
# ifdef THEORA_PERF_DATA
    LARGE_INTEGER start_time;
    LARGE_INTEGER final_time;

    LONGLONG elapsed_ticks;
    LARGE_INTEGER ticks_per_second;
    
    LONGLONG elapsed_secs;
    LONGLONG elapsed_sec_mod;
    double elapsed_secs_dbl;
# endif
  /* We need to set stdin/stdout to binary mode. Damn windows. */
  /* if we were reading/writing a file, it would also need to be in
     binary mode, eg, fopen("file.wav","wb"); */
  /* Beware the evil ifdef. We avoid these where we can, but this one we
     cannot. Don't add any more, you'll probably go to hell if you do. */
  _setmode( _fileno( stdin ), _O_BINARY );
  _setmode( _fileno( stdout ), _O_BINARY );


#endif

  while((c=getopt_long(argc,argv,optstring,options,&long_option_index))!=EOF){
    switch(c){
    case 'o':
      outfile=fopen(optarg,"wb");
      if(outfile==NULL){
        fprintf(stderr,"Unable to open output file '%s'\n", optarg);
        exit(1);
      }
      break;

    case 'a':
      audio_q=atof(optarg)*.099;
      if(audio_q<-.1 || audio_q>1){
        fprintf(stderr,"Illegal audio quality (choose -1 through 10)\n");
        exit(1);
      }
      audio_r=-1;
      break;

    case 'v':
      video_q=rint(atof(optarg)*6.3);
      if(video_q<0 || video_q>63){
        fprintf(stderr,"Illegal video quality (choose 0 through 10)\n");
        exit(1);
      }
      video_r=0;
      break;

    case 'A':
      audio_r=atof(optarg)*1000;
      if(audio_r<0){
        fprintf(stderr,"Illegal audio bitrate (choose > 0 please)\n");
        exit(1);
      }
      audio_q=-99;
      break;

    case 'V':
      video_r=rint(atof(optarg)*1000);
      if(video_r<45000 || video_r>2000000){
        fprintf(stderr,"Illegal video bitrate (choose 45kbps through 2000kbps)\n");
        exit(1);
      }
      video_q=0;
     break;

    case 's':
      video_an=rint(atof(optarg));
      break;

    case 'S':
      video_ad=rint(atof(optarg));
      break;

    case 'f':
      video_hzn=rint(atof(optarg));
      break;

    case 'F':
      video_hzd=rint(atof(optarg));
      break;

    default:
      usage();
    }
  }

  while(optind<argc){
    /* assume that anything following the options must be a filename */
    id_file(argv[optind]);
    optind++;
  }



#ifdef THEORA_PERF_DATA
# ifdef WIN32
    QueryPerformanceCounter(&start_time);
# endif
#endif


  /* yayness.  Set up Ogg output stream */
  srand(time(NULL));
  {
    /* need two unequal serial numbers */
    int serial1, serial2;
    serial1 = rand();
    serial2 = rand();
    if (serial1 == serial2) serial2++;
    ogg_stream_init(&to,serial1);
    ogg_stream_init(&vo,serial2);
  }

  /* Set up Theora encoder */
  if(!video){
    fprintf(stderr,"No video files submitted for compression?\n");
    exit(1);
  }
  /* Theora has a divisible-by-sixteen restriction for the encoded video size */
  /* scale the frame size up to the nearest /16 and calculate offsets */
  video_x=((frame_x + 15) >>4)<<4;
  video_y=((frame_y + 15) >>4)<<4;
  /* We force the offset to be even.
     This ensures that the chroma samples align properly with the luma
      samples. */
  frame_x_offset=((video_x-frame_x)/2)&~1;
  frame_y_offset=((video_y-frame_y)/2)&~1;
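  /* e.g. a 854x480 source gives video_x=864, video_y=480,
     frame_x_offset=((864-854)/2)&~1=4 and frame_y_offset=0; the padding is
     cropped away again by the decoder using the offsets stored below */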

  theora_info_init(&ti);
  ti.width=video_x;
  ti.height=video_y;
  ti.frame_width=frame_x;
  ti.frame_height=frame_y;
  ti.offset_x=frame_x_offset;
  ti.offset_y=frame_y_offset;
  ti.fps_numerator=video_hzn;
  ti.fps_denominator=video_hzd;
  ti.aspect_numerator=video_an;
  ti.aspect_denominator=video_ad;
  ti.colorspace=OC_CS_UNSPECIFIED;
  ti.pixelformat=OC_PF_420;
  ti.target_bitrate=video_r;
  ti.quality=video_q;

  ti.dropframes_p=0;
  ti.quick_p=1;
  ti.keyframe_auto_p=1;
  ti.keyframe_frequency=64;
  ti.keyframe_frequency_force=64;
  ti.keyframe_data_target_bitrate=video_r*1.5;
  ti.keyframe_auto_threshold=80;
  ti.keyframe_mindistance=8;
  ti.noise_sensitivity=1;

  theora_encode_init(&td,&ti);
  theora_info_clear(&ti);

  /* initialize Vorbis too, assuming we have audio to compress. */
  if(audio){
    vorbis_info_init(&vi);
    if(audio_q>-99)
      ret = vorbis_encode_init_vbr(&vi,audio_ch,audio_hz,audio_q);
    else
      ret = vorbis_encode_init(&vi,audio_ch,audio_hz,-1,audio_r,-1);
    if(ret){
      fprintf(stderr,"The Vorbis encoder could not set up a mode according to\n"
              "the requested quality or bitrate.\n\n");
      exit(1);
    }

    vorbis_comment_init(&vc);
    vorbis_analysis_init(&vd,&vi);
    vorbis_block_init(&vd,&vb);
  }

  /* write the bitstream header packets with proper page interleave */

  /* first packet will get its own page automatically */
  theora_encode_header(&td,&op);
  ogg_stream_packetin(&to,&op);
  if(ogg_stream_pageout(&to,&og)!=1){
    fprintf(stderr,"Internal Ogg library error.\n");
    exit(1);
  }
  fwrite(og.header,1,og.header_len,outfile);
  fwrite(og.body,1,og.body_len,outfile);

  /* create the remaining theora headers */
  theora_comment_init(&tc);
  theora_encode_comment(&tc,&op);
  ogg_stream_packetin(&to,&op);
  /*theora_encode_comment() doesn't take a theora_state parameter, so it has to
     allocate its own buffer to pass back the packet data.
    If we don't free it here, we'll leak.
    libogg2 makes this much cleaner: the stream owns the buffer after you call
     packetin in libogg2, but this is not true in libogg1.*/
  free(op.packet);
  theora_encode_tables(&td,&op);
  ogg_stream_packetin(&to,&op);

  if(audio){
    ogg_packet header;
    ogg_packet header_comm;
    ogg_packet header_code;

    vorbis_analysis_headerout(&vd,&vc,&header,&header_comm,&header_code);
    ogg_stream_packetin(&vo,&header); /* automatically placed in its own
                                         page */
    if(ogg_stream_pageout(&vo,&og)!=1){
      fprintf(stderr,"Internal Ogg library error.\n");
      exit(1);
    }
    fwrite(og.header,1,og.header_len,outfile);
    fwrite(og.body,1,og.body_len,outfile);

    /* remaining vorbis header packets */
    ogg_stream_packetin(&vo,&header_comm);
    ogg_stream_packetin(&vo,&header_code);
  }

  /* Flush the rest of our headers. This ensures
     the actual data in each stream will start
     on a new page, as per spec. */
  while(1){
    int result = ogg_stream_flush(&to,&og);
    if(result<0){
      /* can't get here */
      fprintf(stderr,"Internal Ogg library error.\n");
      exit(1);
    }
    if(result==0)break;
    fwrite(og.header,1,og.header_len,outfile);
    fwrite(og.body,1,og.body_len,outfile);
  }
  if(audio){
    while(1){
      int result=ogg_stream_flush(&vo,&og);
      if(result<0){
        /* can't get here */
        fprintf(stderr,"Internal Ogg library error.\n");
        exit(1);
      }
      if(result==0)break;
      fwrite(og.header,1,og.header_len,outfile);
      fwrite(og.body,1,og.body_len,outfile);
    }
  }

  /* setup complete.  Raw processing loop */
  fprintf(stderr,"Compressing....\n");
  while(1){
    ogg_page audiopage;
    ogg_page videopage;

    /* is there an audio page flushed?  If not, fetch one if possible */
    audioflag=fetch_and_process_audio(audio,&audiopage,&vo,&vd,&vb,audioflag);

    /* is there a video page flushed?  If not, fetch one if possible */
    videoflag=fetch_and_process_video(video,&videopage,&to,&td,videoflag);

    /* no pages of either?  Must be end of stream. */
    if(!audioflag && !videoflag)break;

    /* which is earlier: the end of the audio page or the end of the
       video page? Flush the earlier one to the stream */
    {
      int audio_or_video=-1;
      double audiotime=
        audioflag?vorbis_granule_time(&vd,ogg_page_granulepos(&audiopage)):-1;
      double videotime=
        videoflag?theora_granule_time(&td,ogg_page_granulepos(&videopage)):-1;
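      /* the granulepos of the last packet on each flushed page is converted
         to seconds; the page that ends earlier is written first, which keeps
         the audio and video pages roughly interleaved in presentation order */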

      if(!audioflag){
        audio_or_video=1;
      } else if(!videoflag) {
        audio_or_video=0;
      } else {
        if(audiotime<videotime)
          audio_or_video=0;
        else
          audio_or_video=1;
      }

      if(audio_or_video==1){
        /* flush a video page */
        video_bytesout+=fwrite(videopage.header,1,videopage.header_len,outfile);
        video_bytesout+=fwrite(videopage.body,1,videopage.body_len,outfile);
        videoflag=0;
        timebase=videotime;
        
      }else{
        /* flush an audio page */
        audio_bytesout+=fwrite(audiopage.header,1,audiopage.header_len,outfile);
        audio_bytesout+=fwrite(audiopage.body,1,audiopage.body_len,outfile);
        audioflag=0;
        timebase=audiotime;
      }
      {
        int hundredths=timebase*100-(long)timebase*100;
        int seconds=(long)timebase%60;
        int minutes=((long)timebase/60)%60;
        int hours=(long)timebase/3600;
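        /* e.g. timebase=3725.50s prints as 1:02:05.50 */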
        
        if(audio_or_video)
          vkbps=rint(video_bytesout*8./timebase*.001);
        else
          akbps=rint(audio_bytesout*8./timebase*.001);
        
        fprintf(stderr,
                "\r      %d:%02d:%02d.%02d audio: %dkbps video: %dkbps                 ",
                hours,minutes,seconds,hundredths,akbps,vkbps);
      }
    }

  }

  /* clear out state */

  if(audio){
    ogg_stream_clear(&vo);
    vorbis_block_clear(&vb);
    vorbis_dsp_clear(&vd);
    vorbis_comment_clear(&vc);
    vorbis_info_clear(&vi);
  }
  if(video){
    ogg_stream_clear(&to);
    theora_clear(&td);
  }

  if(outfile && outfile!=stdout)fclose(outfile);

  fprintf(stderr,"\r   \ndone.\n\n");

#ifdef THEORA_PERF_DATA
# ifdef WIN32
    QueryPerformanceCounter(&final_time);
    elapsed_ticks = final_time.QuadPart - start_time.QuadPart;
    QueryPerformanceFrequency(&ticks_per_second);
    elapsed_secs = elapsed_ticks / ticks_per_second.QuadPart;
    elapsed_sec_mod = elapsed_ticks % ticks_per_second.QuadPart;
    elapsed_secs_dbl = elapsed_secs;
    elapsed_secs_dbl += ((double)elapsed_sec_mod / (double)ticks_per_second.QuadPart);
    printf("Encode time = %lld ticks\n", elapsed_ticks);
    printf("~%lld and %lld / %lld seconds\n", elapsed_secs, elapsed_sec_mod, ticks_per_second.QuadPart);
    printf("~%Lf seconds\n", elapsed_secs_dbl);
# endif

#endif 

  return(0);

}