Example No. 1
static av_cold int encode_close(AVCodecContext* avc_context)
{
    ogg_packet o_packet;
    TheoraContext *h = avc_context->priv_data;
    int result;
    const char* message;

    result = theora_encode_packetout( &(h->t_state), 1, &o_packet );
    theora_clear( &(h->t_state) );
    av_freep(&avc_context->coded_frame);
    av_freep(&avc_context->extradata);
    avc_context->extradata_size = 0;

    switch (result) {
        case 0:  /* No packet is ready */
        case -1: /* Encoding finished */
            return 0;
        case 1:
            /* We have a packet */
            message = "gave us a packet";
            break;
        default:
            message = "unknown reason";
            break;
    }
    av_log(avc_context, AV_LOG_ERROR, "theora_encode_packetout failed (%s) [%d]\n", message, result);
    return -1;
}
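Before the remaining examples, here is a minimal, self-contained sketch of the legacy libtheora 1.x call sequence that every snippet on this page shares (init, submit a frame, pull packets, clear). The function name and the dummy single-buffer frame are illustrative only, and error handling is reduced to early returns:

#include <stdlib.h>
#include <theora/theora.h>

static int encode_one_frame(theora_info *ti)
{
    theora_state th;
    yuv_buffer yuv;
    ogg_packet op;
    unsigned char *buf;

    if (theora_encode_init(&th, ti) != 0)
        return -1;

    /* one zeroed plane buffer is enough for a dummy 4:2:0 frame */
    buf = calloc(ti->width * ti->height, 1);
    if (!buf) {
        theora_clear(&th);
        return -1;
    }

    yuv.y_width   = ti->width;      yuv.y_height  = ti->height;
    yuv.y_stride  = ti->width;      yuv.y         = buf;
    yuv.uv_width  = ti->width / 2;  yuv.uv_height = ti->height / 2;
    yuv.uv_stride = ti->width / 2;  yuv.u = yuv.v = buf;

    /* submit the frame, then drain; the 1 marks it as the last frame */
    if (theora_encode_YUVin(&th, &yuv) == 0)
        while (theora_encode_packetout(&th, 1, &op) > 0)
            ; /* each op would normally go to ogg_stream_packetin() */

    free(buf);
    theora_clear(&th);
    return 0;
}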
Example No. 2
static void enc_process(MSFilter *f){
	mblk_t *im,*om;
	ogg_packet op;
	EncState *s=(EncState*)f->data;
	uint64_t timems=f->ticker->time;
	uint32_t timestamp=timems*90;
	uint64_t elapsed;

	
	while((im=ms_queue_get(f->inputs[0]))!=NULL){
		/*for the first frames, send only the theora packed conf*/
		om=NULL;
		if (s->nframes==0){
			s->start_time=timems;
		}
		elapsed=timems-s->start_time;

		if (need_send_conf(s,elapsed)){
			if (s->packed_conf) {
				om=dupmsg(s->packed_conf);
				ms_message("sending theora packed conf (%i bytes)",msgdsize(om));
				packetize_and_send(f,s,om,timestamp,THEORA_PACKED_CONF);
			}else {
				ms_error("No packed conf to send.");
			}
		}else{
			enc_fill_yuv(&s->yuv,im);
			ms_debug("subtmitting yuv frame to theora encoder...");
			if (theora_encode_YUVin(&s->tstate,&s->yuv)!=0){
				ms_error("theora_encode_YUVin error.");
			}else{
				if (theora_encode_packetout(&s->tstate,0,&op)==1){
					ms_debug("Got theora coded frame");
					om=allocb(op.bytes,0);
					memcpy(om->b_wptr,op.packet,op.bytes);
					om->b_wptr+=op.bytes;
					packetize_and_send(f,s,om,timestamp,THEORA_RAW_DATA);
				}
			}
		}
		freemsg(im);
		s->nframes++;
	}
}
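A note on the timestamp math above: RTP uses a 90 kHz media clock for video, so enc_process converts the ticker's millisecond time with a plain multiply by 90. A sketch of that conversion (the helper name is made up for illustration):

#include <stdint.h>

/* 90 000 RTP ticks per second == 90 ticks per millisecond; the result
   deliberately truncates to 32 bits, since RTP timestamps wrap */
static uint32_t ms_to_rtp90k(uint64_t time_ms)
{
    return (uint32_t)(time_ms * 90);
}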
Example No. 3
static GstFlowReturn
theora_enc_chain (GstPad * pad, GstBuffer * buffer)
{
  GstTheoraEnc *enc;
  ogg_packet op;
  GstClockTime timestamp, duration, running_time;
  GstFlowReturn ret;

  enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));

  /* we keep track of two timelines.
   * - The timestamps from the incoming buffers, which we copy to the outgoing
   *   encoded buffers as-is. We need to do this as we simply forward the
   *   newsegment events.
   * - The running_time of the buffers, which we use to construct the granulepos
   *   in the packets.
   */
  timestamp = GST_BUFFER_TIMESTAMP (buffer);
  duration = GST_BUFFER_DURATION (buffer);
  running_time =
      gst_segment_to_running_time (&enc->segment, GST_FORMAT_TIME, timestamp);

  /* make sure we copy the discont flag to the next outgoing buffer when it's
   * set on the incoming buffer */
  if (GST_BUFFER_IS_DISCONT (buffer)) {
    enc->next_discont = TRUE;
  }

  if (enc->packetno == 0) {
    /* no packets written yet, setup headers */
    GstCaps *caps;
    GstBuffer *buf1, *buf2, *buf3;

    enc->granulepos_offset = 0;
    enc->timestamp_offset = 0;

    GST_DEBUG_OBJECT (enc, "output headers");
    /* Theora streams begin with three headers; the initial header (with
       most of the codec setup parameters) which is mandated by the Ogg
       bitstream spec.  The second header holds any comment fields.  The
       third header holds the bitstream codebook.  We merely need to
       make the headers, then pass them to libtheora one at a time;
       libtheora handles the additional Ogg bitstream constraints */

    /* first packet will get its own page automatically */
    if (theora_encode_header (&enc->state, &op) != 0)
      goto encoder_disabled;

    ret =
        theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
        GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf1);
    if (ret != GST_FLOW_OK) {
      goto header_buffer_alloc;
    }

    /* create the remaining theora headers */
    theora_comment_clear (&enc->comment);
    theora_comment_init (&enc->comment);

    if (theora_encode_comment (&enc->comment, &op) != 0)
      goto encoder_disabled;

    ret =
        theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
        GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf2);
    /* Theora expects us to put this packet buffer into an ogg page,
     * in which case it becomes the ogg library's responsibility to
     * free it. Since we're copying and outputting a gst_buffer,
     * we need to free it ourselves. */
    if (op.packet)
      free (op.packet);

    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buf1);
      goto header_buffer_alloc;
    }

    if (theora_encode_tables (&enc->state, &op) != 0)
      goto encoder_disabled;

    ret =
        theora_buffer_from_packet (enc, &op, GST_CLOCK_TIME_NONE,
        GST_CLOCK_TIME_NONE, GST_CLOCK_TIME_NONE, &buf3);
    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buf1);
      gst_buffer_unref (buf2);
      goto header_buffer_alloc;
    }

    /* mark buffers and put on caps */
    caps = gst_pad_get_caps (enc->srcpad);
    caps = theora_set_header_on_caps (caps, buf1, buf2, buf3);
    GST_DEBUG ("here are the caps: %" GST_PTR_FORMAT, caps);
    gst_pad_set_caps (enc->srcpad, caps);

    gst_buffer_set_caps (buf1, caps);
    gst_buffer_set_caps (buf2, caps);
    gst_buffer_set_caps (buf3, caps);

    gst_caps_unref (caps);

    /* push out the header buffers */
    if ((ret = theora_push_buffer (enc, buf1)) != GST_FLOW_OK) {
      gst_buffer_unref (buf2);
      gst_buffer_unref (buf3);
      goto header_push;
    }
    if ((ret = theora_push_buffer (enc, buf2)) != GST_FLOW_OK) {
      gst_buffer_unref (buf3);
      goto header_push;
    }
    if ((ret = theora_push_buffer (enc, buf3)) != GST_FLOW_OK) {
      goto header_push;
    }

    enc->granulepos_offset =
        gst_util_uint64_scale (running_time, enc->fps_n,
        GST_SECOND * enc->fps_d);
    enc->timestamp_offset = running_time;
    enc->next_ts = 0;
  }

  {
    yuv_buffer yuv;
    gint res;
    gint y_size;
    guint8 *pixels;

    yuv.y_width = enc->info_width;
    yuv.y_height = enc->info_height;
    yuv.y_stride = enc->info_width;

    yuv.uv_width = enc->info_width / 2;
    yuv.uv_height = enc->info_height / 2;
    yuv.uv_stride = yuv.uv_width;

    y_size = enc->info_width * enc->info_height;

    if (enc->width == enc->info_width && enc->height == enc->info_height) {
      GST_LOG_OBJECT (enc, "no cropping/conversion needed");
      /* easy case, no cropping/conversion needed */
      pixels = GST_BUFFER_DATA (buffer);

      yuv.y = pixels;
      yuv.u = yuv.y + y_size;
      yuv.v = yuv.u + y_size / 4;
    } else {
      GstBuffer *newbuf;
      gint i;
      guchar *dest_y, *src_y;
      guchar *dest_u, *src_u;
      guchar *dest_v, *src_v;
      gint src_y_stride, src_uv_stride;
      gint dst_y_stride, dst_uv_stride;
      gint width, height;
      gint cwidth, cheight;
      gint offset_x, right_x, right_border;

      GST_LOG_OBJECT (enc, "cropping/conversion needed for strides");
      /* source width/height */
      width = enc->width;
      height = enc->height;
      /* source chroma width/height */
      cwidth = width / 2;
      cheight = height / 2;

      /* source strides as defined in videotestsrc */
      src_y_stride = GST_ROUND_UP_4 (width);
      src_uv_stride = GST_ROUND_UP_8 (width) / 2;

      /* destination strides from the real picture width */
      dst_y_stride = enc->info_width;
      dst_uv_stride = enc->info_width / 2;

      newbuf = gst_buffer_new_and_alloc (y_size * 3 / 2);
      if (!newbuf) {
        ret = GST_FLOW_ERROR;
        goto no_buffer;
      }
      GST_BUFFER_OFFSET (newbuf) = GST_BUFFER_OFFSET_NONE;
      gst_buffer_set_caps (newbuf, GST_PAD_CAPS (enc->srcpad));

      dest_y = yuv.y = GST_BUFFER_DATA (newbuf);
      dest_u = yuv.u = yuv.y + y_size;
      dest_v = yuv.v = yuv.u + y_size / 4;

      src_y = GST_BUFFER_DATA (buffer);
      src_u = src_y + src_y_stride * GST_ROUND_UP_2 (height);
      src_v = src_u + src_uv_stride * GST_ROUND_UP_2 (height) / 2;

      if (enc->border != BORDER_NONE) {
        /* fill top border */
        for (i = 0; i < enc->offset_y; i++) {
          memset (dest_y, 0, dst_y_stride);
          dest_y += dst_y_stride;
        }
      } else {
        dest_y += dst_y_stride * enc->offset_y;
      }

      offset_x = enc->offset_x;
      right_x = width + enc->offset_x;
      right_border = dst_y_stride - right_x;

      /* copy Y plane */
      for (i = 0; i < height; i++) {
        memcpy (dest_y + offset_x, src_y, width);
        if (enc->border != BORDER_NONE) {
          memset (dest_y, 0, offset_x);
          memset (dest_y + right_x, 0, right_border);
        }

        dest_y += dst_y_stride;
        src_y += src_y_stride;
      }

      if (enc->border != BORDER_NONE) {
        /* fill bottom border */
        for (i = height + enc->offset_y; i < enc->info.height; i++) {
          memset (dest_y, 0, dst_y_stride);
          dest_y += dst_y_stride;
        }

        /* fill top border chroma */
        for (i = 0; i < enc->offset_y / 2; i++) {
          memset (dest_u, 128, dst_uv_stride);
          memset (dest_v, 128, dst_uv_stride);
          dest_u += dst_uv_stride;
          dest_v += dst_uv_stride;
        }
      } else {
        dest_u += dst_uv_stride * enc->offset_y / 2;
        dest_v += dst_uv_stride * enc->offset_y / 2;
      }

      offset_x = enc->offset_x / 2;
      right_x = cwidth + offset_x;
      right_border = dst_uv_stride - right_x;

      /* copy UV planes */
      for (i = 0; i < cheight; i++) {
        memcpy (dest_v + offset_x, src_v, cwidth);
        memcpy (dest_u + offset_x, src_u, cwidth);

        if (enc->border != BORDER_NONE) {
          memset (dest_u, 128, offset_x);
          memset (dest_u + right_x, 128, right_border);
          memset (dest_v, 128, offset_x);
          memset (dest_v + right_x, 128, right_border);
        }

        dest_u += dst_uv_stride;
        dest_v += dst_uv_stride;
        src_u += src_uv_stride;
        src_v += src_uv_stride;
      }

      if (enc->border != BORDER_NONE) {
        /* fill bottom border */
        for (i = cheight + enc->offset_y / 2; i < enc->info_height / 2; i++) {
          memset (dest_u, 128, dst_uv_stride);
          memset (dest_v, 128, dst_uv_stride);
          dest_u += dst_uv_stride;
          dest_v += dst_uv_stride;
        }
      }

      gst_buffer_unref (buffer);
      buffer = newbuf;
    }

    if (theora_enc_is_discontinuous (enc, running_time, duration)) {
      theora_enc_reset (enc);
      enc->granulepos_offset =
          gst_util_uint64_scale (running_time, enc->fps_n,
          GST_SECOND * enc->fps_d);
      enc->timestamp_offset = running_time;
      enc->next_ts = 0;
      enc->next_discont = TRUE;
    }

    res = theora_encode_YUVin (&enc->state, &yuv);

    ret = GST_FLOW_OK;
    while (theora_encode_packetout (&enc->state, 0, &op)) {
      GstClockTime next_time;

      next_time = theora_enc_get_ogg_packet_end_time (enc, &op);

      ret =
          theora_push_packet (enc, &op, timestamp, enc->next_ts,
          next_time - enc->next_ts);

      enc->next_ts = next_time;
      if (ret != GST_FLOW_OK)
        goto data_push;
    }
    gst_buffer_unref (buffer);
  }

  return ret;

  /* ERRORS */
header_buffer_alloc:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
header_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
no_buffer:
  {
    return ret;
  }
data_push:
  {
    gst_buffer_unref (buffer);
    return ret;
  }
encoder_disabled:
  {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("libtheora has been compiled with the encoder disabled"));
    gst_buffer_unref (buffer);
    return GST_FLOW_ERROR;
  }
}
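The header dance at the top of theora_enc_chain is wrapped in GStreamer buffers, but the underlying libtheora calls are generic. A sketch of the same three-header emission feeding an ogg_stream_state directly, assuming already-initialized state, comment, and stream objects:

#include <stdlib.h>
#include <ogg/ogg.h>
#include <theora/theora.h>

/* emit the info, comment, and tables headers, in that order */
static int write_theora_headers(theora_state *ts, theora_comment *tc,
                                ogg_stream_state *os)
{
    ogg_packet op;

    if (theora_encode_header(ts, &op) != 0)
        return -1;
    ogg_stream_packetin(os, &op);

    if (theora_encode_comment(tc, &op) != 0)
        return -1;
    ogg_stream_packetin(os, &op);
    /* ogg_stream_packetin copies the data, and the comment packet is
       malloc'd for us, so free it here (mirrors the free() above) */
    free(op.packet);

    if (theora_encode_tables(ts, &op) != 0)
        return -1;
    ogg_stream_packetin(os, &op);
    return 0;
}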
Example No. 4
static gboolean
theora_enc_sink_event (GstPad * pad, GstEvent * event)
{
  GstTheoraEnc *enc;
  ogg_packet op;
  gboolean res;

  enc = GST_THEORA_ENC (GST_PAD_PARENT (pad));

  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_NEWSEGMENT:
    {
      gboolean update;
      gdouble rate, applied_rate;
      GstFormat format;
      gint64 start, stop, time;

      gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
          &format, &start, &stop, &time);

      gst_segment_set_newsegment_full (&enc->segment, update, rate,
          applied_rate, format, start, stop, time);

      res = gst_pad_push_event (enc->srcpad, event);
      break;
    }
    case GST_EVENT_EOS:
      if (enc->initialised) {
        /* drain the encoder: push the remaining packets with the eos flag set */
        while (theora_encode_packetout (&enc->state, 1, &op)) {
          GstClockTime next_time =
              theora_enc_get_ogg_packet_end_time (enc, &op);

          theora_push_packet (enc, &op, GST_CLOCK_TIME_NONE, enc->next_ts,
              next_time - enc->next_ts);
          enc->next_ts = next_time;
        }
      }
      res = gst_pad_push_event (enc->srcpad, event);
      break;
    case GST_EVENT_FLUSH_STOP:
      gst_segment_init (&enc->segment, GST_FORMAT_UNDEFINED);
      res = gst_pad_push_event (enc->srcpad, event);
      break;
    case GST_EVENT_CUSTOM_DOWNSTREAM:
    {
      const GstStructure *s;

      s = gst_event_get_structure (event);

      if (gst_structure_has_name (s, "GstForceKeyUnit")) {
        GstClockTime next_ts;

        /* make sure timestamps increment after resetting the decoder */
        next_ts = enc->next_ts + enc->timestamp_offset;

        theora_enc_reset (enc);
        enc->granulepos_offset =
            gst_util_uint64_scale (next_ts, enc->fps_n,
            GST_SECOND * enc->fps_d);
        enc->timestamp_offset = next_ts;
        enc->next_ts = 0;
      }
      res = gst_pad_push_event (enc->srcpad, event);
      break;
    }
    default:
      res = gst_pad_push_event (enc->srcpad, event);
      break;
  }
  return res;
}
Example No. 5
static int
granulepos_test_encode (int frequency, int auto_p)
{
  theora_info ti;
  theora_state th;
  int result;
  int frame, tframe, keyframe, keydist;
  int shift;
  double rate, ttime;
  yuv_buffer yuv;
  unsigned char *framedata;
  ogg_packet op;
  long long int last_granule = -1;

/*  INFO ("+ Initializing theora_info struct"); */
  theora_info_init (&ti);

  ti.width = 32;
  ti.height = 32;
  ti.frame_width = ti.width;
  ti.frame_height = ti.height;
  ti.offset_x = 0;
  ti.offset_y = 0;
  ti.fps_numerator = 16;
  ti.fps_denominator = 1;
  ti.aspect_numerator = 1;
  ti.aspect_denominator = 1;
  ti.colorspace = OC_CS_UNSPECIFIED;
  ti.pixelformat = OC_PF_420;
  ti.target_bitrate = 0;
  ti.quality = 16;

  ti.dropframes_p = 0;
  ti.quick_p = 1;

  /* check variations of automatic or forced keyframe choice */
  ti.keyframe_auto_p = auto_p;
  /* check with variations of the maximum gap */
  ti.keyframe_frequency = frequency;
  ti.keyframe_frequency_force = frequency;

  ti.keyframe_data_target_bitrate = ti.target_bitrate * 1.5;
  ti.keyframe_auto_threshold = 80;
  ti.keyframe_mindistance = MIN(8, frequency);
  ti.noise_sensitivity = 1;

/*  INFO ("+ Initializing theora_state for encoding"); */
  result = theora_encode_init (&th, &ti);
  if (result == OC_DISABLED) {
    INFO ("+ Clearing theora_state");
    theora_clear (&th);
  } else if (result < 0) {
    FAIL ("negative return code initializing encoder");
  }

/*  INFO ("+ Setting up dummy 4:2:0 frame data"); */
  framedata = calloc(ti.height, ti.width);
  yuv.y_width = ti.width;
  yuv.y_height = ti.height;
  yuv.y_stride = ti.width;
  yuv.y = framedata;
  yuv.uv_width = ti.width / 2;
  yuv.uv_height = ti.height / 2;
  yuv.uv_stride = ti.width;
  yuv.u = framedata;
  yuv.v = framedata;

  INFO ("+ Checking granulepos generation");
  shift = theora_granule_shift(&ti);
  rate = (double)ti.fps_denominator/ti.fps_numerator;
  for (frame = 0; frame < frequency * 2 + 1; frame++) {
    result = theora_encode_YUVin (&th, &yuv);
    if (result < 0) {
      printf("theora_encode_YUVin() returned %d\n", result);
      FAIL ("negative error code submitting frame for compression");
    }
    theora_encode_packetout (&th, frame >= frequency * 2, &op);
    if ((long long int)op.granulepos < last_granule)
      FAIL ("encoder returned a decreasing granulepos value");
    last_granule = op.granulepos;
    keyframe = op.granulepos >> shift;
    keydist = op.granulepos - (keyframe << shift);
    tframe = theora_granule_frame (&th, op.granulepos);
    ttime = theora_granule_time(&th, op.granulepos);
#if DEBUG
    printf("++ frame %d granulepos %lld %d:%d %d %.3lfs\n", 
	frame, (long long int)op.granulepos, keyframe, keydist,
	tframe, theora_granule_time (&th, op.granulepos));
#endif
    if ((keyframe + keydist) != frame + 1)
      FAIL ("encoder granulepos does not map to the correct frame number");
    if (tframe != frame)
      FAIL ("theora_granule_frame returned incorrect results");
    if (fabs(rate*(frame+1) - ttime) > 1.0e-6)
      FAIL ("theora_granule_time returned incorrect results");
  }

  /* clean up */
/*  INFO ("+ Freeing dummy frame data"); */
  free (framedata);

/*  INFO ("+ Clearing theora_info struct"); */
  theora_info_clear (&ti);

/*  INFO ("+ Clearing theora_state"); */
  theora_clear (&th);

  return 0;
}
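The test above relies on how a Theora granulepos packs two values: the frame number of the most recent keyframe in the upper bits and the distance from that keyframe in the lower shift bits, with the invariant keyframe + keydist == frame + 1. A small sketch of the decode, using the same arithmetic as the loop above (shift comes from theora_granule_shift(&ti), as in the test):

#include <theora/theora.h>

static void split_granulepos(ogg_int64_t granulepos, int shift,
                             ogg_int64_t *keyframe, ogg_int64_t *keydist)
{
    *keyframe = granulepos >> shift;                /* last keyframe number */
    *keydist  = granulepos - (*keyframe << shift);  /* frames since keyframe */
}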
Example No. 6
static int encode_frame(
    AVCodecContext* avc_context,
    uint8_t *outbuf,
    int buf_size,
    void *data)
{
    yuv_buffer t_yuv_buffer;
    TheoraContext *h = avc_context->priv_data;
    AVFrame *frame = data;
    ogg_packet o_packet;
    int result;

    assert(avc_context->pix_fmt == PIX_FMT_YUV420P);

    /* Copy planes to the theora yuv_buffer */
    if (frame->linesize[1] != frame->linesize[2]) {
        av_log(avc_context, AV_LOG_ERROR, "U and V stride differ\n");
        return -1;
    }

    t_yuv_buffer.y_width = avc_context->width;
    t_yuv_buffer.y_height = avc_context->height;
    t_yuv_buffer.y_stride = frame->linesize[0];
    t_yuv_buffer.uv_width = t_yuv_buffer.y_width / 2;
    t_yuv_buffer.uv_height = t_yuv_buffer.y_height / 2;
    t_yuv_buffer.uv_stride = frame->linesize[1];

    t_yuv_buffer.y = frame->data[0];
    t_yuv_buffer.u = frame->data[1];
    t_yuv_buffer.v = frame->data[2];

    /* Now call into theora_encode_YUVin */
    result = theora_encode_YUVin( &(h->t_state), &t_yuv_buffer );
    if (result != 0) {
        const char* message;
        switch (result) {
            case -1:
                message = "differing frame sizes";
                break;
            case OC_EINVAL:
                message = "encoder is not ready or is finished";
                break;
            default:
                message = "unknown reason";
                break;
        }
        av_log(avc_context, AV_LOG_ERROR, "theora_encode_YUVin failed (%s) [%d]\n", message, result);
        return -1;
    }

    /* Pick up returned ogg_packet */
    result = theora_encode_packetout( &(h->t_state), 0, &o_packet );
    switch (result) {
        case 0:
            /* No packet is ready */
            return 0;
        case 1:
            /* Success, we have a packet */
            break;
        default:
            av_log(avc_context, AV_LOG_ERROR, "theora_encode_packetout failed [%d]\n", result);
            return -1;
    }

    /* Copy ogg_packet content out to buffer */
    if (buf_size < o_packet.bytes) {
        av_log(avc_context, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }
    memcpy(outbuf, o_packet.packet, o_packet.bytes);

    return o_packet.bytes;
}
Example No. 7
static void *ucil_theora_encode_thread( ucil_theora_video_file_object_t *vobj )
{
   yuv_buffer yuv;
   double videopos = 0;
   double audiopos = 0;
   int gotpage = 0;
   unsigned char *ds_y_buffer = NULL;
   unsigned char *ds_u_buffer = NULL;
   unsigned char *ds_v_buffer = NULL;
   
   
   yuv.y_width = vobj->ti.width;
   yuv.y_height = vobj->ti.height;
   yuv.y_stride = vobj->ti.width;
   yuv.uv_width = vobj->ti.width / 2;
   yuv.uv_height = vobj->ti.height / 2;
   yuv.uv_stride = vobj->ti.width / 2;

   if( vobj->downsize > 1 || vobj->requires_resizing_frames )
   {
      ds_y_buffer = malloc( yuv.y_width * yuv.y_height );
      ds_u_buffer = malloc( yuv.uv_width * yuv.uv_height );
      ds_v_buffer = malloc( yuv.uv_width * yuv.uv_height );
   }

   vobj->last_frame = NULL;

   while( !vobj->quit_thread )
   {
      unicap_data_buffer_t *data_buffer;
      ogg_page og;

      sem_wait( &vobj->lock );
      data_buffer = ( unicap_data_buffer_t *)g_queue_pop_head( vobj->full_queue );
      sem_post( &vobj->lock );
      if( !data_buffer )
      {
	 audiopos = fetch_and_process_audio( vobj, audiopos );
	 usleep( 1000 );
	 continue;
      }

      if( vobj->frame_count == 0 )
      {
	 memcpy( &vobj->recording_start_time, &data_buffer->fill_time, sizeof( struct timeval ) );
      }

      audiopos = fetch_and_process_audio( vobj, audiopos );

/*       printf( "v: %f   a: %f\n", videopos, audiopos ); */

      if( vobj->audio && ( videopos > audiopos ) )
      {
	 data_buffer->flags &= ~UNICAP_FLAGS_BUFFER_LOCKED;
	 sem_wait( &vobj->lock );
	 g_queue_push_head( vobj->empty_queue, data_buffer );
	 sem_post( &vobj->lock );
	 continue;
      }


      if( vobj->fill_frames )
      {

	 if( vobj->audio )
	 {
	    if( vobj->last_frame )
	    {
	       unicap_data_buffer_t *last_data_buffer;
	       double streampos;
	       struct timeval streamtime;
	       
	       last_data_buffer = vobj->last_frame;
	       if( vobj->downsize > 1 || vobj->requires_resizing_frames )
	       {
		  yuv.y = ds_y_buffer;
		  yuv.u = ds_u_buffer;
		  yuv.v = ds_v_buffer;
	       }
	       else
	       {
		  yuv.y = last_data_buffer->data;
		  yuv.u = last_data_buffer->data + ( yuv.y_stride * yuv.y_height );
		  yuv.v = yuv.u + ( yuv.uv_stride * yuv.uv_height );
	       }

	       streamtime.tv_sec = data_buffer->fill_time.tv_sec - vobj->recording_start_time.tv_sec;
	       streamtime.tv_usec = data_buffer->fill_time.tv_usec;
	       if( data_buffer->fill_time.tv_usec < vobj->recording_start_time.tv_usec )
	       {
		  streamtime.tv_sec--;
		  streamtime.tv_usec += 1000000;
	       }
	       streamtime.tv_usec -= vobj->recording_start_time.tv_usec;
	       streampos = streamtime.tv_sec + ( (double)streamtime.tv_usec / 1000000.0f );
	       
		  
	       // If the streampos is ahead, this means that we get
	       // fewer than 30 frames per second.
	       // --> Fill up the stream
	       while( streampos > videopos )
	       {
/* 		  printf( "s v: %f   a: %f\n", videopos, audiopos ); */
		  gotpage = 0;
		  if( theora_encode_YUVin( &vobj->th, &yuv ) )
		  {
		     TRACE( "theora_encode_YUVin FAILED!\n" );
		  }
		  theora_encode_packetout( &vobj->th, 0, &vobj->op );
		  ogg_stream_packetin( &vobj->os, &vobj->op );
		  while( ogg_stream_pageout( &vobj->os, &og ) )
		  {
		     double gt;
		     fwrite( og.header, og.header_len, 1, vobj->f );
		     fwrite( og.body, og.body_len, 1, vobj->f );
		     
		     gt = theora_granule_time( &vobj->th, ogg_page_granulepos( &og ) );
		     if( gt < 0 )
		     {
			continue;
		     }

		     gotpage = 1;
		     videopos = gt;
/* 		     printf( "THEORA: %f\n", videopos ); */
		  }
		  if( !gotpage )
		  {
		     videopos += vobj->frame_interval / 1000000.0f;
		  }
		  
		  vobj->frame_count++;
		  audiopos = fetch_and_process_audio( vobj, audiopos );
	       }

	       // If the video lags behind the audio, keep encoding fill
	       // frames until it catches up.
	       while( ( videopos + ( vobj->frame_interval / 1000000.0f ) ) < audiopos ) 
	       {
/* 		  printf( "a v: %f   a: %f\n", videopos, audiopos ); */
		  gotpage = 0;
		  if( theora_encode_YUVin( &vobj->th, &yuv ) )
		  {
		     TRACE( "theora_encode_YUVin FAILED!\n" );
		  }
		  theora_encode_packetout( &vobj->th, 0, &vobj->op );
		  ogg_stream_packetin( &vobj->os, &vobj->op );
		  while( ogg_stream_pageout( &vobj->os, &og ) )
		  {
		     double gt;
		     fwrite( og.header, og.header_len, 1, vobj->f );
		     fwrite( og.body, og.body_len, 1, vobj->f );

		     gt = theora_granule_time( &vobj->th, ogg_page_granulepos( &og ) );
		     if( gt < 0 )
		     {
			continue;
		     }
		     
		     gotpage = 1;
		     videopos = gt;
/* 		     printf( "THEORA: %f\n", videopos ); */
		  }
		  if( !gotpage )
		  {
		     videopos += vobj->frame_interval / 1000000.0f;
		  }
		  
		  vobj->frame_count++;
		  audiopos = fetch_and_process_audio( vobj, audiopos );
	       }
	       last_data_buffer->flags &= ~UNICAP_FLAGS_BUFFER_LOCKED;
	       sem_wait( &vobj->lock );
	       g_queue_push_head( vobj->empty_queue, vobj->last_frame );
	       sem_post( &vobj->lock );
	       vobj->last_frame = NULL;
	    }
	 }
	 else
	 {
	    fill_frames( vobj, data_buffer, &yuv, ds_y_buffer, ds_u_buffer, ds_v_buffer );
	 }
	 
      }
      else // ( !vobj->fill_frames )
      {
	 if( vobj->last_frame )
	 {
	    unicap_data_buffer_t *last_data_buffer;
	    
	    last_data_buffer = vobj->last_frame;
	    last_data_buffer->flags &= ~UNICAP_FLAGS_BUFFER_LOCKED;
	    sem_wait( &vobj->lock );
	    g_queue_push_head( vobj->empty_queue, vobj->last_frame );
	    sem_post( &vobj->lock );
	 }
	 vobj->last_frame = NULL;
      }
      
      //
      // Encode the new buffer
      //
      if( vobj->encode_frame_cb )
      {
	 vobj->encode_frame_cb( UNICAP_EVENT_NEW_FRAME, NULL, data_buffer, vobj->encode_frame_cb_data );
      }
      vobj->frame_count++;

      if( vobj->downsize > 1 || vobj->requires_resizing_frames )
      {
	 downsize_yuv420p( vobj->format.size.width, vobj->format.size.height, vobj->downsize, 
			   vobj->ti.width, vobj->ti.height, 
			   ds_y_buffer, ds_u_buffer, ds_v_buffer, 
			   data_buffer->data, data_buffer->data + ( vobj->format.size.width * vobj->format.size.height ), 
			   data_buffer->data + ( vobj->format.size.width * vobj->format.size.height ) + 
			   ( ( vobj->format.size.width * vobj->format.size.height ) / 4 ) );
	 yuv.y = ds_y_buffer;
	 yuv.u = ds_u_buffer;
	 yuv.v = ds_v_buffer;
      }
      else
      {
	 yuv.y = data_buffer->data;
	 yuv.u = data_buffer->data + ( yuv.y_stride * yuv.y_height );
	 yuv.v = yuv.u + ( yuv.uv_stride * yuv.uv_height );
      }
      
      if( theora_encode_YUVin( &vobj->th, &yuv ) )
      {
	 TRACE( "theora_encode_YUVin FAILED!\n" );
      }
      memcpy( &vobj->last_frame_time, &data_buffer->fill_time, sizeof( struct timeval ) );

      vobj->last_frame = data_buffer;
      
      theora_encode_packetout( &vobj->th, 0, &vobj->op );
      ogg_stream_packetin( &vobj->os, &vobj->op );
/*       printf( "= v: %f   a: %f\n", videopos, audiopos ); */
      gotpage = 0;
      while( ogg_stream_pageout( &vobj->os, &og ) )
      {
	 double gt;
	 fwrite( og.header, og.header_len, 1, vobj->f );
	 fwrite( og.body, og.body_len, 1, vobj->f );

	 gt = theora_granule_time( &vobj->th, ogg_page_granulepos( &og ) );
	 if( gt < 0 )
	 {
	    continue;
	 }
	 
	 gotpage = 1;
	 videopos = gt;
/* 	 printf( "THEORA: %f\n", videopos ); */
      }
      if( !gotpage )
      {
	 videopos += vobj->frame_interval / 1000000.0f;
      }
   }

   if( vobj->last_frame )
   {
      // encode again to set eos
      unicap_data_buffer_t *last_data_buffer;
      ogg_page og;
      ogg_packet op;

#if HAVE_ALSA      
      if( vobj->audio && !vobj->async_audio_encoding )
      {
	 audiopos = fetch_and_process_audio( vobj, audiopos );
	 vorbis_analysis_wrote( &vobj->vd, 0 );
	 while( vorbis_analysis_blockout( &vobj->vd, &vobj->vb ) == 1 )
	 {
	    vorbis_analysis( &vobj->vb, NULL );
	    vorbis_bitrate_addblock( &vobj->vb );
	    while( vorbis_bitrate_flushpacket( &vobj->vd, &op ) )
	    {
	       ogg_stream_packetin( &vobj->vo, &op );
	    }
	 }
	 while( ogg_stream_pageout( &vobj->vo, &og ) )
	 {
	    fwrite( og.header, og.header_len, 1, vobj->f );
	    fwrite( og.body, og.body_len, 1, vobj->f );
	 }
      }
      else if( vobj->audio )
      {
	 audiopos = fetch_and_process_audio( vobj, audiopos );
      }
#endif

      last_data_buffer = vobj->last_frame;
      if( vobj->downsize > 1 || vobj->requires_resizing_frames )
      {
	 yuv.y = ds_y_buffer;
	 yuv.u = ds_u_buffer;
	 yuv.v = ds_v_buffer;
      }
      else
      {
         yuv.y = last_data_buffer->data;
         yuv.u = last_data_buffer->data + ( yuv.y_stride * yuv.y_height );
         yuv.v = yuv.u + ( yuv.uv_stride * yuv.uv_height );
      }
      if( theora_encode_YUVin( &vobj->th, &yuv ) )
      {
	 TRACE( "theora_encode_YUVin FAILED!\n" );
      }
      theora_encode_packetout( &vobj->th, 1, &vobj->op );
      ogg_stream_packetin( &vobj->os, &vobj->op );
      while( ogg_stream_pageout( &vobj->os, &og ) )
      {
/* 	 printf( "THEORA: %f\n", theora_granule_time( &vobj->th, ogg_page_granulepos( &og ) ) ); */
	 fwrite( og.header, og.header_len, 1, vobj->f );
	 fwrite( og.body, og.body_len, 1, vobj->f );
      }
      last_data_buffer->flags &= ~UNICAP_FLAGS_BUFFER_LOCKED;
      sem_wait( &vobj->lock );
      g_queue_push_head( vobj->empty_queue, vobj->last_frame );
      sem_post( &vobj->lock );

      vobj->last_frame = NULL;
   }

   return NULL;
}
Example No. 8
static void fill_frames( ucil_theora_video_file_object_t *vobj, unicap_data_buffer_t *data_buffer, yuv_buffer *yuv, 
			 unsigned char *ds_y_buffer, unsigned char *ds_u_buffer, unsigned char *ds_v_buffer )
{
   unsigned long long ctu;
   unsigned long long rtu;
   ogg_page og;

   ctu = timevaltousec( &data_buffer->fill_time );
   TRACE( "ctu: %lld\n", ctu );
   
   if( ctu )
   {
      unsigned long long etu;
      
      if( vobj->last_frame )
      {
	 unicap_data_buffer_t *last_data_buffer;
	 
	 last_data_buffer = vobj->last_frame;
	 if( vobj->downsize > 1 || vobj->requires_resizing_frames )
	 {
	    yuv->y = ds_y_buffer;
	    yuv->u = ds_u_buffer;
	    yuv->v = ds_v_buffer;
	 }
	 else
	 {
	    yuv->y = last_data_buffer->data;
	    yuv->u = last_data_buffer->data + ( yuv->y_stride * yuv->y_height );
	    yuv->v = yuv->u + ( yuv->uv_stride * yuv->uv_height );
	 }
	    
	 rtu = timevaltousec( &vobj->recording_start_time );
	 ctu -= rtu;
	 
	 etu = vobj->frame_interval * (unsigned long long)vobj->frame_count + (unsigned long long)vobj->frame_interval / 2;
	 
/* 		  TRACE( "ctu: %lld etu: %lld\n", ctu, etu ); */
	    
	 while( ctu > etu )
	 {
/* 		     TRACE( "Frame fill\n" );       */
	    if( theora_encode_YUVin( &vobj->th, yuv ) )
	    {
	       TRACE( "theora_encode_YUVin FAILED!\n" );
	    }
	    theora_encode_packetout( &vobj->th, 0, &vobj->op );
	    ogg_stream_packetin( &vobj->os, &vobj->op );
	    while( ogg_stream_pageout( &vobj->os, &og ) )
	    {
/* 		  printf( "THEORA: %f\n", theora_granule_time( &vobj->th, ogg_page_granulepos( &og ) ) ); */
	       fwrite( og.header, og.header_len, 1, vobj->f );
	       fwrite( og.body, og.body_len, 1, vobj->f );
	    }
	    etu += vobj->frame_interval;
	    vobj->frame_count++;
	    
	 }
	 
	 last_data_buffer->flags &= ~UNICAP_FLAGS_BUFFER_LOCKED;
	 sem_wait( &vobj->lock );
	 g_queue_push_head( vobj->empty_queue, vobj->last_frame );
	 sem_post( &vobj->lock );
      } 
   }
   else // (!ctu)
   {
      vobj->fill_frames = 0;
      if(  vobj->frame_count == 0 )
      {
	 TRACE( "Frame time information not available; can not fill frames\n" );
      }
   } 
}
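To make the fill condition in fill_frames concrete, a worked example with made-up numbers, assuming a roughly 30 fps stream (33333 microseconds per frame):

/* expected capture time of the next frame, biased by half an interval
   so a slightly late buffer does not trigger duplicate frames */
static unsigned long long expected_time_us(unsigned long long frame_interval,
                                           unsigned long long frame_count)
{
    return frame_interval * frame_count + frame_interval / 2;
}

/* expected_time_us(33333, 10) == 349996; a buffer whose ctu exceeds this
   makes the loop above re-encode the last frame until etu catches up */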
Example No. 9
int fetch_and_process_video(FILE *video,ogg_page *videopage,
                            ogg_stream_state *to,
                            theora_state *td,
                            int videoflag){
  /* You'll go to Hell for using static variables */
  static int          state=-1;
  static unsigned char *yuvframe[2];
  unsigned char        *line;
  yuv_buffer          yuv;
  ogg_packet          op;
  int i, e;

  if(state==-1){
        /* initialize the double frame buffer */
    yuvframe[0]=malloc(video_x*video_y*3/2);
    yuvframe[1]=malloc(video_x*video_y*3/2);

        /* clear initial frame as it may be larger than actual video data */
        /* fill Y plane with 0x10 and UV planes with 0X80, for black data */
    memset(yuvframe[0],0x10,video_x*video_y);
    memset(yuvframe[0]+video_x*video_y,0x80,video_x*video_y/2);
    memset(yuvframe[1],0x10,video_x*video_y);
    memset(yuvframe[1]+video_x*video_y,0x80,video_x*video_y/2);

    state=0;
  }

  /* is there a video page flushed?  If not, work until there is. */
  while(!videoflag){
    spinnit();

    if(ogg_stream_pageout(to,videopage)>0) return 1;
    if(ogg_stream_eos(to)) return 0;

    {
      /* read and process more video */
      /* video strategy reads one frame ahead so we know when we're
         at end of stream and can mark last video frame as such
         (vorbis audio has to flush one frame past last video frame
         due to overlap and thus doesn't need this extra work) */

      /* have two frame buffers full (if possible) before
         proceeding.  after first pass and until eos, one will
         always be full when we get here */

      for(i=state;i<2;i++){
        char c,frame[6];
        int ret=fread(frame,1,6,video);
        
	/* match and skip the frame header */
        if(ret<6)break;
        if(memcmp(frame,"FRAME",5)){
          fprintf(stderr,"Loss of framing in YUV input data\n");
          exit(1);
        }
        if(frame[5]!='\n'){
          int j;
          for(j=0;j<79;j++)
            if(fread(&c,1,1,video)&&c=='\n')break;
          if(j==79){
            fprintf(stderr,"Error parsing YUV frame header\n");
            exit(1);
          }
        }

        /* read the Y plane into our frame buffer with centering */
        line=yuvframe[i]+video_x*frame_y_offset+frame_x_offset;
        for(e=0;e<frame_y;e++){
          ret=fread(line,1,frame_x,video);
          if(ret!=frame_x) break;
          line+=video_x;
        }
        /* now get U plane*/
        line=yuvframe[i]+(video_x*video_y)
          +(video_x/2)*(frame_y_offset/2)+frame_x_offset/2;
        for(e=0;e<frame_y/2;e++){
          ret=fread(line,1,frame_x/2,video);
          if(ret!=frame_x/2) break;
          line+=video_x/2;
        }
        /* and the V plane*/
        line=yuvframe[i]+(video_x*video_y*5/4)
                  +(video_x/2)*(frame_y_offset/2)+frame_x_offset/2;
        for(e=0;e<frame_y/2;e++){
          ret=fread(line,1,frame_x/2,video);
          if(ret!=frame_x/2) break;
          line+=video_x/2;
        }
        state++;
      }

      if(state<1){
        /* can't get here unless YUV4MPEG stream has no video */
        fprintf(stderr,"Video input contains no frames.\n");
        exit(1);
      }

      /* Theora is a one-frame-in,one-frame-out system; submit a frame
         for compression and pull out the packet */

      {
        yuv.y_width=video_x;
        yuv.y_height=video_y;
        yuv.y_stride=video_x;

        yuv.uv_width=video_x/2;
        yuv.uv_height=video_y/2;
        yuv.uv_stride=video_x/2;

        yuv.y= yuvframe[0];
        yuv.u= yuvframe[0]+ video_x*video_y;
        yuv.v= yuvframe[0]+ video_x*video_y*5/4 ;
      }

      theora_encode_YUVin(td,&yuv);

      /* if there's only one frame, it's the last in the stream */
      if(state<2)
        theora_encode_packetout(td,1,&op);
      else
        theora_encode_packetout(td,0,&op);

      ogg_stream_packetin(to,&op);

      {
        unsigned char *temp=yuvframe[0];
        yuvframe[0]=yuvframe[1];
        yuvframe[1]=temp;
        state--;
      }

    }
  }
  return videoflag;
}
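A common thread through all nine examples: the second argument of theora_encode_packetout marks the submitted frame as the stream's last, which makes the returned packet carry the end-of-stream marker. A minimal drain helper in that spirit, assuming initialized theora_state and ogg_stream_state objects:

#include <ogg/ogg.h>
#include <theora/theora.h>

/* pull every pending packet out of the encoder into the ogg stream;
   pass last != 0 only after submitting the final frame */
static void drain_packets(theora_state *td, ogg_stream_state *to, int last)
{
    ogg_packet op;

    while (theora_encode_packetout(td, last, &op) > 0)
        ogg_stream_packetin(to, &op);
}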