Example #1
IoObject *IoTheoraDecodeContext_ycbcr(IoTheoraDecodeContext *self, IoObject *locals, IoMessage *m)
{
	/*doc TheoraDecodeContext ycbcr
	Returns an object containing the YUV data from the decoded frame.
	*/
	th_ycbcr_buffer buffer;
	int ret = th_decode_ycbcr_out(DATA(self)->ctx, buffer);
	IOASSERT(ret == 0, "th_decode_ycbcr_out failed");
	
	IoObject* yuv0 = IoObject_new(IOSTATE);
	IoObject* yuv1 = IoObject_new(IOSTATE);
	IoObject* yuv2 = IoObject_new(IOSTATE);
	IoObject_setSlot_to_(yuv0, IOSYMBOL("width"), IONUMBER(buffer[0].width));
	IoObject_setSlot_to_(yuv0, IOSYMBOL("height"), IONUMBER(buffer[0].height));
	IoObject_setSlot_to_(yuv0, IOSYMBOL("stride"), IONUMBER(buffer[0].stride));
	IoObject_setSlot_to_(yuv0, IOSYMBOL("data"), IOSEQ(buffer[0].data, buffer[0].stride * buffer[0].height));
	IoObject_setSlot_to_(yuv1, IOSYMBOL("width"), IONUMBER(buffer[1].width));
	IoObject_setSlot_to_(yuv1, IOSYMBOL("height"), IONUMBER(buffer[1].height));
	IoObject_setSlot_to_(yuv1, IOSYMBOL("stride"), IONUMBER(buffer[1].stride));
	IoObject_setSlot_to_(yuv1, IOSYMBOL("data"), IOSEQ(buffer[1].data, buffer[1].stride * buffer[1].height));
	IoObject_setSlot_to_(yuv2, IOSYMBOL("width"), IONUMBER(buffer[2].width));
	IoObject_setSlot_to_(yuv2, IOSYMBOL("height"), IONUMBER(buffer[2].height));
	IoObject_setSlot_to_(yuv2, IOSYMBOL("stride"), IONUMBER(buffer[2].stride));
	IoObject_setSlot_to_(yuv2, IOSYMBOL("data"), IOSEQ(buffer[2].data, buffer[2].stride * buffer[2].height));

	IoList* result = IoList_new(IOSTATE);
	IoList_rawAppend_(result, yuv0);
	IoList_rawAppend_(result, yuv1);
	IoList_rawAppend_(result, yuv2);
	return result;
}
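
Note: in every example on this page, th_decode_ycbcr_out fills a caller-provided th_ycbcr_buffer (an array of three th_img_plane structs for the Y', Cb and Cr planes) with pointers into the decoder's internal frame buffer, and those pointers stay valid only until the next call into the decoder. A minimal sketch of copying a plane out safely; the libtheora types and calls are real, the function name is illustrative:

#include <stddef.h>
#include <string.h>
#include <theora/theoradec.h>

/* Deep-copy one decoded plane out of the decoder-owned buffer.
 * `dst` must hold plane->width * plane->height bytes. The stride is
 * signed and may exceed the width, so copy row by row. */
static void copy_plane(unsigned char *dst, const th_img_plane *plane)
{
	int row;
	for (row = 0; row < plane->height; row++)
		memcpy(dst + (size_t)row * plane->width,
		       plane->data + (ptrdiff_t)row * plane->stride,
		       plane->width);
}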
Example #2
bool TheoraDecoder::TheoraVideoTrack::decodePacket(ogg_packet &oggPacket) {
	if (th_decode_packetin(_theoraDecode, &oggPacket, 0) == 0) {
		_curFrame++;

		// Convert YUV data to RGB data
		th_ycbcr_buffer yuv;
		th_decode_ycbcr_out(_theoraDecode, yuv);
		translateYUVtoRGBA(yuv);

		double time = th_granule_time(_theoraDecode, oggPacket.granulepos);

		// We need to calculate when the next frame should be shown
		// This is all in floating point because that's what the Ogg code gives us
		// Not every Ogg packet carries a granule position, so the time of the
		// next frame isn't always available. In such cases, we calculate it ourselves.
		if (time == -1.0)
			_nextFrameStartTime += _frameRate.getInverse().toDouble();
		else
			_nextFrameStartTime = time;

		return true;
	}

	return false;
}
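
Note: th_granule_time returns -1 when the decoder has no valid granule position, which is why the code above falls back to a fixed frame duration. A sketch of the same fallback expressed against th_info's rate fields; everything except the libtheora calls is a placeholder:

#include <theora/theoradec.h>

/* Advance the presentation clock after one decoded packet. */
static double next_frame_time(th_dec_ctx *ctx, const th_info *info,
                              const ogg_packet *op, double current)
{
	double t = th_granule_time(ctx, op->granulepos);
	if (t < 0.0) /* no granule position on this packet */
		return current + (double)info->fps_denominator /
		                 (double)info->fps_numerator;
	return t;
}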
Example #3
void VideoStreamPlaybackTheora::video_write(void) {
	th_ycbcr_buffer yuv;
	th_decode_ycbcr_out(td, yuv);

	int pitch = 4;
	frame_data.resize(size.x * size.y * pitch);
	{
		PoolVector<uint8_t>::Write w = frame_data.write();
		char *dst = (char *)w.ptr();

		//uv_offset=(ti.pic_x/2)+(yuv[1].stride)*(ti.pic_y/2);

		if (px_fmt == TH_PF_444) {

			yuv444_2_rgb8888((uint8_t *)dst, (uint8_t *)yuv[0].data, (uint8_t *)yuv[1].data, (uint8_t *)yuv[2].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x << 2, 0);

		} else if (px_fmt == TH_PF_422) {

			yuv422_2_rgb8888((uint8_t *)dst, (uint8_t *)yuv[0].data, (uint8_t *)yuv[1].data, (uint8_t *)yuv[2].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x << 2, 0);

		} else if (px_fmt == TH_PF_420) {

			yuv420_2_rgb8888((uint8_t *)dst, (uint8_t *)yuv[0].data, (uint8_t *)yuv[2].data, (uint8_t *)yuv[1].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x << 2, 0);
		};

		format = Image::FORMAT_RGBA8;
	}

	Ref<Image> img = memnew(Image(size.x, size.y, 0, Image::FORMAT_RGBA8, frame_data)); //zero copy image creation

	texture->set_data(img); //zero copy send to visual server

	frames_pending = 1;
}
Example #4
void krad_theora_decoder_decode(krad_theora_decoder_t *krad_theora, void *buffer, int len) {


	//printf("theora decode with %d\n", len);
	
	krad_theora->packet.packet = buffer;
	krad_theora->packet.bytes = len;
	krad_theora->packet.packetno++;
	
	th_decode_packetin(krad_theora->decoder, &krad_theora->packet, &krad_theora->granulepos);
	
	th_decode_ycbcr_out(krad_theora->decoder, krad_theora->ycbcr);

}
Example #5
/*
 * decode frame
 */
static mp_image_t *decode(sh_video_t *sh, void *data, int len, int flags)
{
    theora_struct_t *context = sh->context;
    int errorCode = 0;
    ogg_packet op;
    mp_image_t *mpi;

    // no delayed frames
    if (!data || !len)
        return NULL;

    memset(&op, 0, sizeof(op));
    op.bytes      = len;
    op.packet     = data;
    op.granulepos = -1;

    errorCode = th_decode_packetin(context->tctx, &op, NULL);
    if (errorCode < 0) {
        mp_msg(MSGT_DECVIDEO, MSGL_ERR, "Theora decode packetin failed: %i \n",
               errorCode);
        return NULL;
    }

    if (errorCode != TH_DUPFRAME) {
        errorCode = th_decode_ycbcr_out(context->tctx, context->ycbcrbuf);
        if (errorCode != 0) {
            mp_msg(MSGT_DECVIDEO, MSGL_ERR,
                   "Theora decode YUVout failed: %i \n", errorCode);
            return NULL;
        }
    }

    mpi = mpcodecs_get_image(sh, MP_IMGTYPE_EXPORT, 0,
                             context->ycbcrbuf[0].width,
                             context->ycbcrbuf[0].height);
    if (!mpi)
        return NULL;

    mpi->planes[0] = context->ycbcrbuf[0].data;
    mpi->stride[0] = context->ycbcrbuf[0].stride;
    mpi->planes[1] = context->ycbcrbuf[1].data;
    mpi->stride[1] = context->ycbcrbuf[1].stride;
    mpi->planes[2] = context->ycbcrbuf[2].data;
    mpi->stride[2] = context->ycbcrbuf[2].stride;

    return mpi;
}
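
Note: TH_DUPFRAME (a positive return from th_decode_packetin) means the packet encodes an exact duplicate of the previous frame, so the example above skips th_decode_ycbcr_out and keeps exporting the buffer it already holds. A condensed sketch of that control flow, with placeholder names:

#include <theora/theoradec.h>

/* Returns 1 if `ycbcr` holds a new frame, 0 if the previous frame
 * should simply be shown again, -1 on a decode error. */
static int decode_one(th_dec_ctx *ctx, ogg_packet *op, th_ycbcr_buffer ycbcr)
{
	int ret = th_decode_packetin(ctx, op, NULL);
	if (ret == TH_DUPFRAME)
		return 0;      /* frame unchanged */
	if (ret != 0)
		return -1;     /* TH_EFAULT, TH_EBADPACKET, ... */
	return th_decode_ycbcr_out(ctx, ycbcr) == 0 ? 1 : -1;
}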
Example #6
/*****************************************************************************
 * DecodePacket: decodes a Theora packet.
 *****************************************************************************/
static picture_t *DecodePacket( decoder_t *p_dec, ogg_packet *p_oggpacket )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    picture_t *p_pic;
    th_ycbcr_buffer ycbcr;

    /* TODO: Implement _granpos (3rd parameter here) and add the
	 * call to TH_DECCTL_SET_GRANPOS after seek */
    /* TODO: If the return is TH_DUPFRAME, we don't need to display a new
     * frame, but we do need to keep displaying the previous one. */
    if (th_decode_packetin( p_sys->tcx, p_oggpacket, NULL ) < 0)
        return NULL; /* bad packet */

    /* Check for keyframe */
    if( !(p_oggpacket->packet[0] & 0x80) /* data packet */ &&
        !(p_oggpacket->packet[0] & 0x40) /* intra frame */ )
        p_sys->b_decoded_first_keyframe = true;

    /* If we haven't seen a single keyframe yet, don't let Theora decode
     * anything, otherwise we'll get display artifacts.  (This is impossible
     * in the general case, but can happen if e.g. we play a network stream
     * using a timed URL, such that the server doesn't start the video with a
     * keyframe). */
    if( !p_sys->b_decoded_first_keyframe )
        return NULL; /* Wait until we've decoded the first keyframe */

    if( th_decode_ycbcr_out( p_sys->tcx, ycbcr ) ) /* returns 0 on success */
        return NULL;

    /* Get a new picture */
    if( decoder_UpdateVideoFormat( p_dec ) )
        return NULL;
    p_pic = decoder_NewPicture( p_dec );
    if( !p_pic ) return NULL;

    theora_CopyPicture( p_pic, ycbcr );

    p_pic->date = p_sys->i_pts;
    p_pic->b_progressive = true;

    return p_pic;
}
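
Note: the bit tests above follow the Theora bitstream layout: bit 0x80 of a packet's first byte marks a header packet, and in a data packet a cleared 0x40 bit marks an intra (key) frame; Examples #11 and #17 write the same test as (packet->packet[0] & 0x40) == 0. A small sketch, with an illustrative enum and function name:

#include <ogg/ogg.h>

enum packet_kind { PKT_HEADER, PKT_KEYFRAME, PKT_DELTAFRAME };

/* Classify a Theora packet from its first byte. Zero-length packets
 * are duplicate-frame packets and are treated as delta frames here. */
static enum packet_kind classify(const ogg_packet *op)
{
	if (op->bytes > 0 && (op->packet[0] & 0x80))
		return PKT_HEADER;
	if (op->bytes > 0 && !(op->packet[0] & 0x40))
		return PKT_KEYFRAME;
	return PKT_DELTAFRAME;
}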
Example #7
int theora_decode_YUVout(theora_state *_td,yuv_buffer *_yuv){
  th_api_wrapper  *api;
  th_ycbcr_buffer  buf;
  int              ret;

  api=(th_api_wrapper *)_td->i->codec_setup;
  ret=th_decode_ycbcr_out(api->decode,buf);
  if(ret>=0){
    _yuv->y_width=buf[0].width;
    _yuv->y_height=buf[0].height;
    _yuv->y_stride=buf[0].stride;
    _yuv->uv_width=buf[1].width;
    _yuv->uv_height=buf[1].height;
    _yuv->uv_stride=buf[1].stride;
    _yuv->y=buf[0].data;
    _yuv->u=buf[1].data;
    _yuv->v=buf[2].data;
  }

  return ret;
}
Example #8
void krad_theora_decoder_decode (krad_theora_decoder_t *krad_theora,
                                 void *buffer,
                                 int len) {

    int ret;

    krad_theora->packet.packet = buffer;
    krad_theora->packet.bytes = len;
    krad_theora->packet.packetno++;

    ret = th_decode_packetin (krad_theora->decoder,
                              &krad_theora->packet,
                              &krad_theora->granulepos);

    if (ret == 0) {
        th_decode_ycbcr_out (krad_theora->decoder, krad_theora->ycbcr);
    } else {

        printke ("theora decoder err! %d", ret);

        if (ret == TH_DUPFRAME) {
            printke ("theora decoder DUPFRAME!");
        }

        if (ret == TH_EFAULT) {
            printke ("theora decoder EFAULT!");
        }

        if (ret == TH_EBADPACKET) {
            printke ("theora decoder EBADPACKET!");
        }

        if (ret == TH_EIMPL) {
            printke ("theora decoder EIMPL!");
        }
    }
}
Example #9
yuv_buffer* TheoraDecoder::decodeTheora(StampedOggPacket* inPacket) 
{		
    //Accepts packet and deletes it.
    LOG(logDEBUG3) << __FUNCTIONW__;

	if (mPacketCount < 3) 
    {
		decodeHeader(inPacket);		//Accepts header and deletes it.

		if (mPacketCount == 3) 
        {
			mTheoraState = th_decode_alloc(&mTheoraInfo, mTheoraSetup);
			//TODO::: Post processing http://people.xiph.org/~tterribe/doc/libtheora-exp/theoradec_8h.html#a1
		}
		
        LOG(logDEBUG3) << __FUNCTIONW__ << " PacketCount under 3: " << mPacketCount;
		return NULL;
	} 
    else 
    {
		//if (mFirstPacket) 
        //{
		//	theora_decode_init(&mTheoraState, &mTheoraInfo);
		//	mFirstPacket = false;
		//}
		if (inPacket->packetSize() > 0 && (inPacket->packetData()[0] & 128) != 0) 
        {
			//Ignore header packets
			delete inPacket;

            LOG(logDEBUG3) << __FUNCTIONW__ << " Ignoring header packets";
			return NULL;
		}

		ogg_packet* locOldPack = simulateOldOggPacket(inPacket);		//Accepts the packet and deletes it.

		th_decode_packetin(mTheoraState, locOldPack, NULL);

        delete[] locOldPack->packet;	// array delete, assuming simulateOldOggPacket allocates with new[]
		delete locOldPack;
		
		th_decode_ycbcr_out(mTheoraState, mYCbCrBuffer);

		//TODO:::
		//This is slightly nasty for now... since changing the return type
		// will screw with other stuff
		//
		//Need to probably use the theora-exp buffer type and change all the
		// uses of yuv_buffer to handle this, and avoid assumptions about
		// the relative size of the Y and U and V buffer

		mYUVBuffer.y_width = mYCbCrBuffer[0].width;
		mYUVBuffer.y_height = mYCbCrBuffer[0].height;
		mYUVBuffer.y_stride = mYCbCrBuffer[0].stride;
		mYUVBuffer.y = mYCbCrBuffer[0].data;
		mYUVBuffer.uv_width = mYCbCrBuffer[1].width;
		mYUVBuffer.uv_height = mYCbCrBuffer[1].height;
		mYUVBuffer.uv_stride = mYCbCrBuffer[1].stride;
		mYUVBuffer.u = mYCbCrBuffer[1].data;
		mYUVBuffer.v = mYCbCrBuffer[2].data;
		
		return &mYUVBuffer;
	}
}
Example #10
void VideoStreamTheora::video_write(void){
	th_ycbcr_buffer yuv;
	int y_offset, uv_offset;
	th_decode_ycbcr_out(td,yuv);

	y_offset=(ti.pic_x&~1)+yuv[0].stride*(ti.pic_y&~1);

	/*
	{
		int pixels = size.x * size.y;
		frame_data.resize(pixels * 4);
		DVector<uint8_t>::Write w = frame_data.write();
		char* dst = (char*)w.ptr();
		int p = 0;
		for (int i=0; i<size.y; i++) {

			char *in_y  = (char *)yuv[0].data+y_offset+yuv[0].stride*i;
			char *out = dst + (int)size.x * 4 * i;
			for (int j=0;j<size.x;j++) {

				dst[p++] = in_y[j];
				dst[p++] = in_y[j];
				dst[p++] = in_y[j];
				dst[p++] = 255;
			};
		}
		format = Image::FORMAT_RGBA;
	}
	//	*/

	//*

	int pitch = 4;
	frame_data.resize(size.x * size.y * pitch);
	DVector<uint8_t>::Write w = frame_data.write();
	char* dst = (char*)w.ptr();

	uv_offset=(ti.pic_x/2)+(yuv[1].stride)*(ti.pic_y/2);

	if (px_fmt == TH_PF_444) {

		yuv444_2_rgb8888((uint8_t*)dst, (uint8_t*)yuv[0].data, (uint8_t*)yuv[1].data, (uint8_t*)yuv[2].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x<<2, 0);

	} else if (px_fmt == TH_PF_422) {

		yuv422_2_rgb8888((uint8_t*)dst, (uint8_t*)yuv[0].data, (uint8_t*)yuv[1].data, (uint8_t*)yuv[2].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x<<2, 0);

	} else if (px_fmt == TH_PF_420) {

		yuv420_2_rgb8888((uint8_t*)dst, (uint8_t*)yuv[0].data, (uint8_t*)yuv[2].data, (uint8_t*)yuv[1].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x<<2, 0);
	};

	format = Image::FORMAT_RGBA;

	/*

	if (px_fmt == TH_PF_444) {

		int pitch = 3;
		frame_data.resize(size.x * size.y * pitch);
		DVector<uint8_t>::Write w = frame_data.write();
		char* dst = (char*)w.ptr();

		for(int i=0;i<size.y;i++) {

			char *in_y  = (char *)yuv[0].data+y_offset+yuv[0].stride*i;
			char *out = dst + (int)size.x * pitch * i;
			char *in_u  = (char *)yuv[1].data+uv_offset+yuv[1].stride*i;
			char *in_v  = (char *)yuv[2].data+uv_offset+yuv[2].stride*i;
			for (int j=0;j<size.x;j++) {

				out[j*3+0] = in_y[j];
				out[j*3+1] = in_u[j];
				out[j*3+2] = in_v[j];
			};
		}

		format = Image::FORMAT_YUV_444;

	} else {

		int div;
		if (px_fmt!=TH_PF_422) {
			div = 2;
		}

		bool rgba = true;
		if (rgba) {

			int pitch = 4;
			frame_data.resize(size.x * size.y * pitch);
			DVector<uint8_t>::Write w = frame_data.write();
			char* dst = (char*)w.ptr();

			uv_offset=(ti.pic_x/2)+(yuv[1].stride)*(ti.pic_y / div);
			for(int i=0;i<size.y;i++) {
				char *in_y  = (char *)yuv[0].data+y_offset+yuv[0].stride*i;
				char *in_u  = (char *)yuv[1].data+uv_offset+yuv[1].stride*(i/div);
				char *in_v  = (char *)yuv[2].data+uv_offset+yuv[2].stride*(i/div);
				uint8_t *out = (uint8_t*)dst + (int)size.x * pitch * i;
				int ofs = 0;
				for (int j=0;j<size.x;j++) {

					uint8_t y, u, v;
					y = in_y[j];
					u = in_u[j/2];
					v = in_v[j/2];

					int32_t r = Math::fast_ftoi(1.164 * (y - 16) + 1.596 * (v - 128));
					int32_t g = Math::fast_ftoi(1.164 * (y - 16) - 0.813 * (v - 128) - 0.391 * (u - 128));
					int32_t b = Math::fast_ftoi(1.164 * (y - 16) + 2.018 * (u - 128));

					out[ofs++] = CLAMP(r, 0, 255);
					out[ofs++] = CLAMP(g, 0, 255);
					out[ofs++] = CLAMP(b, 0, 255);
					out[ofs++] = 255;
				}
			}

			format = Image::FORMAT_RGBA;

		} else {

			int pitch = 2;
			frame_data.resize(size.x * size.y * pitch);
			DVector<uint8_t>::Write w = frame_data.write();
			char* dst = (char*)w.ptr();

			uv_offset=(ti.pic_x/2)+(yuv[1].stride)*(ti.pic_y / div);
			for(int i=0;i<size.y;i++) {
				char *in_y  = (char *)yuv[0].data+y_offset+yuv[0].stride*i;
				char *out = dst + (int)size.x * pitch * i;
				for (int j=0;j<size.x;j++)
					out[j*2] = in_y[j];
				char *in_u  = (char *)yuv[1].data+uv_offset+yuv[1].stride*(i/div);
				char *in_v  = (char *)yuv[2].data+uv_offset+yuv[2].stride*(i/div);
				for (int j=0;j<(int)size.x>>1;j++) {
					out[j*4+1] = in_u[j];
					out[j*4+3] = in_v[j];
				}
			}

			format = Image::FORMAT_YUV_422;
		};
	};
	//	*/

	frames_pending = 1;
}
Example #11
static GstFlowReturn
theora_handle_data_packet (GstTheoraDec * dec, ogg_packet * packet,
    GstClockTime outtime, GstClockTime outdur)
{
  /* normal data packet */
  th_ycbcr_buffer buf;
  GstBuffer *out;
  gboolean keyframe;
  GstFlowReturn result;
  ogg_int64_t gp;

  if (G_UNLIKELY (!dec->have_header))
    goto not_initialized;

  /* get timestamp and durations */
  if (outtime == -1)
    outtime = dec->last_timestamp;
  if (outdur == -1)
    outdur = gst_util_uint64_scale_int (GST_SECOND, dec->info.fps_denominator,
        dec->info.fps_numerator);

  /* calculate expected next timestamp */
  if (outtime != -1 && outdur != -1)
    dec->last_timestamp = outtime + outdur;

  /* the second most significant bit of the first data byte is cleared 
   * for keyframes. We can only check it if it's not a zero-length packet. */
  keyframe = packet->bytes && ((packet->packet[0] & 0x40) == 0);
  if (G_UNLIKELY (keyframe)) {
    GST_DEBUG_OBJECT (dec, "we have a keyframe");
    dec->need_keyframe = FALSE;
  } else if (G_UNLIKELY (dec->need_keyframe)) {
    goto dropping;
  }

  GST_DEBUG_OBJECT (dec, "parsing data packet");

  /* this does the decoding */
  if (G_UNLIKELY (th_decode_packetin (dec->decoder, packet, &gp) < 0))
    goto decode_error;

  if (outtime != -1) {
    gboolean need_skip;
    GstClockTime running_time;
    GstClockTime earliest_time;
    gdouble proportion;

    /* qos needs to be done on running time */
    running_time = gst_segment_to_running_time (&dec->segment, GST_FORMAT_TIME,
        outtime);

    GST_OBJECT_LOCK (dec);
    proportion = dec->proportion;
    earliest_time = dec->earliest_time;
    /* check for QoS, don't perform the last steps of getting and
     * pushing the buffers that are known to be late. */
    need_skip = earliest_time != -1 && running_time <= earliest_time;
    GST_OBJECT_UNLOCK (dec);

    if (need_skip) {
      GstMessage *qos_msg;
      guint64 stream_time;
      gint64 jitter;

      GST_DEBUG_OBJECT (dec, "skipping decoding: qostime %"
          GST_TIME_FORMAT " <= %" GST_TIME_FORMAT,
          GST_TIME_ARGS (running_time), GST_TIME_ARGS (earliest_time));

      dec->dropped++;

      stream_time =
          gst_segment_to_stream_time (&dec->segment, GST_FORMAT_TIME, outtime);
      jitter = GST_CLOCK_DIFF (running_time, earliest_time);

      qos_msg =
          gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, running_time,
          stream_time, outtime, outdur);
      gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
      gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
          dec->processed, dec->dropped);
      gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);

      goto dropping_qos;
    }
  }

  /* this does postprocessing and set up the decoded frame
   * pointers in our yuv variable */
  if (G_UNLIKELY (th_decode_ycbcr_out (dec->decoder, buf) < 0))
    goto no_yuv;

  if (G_UNLIKELY ((buf[0].width != dec->info.frame_width)
          || (buf[0].height != dec->info.frame_height)))
    goto wrong_dimensions;

  result = theora_handle_image (dec, buf, &out);
  if (result != GST_FLOW_OK)
    return result;

  GST_BUFFER_OFFSET (out) = dec->frame_nr;
  if (dec->frame_nr != -1)
    dec->frame_nr++;
  GST_BUFFER_OFFSET_END (out) = dec->frame_nr;

  GST_BUFFER_TIMESTAMP (out) = outtime;
  GST_BUFFER_DURATION (out) = outdur;

  dec->processed++;

  if (dec->segment.rate >= 0.0)
    result = theora_dec_push_forward (dec, out);
  else
    result = theora_dec_push_reverse (dec, out);

  return result;

  /* ERRORS */
not_initialized:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("no header sent yet"));
    return GST_FLOW_ERROR;
  }
dropping:
  {
    GST_WARNING_OBJECT (dec, "dropping frame because we need a keyframe");
    dec->discont = TRUE;
    return GST_FLOW_OK;
  }
dropping_qos:
  {
    if (dec->frame_nr != -1)
      dec->frame_nr++;
    dec->discont = TRUE;
    GST_WARNING_OBJECT (dec, "dropping frame because of QoS");
    return GST_FLOW_OK;
  }
decode_error:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("theora decoder did not decode data packet"));
    return GST_FLOW_ERROR;
  }
no_yuv:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("couldn't read out YUV image"));
    return GST_FLOW_ERROR;
  }
wrong_dimensions:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, FORMAT,
        (NULL), ("dimensions of image do not match header"));
    return GST_FLOW_ERROR;
  }
}
Example #12
	bool VideoClip_Theora::_decodeNextFrame()
	{
		if (this->endOfFile)
		{
			return false;
		}
		VideoFrame* frame = this->frameQueue->requestEmptyFrame();
		if (frame == NULL)
		{
			return false; // max number of precached frames reached
		}
		bool shouldRestart = false;
		ogg_packet opTheora;
		ogg_int64_t granulePos;
		th_ycbcr_buffer buff;
		int result = 0;
		int attempts = 0;
		int status = 0;
		float time = 0.0f;
		unsigned long frameNumber = 0;
		while (true)
		{
			// ogg_stream_packetout can return -1 when the data is out of sync; the official docs
			// suggest retrying until it succeeds. We still cap the number of attempts, just in case.
			for (result = -1, attempts = 0; result < 0 && attempts < 100; ++attempts)
			{
				result = ogg_stream_packetout(&this->info.TheoraStreamState, &opTheora);
			}
			if (result > 0)
			{
				status = th_decode_packetin(this->info.TheoraDecoder, &opTheora, &granulePos);
				if (status != 0 && status != TH_DUPFRAME) // 0 means success
				{
					continue;
				}
				time = (float)th_granule_time(this->info.TheoraDecoder, granulePos);
				frameNumber = (unsigned long)th_granule_frame(this->info.TheoraDecoder, granulePos);
				// The % 16 check prevents a complete halt of video playback if the decoder can't keep up with demand.
				if (time < this->timer->getTime() && !this->restarted && frameNumber % 16 != 0)
				{
#ifdef _DEBUG_FRAMEDROP
					log(mName + ": pre-dropped frame " + str((int)frameNumber));
#endif
					++this->droppedFramesCount;
					continue; // drop frame
				}
				this->_setVideoFrameTimeToDisplay(frame, time - this->frameDuration);
				this->_setVideoFrameIteration(frame, this->iteration);
				this->_setVideoFrameFrameNumber(frame, (int)frameNumber);
				this->lastDecodedFrameNumber = frameNumber;
				th_decode_ycbcr_out(this->info.TheoraDecoder, buff);
				Theoraplayer_PixelTransform pixelTransform;
				memset(&pixelTransform, 0, sizeof(Theoraplayer_PixelTransform));
				pixelTransform.y = buff[0].data;	pixelTransform.yStride = buff[0].stride;
				pixelTransform.u = buff[1].data;	pixelTransform.uStride = buff[1].stride;
				pixelTransform.v = buff[2].data;	pixelTransform.vStride = buff[2].stride;
				frame->decode(&pixelTransform);
				break;
			}
			if (!this->_readData())
			{
				this->_setVideoFrameInUse(frame, false);
				shouldRestart = this->autoRestart;
				break;
			}
		}
		if (this->audioInterface != NULL)
		{
			Mutex::ScopeLock lock(this->audioMutex);
			this->_decodeAudio();
		}
		if (shouldRestart)
		{
			++this->iteration;
			this->_executeRestart();
		}
		return true;
	}
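
Note: Example #12 drops late frames after th_decode_packetin but before th_decode_ycbcr_out; every packet must still reach the decoder so that later frames reference the correct state. A condensed sketch, with placeholder names:

#include <theora/theoradec.h>

/* Feed the packet unconditionally, but only fetch pixels for frames
 * that are still on time relative to the playback clock `now`. */
static int decode_maybe_display(th_dec_ctx *ctx, ogg_packet *op,
                                double now, th_ycbcr_buffer out)
{
	ogg_int64_t gp;
	if (th_decode_packetin(ctx, op, &gp) != 0)
		return 0;      /* error or duplicate frame */
	if (th_granule_time(ctx, gp) < now)
		return 0;      /* late: decode-only, skip the output step */
	return th_decode_ycbcr_out(ctx, out) == 0;
}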
Example #13
void TheoraPlayer::Update(float32 timeElapsed)
{
    if(!isPlaying)
        return;
        
    videoTime += timeElapsed;
    
    currFrameTime += timeElapsed;
    if(currFrameTime < frameTime)
    {
        return;
    }
    else
    {
        currFrameTime -= frameTime;
    }
    
    int ret;
    
    while(theora_p && !isVideoBufReady)
    {
        ret = ogg_stream_packetout(&theoraData->state, &theoraData->packet);
        if(ret > 0)
        {
            if(pp_inc)
            {
                pp_level += pp_inc;
                th_decode_ctl(theoraData->thCtx, TH_DECCTL_SET_PPLEVEL, &pp_level, sizeof(pp_level));
                pp_inc = 0;
            }
            if(theoraData->packet.granulepos >= 0)
                th_decode_ctl(theoraData->thCtx, TH_DECCTL_SET_GRANPOS, &theoraData->packet.granulepos, sizeof(theoraData->packet.granulepos));

            if(th_decode_packetin(theoraData->thCtx, &theoraData->packet, &theoraData->videoBufGranulePos) == 0)
            {
                if((videoBufTime = th_granule_time(theoraData->thCtx, theoraData->videoBufGranulePos)) >= videoTime)
                    isVideoBufReady = true;
                else
                    pp_inc = (pp_level > 0)? -1 : 0;
            }
        }
        else
        {
            isVideoBufReady = false;
            break;
        }
    }
    
    if(!isVideoBufReady)
    {
        BufferData();
        while(ogg_sync_pageout(&theoraData->syncState, &theoraData->page) > 0)
            ogg_stream_pagein(&theoraData->state, &theoraData->page);
    }
    
    if(isVideoBufReady)
    {
        isVideoBufReady = false;
        ret = th_decode_ycbcr_out(theoraData->thCtx, theoraData->yuvBuffer);
    
        for(int i = 0; i < frameBufferH; i++) //Y
        {
            int yShift = 0, uShift = 0, vShift = 0;
            const bool inBuffer = (i < theoraData->yuvBuffer[0].height);
            if(inBuffer)
            {
                yShift = theoraData->yuvBuffer[0].stride * i;
                uShift = theoraData->yuvBuffer[1].stride * (i / 2);
                vShift = theoraData->yuvBuffer[2].stride * (i / 2);
            }
            
            for(int j = 0; j < frameBufferW; j++) //X
            {
                const int index = (i * frameBufferW + j) * 4;
                
                if(inBuffer && j < theoraData->yuvBuffer[0].width)
                {
                    const unsigned char Y = *(theoraData->yuvBuffer[0].data + yShift + j);
                    const unsigned char U = *(theoraData->yuvBuffer[1].data + uShift + j / 2);
                    const unsigned char V = *(theoraData->yuvBuffer[2].data + vShift + j / 2);
                
                    frameBuffer[index]   = ClampFloatToByte(Y + 1.371f * (V - 128));
                    frameBuffer[index+1] = ClampFloatToByte(Y - 0.698f * (V - 128) - 0.336f * (U - 128));
                    frameBuffer[index+2] = ClampFloatToByte(Y + 1.732f * (U - 128));
                    frameBuffer[index+3] = 255;
                }
                else
                {
                    memset(&frameBuffer[index], 0, 4 * sizeof(unsigned char));
                }
            }
        }
    
        if(!ret)
        {
            Texture * tex = Texture::CreateFromData(FORMAT_RGBA8888, frameBuffer, frameBufferW, frameBufferH, false);
            Sprite * spr = Sprite::CreateFromTexture(tex, 0, 0, tex->width, tex->height);
            spr->ConvertToVirtualSize();

            SafeRelease(tex);
            SetSprite(spr, 0);
            SafeRelease(spr);
        }
    }
    
    if(theora_p)
    {
        double tdiff = videoBufTime - videoTime;
        /*If we have lots of extra time, increase the post-processing level.*/
        if(tdiff > theoraData->thInfo.fps_denominator * 0.25f / theoraData->thInfo.fps_numerator)
        {
            pp_inc = (pp_level < pp_level_max) ? 1 : 0;
        }
        else if(tdiff < theoraData->thInfo.fps_denominator * 0.05 / theoraData->thInfo.fps_numerator)
        {
            pp_inc = (pp_level > 0)? -1 : 0;
        }
    }
    if(isRepeat && file->GetPos() == file->GetSize())
    {
        ReleaseData();
        OpenFile(filePath);
    }
}
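
Note: Theora's decoded Y'CbCr is normally studio-swing BT.601 (luma offset 16, excursion 219), so the 1.164-based coefficients in the commented-out code of Examples #10 and #16 match the spec, while the coefficients above skip the range expansion. A sketch of the standard fixed-point conversion for one pixel (298/256 = 1.164, and so on):

static unsigned char clamp255(int v)
{
	return (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

/* BT.601 studio-swing Y'CbCr -> RGB for one pixel. */
static void ycbcr_to_rgb(int y, int cb, int cr,
                         unsigned char *r, unsigned char *g, unsigned char *b)
{
	int c = y - 16, d = cb - 128, e = cr - 128;
	*r = clamp255((298 * c + 409 * e + 128) >> 8);
	*g = clamp255((298 * c - 100 * d - 208 * e + 128) >> 8);
	*b = clamp255((298 * c + 516 * d + 128) >> 8);
}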
Example #14
static void video_write(void){
  int i;
  th_ycbcr_buffer yuv;
  int y_offset, uv_offset;
  th_decode_ycbcr_out(td,yuv);
  /* Lock SDL_yuv_overlay */
  if ( SDL_MUSTLOCK(screen) ) {
    if ( SDL_LockSurface(screen) < 0 ) return;
  }
  if (SDL_LockYUVOverlay(yuv_overlay) < 0) return;

  /* let's draw the data on a SDL screen (*screen) */
  /* deal with border stride */
  /* reverse u and v for SDL */
  /* and crop input properly, respecting the encoded frame rect */
  /* problems may exist for odd frame rect for some encodings */

  y_offset=(ti.pic_x&~1)+yuv[0].stride*(ti.pic_y&~1);

  if (px_fmt==TH_PF_422) {
    uv_offset=(ti.pic_x/2)+(yuv[1].stride)*(ti.pic_y);
    /* SDL doesn't have a planar 4:2:2 */ 
    for(i=0;i<yuv_overlay->h;i++) {
      int j;
      char *in_y  = (char *)yuv[0].data+y_offset+yuv[0].stride*i;
      char *out = (char *)(yuv_overlay->pixels[0]+yuv_overlay->pitches[0]*i);
      for (j=0;j<yuv_overlay->w;j++)
        out[j*2] = in_y[j];
      char *in_u  = (char *)yuv[1].data+uv_offset+yuv[1].stride*i;
      char *in_v  = (char *)yuv[2].data+uv_offset+yuv[2].stride*i;
      for (j=0;j<yuv_overlay->w>>1;j++) {
        out[j*4+1] = in_u[j];
        out[j*4+3] = in_v[j];
      }
    }
  } else {
    uv_offset=(ti.pic_x/2)+(yuv[1].stride)*(ti.pic_y/2);
    for(i=0;i<yuv_overlay->h;i++)
      memcpy(yuv_overlay->pixels[0]+yuv_overlay->pitches[0]*i,
           yuv[0].data+y_offset+yuv[0].stride*i,
           yuv_overlay->w);
    for(i=0;i<yuv_overlay->h/2;i++){
      memcpy(yuv_overlay->pixels[1]+yuv_overlay->pitches[1]*i,
           yuv[2].data+uv_offset+yuv[2].stride*i,
           yuv_overlay->w/2);
      memcpy(yuv_overlay->pixels[2]+yuv_overlay->pitches[2]*i,
           yuv[1].data+uv_offset+yuv[1].stride*i,
           yuv_overlay->w/2);
    }
  }

  /* Unlock SDL_yuv_overlay */
  if ( SDL_MUSTLOCK(screen) ) {
    SDL_UnlockSurface(screen);
  }
  SDL_UnlockYUVOverlay(yuv_overlay);


  /* Show, baby, show! */
  SDL_DisplayYUVOverlay(yuv_overlay, &rect);

}
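
Note: the y_offset/uv_offset math above crops to the visible picture region: decoded planes are padded (frame dimensions are multiples of 16) and th_info's pic_x/pic_y locate the picture inside them, with the luma offset rounded toward even so it stays aligned with the subsampled chroma. A sketch of the 4:2:0 case as a helper; the function name is illustrative:

#include <theora/theoradec.h>

/* Byte offsets of the first visible pixel in the luma and chroma
 * planes for TH_PF_420, mirroring the else-branch above. */
static void crop_offsets(const th_info *ti, const th_ycbcr_buffer yuv,
                         int *y_offset, int *uv_offset)
{
	*y_offset  = (int)(ti->pic_x & ~1u) + yuv[0].stride * (int)(ti->pic_y & ~1u);
	*uv_offset = (int)(ti->pic_x / 2) + yuv[1].stride * (int)(ti->pic_y / 2);
}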
Example #15
const Graphics::Surface *TheoraDecoder::decodeNextFrame() {
	// First, let's get our frame
	while (_theoraPacket) {
		// theora is one in, one out...
		if (ogg_stream_packetout(&_theoraOut, &_oggPacket) > 0) {

			if (_ppInc) {
				_ppLevel += _ppInc;
				th_decode_ctl(_theoraDecode, TH_DECCTL_SET_PPLEVEL, &_ppLevel, sizeof(_ppLevel));
				_ppInc = 0;
			}

			if (th_decode_packetin(_theoraDecode, &_oggPacket, NULL) == 0) {
				_curFrame++;

				// Convert YUV data to RGB data
				th_ycbcr_buffer yuv;
				th_decode_ycbcr_out(_theoraDecode, yuv);
				translateYUVtoRGBA(yuv);

				if (_curFrame == 0)
					_startTime = g_system->getMillis();

				double time = th_granule_time(_theoraDecode, _oggPacket.granulepos);

				// We need to calculate when the next frame should be shown
				// This is all in floating point because that's what the Ogg code gives us
				// Not every Ogg packet carries a granule position, so the time of the
				// next frame isn't always available. In such cases, we calculate it ourselves.
				if (time == -1.0)
					_nextFrameStartTime += _frameRate.getInverse().toDouble();
				else
					_nextFrameStartTime = time;

				// break out
				break;
			}
		} else {
			// If we can't get any more frames, we're done.
			if (_theoraOut.e_o_s || _fileStream->eos()) {
				_endOfVideo = true;
				break;
			}

			// Queue more data
			bufferData();
			while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0)
				queuePage(&_oggPage);
		}

		// Update audio if we can
		queueAudio();
	}

	// Force at least some audio to be buffered
	// TODO: 5 is very arbitrary. We probably should do something like QuickTime does.
	while (!_endOfAudio && _audStream->numQueuedStreams() < 5) {
		bufferData();
		while (ogg_sync_pageout(&_oggSync, &_oggPage) > 0)
			queuePage(&_oggPage);

		bool queuedAudio = queueAudio();
		if ((_vorbisOut.e_o_s  || _fileStream->eos()) && !queuedAudio) {
			_endOfAudio = true;
			break;
		}
	}

	return &_displaySurface;
}
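
Note: Examples #13 and #15 raise or lower the post-processing level with TH_DECCTL_SET_PPLEVEL depending on how much decode-time headroom remains; the ceiling can be queried with TH_DECCTL_GET_PPLEVEL_MAX. A sketch using only the documented th_decode_ctl interface:

#include <theora/theoradec.h>

/* Clamp `level` to [0, max] and apply it to the decoder. */
static void set_pp_level(th_dec_ctx *ctx, int level)
{
	int max_level = 0;
	th_decode_ctl(ctx, TH_DECCTL_GET_PPLEVEL_MAX, &max_level, sizeof(max_level));
	if (level > max_level) level = max_level;
	if (level < 0) level = 0;
	th_decode_ctl(ctx, TH_DECCTL_SET_PPLEVEL, &level, sizeof(level));
}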
Example #16
void VideoStreamPlaybackTheora::video_write(void) {
	th_ycbcr_buffer yuv;
	th_decode_ycbcr_out(td, yuv);

	// FIXME: The way stuff is commented out with `//*/` closing comments
	// sounds very fishy...

	/*
	int y_offset, uv_offset;
	y_offset=(ti.pic_x&~1)+yuv[0].stride*(ti.pic_y&~1);

	{
		int pixels = size.x * size.y;
		frame_data.resize(pixels * 4);
		PoolVector<uint8_t>::Write w = frame_data.write();
		char* dst = (char*)w.ptr();
		int p = 0;
		for (int i=0; i<size.y; i++) {

			char *in_y  = (char *)yuv[0].data+y_offset+yuv[0].stride*i;
			char *out = dst + (int)size.x * 4 * i;
			for (int j=0;j<size.x;j++) {

				dst[p++] = in_y[j];
				dst[p++] = in_y[j];
				dst[p++] = in_y[j];
				dst[p++] = 255;
			};
		}
		format = Image::FORMAT_RGBA8;
	}
		//*/

	//*

	int pitch = 4;
	frame_data.resize(size.x * size.y * pitch);
	{
		PoolVector<uint8_t>::Write w = frame_data.write();
		char *dst = (char *)w.ptr();

		//uv_offset=(ti.pic_x/2)+(yuv[1].stride)*(ti.pic_y/2);

		if (px_fmt == TH_PF_444) {

			yuv444_2_rgb8888((uint8_t *)dst, (uint8_t *)yuv[0].data, (uint8_t *)yuv[1].data, (uint8_t *)yuv[2].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x << 2, 0);

		} else if (px_fmt == TH_PF_422) {

			yuv422_2_rgb8888((uint8_t *)dst, (uint8_t *)yuv[0].data, (uint8_t *)yuv[1].data, (uint8_t *)yuv[2].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x << 2, 0);

		} else if (px_fmt == TH_PF_420) {

			yuv420_2_rgb8888((uint8_t *)dst, (uint8_t *)yuv[0].data, (uint8_t *)yuv[2].data, (uint8_t *)yuv[1].data, size.x, size.y, yuv[0].stride, yuv[1].stride, size.x << 2, 0);
		};

		format = Image::FORMAT_RGBA8;
	}

	Ref<Image> img = memnew(Image(size.x, size.y, 0, Image::FORMAT_RGBA8, frame_data)); //zero copy image creation

	texture->set_data(img); //zero copy send to visual server

	/*

	if (px_fmt == TH_PF_444) {

		int pitch = 3;
		frame_data.resize(size.x * size.y * pitch);
		PoolVector<uint8_t>::Write w = frame_data.write();
		char* dst = (char*)w.ptr();

		for(int i=0;i<size.y;i++) {

			char *in_y  = (char *)yuv[0].data+y_offset+yuv[0].stride*i;
			char *out = dst + (int)size.x * pitch * i;
			char *in_u  = (char *)yuv[1].data+uv_offset+yuv[1].stride*i;
			char *in_v  = (char *)yuv[2].data+uv_offset+yuv[2].stride*i;
			for (int j=0;j<size.x;j++) {

				out[j*3+0] = in_y[j];
				out[j*3+1] = in_u[j];
				out[j*3+2] = in_v[j];
			};
		}

		format = Image::FORMAT_YUV_444;

	} else {

		int div;
		if (px_fmt!=TH_PF_422) {
			div = 2;
		}

		bool rgba = true;
		if (rgba) {

			int pitch = 4;
			frame_data.resize(size.x * size.y * pitch);
			PoolVector<uint8_t>::Write w = frame_data.write();
			char* dst = (char*)w.ptr();

			uv_offset=(ti.pic_x/2)+(yuv[1].stride)*(ti.pic_y / div);
			for(int i=0;i<size.y;i++) {
				char *in_y  = (char *)yuv[0].data+y_offset+yuv[0].stride*i;
				char *in_u  = (char *)yuv[1].data+uv_offset+yuv[1].stride*(i/div);
				char *in_v  = (char *)yuv[2].data+uv_offset+yuv[2].stride*(i/div);
				uint8_t *out = (uint8_t*)dst + (int)size.x * pitch * i;
				int ofs = 0;
				for (int j=0;j<size.x;j++) {

					uint8_t y, u, v;
					y = in_y[j];
					u = in_u[j/2];
					v = in_v[j/2];

					int32_t r = Math::fast_ftoi(1.164 * (y - 16) + 1.596 * (v - 128));
					int32_t g = Math::fast_ftoi(1.164 * (y - 16) - 0.813 * (v - 128) - 0.391 * (u - 128));
					int32_t b = Math::fast_ftoi(1.164 * (y - 16) + 2.018 * (u - 128));

					out[ofs++] = CLAMP(r, 0, 255);
					out[ofs++] = CLAMP(g, 0, 255);
					out[ofs++] = CLAMP(b, 0, 255);
					out[ofs++] = 255;
				}
			}

			format = Image::FORMAT_RGBA8;

		} else {

			int pitch = 2;
			frame_data.resize(size.x * size.y * pitch);
			PoolVector<uint8_t>::Write w = frame_data.write();
			char* dst = (char*)w.ptr();

			uv_offset=(ti.pic_x/2)+(yuv[1].stride)*(ti.pic_y / div);
			for(int i=0;i<size.y;i++) {
				char *in_y  = (char *)yuv[0].data+y_offset+yuv[0].stride*i;
				char *out = dst + (int)size.x * pitch * i;
				for (int j=0;j<size.x;j++)
					out[j*2] = in_y[j];
				char *in_u  = (char *)yuv[1].data+uv_offset+yuv[1].stride*(i/div);
				char *in_v  = (char *)yuv[2].data+uv_offset+yuv[2].stride*(i/div);
				for (int j=0;j<(int)size.x>>1;j++) {
					out[j*4+1] = in_u[j];
					out[j*4+3] = in_v[j];
				}
			}

			format = Image::FORMAT_YUV_422;
		};
	};
		//*/

	frames_pending = 1;
}
Example #17
static GstFlowReturn
theora_handle_data_packet (GstTheoraDec * dec, ogg_packet * packet,
    GstVideoCodecFrame * frame)
{
  /* normal data packet */
  th_ycbcr_buffer buf;
  gboolean keyframe;
  GstFlowReturn result;
  ogg_int64_t gp;

  if (G_UNLIKELY (!dec->have_header))
    goto not_initialized;

  /* the second most significant bit of the first data byte is cleared 
   * for keyframes. We can only check it if it's not a zero-length packet. */
  keyframe = packet->bytes && ((packet->packet[0] & 0x40) == 0);
  if (G_UNLIKELY (keyframe)) {
    GST_DEBUG_OBJECT (dec, "we have a keyframe");
    dec->need_keyframe = FALSE;
  } else if (G_UNLIKELY (dec->need_keyframe)) {
    goto dropping;
  }

  GST_DEBUG_OBJECT (dec, "parsing data packet");

  /* this does the decoding */
  if (G_UNLIKELY (th_decode_packetin (dec->decoder, packet, &gp) < 0))
    goto decode_error;

  if (frame &&
      (gst_video_decoder_get_max_decode_time (GST_VIDEO_DECODER (dec),
              frame) < 0))
    goto dropping_qos;

  /* this does postprocessing and set up the decoded frame
   * pointers in our yuv variable */
  if (G_UNLIKELY (th_decode_ycbcr_out (dec->decoder, buf) < 0))
    goto no_yuv;

  if (G_UNLIKELY ((buf[0].width != dec->info.frame_width)
          || (buf[0].height != dec->info.frame_height)))
    goto wrong_dimensions;

  result = theora_handle_image (dec, buf, frame);

  return result;

  /* ERRORS */
not_initialized:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("no header sent yet"));
    return GST_FLOW_ERROR;
  }
dropping:
  {
    GST_WARNING_OBJECT (dec, "dropping frame because we need a keyframe");
    return GST_CUSTOM_FLOW_DROP;
  }
dropping_qos:
  {
    GST_WARNING_OBJECT (dec, "dropping frame because of QoS");
    return GST_CUSTOM_FLOW_DROP;
  }
decode_error:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("theora decoder did not decode data packet"));
    return GST_FLOW_ERROR;
  }
no_yuv:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("couldn't read out YUV image"));
    return GST_FLOW_ERROR;
  }
wrong_dimensions:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, FORMAT,
        (NULL), ("dimensions of image do not match header"));
    return GST_FLOW_ERROR;
  }
}
Example #18
void TheoraVideoStream::threadedFillBackBuffer(double dt)
{
	// Synchronize
	frameSync->update(dt);
	double position = frameSync->getPosition();

	// Seeking backwards
	if (position < lastFrame)
		seekDecoder(position);

	// Until we are at the end of the stream, or we are displaying the right frame
	unsigned int lagCounter = 0;
	while (!demuxer.isEos() && position >= nextFrame)
	{
		// If we can't catch up, seek
		if (lagCounter++ > 5)
		{
			seekDecoder(position);
			lagCounter = 0;
		}

		th_ycbcr_buffer bufferinfo;
		th_decode_ycbcr_out(decoder, bufferinfo);

		ogg_int64_t granulePosition;
		do
		{
			if (demuxer.readPacket(packet))
				return;
		} while (th_decode_packetin(decoder, &packet, &granulePosition) != 0);
		lastFrame = nextFrame;
		nextFrame = th_granule_time(decoder, granulePosition);

		// Don't swap whilst we're writing to the backbuffer
		{
			love::thread::Lock l(bufferMutex);
			frameReady = false;
		}

		for (int y = 0; y < backBuffer->yh; ++y)
		{
			memcpy(backBuffer->yplane+backBuffer->yw*y,
					bufferinfo[0].data+
						bufferinfo[0].stride*(y+yPlaneYOffset)+yPlaneXOffset,
					backBuffer->yw);
		}

		for (int y = 0; y < backBuffer->ch; ++y)
		{
			memcpy(backBuffer->cbplane+backBuffer->cw*y,
					bufferinfo[1].data+
						bufferinfo[1].stride*(y+cPlaneYOffset)+cPlaneXOffset,
					backBuffer->cw);
		}

		for (int y = 0; y < backBuffer->ch; ++y)
		{
			memcpy(backBuffer->crplane+backBuffer->cw*y,
					bufferinfo[2].data+
						bufferinfo[2].stride*(y+cPlaneYOffset)+cPlaneXOffset,
					backBuffer->cw);
		}

		// Re-enable swapping
		{
			love::thread::Lock l(bufferMutex);
			frameReady = true;
		}
	}
}