/* Decode one normal (non-header) Theora data packet with the legacy
 * libtheora API and push the resulting video frame downstream.
 *
 * dec:     the decoder instance (must have seen the stream headers already).
 * packet:  the ogg packet holding the compressed frame data.
 * outtime: presentation timestamp for the decoded frame, or -1
 *          (GST_CLOCK_TIME_NONE) when unknown.
 *
 * Returns GST_FLOW_OK (also for deliberately dropped frames),
 * GST_FLOW_ERROR on decode/setup failures, or the flow result of the
 * downstream push / buffer allocation.
 */
static GstFlowReturn
theora_handle_data_packet (GstTheoraDec * dec, ogg_packet * packet,
    GstClockTime outtime)
{
  /* normal data packet */
  yuv_buffer yuv;
  GstBuffer *out;
  guint i;
  gboolean keyframe;
  gint out_size;
  gint stride_y, stride_uv;
  gint width, height;
  gint cwidth, cheight;
  GstFlowReturn result;

  /* refuse to decode before the three Theora header packets were processed */
  if (G_UNLIKELY (!dec->have_header))
    goto not_initialized;

  /* the second most significant bit of the first data byte is cleared
   * for keyframes. We can only check it if it's not a zero-length packet. */
  keyframe = packet->bytes && ((packet->packet[0] & 0x40) == 0);
  if (G_UNLIKELY (keyframe)) {
    GST_DEBUG_OBJECT (dec, "we have a keyframe");
    dec->need_keyframe = FALSE;
  } else if (G_UNLIKELY (dec->need_keyframe)) {
    /* delta frame but we are still waiting for a sync point: drop it */
    goto dropping;
  }

  GST_DEBUG_OBJECT (dec, "parsing data packet");

  /* this does the decoding */
  if (G_UNLIKELY (theora_decode_packetin (&dec->state, packet)))
    goto decode_error;

  if (outtime != -1) {
    gboolean need_skip;
    GstClockTime qostime;

    /* qos needs to be done on running time */
    qostime = gst_segment_to_running_time (&dec->segment, GST_FORMAT_TIME,
        outtime);

    GST_OBJECT_LOCK (dec);
    /* check for QoS, don't perform the last steps of getting and
     * pushing the buffers that are known to be late. */
    /* FIXME, we can also entirely skip decoding if the next valid buffer is
     * known to be after a keyframe (using the granule_shift) */
    need_skip = dec->earliest_time != -1 && qostime <= dec->earliest_time;
    GST_OBJECT_UNLOCK (dec);

    if (need_skip)
      goto dropping_qos;
  }

  /* this does postprocessing and set up the decoded frame
   * pointers in our yuv variable */
  if (G_UNLIKELY (theora_decode_YUVout (&dec->state, &yuv) < 0))
    goto no_yuv;

  /* sanity check: the decoded plane size must match what the headers said */
  if (G_UNLIKELY ((yuv.y_width != dec->info.width)
          || (yuv.y_height != dec->info.height)))
    goto wrong_dimensions;

  /* visible (cropped) frame size; chroma planes are subsampled 2x2 (I420) */
  width = dec->width;
  height = dec->height;
  cwidth = width / 2;
  cheight = height / 2;

  /* should get the stride from the caps, for now we round up to the nearest
   * multiple of 4 because some element needs it. chroma needs special
   * treatment, see videotestsrc. */
  stride_y = GST_ROUND_UP_4 (width);
  stride_uv = GST_ROUND_UP_8 (width) / 2;

  /* Y plane plus two half-height chroma planes */
  out_size = stride_y * height + stride_uv * cheight * 2;

  /* now copy over the area contained in offset_x,offset_y,
   * frame_width, frame_height */
  result = gst_pad_alloc_buffer_and_set_caps (dec->srcpad,
      GST_BUFFER_OFFSET_NONE, out_size, GST_PAD_CAPS (dec->srcpad), &out);
  if (G_UNLIKELY (result != GST_FLOW_OK))
    goto no_buffer;

  /* copy the visible region to the destination. This is actually pretty
   * complicated and gstreamer doesn't support all the needed caps to do this
   * correctly. For example, when we have an odd offset, we should only combine
   * 1 row/column of luma samples with one chroma sample in colorspace
   * conversion. We compensate for this by adding a black border around the
   * image when the offset or size is odd (see above). */
  {
    guchar *dest_y, *src_y;
    guchar *dest_u, *src_u;
    guchar *dest_v, *src_v;
    gint offset;

    /* destination planes laid out back to back: Y, then U, then V */
    dest_y = GST_BUFFER_DATA (out);
    dest_u = dest_y + stride_y * height;
    dest_v = dest_u + stride_uv * cheight;

    /* skip the crop offset into the decoder's padded luma plane */
    src_y = yuv.y + dec->offset_x + dec->offset_y * yuv.y_stride;

    for (i = 0; i < height; i++) {
      memcpy (dest_y, src_y, width);

      dest_y += stride_y;
      src_y += yuv.y_stride;
    }

    /* chroma crop offset is halved in both directions (4:2:0 subsampling) */
    offset = dec->offset_x / 2 + dec->offset_y / 2 * yuv.uv_stride;

    src_u = yuv.u + offset;
    src_v = yuv.v + offset;

    for (i = 0; i < cheight; i++) {
      memcpy (dest_u, src_u, cwidth);
      memcpy (dest_v, src_v, cwidth);

      dest_u += stride_uv;
      src_u += yuv.uv_stride;
      dest_v += stride_uv;
      src_v += yuv.uv_stride;
    }
  }

  /* buffer offsets carry the frame number; -1 means "unknown" */
  GST_BUFFER_OFFSET (out) = dec->frame_nr;
  if (dec->frame_nr != -1)
    dec->frame_nr++;
  GST_BUFFER_OFFSET_END (out) = dec->frame_nr;
  if (dec->granulepos != -1) {
    /* derive the duration from the granulepos: time of the frame after this
     * one minus our own timestamp */
    gint64 cf = _theora_granule_frame (dec, dec->granulepos) + 1;

    GST_BUFFER_DURATION (out) = gst_util_uint64_scale_int (cf * GST_SECOND,
        dec->info.fps_denominator, dec->info.fps_numerator) - outtime;
  } else {
    /* no granulepos: assume one nominal frame period */
    GST_BUFFER_DURATION (out) =
        gst_util_uint64_scale_int (GST_SECOND, dec->info.fps_denominator,
        dec->info.fps_numerator);
  }
  GST_BUFFER_TIMESTAMP (out) = outtime;

  /* forward playback pushes directly, reverse playback goes through the
   * reordering queue */
  if (dec->segment.rate >= 0.0)
    result = theora_dec_push_forward (dec, out);
  else
    result = theora_dec_push_reverse (dec, out);

  return result;

  /* ERRORS */
not_initialized:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("no header sent yet"));
    return GST_FLOW_ERROR;
  }
dropping:
  {
    /* not an error: mark a discontinuity and wait for the next keyframe */
    GST_WARNING_OBJECT (dec, "dropping frame because we need a keyframe");
    dec->discont = TRUE;
    return GST_FLOW_OK;
  }
dropping_qos:
  {
    /* frame was decoded but is too late; keep the frame counter in sync */
    if (dec->frame_nr != -1)
      dec->frame_nr++;
    dec->discont = TRUE;
    GST_WARNING_OBJECT (dec, "dropping frame because of QoS");
    return GST_FLOW_OK;
  }
decode_error:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("theora decoder did not decode data packet"));
    return GST_FLOW_ERROR;
  }
no_yuv:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("couldn't read out YUV image"));
    return GST_FLOW_ERROR;
  }
wrong_dimensions:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, FORMAT,
        (NULL), ("dimensions of image do not match header"));
    return GST_FLOW_ERROR;
  }
no_buffer:
  {
    /* downstream refused or failed to provide a buffer; propagate its reason */
    GST_DEBUG_OBJECT (dec, "could not get buffer, reason: %s",
        gst_flow_get_name (result));
    return result;
  }
}
/* Decode one normal (non-header) Theora data packet with the libtheora 1.0
 * (th_*) API and push the resulting video frame downstream.
 *
 * dec:     the decoder instance (must have seen the stream headers already).
 * packet:  the ogg packet holding the compressed frame data.
 * outtime: presentation timestamp, or -1 (GST_CLOCK_TIME_NONE) to continue
 *          from the previously predicted timestamp.
 * outdur:  frame duration, or -1 to fall back to the nominal frame period
 *          from the stream headers.
 *
 * Returns GST_FLOW_OK (also for deliberately dropped frames),
 * GST_FLOW_ERROR on decode failures, or the flow result of
 * theora_handle_image / the downstream push.
 */
static GstFlowReturn
theora_handle_data_packet (GstTheoraDec * dec, ogg_packet * packet,
    GstClockTime outtime, GstClockTime outdur)
{
  /* normal data packet */
  th_ycbcr_buffer buf;
  GstBuffer *out;
  gboolean keyframe;
  GstFlowReturn result;
  ogg_int64_t gp;

  /* refuse to decode before the three Theora header packets were processed */
  if (G_UNLIKELY (!dec->have_header))
    goto not_initialized;

  /* get timestamp and durations */
  if (outtime == -1)
    outtime = dec->last_timestamp;
  if (outdur == -1)
    outdur = gst_util_uint64_scale_int (GST_SECOND, dec->info.fps_denominator,
        dec->info.fps_numerator);

  /* calculate expected next timestamp */
  if (outtime != -1 && outdur != -1)
    dec->last_timestamp = outtime + outdur;

  /* the second most significant bit of the first data byte is cleared
   * for keyframes. We can only check it if it's not a zero-length packet. */
  keyframe = packet->bytes && ((packet->packet[0] & 0x40) == 0);
  if (G_UNLIKELY (keyframe)) {
    GST_DEBUG_OBJECT (dec, "we have a keyframe");
    dec->need_keyframe = FALSE;
  } else if (G_UNLIKELY (dec->need_keyframe)) {
    /* delta frame but we are still waiting for a sync point: drop it */
    goto dropping;
  }

  GST_DEBUG_OBJECT (dec, "parsing data packet");

  /* this does the decoding */
  if (G_UNLIKELY (th_decode_packetin (dec->decoder, packet, &gp) < 0))
    goto decode_error;

  if (outtime != -1) {
    gboolean need_skip;
    GstClockTime running_time;
    GstClockTime earliest_time;
    gdouble proportion;

    /* qos needs to be done on running time */
    running_time = gst_segment_to_running_time (&dec->segment, GST_FORMAT_TIME,
        outtime);

    /* snapshot the QoS state under the object lock */
    GST_OBJECT_LOCK (dec);
    proportion = dec->proportion;
    earliest_time = dec->earliest_time;
    /* check for QoS, don't perform the last steps of getting and
     * pushing the buffers that are known to be late. */
    need_skip = earliest_time != -1 && running_time <= earliest_time;
    GST_OBJECT_UNLOCK (dec);

    if (need_skip) {
      GstMessage *qos_msg;
      guint64 stream_time;
      gint64 jitter;

      GST_DEBUG_OBJECT (dec, "skipping decoding: qostime %"
          GST_TIME_FORMAT " <= %" GST_TIME_FORMAT,
          GST_TIME_ARGS (running_time), GST_TIME_ARGS (earliest_time));

      dec->dropped++;

      /* post a QOS message on the bus so applications can monitor drops */
      stream_time =
          gst_segment_to_stream_time (&dec->segment, GST_FORMAT_TIME, outtime);
      jitter = GST_CLOCK_DIFF (running_time, earliest_time);

      qos_msg =
          gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, running_time,
          stream_time, outtime, outdur);
      gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
      gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
          dec->processed, dec->dropped);
      gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);

      goto dropping_qos;
    }
  }

  /* this does postprocessing and set up the decoded frame
   * pointers in our yuv variable */
  if (G_UNLIKELY (th_decode_ycbcr_out (dec->decoder, buf) < 0))
    goto no_yuv;

  /* sanity check: the decoded luma plane must match the header dimensions */
  if (G_UNLIKELY ((buf[0].width != dec->info.frame_width)
          || (buf[0].height != dec->info.frame_height)))
    goto wrong_dimensions;

  /* crop/copy the decoded planes into a downstream buffer */
  result = theora_handle_image (dec, buf, &out);
  if (result != GST_FLOW_OK)
    return result;

  /* buffer offsets carry the frame number; -1 means "unknown" */
  GST_BUFFER_OFFSET (out) = dec->frame_nr;
  if (dec->frame_nr != -1)
    dec->frame_nr++;
  GST_BUFFER_OFFSET_END (out) = dec->frame_nr;

  GST_BUFFER_TIMESTAMP (out) = outtime;
  GST_BUFFER_DURATION (out) = outdur;

  dec->processed++;

  /* forward playback pushes directly, reverse playback goes through the
   * reordering queue */
  if (dec->segment.rate >= 0.0)
    result = theora_dec_push_forward (dec, out);
  else
    result = theora_dec_push_reverse (dec, out);

  return result;

  /* ERRORS */
not_initialized:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("no header sent yet"));
    return GST_FLOW_ERROR;
  }
dropping:
  {
    /* not an error: mark a discontinuity and wait for the next keyframe */
    GST_WARNING_OBJECT (dec, "dropping frame because we need a keyframe");
    dec->discont = TRUE;
    return GST_FLOW_OK;
  }
dropping_qos:
  {
    /* frame was decoded but is too late; keep the frame counter in sync */
    if (dec->frame_nr != -1)
      dec->frame_nr++;
    dec->discont = TRUE;
    GST_WARNING_OBJECT (dec, "dropping frame because of QoS");
    return GST_FLOW_OK;
  }
decode_error:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("theora decoder did not decode data packet"));
    return GST_FLOW_ERROR;
  }
no_yuv:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, DECODE,
        (NULL), ("couldn't read out YUV image"));
    return GST_FLOW_ERROR;
  }
wrong_dimensions:
  {
    GST_ELEMENT_ERROR (GST_ELEMENT (dec), STREAM, FORMAT,
        (NULL), ("dimensions of image do not match header"));
    return GST_FLOW_ERROR;
  }
}