static GstBuffer * gst_timidity_clip_buffer (GstTimidity * timidity, GstBuffer * buffer) { gint64 new_start, new_stop; gint64 offset, length; GstBuffer *out; /* clipping disabled for now */ return buffer; if (!gst_segment_clip (timidity->o_segment, GST_FORMAT_DEFAULT, GST_BUFFER_OFFSET (buffer), GST_BUFFER_OFFSET_END (buffer), &new_start, &new_stop)) { gst_buffer_unref (buffer); return NULL; } if (GST_BUFFER_OFFSET (buffer) == new_start && GST_BUFFER_OFFSET_END (buffer) == new_stop) return buffer; offset = new_start - GST_BUFFER_OFFSET (buffer); length = new_stop - new_start; out = gst_buffer_create_sub (buffer, offset * timidity->bytes_per_frame, length * timidity->bytes_per_frame); GST_BUFFER_OFFSET (out) = new_start; GST_BUFFER_OFFSET_END (out) = new_stop; GST_BUFFER_TIMESTAMP (out) = new_start * timidity->time_per_frame; GST_BUFFER_DURATION (out) = (new_stop - new_start) * timidity->time_per_frame; gst_buffer_unref (buffer); return out; }
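/* Editorial sketch, not from the corpus above: a minimal illustration of the
 * gst_segment_clip() contract all of these clip functions rely on. It returns
 * FALSE when [start, stop) lies entirely outside the segment; otherwise it
 * writes the intersection into the out parameters. The segment bounds below
 * are assumed values for illustration. */
static void
segment_clip_example (void)
{
  GstSegment segment;
  guint64 cstart, cstop;

  gst_segment_init (&segment, GST_FORMAT_TIME);
  segment.start = 1 * GST_SECOND;
  segment.stop = 3 * GST_SECOND;

  /* overlapping range is clipped to [1s, 3s) */
  if (gst_segment_clip (&segment, GST_FORMAT_TIME, 0, 5 * GST_SECOND,
          &cstart, &cstop))
    g_print ("clipped to %" GST_TIME_FORMAT " -- %" GST_TIME_FORMAT "\n",
        GST_TIME_ARGS (cstart), GST_TIME_ARGS (cstop));

  /* fully disjoint range returns FALSE: callers drop the buffer */
  if (!gst_segment_clip (&segment, GST_FORMAT_TIME, 4 * GST_SECOND,
          5 * GST_SECOND, &cstart, &cstop))
    g_print ("out of segment\n");
}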
static void new_dvb_subtitles_cb (DvbSub * dvb_sub, DVBSubtitles * subs, gpointer user_data) { GstDVBSubOverlay *overlay = GST_DVBSUB_OVERLAY (user_data); int max_page_timeout; guint64 start, stop; max_page_timeout = g_atomic_int_get (&overlay->max_page_timeout); if (max_page_timeout > 0) subs->page_time_out = MIN (subs->page_time_out, max_page_timeout); GST_INFO_OBJECT (overlay, "New DVB subtitles arrived with a page_time_out of %d and %d regions for " "PTS=%" G_GUINT64_FORMAT ", which should be at time %" GST_TIME_FORMAT, subs->page_time_out, subs->num_rects, subs->pts, GST_TIME_ARGS (subs->pts)); /* spec says page_time_out is not to be taken very accurately anyway, * and 0 does not make useful sense */ if (!subs->page_time_out) { GST_WARNING_OBJECT (overlay, "overriding page_time_out 0"); subs->page_time_out = 1; } /* clip and convert to running time */ start = subs->pts; stop = subs->pts + subs->page_time_out; if (!(gst_segment_clip (&overlay->subtitle_segment, GST_FORMAT_TIME, start, stop, &start, &stop))) goto out_of_segment; subs->page_time_out = stop - start; start = gst_segment_to_running_time (&overlay->subtitle_segment, GST_FORMAT_TIME, start); g_assert (GST_CLOCK_TIME_IS_VALID (start)); subs->pts = start; GST_DEBUG_OBJECT (overlay, "SUBTITLE real running time: %" GST_TIME_FORMAT, GST_TIME_ARGS (start)); g_queue_push_tail (overlay->pending_subtitles, subs); overlay->pending_sub = FALSE; return; out_of_segment: { GST_DEBUG_OBJECT (overlay, "subtitle out of segment, discarding"); dvb_subtitles_free (subs); } }
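/* Editorial sketch (assumed values): gst_segment_to_running_time(), used
 * above to map a clipped segment position onto the running-time axis that
 * sinks synchronise against. */
static void
running_time_example (void)
{
  GstSegment segment;
  guint64 rt;

  gst_segment_init (&segment, GST_FORMAT_TIME);
  segment.start = 2 * GST_SECOND;

  /* position 3s in a segment starting at 2s is 1s of running time */
  rt = gst_segment_to_running_time (&segment, GST_FORMAT_TIME, 3 * GST_SECOND);
  g_print ("running time: %" GST_TIME_FORMAT "\n", GST_TIME_ARGS (rt));
}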
static GstFlowReturn gst_base_video_encoder_chain (GstPad * pad, GstBuffer * buf) { GstBaseVideoEncoder *base_video_encoder; GstBaseVideoEncoderClass *klass; GstVideoFrame *frame; if (!gst_pad_is_negotiated (pad)) { gst_buffer_unref (buf); return GST_FLOW_NOT_NEGOTIATED; } base_video_encoder = GST_BASE_VIDEO_ENCODER (gst_pad_get_parent (pad)); klass = GST_BASE_VIDEO_ENCODER_GET_CLASS (base_video_encoder); if (base_video_encoder->a.at_eos) { gst_buffer_unref (buf); g_object_unref (base_video_encoder); return GST_FLOW_UNEXPECTED; } if (base_video_encoder->sink_clipping) { gint64 start = GST_BUFFER_TIMESTAMP (buf); gint64 stop = start + GST_BUFFER_DURATION (buf); gint64 clip_start; gint64 clip_stop; if (!gst_segment_clip (&GST_BASE_VIDEO_CODEC (base_video_encoder)->segment, GST_FORMAT_TIME, start, stop, &clip_start, &clip_stop)) { GST_DEBUG ("clipping to segment dropped frame"); gst_buffer_unref (buf); goto done; } } frame = gst_base_video_codec_new_frame (GST_BASE_VIDEO_CODEC (base_video_encoder)); frame->sink_buffer = buf; frame->presentation_timestamp = GST_BUFFER_TIMESTAMP (buf); frame->presentation_duration = GST_BUFFER_DURATION (buf); frame->presentation_frame_number = base_video_encoder->presentation_frame_number; base_video_encoder->presentation_frame_number++; GST_BASE_VIDEO_CODEC (base_video_encoder)->frames = g_list_append (GST_BASE_VIDEO_CODEC (base_video_encoder)->frames, frame); klass->handle_frame (base_video_encoder, frame); done: g_object_unref (base_video_encoder); return GST_FLOW_OK; }
static gchar * parse_mpsub (ParserState * state, const gchar * line) { gchar *ret; float t1, t2; switch (state->state) { case 0: /* looking for two floats (offset, duration) */ if (sscanf (line, "%f %f", &t1, &t2) == 2) { state->state = 1; state->start_time += state->duration + GST_SECOND * t1; state->duration = GST_SECOND * t2; } return NULL; case 1: { /* No need to parse that text if it's out of segment */ gint64 clip_start = 0, clip_stop = 0; gboolean in_seg = FALSE; /* Check our segment start/stop */ in_seg = gst_segment_clip (state->segment, GST_FORMAT_TIME, state->start_time, state->start_time + state->duration, &clip_start, &clip_stop); if (in_seg) { state->start_time = clip_start; state->duration = clip_stop - clip_start; } else { state->state = 0; return NULL; } } /* looking for subtitle text; empty line ends this * subtitle entry */ if (state->buf->len) g_string_append_c (state->buf, '\n'); g_string_append (state->buf, line); if (strlen (line) == 0) { ret = g_strdup (state->buf->str); g_string_truncate (state->buf, 0); state->state = 0; return ret; } return NULL; default: g_assert_not_reached (); return NULL; } }
static GstPadProbeReturn handle_output (GstPad * pad, GstPadProbeInfo * info, StreamInfo * si) { GstClockTime start, end; GstBuffer *buf; GST_LOG_OBJECT (pad, "Fired probe type 0x%x", info->type); if (info->type & GST_PAD_PROBE_TYPE_BUFFER_LIST) { g_warning ("Buffer list handling not implemented"); return GST_PAD_PROBE_DROP; } if (info->type & GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM) { GstEvent *event = gst_pad_probe_info_get_event (info); switch (GST_EVENT_TYPE (event)) { case GST_EVENT_SEGMENT: gst_event_copy_segment (event, &si->seg); break; case GST_EVENT_EOS: dump_times (si); break; default: break; } return GST_PAD_PROBE_PASS; } buf = gst_pad_probe_info_get_buffer (info); if (!GST_BUFFER_PTS_IS_VALID (buf)) goto done; end = start = GST_BUFFER_PTS (buf); if (GST_BUFFER_DURATION_IS_VALID (buf)) end += GST_BUFFER_DURATION (buf); /* skip buffers that fall entirely outside the segment */ if (!gst_segment_clip (&si->seg, GST_FORMAT_TIME, start, end, &start, &end)) goto done; start = gst_segment_to_stream_time (&si->seg, GST_FORMAT_TIME, start); end = gst_segment_to_stream_time (&si->seg, GST_FORMAT_TIME, end); GST_DEBUG_OBJECT (pad, "new buffer %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT, GST_TIME_ARGS (start), GST_TIME_ARGS (end)); /* Now extend measured time range to include new times */ extend_times (si, start, end); done: return GST_PAD_PROBE_PASS; }
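/* Editorial sketch: how a probe like handle_output above is typically
 * attached. The element and pad name are assumptions; gst_pad_add_probe()
 * and the probe mask are standard GStreamer 1.0 API. */
static void
attach_output_probe (GstElement * sink, StreamInfo * si)
{
  GstPad *pad = gst_element_get_static_pad (sink, "sink");

  gst_pad_add_probe (pad,
      GST_PAD_PROBE_TYPE_BUFFER | GST_PAD_PROBE_TYPE_BUFFER_LIST |
      GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM,
      (GstPadProbeCallback) handle_output, si, NULL);
  gst_object_unref (pad);
}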
static GstFlowReturn gst_smart_encoder_push_pending_gop (GstSmartEncoder * smart_encoder) { guint64 cstart, cstop; GList *tmp; GstFlowReturn res = GST_FLOW_OK; GST_DEBUG ("Pushing pending GOP (%" GST_TIME_FORMAT " -- %" GST_TIME_FORMAT ")", GST_TIME_ARGS (smart_encoder->gop_start), GST_TIME_ARGS (smart_encoder->gop_stop)); /* If GOP is entirely within segment, just push downstream */ if (gst_segment_clip (smart_encoder->segment, GST_FORMAT_TIME, smart_encoder->gop_start, smart_encoder->gop_stop, &cstart, &cstop)) { if ((cstart != smart_encoder->gop_start) || (cstop != smart_encoder->gop_stop)) { GST_DEBUG ("GOP needs to be re-encoded from %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT, GST_TIME_ARGS (cstart), GST_TIME_ARGS (cstop)); res = gst_smart_encoder_reencode_gop (smart_encoder); } else { /* The whole GOP is within the segment, push all pending buffers downstream */ GST_DEBUG ("GOP doesn't need to be modified, pushing downstream"); for (tmp = smart_encoder->pending_gop; tmp; tmp = tmp->next) { GstBuffer *buf = (GstBuffer *) tmp->data; /* on flow error, release the remaining buffers instead of leaking them */ if (G_LIKELY (res == GST_FLOW_OK)) res = gst_pad_push (smart_encoder->srcpad, buf); else gst_buffer_unref (buf); } } } else { /* The whole GOP is outside the segment, there's most likely * a bug somewhere. */ GST_WARNING ("GOP is entirely outside of the segment, upstream gave us too much data"); for (tmp = smart_encoder->pending_gop; tmp; tmp = tmp->next) { gst_buffer_unref ((GstBuffer *) tmp->data); } } if (smart_encoder->pending_gop) { g_list_free (smart_encoder->pending_gop); smart_encoder->pending_gop = NULL; } smart_encoder->gop_start = GST_CLOCK_TIME_NONE; smart_encoder->gop_stop = GST_CLOCK_TIME_NONE; return res; }
static GstBuffer * gst_wildmidi_clip_buffer (GstWildmidi * wildmidi, GstBuffer * buffer) { gint64 start, stop; gint64 new_start, new_stop; gint64 offset, length; GstBuffer *out; guint64 bpf; /* clipping disabled for now */ return buffer; start = GST_BUFFER_OFFSET (buffer); stop = GST_BUFFER_OFFSET_END (buffer); if (!gst_segment_clip (wildmidi->o_segment, GST_FORMAT_DEFAULT, start, stop, &new_start, &new_stop)) { gst_buffer_unref (buffer); return NULL; } if (start == new_start && stop == new_stop) return buffer; offset = new_start - start; length = new_stop - new_start; bpf = wildmidi->bytes_per_frame; out = gst_buffer_create_sub (buffer, offset * bpf, length * bpf); GST_BUFFER_OFFSET (out) = new_start; GST_BUFFER_OFFSET_END (out) = new_stop; GST_BUFFER_TIMESTAMP (out) = gst_util_uint64_scale_int (new_start, GST_SECOND, WILDMIDI_RATE); GST_BUFFER_DURATION (out) = gst_util_uint64_scale_int (new_stop, GST_SECOND, WILDMIDI_RATE) - GST_BUFFER_TIMESTAMP (out); gst_buffer_unref (buffer); return out; }
/* returns TRUE if the buffer is within the segment, else FALSE. * if the buffer is on the segment border, its timestamp and duration will be clipped */ static gboolean clip_buffer (GstTheoraDec * dec, GstBuffer * buf) { gboolean res = TRUE; GstClockTime in_ts, in_dur, stop; gint64 cstart, cstop; in_ts = GST_BUFFER_TIMESTAMP (buf); in_dur = GST_BUFFER_DURATION (buf); GST_LOG_OBJECT (dec, "timestamp:%" GST_TIME_FORMAT " , duration:%" GST_TIME_FORMAT, GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur)); /* can't clip without TIME segment */ if (dec->segment.format != GST_FORMAT_TIME) goto beach; /* we need a start time */ if (!GST_CLOCK_TIME_IS_VALID (in_ts)) goto beach; /* generate valid stop, if duration unknown, we have unknown stop */ stop = GST_CLOCK_TIME_IS_VALID (in_dur) ? (in_ts + in_dur) : GST_CLOCK_TIME_NONE; /* now clip */ if (!(res = gst_segment_clip (&dec->segment, GST_FORMAT_TIME, in_ts, stop, &cstart, &cstop))) goto beach; /* update timestamp and possibly duration if the clipped stop time is * valid */ GST_BUFFER_TIMESTAMP (buf) = cstart; if (GST_CLOCK_TIME_IS_VALID (cstop)) GST_BUFFER_DURATION (buf) = cstop - cstart; beach: GST_LOG_OBJECT (dec, "%sdropping", (res ? "not " : "")); return res; }
static gboolean buffer_clip (GstPngDec * dec, GstBuffer * buffer) { gboolean res = TRUE; gint64 cstart, cstop; if ((!GST_CLOCK_TIME_IS_VALID (GST_BUFFER_TIMESTAMP (buffer))) || (!GST_CLOCK_TIME_IS_VALID (GST_BUFFER_DURATION (buffer))) || (dec->segment.format != GST_FORMAT_TIME)) goto beach; if ((res = gst_segment_clip (&dec->segment, GST_FORMAT_TIME, GST_BUFFER_TIMESTAMP (buffer), GST_BUFFER_TIMESTAMP (buffer) + GST_BUFFER_DURATION (buffer), &cstart, &cstop))) { GST_BUFFER_TIMESTAMP (buffer) = cstart; GST_BUFFER_DURATION (buffer) = cstop - cstart; } beach: return res; }
static GstBuffer * gst_fluid_dec_clip_buffer (GstFluidDec * fluiddec, GstBuffer * buffer) { guint64 start, stop; guint64 new_start, new_stop; gint64 offset, length; /* clipping disabled for now */ return buffer; start = GST_BUFFER_OFFSET (buffer); stop = GST_BUFFER_OFFSET_END (buffer); if (!gst_segment_clip (&fluiddec->segment, GST_FORMAT_DEFAULT, start, stop, &new_start, &new_stop)) { gst_buffer_unref (buffer); return NULL; } if (start == new_start && stop == new_stop) return buffer; offset = new_start - start; length = new_stop - new_start; buffer = gst_buffer_make_writable (buffer); /* offset and length are in frames but gst_buffer_resize() takes bytes; * 4 bytes per frame (16-bit stereo) */ gst_buffer_resize (buffer, offset * 4, length * 4); GST_BUFFER_OFFSET (buffer) = new_start; GST_BUFFER_OFFSET_END (buffer) = new_stop; GST_BUFFER_TIMESTAMP (buffer) = gst_util_uint64_scale_int (new_start, GST_SECOND, FLUID_DEC_RATE); GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale_int (new_stop, GST_SECOND, FLUID_DEC_RATE) - GST_BUFFER_TIMESTAMP (buffer); return buffer; }
gboolean gst_kate_util_decoder_base_update_segment (GstKateDecoderBase * decoder, GstElement * element, GstBuffer * buf) { gint64 clip_start = 0, clip_stop = 0; gboolean in_seg; if (decoder->kate_flushing) { GST_LOG_OBJECT (element, "Kate pad flushing, buffer ignored"); return FALSE; } if (G_LIKELY (GST_BUFFER_TIMESTAMP_IS_VALID (buf))) { GstClockTime stop; if (G_LIKELY (GST_BUFFER_DURATION_IS_VALID (buf))) stop = GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf); else stop = GST_CLOCK_TIME_NONE; in_seg = gst_segment_clip (&decoder->kate_segment, GST_FORMAT_TIME, GST_BUFFER_TIMESTAMP (buf), stop, &clip_start, &clip_stop); } else { in_seg = TRUE; } if (in_seg) { if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) { gst_segment_set_last_stop (&decoder->kate_segment, GST_FORMAT_TIME, clip_start); } } else { GST_INFO_OBJECT (element, "Kate buffer not in segment, ignored"); } return in_seg; }
static GstFlowReturn gst_audio_ringbuffer_render (GstAudioRingbuffer * ringbuffer, GstBuffer * buf) { GstRingBuffer *rbuf; gint bps, accum; guint size; guint samples, written, out_samples; gint64 diff, align, ctime, cstop; guint8 *data; guint64 in_offset; GstClockTime time, stop, render_start, render_stop, sample_offset; gboolean align_next; rbuf = ringbuffer->buffer; /* can't do anything when we don't have the device */ if (G_UNLIKELY (!gst_ring_buffer_is_acquired (rbuf))) goto wrong_state; bps = rbuf->spec.bytes_per_sample; size = GST_BUFFER_SIZE (buf); if (G_UNLIKELY ((size % bps) != 0)) goto wrong_size; samples = size / bps; out_samples = samples; in_offset = GST_BUFFER_OFFSET (buf); time = GST_BUFFER_TIMESTAMP (buf); GST_DEBUG_OBJECT (ringbuffer, "time %" GST_TIME_FORMAT ", offset %" G_GUINT64_FORMAT ", start %" GST_TIME_FORMAT ", samples %u", GST_TIME_ARGS (time), in_offset, GST_TIME_ARGS (ringbuffer->sink_segment.start), samples); data = GST_BUFFER_DATA (buf); stop = time + gst_util_uint64_scale_int (samples, GST_SECOND, rbuf->spec.rate); if (!gst_segment_clip (&ringbuffer->sink_segment, GST_FORMAT_TIME, time, stop, &ctime, &cstop)) goto out_of_segment; /* see if some clipping happened */ diff = ctime - time; if (diff > 0) { /* bring clipped time to samples */ diff = gst_util_uint64_scale_int (diff, rbuf->spec.rate, GST_SECOND); GST_DEBUG_OBJECT (ringbuffer, "clipping start to %" GST_TIME_FORMAT " %" G_GUINT64_FORMAT " samples", GST_TIME_ARGS (ctime), diff); samples -= diff; data += diff * bps; time = ctime; } diff = stop - cstop; if (diff > 0) { /* bring clipped time to samples */ diff = gst_util_uint64_scale_int (diff, rbuf->spec.rate, GST_SECOND); GST_DEBUG_OBJECT (ringbuffer, "clipping stop to %" GST_TIME_FORMAT " %" G_GUINT64_FORMAT " samples", GST_TIME_ARGS (cstop), diff); samples -= diff; stop = cstop; } /* bring buffer start and stop times to running time */ render_start = gst_segment_to_running_time (&ringbuffer->sink_segment, GST_FORMAT_TIME, time); render_stop = gst_segment_to_running_time (&ringbuffer->sink_segment, GST_FORMAT_TIME, stop); GST_DEBUG_OBJECT (ringbuffer, "running: start %" GST_TIME_FORMAT " - stop %" GST_TIME_FORMAT, GST_TIME_ARGS (render_start), GST_TIME_ARGS (render_stop)); /* and bring the time to the rate corrected offset in the buffer */ render_start = gst_util_uint64_scale_int (render_start, rbuf->spec.rate, GST_SECOND); render_stop = gst_util_uint64_scale_int (render_stop, rbuf->spec.rate, GST_SECOND); /* positive playback rate, first sample is render_start, negative rate, first * sample is render_stop. When no rate conversion is active, render exactly * the amount of input samples to avoid aligning to rounding errors. */ if (ringbuffer->sink_segment.rate >= 0.0) { sample_offset = render_start; if (ringbuffer->sink_segment.rate == 1.0) render_stop = sample_offset + samples; } else { sample_offset = render_stop; if (ringbuffer->sink_segment.rate == -1.0) render_start = sample_offset + samples; } /* always resync after a discont */ if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT))) { GST_DEBUG_OBJECT (ringbuffer, "resync after discont"); goto no_align; } /* resync when we don't know what to align the sample with */ if (G_UNLIKELY (ringbuffer->next_sample == -1)) { GST_DEBUG_OBJECT (ringbuffer, "no align possible: no previous sample position known"); goto no_align; } /* now try to align the sample to the previous one, first see how big the * difference is. 
*/ if (sample_offset >= ringbuffer->next_sample) diff = sample_offset - ringbuffer->next_sample; else diff = ringbuffer->next_sample - sample_offset; /* we tolerate half a second diff before we start resyncing. This * should be enough to compensate for various rounding errors in the timestamp * and sample offset position. We always resync if we got a discont anyway and * non-discont should be aligned by definition. */ if (G_LIKELY (diff < rbuf->spec.rate / DIFF_TOLERANCE)) { /* calc align with previous sample */ align = ringbuffer->next_sample - sample_offset; GST_DEBUG_OBJECT (ringbuffer, "align with prev sample, ABS (%" G_GINT64_FORMAT ") < %d", align, rbuf->spec.rate / DIFF_TOLERANCE); } else { /* bring sample diff to seconds for error message */ diff = gst_util_uint64_scale_int (diff, GST_SECOND, rbuf->spec.rate); /* timestamps drifted apart from previous samples too much, we need to * resync. We log this as an element warning. */ GST_ELEMENT_WARNING (ringbuffer, CORE, CLOCK, ("Compensating for audio synchronisation problems"), ("Unexpected discontinuity in audio timestamps of more " "than half a second (%" GST_TIME_FORMAT "), resyncing", GST_TIME_ARGS (diff))); align = 0; } ringbuffer->last_align = align; /* apply alignment */ render_start += align; render_stop += align; no_align: /* number of target samples is difference between start and stop */ out_samples = render_stop - render_start; /* we render the first or last sample first, depending on the rate */ if (ringbuffer->sink_segment.rate >= 0.0) sample_offset = render_start; else sample_offset = render_stop; GST_DEBUG_OBJECT (ringbuffer, "rendering at %" G_GUINT64_FORMAT " %d/%d", sample_offset, samples, out_samples); /* we need to accumulate over different runs for when we get interrupted */ accum = 0; align_next = TRUE; do { written = gst_ring_buffer_commit_full (rbuf, &sample_offset, data, samples, out_samples, &accum); GST_DEBUG_OBJECT (ringbuffer, "wrote %u of %u", written, samples); /* if we wrote all, we're done */ if (written == samples) break; GST_OBJECT_LOCK (ringbuffer); if (ringbuffer->flushing) goto flushing; GST_OBJECT_UNLOCK (ringbuffer); /* if we got interrupted, we cannot assume that the next sample should * be aligned to this one */ align_next = FALSE; samples -= written; data += written * bps; } while (TRUE); if (align_next) ringbuffer->next_sample = sample_offset; else ringbuffer->next_sample = -1; GST_DEBUG_OBJECT (ringbuffer, "next sample expected at %" G_GUINT64_FORMAT, ringbuffer->next_sample); if (GST_CLOCK_TIME_IS_VALID (stop) && stop >= ringbuffer->sink_segment.stop) { GST_DEBUG_OBJECT (ringbuffer, "start playback because we are at the end of segment"); gst_ring_buffer_start (rbuf); } return GST_FLOW_OK; /* SPECIAL cases */ out_of_segment: { GST_DEBUG_OBJECT (ringbuffer, "dropping sample out of segment time %" GST_TIME_FORMAT ", start %" GST_TIME_FORMAT, GST_TIME_ARGS (time), GST_TIME_ARGS (ringbuffer->sink_segment.start)); return GST_FLOW_OK; } /* ERRORS */ wrong_state: { GST_DEBUG_OBJECT (ringbuffer, "ringbuffer not negotiated"); GST_ELEMENT_ERROR (ringbuffer, STREAM, FORMAT, (NULL), ("ringbuffer not negotiated.")); return GST_FLOW_NOT_NEGOTIATED; } wrong_size: { GST_DEBUG_OBJECT (ringbuffer, "wrong size"); GST_ELEMENT_ERROR (ringbuffer, STREAM, WRONG_TYPE, (NULL), ("ringbuffer received buffer of wrong size.")); return GST_FLOW_ERROR; } flushing: { GST_DEBUG_OBJECT (ringbuffer, "ringbuffer is flushing"); GST_OBJECT_UNLOCK (ringbuffer); return GST_FLOW_WRONG_STATE; } }
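/* Editorial sketch (assumed rate): the time<->sample conversions used in the
 * render function above. gst_util_uint64_scale_int (v, num, denom) computes
 * v * num / denom without 64-bit overflow. */
static void
time_sample_example (void)
{
  gint rate = 44100;
  GstClockTime t = 500 * GST_MSECOND;
  guint64 samples = gst_util_uint64_scale_int (t, rate, GST_SECOND);
  GstClockTime back = gst_util_uint64_scale_int (samples, GST_SECOND, rate);

  g_print ("%" GST_TIME_FORMAT " = %" G_GUINT64_FORMAT " samples (%"
      GST_TIME_FORMAT " back)\n", GST_TIME_ARGS (t), samples,
      GST_TIME_ARGS (back));
}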
/** * gst_audio_buffer_clip: * @buffer: (transfer full): The buffer to clip. * @segment: Segment in %GST_FORMAT_TIME or %GST_FORMAT_DEFAULT to which * the buffer should be clipped. * @rate: sample rate. * @bpf: size of one audio frame in bytes. This is the size of one sample * * channels. * * Clip the buffer to the given %GstSegment. * * After calling this function the caller does not own a reference to * @buffer anymore. * * If the buffer has no timestamp, it is assumed to be inside the segment and * is not clipped. * * Returns: (transfer full): %NULL if the buffer is completely outside the configured segment, * otherwise the clipped buffer is returned. */ GstBuffer * gst_audio_buffer_clip (GstBuffer * buffer, GstSegment * segment, gint rate, gint bpf) { GstBuffer *ret; GstClockTime timestamp = GST_CLOCK_TIME_NONE, duration = GST_CLOCK_TIME_NONE; guint64 offset = GST_BUFFER_OFFSET_NONE, offset_end = GST_BUFFER_OFFSET_NONE; gsize trim, size, osize; gboolean change_duration = TRUE, change_offset = TRUE, change_offset_end = TRUE; g_return_val_if_fail (segment->format == GST_FORMAT_TIME || segment->format == GST_FORMAT_DEFAULT, buffer); g_return_val_if_fail (GST_IS_BUFFER (buffer), NULL); if (!GST_BUFFER_TIMESTAMP_IS_VALID (buffer)) /* No timestamp - assume the buffer is completely in the segment */ return buffer; /* Get copies of the buffer metadata to change later. * Calculate the missing values for the calculations, * they won't be changed later though. */ trim = 0; osize = size = gst_buffer_get_size (buffer); /* no data, nothing to clip */ if (!size) return buffer; timestamp = GST_BUFFER_TIMESTAMP (buffer); GST_DEBUG ("timestamp %" GST_TIME_FORMAT, GST_TIME_ARGS (timestamp)); if (GST_BUFFER_DURATION_IS_VALID (buffer)) { duration = GST_BUFFER_DURATION (buffer); } else { change_duration = FALSE; duration = gst_util_uint64_scale (size / bpf, GST_SECOND, rate); } if (GST_BUFFER_OFFSET_IS_VALID (buffer)) { offset = GST_BUFFER_OFFSET (buffer); } else { change_offset = FALSE; offset = 0; } if (GST_BUFFER_OFFSET_END_IS_VALID (buffer)) { offset_end = GST_BUFFER_OFFSET_END (buffer); } else { change_offset_end = FALSE; offset_end = offset + size / bpf; } if (segment->format == GST_FORMAT_TIME) { /* Handle clipping for GST_FORMAT_TIME */ guint64 start, stop, cstart, cstop, diff; start = timestamp; stop = timestamp + duration; if (gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, &cstart, &cstop)) { diff = cstart - start; if (diff > 0) { timestamp = cstart; if (change_duration) duration -= diff; diff = gst_util_uint64_scale (diff, rate, GST_SECOND); if (change_offset) offset += diff; trim += diff * bpf; size -= diff * bpf; } diff = stop - cstop; if (diff > 0) { /* duration is always valid if stop is valid */ duration -= diff; diff = gst_util_uint64_scale (diff, rate, GST_SECOND); if (change_offset_end) offset_end -= diff; size -= diff * bpf; } } else { gst_buffer_unref (buffer); return NULL; } } else { /* Handle clipping for GST_FORMAT_DEFAULT */ guint64 start, stop, cstart, cstop, diff; g_return_val_if_fail (GST_BUFFER_OFFSET_IS_VALID (buffer), buffer); start = offset; stop = offset_end; if (gst_segment_clip (segment, GST_FORMAT_DEFAULT, start, stop, &cstart, &cstop)) { diff = cstart - start; if (diff > 0) { offset = cstart; timestamp = gst_util_uint64_scale (cstart, GST_SECOND, rate); if (change_duration) duration -= gst_util_uint64_scale (diff, GST_SECOND, rate); trim += diff * bpf; size -= diff * bpf; } diff = stop - cstop; if (diff > 0) { offset_end = cstop; if (change_duration) duration -= 
gst_util_uint64_scale (diff, GST_SECOND, rate); size -= diff * bpf; } } else { gst_buffer_unref (buffer); return NULL; } } if (trim == 0 && size == osize) { ret = buffer; if (GST_BUFFER_TIMESTAMP (ret) != timestamp) { ret = gst_buffer_make_writable (ret); GST_BUFFER_TIMESTAMP (ret) = timestamp; } if (GST_BUFFER_DURATION (ret) != duration) { ret = gst_buffer_make_writable (ret); GST_BUFFER_DURATION (ret) = duration; } } else { /* Get a writable buffer and apply all changes */ GST_DEBUG ("trim %" G_GSIZE_FORMAT " size %" G_GSIZE_FORMAT, trim, size); ret = gst_buffer_copy_region (buffer, GST_BUFFER_COPY_ALL, trim, size); gst_buffer_unref (buffer); GST_DEBUG ("timestamp %" GST_TIME_FORMAT, GST_TIME_ARGS (timestamp)); GST_BUFFER_TIMESTAMP (ret) = timestamp; if (change_duration) GST_BUFFER_DURATION (ret) = duration; if (change_offset) GST_BUFFER_OFFSET (ret) = offset; if (change_offset_end) GST_BUFFER_OFFSET_END (ret) = offset_end; } return ret; }
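/* Editorial sketch: typical use of gst_audio_buffer_clip() from a chain
 * function. The 44100 Hz rate and 4 bytes per frame are assumptions for
 * S16 stereo; per the documentation above, NULL means the buffer fell
 * completely outside the segment. */
static GstFlowReturn
chain_clip_example (GstSegment * segment, GstPad * srcpad, GstBuffer * buffer)
{
  buffer = gst_audio_buffer_clip (buffer, segment, 44100, 4);
  if (buffer == NULL)
    /* completely outside the segment: nothing left to push */
    return GST_FLOW_OK;

  return gst_pad_push (srcpad, buffer);
}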
static GstFlowReturn gst_dvdec_chain (GstPad * pad, GstObject * parent, GstBuffer * buf) { GstDVDec *dvdec; guint8 *inframe; guint8 *outframe_ptrs[3]; gint outframe_pitches[3]; GstMapInfo map; GstVideoFrame frame; GstBuffer *outbuf; GstFlowReturn ret = GST_FLOW_OK; guint length; guint64 cstart = GST_CLOCK_TIME_NONE, cstop = GST_CLOCK_TIME_NONE; gboolean PAL, wide; dvdec = GST_DVDEC (parent); gst_buffer_map (buf, &map, GST_MAP_READ); inframe = map.data; /* buffer should be at least the size of one NTSC frame, this should * be enough to decode the header. */ if (G_UNLIKELY (map.size < NTSC_BUFFER)) goto wrong_size; /* preliminary dropping. unref and return if outside of configured segment */ if ((dvdec->segment.format == GST_FORMAT_TIME) && (!(gst_segment_clip (&dvdec->segment, GST_FORMAT_TIME, GST_BUFFER_TIMESTAMP (buf), GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf), &cstart, &cstop)))) goto dropping; if (G_UNLIKELY (dv_parse_header (dvdec->decoder, inframe) < 0)) goto parse_header_error; /* get size */ PAL = dv_system_50_fields (dvdec->decoder); wide = dv_format_wide (dvdec->decoder); /* check the buffer is of right size after we know if we are * dealing with PAL or NTSC */ length = (PAL ? PAL_BUFFER : NTSC_BUFFER); if (G_UNLIKELY (map.size < length)) goto wrong_size; dv_parse_packs (dvdec->decoder, inframe); if (dvdec->video_offset % dvdec->drop_factor != 0) goto skip; /* renegotiate on change */ if (PAL != dvdec->PAL || wide != dvdec->wide) { dvdec->src_negotiated = FALSE; dvdec->PAL = PAL; dvdec->wide = wide; } dvdec->height = (dvdec->PAL ? PAL_HEIGHT : NTSC_HEIGHT); dvdec->interlaced = !dv_is_progressive (dvdec->decoder); /* negotiate if not done yet */ if (!dvdec->src_negotiated) { if (!gst_dvdec_src_negotiate (dvdec)) goto not_negotiated; } if (gst_pad_check_reconfigure (dvdec->srcpad)) { GstCaps *caps; caps = gst_pad_get_current_caps (dvdec->srcpad); if (!caps) goto not_negotiated; gst_dvdec_negotiate_pool (dvdec, caps, &dvdec->vinfo); gst_caps_unref (caps); } if (dvdec->need_segment) { gst_pad_push_event (dvdec->srcpad, gst_event_new_segment (&dvdec->segment)); dvdec->need_segment = FALSE; } ret = gst_buffer_pool_acquire_buffer (dvdec->pool, &outbuf, NULL); if (G_UNLIKELY (ret != GST_FLOW_OK)) goto no_buffer; gst_video_frame_map (&frame, &dvdec->vinfo, outbuf, GST_MAP_WRITE); outframe_ptrs[0] = GST_VIDEO_FRAME_COMP_DATA (&frame, 0); outframe_pitches[0] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 0); /* the rest only matters for YUY2 */ if (dvdec->bpp < 3) { outframe_ptrs[1] = GST_VIDEO_FRAME_COMP_DATA (&frame, 1); outframe_ptrs[2] = GST_VIDEO_FRAME_COMP_DATA (&frame, 2); outframe_pitches[1] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 1); outframe_pitches[2] = GST_VIDEO_FRAME_COMP_STRIDE (&frame, 2); } GST_DEBUG_OBJECT (dvdec, "decoding and pushing buffer"); dv_decode_full_frame (dvdec->decoder, inframe, e_dv_color_yuv, outframe_ptrs, outframe_pitches); gst_video_frame_unmap (&frame); GST_BUFFER_FLAG_UNSET (outbuf, GST_VIDEO_BUFFER_FLAG_TFF); GST_BUFFER_OFFSET (outbuf) = GST_BUFFER_OFFSET (buf); GST_BUFFER_OFFSET_END (outbuf) = GST_BUFFER_OFFSET_END (buf); /* FIXME : Compute values when using non-TIME segments, * but for the moment make sure we at least don't set bogus values */ if (GST_CLOCK_TIME_IS_VALID (cstart)) { GST_BUFFER_TIMESTAMP (outbuf) = cstart; if (GST_CLOCK_TIME_IS_VALID (cstop)) GST_BUFFER_DURATION (outbuf) = cstop - cstart; } ret = gst_pad_push (dvdec->srcpad, outbuf); skip: dvdec->video_offset++; done: gst_buffer_unmap (buf, &map); gst_buffer_unref (buf); return ret; 
/* ERRORS */ wrong_size: { GST_ELEMENT_ERROR (dvdec, STREAM, DECODE, (NULL), ("Input buffer too small")); ret = GST_FLOW_ERROR; goto done; } parse_header_error: { GST_ELEMENT_ERROR (dvdec, STREAM, DECODE, (NULL), ("Error parsing DV header")); ret = GST_FLOW_ERROR; goto done; } not_negotiated: { GST_DEBUG_OBJECT (dvdec, "could not negotiate output"); ret = GST_FLOW_NOT_NEGOTIATED; goto done; } no_buffer: { GST_DEBUG_OBJECT (dvdec, "could not allocate buffer"); goto done; } dropping: { GST_DEBUG_OBJECT (dvdec, "dropping buffer since it's out of the configured segment"); goto done; } }
GstFlowReturn gst_base_video_decoder_finish_frame (GstBaseVideoDecoder * base_video_decoder, GstVideoFrame * frame) { GstBaseVideoDecoderClass *base_video_decoder_class; GstBuffer *src_buffer; GST_DEBUG ("finish frame"); base_video_decoder_class = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); GST_DEBUG ("finish frame sync=%d pts=%" G_GINT64_FORMAT, frame->is_sync_point, frame->presentation_timestamp); if (frame->is_sync_point) { if (GST_CLOCK_TIME_IS_VALID (frame->presentation_timestamp)) { if (frame->presentation_timestamp != base_video_decoder->timestamp_offset) { GST_DEBUG ("sync timestamp %" G_GINT64_FORMAT " diff %" G_GINT64_FORMAT, frame->presentation_timestamp, frame->presentation_timestamp - base_video_decoder->state.segment.start); base_video_decoder->timestamp_offset = frame->presentation_timestamp; base_video_decoder->field_index = 0; } else { /* This case is for one initial timestamp and no others, e.g., * filesrc ! decoder ! xvimagesink */ GST_WARNING ("sync timestamp didn't change, ignoring"); frame->presentation_timestamp = GST_CLOCK_TIME_NONE; } } else { GST_WARNING ("sync point doesn't have timestamp"); if (!GST_CLOCK_TIME_IS_VALID (base_video_decoder->timestamp_offset)) { GST_ERROR ("No base timestamp. Assuming frames start at 0"); base_video_decoder->timestamp_offset = 0; base_video_decoder->field_index = 0; } } } frame->field_index = base_video_decoder->field_index; base_video_decoder->field_index += frame->n_fields; if (frame->presentation_timestamp == GST_CLOCK_TIME_NONE) { frame->presentation_timestamp = gst_base_video_decoder_get_field_timestamp (base_video_decoder, frame->field_index); frame->presentation_duration = GST_CLOCK_TIME_NONE; frame->decode_timestamp = gst_base_video_decoder_get_timestamp (base_video_decoder, frame->decode_frame_number); } if (frame->presentation_duration == GST_CLOCK_TIME_NONE) { frame->presentation_duration = gst_base_video_decoder_get_field_timestamp (base_video_decoder, frame->field_index + frame->n_fields) - frame->presentation_timestamp; } if (GST_CLOCK_TIME_IS_VALID (base_video_decoder->last_timestamp)) { if (frame->presentation_timestamp < base_video_decoder->last_timestamp) { GST_WARNING ("decreasing timestamp (%" G_GINT64_FORMAT " < %" G_GINT64_FORMAT ")", frame->presentation_timestamp, base_video_decoder->last_timestamp); } } base_video_decoder->last_timestamp = frame->presentation_timestamp; GST_BUFFER_FLAG_UNSET (frame->src_buffer, GST_BUFFER_FLAG_DELTA_UNIT); if (base_video_decoder->state.interlaced) { #ifndef GST_VIDEO_BUFFER_TFF #define GST_VIDEO_BUFFER_TFF (GST_MINI_OBJECT_FLAG_LAST << 5) #endif #ifndef GST_VIDEO_BUFFER_RFF #define GST_VIDEO_BUFFER_RFF (GST_MINI_OBJECT_FLAG_LAST << 6) #endif #ifndef GST_VIDEO_BUFFER_ONEFIELD #define GST_VIDEO_BUFFER_ONEFIELD (GST_MINI_OBJECT_FLAG_LAST << 7) #endif int tff = base_video_decoder->state.top_field_first; if (frame->field_index & 1) { tff ^= 1; } if (tff) { GST_BUFFER_FLAG_SET (frame->src_buffer, GST_VIDEO_BUFFER_TFF); } else { GST_BUFFER_FLAG_UNSET (frame->src_buffer, GST_VIDEO_BUFFER_TFF); } GST_BUFFER_FLAG_UNSET (frame->src_buffer, GST_VIDEO_BUFFER_RFF); GST_BUFFER_FLAG_UNSET (frame->src_buffer, GST_VIDEO_BUFFER_ONEFIELD); if (frame->n_fields == 3) { GST_BUFFER_FLAG_SET (frame->src_buffer, GST_VIDEO_BUFFER_RFF); } else if (frame->n_fields == 1) { GST_BUFFER_FLAG_SET (frame->src_buffer, GST_VIDEO_BUFFER_ONEFIELD); } } GST_BUFFER_TIMESTAMP (frame->src_buffer) = frame->presentation_timestamp; GST_BUFFER_DURATION (frame->src_buffer) = frame->presentation_duration; 
GST_BUFFER_OFFSET (frame->src_buffer) = -1; GST_BUFFER_OFFSET_END (frame->src_buffer) = -1; GST_DEBUG ("pushing frame %" G_GINT64_FORMAT, frame->presentation_timestamp); base_video_decoder->frames = g_list_remove (base_video_decoder->frames, frame); gst_base_video_decoder_set_src_caps (base_video_decoder); src_buffer = frame->src_buffer; frame->src_buffer = NULL; gst_base_video_decoder_free_frame (frame); if (base_video_decoder->sink_clipping) { gint64 start = GST_BUFFER_TIMESTAMP (src_buffer); gint64 stop = GST_BUFFER_TIMESTAMP (src_buffer) + GST_BUFFER_DURATION (src_buffer); if (gst_segment_clip (&base_video_decoder->state.segment, GST_FORMAT_TIME, start, stop, &start, &stop)) { GST_BUFFER_TIMESTAMP (src_buffer) = start; GST_BUFFER_DURATION (src_buffer) = stop - start; } else { GST_DEBUG ("dropping buffer outside segment"); gst_buffer_unref (src_buffer); return GST_FLOW_OK; } } return gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder), src_buffer); }
GstBuffer * gst_audio_buffer_clip (GstBuffer * buffer, GstSegment * segment, gint rate, gint frame_size) { GstBuffer *ret; GstClockTime timestamp = GST_CLOCK_TIME_NONE, duration = GST_CLOCK_TIME_NONE; guint64 offset = GST_BUFFER_OFFSET_NONE, offset_end = GST_BUFFER_OFFSET_NONE; guint8 *data; guint size; gboolean change_duration = TRUE, change_offset = TRUE, change_offset_end = TRUE; g_return_val_if_fail (segment->format == GST_FORMAT_TIME || segment->format == GST_FORMAT_DEFAULT, buffer); g_return_val_if_fail (GST_IS_BUFFER (buffer), NULL); if (!GST_BUFFER_TIMESTAMP_IS_VALID (buffer)) /* No timestamp - assume the buffer is completely in the segment */ return buffer; /* Get copies of the buffer metadata to change later. * Calculate the missing values for the calculations, * they won't be changed later though. */ data = GST_BUFFER_DATA (buffer); size = GST_BUFFER_SIZE (buffer); timestamp = GST_BUFFER_TIMESTAMP (buffer); if (GST_BUFFER_DURATION_IS_VALID (buffer)) { duration = GST_BUFFER_DURATION (buffer); } else { change_duration = FALSE; duration = gst_util_uint64_scale (size / frame_size, GST_SECOND, rate); } if (GST_BUFFER_OFFSET_IS_VALID (buffer)) { offset = GST_BUFFER_OFFSET (buffer); } else { change_offset = FALSE; offset = 0; } if (GST_BUFFER_OFFSET_END_IS_VALID (buffer)) { offset_end = GST_BUFFER_OFFSET_END (buffer); } else { change_offset_end = FALSE; offset_end = offset + size / frame_size; } if (segment->format == GST_FORMAT_TIME) { /* Handle clipping for GST_FORMAT_TIME */ gint64 start, stop, cstart, cstop, diff; start = timestamp; stop = timestamp + duration; if (gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, &cstart, &cstop)) { diff = cstart - start; if (diff > 0) { timestamp = cstart; if (change_duration) duration -= diff; diff = gst_util_uint64_scale (diff, rate, GST_SECOND); if (change_offset) offset += diff; data += diff * frame_size; size -= diff * frame_size; } diff = stop - cstop; if (diff > 0) { /* duration is always valid if stop is valid */ duration -= diff; diff = gst_util_uint64_scale (diff, rate, GST_SECOND); if (change_offset_end) offset_end -= diff; size -= diff * frame_size; } } else { gst_buffer_unref (buffer); return NULL; } } else { /* Handle clipping for GST_FORMAT_DEFAULT */ gint64 start, stop, cstart, cstop, diff; g_return_val_if_fail (GST_BUFFER_OFFSET_IS_VALID (buffer), buffer); start = offset; stop = offset_end; if (gst_segment_clip (segment, GST_FORMAT_DEFAULT, start, stop, &cstart, &cstop)) { diff = cstart - start; if (diff > 0) { offset = cstart; timestamp = gst_util_uint64_scale (cstart, GST_SECOND, rate); if (change_duration) duration -= gst_util_uint64_scale (diff, GST_SECOND, rate); data += diff * frame_size; size -= diff * frame_size; } diff = stop - cstop; if (diff > 0) { offset_end = cstop; if (change_duration) duration -= gst_util_uint64_scale (diff, GST_SECOND, rate); size -= diff * frame_size; } } else { gst_buffer_unref (buffer); return NULL; } } /* Get a metadata writable buffer and apply all changes */ ret = gst_buffer_make_metadata_writable (buffer); GST_BUFFER_TIMESTAMP (ret) = timestamp; GST_BUFFER_SIZE (ret) = size; GST_BUFFER_DATA (ret) = data; if (change_duration) GST_BUFFER_DURATION (ret) = duration; if (change_offset) GST_BUFFER_OFFSET (ret) = offset; if (change_offset_end) GST_BUFFER_OFFSET_END (ret) = offset_end; return ret; }
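/* Editorial note: this is the GStreamer 0.10 variant of the
 * gst_audio_buffer_clip() shown in full above. It trims in place by moving
 * GST_BUFFER_DATA/GST_BUFFER_SIZE on a metadata-writable buffer, where the
 * 1.0 version instead takes a region copy with gst_buffer_copy_region(). */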
HRESULT AudioFakeSink::DoRenderSample(IMediaSample *pMediaSample) { GstBuffer *out_buf = NULL; gboolean in_seg = FALSE; GstClockTime buf_start, buf_stop; gint64 clip_start = 0, clip_stop = 0; guint start_offset = 0, stop_offset; GstClockTime duration; if(pMediaSample) { BYTE *pBuffer = NULL; LONGLONG lStart = 0, lStop = 0; long size = pMediaSample->GetActualDataLength(); pMediaSample->GetPointer(&pBuffer); pMediaSample->GetTime(&lStart, &lStop); if (!GST_CLOCK_TIME_IS_VALID (mDec->timestamp)) { // Convert REFERENCE_TIME to GST_CLOCK_TIME mDec->timestamp = (GstClockTime)lStart * 100; } duration = (lStop - lStart) * 100; buf_start = mDec->timestamp; buf_stop = mDec->timestamp + duration; /* save stop position to start next buffer with it */ mDec->timestamp = buf_stop; /* check if this buffer is in our current segment */ in_seg = gst_segment_clip (mDec->segment, GST_FORMAT_TIME, buf_start, buf_stop, &clip_start, &clip_stop); /* if the buffer is out of segment do not push it downstream */ if (!in_seg) { GST_DEBUG_OBJECT (mDec, "buffer is out of segment, start %" GST_TIME_FORMAT " stop %" GST_TIME_FORMAT, GST_TIME_ARGS (buf_start), GST_TIME_ARGS (buf_stop)); goto done; } /* buffer is entirely or partially in-segment, so allocate a * GstBuffer for output, and clip if required */ /* allocate a new buffer for raw audio */ mDec->last_ret = gst_pad_alloc_buffer (mDec->srcpad, GST_BUFFER_OFFSET_NONE, size, GST_PAD_CAPS (mDec->srcpad), &out_buf); if (!out_buf) { GST_WARNING_OBJECT (mDec, "cannot allocate a new GstBuffer"); goto done; } /* set buffer properties */ GST_BUFFER_TIMESTAMP (out_buf) = buf_start; GST_BUFFER_DURATION (out_buf) = duration; memcpy (GST_BUFFER_DATA (out_buf), pBuffer, MIN ((unsigned int)size, GST_BUFFER_SIZE (out_buf))); /* we have to remove some heading samples */ if ((GstClockTime) clip_start > buf_start) { start_offset = (guint)gst_util_uint64_scale_int (clip_start - buf_start, mDec->rate, GST_SECOND) * mDec->depth / 8 * mDec->channels; } else start_offset = 0; /* we have to remove some trailing samples */ if ((GstClockTime) clip_stop < buf_stop) { stop_offset = (guint)gst_util_uint64_scale_int (buf_stop - clip_stop, mDec->rate, GST_SECOND) * mDec->depth / 8 * mDec->channels; } else stop_offset = size; /* truncating */ if ((start_offset != 0) || (stop_offset != (size_t) size)) { GstBuffer *subbuf = gst_buffer_create_sub (out_buf, start_offset, stop_offset - start_offset); if (subbuf) { gst_buffer_set_caps (subbuf, GST_PAD_CAPS (mDec->srcpad)); gst_buffer_unref (out_buf); out_buf = subbuf; } } GST_BUFFER_TIMESTAMP (out_buf) = clip_start; GST_BUFFER_DURATION (out_buf) = clip_stop - clip_start; /* replace the saved stop position by the clipped one */ mDec->timestamp = clip_stop; GST_DEBUG_OBJECT (mDec, "push_buffer (size %d)=> pts %" GST_TIME_FORMAT " stop %" GST_TIME_FORMAT " duration %" GST_TIME_FORMAT, size, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (out_buf)), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (out_buf) + GST_BUFFER_DURATION (out_buf)), GST_TIME_ARGS (GST_BUFFER_DURATION (out_buf))); mDec->last_ret = gst_pad_push (mDec->srcpad, out_buf); } done: return S_OK; }
static gchar * parse_subviewer (ParserState * state, const gchar * line) { guint h1, m1, s1, ms1; guint h2, m2, s2, ms2; gchar *ret; /* TODO: Maybe also parse the fields in the header, especially DELAY. * For examples see the unit test or * http://www.doom9.org/index.html?/sub.htm */ switch (state->state) { case 0: /* looking for start_time,end_time */ if (sscanf (line, "%u:%u:%u.%u,%u:%u:%u.%u", &h1, &m1, &s1, &ms1, &h2, &m2, &s2, &ms2) == 8) { state->state = 1; state->start_time = (((guint64) h1) * 3600 + m1 * 60 + s1) * GST_SECOND + ms1 * GST_MSECOND; state->duration = (((guint64) h2) * 3600 + m2 * 60 + s2) * GST_SECOND + ms2 * GST_MSECOND - state->start_time; } return NULL; case 1: { /* No need to parse that text if it's out of segment */ gint64 clip_start = 0, clip_stop = 0; gboolean in_seg = FALSE; /* Check our segment start/stop */ in_seg = gst_segment_clip (state->segment, GST_FORMAT_TIME, state->start_time, state->start_time + state->duration, &clip_start, &clip_stop); if (in_seg) { state->start_time = clip_start; state->duration = clip_stop - clip_start; } else { state->state = 0; return NULL; } } /* looking for subtitle text; empty line ends this subtitle entry */ if (state->buf->len) g_string_append_c (state->buf, '\n'); g_string_append (state->buf, line); if (strlen (line) == 0) { ret = g_strdup (state->buf->str); subviewer_unescape_newlines (ret); strip_trailing_newlines (ret); g_string_truncate (state->buf, 0); state->state = 0; return ret; } return NULL; default: g_assert_not_reached (); return NULL; } }
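/* Editorial worked example (assumed input) of the timestamp arithmetic the
 * subtitle parsers above use: "00:01:02.500" becomes 62.5 seconds. */
static GstClockTime
subviewer_time_example (void)
{
  guint h = 0, m = 1, s = 2, ms = 500;

  /* (0 * 3600 + 1 * 60 + 2) * GST_SECOND + 500 * GST_MSECOND = 62.5 s */
  return (((guint64) h) * 3600 + m * 60 + s) * GST_SECOND + ms * GST_MSECOND;
}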
static void gst_image_freeze_src_loop (GstPad * pad) { GstImageFreeze *self = GST_IMAGE_FREEZE (GST_PAD_PARENT (pad)); GstBuffer *buffer; guint64 offset; GstClockTime timestamp, timestamp_end; guint64 cstart, cstop; gboolean in_seg, eos; GstFlowReturn flow_ret = GST_FLOW_OK; g_mutex_lock (&self->lock); if (!gst_pad_has_current_caps (self->srcpad)) { GST_ERROR_OBJECT (pad, "Not negotiated yet"); flow_ret = GST_FLOW_NOT_NEGOTIATED; g_mutex_unlock (&self->lock); goto pause_task; } if (!self->buffer) { GST_ERROR_OBJECT (pad, "Have no buffer yet"); flow_ret = GST_FLOW_ERROR; g_mutex_unlock (&self->lock); goto pause_task; } buffer = gst_buffer_ref (self->buffer); buffer = gst_buffer_make_writable (buffer); g_mutex_unlock (&self->lock); if (self->need_segment) { GstEvent *e; GST_DEBUG_OBJECT (pad, "Pushing SEGMENT event: %" GST_SEGMENT_FORMAT, &self->segment); e = gst_event_new_segment (&self->segment); g_mutex_lock (&self->lock); if (self->segment.rate >= 0) { self->offset = gst_util_uint64_scale (self->segment.start, self->fps_n, self->fps_d * GST_SECOND); } else { self->offset = gst_util_uint64_scale (self->segment.stop, self->fps_n, self->fps_d * GST_SECOND); } g_mutex_unlock (&self->lock); self->need_segment = FALSE; gst_pad_push_event (self->srcpad, e); } g_mutex_lock (&self->lock); offset = self->offset; if (self->fps_n != 0) { timestamp = gst_util_uint64_scale (offset, self->fps_d * GST_SECOND, self->fps_n); timestamp_end = gst_util_uint64_scale (offset + 1, self->fps_d * GST_SECOND, self->fps_n); } else { timestamp = self->segment.start; timestamp_end = GST_CLOCK_TIME_NONE; } eos = (self->fps_n == 0 && offset > 0) || (self->segment.rate >= 0 && self->segment.stop != -1 && timestamp > self->segment.stop) || (self->segment.rate < 0 && offset == 0) || (self->segment.rate < 0 && self->segment.start != -1 && timestamp_end < self->segment.start); if (self->fps_n == 0 && offset > 0) in_seg = FALSE; else in_seg = gst_segment_clip (&self->segment, GST_FORMAT_TIME, timestamp, timestamp_end, &cstart, &cstop); if (in_seg) { self->segment.position = cstart; if (self->segment.rate >= 0) self->segment.position = cstop; } if (self->segment.rate >= 0) self->offset++; else self->offset--; g_mutex_unlock (&self->lock); GST_DEBUG_OBJECT (pad, "Handling buffer with timestamp %" GST_TIME_FORMAT, GST_TIME_ARGS (timestamp)); if (in_seg) { GST_BUFFER_DTS (buffer) = GST_CLOCK_TIME_NONE; GST_BUFFER_PTS (buffer) = cstart; GST_BUFFER_DURATION (buffer) = cstop - cstart; GST_BUFFER_OFFSET (buffer) = offset; GST_BUFFER_OFFSET_END (buffer) = offset + 1; flow_ret = gst_pad_push (self->srcpad, buffer); GST_DEBUG_OBJECT (pad, "Pushing buffer resulted in %s", gst_flow_get_name (flow_ret)); if (flow_ret != GST_FLOW_OK) goto pause_task; } else { gst_buffer_unref (buffer); } if (eos) { flow_ret = GST_FLOW_EOS; goto pause_task; } return; pause_task: { const gchar *reason = gst_flow_get_name (flow_ret); GST_LOG_OBJECT (self, "pausing task, reason %s", reason); gst_pad_pause_task (pad); if (flow_ret == GST_FLOW_EOS) { if ((self->segment.flags & GST_SEEK_FLAG_SEGMENT)) { GstMessage *m; GstEvent *e; GST_DEBUG_OBJECT (pad, "Sending segment done at end of segment"); if (self->segment.rate >= 0) { m = gst_message_new_segment_done (GST_OBJECT_CAST (self), GST_FORMAT_TIME, self->segment.stop); e = gst_event_new_segment_done (GST_FORMAT_TIME, self->segment.stop); } else { m = gst_message_new_segment_done (GST_OBJECT_CAST (self), GST_FORMAT_TIME, self->segment.start); e = gst_event_new_segment_done (GST_FORMAT_TIME, self->segment.start); 
} gst_element_post_message (GST_ELEMENT_CAST (self), m); gst_pad_push_event (self->srcpad, e); } else { GST_DEBUG_OBJECT (pad, "Sending EOS at end of segment"); gst_pad_push_event (self->srcpad, gst_event_new_eos ()); } } else if (flow_ret == GST_FLOW_NOT_LINKED || flow_ret < GST_FLOW_EOS) { GST_ELEMENT_ERROR (self, STREAM, FAILED, ("Internal data stream error."), ("stream stopped, reason %s", reason)); gst_pad_push_event (self->srcpad, gst_event_new_eos ()); } return; } }
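/* Editorial sketch (assumed 30000/1001 fps): the frame-offset to timestamp
 * mapping used by the src loop above. */
static void
frame_timestamp_example (void)
{
  gint fps_n = 30000, fps_d = 1001;
  guint64 offset = 42;
  GstClockTime ts = gst_util_uint64_scale (offset, fps_d * GST_SECOND, fps_n);

  g_print ("frame %" G_GUINT64_FORMAT " starts at %" GST_TIME_FORMAT "\n",
      offset, GST_TIME_ARGS (ts));
}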
static GstFlowReturn gst_video_segment_clip_clip_buffer (GstSegmentClip * base, GstBuffer * buffer, GstBuffer ** outbuf) { GstVideoSegmentClip *self = GST_VIDEO_SEGMENT_CLIP (base); GstSegment *segment = &base->segment; GstClockTime timestamp, duration; guint64 cstart, cstop; gboolean in_seg; if (!self->fps_d) { GST_ERROR_OBJECT (self, "Not negotiated yet"); gst_buffer_unref (buffer); return GST_FLOW_NOT_NEGOTIATED; } if (segment->format != GST_FORMAT_TIME) { GST_DEBUG_OBJECT (self, "Unsupported segment format %s", gst_format_get_name (segment->format)); *outbuf = buffer; return GST_FLOW_OK; } if (!GST_BUFFER_TIMESTAMP_IS_VALID (buffer)) { GST_WARNING_OBJECT (self, "Buffer without valid timestamp"); *outbuf = buffer; return GST_FLOW_OK; } if (self->fps_n == 0) { *outbuf = buffer; return GST_FLOW_OK; } timestamp = GST_BUFFER_TIMESTAMP (buffer); duration = GST_BUFFER_DURATION (buffer); if (!GST_CLOCK_TIME_IS_VALID (duration)) duration = gst_util_uint64_scale (GST_SECOND, self->fps_d, self->fps_n); in_seg = gst_segment_clip (segment, GST_FORMAT_TIME, timestamp, timestamp + duration, &cstart, &cstop); if (in_seg) { if (timestamp != cstart || timestamp + duration != cstop) { *outbuf = gst_buffer_make_writable (buffer); GST_BUFFER_TIMESTAMP (*outbuf) = cstart; GST_BUFFER_DURATION (*outbuf) = cstop - cstart; } else { *outbuf = buffer; } } else { GST_DEBUG_OBJECT (self, "Buffer outside the configured segment"); gst_buffer_unref (buffer); if (segment->rate >= 0) { if (segment->stop != -1 && timestamp >= segment->stop) return GST_FLOW_EOS; } else { if (segment->start != -1 && timestamp + duration <= segment->start) return GST_FLOW_EOS; } } return GST_FLOW_OK; }
HRESULT VideoFakeSink::DoRenderSample(IMediaSample *pMediaSample) { gboolean in_seg = FALSE; gint64 clip_start = 0, clip_stop = 0; GstDshowVideoDecClass *klass = (GstDshowVideoDecClass *) G_OBJECT_GET_CLASS (mDec); GstBuffer *buf = NULL; GstClockTime start, stop; if(pMediaSample) { BYTE *pBuffer = NULL; LONGLONG lStart = 0, lStop = 0; long size = pMediaSample->GetActualDataLength(); pMediaSample->GetPointer(&pBuffer); pMediaSample->GetTime(&lStart, &lStop); start = lStart * 100; stop = lStop * 100; /* check if this buffer is in our current segment */ in_seg = gst_segment_clip (mDec->segment, GST_FORMAT_TIME, start, stop, &clip_start, &clip_stop); /* if the buffer is out of segment do not push it downstream */ if (!in_seg) { GST_DEBUG_OBJECT (mDec, "buffer is out of segment, start %" GST_TIME_FORMAT " stop %" GST_TIME_FORMAT, GST_TIME_ARGS (start), GST_TIME_ARGS (stop)); goto done; } /* buffer is in our segment, allocate a new out buffer and clip its * timestamps */ mDec->last_ret = gst_pad_alloc_buffer (mDec->srcpad, GST_BUFFER_OFFSET_NONE, size, GST_PAD_CAPS (mDec->srcpad), &buf); if (!buf) { GST_WARNING_OBJECT (mDec, "cannot allocate a new GstBuffer"); goto done; } /* set buffer properties */ GST_BUFFER_TIMESTAMP (buf) = clip_start; GST_BUFFER_DURATION (buf) = clip_stop - clip_start; if (strstr (klass->entry->srccaps, "rgb")) { /* FOR RGB directshow decoder will return bottom-up BITMAP * There is probably a way to get top-bottom video frames from * the decoder... */ gint line = 0; guint stride = mDec->width * 4; for (; line < mDec->height; line++) { memcpy (GST_BUFFER_DATA (buf) + (line * stride), pBuffer + (size - ((line + 1) * (stride))), stride); } } else { memcpy (GST_BUFFER_DATA (buf), pBuffer, MIN ((unsigned int)size, GST_BUFFER_SIZE (buf))); } GST_LOG_OBJECT (mDec, "push_buffer (size %d)=> pts %" GST_TIME_FORMAT " stop %" GST_TIME_FORMAT " duration %" GST_TIME_FORMAT, size, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf)), GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); /* push the buffer downstream */ mDec->last_ret = gst_pad_push (mDec->srcpad, buf); } done: return S_OK; }
static GstFlowReturn gst_dvdec_chain (GstPad * pad, GstBuffer * buf) { GstDVDec *dvdec; guint8 *inframe; guint8 *outframe; guint8 *outframe_ptrs[3]; gint outframe_pitches[3]; GstBuffer *outbuf; GstFlowReturn ret = GST_FLOW_OK; guint length; gint64 cstart = -1, cstop = -1; gboolean PAL, wide; dvdec = GST_DVDEC (gst_pad_get_parent (pad)); inframe = GST_BUFFER_DATA (buf); /* buffer should be at least the size of one NTSC frame, this should * be enough to decode the header. */ if (G_UNLIKELY (GST_BUFFER_SIZE (buf) < NTSC_BUFFER)) goto wrong_size; /* preliminary dropping. unref and return if outside of configured segment */ if ((dvdec->segment->format == GST_FORMAT_TIME) && (!(gst_segment_clip (dvdec->segment, GST_FORMAT_TIME, GST_BUFFER_TIMESTAMP (buf), GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf), &cstart, &cstop)))) goto dropping; if (G_UNLIKELY (dv_parse_header (dvdec->decoder, inframe) < 0)) goto parse_header_error; /* get size */ PAL = dv_system_50_fields (dvdec->decoder); wide = dv_format_wide (dvdec->decoder); /* check the buffer is of right size after we know if we are * dealing with PAL or NTSC */ length = (PAL ? PAL_BUFFER : NTSC_BUFFER); if (G_UNLIKELY (GST_BUFFER_SIZE (buf) < length)) goto wrong_size; dv_parse_packs (dvdec->decoder, inframe); if (dvdec->video_offset % dvdec->drop_factor != 0) goto skip; /* renegotiate on change */ if (PAL != dvdec->PAL || wide != dvdec->wide) { dvdec->src_negotiated = FALSE; dvdec->PAL = PAL; dvdec->wide = wide; } dvdec->height = (dvdec->PAL ? PAL_HEIGHT : NTSC_HEIGHT); /* negotiate if not done yet */ if (!dvdec->src_negotiated) { if (!gst_dvdec_src_negotiate (dvdec)) goto not_negotiated; } ret = gst_pad_alloc_buffer_and_set_caps (dvdec->srcpad, 0, (720 * dvdec->height) * dvdec->bpp, GST_PAD_CAPS (dvdec->srcpad), &outbuf); if (G_UNLIKELY (ret != GST_FLOW_OK)) goto no_buffer; outframe = GST_BUFFER_DATA (outbuf); outframe_ptrs[0] = outframe; outframe_pitches[0] = 720 * dvdec->bpp; /* the rest only matters for YUY2 */ if (dvdec->bpp < 3) { outframe_ptrs[1] = outframe_ptrs[0] + 720 * dvdec->height; outframe_ptrs[2] = outframe_ptrs[1] + 360 * dvdec->height; outframe_pitches[1] = dvdec->height / 2; outframe_pitches[2] = outframe_pitches[1]; } GST_DEBUG_OBJECT (dvdec, "decoding and pushing buffer"); dv_decode_full_frame (dvdec->decoder, inframe, e_dv_color_yuv, outframe_ptrs, outframe_pitches); GST_BUFFER_OFFSET (outbuf) = GST_BUFFER_OFFSET (buf); GST_BUFFER_OFFSET_END (outbuf) = GST_BUFFER_OFFSET_END (buf); /* only set timestamps when the segment gave us valid clipped values; * for non-TIME segments cstart/cstop were never filled in */ if (GST_CLOCK_TIME_IS_VALID (cstart)) { GST_BUFFER_TIMESTAMP (outbuf) = cstart; if (GST_CLOCK_TIME_IS_VALID (cstop)) GST_BUFFER_DURATION (outbuf) = cstop - cstart; } ret = gst_pad_push (dvdec->srcpad, outbuf); skip: dvdec->video_offset++; done: gst_buffer_unref (buf); gst_object_unref (dvdec); return ret; /* ERRORS */ wrong_size: { GST_ELEMENT_ERROR (dvdec, STREAM, DECODE, (NULL), ("Input buffer too small")); ret = GST_FLOW_ERROR; goto done; } parse_header_error: { GST_ELEMENT_ERROR (dvdec, STREAM, DECODE, (NULL), ("Error parsing DV header")); ret = GST_FLOW_ERROR; goto done; } not_negotiated: { GST_DEBUG_OBJECT (dvdec, "could not negotiate output"); ret = GST_FLOW_NOT_NEGOTIATED; goto done; } no_buffer: { GST_DEBUG_OBJECT (dvdec, "could not allocate buffer"); goto done; } dropping: { GST_DEBUG_OBJECT (dvdec, "dropping buffer since it's out of the configured segment"); goto done; } }
static gchar * parse_mdvdsub (ParserState * state, const gchar * line) { const gchar *line_split; gchar *line_chunk; guint start_frame, end_frame; gint64 clip_start = 0, clip_stop = 0; gboolean in_seg = FALSE; GString *markup; gchar *ret; /* style variables */ gboolean italic; gboolean bold; guint fontsize; if (sscanf (line, "{%u}{%u}", &start_frame, &end_frame) != 2) { g_warning ("Parse of the following line, assumed to be in microdvd .sub" " format, failed:\n%s", line); return NULL; } /* skip the {%u}{%u} part */ line = strchr (line, '}') + 1; line = strchr (line, '}') + 1; /* see if there's a first line with a framerate */ if (state->fps == 0.0 && start_frame == 1 && end_frame == 1) { gchar *rest, *end = NULL; rest = g_strdup (line); g_strdelimit (rest, ",", '.'); state->fps = g_ascii_strtod (rest, &end); if (end == rest) state->fps = 0.0; GST_INFO ("framerate from file: %f ('%s')", state->fps, rest); g_free (rest); return NULL; } if (state->fps == 0.0) { /* FIXME: hardcoded for now, is there a better way/assumption? */ state->fps = 24000.0 / 1001.0; GST_INFO ("no framerate specified, assuming %f", state->fps); } state->start_time = start_frame / state->fps * GST_SECOND; state->duration = (end_frame - start_frame) / state->fps * GST_SECOND; /* Check our segment start/stop */ in_seg = gst_segment_clip (state->segment, GST_FORMAT_TIME, state->start_time, state->start_time + state->duration, &clip_start, &clip_stop); /* No need to parse that text if it's out of segment */ if (in_seg) { state->start_time = clip_start; state->duration = clip_stop - clip_start; } else { return NULL; } markup = g_string_new (NULL); while (1) { italic = FALSE; bold = FALSE; fontsize = 0; /* parse style markup */ if (strncmp (line, "{y:i}", 5) == 0) { italic = TRUE; line = strchr (line, '}') + 1; } if (strncmp (line, "{y:b}", 5) == 0) { bold = TRUE; line = strchr (line, '}') + 1; } if (sscanf (line, "{s:%u}", &fontsize) == 1) { line = strchr (line, '}') + 1; } /* forward slashes at beginning/end signify italics too */ if (g_str_has_prefix (line, "/")) { italic = TRUE; ++line; } if ((line_split = strchr (line, '|'))) line_chunk = g_markup_escape_text (line, line_split - line); else line_chunk = g_markup_escape_text (line, strlen (line)); /* Remove italics markers at end of line/stanza (CHECKME: are end slashes * always at the end of a line or can they span multiple lines?) */ if (g_str_has_suffix (line_chunk, "/")) { line_chunk[strlen (line_chunk) - 1] = '\0'; } markup = g_string_append (markup, "<span"); if (italic) g_string_append (markup, " style=\"italic\""); if (bold) g_string_append (markup, " weight=\"bold\""); if (fontsize) g_string_append_printf (markup, " size=\"%u\"", fontsize * 1000); g_string_append_printf (markup, ">%s</span>", line_chunk); g_free (line_chunk); if (line_split) { g_string_append (markup, "\n"); line = line_split + 1; } else { break; } } ret = markup->str; g_string_free (markup, FALSE); GST_DEBUG ("parse_mdvdsub returning (%f+%f): %s", state->start_time / (double) GST_SECOND, state->duration / (double) GST_SECOND, ret); return ret; }
/* Pipeline Callbacks */ static gboolean probe_cb (InsanityGstTest * ptest, GstPad * pad, GstMiniObject * object, gpointer userdata) { InsanityTest *test = INSANITY_TEST (ptest); global_last_probe = g_get_monotonic_time (); DECODER_TEST_LOCK (); if (GST_IS_BUFFER (object)) { GstBuffer *buf; GstClockTime ts; buf = GST_BUFFER (object); ts = GST_BUFFER_PTS (buf); /* First check clipping */ if (glob_testing_parser == FALSE && GST_CLOCK_TIME_IS_VALID (ts) && glob_waiting_segment == FALSE) { GstClockTime ts_end, cstart, cstop; /* Check if buffer is completely outside the segment */ ts_end = ts; if (GST_BUFFER_DURATION_IS_VALID (buf)) ts_end += GST_BUFFER_DURATION (buf); if (!gst_segment_clip (&glob_last_segment, glob_last_segment.format, ts, ts_end, &cstart, &cstop)) { char *msg = g_strdup_printf ("Got timestamp %" GST_TIME_FORMAT " -- %" GST_TIME_FORMAT ", outside configured segment (%" GST_TIME_FORMAT " -- %" GST_TIME_FORMAT "), method %s", GST_TIME_ARGS (ts), GST_TIME_ARGS (ts_end), GST_TIME_ARGS (glob_last_segment.start), GST_TIME_ARGS (glob_last_segment.stop), test_get_name (glob_in_progress)); insanity_test_validate_checklist_item (INSANITY_TEST (ptest), "segment-clipping", FALSE, msg); g_free (msg); glob_bad_segment_clipping = TRUE; } } switch (glob_in_progress) { case TEST_NONE: if (glob_waiting_first_segment == TRUE) insanity_test_validate_checklist_item (test, "first-segment", FALSE, "Got a buffer before the first segment"); /* Got the first buffer, starting testing dance */ next_test (test); break; case TEST_POSITION: test_position (test, buf); break; case TEST_FAST_FORWARD: case TEST_BACKWARD_PLAYBACK: case TEST_FAST_BACKWARD: { gint64 stime_ts; if (GST_CLOCK_TIME_IS_VALID (ts) == FALSE || glob_waiting_segment == TRUE) { break; } stime_ts = gst_segment_to_stream_time (&glob_last_segment, glob_last_segment.format, ts); if (GST_CLOCK_TIME_IS_VALID (glob_seek_first_buf_ts) == FALSE) { GstClockTime expected_ts = gst_segment_to_stream_time (&glob_last_segment, glob_last_segment.format, glob_seek_rate < 0 ? 
glob_seek_stop_ts : glob_seek_segment_seektime); GstClockTimeDiff diff = ABS (GST_CLOCK_DIFF (stime_ts, expected_ts)); if (diff > SEEK_THRESHOLD) { gchar *valmsg = g_strdup_printf ("Received buffer timestamp %" GST_TIME_FORMAT " Seek wanted %" GST_TIME_FORMAT, GST_TIME_ARGS (stime_ts), GST_TIME_ARGS (expected_ts)); validate_current_test (test, FALSE, valmsg); next_test (test); g_free (valmsg); } else glob_seek_first_buf_ts = stime_ts; } else { GstClockTimeDiff diff = GST_CLOCK_DIFF (stime_ts, glob_seek_first_buf_ts); if (diff < 0) diff = -diff; if (diff >= glob_playback_duration * GST_SECOND) { validate_current_test (test, TRUE, NULL); next_test (test); } } break; } default: break; } } else if (GST_IS_EVENT (object)) { GstEvent *event = GST_EVENT (object); guint seqnum = gst_event_get_seqnum (event); if (G_LIKELY (glob_seqnum_found == FALSE) && seqnum == glob_seqnum) glob_seqnum_found = TRUE; if (glob_seqnum_found == TRUE && seqnum != glob_seqnum) { gchar *message = g_strdup_printf ("Current seqnum %u != received %u", glob_seqnum, seqnum); insanity_test_validate_checklist_item (test, "seqnum-management", FALSE, message); glob_wrong_seqnum = TRUE; g_free (message); } switch (GST_EVENT_TYPE (event)) { case GST_EVENT_SEGMENT: { gst_event_copy_segment (event, &glob_last_segment); if (glob_waiting_segment == FALSE) /* Cache the segment as it will be our reference, but don't look further */ goto done; glob_last_segment_start_time = glob_last_segment.start; if (glob_waiting_first_segment == TRUE) { insanity_test_validate_checklist_item (test, "first-segment", TRUE, NULL); glob_waiting_first_segment = FALSE; } else if (glob_in_progress >= TEST_FAST_FORWARD && glob_in_progress <= TEST_FAST_BACKWARD) { GstClockTimeDiff diff; gboolean valid_stop = TRUE; GstClockTimeDiff wdiff, rdiff; rdiff = ABS (GST_CLOCK_DIFF (glob_last_segment.stop, glob_last_segment.start)) * ABS (glob_last_segment.rate * glob_last_segment.applied_rate); wdiff = ABS (GST_CLOCK_DIFF (glob_seek_stop_ts, glob_seek_segment_seektime)); diff = GST_CLOCK_DIFF (glob_last_segment.position, glob_seek_segment_seektime); if (diff < 0) diff = -diff; /* Now compare with the expected segment */ if ((glob_last_segment.rate * glob_last_segment.applied_rate) == glob_seek_rate && diff <= SEEK_THRESHOLD && valid_stop) { glob_seek_got_segment = TRUE; } else { GstClockTime stopdiff = ABS (GST_CLOCK_DIFF (rdiff, wdiff)); gchar *validate_msg = g_strdup_printf ("Wrong segment received, rate %f expected %f, start time diff %" GST_TIME_FORMAT " stop diff %" GST_TIME_FORMAT, (glob_last_segment.rate * glob_last_segment.applied_rate), glob_seek_rate, GST_TIME_ARGS (diff), GST_TIME_ARGS (stopdiff)); validate_current_test (test, FALSE, validate_msg); next_test (test); g_free (validate_msg); } } glob_waiting_segment = FALSE; break; } default: break; } } done: DECODER_TEST_UNLOCK (); return TRUE; }
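/* A minimal sketch of the stream-time check probe_cb() performs after a seek,
 * assuming GStreamer 1.x: the buffer timestamp is mapped into stream time and
 * compared against the seek target within a threshold.  SEEK_THRESHOLD and the
 * helper name are assumptions for this example, not part of the test above. */
#include <gst/gst.h>

#define SEEK_THRESHOLD (GST_SECOND / 10)

static gboolean
buffer_matches_seek_target (const GstSegment * segment, GstClockTime ts,
    GstClockTime seek_target)
{
  guint64 stime_ts;
  GstClockTimeDiff diff;

  if (!GST_CLOCK_TIME_IS_VALID (ts))
    return FALSE;

  /* map the buffer timestamp into stream time, as the test does */
  stime_ts = gst_segment_to_stream_time (segment, segment->format, ts);
  diff = ABS (GST_CLOCK_DIFF (stime_ts, seek_target));

  return diff <= SEEK_THRESHOLD;
}

int
main (int argc, char **argv)
{
  GstSegment segment;

  gst_init (&argc, &argv);
  gst_segment_init (&segment, GST_FORMAT_TIME);

  g_print ("matches: %d\n",
      buffer_matches_seek_target (&segment, GST_SECOND, GST_SECOND));
  return 0;
}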
GstFlowReturn gst_base_video_decoder_finish_frame (GstBaseVideoDecoder * base_video_decoder, GstVideoFrame * frame) { GstBaseVideoDecoderClass *base_video_decoder_class; GstClockTime presentation_timestamp; GstClockTime presentation_duration; GstBuffer *src_buffer; GST_DEBUG ("finish frame"); base_video_decoder_class = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); gst_base_video_decoder_calculate_timestamps (base_video_decoder, frame, &presentation_timestamp, &presentation_duration); src_buffer = frame->src_buffer; GST_BUFFER_FLAG_UNSET (src_buffer, GST_BUFFER_FLAG_DELTA_UNIT); if (base_video_decoder->state.interlaced) { #ifndef GST_VIDEO_BUFFER_TFF #define GST_VIDEO_BUFFER_TFF (GST_MINI_OBJECT_FLAG_LAST << 5) #endif #ifndef GST_VIDEO_BUFFER_RFF #define GST_VIDEO_BUFFER_RFF (GST_MINI_OBJECT_FLAG_LAST << 6) #endif #ifndef GST_VIDEO_BUFFER_ONEFIELD #define GST_VIDEO_BUFFER_ONEFIELD (GST_MINI_OBJECT_FLAG_LAST << 7) #endif if (GST_VIDEO_FRAME_FLAG_IS_SET (frame, GST_VIDEO_FRAME_FLAG_TFF)) { GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_TFF); } else { GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_TFF); } GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_RFF); GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_ONEFIELD); if (frame->n_fields == 3) { GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_RFF); } else if (frame->n_fields == 1) { GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_ONEFIELD); } } if (base_video_decoder->discont) { GST_BUFFER_FLAG_UNSET (src_buffer, GST_BUFFER_FLAG_DISCONT); base_video_decoder->discont = FALSE; } GST_BUFFER_TIMESTAMP (src_buffer) = presentation_timestamp; GST_BUFFER_DURATION (src_buffer) = presentation_duration; GST_BUFFER_OFFSET (src_buffer) = GST_BUFFER_OFFSET_NONE; GST_BUFFER_OFFSET_END (src_buffer) = GST_BUFFER_OFFSET_NONE; GST_DEBUG ("pushing frame %" GST_TIME_FORMAT, GST_TIME_ARGS (presentation_timestamp)); gst_base_video_decoder_set_src_caps (base_video_decoder); if (base_video_decoder->sink_clipping) { gint64 start = GST_BUFFER_TIMESTAMP (src_buffer); gint64 stop = GST_BUFFER_TIMESTAMP (src_buffer) + GST_BUFFER_DURATION (src_buffer); if (gst_segment_clip (&base_video_decoder->segment, GST_FORMAT_TIME, start, stop, &start, &stop)) { GST_BUFFER_TIMESTAMP (src_buffer) = start; GST_BUFFER_DURATION (src_buffer) = stop - start; GST_DEBUG ("accepting buffer inside segment: %" GST_TIME_FORMAT " %" GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT " time %" GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (src_buffer)), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (src_buffer) + GST_BUFFER_DURATION (src_buffer)), GST_TIME_ARGS (base_video_decoder->segment.start), GST_TIME_ARGS (base_video_decoder->segment.stop), GST_TIME_ARGS (base_video_decoder->segment.time)); } else { GST_DEBUG ("dropping buffer outside segment: %" GST_TIME_FORMAT " %" GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT " time %" GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (src_buffer)), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (src_buffer) + GST_BUFFER_DURATION (src_buffer)), GST_TIME_ARGS (base_video_decoder->segment.start), GST_TIME_ARGS (base_video_decoder->segment.stop), GST_TIME_ARGS (base_video_decoder->segment.time)); gst_video_frame_unref (frame); return GST_FLOW_OK; } } gst_buffer_ref (src_buffer); gst_video_frame_unref (frame); if (base_video_decoder_class->shape_output) return base_video_decoder_class->shape_output (base_video_decoder, src_buffer); return gst_pad_push (GST_BASE_VIDEO_DECODER_SRC_PAD (base_video_decoder), 
src_buffer); }
static gchar * parse_subrip (ParserState * state, const gchar * line) { guint h1, m1, s1, ms1; guint h2, m2, s2, ms2; guint subnum; gchar *ret; switch (state->state) { case 0: /* looking for a single integer */ if (sscanf (line, "%u", &subnum) == 1) state->state = 1; return NULL; case 1: /* looking for start_time --> end_time */ if (sscanf (line, "%u:%u:%u,%u --> %u:%u:%u,%u", &h1, &m1, &s1, &ms1, &h2, &m2, &s2, &ms2) == 8) { state->state = 2; state->start_time = (((guint64) h1) * 3600 + m1 * 60 + s1) * GST_SECOND + ms1 * GST_MSECOND; state->duration = (((guint64) h2) * 3600 + m2 * 60 + s2) * GST_SECOND + ms2 * GST_MSECOND - state->start_time; } else { GST_DEBUG ("error parsing subrip time line"); state->state = 0; } return NULL; case 2: { /* No need to parse that text if it's out of segment */ gint64 clip_start = 0, clip_stop = 0; gboolean in_seg = FALSE; /* Check our segment start/stop */ in_seg = gst_segment_clip (state->segment, GST_FORMAT_TIME, state->start_time, state->start_time + state->duration, &clip_start, &clip_stop); if (in_seg) { state->start_time = clip_start; state->duration = clip_stop - clip_start; } else { state->state = 0; return NULL; } } /* looking for subtitle text; empty line ends this subtitle entry */ if (state->buf->len) g_string_append_c (state->buf, '\n'); g_string_append (state->buf, line); if (strlen (line) == 0) { ret = g_markup_escape_text (state->buf->str, state->buf->len); g_string_truncate (state->buf, 0); state->state = 0; subrip_unescape_formatting (ret); subrip_remove_unhandled_tags (ret); strip_trailing_newlines (ret); subrip_fix_up_markup (&ret); return ret; } return NULL; default: g_return_val_if_reached (NULL); } }
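/* Small worked example of the state-1 timestamp arithmetic in parse_subrip()
 * above; the input line is made up.  Only GLib/GStreamer macros are used, so
 * no gst_init() is required. */
#include <stdio.h>
#include <gst/gst.h>

int
main (void)
{
  const gchar *line = "00:01:02,500 --> 00:01:05,250";
  guint h1, m1, s1, ms1, h2, m2, s2, ms2;
  GstClockTime start, duration;

  if (sscanf (line, "%u:%u:%u,%u --> %u:%u:%u,%u",
          &h1, &m1, &s1, &ms1, &h2, &m2, &s2, &ms2) != 8)
    return 1;

  start = (((guint64) h1) * 3600 + m1 * 60 + s1) * GST_SECOND
      + ms1 * GST_MSECOND;
  duration = (((guint64) h2) * 3600 + m2 * 60 + s2) * GST_SECOND
      + ms2 * GST_MSECOND - start;

  /* prints: start 0:01:02.500000000 duration 0:00:02.750000000 */
  g_print ("start %" GST_TIME_FORMAT " duration %" GST_TIME_FORMAT "\n",
      GST_TIME_ARGS (start), GST_TIME_ARGS (duration));
  return 0;
}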
GstFlowReturn gst_base_video_decoder_finish_frame (GstBaseVideoDecoder * base_video_decoder, GstVideoFrame * frame) { GstBaseVideoDecoderClass *base_video_decoder_class; GstBuffer *src_buffer; GST_DEBUG ("finish frame"); base_video_decoder_class = GST_BASE_VIDEO_DECODER_GET_CLASS (base_video_decoder); GST_DEBUG ("finish frame sync=%d pts=%" GST_TIME_FORMAT, GST_VIDEO_FRAME_FLAG_IS_SET (frame, GST_VIDEO_FRAME_FLAG_SYNC_POINT), GST_TIME_ARGS (frame->presentation_timestamp)); if (GST_CLOCK_TIME_IS_VALID (frame->presentation_timestamp)) { if (frame->presentation_timestamp != base_video_decoder->timestamp_offset) { GST_DEBUG ("sync timestamp %" GST_TIME_FORMAT " diff %" GST_TIME_FORMAT, GST_TIME_ARGS (frame->presentation_timestamp), GST_TIME_ARGS (frame->presentation_timestamp - base_video_decoder->segment.start)); base_video_decoder->timestamp_offset = frame->presentation_timestamp; base_video_decoder->field_index = 0; } else { /* This case is for one initial timestamp and no others, e.g., * filesrc ! decoder ! xvimagesink */ GST_WARNING ("sync timestamp didn't change, ignoring"); frame->presentation_timestamp = GST_CLOCK_TIME_NONE; } } else { if (GST_VIDEO_FRAME_FLAG_IS_SET (frame, GST_VIDEO_FRAME_FLAG_SYNC_POINT)) { GST_WARNING ("sync point doesn't have timestamp"); if (!GST_CLOCK_TIME_IS_VALID (base_video_decoder->timestamp_offset)) { GST_WARNING ("No base timestamp. Assuming frames start at segment start"); base_video_decoder->timestamp_offset = base_video_decoder->segment.start; base_video_decoder->field_index = 0; } } } frame->field_index = base_video_decoder->field_index; base_video_decoder->field_index += frame->n_fields; if (frame->presentation_timestamp == GST_CLOCK_TIME_NONE) { frame->presentation_timestamp = gst_base_video_decoder_get_field_timestamp (base_video_decoder, frame->field_index); frame->presentation_duration = GST_CLOCK_TIME_NONE; frame->decode_timestamp = gst_base_video_decoder_get_timestamp (base_video_decoder, frame->decode_frame_number); } if (frame->presentation_duration == GST_CLOCK_TIME_NONE) { frame->presentation_duration = gst_base_video_decoder_get_field_duration (base_video_decoder, frame->n_fields); } if (GST_CLOCK_TIME_IS_VALID (base_video_decoder->last_timestamp)) { if (frame->presentation_timestamp < base_video_decoder->last_timestamp) { GST_WARNING ("decreasing timestamp (%" GST_TIME_FORMAT " < %" GST_TIME_FORMAT ")", GST_TIME_ARGS (frame->presentation_timestamp), GST_TIME_ARGS (base_video_decoder->last_timestamp)); } } base_video_decoder->last_timestamp = frame->presentation_timestamp; src_buffer = frame->src_buffer; GST_BUFFER_FLAG_UNSET (src_buffer, GST_BUFFER_FLAG_DELTA_UNIT); if (base_video_decoder->state.interlaced) { #ifndef GST_VIDEO_BUFFER_TFF #define GST_VIDEO_BUFFER_TFF (GST_MINI_OBJECT_FLAG_LAST << 5) #endif #ifndef GST_VIDEO_BUFFER_RFF #define GST_VIDEO_BUFFER_RFF (GST_MINI_OBJECT_FLAG_LAST << 6) #endif #ifndef GST_VIDEO_BUFFER_ONEFIELD #define GST_VIDEO_BUFFER_ONEFIELD (GST_MINI_OBJECT_FLAG_LAST << 7) #endif if (GST_VIDEO_FRAME_FLAG_IS_SET (frame, GST_VIDEO_FRAME_FLAG_TFF)) { GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_TFF); } else { GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_TFF); } GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_RFF); GST_BUFFER_FLAG_UNSET (src_buffer, GST_VIDEO_BUFFER_ONEFIELD); if (frame->n_fields == 3) { GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_RFF); } else if (frame->n_fields == 1) { GST_BUFFER_FLAG_SET (src_buffer, GST_VIDEO_BUFFER_ONEFIELD); } } if (base_video_decoder->discont) { 
GST_BUFFER_FLAG_UNSET (src_buffer, GST_BUFFER_FLAG_DISCONT); base_video_decoder->discont = FALSE; } GST_BUFFER_TIMESTAMP (src_buffer) = frame->presentation_timestamp; GST_BUFFER_DURATION (src_buffer) = frame->presentation_duration; GST_BUFFER_OFFSET (src_buffer) = GST_BUFFER_OFFSET_NONE; GST_BUFFER_OFFSET_END (src_buffer) = GST_BUFFER_OFFSET_NONE; GST_DEBUG ("pushing frame %" GST_TIME_FORMAT, GST_TIME_ARGS (frame->presentation_timestamp)); gst_base_video_decoder_set_src_caps (base_video_decoder); if (base_video_decoder->sink_clipping) { gint64 start = GST_BUFFER_TIMESTAMP (src_buffer); gint64 stop = GST_BUFFER_TIMESTAMP (src_buffer) + GST_BUFFER_DURATION (src_buffer); if (gst_segment_clip (&base_video_decoder->segment, GST_FORMAT_TIME, start, stop, &start, &stop)) { GST_BUFFER_TIMESTAMP (src_buffer) = start; GST_BUFFER_DURATION (src_buffer) = stop - start; GST_DEBUG ("accepting buffer inside segment: %" GST_TIME_FORMAT " %" GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT " time %" GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (src_buffer)), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (src_buffer) + GST_BUFFER_DURATION (src_buffer)), GST_TIME_ARGS (base_video_decoder->segment.start), GST_TIME_ARGS (base_video_decoder->segment.stop), GST_TIME_ARGS (base_video_decoder->segment.time)); } else { GST_DEBUG ("dropping buffer outside segment: %" GST_TIME_FORMAT " %" GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT " time %" GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (src_buffer)), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (src_buffer) + GST_BUFFER_DURATION (src_buffer)), GST_TIME_ARGS (base_video_decoder->segment.start), GST_TIME_ARGS (base_video_decoder->segment.stop), GST_TIME_ARGS (base_video_decoder->segment.time)); gst_video_frame_unref (frame); return GST_FLOW_OK; } } gst_buffer_ref (src_buffer); gst_video_frame_unref (frame); if (base_video_decoder_class->shape_output) return base_video_decoder_class->shape_output (base_video_decoder, src_buffer); return gst_pad_push (GST_BASE_VIDEO_DECODER_SRC_PAD (base_video_decoder), src_buffer); }
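/* Condensed sketch of the sink_clipping branch shared by both
 * gst_base_video_decoder_finish_frame() variants above: clamp a buffer's
 * timestamp/duration to the segment, or report that it must be dropped.  The
 * helper name and the example values are assumptions, not library API. */
#include <gst/gst.h>

static gboolean
clip_buffer_to_segment (const GstSegment * segment, GstBuffer * buf)
{
  guint64 start = GST_BUFFER_TIMESTAMP (buf);
  guint64 stop = start + GST_BUFFER_DURATION (buf);

  if (!gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, &start, &stop))
    return FALSE;               /* entirely outside: caller drops the buffer */

  GST_BUFFER_TIMESTAMP (buf) = start;
  GST_BUFFER_DURATION (buf) = stop - start;
  return TRUE;
}

int
main (int argc, char **argv)
{
  GstSegment segment;
  GstBuffer *buf;

  gst_init (&argc, &argv);
  gst_segment_init (&segment, GST_FORMAT_TIME);
  segment.start = GST_SECOND;
  segment.stop = 2 * GST_SECOND;

  buf = gst_buffer_new ();
  GST_BUFFER_TIMESTAMP (buf) = GST_SECOND / 2;  /* overlaps the segment start */
  GST_BUFFER_DURATION (buf) = GST_SECOND;

  if (clip_buffer_to_segment (&segment, buf))
    g_print ("kept: %" GST_TIME_FORMAT " +%" GST_TIME_FORMAT "\n",
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
        GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
  else
    g_print ("dropped\n");

  gst_buffer_unref (buf);
  return 0;
}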
static GstFlowReturn gst_dvbsub_overlay_chain_video (GstPad * pad, GstObject * parent, GstBuffer * buffer) { GstDVBSubOverlay *overlay = GST_DVBSUB_OVERLAY (parent); GstFlowReturn ret = GST_FLOW_OK; gint64 start, stop; guint64 cstart, cstop; gboolean in_seg; GstClockTime vid_running_time, vid_running_time_end; if (GST_VIDEO_INFO_FORMAT (&overlay->info) == GST_VIDEO_FORMAT_UNKNOWN) return GST_FLOW_NOT_NEGOTIATED; if (!GST_BUFFER_TIMESTAMP_IS_VALID (buffer)) goto missing_timestamp; start = GST_BUFFER_TIMESTAMP (buffer); GST_LOG_OBJECT (overlay, "Video segment: %" GST_SEGMENT_FORMAT " --- Subtitle position: %" GST_TIME_FORMAT " --- BUFFER: ts=%" GST_TIME_FORMAT, &overlay->video_segment, GST_TIME_ARGS (overlay->subtitle_segment.position), GST_TIME_ARGS (start)); /* ignore buffers that are outside of the current segment */ if (!GST_BUFFER_DURATION_IS_VALID (buffer)) { stop = GST_CLOCK_TIME_NONE; } else { stop = start + GST_BUFFER_DURATION (buffer); } in_seg = gst_segment_clip (&overlay->video_segment, GST_FORMAT_TIME, start, stop, &cstart, &cstop); if (!in_seg) { GST_DEBUG_OBJECT (overlay, "Buffer outside configured segment -- dropping"); gst_buffer_unref (buffer); return GST_FLOW_OK; } buffer = gst_buffer_make_writable (buffer); GST_BUFFER_TIMESTAMP (buffer) = cstart; if (GST_BUFFER_DURATION_IS_VALID (buffer)) GST_BUFFER_DURATION (buffer) = cstop - cstart; vid_running_time = gst_segment_to_running_time (&overlay->video_segment, GST_FORMAT_TIME, cstart); if (GST_BUFFER_DURATION_IS_VALID (buffer)) vid_running_time_end = gst_segment_to_running_time (&overlay->video_segment, GST_FORMAT_TIME, cstop); else vid_running_time_end = vid_running_time; GST_DEBUG_OBJECT (overlay, "Video running time: %" GST_TIME_FORMAT, GST_TIME_ARGS (vid_running_time)); overlay->video_segment.position = GST_BUFFER_TIMESTAMP (buffer); g_mutex_lock (&overlay->dvbsub_mutex); if (!g_queue_is_empty (overlay->pending_subtitles)) { DVBSubtitles *tmp, *candidate = NULL; while (!g_queue_is_empty (overlay->pending_subtitles)) { tmp = g_queue_peek_head (overlay->pending_subtitles); if (tmp->pts > vid_running_time_end) { /* For a future video frame */ break; } else if (tmp->num_rects == 0) { /* Clear screen */ if (overlay->current_subtitle) dvb_subtitles_free (overlay->current_subtitle); overlay->current_subtitle = NULL; if (candidate) dvb_subtitles_free (candidate); candidate = NULL; g_queue_pop_head (overlay->pending_subtitles); dvb_subtitles_free (tmp); tmp = NULL; } else if (tmp->pts + tmp->page_time_out * GST_SECOND * ABS (overlay->subtitle_segment.rate) >= vid_running_time) { if (candidate) dvb_subtitles_free (candidate); candidate = tmp; g_queue_pop_head (overlay->pending_subtitles); } else { /* Too late */ dvb_subtitles_free (tmp); tmp = NULL; g_queue_pop_head (overlay->pending_subtitles); } } if (candidate) { GST_DEBUG_OBJECT (overlay, "Time to show the next subtitle page (%" GST_TIME_FORMAT " >= %" GST_TIME_FORMAT ") - it has %u regions", GST_TIME_ARGS (vid_running_time), GST_TIME_ARGS (candidate->pts), candidate->num_rects); dvb_subtitles_free (overlay->current_subtitle); overlay->current_subtitle = candidate; if (overlay->current_comp) gst_video_overlay_composition_unref (overlay->current_comp); overlay->current_comp = gst_dvbsub_overlay_subs_to_comp (overlay, overlay->current_subtitle); } } /* Check that we haven't hit the fallback timeout for current subtitle page */ if (overlay->current_subtitle && vid_running_time > (overlay->current_subtitle->pts + overlay->current_subtitle->page_time_out * GST_SECOND * ABS 
(overlay->subtitle_segment.rate))) { GST_INFO_OBJECT (overlay, "Subtitle page not redefined before fallback page_time_out of %u seconds (missed data?) - deleting current page", overlay->current_subtitle->page_time_out); dvb_subtitles_free (overlay->current_subtitle); overlay->current_subtitle = NULL; } /* Now render it */ if (g_atomic_int_get (&overlay->enable) && overlay->current_subtitle) { GstVideoFrame frame; g_assert (overlay->current_comp); if (overlay->attach_compo_to_buffer) { GST_DEBUG_OBJECT (overlay, "Attaching overlay image to video buffer"); gst_buffer_add_video_overlay_composition_meta (buffer, overlay->current_comp); } else { GST_DEBUG_OBJECT (overlay, "Blending overlay image to video buffer"); gst_video_frame_map (&frame, &overlay->info, buffer, GST_MAP_READWRITE); gst_video_overlay_composition_blend (overlay->current_comp, &frame); gst_video_frame_unmap (&frame); } } g_mutex_unlock (&overlay->dvbsub_mutex); ret = gst_pad_push (overlay->srcpad, buffer); return ret; missing_timestamp: { GST_WARNING_OBJECT (overlay, "video buffer without timestamp, discarding"); gst_buffer_unref (buffer); return GST_FLOW_OK; } }
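/* Hedged sketch of the page lifetime rule implemented above: a decoded DVB
 * subtitle page becomes visible once the video's running time reaches its pts
 * and expires page_time_out seconds later, scaled by the absolute segment
 * rate.  The helper and the values in main() are illustrative assumptions. */
#include <gst/gst.h>

static gboolean
page_is_active (GstClockTime page_pts, guint page_time_out, gdouble abs_rate,
    GstClockTime vid_running_time)
{
  GstClockTime expiry =
      page_pts + (GstClockTime) (page_time_out * GST_SECOND * abs_rate);

  return vid_running_time >= page_pts && vid_running_time < expiry;
}

int
main (void)
{
  /* a page at 1s with a 5s timeout is still active at 3s of running time */
  g_print ("active: %d\n",
      page_is_active (GST_SECOND, 5, 1.0, 3 * GST_SECOND));
  return 0;
}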
static gboolean gst_dshowvideodec_push_buffer (byte * buffer, long size, byte * src_object, UINT64 start, UINT64 stop) { GstDshowVideoDec *vdec = (GstDshowVideoDec *) src_object; GstDshowVideoDecClass *klass = (GstDshowVideoDecClass *) G_OBJECT_GET_CLASS (vdec); GstBuffer *buf = NULL; gboolean in_seg = FALSE; gint64 clip_start = 0, clip_stop = 0; /* check if this buffer is in our current segment */ in_seg = gst_segment_clip (vdec->segment, GST_FORMAT_TIME, start, stop, &clip_start, &clip_stop); /* if the buffer is out of segment do not push it downstream */ if (!in_seg) { GST_CAT_DEBUG_OBJECT (dshowvideodec_debug, vdec, "buffer is out of segment, start %" GST_TIME_FORMAT " stop %" GST_TIME_FORMAT, GST_TIME_ARGS (start), GST_TIME_ARGS (stop)); return FALSE; } /* buffer is in our segment; allocate a new output buffer and clip its * timestamps */ vdec->last_ret = gst_pad_alloc_buffer (vdec->srcpad, GST_BUFFER_OFFSET_NONE, size, GST_PAD_CAPS (vdec->srcpad), &buf); if (!buf) { GST_CAT_WARNING_OBJECT (dshowvideodec_debug, vdec, "can't allocate a new GstBuffer"); return FALSE; } /* set buffer properties */ GST_BUFFER_TIMESTAMP (buf) = clip_start; GST_BUFFER_DURATION (buf) = clip_stop - clip_start; if (strstr (klass->entry->srccaps, "rgb")) { /* For RGB the DirectShow decoder returns bottom-up bitmaps. * There is probably a way to get top-down video frames from * the decoder... */ gint line = 0; guint stride = vdec->width * 4; for (; line < vdec->height; line++) { memcpy (GST_BUFFER_DATA (buf) + (line * stride), buffer + (size - ((line + 1) * (stride))), stride); } } else { memcpy (GST_BUFFER_DATA (buf), buffer, MIN (size, GST_BUFFER_SIZE (buf))); } GST_CAT_LOG_OBJECT (dshowvideodec_debug, vdec, "push_buffer (size %ld) => pts %" GST_TIME_FORMAT " stop %" GST_TIME_FORMAT " duration %" GST_TIME_FORMAT, size, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf)), GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); /* push the buffer downstream */ vdec->last_ret = gst_pad_push (vdec->srcpad, buf); return TRUE; }
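/* Stand-alone version of the bottom-up copy loop in
 * gst_dshowvideodec_push_buffer() above: DirectShow delivers RGB frames
 * bottom-up, so each row is copied from the end of the source buffer into
 * top-down order.  The 4-byte pixel size and the tiny 2x2 frame are
 * assumptions for the demo. */
#include <string.h>
#include <glib.h>

static void
flip_bottom_up_rgb (guint8 * dst, const guint8 * src, gsize size,
    guint width, guint height)
{
  guint stride = width * 4;     /* 32-bit RGB rows */
  guint line;

  for (line = 0; line < height; line++)
    memcpy (dst + line * stride, src + size - (line + 1) * stride, stride);
}

int
main (void)
{
  guint8 src[2 * 2 * 4], dst[2 * 2 * 4];
  guint i;

  for (i = 0; i < sizeof (src); i++)
    src[i] = (guint8) i;

  flip_bottom_up_rgb (dst, src, sizeof (src), 2, 2);
  /* byte 8 (start of the bottom source row) is now the first output byte */
  g_print ("dst[0] = %u\n", dst[0]);
  return 0;
}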
static GstFlowReturn gst_base_video_encoder_chain (GstPad * pad, GstBuffer * buf) { GstBaseVideoEncoder *base_video_encoder; GstBaseVideoEncoderClass *klass; GstVideoFrame *frame; GstFlowReturn ret = GST_FLOW_OK; base_video_encoder = GST_BASE_VIDEO_ENCODER (gst_pad_get_parent (pad)); klass = GST_BASE_VIDEO_ENCODER_GET_CLASS (base_video_encoder); g_return_val_if_fail (klass->handle_frame != NULL, GST_FLOW_ERROR); GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_encoder); if (!GST_PAD_CAPS (pad)) { ret = GST_FLOW_NOT_NEGOTIATED; goto done; } GST_LOG_OBJECT (base_video_encoder, "received buffer of size %d with ts %" GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); if (base_video_encoder->a.at_eos) { ret = GST_FLOW_UNEXPECTED; goto done; } if (base_video_encoder->sink_clipping) { gint64 start = GST_BUFFER_TIMESTAMP (buf); gint64 stop = start + GST_BUFFER_DURATION (buf); gint64 clip_start; gint64 clip_stop; if (!gst_segment_clip (&GST_BASE_VIDEO_CODEC (base_video_encoder)->segment, GST_FORMAT_TIME, start, stop, &clip_start, &clip_stop)) { GST_DEBUG_OBJECT (base_video_encoder, "clipping to segment dropped frame"); goto done; } } if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT))) { GST_LOG_OBJECT (base_video_encoder, "marked discont"); GST_BASE_VIDEO_CODEC (base_video_encoder)->discont = TRUE; } frame = gst_base_video_codec_new_frame (GST_BASE_VIDEO_CODEC (base_video_encoder)); frame->events = base_video_encoder->current_frame_events; base_video_encoder->current_frame_events = NULL; frame->sink_buffer = buf; frame->presentation_timestamp = GST_BUFFER_TIMESTAMP (buf); frame->presentation_duration = GST_BUFFER_DURATION (buf); frame->presentation_frame_number = base_video_encoder->presentation_frame_number; base_video_encoder->presentation_frame_number++; frame->force_keyframe = base_video_encoder->force_keyframe; base_video_encoder->force_keyframe = FALSE; GST_BASE_VIDEO_CODEC (base_video_encoder)->frames = g_list_append (GST_BASE_VIDEO_CODEC (base_video_encoder)->frames, frame); /* new data, more finish needed */ base_video_encoder->drained = FALSE; GST_LOG_OBJECT (base_video_encoder, "passing frame pfn %d to subclass", frame->presentation_frame_number); ret = klass->handle_frame (base_video_encoder, frame); done: GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder); g_object_unref (base_video_encoder); return ret; }