/* Sink-pad chain function for the libvisual element.
 *
 * Accumulates incoming raw audio in visual->adapter and, whenever at least
 * 512 samples AND one frame's worth (spf samples) are available, feeds 512
 * samples to libvisual, renders one video frame and pushes it on the source
 * pad.  Takes ownership of @buffer; returns the flow result of the last
 * push (or of buffer allocation on failure).
 */
static GstFlowReturn
gst_visual_chain (GstPad * pad, GstBuffer * buffer)
{
  GstBuffer *outbuf = NULL;
  guint i;
  GstVisual *visual = GST_VISUAL (gst_pad_get_parent (pad));
  GstFlowReturn ret = GST_FLOW_OK;
  guint avail;

  GST_DEBUG_OBJECT (visual, "chain function called");

  /* If we don't have an output format yet, preallocate a buffer to try and
   * set one */
  if (GST_PAD_CAPS (visual->srcpad) == NULL) {
    ret = get_buffer (visual, &outbuf);
    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buffer);
      goto beach;
    }
  }

  /* resync on DISCONT: drop any partially accumulated samples */
  if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) {
    gst_adapter_clear (visual->adapter);
  }

  GST_DEBUG_OBJECT (visual,
      "Input buffer has %d samples, time=%" G_GUINT64_FORMAT,
      GST_BUFFER_SIZE (buffer) / visual->bps, GST_BUFFER_TIMESTAMP (buffer));

  /* adapter takes ownership of the input buffer */
  gst_adapter_push (visual->adapter, buffer);

  while (TRUE) {
    gboolean need_skip;
    const guint16 *data;
    guint64 dist, timestamp;

    GST_DEBUG_OBJECT (visual, "processing buffer");

    avail = gst_adapter_available (visual->adapter);
    GST_DEBUG_OBJECT (visual, "avail now %u", avail);

    /* we need at least 512 samples */
    if (avail < 512 * visual->bps)
      break;

    /* we need at least enough samples to make one frame */
    if (avail < visual->spf * visual->bps)
      break;

    /* get timestamp of the current adapter byte; dist is the byte distance
     * from the buffer that carried the last valid timestamp */
    timestamp = gst_adapter_prev_timestamp (visual->adapter, &dist);
    if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
      /* convert bytes to time */
      dist /= visual->bps;
      timestamp += gst_util_uint64_scale_int (dist, GST_SECOND, visual->rate);
    }

    if (timestamp != -1) {
      gint64 qostime;

      /* QoS is done on running time */
      qostime = gst_segment_to_running_time (&visual->segment, GST_FORMAT_TIME,
          timestamp);

      GST_OBJECT_LOCK (visual);
      /* check for QoS, don't compute buffers that are known to be late;
       * earliest_time is updated from QoS events elsewhere, hence the lock */
      need_skip = visual->earliest_time != -1 &&
          qostime <= visual->earliest_time;
      GST_OBJECT_UNLOCK (visual);

      if (need_skip) {
        GST_WARNING_OBJECT (visual,
            "QoS: skip ts: %" GST_TIME_FORMAT ", earliest: %" GST_TIME_FORMAT,
            GST_TIME_ARGS (qostime), GST_TIME_ARGS (visual->earliest_time));
        /* still flushes spf samples below so the stream keeps advancing */
        goto skip;
      }
    }

    /* Read 512 samples per channel */
    data =
        (const guint16 *) gst_adapter_peek (visual->adapter,
        512 * visual->bps);

#if defined(VISUAL_API_VERSION) && VISUAL_API_VERSION >= 4000 && VISUAL_API_VERSION < 5000
    {
      VisBuffer *lbuf, *rbuf;
      guint16 ldata[512], rdata[512];
      VisAudioSampleRateType rate;

      /* wrap the stack arrays in VisBuffers for the samplepool API */
      lbuf = visual_buffer_new_with_buffer (ldata, sizeof (ldata), NULL);
      rbuf = visual_buffer_new_with_buffer (rdata, sizeof (rdata), NULL);

      if (visual->channels == 2) {
        /* deinterleave stereo into left/right channel buffers */
        for (i = 0; i < 512; i++) {
          ldata[i] = *data++;
          rdata[i] = *data++;
        }
      } else {
        /* mono: duplicate each sample into both channels */
        for (i = 0; i < 512; i++) {
          ldata[i] = *data;
          rdata[i] = *data++;
        }
      }

      /* map the GStreamer sample rate onto libvisual's rate enum */
      switch (visual->rate) {
        case 8000:
          rate = VISUAL_AUDIO_SAMPLE_RATE_8000;
          break;
        case 11250:
          rate = VISUAL_AUDIO_SAMPLE_RATE_11250;
          break;
        case 22500:
          rate = VISUAL_AUDIO_SAMPLE_RATE_22500;
          break;
        case 32000:
          rate = VISUAL_AUDIO_SAMPLE_RATE_32000;
          break;
        case 44100:
          rate = VISUAL_AUDIO_SAMPLE_RATE_44100;
          break;
        case 48000:
          rate = VISUAL_AUDIO_SAMPLE_RATE_48000;
          break;
        case 96000:
          rate = VISUAL_AUDIO_SAMPLE_RATE_96000;
          break;
        default:
          /* rate not representable by libvisual: clean up and error out */
          visual_object_unref (VISUAL_OBJECT (lbuf));
          visual_object_unref (VISUAL_OBJECT (rbuf));
          GST_ERROR_OBJECT (visual, "unsupported rate %d", visual->rate);
          ret = GST_FLOW_ERROR;
          goto beach;
          break;
      }

      visual_audio_samplepool_input_channel (visual->audio->samplepool,
          lbuf, rate, VISUAL_AUDIO_SAMPLE_FORMAT_S16,
          (char *) VISUAL_AUDIO_CHANNEL_LEFT);
      visual_audio_samplepool_input_channel (visual->audio->samplepool,
          rbuf, rate, VISUAL_AUDIO_SAMPLE_FORMAT_S16,
          (char *) VISUAL_AUDIO_CHANNEL_RIGHT);

      visual_object_unref (VISUAL_OBJECT (lbuf));
      visual_object_unref (VISUAL_OBJECT (rbuf));
    }
#else
    /* pre-0.4 libvisual API: write straight into the plugin PCM arrays */
    if (visual->channels == 2) {
      for (i = 0; i < 512; i++) {
        visual->audio->plugpcm[0][i] = *data++;
        visual->audio->plugpcm[1][i] = *data++;
      }
    } else {
      /* mono: duplicate each sample into both channels */
      for (i = 0; i < 512; i++) {
        visual->audio->plugpcm[0][i] = *data;
        visual->audio->plugpcm[1][i] = *data++;
      }
    }
#endif

    /* alloc a buffer if we don't have one yet, this happens
     * when we pushed a buffer in this while loop before */
    if (outbuf == NULL) {
      ret = get_buffer (visual, &outbuf);
      if (ret != GST_FLOW_OK) {
        goto beach;
      }
    }

    /* point the libvisual video target at our output buffer, render,
     * then detach again before the buffer is pushed away */
    visual_video_set_buffer (visual->video, GST_BUFFER_DATA (outbuf));
    visual_audio_analyze (visual->audio);
    visual_actor_run (visual->actor, visual->audio);
    visual_video_set_buffer (visual->video, NULL);
    GST_DEBUG_OBJECT (visual, "rendered one frame");

    GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
    GST_BUFFER_DURATION (outbuf) = visual->duration;

    /* push transfers ownership; NULL it so we don't unref it at beach */
    ret = gst_pad_push (visual->srcpad, outbuf);
    outbuf = NULL;

  skip:
    GST_DEBUG_OBJECT (visual, "finished frame, flushing %u samples from input",
        visual->spf);

    /* Flush out the number of samples per frame */
    gst_adapter_flush (visual->adapter, visual->spf * visual->bps);

    /* quit the loop if something was wrong */
    if (ret != GST_FLOW_OK)
      break;
  }

beach:
  /* release a preallocated/unpushed output buffer, if any */
  if (outbuf != NULL)
    gst_buffer_unref (outbuf);
  gst_object_unref (visual);

  return ret;
}
/* Sink-pad chain function for the goom visualizer.
 *
 * Accumulates incoming audio in goom->adapter and, whenever GOOM_SAMPLES
 * samples and at least one frame's worth of bytes (bpf) are available,
 * renders one goom frame and pushes it downstream.  Takes ownership of
 * @buffer; returns GST_FLOW_NOT_NEGOTIATED if no input format was set yet,
 * otherwise the result of allocation/push.
 *
 * Fix: the leftover-outbuf cleanup used to sit BEFORE the beach: label, so
 * any `goto beach` taken while a preallocated output buffer was live would
 * leak it.  The cleanup now follows the label, matching gst_visual_chain;
 * behavior on all currently-reachable paths is unchanged.
 */
static GstFlowReturn
gst_goom_chain (GstPad * pad, GstBuffer * buffer)
{
  GstGoom *goom;
  GstFlowReturn ret;
  GstBuffer *outbuf = NULL;

  goom = GST_GOOM (gst_pad_get_parent (pad));

  /* no format negotiated yet, we cannot interpret the samples */
  if (goom->bps == 0) {
    ret = GST_FLOW_NOT_NEGOTIATED;
    goto beach;
  }

  /* If we don't have an output format yet, preallocate a buffer to try and
   * set one */
  if (GST_PAD_CAPS (goom->srcpad) == NULL) {
    ret = get_buffer (goom, &outbuf);
    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buffer);
      goto beach;
    }
  }

  /* don't try to combine samples from discont buffer */
  if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) {
    gst_adapter_clear (goom->adapter);
  }

  GST_DEBUG_OBJECT (goom,
      "Input buffer has %d samples, time=%" G_GUINT64_FORMAT,
      GST_BUFFER_SIZE (buffer) / goom->bps, GST_BUFFER_TIMESTAMP (buffer));

  /* Collect samples until we have enough for an output frame */
  gst_adapter_push (goom->adapter, buffer);

  ret = GST_FLOW_OK;

  while (TRUE) {
    const guint16 *data;
    gboolean need_skip;
    guchar *out_frame;
    gint i;
    guint avail, to_flush;
    guint64 dist, timestamp;

    avail = gst_adapter_available (goom->adapter);
    GST_DEBUG_OBJECT (goom, "avail now %u", avail);

    /* we need GOOM_SAMPLES to get a meaningful result from goom. */
    if (avail < (GOOM_SAMPLES * goom->bps))
      break;

    /* we also need enough samples to produce one frame at least */
    if (avail < goom->bpf)
      break;

    GST_DEBUG_OBJECT (goom, "processing buffer");

    /* get timestamp of the current adapter byte; dist is the byte distance
     * from the last buffer that carried a valid timestamp */
    timestamp = gst_adapter_prev_timestamp (goom->adapter, &dist);
    if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
      /* convert bytes to time */
      dist /= goom->bps;
      timestamp += gst_util_uint64_scale_int (dist, GST_SECOND, goom->rate);
    }

    if (timestamp != -1) {
      gint64 qostime;

      /* QoS decisions are made on running time, at the end of the frame */
      qostime = gst_segment_to_running_time (&goom->segment, GST_FORMAT_TIME,
          timestamp);
      qostime += goom->duration;

      GST_OBJECT_LOCK (goom);
      /* check for QoS, don't compute buffers that are known to be late */
      need_skip = goom->earliest_time != -1 && qostime <= goom->earliest_time;
      GST_OBJECT_UNLOCK (goom);

      if (need_skip) {
        GST_WARNING_OBJECT (goom,
            "QoS: skip ts: %" GST_TIME_FORMAT ", earliest: %" GST_TIME_FORMAT,
            GST_TIME_ARGS (qostime), GST_TIME_ARGS (goom->earliest_time));
        /* still flushes bpf bytes below so the stream keeps advancing */
        goto skip;
      }
    }

    /* get next GOOM_SAMPLES, we have at least this amount of samples */
    data =
        (const guint16 *) gst_adapter_peek (goom->adapter,
        GOOM_SAMPLES * goom->bps);

    if (goom->channels == 2) {
      /* deinterleave stereo into goom's two input channels */
      for (i = 0; i < GOOM_SAMPLES; i++) {
        goom->datain[0][i] = *data++;
        goom->datain[1][i] = *data++;
      }
    } else {
      /* mono: duplicate each sample into both channels */
      for (i = 0; i < GOOM_SAMPLES; i++) {
        goom->datain[0][i] = *data;
        goom->datain[1][i] = *data++;
      }
    }

    /* alloc a buffer if we don't have one yet, this happens
     * when we pushed a buffer in this while loop before */
    if (outbuf == NULL) {
      ret = get_buffer (goom, &outbuf);
      if (ret != GST_FLOW_OK) {
        goto beach;
      }
    }

    GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
    GST_BUFFER_DURATION (outbuf) = goom->duration;
    GST_BUFFER_SIZE (outbuf) = goom->outsize;

    /* goom renders into its own internal frame; copy it out */
    out_frame = (guchar *) goom_update (goom->plugin, goom->datain, 0, 0);
    memcpy (GST_BUFFER_DATA (outbuf), out_frame, goom->outsize);

    GST_DEBUG ("Pushing frame with time=%" GST_TIME_FORMAT ", duration=%"
        GST_TIME_FORMAT, GST_TIME_ARGS (timestamp),
        GST_TIME_ARGS (goom->duration));

    /* push transfers ownership; NULL it so the cleanup below can't double
     * free */
    ret = gst_pad_push (goom->srcpad, outbuf);
    outbuf = NULL;

  skip:
    /* Now flush the samples we needed for this frame, which might be more than
     * the samples we used (GOOM_SAMPLES). */
    to_flush = goom->bpf;

    GST_DEBUG_OBJECT (goom, "finished frame, flushing %u bytes from input",
        to_flush);
    gst_adapter_flush (goom->adapter, to_flush);

    /* quit the loop if something was wrong */
    if (ret != GST_FLOW_OK)
      break;
  }

beach:
  /* release a preallocated/unpushed output buffer on ALL exit paths,
   * including the goto beach error paths above */
  if (outbuf != NULL)
    gst_buffer_unref (outbuf);

  gst_object_unref (goom);

  return ret;
}
/* Sink-pad chain function for the Siren7 encoder.
 *
 * Accumulates raw audio in enc->adapter and encodes it in whole frames:
 * every 640 input bytes produce 40 output bytes (Siren7_EncodeFrame).
 * All complete frames currently in the adapter are encoded into a single
 * output buffer and pushed at once.  Takes ownership of @buf.
 * The distance/2 @ 16000 scaling implies 16-bit samples at 16 kHz;
 * presumably mono — confirm against the sink caps.
 */
static GstFlowReturn
gst_siren_enc_chain (GstPad * pad, GstBuffer * buf)
{
  GstSirenEnc *enc;
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *out_buf;
  guint8 *in_data, *out_data;
  guint8 *to_free = NULL;
  guint i, size, num_frames;
  gint out_size, in_size;
  gint encode_ret;
  gboolean discont;
  GstClockTime timestamp;
  guint64 distance;
  GstCaps *outcaps;

  enc = GST_SIREN_ENC (GST_PAD_PARENT (pad));

  discont = GST_BUFFER_IS_DISCONT (buf);
  if (discont) {
    /* drop partial frame data; remember to flag the next output buffer */
    GST_DEBUG_OBJECT (enc, "received DISCONT, flush adapter");
    gst_adapter_clear (enc->adapter);
    enc->discont = TRUE;
  }

  /* adapter takes ownership of the input buffer */
  gst_adapter_push (enc->adapter, buf);

  size = gst_adapter_available (enc->adapter);

  GST_LOG_OBJECT (enc, "Received buffer of size %d with adapter of size : %d",
      GST_BUFFER_SIZE (buf), size);

  /* we need to process 640 input bytes to produce 40 output bytes */
  /* calculate the amount of frames we will handle */
  num_frames = size / 640;

  /* no frames, wait some more */
  if (num_frames == 0)
    goto done;

  /* this is the input/output size */
  in_size = num_frames * 640;
  out_size = num_frames * 40;

  GST_LOG_OBJECT (enc, "we have %u frames, %u in, %u out", num_frames, in_size,
      out_size);

  /* set output caps when needed */
  if ((outcaps = GST_PAD_CAPS (enc->srcpad)) == NULL) {
    outcaps = gst_static_pad_template_get_caps (&srctemplate);
    gst_pad_set_caps (enc->srcpad, outcaps);
    gst_caps_unref (outcaps);
  }

  /* get a buffer */
  ret = gst_pad_alloc_buffer_and_set_caps (enc->srcpad, -1,
      out_size, outcaps, &out_buf);
  if (ret != GST_FLOW_OK)
    goto alloc_failed;

  /* get the timestamp for the output buffer; distance is the byte offset
   * from the last timestamped input buffer */
  timestamp = gst_adapter_prev_timestamp (enc->adapter, &distance);

  /* add the amount of time taken by the distance */
  if (timestamp != -1)
    timestamp += gst_util_uint64_scale_int (distance / 2, GST_SECOND, 16000);

  GST_LOG_OBJECT (enc,
      "timestamp %" GST_TIME_FORMAT ", distance %" G_GUINT64_FORMAT,
      GST_TIME_ARGS (timestamp), distance);

  /* get the input data for all the frames; gst_adapter_take returns a
   * newly-allocated copy that we must free (kept in to_free, since
   * in_data is advanced in the loop below) */
  to_free = in_data = gst_adapter_take (enc->adapter, in_size);
  out_data = GST_BUFFER_DATA (out_buf);

  for (i = 0; i < num_frames; i++) {
    GST_LOG_OBJECT (enc, "Encoding frame %u/%u", i, num_frames);

    /* encode 640 input bytes to 40 output bytes */
    encode_ret = Siren7_EncodeFrame (enc->encoder, in_data, out_data);
    if (encode_ret != 0)
      goto encode_error;

    /* move to next frame */
    out_data += 40;
    in_data += 640;
  }

  GST_LOG_OBJECT (enc, "Finished encoding");

  /* mark discont */
  if (enc->discont) {
    GST_BUFFER_FLAG_SET (out_buf, GST_BUFFER_FLAG_DISCONT);
    enc->discont = FALSE;
  }
  GST_BUFFER_TIMESTAMP (out_buf) = timestamp;
  GST_BUFFER_DURATION (out_buf) = num_frames * FRAME_DURATION;

  ret = gst_pad_push (enc->srcpad, out_buf);

done:
  if (to_free)
    g_free (to_free);

  return ret;

  /* ERRORS */
alloc_failed:
  {
    GST_DEBUG_OBJECT (enc, "failed to pad_alloc buffer: %d (%s)", ret,
        gst_flow_get_name (ret));
    goto done;
  }
encode_error:
  {
    GST_ELEMENT_ERROR (enc, STREAM, ENCODE, (NULL),
        ("Error encoding frame: %d", encode_ret));
    ret = GST_FLOW_ERROR;
    /* out_buf was never pushed, release it before bailing out */
    gst_buffer_unref (out_buf);
    goto done;
  }
}
/* Sink-pad chain function for the OpenGL libvisual element.
 *
 * Accumulates incoming audio in visual->adapter and, whenever at least
 * VISUAL_SAMPLES samples AND one frame's worth (spf samples) are available,
 * renders the libvisual plugin into an FBO, flips the result (GL is
 * bottom-up, gst video is top-down) into the output texture and pushes the
 * GL buffer downstream.  Takes ownership of @buffer.
 */
static GstFlowReturn
gst_visual_gl_chain (GstPad * pad, GstBuffer * buffer)
{
  GstGLBuffer *outbuf = NULL;
  GstVisualGL *visual = GST_VISUAL_GL (gst_pad_get_parent (pad));
  GstFlowReturn ret = GST_FLOW_OK;
  guint avail;

  GST_DEBUG_OBJECT (visual, "chain function called");

  /* If we don't have an output format yet, preallocate a buffer to try and
   * set one */
  if (GST_PAD_CAPS (visual->srcpad) == NULL) {
    ret = get_buffer (visual, &outbuf);
    if (ret != GST_FLOW_OK) {
      gst_buffer_unref (buffer);
      goto beach;
    }
  }

  /* resync on DISCONT: drop any partially accumulated samples */
  if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) {
    gst_adapter_clear (visual->adapter);
  }

  GST_DEBUG_OBJECT (visual,
      "Input buffer has %d samples, time=%" G_GUINT64_FORMAT,
      GST_BUFFER_SIZE (buffer) / visual->bps, GST_BUFFER_TIMESTAMP (buffer));

  /* adapter takes ownership of the input buffer */
  gst_adapter_push (visual->adapter, buffer);

  while (TRUE) {
    gboolean need_skip;
    guint64 dist, timestamp;

    GST_DEBUG_OBJECT (visual, "processing buffer");

    avail = gst_adapter_available (visual->adapter);
    GST_DEBUG_OBJECT (visual, "avail now %u", avail);

    /* we need at least VISUAL_SAMPLES samples */
    if (avail < VISUAL_SAMPLES * visual->bps)
      break;

    /* we need at least enough samples to make one frame */
    if (avail < visual->spf * visual->bps)
      break;

    /* get timestamp of the current adapter byte; dist is the byte distance
     * from the last buffer that carried a valid timestamp */
    timestamp = gst_adapter_prev_timestamp (visual->adapter, &dist);
    if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
      /* convert bytes to time */
      dist /= visual->bps;
      timestamp += gst_util_uint64_scale_int (dist, GST_SECOND, visual->rate);
    }

    if (timestamp != -1) {
      gint64 qostime;

      /* QoS is done on running time, at the end of the frame */
      qostime = gst_segment_to_running_time (&visual->segment, GST_FORMAT_TIME,
          timestamp);
      qostime += visual->duration;

      GST_OBJECT_LOCK (visual);
      /* check for QoS, don't compute buffers that are known to be late */
      need_skip = visual->earliest_time != -1 &&
          qostime <= visual->earliest_time;
      GST_OBJECT_UNLOCK (visual);

      if (need_skip) {
        GST_WARNING_OBJECT (visual,
            "QoS: skip ts: %" GST_TIME_FORMAT ", earliest: %" GST_TIME_FORMAT,
            GST_TIME_ARGS (qostime), GST_TIME_ARGS (visual->earliest_time));
        /* still flushes spf samples below so the stream keeps advancing */
        goto skip;
      }
    }

    /* alloc a buffer if we don't have one yet, this happens
     * when we pushed a buffer in this while loop before */
    if (outbuf == NULL) {
      ret = get_buffer (visual, &outbuf);
      if (ret != GST_FLOW_OK) {
        goto beach;
      }
    }

    /* render libvisual plugin to our target */
    gst_gl_display_use_fbo_v2 (visual->display,
        visual->width, visual->height, visual->fbo, visual->depthbuffer,
        visual->midtexture, (GLCB_V2) render_frame, (gpointer *) visual);

    /* gst video is top-down whereas opengl plan is bottom up */
    gst_gl_display_use_fbo (visual->display,
        visual->width, visual->height, visual->fbo, visual->depthbuffer,
        outbuf->texture, (GLCB) bottom_up_to_top_down,
        visual->width, visual->height, visual->midtexture,
        0, visual->width, 0, visual->height,
        GST_GL_DISPLAY_PROJECTION_ORTHO2D, (gpointer *) visual);

    GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
    GST_BUFFER_DURATION (outbuf) = visual->duration;

    /* push transfers ownership; NULL it so we don't unref it at beach */
    ret = gst_pad_push (visual->srcpad, GST_BUFFER (outbuf));
    outbuf = NULL;

  skip:
    GST_DEBUG_OBJECT (visual, "finished frame, flushing %u samples from input",
        visual->spf);

    /* Flush out the number of samples per frame */
    gst_adapter_flush (visual->adapter, visual->spf * visual->bps);

    /* quit the loop if something was wrong */
    if (ret != GST_FLOW_OK)
      break;
  }

beach:
  /* release a preallocated/unpushed output buffer, if any */
  if (outbuf != NULL)
    gst_gl_buffer_unref (outbuf);
  gst_object_unref (visual);

  return ret;
}
/**
 * gst_base_rtp_audio_payload_flush:
 * @baseaudiopayload: a #GstBaseRTPPayload
 * @payload_len: length of payload
 * @timestamp: a #GstClockTime
 *
 * Create an RTP buffer and store @payload_len bytes of the adapter as the
 * payload. Set the timestamp on the new buffer to @timestamp before pushing
 * the buffer downstream.
 *
 * If @payload_len is -1, all pending bytes will be flushed. If @timestamp is
 * -1, the timestamp will be calculated automatically.
 *
 * Returns: a #GstFlowReturn
 *
 * Since: 0.10.25
 */
GstFlowReturn
gst_base_rtp_audio_payload_flush (GstBaseRTPAudioPayload * baseaudiopayload,
    guint payload_len, GstClockTime timestamp)
{
  GstBaseRTPPayload *basepayload;
  GstBaseRTPAudioPayloadPrivate *priv;
  GstBuffer *outbuf;
  guint8 *payload;
  GstFlowReturn ret;
  GstAdapter *adapter;
  guint64 distance;

  priv = baseaudiopayload->priv;
  adapter = priv->adapter;

  basepayload = GST_BASE_RTP_PAYLOAD (baseaudiopayload);

  /* -1 means "flush everything that is pending" */
  if (payload_len == -1)
    payload_len = gst_adapter_available (adapter);

  /* nothing to do, just return */
  if (payload_len == 0)
    return GST_FLOW_OK;

  if (timestamp == -1) {
    /* calculate the timestamp from the adapter: last seen timestamp plus
     * the time represented by the bytes consumed since then */
    timestamp = gst_adapter_prev_timestamp (adapter, &distance);

    GST_LOG_OBJECT (baseaudiopayload,
        "last timestamp %" GST_TIME_FORMAT ", distance %" G_GUINT64_FORMAT,
        GST_TIME_ARGS (timestamp), distance);

    if (GST_CLOCK_TIME_IS_VALID (timestamp) && distance > 0) {
      /* convert the number of bytes since the last timestamp to time and add to
       * the last seen timestamp */
      timestamp += priv->bytes_to_time (baseaudiopayload, distance);
    }
  }

  GST_DEBUG_OBJECT (baseaudiopayload, "Pushing %d bytes ts %" GST_TIME_FORMAT,
      payload_len, GST_TIME_ARGS (timestamp));

  /* fast path: when buffer lists are enabled and the adapter can hand out
   * the whole payload from one contiguous chunk, avoid the memcpy */
  if (priv->buffer_list && gst_adapter_available_fast (adapter) >= payload_len) {
    GstBuffer *buffer;
    /* we can quickly take a buffer out of the adapter without having to copy
     * anything. */
    buffer = gst_adapter_take_buffer (adapter, payload_len);

    ret = gst_base_rtp_audio_payload_push_buffer (baseaudiopayload, buffer);
  } else {
    /* create buffer to hold the payload */
    outbuf = gst_rtp_buffer_new_allocate (payload_len, 0, 0);

    /* copy payload */
    payload = gst_rtp_buffer_get_payload (outbuf);
    gst_adapter_copy (adapter, payload, 0, payload_len);
    gst_adapter_flush (adapter, payload_len);

    /* set metadata */
    gst_base_rtp_audio_payload_set_meta (baseaudiopayload, outbuf, payload_len,
        timestamp);

    ret = gst_basertppayload_push (basepayload, outbuf);
  }

  return ret;
}
/* Sink-pad chain function for the ffmpeg audio encoder.
 *
 * Two modes depending on the codec context:
 *  - frame_size > 1: accumulate input in an adapter and feed the encoder
 *    exact multiples of the codec frame size, tracking timestamps across
 *    the adapter and resyncing to upstream timestamps on large drift;
 *  - frame_size <= 1: feed the whole input buffer straight to the encoder
 *    and expect a fixed-ratio output size.
 * Takes ownership of @inbuf in both modes (adapter push or explicit unref).
 */
static GstFlowReturn
gst_ffmpegenc_chain_audio (GstPad * pad, GstBuffer * inbuf)
{
  GstFFMpegEnc *ffmpegenc;
  GstFFMpegEncClass *oclass;
  AVCodecContext *ctx;
  GstClockTime timestamp, duration;
  guint size, frame_size;
  gint osize;
  GstFlowReturn ret;
  gint out_size;
  gboolean discont;
  guint8 *in_data;

  ffmpegenc = (GstFFMpegEnc *) (GST_OBJECT_PARENT (pad));
  oclass = (GstFFMpegEncClass *) G_OBJECT_GET_CLASS (ffmpegenc);

  ctx = ffmpegenc->context;

  size = GST_BUFFER_SIZE (inbuf);
  timestamp = GST_BUFFER_TIMESTAMP (inbuf);
  duration = GST_BUFFER_DURATION (inbuf);
  discont = GST_BUFFER_IS_DISCONT (inbuf);

  GST_DEBUG_OBJECT (ffmpegenc,
      "Received time %" GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT
      ", size %d", GST_TIME_ARGS (timestamp), GST_TIME_ARGS (duration), size);

  frame_size = ctx->frame_size;
  /* bytes per sample of the raw input format */
  osize = av_get_bits_per_sample_format (ctx->sample_fmt) / 8;

  if (frame_size > 1) {
    /* we have a frame_size, feed the encoder multiples of this frame size */
    guint avail, frame_bytes;

    if (discont) {
      GST_LOG_OBJECT (ffmpegenc, "DISCONT, clear adapter");
      gst_adapter_clear (ffmpegenc->adapter);
      ffmpegenc->discont = TRUE;
    }

    if (gst_adapter_available (ffmpegenc->adapter) == 0) {
      /* lock on to new timestamp */
      GST_LOG_OBJECT (ffmpegenc, "taking buffer timestamp %" GST_TIME_FORMAT,
          GST_TIME_ARGS (timestamp));
      ffmpegenc->adapter_ts = timestamp;
      ffmpegenc->adapter_consumed = 0;
    } else {
      GstClockTime upstream_time;
      guint64 bytes;

      /* use timestamp at head of the adapter: the locked-on timestamp plus
       * the time for the samples consumed since then */
      GST_LOG_OBJECT (ffmpegenc, "taking adapter timestamp %" GST_TIME_FORMAT,
          GST_TIME_ARGS (ffmpegenc->adapter_ts));
      timestamp = ffmpegenc->adapter_ts;
      timestamp +=
          gst_util_uint64_scale (ffmpegenc->adapter_consumed, GST_SECOND,
          ctx->sample_rate);
      /* check with upstream timestamps, if too much deviation,
       * forego some timestamp perfection in favour of upstream syncing
       * (particularly in case these do not happen to come in multiple
       * of frame size) */
      upstream_time = gst_adapter_prev_timestamp (ffmpegenc->adapter, &bytes);
      if (GST_CLOCK_TIME_IS_VALID (upstream_time)) {
        GstClockTimeDiff diff;

        /* NOTE(review): bytes is scaled by sample_rate directly, without
         * dividing by osize * channels as frame_bytes below does — confirm
         * whether this drift estimate is intentionally approximate */
        upstream_time +=
            gst_util_uint64_scale (bytes, GST_SECOND, ctx->sample_rate);
        diff = upstream_time - timestamp;
        /* relaxed difference, rather than half a sample or so ... */
        if (diff > GST_SECOND / 10 || diff < -GST_SECOND / 10) {
          GST_DEBUG_OBJECT (ffmpegenc, "adapter timestamp drifting, "
              "taking upstream timestamp %" GST_TIME_FORMAT,
              GST_TIME_ARGS (upstream_time));
          /* resync the adapter bookkeeping to the upstream timestamp */
          timestamp = upstream_time;
          ffmpegenc->adapter_ts = upstream_time -
              gst_util_uint64_scale (bytes, GST_SECOND, ctx->sample_rate);
          ffmpegenc->adapter_consumed = bytes;
          ffmpegenc->discont = TRUE;
        }
      }
    }

    GST_LOG_OBJECT (ffmpegenc, "pushing buffer in adapter");
    /* adapter takes ownership of inbuf */
    gst_adapter_push (ffmpegenc->adapter, inbuf);

    /* first see how many bytes we need to feed to the decoder. */
    frame_bytes = frame_size * osize * ctx->channels;
    avail = gst_adapter_available (ffmpegenc->adapter);

    GST_LOG_OBJECT (ffmpegenc, "frame_bytes %u, avail %u", frame_bytes, avail);

    /* while there is more than a frame size in the adapter, consume it */
    while (avail >= frame_bytes) {
      GST_LOG_OBJECT (ffmpegenc, "taking %u bytes from the adapter",
          frame_bytes);

      /* take an audio buffer out of the adapter */
      in_data = (guint8 *) gst_adapter_peek (ffmpegenc->adapter, frame_bytes);
      ffmpegenc->adapter_consumed += frame_size;

      /* calculate timestamp and duration relative to start of adapter and to
       * the amount of samples we consumed */
      duration =
          gst_util_uint64_scale (ffmpegenc->adapter_consumed, GST_SECOND,
          ctx->sample_rate);
      duration -= (timestamp - ffmpegenc->adapter_ts);

      /* 4 times the input size plus the minimal buffer size
       * should be big enough... */
      out_size = frame_bytes * 4 + FF_MIN_BUFFER_SIZE;

      ret =
          gst_ffmpegenc_encode_audio (ffmpegenc, in_data, out_size, timestamp,
          duration, ffmpegenc->discont);

      /* flush only AFTER encoding, since in_data points into the adapter */
      gst_adapter_flush (ffmpegenc->adapter, frame_bytes);

      if (ret != GST_FLOW_OK)
        goto push_failed;

      /* advance the adapter timestamp with the duration */
      timestamp += duration;

      ffmpegenc->discont = FALSE;
      avail = gst_adapter_available (ffmpegenc->adapter);
    }
    GST_LOG_OBJECT (ffmpegenc, "%u bytes left in the adapter", avail);
  } else {
    /* we have no frame_size, feed the encoder all the data and expect a fixed
     * output size */
    int coded_bps = av_get_bits_per_sample (oclass->in_plugin->id) / 8;

    GST_LOG_OBJECT (ffmpegenc, "coded bps %d, osize %d", coded_bps, osize);

    out_size = size / osize;
    if (coded_bps)
      out_size *= coded_bps;

    /* We need to provide at least ffmpegs minimal buffer size */
    out_size += FF_MIN_BUFFER_SIZE;

    in_data = (guint8 *) GST_BUFFER_DATA (inbuf);
    ret = gst_ffmpegenc_encode_audio (ffmpegenc, in_data, out_size,
        timestamp, duration, discont);
    gst_buffer_unref (inbuf);

    if (ret != GST_FLOW_OK)
      goto push_failed;
  }

  return GST_FLOW_OK;

  /* ERRORS */
push_failed:
  {
    GST_DEBUG_OBJECT (ffmpegenc, "Failed to push buffer %d (%s)", ret,
        gst_flow_get_name (ret));
    return ret;
  }
}
/* Runs one decoding step: drains the queued input buffers into the input
 * adapter, then repeatedly parses decode units out of it into the output
 * adapter until a complete frame is assembled and decoded (or data runs
 * out).  Returns the status of the last parse/decode operation, mapping
 * NO_DATA at EOS to END_OF_STREAM.
 */
static GstVaapiDecoderStatus
decode_step (GstVaapiDecoder * decoder)
{
  GstVaapiParserState *const ps = &decoder->parser_state;
  GstVaapiDecoderStatus status;
  GstBuffer *buffer;
  gboolean got_frame;
  guint got_unit_size, input_size;

  status = gst_vaapi_decoder_check_status (decoder);
  if (status != GST_VAAPI_DECODER_STATUS_SUCCESS)
    return status;

  /* Fill adapter with all buffers we have in the queue; EOS sentinel
   * buffers only set the flag and are not pushed */
  for (;;) {
    buffer = pop_buffer (decoder);
    if (!buffer)
      break;
    ps->at_eos = GST_BUFFER_IS_EOS (buffer);
    if (!ps->at_eos)
      gst_adapter_push (ps->input_adapter, buffer);
  }

  /* Parse and decode all decode units */
  input_size = gst_adapter_available (ps->input_adapter);
  if (input_size == 0) {
    if (ps->at_eos)
      return GST_VAAPI_DECODER_STATUS_END_OF_STREAM;
    return GST_VAAPI_DECODER_STATUS_ERROR_NO_DATA;
  }

  do {
    /* lazily allocate the codec frame the parsed units accumulate into;
     * it persists across decode_step calls until a full frame is decoded */
    if (!ps->current_frame) {
      ps->current_frame = g_slice_new0 (GstVideoCodecFrame);
      if (!ps->current_frame)
        return GST_VAAPI_DECODER_STATUS_ERROR_ALLOCATION_FAILED;
      ps->current_frame->ref_count = 1;
      ps->current_frame->system_frame_number = ps->current_frame_number++;
    }

    status = do_parse (decoder, ps->current_frame, ps->input_adapter,
        ps->at_eos, &got_unit_size, &got_frame);
    GST_DEBUG ("parse frame (status = %d)", status);
    if (status != GST_VAAPI_DECODER_STATUS_SUCCESS) {
      /* running out of data at EOS is the normal end condition */
      if (status == GST_VAAPI_DECODER_STATUS_ERROR_NO_DATA && ps->at_eos)
        status = GST_VAAPI_DECODER_STATUS_END_OF_STREAM;
      break;
    }

    if (got_unit_size > 0) {
      /* move the parsed unit from the input to the output adapter */
      buffer = gst_adapter_take_buffer (ps->input_adapter, got_unit_size);
      input_size -= got_unit_size;

      if (gst_adapter_available (ps->output_adapter) == 0) {
        /* first unit of the frame defines its pts.
         * NOTE(review): the timestamp is sampled from the input adapter
         * AFTER the unit was removed, so it reflects the data following
         * the unit — confirm this ordering is intended */
        ps->current_frame->pts =
            gst_adapter_prev_timestamp (ps->input_adapter, NULL);
      }
      gst_adapter_push (ps->output_adapter, buffer);
    }

    if (got_frame) {
      /* collapse all accumulated units into the frame's input buffer and
       * hand the frame to the decoder; only one frame per step */
      ps->current_frame->input_buffer =
          gst_adapter_take_buffer (ps->output_adapter,
          gst_adapter_available (ps->output_adapter));

      status = do_decode (decoder, ps->current_frame);
      GST_DEBUG ("decode frame (status = %d)", status);

      gst_video_codec_frame_unref (ps->current_frame);
      ps->current_frame = NULL;
      break;
    }
  } while (input_size > 0);
  return status;
}