static GstFlowReturn gst_type_find_element_chain (GstPad * pad, GstBuffer * buffer) { GstTypeFindElement *typefind; GstFlowReturn res = GST_FLOW_OK; typefind = GST_TYPE_FIND_ELEMENT (GST_PAD_PARENT (pad)); switch (typefind->mode) { case MODE_ERROR: /* we should already have called GST_ELEMENT_ERROR */ return GST_FLOW_ERROR; case MODE_NORMAL: gst_buffer_set_caps (buffer, typefind->caps); return gst_pad_push (typefind->src, buffer); case MODE_TYPEFIND:{ if (typefind->store) typefind->store = gst_buffer_join (typefind->store, buffer); else typefind->store = buffer; res = gst_type_find_element_chain_do_typefinding (typefind); if (typefind->mode == MODE_ERROR) res = GST_FLOW_ERROR; break; } default: g_assert_not_reached (); return GST_FLOW_ERROR; } return res; }
void gst_vdp_mpeg_frame_add_slice (GstVdpMpegFrame * mpeg_frame, GstBuffer * buf) { if (!mpeg_frame->slices) mpeg_frame->slices = buf; else mpeg_frame->slices = gst_buffer_join (mpeg_frame->slices, buf); mpeg_frame->n_slices++; }
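/*
 * Editor's note (not from the sources above): several of the chain functions
 * in this collection share the same accumulate-or-assign idiom around
 * gst_buffer_join(). A minimal, hypothetical sketch of that idiom follows;
 * the helper name is made up. In the GStreamer 0.10 API, gst_buffer_join()
 * takes ownership of both buffers and returns their concatenation, so
 * neither argument may be used afterwards.
 */
#include <gst/gst.h>

static void
accumulate_buffer (GstBuffer ** store, GstBuffer * buf)
{
  if (*store == NULL)
    *store = buf;                             /* first buffer: just keep it */
  else
    *store = gst_buffer_join (*store, buf);   /* append; consumes both */
}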
static GstFlowReturn gst_rtmp_sink_render (GstBaseSink * bsink, GstBuffer * buf) { GstRTMPSink *sink = GST_RTMP_SINK (bsink); GstBuffer *reffed_buf = NULL; if (sink->first) { /* open the connection */ if (!RTMP_IsConnected (sink->rtmp)) { if (!RTMP_Connect (sink->rtmp, NULL) || !RTMP_ConnectStream (sink->rtmp, 0)) { GST_ELEMENT_ERROR (sink, RESOURCE, OPEN_WRITE, (NULL), ("Could not connect to RTMP stream \"%s\" for writing", sink->uri)); RTMP_Free (sink->rtmp); sink->rtmp = NULL; g_free (sink->rtmp_uri); sink->rtmp_uri = NULL; return GST_FLOW_ERROR; } GST_DEBUG_OBJECT (sink, "Opened connection to %s", sink->rtmp_uri); } /* FIXME: Parse the first buffer and see if it contains a header plus a packet instead * of just assuming it's only the header */ GST_LOG_OBJECT (sink, "Caching first buffer of size %d for concatenation", GST_BUFFER_SIZE (buf)); gst_buffer_replace (&sink->cache, buf); sink->first = FALSE; return GST_FLOW_OK; } if (sink->cache) { GST_LOG_OBJECT (sink, "Joining 2nd buffer of size %d to cached buf", GST_BUFFER_SIZE (buf)); gst_buffer_ref (buf); reffed_buf = buf = gst_buffer_join (sink->cache, buf); sink->cache = NULL; } GST_LOG_OBJECT (sink, "Sending %d bytes to RTMP server", GST_BUFFER_SIZE (buf)); if (!RTMP_Write (sink->rtmp, (char *) GST_BUFFER_DATA (buf), GST_BUFFER_SIZE (buf))) { GST_ELEMENT_ERROR (sink, RESOURCE, WRITE, (NULL), ("Failed to write data")); if (reffed_buf) gst_buffer_unref (reffed_buf); return GST_FLOW_ERROR; } if (reffed_buf) gst_buffer_unref (reffed_buf); return GST_FLOW_OK; }
static GstFlowReturn gst_dtsdec_chain_raw (GstPad * pad, GstBuffer * buf) { GstDtsDec *dts; guint8 *data; gint size; gint length, flags, sample_rate, bit_rate, frame_length; GstFlowReturn result = GST_FLOW_OK; dts = GST_DTSDEC (GST_PAD_PARENT (pad)); if (dts->cache) { buf = gst_buffer_join (dts->cache, buf); dts->cache = NULL; } data = GST_BUFFER_DATA (buf); size = GST_BUFFER_SIZE (buf); length = 0; while (size >= 7) { length = dts_syncinfo (dts->state, data, &flags, &sample_rate, &bit_rate, &frame_length); if (length == 0) { /* shift window to re-find sync */ data++; size--; } else if (length <= size) { GST_DEBUG ("Sync: frame size %d", length); result = gst_dtsdec_handle_frame (dts, data, length, flags, sample_rate, bit_rate); if (result != GST_FLOW_OK) { size = 0; break; } size -= length; data += length; } else { GST_LOG ("Not enough data available (needed %d had %d)", length, size); break; } } /* keep cache */ if (length == 0) { GST_LOG ("No sync found"); } if (size > 0) { dts->cache = gst_buffer_create_sub (buf, GST_BUFFER_SIZE (buf) - size, size); } gst_buffer_unref (buf); return result; }
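/*
 * Editor's note (assumption-labelled sketch): gst_dtsdec_chain_raw() above
 * and gst_a52dec_chain_raw() near the end of this collection use the same
 * cache-the-remainder pattern: any cached leftover is joined in front of the
 * new input, frames are parsed out, and the unparsed tail is kept as a
 * sub-buffer for the next call. Names below are illustrative only
 * (GStreamer 0.10 API).
 */
#include <gst/gst.h>

static GstBuffer *
join_cache_and_keep_tail (GstBuffer ** cache, GstBuffer * buf, guint tail)
{
  if (*cache != NULL) {
    buf = gst_buffer_join (*cache, buf);      /* consumes cache and buf */
    *cache = NULL;
  }

  /* ... a real element would parse complete frames here, leaving
   * 'tail' unparsed bytes at the end of the buffer ... */

  if (tail > 0)
    *cache = gst_buffer_create_sub (buf, GST_BUFFER_SIZE (buf) - tail, tail);

  return buf;                                 /* caller unrefs when done */
}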
static GstBuffer * gst_exif_writer_reset_and_get_buffer (GstExifWriter * writer) { GstBuffer *header; GstBuffer *data; header = gst_byte_writer_reset_and_get_buffer (&writer->tagwriter); data = gst_byte_writer_reset_and_get_buffer (&writer->datawriter); return gst_buffer_join (header, data); }
static GstFlowReturn gst_y4m_encode_chain (GstPad * pad, GstBuffer * buf) { GstY4mEncode *filter = GST_Y4M_ENCODE (GST_PAD_PARENT (pad)); GstBuffer *outbuf; GstClockTime timestamp; /* check we got some decent info from caps */ if (filter->width < 0) { GST_ELEMENT_ERROR (filter, CORE, NEGOTIATION, (NULL), ("format wasn't negotiated before chain function")); gst_buffer_unref (buf); return GST_FLOW_NOT_NEGOTIATED; } timestamp = GST_BUFFER_TIMESTAMP (buf); if (G_UNLIKELY (!filter->header)) { if (filter->interlaced == TRUE) { if (GST_BUFFER_FLAG_IS_SET (buf, GST_VIDEO_BUFFER_TFF)) { filter->top_field_first = TRUE; } else { filter->top_field_first = FALSE; } } outbuf = gst_y4m_encode_get_stream_header (filter); filter->header = TRUE; outbuf = gst_buffer_join (outbuf, gst_y4m_encode_get_frame_header (filter)); } else { outbuf = gst_y4m_encode_get_frame_header (filter); } /* join with data */ outbuf = gst_buffer_join (outbuf, buf); /* decorate */ gst_buffer_make_metadata_writable (outbuf); gst_buffer_set_caps (outbuf, GST_PAD_CAPS (filter->srcpad)); GST_BUFFER_TIMESTAMP (outbuf) = timestamp; return gst_pad_push (filter->srcpad, outbuf); }
static GstFlowReturn gst_ss_demux_chain (GstPad * pad, GstBuffer * buf) { GstSSDemux *demux = GST_SS_DEMUX (gst_pad_get_parent (pad)); if (demux->manifest == NULL) demux->manifest = buf; else demux->manifest = gst_buffer_join (demux->manifest, buf); gst_object_unref (demux); return GST_FLOW_OK; }
static GstFlowReturn on_buffer(GstAppSink *sink, gpointer data) { Eyrie *e = (Eyrie *) data; if(e->recbin == NULL || gst_app_sink_is_eos(GST_APP_SINK(e->sink))) { return GST_FLOW_OK; } if(e->buf == NULL) { e->buf = gst_buffer_new(); } GstBuffer *tmpbuf; tmpbuf = gst_app_sink_pull_buffer(GST_APP_SINK(e->sink)); e->mutex->lock(); e->buf = gst_buffer_join(e->buf, tmpbuf); e->mutex->unlock(); return GST_FLOW_OK; }
static GstFlowReturn gst_spc_dec_chain (GstPad * pad, GstBuffer * buffer) { GstSpcDec *spc = GST_SPC_DEC (gst_pad_get_parent (pad)); if (spc->buf) { spc->buf = gst_buffer_join (spc->buf, buffer); } else { spc->buf = buffer; } gst_object_unref (spc); return GST_FLOW_OK; }
/* Internal method only. Tries to merge buffers at the head of the queue * to form a single larger buffer of size 'size'. Only merges buffers for * which 'gst_buffer_is_span_fast' returns TRUE. * * Returns TRUE if it managed to merge anything. */ static gboolean gst_adapter_try_to_merge_up (GstAdapter * adapter, guint size) { GstBuffer *cur, *head; GSList *g; gboolean ret = FALSE; g = adapter->buflist; if (g == NULL) return FALSE; head = g->data; g = g_slist_next (g); /* How large do we want our head buffer? The requested size, plus whatever's * been skipped already */ size += adapter->skip; while (g != NULL && GST_BUFFER_SIZE (head) < size) { cur = g->data; if (!gst_buffer_is_span_fast (head, cur)) return ret; /* Merge the head buffer and the next in line */ GST_LOG_OBJECT (adapter, "Merging buffers of size %u & %u in search of target %u", GST_BUFFER_SIZE (head), GST_BUFFER_SIZE (cur), size); head = gst_buffer_join (head, cur); ret = TRUE; /* Delete the front list item, and store our new buffer in the 2nd list * item */ adapter->buflist = g_slist_delete_link (adapter->buflist, adapter->buflist); g->data = head; /* invalidate scan position */ adapter->priv->scan_offset = 0; adapter->priv->scan_entry = NULL; g = g_slist_next (g); } return ret; }
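/*
 * Editor's note: gst_adapter_try_to_merge_up() is an internal GstAdapter
 * helper; element code reaches it indirectly through the public adapter API.
 * A hedged sketch of that public usage follows (GStreamer 0.10); the frame
 * size and function name are hypothetical.
 */
#include <gst/gst.h>
#include <gst/base/gstadapter.h>

static GstFlowReturn
example_adapter_chain (GstAdapter * adapter, GstBuffer * buf, guint frame_size)
{
  gst_adapter_push (adapter, buf);            /* adapter takes ownership */

  while (gst_adapter_available (adapter) >= frame_size) {
    /* gst_adapter_take_buffer() may merge queued buffers internally to
     * hand back one contiguous buffer of frame_size bytes */
    GstBuffer *frame = gst_adapter_take_buffer (adapter, frame_size);

    /* ... process or push 'frame' here ... */
    gst_buffer_unref (frame);
  }
  return GST_FLOW_OK;
}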
static GstFlowReturn gst_nsfdec_chain (GstPad * pad, GstBuffer * buffer) { GstNsfDec *nsfdec; nsfdec = GST_NSFDEC (gst_pad_get_parent (pad)); /* collect all data, we start doing something when we get an EOS * event */ if (nsfdec->tune_buffer) { nsfdec->tune_buffer = gst_buffer_join (nsfdec->tune_buffer, buffer); } else { nsfdec->tune_buffer = buffer; } gst_object_unref (nsfdec); return GST_FLOW_OK; }
static GstFlowReturn gst_rtmp_sink_render (GstBaseSink * bsink, GstBuffer * buf) { GstRTMPSink *sink = GST_RTMP_SINK (bsink); GstBuffer *reffed_buf = NULL; if (sink->first) { /* FIXME: Parse the first buffer and see if it contains a header plus a packet instead * of just assuming it's only the header */ GST_LOG_OBJECT (sink, "Caching first buffer of size %d for concatenation", GST_BUFFER_SIZE (buf)); gst_buffer_replace (&sink->cache, buf); sink->first = FALSE; return GST_FLOW_OK; } if (sink->cache) { GST_LOG_OBJECT (sink, "Joining 2nd buffer of size %d to cached buf", GST_BUFFER_SIZE (buf)); gst_buffer_ref (buf); reffed_buf = buf = gst_buffer_join (sink->cache, buf); sink->cache = NULL; } GST_LOG_OBJECT (sink, "Sending %d bytes to RTMP server", GST_BUFFER_SIZE (buf)); if (!RTMP_Write (sink->rtmp, (char *) GST_BUFFER_DATA (buf), GST_BUFFER_SIZE (buf))) { GST_ELEMENT_ERROR (sink, RESOURCE, WRITE, (NULL), ("Failed to write data")); if (reffed_buf) gst_buffer_unref (reffed_buf); return GST_FLOW_ERROR; } if (reffed_buf) gst_buffer_unref (reffed_buf); return GST_FLOW_OK; }
static GstFlowReturn gst_decklink_sink_audiosink_chain (GstPad * pad, GstBuffer * buffer) { GstDecklinkSink *decklinksink; GstFlowReturn ret; decklinksink = GST_DECKLINK_SINK (gst_pad_get_parent (pad)); GST_DEBUG_OBJECT (decklinksink, "chain"); // concatenate both buffers g_mutex_lock (decklinksink->audio_mutex); decklinksink->audio_buffer = gst_buffer_join (decklinksink->audio_buffer, buffer); g_mutex_unlock (decklinksink->audio_mutex); // GST_DEBUG("Audio Buffer Size: %d", GST_BUFFER_SIZE (decklinksink->audio_buffer)); gst_object_unref (decklinksink); ret = GST_FLOW_OK; return ret; }
static int gst_wavpack_enc_push_block (void *id, void *data, int32_t count) { GstWavpackEncWriteID *wid = (GstWavpackEncWriteID *) id; GstWavpackEnc *enc = GST_WAVPACK_ENC (wid->wavpack_enc); GstFlowReturn *flow; GstBuffer *buffer; GstPad *pad; guchar *block = (guchar *) data; pad = (wid->correction) ? enc->wvcsrcpad : enc->srcpad; flow = (wid->correction) ? &enc->wvcsrcpad_last_return : &enc-> srcpad_last_return; *flow = gst_pad_alloc_buffer_and_set_caps (pad, GST_BUFFER_OFFSET_NONE, count, GST_PAD_CAPS (pad), &buffer); if (*flow != GST_FLOW_OK) { GST_WARNING_OBJECT (enc, "flow on %s:%s = %s", GST_DEBUG_PAD_NAME (pad), gst_flow_get_name (*flow)); return FALSE; } g_memmove (GST_BUFFER_DATA (buffer), block, count); if (count > sizeof (WavpackHeader) && memcmp (block, "wvpk", 4) == 0) { /* if it's a Wavpack block set buffer timestamp and duration, etc */ WavpackHeader wph; GST_LOG_OBJECT (enc, "got %d bytes of encoded wavpack %sdata", count, (wid->correction) ? "correction " : ""); gst_wavpack_read_header (&wph, block); /* Only set when pushing the first buffer again, in that case * we don't want to delay the buffer or push newsegment events */ if (!wid->passthrough) { /* Only push complete blocks */ if (enc->pending_buffer == NULL) { enc->pending_buffer = buffer; enc->pending_offset = wph.block_index; } else if (enc->pending_offset == wph.block_index) { enc->pending_buffer = gst_buffer_join (enc->pending_buffer, buffer); } else { GST_ERROR ("Got incomplete block, dropping"); gst_buffer_unref (enc->pending_buffer); enc->pending_buffer = buffer; enc->pending_offset = wph.block_index; } if (!(wph.flags & FINAL_BLOCK)) return TRUE; buffer = enc->pending_buffer; enc->pending_buffer = NULL; enc->pending_offset = 0; /* if it's the first wavpack block, send a NEW_SEGMENT event */ if (wph.block_index == 0) { gst_pad_push_event (pad, gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0, GST_BUFFER_OFFSET_NONE, 0)); /* save header for later reference, so we can re-send it later on * EOS with fixed up values for total sample count etc. */ if (enc->first_block == NULL && !wid->correction) { enc->first_block = g_memdup (GST_BUFFER_DATA (buffer), GST_BUFFER_SIZE (buffer)); enc->first_block_size = GST_BUFFER_SIZE (buffer); } } } /* set buffer timestamp, duration, offset, offset_end from * the wavpack header */ GST_BUFFER_TIMESTAMP (buffer) = enc->timestamp_offset + gst_util_uint64_scale_int (GST_SECOND, wph.block_index, enc->samplerate); GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale_int (GST_SECOND, wph.block_samples, enc->samplerate); GST_BUFFER_OFFSET (buffer) = wph.block_index; GST_BUFFER_OFFSET_END (buffer) = wph.block_index + wph.block_samples; } else { /* if it's something else set no timestamp and duration on the buffer */ GST_DEBUG_OBJECT (enc, "got %d bytes of unknown data", count); GST_BUFFER_TIMESTAMP (buffer) = GST_CLOCK_TIME_NONE; GST_BUFFER_DURATION (buffer) = GST_CLOCK_TIME_NONE; } /* push the buffer and forward errors */ GST_DEBUG_OBJECT (enc, "pushing buffer with %d bytes", GST_BUFFER_SIZE (buffer)); *flow = gst_pad_push (pad, buffer); if (*flow != GST_FLOW_OK) { GST_WARNING_OBJECT (enc, "flow on %s:%s = %s", GST_DEBUG_PAD_NAME (pad), gst_flow_get_name (*flow)); return FALSE; } return TRUE; }
/* * Test if the parser pushes clean data properly. */ void gst_parser_test_run (GstParserTest * test, GstCaps ** out_caps) { buffer_verify_data_s vdata = { 0, 0, 0, NULL, 0, NULL, FALSE }; GstElement *element; GstBuffer *buffer = NULL; GstCaps *src_caps; guint i, j, k; guint frames = 0, size = 0; element = setup_element (test->factory, test->sink_template, NULL, test->src_template, test->src_caps); /* push some setup headers */ for (j = 0; j < G_N_ELEMENTS (test->headers) && test->headers[j].data; j++) { buffer = buffer_new (test->headers[j].data, test->headers[j].size); fail_unless_equals_int (gst_pad_push (srcpad, buffer), GST_FLOW_OK); } for (j = 0; j < 3; j++) { for (i = 0; i < test->series[j].num; i++) { /* sanity enforcing */ for (k = 0; k < MAX (1, test->series[j].fpb); k++) { if (!k) buffer = buffer_new (test->series[j].data, test->series[j].size); else { GstCaps *caps = gst_buffer_get_caps (buffer); buffer = gst_buffer_join (buffer, buffer_new (test->series[j].data, test->series[j].size)); if (caps) { gst_buffer_set_caps (buffer, caps); gst_caps_unref (caps); } } } fail_unless_equals_int (gst_pad_push (srcpad, buffer), GST_FLOW_OK); if (j == 0) vdata.buffers_before_offset_skip++; else if (j == 1) vdata.offset_skip_amount += test->series[j].size * test->series[j].fpb; if (j != 1) { frames += test->series[j].fpb; size += test->series[j].size * test->series[j].fpb; } } } gst_pad_push_event (srcpad, gst_event_new_eos ()); if (G_LIKELY (test->framed)) fail_unless_equals_int (g_list_length (buffers) - test->discard, frames); /* if all frames are identical, do extended test, * otherwise only verify total data size */ if (test->series[0].data && (!test->series[2].size || (test->series[0].size == test->series[2].size && test->series[2].data && !memcmp (test->series[0].data, test->series[2].data, test->series[0].size)))) { vdata.data_to_verify = test->series[0].data; vdata.data_to_verify_size = test->series[0].size; vdata.caps = test->sink_caps; vdata.discard = test->discard; vdata.no_metadata = test->no_metadata; g_list_foreach (buffers, buffer_verify_data, &vdata); } else { guint datasum = 0; g_list_foreach (buffers, buffer_count_size, &datasum); size -= test->dropped; fail_unless_equals_int (datasum, size); } src_caps = gst_pad_get_negotiated_caps (sinkpad); GST_LOG ("output caps: %" GST_PTR_FORMAT, src_caps); if (test->sink_caps) { GST_LOG ("%" GST_PTR_FORMAT " = %" GST_PTR_FORMAT " ?", src_caps, test->sink_caps); fail_unless (gst_caps_is_equal (src_caps, test->sink_caps)); } if (out_caps) *out_caps = src_caps; else gst_caps_unref (src_caps); cleanup_element (element); }
static GstFlowReturn gst_icydemux_typefind_or_forward (GstICYDemux * icydemux, GstBuffer * buf) { if (icydemux->typefinding) { GstBuffer *tf_buf; GstCaps *caps = NULL; GstTypeFindProbability prob; /* If we have a content-type from upstream, let's see if we can shortcut * typefinding */ if (G_UNLIKELY (icydemux->content_type)) { if (!g_ascii_strcasecmp (icydemux->content_type, "video/nsv")) { GST_DEBUG ("We have a NSV stream"); caps = gst_caps_new_simple ("video/x-nsv", NULL); } else { GST_DEBUG ("Upstream Content-Type isn't supported"); g_free (icydemux->content_type); icydemux->content_type = NULL; } } if (icydemux->typefind_buf) { icydemux->typefind_buf = gst_buffer_join (icydemux->typefind_buf, buf); } else { icydemux->typefind_buf = buf; } /* Only typefind if we haven't already got some caps */ if (caps == NULL) { caps = gst_type_find_helper_for_buffer (GST_OBJECT (icydemux), icydemux->typefind_buf, &prob); if (caps == NULL) { if (GST_BUFFER_SIZE (icydemux->typefind_buf) < ICY_TYPE_FIND_MAX_SIZE) { /* Just break for more data */ return GST_FLOW_OK; } /* We failed typefind */ GST_ELEMENT_ERROR (icydemux, STREAM, TYPE_NOT_FOUND, (NULL), ("No caps found for contents within an ICY stream")); gst_buffer_unref (icydemux->typefind_buf); icydemux->typefind_buf = NULL; return GST_FLOW_ERROR; } } if (!gst_icydemux_add_srcpad (icydemux, caps)) { GST_DEBUG_OBJECT (icydemux, "Failed to add srcpad"); gst_caps_unref (caps); gst_buffer_unref (icydemux->typefind_buf); icydemux->typefind_buf = NULL; return GST_FLOW_ERROR; } gst_caps_unref (caps); if (icydemux->cached_events) { gst_icydemux_send_cached_events (icydemux); } if (icydemux->cached_tags) { gst_icydemux_send_tag_event (icydemux, icydemux->cached_tags); icydemux->cached_tags = NULL; } /* Move onto streaming: call ourselves recursively with the typefind buffer * to get that forwarded. */ icydemux->typefinding = FALSE; tf_buf = icydemux->typefind_buf; icydemux->typefind_buf = NULL; return gst_icydemux_typefind_or_forward (icydemux, tf_buf); } else { if (G_UNLIKELY (icydemux->srcpad == NULL)) { gst_buffer_unref (buf); return GST_FLOW_ERROR; } buf = gst_buffer_make_metadata_writable (buf); gst_buffer_set_caps (buf, icydemux->src_caps); /* Most things don't care, and it's a pain to track (we should preserve a * 0 offset on the first buffer though if it's there, for id3demux etc.) */ if (GST_BUFFER_OFFSET (buf) != 0) { GST_BUFFER_OFFSET (buf) = GST_BUFFER_OFFSET_NONE; } return gst_pad_push (icydemux->srcpad, buf); } }
/* * description : convert input 3gpp buffer to nalu based buffer * params : @self : GstOmxH264Dec, @buf: buffer to be converted * return : none * comments : none */ static void convert_frame (GstOmxH264Dec *self, GstBuffer **buf) { OMX_U8 frameType; OMX_U32 nalSize = 0; OMX_U32 cumulSize = 0; OMX_U32 offset = 0; OMX_U32 nalHeaderSize = 0; OMX_U32 outSize = 0; OMX_U8 *frame_3gpp = GST_BUFFER_DATA(*buf); OMX_U32 frame_3gpp_size = GST_BUFFER_SIZE(*buf); GstBuffer *nalu_next_buf = NULL; GstBuffer *nalu_buf = NULL; do { /* get NAL Length based on length of length*/ if (self->h264NalLengthSize == 1) { nalSize = frame_3gpp[0]; } else if (self->h264NalLengthSize == 2) { nalSize = GSTOMX_H264_RB16(frame_3gpp); } else { nalSize = GSTOMX_H264_RB32(frame_3gpp); } GST_LOG_OBJECT(self, "packetized frame size = %d", nalSize); frame_3gpp += self->h264NalLengthSize; /* Checking frame type */ frameType = *frame_3gpp & 0x1f; switch (frameType) { case GSTOMX_H264_NUT_SLICE: GST_LOG_OBJECT(self, "Frame is non-IDR frame..."); break; case GSTOMX_H264_NUT_IDR: GST_LOG_OBJECT(self, "Frame is an IDR frame..."); break; case GSTOMX_H264_NUT_SEI: GST_LOG_OBJECT(self, "Found SEI Data..."); break; case GSTOMX_H264_NUT_SPS: GST_LOG_OBJECT(self, "Found SPS data..."); break; case GSTOMX_H264_NUT_PPS: GST_LOG_OBJECT(self, "Found PPS data..."); break; case GSTOMX_H264_NUT_EOSEQ: GST_LOG_OBJECT(self, "End of sequence..."); break; case GSTOMX_H264_NUT_EOSTREAM: GST_LOG_OBJECT(self, "End of stream..."); break; case GSTOMX_H264_NUT_DPA: case GSTOMX_H264_NUT_DPB: case GSTOMX_H264_NUT_DPC: case GSTOMX_H264_NUT_AUD: case GSTOMX_H264_NUT_FILL: case GSTOMX_H264_NUT_MIXED: break; default: GST_INFO_OBJECT(self, "Unknown Frame type: %d\n", frameType); goto EXIT; } /* if nal size is same, we can change only start code */ if((nalSize + GSTOMX_H264_NAL_START_LEN) == frame_3gpp_size) { GST_LOG_OBJECT(self, "only change start code"); GSTOMX_H264_WB32(GST_BUFFER_DATA(*buf), 1); return; } /* Convert 3GPP Frame to NALU Frame */ offset = outSize; nalHeaderSize = offset ? 3 : 4; outSize += nalSize + nalHeaderSize; if ((nalSize > frame_3gpp_size)||(outSize < 0)) { GST_ERROR_OBJECT(self, "out of bounds Error. frame_nalu_size=%d", outSize); goto EXIT; } if (nalu_buf) { nalu_next_buf= gst_buffer_new_and_alloc(nalSize + nalHeaderSize); if (nalu_next_buf == NULL) { GST_ERROR_OBJECT(self, "gst_buffer_new_and_alloc failed.(nalu_next_buf)"); goto EXIT; } } else { nalu_buf = gst_buffer_new_and_alloc(outSize); } if (nalu_buf == NULL) { GST_ERROR_OBJECT(self, "gst_buffer_new_and_alloc failed.(nalu_buf)"); goto EXIT; } if (!offset) { memcpy(GST_BUFFER_DATA(nalu_buf)+nalHeaderSize, frame_3gpp, nalSize); GSTOMX_H264_WB32(GST_BUFFER_DATA(nalu_buf), 1); } else { if (nalu_next_buf) { GstBuffer *nalu_joined_buf = gst_buffer_join(nalu_buf,nalu_next_buf); nalu_buf = nalu_joined_buf; nalu_next_buf = NULL; } memcpy(GST_BUFFER_DATA(nalu_buf)+nalHeaderSize+offset, frame_3gpp, nalSize); (GST_BUFFER_DATA(nalu_buf)+offset)[0] = (GST_BUFFER_DATA(nalu_buf)+offset)[1] = 0; (GST_BUFFER_DATA(nalu_buf)+offset)[2] = 1; } frame_3gpp += nalSize; cumulSize += nalSize + self->h264NalLengthSize; GST_LOG_OBJECT(self, "frame_3gpp_size = %d => frame_nalu_size=%d", frame_3gpp_size, outSize); } while (cumulSize < frame_3gpp_size); gst_buffer_copy_metadata(nalu_buf, *buf, GST_BUFFER_COPY_ALL); if (*buf) { gst_buffer_unref (*buf); } *buf = nalu_buf; return; EXIT: if (nalu_buf) { gst_buffer_unref (nalu_buf); } GST_ERROR_OBJECT(self, "converting frame error."); return; }
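/*
 * Editor's note (illustrative assumption): convert_frame() above rewrites
 * AVC length-prefixed ("3gpp") NAL units into Annex-B byte-stream form.
 * For the common case of a 4-byte length field, the core of that rewrite is
 * simply overwriting each length prefix with the 0x00000001 start code, as
 * sketched below; buffer layout checks are omitted.
 */
#include <gst/gst.h>

static void
length_prefix_to_start_code (guint8 * data, guint size)
{
  guint offset = 0;

  while (offset + 4 <= size) {
    guint32 nal_size = GST_READ_UINT32_BE (data + offset);

    GST_WRITE_UINT32_BE (data + offset, 1);   /* 00 00 00 01 start code */
    offset += 4 + nal_size;
  }
}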
static GstFlowReturn gst_rtp_asf_pay_handle_buffer (GstBaseRTPPayload * rtppay, GstBuffer * buffer) { GstRtpAsfPay *rtpasfpay = GST_RTP_ASF_PAY_CAST (rtppay); if (G_UNLIKELY (rtpasfpay->state == ASF_END)) { GST_LOG_OBJECT (rtpasfpay, "Dropping buffer as we already pushed all packets"); gst_buffer_unref (buffer); return GST_FLOW_UNEXPECTED; /* we already finished our job */ } /* receive headers * we only accept if they are in a single buffer */ if (G_UNLIKELY (rtpasfpay->state == ASF_NOT_STARTED)) { guint64 header_size; if (GST_BUFFER_SIZE (buffer) < 24) { /* guid+object size size */ GST_ERROR_OBJECT (rtpasfpay, "Buffer too small, smaller than a Guid and object size"); gst_buffer_unref (buffer); return GST_FLOW_ERROR; } header_size = gst_asf_match_and_peek_obj_size (GST_BUFFER_DATA (buffer), &(guids[ASF_HEADER_OBJECT_INDEX])); if (header_size > 0) { GST_DEBUG_OBJECT (rtpasfpay, "ASF header guid received, size %" G_GUINT64_FORMAT, header_size); if (GST_BUFFER_SIZE (buffer) < header_size) { GST_ERROR_OBJECT (rtpasfpay, "Headers should be contained in a single" " buffer"); gst_buffer_unref (buffer); return GST_FLOW_ERROR; } else { rtpasfpay->state = ASF_DATA_OBJECT; /* clear previous headers, if any */ if (rtpasfpay->headers) { gst_buffer_unref (rtpasfpay->headers); } GST_DEBUG_OBJECT (rtpasfpay, "Storing headers"); if (GST_BUFFER_SIZE (buffer) == header_size) { rtpasfpay->headers = buffer; return GST_FLOW_OK; } else { /* headers are a subbuffer of this buffer */ GstBuffer *aux = gst_buffer_create_sub (buffer, header_size, GST_BUFFER_SIZE (buffer) - header_size); rtpasfpay->headers = gst_buffer_create_sub (buffer, 0, header_size); gst_buffer_replace (&buffer, aux); } } } else { GST_ERROR_OBJECT (rtpasfpay, "Missing ASF header start"); gst_buffer_unref (buffer); return GST_FLOW_ERROR; } } if (G_UNLIKELY (rtpasfpay->state == ASF_DATA_OBJECT)) { if (GST_BUFFER_SIZE (buffer) != ASF_DATA_OBJECT_SIZE) { GST_ERROR_OBJECT (rtpasfpay, "Received buffer of different size than " "the data object header"); gst_buffer_unref (buffer); return GST_FLOW_ERROR; } if (gst_asf_match_guid (GST_BUFFER_DATA (buffer), &(guids[ASF_DATA_OBJECT_INDEX]))) { GST_DEBUG_OBJECT (rtpasfpay, "Received data object header"); rtpasfpay->headers = gst_buffer_join (rtpasfpay->headers, buffer); rtpasfpay->state = ASF_PACKETS; return gst_rtp_asf_pay_parse_headers (rtpasfpay); } else { GST_ERROR_OBJECT (rtpasfpay, "Unexpected object received (was expecting " "data object)"); gst_buffer_unref (buffer); return GST_FLOW_ERROR; } } if (G_LIKELY (rtpasfpay->state == ASF_PACKETS)) { /* in broadcast mode we can't trust the packets count information * from the headers * We assume that if this is on broadcast mode it is a live stream * and we are going to keep receiving packets indefinitely */ if (rtpasfpay->asfinfo.broadcast || rtpasfpay->packets_count < rtpasfpay->asfinfo.packets_count) { GST_DEBUG_OBJECT (rtpasfpay, "Received packet %" G_GUINT64_FORMAT "/%" G_GUINT64_FORMAT, rtpasfpay->packets_count, rtpasfpay->asfinfo.packets_count); rtpasfpay->packets_count++; return gst_rtp_asf_pay_handle_packet (rtpasfpay, buffer); } else { GST_INFO_OBJECT (rtpasfpay, "Packets ended"); rtpasfpay->state = ASF_END; gst_buffer_unref (buffer); return GST_FLOW_UNEXPECTED; } } gst_buffer_unref (buffer); return GST_FLOW_OK; }
static gboolean gst_asf_demux_parse_payload (GstASFDemux * demux, AsfPacket * packet, gint lentype, const guint8 ** p_data, guint * p_size) { AsfPayload payload = { 0, }; AsfStream *stream; gboolean is_compressed; guint payload_len; guint stream_num; if (G_UNLIKELY (*p_size < 1)) { GST_WARNING_OBJECT (demux, "Short packet!"); return FALSE; } stream_num = GST_READ_UINT8 (*p_data) & 0x7f; payload.keyframe = ((GST_READ_UINT8 (*p_data) & 0x80) != 0); *p_data += 1; *p_size -= 1; payload.ts = GST_CLOCK_TIME_NONE; payload.duration = GST_CLOCK_TIME_NONE; payload.par_x = 0; payload.par_y = 0; payload.interlaced = FALSE; payload.tff = FALSE; payload.rff = FALSE; payload.mo_number = asf_packet_read_varlen_int (packet->prop_flags, 4, p_data, p_size); payload.mo_offset = asf_packet_read_varlen_int (packet->prop_flags, 2, p_data, p_size); payload.rep_data_len = asf_packet_read_varlen_int (packet->prop_flags, 0, p_data, p_size); is_compressed = (payload.rep_data_len == 1); GST_LOG_OBJECT (demux, "payload for stream %u", stream_num); GST_LOG_OBJECT (demux, "keyframe : %s", (payload.keyframe) ? "yes" : "no"); GST_LOG_OBJECT (demux, "compressed : %s", (is_compressed) ? "yes" : "no"); if (G_UNLIKELY (*p_size < payload.rep_data_len)) { GST_WARNING_OBJECT (demux, "Short packet! rep_data_len=%u, size=%u", payload.rep_data_len, *p_size); return FALSE; } memcpy (payload.rep_data, *p_data, MIN (sizeof (payload.rep_data), payload.rep_data_len)); *p_data += payload.rep_data_len; *p_size -= payload.rep_data_len; if (G_UNLIKELY (*p_size == 0)) { GST_WARNING_OBJECT (demux, "payload without data!?"); return FALSE; } /* we use -1 as lentype for a single payload that's the size of the packet */ if (G_UNLIKELY ((lentype >= 0 && lentype <= 3))) { payload_len = asf_packet_read_varlen_int (lentype, 0, p_data, p_size); if (*p_size < payload_len) { GST_WARNING_OBJECT (demux, "Short packet! payload_len=%u, size=%u", payload_len, *p_size); return FALSE; } } else { payload_len = *p_size; } GST_LOG_OBJECT (demux, "payload length: %u", payload_len); stream = gst_asf_demux_get_stream (demux, stream_num); if (G_UNLIKELY (stream == NULL)) { GST_WARNING_OBJECT (demux, "Payload for unknown stream %u, skipping", stream_num); if (*p_size < payload_len) { *p_data += *p_size; *p_size = 0; } else { *p_data += payload_len; *p_size -= payload_len; } return TRUE; } if (G_UNLIKELY (!is_compressed)) { GST_LOG_OBJECT (demux, "replicated data length: %u", payload.rep_data_len); if (payload.rep_data_len >= 8) { payload.mo_size = GST_READ_UINT32_LE (payload.rep_data); payload.ts = GST_READ_UINT32_LE (payload.rep_data + 4) * GST_MSECOND; if (G_UNLIKELY (payload.ts < demux->preroll)) payload.ts = 0; else payload.ts -= demux->preroll; asf_payload_parse_replicated_data_extensions (stream, &payload); GST_LOG_OBJECT (demux, "media object size : %u", payload.mo_size); GST_LOG_OBJECT (demux, "media object ts : %" GST_TIME_FORMAT, GST_TIME_ARGS (payload.ts)); GST_LOG_OBJECT (demux, "media object dur : %" GST_TIME_FORMAT, GST_TIME_ARGS (payload.duration)); } else if (payload.rep_data_len != 0) { GST_WARNING_OBJECT (demux, "invalid replicated data length, very bad"); *p_data += payload_len; *p_size -= payload_len; return FALSE; } GST_LOG_OBJECT (demux, "media object offset : %u", payload.mo_offset); GST_LOG_OBJECT (demux, "payload length: %u", payload_len); if ((stream = gst_asf_demux_get_stream (demux, stream_num)) && payload_len) { payload.buf = asf_packet_create_payload_buffer (packet, p_data, p_size, payload_len); /* n-th fragment of a fragmented media object? */ if (payload.mo_offset != 0) { AsfPayload *prev; if ((prev = asf_payload_find_previous_fragment (&payload, stream))) { if (payload.mo_offset != GST_BUFFER_SIZE (prev->buf)) { GST_WARNING_OBJECT (demux, "Offset doesn't match previous data?!"); } /* note: buffer join/merge might not preserve buffer flags */ prev->buf = gst_buffer_join (prev->buf, payload.buf); GST_LOG_OBJECT (demux, "Merged fragments, merged size: %u", GST_BUFFER_SIZE (prev->buf)); } else { gst_buffer_unref (payload.buf); } payload.buf = NULL; } else { gst_asf_payload_queue_for_stream (demux, &payload, stream); } } } else { const guint8 *payload_data; GstClockTime ts, ts_delta; guint num; GST_LOG_OBJECT (demux, "Compressed payload, length=%u", payload_len); payload_data = *p_data; *p_data += payload_len; *p_size -= payload_len; ts = payload.mo_offset * GST_MSECOND; if (G_UNLIKELY (ts < demux->preroll)) ts = 0; else ts -= demux->preroll; ts_delta = payload.rep_data[0] * GST_MSECOND; for (num = 0; payload_len > 0; ++num) { guint sub_payload_len; sub_payload_len = GST_READ_UINT8 (payload_data); GST_LOG_OBJECT (demux, "subpayload #%u: len=%u, ts=%" GST_TIME_FORMAT, num, sub_payload_len, GST_TIME_ARGS (ts)); ++payload_data; --payload_len; if (G_UNLIKELY (payload_len < sub_payload_len)) { GST_WARNING_OBJECT (demux, "Short payload! %u bytes left", payload_len); return FALSE; } if (G_LIKELY (sub_payload_len > 0)) { payload.buf = asf_packet_create_payload_buffer (packet, &payload_data, &payload_len, sub_payload_len); payload.ts = ts; if (G_LIKELY (ts_delta)) payload.duration = ts_delta; else payload.duration = GST_CLOCK_TIME_NONE; gst_asf_payload_queue_for_stream (demux, &payload, stream); } ts += ts_delta; } } return TRUE; }
static GstFlowReturn gst_wavpack_parse_push_buffer (GstWavpackParse * wvparse, GstBuffer * buf, WavpackHeader * header) { wvparse->current_offset += header->ckSize + 8; wvparse->segment.last_stop = header->block_index; if (wvparse->need_newsegment) { if (gst_wavpack_parse_send_newsegment (wvparse, FALSE)) wvparse->need_newsegment = FALSE; } /* send any queued events */ if (wvparse->queued_events) { GList *l; for (l = wvparse->queued_events; l != NULL; l = l->next) { gst_pad_push_event (wvparse->srcpad, GST_EVENT (l->data)); } g_list_free (wvparse->queued_events); wvparse->queued_events = NULL; } if (wvparse->pending_buffer == NULL) { wvparse->pending_buffer = buf; wvparse->pending_offset = header->block_index; } else if (wvparse->pending_offset == header->block_index) { wvparse->pending_buffer = gst_buffer_join (wvparse->pending_buffer, buf); } else { GST_ERROR ("Got incomplete block, dropping"); gst_buffer_unref (wvparse->pending_buffer); wvparse->pending_buffer = buf; wvparse->pending_offset = header->block_index; } if (!(header->flags & FINAL_BLOCK)) return GST_FLOW_OK; buf = wvparse->pending_buffer; wvparse->pending_buffer = NULL; GST_BUFFER_TIMESTAMP (buf) = gst_util_uint64_scale_int (header->block_index, GST_SECOND, wvparse->samplerate); GST_BUFFER_DURATION (buf) = gst_util_uint64_scale_int (header->block_samples, GST_SECOND, wvparse->samplerate); GST_BUFFER_OFFSET (buf) = header->block_index; GST_BUFFER_OFFSET_END (buf) = header->block_index + header->block_samples; if (wvparse->discont || wvparse->next_block_index != header->block_index) { GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT); wvparse->discont = FALSE; } wvparse->next_block_index = header->block_index + header->block_samples; gst_buffer_set_caps (buf, GST_PAD_CAPS (wvparse->srcpad)); GST_LOG_OBJECT (wvparse, "Pushing buffer with time %" GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf))); return gst_pad_push (wvparse->srcpad, buf); }
static GstFlowReturn gst_a52dec_chain_raw (GstPad * pad, GstBuffer * buf) { GstA52Dec *a52dec; guint8 *data; guint size; gint length = 0, flags, sample_rate, bit_rate; GstFlowReturn result = GST_FLOW_OK; a52dec = GST_A52DEC (GST_PAD_PARENT (pad)); if (!a52dec->sent_segment) { GstSegment segment; /* Create a basic segment. Usually, we'll get a new-segment sent by * another element that will know more information (a demuxer). If we're * just looking at a raw AC3 stream, we won't - so we need to send one * here, but we don't know much info, so just send a minimal TIME * new-segment event */ gst_segment_init (&segment, GST_FORMAT_TIME); gst_pad_push_event (a52dec->srcpad, gst_event_new_new_segment (FALSE, segment.rate, segment.format, segment.start, segment.duration, segment.start)); a52dec->sent_segment = TRUE; } /* merge with cache, if any. Also make sure timestamps match */ if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) { a52dec->time = GST_BUFFER_TIMESTAMP (buf); GST_DEBUG_OBJECT (a52dec, "Received buffer with ts %" GST_TIME_FORMAT " duration %" GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); } if (a52dec->cache) { buf = gst_buffer_join (a52dec->cache, buf); a52dec->cache = NULL; } data = GST_BUFFER_DATA (buf); size = GST_BUFFER_SIZE (buf); /* find and read header */ bit_rate = a52dec->bit_rate; sample_rate = a52dec->sample_rate; flags = 0; while (size >= 7) { length = a52_syncinfo (data, &flags, &sample_rate, &bit_rate); if (length == 0) { /* no sync */ data++; size--; } else if (length <= size) { GST_DEBUG ("Sync: %d", length); if (flags != a52dec->prev_flags) a52dec->flag_update = TRUE; a52dec->prev_flags = flags; result = gst_a52dec_handle_frame (a52dec, data, length, flags, sample_rate, bit_rate); if (result != GST_FLOW_OK) { size = 0; break; } size -= length; data += length; } else { /* not enough data */ GST_LOG ("Not enough data available"); break; } } /* keep cache */ if (length == 0) { GST_LOG ("No sync found"); } if (size > 0) { a52dec->cache = gst_buffer_create_sub (buf, GST_BUFFER_SIZE (buf) - size, size); } gst_buffer_unref (buf); return result; }