static GstFlowReturn
vorbis_parse_push_buffer (GstVorbisParse * parse, GstBuffer * buf,
    gint64 granulepos)
{
  guint64 samples;

  /* our hack as noted below */
  samples = GST_BUFFER_OFFSET (buf);

  GST_BUFFER_OFFSET_END (buf) = granulepos;
  GST_BUFFER_DURATION (buf) = samples * GST_SECOND / parse->sample_rate;
  GST_BUFFER_OFFSET (buf) = granulepos * GST_SECOND / parse->sample_rate;
  GST_BUFFER_TIMESTAMP (buf) =
      GST_BUFFER_OFFSET (buf) - GST_BUFFER_DURATION (buf);

  gst_buffer_set_caps (buf, GST_PAD_CAPS (parse->srcpad));

  return gst_pad_push (parse->srcpad, buf);
}
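/* The granulepos-to-time conversion above is easy to misread: a vorbis
 * granulepos counts the total number of samples at the *end* of the packet,
 * so the start timestamp falls out of subtracting the packet's own sample
 * count. A minimal standalone sketch of the same arithmetic (the helper name
 * is ours, not part of the element; gst_util_uint64_scale avoids the 64-bit
 * overflow the plain multiplication risks): */
static GstClockTime
granulepos_to_start_time (gint64 granulepos, guint64 samples, gint rate)
{
  GstClockTime end = gst_util_uint64_scale (granulepos, GST_SECOND, rate);
  GstClockTime duration = gst_util_uint64_scale (samples, GST_SECOND, rate);

  return end - duration;
}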
static GstFlowReturn
gst_ffmpegenc_encode_audio (GstFFMpegEnc * ffmpegenc, guint8 * audio_in,
    guint max_size, GstClockTime timestamp, GstClockTime duration,
    gboolean discont)
{
  GstBuffer *outbuf;
  AVCodecContext *ctx;
  guint8 *audio_out;
  gint res;
  GstFlowReturn ret;

  ctx = ffmpegenc->context;

  outbuf = gst_buffer_new_and_alloc (max_size);
  audio_out = GST_BUFFER_DATA (outbuf);

  GST_LOG_OBJECT (ffmpegenc, "encoding buffer of max size %d", max_size);
  if (ffmpegenc->buffer_size != max_size)
    ffmpegenc->buffer_size = max_size;

  res = avcodec_encode_audio (ctx, audio_out, max_size, (short *) audio_in);

  if (res < 0) {
    GST_ERROR_OBJECT (ffmpegenc, "Failed to encode buffer: %d", res);
    gst_buffer_unref (outbuf);
    /* deliberately swallow the error: drop this frame but keep the
     * pipeline running */
    return GST_FLOW_OK;
  }
  GST_LOG_OBJECT (ffmpegenc, "got output size %d", res);

  GST_BUFFER_SIZE (outbuf) = res;
  GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
  GST_BUFFER_DURATION (outbuf) = duration;
  if (discont)
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
  gst_buffer_set_caps (outbuf, GST_PAD_CAPS (ffmpegenc->srcpad));

  GST_LOG_OBJECT (ffmpegenc, "pushing size %d, timestamp %" GST_TIME_FORMAT,
      res, GST_TIME_ARGS (timestamp));

  ret = gst_pad_push (ffmpegenc->srcpad, outbuf);

  return ret;
}
static gboolean
gst_teletextdec_push_preroll_buffer (GstTeletextDec * teletext)
{
  GstFlowReturn ret;
  GstBuffer *buf;
  gboolean res = TRUE;
  GstStructure *structure;
  const gchar *mimetype;
  GstCaps *out_caps, *peer_caps, *pad_caps;

  /* the stream is sparse, we send a dummy buffer for preroll */
  peer_caps = gst_pad_peer_get_caps (teletext->srcpad);
  pad_caps = gst_pad_get_caps (teletext->srcpad);
  out_caps = gst_caps_intersect (pad_caps, peer_caps);

  if (gst_caps_is_empty (out_caps)) {
    res = FALSE;
    goto beach;
  }

  gst_caps_truncate (out_caps);
  structure = gst_caps_get_structure (out_caps, 0);
  mimetype = gst_structure_get_name (structure);
  if (g_strcmp0 (mimetype, "video/x-raw-rgb") == 0) {
    /* omit preroll buffer for this format */
    goto beach;
  }

  buf = gst_buffer_new_and_alloc (1);
  GST_BUFFER_DATA (buf)[0] = 0;
  gst_buffer_set_caps (buf, out_caps);
  ret = gst_pad_push (teletext->srcpad, buf);
  if (ret != GST_FLOW_OK)
    res = FALSE;

beach:
  {
    gst_caps_unref (out_caps);
    gst_caps_unref (pad_caps);
    gst_caps_unref (peer_caps);
    return res;
  }
}
static gboolean
_rtpbin_pad_have_data_callback (GstPad * pad, GstMiniObject * miniobj,
    gpointer user_data)
{
  FsRtpSubStream *self = FS_RTP_SUB_STREAM (user_data);
  gboolean ret = TRUE;
  gboolean remove = FALSE;

  FS_RTP_SESSION_LOCK (self->priv->session);

  if (!self->priv->codecbin || !self->codec || !self->priv->caps) {
    ret = FALSE;
  } else if (GST_IS_BUFFER (miniobj)) {
    if (!gst_caps_is_equal_fixed (GST_BUFFER_CAPS (miniobj),
            self->priv->caps)) {
      GstCaps *intersect = gst_caps_intersect (GST_BUFFER_CAPS (miniobj),
          self->priv->caps);

      if (gst_caps_is_empty (intersect))
        ret = FALSE;
      else
        gst_buffer_set_caps (GST_BUFFER (miniobj), self->priv->caps);

      gst_caps_unref (intersect);
    } else {
      remove = TRUE;
    }
  }

  if (remove && self->priv->blocking_id) {
    gst_pad_remove_data_probe (pad, self->priv->blocking_id);
    self->priv->blocking_id = 0;
  }

  FS_RTP_SESSION_UNLOCK (self->priv->session);

  return ret;
}
/* Create and send a silence buffer downstream */
static GstFlowReturn
rsn_audiomunge_make_audio (RsnAudioMunge * munge, GstClockTime start,
    GstClockTime fill_time)
{
  GstFlowReturn ret;
  GstBuffer *audio_buf;
  GstCaps *caps;
  guint buf_size;

  /* Just generate a 48khz stereo buffer for now */
  /* FIXME: Adapt to the allowed formats, according to the currently
   * plugged decoder, or at least add a source pad that accepts the
   * caps we're outputting if the upstream decoder does not */
#if 0
  caps =
      gst_caps_from_string
      ("audio/x-raw-int,rate=48000,channels=2,width=16,depth=16,signed=(boolean)true,endianness=4321");
  buf_size = 4 * (48000 * fill_time / GST_SECOND);
#else
  caps =
      gst_caps_from_string ("audio/x-raw-float, endianness=(int)1234,"
      "width=(int)32, channels=(int)2, rate=(int)48000");
  buf_size = 2 * 4 * (48000 * fill_time / GST_SECOND);
#endif

  audio_buf = gst_buffer_new_and_alloc (buf_size);
  gst_buffer_set_caps (audio_buf, caps);
  gst_caps_unref (caps);

  GST_BUFFER_TIMESTAMP (audio_buf) = start;
  GST_BUFFER_DURATION (audio_buf) = fill_time;
  GST_BUFFER_FLAG_SET (audio_buf, GST_BUFFER_FLAG_DISCONT);

  memset (GST_BUFFER_DATA (audio_buf), 0, buf_size);

  GST_LOG_OBJECT (munge,
      "Sending %u bytes (%" GST_TIME_FORMAT ") of audio data "
      "with TS %" GST_TIME_FORMAT, buf_size, GST_TIME_ARGS (fill_time),
      GST_TIME_ARGS (start));

  ret = gst_pad_push (munge->srcpad, audio_buf);

  return ret;
}
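/* The buf_size arithmetic above is: channels * bytes-per-sample * number of
 * samples that fit in fill_time. A hedged generic form of the same
 * computation (the helper name is ours), using gst_util_uint64_scale so long
 * fill times cannot overflow the intermediate product: */
static guint
silence_buf_size (gint rate, gint channels, gint bytes_per_sample,
    GstClockTime fill_time)
{
  /* equals 2 * 4 * (48000 * fill_time / GST_SECOND) for the 32-bit float
   * stereo 48 kHz case used above */
  return channels * bytes_per_sample *
      gst_util_uint64_scale (rate, fill_time, GST_SECOND);
}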
/* allocate an output buffer; if no format was negotiated, this
 * function will negotiate one. After calling this function, a
 * reverse negotiation could have happened. */
static GstFlowReturn
get_buffer (GstVisualGL * visual, GstGLBuffer ** outbuf)
{
  /* we don't know an output format yet, pick one */
  if (GST_PAD_CAPS (visual->srcpad) == NULL) {
    if (!gst_vis_gl_src_negotiate (visual))
      return GST_FLOW_NOT_NEGOTIATED;
  }

  GST_DEBUG_OBJECT (visual, "allocating output buffer with caps %"
      GST_PTR_FORMAT, GST_PAD_CAPS (visual->srcpad));

  *outbuf = gst_gl_buffer_new (visual->display, visual->width, visual->height);
  if (*outbuf == NULL)
    return GST_FLOW_ERROR;

  gst_buffer_set_caps (GST_BUFFER (*outbuf), GST_PAD_CAPS (visual->srcpad));

  return GST_FLOW_OK;
}
/* the same as above, but different logic for setting timestamp and
 * granulepos */
static GstBuffer *
gst_vorbis_enc_buffer_from_header_packet (GstVorbisEnc * vorbisenc,
    ogg_packet * packet)
{
  GstBuffer *outbuf;

  outbuf = gst_buffer_new_and_alloc (packet->bytes);
  memcpy (GST_BUFFER_DATA (outbuf), packet->packet, packet->bytes);
  GST_BUFFER_OFFSET (outbuf) = vorbisenc->bytes_out;
  GST_BUFFER_OFFSET_END (outbuf) = 0;
  GST_BUFFER_TIMESTAMP (outbuf) = GST_CLOCK_TIME_NONE;
  GST_BUFFER_DURATION (outbuf) = GST_CLOCK_TIME_NONE;
  gst_buffer_set_caps (outbuf, vorbisenc->srccaps);

  GST_DEBUG ("created header packet buffer, %d bytes",
      GST_BUFFER_SIZE (outbuf));
  return outbuf;
}
static GstFlowReturn
gst_manchestermod_chain (GstPad * pad, GstBuffer * buf)
{
  Gst_manchestermod *mod;
  GstBuffer *outbuf;
  GstCaps *caps;
  gfloat *bufout, *bufin;
  GstFlowReturn ret;
  int i, j;

  mod = GST_MANCHESTERMOD (gst_pad_get_parent (pad));

  caps = gst_pad_get_caps (mod->srcpad);
  if (!gst_caps_is_fixed (caps) || mod->symbollen < 0) {
    GstStructure *structure;

    gst_caps_unref (caps);
    caps = gst_pad_get_allowed_caps (mod->srcpad);
    structure = gst_caps_get_structure (caps, 0);
    gst_structure_get_int (structure, "rate", &mod->rate);
    mod->symbollen = (float) mod->rate / (float) mod->symbolrate;
    gst_pad_use_fixed_caps (mod->srcpad);
  }

  outbuf = gst_buffer_new_and_alloc (GST_BUFFER_SIZE (buf) * mod->symbollen);
  GST_BUFFER_OFFSET (outbuf) = GST_BUFFER_OFFSET (buf);
  GST_BUFFER_TIMESTAMP (outbuf) = GST_BUFFER_TIMESTAMP (buf);

  bufin = (gfloat *) GST_BUFFER_DATA (buf);
  bufout = (gfloat *) GST_BUFFER_DATA (outbuf);

  /* expand each input sample into symbollen sine-shaped output samples */
  for (i = 0; i < GST_BUFFER_SIZE (buf) / sizeof (gfloat); i++) {
    for (j = 0; j < mod->symbollen; j++) {
      bufout[(i * mod->symbollen + j)] =
          sin ((j * M_PI * 2) / mod->symbollen) * bufin[i] * 0.999;
    }
  }

  gst_buffer_unref (buf);
  gst_buffer_set_caps (outbuf, caps);
  gst_caps_unref (caps);

  ret = gst_pad_push (mod->srcpad, outbuf);

  /* gst_pad_get_parent() returned a reference; drop it before leaving */
  gst_object_unref (mod);

  return ret;
}
void test_add_client()
{
  GstElement *sink;
  GstBuffer *buffer;
  GstCaps *caps;
  int pfd[2];
  gchar data[4];

  std_log(LOG_FILENAME_LINE, "Test Started test_add_client");

  sink = setup_multifdsink ();

  fail_if (pipe (pfd) == -1);

  ASSERT_SET_STATE (sink, GST_STATE_PLAYING, GST_STATE_CHANGE_ASYNC);

  /* add the client */
  g_signal_emit_by_name (sink, "add", pfd[1]);

  caps = gst_caps_from_string ("application/x-gst-check");
  GST_DEBUG ("Created test caps %p %" GST_PTR_FORMAT, caps, caps);
  buffer = gst_buffer_new_and_alloc (4);
  gst_buffer_set_caps (buffer, caps);
  ASSERT_CAPS_REFCOUNT (caps, "caps", 2);
  memcpy (GST_BUFFER_DATA (buffer), "dead", 4);
  fail_unless (gst_pad_push (mysrcpad, buffer) == GST_FLOW_OK);

  GST_DEBUG ("reading");
  fail_if (read (pfd[0], data, 4) < 4);
  fail_unless (strncmp (data, "dead", 4) == 0);
  wait_bytes_served (sink, 4);

  GST_DEBUG ("cleaning up multifdsink");
  ASSERT_SET_STATE (sink, GST_STATE_NULL, GST_STATE_CHANGE_SUCCESS);
  cleanup_multifdsink (sink);

  ASSERT_CAPS_REFCOUNT (caps, "caps", 1);
  gst_caps_unref (caps);

  std_log(LOG_FILENAME_LINE, "Test Successful");
  create_xml(0);
}
static GstFlowReturn
final_sinkpad_bufferalloc (GstPad * pad, guint64 offset, guint size,
    GstCaps * caps, GstBuffer ** buf)
{
  BufferAllocHarness *h;
  GTimeVal deadline;

  h = g_object_get_qdata (G_OBJECT (pad),
      g_quark_from_static_string ("buffer-alloc-harness"));
  g_assert (h != NULL);

  if (--(h->countdown) == 0) {
    /* Time to make the app release the pad. */
    h->app_thread_prepped = FALSE;
    h->bufferalloc_blocked = TRUE;

    h->app_thread = g_thread_create (app_thread_func, h, TRUE, NULL);
    fail_if (h->app_thread == NULL);

    /* Wait for the app thread to get ready to call release_request_pad(). */
    g_mutex_lock (check_mutex);
    while (!h->app_thread_prepped)
      g_cond_wait (check_cond, check_mutex);
    g_mutex_unlock (check_mutex);

    /* Now wait for it to do that within a second, to avoid deadlocking
     * in the event of future changes to the locking semantics. */
    g_mutex_lock (check_mutex);
    g_get_current_time (&deadline);
    deadline.tv_sec += 1;
    while (h->bufferalloc_blocked) {
      if (!g_cond_timed_wait (check_cond, check_mutex, &deadline))
        break;
    }
    g_mutex_unlock (check_mutex);
  }

  *buf = gst_buffer_new_and_alloc (size);
  gst_buffer_set_caps (*buf, caps);

  return GST_FLOW_OK;
}
static GstFlowReturn
gst_y4m_encode_chain (GstPad * pad, GstBuffer * buf)
{
  GstY4mEncode *filter = GST_Y4M_ENCODE (GST_PAD_PARENT (pad));
  GstBuffer *outbuf;
  GstClockTime timestamp;

  /* check we got some decent info from caps */
  if (filter->width < 0) {
    GST_ELEMENT_ERROR (filter, CORE, NEGOTIATION, (NULL),
        ("format wasn't negotiated before chain function"));
    gst_buffer_unref (buf);
    return GST_FLOW_NOT_NEGOTIATED;
  }

  timestamp = GST_BUFFER_TIMESTAMP (buf);

  if (G_UNLIKELY (!filter->header)) {
    if (filter->interlaced == TRUE) {
      if (GST_BUFFER_FLAG_IS_SET (buf, GST_VIDEO_BUFFER_TFF)) {
        filter->top_field_first = TRUE;
      } else {
        filter->top_field_first = FALSE;
      }
    }
    outbuf = gst_y4m_encode_get_stream_header (filter);
    filter->header = TRUE;
    outbuf =
        gst_buffer_join (outbuf, gst_y4m_encode_get_frame_header (filter));
  } else {
    outbuf = gst_y4m_encode_get_frame_header (filter);
  }
  /* join with data */
  outbuf = gst_buffer_join (outbuf, buf);
  /* decorate */
  gst_buffer_make_metadata_writable (outbuf);
  gst_buffer_set_caps (outbuf, GST_PAD_CAPS (filter->srcpad));

  GST_BUFFER_TIMESTAMP (outbuf) = timestamp;

  return gst_pad_push (filter->srcpad, outbuf);
}
static GstBuffer *convert_frame(struct obj *self, AVFrame *frame)
{
	GstBuffer *out_buf;
	int64_t v;

	out_buf = frame->opaque;

	if (!out_buf) {
		AVCodecContext *ctx;
		int i;
		guint8 *p;
		int width, height;

		ctx = self->av_ctx;
		width = ROUND_UP(ctx->width, 4);
		height = ctx->height;

		out_buf = gst_buffer_new_and_alloc(width * height * 3 / 2);
		gst_buffer_set_caps(out_buf, self->srcpad->caps);

		p = out_buf->data;
		for (i = 0; i < height; i++)
			memcpy(p + i * width,
					frame->data[0] + i * frame->linesize[0],
					width);

		p = out_buf->data + width * height;
		for (i = 0; i < height / 2; i++)
			memcpy(p + i * width / 2,
					frame->data[1] + i * frame->linesize[1],
					width / 2);

		p = out_buf->data + width * height * 5 / 4;
		for (i = 0; i < height / 2; i++)
			memcpy(p + i * width / 2,
					frame->data[2] + i * frame->linesize[2],
					width / 2);
	}

#if LIBAVCODEC_VERSION_MAJOR < 53
	v = frame->reordered_opaque;
#else
	v = frame->pkt_pts;
#endif

	out_buf->timestamp = gstav_pts_to_timestamp(self->av_ctx, v);

	return out_buf;
}
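/* The three loops in convert_frame() above (and in its simpler variant later
 * in this section) are all instances of one pattern: copy a plane row by row
 * from a possibly padded source stride into a packed destination stride. A
 * hedged generic form of that pattern (the helper name is ours): */
static void
copy_plane (guint8 * dst, gint dst_stride, const guint8 * src,
    gint src_stride, gint rows, gint row_bytes)
{
  gint i;

  /* memcpy row_bytes per row; the strides may differ because of padding */
  for (i = 0; i < rows; i++)
    memcpy (dst + i * dst_stride, src + i * src_stride, row_bytes);
}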
static void
gst_soup_http_src_got_chunk_cb (SoupMessage * msg, SoupBuffer * chunk,
    GstSoupHTTPSrc * src)
{
  GstBaseSrc *basesrc;
  guint64 new_position;

  if (G_UNLIKELY (msg != src->msg)) {
    GST_DEBUG_OBJECT (src, "got chunk, but not for current message");
    return;
  }
  if (G_UNLIKELY (src->session_io_status !=
          GST_SOUP_HTTP_SRC_SESSION_IO_STATUS_RUNNING)) {
    /* Probably a redirect. */
    return;
  }

  basesrc = GST_BASE_SRC_CAST (src);
  GST_DEBUG_OBJECT (src, "got chunk of %" G_GSIZE_FORMAT " bytes",
      chunk->length);

  /* Extract the GstBuffer from the SoupBuffer and set its fields. */
  *src->outbuf = GST_BUFFER_CAST (soup_buffer_get_owner (chunk));

  GST_BUFFER_SIZE (*src->outbuf) = chunk->length;
  GST_BUFFER_OFFSET (*src->outbuf) = basesrc->segment.last_stop;

  gst_buffer_set_caps (*src->outbuf,
      (src->src_caps) ? src->src_caps :
      GST_PAD_CAPS (GST_BASE_SRC_PAD (basesrc)));

  gst_buffer_ref (*src->outbuf);

  new_position = src->read_position + chunk->length;
  if (G_LIKELY (src->request_position == src->read_position))
    src->request_position = new_position;
  src->read_position = new_position;

  src->ret = GST_FLOW_OK;
  g_main_loop_quit (src->loop);
  gst_soup_http_src_session_pause_message (src);
}
void test_buffer()
{
  GstCaps *c1;
  GstBuffer *buffer;

  //xmlfile = "test_buffer";
  std_log(LOG_FILENAME_LINE, "Test Started test_buffer");

  buffer = gst_buffer_new_and_alloc (1000);
  c1 = gst_caps_new_simple ("audio/x-raw-int",
      "buffer", GST_TYPE_BUFFER, buffer, NULL);
  GST_DEBUG ("caps: %" GST_PTR_FORMAT, c1);
  gst_buffer_unref (buffer);

  buffer = gst_buffer_new_and_alloc (1000);
  gst_buffer_set_caps (buffer, c1);     /* doesn't give away our c1 ref */

  gst_caps_unref (c1);
  gst_buffer_unref (buffer);    /* Should now drop both references */

  std_log(LOG_FILENAME_LINE, "Test Successful");
  create_xml(0);
}
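/* The refcounting rule the test above exercises: gst_buffer_set_caps() takes
 * its own reference to the caps instead of stealing the caller's. A minimal
 * hedged sketch of the same lifecycle outside the test harness (the function
 * name is ours): */
static void
caps_refcount_sketch (void)
{
  GstCaps *caps = gst_caps_new_simple ("audio/x-raw-int", NULL);
  GstBuffer *buf = gst_buffer_new_and_alloc (16);

  gst_buffer_set_caps (buf, caps);      /* buffer adds a ref: refcount is 2 */
  gst_caps_unref (caps);        /* drop our ref; the buffer still holds one */
  gst_buffer_unref (buf);       /* finalizing the buffer drops the last ref */
}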
/* takes a copy of the passed buffer data */
static GstBuffer *
buffer_new (const unsigned char *buffer_data, guint size)
{
  GstBuffer *buffer;

  buffer = gst_buffer_new_and_alloc (size);
  if (buffer_data) {
    memcpy (GST_BUFFER_DATA (buffer), buffer_data, size);
  } else {
    guint i;
    /* Create a recognizable pattern (loop 0x00 -> 0xff) in the data block */
    for (i = 0; i < size; i++) {
      GST_BUFFER_DATA (buffer)[i] = i % 0x100;
    }
  }

  gst_buffer_set_caps (buffer, GST_PAD_CAPS (srcpad));
  GST_BUFFER_OFFSET (buffer) = dataoffset;
  dataoffset += size;
  return buffer;
}
static GstFlowReturn
gst_gsmenc_chain (GstPad * pad, GstBuffer * buf)
{
  GstGSMEnc *gsmenc;
  gsm_signal *data;
  GstFlowReturn ret = GST_FLOW_OK;

  gsmenc = GST_GSMENC (gst_pad_get_parent (pad));

  if (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT)) {
    gst_adapter_clear (gsmenc->adapter);
  }
  gst_adapter_push (gsmenc->adapter, buf);

  while (gst_adapter_available (gsmenc->adapter) >= 320) {
    GstBuffer *outbuf;

    outbuf = gst_buffer_new_and_alloc (33 * sizeof (gsm_byte));
    GST_BUFFER_TIMESTAMP (outbuf) = gsmenc->next_ts;
    GST_BUFFER_DURATION (outbuf) = 20 * GST_MSECOND;
    gsmenc->next_ts += 20 * GST_MSECOND;

    /* encode 160 16-bit samples into 33 bytes */
    data = (gsm_signal *) gst_adapter_peek (gsmenc->adapter, 320);
    gsm_encode (gsmenc->state, data, (gsm_byte *) GST_BUFFER_DATA (outbuf));
    gst_adapter_flush (gsmenc->adapter, 320);

    gst_buffer_set_caps (outbuf, GST_PAD_CAPS (gsmenc->srcpad));

    GST_DEBUG_OBJECT (gsmenc, "Pushing buffer of size %d",
        GST_BUFFER_SIZE (outbuf));

    ret = gst_pad_push (gsmenc->srcpad, outbuf);
  }

  gst_object_unref (gsmenc);

  return ret;
}
/*******************************************************************************
 * gst_tidmaivideosink_buffer_alloc
 *
 * Function used to allocate a buffer from upstream elements
 *******************************************************************************/
static GstFlowReturn gst_tidmaivideosink_buffer_alloc(GstBaseSink *bsink,
    guint64 offset, guint size, GstCaps *caps, GstBuffer **buf)
{
    Buffer_Handle hBuf;
    GstTIDmaiVideoSink *sink = GST_TIDMAIVIDEOSINK_CAST(bsink);

    if (!sink->zeromemcpy) {
        return GST_FLOW_OK;
    }

    if (!sink->capsAreSet) {
        if (!gst_tidmaivideosink_set_caps(bsink, caps)) {
            return GST_FLOW_UNEXPECTED;
        }
    }

    if (!sink->dmaiElementUpstream) {
        return GST_FLOW_OK;
    }

    hBuf = gst_tidmaivideosink_get_display_buffer(sink, NULL);

    if (hBuf) {
        *buf = gst_tidmaibuffertransport_new(hBuf, NULL, NULL, FALSE);
        sink->allocatedBuffers[Buffer_getId(hBuf)] = *buf;
        gst_tidmaibuffertransport_set_release_callback(
            (GstTIDmaiBufferTransport *)*buf,
            allocated_buffer_release_cb, sink);
        gst_buffer_set_caps(*buf, caps);
        GST_BUFFER_SIZE(*buf) = gst_ti_calculate_bufSize(
            sink->oattrs.width, sink->oattrs.height, sink->colorSpace);
        sink->numAllocatedBuffers++;
        GST_LOG("Number of pad allocated buffers is %d, current %p",
            sink->numAllocatedBuffers, *buf);
        sink->lastAllocatedBuffer = *buf;
    } else {
        return GST_FLOW_UNEXPECTED;
    }

    GST_DEBUG("Leave with buffer %p", *buf);
    return GST_FLOW_OK;
}
static GstBuffer *
gst_dtmf_src_create_next_tone_packet (GstDTMFSrc * dtmfsrc,
    GstDTMFSrcEvent * event)
{
  GstBuffer *buf = NULL;
  gboolean send_silence = FALSE;
  GstPad *srcpad = GST_BASE_SRC_PAD (dtmfsrc);

  GST_LOG_OBJECT (dtmfsrc, "Creating buffer for tone %s",
      DTMF_KEYS[event->event_number].event_name);

  /* create buffer to hold the tone */
  buf = gst_buffer_new ();

  if (event->packet_count * dtmfsrc->interval < MIN_INTER_DIGIT_INTERVAL) {
    send_silence = TRUE;
  }

  if (send_silence) {
    GST_LOG_OBJECT (dtmfsrc, "Generating silence");
    gst_dtmf_src_generate_silence (buf, dtmfsrc->interval,
        dtmfsrc->sample_rate);
  } else {
    GST_LOG_OBJECT (dtmfsrc, "Generating tone");
    gst_dtmf_src_generate_tone (event, DTMF_KEYS[event->event_number],
        dtmfsrc->interval, buf, dtmfsrc->sample_rate);
  }
  event->packet_count++;

  /* timestamp and duration of GstBuffer */
  GST_BUFFER_DURATION (buf) = dtmfsrc->interval * GST_MSECOND;
  GST_BUFFER_TIMESTAMP (buf) = dtmfsrc->timestamp;
  dtmfsrc->timestamp += GST_BUFFER_DURATION (buf);

  /* Set caps on the buffer before pushing it */
  gst_buffer_set_caps (buf, GST_PAD_CAPS (srcpad));

  return buf;
}
/* Create and send a silence buffer downstream */
static GstFlowReturn
rsn_audiomunge_make_audio (RsnAudioMunge * munge, GstClockTime start,
    GstClockTime fill_time)
{
  GstFlowReturn ret;
  GstBuffer *audio_buf;
  GstCaps *caps;
  guint buf_size;

  /* Just generate a 48khz stereo buffer for now */
#if 0
  caps =
      gst_caps_from_string
      ("audio/x-raw-int,rate=48000,channels=2,width=16,depth=16,signed=(boolean)true,endianness=1234");
  buf_size = 4 * (48000 * fill_time / GST_SECOND);
#else
  caps =
      gst_caps_from_string ("audio/x-raw-float, endianness=(int)1234,"
      "width=(int)32, channels=(int)2, rate=(int)48000");
  buf_size = 2 * 4 * (48000 * fill_time / GST_SECOND);
#endif

  audio_buf = gst_buffer_new_and_alloc (buf_size);
  gst_buffer_set_caps (audio_buf, caps);
  gst_caps_unref (caps);

  GST_BUFFER_TIMESTAMP (audio_buf) = start;
  GST_BUFFER_DURATION (audio_buf) = fill_time;
  GST_BUFFER_FLAG_SET (audio_buf, GST_BUFFER_FLAG_DISCONT);

  memset (GST_BUFFER_DATA (audio_buf), 0, buf_size);

  g_print ("Sending %u bytes (%" GST_TIME_FORMAT ") of audio data "
      "with TS %" GST_TIME_FORMAT "\n", buf_size, GST_TIME_ARGS (fill_time),
      GST_TIME_ARGS (start));

  ret = gst_pad_push (munge->srcpad, audio_buf);

  return ret;
}
static GstFlowReturn
gst_wavenc_chain (GstPad * pad, GstBuffer * buf)
{
  GstWavEnc *wavenc = GST_WAVENC (GST_PAD_PARENT (pad));
  GstFlowReturn flow = GST_FLOW_OK;

  g_return_val_if_fail (wavenc->channels > 0, GST_FLOW_WRONG_STATE);

  if (!wavenc->sent_header) {
    /* use bogus size initially, we'll write the real
     * header when we get EOS and know the exact length */
    flow = gst_wavenc_push_header (wavenc, 0x7FFF0000);

    if (flow != GST_FLOW_OK)
      return flow;

    GST_DEBUG_OBJECT (wavenc, "wrote dummy header");
    wavenc->sent_header = TRUE;
  }

  wavenc->length += GST_BUFFER_SIZE (buf);

  GST_LOG_OBJECT (wavenc, "pushing %u bytes raw audio, ts=%" GST_TIME_FORMAT,
      GST_BUFFER_SIZE (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)));

  if (wavenc->width != wavenc->depth) {
    buf = gst_buffer_make_writable (buf);
    gst_wavenc_format_samples (buf, wavenc->width, wavenc->depth);
  } else {
    buf = gst_buffer_make_metadata_writable (buf);
  }

  gst_buffer_set_caps (buf, GST_PAD_CAPS (wavenc->srcpad));
  GST_BUFFER_OFFSET (buf) = WAV_HEADER_LEN + wavenc->length;
  GST_BUFFER_OFFSET_END (buf) = GST_BUFFER_OFFSET_NONE;

  flow = gst_pad_push (wavenc->srcpad, buf);

  return flow;
}
static GstFlowReturn
process_output_buffer (GstGooEncPcm * self, OMX_BUFFERHEADERTYPE * buffer)
{
  GstBuffer *out = NULL;
  GstFlowReturn ret = GST_FLOW_ERROR;

  if (buffer->nFilledLen <= 0) {
    GST_INFO_OBJECT (self, "Received an empty buffer!");
    goo_component_release_buffer (self->component, buffer);
    /* an empty buffer is not an error; the function must return a
     * GstFlowReturn, so report GST_FLOW_OK */
    return GST_FLOW_OK;
  }

  GST_DEBUG_OBJECT (self, "outcount = %d", self->outcount);

  out = gst_buffer_new_and_alloc (buffer->nFilledLen);
  g_assert (out != NULL);
  memmove (GST_BUFFER_DATA (out), buffer->pBuffer, buffer->nFilledLen);
  goo_component_release_buffer (self->component, buffer);

  if (out != NULL) {
    GST_BUFFER_DURATION (out) = self->duration;
    GST_BUFFER_OFFSET (out) = self->outcount++;
    GST_BUFFER_TIMESTAMP (out) = self->ts;
    if (self->ts != -1) {
      self->ts += self->duration;
    }

    gst_buffer_set_caps (out, GST_PAD_CAPS (self->srcpad));

    GST_DEBUG_OBJECT (self, "pushing gst buffer");
    ret = gst_pad_push (self->srcpad, out);
  }

  GST_INFO_OBJECT (self, "");

  return ret;
}
static GstFlowReturn
gst_vdp_video_yuv_buffer_alloc (GstPad * pad, guint64 offset, guint size,
    GstCaps * caps, GstBuffer ** buf)
{
  GstVdpVideoYUV *video_yuv = GST_VDP_VIDEO_YUV (gst_pad_get_parent (pad));
  GstFlowReturn ret = GST_FLOW_ERROR;
  GstStructure *structure;
  gint width, height;
  gint chroma_type;

  structure = gst_caps_get_structure (caps, 0);
  if (!structure)
    goto error;

  if (!gst_structure_get_int (structure, "width", &width))
    goto error;
  if (!gst_structure_get_int (structure, "height", &height))
    goto error;
  if (!gst_structure_get_int (structure, "chroma-type", &chroma_type))
    goto error;

  *buf = GST_BUFFER (gst_vdp_video_buffer_new (video_yuv->device,
          chroma_type, width, height));
  if (*buf == NULL)
    goto error;

  GST_BUFFER_SIZE (*buf) = size;
  GST_BUFFER_OFFSET (*buf) = offset;

  gst_buffer_set_caps (*buf, caps);

  ret = GST_FLOW_OK;

  /* the success path falls through here too; both paths only need to
   * release the object reference taken by gst_pad_get_parent() */
error:
  gst_object_unref (video_yuv);

  return ret;
}
static GstFlowReturn
gst_shm_sink_buffer_alloc (GstBaseSink * sink, guint64 offset, guint size,
    GstCaps * caps, GstBuffer ** out_buf)
{
  GstShmSink *self = GST_SHM_SINK (sink);
  GstBuffer *buffer;
  ShmBlock *block = NULL;
  gpointer buf = NULL;

  GST_OBJECT_LOCK (self);
  block = sp_writer_alloc_block (self->pipe, size);
  if (block) {
    buf = sp_writer_block_get_buf (block);
    g_object_ref (self);
  }
  GST_OBJECT_UNLOCK (self);

  if (block) {
    buffer = gst_buffer_new ();
    GST_BUFFER_DATA (buffer) = buf;
    GST_BUFFER_MALLOCDATA (buffer) = (guint8 *) block;
    GST_BUFFER_FREE_FUNC (buffer) =
        GST_DEBUG_FUNCPTR (gst_shm_sink_free_buffer);
    GST_BUFFER_SIZE (buffer) = size;
    GST_LOG_OBJECT (self,
        "Allocated buffer of %u bytes from shared memory at %p", size, buf);
  } else {
    buffer = gst_buffer_new_and_alloc (size);
    GST_LOG_OBJECT (self, "Not enough shared memory for buffer of %u bytes, "
        "allocating using standard allocator", size);
  }

  GST_BUFFER_OFFSET (buffer) = offset;
  gst_buffer_set_caps (buffer, caps);

  *out_buf = buffer;

  return GST_FLOW_OK;
}
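/* The shared-memory path above shows the GStreamer 0.10 idiom for handing
 * foreign memory to a GstBuffer: DATA points at the payload, MALLOCDATA
 * carries an opaque allocator handle, and FREE_FUNC is invoked on MALLOCDATA
 * when the buffer is finalized. A hedged standalone sketch of that idiom
 * (function names are ours): */
static void
my_block_free (gpointer block)
{
  /* return the block to its allocator here */
}

static GstBuffer *
wrap_foreign_memory (guint8 * data, guint size, gpointer block)
{
  GstBuffer *buffer = gst_buffer_new ();

  GST_BUFFER_DATA (buffer) = data;
  GST_BUFFER_SIZE (buffer) = size;
  GST_BUFFER_MALLOCDATA (buffer) = (guint8 *) block;
  GST_BUFFER_FREE_FUNC (buffer) = my_block_free;

  return buffer;
}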
static GstBuffer *
test_buffer_new (gfloat value)
{
  GstBuffer *buf;
  GstCaps *caps;
  gfloat *data;
  gint i;

  buf = gst_buffer_new_and_alloc (8 * sizeof (gfloat));
  data = (gfloat *) GST_BUFFER_DATA (buf);
  for (i = 0; i < 8; i++)
    data[i] = value;

  caps = gst_caps_from_string ("audio/x-raw-float, "
      "rate = 8000, channels = 1, endianness = BYTE_ORDER, width = 32");
  gst_buffer_set_caps (buf, caps);
  gst_caps_unref (caps);

  ASSERT_BUFFER_REFCOUNT (buf, "buf", 1);

  return buf;
}
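/* Typical use of the helper above in a gst-check test; this sketch assumes
 * the usual check-harness globals (mysrcpad set up by the test fixture): */
static void
push_test_buffer_example (void)
{
  GstBuffer *buf = test_buffer_new (0.5);

  /* ownership of the buffer transfers to the pad on push */
  fail_unless (gst_pad_push (mysrcpad, buf) == GST_FLOW_OK);
}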
GstFlowReturn
gst_base_video_parse_push (GstBaseVideoParse * base_video_parse,
    GstBuffer * buffer)
{
  GstBaseVideoParseClass *base_video_parse_class;

  base_video_parse_class = GST_BASE_VIDEO_PARSE_GET_CLASS (base_video_parse);

  if (base_video_parse->caps == NULL) {
    gboolean ret;

    base_video_parse->caps =
        base_video_parse_class->get_caps (base_video_parse);

    ret = gst_pad_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_parse),
        base_video_parse->caps);

    if (!ret) {
      GST_WARNING ("pad didn't accept caps");
      return GST_FLOW_ERROR;
    }
  }
  gst_buffer_set_caps (buffer, base_video_parse->caps);

  GST_DEBUG ("pushing ts=%" GST_TIME_FORMAT " dur=%" GST_TIME_FORMAT
      " off=%" G_GUINT64_FORMAT " off_end=%" G_GUINT64_FORMAT,
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buffer)),
      GST_BUFFER_OFFSET (buffer), GST_BUFFER_OFFSET_END (buffer));

  if (base_video_parse->discont) {
    GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DISCONT);
    base_video_parse->discont = FALSE;
  } else {
    GST_BUFFER_FLAG_UNSET (buffer, GST_BUFFER_FLAG_DISCONT);
  }

  return gst_pad_push (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_parse),
      buffer);
}
static GstFlowReturn
gst_dirac_parse_parse_frame (GstBaseParse * parse, GstBaseParseFrame * frame)
{
  //GstDiracParse * diracparse = GST_DIRAC_PARSE (parse);

  /* Called when processing incoming buffers. Function should parse
     a checked frame. */
  /* MUST implement */

  if (GST_PAD_CAPS (GST_BASE_PARSE_SRC_PAD (parse)) == NULL) {
    GstCaps *caps = gst_caps_new_simple ("video/x-dirac", NULL);

    gst_buffer_set_caps (frame->buffer, caps);
    gst_pad_set_caps (GST_BASE_PARSE_SRC_PAD (parse), caps);
    gst_caps_unref (caps);
  }

  gst_base_parse_set_min_frame_size (parse, 13);

  return GST_FLOW_OK;
}
static GstFlowReturn
gst_gl_filter_prepare_output_buffer (GstBaseTransform * trans,
    GstBuffer * inbuf, gint size, GstCaps * caps, GstBuffer ** buf)
{
  GstGLFilter *filter = NULL;
  GstGLBuffer *gl_inbuf = GST_GL_BUFFER (inbuf);
  GstGLBuffer *gl_outbuf = NULL;

  filter = GST_GL_FILTER (trans);

  if (filter->display == NULL) {
    GstGLFilterClass *filter_class = GST_GL_FILTER_GET_CLASS (filter);

    filter->display = g_object_ref (gl_inbuf->display);

    //blocking call, generate a FBO
    gst_gl_display_gen_fbo (filter->display, filter->width, filter->height,
        &filter->fbo, &filter->depthbuffer);

    if (filter_class->display_init_cb != NULL) {
      gst_gl_display_thread_add (filter->display, gst_gl_filter_start_gl,
          filter);
    }

    if (filter_class->onInitFBO)
      filter_class->onInitFBO (filter);
  }

  gl_outbuf = gst_gl_buffer_new (filter->display,
      filter->width, filter->height);

  *buf = GST_BUFFER (gl_outbuf);
  gst_buffer_set_caps (*buf, caps);

  if (gl_outbuf->texture)
    return GST_FLOW_OK;
  else
    return GST_FLOW_UNEXPECTED;
}
static GstFlowReturn
gst_aravis_create (GstPushSrc * push_src, GstBuffer ** buffer)
{
  GstAravis *gst_aravis;
  ArvBuffer *arv_buffer;

  gst_aravis = GST_ARAVIS (push_src);

  do {
    arv_buffer = arv_stream_timeout_pop_buffer (gst_aravis->stream,
        gst_aravis->buffer_timeout_us);
    if (arv_buffer != NULL && arv_buffer->status != ARV_BUFFER_STATUS_SUCCESS)
      arv_stream_push_buffer (gst_aravis->stream, arv_buffer);
  } while (arv_buffer != NULL &&
      arv_buffer->status != ARV_BUFFER_STATUS_SUCCESS);

  if (arv_buffer == NULL)
    return GST_FLOW_ERROR;

  *buffer = gst_buffer_new ();

  GST_BUFFER_DATA (*buffer) = arv_buffer->data;
  GST_BUFFER_MALLOCDATA (*buffer) = NULL;
  GST_BUFFER_SIZE (*buffer) = gst_aravis->payload;

  if (gst_aravis->timestamp_offset == 0) {
    gst_aravis->timestamp_offset = arv_buffer->timestamp_ns;
    gst_aravis->last_timestamp = arv_buffer->timestamp_ns;
  }

  GST_BUFFER_TIMESTAMP (*buffer) =
      arv_buffer->timestamp_ns - gst_aravis->timestamp_offset;
  GST_BUFFER_DURATION (*buffer) =
      arv_buffer->timestamp_ns - gst_aravis->last_timestamp;

  gst_aravis->last_timestamp = arv_buffer->timestamp_ns;

  arv_stream_push_buffer (gst_aravis->stream, arv_buffer);

  gst_buffer_set_caps (*buffer, gst_aravis->fixed_caps);

  return GST_FLOW_OK;
}
static GstFlowReturn
gst_h263_parse_parse_frame (GstBaseParse * parse, GstBaseParseFrame * frame)
{
  GstH263Parse *h263parse;
  GstBuffer *buffer;
  GstFlowReturn res;
  H263Params params = { 0, };

  h263parse = GST_H263_PARSE (parse);
  buffer = frame->buffer;

  res = gst_h263_parse_get_params (&params, buffer, TRUE, &h263parse->state);
  if (res != GST_FLOW_OK)
    goto out;

  if (h263parse->state == PASSTHROUGH || h263parse->state == PARSING) {
    /* There's a feature we don't support, or we didn't have enough data to
     * parse the header, which should not be possible. Either way, go into
     * passthrough mode and let downstream handle it if it can. */
    GST_WARNING ("Couldn't parse header - setting passthrough mode");
    gst_base_parse_set_passthrough (parse, TRUE);
    goto out;
  }

  /* h263parse->state is now GOT_HEADER */

  gst_buffer_set_caps (buffer,
      GST_PAD_CAPS (GST_BASE_PARSE_SRC_PAD (GST_BASE_PARSE (h263parse))));

  /* a delta unit (inter frame) must carry the DELTA_UNIT flag;
   * keyframes must not */
  if (gst_h263_parse_is_delta_unit (&params))
    GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT);
  else
    GST_BUFFER_FLAG_UNSET (buffer, GST_BUFFER_FLAG_DELTA_UNIT);

out:
  return res;
}
/* TODO there must be a more straight-forward way */
static GstBuffer *convert_frame(struct obj *self, AVFrame *frame)
{
	AVCodecContext *ctx;
	int i;
	GstBuffer *out_buf;
	guint8 *p;

	ctx = self->av_ctx;

	out_buf = gst_buffer_new_and_alloc(ctx->width * ctx->height * 3 / 2);
	gst_buffer_set_caps(out_buf, self->srcpad->caps);

	p = out_buf->data;
	for (i = 0; i < ctx->height; i++)
		memcpy(p + i * ctx->width,
				frame->data[0] + i * frame->linesize[0],
				ctx->width);

	p = out_buf->data + ctx->width * ctx->height;
	for (i = 0; i < ctx->height / 2; i++)
		memcpy(p + i * ctx->width / 2,
				frame->data[1] + i * frame->linesize[1],
				ctx->width / 2);

	p = out_buf->data + ctx->width * ctx->height * 5 / 4;
	for (i = 0; i < ctx->height / 2; i++)
		memcpy(p + i * ctx->width / 2,
				frame->data[2] + i * frame->linesize[2],
				ctx->width / 2);

	return out_buf;
}