static GstBufferList * create_buffer_list (void) { guint len; GstBuffer *buffer; mylist = gst_buffer_list_new (); fail_if (mylist == NULL); mycaps = gst_caps_from_string ("application/x-gst-check"); fail_if (mycaps == NULL); len = gst_buffer_list_length (mylist); fail_if (len != 0); buffer = gst_buffer_new_and_alloc (sizeof (gint)); gst_buffer_fill (buffer, 0, &values[0], sizeof (gint)); gst_buffer_list_add (mylist, buffer); buffer = gst_buffer_new_and_alloc (sizeof (gint)); gst_buffer_fill (buffer, 0, &values[1], sizeof (gint)); gst_buffer_list_add (mylist, buffer); buffer = gst_buffer_new_and_alloc (sizeof (gint)); gst_buffer_fill (buffer, 0, &values[2], sizeof (gint)); gst_buffer_list_add (mylist, buffer); gst_pad_set_caps (mysrcpad, mycaps); gst_caps_unref (mycaps); return mylist; }
/* from the given two data buffers, create two streamheader buffers and
 * some caps that match it, and store them in the given pointers
 * returns one ref to each of the buffers and the caps */
static void
gst_multisocketsink_create_streamheader (const gchar * data1,
    const gchar * data2, GstBuffer ** hbuf1, GstBuffer ** hbuf2,
    GstCaps ** caps)
{
  GstBuffer *buf;
  GValue array = { 0 };
  GValue value = { 0 };
  GstStructure *structure;
  guint size1 = strlen (data1);
  guint size2 = strlen (data2);

  fail_if (hbuf1 == NULL);
  fail_if (hbuf2 == NULL);
  fail_if (caps == NULL);

  /* create caps with streamheader, set the caps, and push the HEADER
   * buffers */
  *hbuf1 = gst_buffer_new_and_alloc (size1);
  GST_BUFFER_FLAG_SET (*hbuf1, GST_BUFFER_FLAG_HEADER);
  gst_buffer_fill (*hbuf1, 0, data1, size1);
  *hbuf2 = gst_buffer_new_and_alloc (size2);
  GST_BUFFER_FLAG_SET (*hbuf2, GST_BUFFER_FLAG_HEADER);
  gst_buffer_fill (*hbuf2, 0, data2, size2);

  g_value_init (&array, GST_TYPE_ARRAY);

  g_value_init (&value, GST_TYPE_BUFFER);
  /* we take a copy, set it on the array (which refs it), then unref our copy */
  buf = gst_buffer_copy (*hbuf1);
  gst_value_set_buffer (&value, buf);
  /* refcount 2: one ref from the copy, one taken by the GValue */
  ASSERT_BUFFER_REFCOUNT (buf, "copied buffer", 2);
  gst_buffer_unref (buf);
  gst_value_array_append_value (&array, &value);
  g_value_unset (&value);

  /* second header buffer goes through the same copy/set/unref dance */
  g_value_init (&value, GST_TYPE_BUFFER);
  buf = gst_buffer_copy (*hbuf2);
  gst_value_set_buffer (&value, buf);
  ASSERT_BUFFER_REFCOUNT (buf, "copied buffer", 2);
  gst_buffer_unref (buf);
  gst_value_array_append_value (&array, &value);
  g_value_unset (&value);

  /* attach the buffer array as the "streamheader" field of the caps */
  *caps = gst_caps_from_string ("application/x-gst-check");
  structure = gst_caps_get_structure (*caps, 0);
  gst_structure_set_value (structure, "streamheader", &array);
  g_value_unset (&array);
  ASSERT_CAPS_REFCOUNT (*caps, "streamheader caps", 1);

  /* we want to keep them around for the tests */
  gst_buffer_ref (*hbuf1);
  gst_buffer_ref (*hbuf2);

  GST_DEBUG ("created streamheader caps %p %" GST_PTR_FORMAT, *caps, *caps);
}
/* Build a single buffer containing the VPS, SPS and PPS headers produced
 * by x265 for the configured encoder, concatenated in that order.
 * Returns NULL (and posts an element error) on failure. */
static GstBuffer *
gst_x265_enc_get_header_buffer (GstX265Enc * encoder)
{
  x265_nal *nal;
  guint32 i_nal, i, offset;
  gint32 vps_idx, sps_idx, pps_idx;
  int header_return;
  GstBuffer *buf;

  header_return = x265_encoder_headers (encoder->x265enc, &nal, &i_nal);
  if (header_return < 0) {
    GST_ELEMENT_ERROR (encoder, STREAM, ENCODE, ("Encode x265 header failed."),
        ("x265_encoder_headers return code=%d", header_return));
    /* fix: this function returns a GstBuffer *, not a boolean */
    return NULL;
  }

  GST_DEBUG_OBJECT (encoder, "%d nal units in header", i_nal);

  /* x265 also returns non-header nal units from the x265_encoder_headers
   * call.  The useful headers are sequential (VPS, SPS and PPS), so we look
   * for these nal units and only copy those three as the header */
  vps_idx = sps_idx = pps_idx = -1;
  for (i = 0; i < i_nal; i++) {
    if (nal[i].type == 32) {        /* HEVC NAL_UNIT_VPS */
      vps_idx = i;
    } else if (nal[i].type == 33) { /* HEVC NAL_UNIT_SPS */
      sps_idx = i;
    } else if (nal[i].type == 34) { /* HEVC NAL_UNIT_PPS */
      pps_idx = i;
    }
  }

  if (vps_idx == -1 || sps_idx == -1 || pps_idx == -1) {
    GST_ELEMENT_ERROR (encoder, STREAM, ENCODE, ("Encode x265 header failed."),
        ("x265_encoder_headers did not return VPS, SPS and PPS"));
    return NULL;
  }

  /* concatenate the three header nal units into one buffer */
  offset = 0;
  buf = gst_buffer_new_allocate (NULL,
      nal[vps_idx].sizeBytes + nal[sps_idx].sizeBytes + nal[pps_idx].sizeBytes,
      NULL);
  gst_buffer_fill (buf, offset, nal[vps_idx].payload, nal[vps_idx].sizeBytes);
  offset += nal[vps_idx].sizeBytes;
  gst_buffer_fill (buf, offset, nal[sps_idx].payload, nal[sps_idx].sizeBytes);
  offset += nal[sps_idx].sizeBytes;
  gst_buffer_fill (buf, offset, nal[pps_idx].payload, nal[pps_idx].sizeBytes);

  return buf;
}
/* Copy one encoded h.264 frame into the frame's output buffer at
 * 'output_offset'.  Marks the frame as a sync point when its first NAL
 * unit is an SPS.  Returns the number of bytes written. */
static gsize
gst_imx_vpu_h264_enc_fill_output_buffer(GstImxVpuBaseEnc *vpu_base_enc, GstVideoCodecFrame *frame, gsize output_offset, void *encoded_data_addr, gsize encoded_data_size, G_GNUC_UNUSED gboolean contains_header)
{
	static guint8 annexb_start_code[] = { 0x00, 0x00, 0x00, 0x01 };
	gsize code_len = sizeof(annexb_start_code);
	guint8 *bytes = (guint8 *)encoded_data_addr;
	GstImxVpuH264Enc *enc = GST_IMX_VPU_H264_ENC(vpu_base_enc);

	/* If the first NAL unit is an SPS then this frame is a sync point */
	if (memcmp(bytes, annexb_start_code, code_len) == 0)
	{
		/* the NAL unit type sits in the 5 lower bits of the byte
		 * immediately following the start code */
		guint8 nalu_type = bytes[code_len] & 0x1F;
		if (nalu_type == NALU_TYPE_SPS)
		{
			GST_LOG_OBJECT(enc, "SPS NAL found, setting sync point");
			GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT(frame);
		}
	}

	gst_buffer_fill(frame->output_buffer, output_offset, encoded_data_addr, encoded_data_size);

	return encoded_data_size;
}
/* GstBaseSrc::create implementation serving slices of the accumulated
 * XML string.  Note it deliberately ignores 'length' and always sends
 * everything from 'offset' to the end of the string; returns GST_FLOW_EOS
 * together with the final (possibly partial) buffer. */
static GstFlowReturn
create (GstBaseSrc *base_src, guint64 offset, guint length, GstBuffer **buffer)
{
    GstBuffer *buf;
    guint send_size;
    gboolean is_end_of_buffer = FALSE;
    GstCutterTestRunnerPrivate *priv = GST_CUTTER_TEST_RUNNER_GET_PRIVATE(base_src);

    GST_DEBUG("create buffer");

    /* fix: an offset past the string would make the unsigned subtraction
     * below wrap around and allocate a huge buffer; report EOS instead */
    if (offset > priv->xml_string->len)
        return GST_FLOW_EOS;

    if (priv->xml_string->len < offset + length) {
        is_end_of_buffer = TRUE;
    }
    /* send the whole remainder, not just 'length' bytes */
    send_size = priv->xml_string->len - offset;

    buf = gst_buffer_new_and_alloc(send_size);
    gst_buffer_fill(buf, 0, priv->xml_string->str + offset, send_size);
    GST_BUFFER_OFFSET(buf) = offset;
    GST_BUFFER_OFFSET_END(buf) = offset + send_size;

    *buffer = buf;

    return !is_end_of_buffer ? GST_FLOW_OK : GST_FLOW_EOS;
}
/***********************************************************************************
 * Push functions
 ***********************************************************************************/

/* Wrap the payload of an AVPacket in a freshly allocated GstBuffer.
 * Returns NULL when allocation fails. */
static inline GstBuffer*
packet_to_buffer(AVPacket *packet)
{
    GstBuffer *buffer = gst_buffer_new_allocate(NULL, packet->size, NULL);
    if (buffer == NULL)
        return NULL;
    gst_buffer_fill(buffer, 0, packet->data, packet->size);
    return buffer;
}
/* takes a copy of the passed buffer data */
/* Create a new buffer of 'size' bytes.  When 'buffer_data' is non-NULL its
 * contents are copied in; otherwise the buffer is filled with a repeating
 * 0x00..0xff byte pattern.  Stamps the buffer with the current global
 * 'dataoffset' and advances it by 'size'. */
static GstBuffer *
buffer_new (const unsigned char *buffer_data, guint size)
{
  GstBuffer *buffer = gst_buffer_new_and_alloc (size);

  if (buffer_data == NULL) {
    GstMapInfo info;
    guint idx;

    gst_buffer_map (buffer, &info, GST_MAP_WRITE);
    /* Create a recognizable pattern (loop 0x00 -> 0xff) in the data block */
    for (idx = 0; idx < info.size; idx++)
      info.data[idx] = idx % 0x100;
    gst_buffer_unmap (buffer, &info);
  } else {
    gst_buffer_fill (buffer, 0, buffer_data, size);
  }

  /* gst_buffer_set_caps (buffer, GST_PAD_CAPS (srcpad)); */
  GST_BUFFER_OFFSET (buffer) = dataoffset;
  dataoffset += size;

  return buffer;
}
/* Encode one second of silence with libvorbis (using the file-global
 * vd/vb/vc/vi state) and return the resulting packet wrapped in a
 * GstBuffer.  Clears the global vorbis state afterwards, so this can
 * only be called once per setup. */
static GstBuffer *
_create_audio_buffer (void)
{
  GstBuffer *buffer;
  ogg_packet packet;
  float **vorbis_buffer;
  gint i;

  /* feed 44100 zero samples (one second of silence) to the analyser */
  vorbis_buffer = vorbis_analysis_buffer (&vd, 44100);
  for (i = 0; i < 44100 * 1; ++i)
    vorbis_buffer[0][i] = 0.0;
  vorbis_analysis_wrote (&vd, 44100);
  /* pull one analysed block and flush it into a single packet */
  vorbis_analysis_blockout (&vd, &vb);
  vorbis_analysis (&vb, NULL);
  vorbis_bitrate_addblock (&vb);
  vorbis_bitrate_flushpacket (&vd, &packet);
  /* copy the packet payload into a GstBuffer */
  buffer = gst_buffer_new_and_alloc (packet.bytes);
  gst_buffer_fill (buffer, 0, packet.packet, packet.bytes);
  /* tear down the global encoder state */
  vorbis_comment_clear (&vc);
  vorbis_block_clear (&vb);
  vorbis_dsp_clear (&vd);
  vorbis_info_clear (&vi);

  return buffer;
}
/* fakesrc handoff callback: overwrite the start of 'buf' with a dummy mp3
 * frame header and stamp it with a running byte offset kept in *p_offset. */
static void
fill_mp3_buffer (GstElement * fakesrc, GstBuffer * buf, GstPad * pad,
    guint64 * p_offset)
{
  gsize buf_size = gst_buffer_get_size (buf);

  fail_unless (buf_size == MP3_FRAME_SIZE);

  GST_LOG ("filling buffer with fake mp3 data, offset = %" G_GUINT64_FORMAT,
      *p_offset);

  gst_buffer_fill (buf, 0, mp3_dummyhdr, sizeof (mp3_dummyhdr));

#if 0
  /* can't use gst_buffer_set_caps() here because the metadata isn't writable
   * because of the extra refcounts taken by the signal emission mechanism;
   * we know it's fine to use GST_BUFFER_CAPS() here though */
  GST_BUFFER_CAPS (buf) = gst_caps_new_simple ("audio/mpeg", "mpegversion",
      G_TYPE_INT, 1, "layer", G_TYPE_INT, 3, NULL);
#endif

  GST_BUFFER_OFFSET (buf) = *p_offset;
  *p_offset += buf_size;
}
/* GstTestHTTPSrc create callback for the DASH demux tests: serve 'length'
 * bytes starting at 'offset', either from the test input payload or, when
 * no payload is given, from a deterministic synthetic byte pattern. */
static GstFlowReturn
gst_dashdemux_http_src_create (GstTestHTTPSrc * src, guint64 offset,
    guint length, GstBuffer ** retbuf, gpointer context, gpointer user_data)
{
  /* const GstDashDemuxTestInputData *input =
     (const GstDashDemuxTestInputData *) user_data; */
  const GstDashDemuxTestInputData *input =
      (const GstDashDemuxTestInputData *) context;
  GstBuffer *buf;

  buf = gst_buffer_new_allocate (NULL, length, NULL);
  fail_if (buf == NULL, "Not enough memory to allocate buffer");

  if (input->payload) {
    gst_buffer_fill (buf, 0, input->payload + offset, length);
  } else {
    GstMapInfo info;
    guint pattern;

    /* each aligned sizeof(pattern)-byte group encodes the absolute byte
     * offset of its own start; initialize with the group that contains
     * 'offset' so the data is independent of request boundaries */
    pattern = offset - offset % sizeof (pattern);

    gst_buffer_map (buf, &info, GST_MAP_WRITE);
    for (guint64 i = 0; i < length; ++i) {
      /* position of this byte within its group */
      gchar pattern_byte_to_write = (offset + i) % sizeof (pattern);
      if (pattern_byte_to_write == 0) {
        /* group boundary: the new pattern value is the absolute offset */
        pattern = offset + i;
      }
      /* emit the pattern one byte at a time, least-significant first */
      info.data[i] = (pattern >> (pattern_byte_to_write * 8)) & 0xFF;
    }
    gst_buffer_unmap (buf, &info);
  }
  *retbuf = buf;

  return GST_FLOW_OK;
}
/* Wrap an ogg packet from the theora encoder into the oldest pending
 * video frame's output buffer and hand it to the base class.
 * Returns the flow result of finishing the frame, or GST_FLOW_ERROR when
 * the output buffer cannot be allocated. */
static GstFlowReturn
theora_push_packet (GstTheoraEnc * enc, ogg_packet * packet)
{
  GstVideoEncoder *encoder = GST_VIDEO_ENCODER (enc);
  GstVideoCodecFrame *frame;

  frame = gst_video_encoder_get_oldest_frame (encoder);
  if (gst_video_encoder_allocate_output_frame (encoder, frame,
          packet->bytes) != GST_FLOW_OK) {
    GST_WARNING_OBJECT (enc, "Could not allocate buffer");
    gst_video_codec_frame_unref (frame);
    return GST_FLOW_ERROR;
  }

  gst_buffer_fill (frame->output_buffer, 0, packet->packet, packet->bytes);

  /* the second most significant bit of the first data byte is cleared
   * for keyframes */
  if (packet->bytes > 0 && (packet->packet[0] & 0x40) == 0)
    GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
  else
    GST_VIDEO_CODEC_FRAME_UNSET_SYNC_POINT (frame);

  enc->packetno++;

  return gst_video_encoder_finish_frame (encoder, frame);
}
/* Allocate a GstBuffer holding a copy of the kate packet 'kp' and stamp
 * it with the given granulepos, timestamp and duration.
 * Returns NULL on bad arguments or allocation failure. */
static GstBuffer *
gst_kate_enc_create_buffer (GstKateEnc * ke, kate_packet * kp,
    kate_int64_t granpos, GstClockTime timestamp, GstClockTime duration,
    gboolean header)
{
  GstBuffer *out;

  g_return_val_if_fail (kp != NULL, NULL);
  g_return_val_if_fail (kp->data != NULL, NULL);

  out = gst_buffer_new_allocate (NULL, kp->nbytes, NULL);
  if (G_UNLIKELY (out == NULL)) {
    GST_WARNING_OBJECT (ke, "Failed to allocate buffer for %u bytes",
        (guint) kp->nbytes);
    return NULL;
  }

  gst_buffer_fill (out, 0, kp->data, kp->nbytes);

  /* same system as other Ogg codecs, as per ext/ogg/README:
     OFFSET_END is the granulepos
     OFFSET is its time representation */
  GST_BUFFER_OFFSET_END (out) = granpos;
  GST_BUFFER_OFFSET (out) = timestamp;
  GST_BUFFER_TIMESTAMP (out) = timestamp;
  GST_BUFFER_DURATION (out) = duration;

  return out;
}
/* Copy an ogg page (header followed immediately by body) into a single new
 * buffer, stamped with the given timestamp and byte offsets. */
static GstBuffer *
gst_ogg_parse_buffer_from_page (ogg_page * page, guint64 offset,
    GstClockTime timestamp)
{
  int total = page->header_len + page->body_len;
  GstBuffer *buf = gst_buffer_new_and_alloc (total);

  gst_buffer_fill (buf, 0, page->header, page->header_len);
  gst_buffer_fill (buf, page->header_len, page->body, page->body_len);

  GST_BUFFER_TIMESTAMP (buf) = timestamp;
  GST_BUFFER_OFFSET (buf) = offset;
  GST_BUFFER_OFFSET_END (buf) = offset + total;

  return buf;
}
/* Drain all pending analysed blocks from libvorbis and push each resulting
 * packet downstream via the audio encoder base class.
 * On the EOS packet, closes the output segment (if still open) using the
 * final granulepos before the last data is pushed.
 * Returns GST_FLOW_OK, or the first non-OK result from finish_frame. */
static GstFlowReturn
gst_vorbis_enc_output_buffers (GstVorbisEnc * vorbisenc)
{
  GstFlowReturn ret;

  /* vorbis does some data preanalysis, then divides up blocks for
     more involved (potentially parallel) processing.  Get a single
     block for encoding now */
  while (vorbis_analysis_blockout (&vorbisenc->vd, &vorbisenc->vb) == 1) {
    ogg_packet op;

    GST_LOG_OBJECT (vorbisenc, "analysed to a block");

    /* analysis */
    vorbis_analysis (&vorbisenc->vb, NULL);
    vorbis_bitrate_addblock (&vorbisenc->vb);

    /* one analysed block can yield several packets */
    while (vorbis_bitrate_flushpacket (&vorbisenc->vd, &op)) {
      GstBuffer *buf;

      if (op.e_o_s) {
        GstAudioEncoder *enc = GST_AUDIO_ENCODER (vorbisenc);
        GstClockTime duration;

        GST_DEBUG_OBJECT (vorbisenc, "Got EOS packet from libvorbis");
        GST_AUDIO_ENCODER_STREAM_LOCK (enc);
        if (!GST_CLOCK_TIME_IS_VALID (enc->output_segment.stop)) {
          /* the EOS granulepos is the total sample count; convert it to
           * time to close the output segment before the final push */
          GST_DEBUG_OBJECT (vorbisenc,
              "Output segment has no end time, setting");
          duration = gst_util_uint64_scale (op.granulepos, GST_SECOND,
              vorbisenc->frequency);
          enc->output_segment.stop = enc->output_segment.start + duration;
          GST_DEBUG_OBJECT (enc, "new output segment %" GST_SEGMENT_FORMAT,
              &enc->output_segment);
          gst_pad_push_event (GST_AUDIO_ENCODER_SRC_PAD (enc),
              gst_event_new_segment (&enc->output_segment));
        }
        GST_AUDIO_ENCODER_STREAM_UNLOCK (enc);
      }

      GST_LOG_OBJECT (vorbisenc, "pushing out a data packet");
      buf = gst_audio_encoder_allocate_output_buffer (GST_AUDIO_ENCODER
          (vorbisenc), op.bytes);
      gst_buffer_fill (buf, 0, op.packet, op.bytes);
      /* tracking granulepos should tell us samples accounted for */
      ret = gst_audio_encoder_finish_frame (GST_AUDIO_ENCODER (vorbisenc), buf,
          op.granulepos - vorbisenc->samples_out);
      vorbisenc->samples_out = op.granulepos;

      if (ret != GST_FLOW_OK)
        return ret;
    }
  }
  return GST_FLOW_OK;
}
/* ROS audio callback: push one AudioData message into the GStreamer
 * appsrc ('_source') as a buffer. */
void onAudio(const audio_common_msgs::AudioDataConstPtr &msg)
{
    // Guard: &msg->data[0] on an empty vector is undefined behaviour.
    if (msg->data.empty())
        return;

    GstBuffer *buffer = gst_buffer_new_and_alloc(msg->data.size());
    gst_buffer_fill(buffer, 0, &msg->data[0], msg->data.size());

    GstFlowReturn ret;
    g_signal_emit_by_name(_source, "push-buffer", buffer, &ret);
    // The "push-buffer" action signal takes its own ref on the buffer;
    // drop ours, otherwise one buffer leaks per message.
    gst_buffer_unref(buffer);
}
/* Allocate a GstBuffer and copy 'length' bytes of 'data' into it. */
GstBuffer* createGstBufferForData(const char* data, int length)
{
    GstBuffer* result = gst_buffer_new_and_alloc(length);
    gst_buffer_fill(result, 0, data, length);
    return result;
}
/**
 * gst_ssa_parse_push_line:
 * @parse: caller element
 * @txt: text to push
 * @start: timestamp for the buffer
 * @duration: duration for the buffer
 *
 * Parse the text in a buffer with the given properties and
 * push it to the srcpad of the @parse element
 *
 * Returns: result of the push of the created buffer
 */
static GstFlowReturn
gst_ssa_parse_push_line (GstSsaParse * parse, gchar * txt,
    GstClockTime start, GstClockTime duration)
{
  GstFlowReturn flow;
  GstBuffer *outbuf;
  gchar *text, *markup;
  gint line_num, field, markup_len;

  line_num = atoi (txt);
  GST_LOG_OBJECT (parse, "Parsing line #%d at %" GST_TIME_FORMAT,
      line_num, GST_TIME_ARGS (start));

  /* skip all non-text fields before the actual text */
  text = txt;
  for (field = 0; field < 8; ++field) {
    text = strchr (text, ',');
    if (text == NULL)
      return GST_FLOW_ERROR;
    ++text;
  }

  GST_LOG_OBJECT (parse, "Text : %s", text);

  if (gst_ssa_parse_remove_override_codes (parse, text)) {
    GST_LOG_OBJECT (parse, "Clean: %s", text);
  }

  /* we claim to output pango markup, so we must escape the
   * text even if we don't actually use any pango markup yet */
  markup = g_markup_printf_escaped ("%s", text);
  markup_len = strlen (markup);

  /* allocate enough for a terminating NUL, but don't include it in buf size */
  outbuf = gst_buffer_new_and_alloc (markup_len + 1);
  gst_buffer_fill (outbuf, 0, markup, markup_len + 1);
  gst_buffer_set_size (outbuf, markup_len);
  g_free (markup);

  GST_BUFFER_TIMESTAMP (outbuf) = start;
  GST_BUFFER_DURATION (outbuf) = duration;

  GST_LOG_OBJECT (parse, "Pushing buffer with timestamp %" GST_TIME_FORMAT
      " and duration %" GST_TIME_FORMAT, GST_TIME_ARGS (start),
      GST_TIME_ARGS (duration));

  flow = gst_pad_push (parse->srcpad, outbuf);

  if (flow != GST_FLOW_OK) {
    GST_DEBUG_OBJECT (parse, "Push of text '%s' returned flow %s", txt,
        gst_flow_get_name (flow));
  }

  return flow;
}
/* Copy the contents of 'src' into 'dest'.  For raw video formats a
 * GstVideoFrame copy is used so that differing strides are handled
 * correctly; for encoded/unknown formats the bytes are copied verbatim
 * and 'dest' is resized to the source size.
 * Returns GST_FLOW_ERROR if either buffer cannot be mapped. */
static GstFlowReturn
gst_v4l2_buffer_pool_copy_buffer (GstV4l2BufferPool * pool, GstBuffer * dest,
    GstBuffer * src)
{
  const GstVideoFormatInfo *finfo = pool->caps_info.finfo;

  GST_LOG_OBJECT (pool, "copying buffer");

  if (finfo && (finfo->format != GST_VIDEO_FORMAT_UNKNOWN
          && finfo->format != GST_VIDEO_FORMAT_ENCODED)) {
    GstVideoFrame src_frame, dest_frame;

    GST_DEBUG_OBJECT (pool, "copy video frame");

    /* we have raw video, use videoframe copy to get strides right */
    if (!gst_video_frame_map (&src_frame, &pool->caps_info, src, GST_MAP_READ))
      goto invalid_buffer;

    if (!gst_video_frame_map (&dest_frame, &pool->caps_info, dest,
            GST_MAP_WRITE)) {
      /* src is already mapped here, so it must be unmapped before bailing */
      gst_video_frame_unmap (&src_frame);
      goto invalid_buffer;
    }

    gst_video_frame_copy (&dest_frame, &src_frame);

    gst_video_frame_unmap (&src_frame);
    gst_video_frame_unmap (&dest_frame);
  } else {
    GstMapInfo map;

    GST_DEBUG_OBJECT (pool, "copy raw bytes");

    if (!gst_buffer_map (src, &map, GST_MAP_READ))
      goto invalid_buffer;

    gst_buffer_fill (dest, 0, map.data, gst_buffer_get_size (src));

    gst_buffer_unmap (src, &map);
    /* shrink dest so downstream sees exactly the copied payload */
    gst_buffer_resize (dest, 0, gst_buffer_get_size (src));
  }

  GST_CAT_LOG_OBJECT (GST_CAT_PERFORMANCE, pool, "slow copy into buffer %p",
      dest);

  return GST_FLOW_OK;

  /* ERRORS */
invalid_buffer:
  {
    GST_ERROR_OBJECT (pool, "could not map buffer");
    return GST_FLOW_ERROR;
  }
}
/* Allocate a GstBuffer and copy 'length' bytes of 'data' into it.
 * Uses gst_buffer_fill() with the GStreamer 1.x API and direct
 * GST_BUFFER_DATA access with the legacy (0.10) API. */
GstBuffer* createGstBufferForData(const char* data, int length)
{
    GstBuffer* buffer = gst_buffer_new_and_alloc(length);
#ifdef GST_API_VERSION_1
    gst_buffer_fill(buffer, 0, data, length);
#else
    memcpy(GST_BUFFER_DATA(buffer), data, length);
#endif
    return buffer;
}
/* Return a new buffer containing a copy of the payload of 'buffer_in'.
 * Only the data is duplicated; no metadata or timestamps are carried over. */
static inline GstBuffer *
make_my_output_buffer (GstBuffer * buffer_in)
{
  GstMapInfo info;
  GstBuffer *copy;

  gst_buffer_map (buffer_in, &info, GST_MAP_READ);
  copy = gst_buffer_new_and_alloc (info.size);
  gst_buffer_fill (copy, 0, info.data, info.size);
  gst_buffer_unmap (buffer_in, &info);

  return copy;
}
/* Copy the codec extradata of 'codec' into a new GstBuffer.
 * Returns NULL when there is no extradata or allocation fails. */
static GstBuffer*
get_codec_extradata(AVCodecContext *codec)
{
    GstBuffer *extradata_buf = NULL;

    if (codec->extradata == NULL)
        return NULL;

    extradata_buf = gst_buffer_new_allocate(NULL, codec->extradata_size, NULL);
    if (extradata_buf != NULL)
        gst_buffer_fill(extradata_buf, 0, codec->extradata,
                codec->extradata_size);

    return extradata_buf;
}
/* fakesink handoff callback that accumulates received buffers into one
 * contiguous buffer at *p_buf, placing each buffer's data at its
 * GST_BUFFER_OFFSET.  Grows the accumulator as needed. */
static void
got_buffer (GstElement * fakesink, GstBuffer * buf, GstPad * pad,
    GstBuffer ** p_buf)
{
  gint64 off;
  GstMapInfo map;

  off = GST_BUFFER_OFFSET (buf);
  gst_buffer_map (buf, &map, GST_MAP_READ);

  GST_LOG ("got buffer, size=%u, offset=%" G_GINT64_FORMAT, map.size, off);

  fail_unless (GST_BUFFER_OFFSET_IS_VALID (buf));

  if (*p_buf == NULL || (off + map.size) > gst_buffer_get_size (*p_buf)) {
    GstBuffer *newbuf;

    /* not very elegant, but who cares */
    /* NOTE(review): gst_buffer_new_and_alloc() does not zero the memory,
     * so any gap between the old accumulated size and 'off' stays
     * uninitialized — presumably the test only reads bytes it wrote;
     * verify against the callers. */
    newbuf = gst_buffer_new_and_alloc (off + map.size);
    if (*p_buf) {
      GstMapInfo pmap;

      /* copy the previously accumulated data to the front of the new buffer */
      gst_buffer_map (*p_buf, &pmap, GST_MAP_READ);
      gst_buffer_fill (newbuf, 0, pmap.data, pmap.size);
      gst_buffer_unmap (*p_buf, &pmap);
    }
    gst_buffer_fill (newbuf, off, map.data, map.size);
    if (*p_buf)
      gst_buffer_unref (*p_buf);
    *p_buf = newbuf;
  } else {
    /* the accumulator is already large enough: copy in place */
    gst_buffer_fill (*p_buf, off, map.data, map.size);
  }
  gst_buffer_unmap (buf, &map);
}
/* libiec61883 receive callback, invoked for every DV frame arriving from
 * the firewire bus.
 * On the first frame, detects PAL vs NTSC from a header bit and sets caps
 * on the source pad accordingly.  Frames are then kept or dropped
 * according to the skip/consecutive pattern; a kept, complete frame is
 * copied into dv1394src->buf for the streaming thread to pick up.
 * Always returns 0 (success) to the library. */
static int
gst_dv1394src_iec61883_receive (unsigned char *data, int len, int complete,
    void *cbdata)
{
  GstDV1394Src *dv1394src = GST_DV1394SRC (cbdata);

  /* first frame: negotiate caps from the stream itself */
  if (G_UNLIKELY (!gst_pad_has_current_caps (GST_BASE_SRC_PAD (dv1394src)))) {
    GstCaps *caps;
    unsigned char *p = data;

    // figure format (NTSC/PAL)
    if (p[3] & 0x80) {
      // PAL
      dv1394src->frame_size = PAL_FRAMESIZE;
      dv1394src->frame_rate = PAL_FRAMERATE;
      GST_DEBUG ("PAL data");
      caps = gst_caps_new_simple ("video/x-dv",
          "format", G_TYPE_STRING, "PAL",
          "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
    } else {
      // NTSC (untested)
      dv1394src->frame_size = NTSC_FRAMESIZE;
      dv1394src->frame_rate = NTSC_FRAMERATE;
      GST_DEBUG
          ("NTSC data [untested] - please report success/failure to <*****@*****.**>");
      caps = gst_caps_new_simple ("video/x-dv",
          "format", G_TYPE_STRING, "NTSC",
          "systemstream", G_TYPE_BOOLEAN, TRUE, NULL);
    }
    gst_pad_set_caps (GST_BASE_SRC_PAD (dv1394src), caps);
    gst_caps_unref (caps);
  }

  dv1394src->frame = NULL;
  /* keep 'consecutive' frames out of every (skip + consecutive) frames */
  if (G_LIKELY ((dv1394src->frame_sequence + 1) % (dv1394src->skip +
              dv1394src->consecutive) < dv1394src->consecutive)) {
    /* only complete frames of the expected size are handed over */
    if (complete && len == dv1394src->frame_size) {
      GstBuffer *buf;

      buf = gst_buffer_new_and_alloc (dv1394src->frame_size);

      GST_BUFFER_OFFSET (buf) = dv1394src->frame_sequence;
      gst_buffer_fill (buf, 0, data, len);

      dv1394src->buf = buf;
    }
  }
  dv1394src->frame_sequence++;
  return 0;
}
/* Decompress the base64+gzip encoded GdkPixbuf pixdata logo (from the
 * global 'gzipped_pixdata_base64') and wrap its pixel data in a GstBuffer
 * with video metadata attached.  Asserts on any decode failure, since
 * this is fixed test fixture data. */
static GstBuffer *
create_overlay_buffer (void)
{
  GZlibDecompressor *decompress;
  GConverterResult decomp_res;
  guchar *gzipped_pixdata, *pixdata;
  gsize gzipped_size, bytes_read, pixdata_size;
  GstBuffer *logo_pixels;
  guint w, h, stride;

  gzipped_pixdata = g_base64_decode (gzipped_pixdata_base64, &gzipped_size);
  g_assert (gzipped_pixdata != NULL);

  /* 64 kB is assumed to be enough for the decompressed pixdata */
  pixdata = g_malloc (64 * 1024);

  decompress = g_zlib_decompressor_new (G_ZLIB_COMPRESSOR_FORMAT_GZIP);
  decomp_res = g_converter_convert (G_CONVERTER (decompress),
      gzipped_pixdata, gzipped_size, pixdata, 64 * 1024,
      G_CONVERTER_INPUT_AT_END, &bytes_read, &pixdata_size, NULL);
  g_assert (decomp_res == G_CONVERTER_FINISHED);
  g_assert (bytes_read == gzipped_size);
  g_free (gzipped_pixdata);
  g_object_unref (decompress);

  /* parse the GdkPixdata header (all fields big-endian) */
  /* 0: Pixbuf magic (0x47646b50) */
  g_assert (GST_READ_UINT32_BE (pixdata) == 0x47646b50);
  /* 4: length incl. header */
  /* 8: pixdata_type */
  /* 12: rowstride (900) */
  stride = GST_READ_UINT32_BE (pixdata + 12);
  /* 16: width (225) */
  w = GST_READ_UINT32_BE (pixdata + 16);
  /* 20: height (57) */
  h = GST_READ_UINT32_BE (pixdata + 20);
  /* 24: pixel_data */
  GST_LOG ("%dx%d @ %d", w, h, stride);
  /* we assume that the last line also has padding at the end */
  g_assert (pixdata_size - 24 >= h * stride);

  /* copy only the pixel data (past the 24-byte header) into the buffer */
  logo_pixels = gst_buffer_new_and_alloc (h * stride);
  gst_buffer_fill (logo_pixels, 0, pixdata + 24, h * stride);
  gst_buffer_add_video_meta (logo_pixels, GST_VIDEO_FRAME_FLAG_NONE,
      GST_VIDEO_OVERLAY_COMPOSITION_FORMAT_RGB, w, h);

  g_free (pixdata);

  return logo_pixels;
}
/* Assemble a JPEG-like test buffer: the SOI marker, the caller-supplied
 * header bytes, the SOF0 marker data and the EOI marker, concatenated in
 * that order (the marker arrays are file-level globals). */
static inline GstBuffer *
make_my_input_buffer (guint8 * test_data_header, gsize test_data_size)
{
  GstBuffer *buffer;
  gsize total_size, pos;

  total_size = sizeof (test_data_soi) + test_data_size
      + sizeof (test_data_sof0) + sizeof (test_data_eoi);

  buffer = gst_buffer_new_and_alloc (total_size);

  pos = 0;
  gst_buffer_fill (buffer, pos, test_data_soi, sizeof (test_data_soi));
  pos += sizeof (test_data_soi);
  gst_buffer_fill (buffer, pos, test_data_header, test_data_size);
  pos += test_data_size;
  gst_buffer_fill (buffer, pos, test_data_sof0, sizeof (test_data_sof0));
  pos += sizeof (test_data_sof0);
  gst_buffer_fill (buffer, pos, test_data_eoi, sizeof (test_data_eoi));

  return buffer;
}
/* Push 'src_data' through an avisubtitle element and check that the buffer
 * it produces matches 'dst_data'.  Also verifies that seeking fails before
 * any buffer has been processed, and that seeking afterwards re-emits the
 * subtitle buffer. */
static void
check_correct_buffer (guint8 * src_data, guint src_size, guint8 * dst_data,
    guint dst_size)
{
  GstBuffer *buffer = gst_buffer_new_allocate (NULL, src_size, 0);
  GstBuffer *newBuffer;
  GstElement *avisubtitle = setup_avisubtitle ();
  GstEvent *event;

  fail_unless (g_list_length (buffers) == 0, "Buffers list needs to be empty");
  gst_buffer_fill (buffer, 0, src_data, src_size);
  fail_unless (gst_element_set_state (avisubtitle,
          GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS,
      "could not set to playing");
  ASSERT_BUFFER_REFCOUNT (buffer, "inbuffer", 1);
  /* seeking must fail while the element has not seen any data yet */
  event = gst_event_new_seek (1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
      GST_SEEK_TYPE_SET, 2 * GST_SECOND, GST_SEEK_TYPE_SET, 5 * GST_SECOND);
  fail_unless (gst_element_send_event (avisubtitle, event) == FALSE,
      "Seeking is not possible when there is no buffer yet");
  fail_unless (gst_pad_push (mysrcpad, buffer) == GST_FLOW_OK,
      "not accepted a correct buffer");
  /* we gave away our reference to the buffer, don't assume anything */
  buffer = NULL;
  /* a new buffer is created in the list */
  fail_unless (g_list_length (buffers) == 1,
      "No new buffer in the buffers list");
  /* after data has been processed, the same seek must succeed and cause
   * the subtitle buffer to be pushed again */
  event = gst_event_new_seek (1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
      GST_SEEK_TYPE_SET, 2 * GST_SECOND, GST_SEEK_TYPE_SET, 5 * GST_SECOND);
  fail_unless (gst_element_send_event (avisubtitle, event) == TRUE,
      "seeking should be working now");
  fail_unless (g_list_length (buffers) == 2,
      "After seeking we need another buffer in the buffers");
  /* take the first produced buffer off the global list and verify it */
  newBuffer = GST_BUFFER (buffers->data);
  buffers = g_list_remove (buffers, newBuffer);
  fail_unless (g_list_length (buffers) == 1, "Buffers list needs to be empty");
  fail_unless (gst_buffer_get_size (newBuffer) == dst_size,
      "size of the new buffer is wrong ( %d != %d)",
      gst_buffer_get_size (newBuffer), dst_size);
  fail_unless (gst_buffer_memcmp (newBuffer, 0, dst_data, dst_size) == 0,
      "data of the buffer is not correct");
  gst_buffer_unref (newBuffer);
  /* free the buffer from seeking */
  gst_buffer_unref (GST_BUFFER (buffers->data));
  buffers = g_list_remove (buffers, buffers->data);
  fail_unless (gst_element_set_state (avisubtitle,
          GST_STATE_NULL) == GST_STATE_CHANGE_SUCCESS, "could not set to null");
  cleanup_avisubtitle (avisubtitle);
}
/* GstTestHTTPSrc create callback for the HLS demux tests: serve 'length'
 * bytes of the test input payload starting at 'offset'. */
static GstFlowReturn
gst_hlsdemux_test_src_create (GstTestHTTPSrc * src, guint64 offset,
    guint length, GstBuffer ** retbuf, gpointer context, gpointer user_data)
{
  /* const GstHlsDemuxTestCase *test_case =
     (const GstHlsDemuxTestCase *) user_data; */
  GstHlsDemuxTestInputData *input = (GstHlsDemuxTestInputData *) context;
  GstBuffer *out;

  out = gst_buffer_new_allocate (NULL, length, NULL);
  fail_if (out == NULL, "Not enough memory to allocate buffer");
  fail_if (input->payload == NULL);

  gst_buffer_fill (out, 0, input->payload + offset, length);
  *retbuf = out;

  return GST_FLOW_OK;
}
/* GstBaseSrc::create for the data: URI source: serve 'size' bytes at
 * 'offset' from the decoded in-memory buffer.
 * If the caller provided a buffer (*buf != NULL) it is filled in place;
 * otherwise a copy-region of the stored buffer is returned.
 * Past-the-end requests yield GST_FLOW_EOS; a missing stored buffer
 * yields GST_FLOW_NOT_NEGOTIATED after posting an element error. */
static GstFlowReturn
gst_data_uri_src_create (GstBaseSrc * basesrc, guint64 offset, guint size,
    GstBuffer ** buf)
{
  GstDataURISrc *src = GST_DATA_URI_SRC (basesrc);
  GstFlowReturn ret;

  GST_OBJECT_LOCK (src);
  if (!src->buffer)
    goto no_buffer;

  /* This is only correct because GstBaseSrc already clips size for us to be no
   * larger than the max. available size if a segment at the end is requested */
  if (offset + size > gst_buffer_get_size (src->buffer)) {
    ret = GST_FLOW_EOS;
  } else if (*buf != NULL) {
    GstMapInfo src_info;
    GstMapInfo dest_info;
    gsize fill_size;

    /* NOTE(review): dest_info is mapped but never used —
     * gst_buffer_fill() does its own mapping internally, so the
     * map/unmap pair around *buf looks redundant; confirm before
     * removing. */
    gst_buffer_map (src->buffer, &src_info, GST_MAP_READ);
    gst_buffer_map (*buf, &dest_info, GST_MAP_WRITE);

    fill_size = gst_buffer_fill (*buf, 0, src_info.data + offset, size);

    gst_buffer_unmap (*buf, &dest_info);
    gst_buffer_unmap (src->buffer, &src_info);
    gst_buffer_set_size (*buf, fill_size);
    ret = GST_FLOW_OK;
  } else {
    *buf =
        gst_buffer_copy_region (src->buffer, GST_BUFFER_COPY_ALL, offset,
        size);
    ret = GST_FLOW_OK;
  }
  GST_OBJECT_UNLOCK (src);

  return ret;

/* ERRORS */
no_buffer:
  {
    GST_OBJECT_UNLOCK (src);
    GST_ELEMENT_ERROR (src, RESOURCE, NOT_FOUND, (NULL), (NULL));
    return GST_FLOW_NOT_NEGOTIATED;
  }
}
/* Wrap a theora header ogg packet in a freshly allocated output buffer.
 * Header buffers carry zero offsets and no timestamps. */
static GstBuffer *
theora_enc_buffer_from_header_packet (GstTheoraEnc * enc, ogg_packet * packet)
{
  GstBuffer *header_buf;

  header_buf =
      gst_video_encoder_allocate_output_buffer (GST_VIDEO_ENCODER (enc),
      packet->bytes);
  gst_buffer_fill (header_buf, 0, packet->packet, packet->bytes);

  GST_BUFFER_OFFSET (header_buf) = 0;
  GST_BUFFER_OFFSET_END (header_buf) = 0;
  GST_BUFFER_TIMESTAMP (header_buf) = GST_CLOCK_TIME_NONE;
  GST_BUFFER_DURATION (header_buf) = GST_CLOCK_TIME_NONE;

  GST_DEBUG ("created header packet buffer, %u bytes",
      (guint) gst_buffer_get_size (header_buf));

  return header_buf;
}
/* Run one capssetter scenario: configure the element with the given
 * property caps and join/replace flags, push a 4-byte buffer with
 * 'in_caps' set upstream, and check that the caps seen downstream equal
 * 'out_caps'.  Takes ownership of (unrefs) all three caps arguments. */
static void
push_and_test (GstCaps * prop_caps, gboolean join, gboolean replace,
    GstCaps * in_caps, GstCaps * out_caps)
{
  GstElement *capssetter;
  GstBuffer *buffer;
  GstCaps *current_out;

  capssetter = setup_capssetter ();
  fail_unless (gst_element_set_state (capssetter,
          GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS,
      "could not set to playing");

  g_object_set (capssetter, "join", join, NULL);
  g_object_set (capssetter, "replace", replace, NULL);
  g_object_set (capssetter, "caps", prop_caps, NULL);
  gst_caps_unref (prop_caps);

  buffer = gst_buffer_new_and_alloc (4);
  ASSERT_BUFFER_REFCOUNT (buffer, "buffer", 1);
  gst_buffer_fill (buffer, 0, "data", 4);

  gst_check_setup_events (mysrcpad, capssetter, in_caps, GST_FORMAT_TIME);
  gst_caps_unref (in_caps);

  /* pushing gives away my reference ... */
  fail_unless (gst_pad_push (mysrcpad, buffer) == GST_FLOW_OK,
      "Failed pushing buffer to capssetter");
  fail_unless (gst_pad_push_event (mysrcpad, gst_event_new_eos ()) == TRUE);

  /* ... but it should end up being collected on the global buffer list */
  fail_unless (g_list_length (buffers) == 1);
  buffer = g_list_first (buffers)->data;
  ASSERT_BUFFER_REFCOUNT (buffer, "buffer", 1);

  /* the caps downstream must match the expected result */
  current_out = gst_pad_get_current_caps (mysinkpad);
  fail_unless (gst_caps_is_equal (out_caps, current_out));
  gst_caps_unref (current_out);
  gst_caps_unref (out_caps);

  /* cleanup */
  cleanup_capssetter (capssetter);
}