/* Parses the first CMML packet (the ident header) and stores the stream
 * parameters on the decoder. Posts an element error and sets
 * dec->flow_return to GST_FLOW_ERROR on a malformed header, otherwise
 * sets it to GST_FLOW_OK. */
static void
gst_cmml_dec_parse_ident_header (GstCmmlDec * dec, GstBuffer * buffer)
{
  const guint8 *data = GST_BUFFER_DATA (buffer);

  /* the ident header has a fixed length */
  if (GST_BUFFER_SIZE (buffer) != CMML_IDENT_HEADER_SIZE) {
    GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL),
        ("wrong ident header size: %d", GST_BUFFER_SIZE (buffer)));
    dec->flow_return = GST_FLOW_ERROR;
    return;
  }

  /* fixed layout: 8 bytes of magic, then version, granulerate and
   * granuleshift fields at known offsets */
  dec->major = GST_READ_UINT16_LE (data + 8);
  dec->minor = GST_READ_UINT16_LE (data + 10);
  dec->granulerate_n = GST_READ_UINT64_LE (data + 12);
  dec->granulerate_d = GST_READ_UINT64_LE (data + 20);
  dec->granuleshift = GST_READ_UINT8 (data + 28);

  GST_INFO_OBJECT (dec, "bitstream initialized "
      "(major: %" G_GINT16_FORMAT " minor: %" G_GINT16_FORMAT
      " granulerate_n: %" G_GINT64_FORMAT
      " granulerate_d: %" G_GINT64_FORMAT
      " granuleshift: %d)",
      dec->major, dec->minor,
      dec->granulerate_n, dec->granulerate_d, dec->granuleshift);

  dec->flow_return = GST_FLOW_OK;
}
/* Parse the fixed 32-byte IVF file header at the start of the stream:
 * validate the DKIF magic/version/header size, configure fourcc,
 * frame size and framerate on the parser, and switch to the DATA state.
 * Returns GST_FLOW_ERROR on a header that is not a supported IVF file. */
static GstFlowReturn
gst_ivf_parse_handle_frame_start (GstIvfParse * ivf, GstBaseParseFrame * frame,
    gint * skipsize)
{
  GstBuffer *const buffer = frame->buffer;
  GstMapInfo info;
  GstFlowReturn result = GST_FLOW_OK;

  gst_buffer_map (buffer, &info, GST_MAP_READ);

  if (info.size < IVF_FILE_HEADER_SIZE) {
    /* not enough data buffered yet for the fixed-size file header */
    GST_LOG_OBJECT (ivf, "Header data not yet available.");
    *skipsize = 0;
  } else {
    const guint8 *hdr = info.data;
    guint32 magic = GST_READ_UINT32_LE (hdr);
    guint16 version = GST_READ_UINT16_LE (hdr + 4);
    guint16 header_size = GST_READ_UINT16_LE (hdr + 6);
    guint32 fourcc = GST_READ_UINT32_LE (hdr + 8);
    guint16 width = GST_READ_UINT16_LE (hdr + 12);
    guint16 height = GST_READ_UINT16_LE (hdr + 14);
    guint32 fps_n = GST_READ_UINT32_LE (hdr + 16);
    guint32 fps_d = GST_READ_UINT32_LE (hdr + 20);
#ifndef GST_DISABLE_GST_DEBUG
    guint32 num_frames = GST_READ_UINT32_LE (hdr + 24);
#endif

    if (magic != GST_MAKE_FOURCC ('D', 'K', 'I', 'F') ||
        version != 0 || header_size != 32 ||
        fourcc_to_media_type (fourcc) == NULL) {
      GST_ELEMENT_ERROR (ivf, STREAM, WRONG_TYPE, (NULL), (NULL));
      result = GST_FLOW_ERROR;
      goto end;
    }

    ivf->fourcc = fourcc;
    gst_ivf_parse_set_size (ivf, width, height);
    gst_ivf_parse_set_framerate (ivf, fps_n, fps_d);
    GST_LOG_OBJECT (ivf, "Stream has %d frames", num_frames);

    /* move along */
    ivf->state = GST_IVF_PARSE_DATA;
    gst_base_parse_set_min_frame_size (GST_BASE_PARSE_CAST (ivf),
        IVF_FRAME_HEADER_SIZE);
    *skipsize = IVF_FILE_HEADER_SIZE;
  }

end:
  gst_buffer_unmap (buffer, &info);
  return result;
}
/* Deserialize an EXIF orientation tag into the reader's tag list.
 * Always consumes exactly one tag entry (returns 1); invalid type,
 * count or value just logs a warning and adds nothing. */
static gint
deserialize_orientation (GstExifReader * exif_reader, GstByteReader * reader,
    const GstExifTagMatch * exiftag, GstExifTagData * tagdata)
{
  const gchar *orientation;
  gint raw_value;

  GST_LOG ("Starting to parse %s tag in exif 0x%x", exiftag->gst_tag,
      exiftag->exif_tag);

  /* validate tag */
  if (tagdata->tag_type != EXIF_TYPE_SHORT || tagdata->count != 1) {
    GST_WARNING ("Orientation tag has unexpected type/count");
    return 1;
  }

  /* the 16-bit value lives in the offset field, in the file's byte order */
  raw_value = (exif_reader->byte_order == G_LITTLE_ENDIAN) ?
      GST_READ_UINT16_LE (tagdata->offset_as_data) :
      GST_READ_UINT16_BE (tagdata->offset_as_data);

  orientation = gst_tag_image_orientation_from_exif_value (raw_value);
  if (orientation == NULL) {
    GST_WARNING ("Invalid value for exif orientation tag: %d", raw_value);
    return 1;
  }

  gst_tag_list_add (exif_reader->taglist, GST_TAG_MERGE_REPLACE,
      exiftag->gst_tag, orientation, NULL);

  return 1;
}
/* Decompress Interplay DPCM audio into 16-bit samples.
 * Each channel starts with a raw little-endian 16-bit seed sample;
 * every following input byte indexes delta_table and is accumulated
 * onto the per-channel predictor, clamped to the 16-bit range.
 * buf_len is the output size in bytes; samples are interleaved. */
void
ipaudio_uncompress (short *buffer, unsigned short buf_len,
    const unsigned char *data, unsigned char channels)
{
  int predictor[2] = { 0, 0 };
  int chan;
  int pos = 0;
  int total_samples;

  /* read the initial (uncompressed) predictor for each channel */
  for (chan = 0; chan < channels; ++chan) {
    predictor[chan] = GST_READ_UINT16_LE (data);
    data += 2;
    /* manual sign extension of the 16-bit value */
    if (predictor[chan] & 0x8000)
      predictor[chan] -= 0x10000;
    buffer[pos++] = predictor[chan];
  }

  /* we count in 16-bit ints, so adjust the buffer size */
  total_samples = buf_len / 2;

  chan = 0;
  while (pos < total_samples) {
    predictor[chan] += delta_table[*data++];

    /* clamp to the signed 16-bit range */
    if (predictor[chan] < -32768)
      predictor[chan] = -32768;
    else if (predictor[chan] > 32767)
      predictor[chan] = 32767;

    buffer[pos++] = predictor[chan];

    /* toggle channel (no-op for mono, 0<->1 for stereo) */
    chan ^= channels - 1;
  }
}
/* Parse the payload extension systems carried in a payload's replicated
 * data and fill in duration, interlacing and pixel-aspect-ratio info on
 * @payload. Extension layout (id/len) comes from the stream's extended
 * stream properties; extensions with unexpected lengths are skipped with
 * a warning. Bails out early if the replicated data is too short. */
static void
asf_payload_parse_replicated_data_extensions (AsfStream * stream,
    AsfPayload * payload)
{
  AsfPayloadExtension *ext;
  guint off;

  if (!stream->ext_props.valid || stream->ext_props.payload_extensions == NULL)
    return;

  /* the first 8 bytes of replicated data are fixed; the extension data
   * starts after them */
  off = 8;
  for (ext = stream->ext_props.payload_extensions; ext->len > 0; ++ext) {
    if (G_UNLIKELY (off + ext->len > payload->rep_data_len)) {
      GST_WARNING ("not enough replicated data for defined extensions");
      return;
    }
    switch (ext->id) {
      case ASF_PAYLOAD_EXTENSION_DURATION:
        if (G_LIKELY (ext->len == 2)) {
          guint16 tdur = GST_READ_UINT16_LE (payload->rep_data + off);

          /* packet durations of 1ms are mostly invalid */
          if (tdur != 1)
            payload->duration = tdur * GST_MSECOND;
        } else {
          GST_WARNING ("unexpected DURATION extensions len %u", ext->len);
        }
        break;
      case ASF_PAYLOAD_EXTENSION_SYSTEM_CONTENT:
        if (G_LIKELY (ext->len == 1)) {
          guint8 data = payload->rep_data[off];

          /* single flags byte: bit0=interlaced, bit3=rff,
           * bits1/2 determine tff */
          payload->interlaced = data & 0x1;
          payload->rff = data & 0x8;
          payload->tff = (data & 0x2) || !(data & 0x4);
          GST_DEBUG ("SYSTEM_CONTENT: interlaced:%d, rff:%d, tff:%d",
              payload->interlaced, payload->rff, payload->tff);
        } else {
          GST_WARNING ("unexpected SYSTEM_CONTE extensions len %u", ext->len);
        }
        break;
      case ASF_PAYLOAD_EXTENSION_SYSTEM_PIXEL_ASPECT_RATIO:
        if (G_LIKELY (ext->len == 2)) {
          /* one byte each for PAR numerator and denominator */
          payload->par_x = payload->rep_data[off];
          payload->par_y = payload->rep_data[off + 1];
          GST_DEBUG ("PAR %d / %d", payload->par_x, payload->par_y);
        } else {
          GST_WARNING ("unexpected SYSTEM_PIXEL_ASPECT_RATIO extensions len %u",
              ext->len);
        }
        break;
      default:
        GST_WARNING ("UNKNOWN PAYLOAD EXTENSION !");
        break;
    }
    /* extensions are packed back-to-back in declaration order */
    off += ext->len;
  }
}
/**
 * gst_audio_format_fill_silence:
 * @info: a #GstAudioFormatInfo
 * @dest: (array length=length) (element-type guint8): a destination
 *   to fill
 * @length: the length to fill
 *
 * Fill @length bytes in @dest with silence samples for @info.
 * Float and signed formats have all-zero silence; unsigned formats
 * replicate the per-format silence pattern from @info->silence.
 */
void
gst_audio_format_fill_silence (const GstAudioFormatInfo * info,
    gpointer dest, gsize length)
{
  guint8 *dptr = dest;

  g_return_if_fail (info != NULL);
  g_return_if_fail (dest != NULL);

  if (info->flags & GST_AUDIO_FORMAT_FLAG_FLOAT ||
      info->flags & GST_AUDIO_FORMAT_FLAG_SIGNED) {
    /* float or signed always 0 */
    orc_memset (dest, 0, length);
  } else {
    gint bps = info->width >> 3;

    switch (bps) {
      case 1:
        orc_memset (dest, info->silence[0], length);
        break;
      case 2:{
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
        guint16 silence = GST_READ_UINT16_LE (info->silence);
#else
        guint16 silence = GST_READ_UINT16_BE (info->silence);
#endif
        audio_orc_splat_u16 (dest, silence, length / bps);
        break;
      }
      case 4:{
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
        guint32 silence = GST_READ_UINT32_LE (info->silence);
#else
        guint32 silence = GST_READ_UINT32_BE (info->silence);
#endif
        audio_orc_splat_u32 (dest, silence, length / bps);
        break;
      }
      case 8:{
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
        guint64 silence = GST_READ_UINT64_LE (info->silence);
#else
        guint64 silence = GST_READ_UINT64_BE (info->silence);
#endif
        audio_orc_splat_u64 (dest, silence, length / bps);
        break;
      }
      default:{
        gsize i;
        gint j;

        /* fix: use a gsize byte index; a gint counter would overflow
         * (undefined behaviour) when length exceeds G_MAXINT bytes */
        for (i = 0; i < length; i += bps) {
          for (j = 0; j < bps; j++)
            *dptr++ = info->silence[j];
        }
        break;
      }
    }
  }
}
/* Pull a frame via the wrapped v4lsrc get function and trim it to the
 * actual JPEG payload. The camera prepends a 2-byte little-endian length
 * field counting 8-byte units; the returned subbuffer covers exactly the
 * JPEG data and inherits the original timestamps. Returns NULL (with an
 * element error posted) on non-JPEG frames. */
static GstData *
gst_v4ljpegsrc_get (GstPad * pad)
{
  GstV4lJpegSrc *v4ljpegsrc;
  GstV4lSrc *v4lsrc;
  GstData *data;
  GstBuffer *buf;
  GstBuffer *outbuf;
  int jpeg_size;

  g_return_val_if_fail (pad != NULL, NULL);
  v4ljpegsrc = GST_V4LJPEGSRC (gst_pad_get_parent (pad));
  v4lsrc = GST_V4LSRC (v4ljpegsrc);

  /* Fetch from the v4lsrc class get fn. */
  data = v4ljpegsrc->getfn (pad);

  /* If not a buffer, return it unchanged (e.g. events) */
  if (!data || (!GST_IS_BUFFER (data)))
    return data;

  buf = GST_BUFFER (data);

  /* Confirm that the buffer contains jpeg data */
  /*
   * Create a new subbuffer from the jpeg data
   * The first 2 bytes in the buffer are the size of the jpeg data
   * (in units of 8 bytes)
   */
  if (GST_BUFFER_SIZE (buf) > 2) {
    jpeg_size = (int) (GST_READ_UINT16_LE (GST_BUFFER_DATA (buf))) * 8;
  } else
    jpeg_size = 0;

  /* Check that the size is sensible; the <= 0 test short-circuits before
   * the unsigned subtraction can be evaluated for tiny buffers */
  if ((jpeg_size <= 0) || (jpeg_size > GST_BUFFER_SIZE (buf) - 2)) {
    GST_ELEMENT_ERROR (v4ljpegsrc, STREAM, FORMAT, (NULL),
        ("Invalid non-jpeg frame from camera"));
    /* fix: release the source buffer on this error path instead of
     * leaking one buffer per bad frame */
    gst_buffer_unref (buf);
    return NULL;
  }

  GST_DEBUG_OBJECT (v4ljpegsrc, "Creating JPEG subbuffer of size %d",
      jpeg_size);
  outbuf = gst_buffer_create_sub (buf, 2, jpeg_size);

  /* Copy timestamps onto the subbuffer */
  gst_buffer_stamp (outbuf, buf);

  /* Release the main buffer */
  gst_buffer_unref (buf);

  return GST_DATA (outbuf);
}
static void check_rgb_buf (const guint8 * pixels, guint32 r_mask, guint32 g_mask, guint32 b_mask, guint32 a_mask, guint8 r_expected, guint8 g_expected, guint8 b_expected, guint bpp, guint depth) { guint32 pixel, red, green, blue, alpha; switch (bpp) { case 32: pixel = GST_READ_UINT32_BE (pixels); break; case 24: pixel = (GST_READ_UINT8 (pixels) << 16) | (GST_READ_UINT8 (pixels + 1) << 8) | (GST_READ_UINT8 (pixels + 2) << 0); break; case 16: if (G_BYTE_ORDER == G_LITTLE_ENDIAN) pixel = GST_READ_UINT16_LE (pixels); else pixel = GST_READ_UINT16_BE (pixels); break; default: g_return_if_reached (); } red = right_shift_colour (r_mask, pixel); green = right_shift_colour (g_mask, pixel); blue = right_shift_colour (b_mask, pixel); alpha = right_shift_colour (a_mask, pixel); /* can't enable this by default, valgrind will complain about accessing * uninitialised memory for the depth=24,bpp=32 formats ... */ /* GST_LOG ("pixels: 0x%02x 0x%02x 0x%02x 0x%02x => pixel = 0x%08x", pixels[0], (guint) pixels[1], pixels[2], pixels[3], pixel); */ /* fix up the mask (for rgb15/16) */ if (bpp == 16) { r_expected = fix_expected_colour (r_mask, r_expected); g_expected = fix_expected_colour (g_mask, g_expected); b_expected = fix_expected_colour (b_mask, b_expected); } fail_unless (red == r_expected, "RED: expected 0x%02x, found 0x%02x", r_expected, red); fail_unless (green == g_expected, "GREEN: expected 0x%02x, found 0x%02x", g_expected, green); fail_unless (blue == b_expected, "BLUE: expected 0x%02x, found 0x%02x", b_expected, blue); fail_unless (a_mask == 0 || alpha != 0); /* better than nothing */ }
/* inspired by the original one in wavpack */
/* Read one metadata sub-block from a wavpack block.
 * @header_data points at the block header (used only to compute the end
 * of the block); @p_data points at the current read position and is
 * advanced past the sub-block on success. Fills in wpmd->id,
 * wpmd->byte_length and wpmd->data (NULL for empty sub-blocks).
 * Returns FALSE if the sub-block would run past the end of the block. */
gboolean
gst_wavpack_read_metadata (GstWavpackMetadata * wpmd, guint8 * header_data,
    guint8 ** p_data)
{
  WavpackHeader hdr;
  guint8 *end;

  gst_wavpack_read_header (&hdr, header_data);
  /* ckSize does not include the 8 bytes of the chunk id + size field */
  end = header_data + hdr.ckSize + 8;

  /* need at least the 2-byte sub-block header (id + length) */
  if (end - *p_data < 2)
    return FALSE;

  wpmd->id = GST_READ_UINT8 (*p_data);
  /* length byte counts 16-bit words, hence the factor of 2 */
  wpmd->byte_length = 2 * (guint) GST_READ_UINT8 (*p_data + 1);
  *p_data += 2;

  if ((wpmd->id & ID_LARGE) == ID_LARGE) {
    guint extra;

    /* ID_LARGE: a 16-bit extension of the word count follows
     * (also in 16-bit words, i.e. shifted left by 9 as bytes) */
    wpmd->id &= ~ID_LARGE;
    if (end - *p_data < 2)
      return FALSE;
    extra = GST_READ_UINT16_LE (*p_data);
    wpmd->byte_length += (extra << 9);
    *p_data += 2;
  }

  if ((wpmd->id & ID_ODD_SIZE) == ID_ODD_SIZE) {
    /* ID_ODD_SIZE: actual payload is one byte shorter than the
     * word-aligned length */
    wpmd->id &= ~ID_ODD_SIZE;
    --wpmd->byte_length;
  }

  if (wpmd->byte_length > 0) {
    /* payload is stored padded to an even number of bytes */
    if (end - *p_data < wpmd->byte_length + (wpmd->byte_length & 1)) {
      wpmd->data = NULL;
      return FALSE;
    }
    wpmd->data = *p_data;
    *p_data += wpmd->byte_length + (wpmd->byte_length & 1);
  } else {
    wpmd->data = NULL;
  }

  return TRUE;
}
/* Descramble a DNET buffer in place: DNET is just byte-order swapped
 * AC3, so swap every 16-bit word. A trailing odd byte, if any, is left
 * untouched. Returns the (possibly newly-allocated writable) buffer. */
GstBuffer *
gst_rm_utils_descramble_dnet_buffer (GstBuffer * buf)
{
  guint8 *ptr, *limit;

  buf = gst_buffer_make_writable (buf);

  /* dnet = byte-order swapped AC3 */
  ptr = GST_BUFFER_DATA (buf);
  limit = ptr + GST_BUFFER_SIZE (buf);
  for (; ptr + 1 < limit; ptr += sizeof (guint16)) {
    /* byte-swap in an alignment-safe way */
    GST_WRITE_UINT16_BE (ptr, GST_READ_UINT16_LE (ptr));
  }

  return buf;
}
/* we are unlikely to deal with lengths > 2GB here any time soon, so just
 * return a signed int and use that for error reporting */
/* Read an ASF variable-length integer. The two bits of @lentype_flags at
 * @lentype_bit_offset select a field width of 0, 1, 2 or 4 bytes; the
 * value is read little-endian and *p_data / *p_size are advanced past it.
 * Returns -1 when fewer than the required bytes are available. */
static inline gint
asf_packet_read_varlen_int (guint lentype_flags, guint lentype_bit_offset,
    const guint8 ** p_data, guint * p_size)
{
  static const guint lens[4] = { 0, 1, 2, 4 };
  guint nbytes;
  guint value = 0;

  nbytes = lens[(lentype_flags >> lentype_bit_offset) & 0x03];

  /* will make caller bail out with a short read if there's not enough data */
  if (G_UNLIKELY (*p_size < nbytes)) {
    GST_WARNING ("need %u bytes, but only %u bytes available", nbytes,
        *p_size);
    return -1;
  }

  switch (nbytes) {
    case 0:
      break;
    case 1:
      value = GST_READ_UINT8 (*p_data);
      break;
    case 2:
      value = GST_READ_UINT16_LE (*p_data);
      break;
    case 4:
      value = GST_READ_UINT32_LE (*p_data);
      break;
    default:
      g_assert_not_reached ();
  }

  *p_data += nbytes;
  *p_size -= nbytes;

  return (gint) value;
}
/* Correctly format samples with width!=depth for the wav format, i.e.
 * have the data in the highest depth bits and all others zero */
static void
gst_wavenc_format_samples (GstBuffer * buf, guint width, guint depth)
{
  guint8 *ptr = GST_BUFFER_DATA (buf);
  guint remaining = (GST_BUFFER_SIZE (buf) * 8) / width;
  guint shift = width - depth;
  guint32 sample;

  for (; remaining; remaining--) {
    switch (width) {
      case 8:
        *ptr = *ptr << shift;
        ptr += 1;
        break;
      case 16:
        sample = GST_READ_UINT16_LE (ptr);
        sample = sample << shift;
        GST_WRITE_UINT16_LE (ptr, sample);
        ptr += 2;
        break;
      case 24:
        sample = READ24_FROM_LE (ptr);
        sample = sample << shift;
        WRITE24_TO_LE (ptr, sample);
        ptr += 3;
        break;
      case 32:
        sample = GST_READ_UINT32_LE (ptr);
        sample = sample << shift;
        GST_WRITE_UINT32_LE (ptr, sample);
        ptr += 4;
        break;
    }
  }
}
/* Compress 16-bit PCM audio into the MVE DPCM format.
 * Each channel's first sample is stored verbatim (little-endian); every
 * following sample is encoded as one byte indexing dec_table via
 * mve_enc_delta (values >= 128 encode negative deltas). The encoder
 * tracks the decoder's reconstruction in prev[] so quantisation error
 * does not accumulate. @len is the input length in bytes.
 * Returns 0 on success, -1 if a delta cannot be represented. */
gint
mve_compress_audio (guint8 * dest, const guint8 * src, guint16 len,
    guint8 channels)
{
  gint16 prev[2], s;
  gint delta, real_res;
  gint cur_chan;
  guint8 v;

  /* store the initial raw sample for each channel */
  for (cur_chan = 0; cur_chan < channels; ++cur_chan) {
    prev[cur_chan] = GST_READ_UINT16_LE (src);
    GST_WRITE_UINT16_LE (dest, prev[cur_chan]);
    src += 2;
    dest += 2;
    len -= 2;
  }

  cur_chan = 0;
  while (len > 0) {
    s = GST_READ_UINT16_LE (src);
    src += 2;
    delta = s - prev[cur_chan];
    if (delta >= 0)
      v = mve_enc_delta (delta);
    else
      /* negative deltas occupy the upper half of the code space */
      v = 256 - mve_enc_delta (-delta);

    real_res = dec_table[v] + prev[cur_chan];
    if (real_res < -32768 || real_res > 32767) {
      /* correct overflow: nudge the code index so the decoder's
       * reconstruction stays within the 16-bit range */
      /* GST_DEBUG ("co:%d + %d = %d -> new v:%d, dec_table:%d will be %d",
         prev[cur_chan], dec_table[v], real_res, v, dec_table[v],
         prev[cur_chan] + dec_table[v]); */
      if (s > 0) {
        if (real_res > 32767)
          --v;
      } else {
        if (real_res < -32768)
          ++v;
      }
      real_res = dec_table[v] + prev[cur_chan];
    }

    if (G_UNLIKELY (abs (real_res - s) > 32767)) {
      GST_ERROR ("sign loss left unfixed in audio stream, deviation:%d",
          real_res - s);
      return -1;
    }
    *dest++ = v;
    --len;
    /* use previous output instead of input. That way output will not go
       too far from input. */
    prev[cur_chan] += dec_table[v];

    /* alternate channels (no-op for mono) */
    cur_chan = channels - 1 - cur_chan;
  }
  return 0;
}
/* Parse a GAB2 subtitle chunk:
 *   "GAB2\0" magic, uint16 (=2), uint32 name length, UTF-16LE name,
 *   uint16 (=4), uint32 file length, then the srt/ssa file data.
 * On success the extracted subtitle file is stored in sub->subfile (so
 * it can be resent after a seek) and GST_FLOW_OK is returned; any
 * malformed field posts an element error and returns GST_FLOW_ERROR. */
static GstFlowReturn
gst_avi_subtitle_parse_gab2_chunk (GstAviSubtitle * sub, GstBuffer * buf)
{
  const guint8 *data;
  gchar *name_utf8;
  guint name_length;
  guint file_length;
  guint size;

  data = GST_BUFFER_DATA (buf);
  size = GST_BUFFER_SIZE (buf);

  /* check the magic word "GAB2\0", and the next word must be 2 */
  if (size < 12 || memcmp (data, "GAB2\0\2\0", 5 + 2) != 0)
    goto wrong_magic_word;

  /* read 'name' of subtitle */
  name_length = GST_READ_UINT32_LE (data + 5 + 2);
  GST_LOG_OBJECT (sub, "length of name: %u", name_length);
  /* ensure the name plus the following fixed word + length fields fit */
  if (size <= 17 + name_length)
    goto wrong_name_length;

  name_utf8 = g_convert ((gchar *) data + 11, name_length, "UTF-8",
      "UTF-16LE", NULL, NULL, NULL);

  if (name_utf8) {
    GST_LOG_OBJECT (sub, "subtitle name: %s", name_utf8);
    gst_avi_subtitle_title_tag (sub, name_utf8);
    g_free (name_utf8);
  }

  /* next word must be 4 */
  if (GST_READ_UINT16_LE (data + 11 + name_length) != 0x4)
    goto wrong_fixed_word_2;

  file_length = GST_READ_UINT32_LE (data + 13 + name_length);
  GST_LOG_OBJECT (sub, "length srt/ssa file: %u", file_length);

  if (size < (17 + name_length + file_length))
    goto wrong_total_length;

  /* store this, so we can send it again after a seek; note that we shouldn't
   * assume all the remaining data in the chunk is subtitle data, there may
   * be padding at the end for some reason, so only parse file_length bytes */
  sub->subfile = gst_avi_subtitle_extract_file (sub, buf, 17 + name_length,
      file_length);

  if (sub->subfile == NULL)
    goto extract_failed;

  return GST_FLOW_OK;

  /* ERRORS */
wrong_magic_word:
  {
    GST_ELEMENT_ERROR (sub, STREAM, DECODE, (NULL), ("Wrong magic word"));
    return GST_FLOW_ERROR;
  }
wrong_name_length:
  {
    GST_ELEMENT_ERROR (sub, STREAM, DECODE, (NULL),
        ("name doesn't fit in buffer (%d < %d)", size, 17 + name_length));
    return GST_FLOW_ERROR;
  }
wrong_fixed_word_2:
  {
    GST_ELEMENT_ERROR (sub, STREAM, DECODE, (NULL),
        ("wrong fixed word: expected %u, got %u",
            4, GST_READ_UINT16_LE (data + 11 + name_length)));
    return GST_FLOW_ERROR;
  }
wrong_total_length:
  {
    GST_ELEMENT_ERROR (sub, STREAM, DECODE, (NULL),
        ("buffer size is wrong: need %d bytes, have %d bytes",
            17 + name_length + file_length, size));
    return GST_FLOW_ERROR;
  }
extract_failed:
  {
    GST_ELEMENT_ERROR (sub, STREAM, DECODE, (NULL),
        ("could not extract subtitles"));
    return GST_FLOW_ERROR;
  }
}
/* chain function
 * this function does the actual processing
 *
 * Accumulates incoming buffers in an adapter. In the START state it
 * waits for the 32-byte IVF file header, validates it (DKIF magic,
 * version 0, header size 32, VP8 fourcc), configures src caps and
 * framerate and pushes a newsegment. In the DATA state it pops
 * 12-byte frame headers (size + pts) and pushes complete frames
 * downstream with timestamps scaled from the stream framerate. */
static GstFlowReturn
gst_ivf_parse_chain (GstPad * pad, GstBuffer * buf)
{
  GstIvfParse *ivf = GST_IVF_PARSE (GST_OBJECT_PARENT (pad));
  gboolean res;

  /* lazy creation of the adapter */
  if (G_UNLIKELY (ivf->adapter == NULL)) {
    ivf->adapter = gst_adapter_new ();
  }

  GST_LOG_OBJECT (ivf, "Pushing buffer of size %u to adapter",
      GST_BUFFER_SIZE (buf));

  gst_adapter_push (ivf->adapter, buf); /* adapter takes ownership of buf */

  res = GST_FLOW_OK;

  switch (ivf->state) {
    case GST_IVF_PARSE_START:
      if (gst_adapter_available (ivf->adapter) >= 32) {
        GstCaps *caps;

        const guint8 *data = gst_adapter_peek (ivf->adapter, 32);
        guint32 magic = GST_READ_UINT32_LE (data);
        guint16 version = GST_READ_UINT16_LE (data + 4);
        guint16 header_size = GST_READ_UINT16_LE (data + 6);
        guint32 fourcc = GST_READ_UINT32_LE (data + 8);
        guint16 width = GST_READ_UINT16_LE (data + 12);
        guint16 height = GST_READ_UINT16_LE (data + 14);
        guint32 rate_num = GST_READ_UINT32_LE (data + 16);
        guint32 rate_den = GST_READ_UINT32_LE (data + 20);
#ifndef GST_DISABLE_GST_DEBUG
        guint32 num_frames = GST_READ_UINT32_LE (data + 24);
#endif
        /* last 4 bytes unused */
        gst_adapter_flush (ivf->adapter, 32);

        if (magic != GST_MAKE_FOURCC ('D', 'K', 'I', 'F') ||
            version != 0 || header_size != 32 ||
            fourcc != GST_MAKE_FOURCC ('V', 'P', '8', '0')) {
          GST_ELEMENT_ERROR (ivf, STREAM, WRONG_TYPE, (NULL), (NULL));
          return GST_FLOW_ERROR;
        }

        /* create src pad caps */
        caps = gst_caps_new_simple ("video/x-vp8",
            "width", G_TYPE_INT, width, "height", G_TYPE_INT, height,
            "framerate", GST_TYPE_FRACTION, rate_num, rate_den, NULL);

        GST_INFO_OBJECT (ivf, "Found stream: %" GST_PTR_FORMAT, caps);
        GST_LOG_OBJECT (ivf, "Stream has %d frames", num_frames);

        gst_pad_set_caps (ivf->srcpad, caps);
        gst_caps_unref (caps);

        /* keep framerate in instance for convenience */
        ivf->rate_num = rate_num;
        ivf->rate_den = rate_den;

        gst_pad_push_event (ivf->srcpad, gst_event_new_new_segment (FALSE, 1.0,
                GST_FORMAT_TIME, 0, -1, 0));

        /* move along */
        ivf->state = GST_IVF_PARSE_DATA;
      } else {
        GST_LOG_OBJECT (ivf, "Header data not yet available.");
        break;
      }

      /* fall through */

    case GST_IVF_PARSE_DATA:
      /* pop as many complete frames as the adapter holds */
      while (gst_adapter_available (ivf->adapter) > 12) {
        const guint8 *data = gst_adapter_peek (ivf->adapter, 12);
        guint32 frame_size = GST_READ_UINT32_LE (data);
        guint64 frame_pts = GST_READ_UINT64_LE (data + 4);

        GST_LOG_OBJECT (ivf,
            "Read frame header: size %u, pts %" G_GUINT64_FORMAT, frame_size,
            frame_pts);

        if (gst_adapter_available (ivf->adapter) >= 12 + frame_size) {
          GstBuffer *frame;

          gst_adapter_flush (ivf->adapter, 12);

          frame = gst_adapter_take_buffer (ivf->adapter, frame_size);
          gst_buffer_set_caps (frame, GST_PAD_CAPS (ivf->srcpad));
          /* pts is in framerate units: scale to nanoseconds */
          GST_BUFFER_TIMESTAMP (frame) =
              gst_util_uint64_scale_int (GST_SECOND * frame_pts, ivf->rate_den,
              ivf->rate_num);
          GST_BUFFER_DURATION (frame) =
              gst_util_uint64_scale_int (GST_SECOND, ivf->rate_den,
              ivf->rate_num);

          GST_DEBUG_OBJECT (ivf, "Pushing frame of size %u, ts %"
              GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT ", off %"
              G_GUINT64_FORMAT ", off_end %" G_GUINT64_FORMAT,
              GST_BUFFER_SIZE (frame),
              GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (frame)),
              GST_TIME_ARGS (GST_BUFFER_DURATION (frame)),
              GST_BUFFER_OFFSET (frame), GST_BUFFER_OFFSET_END (frame));

          res = gst_pad_push (ivf->srcpad, frame);
          if (res != GST_FLOW_OK)
            break;
        } else {
          GST_LOG_OBJECT (ivf, "Frame data not yet available.");
          break;
        }
      }
      break;

    default:
      g_return_val_if_reached (GST_FLOW_ERROR);
  }

  return res;
}
/**
 * gst_riff_parse_strf_auds:
 * @element: caller element (used for debugging/error).
 * @buf: input data to be used for parsing, stripped from header.
 * @strf: a pointer (returned by this function) to a filled-in
 *        strf/auds structure. Caller should free it.
 * @data: a pointer (returned by this function) to a buffer
 *        containing extradata for this particular stream (e.g.
 *        codec initialization data).
 *
 * Parses an audio stream's strf structure plus optionally some
 * extradata from input data. This function takes ownership of @buf.
 *
 * Returns: TRUE if parsing succeeded, otherwise FALSE. The stream
 *          should be skipped on error, but it is not fatal.
 */
gboolean
gst_riff_parse_strf_auds (GstElement * element,
    GstBuffer * buf, gst_riff_strf_auds ** _strf, GstBuffer ** data)
{
  gst_riff_strf_auds *strf;
  guint bufsize;

  g_return_val_if_fail (buf != NULL, FALSE);
  g_return_val_if_fail (_strf != NULL, FALSE);
  g_return_val_if_fail (data != NULL, FALSE);

  bufsize = GST_BUFFER_SIZE (buf);

  if (bufsize < sizeof (gst_riff_strf_auds))
    goto too_small;

  /* dup the whole chunk; only the leading struct-sized part is used as
   * the strf, the rest is extracted as extradata below */
  strf = g_memdup (GST_BUFFER_DATA (buf), bufsize);

#if (G_BYTE_ORDER == G_BIG_ENDIAN)
  /* on-disk fields are little-endian; byteswap on big-endian hosts */
  strf->format = GUINT16_FROM_LE (strf->format);
  strf->channels = GUINT16_FROM_LE (strf->channels);
  strf->rate = GUINT32_FROM_LE (strf->rate);
  strf->av_bps = GUINT32_FROM_LE (strf->av_bps);
  strf->blockalign = GUINT16_FROM_LE (strf->blockalign);
  strf->size = GUINT16_FROM_LE (strf->size);
#endif

  /* size checking */
  *data = NULL;
  if (bufsize > sizeof (gst_riff_strf_auds) + 2) {
    gint len;

    /* 16-bit cbSize field right after the fixed strf fields gives the
     * extradata length; clamp it to what is actually in the buffer */
    len = GST_READ_UINT16_LE (&GST_BUFFER_DATA (buf)[16]);
    if (len + 2 + sizeof (gst_riff_strf_auds) > bufsize) {
      GST_WARNING_OBJECT (element, "Extradata indicated %d bytes, but only %"
          G_GSSIZE_FORMAT " available", len,
          bufsize - 2 - sizeof (gst_riff_strf_auds));
      len = bufsize - 2 - sizeof (gst_riff_strf_auds);
    }
    if (len)
      *data = gst_buffer_create_sub (buf, sizeof (gst_riff_strf_auds) + 2,
          len);
  }

  /* debug */
  GST_INFO_OBJECT (element, "strf tag found in context auds:");
  GST_INFO_OBJECT (element, " format %d", strf->format);
  GST_INFO_OBJECT (element, " channels %d", strf->channels);
  GST_INFO_OBJECT (element, " rate %d", strf->rate);
  GST_INFO_OBJECT (element, " av_bps %d", strf->av_bps);
  GST_INFO_OBJECT (element, " blockalign %d", strf->blockalign);
  GST_INFO_OBJECT (element, " size %d", strf->size);

  if (*data)
    GST_INFO_OBJECT (element, " %d bytes extradata",
        GST_BUFFER_SIZE (*data));

  gst_buffer_unref (buf);
  *_strf = strf;

  return TRUE;

  /* ERROR */
too_small:
  {
    GST_ERROR_OBJECT (element,
        "Too small strf_auds (%d available, %" G_GSSIZE_FORMAT " needed)",
        bufsize, sizeof (gst_riff_strf_auds));
    gst_buffer_unref (buf);
    return FALSE;
  }
}
/* Allocate (or re-use) the double back-buffer used for MVE video
 * decoding from a "create video buffer" chunk. The chunk carries
 * width/height in 8-pixel units and, depending on @version, a buffer
 * count and a true-color flag. Returns GST_FLOW_ERROR if the stream is
 * uninitialized or the chunk is too short. */
static GstFlowReturn
gst_mve_video_create_buffer (GstMveDemux * mve, guint8 version,
    const guint8 * data, guint16 len)
{
  GstBuffer *buf;
  guint16 w, h, n, true_color, bpp;
  guint required, size;

  GST_DEBUG_OBJECT (mve, "create video buffer");

  if (mve->video_stream == NULL) {
    GST_ELEMENT_ERROR (mve, STREAM, DECODE, (NULL),
        ("trying to create video buffer for uninitialized stream"));
    return GST_FLOW_ERROR;
  }

  /* need 4 to 8 more bytes: width/height are always present (4 bytes),
   * version > 0 adds the buffer count (2 bytes), version > 1 adds the
   * true-color flag (2 bytes).
   * fix: the previous computation ((version > 1) ? 8 : version * 2)
   * yielded 0 for v0 and 2 for v1, allowing reads past @data for short
   * chunks even though 4 resp. 6 bytes are consumed below */
  required = (version > 1) ? 8 : 4 + (version * 2);
  if (len < required)
    return gst_mve_stream_error (mve, required, len);

  /* dimensions are stored in units of 8 pixels */
  w = GST_READ_UINT16_LE (data) << 3;
  h = GST_READ_UINT16_LE (data + 2) << 3;

  if (version > 0)
    n = GST_READ_UINT16_LE (data + 4);
  else
    n = 1;

  if (version > 1)
    true_color = GST_READ_UINT16_LE (data + 6);
  else
    true_color = 0;

  bpp = (true_color ? 2 : 1);
  size = w * h * bpp;

  if (mve->video_stream->buffer != NULL) {
    GST_DEBUG_OBJECT (mve, "video buffer already created");

    if (GST_BUFFER_SIZE (mve->video_stream->buffer) == size * 2)
      return GST_FLOW_OK;

    GST_DEBUG_OBJECT (mve, "video buffer size has changed");
    gst_buffer_unref (mve->video_stream->buffer);
  }

  GST_DEBUG_OBJECT (mve,
      "allocating video buffer, w:%u, h:%u, n:%u, true_color:%u", w, h, n,
      true_color);

  /* we need a buffer to keep the last 2 frames, since those may be
     needed for decoding the next one */
  buf = gst_buffer_new_and_alloc (size * 2);

  mve->video_stream->bpp = bpp;
  mve->video_stream->width = w;
  mve->video_stream->height = h;
  mve->video_stream->buffer = buf;
  mve->video_stream->back_buf1 = GST_BUFFER_DATA (buf);
  mve->video_stream->back_buf2 = mve->video_stream->back_buf1 + size;
  mve->video_stream->max_block_offset = (h - 7) * w - 8;
  memset (mve->video_stream->back_buf1, 0, size * 2);

  return GST_FLOW_OK;
}
/* Decode one MVE video data chunk into a new buffer.
 * The chunk starts with a 14-byte header (frame numbers, offsets,
 * dimensions, flags) followed by the encoded frame. Delta frames swap
 * the two back buffers before decoding; the finished frame is copied
 * from back_buf1 into a freshly allocated pad buffer. For 8bpp streams
 * the palette is attached to the buffer caps. Returns GST_FLOW_ERROR on
 * uninitialized stream, missing code map/palette or decode failure. */
static GstFlowReturn
gst_mve_video_data (GstMveDemux * mve, const guint8 * data, guint16 len,
    GstBuffer ** output)
{
  GstFlowReturn ret = GST_FLOW_OK;
  gint16 cur_frame, last_frame;
  gint16 x_offset, y_offset;
  gint16 x_size, y_size;
  guint16 flags;
  gint dec;
  GstBuffer *buf = NULL;
  GstMveDemuxStream *s = mve->video_stream;

  GST_LOG_OBJECT (mve, "video data");

  if (s == NULL) {
    GST_ELEMENT_ERROR (mve, STREAM, DECODE, (NULL),
        ("trying to decode video data before stream was initialized"));
    return GST_FLOW_ERROR;
  }

  /* advance the running timestamp by one frame duration */
  if (GST_CLOCK_TIME_IS_VALID (mve->frame_duration)) {
    if (GST_CLOCK_TIME_IS_VALID (s->last_ts))
      s->last_ts += mve->frame_duration;
    else
      s->last_ts = 0;
  }

  if (!s->code_map_avail) {
    GST_ELEMENT_ERROR (mve, STREAM, DECODE, (NULL),
        ("no code map available for decoding"));
    return GST_FLOW_ERROR;
  }

  /* need at least 14 more bytes */
  if (len < 14)
    return gst_mve_stream_error (mve, 14, len);

  len -= 14;

  cur_frame = GST_READ_UINT16_LE (data);
  last_frame = GST_READ_UINT16_LE (data + 2);
  x_offset = GST_READ_UINT16_LE (data + 4);
  y_offset = GST_READ_UINT16_LE (data + 6);
  x_size = GST_READ_UINT16_LE (data + 8);
  y_size = GST_READ_UINT16_LE (data + 10);
  flags = GST_READ_UINT16_LE (data + 12);
  data += 14;

  GST_DEBUG_OBJECT (mve,
      "video data hot:%d, cold:%d, xoff:%d, yoff:%d, w:%d, h:%d, flags:%x",
      cur_frame, last_frame, x_offset, y_offset, x_size, y_size, flags);

  if (flags & MVE_VIDEO_DELTA_FRAME) {
    /* delta frames decode against the previous frame: swap back buffers */
    guint8 *temp = s->back_buf1;

    s->back_buf1 = s->back_buf2;
    s->back_buf2 = temp;
  }

  ret = gst_mve_buffer_alloc_for_pad (s, s->width * s->height * s->bpp, &buf);
  if (ret != GST_FLOW_OK)
    return ret;

  if (s->bpp == 2) {
    dec = ipvideo_decode_frame16 (s, data, len);
  } else {
    /* 8bpp decoding requires a palette to have been seen */
    if (s->palette == NULL) {
      GST_ELEMENT_ERROR (mve, STREAM, DECODE, (NULL),
          ("no palette available"));
      goto error;
    }

    dec = ipvideo_decode_frame8 (s, data, len);
  }
  if (dec != 0)
    goto error;

  memcpy (GST_BUFFER_DATA (buf), s->back_buf1, GST_BUFFER_SIZE (buf));
  GST_BUFFER_DURATION (buf) = mve->frame_duration;
  GST_BUFFER_OFFSET_END (buf) = ++s->offset;

  if (s->bpp == 1) {
    GstCaps *caps;

    /* set the palette on the outgoing buffer */
    caps = gst_caps_copy (s->caps);
    gst_caps_set_simple (caps,
        "palette_data", GST_TYPE_BUFFER, s->palette, NULL);
    gst_buffer_set_caps (buf, caps);
    gst_caps_unref (caps);
  }

  *output = buf;
  return GST_FLOW_OK;

error:
  gst_buffer_unref (buf);
  return GST_FLOW_ERROR;
}
/* Depayload one RTP packet of VP8 (RFC 7741 style payload descriptor).
 * Strips the variable-size payload descriptor (X/I/L/T/K optional
 * bytes), accumulates the VP8 payload in an adapter, and on the RTP
 * marker bit emits the complete frame. Keyframes (P bit clear in the
 * VP8 payload header) also trigger caps (re)negotiation from the
 * dimensions in the keyframe header; inter-frames before the first
 * keyframe are dropped and a force-key-unit event is sent upstream.
 * Returns the finished frame buffer or NULL. */
static GstBuffer *
gst_rtp_vp8_depay_process (GstRTPBaseDepayload * depay, GstRTPBuffer * rtp)
{
  GstRtpVP8Depay *self = GST_RTP_VP8_DEPAY (depay);
  GstBuffer *payload;
  guint8 *data;
  guint hdrsize;
  guint size;

  if (G_UNLIKELY (GST_BUFFER_IS_DISCONT (rtp->buffer))) {
    GST_LOG_OBJECT (self, "Discontinuity, flushing adapter");
    gst_adapter_clear (self->adapter);
    self->started = FALSE;
  }

  size = gst_rtp_buffer_get_payload_len (rtp);

  /* At least one header and one vp8 byte */
  if (G_UNLIKELY (size < 2))
    goto too_small;

  data = gst_rtp_buffer_get_payload (rtp);

  if (G_UNLIKELY (!self->started)) {
    /* Check if this is the start of a VP8 frame, otherwise bail */
    /* S=1 and PartID= 0 */
    if ((data[0] & 0x17) != 0x10)
      goto done;

    self->started = TRUE;
  }

  /* walk the payload descriptor to find where the VP8 data starts */
  hdrsize = 1;
  /* Check X optional header */
  if ((data[0] & 0x80) != 0) {
    hdrsize++;
    /* Check I optional header */
    if ((data[1] & 0x80) != 0) {
      if (G_UNLIKELY (size < 3))
        goto too_small;
      hdrsize++;
      /* Check for 16 bits PictureID */
      if ((data[2] & 0x80) != 0)
        hdrsize++;
    }
    /* Check L optional header */
    if ((data[1] & 0x40) != 0)
      hdrsize++;
    /* Check T or K optional headers */
    if ((data[1] & 0x20) != 0 || (data[1] & 0x10) != 0)
      hdrsize++;
  }
  GST_DEBUG_OBJECT (depay, "hdrsize %u, size %u", hdrsize, size);

  if (G_UNLIKELY (hdrsize >= size))
    goto too_small;

  payload = gst_rtp_buffer_get_payload_subbuffer (rtp, hdrsize, -1);
  gst_adapter_push (self->adapter, payload);

  /* Marker indicates that it was the last rtp packet for this frame */
  if (gst_rtp_buffer_get_marker (rtp)) {
    GstBuffer *out;
    guint8 header[10];

    /* peek at the VP8 uncompressed data chunk / keyframe header */
    gst_adapter_copy (self->adapter, &header, 0, 10);

    out = gst_adapter_take_buffer (self->adapter,
        gst_adapter_available (self->adapter));

    self->started = FALSE;

    /* mark keyframes: bit 0 of the frame tag is the P (inter) bit */
    out = gst_buffer_make_writable (out);
    if ((header[0] & 0x01)) {
      GST_BUFFER_FLAG_SET (out, GST_BUFFER_FLAG_DELTA_UNIT);

      if (!self->caps_sent) {
        /* no keyframe seen yet: drop and request one from upstream */
        gst_buffer_unref (out);
        out = NULL;
        GST_INFO_OBJECT (self, "Dropping inter-frame before intra-frame");
        gst_pad_push_event (GST_RTP_BASE_DEPAYLOAD_SINKPAD (depay),
            gst_video_event_new_upstream_force_key_unit (GST_CLOCK_TIME_NONE,
                TRUE, 0));
      }
    } else {
      guint profile, width, height;

      GST_BUFFER_FLAG_UNSET (out, GST_BUFFER_FLAG_DELTA_UNIT);

      /* keyframe header carries profile and 14-bit width/height */
      profile = (header[0] & 0x0e) >> 1;
      width = GST_READ_UINT16_LE (header + 6) & 0x3fff;
      height = GST_READ_UINT16_LE (header + 8) & 0x3fff;

      if (G_UNLIKELY (self->last_width != width ||
              self->last_height != height || self->last_profile != profile)) {
        gchar profile_str[3];
        GstCaps *srccaps;

        snprintf (profile_str, 3, "%u", profile);
        srccaps = gst_caps_new_simple ("video/x-vp8",
            "framerate", GST_TYPE_FRACTION, 0, 1,
            "height", G_TYPE_INT, height,
            "width", G_TYPE_INT, width,
            "profile", G_TYPE_STRING, profile_str, NULL);

        gst_pad_set_caps (GST_RTP_BASE_DEPAYLOAD_SRCPAD (depay), srccaps);
        gst_caps_unref (srccaps);

        self->caps_sent = TRUE;
        self->last_width = width;
        self->last_height = height;
        self->last_profile = profile;
      }
    }

    return out;
  }

done:
  return NULL;

too_small:
  GST_LOG_OBJECT (self, "Invalid rtp packet (too small), ignoring");
  gst_adapter_clear (self->adapter);
  self->started = FALSE;

  goto done;
}
/* Parse one IVF frame: read the 12-byte frame header, wait until the full
 * payload is available, extract it into frame->out_buffer, timestamp it
 * and finish the frame.  For VP8 key frames the resolution is re-read from
 * the frame itself so mid-stream size changes are picked up. */
static GstFlowReturn
gst_ivf_parse_handle_frame_data (GstIvfParse * ivf, GstBaseParseFrame * frame,
    gint * skipsize)
{
  GstBuffer *const buffer = frame->buffer;
  GstMapInfo map;
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *out_buffer;

  gst_buffer_map (buffer, &map, GST_MAP_READ);
  /* FIX: frame data only needs the 12-byte frame header here, not the
   * 32-byte file header (IVF_FILE_HEADER_SIZE) that was checked before;
   * the file header is handled in gst_ivf_parse_handle_frame_start. */
  if (map.size >= IVF_FRAME_HEADER_SIZE) {
    guint32 frame_size = GST_READ_UINT32_LE (map.data);
    guint64 frame_pts = GST_READ_UINT64_LE (map.data + 4);

    GST_LOG_OBJECT (ivf,
        "Read frame header: size %u, pts %" G_GUINT64_FORMAT, frame_size,
        frame_pts);

    if (map.size < IVF_FRAME_HEADER_SIZE + frame_size) {
      /* not enough data yet; ask baseparse for the whole frame */
      gst_base_parse_set_min_frame_size (GST_BASE_PARSE_CAST (ivf),
          IVF_FRAME_HEADER_SIZE + frame_size);
      gst_buffer_unmap (buffer, &map);
      *skipsize = 0;
      goto end;
    }

    gst_buffer_unmap (buffer, &map);

    /* Eventually, we would need the buffer memory in a merged state anyway */
    out_buffer = gst_buffer_copy_region (buffer,
        GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS |
        GST_BUFFER_COPY_META | GST_BUFFER_COPY_MEMORY | GST_BUFFER_COPY_MERGE,
        IVF_FRAME_HEADER_SIZE, frame_size);

    if (!out_buffer) {
      GST_ERROR_OBJECT (ivf, "Failed to copy frame buffer");
      ret = GST_FLOW_ERROR;
      *skipsize = IVF_FRAME_HEADER_SIZE + frame_size;
      goto end;
    }
    gst_buffer_replace (&frame->out_buffer, out_buffer);
    gst_buffer_unref (out_buffer);

    /* Detect resolution changes on key frames */
    if (gst_buffer_map (frame->out_buffer, &map, GST_MAP_READ)) {
      guint32 width, height;

      if (ivf->fourcc == GST_MAKE_FOURCC ('V', 'P', '8', '0')) {
        /* FIX: check the size before reading the 3-byte frame tag; a
         * zero/tiny frame would be read out of bounds otherwise */
        if (map.size >= 10) {
          guint32 frame_tag = GST_READ_UINT24_LE (map.data);

          if (!(frame_tag & 0x01)) {    /* key frame */
            GST_DEBUG_OBJECT (ivf, "key frame detected");

            width = GST_READ_UINT16_LE (map.data + 6) & 0x3fff;
            height = GST_READ_UINT16_LE (map.data + 8) & 0x3fff;
            gst_ivf_parse_set_size (ivf, width, height);
          }
        }
      } else if (ivf->fourcc == GST_MAKE_FOURCC ('V', 'P', '9', '0')) {
        /* Fixme: Add vp9 frame header parsing? */
      } else if (ivf->fourcc == GST_MAKE_FOURCC ('A', 'V', '0', '1')) {
        /* Fixme: Add av1 frame header parsing? */
        /* This would allow to parse dynamic resolution changes */
        /* implement when gstav1parser is ready */
      }
      gst_buffer_unmap (frame->out_buffer, &map);
    }

    if (ivf->fps_n > 0) {
      /* frame_pts counts in units of the fps_d/fps_n timebase */
      GST_BUFFER_TIMESTAMP (out_buffer) =
          gst_util_uint64_scale_int (GST_SECOND * frame_pts, ivf->fps_d,
          ivf->fps_n);
    }

    gst_ivf_parse_update_src_caps (ivf);

    ret = gst_base_parse_finish_frame (GST_BASE_PARSE_CAST (ivf), frame,
        IVF_FRAME_HEADER_SIZE + frame_size);
    *skipsize = 0;
  } else {
    GST_LOG_OBJECT (ivf, "Frame data not yet available.");
    gst_buffer_unmap (buffer, &map);
    *skipsize = 0;
  }

end:
  return ret;
}
static GstFlowReturn gst_mve_timer_create (GstMveDemux * mve, const guint8 * data, guint16 len, GstBuffer ** buf) { guint32 t_rate; guint16 t_subdiv; GstMveDemuxStream *s; GstTagList *list; gint rate_nom, rate_den; g_return_val_if_fail (mve->video_stream != NULL, GST_FLOW_ERROR); /* need 6 more bytes */ if (len < 6) return gst_mve_stream_error (mve, 6, len); t_rate = GST_READ_UINT32_LE (data); t_subdiv = GST_READ_UINT16_LE (data + 4); GST_DEBUG_OBJECT (mve, "found timer:%ux%u", t_rate, t_subdiv); mve->frame_duration = t_rate * t_subdiv * GST_USECOND; /* now really start rolling... */ s = mve->video_stream; if ((s->buffer == NULL) || (s->width == 0) || (s->height == 0)) { GST_ELEMENT_ERROR (mve, STREAM, DECODE, (NULL), ("missing or invalid create-video-buffer segment (%dx%d)", s->width, s->height)); return GST_FLOW_ERROR; } if (s->pad != NULL) { if (s->caps != NULL) { gst_caps_unref (s->caps); s->caps = NULL; } if (s->code_map != NULL) { g_free (s->code_map); s->code_map = NULL; } list = NULL; } else { list = gst_tag_list_new (); gst_tag_list_add (list, GST_TAG_MERGE_REPLACE, GST_TAG_VIDEO_CODEC, "Raw RGB video", NULL); } s->caps = gst_caps_from_string ("video/x-raw-rgb"); if (s->caps == NULL) return GST_FLOW_ERROR; rate_nom = GST_SECOND / GST_USECOND; rate_den = mve->frame_duration / GST_USECOND; gst_caps_set_simple (s->caps, "bpp", G_TYPE_INT, s->bpp * 8, "depth", G_TYPE_INT, (s->bpp == 1) ? 8 : 15, "width", G_TYPE_INT, s->width, "height", G_TYPE_INT, s->height, "framerate", GST_TYPE_FRACTION, rate_nom, rate_den, "endianness", G_TYPE_INT, G_BYTE_ORDER, NULL); if (s->bpp > 1) { gst_caps_set_simple (s->caps, "red_mask", G_TYPE_INT, 0x7C00, /* 31744 */ "green_mask", G_TYPE_INT, 0x03E0, /* 992 */ "blue_mask", G_TYPE_INT, 0x001F, /* 31 */ NULL); } s->code_map = g_malloc ((s->width * s->height) / (8 * 8 * 2)); if (gst_mve_add_stream (mve, s, list)) return gst_pad_push_event (s->pad, gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0, GST_CLOCK_TIME_NONE, 0)) ? 
GST_FLOW_OK : GST_FLOW_ERROR; else return GST_FLOW_OK; }
static GstFlowReturn gst_mve_audio_data (GstMveDemux * mve, guint8 type, const guint8 * data, guint16 len, GstBuffer ** output) { GstFlowReturn ret; GstMveDemuxStream *s = mve->audio_stream; GstBuffer *buf = NULL; guint16 stream_mask; guint16 size; GST_LOG_OBJECT (mve, "audio data"); if (s == NULL) { GST_ELEMENT_ERROR (mve, STREAM, DECODE, (NULL), ("trying to queue samples with no audio stream")); return GST_FLOW_ERROR; } /* need at least 6 more bytes */ if (len < 6) return gst_mve_stream_error (mve, 6, len); len -= 6; stream_mask = GST_READ_UINT16_LE (data + 2); size = GST_READ_UINT16_LE (data + 4); data += 6; if (stream_mask & MVE_DEFAULT_AUDIO_STREAM) { guint16 n_samples = size / s->n_channels / (s->sample_size / 8); GstClockTime duration = (GST_SECOND / s->sample_rate) * n_samples; if (type == MVE_OC_AUDIO_DATA) { guint16 required = (s->compression ? size / 2 + s->n_channels : size); if (len < required) return gst_mve_stream_error (mve, required, len); ret = gst_mve_buffer_alloc_for_pad (s, size, &buf); if (ret != GST_FLOW_OK) return ret; if (s->compression) ipaudio_uncompress ((gint16 *) GST_BUFFER_DATA (buf), size, data, s->n_channels); else memcpy (GST_BUFFER_DATA (buf), data, size); GST_DEBUG_OBJECT (mve, "created audio buffer, size:%u, stream_mask:%x", size, stream_mask); } else { /* silence - create a minimal buffer with no sound */ size = s->n_channels * (s->sample_size / 8); ret = gst_mve_buffer_alloc_for_pad (s, size, &buf); memset (GST_BUFFER_DATA (buf), 0, size); } GST_BUFFER_DURATION (buf) = duration; GST_BUFFER_OFFSET_END (buf) = s->offset + n_samples; *output = buf; s->offset += n_samples; s->last_ts += duration; } else { /* alternate audio streams not supported. are there any movies which use them? */ if (type == MVE_OC_AUDIO_DATA) GST_WARNING_OBJECT (mve, "found non-empty alternate audio stream"); } return GST_FLOW_OK; }
static GstFlowReturn gst_mve_audio_init (GstMveDemux * mve, guint8 version, const guint8 * data, guint16 len) { GstMveDemuxStream *stream; guint16 flags; guint32 requested_buffer; GstTagList *list; gchar *name; GST_DEBUG_OBJECT (mve, "init audio"); /* need 8 more bytes */ if (len < 8) return gst_mve_stream_error (mve, 8, len); if (mve->audio_stream == NULL) { stream = g_new0 (GstMveDemuxStream, 1); stream->offset = 0; stream->last_ts = 0; stream->last_flow = GST_FLOW_OK; mve->audio_stream = stream; } else { stream = mve->audio_stream; gst_caps_unref (stream->caps); } flags = GST_READ_UINT16_LE (data + 2); stream->sample_rate = GST_READ_UINT16_LE (data + 4); requested_buffer = GST_READ_UINT32_LE (data + 6); /* bit 0: 0 = mono, 1 = stereo */ stream->n_channels = (flags & MVE_AUDIO_STEREO) + 1; /* bit 1: 0 = 8 bit, 1 = 16 bit */ stream->sample_size = (((flags & MVE_AUDIO_16BIT) >> 1) + 1) * 8; /* bit 2: 0 = uncompressed, 1 = compressed */ stream->compression = ((version > 0) && (flags & MVE_AUDIO_COMPRESSED)) ? TRUE : FALSE; GST_DEBUG_OBJECT (mve, "audio init, sample_rate:%d, channels:%d, " "bits_per_sample:%d, compression:%d, buffer:%u", stream->sample_rate, stream->n_channels, stream->sample_size, stream->compression, requested_buffer); stream->caps = gst_caps_from_string ("audio/x-raw-int"); if (stream->caps == NULL) return GST_FLOW_ERROR; gst_caps_set_simple (stream->caps, "signed", G_TYPE_BOOLEAN, (stream->sample_size == 8) ? FALSE : TRUE, "depth", G_TYPE_INT, stream->sample_size, "width", G_TYPE_INT, stream->sample_size, "channels", G_TYPE_INT, stream->n_channels, "rate", G_TYPE_INT, stream->sample_rate, NULL); if (stream->sample_size > 8) { /* for uncompressed audio we can simply copy the incoming buffer which is always in little endian format */ gst_caps_set_simple (stream->caps, "endianness", G_TYPE_INT, (stream->compression ? 
G_BYTE_ORDER : G_LITTLE_ENDIAN), NULL); } else if (stream->compression) { GST_WARNING_OBJECT (mve, "compression is only supported for 16-bit samples"); stream->compression = FALSE; } list = gst_tag_list_new (); name = g_strdup_printf ("Raw %d-bit PCM audio", stream->sample_size); gst_tag_list_add (list, GST_TAG_MERGE_REPLACE, GST_TAG_AUDIO_CODEC, name, NULL); g_free (name); if (gst_mve_add_stream (mve, stream, list)) return gst_pad_push_event (mve->audio_stream->pad, gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0, GST_CLOCK_TIME_NONE, 0)) ? GST_FLOW_OK : GST_FLOW_ERROR; else return GST_FLOW_OK; }
static void check_1x1_buffer (GstBuffer * buf, GstCaps * caps) { GstVideoInfo info; GstVideoFrame frame; /* the exact values we check for come from videotestsrc */ static const guint yuv_values[] = { 81, 90, 240, 255 }; static const guint rgb_values[] = { 0xff, 0, 0, 255 }; static const guint gray8_values[] = { 0x51 }; static const guint gray16_values[] = { 0x5151 }; const guint *values; guint i; const GstVideoFormatInfo *finfo; fail_unless (buf != NULL); fail_unless (caps != NULL); fail_unless (gst_video_info_from_caps (&info, caps)); fail_unless (gst_video_frame_map (&frame, &info, buf, GST_MAP_READ)); finfo = info.finfo; if (GST_VIDEO_INFO_IS_YUV (&info)) values = yuv_values; else if (GST_VIDEO_INFO_IS_GRAY (&info)) if (GST_VIDEO_FORMAT_INFO_BITS (finfo) == 8) values = gray8_values; else values = gray16_values; else values = rgb_values; GST_MEMDUMP ("buffer", GST_VIDEO_FRAME_PLANE_DATA (&frame, 0), 8); for (i = 0; i < GST_VIDEO_FRAME_N_COMPONENTS (&frame); i++) { guint8 *data = GST_VIDEO_FRAME_COMP_DATA (&frame, i); GST_DEBUG ("W: %d", GST_VIDEO_FORMAT_INFO_W_SUB (finfo, i)); GST_DEBUG ("H: %d", GST_VIDEO_FORMAT_INFO_H_SUB (finfo, i)); if (GST_VIDEO_FORMAT_INFO_W_SUB (finfo, i) >= GST_VIDEO_FRAME_WIDTH (&frame)) continue; if (GST_VIDEO_FORMAT_INFO_H_SUB (finfo, i) >= GST_VIDEO_FRAME_HEIGHT (&frame)) continue; if (GST_VIDEO_FORMAT_INFO_BITS (finfo) == 8) { fail_unless_equals_int (data[0], values[i]); } else if (GST_VIDEO_FORMAT_INFO_BITS (finfo) == 16) { guint16 pixels, val; gint depth; if (GST_VIDEO_FORMAT_INFO_IS_LE (finfo)) pixels = GST_READ_UINT16_LE (data); else pixels = GST_READ_UINT16_BE (data); depth = GST_VIDEO_FORMAT_INFO_DEPTH (finfo, i); val = pixels >> GST_VIDEO_FORMAT_INFO_SHIFT (finfo, i); val = val & ((1 << depth) - 1); GST_DEBUG ("val %08x %d : %d", pixels, i, val); if (depth <= 8) { fail_unless_equals_int (val, values[i] >> (8 - depth)); } else {
/* Parse the Opus ID header in @buf and store the stream parameters on
 * @dec, then negotiate and set output caps.
 * Header offsets used: channels at byte 9, pre-skip (u16 LE) at 10,
 * output gain (u16 LE) at 16, mapping family at 18, stream counts at
 * 19/20, channel mapping table from 21 — presumably per RFC 7845
 * section 5.1 (NOTE(review): confirm against the muxer).
 * Returns GST_FLOW_ERROR if the buffer is not an ID header or the
 * channel count conflicts with an already-configured one. */
static GstFlowReturn
gst_opus_dec_parse_header (GstOpusDec * dec, GstBuffer * buf)
{
  const guint8 *data = GST_BUFFER_DATA (buf);
  GstCaps *caps;
  /* points either at a static positions table or at a heap array
     allocated in the >8-channel fallback below */
  const GstAudioChannelPosition *pos = NULL;

  if (!gst_opus_header_is_id_header (buf)) {
    GST_ERROR_OBJECT (dec, "Header is not an Opus ID header");
    return GST_FLOW_ERROR;
  }

  /* a channel count configured earlier (non-zero) must match the header */
  if (!(dec->n_channels == 0 || dec->n_channels == data[9])) {
    GST_ERROR_OBJECT (dec, "Opus ID header has invalid channels");
    return GST_FLOW_ERROR;
  }

  dec->n_channels = data[9];
  dec->pre_skip = GST_READ_UINT16_LE (data + 10);
  dec->r128_gain = GST_READ_UINT16_LE (data + 16);
  dec->r128_gain_volume = gst_opus_dec_get_r128_volume (dec->r128_gain);
  GST_INFO_OBJECT (dec,
      "Found pre-skip of %u samples, R128 gain %d (volume %f)",
      dec->pre_skip, dec->r128_gain, dec->r128_gain_volume);

  dec->channel_mapping_family = data[18];
  if (dec->channel_mapping_family == 0) {
    /* implicit mapping */
    GST_INFO_OBJECT (dec, "Channel mapping family 0, implicit mapping");
    dec->n_streams = dec->n_stereo_streams = 1;
    dec->channel_mapping[0] = 0;
    dec->channel_mapping[1] = 1;
  } else {
    dec->n_streams = data[19];
    dec->n_stereo_streams = data[20];
    memcpy (dec->channel_mapping, data + 21, dec->n_channels);

    if (dec->channel_mapping_family == 1) {
      GST_INFO_OBJECT (dec, "Channel mapping family 1, Vorbis mapping");
      switch (dec->n_channels) {
        case 1:
        case 2:
          /* nothing */
          break;
        case 3:
        case 4:
        case 5:
        case 6:
        case 7:
        case 8:
          /* use the static Vorbis position table for 3-8 channels */
          pos = gst_opus_channel_positions[dec->n_channels - 1];
          break;
        default:{
          gint i;
          /* heap-allocated; freed below via the n_channels > 8 check */
          GstAudioChannelPosition *posn =
              g_new (GstAudioChannelPosition, dec->n_channels);

          GST_ELEMENT_WARNING (GST_ELEMENT (dec), STREAM, DECODE,
              (NULL), ("Using NONE channel layout for more than 8 channels"));

          for (i = 0; i < dec->n_channels; i++)
            posn[i] = GST_AUDIO_CHANNEL_POSITION_NONE;
          pos = posn;
        }
      }
    } else {
      GST_INFO_OBJECT (dec, "Channel mapping family %d",
          dec->channel_mapping_family);
    }
  }

  caps = gst_opus_dec_negotiate (dec);

  if (pos) {
    GST_DEBUG_OBJECT (dec, "Setting channel positions on caps");
    gst_audio_set_channel_positions (gst_caps_get_structure (caps, 0), pos);
  }

  /* the >8-channel case above allocated pos on the heap; the other
     cases point at a static table and must not be freed */
  if (dec->n_channels > 8) {
    g_free ((GstAudioChannelPosition *) pos);
  }

  GST_INFO_OBJECT (dec, "Setting src caps to %" GST_PTR_FORMAT, caps);
  gst_pad_set_caps (GST_AUDIO_DECODER_SRC_PAD (dec), caps);
  gst_caps_unref (caps);

  return GST_FLOW_OK;
}
/* Parse one Opus packet: capture ID/comment headers, account for initial
 * padding (pre-skip) from audio clipping meta, emit src caps once, and
 * timestamp each packet from its decoded duration. */
static GstFlowReturn
gst_opus_parse_parse_frame (GstBaseParse * base, GstBaseParseFrame * frame)
{
  guint64 duration;
  GstOpusParse *parse;
  gboolean is_idheader, is_commentheader;
  GstMapInfo map;
  GstAudioClippingMeta *cmeta =
      gst_buffer_get_audio_clipping_meta (frame->buffer);

  parse = GST_OPUS_PARSE (base);

  g_assert (!cmeta || cmeta->format == GST_FORMAT_DEFAULT);

  is_idheader = gst_opus_header_is_id_header (frame->buffer);
  is_commentheader = gst_opus_header_is_comment_header (frame->buffer);

  if (!parse->got_headers || !parse->header_sent) {
    GstCaps *caps;

    /* Opus streams can decode to 1 or 2 channels, so use the header
       value if we have one, or 2 otherwise */
    if (is_idheader) {
      gst_buffer_replace (&parse->id_header, frame->buffer);
      GST_DEBUG_OBJECT (parse, "Found ID header, keeping");
      return GST_BASE_PARSE_FLOW_DROPPED;
    } else if (is_commentheader) {
      gst_buffer_replace (&parse->comment_header, frame->buffer);
      GST_DEBUG_OBJECT (parse, "Found comment header, keeping");
      return GST_BASE_PARSE_FLOW_DROPPED;
    }

    parse->got_headers = TRUE;

    if (cmeta && cmeta->start) {
      parse->pre_skip += cmeta->start;

      gst_buffer_map (frame->buffer, &map, GST_MAP_READ);
      duration = packet_duration_opus (map.data, map.size);
      gst_buffer_unmap (frame->buffer, &map);

      /* Queue frame for later once we know all initial padding */
      if (duration == cmeta->start) {
        frame->flags |= GST_BASE_PARSE_FRAME_FLAG_QUEUE;
      }
    }

    if (!(frame->flags & GST_BASE_PARSE_FRAME_FLAG_QUEUE)) {
      /* FIX: this condition was "FALSE && ..." which made the stored-header
       * path unreachable — captured headers were never used and the
       * pre-skip value was never fixed up in the ID header. */
      if (parse->id_header && parse->comment_header) {
        guint16 pre_skip;

        gst_buffer_map (parse->id_header, &map, GST_MAP_READWRITE);
        pre_skip = GST_READ_UINT16_LE (map.data + 10);
        if (pre_skip != parse->pre_skip) {
          GST_DEBUG_OBJECT (parse,
              "Fixing up pre-skip %u -> %" G_GUINT64_FORMAT, pre_skip,
              parse->pre_skip);
          GST_WRITE_UINT16_LE (map.data + 10, parse->pre_skip);
        }
        gst_buffer_unmap (parse->id_header, &map);

        caps =
            gst_codec_utils_opus_create_caps_from_header (parse->id_header,
            parse->comment_header);
      } else {
        GstCaps *sink_caps;
        guint32 sample_rate = 48000;
        guint8 n_channels, n_streams, n_stereo_streams, channel_mapping_family;
        guint8 channel_mapping[256];
        GstBuffer *id_header;

        /* no headers in the stream: synthesize an ID header from the
           sink caps, or fall back to canonical stereo */
        sink_caps = gst_pad_get_current_caps (GST_BASE_PARSE_SINK_PAD (parse));
        if (!sink_caps
            || !gst_codec_utils_opus_parse_caps (sink_caps, &sample_rate,
                &n_channels, &channel_mapping_family, &n_streams,
                &n_stereo_streams, channel_mapping)) {
          GST_INFO_OBJECT (parse,
              "No headers and no caps, blindly setting up canonical stereo");
          n_channels = 2;
          n_streams = 1;
          n_stereo_streams = 1;
          channel_mapping_family = 0;
          channel_mapping[0] = 0;
          channel_mapping[1] = 1;
        }
        if (sink_caps)
          gst_caps_unref (sink_caps);

        id_header =
            gst_codec_utils_opus_create_header (sample_rate, n_channels,
            channel_mapping_family, n_streams, n_stereo_streams,
            channel_mapping, parse->pre_skip, 0);
        caps = gst_codec_utils_opus_create_caps_from_header (id_header, NULL);
        gst_buffer_unref (id_header);
      }

      gst_buffer_replace (&parse->id_header, NULL);
      gst_buffer_replace (&parse->comment_header, NULL);

      gst_pad_set_caps (GST_BASE_PARSE_SRC_PAD (parse), caps);
      gst_caps_unref (caps);
      parse->header_sent = TRUE;
    }
  }

  /* timestamp the packet from the running total of decoded durations */
  GST_BUFFER_TIMESTAMP (frame->buffer) = parse->next_ts;

  gst_buffer_map (frame->buffer, &map, GST_MAP_READ);
  duration = packet_duration_opus (map.data, map.size);
  gst_buffer_unmap (frame->buffer, &map);
  parse->next_ts += duration;

  GST_BUFFER_DURATION (frame->buffer) = duration;
  /* Opus always runs at a 48 kHz sample clock */
  GST_BUFFER_OFFSET_END (frame->buffer) =
      gst_util_uint64_scale (parse->next_ts, 48000, GST_SECOND);
  GST_BUFFER_OFFSET (frame->buffer) = parse->next_ts;

  return GST_FLOW_OK;
}
/* Pad chain function: accumulate incoming data in the adapter and step
 * the demuxer state machine while it holds at least the number of bytes
 * the current state requires (mve->needed_bytes). */
static GstFlowReturn
gst_mve_demux_chain (GstPad * sinkpad, GstBuffer * inbuf)
{
  GstMveDemux *mve = GST_MVE_DEMUX (GST_PAD_PARENT (sinkpad));
  GstFlowReturn ret = GST_FLOW_OK;

  gst_adapter_push (mve->adapter, inbuf);

  GST_DEBUG_OBJECT (mve, "queuing buffer, needed:%d, available:%u",
      mve->needed_bytes, gst_adapter_available (mve->adapter));

  while ((gst_adapter_available (mve->adapter) >= mve->needed_bytes) &&
      (ret == GST_FLOW_OK)) {
    GstMveDemuxStream *stream = NULL;
    GstBuffer *outbuf = NULL;

    switch (mve->state) {
      case MVEDEMUX_STATE_INITIAL:
        /* discard the initial preamble (presumably the MVE file magic —
           NOTE(review): confirm against the element's init code setting
           needed_bytes), then read 4-byte chunk/segment headers */
        gst_adapter_flush (mve->adapter, mve->needed_bytes);
        mve->chunk_offset += mve->needed_bytes;
        mve->needed_bytes = 4;
        mve->state = MVEDEMUX_STATE_NEXT_CHUNK;
        break;

      case MVEDEMUX_STATE_NEXT_CHUNK:{
        const guint8 *data;
        guint16 size;

        /* peek the 4-byte header: u16 LE size + type bytes */
        data = gst_adapter_peek (mve->adapter, mve->needed_bytes);
        size = GST_MVE_SEGMENT_SIZE (data);

        if (mve->chunk_offset >= mve->chunk_size) {
          /* new chunk, flush buffer and proceed with next segment */
          guint16 chunk_type = GST_READ_UINT16_LE (data + 2);

          gst_adapter_flush (mve->adapter, mve->needed_bytes);
          mve->chunk_size = size;
          mve->chunk_offset = 0;

          if (chunk_type > MVE_CHUNK_END) {
            GST_WARNING_OBJECT (mve,
                "skipping unknown chunk type 0x%02x of size:%u", chunk_type,
                size);
            /* request header + payload so SKIP can flush it all at once */
            mve->needed_bytes += size;
            mve->state = MVEDEMUX_STATE_SKIP;
          } else {
            GST_DEBUG_OBJECT (mve,
                "found new chunk type 0x%02x of size:%u", chunk_type, size);
          }
        } else if (mve->chunk_offset <= mve->chunk_size) {
          /* new segment: wait for header + payload, then parse it */
          GST_DEBUG_OBJECT (mve, "found segment type 0x%02x of size:%u",
              GST_MVE_SEGMENT_TYPE (data), size);

          mve->needed_bytes += size;
          mve->state = MVEDEMUX_STATE_MOVIE;
        }
      }
        break;

      case MVEDEMUX_STATE_MOVIE:
        ret = gst_mve_parse_segment (mve, &stream, &outbuf);

        if ((ret == GST_FLOW_OK) && (outbuf != NULL)) {
          /* send buffer */
          GST_DEBUG_OBJECT (mve,
              "pushing buffer with time %" GST_TIME_FORMAT
              " (%u bytes) on pad %s",
              GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)),
              GST_BUFFER_SIZE (outbuf), GST_PAD_NAME (stream->pad));

          ret = gst_pad_push (stream->pad, outbuf);
          stream->last_flow = ret;
        }

        /* not-linked is only fatal once both streams report it */
        if (ret == GST_FLOW_NOT_LINKED) {
          if (mve->audio_stream
              && mve->audio_stream->last_flow != GST_FLOW_NOT_LINKED)
            ret = GST_FLOW_OK;
          if (mve->video_stream
              && mve->video_stream->last_flow != GST_FLOW_NOT_LINKED)
            ret = GST_FLOW_OK;
        }

        /* update current offset */
        mve->chunk_offset += mve->needed_bytes;

        mve->state = MVEDEMUX_STATE_NEXT_CHUNK;
        mve->needed_bytes = 4;
        break;

      case MVEDEMUX_STATE_SKIP:
        /* drop the whole unknown chunk accumulated in NEXT_CHUNK */
        mve->chunk_offset += mve->needed_bytes;
        gst_adapter_flush (mve->adapter, mve->needed_bytes);
        mve->state = MVEDEMUX_STATE_NEXT_CHUNK;
        mve->needed_bytes = 4;
        break;

      default:
        GST_ERROR_OBJECT (mve, "invalid state: %d", mve->state);
        break;
    }
  }

  return ret;
}
static GstFlowReturn gst_mve_video_palette (GstMveDemux * mve, const guint8 * data, guint16 len) { GstBuffer *buf; guint16 start, count; const guint8 *pal; guint32 *pal_ptr; gint i; GST_DEBUG_OBJECT (mve, "video palette"); if (mve->video_stream == NULL) { GST_ELEMENT_ERROR (mve, STREAM, DECODE, (NULL), ("found palette before video stream was initialized")); return GST_FLOW_ERROR; } /* need 4 more bytes now, more later */ if (len < 4) return gst_mve_stream_error (mve, 4, len); len -= 4; start = GST_READ_UINT16_LE (data); count = GST_READ_UINT16_LE (data + 2); GST_DEBUG_OBJECT (mve, "found palette start:%u, count:%u", start, count); /* need more bytes */ if (len < count * 3) return gst_mve_stream_error (mve, count * 3, len); /* make sure we don't exceed the buffer */ if (start + count > MVE_PALETTE_COUNT) { GST_ELEMENT_ERROR (mve, STREAM, DECODE, (NULL), ("palette too large for buffer")); return GST_FLOW_ERROR; } if (mve->video_stream->palette != NULL) { /* older buffers floating around might still use the old palette, so make sure we can update it */ buf = gst_buffer_make_writable (mve->video_stream->palette); } else { buf = gst_buffer_new_and_alloc (MVE_PALETTE_COUNT * 4); memset (GST_BUFFER_DATA (buf), 0, GST_BUFFER_SIZE (buf)); } mve->video_stream->palette = buf; pal = data + 4; pal_ptr = ((guint32 *) GST_BUFFER_DATA (buf)) + start; for (i = 0; i < count; ++i) { /* convert from 6-bit VGA to 8-bit palette */ guint8 r, g, b; r = (*pal) << 2; ++pal; g = (*pal) << 2; ++pal; b = (*pal) << 2; ++pal; *pal_ptr = (r << 16) | (g << 8) | (b); ++pal_ptr; } return GST_FLOW_OK; }
/* Parse the Opus ID header in @buf, store the stream parameters on @dec
 * and negotiate output caps with the derived channel positions.
 * Returns GST_FLOW_ERROR if the buffer is not an ID header or the channel
 * count conflicts with an already-configured one. */
static GstFlowReturn
gst_opus_dec_parse_header (GstOpusDec * dec, GstBuffer * buf)
{
  const guint8 *data;
  /* FIX: the channel count read from the header is a full byte (up to
   * 255); the previous 64-entry array overflowed the stack in the NONE
   * layout fallback below for streams with more than 64 channels. */
  GstAudioChannelPosition pos[256];
  const GstAudioChannelPosition *posn = NULL;
  GstMapInfo map;

  if (!gst_opus_header_is_id_header (buf)) {
    GST_ERROR_OBJECT (dec, "Header is not an Opus ID header");
    return GST_FLOW_ERROR;
  }

  gst_buffer_map (buf, &map, GST_MAP_READ);
  data = map.data;

  /* a channel count configured earlier (non-zero) must match the header */
  if (!(dec->n_channels == 0 || dec->n_channels == data[9])) {
    gst_buffer_unmap (buf, &map);
    GST_ERROR_OBJECT (dec, "Opus ID header has invalid channels");
    return GST_FLOW_ERROR;
  }

  dec->n_channels = data[9];
  dec->sample_rate = GST_READ_UINT32_LE (data + 12);
  dec->pre_skip = GST_READ_UINT16_LE (data + 10);
  dec->r128_gain = GST_READ_UINT16_LE (data + 16);
  dec->r128_gain_volume = gst_opus_dec_get_r128_volume (dec->r128_gain);
  GST_INFO_OBJECT (dec,
      "Found pre-skip of %u samples, R128 gain %d (volume %f)",
      dec->pre_skip, dec->r128_gain, dec->r128_gain_volume);

  dec->channel_mapping_family = data[18];
  if (dec->channel_mapping_family == 0) {
    /* implicit mapping */
    GST_INFO_OBJECT (dec, "Channel mapping family 0, implicit mapping");
    dec->n_streams = dec->n_stereo_streams = 1;
    dec->channel_mapping[0] = 0;
    dec->channel_mapping[1] = 1;
  } else {
    dec->n_streams = data[19];
    dec->n_stereo_streams = data[20];
    memcpy (dec->channel_mapping, data + 21, dec->n_channels);

    if (dec->channel_mapping_family == 1) {
      GST_INFO_OBJECT (dec, "Channel mapping family 1, Vorbis mapping");
      switch (dec->n_channels) {
        case 1:
        case 2:
          /* nothing */
          break;
        case 3:
        case 4:
        case 5:
        case 6:
        case 7:
        case 8:
          /* static Vorbis position table for 3-8 channels */
          posn = gst_opus_channel_positions[dec->n_channels - 1];
          break;
        default:{
          gint i;

          GST_ELEMENT_WARNING (GST_ELEMENT (dec), STREAM, DECODE,
              (NULL), ("Using NONE channel layout for more than 8 channels"));
          for (i = 0; i < dec->n_channels; i++)
            pos[i] = GST_AUDIO_CHANNEL_POSITION_NONE;
          posn = pos;
        }
      }
    } else {
      GST_INFO_OBJECT (dec, "Channel mapping family %d",
          dec->channel_mapping_family);
    }
  }

  gst_opus_dec_negotiate (dec, posn);

  gst_buffer_unmap (buf, &map);

  return GST_FLOW_OK;
}
/* Pull and parse the TTA file header (22 bytes) and the per-frame seek
 * table, build the frame index, set src caps and send the initial
 * new-segment event.  CRC mismatches are only logged, matching the
 * previous lenient behavior. */
static GstFlowReturn
gst_tta_parse_parse_header (GstTtaParse * ttaparse)
{
  guchar *data;
  GstBuffer *buf = NULL;
  guint32 crc;
  double frame_length;
  int num_frames;
  GstCaps *caps;
  int i;
  guint32 offset;
  GstEvent *discont;

  if (gst_pad_pull_range (ttaparse->sinkpad, 0, 22, &buf) != GST_FLOW_OK)
    goto pull_fail;

  data = GST_BUFFER_DATA (buf);
  ttaparse->channels = GST_READ_UINT16_LE (data + 6);
  ttaparse->bits = GST_READ_UINT16_LE (data + 8);
  ttaparse->samplerate = GST_READ_UINT32_LE (data + 10);
  ttaparse->data_length = GST_READ_UINT32_LE (data + 14);
  /* header CRC covers the first 18 bytes; a mismatch is non-fatal */
  crc = crc32 (data, 18);
  if (crc != GST_READ_UINT32_LE (data + 18)) {
    GST_DEBUG ("Header CRC wrong!");
  }
  /* number of samples per TTA frame */
  frame_length = FRAME_TIME * ttaparse->samplerate;

  num_frames = (ttaparse->data_length / frame_length) + 1;
  ttaparse->num_frames = num_frames;
  gst_buffer_unref (buf);
  buf = NULL;

  ttaparse->index =
      (GstTtaIndex *) g_malloc (num_frames * sizeof (GstTtaIndex));

  /* seek table: one u32 frame size per frame, plus a u32 CRC */
  if (gst_pad_pull_range (ttaparse->sinkpad, 22,
          num_frames * 4 + 4, &buf) != GST_FLOW_OK)
    goto pull_fail;

  data = GST_BUFFER_DATA (buf);
  offset = 22 + num_frames * 4 + 4;     /* header size + seektable size */
  for (i = 0; i < num_frames; i++) {
    ttaparse->index[i].size = GST_READ_UINT32_LE (data + i * 4);
    ttaparse->index[i].pos = offset;
    offset += ttaparse->index[i].size;
    ttaparse->index[i].time = i * FRAME_TIME * GST_SECOND;
  }
  crc = crc32 (data, num_frames * 4);
  if (crc != GST_READ_UINT32_LE (data + num_frames * 4)) {
    GST_DEBUG ("Seektable CRC wrong!");
  }
  /* FIX: the seektable buffer was never released (memory leak) */
  gst_buffer_unref (buf);
  buf = NULL;

  GST_DEBUG
      ("channels: %u, bits: %u, samplerate: %u, data_length: %u, num_frames: %u",
      ttaparse->channels, ttaparse->bits, ttaparse->samplerate,
      ttaparse->data_length, num_frames);

  ttaparse->header_parsed = TRUE;
  caps = gst_caps_new_simple ("audio/x-tta",
      "width", G_TYPE_INT, ttaparse->bits,
      "channels", G_TYPE_INT, ttaparse->channels,
      "rate", G_TYPE_INT, ttaparse->samplerate, NULL);
  gst_pad_set_caps (ttaparse->srcpad, caps);
  /* FIX: gst_pad_set_caps takes its own reference; drop ours to avoid
   * leaking the caps */
  gst_caps_unref (caps);

  discont = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME,
      0, num_frames * FRAME_TIME * GST_SECOND, 0);
  gst_pad_push_event (ttaparse->srcpad, discont);

  return GST_FLOW_OK;

pull_fail:
  {
    GST_ELEMENT_ERROR (ttaparse, STREAM, DEMUX, (NULL),
        ("Couldn't read header"));
    return GST_FLOW_ERROR;
  }
}