static GstFlowReturn
gst_test_reverse_negotiation_sink_buffer_alloc (GstBaseSink * bsink,
    guint64 offset, guint size, GstCaps * caps, GstBuffer ** buf)
{
  GstTestReverseNegotiationSink *sink =
      GST_TEST_REVERSE_NEGOTIATION_SINK (bsink);
  GstVideoFormat fmt;
  gint width, height;

  fail_unless (gst_video_format_parse_caps (caps, &fmt, &width, &height));

  if (sink->nbuffers < 2) {
    *buf =
        gst_buffer_new_and_alloc (gst_video_format_get_size (fmt, width,
            height));
    gst_buffer_set_caps (*buf, caps);
  } else {
    gint fps_n, fps_d;

    fail_unless (gst_video_parse_caps_framerate (caps, &fps_n, &fps_d));

    width = 512;
    height = 128;
    *buf =
        gst_buffer_new_and_alloc (gst_video_format_get_size (fmt, width,
            height));
    caps = gst_video_format_new_caps (fmt, width, height, fps_n, fps_d, 1, 1);
    gst_buffer_set_caps (*buf, caps);
    gst_caps_unref (caps);
  }

  return GST_FLOW_OK;
}
gboolean
gst_vdp_video_transform_size (GstBaseTransform * trans,
    GstPadDirection direction, GstCaps * caps, guint size,
    GstCaps * othercaps, guint * othersize)
{
  GstVdpVideoYUV *video_yuv = GST_VDP_VIDEO_YUV (trans);

  if (direction == GST_PAD_SINK) {
    switch (video_yuv->format) {
      case GST_MAKE_FOURCC ('Y', 'V', '1', '2'):
      {
        *othersize = gst_video_format_get_size (GST_VIDEO_FORMAT_YV12,
            video_yuv->width, video_yuv->height);
        break;
      }
      case GST_MAKE_FOURCC ('I', '4', '2', '0'):
      {
        *othersize = gst_video_format_get_size (GST_VIDEO_FORMAT_YV12,
            video_yuv->width, video_yuv->height);
        break;
      }
      case GST_MAKE_FOURCC ('N', 'V', '1', '2'):
      {
        *othersize = video_yuv->width * video_yuv->height +
            video_yuv->width * video_yuv->height / 2;
        break;
      }
      case GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y'):
      {
        *othersize = gst_video_format_get_size (GST_VIDEO_FORMAT_UYVY,
            video_yuv->width, video_yuv->height);
        break;
      }
      case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
      {
        *othersize = gst_video_format_get_size (GST_VIDEO_FORMAT_YUY2,
            video_yuv->width, video_yuv->height);
        break;
      }
      default:
        return FALSE;
    }
  } else
    *othersize = size;

  return TRUE;
}
static void
gst_jpegenc_resync (GstJpegEnc * jpegenc)
{
  gint width, height;
  gint i, j;

  GST_DEBUG_OBJECT (jpegenc, "resync");

  jpegenc->cinfo.image_width = width = jpegenc->width;
  jpegenc->cinfo.image_height = height = jpegenc->height;
  jpegenc->cinfo.input_components = jpegenc->channels;

  GST_DEBUG_OBJECT (jpegenc, "width %d, height %d", width, height);
  GST_DEBUG_OBJECT (jpegenc, "format %d", jpegenc->format);

  if (gst_video_format_is_rgb (jpegenc->format)) {
    GST_DEBUG_OBJECT (jpegenc, "RGB");
    jpegenc->cinfo.in_color_space = JCS_RGB;
  } else if (gst_video_format_is_gray (jpegenc->format)) {
    GST_DEBUG_OBJECT (jpegenc, "gray");
    jpegenc->cinfo.in_color_space = JCS_GRAYSCALE;
  } else {
    GST_DEBUG_OBJECT (jpegenc, "YUV");
    jpegenc->cinfo.in_color_space = JCS_YCbCr;
  }

  /* input buffer size as max output */
  jpegenc->bufsize = gst_video_format_get_size (jpegenc->format, width, height);
  jpeg_set_defaults (&jpegenc->cinfo);
  jpegenc->cinfo.raw_data_in = TRUE;
  /* duh, libjpeg maps RGB to YUV ... and don't expect some conversion */
  if (jpegenc->cinfo.in_color_space == JCS_RGB)
    jpeg_set_colorspace (&jpegenc->cinfo, JCS_RGB);

  GST_DEBUG_OBJECT (jpegenc, "h_max_samp=%d, v_max_samp=%d",
      jpegenc->h_max_samp, jpegenc->v_max_samp);
  /* image dimension info */
  for (i = 0; i < jpegenc->channels; i++) {
    GST_DEBUG_OBJECT (jpegenc, "comp %i: h_samp=%d, v_samp=%d", i,
        jpegenc->h_samp[i], jpegenc->v_samp[i]);
    jpegenc->cinfo.comp_info[i].h_samp_factor = jpegenc->h_samp[i];
    jpegenc->cinfo.comp_info[i].v_samp_factor = jpegenc->v_samp[i];
    jpegenc->line[i] = g_realloc (jpegenc->line[i],
        jpegenc->v_max_samp * DCTSIZE * sizeof (char *));
    if (!jpegenc->planar) {
      for (j = 0; j < jpegenc->v_max_samp * DCTSIZE; j++) {
        jpegenc->row[i][j] = g_realloc (jpegenc->row[i][j], width);
        jpegenc->line[i][j] = jpegenc->row[i][j];
      }
    }
  }

  /* guard against a potential error in gst_jpegenc_term_destination
     which occurs iff bufsize % 4 < free_space_remaining */
  jpegenc->bufsize = GST_ROUND_UP_4 (jpegenc->bufsize);

  jpeg_suppress_tables (&jpegenc->cinfo, TRUE);

  GST_DEBUG_OBJECT (jpegenc, "resync done");
}
static gboolean
gst_gamma_set_caps (GstBaseTransform * base, GstCaps * incaps,
    GstCaps * outcaps)
{
  GstGamma *gamma = GST_GAMMA (base);

  GST_DEBUG_OBJECT (gamma,
      "setting caps: in %" GST_PTR_FORMAT " out %" GST_PTR_FORMAT, incaps,
      outcaps);

  if (!gst_video_format_parse_caps (incaps, &gamma->format, &gamma->width,
          &gamma->height))
    goto invalid_caps;

  gamma->size =
      gst_video_format_get_size (gamma->format, gamma->width, gamma->height);

  switch (gamma->format) {
    case GST_VIDEO_FORMAT_I420:
    case GST_VIDEO_FORMAT_YV12:
    case GST_VIDEO_FORMAT_Y41B:
    case GST_VIDEO_FORMAT_Y42B:
    case GST_VIDEO_FORMAT_Y444:
    case GST_VIDEO_FORMAT_NV12:
    case GST_VIDEO_FORMAT_NV21:
      gamma->process = gst_gamma_planar_yuv_ip;
      break;
    case GST_VIDEO_FORMAT_YUY2:
    case GST_VIDEO_FORMAT_UYVY:
    case GST_VIDEO_FORMAT_AYUV:
    case GST_VIDEO_FORMAT_YVYU:
      gamma->process = gst_gamma_packed_yuv_ip;
      break;
    case GST_VIDEO_FORMAT_ARGB:
    case GST_VIDEO_FORMAT_ABGR:
    case GST_VIDEO_FORMAT_RGBA:
    case GST_VIDEO_FORMAT_BGRA:
    case GST_VIDEO_FORMAT_xRGB:
    case GST_VIDEO_FORMAT_xBGR:
    case GST_VIDEO_FORMAT_RGBx:
    case GST_VIDEO_FORMAT_BGRx:
    case GST_VIDEO_FORMAT_RGB:
    case GST_VIDEO_FORMAT_BGR:
      gamma->process = gst_gamma_packed_rgb_ip;
      break;
    default:
      goto invalid_caps;
      break;
  }

  return TRUE;

invalid_caps:
  GST_ERROR_OBJECT (gamma, "Invalid caps: %" GST_PTR_FORMAT, incaps);
  return FALSE;
}
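The process callbacks selected above are not reproduced here. As a rough, hypothetical illustration of what an in-place packed-RGB routine of that kind could look like (this is not the element's actual implementation; the gamma_table member is assumed to be a 256-entry guint8 lookup table), the sketch below relies only on the gst_video_format helpers already used throughout this section, which for packed RGB formats report the byte offset of each colour sample within one pixel:

static void
example_gamma_packed_rgb_ip (GstGamma * gamma, guint8 * data)
{
  gint i, j, c;
  gint pixel_stride = gst_video_format_get_pixel_stride (gamma->format, 0);
  gint row_stride =
      gst_video_format_get_row_stride (gamma->format, 0, gamma->width);
  gint offset[3];

  /* byte offset of the R, G and B samples inside one packed pixel */
  for (c = 0; c < 3; c++)
    offset[c] = gst_video_format_get_component_offset (gamma->format, c,
        gamma->width, gamma->height);

  for (i = 0; i < gamma->height; i++) {
    guint8 *row = data + i * row_stride;

    for (j = 0; j < gamma->width; j++) {
      for (c = 0; c < 3; c++) {
        guint8 *p = row + j * pixel_stride + offset[c];

        *p = gamma->gamma_table[*p];    /* assumed 256-entry LUT */
      }
    }
  }
}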
/* Allocate buffer and copy image data into Y444 format */
static GstFlowReturn
theora_handle_image (GstTheoraDec * dec, th_ycbcr_buffer buf, GstBuffer ** out)
{
  gint width, height, stride;
  GstFlowReturn result;
  int i, plane;
  GstVideoFormat format;
  guint8 *dest, *src;

  switch (dec->info.pixel_fmt) {
    case TH_PF_444:
      format = GST_VIDEO_FORMAT_Y444;
      break;
    case TH_PF_420:
      format = GST_VIDEO_FORMAT_I420;
      break;
    case TH_PF_422:
      format = GST_VIDEO_FORMAT_Y42B;
      break;
    default:
      g_assert_not_reached ();
  }

  result =
      gst_pad_alloc_buffer_and_set_caps (dec->srcpad, GST_BUFFER_OFFSET_NONE,
      gst_video_format_get_size (format, dec->width, dec->height),
      GST_PAD_CAPS (dec->srcpad), out);
  if (G_UNLIKELY (result != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (dec, "could not get buffer, reason: %s",
        gst_flow_get_name (result));
    return result;
  }

  for (plane = 0; plane < 3; plane++) {
    width = gst_video_format_get_component_width (format, plane, dec->width);
    height = gst_video_format_get_component_height (format, plane, dec->height);
    stride = gst_video_format_get_row_stride (format, plane, dec->width);

    dest = GST_BUFFER_DATA (*out) +
        gst_video_format_get_component_offset (format, plane, dec->width,
        dec->height);
    src = buf[plane].data;
    src += ((height == dec->height) ? dec->offset_y : dec->offset_y / 2)
        * buf[plane].stride;
    src += (width == dec->width) ? dec->offset_x : dec->offset_x / 2;

    for (i = 0; i < height; i++) {
      memcpy (dest, src, width);

      dest += stride;
      src += buf[plane].stride;
    }
  }

  return GST_FLOW_OK;
}
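The per-plane arithmetic in the copy loop is easier to follow with concrete numbers; the comment below is a worked example added for illustration (not from the original file), ignoring any odd-dimension rounding:

/* For GST_VIDEO_FORMAT_I420 at 320x240 the helpers used above evaluate to
 *
 *   plane 0 (Y): width 320, height 240, row stride 320, offset     0
 *   plane 1 (U): width 160, height 120, row stride 160, offset 76800
 *   plane 2 (V): width 160, height 120, row stride 160, offset 96000
 *
 * and gst_video_format_get_size () returns 115200 bytes in total. */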
/* get notified of caps and plug in the correct process function */
static gboolean
gst_video_balance_set_caps (GstBaseTransform * base, GstCaps * incaps,
    GstCaps * outcaps)
{
  GstVideoBalance *videobalance = GST_VIDEO_BALANCE (base);

  GST_DEBUG_OBJECT (videobalance,
      "in %" GST_PTR_FORMAT " out %" GST_PTR_FORMAT, incaps, outcaps);

  videobalance->process = NULL;

  if (!gst_video_format_parse_caps (incaps, &videobalance->format,
          &videobalance->width, &videobalance->height))
    goto invalid_caps;

  videobalance->size =
      gst_video_format_get_size (videobalance->format, videobalance->width,
      videobalance->height);

  switch (videobalance->format) {
    case GST_VIDEO_FORMAT_I420:
    case GST_VIDEO_FORMAT_YV12:
    case GST_VIDEO_FORMAT_Y41B:
    case GST_VIDEO_FORMAT_Y42B:
    case GST_VIDEO_FORMAT_Y444:
      videobalance->process = gst_video_balance_planar_yuv;
      break;
    case GST_VIDEO_FORMAT_YUY2:
    case GST_VIDEO_FORMAT_UYVY:
    case GST_VIDEO_FORMAT_AYUV:
    case GST_VIDEO_FORMAT_YVYU:
      videobalance->process = gst_video_balance_packed_yuv;
      break;
    case GST_VIDEO_FORMAT_ARGB:
    case GST_VIDEO_FORMAT_ABGR:
    case GST_VIDEO_FORMAT_RGBA:
    case GST_VIDEO_FORMAT_BGRA:
    case GST_VIDEO_FORMAT_xRGB:
    case GST_VIDEO_FORMAT_xBGR:
    case GST_VIDEO_FORMAT_RGBx:
    case GST_VIDEO_FORMAT_BGRx:
    case GST_VIDEO_FORMAT_RGB:
    case GST_VIDEO_FORMAT_BGR:
      videobalance->process = gst_video_balance_packed_rgb;
      break;
    default:
      break;
  }

  return videobalance->process != NULL;

invalid_caps:
  GST_ERROR_OBJECT (videobalance, "Invalid caps: %" GST_PTR_FORMAT, incaps);
  return FALSE;
}
static gboolean
gst_retinex_get_unit_size (GstBaseTransform * btrans, GstCaps * caps,
    guint * size)
{
  GstVideoFormat format;
  gint width, height;

  if (!gst_video_format_parse_caps (caps, &format, &width, &height))
    return FALSE;

  *size = gst_video_format_get_size (format, width, height);

  GST_DEBUG_OBJECT (btrans, "unit size = %d for format %d w %d height %d",
      *size, format, width, height);

  return TRUE;
}
static gboolean
gst_patchdetect_get_unit_size (GstBaseTransform * trans, GstCaps * caps,
    guint * size)
{
  int width, height;
  GstVideoFormat format;
  gboolean ret;

  ret = gst_video_format_parse_caps (caps, &format, &width, &height);
  /* only compute the size when parsing succeeded, otherwise format/width/
   * height are uninitialized */
  if (ret)
    *size = gst_video_format_get_size (format, width, height);

  return ret;
}
static gboolean
gst_video_scale_get_unit_size (GstBaseTransform * trans, GstCaps * caps,
    guint * size)
{
  GstVideoFormat format;
  gint width, height;

  if (!gst_video_format_parse_caps (caps, &format, &width, &height))
    return FALSE;

  *size = gst_video_format_get_size (format, width, height);

  return TRUE;
}
gboolean
gst_vdp_video_buffer_calculate_size (guint32 fourcc, gint width, gint height,
    guint * size)
{
  switch (fourcc) {
    case GST_MAKE_FOURCC ('Y', 'V', '1', '2'):
    {
      *size = gst_video_format_get_size (GST_VIDEO_FORMAT_YV12, width, height);
      break;
    }
    case GST_MAKE_FOURCC ('I', '4', '2', '0'):
    {
      *size = gst_video_format_get_size (GST_VIDEO_FORMAT_YV12, width, height);
      break;
    }
    case GST_MAKE_FOURCC ('N', 'V', '1', '2'):
    {
      *size = width * height + width * height / 2;
      break;
    }
    case GST_MAKE_FOURCC ('U', 'Y', 'V', 'Y'):
    {
      *size = gst_video_format_get_size (GST_VIDEO_FORMAT_UYVY, width, height);
      break;
    }
    case GST_MAKE_FOURCC ('Y', 'U', 'Y', '2'):
    {
      *size = gst_video_format_get_size (GST_VIDEO_FORMAT_YUY2, width, height);
      break;
    }
    default:
      return FALSE;
  }

  return TRUE;
}
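Reusing GST_VIDEO_FORMAT_YV12 for the I420 case is harmless here: the two formats differ only in the order of the chroma planes, so gst_video_format_get_size() returns the same byte count for both. As a quick sanity check (a calculation added for illustration, not code from the element): for 320x240, both that branch and the hand-written NV12 formula above give 320 * 240 + 2 * (160 * 120) = 320 * 240 + 320 * 240 / 2 = 115200 bytes; for widths that are not nicely aligned the library's stride rounding can make the two expressions diverge.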
static gboolean
gst_video_flip_get_unit_size (GstBaseTransform * btrans, GstCaps * caps,
    guint * size)
{
  GstVideoFormat format;
  gint width, height;

  if (!gst_video_format_parse_caps (caps, &format, &width, &height))
    return FALSE;

  *size = gst_video_format_get_size (format, width, height);

  GST_DEBUG_OBJECT (btrans, "our frame size is %d bytes (%dx%d)", *size,
      width, height);

  return TRUE;
}
static void
sink_handoff_cb_I420 (GstElement * object, GstBuffer * buffer, GstPad * pad,
    gpointer user_data)
{
  guint *sink_pos = (guint *) user_data;
  gboolean contains_text = (*sink_pos == 1 || *sink_pos == 2);
  guint c, i, j;
  guint8 *data = GST_BUFFER_DATA (buffer);
  gboolean all_red = TRUE;
  guint8 *comp;
  gint comp_stride, comp_width, comp_height;
  const guint8 color[] = { 81, 90, 240 };

  fail_unless_equals_int (GST_BUFFER_SIZE (buffer),
      gst_video_format_get_size (GST_VIDEO_FORMAT_I420, 640, 480));

  for (c = 0; c < 3; c++) {
    comp =
        data + gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420, c,
        640, 480);
    comp_stride =
        gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420, c, 640);
    comp_width =
        gst_video_format_get_component_width (GST_VIDEO_FORMAT_I420, c, 640);
    comp_height =
        gst_video_format_get_component_height (GST_VIDEO_FORMAT_I420, c, 480);

    for (i = 0; i < comp_height; i++) {
      for (j = 0; j < comp_width; j++) {
        all_red = all_red && (comp[i * comp_stride + j] == color[c]);
      }
    }
  }

  fail_unless (contains_text != all_red,
      "Frame %d is incorrect (all red %d, contains text %d)", *sink_pos,
      all_red, contains_text);

  *sink_pos = *sink_pos + 1;
}
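The reference value { 81, 90, 240 } used above is saturated red in studio-swing BT.601 YCbCr, which is what a solid red test frame decodes to in I420. A quick check of the conversion (a calculation added for illustration, not part of the test): for R=255, G=0, B=0, Y ≈ 16 + 65.5 ≈ 81, Cb ≈ 128 − 37.8 ≈ 90, Cr ≈ 128 + 112 = 240.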
GstFlowReturn
gst_base_video_decoder_alloc_src_frame (GstBaseVideoDecoder *
    base_video_decoder, GstVideoFrame * frame)
{
  GstFlowReturn flow_ret;
  int num_bytes;

  gst_base_video_decoder_set_src_caps (base_video_decoder);

  num_bytes = gst_video_format_get_size (base_video_decoder->state.format,
      base_video_decoder->state.width, base_video_decoder->state.height);
  flow_ret =
      gst_pad_alloc_buffer_and_set_caps (GST_BASE_VIDEO_CODEC_SRC_PAD
      (base_video_decoder), GST_BUFFER_OFFSET_NONE, num_bytes,
      GST_PAD_CAPS (GST_BASE_VIDEO_CODEC_SRC_PAD (base_video_decoder)),
      &frame->src_buffer);

  if (flow_ret != GST_FLOW_OK) {
    GST_WARNING ("failed to get buffer");
  }

  return flow_ret;
}
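With 0.10-style gst_pad_alloc_buffer_and_set_caps() the peer is allowed to hand back a buffer carrying different caps than were requested (the test sink at the top of this section deliberately does exactly that), so a caller of a helper like this may want to re-parse the returned caps before writing into the buffer. A minimal sketch of such a check, assuming nothing beyond the public 0.10 video API (the function name is hypothetical):

static gboolean
example_check_alloced_caps (GstBuffer * buf, GstVideoFormat * fmt,
    gint * width, gint * height)
{
  GstCaps *caps = GST_BUFFER_CAPS (buf);

  /* the allocated buffer may carry renegotiated caps */
  return caps != NULL && gst_video_format_parse_caps (caps, fmt, width, height);
}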
static gboolean
gst_deinterlace2_setcaps (GstPad * pad, GstCaps * caps)
{
  gboolean res = TRUE;
  GstDeinterlace2 *self = GST_DEINTERLACE2 (gst_pad_get_parent (pad));
  GstPad *otherpad;
  GstStructure *structure;
  GstVideoFormat fmt;
  guint32 fourcc;
  GstCaps *othercaps;

  otherpad = (pad == self->srcpad) ? self->sinkpad : self->srcpad;

  structure = gst_caps_get_structure (caps, 0);

  res = gst_structure_get_int (structure, "width", &self->frame_width);
  res &= gst_structure_get_int (structure, "height", &self->frame_height);
  res &=
      gst_structure_get_fraction (structure, "framerate", &self->frame_rate_n,
      &self->frame_rate_d);
  res &= gst_structure_get_fourcc (structure, "format", &fourcc);
  /* TODO: get interlaced, field_layout, field_order */

  if (!res)
    goto invalid_caps;

  if (self->fields == GST_DEINTERLACE2_ALL) {
    gint fps_n = self->frame_rate_n, fps_d = self->frame_rate_d;

    if (!gst_fraction_double (&fps_n, &fps_d, otherpad != self->srcpad))
      goto invalid_caps;

    othercaps = gst_caps_copy (caps);

    gst_caps_set_simple (othercaps, "framerate", GST_TYPE_FRACTION, fps_n,
        fps_d, NULL);
  } else {
    othercaps = gst_caps_ref (caps);
  }

  if (!gst_pad_set_caps (otherpad, othercaps))
    goto caps_not_accepted;
  gst_caps_unref (othercaps);

  /* TODO: introduce self->field_stride */
  self->field_height = self->frame_height / 2;

  fmt = gst_video_format_from_fourcc (fourcc);

  /* TODO: only true if fields are subbuffers of interlaced frames,
     change when the buffer-fields concept has landed */
  self->field_stride =
      gst_video_format_get_row_stride (fmt, 0, self->frame_width) * 2;
  self->output_stride =
      gst_video_format_get_row_stride (fmt, 0, self->frame_width);

  /* in bytes */
  self->line_length =
      gst_video_format_get_row_stride (fmt, 0, self->frame_width);
  self->frame_size =
      gst_video_format_get_size (fmt, self->frame_width, self->frame_height);

  if (self->fields == GST_DEINTERLACE2_ALL && otherpad == self->srcpad)
    self->field_duration =
        gst_util_uint64_scale (GST_SECOND, self->frame_rate_d,
        self->frame_rate_n);
  else
    self->field_duration =
        gst_util_uint64_scale (GST_SECOND, self->frame_rate_d,
        2 * self->frame_rate_n);

  GST_DEBUG_OBJECT (self, "Set caps: %" GST_PTR_FORMAT, caps);

done:
  gst_object_unref (self);
  return res;

invalid_caps:
  res = FALSE;
  GST_ERROR_OBJECT (pad, "Invalid caps: %" GST_PTR_FORMAT, caps);
  goto done;

caps_not_accepted:
  res = FALSE;
  GST_ERROR_OBJECT (pad, "Caps not accepted: %" GST_PTR_FORMAT, othercaps);
  gst_caps_unref (othercaps);
  goto done;
}
static gboolean
gst_base_video_encoder_sink_setcaps (GstPad * pad, GstCaps * caps)
{
  GstBaseVideoEncoder *base_video_encoder;
  GstBaseVideoEncoderClass *base_video_encoder_class;
  GstStructure *structure;
  GstVideoState *state, tmp_state;
  gboolean ret;
  gboolean changed = FALSE;

  base_video_encoder = GST_BASE_VIDEO_ENCODER (gst_pad_get_parent (pad));
  base_video_encoder_class =
      GST_BASE_VIDEO_ENCODER_GET_CLASS (base_video_encoder);

  /* subclass should do something here ... */
  g_return_val_if_fail (base_video_encoder_class->set_format != NULL, FALSE);

  GST_DEBUG_OBJECT (base_video_encoder, "setcaps %" GST_PTR_FORMAT, caps);

  GST_BASE_VIDEO_CODEC_STREAM_LOCK (base_video_encoder);

  state = &GST_BASE_VIDEO_CODEC (base_video_encoder)->state;
  memset (&tmp_state, 0, sizeof (tmp_state));

  tmp_state.caps = gst_caps_ref (caps);

  structure = gst_caps_get_structure (caps, 0);

  ret =
      gst_video_format_parse_caps (caps, &tmp_state.format, &tmp_state.width,
      &tmp_state.height);
  if (!ret)
    goto exit;

  changed = (tmp_state.format != state->format
      || tmp_state.width != state->width || tmp_state.height != state->height);

  if (!gst_video_parse_caps_framerate (caps, &tmp_state.fps_n,
          &tmp_state.fps_d)) {
    tmp_state.fps_n = 0;
    tmp_state.fps_d = 1;
  }
  changed = changed || (tmp_state.fps_n != state->fps_n
      || tmp_state.fps_d != state->fps_d);

  if (!gst_video_parse_caps_pixel_aspect_ratio (caps, &tmp_state.par_n,
          &tmp_state.par_d)) {
    tmp_state.par_n = 1;
    tmp_state.par_d = 1;
  }
  changed = changed || (tmp_state.par_n != state->par_n
      || tmp_state.par_d != state->par_d);

  tmp_state.have_interlaced =
      gst_structure_get_boolean (structure, "interlaced",
      &tmp_state.interlaced);
  changed = changed || (tmp_state.have_interlaced != state->have_interlaced
      || tmp_state.interlaced != state->interlaced);

  tmp_state.bytes_per_picture =
      gst_video_format_get_size (tmp_state.format, tmp_state.width,
      tmp_state.height);
  tmp_state.clean_width = tmp_state.width;
  tmp_state.clean_height = tmp_state.height;
  tmp_state.clean_offset_left = 0;
  tmp_state.clean_offset_top = 0;

  if (changed) {
    /* arrange draining pending frames */
    gst_base_video_encoder_drain (base_video_encoder);

    /* and subclass should be ready to configure format at any time around */
    if (base_video_encoder_class->set_format)
      ret = base_video_encoder_class->set_format (base_video_encoder,
          &tmp_state);
    if (ret) {
      gst_caps_replace (&state->caps, NULL);
      *state = tmp_state;
    }
  } else {
    /* no need to stir things up */
    GST_DEBUG_OBJECT (base_video_encoder,
        "new video format identical to configured format");
    gst_caps_unref (tmp_state.caps);
    ret = TRUE;
  }

exit:
  GST_BASE_VIDEO_CODEC_STREAM_UNLOCK (base_video_encoder);

  if (!ret) {
    GST_WARNING_OBJECT (base_video_encoder, "rejected caps %" GST_PTR_FORMAT,
        caps);
  }

  g_object_unref (base_video_encoder);

  return ret;
}
static GstFlowReturn
gst_frei0r_mixer_collected (GstCollectPads * pads, GstFrei0rMixer * self)
{
  GstBuffer *inbuf0 = NULL, *inbuf1 = NULL, *inbuf2 = NULL;
  GstBuffer *outbuf = NULL;
  GstFlowReturn ret = GST_FLOW_OK;
  GSList *l;
  GstFrei0rMixerClass *klass = GST_FREI0R_MIXER_GET_CLASS (self);
  GstClockTime timestamp;
  gdouble time;
  GstSegment *segment = NULL;

  if (G_UNLIKELY (self->width <= 0 || self->height <= 0))
    return GST_FLOW_NOT_NEGOTIATED;

  if (G_UNLIKELY (!self->f0r_instance)) {
    self->f0r_instance =
        gst_frei0r_instance_construct (klass->ftable, klass->properties,
        klass->n_properties, self->property_cache, self->width, self->height);

    if (G_UNLIKELY (!self->f0r_instance))
      return GST_FLOW_ERROR;
  }

  if (self->newseg_event) {
    gst_pad_push_event (self->src, self->newseg_event);
    self->newseg_event = NULL;
  }

  if ((ret =
          gst_pad_alloc_buffer_and_set_caps (self->src,
              GST_BUFFER_OFFSET_NONE, gst_video_format_get_size (self->fmt,
                  self->width, self->height), GST_PAD_CAPS (self->src),
              &outbuf)) != GST_FLOW_OK)
    return ret;

  for (l = pads->data; l; l = l->next) {
    GstCollectData *cdata = l->data;

    if (cdata->pad == self->sink0) {
      inbuf0 = gst_collect_pads_pop (pads, cdata);
      segment = &cdata->segment;
    } else if (cdata->pad == self->sink1) {
      inbuf1 = gst_collect_pads_pop (pads, cdata);
    } else if (cdata->pad == self->sink2) {
      inbuf2 = gst_collect_pads_pop (pads, cdata);
    }
  }

  if (!inbuf0 || !inbuf1 || (!inbuf2 && self->sink2))
    goto eos;

  g_assert (segment != NULL);
  timestamp = GST_BUFFER_TIMESTAMP (inbuf0);
  timestamp = gst_segment_to_stream_time (segment, GST_FORMAT_TIME, timestamp);

  GST_DEBUG_OBJECT (self, "sync to %" GST_TIME_FORMAT,
      GST_TIME_ARGS (timestamp));

  if (GST_CLOCK_TIME_IS_VALID (timestamp))
    gst_object_sync_values (G_OBJECT (self), timestamp);

  gst_buffer_copy_metadata (outbuf, inbuf0,
      GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS);
  time = ((gdouble) GST_BUFFER_TIMESTAMP (outbuf)) / GST_SECOND;

  GST_OBJECT_LOCK (self);
  klass->ftable->update2 (self->f0r_instance, time,
      (const guint32 *) GST_BUFFER_DATA (inbuf0),
      (const guint32 *) GST_BUFFER_DATA (inbuf1),
      (inbuf2) ? (const guint32 *) GST_BUFFER_DATA (inbuf2) : NULL,
      (guint32 *) GST_BUFFER_DATA (outbuf));
  GST_OBJECT_UNLOCK (self);

  gst_buffer_unref (inbuf0);
  gst_buffer_unref (inbuf1);
  if (inbuf2)
    gst_buffer_unref (inbuf2);

  ret = gst_pad_push (self->src, outbuf);

  return ret;

eos:
  {
    GST_DEBUG_OBJECT (self, "no data available, must be EOS");
    gst_buffer_unref (outbuf);
    if (inbuf0)
      gst_buffer_unref (inbuf0);
    if (inbuf1)
      gst_buffer_unref (inbuf1);
    if (inbuf2)
      gst_buffer_unref (inbuf2);
    gst_pad_push_event (self->src, gst_event_new_eos ());
    return GST_FLOW_UNEXPECTED;
  }
}
/**
 * gst_video_format_convert:
 * @format: a #GstVideoFormat
 * @width: the width of video
 * @height: the height of video
 * @fps_n: frame rate numerator
 * @fps_d: frame rate denominator
 * @src_format: #GstFormat of the @src_value
 * @src_value: value to convert
 * @dest_format: #GstFormat of the @dest_value
 * @dest_value: pointer to destination value
 *
 * Converts among various #GstFormat types. This function handles
 * GST_FORMAT_BYTES, GST_FORMAT_TIME, and GST_FORMAT_DEFAULT. For
 * raw video, GST_FORMAT_DEFAULT corresponds to video frames. This
 * function can be used to handle pad queries of the type GST_QUERY_CONVERT.
 *
 * Since: 0.10.16
 *
 * Returns: TRUE if the conversion was successful.
 */
gboolean
gst_video_format_convert (GstVideoFormat format, int width, int height,
    int fps_n, int fps_d,
    GstFormat src_format, gint64 src_value,
    GstFormat dest_format, gint64 * dest_value)
{
  gboolean ret = FALSE;
  int size;

  g_return_val_if_fail (format != GST_VIDEO_FORMAT_UNKNOWN, 0);
  g_return_val_if_fail (width > 0 && height > 0, 0);

  size = gst_video_format_get_size (format, width, height);

  GST_DEBUG ("converting value %" G_GINT64_FORMAT " from %s to %s",
      src_value, gst_format_get_name (src_format),
      gst_format_get_name (dest_format));

  if (src_format == dest_format) {
    *dest_value = src_value;
    ret = TRUE;
    goto done;
  }

  if (src_value == -1) {
    *dest_value = -1;
    ret = TRUE;
    goto done;
  }

  /* bytes to frames */
  if (src_format == GST_FORMAT_BYTES && dest_format == GST_FORMAT_DEFAULT) {
    if (size != 0) {
      *dest_value = gst_util_uint64_scale_int (src_value, 1, size);
    } else {
      GST_ERROR ("blocksize is 0");
      *dest_value = 0;
    }
    ret = TRUE;
    goto done;
  }

  /* frames to bytes */
  if (src_format == GST_FORMAT_DEFAULT && dest_format == GST_FORMAT_BYTES) {
    *dest_value = gst_util_uint64_scale_int (src_value, size, 1);
    ret = TRUE;
    goto done;
  }

  /* time to frames */
  if (src_format == GST_FORMAT_TIME && dest_format == GST_FORMAT_DEFAULT) {
    if (fps_d != 0) {
      *dest_value = gst_util_uint64_scale (src_value,
          fps_n, GST_SECOND * fps_d);
    } else {
      GST_ERROR ("framerate denominator is 0");
      *dest_value = 0;
    }
    ret = TRUE;
    goto done;
  }

  /* frames to time */
  if (src_format == GST_FORMAT_DEFAULT && dest_format == GST_FORMAT_TIME) {
    if (fps_n != 0) {
      *dest_value = gst_util_uint64_scale (src_value,
          GST_SECOND * fps_d, fps_n);
    } else {
      GST_ERROR ("framerate numerator is 0");
      *dest_value = 0;
    }
    ret = TRUE;
    goto done;
  }

  /* time to bytes */
  if (src_format == GST_FORMAT_TIME && dest_format == GST_FORMAT_BYTES) {
    if (fps_d != 0) {
      *dest_value = gst_util_uint64_scale (src_value,
          fps_n * size, GST_SECOND * fps_d);
    } else {
      GST_ERROR ("framerate denominator is 0");
      *dest_value = 0;
    }
    ret = TRUE;
    goto done;
  }

  /* bytes to time */
  if (src_format == GST_FORMAT_BYTES && dest_format == GST_FORMAT_TIME) {
    if (fps_n != 0 && size != 0) {
      *dest_value = gst_util_uint64_scale (src_value,
          GST_SECOND * fps_d, fps_n * size);
    } else {
      GST_ERROR ("framerate denominator and/or blocksize is 0");
      *dest_value = 0;
    }
    ret = TRUE;
  }

done:

  GST_DEBUG ("ret=%d result %" G_GINT64_FORMAT, ret, *dest_value);

  return ret;
}
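As the documentation block notes, this helper maps directly onto GST_QUERY_CONVERT handling. A minimal, hypothetical src-pad query function is sketched below; the function name, format, dimensions and framerate are placeholders rather than values taken from any of the elements in this section, and a real element would substitute its negotiated state:

static gboolean
example_src_query (GstPad * pad, GstQuery * query)
{
  gboolean res = FALSE;

  switch (GST_QUERY_TYPE (query)) {
    case GST_QUERY_CONVERT:{
      GstFormat src_fmt, dest_fmt;
      gint64 src_val, dest_val;

      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
      /* placeholder format/size/framerate */
      res = gst_video_format_convert (GST_VIDEO_FORMAT_I420, 320, 240, 30, 1,
          src_fmt, src_val, dest_fmt, &dest_val);
      if (res)
        gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
      break;
    }
    default:
      res = gst_pad_query_default (pad, query);
      break;
  }

  return res;
}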
static GstFlowReturn
gst_jasper_dec_negotiate (GstJasperDec * dec, jas_image_t * image)
{
  GstFlowReturn flow_ret = GST_FLOW_OK;
  gint width, height, channels;
  gint i, j;
  gboolean negotiate = FALSE;
  jas_clrspc_t clrspc;
  GstCaps *allowed_caps, *caps;

  width = jas_image_width (image);
  height = jas_image_height (image);
  channels = jas_image_numcmpts (image);

  GST_LOG_OBJECT (dec, "%d x %d, %d components", width, height, channels);

  /* jp2c bitstream has no real colour space info (kept in container),
   * so decoder may only pretend to know, where it really does not */
  if (!jas_clrspc_isunknown (dec->clrspc)) {
    clrspc = dec->clrspc;
    GST_DEBUG_OBJECT (dec, "forcing container supplied colour space %d",
        clrspc);
    jas_image_setclrspc (image, clrspc);
  } else
    clrspc = jas_image_clrspc (image);

  if (!width || !height || !channels || jas_clrspc_isunknown (clrspc))
    goto fail_image;

  if (dec->width != width || dec->height != height ||
      dec->channels != channels || dec->clrspc != clrspc)
    negotiate = TRUE;

  if (channels != 3)
    goto not_supported;

  for (i = 0; i < channels; i++) {
    gint cheight, cwidth, depth, sgnd;

    cheight = jas_image_cmptheight (image, i);
    cwidth = jas_image_cmptwidth (image, i);
    depth = jas_image_cmptprec (image, i);
    sgnd = jas_image_cmptsgnd (image, i);

    GST_LOG_OBJECT (dec, "image component %d, %dx%d, depth %d, sgnd %d", i,
        cwidth, cheight, depth, sgnd);

    if (depth != 8 || sgnd)
      goto not_supported;

    if (dec->cheight[i] != cheight || dec->cwidth[i] != cwidth) {
      dec->cheight[i] = cheight;
      dec->cwidth[i] = cwidth;
      negotiate = TRUE;
    }
  }

  if (!negotiate && dec->format != GST_VIDEO_FORMAT_UNKNOWN)
    goto done;

  /* clear and refresh to new state */
  flow_ret = GST_FLOW_NOT_NEGOTIATED;
  dec->format = GST_VIDEO_FORMAT_UNKNOWN;
  dec->width = width;
  dec->height = height;
  dec->channels = channels;

  /* retrieve allowed caps, and find the first one that reasonably maps
   * to the parameters of the colourspace */
  caps = gst_pad_get_allowed_caps (dec->srcpad);
  if (!caps) {
    GST_DEBUG_OBJECT (dec, "... but no peer, using template caps");
    /* need to copy because get_allowed_caps returns a ref,
       and get_pad_template_caps doesn't */
    caps = gst_caps_copy (gst_pad_get_pad_template_caps (dec->srcpad));
  }
  /* avoid lists of fourcc, etc */
  allowed_caps = gst_caps_normalize (caps);
  caps = NULL;
  GST_LOG_OBJECT (dec, "allowed source caps %" GST_PTR_FORMAT, allowed_caps);

  for (i = 0; i < gst_caps_get_size (allowed_caps); i++) {
    GstVideoFormat format;
    gboolean ok;

    if (caps)
      gst_caps_unref (caps);
    caps = gst_caps_copy_nth (allowed_caps, i);
    /* sigh, ds and _parse_caps need fixed caps for parsing, fixate */
    gst_pad_fixate_caps (dec->srcpad, caps);
    GST_LOG_OBJECT (dec, "checking caps %" GST_PTR_FORMAT, caps);

    if (!gst_video_format_parse_caps (caps, &format, NULL, NULL))
      continue;

    if (gst_video_format_is_rgb (format) &&
        jas_clrspc_fam (clrspc) == JAS_CLRSPC_FAM_RGB) {
      GST_DEBUG_OBJECT (dec, "trying RGB");
      if ((dec->cmpt[0] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_RGB_R))) < 0 ||
          (dec->cmpt[1] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_RGB_G))) < 0 ||
          (dec->cmpt[2] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_RGB_B))) < 0) {
        GST_DEBUG_OBJECT (dec, "missing RGB color component");
        continue;
      }
    } else if (gst_video_format_is_yuv (format) &&
        jas_clrspc_fam (clrspc) == JAS_CLRSPC_FAM_YCBCR) {
      GST_DEBUG_OBJECT (dec, "trying YUV");
      if ((dec->cmpt[0] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_YCBCR_Y))) < 0 ||
          (dec->cmpt[1] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_YCBCR_CB))) < 0 ||
          (dec->cmpt[2] = jas_image_getcmptbytype (image,
                  JAS_IMAGE_CT_COLOR (JAS_CLRSPC_CHANIND_YCBCR_CR))) < 0) {
        GST_DEBUG_OBJECT (dec, "missing YUV color component");
        continue;
      }
    } else
      continue;

    /* match format with validity checks */
    ok = TRUE;
    for (j = 0; j < channels; j++) {
      gint cmpt;

      cmpt = dec->cmpt[j];
      if (dec->cwidth[cmpt] != gst_video_format_get_component_width (format,
              j, width) ||
          dec->cheight[cmpt] != gst_video_format_get_component_height (format,
              j, height))
        ok = FALSE;
    }
    /* commit to this format */
    if (ok) {
      dec->format = format;
      break;
    }
  }

  if (caps)
    gst_caps_unref (caps);
  gst_caps_unref (allowed_caps);

  if (dec->format != GST_VIDEO_FORMAT_UNKNOWN) {
    /* cache some video format properties */
    for (j = 0; j < channels; ++j) {
      dec->offset[j] = gst_video_format_get_component_offset (dec->format, j,
          dec->width, dec->height);
      dec->inc[j] = gst_video_format_get_pixel_stride (dec->format, j);
      dec->stride[j] = gst_video_format_get_row_stride (dec->format, j,
          dec->width);
    }
    dec->image_size = gst_video_format_get_size (dec->format, width, height);
    dec->alpha = gst_video_format_has_alpha (dec->format);

    if (dec->buf)
      g_free (dec->buf);
    dec->buf = g_new0 (glong, dec->width);

    caps = gst_video_format_new_caps (dec->format, dec->width, dec->height,
        dec->framerate_numerator, dec->framerate_denominator, 1, 1);

    GST_DEBUG_OBJECT (dec, "Set format to %d, size to %dx%d", dec->format,
        dec->width, dec->height);

    if (!gst_pad_set_caps (dec->srcpad, caps))
      flow_ret = GST_FLOW_NOT_NEGOTIATED;
    else
      flow_ret = GST_FLOW_OK;

    gst_caps_unref (caps);
  }

done:
  return flow_ret;

  /* ERRORS */
fail_image:
  {
    GST_DEBUG_OBJECT (dec, "Failed to process decoded image.");
    flow_ret = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }
not_supported:
  {
    GST_DEBUG_OBJECT (dec, "Decoded image has unsupported colour space.");
    GST_ELEMENT_ERROR (dec, STREAM, DECODE, (NULL), ("Unsupported colorspace"));
    flow_ret = GST_FLOW_ERROR;
    goto done;
  }
}
static gboolean
gst_video_scale_set_caps (GstBaseTransform * trans, GstCaps * in,
    GstCaps * out)
{
  GstVideoScale *videoscale = GST_VIDEO_SCALE (trans);
  gboolean ret;
  gint from_dar_n, from_dar_d, to_dar_n, to_dar_d;
  gint from_par_n, from_par_d, to_par_n, to_par_d;

  ret =
      gst_video_format_parse_caps (in, &videoscale->format,
      &videoscale->from_width, &videoscale->from_height);
  ret &=
      gst_video_format_parse_caps (out, NULL, &videoscale->to_width,
      &videoscale->to_height);
  if (!ret)
    goto done;

  videoscale->src_size = gst_video_format_get_size (videoscale->format,
      videoscale->from_width, videoscale->from_height);
  videoscale->dest_size = gst_video_format_get_size (videoscale->format,
      videoscale->to_width, videoscale->to_height);

  if (!gst_video_parse_caps_pixel_aspect_ratio (in, &from_par_n, &from_par_d))
    from_par_n = from_par_d = 1;
  if (!gst_video_parse_caps_pixel_aspect_ratio (out, &to_par_n, &to_par_d))
    to_par_n = to_par_d = 1;

  if (!gst_util_fraction_multiply (videoscale->from_width,
          videoscale->from_height, from_par_n, from_par_d, &from_dar_n,
          &from_dar_d)) {
    from_dar_n = from_dar_d = -1;
  }

  if (!gst_util_fraction_multiply (videoscale->to_width,
          videoscale->to_height, to_par_n, to_par_d, &to_dar_n, &to_dar_d)) {
    to_dar_n = to_dar_d = -1;
  }

  videoscale->borders_w = videoscale->borders_h = 0;
  if (to_dar_n != from_dar_n || to_dar_d != from_dar_d) {
    if (videoscale->add_borders) {
      gint n, d, to_h, to_w;

      if (from_dar_n != -1 && from_dar_d != -1
          && gst_util_fraction_multiply (from_dar_n, from_dar_d, to_par_n,
              to_par_d, &n, &d)) {
        to_h = gst_util_uint64_scale_int (videoscale->to_width, d, n);
        if (to_h <= videoscale->to_height) {
          videoscale->borders_h = videoscale->to_height - to_h;
          videoscale->borders_w = 0;
        } else {
          to_w = gst_util_uint64_scale_int (videoscale->to_height, n, d);
          g_assert (to_w <= videoscale->to_width);
          videoscale->borders_h = 0;
          videoscale->borders_w = videoscale->to_width - to_w;
        }
      } else {
        GST_WARNING_OBJECT (videoscale, "Can't calculate borders");
      }
    } else {
      GST_WARNING_OBJECT (videoscale, "Can't keep DAR!");
    }
  }

  if (videoscale->tmp_buf)
    g_free (videoscale->tmp_buf);
  videoscale->tmp_buf = g_malloc (videoscale->to_width * 8 * 4);

  gst_base_transform_set_passthrough (trans,
      (videoscale->from_width == videoscale->to_width
          && videoscale->from_height == videoscale->to_height));

  GST_DEBUG_OBJECT (videoscale, "from=%dx%d (par=%d/%d dar=%d/%d), size %d "
      "-> to=%dx%d (par=%d/%d dar=%d/%d borders=%d:%d), size %d",
      videoscale->from_width, videoscale->from_height, from_par_n, from_par_d,
      from_dar_n, from_dar_d, videoscale->src_size, videoscale->to_width,
      videoscale->to_height, to_par_n, to_par_d, to_dar_n, to_dar_d,
      videoscale->borders_w, videoscale->borders_h, videoscale->dest_size);

done:
  return ret;
}
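The border logic is easiest to see with numbers. A worked example added for illustration (not code from the element): scaling 640x480 with 1/1 pixel aspect (DAR 4:3) to 1920x1080 with 1/1 pixel aspect (DAR 16:9) gives n/d = 4/3, so to_h = 1920 * 3 / 4 = 1440, which exceeds the 1080 output height; the else branch then computes to_w = 1080 * 4 / 3 = 1440 and borders_w = 1920 − 1440 = 480, i.e. a 240-pixel pillarbox on each side.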
static gboolean
gst_y4m_dec_parse_header (GstY4mDec * y4mdec, char *header)
{
  char *end;
  int format = 420;
  int interlaced_char = 0;

  if (memcmp (header, "YUV4MPEG2 ", 10) != 0) {
    return FALSE;
  }
  header += 10;

  while (*header) {
    GST_DEBUG_OBJECT (y4mdec, "parsing at '%s'", header);
    switch (*header) {
      case ' ':
        header++;
        break;
      case 'C':
        header++;
        format = strtoul (header, &end, 10);
        if (end == header)
          goto error;
        header = end;
        break;
      case 'W':
        header++;
        y4mdec->width = strtoul (header, &end, 10);
        if (end == header)
          goto error;
        header = end;
        break;
      case 'H':
        header++;
        y4mdec->height = strtoul (header, &end, 10);
        if (end == header)
          goto error;
        header = end;
        break;
      case 'I':
        header++;
        if (header[0] == 0) {
          GST_WARNING_OBJECT (y4mdec, "Expecting interlaced flag");
          return FALSE;
        }
        interlaced_char = header[0];
        header++;
        break;
      case 'F':
        header++;
        y4mdec->fps_n = strtoul (header, &end, 10);
        if (end == header)
          goto error;
        header = end;
        if (header[0] != ':') {
          GST_WARNING_OBJECT (y4mdec, "Expecting :");
          return FALSE;
        }
        header++;
        y4mdec->fps_d = strtoul (header, &end, 10);
        if (end == header)
          goto error;
        header = end;
        break;
      case 'A':
        header++;
        y4mdec->par_n = strtoul (header, &end, 10);
        if (end == header)
          goto error;
        header = end;
        if (header[0] != ':') {
          GST_WARNING_OBJECT (y4mdec, "Expecting :");
          return FALSE;
        }
        header++;
        y4mdec->par_d = strtoul (header, &end, 10);
        if (end == header)
          goto error;
        header = end;
        break;
      default:
        GST_WARNING_OBJECT (y4mdec, "Unknown y4m header field '%c', ignoring",
            *header);
        while (*header && *header != ' ')
          header++;
        break;
    }
  }

  switch (format) {
    case 420:
      y4mdec->format = GST_VIDEO_FORMAT_I420;
      break;
    case 422:
      y4mdec->format = GST_VIDEO_FORMAT_Y42B;
      break;
    case 444:
      y4mdec->format = GST_VIDEO_FORMAT_Y444;
      break;
    default:
      GST_WARNING_OBJECT (y4mdec, "unknown y4m format %d", format);
      return FALSE;
  }

  if (y4mdec->width <= 0 || y4mdec->width > MAX_SIZE ||
      y4mdec->height <= 0 || y4mdec->height > MAX_SIZE) {
    GST_WARNING_OBJECT (y4mdec, "Dimensions %dx%d out of range",
        y4mdec->width, y4mdec->height);
    return FALSE;
  }

  y4mdec->frame_size = gst_video_format_get_size (y4mdec->format,
      y4mdec->width, y4mdec->height);

  switch (interlaced_char) {
    case 0:
    case '?':
    case 'p':
      y4mdec->interlaced = FALSE;
      break;
    case 't':
    case 'b':
      y4mdec->interlaced = TRUE;
      y4mdec->tff = (interlaced_char == 't');
      break;
    default:
      GST_WARNING_OBJECT (y4mdec, "Unknown interlaced char '%c'",
          interlaced_char);
      return FALSE;
      break;
  }

  if (y4mdec->fps_n == 0)
    y4mdec->fps_n = 1;
  if (y4mdec->fps_d == 0)
    y4mdec->fps_d = 1;
  if (y4mdec->par_n == 0)
    y4mdec->par_n = 1;
  if (y4mdec->par_d == 0)
    y4mdec->par_d = 1;

  return TRUE;
error:
  GST_WARNING_OBJECT (y4mdec, "Expecting number y4m header at '%s'", header);
  return FALSE;
}
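For reference, the comment below shows a hypothetical header (constructed for illustration, not taken from the sources) and what the parser above derives from it:

/* "YUV4MPEG2 W640 H480 F30000:1001 Ip A1:1 C420"
 *
 * parses to width=640, height=480, fps=30000/1001, progressive, par=1/1 and
 * GST_VIDEO_FORMAT_I420, so frame_size becomes
 * gst_video_format_get_size (GST_VIDEO_FORMAT_I420, 640, 480) = 460800 bytes. */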
static void
omx_setup (GstOmxBaseFilter *omx_base)
{
  GstOmxJpegEnc *self;
  GOmxCore *gomx;

  self = GST_OMX_JPEGENC (omx_base);
  gomx = (GOmxCore *) omx_base->gomx;

  GST_INFO_OBJECT (omx_base, "begin");

  {
    OMX_PARAM_PORTDEFINITIONTYPE param;

    /* Output port configuration. */
    G_OMX_PORT_GET_DEFINITION (omx_base->out_port, &param);

    param.format.image.eCompressionFormat = OMX_IMAGE_CodingJPEG;

    G_OMX_PORT_SET_DEFINITION (omx_base->out_port, &param);

    /* some workarounds required for TI components. */
    {
      guint32 fourcc;
      gint width, height;

      /* the component should do this instead */
      {
        G_OMX_PORT_GET_DEFINITION (omx_base->in_port, &param);

        width = param.format.image.nFrameWidth;
        height = param.format.image.nFrameHeight;
        fourcc = g_omx_colorformat_to_fourcc (param.format.image.eColorFormat);

        /* this is against the standard; nBufferSize is read-only. */
        param.nBufferSize =
            gst_video_format_get_size (gst_video_format_from_fourcc (fourcc),
            GST_ROUND_UP_16 (width), GST_ROUND_UP_16 (height));

        G_OMX_PORT_SET_DEFINITION (omx_base->in_port, &param);
      }

      /* the component should do this instead */
      {
        G_OMX_PORT_GET_DEFINITION (omx_base->out_port, &param);

        param.nBufferSize = width * height;
        param.format.image.nFrameWidth = width;
        param.format.image.nFrameHeight = height;

        G_OMX_PORT_SET_DEFINITION (omx_base->out_port, &param);
      }
    }
  }

  {
    OMX_IMAGE_PARAM_QFACTORTYPE param;

    G_OMX_PORT_GET_PARAM (omx_base->out_port, OMX_IndexParamQFactor, &param);

    param.nQFactor = self->quality;

    G_OMX_PORT_SET_PARAM (omx_base->out_port, OMX_IndexParamQFactor, &param);
  }

  GST_INFO_OBJECT (omx_base, "end");
}
CogFrame *
gst_cog_buffer_wrap (GstBuffer * buf, GstVideoFormat format, int width,
    int height)
{
  CogFrame *frame;
  int size;

  size = gst_video_format_get_size (format, width, height);
  if (GST_BUFFER_SIZE (buf) != size) {
    GST_ERROR ("size incorrect, expected %d, got %d", size,
        GST_BUFFER_SIZE (buf));
  }

  switch (format) {
    case GST_VIDEO_FORMAT_I420:
      frame = cog_frame_new_from_data_I420 (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_YV12:
      frame = cog_frame_new_from_data_YV12 (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_YUY2:
      frame = cog_frame_new_from_data_YUY2 (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_UYVY:
      frame = cog_frame_new_from_data_UYVY (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_AYUV:
      frame = cog_frame_new_from_data_AYUV (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_Y42B:
      frame = cog_frame_new_from_data_Y42B (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_Y444:
      frame = cog_frame_new_from_data_Y444 (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_v210:
      frame = cog_frame_new_from_data_v210 (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_v216:
      frame = cog_frame_new_from_data_v216 (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_RGBx:
      frame = cog_frame_new_from_data_RGBx (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_BGRx:
      frame = cog_frame_new_from_data_BGRx (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_xRGB:
      frame = cog_frame_new_from_data_xRGB (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_xBGR:
      frame = cog_frame_new_from_data_xBGR (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_RGBA:
      frame = cog_frame_new_from_data_RGBA (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_BGRA:
      frame = cog_frame_new_from_data_BGRA (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_ARGB:
      frame = cog_frame_new_from_data_ARGB (GST_BUFFER_DATA (buf), width, height);
      break;
    case GST_VIDEO_FORMAT_ABGR:
      frame = cog_frame_new_from_data_ABGR (GST_BUFFER_DATA (buf), width, height);
      break;
    default:
      g_assert_not_reached ();
  }

  cog_frame_set_free_callback (frame, gst_cog_frame_free, buf);

  return frame;
}