/* Reconfigure the libjpeg compressor state from the cached stream description
 * (width/height/format/channels and per-component sampling factors) stored on
 * @jpegenc, and (re)allocate the scanline pointer tables used for raw-data
 * encoding.  Must be called after gst_jpegenc_setcaps() has filled in the
 * cached fields it reads (h_samp/v_samp, h_max_samp/v_max_samp, planar). */
static void
gst_jpegenc_resync (GstJpegEnc * jpegenc)
{
  gint width, height;
  gint i, j;

  GST_DEBUG_OBJECT (jpegenc, "resync");

  /* feed the cached image geometry into the libjpeg compress struct */
  jpegenc->cinfo.image_width = width = jpegenc->width;
  jpegenc->cinfo.image_height = height = jpegenc->height;
  jpegenc->cinfo.input_components = jpegenc->channels;

  GST_DEBUG_OBJECT (jpegenc, "width %d, height %d", width, height);
  GST_DEBUG_OBJECT (jpegenc, "format %d", jpegenc->format);

  /* map the GStreamer video format family onto a libjpeg input colorspace */
  if (gst_video_format_is_rgb (jpegenc->format)) {
    GST_DEBUG_OBJECT (jpegenc, "RGB");
    jpegenc->cinfo.in_color_space = JCS_RGB;
  } else if (gst_video_format_is_gray (jpegenc->format)) {
    GST_DEBUG_OBJECT (jpegenc, "gray");
    jpegenc->cinfo.in_color_space = JCS_GRAYSCALE;
  } else {
    GST_DEBUG_OBJECT (jpegenc, "YUV");
    jpegenc->cinfo.in_color_space = JCS_YCbCr;
  }

  /* input buffer size as max output */
  jpegenc->bufsize = gst_video_format_get_size (jpegenc->format, width, height);
  /* jpeg_set_defaults() must run after in_color_space is set; it derives the
   * default jpeg colorspace and sampling from it */
  jpeg_set_defaults (&jpegenc->cinfo);
  /* we feed libjpeg pre-separated component rows (raw mode), not packed pixels */
  jpegenc->cinfo.raw_data_in = TRUE;
  /* duh, libjpeg maps RGB to YUV ... and don't expect some conversion */
  if (jpegenc->cinfo.in_color_space == JCS_RGB)
    jpeg_set_colorspace (&jpegenc->cinfo, JCS_RGB);

  GST_DEBUG_OBJECT (jpegenc, "h_max_samp=%d, v_max_samp=%d",
      jpegenc->h_max_samp, jpegenc->v_max_samp);
  /* image dimension info */
  for (i = 0; i < jpegenc->channels; i++) {
    GST_DEBUG_OBJECT (jpegenc, "comp %i: h_samp=%d, v_samp=%d", i,
        jpegenc->h_samp[i], jpegenc->v_samp[i]);
    /* hand the per-component sampling factors (computed in setcaps) to libjpeg */
    jpegenc->cinfo.comp_info[i].h_samp_factor = jpegenc->h_samp[i];
    jpegenc->cinfo.comp_info[i].v_samp_factor = jpegenc->v_samp[i];
    /* line[i] holds v_max_samp * DCTSIZE row pointers, as required by
     * jpeg_write_raw_data() which consumes one MCU row at a time */
    jpegenc->line[i] = g_realloc (jpegenc->line[i],
        jpegenc->v_max_samp * DCTSIZE * sizeof (char *));
    if (!jpegenc->planar) {
      /* packed input: keep private per-row scratch buffers; the encode path
       * presumably deinterleaves pixels into row[i][j] before compression */
      for (j = 0; j < jpegenc->v_max_samp * DCTSIZE; j++) {
        jpegenc->row[i][j] = g_realloc (jpegenc->row[i][j], width);
        jpegenc->line[i][j] = jpegenc->row[i][j];
      }
    }
  }

  /* guard against a potential error in gst_jpegenc_term_destination
     which occurs iff bufsize % 4 < free_space_remaining */
  jpegenc->bufsize = GST_ROUND_UP_4 (jpegenc->bufsize);

  /* emit tables only when they change, rather than in every frame */
  jpeg_suppress_tables (&jpegenc->cinfo, TRUE);

  GST_DEBUG_OBJECT (jpegenc, "resync done");
}
/* Sink pad setcaps handler: parse the incoming raw-video caps, cache the
 * stream description (dimensions, framerate, PAR, per-component layout and
 * jpeg sampling factors) on the element, negotiate matching source caps and
 * resync the libjpeg state.
 *
 * Returns TRUE if the caps were accepted, FALSE otherwise.
 *
 * Fix: the early "caps unchanged" return leaked the element reference taken
 * by gst_pad_get_parent() — every other exit path unrefs @enc, this one
 * did not. */
static gboolean
gst_jpegenc_setcaps (GstPad * pad, GstCaps * caps)
{
  GstJpegEnc *enc = GST_JPEGENC (gst_pad_get_parent (pad));
  GstVideoFormat format;
  gint width, height;
  gint fps_num, fps_den;
  gint par_num, par_den;
  gint i;
  GstCaps *othercaps;
  gboolean ret;

  /* get info from caps */
  if (!gst_video_format_parse_caps (caps, &format, &width, &height))
    goto refuse_caps;
  /* optional; pass along if present */
  fps_num = fps_den = -1;
  par_num = par_den = -1;
  gst_video_parse_caps_framerate (caps, &fps_num, &fps_den);
  gst_video_parse_caps_pixel_aspect_ratio (caps, &par_num, &par_den);

  if (width == enc->width && height == enc->height && enc->format == format
      && fps_num == enc->fps_num && fps_den == enc->fps_den
      && par_num == enc->par_num && par_den == enc->par_den) {
    /* nothing changed; drop the ref taken by gst_pad_get_parent() above
     * before returning (previously leaked here) */
    gst_object_unref (enc);
    return TRUE;
  }

  /* store input description */
  enc->format = format;
  enc->width = width;
  enc->height = height;
  enc->fps_num = fps_num;
  enc->fps_den = fps_den;
  enc->par_num = par_num;
  enc->par_den = par_den;

  /* prepare a cached image description */
  enc->channels = 3 + (gst_video_format_has_alpha (format) ? 1 : 0);
  /* ... but any alpha is disregarded in encoding */
  if (gst_video_format_is_gray (format))
    enc->channels = 1;
  else
    enc->channels = 3;

  enc->h_max_samp = 0;
  enc->v_max_samp = 0;
  for (i = 0; i < enc->channels; ++i) {
    enc->cwidth[i] = gst_video_format_get_component_width (format, i, width);
    enc->cheight[i] = gst_video_format_get_component_height (format, i, height);
    enc->offset[i] = gst_video_format_get_component_offset (format, i, width,
        height);
    enc->stride[i] = gst_video_format_get_row_stride (format, i, width);
    enc->inc[i] = gst_video_format_get_pixel_stride (format, i);
    /* sampling factor = luma-plane extent / this component's extent */
    enc->h_samp[i] = GST_ROUND_UP_4 (width) / enc->cwidth[i];
    enc->h_max_samp = MAX (enc->h_max_samp, enc->h_samp[i]);
    enc->v_samp[i] = GST_ROUND_UP_4 (height) / enc->cheight[i];
    enc->v_max_samp = MAX (enc->v_max_samp, enc->v_samp[i]);
  }
  /* samp should only be 1, 2 or 4 */
  g_assert (enc->h_max_samp <= 4);
  g_assert (enc->v_max_samp <= 4);
  /* now invert */
  /* maximum is invariant, as one of the components should have samp 1 */
  for (i = 0; i < enc->channels; ++i) {
    enc->h_samp[i] = enc->h_max_samp / enc->h_samp[i];
    enc->v_samp[i] = enc->v_max_samp / enc->v_samp[i];
  }
  /* NOTE(review): for gray input (channels == 1) inc[1]/inc[2] hold stale
   * values from a previous caps set — looks benign since planar is presumably
   * only consulted for 3-channel paths, but worth confirming */
  enc->planar = (enc->inc[0] == 1 && enc->inc[1] == 1 && enc->inc[2] == 1);

  /* negotiate downstream caps matching the configured input */
  othercaps = gst_caps_copy (gst_pad_get_pad_template_caps (enc->srcpad));
  gst_caps_set_simple (othercaps,
      "width", G_TYPE_INT, enc->width, "height", G_TYPE_INT, enc->height, NULL);
  if (enc->fps_den > 0)
    gst_caps_set_simple (othercaps,
        "framerate", GST_TYPE_FRACTION, enc->fps_num, enc->fps_den, NULL);
  if (enc->par_den > 0)
    gst_caps_set_simple (othercaps,
        "pixel-aspect-ratio", GST_TYPE_FRACTION, enc->par_num, enc->par_den,
        NULL);

  ret = gst_pad_set_caps (enc->srcpad, othercaps);
  gst_caps_unref (othercaps);

  if (ret)
    gst_jpegenc_resync (enc);

  gst_object_unref (enc);

  return ret;

  /* ERRORS */
refuse_caps:
  {
    GST_WARNING_OBJECT (enc, "refused caps %" GST_PTR_FORMAT, caps);
    gst_object_unref (enc);
    return FALSE;
  }
}
/* Compute a weighted SSIM score between two raw-video buffers.
 *
 * Both buffers must carry caps describing the same raw video format and
 * dimensions; if formats/sizes differ, a value just above the configured
 * threshold is returned (treated as "different").  Each 8-bit component is
 * compared by gst_compare_ssim_component() and the per-component scores are
 * combined with weights c[] (luma gets extra weight for YUV).
 *
 * Returns the combined SSIM, or 0 on invalid/unsupported input.
 *
 * Fix: cssim[] was uninitialized; the debug loop and the final weighted sum
 * read all four entries even when comps < 4 (e.g. gray: comps == 1), reading
 * indeterminate values — now zero-initialized. */
static gdouble
gst_compare_ssim (GstCompare * comp, GstBuffer * buf1, GstBuffer * buf2)
{
  GstCaps *caps;
  GstVideoFormat format, f;
  gint width, height, w, h, i, comps;
  /* zero-init so unused components contribute nothing below */
  gdouble cssim[4] = { 0.0, 0.0, 0.0, 0.0 };
  gdouble ssim, c[4] = { 1.0, 0.0, 0.0, 0.0 };
  guint8 *data1, *data2;

  caps = GST_BUFFER_CAPS (buf1);
  if (!caps)
    goto invalid_input;
  if (!gst_video_format_parse_caps (caps, &format, &width, &height))
    goto invalid_input;

  caps = GST_BUFFER_CAPS (buf2);
  if (!caps)
    goto invalid_input;
  if (!gst_video_format_parse_caps (caps, &f, &w, &h))
    goto invalid_input;

  /* mismatched streams: report "definitely different" */
  if (f != format || w != width || h != height)
    return comp->threshold + 1;

  comps = gst_video_format_is_gray (format) ? 1 : 3;
  if (gst_video_format_has_alpha (format))
    comps += 1;

  /* note that some are reported both yuv and gray */
  for (i = 0; i < comps; ++i)
    c[i] = 1.0;
  /* increase luma weight if yuv */
  if (gst_video_format_is_yuv (format) && (comps > 1))
    c[0] = comps - 1;
  /* normalize weights so they sum to 1 */
  for (i = 0; i < comps; ++i)
    c[i] /= (gst_video_format_is_yuv (format) && (comps > 1)) ?
        2 * (comps - 1) : comps;

  data1 = GST_BUFFER_DATA (buf1);
  data2 = GST_BUFFER_DATA (buf2);
  for (i = 0; i < comps; i++) {
    gint offset, cw, ch, step, stride;

    /* only support most common formats */
    if (gst_video_format_get_component_depth (format, i) != 8)
      goto unsupported_input;
    offset = gst_video_format_get_component_offset (format, i, width, height);
    cw = gst_video_format_get_component_width (format, i, width);
    ch = gst_video_format_get_component_height (format, i, height);
    step = gst_video_format_get_pixel_stride (format, i);
    stride = gst_video_format_get_row_stride (format, i, width);
    GST_LOG_OBJECT (comp, "component %d", i);
    cssim[i] = gst_compare_ssim_component (comp, data1 + offset, data2 + offset,
        cw, ch, step, stride);
    GST_LOG_OBJECT (comp, "ssim[%d] = %f", i, cssim[i]);
  }

#ifndef GST_DISABLE_GST_DEBUG
  for (i = 0; i < 4; i++) {
    GST_DEBUG_OBJECT (comp, "ssim[%d] = %f, c[%d] = %f", i, cssim[i], i, c[i]);
  }
#endif

  /* unused entries are 0.0 * 0.0 and drop out of the sum */
  ssim = cssim[0] * c[0] + cssim[1] * c[1] + cssim[2] * c[2] + cssim[3] * c[3];

  return ssim;

  /* ERRORS */
invalid_input:
  {
    GST_ERROR_OBJECT (comp, "ssim method needs raw video input");
    return 0;
  }
unsupported_input:
  {
    GST_ERROR_OBJECT (comp,
        "raw video format not supported %" GST_PTR_FORMAT, caps);
    return 0;
  }
}