PassRefPtr<BitmapTexture> MediaPlayerPrivateGStreamerBase::updateTexture(TextureMapper* textureMapper) { WTF::GMutexLocker<GMutex> lock(m_sampleMutex); if (!GST_IS_SAMPLE(m_sample.get())) return nullptr; GstCaps* caps = gst_sample_get_caps(m_sample.get()); if (!caps) return nullptr; GstVideoInfo videoInfo; gst_video_info_init(&videoInfo); if (!gst_video_info_from_caps(&videoInfo, caps)) return nullptr; IntSize size = IntSize(GST_VIDEO_INFO_WIDTH(&videoInfo), GST_VIDEO_INFO_HEIGHT(&videoInfo)); RefPtr<BitmapTexture> texture = textureMapper->acquireTextureFromPool(size, GST_VIDEO_INFO_HAS_ALPHA(&videoInfo) ? BitmapTexture::SupportsAlpha : BitmapTexture::NoFlag); GstBuffer* buffer = gst_sample_get_buffer(m_sample.get()); #if GST_CHECK_VERSION(1, 1, 0) GstVideoGLTextureUploadMeta* meta; if ((meta = gst_buffer_get_video_gl_texture_upload_meta(buffer))) { if (meta->n_textures == 1) { // BRGx & BGRA formats use only one texture. const BitmapTextureGL* textureGL = static_cast<const BitmapTextureGL*>(texture.get()); guint ids[4] = { textureGL->id(), 0, 0, 0 }; if (gst_video_gl_texture_upload_meta_upload(meta, ids)) return texture; } } #endif // Right now the TextureMapper only supports chromas with one plane ASSERT(GST_VIDEO_INFO_N_PLANES(&videoInfo) == 1); GstVideoFrame videoFrame; if (!gst_video_frame_map(&videoFrame, &videoInfo, buffer, GST_MAP_READ)) return nullptr; int stride = GST_VIDEO_FRAME_PLANE_STRIDE(&videoFrame, 0); const void* srcData = GST_VIDEO_FRAME_PLANE_DATA(&videoFrame, 0); texture->updateContents(srcData, WebCore::IntRect(WebCore::IntPoint(0, 0), size), WebCore::IntPoint(0, 0), stride, BitmapTexture::UpdateCannotModifyOriginalImageData); gst_video_frame_unmap(&videoFrame); return texture; }
/* Parse the negotiated caps and cache the raw-video parameters (size,
 * pixel-aspect-ratio, alpha flag, full GstVideoInfo) on the sink.
 * Returns FALSE if the caps cannot be parsed as raw video. */
static gboolean
gst_gdk_pixbuf_sink_set_caps (GstBaseSink * basesink, GstCaps * caps)
{
  GstGdkPixbufSink *sink = GST_GDK_PIXBUF_SINK (basesink);
  GstVideoInfo info;
  GstVideoFormat fmt;
  gint w, h, s, par_n, par_d;

  GST_LOG_OBJECT (sink, "caps: %" GST_PTR_FORMAT, caps);

  if (!gst_video_info_from_caps (&info, caps)) {
    GST_WARNING_OBJECT (sink, "parse_caps failed");
    return FALSE;
  }

  fmt = GST_VIDEO_INFO_FORMAT (&info);
  w = GST_VIDEO_INFO_WIDTH (&info);
  h = GST_VIDEO_INFO_HEIGHT (&info);
  s = GST_VIDEO_INFO_COMP_PSTRIDE (&info, 0);
  par_n = GST_VIDEO_INFO_PAR_N (&info);
  /* Bug fix: the denominator was previously read with GST_VIDEO_INFO_PAR_N
   * as well, which collapsed every pixel-aspect-ratio to n/n (i.e. 1/1). */
  par_d = GST_VIDEO_INFO_PAR_D (&info);

  /* This sink only handles packed RGB (3 bytes/pixel) or RGBA (4). */
  g_assert ((fmt == GST_VIDEO_FORMAT_RGB && s == 3)
      || (fmt == GST_VIDEO_FORMAT_RGBA && s == 4));

  GST_VIDEO_SINK_WIDTH (sink) = w;
  GST_VIDEO_SINK_HEIGHT (sink) = h;
  sink->par_n = par_n;
  sink->par_d = par_d;
  sink->has_alpha = GST_VIDEO_INFO_HAS_ALPHA (&info);

  GST_INFO_OBJECT (sink, "format : %d", fmt);
  GST_INFO_OBJECT (sink, "width x height : %d x %d", w, h);
  GST_INFO_OBJECT (sink, "pixel-aspect-ratio : %d/%d", par_n, par_d);

  sink->info = info;

  return TRUE;
}
/* Mark the window's surfaces opaque so the Wayland compositor can skip
 * alpha blending. The area surface is always opaque; the video surface is
 * only opaque when the video format itself carries no alpha channel. */
static void
gst_wl_window_set_opaque (GstWlWindow * window, const GstVideoInfo * info)
{
  struct wl_compositor *compositor = window->display->compositor;
  struct wl_region *opaque_region;

  /* Area surface: always fully opaque */
  opaque_region = wl_compositor_create_region (compositor);
  wl_region_add (opaque_region, 0, 0, window->render_rectangle.w,
      window->render_rectangle.h);
  wl_surface_set_opaque_region (window->area_surface, opaque_region);
  wl_region_destroy (opaque_region);

  if (GST_VIDEO_INFO_HAS_ALPHA (info))
    return;

  /* Video surface: opaque as well, since the pixel format has no alpha */
  opaque_region = wl_compositor_create_region (compositor);
  wl_region_add (opaque_region, 0, 0, window->render_rectangle.w,
      window->render_rectangle.h);
  wl_surface_set_opaque_region (window->video_surface, opaque_region);
  wl_region_destroy (opaque_region);
}
void MediaPlayerPrivateGStreamerBase::paintToTextureMapper(TextureMapper* textureMapper, const FloatRect& targetRect, const TransformationMatrix& matrix, float opacity) { if (!m_player->visible()) return; if (m_usingFallbackVideoSink) { if (RefPtr<BitmapTexture> texture = updateTexture(textureMapper)) textureMapper->drawTexture(*texture.get(), targetRect, matrix, opacity); return; } #if USE(GSTREAMER_GL) if (!GST_IS_SAMPLE(m_sample.get())) return; GstCaps* caps = gst_sample_get_caps(m_sample.get()); if (!caps) return; GstVideoInfo videoInfo; gst_video_info_init(&videoInfo); if (!gst_video_info_from_caps(&videoInfo, caps)) return; GstBuffer* buffer = gst_sample_get_buffer(m_sample.get()); GstVideoFrame videoFrame; if (!gst_video_frame_map(&videoFrame, &videoInfo, buffer, static_cast<GstMapFlags>(GST_MAP_READ | GST_MAP_GL))) return; unsigned textureID = *reinterpret_cast<unsigned*>(videoFrame.data[0]); BitmapTexture::Flags flags = BitmapTexture::NoFlag; if (GST_VIDEO_INFO_HAS_ALPHA(&videoInfo)) flags |= BitmapTexture::SupportsAlpha; IntSize size = IntSize(GST_VIDEO_INFO_WIDTH(&videoInfo), GST_VIDEO_INFO_HEIGHT(&videoInfo)); TextureMapperGL* textureMapperGL = reinterpret_cast<TextureMapperGL*>(textureMapper); textureMapperGL->drawTexture(textureID, flags, size, targetRect, matrix, opacity); gst_video_frame_unmap(&videoFrame); #endif }
/* Recompute the compositor's overall output region and decide whether the
 * output frame needs an initial background fill. No-op while the cached
 * region is still valid. */
static void gst_imx_video_compositor_update_overall_region(GstImxVideoCompositor *compositor)
{
	GList *pad_item;

	/* Catch redundant calls */
	if (compositor->overall_region_valid)
		return;

	if ((compositor->overall_width != 0) && (compositor->overall_height != 0))
	{
		/* The caller pinned the output size to specific values; use them
		 * directly instead of deriving the region from the input pad
		 * canvases. */
		compositor->overall_region.x2 = compositor->overall_width;
		compositor->overall_region.y2 = compositor->overall_height;
	}
	else
	{
		gboolean is_first_pad = TRUE;

		/* Width and/or height are 0, meaning the overall region shall
		 * adapt to the input canvases and encompass all of them
		 * (exception: pads with negative xpos/ypos coordinates can have
		 * their canvas lie partially or fully outside of the overall
		 * region). Walk all sink pads and merge their outer canvas
		 * regions together. */
		for (pad_item = GST_ELEMENT(compositor)->sinkpads; pad_item != NULL; pad_item = g_list_next(pad_item))
		{
			GstImxVideoCompositorPad *compositor_pad = GST_IMX_VIDEO_COMPOSITOR_PAD_CAST(pad_item->data);
			GstImxRegion *outer_region = &(compositor_pad->canvas.outer_region);

			/* Refresh the outer region first, since the pad's
			 * xpos/ypos/width/height properties might have changed. */
			gst_imx_video_compositor_pad_compute_outer_region(compositor_pad);

			/* The pad canvases themselves are deliberately NOT updated
			 * here: canvas updates need a valid overall region, which is
			 * exactly what this loop is computing. They are done later,
			 * during frame aggregation, when necessary. Only the outer
			 * region (computed above) is needed at this point. */

			if (is_first_pad)
			{
				/* First visited pad: just copy its outer region */
				compositor->overall_region = *outer_region;
				is_first_pad = FALSE;
			}
			else
				gst_imx_region_merge(&(compositor->overall_region), &(compositor->overall_region), outer_region);

			GST_DEBUG_OBJECT(compositor, "current outer region: %" GST_IMX_REGION_FORMAT " merged overall region: %" GST_IMX_REGION_FORMAT, GST_IMX_REGION_ARGS(outer_region), GST_IMX_REGION_ARGS(&(compositor->overall_region)));
		}
	}

	/* Anchor the overall region at (0,0); any other topleft coordinates
	 * make little sense. */
	compositor->overall_region.x1 = 0;
	compositor->overall_region.y1 = 0;

	/* Decide whether the frame has to be cleared (= filling the overall
	 * region) before compositing. If at least one pad's outer region
	 * fully covers the overall region AND that pad is completely opaque,
	 * its pixels overwrite the whole frame anyway, so the fill can be
	 * skipped, saving bandwidth. */
	compositor->region_fill_necessary = TRUE;
	for (pad_item = GST_ELEMENT(compositor)->sinkpads; pad_item != NULL; pad_item = g_list_next(pad_item))
	{
		GstImxVideoCompositorPad *compositor_pad = GST_IMX_VIDEO_COMPOSITOR_PAD_CAST(pad_item->data);
		GstImxRegion *outer_region = &(compositor_pad->canvas.outer_region);
		GstVideoInfo *info = &(GST_IMXBP_VIDEO_AGGREGATOR_PAD(compositor_pad)->info);

		if (gst_imx_region_contains(&(compositor->overall_region), outer_region) == GST_IMX_REGION_CONTAINS_FULL)
		{
			/* This pad covers the whole overall region. Blending (and
			 * thus the fill) is still needed if the pad's global alpha
			 * is below 1.0 or the video frames carry per-pixel alpha
			 * (GST_VIDEO_INFO_HAS_ALPHA() returns TRUE then). A single
			 * fully opaque covering pad is enough to skip the fill, so
			 * stop searching as soon as one is found; otherwise keep
			 * looking at the other pads. */
			compositor->region_fill_necessary = (compositor_pad->alpha < 1.0) || GST_VIDEO_INFO_HAS_ALPHA(info);
			if (!compositor->region_fill_necessary)
				break;
		}
	}

	compositor->overall_region_valid = TRUE;
}