static void
clutter_glx_texture_pixmap_init (ClutterGLXTexturePixmap *self)
{
  ClutterGLXTexturePixmapPrivate *priv;

  priv = self->priv =
    G_TYPE_INSTANCE_GET_PRIVATE (self,
                                 CLUTTER_GLX_TYPE_TEXTURE_PIXMAP,
                                 ClutterGLXTexturePixmapPrivate);

  if (_ext_check_done == FALSE)
    {
      const gchar *glx_extensions = NULL;

      glx_extensions =
        glXQueryExtensionsString (clutter_x11_get_default_display (),
                                  clutter_x11_get_default_screen ());

      /* Check for the texture from pixmap extension */
      if (cogl_check_extension ("GLX_EXT_texture_from_pixmap", glx_extensions))
        {
          _gl_bind_tex_image =
            (BindTexImage) cogl_get_proc_address ("glXBindTexImageEXT");
          _gl_release_tex_image =
            (ReleaseTexImage) cogl_get_proc_address ("glXReleaseTexImageEXT");

          if (_gl_bind_tex_image && _gl_release_tex_image)
            _have_tex_from_pixmap_ext = TRUE;
        }

      _ext_check_done = TRUE;
    }
}
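/* Illustrative sketch (not part of the original file): once the two
 * GLX_EXT_texture_from_pixmap entry points resolved above are available,
 * they are typically used to attach a GLXPixmap to the currently bound GL
 * texture before drawing and to detach it afterwards. The example assumes
 * the BindTexImage/ReleaseTexImage typedefs mirror the EXT spec signatures,
 * and glx_pixmap is a hypothetical drawable created with glXCreatePixmap().
 */
static void
example_bind_and_release (Display *dpy, GLXPixmap glx_pixmap)
{
  /* Attach the front buffer of the pixmap to the bound texture object */
  _gl_bind_tex_image (dpy, glx_pixmap, GLX_FRONT_LEFT_EXT, NULL);

  /* ... draw with the texture ... */

  /* Detach again so the X server can track further pixmap updates */
  _gl_release_tex_image (dpy, glx_pixmap, GLX_FRONT_LEFT_EXT);
}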
static ClutterFeatureFlags
clutter_backend_glx_get_features (ClutterBackend *backend)
{
  ClutterBackendGLX *backend_glx = CLUTTER_BACKEND_GLX (backend);
  const gchar *glx_extensions = NULL;
  const gchar *gl_extensions = NULL;
  ClutterFeatureFlags flags;
  gboolean use_dri = FALSE;

  flags = clutter_backend_x11_get_features (backend);
  flags |= CLUTTER_FEATURE_STAGE_MULTIPLE;

  /* this will make sure that the GL context exists */
  g_assert (backend_glx->gl_context != None);
  g_assert (glXGetCurrentDrawable () != None);

  CLUTTER_NOTE (BACKEND,
                "Checking features\n"
                "  GL_VENDOR: %s\n"
                "  GL_RENDERER: %s\n"
                "  GL_VERSION: %s\n"
                "  GL_EXTENSIONS: %s",
                glGetString (GL_VENDOR),
                glGetString (GL_RENDERER),
                glGetString (GL_VERSION),
                glGetString (GL_EXTENSIONS));

  glx_extensions =
    glXQueryExtensionsString (clutter_x11_get_default_display (),
                              clutter_x11_get_default_screen ());

  CLUTTER_NOTE (BACKEND, "  GLX Extensions: %s", glx_extensions);

  gl_extensions = (const gchar *) glGetString (GL_EXTENSIONS);

  /* When using glBlitFramebuffer or glXCopySubBufferMESA for sub stage
   * redraws, we cannot rely on glXSwapIntervalSGI to throttle the blits
   * so we need to resort to manually synchronizing with the vblank so we
   * always check for the video_sync extension...
   */
  if (_cogl_check_extension ("GLX_SGI_video_sync", glx_extensions) &&
      /* Note: the GLX_SGI_video_sync spec explicitly states this extension
       * only works for direct contexts. */
      glXIsDirect (clutter_x11_get_default_display (),
                   backend_glx->gl_context))
    {
      backend_glx->get_video_sync =
        (GetVideoSyncProc) cogl_get_proc_address ("glXGetVideoSyncSGI");
      backend_glx->wait_video_sync =
        (WaitVideoSyncProc) cogl_get_proc_address ("glXWaitVideoSyncSGI");
    }

  use_dri = check_vblank_env ("dri");

  /* First check for explicit disabling, or whether vblank syncing is already
   * set up elsewhere (e.g. by the NVIDIA driver) */
  if (check_vblank_env ("none"))
    {
      CLUTTER_NOTE (BACKEND, "vblank sync: disabled at user request");
      goto vblank_setup_done;
    }

  if (g_getenv ("__GL_SYNC_TO_VBLANK") != NULL)
    {
      backend_glx->vblank_type = CLUTTER_VBLANK_GLX_SWAP;
      flags |= CLUTTER_FEATURE_SYNC_TO_VBLANK;

      CLUTTER_NOTE (BACKEND, "Using __GL_SYNC_TO_VBLANK hint");

      goto vblank_setup_done;
    }

  /* We try two GL vblank syncing mechanisms.
   * glXSwapIntervalSGI is tried first, then glXGetVideoSyncSGI.
   *
   * glXSwapIntervalSGI is known to work with Mesa and in particular
   * the Intel drivers. glXGetVideoSyncSGI has serious problems with
   * Intel drivers causing terrible frame rates, so it is only tried as a
   * fallback.
   *
   * How well glXGetVideoSyncSGI works with other drivers (ATI etc) needs
   * to be investigated. glXGetVideoSyncSGI on ATI at least seems to have
   * no effect.
   */
  if (!use_dri &&
      _cogl_check_extension ("GLX_SGI_swap_control", glx_extensions))
    {
      backend_glx->swap_interval =
        (SwapIntervalProc) cogl_get_proc_address ("glXSwapIntervalSGI");

      CLUTTER_NOTE (BACKEND, "attempting glXSwapIntervalSGI vblank setup");

      if (backend_glx->swap_interval != NULL &&
          backend_glx->swap_interval (1) == 0)
        {
          backend_glx->vblank_type = CLUTTER_VBLANK_GLX_SWAP;
          flags |= CLUTTER_FEATURE_SYNC_TO_VBLANK;

          CLUTTER_NOTE (BACKEND, "glXSwapIntervalSGI setup success");

#ifdef GLX_INTEL_swap_event
          /* GLX_INTEL_swap_event allows us to avoid blocking the CPU
           * while we wait for glXSwapBuffers to complete, and instead
           * we get an X event notifying us of completion...
           */
          if (!(clutter_paint_debug_flags & CLUTTER_DEBUG_DISABLE_SWAP_EVENTS) &&
              _cogl_check_extension ("GLX_INTEL_swap_event", glx_extensions))
            {
              flags |= CLUTTER_FEATURE_SWAP_EVENTS;
            }
#endif /* GLX_INTEL_swap_event */

          goto vblank_setup_done;
        }

      CLUTTER_NOTE (BACKEND, "glXSwapIntervalSGI vblank setup failed");
    }

  if (!use_dri &&
      !(flags & CLUTTER_FEATURE_SYNC_TO_VBLANK) &&
      _cogl_check_extension ("GLX_SGI_video_sync", glx_extensions))
    {
      CLUTTER_NOTE (BACKEND, "attempting glXGetVideoSyncSGI vblank setup");

      if ((backend_glx->get_video_sync != NULL) &&
          (backend_glx->wait_video_sync != NULL))
        {
          CLUTTER_NOTE (BACKEND, "glXGetVideoSyncSGI vblank setup success");

          backend_glx->vblank_type = CLUTTER_VBLANK_GLX;
          flags |= CLUTTER_FEATURE_SYNC_TO_VBLANK;

          goto vblank_setup_done;
        }

      CLUTTER_NOTE (BACKEND, "glXGetVideoSyncSGI vblank setup failed");
    }

#ifdef __linux__
  /*
   * DRI is really an extreme fallback - rumoured to work with Via chipsets
   */
  if (!(flags & CLUTTER_FEATURE_SYNC_TO_VBLANK))
    {
      CLUTTER_NOTE (BACKEND, "attempting DRI vblank setup");

      backend_glx->dri_fd = open ("/dev/dri/card0", O_RDWR);
      if (backend_glx->dri_fd >= 0)
        {
          CLUTTER_NOTE (BACKEND, "DRI vblank setup success");

          backend_glx->vblank_type = CLUTTER_VBLANK_DRI;
          flags |= CLUTTER_FEATURE_SYNC_TO_VBLANK;

          goto vblank_setup_done;
        }

      CLUTTER_NOTE (BACKEND, "DRI vblank setup failed");
    }
#endif /* __linux__ */

  CLUTTER_NOTE (BACKEND, "no usable vblank mechanism found");

vblank_setup_done:

  if (_cogl_check_extension ("GLX_MESA_copy_sub_buffer", glx_extensions))
    {
      backend_glx->copy_sub_buffer =
        (CopySubBufferProc) cogl_get_proc_address ("glXCopySubBufferMESA");
      backend_glx->can_blit_sub_buffer = TRUE;
      backend_glx->blit_sub_buffer_is_synchronized = TRUE;
    }
  else if (_cogl_check_extension ("GL_EXT_framebuffer_blit", gl_extensions))
    {
      CLUTTER_NOTE (BACKEND,
                    "Using glBlitFramebuffer fallback for sub_buffer copies");
      backend_glx->blit_framebuffer =
        (BlitFramebufferProc) cogl_get_proc_address ("glBlitFramebuffer");
      backend_glx->can_blit_sub_buffer = TRUE;
      backend_glx->blit_sub_buffer_is_synchronized = FALSE;
    }

  CLUTTER_NOTE (BACKEND, "backend features checked");

  return flags;
}
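/* check_vblank_env() is referenced above but not defined in this excerpt.
 * A minimal sketch, assuming it simply compares the requested name against
 * the CLUTTER_VBLANK environment variable (the real backend may also honour
 * a command line option that overrides the environment):
 */
static gboolean
check_vblank_env (const char *name)
{
  const char *vblank_name = g_getenv ("CLUTTER_VBLANK");

  return vblank_name != NULL && !g_ascii_strcasecmp (vblank_name, name);
}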
static GLXFBConfig *
get_fbconfig_for_depth (guint depth)
{
  GLXFBConfig *fbconfigs, *ret = NULL;
  int n_elements, i, found;
  Display *dpy;
  int db, stencil, alpha, mipmap, rgba, value;

  dpy = clutter_x11_get_default_display ();

  fbconfigs = glXGetFBConfigs (dpy,
                               clutter_x11_get_default_screen (),
                               &n_elements);

  db = G_MAXSHORT;
  stencil = G_MAXSHORT;
  mipmap = 0;
  rgba = 0;

  found = n_elements;

  for (i = 0; i < n_elements; i++)
    {
      XVisualInfo *vi;
      int visual_depth;

      vi = glXGetVisualFromFBConfig (dpy, fbconfigs[i]);
      if (vi == NULL)
        continue;

      visual_depth = vi->depth;

      XFree (vi);

      if (visual_depth != depth)
        continue;

      glXGetFBConfigAttrib (dpy, fbconfigs[i], GLX_ALPHA_SIZE, &alpha);
      glXGetFBConfigAttrib (dpy, fbconfigs[i], GLX_BUFFER_SIZE, &value);
      if (value != depth && (value - alpha) != depth)
        continue;

      value = 0;
      if (depth == 32)
        {
          glXGetFBConfigAttrib (dpy, fbconfigs[i],
                                GLX_BIND_TO_TEXTURE_RGBA_EXT, &value);
          if (value)
            rgba = 1;
        }

      if (!value)
        {
          if (rgba)
            continue;

          glXGetFBConfigAttrib (dpy, fbconfigs[i],
                                GLX_BIND_TO_TEXTURE_RGB_EXT, &value);
          if (!value)
            continue;
        }

      glXGetFBConfigAttrib (dpy, fbconfigs[i], GLX_DOUBLEBUFFER, &value);
      if (value > db)
        continue;
      db = value;

      glXGetFBConfigAttrib (dpy, fbconfigs[i], GLX_STENCIL_SIZE, &value);
      if (value > stencil)
        continue;
      stencil = value;

      found = i;
    }

  if (found != n_elements)
    {
      ret = g_malloc (sizeof (GLXFBConfig));
      *ret = fbconfigs[found];
    }

  if (n_elements)
    XFree (fbconfigs);

  return ret;
}
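/* Illustrative sketch (not part of the original file) of how the FBConfig
 * selected above is typically consumed: wrapping an existing X pixmap in a
 * GLXPixmap suitable for GLX_EXT_texture_from_pixmap. The attribute names
 * are the standard ones from that extension; the helper name and the
 * minimal error handling are assumptions for illustration only.
 */
static GLXPixmap
example_create_glx_pixmap (Display *dpy, Pixmap pixmap, guint depth)
{
  GLXFBConfig *fbconfig = get_fbconfig_for_depth (depth);
  GLXPixmap glx_pixmap;
  int attribs[] = {
    GLX_TEXTURE_FORMAT_EXT,
    depth == 32 ? GLX_TEXTURE_FORMAT_RGBA_EXT : GLX_TEXTURE_FORMAT_RGB_EXT,
    GLX_TEXTURE_TARGET_EXT, GLX_TEXTURE_2D_EXT,
    None
  };

  if (fbconfig == NULL)
    return None;

  glx_pixmap = glXCreatePixmap (dpy, *fbconfig, pixmap, attribs);
  g_free (fbconfig);

  return glx_pixmap;
}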
static void
clutter_x11_texture_pixmap_update_area_real (ClutterX11TexturePixmap *texture,
                                             gint                     x,
                                             gint                     y,
                                             gint                     width,
                                             gint                     height)
{
  ClutterX11TexturePixmapPrivate *priv;
  Display *dpy;
  XImage *image;
  char *first_pixel;
  GError *error = NULL;
  guint bytes_per_line;
  char *data;
  int err_code;
  char pixel_bpp;
  gboolean pixel_has_alpha;

#if 0
  clock_t start_t = clock ();
#endif

  if (!CLUTTER_ACTOR_IS_REALIZED (texture))
    return;

  priv = texture->priv;
  dpy = clutter_x11_get_default_display ();

  if (!priv->pixmap)
    return;

  if (priv->shminfo.shmid == -1)
    try_alloc_shm (texture);

  clutter_x11_trap_x_errors ();

  if (priv->have_shm)
    {
      image =
        XShmCreateImage (dpy,
                         DefaultVisual (dpy,
                                        clutter_x11_get_default_screen ()),
                         priv->depth,
                         ZPixmap,
                         NULL,
                         &priv->shminfo,
                         width,
                         height);
      image->data = priv->shminfo.shmaddr;

      XShmGetImage (dpy, priv->pixmap, image, x, y, AllPlanes);
      first_pixel = image->data;
    }
  else
    {
      if (!priv->image)
        {
          priv->image = XGetImage (dpy,
                                   priv->pixmap,
                                   0, 0,
                                   priv->pixmap_width, priv->pixmap_height,
                                   AllPlanes,
                                   ZPixmap);
          if (priv->image)
            first_pixel = priv->image->data
                        + priv->image->bytes_per_line * y
                        + x * priv->image->bits_per_pixel / 8;
          else
            {
              g_warning ("%s: XGetImage() failed", __FUNCTION__);
              return;
            }
        }
      else
        {
          XGetSubImage (dpy,
                        priv->pixmap,
                        x, y,
                        width, height,
                        AllPlanes,
                        ZPixmap,
                        priv->image,
                        x, y);
          first_pixel = priv->image->data
                      + priv->image->bytes_per_line * y
                      + x * priv->image->bits_per_pixel / 8;
        }

      image = priv->image;
    }

  XSync (dpy, FALSE);

  if ((err_code = clutter_x11_untrap_x_errors ()))
    {
      g_warning ("Failed to get XImage of pixmap: %lx, removing",
                 priv->pixmap);
      /* safe to assume pixmap has gone away? - therefore reset */
      clutter_x11_texture_pixmap_set_pixmap (texture, None);
      goto free_image_and_return;
    }

  if (priv->depth == 24)
    {
      bytes_per_line = image->bytes_per_line;
      data = first_pixel;
      pixel_bpp = 3;
      pixel_has_alpha = FALSE;
    }
  else if (priv->depth == 16)
    {
      bytes_per_line = image->bytes_per_line;
      data = first_pixel;
      pixel_bpp = 2;
      pixel_has_alpha = FALSE;
    }
  else if (priv->depth == 32)
    {
      bytes_per_line = image->bytes_per_line;
      data = first_pixel;
      pixel_bpp = 4;
      pixel_has_alpha = TRUE;
    }
  else
    goto free_image_and_return;

  if (!priv->allow_alpha)
    pixel_has_alpha = FALSE;

  /* For debugging purposes, uncomment to simply generate dummy
   * pixmap data (a green background and a blue cross).
   */
#if 0
  {
    guint xpos, ypos;

    if (data_allocated)
      g_free (data);

    data_allocated = TRUE;
    data = g_malloc (width * height * 4);
    bytes_per_line = width * 4;

    for (ypos = 0; ypos < height; ypos++)
      for (xpos = 0; xpos < width; xpos++)
        {
          char *p = data + width * 4 * ypos + xpos * 4;
          guint32 *pixel = (guint32 *) p;

          if ((xpos > width / 2 && xpos <= (width / 2) + width / 4) ||
              (ypos > height / 2 && ypos <= (height / 2) + height / 4))
            *pixel = 0xff0000ff;
          else
            *pixel = 0xff00ff00;
        }
  }
#endif

  if (x != 0 || y != 0 ||
      width != priv->pixmap_width || height != priv->pixmap_height)
    clutter_texture_set_area_from_rgb_data (CLUTTER_TEXTURE (texture),
                                            (guint8 *) data,
                                            pixel_has_alpha,
                                            x, y,
                                            width, height,
                                            bytes_per_line,
                                            pixel_bpp,
                                            CLUTTER_TEXTURE_RGB_FLAG_BGR,
                                            &error);
  else
    clutter_texture_set_from_rgb_data (CLUTTER_TEXTURE (texture),
                                       (guint8 *) data,
                                       pixel_has_alpha,
                                       width, height,
                                       bytes_per_line,
                                       pixel_bpp,
                                       CLUTTER_TEXTURE_RGB_FLAG_BGR,
                                       &error);

  if (error)
    {
      g_warning ("Error when uploading from pixbuf: %s",
                 error->message);
      g_error_free (error);
    }

free_image_and_return:
  if (priv->have_shm)
    XFree (image);

#if 0
  clock_t end_t = clock ();
  int time = (int) ((double) (end_t - start_t) * (1000.0 / CLOCKS_PER_SEC));
  g_print ("clutter-x11-update-area-real(%d,%d,%d,%d) %d bits - %d ms\n",
           x, y, width, height, priv->depth, time);
#endif
}
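/* Illustrative sketch (not part of the original file): the update path above
 * is normally driven by X Damage events. A hypothetical event filter might
 * forward the damaged rectangle to the texture like this, where
 * damage_event_base would come from XDamageQueryExtension():
 */
static void
example_on_damage_event (XEvent                  *xev,
                         ClutterX11TexturePixmap *texture,
                         int                      damage_event_base)
{
  if (xev->type == damage_event_base + XDamageNotify)
    {
      XDamageNotifyEvent *dev = (XDamageNotifyEvent *) xev;

      clutter_x11_texture_pixmap_update_area (texture,
                                              dev->area.x,
                                              dev->area.y,
                                              dev->area.width,
                                              dev->area.height);
    }
}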
/* Tries to allocate enough shared mem to handle a full size
 * update size of the X Pixmap.
 */
static gboolean
try_alloc_shm (ClutterX11TexturePixmap *texture)
{
  ClutterX11TexturePixmapPrivate *priv;
  XImage *dummy_image;
  Display *dpy;

  priv = texture->priv;
  dpy = clutter_x11_get_default_display ();

  g_return_val_if_fail (priv->pixmap, FALSE);

  if (!XShmQueryExtension (dpy) || g_getenv ("CLUTTER_X11_NO_SHM"))
    {
      priv->have_shm = FALSE;
      return FALSE;
    }

  clutter_x11_trap_x_errors ();

  /* We are creating a dummy_image so we can have Xlib calculate
   * image->bytes_per_line - including any magic padding it may
   * want - for the largest possible ximage we might need to use
   * when handling updates to the texture.
   *
   * Note: we pass a NULL shminfo here, but that has no bearing
   * on the setup of the XImage, except that ximage->obdata will
   * == NULL.
   */
  dummy_image =
    XShmCreateImage (dpy,
                     DefaultVisual (dpy,
                                    clutter_x11_get_default_screen ()),
                     priv->depth,
                     ZPixmap,
                     NULL,
                     NULL, /* shminfo, */
                     priv->pixmap_width,
                     priv->pixmap_height);
  if (!dummy_image)
    goto failed_image_create;

  priv->shminfo.shmid = shmget (IPC_PRIVATE,
                                dummy_image->bytes_per_line
                                * dummy_image->height,
                                IPC_CREAT | 0777);
  if (priv->shminfo.shmid == -1)
    goto failed_shmget;

  priv->shminfo.shmaddr = shmat (priv->shminfo.shmid, 0, 0);
  if (priv->shminfo.shmaddr == (void *) -1)
    goto failed_shmat;

  priv->shminfo.readOnly = False;

  if (XShmAttach (dpy, &priv->shminfo) == 0)
    goto failed_xshmattach;

  if (clutter_x11_untrap_x_errors ())
    g_warning ("X Error: Failed to setup XShm");

  priv->have_shm = TRUE;

  if (dummy_image)
    XFree (dummy_image);

  return TRUE;

failed_xshmattach:
  g_warning ("XShmAttach failed");
  shmdt (priv->shminfo.shmaddr);

failed_shmat:
  g_warning ("shmat failed");
  shmctl (priv->shminfo.shmid, IPC_RMID, 0);

failed_shmget:
  g_warning ("shmget failed");
  XDestroyImage (dummy_image);

failed_image_create:
  if (clutter_x11_untrap_x_errors ())
    g_warning ("X Error: Failed to setup XShm");

  priv->have_shm = FALSE;
  return FALSE;
}
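/* Illustrative sketch (not part of the original file): the segment attached
 * by try_alloc_shm() eventually has to be detached from both the X server
 * and the client. A minimal tear-down, assuming the same shminfo layout as
 * above, would look roughly like this (the helper name is hypothetical):
 */
static void
example_free_shm (ClutterX11TexturePixmapPrivate *priv, Display *dpy)
{
  if (priv->have_shm)
    {
      XShmDetach (dpy, &priv->shminfo);
      shmdt (priv->shminfo.shmaddr);
      shmctl (priv->shminfo.shmid, IPC_RMID, 0);

      priv->shminfo.shmid = -1;
      priv->have_shm = FALSE;
    }
}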
static ClutterFeatureFlags
clutter_backend_glx_get_features (ClutterBackend *backend)
{
  ClutterBackendGLX *backend_glx = CLUTTER_BACKEND_GLX (backend);
  const gchar *glx_extensions = NULL;
  ClutterFeatureFlags flags;

  flags = clutter_backend_x11_get_features (backend);
  flags |= CLUTTER_FEATURE_STAGE_MULTIPLE;

  /* this will make sure that the GL context exists and
   * it's bound to a drawable
   */
  g_assert (backend_glx->gl_context != None);
  g_assert (glXGetCurrentDrawable () != None);

  CLUTTER_NOTE (BACKEND,
                "Checking features\n"
                "GL_VENDOR: %s\n"
                "GL_RENDERER: %s\n"
                "GL_VERSION: %s\n"
                "GL_EXTENSIONS: %s\n",
                glGetString (GL_VENDOR),
                glGetString (GL_RENDERER),
                glGetString (GL_VERSION),
                glGetString (GL_EXTENSIONS));

  glx_extensions =
    glXQueryExtensionsString (clutter_x11_get_default_display (),
                              clutter_x11_get_default_screen ());

  CLUTTER_NOTE (BACKEND, "GLX Extensions: %s", glx_extensions);

  /* First check for explicit disabling, or whether vblank syncing is already
   * set up elsewhere (e.g. by the NVIDIA driver) */
  if (getenv ("__GL_SYNC_TO_VBLANK") || check_vblank_env ("none"))
    {
      CLUTTER_NOTE (BACKEND, "vblank sync: disabled at user request");
    }
  else
    {
      /* We try two GL vblank syncing mechanisms.
       * glXSwapIntervalSGI is tried first, then glXGetVideoSyncSGI.
       *
       * glXSwapIntervalSGI is known to work with Mesa and in particular
       * the Intel drivers. glXGetVideoSyncSGI has serious problems with
       * Intel drivers causing terrible frame rates, so it is only tried as a
       * fallback.
       *
       * How well glXGetVideoSyncSGI works with other drivers (ATI etc) needs
       * to be investigated. glXGetVideoSyncSGI on ATI at least seems to have
       * no effect.
       */
      if (!check_vblank_env ("dri") &&
          cogl_check_extension ("GLX_SGI_swap_control", glx_extensions))
        {
          backend_glx->swap_interval =
            (SwapIntervalProc) cogl_get_proc_address ("glXSwapIntervalSGI");

          CLUTTER_NOTE (BACKEND, "attempting glXSwapIntervalSGI vblank setup");

          if (backend_glx->swap_interval != NULL)
            {
              if (backend_glx->swap_interval (1) == 0)
                {
                  backend_glx->vblank_type = CLUTTER_VBLANK_GLX_SWAP;
                  flags |= CLUTTER_FEATURE_SYNC_TO_VBLANK;

                  CLUTTER_NOTE (BACKEND, "glXSwapIntervalSGI setup success");
                }
            }

          if (!(flags & CLUTTER_FEATURE_SYNC_TO_VBLANK))
            CLUTTER_NOTE (BACKEND, "glXSwapIntervalSGI vblank setup failed");
        }

      if (!check_vblank_env ("dri") &&
          !(flags & CLUTTER_FEATURE_SYNC_TO_VBLANK) &&
          cogl_check_extension ("GLX_SGI_video_sync", glx_extensions))
        {
          CLUTTER_NOTE (BACKEND, "attempting glXGetVideoSyncSGI vblank setup");

          backend_glx->get_video_sync =
            (GetVideoSyncProc) cogl_get_proc_address ("glXGetVideoSyncSGI");
          backend_glx->wait_video_sync =
            (WaitVideoSyncProc) cogl_get_proc_address ("glXWaitVideoSyncSGI");

          if ((backend_glx->get_video_sync != NULL) &&
              (backend_glx->wait_video_sync != NULL))
            {
              CLUTTER_NOTE (BACKEND, "glXGetVideoSyncSGI vblank setup success");

              backend_glx->vblank_type = CLUTTER_VBLANK_GLX;
              flags |= CLUTTER_FEATURE_SYNC_TO_VBLANK;
            }

          if (!(flags & CLUTTER_FEATURE_SYNC_TO_VBLANK))
            CLUTTER_NOTE (BACKEND, "glXGetVideoSyncSGI vblank setup failed");
        }

#ifdef __linux__
      /*
       * DRI is really an extreme fallback - rumoured to work with Via chipsets
       */
      if (!(flags & CLUTTER_FEATURE_SYNC_TO_VBLANK))
        {
          CLUTTER_NOTE (BACKEND, "attempting DRI vblank setup");

          backend_glx->dri_fd = open ("/dev/dri/card0", O_RDWR);
          if (backend_glx->dri_fd >= 0)
            {
              CLUTTER_NOTE (BACKEND, "DRI vblank setup success");

              backend_glx->vblank_type = CLUTTER_VBLANK_DRI;
              flags |= CLUTTER_FEATURE_SYNC_TO_VBLANK;
            }

          if (!(flags & CLUTTER_FEATURE_SYNC_TO_VBLANK))
            CLUTTER_NOTE (BACKEND, "DRI vblank setup failed");
        }
#endif

      if (!(flags & CLUTTER_FEATURE_SYNC_TO_VBLANK))
        {
          CLUTTER_NOTE (BACKEND, "no usable vblank mechanism found");
        }
    }

  CLUTTER_NOTE (MISC, "backend features checked");

  return flags;
}
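/* Illustrative sketch (not part of the original file): when the
 * CLUTTER_VBLANK_GLX mechanism is selected above, the resolved procs are
 * typically used in the standard GLX_SGI_video_sync idiom, blocking until
 * the retrace counter changes parity. This assumes the GetVideoSyncProc and
 * WaitVideoSyncProc typedefs mirror the SGI spec signatures.
 */
static void
example_wait_for_vblank_glx (ClutterBackendGLX *backend_glx)
{
  unsigned int retrace_count = 0;

  if (backend_glx->get_video_sync == NULL ||
      backend_glx->wait_video_sync == NULL)
    return;

  /* read the current retrace counter, then wait for the next retrace */
  backend_glx->get_video_sync (&retrace_count);
  backend_glx->wait_video_sync (2,
                                (retrace_count + 1) % 2,
                                &retrace_count);
}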