/*
 * virtio_gpu_primary_plane_update() - atomic update hook for the primary
 * plane.
 *
 * @plane:     the primary plane being committed
 * @old_state: the plane state being replaced
 *
 * Resolves the output from the new state's CRTC, falling back to the CRTC
 * recorded in @old_state (the one the plane is being detached from).  When
 * a framebuffer is attached and the output is enabled, a dumb BO's pixel
 * data is first transferred to the host before the scanout is (re)set.
 *
 * NOTE(review): this definition is truncated in this chunk — the bodies of
 * the outer `if` and of the function itself continue beyond the visible
 * text, so the braces below are intentionally left open.
 */
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;
	uint32_t handle;

	/*
	 * Prefer the new state's CRTC; if the plane is being moved away from
	 * a CRTC, the old state's CRTC (checked last, so it wins when both
	 * are set) identifies the output that must be reprogrammed.
	 */
	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb && output->enabled) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
		if (bo->dumb) {
			/*
			 * Dumb BOs live in guest memory only; push the
			 * visible region (src_* are 16.16 fixed point, hence
			 * the >> 16) to the host resource before scanout.
			 */
			virtio_gpu_cmd_transfer_to_host_2d
				(vgdev, bo, 0,
				 cpu_to_le32(plane->state->src_w >> 16),
				 cpu_to_le32(plane->state->src_h >> 16),
				 cpu_to_le32(plane->state->src_x >> 16),
				 cpu_to_le32(plane->state->src_y >> 16),
				 NULL);
		}
static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { struct drm_device *dev = plane->dev; struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(plane->crtc); struct virtio_gpu_framebuffer *vgfb; struct virtio_gpu_object *bo; uint32_t handle; if (plane->fb) { vgfb = to_virtio_gpu_framebuffer(plane->fb); bo = gem_to_virtio_gpu_obj(vgfb->obj); handle = bo->hw_res_handle; } else { handle = 0; } DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d\n", handle, plane->state->crtc_w, plane->state->crtc_h, plane->state->crtc_x, plane->state->crtc_y); virtio_gpu_cmd_set_scanout(vgdev, output->index, handle, plane->state->crtc_w, plane->state->crtc_h, plane->state->crtc_x, plane->state->crtc_y); }
/*
 * virtio_gpu_user_framebuffer_destroy() - release a userspace framebuffer.
 *
 * @fb: framebuffer being destroyed
 *
 * Drops the reference held on the backing GEM object (if one is attached),
 * unregisters the framebuffer from the DRM core, and frees the wrapper
 * allocation itself.
 */
static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct virtio_gpu_framebuffer *vgfb = to_virtio_gpu_framebuffer(fb);

	if (vgfb->obj)
		drm_gem_object_unreference_unlocked(vgfb->obj);

	drm_framebuffer_cleanup(fb);
	kfree(vgfb);
}
/*
 * virtio_gpu_framebuffer_surface_dirty() - DRM dirtyfb hook.
 *
 * @fb:        framebuffer with damaged regions
 * @file_priv: caller's DRM file (unused here)
 * @flags:     dirty flags (unused here)
 * @color:     annotation color (unused here)
 * @clips:     array of damage rectangles
 * @num_clips: number of entries in @clips
 *
 * Thin wrapper that forwards the damage rectangles to the driver's
 * dirty-region handler.
 *
 * Return: result of virtio_gpu_surface_dirty().
 */
static int virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
						struct drm_file *file_priv,
						unsigned flags, unsigned color,
						struct drm_clip_rect *clips,
						unsigned num_clips)
{
	return virtio_gpu_surface_dirty(to_virtio_gpu_framebuffer(fb),
					clips, num_clips);
}
/*
 * virtio_gpu_page_flip() - legacy (non-atomic) page-flip entry point.
 *
 * @crtc:  CRTC to flip
 * @fb:    framebuffer to display next
 * @event: optional vblank event to deliver to userspace once the flip is
 *         queued
 * @flags: page-flip flags (unused here)
 *
 * Attaches @fb to the primary plane, then issues the host command
 * sequence: optional TRANSFER_TO_HOST_2D (dumb BOs keep pixels in guest
 * memory), SET_SCANOUT, and RESOURCE_FLUSH covering the full mode.  The
 * command order matters to the host and must not be rearranged.
 *
 * Return: always 0.
 */
static int virtio_gpu_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t flags)
{
	struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
	struct virtio_gpu_output *output =
		container_of(crtc, struct virtio_gpu_output, crtc);
	struct drm_plane *plane = crtc->primary;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;
	unsigned long irqflags;
	uint32_t handle;

	/* Legacy path: swap the new fb directly onto the primary plane. */
	plane->fb = fb;
	vgfb = to_virtio_gpu_framebuffer(plane->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->obj);
	handle = bo->hw_res_handle;

	DRM_DEBUG("handle 0x%x%s, crtc %dx%d\n", handle,
		  bo->dumb ? ", dumb" : "",
		  crtc->mode.hdisplay, crtc->mode.vdisplay);
	if (bo->dumb) {
		/* Push the full visible area to the host resource first. */
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, handle, 0,
			 cpu_to_le32(crtc->mode.hdisplay),
			 cpu_to_le32(crtc->mode.vdisplay),
			 0, 0, NULL);
	}
	virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
				   crtc->mode.hdisplay,
				   crtc->mode.vdisplay, 0, 0);
	virtio_gpu_cmd_resource_flush(vgdev, handle, 0, 0,
				      crtc->mode.hdisplay,
				      crtc->mode.vdisplay);

	if (event) {
		/*
		 * The flip completes as soon as the commands are queued, so
		 * signal the event immediately (pipe -1 = no real vblank
		 * counter); event_lock is required by drm_send_vblank_event.
		 */
		spin_lock_irqsave(&crtc->dev->event_lock, irqflags);
		drm_send_vblank_event(crtc->dev, -1, event);
		spin_unlock_irqrestore(&crtc->dev->event_lock, irqflags);
	}

	return 0;
}