/*
 * Complete a pending page flip on this CRTC: deliver the queued vblank
 * event (if any) to userspace and drop the vblank reference that was
 * taken when the flip was armed.  All of it runs under the DRM event
 * lock, matching the original ordering.
 */
static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
{
	struct drm_device *drm = ipu_crtc->base.dev;
	unsigned long irq_state;

	spin_lock_irqsave(&drm->event_lock, irq_state);

	if (ipu_crtc->page_flip_event)
		drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event);
	ipu_crtc->page_flip_event = NULL;

	/* Reference is dropped unconditionally, exactly as before. */
	imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);

	spin_unlock_irqrestore(&drm->event_lock, irq_state);
}
/*
 * Finish a page flip: atomically claim the pending vblank event under
 * the event lock, then send it to userspace and release the vblank
 * reference (pipe 0).
 */
static void xilinx_drm_crtc_finish_page_flip(struct drm_crtc *base_crtc)
{
	struct xilinx_drm_crtc *crtc = to_xilinx_crtc(base_crtc);
	struct drm_device *drm = base_crtc->dev;
	struct drm_pending_vblank_event *pending;
	unsigned long irqflags;

	spin_lock_irqsave(&drm->event_lock, irqflags);

	/* Claim the event so a concurrent flip cannot complete it twice. */
	pending = crtc->event;
	crtc->event = NULL;

	if (pending) {
		drm_send_vblank_event(drm, 0, pending);
		drm_vblank_put(drm, 0);
	}

	spin_unlock_irqrestore(&drm->event_lock, irqflags);
}
/*
 * Vblank interrupt handler: bump the vblank counter for pipe 0, then
 * complete any pending page-flip event under the DRM event lock.
 */
static void xylon_drm_crtc_vblank_handler(struct drm_crtc *base_crtc)
{
	struct xylon_drm_crtc *crtc = to_xylon_crtc(base_crtc);
	struct drm_device *dev = base_crtc->dev;
	struct drm_pending_vblank_event *pending;
	unsigned long irqflags;

	/* Account the vblank before dispatching any completion event. */
	drm_handle_vblank(dev, 0);

	spin_lock_irqsave(&dev->event_lock, irqflags);

	pending = crtc->event;
	crtc->event = NULL;
	if (pending) {
		drm_send_vblank_event(dev, 0, pending);
		drm_vblank_put(dev, 0);
	}

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
/*
 * Execute a page flip on a virtio-gpu output.
 *
 * Attaches @fb to the primary plane, then issues the host commands:
 * an optional transfer-to-host for dumb buffers, a set-scanout, and a
 * resource flush for the full mode rectangle.  If @event is non-NULL
 * it is completed immediately under the event lock (virtio-gpu has no
 * real vblank; crtc index -1 requests "now" timestamping).
 *
 * Returns 0 (the flip is always accepted).
 */
static int virtio_gpu_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t flags)
{
	struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
	struct virtio_gpu_output *output =
		container_of(crtc, struct virtio_gpu_output, crtc);
	struct drm_plane *primary = crtc->primary;
	struct virtio_gpu_framebuffer *gpu_fb;
	struct virtio_gpu_object *gem_obj;
	unsigned long event_flags;
	uint32_t res_handle;

	primary->fb = fb;
	gpu_fb = to_virtio_gpu_framebuffer(primary->fb);
	gem_obj = gem_to_virtio_gpu_obj(gpu_fb->obj);
	res_handle = gem_obj->hw_res_handle;

	DRM_DEBUG("handle 0x%x%s, crtc %dx%d\n", res_handle,
		  gem_obj->dumb ? ", dumb" : "",
		  crtc->mode.hdisplay, crtc->mode.vdisplay);

	/* Dumb buffers live in guest memory and must be uploaded first. */
	if (gem_obj->dumb)
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, res_handle, 0,
			 cpu_to_le32(crtc->mode.hdisplay),
			 cpu_to_le32(crtc->mode.vdisplay),
			 0, 0, NULL);

	virtio_gpu_cmd_set_scanout(vgdev, output->index, res_handle,
				   crtc->mode.hdisplay, crtc->mode.vdisplay,
				   0, 0);
	virtio_gpu_cmd_resource_flush(vgdev, res_handle, 0, 0,
				      crtc->mode.hdisplay,
				      crtc->mode.vdisplay);

	if (event) {
		spin_lock_irqsave(&crtc->dev->event_lock, event_flags);
		drm_send_vblank_event(crtc->dev, -1, event);
		spin_unlock_irqrestore(&crtc->dev->event_lock, event_flags);
	}

	return 0;
}
/*
 * Page flip for the bochs dispi interface: point the primary plane at
 * the new framebuffer, reprogram the scanout base, and complete the
 * userspace event at once (crtc index -1 -> immediate timestamp).
 *
 * Always returns 0.
 */
static int bochs_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct bochs_device *bochs =
		container_of(crtc, struct bochs_device, crtc);
	struct drm_framebuffer *previous_fb = crtc->primary->fb;
	unsigned long lock_flags;

	crtc->primary->fb = fb;
	bochs_crtc_mode_set_base(crtc, 0, 0, previous_fb);

	if (event) {
		spin_lock_irqsave(&bochs->dev->event_lock, lock_flags);
		drm_send_vblank_event(bochs->dev, -1, event);
		spin_unlock_irqrestore(&bochs->dev->event_lock, lock_flags);
	}

	return 0;
}
/*
 * Complete a pending page flip on this CRTC.
 *
 * Claims and sends the pending vblank event in a single event_lock
 * critical section (the original code dropped and re-acquired the lock
 * between claiming the event and sending it, which added a redundant
 * lock round-trip without protecting anything extra), then releases
 * the vblank reference outside the lock.
 */
static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
{
	struct drm_device *dev = rcrtc->crtc.dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = rcrtc->event;
	rcrtc->event = NULL;
	if (event)
		drm_send_vblank_event(dev, rcrtc->index, event);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (event == NULL)
		return;

	/* drm_vblank_put() does not require the event lock. */
	drm_vblank_put(dev, rcrtc->index);
}
/* if file!=NULL, this is preclose potential cancel-flip path */ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_pending_vblank_event *event; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); event = mdp4_crtc->event; if (event) { /* if regular vblank case (!file) or if cancel-flip from * preclose on file that requested flip, then send the * event: */ if (!file || (event->base.file_priv == file)) { mdp4_crtc->event = NULL; drm_send_vblank_event(dev, mdp4_crtc->id, event); } } spin_unlock_irqrestore(&dev->event_lock, flags); }
/*
 * Common IRQ handler for a PL111 CRTC: finalizes the flip whose base
 * register update triggered this interrupt, kicks off the next queued
 * flip, delivers the userspace vblank event, and frees the flip
 * resource.  NOTE(review): assumes it runs in a context where
 * spin_lock_bh on dev->event_lock is legal (i.e. not hard-IRQ) — the
 * workqueue comment near the bottom suggests worker context; confirm.
 */
void pl111_common_irq(struct pl111_drm_crtc *pl111_crtc)
{
	struct drm_device *dev = pl111_crtc->crtc.dev;
	struct pl111_drm_flip_resource *old_flip_res;
	struct pl111_gem_bo *bo;
	unsigned long irq_flags;
	int flips_in_flight;
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
	unsigned long flags;
#endif

	spin_lock_irqsave(&pl111_crtc->base_update_lock, irq_flags);

	/*
	 * Cache the flip resource that caused the IRQ since it will be
	 * dispatched later. Early return if the IRQ isn't associated to
	 * a base register update.
	 *
	 * TODO MIDBASE-2790: disable IRQs when a flip is not pending.
	 */
	old_flip_res = pl111_crtc->current_update_res;
	if (!old_flip_res) {
		spin_unlock_irqrestore(&pl111_crtc->base_update_lock,
					irq_flags);
		return;
	}
	pl111_crtc->current_update_res = NULL;

	/* Prepare the next flip (if any) of the queue as soon as possible. */
	if (!list_empty(&pl111_crtc->update_queue)) {
		struct pl111_drm_flip_resource *flip_res;
		/* Remove the head of the list */
		flip_res = list_first_entry(&pl111_crtc->update_queue,
					struct pl111_drm_flip_resource,
					link);
		list_del(&flip_res->link);
		do_flip_to_res(flip_res);
		/*
		 * current_update_res will be set, so guarantees that
		 * another flip_res coming in gets queued instead of
		 * handled immediately
		 */
	}
	spin_unlock_irqrestore(&pl111_crtc->base_update_lock, irq_flags);

	/* Finalize properly the flip that caused the IRQ */
	DRM_DEBUG_KMS("DRM Finalizing old_flip_res=%p\n", old_flip_res);

	bo = PL111_BO_FROM_FRAMEBUFFER(old_flip_res->fb);
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
	/* Hand the displayed buffer's KDS resource back under its own lock. */
	spin_lock_irqsave(&pl111_crtc->current_displaying_lock, flags);
	release_kds_resource_and_display(old_flip_res);
	spin_unlock_irqrestore(&pl111_crtc->current_displaying_lock, flags);
#endif
	/* Release DMA buffer on this flip */
	if (bo->gem_object.export_dma_buf != NULL)
		dma_buf_put(bo->gem_object.export_dma_buf);

	/* Account the vblank for this pipe before delivering the event. */
	drm_handle_vblank(dev, pl111_crtc->crtc_index);

	/* Wake up any processes waiting for page flip event */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
	/* >= 3.8: the core helper queues and wakes the waiter for us. */
	if (old_flip_res->event) {
		spin_lock_bh(&dev->event_lock);
		drm_send_vblank_event(dev, pl111_crtc->crtc_index,
					old_flip_res->event);
		spin_unlock_bh(&dev->event_lock);
	}
#else
	/* < 3.8: fill in the event timestamp/sequence and queue it by hand. */
	if (old_flip_res->event) {
		struct drm_pending_vblank_event *e = old_flip_res->event;
		struct timeval now;
		unsigned int seq;

		DRM_DEBUG_KMS("%s: wake up page flip event (%p)\n", __func__,
				old_flip_res->event);
		spin_lock_bh(&dev->event_lock);
		seq = drm_vblank_count_and_time(dev, pl111_crtc->crtc_index,
						&now);
		e->pipe = pl111_crtc->crtc_index;
		e->event.sequence = seq;
		e->event.tv_sec = now.tv_sec;
		e->event.tv_usec = now.tv_usec;
		list_add_tail(&e->base.link,
				&e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
		spin_unlock_bh(&dev->event_lock);
	}
#endif
	/* Drop the vblank reference taken when the flip was queued. */
	drm_vblank_put(dev, pl111_crtc->crtc_index);

	/*
	 * workqueue.c:process_one_work():
	 * "It is permissible to free the struct work_struct from
	 * inside the function that is called from it"
	 */
	kmem_cache_free(priv.page_flip_slab, old_flip_res);

	/*
	 * Wake throttled flip submitters when the in-flight count drains
	 * to zero or drops just below the threshold.
	 */
	flips_in_flight = atomic_dec_return(&priv.nr_flips_in_flight);
	if (flips_in_flight == 0 ||
		flips_in_flight == (NR_FLIPS_IN_FLIGHT_THRESHOLD - 1))
		wake_up(&priv.wait_for_flips);

	DRM_DEBUG_KMS("DRM release flip_res=%p\n", old_flip_res);
}