static void page_flip_cb(void *arg)
{
	struct drm_crtc *crtc = arg;
	struct drm_device *dev = crtc->dev;
	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
	struct drm_pending_vblank_event *event = omap_crtc->event;
	struct timeval now;
	unsigned long flags;

	WARN_ON(!event);

	omap_crtc->event = NULL;

	update_scanout(crtc);
	WARN_ON(commit(crtc));

	/* wakeup userspace */
	/* TODO: this should happen *after* flip in vsync IRQ handler */
	if (event) {
		spin_lock_irqsave(&dev->event_lock, flags);
		event->event.sequence = drm_vblank_count_and_time(
				dev, omap_crtc->id, &now);
		event->event.tv_sec = now.tv_sec;
		event->event.tv_usec = now.tv_usec;
		list_add_tail(&event->base.link,
				&event->base.file_priv->event_list);
		wake_up_interruptible(&event->base.file_priv->event_wait);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
/**
 * drm_vblank_off - disable vblank events on a CRTC
 * @dev: DRM device
 * @crtc: CRTC in question
 *
 * Caller must hold event lock.
 */
void drm_vblank_off(struct drm_device *dev, int crtc)
{
	struct drmevlist *list;
	struct drm_pending_event *ev, *tmp;
	struct drm_pending_vblank_event *vev;
	struct timeval now;
	unsigned int seq;

	mtx_enter(&dev->vbl_lock);
	vblank_disable_and_save(dev, crtc);
	wakeup(&dev->vbl_queue[crtc]);

	list = &dev->vbl_events;
	/* Send any queued vblank events, lest the natives grow disquiet */
	seq = drm_vblank_count_and_time(dev, crtc, &now);

	mtx_enter(&dev->event_lock);
	for (ev = TAILQ_FIRST(list); ev != TAILQ_END(list); ev = tmp) {
		tmp = TAILQ_NEXT(ev, link);

		vev = (struct drm_pending_vblank_event *)ev;

		if (vev->pipe != crtc)
			continue;

		DRM_DEBUG("Sending premature vblank event on disable: "
		    "wanted %d, current %d\n",
		    vev->event.sequence, seq);

		TAILQ_REMOVE(list, ev, link);
		drm_vblank_put(dev, vev->pipe);
		send_vblank_event(dev, vev, seq, &now);
	}
	mtx_leave(&dev->event_lock);
	mtx_leave(&dev->vbl_lock);
}
/**
 * drm_send_vblank_event - helper to send vblank event after pageflip
 * @dev: DRM device
 * @crtc: CRTC in question
 * @e: the event to send
 *
 * Updates sequence # and timestamp on event, and sends it to userspace.
 * Caller must hold event lock.
 */
void drm_send_vblank_event(struct drm_device *dev, int crtc,
			   struct drm_pending_vblank_event *e)
{
	struct timeval now;
	unsigned int seq;

	if (crtc >= 0) {
		seq = drm_vblank_count_and_time(dev, crtc, &now);
	} else {
		seq = 0;
		now = get_drm_timestamp();
	}
	send_vblank_event(dev, e, seq, &now);
}
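/*
 * Illustrative usage sketch, not part of the DRM core: a driver's page-flip
 * completion path typically calls the helper above while holding
 * dev->event_lock, as its kerneldoc requires. The foo_crtc structure, its
 * pipe and flip_event fields, and foo_finish_page_flip() are hypothetical
 * names used only for this example.
 */
static void foo_finish_page_flip(struct drm_device *dev,
				 struct foo_crtc *foo_crtc)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (foo_crtc->flip_event) {
		/* Stamps sequence/timestamp and queues the event for userspace. */
		drm_send_vblank_event(dev, foo_crtc->pipe,
				      foo_crtc->flip_event);
		foo_crtc->flip_event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}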
void pl111_common_irq(struct pl111_drm_crtc *pl111_crtc)
{
	struct drm_device *dev = pl111_crtc->crtc.dev;
	struct pl111_drm_flip_resource *old_flip_res;
	struct pl111_gem_bo *bo;
	unsigned long irq_flags;
	int flips_in_flight;
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
	unsigned long flags;
#endif

	spin_lock_irqsave(&pl111_crtc->base_update_lock, irq_flags);

	/*
	 * Cache the flip resource that caused the IRQ since it will be
	 * dispatched later. Early return if the IRQ isn't associated with
	 * a base register update.
	 *
	 * TODO MIDBASE-2790: disable IRQs when a flip is not pending.
	 */
	old_flip_res = pl111_crtc->current_update_res;
	if (!old_flip_res) {
		spin_unlock_irqrestore(&pl111_crtc->base_update_lock,
				       irq_flags);
		return;
	}
	pl111_crtc->current_update_res = NULL;

	/* Prepare the next flip (if any) of the queue as soon as possible. */
	if (!list_empty(&pl111_crtc->update_queue)) {
		struct pl111_drm_flip_resource *flip_res;

		/* Remove the head of the list */
		flip_res = list_first_entry(&pl111_crtc->update_queue,
					    struct pl111_drm_flip_resource,
					    link);
		list_del(&flip_res->link);
		do_flip_to_res(flip_res);
		/*
		 * current_update_res will be set, which guarantees that
		 * another flip_res coming in gets queued instead of
		 * handled immediately.
		 */
	}
	spin_unlock_irqrestore(&pl111_crtc->base_update_lock, irq_flags);

	/* Properly finalize the flip that caused the IRQ */
	DRM_DEBUG_KMS("DRM Finalizing old_flip_res=%p\n", old_flip_res);

	bo = PL111_BO_FROM_FRAMEBUFFER(old_flip_res->fb);
#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
	spin_lock_irqsave(&pl111_crtc->current_displaying_lock, flags);
	release_kds_resource_and_display(old_flip_res);
	spin_unlock_irqrestore(&pl111_crtc->current_displaying_lock, flags);
#endif
	/* Release DMA buffer on this flip */
	if (bo->gem_object.export_dma_buf != NULL)
		dma_buf_put(bo->gem_object.export_dma_buf);

	drm_handle_vblank(dev, pl111_crtc->crtc_index);

	/* Wake up any processes waiting for page flip event */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
	if (old_flip_res->event) {
		spin_lock_bh(&dev->event_lock);
		drm_send_vblank_event(dev, pl111_crtc->crtc_index,
				      old_flip_res->event);
		spin_unlock_bh(&dev->event_lock);
	}
#else
	if (old_flip_res->event) {
		struct drm_pending_vblank_event *e = old_flip_res->event;
		struct timeval now;
		unsigned int seq;

		DRM_DEBUG_KMS("%s: wake up page flip event (%p)\n", __func__,
			      old_flip_res->event);

		spin_lock_bh(&dev->event_lock);
		seq = drm_vblank_count_and_time(dev, pl111_crtc->crtc_index,
						&now);
		e->pipe = pl111_crtc->crtc_index;
		e->event.sequence = seq;
		e->event.tv_sec = now.tv_sec;
		e->event.tv_usec = now.tv_usec;
		list_add_tail(&e->base.link,
			      &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
		spin_unlock_bh(&dev->event_lock);
	}
#endif

	drm_vblank_put(dev, pl111_crtc->crtc_index);

	/*
	 * workqueue.c:process_one_work():
	 * "It is permissible to free the struct work_struct from
	 * inside the function that is called from it"
	 */
	kmem_cache_free(priv.page_flip_slab, old_flip_res);

	flips_in_flight = atomic_dec_return(&priv.nr_flips_in_flight);
	if (flips_in_flight == 0 ||
	    flips_in_flight == (NR_FLIPS_IN_FLIGHT_THRESHOLD - 1))
		wake_up(&priv.wait_for_flips);

	DRM_DEBUG_KMS("DRM release flip_res=%p\n", old_flip_res);
}
static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
    union drm_wait_vblank *vblwait, struct drm_file *file_priv)
{
	struct drm_pending_vblank_event *e;
	struct timeval now;
	unsigned int seq;
	int ret;

	e = malloc(sizeof *e, M_DRM, M_ZERO | M_WAITOK);
	if (e == NULL) {
		ret = -ENOMEM;
		goto err_put;
	}

	e->pipe = pipe;
	e->base.pid = DRM_CURRENTPID;
	e->event.base.type = DRM_EVENT_VBLANK;
	e->event.base.length = sizeof e->event;
	e->event.user_data = vblwait->request.signal;
	e->base.event = &e->event.base;
	e->base.file_priv = file_priv;
	e->base.destroy = (void (*)(struct drm_pending_event *))drm_free;

	mtx_enter(&dev->event_lock);

	if (file_priv->event_space < sizeof e->event) {
		ret = -EBUSY;
		goto err_unlock;
	}

	file_priv->event_space -= sizeof e->event;
	seq = drm_vblank_count_and_time(dev, pipe, &now);

	/*
	 * If the requested sequence has already passed (the unsigned
	 * difference allows for counter wraparound) and NEXTONMISS is set,
	 * retarget the request to the next vblank.
	 */
	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
	    (seq - vblwait->request.sequence) <= (1 << 23)) {
		vblwait->request.sequence = seq + 1;
		vblwait->reply.sequence = vblwait->request.sequence;
	}

	DPRINTF("event on vblank count %d, current %d, crtc %d\n",
	    vblwait->request.sequence, seq, pipe);

#if 0
	trace_drm_vblank_event_queued(current->pid, pipe,
	    vblwait->request.sequence);
#endif

	e->event.sequence = vblwait->request.sequence;
	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
		/* Requested sequence already reached: deliver immediately. */
		drm_vblank_put(dev, pipe);
		send_vblank_event(dev, e, seq, &now);
		vblwait->reply.sequence = seq;
	} else {
		/* drm_handle_vblank_events will call drm_vblank_put */
		TAILQ_INSERT_TAIL(&dev->vbl_events, &e->base, link);
		vblwait->reply.sequence = vblwait->request.sequence;
	}

	mtx_leave(&dev->event_lock);

	return 0;

err_unlock:
	mtx_leave(&dev->event_lock);
	free(e, M_DRM);
err_put:
	drm_vblank_put(dev, pipe);
	return ret;
}