/*
 * Record 'pending' bits for this CRTC and make sure its vblank irq is
 * registered, so the pending work gets picked up on the next vblank.
 */
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	/* atomically OR the new bits into the pending mask */
	atomic_or(pending, &mdp5_crtc->pending);

	/* ensure we receive the vblank interrupt that drains 'pending' */
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}
/*
 * Kick the given flush bits to the CTL block so the hw latches the
 * corresponding configuration for this CRTC.
 */
static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);

	mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}
/*
 * flush updates, to make sure hw is updated to new scanout fb,
 * so that we can safely queue unref to current fb (ie. next
 * vblank we know hw is done w/ previous scanout_fb).
 */
static void crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* we could have already released CTL in the disable path: */
	if (!mdp5_crtc->ctl)
		return;

	/* accumulate flush bits for every plane currently on this CRTC */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		flush_mask |= mdp5_plane_get_flush(plane);
	}
	/*
	 * NOTE(review): this function appears truncated in this chunk — the
	 * accumulated flush_mask is never passed on (presumably to
	 * crtc_flush()) and the function's closing brace is missing.
	 * Confirm against the full file before changing anything here.
	 */
/*
 * Re-arm the ping-pong done completion for this CRTC, so a subsequent
 * wait on pp_completion blocks until it is signalled again.
 */
static void request_pp_done_pending(struct drm_crtc *crtc)
{
	struct mdp5_crtc *priv = to_mdp5_crtc(crtc);

	reinit_completion(&priv->pp_completion);
}