static void dmafetch_set_fmt(struct mmp_overlay *overlay) { u32 tmp; struct mmp_path *path = overlay->path; tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id)); tmp &= ~dma_mask(overlay_is_vid(overlay)); tmp |= fmt_to_reg(overlay, overlay->win.pix_fmt); writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id)); }
static void path_set_default(struct mmp_path *path) { struct lcd_regs *regs = path_regs(path); u32 dma_ctrl1, mask, tmp, path_config; path_config = path_to_path_plat(path)->path_config; /* Configure IOPAD: should be parallel only */ if (PATH_OUT_PARALLEL == path->output_type) { mask = CFG_IOPADMODE_MASK | CFG_BURST_MASK | CFG_BOUNDARY_MASK; tmp = readl_relaxed(ctrl_regs(path) + SPU_IOPAD_CONTROL); tmp &= ~mask; tmp |= path_config; writel_relaxed(tmp, ctrl_regs(path) + SPU_IOPAD_CONTROL); } /* Select path clock source */ tmp = readl_relaxed(ctrl_regs(path) + LCD_SCLK(path)); tmp &= ~SCLK_SRC_SEL_MASK; tmp |= path_config; writel_relaxed(tmp, ctrl_regs(path) + LCD_SCLK(path)); /* * Configure default bits: vsync triggers DMA, * power save enable, configure alpha registers to * display 100% graphics, and set pixel command. */ dma_ctrl1 = 0x2032ff81; dma_ctrl1 |= CFG_VSYNC_INV_MASK; writel_relaxed(dma_ctrl1, ctrl_regs(path) + dma_ctrl(1, path->id)); /* Configure default register values */ writel_relaxed(0x00000000, ®s->blank_color); writel_relaxed(0x00000000, ®s->g_1); writel_relaxed(0x00000000, ®s->g_start); /* * 1.enable multiple burst request in DMA AXI * bus arbiter for faster read if not tv path; * 2.enable horizontal smooth filter; */ if (PATH_PN == path->id) { mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK | CFG_ARBFAST_ENA(1); tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id)); tmp |= mask; writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id)); } else if (PATH_TV == path->id) { mask = CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK | CFG_ARBFAST_ENA(1); tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id)); tmp &= ~mask; tmp |= CFG_GRA_HSMOOTH_MASK | CFG_DMA_HSMOOTH_MASK; writel_relaxed(tmp, ctrl_regs(path) + dma_ctrl(0, path->id)); } }
static void dmafetch_onoff(struct mmp_overlay *overlay, int on) { u32 mask = overlay_is_vid(overlay) ? CFG_DMA_ENA_MASK : CFG_GRA_ENA_MASK; u32 enable = overlay_is_vid(overlay) ? CFG_DMA_ENA(1) : CFG_GRA_ENA(1); u32 tmp; struct mmp_path *path = overlay->path; mutex_lock(&overlay->access_ok); tmp = readl_relaxed(ctrl_regs(path) + dma_ctrl(0, path->id)); tmp &= ~mask; tmp |= (on ? enable : 0); writel(tmp, ctrl_regs(path) + dma_ctrl(0, path->id)); mutex_unlock(&overlay->access_ok); }
/**
 * sw_dma_ctl - dma ctrl operation
 * @dma_hdl: dma handle
 * @op: dma operation type
 * @parg: arg for the op
 *
 * Returns 0 if success, otherwise failed
 */
u32 sw_dma_ctl(dma_hdl_t dma_hdl, dma_op_type_e op, void *parg)
{
	/*
	 * BUG_ON() already wraps its condition in unlikely(), so the
	 * explicit unlikely() here was redundant and has been dropped.
	 */
	BUG_ON(!dma_handle_is_valid(dma_hdl));

	return dma_ctrl(dma_hdl, op, parg);
}