/* Set the DMA base address of graphic layer @idx and enable or disable
 * the layer depending on whether @addr is non-zero. Both writes happen
 * with shadow updates frozen so they are committed together.
 */
void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
{
	unsigned long flags;
	u32 enable = addr ? ~0 : 0;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	switch (idx) {
	case 0:
		mxr_write_mask(mdev, MXR_CFG, enable, MXR_CFG_GRP0_ENABLE);
		mxr_write(mdev, MXR_GRAPHIC_BASE(0), addr);
		break;
	case 1:
		mxr_write_mask(mdev, MXR_CFG, enable, MXR_CFG_GRP1_ENABLE);
		mxr_write(mdev, MXR_GRAPHIC_BASE(1), addr);
		break;
	case 2:
		mxr_write_mask(mdev, MXR_CFG, enable, MXR_CFG_MX1_GRP0_ENABLE);
		mxr_write(mdev, MXR1_GRAPHIC_BASE(0), addr);
		break;
	case 3:
		mxr_write_mask(mdev, MXR_CFG, enable, MXR_CFG_MX1_GRP1_ENABLE);
		mxr_write(mdev, MXR1_GRAPHIC_BASE(1), addr);
		break;
	}

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/* Enable (@en non-zero) or disable the blank color key of graphic layer
 * @num on sub-mixer @sub_mxr. Unknown sub-mixer/layer combinations are
 * silently ignored. Shadow updates are frozen while the bit is flipped.
 */
void mxr_reg_set_colorkey(struct mxr_device *mdev, int sub_mxr, int num, int en)
{
	u32 val = en ? ~0 : 0; /* all-ones so any masked bit position is set */
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);
	if (sub_mxr == MXR_SUB_MIXER0 && num == MXR_LAYER_GRP0)
		mxr_write_mask(mdev, MXR_GRAPHIC_CFG(0), val,
				MXR_GRP_CFG_BLANK_KEY_EN);
	else if (sub_mxr == MXR_SUB_MIXER0 && num == MXR_LAYER_GRP1)
		mxr_write_mask(mdev, MXR_GRAPHIC_CFG(1), val,
				MXR_GRP_CFG_BLANK_KEY_EN);
#if defined(CONFIG_ARCH_EXYNOS5)
	/* the second sub-mixer registers are compiled in on Exynos5 only */
	else if (sub_mxr == MXR_SUB_MIXER1 && num == MXR_LAYER_GRP0)
		mxr_write_mask(mdev, MXR1_GRAPHIC_CFG(0), val,
				MXR_GRP_CFG_BLANK_KEY_EN);
	else if (sub_mxr == MXR_SUB_MIXER1 && num == MXR_LAYER_GRP1)
		mxr_write_mask(mdev, MXR1_GRAPHIC_CFG(1), val,
				MXR_GRP_CFG_BLANK_KEY_EN);
#endif
	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
void mxr_reg_set_color_range(struct mxr_device *mdev) { u32 val = 0; unsigned long flags; spin_lock_irqsave(&mdev->reg_slock, flags); mxr_vsync_set_update(mdev, MXR_DISABLE); val = mdev->color_range; mxr_write_mask(mdev, MXR_CFG, MXR_CFG_COLOR_RANGE(val), MXR_CFG_COLOR_RANGE_MASK); if (mdev->color_range == MIXER_RGB709_16_235 || mdev->color_range == MIXER_RGB601_16_235) { val = MXR_VIDEO_LIMITER_PARA_Y_UPPER(235) | MXR_VIDEO_LIMITER_PARA_Y_LOWER(16) | MXR_VIDEO_LIMITER_PARA_C_UPPER(235) | MXR_VIDEO_LIMITER_PARA_C_LOWER(16); } else { val = MXR_VIDEO_LIMITER_PARA_Y_UPPER(255) | MXR_VIDEO_LIMITER_PARA_Y_LOWER(0) | MXR_VIDEO_LIMITER_PARA_C_UPPER(255) | MXR_VIDEO_LIMITER_PARA_C_LOWER(0); } mxr_write(mdev, MXR_VIDEO_LIMITER_PARA_CFG, val); mxr_write_mask(mdev, MXR_VIDEO_CFG, 1, MXR_VIDEO_CFG_LIMITER_EN); mxr_vsync_set_update(mdev, MXR_ENABLE); spin_unlock_irqrestore(&mdev->reg_slock, flags); mxr_init_csc_coef(mdev); }
/* Reset the mixer block: YUV444 output, 16-beat DMA bursts, default
 * state for every sub-mixer, Video Processor defaults, and all
 * interrupts enabled. Shadow updates are frozen for the whole sequence.
 */
void mxr_reg_reset(struct mxr_device *mdev)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* set output in YUV444 mode (comment previously said RGB888,
	 * contradicting the value actually written) */
	mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_YUV444);

	/* 16 beat burst in DMA */
	mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
		MXR_STATUS_BURST_MASK);

	/* reset each sub-mixer to its defaults */
	for (i = 0; i < MXR_MAX_SUB_MIXERS; ++i)
		mxr_reg_sub_mxr_reset(mdev, i);

	/* configuration of Video Processor Registers */
	__mxr_reg_vp_reset(mdev);
	mxr_reg_vp_default_filter(mdev);

	/* enable all interrupts */
	mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/* Enable (@en non-zero) or disable the video layer of sub-mixer @idx.
 * Indices other than 0 and 1 are ignored. No locking here — single
 * masked write per call, matching the original.
 */
void mxr_reg_video_layer_stream(struct mxr_device *mdev, int idx, int en)
{
	u32 on = en ? ~0 : 0;

	switch (idx) {
	case 0:
		mxr_write_mask(mdev, MXR_CFG, on, MXR_CFG_VIDEO_ENABLE);
		break;
	case 1:
		mxr_write_mask(mdev, MXR_CFG, on, MXR_CFG_MX1_VIDEO_ENABLE);
		break;
	}
}
/* Reset the mixer to defaults: RGB888 output, 16-beat DMA bursts,
 * layer priorities, dark-teal background, layer-blended opaque graphic
 * layers, Video Processor defaults, and all interrupts enabled.
 * Runs entirely with shadow updates frozen and the register lock held.
 */
void mxr_reg_reset(struct mxr_device *mdev)
{
	unsigned long flags;
	u32 val; /* value stored to register */

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* set output in RGB888 mode */
	mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_RGB888);

	/* 16 beat burst in DMA */
	mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
		MXR_STATUS_BURST_MASK);

	/* setting default layer priority: layer1 > video > layer0
	 * because typical usage scenario would be
	 * layer0 - framebuffer
	 * video - video overlay
	 * layer1 - OSD
	 */
	val = MXR_LAYER_CFG_GRP0_VAL(1);
	val |= MXR_LAYER_CFG_VP_VAL(2);
	val |= MXR_LAYER_CFG_GRP1_VAL(3);
	mxr_write(mdev, MXR_LAYER_CFG, val);

	/* use dark teal background color */
	mxr_write(mdev, MXR_BG_COLOR0, 0x008080);
	mxr_write(mdev, MXR_BG_COLOR1, 0x008080);
	mxr_write(mdev, MXR_BG_COLOR2, 0x008080);

	/* setting graphical layers */
	val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
	val |= MXR_GRP_CFG_LAYER_BLEND_EN;
	val &= ~MXR_GRP_CFG_BLEND_PRE_MUL; /* normal mode */
	val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */

	/* the same configuration for both layers */
	mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
	mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);

	/* configuration of Video Processor Registers */
	__mxr_reg_vp_reset(mdev);
	mxr_reg_vp_default_filter(mdev);

	/* enable all interrupts */
	mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/* Select the mixer output destination: cookie 0 routes to SDO,
 * anything else routes to HDMI.
 */
void mxr_reg_s_output(struct mxr_device *mdev, int cookie)
{
	u32 dst;

	if (cookie == 0)
		dst = MXR_CFG_DST_SDO;
	else
		dst = MXR_CFG_DST_HDMI;

	mxr_write_mask(mdev, MXR_CFG, dst, MXR_CFG_DST_MASK);
}
/* Select the TV-out path (one-path via sub-mixer0 or sub-mixer1, or
 * two-path) based on which sub-mixers are in use, then route GScaler
 * @gsc_num into sub-mixer @mxr_num through SYSREG_DISP1BLK_CFG when the
 * media link flag MEDIA_LNK_FL_ENABLED is set.
 * NOTE(review): unlike most register writers in this file, no
 * reg_slock is taken here — presumably the caller serializes access;
 * confirm before adding concurrent callers.
 */
void mxr_reg_local_path_set(struct mxr_device *mdev, int mxr_num, int gsc_num,
	u32 flags)
{
	u32 val = 0;
	int mxr0_use = mdev->sub_mxr[MXR_SUB_MIXER0].use;
	int mxr1_use = mdev->sub_mxr[MXR_SUB_MIXER1].use;

	if (mxr0_use && !mxr1_use) { /* 1-path : sub-mixer0 */
		val = MXR_TVOUT_CFG_ONE_PATH;
		val |= MXR_TVOUT_CFG_PATH_MIXER0;
	} else if (!mxr0_use && mxr1_use) { /* 1-path : sub-mixer1 */
		val = MXR_TVOUT_CFG_ONE_PATH;
		val |= MXR_TVOUT_CFG_PATH_MIXER1;
	} else if (mxr0_use && mxr1_use) /* 2-path */
		val = MXR_TVOUT_CFG_TWO_PATH;
	/* if neither sub-mixer is in use, the masked bits are cleared */

	mxr_write_mask(mdev, MXR_TVOUT_CFG, val, MXR_TVOUT_CFG_PATH_MASK);

	/* set local path gscaler to mixer (read-modify-write of SYSREG) */
	val = readl(SYSREG_DISP1BLK_CFG);
	val |= DISP1BLK_CFG_FIFORST_DISP1;
	if (flags & MEDIA_LNK_FL_ENABLED) {
		if (mxr_num == MXR_SUB_MIXER0) {
			val |= DISP1BLK_CFG_MIXER0_VALID;
			val |= DISP1BLK_CFG_MIXER0_SRC_GSC(gsc_num);
		} else if (mxr_num == MXR_SUB_MIXER1) {
			val |= DISP1BLK_CFG_MIXER1_VALID;
			val |= DISP1BLK_CFG_MIXER1_SRC_GSC(gsc_num);
		}
	}
	mxr_dbg(mdev, "%s: SYSREG value = 0x%x\n", __func__, val);
	writel(val, SYSREG_DISP1BLK_CFG);
}
/* Mixer interrupt handler. Timestamps and wakes VSYNC waiters, acks the
 * vsync interrupt, then lets the underrun handler adjust the remaining
 * status bits before they are written back to MXR_INT_STATUS.
 */
irqreturn_t mxr_irq_handler(int irq, void *dev_data)
{
	struct mxr_device *mdev = dev_data;
	u32 val;

	spin_lock(&mdev->reg_slock);
	val = mxr_read(mdev, MXR_INT_STATUS);

	/* wake up process waiting for VSYNC */
	if (val & MXR_INT_STATUS_VSYNC) {
		mdev->vsync_timestamp = ktime_get();
		wake_up_interruptible_all(&mdev->vsync_wait);
	}

	/* clear interrupts. vsync is updated after write MXR_CFG_LAYER_UPDATE bit */
	if (val & MXR_INT_CLEAR_VSYNC)
		mxr_write_mask(mdev, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);

	/* underrun handler may transform which bits get written back */
	val = mxr_irq_underrun_handle(mdev, val);
	mxr_write(mdev, MXR_INT_STATUS, val);

	spin_unlock(&mdev->reg_slock);
	return IRQ_HANDLED;
}
void mxr_reg_graph_format(struct mxr_device *mdev, int idx, const struct mxr_format *fmt, const struct mxr_geometry *geo) { u32 val; unsigned long flags; spin_lock_irqsave(&mdev->reg_slock, flags); mxr_vsync_set_update(mdev, MXR_DISABLE); /* setup format */ mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx), MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK); /* setup geometry */ mxr_write(mdev, MXR_GRAPHIC_SPAN(idx), geo->src.full_width); val = MXR_GRP_WH_WIDTH(geo->src.width); val |= MXR_GRP_WH_HEIGHT(geo->src.height); val |= MXR_GRP_WH_H_SCALE(geo->x_ratio); val |= MXR_GRP_WH_V_SCALE(geo->y_ratio); mxr_write(mdev, MXR_GRAPHIC_WH(idx), val); /* setup offsets in source image */ val = MXR_GRP_SXY_SX(geo->src.x_offset); val |= MXR_GRP_SXY_SY(geo->src.y_offset); mxr_write(mdev, MXR_GRAPHIC_SXY(idx), val); /* setup offsets in display image */ val = MXR_GRP_DXY_DX(geo->dst.x_offset); val |= MXR_GRP_DXY_DY(geo->dst.y_offset); mxr_write(mdev, MXR_GRAPHIC_DXY(idx), val); mxr_vsync_set_update(mdev, MXR_ENABLE); spin_unlock_irqrestore(&mdev->reg_slock, flags); }
void mxr_vsync_set_update(struct mxr_device *mdev, int en) { /* block update on vsync */ mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0); }
/* Trigger the mixer soft-reset bit under the register lock. */
void mxr_reg_sw_reset(struct mxr_device *mdev)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&mdev->reg_slock, irq_flags);
	mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
	spin_unlock_irqrestore(&mdev->reg_slock, irq_flags);
}
/* Reset the mixer to defaults (variant): RGB888 output, 16-beat DMA
 * bursts, layer priority values GRP0=1/VP=2/GRP1=3 (same ordering the
 * sibling reset documents as layer1 > video > layer0), mid-gray
 * background, pre-multiplied opaque graphic layers, Video Processor
 * defaults, and all interrupts enabled.
 */
void mxr_reg_reset(struct mxr_device *mdev)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* output in RGB888 mode */
	mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_RGB888);

	/* 16 beat burst in DMA */
	mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
		MXR_STATUS_BURST_MASK);

	/* default layer priorities */
	val = MXR_LAYER_CFG_GRP0_VAL(1);
	val |= MXR_LAYER_CFG_VP_VAL(2);
	val |= MXR_LAYER_CFG_GRP1_VAL(3);
	mxr_write(mdev, MXR_LAYER_CFG, val);

	/* mid-gray background color */
	mxr_write(mdev, MXR_BG_COLOR0, 0x808080);
	mxr_write(mdev, MXR_BG_COLOR1, 0x808080);
	mxr_write(mdev, MXR_BG_COLOR2, 0x808080);

	/* graphic layers: no blank key, pre-multiplied alpha, opaque */
	val = MXR_GRP_CFG_COLOR_KEY_DISABLE;
	val |= MXR_GRP_CFG_BLEND_PRE_MUL;
	val |= MXR_GRP_CFG_ALPHA_VAL(0xff);
	/* same configuration for both graphic layers */
	mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
	mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);

	/* Video Processor defaults */
	__mxr_reg_vp_reset(mdev);
	mxr_reg_vp_default_filter(mdev);

	/* enable all interrupts */
	mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/* Enable (@en non-zero) or disable per-pixel blending for graphic
 * layer @idx, under the register lock.
 */
void mxr_reg_graph_pixel_blend_enable(struct mxr_device *mdev, int idx, u32 en)
{
	unsigned long irq_flags;
	u32 bits = en ? ~0 : 0;

	spin_lock_irqsave(&mdev->reg_slock, irq_flags);
	mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx), bits,
			MXR_GRP_CFG_PIXEL_BLEND_EN);
	spin_unlock_irqrestore(&mdev->reg_slock, irq_flags);
}
/* Enable (@en non-zero) or disable layer blending for the video layer.
 * @idx is accepted for API symmetry with the graphic-layer helpers but
 * is not used: the blend enable bit lives in the single MXR_VIDEO_CFG
 * register.
 */
void mxr_reg_vp_layer_blend_enable(struct mxr_device *mdev, int idx, u32 en)
{
	unsigned long irq_flags;
	u32 bits = en ? ~0 : 0;

	spin_lock_irqsave(&mdev->reg_slock, irq_flags);
	mxr_write_mask(mdev, MXR_VIDEO_CFG, bits, MXR_VIDEO_CFG_BLEND_EN);
	spin_unlock_irqrestore(&mdev->reg_slock, irq_flags);
}
/* Set the DMA base address of graphic layer @idx and toggle its enable
 * bit (enabled when @addr is non-zero). Layers 0/1 belong to
 * sub-mixer0, 2/3 to sub-mixer1. No locking here — matches the
 * original, which performs the writes unlocked.
 */
void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
{
	u32 on = addr ? ~0 : 0;
	u32 cfg_mask;
	u32 base_reg;

	switch (idx) {
	case 0:
		cfg_mask = MXR_CFG_GRP0_ENABLE;
		base_reg = MXR_GRAPHIC_BASE(0);
		break;
	case 1:
		cfg_mask = MXR_CFG_GRP1_ENABLE;
		base_reg = MXR_GRAPHIC_BASE(1);
		break;
	case 2:
		cfg_mask = MXR_CFG_MX1_GRP0_ENABLE;
		base_reg = MXR1_GRAPHIC_BASE(0);
		break;
	case 3:
		cfg_mask = MXR_CFG_MX1_GRP1_ENABLE;
		base_reg = MXR1_GRAPHIC_BASE(1);
		break;
	default:
		return; /* unknown layer: nothing to do */
	}

	mxr_write_mask(mdev, MXR_CFG, on, cfg_mask);
	mxr_write(mdev, base_reg, addr);
}
void mxr_vsync_set_update(struct mxr_device *mdev, int en) { /* block update on vsync */ mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); #if defined(CONFIG_ARCH_EXYNOS4) vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0); #endif }
/* Program the mixer output bus format: colorspace (YUV444 when the
 * GRP0 source pad advertises YUV8_1X24 and DVI mode is off, otherwise
 * RGB888), scan mode, and the SD/HD scan standard selected from the
 * frame height. @fmt->code is rewritten to the colorspace actually
 * chosen, and VP line-skip/field toggling tracks interlaced mode.
 */
void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
	struct v4l2_mbus_framefmt *fmt, u32 dvi_mode)
{
	u32 val = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* choosing between YUV444 and RGB888 as mixer output type */
	if (mdev->sub_mxr[MXR_SUB_MIXER0].mbus_fmt[MXR_PAD_SOURCE_GRP0].code ==
			V4L2_MBUS_FMT_YUV8_1X24) {
		if (dvi_mode) {
			/* DVI mode forces RGB888 output */
			val = MXR_CFG_OUT_RGB888;
			fmt->code = V4L2_MBUS_FMT_XRGB8888_4X8_LE;
		} else {
			val = MXR_CFG_OUT_YUV444;
			fmt->code = V4L2_MBUS_FMT_YUV8_1X24;
		}
	} else {
		val = MXR_CFG_OUT_RGB888;
		fmt->code = V4L2_MBUS_FMT_XRGB8888_4X8_LE;
	}

	/* choosing between interlace and progressive mode */
	if (fmt->field == V4L2_FIELD_INTERLACED)
		val |= MXR_CFG_SCAN_INTERLACE;
	else
		val |= MXR_CFG_SCAN_PROGRASSIVE;

	/* choosing between proper HD and SD mode */
	if (fmt->height <= 480)
		val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
	else if (fmt->height <= 576)
		val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
	else if (fmt->height <= 720)
		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
	else if (fmt->height <= 1080)
		val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
	else {
		/* fall back to 720p on unexpected heights */
		WARN(1, "unrecognized mbus height %u!\n", fmt->height);
		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
	}

	mxr_write_mask(mdev, MXR_CFG, val,
		MXR_CFG_SCAN_MASK | MXR_CFG_OUT_MASK);

	/* interlaced output also needs VP line skip / field auto toggle */
	val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
	vp_write_mask(mdev, VP_MODE, val,
		VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/* Stop the mixer by clearing MXR_STATUS_REG_RUN. A single masked
 * write, so there is no need to freeze shadow updates around it.
 */
void mxr_reg_streamoff(struct mxr_device *mdev)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&mdev->reg_slock, irq_flags);
	mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
	spin_unlock_irqrestore(&mdev->reg_slock, irq_flags);
}
/* Stop the mixer (variant without the explanatory comments): clears
 * the run bit under the register lock.
 */
void mxr_reg_streamoff(struct mxr_device *mdev)
{
	unsigned long lock_flags;

	spin_lock_irqsave(&mdev->reg_slock, lock_flags);
	/* single masked write — shadow updates need not be frozen */
	mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
	spin_unlock_irqrestore(&mdev->reg_slock, lock_flags);
}
/* Set alpha value @a for layer @num of sub-mixer @sub_mxr. Only the
 * low byte of the target register is touched (mask 0xff) — assumes the
 * alpha field occupies the low byte; TODO confirm against the register
 * layout. Unknown combinations are silently ignored.
 */
void mxr_reg_layer_alpha(struct mxr_device *mdev, int sub_mxr, int num, u32 a)
{
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);
	if (sub_mxr == MXR_SUB_MIXER0 && num == MXR_LAYER_VIDEO)
		mxr_write_mask(mdev, MXR_VIDEO_CFG, MXR_VIDEO_CFG_ALPHA(a),
				0xff);
	else if (sub_mxr == MXR_SUB_MIXER0 && num == MXR_LAYER_GRP0)
		mxr_write_mask(mdev, MXR_GRAPHIC_CFG(0), MXR_GRP_CFG_ALPHA(a),
				0xff);
	else if (sub_mxr == MXR_SUB_MIXER0 && num == MXR_LAYER_GRP1)
		mxr_write_mask(mdev, MXR_GRAPHIC_CFG(1), MXR_GRP_CFG_ALPHA(a),
				0xff);
#if defined(CONFIG_ARCH_EXYNOS5)
	/* the second sub-mixer registers are compiled in on Exynos5 only */
	else if (sub_mxr == MXR_SUB_MIXER1 && num == MXR_LAYER_VIDEO)
		mxr_write_mask(mdev, MXR1_VIDEO_CFG, MXR_VIDEO_CFG_ALPHA(a),
				0xff);
	else if (sub_mxr == MXR_SUB_MIXER1 && num == MXR_LAYER_GRP0)
		mxr_write_mask(mdev, MXR1_GRAPHIC_CFG(0), MXR_GRP_CFG_ALPHA(a),
				0xff);
	else if (sub_mxr == MXR_SUB_MIXER1 && num == MXR_LAYER_GRP1)
		mxr_write_mask(mdev, MXR1_GRAPHIC_CFG(1), MXR_GRP_CFG_ALPHA(a),
				0xff);
#endif
	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/* Start the mixer by setting MXR_STATUS_REG_RUN and flag the
 * MXR_EVENT_TOP event. A single masked write, so shadow updates need
 * not be frozen.
 */
void mxr_reg_streamon(struct mxr_device *mdev)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&mdev->reg_slock, irq_flags);
	mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
	set_bit(MXR_EVENT_TOP, &mdev->event_flags);
	spin_unlock_irqrestore(&mdev->reg_slock, irq_flags);
}
/* Program the mixer output bus format from @fmt: colorspace (YUV444
 * for V4L2_COLORSPACE_JPEG, RGB888 otherwise), scan mode, and the
 * SD/HD standard from the exact frame height. Unlike the range-based
 * variant, unmatched heights only trigger a warning and set no scan
 * standard bits. VP line-skip/field toggling tracks interlaced mode.
 */
void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
	struct v4l2_mbus_framefmt *fmt)
{
	u32 val = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* JPEG colorspace maps to YUV444 output, everything else to RGB888 */
	if (fmt->colorspace == V4L2_COLORSPACE_JPEG)
		val |= MXR_CFG_OUT_YUV444;
	else
		val |= MXR_CFG_OUT_RGB888;

	if (fmt->field == V4L2_FIELD_INTERLACED)
		val |= MXR_CFG_SCAN_INTERLACE;
	else
		val |= MXR_CFG_SCAN_PROGRASSIVE;

	/* exact-height match for the scan standard */
	if (fmt->height == 480)
		val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
	else if (fmt->height == 576)
		val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
	else if (fmt->height == 720)
		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
	else if (fmt->height == 1080)
		val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
	else
		WARN(1, "unrecognized mbus height %u!\n", fmt->height);

	mxr_write_mask(mdev, MXR_CFG, val,
		MXR_CFG_SCAN_MASK | MXR_CFG_OUT_MASK);

	/* interlaced output also needs VP line skip / field auto toggle */
	val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
	vp_write_mask(mdev, VP_MODE, val,
		VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/* Point the video processor at new luma/chroma buffers ([0] = top
 * field, [1] = bottom field) and enable or disable the VP layer based
 * on whether a top-field luma address was supplied.
 */
void mxr_reg_vp_buffer(struct mxr_device *mdev, dma_addr_t luma_addr[2],
	dma_addr_t chroma_addr[2])
{
	unsigned long irq_flags;
	u32 on = luma_addr[0] ? ~0 : 0;

	spin_lock_irqsave(&mdev->reg_slock, irq_flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	mxr_write_mask(mdev, MXR_CFG, on, MXR_CFG_VP_ENABLE);
	vp_write_mask(mdev, VP_ENABLE, on, VP_ENABLE_ON);

	/* TODO: fix tiled mode */
	vp_write(mdev, VP_TOP_Y_PTR, luma_addr[0]);
	vp_write(mdev, VP_TOP_C_PTR, chroma_addr[0]);
	vp_write(mdev, VP_BOT_Y_PTR, luma_addr[1]);
	vp_write(mdev, VP_BOT_C_PTR, chroma_addr[1]);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, irq_flags);
}
/* Program the mixer scan configuration from @fmt: interlace vs
 * progressive, and the SD/HD standard from the exact frame height.
 * This variant touches only MXR_CFG_SCAN_MASK (no output colorspace
 * selection). Unmatched heights only trigger a warning. VP
 * line-skip/field toggling tracks interlaced mode.
 */
void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
	struct v4l2_mbus_framefmt *fmt)
{
	u32 val = 0;
	unsigned long flags;

	spin_lock_irqsave(&mdev->reg_slock, flags);
	mxr_vsync_set_update(mdev, MXR_DISABLE);

	/* choosing between interlace and progressive mode */
	if (fmt->field == V4L2_FIELD_INTERLACED)
		val |= MXR_CFG_SCAN_INTERLACE;
	else
		val |= MXR_CFG_SCAN_PROGRASSIVE;

	/* choosing between proper HD and SD mode */
	if (fmt->height == 480)
		val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
	else if (fmt->height == 576)
		val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
	else if (fmt->height == 720)
		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
	else if (fmt->height == 1080)
		val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
	else
		WARN(1, "unrecognized mbus height %u!\n", fmt->height);

	mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK);

	/* interlaced output also needs VP line skip / field auto toggle */
	val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
	vp_write_mask(mdev, VP_MODE, val,
		VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);

	mxr_vsync_set_update(mdev, MXR_ENABLE);
	spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
void mxr_layer_sync(struct mxr_device *mdev, int en) { mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_LAYER_SYNC : 0, MXR_STATUS_LAYER_SYNC); }
/* Request a layer update (MXR_CFG_LAYER_UPDATE) and then re-enable
 * shadow-register commits on vsync.
 */
void mxr_vsync_enable_update(struct mxr_device *mdev)
{
	mxr_write_mask(mdev, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
	mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE);
}
/* Stop committing shadow registers on vsync by clearing the sync
 * enable bit in MXR_STATUS.
 */
void mxr_vsync_disable_update(struct mxr_device *mdev)
{
	mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE);
}
void mxr_reg_graph_format(struct mxr_device *mdev, int idx, const struct mxr_format *fmt, const struct mxr_geometry *geo) { u32 wh, sxy, dxy; unsigned long flags; spin_lock_irqsave(&mdev->reg_slock, flags); mxr_vsync_set_update(mdev, MXR_DISABLE); /* Mostly, src width, height and dst width, height are same. * However, in case of doing src and dst cropping, those are different. * So you have to write dst width and height to MXR_GRAPHIC_WH register. */ wh = MXR_GRP_WH_WIDTH(geo->dst.width); wh |= MXR_GRP_WH_HEIGHT(geo->dst.height); wh |= MXR_GRP_WH_H_SCALE(geo->x_ratio); wh |= MXR_GRP_WH_V_SCALE(geo->y_ratio); /* setup offsets in source image */ sxy = MXR_GRP_SXY_SX(geo->src.x_offset); sxy |= MXR_GRP_SXY_SY(geo->src.y_offset); /* setup offsets in display image */ dxy = MXR_GRP_DXY_DX(geo->dst.x_offset); dxy |= MXR_GRP_DXY_DY(geo->dst.y_offset); if (idx == 0) { mxr_write_mask(mdev, MXR_GRAPHIC_CFG(0), MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK); mxr_write(mdev, MXR_GRAPHIC_SPAN(0), geo->src.full_width); mxr_write(mdev, MXR_GRAPHIC_WH(0), wh); mxr_write(mdev, MXR_GRAPHIC_SXY(0), sxy); mxr_write(mdev, MXR_GRAPHIC_DXY(0), dxy); } else if (idx == 1) { mxr_write_mask(mdev, MXR_GRAPHIC_CFG(1), MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK); mxr_write(mdev, MXR_GRAPHIC_SPAN(1), geo->src.full_width); mxr_write(mdev, MXR_GRAPHIC_WH(1), wh); mxr_write(mdev, MXR_GRAPHIC_SXY(1), sxy); mxr_write(mdev, MXR_GRAPHIC_DXY(1), dxy); } else if (idx == 2) { mxr_write_mask(mdev, MXR1_GRAPHIC_CFG(0), MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK); mxr_write(mdev, MXR1_GRAPHIC_SPAN(0), geo->src.full_width); mxr_write(mdev, MXR1_GRAPHIC_WH(0), wh); mxr_write(mdev, MXR1_GRAPHIC_SXY(0), sxy); mxr_write(mdev, MXR1_GRAPHIC_DXY(0), dxy); } else if (idx == 3) { mxr_write_mask(mdev, MXR1_GRAPHIC_CFG(1), MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK); mxr_write(mdev, MXR1_GRAPHIC_SPAN(1), geo->src.full_width); mxr_write(mdev, 
MXR1_GRAPHIC_WH(1), wh); mxr_write(mdev, MXR1_GRAPHIC_SXY(1), sxy); mxr_write(mdev, MXR1_GRAPHIC_DXY(1), dxy); } mxr_vsync_set_update(mdev, MXR_ENABLE); spin_unlock_irqrestore(&mdev->reg_slock, flags); }