/**
 * xilinx_drm_dp_aux_cmd_submit - Submit aux command
 * @dp: DisplayPort IP core structure
 * @cmd: aux command
 * @addr: aux address
 * @buf: buffer for command data
 * @bytes: number of bytes for @buf
 *
 * Submit an aux command. All aux related commands, native or i2c aux
 * read/write, are submitted through this function. This function involves in
 * multiple register reads/writes, thus the synchronization needs to be done
 * by holding @aux_lock if multi-thread access is possible. The calling thread
 * goes into sleep if there's no immediate reply to the command submission.
 *
 * Return: 0 if the command is submitted properly, or corresponding error code:
 * -EBUSY when there is any request already being processed
 * -ETIMEDOUT when receiving reply is timed out
 * -EAGAIN when the command is deferred
 * -EIO when the command is NACKed, or received data is less than requested
 */
static int xilinx_drm_dp_aux_cmd_submit(struct xilinx_drm_dp *dp, u32 cmd,
					u16 addr, u8 *buf, u8 bytes)
{
	void __iomem *base = dp->iomem;
	bool is_read = !!(cmd & AUX_READ_BIT);
	u32 status;
	u32 idx;

	/* Reject submission while a previous request is still in flight */
	status = xilinx_drm_readl(base, XILINX_DP_TX_INTR_SIGNAL_STATE);
	if (status & XILINX_DP_TX_INTR_SIGNAL_STATE_REQUEST)
		return -EBUSY;

	xilinx_drm_writel(base, XILINX_DP_TX_AUX_ADDRESS, addr);

	/* Address-only transaction: nothing further to transfer */
	if (!buf)
		return 0;

	/* For writes, stage the payload in the FIFO before the command */
	if (!is_read)
		for (idx = 0; idx < bytes; idx++)
			xilinx_drm_writel(base, XILINX_DP_TX_AUX_WRITE_FIFO,
					  buf[idx]);

	xilinx_drm_writel(base, XILINX_DP_TX_AUX_COMMAND,
			  (cmd << XILINX_DP_TX_AUX_COMMAND_CMD_SHIFT) |
			  (bytes - 1) << XILINX_DP_TX_AUX_COMMAND_BYTES_SHIFT);

	/* Poll for a reply for up to ~2 ms (at most two ~1 ms sleeps) */
	idx = 0;
	for (;;) {
		status = xilinx_drm_readl(base,
					  XILINX_DP_TX_INTR_SIGNAL_STATE);
		if (status & XILINX_DP_TX_INTR_SIGNAL_STATE_REPLY)
			break;

		if ((status & XILINX_DP_TX_INTR_SIGNAL_STATE_REPLY_TIMEOUT) ||
		    idx == 2)
			return -ETIMEDOUT;

		usleep_range(1000, 1100);
		idx++;
	}

	/* Map the sink's reply code onto an errno */
	status = xilinx_drm_readl(base, XILINX_DP_TX_AUX_REPLY_CODE);
	if (status == XILINX_DP_TX_AUX_REPLY_CODE_AUX_NACK ||
	    status == XILINX_DP_TX_AUX_REPLY_CODE_I2C_NACK)
		return -EIO;

	if (status == XILINX_DP_TX_AUX_REPLY_CODE_AUX_DEFER ||
	    status == XILINX_DP_TX_AUX_REPLY_CODE_I2C_DEFER)
		return -EAGAIN;

	if (is_read) {
		/* A short read is treated as an I/O error */
		status = xilinx_drm_readl(base, XILINX_DP_TX_REPLY_DATA_CNT);
		if ((status & XILINX_DP_TX_AUX_REPLY_CNT_MASK) != bytes)
			return -EIO;

		for (idx = 0; idx < bytes; idx++)
			buf[idx] = xilinx_drm_readl(base,
						    XILINX_DP_TX_AUX_REPLY_DATA);
	}

	return 0;
}
/*
 * xilinx_drm_dp_detect - Detect connector status via the HPD signal state
 *
 * Reads the interrupt signal-state register; when HPD is asserted, reads the
 * sink's DPCD over AUX and clamps the advertised link rate / lane count to
 * the limits the IP core was configured with.
 *
 * NOTE(review): this definition is truncated at the end of this chunk — the
 * non-HPD (disconnected) path and the function's closing brace lie outside
 * this view. Left byte-identical; do not assume the visible text is complete.
 */
static enum drm_connector_status xilinx_drm_dp_detect(struct drm_encoder *encoder,
						      struct drm_connector *connector)
{
	struct xilinx_drm_dp *dp = to_dp(encoder);
	struct xilinx_drm_dp_link_config *link_config = &dp->link_config;
	u32 state;
	int ret;

	state = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_INTR_SIGNAL_STATE);
	if (state & XILINX_DP_TX_INTR_SIGNAL_STATE_HPD) {
		/* HPD asserted: a sink is present; size the link from DPCD */
		ret = xilinx_drm_dp_aux_read(dp, 0x0, dp->dpcd,
					     sizeof(dp->dpcd));
		if (ret)
			return connector_status_disconnected;

		/* Never exceed what the IP core itself supports */
		link_config->max_rate = min_t(int,
					      drm_dp_max_link_rate(dp->dpcd),
					      dp->config.max_link_rate);
		link_config->max_lanes = min_t(u8,
					       drm_dp_max_lane_count(dp->dpcd),
					       dp->config.max_lanes);

		return connector_status_connected;
	}
/** * xilinx_drm_dp_phy_ready - Check if PHY is ready * @dp: DisplayPort IP core structure * * Check if PHY is ready. If PHY is not ready, wait 1ms to check for 100 times. * This amount of delay was suggested by IP designer. * * Return: 0 if PHY is ready, or -ENODEV if PHY is not ready. */ static int xilinx_drm_dp_phy_ready(struct xilinx_drm_dp *dp) { u32 i, reg, ready, lane; lane = dp->config.max_lanes; ready = (1 << lane) - 1; if (!dp->dp_sub) ready |= XILINX_DP_TX_PHY_STATUS_FPGA_PLL_LOCKED; /* Wait for 100 * 1ms. This should be enough time for PHY to be ready */ for (i = 0; ; i++) { reg = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_PHY_STATUS); if ((reg & ready) == ready) return 0; if (i == 100) { DRM_ERROR("PHY isn't ready\n"); return -ENODEV; } usleep_range(1000, 1100); } return 0; }
/* disable layer: clear the layer-enable bit in its control register */
void xilinx_osd_layer_disable(struct xilinx_osd_layer *layer)
{
	u32 ctrl;

	DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);

	ctrl = xilinx_drm_readl(layer->base, OSD_LXC) & ~OSD_LXC_EN;
	xilinx_drm_writel(layer->base, OSD_LXC, ctrl);
}
/*
 * xilinx_osd_probe - Probe the OSD core described by @node
 * @dev: device owning the managed allocations
 * @node: device-tree node of the OSD instance
 *
 * Maps the core's registers, reads "xlnx,num-layers" and
 * "xlnx,screen-width" from the device tree, latches the video format the
 * hardware was configured with, and allocates one descriptor per layer.
 *
 * Return: the probed OSD instance, or an ERR_PTR on failure.
 */
struct xilinx_osd *xilinx_osd_probe(struct device *dev,
				    struct device_node *node)
{
	struct xilinx_osd *osd;
	struct xilinx_osd_layer *layer;
	struct resource res;
	int i;
	int ret;

	osd = devm_kzalloc(dev, sizeof(*osd), GFP_KERNEL);
	if (!osd)
		return ERR_PTR(-ENOMEM);

	ret = of_address_to_resource(node, 0, &res);
	if (ret) {
		dev_err(dev, "failed to of_address_to_resource\n");
		return ERR_PTR(ret);
	}

	osd->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(osd->base))
		return ERR_CAST(osd->base);

	ret = of_property_read_u32(node, "xlnx,num-layers", &osd->num_layers);
	if (ret) {
		dev_warn(dev, "failed to get num of layers prop\n");
		return ERR_PTR(ret);
	}

	ret = of_property_read_u32(node, "xlnx,screen-width", &osd->max_width);
	if (ret) {
		dev_warn(dev, "failed to get screen width prop\n");
		return ERR_PTR(ret);
	}

	/* read the video format set by a user */
	osd->format = xilinx_drm_readl(osd->base, OSD_ENC) &
		      OSD_VIDEO_FORMAT_MASK;

	/*
	 * NOTE(review): num_layers comes straight from the device tree and is
	 * used to index osd->layers[] below with no visible bounds check; the
	 * array's declared size is outside this view. Confirm it is validated
	 * against the layers[] capacity, or a malformed DT overflows it.
	 */
	for (i = 0; i < osd->num_layers; i++) {
		layer = devm_kzalloc(dev, sizeof(*layer), GFP_KERNEL);
		if (!layer)
			return ERR_PTR(-ENOMEM);

		/* Layer register banks are laid out contiguously after L0C */
		layer->base = osd->base + OSD_L0C + OSD_LAYER_SIZE * i;
		layer->id = i;
		layer->osd = osd;
		layer->avail = true;
		osd->layers[i] = layer;
	}

	return osd;
}
/* set layer priority: replace the priority field of the control register */
void xilinx_osd_layer_set_priority(struct xilinx_osd_layer *layer, u32 prio)
{
	u32 ctrl;

	DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
	DRM_DEBUG_DRIVER("prio: %d\n", prio);

	ctrl = xilinx_drm_readl(layer->base, OSD_LXC);
	ctrl = (ctrl & ~OSD_LXC_PRIORITY_MASK) |
	       ((prio << OSD_LXC_PRIORITY_SHIFT) & OSD_LXC_PRIORITY_MASK);
	xilinx_drm_writel(layer->base, OSD_LXC, ctrl);
}
/* set layer alpha: toggle global-alpha enable and program the alpha value */
void xilinx_osd_layer_set_alpha(struct xilinx_osd_layer *layer, u32 enable,
				u32 alpha)
{
	u32 ctrl;

	DRM_DEBUG_DRIVER("layer->id: %d\n", layer->id);
	DRM_DEBUG_DRIVER("alpha: 0x%08x\n", alpha);

	ctrl = xilinx_drm_readl(layer->base, OSD_LXC);
	if (enable)
		ctrl |= OSD_LXC_GALPHAEN;
	else
		ctrl &= ~OSD_LXC_GALPHAEN;

	ctrl &= ~OSD_LXC_ALPHA_MASK;
	ctrl |= (alpha << OSD_LXC_ALPHA_SHIFT) & OSD_LXC_ALPHA_MASK;
	xilinx_drm_writel(layer->base, OSD_LXC, ctrl);
}
/** * xilinx_drm_dp_phy_ready - Check if PHY is ready * @dp: DisplayPort IP core structure * * Check if PHY is ready. If PHY is not ready, wait 1ms to check for 100 times. * This amount of delay was suggested by IP designer. * * Return: 0 if PHY is ready, or -ENODEV if PHY is not ready. */ static int xilinx_drm_dp_phy_ready(struct xilinx_drm_dp *dp) { u32 i, reg; /* Wait for 100 * 1ms. This should be enough time for PHY to be ready */ for (i = 0; ; i++) { reg = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_PHY_STATUS); if ((reg & XILINX_DP_TX_PHY_STATUS_READY_MASK) == XILINX_DP_TX_PHY_STATUS_READY_MASK) return 0; if (i == 100) { DRM_ERROR("PHY isn't ready\n"); return -ENODEV; } usleep_range(1000, 1100); } }
/** * xilinx_drm_dp_link_train - Train the link * @dp: DisplayPort IP core structure * * Return: 0 if all trains are done successfully, or corresponding error code. */ static int xilinx_drm_dp_train(struct xilinx_drm_dp *dp) { u32 reg; u8 bw_code = dp->mode.bw_code; u8 lane_cnt = dp->mode.lane_cnt; u8 aux_lane_cnt; bool enhanced; int ret; xilinx_drm_writel(dp->iomem, XILINX_DP_TX_LANE_CNT_SET, lane_cnt); enhanced = drm_dp_enhanced_frame_cap(dp->dpcd); if (enhanced) { xilinx_drm_writel(dp->iomem, XILINX_DP_TX_ENHANCED_FRAME_EN, 1); aux_lane_cnt = lane_cnt | DP_LANE_COUNT_ENHANCED_FRAME_EN; } ret = xilinx_drm_dp_aux_write_byte(dp, DP_LANE_COUNT_SET, aux_lane_cnt); if (ret) { DRM_ERROR("failed to set lane count\n"); return ret; } ret = xilinx_drm_dp_aux_write_byte(dp, DP_LINK_BW_SET, bw_code); if (ret) { DRM_ERROR("failed to set DP bandwidth\n"); return ret; } xilinx_drm_writel(dp->iomem, XILINX_DP_TX_LINK_BW_SET, bw_code); switch (bw_code) { case DP_LINK_BW_1_62: reg = XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_162; break; case DP_LINK_BW_2_7: reg = XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_270; break; case DP_LINK_BW_5_4: reg = XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING_540; break; } xilinx_drm_writel(dp->iomem, XILINX_DP_TX_PHY_CLOCK_FEEDBACK_SETTING, reg); ret = xilinx_drm_dp_phy_ready(dp); if (ret < 0) return ret; xilinx_drm_writel(dp->iomem, XILINX_DP_TX_SCRAMBLING_DISABLE, 1); memset(dp->train_set, 0, 4); ret = xilinx_drm_dp_link_train_cr(dp); if (ret) { DRM_ERROR("failed to train clock recovery\n"); reg = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_PHY_STATUS); return ret; } ret = xilinx_drm_dp_link_train_ce(dp); if (ret) { DRM_ERROR("failed to train channel eq\n"); reg = xilinx_drm_readl(dp->iomem, XILINX_DP_TX_PHY_STATUS); return ret; } xilinx_drm_writel(dp->iomem, XILINX_DP_TX_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); ret = xilinx_drm_dp_aux_write_byte(dp, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); if (ret) { DRM_ERROR("failed to disable training 
pattern\n"); return ret; } xilinx_drm_writel(dp->iomem, XILINX_DP_TX_SCRAMBLING_DISABLE, 0); return 0; }
/* register-update-enable osd */ void xilinx_osd_disable_rue(struct xilinx_osd *osd) { xilinx_drm_writel(osd->base, OSD_CTL, xilinx_drm_readl(osd->base, OSD_CTL) & ~OSD_CTL_RUE); }
/* enable osd */ void xilinx_osd_enable(struct xilinx_osd *osd) { xilinx_drm_writel(osd->base, OSD_CTL, xilinx_drm_readl(osd->base, OSD_CTL) | OSD_CTL_EN); }