Example 1
static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
				 u8 *send, int send_bytes,
				 u8 *recv, int recv_size,
				 u8 delay, u8 *ack)
{
	struct drm_device *dev = chan->dev;
	struct radeon_device *rdev = dev->dev_private;
	union aux_channel_transaction args;
	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
	unsigned char *base;
	int recv_bytes;

	memset(&args, 0, sizeof(args));

	base = (unsigned char *)rdev->mode_info.atom_context->scratch;
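	/* the request is staged at the start of the ATOM scratch buffer;
	 * the reply data is read back from offset 16 (lpDataOut below) */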

	memcpy(base, send, send_bytes);

	args.v1.lpAuxRequest = 0;
	args.v1.lpDataOut = 16;
	args.v1.ucDataOutLen = 0;
	args.v1.ucChannelID = chan->rec.i2c_id;
	args.v1.ucDelay = delay / 10;
	if (ASIC_IS_DCE4(rdev))
		args.v2.ucHPD_ID = chan->rec.hpd;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	*ack = args.v1.ucReplyStatus;

	/* timeout */
	if (args.v1.ucReplyStatus == 1) {
		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
		return -ETIMEDOUT;
	}

	/* flags not zero */
	if (args.v1.ucReplyStatus == 2) {
		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
		return -EIO;
	}

	/* error */
	if (args.v1.ucReplyStatus == 3) {
		DRM_DEBUG_KMS("dp_aux_ch error\n");
		return -EIO;
	}

	recv_bytes = args.v1.ucDataOutLen;
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	if (recv && recv_size)
		memcpy(recv, base + 16, recv_bytes);

	return recv_bytes;
}
Example 2
static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	u8 tmp;

	/* power up the sink */
	if (dp_info->dpcd[0] >= 0x11) {
		radeon_write_dpcd_reg(dp_info->radeon_connector,
				      DP_SET_POWER, DP_SET_POWER_D0);
		usleep_range(1000, 2000);
	}

	/* possibly enable downspread on the sink */
	if (dp_info->dpcd[3] & 0x1)
		radeon_write_dpcd_reg(dp_info->radeon_connector,
				      DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
	else
		radeon_write_dpcd_reg(dp_info->radeon_connector,
				      DP_DOWNSPREAD_CTRL, 0);

	if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
	    (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
		radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
	}

	/* set the lane count on the sink */
	tmp = dp_info->dp_lane_count;
	if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);

	/* set the link rate on the sink */
	tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);

	/* start training on the source */
	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
		atombios_dig_encoder_setup(dp_info->encoder,
					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
	else
		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START,
					  dp_info->dp_clock, dp_info->enc_id, 0);

	/* disable the training pattern on the sink */
	radeon_write_dpcd_reg(dp_info->radeon_connector,
			      DP_TRAINING_PATTERN_SET,
			      DP_TRAINING_PATTERN_DISABLE);

	return 0;
}
Example 3
/* radeon aux chan functions */
bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
			   int num_bytes, u8 *read_byte,
			   u8 read_buf_len, u8 delay)
{
	struct drm_device *dev = chan->dev;
	struct radeon_device *rdev = dev->dev_private;
	union aux_channel_transaction args;
	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
	unsigned char *base;
	int retry_count = 0;

	memset(&args, 0, sizeof(args));

	base = (unsigned char *)rdev->mode_info.atom_context->scratch;

retry:
	memcpy(base, req_bytes, num_bytes);

	args.v1.lpAuxRequest = 0;
	args.v1.lpDataOut = 16;
	args.v1.ucDataOutLen = 0;
	args.v1.ucChannelID = chan->rec.i2c_id;
	args.v1.ucDelay = delay / 10;
	if (ASIC_IS_DCE4(rdev))
		args.v2.ucHPD_ID = chan->rec.hpd;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	/* a non-zero reply status with no returned data is a failure; an AUX
	 * defer reply (0x20) is retried up to 10 times before giving up */
	if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) {
		if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10)
			goto retry;
		DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
			  req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
			  chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count);
		return false;
	}

	if (args.v1.ucDataOutLen && read_byte && read_buf_len) {
		if (read_buf_len < args.v1.ucDataOutLen) {
			DRM_ERROR("Buffer too small for return answer %d %d\n",
				  read_buf_len, args.v1.ucDataOutLen);
			return false;
		}
		memcpy(read_byte, base + 16,
		       min(read_buf_len, args.v1.ucDataOutLen));
	}
	return true;
}
Example 4
static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
				     struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;

	if (!ASIC_IS_DCE4(rdev))
		return;

	if (radeon_connector_encoder_is_dp_bridge(connector))
		panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;

	atombios_dig_encoder_setup(encoder,
				   ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
				   panel_mode);
}
Example 5
static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
{
	u8 tmp;

	/* power up the sink */
	if (dp_info->dpcd[0] >= 0x11)
		radeon_write_dpcd_reg(dp_info->radeon_connector,
				      DP_SET_POWER, DP_SET_POWER_D0);

	/* possibly enable downspread on the sink */
	if (dp_info->dpcd[3] & 0x1)
		radeon_write_dpcd_reg(dp_info->radeon_connector,
				      DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
	else
		radeon_write_dpcd_reg(dp_info->radeon_connector,
				      DP_DOWNSPREAD_CTRL, 0);

	/* set the panel mode (internal DP1/DP2 vs external DP) on the encoder */
	radeon_dp_set_panel_mode(dp_info->encoder, dp_info->connector);

	/* set the lane count on the sink */
	tmp = dp_info->dp_lane_count;
	if (dp_info->dpcd[0] >= 0x11)
		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);

	/* set the link rate on the sink */
	tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock);
	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);

	/* start training on the source */
	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
		atombios_dig_encoder_setup(dp_info->encoder,
					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
	else
		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START,
					  dp_info->dp_clock, dp_info->enc_id, 0);

	/* disable the training pattern on the sink */
	radeon_write_dpcd_reg(dp_info->radeon_connector,
			      DP_TRAINING_PATTERN_SET,
			      DP_TRAINING_PATTERN_DISABLE);

	return 0;
}
Example 6
static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
				     struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;

	if (!ASIC_IS_DCE4(rdev))
		return;

	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
	    ENCODER_OBJECT_ID_NUTMEG)
		panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
	else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
		 ENCODER_OBJECT_ID_TRAVIS) {
		u8 id[6];
		int i;
		/* read the 6-byte branch device ID string at DPCD 0x503; the
		 * comparison below is the ASCII ID "Travis" in reverse order */
		for (i = 0; i < 6; i++)
			id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i);
		if (id[0] == 0x73 &&
		    id[1] == 0x69 &&
		    id[2] == 0x76 &&
		    id[3] == 0x61 &&
		    id[4] == 0x72 &&
		    id[5] == 0x54)
			panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
		else
			panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
	} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
		if (tmp & 1)
			panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
	}

	atombios_dig_encoder_setup(encoder,
				   ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
				   panel_mode);

	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
	    (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
		radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
	}
}
Example 7
static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info)
{
	DRM_UDELAY(400);

	/* disable the training pattern on the sink */
	radeon_write_dpcd_reg(dp_info->radeon_connector,
			      DP_TRAINING_PATTERN_SET,
			      DP_TRAINING_PATTERN_DISABLE);

	/* disable the training pattern on the source */
	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
		atombios_dig_encoder_setup(dp_info->encoder,
					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
	else
		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
					  dp_info->dp_clock, dp_info->enc_id, 0);

	return 0;
}
Example 8
int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
			     struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct radeon_connector_atom_dig *dig_connector;
	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
	u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
	u8 tmp;

	if (!ASIC_IS_DCE4(rdev))
		return panel_mode;

	if (!radeon_connector->con_priv)
		return panel_mode;

	dig_connector = radeon_connector->con_priv;

	if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
		/* DP bridge chips */
		if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
			if (tmp & 1)
				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
			else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
				 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
				panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
			else
				panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
		}
	} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		/* eDP */
		if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
				      DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
			if (tmp & 1)
				panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
		}
	}

	return panel_mode;
}
Example 9
int radeon_asic_init(struct radeon_device *rdev)
{
	radeon_register_accessor_init(rdev);
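	/* pick the asic function table based on the chip family */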
	switch (rdev->family) {
	case CHIP_R100:
	case CHIP_RV100:
	case CHIP_RS100:
	case CHIP_RV200:
	case CHIP_RS200:
		rdev->asic = &r100_asic;
		break;
	case CHIP_R200:
	case CHIP_RV250:
	case CHIP_RS300:
	case CHIP_RV280:
		rdev->asic = &r200_asic;
		break;
	case CHIP_R300:
	case CHIP_R350:
	case CHIP_RV350:
	case CHIP_RV380:
		if (rdev->flags & RADEON_IS_PCIE)
			rdev->asic = &r300_asic_pcie;
		else
			rdev->asic = &r300_asic;
		break;
	case CHIP_R420:
	case CHIP_R423:
	case CHIP_RV410:
		rdev->asic = &r420_asic;
		/* handle macs */
		if (rdev->bios == NULL) {
			rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
			rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
			rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
			rdev->asic->set_memory_clock = NULL;
		}
		break;
	case CHIP_RS400:
	case CHIP_RS480:
		rdev->asic = &rs400_asic;
		break;
	case CHIP_RS600:
		rdev->asic = &rs600_asic;
		break;
	case CHIP_RS690:
	case CHIP_RS740:
		rdev->asic = &rs690_asic;
		break;
	case CHIP_RV515:
		rdev->asic = &rv515_asic;
		break;
	case CHIP_R520:
	case CHIP_RV530:
	case CHIP_RV560:
	case CHIP_RV570:
	case CHIP_R580:
		rdev->asic = &r520_asic;
		break;
	case CHIP_R600:
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
		rdev->asic = &r600_asic;
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->asic = &rs780_asic;
		break;
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
	case CHIP_RV740:
		rdev->asic = &rv770_asic;
		break;
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_JUNIPER:
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		rdev->asic = &evergreen_asic;
		break;
	case CHIP_PALM:
		rdev->asic = &sumo_asic;
		break;
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		rdev->asic = &btc_asic;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (rdev->flags & RADEON_IS_IGP) {
		rdev->asic->get_memory_clock = NULL;
		rdev->asic->set_memory_clock = NULL;
	}

	/* set the number of crtcs */
	if (rdev->flags & RADEON_SINGLE_CRTC)
		rdev->num_crtc = 1;
	else {
		if (ASIC_IS_DCE41(rdev))
			rdev->num_crtc = 2;
		else if (ASIC_IS_DCE4(rdev))
			rdev->num_crtc = 6;
		else
			rdev->num_crtc = 2;
	}

	return 0;
}
Example 10
static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
				 u8 *send, int send_bytes,
				 u8 *recv, int recv_size,
				 u8 delay, u8 *ack)
{
	struct drm_device *dev = chan->dev;
	struct radeon_device *rdev = dev->dev_private;
	union aux_channel_transaction args;
	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
	unsigned char *base;
	int recv_bytes;
	int r = 0;

	memset(&args, 0, sizeof(args));

	mutex_lock(&chan->mutex);
	mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);

	/* the request is staged at byte offset 4 of the scratch buffer (scratch
	 * is a u32 pointer, hence the +1); the reply is read back 16 bytes later */
	base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);

	/* copy the request into scratch, byte-swapping on big-endian hosts */
	radeon_atom_copy_swap(base, send, send_bytes, true);

	args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
	args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
	args.v1.ucDataOutLen = 0;
	args.v1.ucChannelID = chan->rec.i2c_id;
	args.v1.ucDelay = delay / 10;
	if (ASIC_IS_DCE4(rdev))
		args.v2.ucHPD_ID = chan->rec.hpd;

	atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	*ack = args.v1.ucReplyStatus;

	/* timeout */
	if (args.v1.ucReplyStatus == 1) {
		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
		r = -ETIMEDOUT;
		goto done;
	}

	/* flags not zero */
	if (args.v1.ucReplyStatus == 2) {
		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
		r = -EIO;
		goto done;
	}

	/* error */
	if (args.v1.ucReplyStatus == 3) {
		DRM_DEBUG_KMS("dp_aux_ch error\n");
		r = -EIO;
		goto done;
	}

	recv_bytes = args.v1.ucDataOutLen;
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	if (recv && recv_size)
		radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);

	r = recv_bytes;
done:
	mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
	mutex_unlock(&chan->mutex);

	return r;
}
Example 11
void dp_link_train(struct drm_encoder *encoder,
		   struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig;
	struct radeon_connector *radeon_connector;
	struct radeon_connector_atom_dig *dig_connector;
	int enc_id = 0;
	bool clock_recovery, channel_eq;
	u8 link_status[DP_LINK_STATUS_SIZE];
	u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
	u8 tries, voltage;
	u8 train_set[4];
	int i;

	if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
	    (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
		return;

	if (!radeon_encoder->enc_priv)
		return;
	dig = radeon_encoder->enc_priv;

	radeon_connector = to_radeon_connector(connector);
	if (!radeon_connector->con_priv)
		return;
	dig_connector = radeon_connector->con_priv;

	/* encode which DIG encoder and link (A/B) this connector uses */
	if (dig->dig_encoder)
		enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
	else
		enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
	if (dig_connector->linkb)
		enc_id |= ATOM_DP_CONFIG_LINK_B;
	else
		enc_id |= ATOM_DP_CONFIG_LINK_A;

	/* build the link configuration for the sink: bandwidth code and lane count */
	memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	if (dig_connector->dp_clock == 270000)
		link_configuration[0] = DP_LINK_BW_2_7;
	else
		link_configuration[0] = DP_LINK_BW_1_62;
	link_configuration[1] = dig_connector->dp_lane_count;
	if (dig_connector->dpcd[0] >= 0x11)
		link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	/* power up the sink */
	dp_set_power(radeon_connector, DP_SET_POWER_D0);
	/* disable the training pattern on the sink */
	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
	/* set link bw and lanes on the sink */
	dp_set_link_bw_lanes(radeon_connector, link_configuration);
	/* disable downspread on the sink */
	dp_set_downspread(radeon_connector, 0);
	if (ASIC_IS_DCE4(rdev)) {
		/* start training on the source */
		atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
		/* set training pattern 1 on the source */
		atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1);
	} else {
		/* start training on the source */
		radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
					  dig_connector->dp_clock, enc_id, 0);
		/* set training pattern 1 on the source */
		radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
					  dig_connector->dp_clock, enc_id, 0);
	}

	/* set initial vs/emph */
	memset(train_set, 0, 4);
	udelay(400);
	/* set training pattern 1 on the sink */
	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1);

	dp_update_dpvs_emph(radeon_connector, encoder, train_set);

	/* clock recovery loop */
	clock_recovery = false;
	tries = 0;
	voltage = 0xff;
	for (;;) {
		udelay(100);
		if (!atom_dp_get_link_status(radeon_connector, link_status))
			break;

		if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) {
			clock_recovery = true;
			break;
		}

		for (i = 0; i < dig_connector->dp_lane_count; i++) {
			if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		}
		if (i == dig_connector->dp_lane_count) {
			DRM_ERROR("clock recovery reached max voltage\n");
			break;
		}

		if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++tries;
			if (tries == 5) {
				DRM_ERROR("clock recovery tried 5 times\n");
				break;
			}
		} else
			tries = 0;

		voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new train_set as requested by sink */
		dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
		dp_update_dpvs_emph(radeon_connector, encoder, train_set);
	}
	if (!clock_recovery)
		DRM_ERROR("clock recovery failed\n");
	else
		DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n",
			  train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			  (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			  DP_TRAIN_PRE_EMPHASIS_SHIFT);


	/* set training pattern 2 on the sink */
	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
	/* set training pattern 2 on the source */
	if (ASIC_IS_DCE4(rdev))
		atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2);
	else
		radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
					  dig_connector->dp_clock, enc_id, 1);

	/* channel equalization loop */
	tries = 0;
	channel_eq = false;
	for (;;) {
		udelay(400);
		if (!atom_dp_get_link_status(radeon_connector, link_status))
			break;

		if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times */
		if (tries > 5) {
			DRM_ERROR("channel eq failed: 5 tries\n");
			break;
		}

		/* Compute new train_set as requested by sink */
		dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
		dp_update_dpvs_emph(radeon_connector, encoder, train_set);

		tries++;
	}

	if (!channel_eq)
		DRM_ERROR("channel eq failed\n");
	else
		DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n",
			  train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			  (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);

	/* disable the training pattern on the sink */
	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);

	/* disable the training pattern on the source */
	if (ASIC_IS_DCE4(rdev))
		atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
	else
		radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
					  dig_connector->dp_clock, enc_id, 0);
}