/* Execute the DPEncoderService AtomBIOS command table for the given
 * action and return the status byte the table reports back.
 */
static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action,
				    int dp_clock, u8 ucconfig, u8 lane_num)
{
	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
	DP_ENCODER_SERVICE_PARAMETERS args;

	memset(&args, 0, sizeof(args));

	args.ucAction = action;
	args.ucConfig = ucconfig;
	args.ucLaneNum = lane_num;
	args.ucStatus = 0;
	/* the table expects the link clock in 10 kHz units */
	args.ucLinkClock = dp_clock / 10;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	return args.ucStatus;
}
void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; SET_CRTC_USING_DTD_TIMING_PARAMETERS args; int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming); u16 misc = 0; memset(&args, 0, sizeof(args)); args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (amdgpu_crtc->h_border * 2)); args.usH_Blanking_Time = cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (amdgpu_crtc->h_border * 2)); args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (amdgpu_crtc->v_border * 2)); args.usV_Blanking_Time = cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (amdgpu_crtc->v_border * 2)); args.usH_SyncOffset = cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + amdgpu_crtc->h_border); args.usH_SyncWidth = cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start); args.usV_SyncOffset = cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + amdgpu_crtc->v_border); args.usV_SyncWidth = cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); args.ucH_Border = amdgpu_crtc->h_border; args.ucV_Border = amdgpu_crtc->v_border; if (mode->flags & DRM_MODE_FLAG_NVSYNC) misc |= ATOM_VSYNC_POLARITY; if (mode->flags & DRM_MODE_FLAG_NHSYNC) misc |= ATOM_HSYNC_POLARITY; if (mode->flags & DRM_MODE_FLAG_CSYNC) misc |= ATOM_COMPOSITESYNC; if (mode->flags & DRM_MODE_FLAG_INTERLACE) misc |= ATOM_INTERLACE; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) misc |= ATOM_DOUBLE_CLOCK_MODE; args.susModeMiscInfo.usAccess = cpu_to_le16(misc); args.ucCRTC = amdgpu_crtc->crtc_id; amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); }
void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); SET_CRTC_OVERSCAN_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); int a1, a2; memset(&args, 0, sizeof(args)); args.ucCRTC = amdgpu_crtc->crtc_id; switch (amdgpu_crtc->rmx_type) { case RMX_CENTER: args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2); args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2); break; case RMX_ASPECT: a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; if (a1 > a2) { args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2); } else if (a2 > a1) { args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2); } break; case RMX_FULL: default: args.usOverscanRight = cpu_to_le16(amdgpu_crtc->h_border); args.usOverscanLeft = cpu_to_le16(amdgpu_crtc->h_border); args.usOverscanBottom = cpu_to_le16(amdgpu_crtc->v_border); args.usOverscanTop = cpu_to_le16(amdgpu_crtc->v_border); break; } amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args); }
/* on DCE5, make sure the voltage is high enough to support the
 * required disp clk.
 */
void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
					   u32 dispclk)
{
	union set_pixel_clock args;
	int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
	u8 frev, crev;

	memset(&args, 0, sizeof(args));

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index,
					  &frev, &crev))
		return;

	/* only frame revision 1 of SetPixelClock is handled here */
	if (frev != 1) {
		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
		return;
	}

	switch (crev) {
	case 5:
		/* if the default dcpll clock is specified,
		 * SetPixelClock provides the dividers
		 */
		args.v5.ucCRTC = ATOM_CRTC_INVALID;
		args.v5.usPixelClock = cpu_to_le16(dispclk);
		args.v5.ucPpll = ATOM_DCPLL;
		break;
	case 6:
		/* if the default dcpll clock is specified,
		 * SetPixelClock provides the dividers
		 */
		args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
		args.v6.ucPpll = ATOM_EXT_PLL1;
		break;
	default:
		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
		return;
	}

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
				  (uint32_t *)&args);
}
/*!	Blank or unblank the given CRTC through the BlankCRTC AtomBIOS
	command table; the blanked screen is painted black.
*/
void
display_crtc_blank(uint8 crtcID, int command)
{
	TRACE("%s\n", __func__);

	int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
	BLANK_CRTC_PS_ALLOCATION args;
	memset(&args, 0, sizeof(args));

	args.ucCRTC = crtcID;
	args.ucBlanking = command;

	// black blanking color (already zero from memset; kept explicit)
	args.usBlackColorRCr = 0;
	args.usBlackColorGY = 0;
	args.usBlackColorBCb = 0;

	atom_execute_table(gAtomContext, index, (uint32*)&args);
}
/* Run a hardware-assisted i2c transaction through the
 * ProcessI2cChannelTransaction AtomBIOS command table.
 *
 * Returns 0 on success or a negative errno.  Fix: the error paths
 * previously returned positive EINVAL/EIO, which callers using the
 * kernel's `ret < 0` convention would treat as success.
 */
static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
				 u8 slave_addr, u8 flags,
				 u8 *buf, u8 num)
{
	struct drm_device *dev = chan->dev;
	struct radeon_device *rdev = dev->dev_private;
	PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
	unsigned char *base;
	u16 out;

	memset(&args, 0, sizeof(args));

	/* read data is returned in the atom scratch buffer */
	base = (unsigned char *)rdev->mode_info.atom_context->scratch;

	if (flags & HW_I2C_WRITE) {
		if (num > ATOM_MAX_HW_I2C_WRITE) {
			DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
			return -EINVAL;
		}
		/* up to ATOM_MAX_HW_I2C_WRITE bytes packed into a u16 */
		memcpy(&out, buf, num);
		args.lpI2CDataOut = cpu_to_le16(out);
	}

	args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
	args.ucRegIndex = 0;
	args.ucTransBytes = num;
	args.ucSlaveAddr = slave_addr << 1;	/* 7-bit address -> bus format */
	args.ucLineNumber = chan->rec.i2c_id;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	/* error */
	if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
		DRM_DEBUG_KMS("hw_i2c error\n");
		return -EIO;
	}

	if (!(flags & HW_I2C_WRITE))
		memcpy(buf, base, num);

	return 0;
}
/*!	Look up PPLL spread spectrum parameters for the given ss id in the
	AtomBIOS PPLL_SS_Info data table and fill the matching pll_info
	fields.  Returns B_OK on a match, B_ERROR otherwise.
*/
status_t
pll_ppll_ss_probe(pll_info* pll, uint32 ssID)
{
	uint8 tableMajor;
	uint8 tableMinor;
	uint16 headerOffset;
	uint16 headerSize;

	int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
	if (atom_parse_data_header(gAtomContext, index, &headerSize,
		&tableMajor, &tableMinor, &headerOffset) != B_OK) {
		ERROR("%s: Couldn't parse data header\n", __func__);
		return B_ERROR;
	}

	// NOTE(review): the (uint16*) cast means the bios pointer is advanced
	// by headerOffset *16-bit units*, i.e. twice the byte offset — confirm
	// this matches the unit atom_parse_data_header reports headerOffset in.
	struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info
		= (struct _ATOM_SPREAD_SPECTRUM_INFO*)((uint16*)gAtomContext->bios
		+ headerOffset);

	// number of spread spectrum assignment entries after the table header
	int indices = (headerSize - sizeof(ATOM_COMMON_TABLE_HEADER))
		/ sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);

	int i;
	for (i = 0; i < indices; i++) {
		if (ss_info->asSS_Info[i].ucSS_Id == ssID) {
			// found the requested ss id; copy its parameters out
			pll->ssPercentage = B_LENDIAN_TO_HOST_INT16(
				ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
			pll->ssType = ss_info->asSS_Info[i].ucSpreadSpectrumType;
			pll->ssStep = ss_info->asSS_Info[i].ucSS_Step;
			pll->ssDelay = ss_info->asSS_Info[i].ucSS_Delay;
			pll->ssRange = ss_info->asSS_Info[i].ucSS_Range;
			pll->ssReferenceDiv
				= ss_info->asSS_Info[i].ucRecommendedRef_Div;
			return B_OK;
		}
	}

	// no table entry carries the requested ss id
	return B_ERROR;
}
/* Look up a GPIO pin by id in the GPIO_Pin_LUT AtomBIOS data table.
 * Returns a record with .valid = false when the id is not found or the
 * table header cannot be parsed.
 */
struct amdgpu_gpio_rec amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev, u8 id)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	struct amdgpu_gpio_rec gpio;
	int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
	struct _ATOM_GPIO_PIN_LUT *gpio_info;
	ATOM_GPIO_PIN_ASSIGNMENT *pin;
	u16 data_offset, size;
	int i, num_indices;

	memset(&gpio, 0, sizeof(struct amdgpu_gpio_rec));
	gpio.valid = false;

	if (!amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL,
					   &data_offset))
		return gpio;

	gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);

	/* entry count derived from the table size after the common header */
	num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
		sizeof(ATOM_GPIO_PIN_ASSIGNMENT);

	for (i = 0; i < num_indices; i++) {
		pin = &gpio_info->asGPIO_Pin[i];
		if (pin->ucGPIO_ID != id)
			continue;
		gpio.id = pin->ucGPIO_ID;
		gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex);
		gpio.shift = pin->ucGpioPinBitShift;
		gpio.mask = (1 << pin->ucGpioPinBitShift);
		gpio.valid = true;
		break;
	}

	return gpio;
}
/* Program a DCE clock through the SetDCEClock AtomBIOS command table.
 * Returns the actual frequency the table reports back (in 10 Hz units
 * times 10 -> original units), or 0 on failure / unknown table version.
 */
u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev, u32 freq,
				       u8 clk_type, u8 clk_src)
{
	union set_dce_clock args;
	int index = GetIndexIntoMasterTable(COMMAND, SetDCEClock);
	u32 ret_freq = 0;
	u8 frev, crev;

	memset(&args, 0, sizeof(args));

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index,
					  &frev, &crev))
		return 0;

	/* only table version 2.1 is supported */
	if (frev != 2 || crev != 1) {
		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
		return 0;
	}

	args.v2_1.asParam.ulDCEClkFreq = cpu_to_le32(freq); /* 10kHz units */
	args.v2_1.asParam.ucDCEClkType = clk_type;
	args.v2_1.asParam.ucDCEClkSrc = clk_src;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
				  (uint32_t *)&args);

	/* the table writes the granted frequency back into the args */
	ret_freq = le32_to_cpu(args.v2_1.asParam.ulDCEClkFreq) * 10;

	return ret_freq;
}
/* Look up an i2c bus record by gpio id in the GPIO_I2C_Info AtomBIOS
 * data table.  Returns a record with .valid = false when nothing
 * matches or the table header cannot be parsed.
 */
struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev,
							  uint8_t id)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	ATOM_GPIO_I2C_ASSIGMENT *gpio;
	struct amdgpu_i2c_bus_rec i2c;
	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
	struct _ATOM_GPIO_I2C_INFO *i2c_info;
	uint16_t data_offset, size;
	int i, num_indices;

	memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec));
	i2c.valid = false;

	if (!amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL,
					   &data_offset))
		return i2c;

	i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);

	/* entry count derived from the table size after the common header */
	num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
		sizeof(ATOM_GPIO_I2C_ASSIGMENT);

	for (i = 0; i < num_indices; i++) {
		gpio = &i2c_info->asGPIO_Info[i];

		/* board-specific fixups must run before the id compare */
		amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);

		if (gpio->sucI2cId.ucAccess == id) {
			i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
			break;
		}
	}

	return i2c;
}
/*
 * VRAM info.
 */
/* Read integrated-system clock/width info from the AtomBIOS
 * IntegratedSystemInfo data table (falling back to conservative
 * defaults) and derive the K8/HT/sideport bandwidth figures used by
 * the rs690 bandwidth watermark code.  All clock values are scaled
 * from 10 kHz units to MHz via division by 100 where applicable.
 */
void rs690_pm_info(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	struct _ATOM_INTEGRATED_SYSTEM_INFO *info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2;
	void *ptr;
	uint16_t data_offset;
	uint8_t frev, crev;
	fixed20_12 tmp;

	atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
			       &frev, &crev, &data_offset);
	ptr = rdev->mode_info.atom_context->bios + data_offset;
	info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr;
	info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr;
	/* Get various system informations from bios */
	switch (crev) {
	case 1:
		tmp.full = rfixed_const(100);
		rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock);
		rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
		/* NOTE(review): unlike case 2, system mclk and HT link clk are
		 * not divided by 100 here — confirm the v1 table already
		 * reports these fields in MHz. */
		rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock));
		rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock));
		rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth);
		break;
	case 2:
		tmp.full = rfixed_const(100);
		rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock);
		rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
		rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock);
		rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
		rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq);
		rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
		rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth));
		break;
	default:
		tmp.full = rfixed_const(100);
		/* We assume the slower possible clock ie worst case */
		/* DDR 333Mhz */
		rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
		/* FIXME: system clock ? */
		rdev->pm.igp_system_mclk.full = rfixed_const(100);
		rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
		rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
		rdev->pm.igp_ht_link_width.full = rfixed_const(8);
		DRM_ERROR("No integrated system info for your GPU, using safe default\n");
		break;
	}
	/* Compute various bandwidth */
	/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
	tmp.full = rfixed_const(4);
	rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
	/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
	 *              = ht_clk * ht_width / 5
	 */
	tmp.full = rfixed_const(5);
	rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
						rdev->pm.igp_ht_link_width);
	rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
	/* NOTE(review): tmp still holds the constant 5 here, so this compares
	 * 5 against max_bandwidth rather than the just-computed ht_bandwidth.
	 * Looks like it should test rdev->pm.ht_bandwidth.full — confirm
	 * against the upstream watermark code before changing. */
	if (tmp.full < rdev->pm.max_bandwidth.full) {
		/* HT link is a limiting factor */
		rdev->pm.max_bandwidth.full = tmp.full;
	}
	/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
	 *                    = (sideport_clk * 14) / 10
	 */
	tmp.full = rfixed_const(14);
	rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
	tmp.full = rfixed_const(10);
	rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
}
/* Run DisplayPort link training for the given encoder/connector pair.
 * Gathers encoder/connector state into a radeon_dp_link_train_info,
 * then walks the init -> clock-recovery -> channel-equalization steps.
 * A failure at any step jumps straight to the finish stage (the `done:`
 * label), so the link is always torn down/finished consistently.
 */
void radeon_dp_link_train(struct drm_encoder *encoder,
			  struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig;
	struct radeon_connector *radeon_connector;
	struct radeon_connector_atom_dig *dig_connector;
	struct radeon_dp_link_train_info dp_info;
	int index;
	u8 tmp, frev, crev;

	if (!radeon_encoder->enc_priv)
		return;
	dig = radeon_encoder->enc_priv;

	radeon_connector = to_radeon_connector(connector);
	if (!radeon_connector->con_priv)
		return;
	dig_connector = radeon_connector->con_priv;

	/* only train actual DP/eDP sinks */
	if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
	    (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
		return;

	/* DPEncoderService newer than 1.1 can't program properly the
	 * training pattern. When facing such version use the
	 * DIGXEncoderControl (X== 1 | 2)
	 */
	dp_info.use_dpencoder = true;
	index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
	if (atom_parse_cmd_header(rdev->mode_info.atom_context, index,
				  &frev, &crev)) {
		if (crev > 1) {
			dp_info.use_dpencoder = false;
		}
	}

	/* encoder id: which DIG block and which link half drive this sink */
	dp_info.enc_id = 0;
	if (dig->dig_encoder)
		dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
	else
		dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
	if (dig->linkb)
		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B;
	else
		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;

	/* TPS3 needs both a DCE5+ ASIC and a sink that advertises it */
	tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
	if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
		dp_info.tp3_supported = true;
	else
		dp_info.tp3_supported = false;

	memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
	dp_info.rdev = rdev;
	dp_info.encoder = encoder;
	dp_info.connector = connector;
	dp_info.radeon_connector = radeon_connector;
	dp_info.dp_lane_count = dig_connector->dp_lane_count;
	dp_info.dp_clock = dig_connector->dp_clock;

	if (radeon_dp_link_train_init(&dp_info))
		goto done;
	if (radeon_dp_link_train_cr(&dp_info))
		goto done;
	if (radeon_dp_link_train_ce(&dp_info))
		goto done;
done:
	/* finish always runs, whether training succeeded or bailed early */
	if (radeon_dp_link_train_finish(&dp_info))
		return;
}
/*!	Look up ASIC internal spread spectrum parameters for the given ss id
	in the AtomBIOS ASIC_InternalSS_Info data table (versions 1-3) and
	fill the matching pll_info fields.  Entries whose target clock range
	is below the pll's pixel clock are skipped.  Returns B_OK on a
	match, B_ERROR otherwise.
*/
status_t
pll_asic_ss_probe(pll_info* pll, uint32 ssID)
{
	uint8 tableMajor;
	uint8 tableMinor;
	uint16 headerOffset;
	uint16 headerSize;

	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	if (atom_parse_data_header(gAtomContext, index, &headerSize,
		&tableMajor, &tableMinor, &headerOffset) != B_OK) {
		ERROR("%s: Couldn't parse data header\n", __func__);
		return B_ERROR;
	}

	// the three table layouts share one location; tableMajor selects one
	union asicSSInfo {
		struct _ATOM_ASIC_INTERNAL_SS_INFO info;
		struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
		struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
	};

	// NOTE(review): the (uint16*) cast means the bios pointer is advanced
	// by headerOffset *16-bit units*, i.e. twice the byte offset — confirm
	// this matches the unit atom_parse_data_header reports headerOffset in.
	union asicSSInfo *ss_info
		= (union asicSSInfo*)((uint16*)gAtomContext->bios + headerOffset);

	int i;
	int indices;
	switch (tableMajor) {
		case 1:
			indices = (headerSize - sizeof(ATOM_COMMON_TABLE_HEADER))
				/ sizeof(ATOM_ASIC_SS_ASSIGNMENT);

			for (i = 0; i < indices; i++) {
				if (ss_info->info.asSpreadSpectrum[i].ucClockIndication
					!= ssID) {
					continue;
				}
				TRACE("%s: ss match found\n", __func__);
				// pixelClock is in kHz, target range in 10 kHz units
				if (pll->pixelClock * 10 > B_LENDIAN_TO_HOST_INT32(
					ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) {
					TRACE("%s: pixelClock > targetClockRange!\n", __func__);
					continue;
				}

				pll->ssPercentage = B_LENDIAN_TO_HOST_INT16(
					ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
				pll->ssType
					= ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
				pll->ssRate = B_LENDIAN_TO_HOST_INT16(
					ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
				return B_OK;
			}
			break;
		case 2:
			indices = (headerSize - sizeof(ATOM_COMMON_TABLE_HEADER))
				/ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);

			for (i = 0; i < indices; i++) {
				if (ss_info->info_2.asSpreadSpectrum[i].ucClockIndication
					!= ssID) {
					continue;
				}
				TRACE("%s: ss match found\n", __func__);
				if (pll->pixelClock * 10 > B_LENDIAN_TO_HOST_INT32(
					ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) {
					TRACE("%s: pixelClock > targetClockRange!\n", __func__);
					continue;
				}

				pll->ssPercentage = B_LENDIAN_TO_HOST_INT16(
					ss_info
					->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
				pll->ssType
					= ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
				// v2+ reports the rate in 10 Hz units, not kHz
				pll->ssRate = B_LENDIAN_TO_HOST_INT16(
					ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
				return B_OK;
			}
			break;
		case 3:
			indices = (headerSize - sizeof(ATOM_COMMON_TABLE_HEADER))
				/ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);

			for (i = 0; i < indices; i++) {
				if (ss_info->info_3.asSpreadSpectrum[i].ucClockIndication
					!= ssID) {
					continue;
				}
				TRACE("%s: ss match found\n", __func__);
				if (pll->pixelClock * 10 > B_LENDIAN_TO_HOST_INT32(
					ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) {
					TRACE("%s: pixelClock > targetClockRange!\n", __func__);
					continue;
				}

				pll->ssPercentage = B_LENDIAN_TO_HOST_INT16(
					ss_info
					->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
				pll->ssType
					= ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
				pll->ssRate = B_LENDIAN_TO_HOST_INT16(
					ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
				return B_OK;
			}
			break;
		default:
			ERROR("%s: Unknown SS table version!\n", __func__);
			return B_ERROR;
	}

	ERROR("%s: No potential spread spectrum data found!\n", __func__);
	return B_ERROR;
}
/* Run a DP AUX channel transaction through the
 * ProcessAuxChannelTransaction AtomBIOS command table.
 * The request is staged in the atom scratch buffer; replies come back
 * at offset 16.  Returns the number of bytes received on success or a
 * negative errno; the raw reply status is stored in *ack either way.
 */
static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
				 u8 *send, int send_bytes,
				 u8 *recv, int recv_size,
				 u8 delay, u8 *ack)
{
	struct drm_device *dev = chan->dev;
	struct radeon_device *rdev = dev->dev_private;
	union aux_channel_transaction args;
	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
	unsigned char *base;
	int recv_bytes;
	int r = 0;

	memset(&args, 0, sizeof(args));

	mutex_lock(&chan->mutex);

	base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);

	/* copy the request into scratch, byte-swapping as needed */
	radeon_atom_copy_swap(base, send, send_bytes, true);

	args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
	args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
	args.v1.ucDataOutLen = 0;
	args.v1.ucChannelID = chan->rec.i2c_id;
	args.v1.ucDelay = delay / 10;
	if (ASIC_IS_DCE4(rdev))
		args.v2.ucHPD_ID = chan->rec.hpd;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	*ack = args.v1.ucReplyStatus;

	switch (args.v1.ucReplyStatus) {
	case 1:	/* timeout */
		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
		r = -ETIMEDOUT;
		goto done;
	case 2:	/* flags not zero */
		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
		r = -EIO;
		goto done;
	case 3:	/* error */
		DRM_DEBUG_KMS("dp_aux_ch error\n");
		r = -EIO;
		goto done;
	}

	/* clamp the reply to the caller's buffer */
	recv_bytes = args.v1.ucDataOutLen;
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	if (recv && recv_size)
		radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);

	r = recv_bytes;
done:
	mutex_unlock(&chan->mutex);
	return r;
}
/* Program a display PLL through the SetPixelClock AtomBIOS command
 * table.  Builds the version-specific argument struct (v1/v2/v3/v5/v6,
 * selected by the table's frev/crev) from the precomputed dividers and
 * executes the table.  `clock` is in 10 kHz units after the /10 below;
 * bpc only matters for HDMI deep color on v5/v6.
 */
void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
				      u32 crtc_id,
				      int pll_id,
				      u32 encoder_mode,
				      u32 encoder_id,
				      u32 clock,
				      u32 ref_div,
				      u32 fb_div,
				      u32 frac_fb_div,
				      u32 post_div,
				      int bpc,
				      bool ss_enabled,
				      struct amdgpu_atom_ss *ss)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u8 frev, crev;
	int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
	union set_pixel_clock args;

	memset(&args, 0, sizeof(args));

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index,
					  &frev, &crev))
		return;

	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
			if (clock == ATOM_DISABLE)
				return;
			args.v1.usPixelClock = cpu_to_le16(clock / 10);
			args.v1.usRefDiv = cpu_to_le16(ref_div);
			args.v1.usFbDiv = cpu_to_le16(fb_div);
			args.v1.ucFracFbDiv = frac_fb_div;
			args.v1.ucPostDiv = post_div;
			args.v1.ucPpll = pll_id;
			args.v1.ucCRTC = crtc_id;
			args.v1.ucRefDivSrc = 1;
			break;
		case 2:
			args.v2.usPixelClock = cpu_to_le16(clock / 10);
			args.v2.usRefDiv = cpu_to_le16(ref_div);
			args.v2.usFbDiv = cpu_to_le16(fb_div);
			args.v2.ucFracFbDiv = frac_fb_div;
			args.v2.ucPostDiv = post_div;
			args.v2.ucPpll = pll_id;
			args.v2.ucCRTC = crtc_id;
			args.v2.ucRefDivSrc = 1;
			break;
		case 3:
			args.v3.usPixelClock = cpu_to_le16(clock / 10);
			args.v3.usRefDiv = cpu_to_le16(ref_div);
			args.v3.usFbDiv = cpu_to_le16(fb_div);
			args.v3.ucFracFbDiv = frac_fb_div;
			args.v3.ucPostDiv = post_div;
			args.v3.ucPpll = pll_id;
			/* v3 selects the CRTC via a misc-info bit */
			if (crtc_id == ATOM_CRTC2)
				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
			else
				args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
			if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
				args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
			args.v3.ucTransmitterId = encoder_id;
			args.v3.ucEncoderMode = encoder_mode;
			break;
		case 5:
			args.v5.ucCRTC = crtc_id;
			args.v5.usPixelClock = cpu_to_le16(clock / 10);
			args.v5.ucRefDiv = ref_div;
			args.v5.usFbDiv = cpu_to_le16(fb_div);
			/* fractional divider expressed as decimal fraction */
			args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
			args.v5.ucPostDiv = post_div;
			args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
			if ((ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) &&
			    (pll_id < ATOM_EXT_PLL1))
				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
				switch (bpc) {
				case 8:
				default:
					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
					break;
				case 10:
					/* yes this is correct, the atom define is wrong */
					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
					break;
				case 12:
					/* yes this is correct, the atom define is wrong */
					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
					break;
				}
			}
			args.v5.ucTransmitterID = encoder_id;
			args.v5.ucEncoderMode = encoder_mode;
			args.v5.ucPpll = pll_id;
			break;
		case 6:
			/* v6 packs the CRTC id into the top byte of the clock */
			args.v6.ulDispEngClkFreq =
				cpu_to_le32(crtc_id << 24 | clock / 10);
			args.v6.ucRefDiv = ref_div;
			args.v6.usFbDiv = cpu_to_le16(fb_div);
			args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
			args.v6.ucPostDiv = post_div;
			args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
			if ((ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) &&
			    (pll_id < ATOM_EXT_PLL1) &&
			    !is_pixel_clock_source_from_pll(encoder_mode, pll_id))
				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
			if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
				switch (bpc) {
				case 8:
				default:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
					break;
				case 10:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
					break;
				case 12:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
					break;
				case 16:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
					break;
				}
			}
			args.v6.ucTransmitterID = encoder_id;
			args.v6.ucEncoderMode = encoder_mode;
			args.v6.ucPpll = pll_id;
			break;
		default:
			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
			return;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
		return;
	}

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
				  (uint32_t *)&args);
}
/*!	Ask the AtomBIOS AdjustDisplayPll command table (DCE 3+) whether the
	requested pixel clock needs adjusting for the target encoder, and
	store the result in pll->adjustedClock (kHz).  The v3 table may also
	hand back fixed reference/post dividers, which are recorded via
	pll->flags.  On pre-DCE3 hardware the clock is used unmodified.
*/
status_t
pll_adjust(pll_info* pll, display_mode* mode, uint8 crtcID)
{
	radeon_shared_info &info = *gInfo->shared_info;

	uint32 pixelClock = pll->pixelClock;
		// original as pixel_clock will be adjusted

	uint32 connectorIndex = gDisplay[crtcID]->connectorIndex;
	connector_info* connector = gConnector[connectorIndex];

	uint32 encoderID = connector->encoder.objectID;
	uint32 encoderMode = display_get_encoder_mode(connectorIndex);
	uint32 connectorFlags = connector->flags;

	uint32 externalEncoderID = 0;
	pll->adjustedClock = pll->pixelClock;
	if (connector->encoderExternal.isDPBridge)
		externalEncoderID = connector->encoderExternal.objectID;

	if (info.dceMajor >= 3) {
		uint8 tableMajor;
		uint8 tableMinor;

		int index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
		if (atom_parse_cmd_header(gAtomContext, index, &tableMajor,
			&tableMinor) != B_OK) {
			ERROR("%s: Couldn't find AtomBIOS PLL adjustment\n", __func__);
			return B_ERROR;
		}

		TRACE("%s: table %" B_PRIu8 ".%" B_PRIu8 "\n", __func__,
			tableMajor, tableMinor);

		// Prepare arguments for AtomBIOS call
		union adjustPixelClock {
			ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
			ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
		};
		union adjustPixelClock args;
		memset(&args, 0, sizeof(args));

		switch (tableMajor) {
			case 1:
				switch (tableMinor) {
					case 1:
					case 2:
						// v1/v2: clock in 10 kHz units in, adjusted value out
						args.v1.usPixelClock
							= B_HOST_TO_LENDIAN_INT16(pixelClock / 10);
						args.v1.ucTransmitterID = encoderID;
						args.v1.ucEncodeMode = encoderMode;
						if (pll->ssPercentage > 0) {
							args.v1.ucConfig
								|= ADJUST_DISPLAY_CONFIG_SS_ENABLE;
						}

						atom_execute_table(gAtomContext, index, (uint32*)&args);
						// get returned adjusted clock
						pll->adjustedClock
							= B_LENDIAN_TO_HOST_INT16(args.v1.usPixelClock);
						pll->adjustedClock *= 10;
						break;
					case 3:
						args.v3.sInput.usPixelClock
							= B_HOST_TO_LENDIAN_INT16(pixelClock / 10);
						args.v3.sInput.ucTransmitterID = encoderID;
						args.v3.sInput.ucEncodeMode = encoderMode;
						args.v3.sInput.ucDispPllConfig = 0;
						if (pll->ssPercentage > 0) {
							args.v3.sInput.ucDispPllConfig
								|= DISPPLL_CONFIG_SS_ENABLE;
						}

						// Handle DP adjustments
						if (encoderMode == ATOM_ENCODER_MODE_DP
							|| encoderMode == ATOM_ENCODER_MODE_DP_MST) {
							TRACE("%s: encoderMode is DP\n", __func__);
							args.v3.sInput.ucDispPllConfig
								|= DISPPLL_CONFIG_COHERENT_MODE;
							/* 162000 or 270000 */
							uint32 dpLinkSpeed
								= dp_get_link_rate(connectorIndex, mode);
							/* 16200 or 27000 */
							// DP drives the PLL at the link rate, not the
							// pixel clock
							args.v3.sInput.usPixelClock
								= B_HOST_TO_LENDIAN_INT16(dpLinkSpeed / 10);
						} else if ((connectorFlags & ATOM_DEVICE_DFP_SUPPORT)
							!= 0) {
							#if 0
							if (encoderMode == ATOM_ENCODER_MODE_HDMI) {
								/* deep color support */
								args.v3.sInput.usPixelClock =
									cpu_to_le16((mode->clock * bpc / 8) / 10);
							}
							#endif
							// dual-link threshold for TMDS
							if (pixelClock > 165000) {
								args.v3.sInput.ucDispPllConfig
									|= DISPPLL_CONFIG_DUAL_LINK;
							}
							if (1) {	// dig coherent mode?
								args.v3.sInput.ucDispPllConfig
									|= DISPPLL_CONFIG_COHERENT_MODE;
							}
						}

						args.v3.sInput.ucExtTransmitterID = externalEncoderID;

						atom_execute_table(gAtomContext, index, (uint32*)&args);
						// get returned adjusted clock
						pll->adjustedClock = B_LENDIAN_TO_HOST_INT32(
							args.v3.sOutput.ulDispPllFreq);
						pll->adjustedClock *= 10;
							// convert to kHz for storage

						// the table may dictate the dividers to use
						if (args.v3.sOutput.ucRefDiv) {
							pll->flags |= PLL_USE_FRAC_FB_DIV;
							pll->flags |= PLL_USE_REF_DIV;
							pll->referenceDiv = args.v3.sOutput.ucRefDiv;
						}
						if (args.v3.sOutput.ucPostDiv) {
							pll->flags |= PLL_USE_FRAC_FB_DIV;
							pll->flags |= PLL_USE_POST_DIV;
							pll->postDiv = args.v3.sOutput.ucPostDiv;
						}
						break;
					default:
						TRACE("%s: ERROR: table version %" B_PRIu8 ".%" B_PRIu8
							" unknown\n", __func__, tableMajor, tableMinor);
						return B_ERROR;
				}
				break;
			default:
				TRACE("%s: ERROR: table version %" B_PRIu8 ".%" B_PRIu8
					" unknown\n", __func__, tableMajor, tableMinor);
				return B_ERROR;
		}
	}

	TRACE("%s: was: %" B_PRIu32 ", now: %" B_PRIu32 "\n", __func__,
		pixelClock, pll->adjustedClock);

	return B_OK;
}
/*!	Program the external PLL (display engineering clock) via the
	SetPixelClock AtomBIOS command table.

	Fix: previously the argument block was prepared but
	atom_execute_table() was never called, so the clock was left
	unprogrammed; unknown table versions also fell through to B_OK.
*/
status_t
pll_external_set(uint32 clock)
{
	TRACE("%s: set external pll clock to %" B_PRIu32 "\n", __func__, clock);

	if (clock == 0)
		ERROR("%s: Warning: default display clock is 0?\n", __func__);

	// also known as PLL display engineering
	uint8 tableMajor;
	uint8 tableMinor;

	int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
	atom_parse_cmd_header(gAtomContext, index, &tableMajor, &tableMinor);

	TRACE("%s: table %" B_PRIu8 ".%" B_PRIu8 "\n", __func__,
		tableMajor, tableMinor);

	union setPixelClock {
		SET_PIXEL_CLOCK_PS_ALLOCATION base;
		PIXEL_CLOCK_PARAMETERS v1;
		PIXEL_CLOCK_PARAMETERS_V2 v2;
		PIXEL_CLOCK_PARAMETERS_V3 v3;
		PIXEL_CLOCK_PARAMETERS_V5 v5;
		PIXEL_CLOCK_PARAMETERS_V6 v6;
	};
	union setPixelClock args;
	memset(&args, 0, sizeof(args));

	radeon_shared_info &info = *gInfo->shared_info;
	uint32 dceVersion = (info.dceMajor * 100) + info.dceMinor;

	switch (tableMajor) {
		case 1:
			switch (tableMinor) {
				case 5:
					// If the default DC PLL clock is specified,
					// SetPixelClock provides the dividers.
					args.v5.ucCRTC = ATOM_CRTC_INVALID;
					args.v5.usPixelClock
						= B_HOST_TO_LENDIAN_INT16(clock / 10);
					args.v5.ucPpll = ATOM_DCPLL;
					break;
				case 6:
					// If the default DC PLL clock is specified,
					// SetPixelClock provides the dividers.
					args.v6.ulDispEngClkFreq
						= B_HOST_TO_LENDIAN_INT32(clock / 10);
					// which PLL backs the display engine depends on DCE rev
					if (dceVersion == 601)
						args.v6.ucPpll = ATOM_EXT_PLL1;
					else if (dceVersion >= 600)
						args.v6.ucPpll = ATOM_PPLL0;
					else
						args.v6.ucPpll = ATOM_DCPLL;
					break;
				default:
					ERROR("%s: Unknown table version %" B_PRIu8
						".%" B_PRIu8 "\n", __func__, tableMajor, tableMinor);
					return B_ERROR;
			}
			break;
		default:
			ERROR("%s: Unknown table version %" B_PRIu8 ".%" B_PRIu8 "\n",
				__func__, tableMajor, tableMinor);
			return B_ERROR;
	}

	// actually submit the prepared arguments to AtomBIOS
	return atom_execute_table(gAtomContext, index, (uint32*)&args);
}
/*!	Program the pixel clock PLL for \a mode on CRTC \a crtcID.
	Probes spread-spectrum parameters for the connector's current encoder
	mode, adjusts the requested clock, computes the dividers, and programs
	them through the AtomBIOS SetPixelClock command table (minor rev 1-6).
	Spread spectrum is disabled while the clock is reprogrammed and
	re-enabled afterwards when a probe succeeded.
*/
status_t
pll_set(display_mode* mode, uint8 crtcID)
{
	uint32 connectorIndex = gDisplay[crtcID]->connectorIndex;
	pll_info* pll = &gConnector[connectorIndex]->encoder.pll;
	uint32 dp_clock = gConnector[connectorIndex]->dpInfo.linkRate;
	bool ssEnabled = false;

	pll->pixelClock = mode->timing.pixel_clock;

	radeon_shared_info &info = *gInfo->shared_info;

	// Probe for PLL spread spectrum info;
	pll->ssPercentage = 0;
	pll->ssType = 0;
	pll->ssStep = 0;
	pll->ssDelay = 0;
	pll->ssRange = 0;
	pll->ssReferenceDiv = 0;

	switch (display_get_encoder_mode(connectorIndex)) {
		case ATOM_ENCODER_MODE_DP_MST:
		case ATOM_ENCODER_MODE_DP:
			if (info.dceMajor >= 4) {
				// Bug fix: capture the probe result so spread spectrum is
				// re-enabled below (every sibling branch in this switch
				// assigns ssEnabled; this one previously dropped it).
				ssEnabled = pll_asic_ss_probe(pll, ASIC_INTERNAL_SS_ON_DP);
			} else {
				if (dp_clock == 162000) {
					ssEnabled = pll_ppll_ss_probe(pll, ATOM_DP_SS_ID2);
					if (!ssEnabled)
						// id2 failed, try id1
						ssEnabled = pll_ppll_ss_probe(pll, ATOM_DP_SS_ID1);
				} else
					ssEnabled = pll_ppll_ss_probe(pll, ATOM_DP_SS_ID1);
			}
			break;
		case ATOM_ENCODER_MODE_LVDS:
			if (info.dceMajor >= 4) {
				ssEnabled
					= pll_asic_ss_probe(pll, gInfo->lvdsSpreadSpectrumID);
			} else {
				ssEnabled
					= pll_ppll_ss_probe(pll, gInfo->lvdsSpreadSpectrumID);
			}
			break;
		case ATOM_ENCODER_MODE_DVI:
			if (info.dceMajor >= 4)
				ssEnabled = pll_asic_ss_probe(pll, ASIC_INTERNAL_SS_ON_TMDS);
			break;
		case ATOM_ENCODER_MODE_HDMI:
			if (info.dceMajor >= 4)
				ssEnabled = pll_asic_ss_probe(pll, ASIC_INTERNAL_SS_ON_HDMI);
			break;
	}

	pll_setup_flags(pll, crtcID);
		// set up any special flags
	pll_adjust(pll, mode, crtcID);
		// get any needed clock adjustments, set reference/post dividers
		// NOTE(review): this overload takes the mode; another pll_adjust
		// in this tree takes only (pll, crtcID) — confirm the right one
		// is linked in.
	pll_compute(pll);
		// compute dividers

	display_crtc_ss(pll, ATOM_DISABLE);
		// disable ss while the clock is reprogrammed

	uint8 tableMajor;
	uint8 tableMinor;

	int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
	atom_parse_cmd_header(gAtomContext, index, &tableMajor, &tableMinor);

	TRACE("%s: table %" B_PRIu8 ".%" B_PRIu8 "\n", __func__,
		tableMajor, tableMinor);

	uint32 bitsPerColor = 8;
		// TODO: Digital Depth, EDID 1.4+ on digital displays
		// isn't in Haiku edid common code?

	// Prepare arguments for AtomBIOS call
	union setPixelClock {
		SET_PIXEL_CLOCK_PS_ALLOCATION base;
		PIXEL_CLOCK_PARAMETERS v1;
		PIXEL_CLOCK_PARAMETERS_V2 v2;
		PIXEL_CLOCK_PARAMETERS_V3 v3;
		PIXEL_CLOCK_PARAMETERS_V5 v5;
		PIXEL_CLOCK_PARAMETERS_V6 v6;
	};
	union setPixelClock args;
	memset(&args, 0, sizeof(args));

	switch (tableMinor) {
		case 1:
			args.v1.usPixelClock
				= B_HOST_TO_LENDIAN_INT16(pll->pixelClock / 10);
			args.v1.usRefDiv = B_HOST_TO_LENDIAN_INT16(pll->referenceDiv);
			args.v1.usFbDiv = B_HOST_TO_LENDIAN_INT16(pll->feedbackDiv);
			args.v1.ucFracFbDiv = pll->feedbackDivFrac;
			args.v1.ucPostDiv = pll->postDiv;
			args.v1.ucPpll = pll->id;
			args.v1.ucCRTC = crtcID;
			args.v1.ucRefDivSrc = 1;
			break;
		case 2:
			args.v2.usPixelClock
				= B_HOST_TO_LENDIAN_INT16(pll->pixelClock / 10);
			args.v2.usRefDiv = B_HOST_TO_LENDIAN_INT16(pll->referenceDiv);
			args.v2.usFbDiv = B_HOST_TO_LENDIAN_INT16(pll->feedbackDiv);
			args.v2.ucFracFbDiv = pll->feedbackDivFrac;
			args.v2.ucPostDiv = pll->postDiv;
			args.v2.ucPpll = pll->id;
			args.v2.ucCRTC = crtcID;
			args.v2.ucRefDivSrc = 1;
			break;
		case 3:
			args.v3.usPixelClock
				= B_HOST_TO_LENDIAN_INT16(pll->pixelClock / 10);
			args.v3.usRefDiv = B_HOST_TO_LENDIAN_INT16(pll->referenceDiv);
			args.v3.usFbDiv = B_HOST_TO_LENDIAN_INT16(pll->feedbackDiv);
			args.v3.ucFracFbDiv = pll->feedbackDivFrac;
			args.v3.ucPostDiv = pll->postDiv;
			args.v3.ucPpll = pll->id;
			// PLL id is encoded in the upper bits of ucMiscInfo
			args.v3.ucMiscInfo = (pll->id << 2);
			if (pll->ssPercentage > 0
				&& (pll->ssType & ATOM_EXTERNAL_SS_MASK) != 0) {
				args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
			}
			args.v3.ucTransmitterId
				= gConnector[connectorIndex]->encoder.objectID;
			args.v3.ucEncoderMode = display_get_encoder_mode(connectorIndex);
			break;
		case 5:
			args.v5.ucCRTC = crtcID;
			args.v5.usPixelClock
				= B_HOST_TO_LENDIAN_INT16(pll->pixelClock / 10);
			args.v5.ucRefDiv = pll->referenceDiv;
			args.v5.usFbDiv = B_HOST_TO_LENDIAN_INT16(pll->feedbackDiv);
			args.v5.ulFbDivDecFrac
				= B_HOST_TO_LENDIAN_INT32(pll->feedbackDivFrac * 100000);
			args.v5.ucPostDiv = pll->postDiv;
			args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
			if (pll->ssPercentage > 0
				&& (pll->ssType & ATOM_EXTERNAL_SS_MASK) != 0) {
				args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
			}
			// HDMI deep color: v5 only knows 24 and 30 bpp
			switch (bitsPerColor) {
				case 8:
				default:
					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
					break;
				case 10:
					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
					break;
			}
			args.v5.ucTransmitterID
				= gConnector[connectorIndex]->encoder.objectID;
			args.v5.ucEncoderMode = display_get_encoder_mode(connectorIndex);
			args.v5.ucPpll = pll->id;
			break;
		case 6:
			// v6 packs the CRTC id into the top byte of the clock field
			args.v6.ulDispEngClkFreq
				= B_HOST_TO_LENDIAN_INT32(crtcID << 24
					| pll->pixelClock / 10);
			args.v6.ucRefDiv = pll->referenceDiv;
			args.v6.usFbDiv = B_HOST_TO_LENDIAN_INT16(pll->feedbackDiv);
			args.v6.ulFbDivDecFrac
				= B_HOST_TO_LENDIAN_INT32(pll->feedbackDivFrac * 100000);
			args.v6.ucPostDiv = pll->postDiv;
			args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
			if (pll->ssPercentage > 0
				&& (pll->ssType & ATOM_EXTERNAL_SS_MASK) != 0) {
				args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
			}
			// HDMI deep color: v6 knows 24/30/36/48 bpp
			switch (bitsPerColor) {
				case 8:
				default:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
					break;
				case 10:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
					break;
				case 12:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
					break;
				case 16:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
					break;
			}
			args.v6.ucTransmitterID
				= gConnector[connectorIndex]->encoder.objectID;
			args.v6.ucEncoderMode = display_get_encoder_mode(connectorIndex);
			args.v6.ucPpll = pll->id;
			break;
		default:
			TRACE("%s: ERROR: table version %" B_PRIu8 ".%" B_PRIu8 " TODO\n",
				__func__, tableMajor, tableMinor);
			return B_ERROR;
	}

	TRACE("%s: set adjusted pixel clock %" B_PRIu32 " (was %" B_PRIu32 ")\n",
		__func__, pll->pixelClock, mode->timing.pixel_clock);

	status_t result = atom_execute_table(gAtomContext, index, (uint32*)&args);

	// Re-enable spread spectrum now that the new clock is programmed.
	if (ssEnabled)
		display_crtc_ss(pll, ATOM_ENABLE);

	return result;
}
/*!	Ask AtomBIOS (AdjustDisplayPll, on R600-class "+0x20" and newer
	chipsets) whether the requested pixel clock needs tweaking for the
	encoder/transmitter in use, and store the adjusted clock back into
	\a pll->pixelClock. Table revision 1.3 may additionally dictate
	reference and post dividers, which are recorded via pll->flags.
	Returns B_OK, or B_ERROR if the command table header can't be parsed
	or the table revision is unknown.
*/
status_t
pll_adjust(pll_info *pll, uint8 crtcID)
{
	// TODO: PLL flags
	radeon_shared_info &info = *gInfo->shared_info;

	uint32 pixelClock = pll->pixelClock;
		// original as pixel_clock will be adjusted

	uint32 connectorIndex = gDisplay[crtcID]->connectorIndex;
	uint32 encoderID = gConnector[connectorIndex]->encoder.objectID;
	uint32 encoderMode = display_get_encoder_mode(connectorIndex);

	// Older chipsets have no AdjustDisplayPll table; the clock is used as-is.
	if (info.device_chipset >= (RADEON_R600 | 0x20)) {
		union adjust_pixel_clock args;

		uint8 tableMajor;
		uint8 tableMinor;

		int index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
		if (atom_parse_cmd_header(gAtomContext, index, &tableMajor,
			&tableMinor) != B_OK) {
			return B_ERROR;
		}

		memset(&args, 0, sizeof(args));
		switch (tableMajor) {
			case 1:
				switch (tableMinor) {
					case 1:
					case 2:
						// v1/v2: AtomBIOS rewrites usPixelClock in place
						args.v1.usPixelClock
							= B_HOST_TO_LENDIAN_INT16(pixelClock / 10);
						args.v1.ucTransmitterID = encoderID;
						args.v1.ucEncodeMode = encoderMode;
						// TODO: SS and SS % > 0
						if (0) {
							args.v1.ucConfig
								|= ADJUST_DISPLAY_CONFIG_SS_ENABLE;
						}

						atom_execute_table(gAtomContext, index,
							(uint32*)&args);
						// get returned adjusted clock
						pll->pixelClock
							= B_LENDIAN_TO_HOST_INT16(args.v1.usPixelClock);
						pll->pixelClock *= 10;
							// table works in 10 kHz units; back to kHz
						break;
					case 3:
						// v3: separate input/output structs; output may also
						// force reference/post dividers
						args.v3.sInput.usPixelClock
							= B_HOST_TO_LENDIAN_INT16(pixelClock / 10);
						args.v3.sInput.ucTransmitterID = encoderID;
						args.v3.sInput.ucEncodeMode = encoderMode;
						args.v3.sInput.ucDispPllConfig = 0;
						// TODO: SS and SS % > 0
						if (0) {
							args.v3.sInput.ucDispPllConfig
								|= DISPPLL_CONFIG_SS_ENABLE;
						}
						// TODO: if ATOM_DEVICE_DFP_SUPPORT
						// TODO: display port DP
						// TODO: is DP?
						args.v3.sInput.ucExtTransmitterID = 0;

						atom_execute_table(gAtomContext, index,
							(uint32*)&args);
						// get returned adjusted clock
						pll->pixelClock = B_LENDIAN_TO_HOST_INT32(
							args.v3.sOutput.ulDispPllFreq);
						pll->pixelClock *= 10;
							// convert to kHz for storage

						if (args.v3.sOutput.ucRefDiv) {
							// BIOS mandated a reference divider
							pll->flags |= PLL_USE_FRAC_FB_DIV;
							pll->flags |= PLL_USE_REF_DIV;
							pll->referenceDiv = args.v3.sOutput.ucRefDiv;
						}
						if (args.v3.sOutput.ucPostDiv) {
							// BIOS mandated a post divider
							pll->flags |= PLL_USE_FRAC_FB_DIV;
							pll->flags |= PLL_USE_POST_DIV;
							pll->postDiv = args.v3.sOutput.ucPostDiv;
						}
						break;
					default:
						TRACE("%s: ERROR: table version %" B_PRIu8 ".%" B_PRIu8
							" unknown\n", __func__, tableMajor, tableMinor);
						return B_ERROR;
				}
				break;
			default:
				TRACE("%s: ERROR: table version %" B_PRIu8 ".%" B_PRIu8
					" unknown\n", __func__, tableMajor, tableMinor);
				return B_ERROR;
		}
	}

	TRACE("%s: was: %" B_PRIu32 ", now: %" B_PRIu32 "\n", __func__,
		pixelClock, pll->pixelClock);

	return B_OK;
}
/*
 * Ask the AtomBIOS AdjustDisplayPll command table for the pixel clock
 * (in 10 kHz units) to actually program for @mode on @crtc, working
 * around encoder/transmitter-specific hardware requirements.
 * Also primes amdgpu_crtc->pll_flags (and, for table rev 1.3, the
 * BIOS-mandated reference/post dividers).  Returns the adjusted clock;
 * on any table-parse failure the locally pre-adjusted clock is returned.
 */
static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
					   struct drm_display_mode *mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_encoder *encoder = amdgpu_crtc->encoder;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u32 adjusted_clock = mode->clock;
	int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
	u32 dp_clock = mode->clock;
	u32 clock = mode->clock;
	int bpc = amdgpu_crtc->bpc;
	bool is_duallink = amdgpu_dig_monitor_is_duallink(encoder, mode->clock);
	union adjust_pixel_clock args;
	u8 frev, crev;
	int index;

	amdgpu_crtc->pll_flags = AMDGPU_PLL_USE_FRAC_FB_DIV;

	/* for DP-capable paths, fetch the actual link clock from the connector */
	if ((amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT |
					ATOM_DEVICE_DFP_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		if (connector) {
			struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
			struct amdgpu_connector_atom_dig *dig_connector =
				amdgpu_connector->con_priv;

			dp_clock = dig_connector->dp_clock;
		}
	}

	/* use recommended ref_div for ss */
	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
		if (amdgpu_crtc->ss_enabled) {
			if (amdgpu_crtc->ss.refdiv) {
				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_REF_DIV;
				amdgpu_crtc->pll_reference_div = amdgpu_crtc->ss.refdiv;
				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
			}
		}
	}

	/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
	if (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
		adjusted_clock = mode->clock * 2;
	if (amdgpu_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
		amdgpu_crtc->pll_flags |= AMDGPU_PLL_PREFER_CLOSEST_LOWER;
	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_crtc->pll_flags |= AMDGPU_PLL_IS_LCD;

	/* adjust pll for deep color modes */
	if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
		switch (bpc) {
		case 8:
		default:
			break;
		case 10:
			clock = (clock * 5) / 4; /* 1.25x for 30bpp */
			break;
		case 12:
			clock = (clock * 3) / 2; /* 1.5x for 36bpp */
			break;
		case 16:
			clock = clock * 2; /* 2x for 48bpp */
			break;
		}
	}

	/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
	 * accordingly based on the encoder/transmitter to work around
	 * special hw requirements.
	 */
	index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev,
					  &crev))
		return adjusted_clock;
	memset(&args, 0, sizeof(args));
	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
		case 2:
			/* v1/v2: BIOS rewrites usPixelClock in place */
			args.v1.usPixelClock = cpu_to_le16(clock / 10);
			args.v1.ucTransmitterID = amdgpu_encoder->encoder_id;
			args.v1.ucEncodeMode = encoder_mode;
			if (amdgpu_crtc->ss_enabled && amdgpu_crtc->ss.percentage)
				args.v1.ucConfig |=
					ADJUST_DISPLAY_CONFIG_SS_ENABLE;

			amdgpu_atom_execute_table(adev->mode_info.atom_context,
						  index, (uint32_t *)&args);
			adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
			break;
		case 3:
			/* v3: separate input/output; output may force dividers */
			args.v3.sInput.usPixelClock = cpu_to_le16(clock / 10);
			args.v3.sInput.ucTransmitterID = amdgpu_encoder->encoder_id;
			args.v3.sInput.ucEncodeMode = encoder_mode;
			args.v3.sInput.ucDispPllConfig = 0;
			if (amdgpu_crtc->ss_enabled && amdgpu_crtc->ss.percentage)
				args.v3.sInput.ucDispPllConfig |=
					DISPPLL_CONFIG_SS_ENABLE;
			if (ENCODER_MODE_IS_DP(encoder_mode)) {
				args.v3.sInput.ucDispPllConfig |=
					DISPPLL_CONFIG_COHERENT_MODE;
				/* 16200 or 27000 */
				args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
			} else if (amdgpu_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
				struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
				if (dig->coherent_mode)
					args.v3.sInput.ucDispPllConfig |=
						DISPPLL_CONFIG_COHERENT_MODE;
				if (is_duallink)
					args.v3.sInput.ucDispPllConfig |=
						DISPPLL_CONFIG_DUAL_LINK;
			}
			if (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
			    ENCODER_OBJECT_ID_NONE)
				args.v3.sInput.ucExtTransmitterID =
					amdgpu_encoder_get_dp_bridge_encoder_id(encoder);
			else
				args.v3.sInput.ucExtTransmitterID = 0;

			amdgpu_atom_execute_table(adev->mode_info.atom_context,
						  index, (uint32_t *)&args);
			adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
			if (args.v3.sOutput.ucRefDiv) {
				/* BIOS mandated a reference divider */
				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_REF_DIV;
				amdgpu_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
			}
			if (args.v3.sOutput.ucPostDiv) {
				/* BIOS mandated a post divider */
				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
				amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_POST_DIV;
				amdgpu_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
			}
			break;
		default:
			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
			return adjusted_clock;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d %d\n", frev, crev);
		return adjusted_clock;
	}

	return adjusted_clock;
}
/*!	Run DisplayPort link training for the connector driving CRTC \a crtcID:
	power up the sink, configure downspread / lane count / link rate over
	DPCD, start training on the source, then perform the clock-recovery and
	channel-equalization phases. Returns B_ERROR if the connector isn't a
	valid DisplayPort connector, B_OK otherwise.
*/
status_t
dp_link_train(uint8 crtcID)
{
	TRACE("%s\n", __func__);

	uint32 connectorIndex = gDisplay[crtcID]->connectorIndex;
	dp_info* dp = &gConnector[connectorIndex]->dpInfo;
	display_mode* mode = &gDisplay[crtcID]->currentMode;

	if (dp->valid != true) {
		ERROR("%s: started on invalid DisplayPort connector #%" B_PRIu32 "\n",
			__func__, connectorIndex);
		return B_ERROR;
	}

	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
	// Table version
	uint8 tableMajor;
	uint8 tableMinor;

	dp->trainingUseEncoder = true;
	if (atom_parse_cmd_header(gAtomContext, index, &tableMajor, &tableMinor)
		== B_OK) {
		if (tableMinor > 1) {
			// The AtomBIOS DPEncoderService greater then 1.1 can't program the
			// training pattern properly.
			dp->trainingUseEncoder = false;
		}
	}

	uint32 linkEnumeration
		= gConnector[connectorIndex]->encoder.linkEnumeration;

	uint32 gpioID = gConnector[connectorIndex]->gpioID;
	uint32 hwPin = gGPIOInfo[gpioID]->hwPin;

	// Pick DIG encoder/link bits for the DPEncoderService call.
	// NOTE(review): dpEncoderID is computed but not consumed in the visible
	// code below — confirm whether it should be passed to the encoder setup.
	uint32 dpEncoderID = 0;
	if (encoder_pick_dig(connectorIndex) > 0)
		dpEncoderID |= ATOM_DP_CONFIG_DIG2_ENCODER;
	else
		dpEncoderID |= ATOM_DP_CONFIG_DIG1_ENCODER;
	if (linkEnumeration == GRAPH_OBJECT_ENUM_ID2)
		dpEncoderID |= ATOM_DP_CONFIG_LINK_B;
	else
		dpEncoderID |= ATOM_DP_CONFIG_LINK_A;

	dp->trainingReadInterval
		= dpcd_reg_read(hwPin, DP_TRAINING_AUX_RD_INTERVAL);

	uint8 sandbox = dpcd_reg_read(hwPin, DP_MAX_LANE_COUNT);

	radeon_shared_info &info = *gInfo->shared_info;
	//bool dpTPS3Supported = false;
	//if (info.dceMajor >= 5 && (sandbox & DP_TPS3_SUPPORTED) != 0)
	//	dpTPS3Supported = true;

	// *** DisplayPort link training initialization

	// Power up the DP sink
	if (dp->config[0] >= DP_DPCD_REV_11)
		dpcd_reg_write(hwPin, DP_SET_POWER, DP_SET_POWER_D0);

	// Possibly enable downspread on the sink
	if ((dp->config[3] & 0x1) != 0)
		dpcd_reg_write(hwPin, DP_DOWNSPREAD_CTRL, DP_DOWNSPREAD_CTRL_AMP_EN);
	else
		dpcd_reg_write(hwPin, DP_DOWNSPREAD_CTRL, 0);

	encoder_dig_setup(connectorIndex, mode->timing.pixel_clock,
		ATOM_ENCODER_CMD_SETUP_PANEL_MODE);

	// TODO: Doesn't this overwrite important dpcd info?
	sandbox = dp->laneCount;
	if ((dp->config[0] >= DP_DPCD_REV_11)
		&& (dp->config[2] & DP_ENHANCED_FRAME_CAP_EN))
		sandbox |= DP_ENHANCED_FRAME_EN;
	dpcd_reg_write(hwPin, DP_LANE_COUNT, sandbox);

	// Set the link rate on the DP sink
	sandbox = dp_encode_link_rate(dp->linkRate);
	dpcd_reg_write(hwPin, DP_LINK_RATE, sandbox);

	// Start link training on source
	if (info.dceMajor >= 4 || !dp->trainingUseEncoder) {
		encoder_dig_setup(connectorIndex, mode->timing.pixel_clock,
			ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
	} else {
		ERROR("%s: TODO: cannot use AtomBIOS DPEncoderService on card!\n",
			__func__);
	}

	// Disable the training pattern on the sink
	dpcd_reg_write(hwPin, DP_TRAIN, DP_TRAIN_PATTERN_DISABLED);

	// Clock recovery, then channel equalization phases
	dp_link_train_cr(connectorIndex);
	dp_link_train_ce(connectorIndex);

	// *** DisplayPort link training finish
	snooze(400);

	// Disable the training pattern on the sink
	dpcd_reg_write(hwPin, DP_TRAIN, DP_TRAIN_PATTERN_DISABLED);

	// Disable the training pattern on the source
	if (info.dceMajor >= 4 || !dp->trainingUseEncoder) {
		encoder_dig_setup(connectorIndex, mode->timing.pixel_clock,
			ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
	} else {
		ERROR("%s: TODO: cannot use AtomBIOS DPEncoderService on card!\n",
			__func__);
	}

	return B_OK;
}
/*
 * Perform a hardware-assisted i2c transaction through the AtomBIOS
 * ProcessI2cChannelTransaction command table.
 *
 * @chan:       i2c channel (its rec.i2c_id selects the line)
 * @slave_addr: 7-bit slave address (shifted left before being passed on)
 * @flags:      HW_I2C_WRITE for a write, otherwise a read
 * @buf:        write: buf[0] is the register index, buf[1..] is payload;
 *              read: destination buffer filled from the atom scratch area
 * @num:        byte count (bounded by ATOM_MAX_HW_I2C_WRITE/READ)
 *
 * Returns 0 on success, -EINVAL on oversized transfers, -EIO on a
 * reported i2c failure.  Takes the channel mutex and the atom scratch
 * mutex for the duration of the call.
 */
static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
				 u8 slave_addr, u8 flags,
				 u8 *buf, u8 num)
{
	struct drm_device *dev = chan->dev;
	struct radeon_device *rdev = dev->dev_private;
	PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
	unsigned char *base;
	u16 out = cpu_to_le16(0);
	int r = 0;

	memset(&args, 0, sizeof(args));

	mutex_lock(&chan->mutex);
	mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);

	/* read results come back through the atom scratch area */
	base = (unsigned char *)rdev->mode_info.atom_context->scratch;

	if (flags & HW_I2C_WRITE) {
		if (num > ATOM_MAX_HW_I2C_WRITE) {
			DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
			r = -EINVAL;
			goto done;
		}
		/* first byte is the register index, the rest is payload */
		if (buf == NULL)
			args.ucRegIndex = 0;
		else
			args.ucRegIndex = buf[0];
		if (num)
			num--;
		if (num)
			memcpy(&out, &buf[1], num);
		args.lpI2CDataOut = cpu_to_le16(out);
	} else {
		if (num > ATOM_MAX_HW_I2C_READ) {
			DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
			r = -EINVAL;
			goto done;
		}
		args.ucRegIndex = 0;
		args.lpI2CDataOut = 0;
	}

	args.ucFlag = flags;
	args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
	args.ucTransBytes = num;
	args.ucSlaveAddr = slave_addr << 1;
	args.ucLineNumber = chan->rec.i2c_id;

	atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	/* error */
	if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
		DRM_DEBUG_KMS("hw_i2c error\n");
		r = -EIO;
		goto done;
	}

	/* copy (and endian-swap) the read data out of the scratch area */
	if (!(flags & HW_I2C_WRITE))
		radeon_atom_copy_swap(buf, base, num, false);

done:
	mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
	mutex_unlock(&chan->mutex);

	return r;
}
/*!	Read the PLL operating limits (reference frequency, output/input clock
	ranges, LCD-specific ranges and divider bounds) from the AtomBIOS
	FirmwareInfo data table into \a pll.
	Returns B_OK on success, B_ERROR when the table header can't be parsed.
*/
status_t
pll_limit_probe(pll_info *pll)
{
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	uint8 tableMajor;
	uint8 tableMinor;
	uint16 tableOffset;

	if (atom_parse_data_header(gAtomContext, index, NULL,
		&tableMajor, &tableMinor, &tableOffset) != B_OK) {
		ERROR("%s: Couldn't parse data header\n", __func__);
		return B_ERROR;
	}

	union firmware_info *firmwareInfo
		= (union firmware_info *)(gAtomContext->bios + tableOffset);

	/* pixel clock limits */
	pll->referenceFreq
		= B_LENDIAN_TO_HOST_INT16(firmwareInfo->info.usReferenceClock) * 10;

	// Revision 1.2+ widened the minimum output clock field to 32 bits.
	if (tableMinor >= 2) {
		pll->pllOutMin = B_LENDIAN_TO_HOST_INT32(
			firmwareInfo->info_12.ulMinPixelClockPLL_Output) * 10;
	} else {
		pll->pllOutMin = B_LENDIAN_TO_HOST_INT16(
			firmwareInfo->info.usMinPixelClockPLL_Output) * 10;
	}

	pll->pllOutMax = B_LENDIAN_TO_HOST_INT32(
		firmwareInfo->info.ulMaxPixelClockPLL_Output) * 10;

	// LCD limits fall back to the generic ones; revision 1.4+ can
	// override each bound with a non-zero dedicated value.
	pll->lcdPllOutMin = pll->pllOutMin;
	pll->lcdPllOutMax = pll->pllOutMax;
	if (tableMinor >= 4) {
		uint32 lcdMin = B_LENDIAN_TO_HOST_INT16(
			firmwareInfo->info_14.usLcdMinPixelClockPLL_Output) * 1000;
		if (lcdMin != 0)
			pll->lcdPllOutMin = lcdMin;

		uint32 lcdMax = B_LENDIAN_TO_HOST_INT16(
			firmwareInfo->info_14.usLcdMaxPixelClockPLL_Output) * 1000;
		if (lcdMax != 0)
			pll->lcdPllOutMax = lcdMax;
	}

	if (pll->pllOutMin == 0) {
		pll->pllOutMin = 64800 * 10;
			// Avivo+ limit
	}

	// Divider bounds are fixed constants, not read from the table.
	pll->minPostDiv = POST_DIV_MIN;
	pll->maxPostDiv = POST_DIV_LIMIT;
	pll->minRefDiv = REF_DIV_MIN;
	pll->maxRefDiv = REF_DIV_LIMIT;
	pll->minFeedbackDiv = FB_DIV_MIN;
	pll->maxFeedbackDiv = FB_DIV_LIMIT;

	pll->pllInMin = B_LENDIAN_TO_HOST_INT16(
		firmwareInfo->info.usMinPixelClockPLL_Input) * 10;
	pll->pllInMax = B_LENDIAN_TO_HOST_INT16(
		firmwareInfo->info.usMaxPixelClockPLL_Input) * 10;

	TRACE("%s: referenceFreq: %" B_PRIu16 "; pllOutMin: %" B_PRIu16 "; "
		" pllOutMax: %" B_PRIu16 "; pllInMin: %" B_PRIu16 ";"
		"pllInMax: %" B_PRIu16 "\n", __func__, pll->referenceFreq,
		pll->pllOutMin, pll->pllOutMax, pll->pllInMin, pll->pllInMax);

	return B_OK;
}
bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev) { struct amdgpu_mode_info *mode_info = &adev->mode_info; struct atom_context *ctx = mode_info->atom_context; int index = GetIndexIntoMasterTable(DATA, Object_Header); u16 size, data_offset; u8 frev, crev; ATOM_CONNECTOR_OBJECT_TABLE *con_obj; ATOM_ENCODER_OBJECT_TABLE *enc_obj; ATOM_OBJECT_TABLE *router_obj; ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; ATOM_OBJECT_HEADER *obj_header; int i, j, k, path_size, device_support; int connector_type; u16 conn_id, connector_object_id; struct amdgpu_i2c_bus_rec ddc_bus; struct amdgpu_router router; struct amdgpu_gpio_rec gpio; struct amdgpu_hpd hpd; if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) return false; if (crev < 2) return false; obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) (ctx->bios + data_offset + le16_to_cpu(obj_header->usDisplayPathTableOffset)); con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *) (ctx->bios + data_offset + le16_to_cpu(obj_header->usConnectorObjectTableOffset)); enc_obj = (ATOM_ENCODER_OBJECT_TABLE *) (ctx->bios + data_offset + le16_to_cpu(obj_header->usEncoderObjectTableOffset)); router_obj = (ATOM_OBJECT_TABLE *) (ctx->bios + data_offset + le16_to_cpu(obj_header->usRouterObjectTableOffset)); device_support = le16_to_cpu(obj_header->usDeviceSupport); path_size = 0; for (i = 0; i < path_obj->ucNumOfDispPath; i++) { uint8_t *addr = (uint8_t *) path_obj->asDispPath; ATOM_DISPLAY_OBJECT_PATH *path; addr += path_size; path = (ATOM_DISPLAY_OBJECT_PATH *) addr; path_size += le16_to_cpu(path->usSize); if (device_support & le16_to_cpu(path->usDeviceTag)) { uint8_t con_obj_id, con_obj_num, con_obj_type; con_obj_id = (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; con_obj_num = (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK) >> ENUM_ID_SHIFT; con_obj_type = (le16_to_cpu(path->usConnObjectId) & 
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; connector_type = object_connector_convert[con_obj_id]; connector_object_id = con_obj_id; if (connector_type == DRM_MODE_CONNECTOR_Unknown) continue; router.ddc_valid = false; router.cd_valid = false; for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { uint8_t grph_obj_id, grph_obj_num, grph_obj_type; grph_obj_id = (le16_to_cpu(path->usGraphicObjIds[j]) & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; grph_obj_num = (le16_to_cpu(path->usGraphicObjIds[j]) & ENUM_ID_MASK) >> ENUM_ID_SHIFT; grph_obj_type = (le16_to_cpu(path->usGraphicObjIds[j]) & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) { for (k = 0; k < enc_obj->ucNumberOfObjects; k++) { u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID); if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) { ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) (ctx->bios + data_offset + le16_to_cpu(enc_obj->asObjects[k].usRecordOffset)); ATOM_ENCODER_CAP_RECORD *cap_record; u16 caps = 0; while (record->ucRecordSize > 0 && record->ucRecordType > 0 && record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { switch (record->ucRecordType) { case ATOM_ENCODER_CAP_RECORD_TYPE: cap_record =(ATOM_ENCODER_CAP_RECORD *) record; caps = le16_to_cpu(cap_record->usEncoderCap); break; } record = (ATOM_COMMON_RECORD_HEADER *) ((char *)record + record->ucRecordSize); } amdgpu_display_add_encoder(adev, encoder_obj, le16_to_cpu(path->usDeviceTag), caps); } } } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { for (k = 0; k < router_obj->ucNumberOfObjects; k++) { u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID); if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) { ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) (ctx->bios + data_offset + le16_to_cpu(router_obj->asObjects[k].usRecordOffset)); ATOM_I2C_RECORD *i2c_record; ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; 
ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path; ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path; ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table = (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) (ctx->bios + data_offset + le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset)); u8 *num_dst_objs = (u8 *) ((u8 *)router_src_dst_table + 1 + (router_src_dst_table->ucNumberOfSrc * 2)); u16 *dst_objs = (u16 *)(num_dst_objs + 1); int enum_id; router.router_id = router_obj_id; for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) { if (le16_to_cpu(path->usConnObjectId) == le16_to_cpu(dst_objs[enum_id])) break; } while (record->ucRecordSize > 0 && record->ucRecordType > 0 && record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { switch (record->ucRecordType) { case ATOM_I2C_RECORD_TYPE: i2c_record = (ATOM_I2C_RECORD *) record; i2c_config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_record->sucI2cId; router.i2c_info = amdgpu_atombios_lookup_i2c_gpio(adev, i2c_config-> ucAccess); router.i2c_addr = i2c_record->ucI2CAddr >> 1; break; case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE: ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *) record; router.ddc_valid = true; router.ddc_mux_type = ddc_path->ucMuxType; router.ddc_mux_control_pin = ddc_path->ucMuxControlPin; router.ddc_mux_state = ddc_path->ucMuxState[enum_id]; break; case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE: cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *) record; router.cd_valid = true; router.cd_mux_type = cd_path->ucMuxType; router.cd_mux_control_pin = cd_path->ucMuxControlPin; router.cd_mux_state = cd_path->ucMuxState[enum_id]; break; } record = (ATOM_COMMON_RECORD_HEADER *) ((char *)record + record->ucRecordSize); } } } } } /* look up gpio for ddc, hpd */ ddc_bus.valid = false; hpd.hpd = AMDGPU_HPD_NONE; if ((le16_to_cpu(path->usDeviceTag) & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) { for (j = 0; j < con_obj->ucNumberOfObjects; j++) { if (le16_to_cpu(path->usConnObjectId) == 
le16_to_cpu(con_obj->asObjects[j]. usObjectID)) { ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) (ctx->bios + data_offset + le16_to_cpu(con_obj-> asObjects[j]. usRecordOffset)); ATOM_I2C_RECORD *i2c_record; ATOM_HPD_INT_RECORD *hpd_record; ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; while (record->ucRecordSize > 0 && record->ucRecordType > 0 && record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { switch (record->ucRecordType) { case ATOM_I2C_RECORD_TYPE: i2c_record = (ATOM_I2C_RECORD *) record; i2c_config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_record->sucI2cId; ddc_bus = amdgpu_atombios_lookup_i2c_gpio(adev, i2c_config-> ucAccess); break; case ATOM_HPD_INT_RECORD_TYPE: hpd_record = (ATOM_HPD_INT_RECORD *) record; gpio = amdgpu_atombios_lookup_gpio(adev, hpd_record->ucHPDIntGPIOID); hpd = amdgpu_atombios_get_hpd_info_from_gpio(adev, &gpio); hpd.plugged_state = hpd_record->ucPlugged_PinState; break; } record = (ATOM_COMMON_RECORD_HEADER *) ((char *)record + record-> ucRecordSize); } break; } } } /* needed for aux chan transactions */ ddc_bus.hpd = hpd.hpd; conn_id = le16_to_cpu(path->usConnObjectId); amdgpu_display_add_connector(adev, conn_id, le16_to_cpu(path->usDeviceTag), connector_type, &ddc_bus, connector_object_id, &hpd, &router); }
/*!	Program PLL \a pllID on CRTC \a crtcID to \a pixelClock (kHz) through
	the AtomBIOS SetPixelClock command table (minor revisions 1, 2, 3, 5
	and 6). The clock is first adjusted and its dividers computed via
	pll_adjust()/pll_compute(). Returns the AtomBIOS execution status, or
	B_ERROR for an unknown table revision.
*/
status_t
pll_set(uint8 pllID, uint32 pixelClock, uint8 crtcID)
{
	uint32 connectorIndex = gDisplay[crtcID]->connectorIndex;
	pll_info *pll = &gConnector[connectorIndex]->encoder.pll;
	pll->pixelClock = pixelClock;
	pll->id = pllID;

	pll_setup_flags(pll, crtcID);
		// set up any special flags
	pll_adjust(pll, crtcID);
		// get any needed clock adjustments, set reference/post dividers
	pll_compute(pll);
		// compute dividers

	int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
	union set_pixel_clock args;
	memset(&args, 0, sizeof(args));

	uint8 tableMajor;
	uint8 tableMinor;
	atom_parse_cmd_header(gAtomContext, index, &tableMajor, &tableMinor);

	uint32 bitsPerChannel = 8;
		// TODO: Digital Depth, EDID 1.4+ on digital displays
		// isn't in Haiku edid common code?

	switch (tableMinor) {
		case 1:
			args.v1.usPixelClock
				= B_HOST_TO_LENDIAN_INT16(pll->pixelClock / 10);
			args.v1.usRefDiv = B_HOST_TO_LENDIAN_INT16(pll->referenceDiv);
			args.v1.usFbDiv = B_HOST_TO_LENDIAN_INT16(pll->feedbackDiv);
			args.v1.ucFracFbDiv = pll->feedbackDivFrac;
			args.v1.ucPostDiv = pll->postDiv;
			args.v1.ucPpll = pll->id;
			args.v1.ucCRTC = crtcID;
			args.v1.ucRefDivSrc = 1;
			break;
		case 2:
			args.v2.usPixelClock
				= B_HOST_TO_LENDIAN_INT16(pll->pixelClock / 10);
			args.v2.usRefDiv = B_HOST_TO_LENDIAN_INT16(pll->referenceDiv);
			args.v2.usFbDiv = B_HOST_TO_LENDIAN_INT16(pll->feedbackDiv);
			args.v2.ucFracFbDiv = pll->feedbackDivFrac;
			args.v2.ucPostDiv = pll->postDiv;
			args.v2.ucPpll = pll->id;
			args.v2.ucCRTC = crtcID;
			args.v2.ucRefDivSrc = 1;
			break;
		case 3:
			args.v3.usPixelClock
				= B_HOST_TO_LENDIAN_INT16(pll->pixelClock / 10);
			args.v3.usRefDiv = B_HOST_TO_LENDIAN_INT16(pll->referenceDiv);
			args.v3.usFbDiv = B_HOST_TO_LENDIAN_INT16(pll->feedbackDiv);
			args.v3.ucFracFbDiv = pll->feedbackDivFrac;
			args.v3.ucPostDiv = pll->postDiv;
			args.v3.ucPpll = pll->id;
			// PLL id is encoded in the upper bits of ucMiscInfo
			args.v3.ucMiscInfo = (pll->id << 2);
			// if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
			//	args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
			args.v3.ucTransmitterId
				= gConnector[connectorIndex]->encoder.objectID;
			args.v3.ucEncoderMode = display_get_encoder_mode(connectorIndex);
			break;
		case 5:
			args.v5.ucCRTC = crtcID;
			args.v5.usPixelClock
				= B_HOST_TO_LENDIAN_INT16(pll->pixelClock / 10);
			args.v5.ucRefDiv = pll->referenceDiv;
			args.v5.usFbDiv = B_HOST_TO_LENDIAN_INT16(pll->feedbackDiv);
			args.v5.ulFbDivDecFrac
				= B_HOST_TO_LENDIAN_INT32(pll->feedbackDivFrac * 100000);
			args.v5.ucPostDiv = pll->postDiv;
			args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
			// if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
			//	args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
			// HDMI deep color: v5 only knows 24 and 30 bpp
			switch (bitsPerChannel) {
				case 8:
				default:
					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
					break;
				case 10:
					args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
					break;
			}
			args.v5.ucTransmitterID
				= gConnector[connectorIndex]->encoder.objectID;
			args.v5.ucEncoderMode = display_get_encoder_mode(connectorIndex);
			args.v5.ucPpll = pllID;
			break;
		case 6:
			// v6 packs the CRTC id into the top byte of the clock field
			args.v6.ulDispEngClkFreq
				= B_HOST_TO_LENDIAN_INT32(crtcID << 24
					| pll->pixelClock / 10);
			args.v6.ucRefDiv = pll->referenceDiv;
			args.v6.usFbDiv = B_HOST_TO_LENDIAN_INT16(pll->feedbackDiv);
			args.v6.ulFbDivDecFrac
				= B_HOST_TO_LENDIAN_INT32(pll->feedbackDivFrac * 100000);
			args.v6.ucPostDiv = pll->postDiv;
			args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
			// if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
			//	args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
			// HDMI deep color: v6 knows 24/30/36/48 bpp
			switch (bitsPerChannel) {
				case 8:
				default:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
					break;
				case 10:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
					break;
				case 12:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
					break;
				case 16:
					args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
					break;
			}
			args.v6.ucTransmitterID
				= gConnector[connectorIndex]->encoder.objectID;
			args.v6.ucEncoderMode = display_get_encoder_mode(connectorIndex);
			args.v6.ucPpll = pllID;
			break;
		default:
			TRACE("%s: ERROR: table version %" B_PRIu8 ".%" B_PRIu8 " TODO\n",
				__func__, tableMajor, tableMinor);
			return B_ERROR;
	}

	TRACE("%s: set adjusted pixel clock %" B_PRIu32 " (was %" B_PRIu32 ")\n",
		__func__, pll->pixelClock, pixelClock);

	return atom_execute_table(gAtomContext, index, (uint32*)&args);
}