/*
 * Poll FBC_STATUS until FBC_ENABLE_STATUS reports the requested state.
 *
 * Reads the status register up to 10 times, sleeping 10 ms between
 * attempts (~100 ms worst case).  Logs a warning if the hardware never
 * reaches the requested state, otherwise logs the new state.
 */
static void wait_for_fbc_state_changed(
	struct dce110_compressor *cp110,
	bool enabled)
{
	uint32_t addr = mmFBC_STATUS;
	uint32_t value;
	uint8_t tries;

	for (tries = 0; tries < 10; tries++) {
		value = dm_read_reg(cp110->base.ctx, addr);

		if (get_reg_field_value(value, FBC_STATUS,
				FBC_ENABLE_STATUS) == enabled)
			break;

		msleep(10);
	}

	if (tries == 10) {
		DC_LOG_WARNING("%s: wait counter exceeded, changes to HW not applied",
			__func__);
	} else {
		DC_LOG_SYNC("FBC status changed to %d", enabled);
	}
}
static void dce_transform_set_pixel_storage_depth( struct transform *xfm, enum lb_pixel_depth depth, const struct bit_depth_reduction_params *bit_depth_params) { struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); int pixel_depth, expan_mode; enum dc_color_depth color_depth; switch (depth) { case LB_PIXEL_DEPTH_18BPP: color_depth = COLOR_DEPTH_666; pixel_depth = 2; expan_mode = 1; break; case LB_PIXEL_DEPTH_24BPP: color_depth = COLOR_DEPTH_888; pixel_depth = 1; expan_mode = 1; break; case LB_PIXEL_DEPTH_30BPP: color_depth = COLOR_DEPTH_101010; pixel_depth = 0; expan_mode = 1; break; case LB_PIXEL_DEPTH_36BPP: color_depth = COLOR_DEPTH_121212; pixel_depth = 3; expan_mode = 0; break; default: color_depth = COLOR_DEPTH_101010; pixel_depth = 0; expan_mode = 1; BREAK_TO_DEBUGGER(); break; } set_denormalization(xfm_dce, color_depth); program_bit_depth_reduction(xfm_dce, color_depth, bit_depth_params); REG_UPDATE_2(LB_DATA_FORMAT, PIXEL_DEPTH, pixel_depth, PIXEL_EXPAN_MODE, expan_mode); if (!(xfm_dce->lb_pixel_depth_supported & depth)) { /*we should use unsupported capabilities * unless it is required by w/a*/ DC_LOG_WARNING("%s: Capability not supported", __func__); } }
void dce110_compressor_program_compressed_surface_address_and_pitch( struct compressor *compressor, struct compr_addr_and_pitch_params *params) { struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor); uint32_t value = 0; uint32_t fbc_pitch = 0; uint32_t compressed_surf_address_low_part = compressor->compr_surface_address.addr.low_part; /* Clear content first. */ dm_write_reg( compressor->ctx, DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH), 0); dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0); /* Write address, HIGH has to be first. */ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH), compressor->compr_surface_address.addr.high_part); dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), compressed_surf_address_low_part); fbc_pitch = align_to_chunks_number_per_line(params->source_view_width); if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1) fbc_pitch = fbc_pitch / 8; else DC_LOG_WARNING("%s: Unexpected DCE11 compression ratio", __func__); /* Clear content first. */ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0); /* Write FBC Pitch. */ set_reg_field_value( value, fbc_pitch, GRPH_COMPRESS_PITCH, GRPH_COMPRESS_PITCH); dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value); }
static int dce_transform_get_max_num_of_supported_lines( struct dce_transform *xfm_dce, enum lb_pixel_depth depth, int pixel_width) { int pixels_per_entries = 0; int max_pixels_supports = 0; ASSERT(pixel_width); /* Find number of pixels that can fit into a single LB entry and * take floor of the value since we cannot store a single pixel * across multiple entries. */ switch (depth) { case LB_PIXEL_DEPTH_18BPP: pixels_per_entries = xfm_dce->lb_bits_per_entry / 18; break; case LB_PIXEL_DEPTH_24BPP: pixels_per_entries = xfm_dce->lb_bits_per_entry / 24; break; case LB_PIXEL_DEPTH_30BPP: pixels_per_entries = xfm_dce->lb_bits_per_entry / 30; break; case LB_PIXEL_DEPTH_36BPP: pixels_per_entries = xfm_dce->lb_bits_per_entry / 36; break; default: DC_LOG_WARNING("%s: Invalid LB pixel depth", __func__); BREAK_TO_DEBUGGER(); break; } ASSERT(pixels_per_entries); max_pixels_supports = pixels_per_entries * xfm_dce->lb_memory_size; return (max_pixels_supports / pixel_width); }
static void construct( struct i2c_hw_engine_dce110 *hw_engine, const struct i2c_hw_engine_dce110_create_arg *arg) { uint32_t xtal_ref_div = 0; dal_i2c_hw_engine_construct(&hw_engine->base, arg->ctx); hw_engine->base.base.base.funcs = &engine_funcs; hw_engine->base.base.funcs = &i2c_engine_funcs; hw_engine->base.funcs = &i2c_hw_engine_funcs; hw_engine->base.default_speed = arg->default_speed; hw_engine->regs = arg->regs; hw_engine->i2c_shift = arg->i2c_shift; hw_engine->i2c_mask = arg->i2c_mask; hw_engine->engine_id = arg->engine_id; hw_engine->buffer_used_bytes = 0; hw_engine->transaction_count = 0; hw_engine->engine_keep_power_up_count = 1; REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); if (xtal_ref_div == 0) { DC_LOG_WARNING("Invalid base timer divider [%s]\n", __func__); xtal_ref_div = 2; } /*Calculating Reference Clock by divding original frequency by * XTAL_REF_DIV. * At upper level, uint32_t reference_frequency = * dal_i2caux_get_reference_clock(as) >> 1 * which already divided by 2. So we need x2 to get original * reference clock from ppll_info */ hw_engine->reference_frequency = (arg->reference_frequency * 2) / xtal_ref_div; }
/*
 * generic_reg_wait - poll a register field until it equals a value.
 *
 * Reads @addr, extracts the field described by @mask/@shift, and
 * returns as soon as it equals @condition_value.  On success it logs
 * how long the wait took if it exceeded 1 ms (except on FPGA).  On
 * timeout it logs a warning with the caller's @func_name/@line and
 * breaks to the debugger (again, except on FPGA).
 *
 * Loop shape: the first read (i == 0) happens immediately with no
 * delay; every later iteration delays first, then reads.  The `<=`
 * bound therefore gives time_out_num_tries + 1 reads separated by
 * exactly time_out_num_tries delays, which matches the total-delay
 * budget the ASSERT below checks.
 */
void generic_reg_wait(const struct dc_context *ctx,
	uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
	unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
	const char *func_name, int line)
{
	uint32_t field_value;
	uint32_t reg_val;
	int i;

	/* something is terribly wrong if time out is > 200ms. (5Hz) */
	ASSERT(delay_between_poll_us * time_out_num_tries <= 200000);

	for (i = 0; i <= time_out_num_tries; i++) {
		if (i) {
			/* msleep for >= 1 ms waits; busy-wait for sub-ms */
			if (delay_between_poll_us >= 1000)
				msleep(delay_between_poll_us/1000);
			else if (delay_between_poll_us > 0)
				udelay(delay_between_poll_us);
		}

		reg_val = dm_read_reg(ctx, addr);

		field_value = get_reg_field_value_ex(reg_val, mask, shift);

		if (field_value == condition_value) {
			/* condition met; complain only about slow waits */
			if (i * delay_between_poll_us > 1000 &&
					!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
				DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
						delay_between_poll_us * i / 1000,
						func_name, line);
			return;
		}
	}

	DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
			delay_between_poll_us, time_out_num_tries,
			func_name, line);

	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
		BREAK_TO_DEBUGGER();
}
/*
 * dce110_update_hdmi_info_packet - program one HDMI generic info packet.
 *
 * If @info_packet is valid, writes its payload via
 * dce110_update_generic_info_packet() and enables continuous per-frame
 * transmission on line 2; otherwise disables transmission for that slot.
 * Each HDMI_GENERIC_PACKET_CONTROLn register carries two packet slots
 * (GENERIC0 at even @packet_index, GENERIC1 at odd), so the switch maps
 * the index to the right register/field pair.  Indices 4-7 use control
 * registers 2/3, which only exist on DCN1.0-capable hardware (guarded
 * both by the config option and a runtime REG() presence check).
 */
static void dce110_update_hdmi_info_packet(
	struct dce110_stream_encoder *enc110,
	uint32_t packet_index,
	const struct encoder_info_packet *info_packet)
{
	uint32_t cont, send, line;

	if (info_packet->valid) {
		dce110_update_generic_info_packet(
			enc110,
			packet_index,
			info_packet);

		/* enable transmission of packet(s) -
		 * packet transmission begins on the next frame */
		cont = 1;
		/* send packet(s) every frame */
		send = 1;
		/* select line number to send packets on */
		line = 2;
	} else {
		cont = 0;
		send = 0;
		line = 0;
	}

	/* choose which generic packet control to use */
	switch (packet_index) {
	case 0:
		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
				HDMI_GENERIC0_CONT, cont,
				HDMI_GENERIC0_SEND, send,
				HDMI_GENERIC0_LINE, line);
		break;
	case 1:
		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
				HDMI_GENERIC1_CONT, cont,
				HDMI_GENERIC1_SEND, send,
				HDMI_GENERIC1_LINE, line);
		break;
	case 2:
		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
				HDMI_GENERIC0_CONT, cont,
				HDMI_GENERIC0_SEND, send,
				HDMI_GENERIC0_LINE, line);
		break;
	case 3:
		REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
				HDMI_GENERIC1_CONT, cont,
				HDMI_GENERIC1_SEND, send,
				HDMI_GENERIC1_LINE, line);
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case 4:
		if (REG(HDMI_GENERIC_PACKET_CONTROL2))
			REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
					HDMI_GENERIC0_CONT, cont,
					HDMI_GENERIC0_SEND, send,
					HDMI_GENERIC0_LINE, line);
		break;
	case 5:
		if (REG(HDMI_GENERIC_PACKET_CONTROL2))
			REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
					HDMI_GENERIC1_CONT, cont,
					HDMI_GENERIC1_SEND, send,
					HDMI_GENERIC1_LINE, line);
		break;
	case 6:
		if (REG(HDMI_GENERIC_PACKET_CONTROL3))
			REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
					HDMI_GENERIC0_CONT, cont,
					HDMI_GENERIC0_SEND, send,
					HDMI_GENERIC0_LINE, line);
		break;
	case 7:
		if (REG(HDMI_GENERIC_PACKET_CONTROL3))
			REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
					HDMI_GENERIC1_CONT, cont,
					HDMI_GENERIC1_SEND, send,
					HDMI_GENERIC1_LINE, line);
		break;
#endif
	default:
		/* invalid HW packet index */
		DC_LOG_WARNING(
			"Invalid HW packet index: %s()\n", __func__);
		return;
	}
}
/*
 * hubbub1_verify_allow_pstate_change_high - verify the display hub is
 * allowing memory p-state changes.
 *
 * Polls the arbiter's allow_pstate_change debug bit (bit 30 of the
 * DCHUBBUB debug data, selected via hubbub->debug_test_index_pstate)
 * for up to ~200us.  Returns true once the bit asserts; on timeout it
 * applies a workaround that forces pstate allow (to avoid a system
 * hang), remembers that via a static flag so the force can be reverted
 * on the next call, and returns false.
 *
 * NOTE: uses function-static state (timeouts, sampled max, force flag),
 * so this is not safe for concurrent callers — presumably only invoked
 * from a single HW-programming context; confirm against callers.
 */
bool hubbub1_verify_allow_pstate_change_high(
	struct hubbub *hubbub)
{
	/* pstate latency is ~20us so if we wait over 40us and pstate allow
	 * still not asserted, we are probably stuck and going to hang
	 *
	 * TODO: Figure out why it takes ~100us on linux
	 * pstate takes around ~100us on linux. Unknown currently as to
	 * why it takes that long on linux
	 */
	static unsigned int pstate_wait_timeout_us = 200;
	static unsigned int pstate_wait_expected_timeout_us = 40;
	static unsigned int max_sampled_pstate_wait_us; /* data collection */
	static bool forced_pstate_allow; /* help with revert wa */

	unsigned int debug_data;
	unsigned int i;

	if (forced_pstate_allow) {
		/* we hacked to force pstate allow to prevent hang last time
		 * we verify_allow_pstate_change_high. so disable force
		 * here so we can check status
		 */
		REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
			DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
			DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
		forced_pstate_allow = false;
	}

	/* Debug-bus bit layout for the pstate debug index.
	 *
	 * RV2:
	 * dchubbubdebugind, at: 0xB
	 * description
	 * 0:     Pipe0 Plane0 Allow Pstate Change
	 * 1:     Pipe0 Plane1 Allow Pstate Change
	 * 2:     Pipe0 Cursor0 Allow Pstate Change
	 * 3:     Pipe0 Cursor1 Allow Pstate Change
	 * 4:     Pipe1 Plane0 Allow Pstate Change
	 * 5:     Pipe1 Plane1 Allow Pstate Change
	 * 6:     Pipe1 Cursor0 Allow Pstate Change
	 * 7:     Pipe1 Cursor1 Allow Pstate Change
	 * 8:     Pipe2 Plane0 Allow Pstate Change
	 * 9:     Pipe2 Plane1 Allow Pstate Change
	 * 10:    Pipe2 Cursor0 Allow Pstate Change
	 * 11:    Pipe2 Cursor1 Allow Pstate Change
	 * 12:    Pipe3 Plane0 Allow Pstate Change
	 * 13:    Pipe3 Plane1 Allow Pstate Change
	 * 14:    Pipe3 Cursor0 Allow Pstate Change
	 * 15:    Pipe3 Cursor1 Allow Pstate Change
	 * 16:    Pipe4 Plane0 Allow Pstate Change
	 * 17:    Pipe4 Plane1 Allow Pstate Change
	 * 18:    Pipe4 Cursor0 Allow Pstate Change
	 * 19:    Pipe4 Cursor1 Allow Pstate Change
	 * 20:    Pipe5 Plane0 Allow Pstate Change
	 * 21:    Pipe5 Plane1 Allow Pstate Change
	 * 22:    Pipe5 Cursor0 Allow Pstate Change
	 * 23:    Pipe5 Cursor1 Allow Pstate Change
	 * 24:    Pipe6 Plane0 Allow Pstate Change
	 * 25:    Pipe6 Plane1 Allow Pstate Change
	 * 26:    Pipe6 Cursor0 Allow Pstate Change
	 * 27:    Pipe6 Cursor1 Allow Pstate Change
	 * 28:    WB0 Allow Pstate Change
	 * 29:    WB1 Allow Pstate Change
	 * 30:    Arbiter's allow_pstate_change
	 * 31:    SOC pstate change request
	 *
	 * RV1:
	 * dchubbubdebugind, at: 0x7
	 * description "3-0:   Pipe0 cursor0 QOS
	 * 7-4:   Pipe1 cursor0 QOS
	 * 11-8:  Pipe2 cursor0 QOS
	 * 15-12: Pipe3 cursor0 QOS
	 * 16:    Pipe0 Plane0 Allow Pstate Change
	 * 17:    Pipe1 Plane0 Allow Pstate Change
	 * 18:    Pipe2 Plane0 Allow Pstate Change
	 * 19:    Pipe3 Plane0 Allow Pstate Change
	 * 20:    Pipe0 Plane1 Allow Pstate Change
	 * 21:    Pipe1 Plane1 Allow Pstate Change
	 * 22:    Pipe2 Plane1 Allow Pstate Change
	 * 23:    Pipe3 Plane1 Allow Pstate Change
	 * 24:    Pipe0 cursor0 Allow Pstate Change
	 * 25:    Pipe1 cursor0 Allow Pstate Change
	 * 26:    Pipe2 cursor0 Allow Pstate Change
	 * 27:    Pipe3 cursor0 Allow Pstate Change
	 * 28:    WB0 Allow Pstate Change
	 * 29:    WB1 Allow Pstate Change
	 * 30:    Arbiter's allow_pstate_change
	 * 31:    SOC pstate change request
	 */

	REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub->debug_test_index_pstate);

	for (i = 0; i < pstate_wait_timeout_us; i++) {
		debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);

		/* bit 30: arbiter's allow_pstate_change (both RV1 and RV2) */
		if (debug_data & (1 << 30)) {

			if (i > pstate_wait_expected_timeout_us)
				DC_LOG_WARNING("pstate took longer than expected ~%dus\n",
						i);

			return true;
		}
		/* track the worst wait seen so far, for data collection */
		if (max_sampled_pstate_wait_us < i)
			max_sampled_pstate_wait_us = i;

		udelay(1);
	}

	/* force pstate allow to prevent system hang
	 * and break to debugger to investigate
	 */
	REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
		DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
		DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
	forced_pstate_allow = true;

	DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n",
			debug_data);

	return false;
}
static void program_pwl(struct dce_transform *xfm_dce, const struct pwl_params *params) { int retval; uint8_t max_tries = 10; uint8_t counter = 0; uint32_t i = 0; const struct pwl_result_data *rgb = params->rgb_resulted; /* Power on LUT memory */ if (REG(DCFE_MEM_PWR_CTRL)) REG_UPDATE(DCFE_MEM_PWR_CTRL, DCP_REGAMMA_MEM_PWR_DIS, 1); else REG_UPDATE(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_LIGHT_SLEEP_DIS, 1); while (counter < max_tries) { if (REG(DCFE_MEM_PWR_STATUS)) { REG_GET(DCFE_MEM_PWR_STATUS, DCP_REGAMMA_MEM_PWR_STATE, &retval); if (retval == 0) break; ++counter; } else { REG_GET(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_MEM_PWR_STATE, &retval); if (retval == 0) break; ++counter; } } if (counter == max_tries) { DC_LOG_WARNING("%s: regamma lut was not powered on " "in a timely manner," " programming still proceeds\n", __func__); } REG_UPDATE(REGAMMA_LUT_WRITE_EN_MASK, REGAMMA_LUT_WRITE_EN_MASK, 7); REG_WRITE(REGAMMA_LUT_INDEX, 0); /* Program REGAMMA_LUT_DATA */ while (i != params->hw_points_num) { REG_WRITE(REGAMMA_LUT_DATA, rgb->red_reg); REG_WRITE(REGAMMA_LUT_DATA, rgb->green_reg); REG_WRITE(REGAMMA_LUT_DATA, rgb->blue_reg); REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_red_reg); REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_green_reg); REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_blue_reg); ++rgb; ++i; } /* we are done with DCP LUT memory; re-enable low power mode */ if (REG(DCFE_MEM_PWR_CTRL)) REG_UPDATE(DCFE_MEM_PWR_CTRL, DCP_REGAMMA_MEM_PWR_DIS, 0); else REG_UPDATE(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_LIGHT_SLEEP_DIS, 0); }