/* Flushes the compression bit cache as well as "data".
 * Note: the name here is a bit of a misnomer.  ELPG uses this
 * internally... but ELPG doesn't have to be on to do it manually.
 *
 * Caller is expected to hold the relevant lock ("_locked" suffix);
 * this function does no locking of its own.  Polls up to ~100
 * iterations (20-40 us apart) on silicon; on simulation/FPGA
 * (!tegra_platform_is_silicon()) it polls forever until the flush
 * completes.
 */
static void gk20a_mm_g_elpg_flush_locked(struct gk20a *g)
{
	u32 data;
	s32 retry = 100;

	gk20a_dbg_fn("");

	/* Make sure all previous writes are committed to the L2. There's no
	   guarantee that writes are to DRAM. This will be a sysmembar internal
	   to the L2. */
	gk20a_writel(g, ltc_ltcs_ltss_g_elpg_r(),
		     ltc_ltcs_ltss_g_elpg_flush_pending_f());
	do {
		/* Poll the per-LTC status register until the flush-pending
		 * bit deasserts. */
		data = gk20a_readl(g, ltc_ltc0_ltss_g_elpg_r());

		if (ltc_ltc0_ltss_g_elpg_flush_v(data) ==
		    ltc_ltc0_ltss_g_elpg_flush_pending_v()) {
			gk20a_dbg_info("g_elpg_flush 0x%x", data);
			retry--;
			usleep_range(20, 40);
		} else
			break;
	} while (retry >= 0 || !tegra_platform_is_silicon());

	/* Best-effort: warn but continue if the flush never completed. */
	if (retry < 0)
		gk20a_warn(dev_from_gk20a(g),
			    "g_elpg_flush too many retries");
}
/* GM20B variant of the G_ELPG flush: issues the broadcast flush and then
 * polls every LTC unit individually until each one reports done.
 *
 * Caller must hold the relevant lock ("_locked" suffix).  Retries up to
 * 100 times (5 us apart) on silicon; polls indefinitely on
 * non-silicon platforms.
 */
void gm20b_ltc_g_elpg_flush_locked(struct gk20a *g)
{
	u32 data;
	/* NOTE(review): variable-length array sized by ltc_count; kernel
	 * style discourages VLAs — consider a fixed max or allocation.
	 * ltc_count is presumably small, verify upper bound. */
	bool done[g->ltc_count];
	s32 retry = 100;
	int i;
	int num_done = 0;
	/* Register stride between successive per-LTC G_ELPG registers. */
	u32 ltc_d = ltc_ltc1_ltss_g_elpg_r() - ltc_ltc0_ltss_g_elpg_r();

	gk20a_dbg_fn("");

	trace_gk20a_mm_g_elpg_flush_locked(g->dev->name);

	for (i = 0; i < g->ltc_count; i++)
		done[i] = 0;

	/* Broadcast write: start the flush on all LTCs at once. */
	gk20a_writel(g, ltc_ltcs_ltss_g_elpg_r(),
		     ltc_ltcs_ltss_g_elpg_flush_pending_f());
	do {
		for (i = 0; i < g->ltc_count; i++) {
			if (done[i])
				continue;

			data = gk20a_readl(g,
					ltc_ltc0_ltss_g_elpg_r() + ltc_d * i);

			if (ltc_ltc0_ltss_g_elpg_flush_v(data)) {
				/* Still pending on this LTC. */
				gk20a_dbg_info("g_elpg_flush 0x%x", data);
			} else {
				done[i] = 1;
				num_done++;
			}
		}

		if (num_done < g->ltc_count) {
			retry--;
			udelay(5);
		} else
			break;
	} while (retry >= 0 || !tegra_platform_is_silicon());

	/* Only warn on real hardware; simulation is allowed to be slow. */
	if (retry < 0 && tegra_platform_is_silicon())
		gk20a_warn(dev_from_gk20a(g),
			   "g_elpg_flush too many retries");

	trace_gk20a_mm_g_elpg_flush_locked_done(g->dev->name);
}
/* Gate the SATA clock by asserting the reset via the clock/reset
 * controller, then wait for the CEC reset bit to clear.
 *
 * NOTE(review): the write goes to RST_DEV_V_SET while both polls read
 * RST_DEV_W_SET — looks intentional (different reset banks?) but verify
 * against the TRM; a V/W typo here would make the poll depend on someone
 * else clearing the bit.
 * NOTE(review): the final poll is unbounded — if SET_CEC_RST never
 * clears, this spins forever.  Consider a bounded retry with a warning.
 */
void arbdeg_sata_clk_gate(void)
{
	u32 val;

	/* Register accesses are meaningless on simulation/FPGA. */
	if (!tegra_platform_is_silicon())
		return;

	val = readl(IO_ADDRESS(CLK_RST_CNTRL_RST_DEV_W_SET));
	if (val & SET_CEC_RST)
		writel(0x108, IO_ADDRESS(CLK_RST_CNTRL_RST_DEV_V_SET));
	val = readl(IO_ADDRESS(CLK_RST_CNTRL_RST_DEV_W_SET));
	while (val & SET_CEC_RST)
		val = readl(IO_ADDRESS(CLK_RST_CNTRL_RST_DEV_W_SET));
}
static int vic03_wait_mem_scrubbing(struct platform_device *dev) { int retries = VIC_IDLE_TIMEOUT_DEFAULT / VIC_IDLE_CHECK_PERIOD; nvhost_dbg_fn(""); do { u32 w = host1x_readl(dev, flcn_dmactl_r()) & (flcn_dmactl_dmem_scrubbing_m() | flcn_dmactl_imem_scrubbing_m()); if (!w) { nvhost_dbg_fn("done"); return 0; } udelay(VIC_IDLE_CHECK_PERIOD); } while (--retries || !tegra_platform_is_silicon()); nvhost_err(&dev->dev, "Falcon mem scrubbing timeout"); return -ETIMEDOUT; }
/* Probe for the Tegra I/O power-detect driver: maps each power-detect
 * cell to its regulator, then latches initial I/O power levels and
 * disables detection for unpowered interfaces.
 *
 * Returns 0 on success (also on unsupported package / mapping failure,
 * where detection is simply left always-on), -ENOSYS on non-silicon,
 * or -EPROBE_DEFER if a regulator is not yet available.
 */
static int tegra_pwr_detect_cell_probe(struct platform_device *pdev)
{
	int i, ret;
	u32 package_mask;
	unsigned long flags;
	bool rails_found = true;
	struct device *dev = NULL;

	if (!tegra_platform_is_silicon())
		return -ENOSYS;

	/* When setup from DT we need to pass the device to regulator_get() */
	if (pdev->dev.of_node)
		dev = &pdev->dev;

	i = tegra_package_id();
	/* Package IDs are 5 bits; anything else (except the -1 "unknown"
	 * sentinel) is unsupported and leaves I/O power always on. */
	if ((i != -1) && (i & (~0x1F))) {
		pr_err("tegra: not supported package id %d - io power detection"
		       " is left always on\n", i);
		return 0;
	}
	/* -1 (unknown package) yields an all-ones mask, i.e. match every
	 * cell; otherwise select the single bit for this package. */
	package_mask = (i == -1) ? i : (0x1 << i);

	for (i = 0; i < ARRAY_SIZE(pwr_detect_cells); i++) {
		struct pwr_detect_cell *cell = &pwr_detect_cells[i];

		/* Already initialized (notifier registered) — skip. */
		if (cell->regulator_nb.notifier_call)
			continue;

		/* Cell not present on this package: keep its I/O disabled. */
		if (!(cell->package_mask & package_mask)) {
			pwrio_disabled_mask |= cell->pwrio_mask;
			continue;
		}

		ret = pwr_detect_cell_init_one(dev, cell, &pwrio_disabled_mask);
		if (ret == -EPROBE_DEFER)
			return ret;
		if (ret) {
			pr_err("tegra: failed to map regulator to power detect"
			       " cell %s(%d)\n", cell->reg_id, ret);
			rails_found = false;
		}
	}

	if (!rails_found) {
		pr_err("tegra: failed regulators mapping - io power detection"
		       " is left always on\n");
		return 0;
	}
	pwrdet_rails_found = true;

	/* Latch initial i/o power levels, disable all detection cells
	   and not powered interfaces */
	spin_lock_irqsave(&pwr_lock, flags);
	if (!pwrdet_always_on)
		pwr_detect_latch();
	if (!pwrio_always_on)
		pwr_io_disable(pwrio_disabled_mask);
	spin_unlock_irqrestore(&pwr_lock, flags);

	pr_info("tegra: started io power detection dynamic control\n");
	pr_info("tegra: NO_IO_POWER setting 0x%x\n", pwrio_disabled_mask);

	return 0;
}
void tegra_init_speedo_data(void) { int i; if (!tegra_platform_is_silicon()) { cpu_process_id = 0; core_process_id = 0; gpu_process_id = 0; cpu_speedo_id = 0; soc_speedo_id = 0; gpu_speedo_id = 0; package_id = -1; cpu_speedo_value = 1777; gpu_speedo_value = 2000; cpu_speedo_0_value = 0; cpu_speedo_1_value = 0; soc_speedo_0_value = 0; soc_speedo_1_value = 0; soc_speedo_2_value = 0; soc_iddq_value = 0; gpu_iddq_value = 0; return; } cpu_speedo_0_value = tegra_fuse_readl(FUSE_CPU_SPEEDO_0); cpu_speedo_1_value = tegra_fuse_readl(FUSE_CPU_SPEEDO_1); /* GPU Speedo is stored in CPU_SPEEDO_2 */ gpu_speedo_value = tegra_fuse_readl(FUSE_CPU_SPEEDO_2); soc_speedo_0_value = tegra_fuse_readl(FUSE_SOC_SPEEDO_0); soc_speedo_1_value = tegra_fuse_readl(FUSE_SOC_SPEEDO_1); soc_speedo_2_value = tegra_fuse_readl(FUSE_SOC_SPEEDO_2); cpu_iddq_value = tegra_fuse_readl(FUSE_CPU_IDDQ); soc_iddq_value = tegra_fuse_readl(FUSE_SOC_IDDQ); gpu_iddq_value = tegra_fuse_readl(FUSE_GPU_IDDQ); cpu_speedo_value = cpu_speedo_0_value; if (cpu_speedo_value == 0) { cpu_speedo_value = 2100; pr_warn("Tegra13: Warning: CPU Speedo value not fused. PLEASE FIX!!!!!!!!!!!\n"); pr_warn("Tegra13: Warning: PLEASE USE BOARD WITH FUSED SPEEDO VALUE !!!!\n"); } if (gpu_speedo_value == 0) { gpu_speedo_value = 2000; pr_warn("Tegra13: Warning: GPU Speedo value not fused. 
PLEASE FIX!!!!!!!!!!!\n"); pr_warn("Tegra13: Warning: PLEASE USE BOARD WITH FUSED SPEEDO VALUE !!!!\n"); } rev_sku_to_speedo_ids(tegra_revision, tegra_get_sku_id()); for (i = 0; i < GPU_PROCESS_CORNERS_NUM; i++) { if (gpu_speedo_value < gpu_process_speedos[threshold_index][i]) { break; } } gpu_process_id = i; for (i = 0; i < CPU_PROCESS_CORNERS_NUM; i++) { if (cpu_speedo_value < cpu_process_speedos[threshold_index][i]) { break; } } cpu_process_id = i; for (i = 0; i < CORE_PROCESS_CORNERS_NUM; i++) { if (soc_speedo_0_value < core_process_speedos[threshold_index][i]) { break; } } core_process_id = i; pr_info("Tegra13: CPU Speedo ID %d, Soc Speedo ID %d, Gpu Speedo ID %d\n", cpu_speedo_id, soc_speedo_id, gpu_speedo_id); pr_info("Tegra13: CPU Process ID %d,Soc Process ID %d,Gpu Process ID %d\n", cpu_process_id, core_process_id, gpu_process_id); pr_info("Tegra13: CPU Speedo value %d, Soc Speedo value %d, Gpu Speedo value %d\n", cpu_speedo_value, soc_speedo_0_value, gpu_speedo_value); }
void tegra_init_speedo_data(void) { int i; u32 tegra_sku_id; if (!tegra_platform_is_silicon()) { cpu_process_id = 0; core_process_id = 0; gpu_process_id = 0; cpu_speedo_id = 0; soc_speedo_id = 0; gpu_speedo_id = 0; package_id = -1; cpu_speedo_value = TEGRA21_CPU_SPEEDO; gpu_speedo_value = TEGRA21_GPU_SPEEDO; soc_speedo_value = TEGRA21_SOC_SPEEDO; cpu_speedo_0_value = 0; cpu_speedo_1_value = 0; soc_speedo_0_value = 0; soc_speedo_1_value = 0; soc_speedo_2_value = 0; soc_iddq_value = 0; gpu_iddq_value = 0; pr_info("Tegra21: CPU Speedo value %d, Soc Speedo value %d, Gpu Speedo value %d\n", cpu_speedo_value, soc_speedo_value, gpu_speedo_value); pr_info("Tegra21: CPU Speedo ID %d, Soc Speedo ID %d, Gpu Speedo ID %d\n", cpu_speedo_id, soc_speedo_id, gpu_speedo_id); pr_info("Tegra21: CPU Process ID %d,Soc Process ID %d,Gpu Process ID %d\n", cpu_process_id, core_process_id, gpu_process_id); return; } /* Read speedo/iddq fuses */ cpu_speedo_0_value = tegra_fuse_readl(FUSE_CPU_SPEEDO_0); cpu_speedo_1_value = tegra_fuse_readl(FUSE_CPU_SPEEDO_1); cpu_speedo_2_value = tegra_fuse_readl(FUSE_CPU_SPEEDO_2); soc_speedo_0_value = tegra_fuse_readl(FUSE_SOC_SPEEDO_0); soc_speedo_1_value = tegra_fuse_readl(FUSE_SOC_SPEEDO_1); soc_speedo_2_value = tegra_fuse_readl(FUSE_SOC_SPEEDO_2); cpu_iddq_value = tegra_fuse_readl(FUSE_CPU_IDDQ) * 4; soc_iddq_value = tegra_fuse_readl(FUSE_SOC_IDDQ) * 4; gpu_iddq_value = tegra_fuse_readl(FUSE_GPU_IDDQ) * 5; /* * Determine CPU, GPU, SOC speedo values depending on speedo fusing * revision. 
Note that GPU speedo value is fused in CPU_SPEEDO_2 */ speedo_rev = get_speedo_rev(); if (speedo_rev >= 3) { cpu_speedo_value = cpu_speedo_0_value; gpu_speedo_value = cpu_speedo_2_value; soc_speedo_value = soc_speedo_0_value; } else if (speedo_rev == 2) { cpu_speedo_value = (-1938 + (1095*cpu_speedo_0_value/100)) / 10; gpu_speedo_value = (-1662 + (1082*cpu_speedo_2_value/100)) / 10; soc_speedo_value = (-705 + (1037*soc_speedo_0_value/100)) / 10; } else { /* FIXME: do we need hard-coded IDDQ here? */ cpu_speedo_value = TEGRA21_CPU_SPEEDO; gpu_speedo_value = cpu_speedo_2_value - TEGRA21_GPU_SPEEDO_OFFS; soc_speedo_value = TEGRA21_SOC_SPEEDO; } if (cpu_speedo_value <= 0) { cpu_speedo_value = TEGRA21_CPU_SPEEDO; pr_warn("Tegra21: Warning: CPU Speedo value not fused. PLEASE FIX!!!!!!!!!!!\n"); pr_warn("Tegra21: Warning: PLEASE USE BOARD WITH FUSED SPEEDO VALUE !!!!\n"); } if (gpu_speedo_value <= 0) { gpu_speedo_value = TEGRA21_GPU_SPEEDO; pr_warn("Tegra21: Warning: GPU Speedo value not fused. PLEASE FIX!!!!!!!!!!!\n"); pr_warn("Tegra21: Warning: PLEASE USE BOARD WITH FUSED SPEEDO VALUE !!!!\n"); } if (soc_speedo_value <= 0) { soc_speedo_value = TEGRA21_SOC_SPEEDO; pr_warn("Tegra21: Warning: SOC Speedo value not fused. 
PLEASE FIX!!!!!!!!!!!\n"); pr_warn("Tegra21: Warning: PLEASE USE BOARD WITH FUSED SPEEDO VALUE !!!!\n"); } /* Map chip sku, rev, speedo values into speedo and process IDs */ tegra_sku_id = tegra_get_sku_id(); rev_sku_to_speedo_ids(tegra_revision, tegra_sku_id, speedo_rev); for (i = 0; i < GPU_PROCESS_CORNERS_NUM; i++) { if (gpu_speedo_value < gpu_process_speedos[threshold_index][i]) { break; } } gpu_process_id = i; for (i = 0; i < CPU_PROCESS_CORNERS_NUM; i++) { if (cpu_speedo_value < cpu_process_speedos[threshold_index][i]) { break; } } cpu_process_id = i; for (i = 0; i < CORE_PROCESS_CORNERS_NUM; i++) { if (soc_speedo_value < core_process_speedos[threshold_index][i]) { break; } } core_process_id = i; pr_info("Tegra21: Speedo/IDDQ fuse revision %d\n", speedo_rev); pr_info("Tegra21: CPU Speedo ID %d, Soc Speedo ID %d, Gpu Speedo ID %d\n", cpu_speedo_id, soc_speedo_id, gpu_speedo_id); pr_info("Tegra21: CPU Process ID %d, Soc Process ID %d, Gpu Process ID %d\n", cpu_process_id, core_process_id, gpu_process_id); pr_info("Tegra21: CPU Speedo value %d, Soc Speedo value %d, Gpu Speedo value %d\n", cpu_speedo_value, soc_speedo_value, gpu_speedo_value); pr_info("Tegra21: CPU IDDQ %d, Soc IDDQ %d, Gpu IDDQ %d\n", cpu_iddq_value, soc_iddq_value, gpu_iddq_value); }
/* Issue a compression-bit-cache (CBC) operation — clear, clean, or
 * invalidate — via the broadcast CTRL1 register, then poll every
 * LTC/slice unicast CTRL1 until the operation bit deasserts.
 *
 * @g:   GPU instance
 * @op:  one of gk20a_cbc_op_{clear,clean,invalidate}
 * @min: lower compbit bound (clear only)
 * @max: upper compbit bound (clear only)
 *
 * Returns 0 on success (or trivially when no compbit store exists),
 * -EBUSY if any slice fails to complete within ~200 retries (5 us
 * apart) on silicon.  Takes g->mm.l2_op_lock for the duration.
 */
int gm20b_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
		       u32 min, u32 max)
{
	int err = 0;
	struct gr_gk20a *gr = &g->gr;
	u32 ltc, slice, ctrl1, val, hw_op = 0;
	s32 retry = 200;
	u32 slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(
				gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));

	gk20a_dbg_fn("");

	trace_gk20a_ltc_cbc_ctrl_start(g->dev->name, op, min, max);

	/* Nothing to do if compression tag memory was never allocated. */
	if (gr->compbit_store.mem.size == 0)
		return 0;

	mutex_lock(&g->mm.l2_op_lock);

	if (op == gk20a_cbc_op_clear) {
		/* Clear needs the target compbit range programmed first. */
		gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl2_r(),
			ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(min));
		gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl3_r(),
			ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(max));
		hw_op = ltc_ltcs_ltss_cbc_ctrl1_clear_active_f();
	} else if (op == gk20a_cbc_op_clean) {
		hw_op = ltc_ltcs_ltss_cbc_ctrl1_clean_active_f();
	} else if (op == gk20a_cbc_op_invalidate) {
		hw_op = ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f();
	} else {
		BUG_ON(1);
	}

	/* Broadcast write kicks off the operation on every LTC/slice. */
	gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl1_r(),
		     gk20a_readl(g, ltc_ltcs_ltss_cbc_ctrl1_r()) | hw_op);

	/* Poll each slice's unicast register; the active bit self-clears
	 * when that slice is done. */
	for (ltc = 0; ltc < g->ltc_count; ltc++) {
		for (slice = 0; slice < slices_per_ltc; slice++) {

			ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
				ltc * proj_ltc_stride_v() +
				slice * proj_lts_stride_v();

			retry = 200;
			do {
				val = gk20a_readl(g, ctrl1);
				if (!(val & hw_op))
					break;
				retry--;
				udelay(5);
			} while (retry >= 0 ||
					!tegra_platform_is_silicon());

			if (retry < 0 && tegra_platform_is_silicon()) {
				gk20a_err(dev_from_gk20a(g),
					   "comp tag clear timeout\n");
				err = -EBUSY;
				goto out;
			}
		}
	}
out:
	trace_gk20a_ltc_cbc_ctrl_done(g->dev->name);
	mutex_unlock(&g->mm.l2_op_lock);
	return err;
}
/* Write one chunk (up to DP_AUX_MAX_BYTES) over the DisplayPort AUX
 * channel, retrying on transaction errors and DEFER replies.
 *
 * @dp:       DPAUX controller state
 * @cmd:      must be one of the write commands (I2CWR/MOTWR/AUXWR)
 * @addr:     AUX address to write to
 * @data:     bytes to send
 * @size:     in: requested byte count; out: bytes the sink acked
 * @aux_stat: out: last DPAUX_DP_AUXSTAT value observed
 *
 * Returns 0 on ACK, -1 on invalid command, oversized chunk, or
 * exhausted timeout/defer retries.
 */
static int tegra_dc_dpaux_write_chunk(struct tegra_dc_dp_data *dp, u32 cmd,
					u32 addr, u8 *data, u32 *size,
					u32 *aux_stat)
{
	int i;
	u32 reg_val;
	u32 timeout_retries = DP_AUX_TIMEOUT_MAX_TRIES;
	u32 defer_retries = DP_AUX_DEFER_MAX_TRIES;
	u32 temp_data;

	if (*size > DP_AUX_MAX_BYTES)
		return -1; /* only write one chunk of data */

	/* Make sure the command is write command */
	switch (cmd) {
	case DPAUX_DP_AUXCTL_CMD_I2CWR:
	case DPAUX_DP_AUXCTL_CMD_MOTWR:
	case DPAUX_DP_AUXCTL_CMD_AUXWR:
		break;
	default:
		printk(BIOS_SPEW,"dp: aux write cmd 0x%x is invalid\n",
			cmd);
		return -1;
	}

#if 0
	/* interesting. */
	if (tegra_platform_is_silicon()) {
		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);
		if (!(*aux_stat & DPAUX_DP_AUXSTAT_HPD_STATUS_PLUGGED)) {
			printk(BIOS_SPEW,"dp: HPD is not detected\n");
			return -EFAULT;
		}
	}
#endif

	tegra_dpaux_writel(dp, DPAUX_DP_AUXADDR, addr);
	/* Fill every data word register.
	 * NOTE(review): this copies DP_AUX_MAX_BYTES from `data` even
	 * when *size is smaller — the memcpy may read past the caller's
	 * buffer.  Presumably callers always provide a full-size buffer;
	 * verify. */
	for (i = 0; i < DP_AUX_MAX_BYTES/4; ++i) {
		memcpy(&temp_data, data, 4);
		tegra_dpaux_writel(dp, DPAUX_DP_AUXDATA_WRITE_W(i),
			temp_data);
		data += 4;
	}

	/* Program command and (length - 1) into AUXCTL. */
	reg_val = tegra_dpaux_readl(dp, DPAUX_DP_AUXCTL);
	reg_val &= ~DPAUX_DP_AUXCTL_CMD_MASK;
	reg_val |= cmd;
	reg_val &= ~DPAUX_DP_AUXCTL_CMDLEN_FIELD;
	reg_val |= ((*size-1) << DPAUX_DP_AUXCTL_CMDLEN_SHIFT);

	while ((timeout_retries > 0) && (defer_retries > 0)) {
		/* Brief settle delay on every retry (skipped on the very
		 * first attempt, when both counters are still at max). */
		if ((timeout_retries != DP_AUX_TIMEOUT_MAX_TRIES) ||
		    (defer_retries != DP_AUX_DEFER_MAX_TRIES))
			udelay(1);

		/* Kick the transaction and wait for it to finish. */
		reg_val |= DPAUX_DP_AUXCTL_TRANSACTREQ_PENDING;
		tegra_dpaux_writel(dp, DPAUX_DP_AUXCTL, reg_val);

		if (tegra_dpaux_wait_transaction(dp))
			printk(BIOS_SPEW,"dp: aux write transaction timeout\n");

		*aux_stat = tegra_dpaux_readl(dp, DPAUX_DP_AUXSTAT);

		/* Hardware-level errors: clear status and retry, up to
		 * DP_AUX_TIMEOUT_MAX_TRIES times. */
		if ((*aux_stat & DPAUX_DP_AUXSTAT_TIMEOUT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_RX_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_SINKSTAT_ERROR_PENDING) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_NO_STOP_ERROR_PENDING)) {
			if (timeout_retries-- > 0) {
				printk(BIOS_SPEW,"dp: aux write retry (0x%x) -- %d\n",
					*aux_stat, timeout_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
					*aux_stat);
				continue;
			} else {
				printk(BIOS_SPEW,"dp: aux write got error (0x%x)\n",
					*aux_stat);
				return -1;
			}
		}

		/* Sink asked us to defer: retry on a separate budget. */
		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_I2CDEFER) ||
		    (*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_DEFER)) {
			if (defer_retries-- > 0) {
				printk(BIOS_SPEW,
					"dp: aux write defer (0x%x) -- %d\n",
					*aux_stat, defer_retries);
				/* clear the error bits */
				tegra_dpaux_writel(dp, DPAUX_DP_AUXSTAT,
					*aux_stat);
				continue;
			} else {
				printk(BIOS_SPEW,
					"dp: aux write defer exceeds max retries "
					"(0x%x)\n", *aux_stat);
				return -1;
			}
		}

		/* ACK: report how many bytes the sink accepted. */
		if ((*aux_stat & DPAUX_DP_AUXSTAT_REPLYTYPE_MASK) ==
			DPAUX_DP_AUXSTAT_REPLYTYPE_ACK) {
			*size = ((*aux_stat) & DPAUX_DP_AUXSTAT_REPLY_M_MASK);
			return 0;
		} else {
			printk(BIOS_SPEW,"dp: aux write failed (0x%x)\n",
				*aux_stat);
			return -1;
		}
	}

	/* Should never come to here */
	return -1;
}
/* Program the GPC PLL to the target M/N/PL coefficients in clk->gpc_pll.
 *
 * Fast path: if only N changes and the PLL is enabled, glitchlessly
 * slide NDIV to the target.  Otherwise perform the full sequence:
 * slide down to NDIV_LO, halve the output divider, bypass the PLL,
 * power it out of IDDQ, disable it, write new coefficients, re-enable,
 * wait for lock, switch back to VCO, and slide up to the final NDIV.
 * The statement order below is the hardware-required sequence — do not
 * reorder.
 *
 * @allow_slide: nonzero permits the dynamic NDIV slide paths.
 *
 * Returns 0 on success, an error from clk_slide_gpc_pll(), or BUG()s
 * if the PLL never locks.
 */
static int clk_program_gpc_pll(struct gk20a *g, struct clk_gk20a *clk,
			int allow_slide)
{
	u32 data, cfg, coeff, timeout;
	u32 m, n, pl;
	u32 nlo;

	gk20a_dbg_fn("");

	if (!tegra_platform_is_silicon())
		return 0;

	/* get old coefficients */
	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
	m = trim_sys_gpcpll_coeff_mdiv_v(coeff);
	n = trim_sys_gpcpll_coeff_ndiv_v(coeff);
	pl = trim_sys_gpcpll_coeff_pldiv_v(coeff);

	/* do NDIV slide if there is no change in M and PL */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	if (allow_slide && clk->gpc_pll.M == m && clk->gpc_pll.PL == pl
		&& trim_sys_gpcpll_cfg_enable_v(cfg)) {
		return clk_slide_gpc_pll(g, clk->gpc_pll.N);
	}

	/* slide down to NDIV_LO */
	nlo = DIV_ROUND_UP(m * gpc_pll_params.min_vco, clk->gpc_pll.clk_in);
	if (allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg)) {
		int ret = clk_slide_gpc_pll(g, nlo);
		if (ret)
			return ret;
	}

	/* split FO-to-bypass jump in halfs by setting out divider 1:2 */
	data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
	data = set_field(data, trim_sys_gpc2clk_out_vcodiv_m(),
		trim_sys_gpc2clk_out_vcodiv_f(2));
	gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);

	/* put PLL in bypass before programming it */
	data = gk20a_readl(g, trim_sys_sel_vco_r());
	data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
		trim_sys_sel_vco_gpc2clk_out_bypass_f());
	/* NOTE(review): delay before the write — presumably to let the
	 * divider change settle first; confirm against the TRM. */
	udelay(2);
	gk20a_writel(g, trim_sys_sel_vco_r(), data);

	/* get out from IDDQ */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	if (trim_sys_gpcpll_cfg_iddq_v(cfg)) {
		cfg = set_field(cfg, trim_sys_gpcpll_cfg_iddq_m(),
			trim_sys_gpcpll_cfg_iddq_power_on_v());
		gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
		/* Read back to post the write, then let power settle. */
		gk20a_readl(g, trim_sys_gpcpll_cfg_r());
		udelay(2);
	}

	/* disable PLL before changing coefficients */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
			trim_sys_gpcpll_cfg_enable_no_f());
	gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
	gk20a_readl(g, trim_sys_gpcpll_cfg_r());

	/* change coefficients */
	nlo = DIV_ROUND_UP(clk->gpc_pll.M * gpc_pll_params.min_vco,
			clk->gpc_pll.clk_in);
	/* When sliding is allowed, program NDIV_LO first; the final N is
	 * reached by the slide at the end of this function. */
	coeff = trim_sys_gpcpll_coeff_mdiv_f(clk->gpc_pll.M) |
		trim_sys_gpcpll_coeff_ndiv_f(allow_slide ?
			nlo : clk->gpc_pll.N) |
		trim_sys_gpcpll_coeff_pldiv_f(clk->gpc_pll.PL);
	gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);

	/* enable PLL after changing coefficients */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
			trim_sys_gpcpll_cfg_enable_yes_f());
	gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);

	/* lock pll */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	if (cfg & trim_sys_gpcpll_cfg_enb_lckdet_power_off_f()){
		/* Lock detector was powered off — power it on. */
		cfg = set_field(cfg, trim_sys_gpcpll_cfg_enb_lckdet_m(),
			trim_sys_gpcpll_cfg_enb_lckdet_power_on_f());
		gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
	}

	/* wait pll lock */
	timeout = clk->pll_delay / 2 + 1;
	do {
		cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
		if (cfg & trim_sys_gpcpll_cfg_pll_lock_true_f())
			goto pll_locked;
		udelay(2);
	} while (--timeout > 0);

	/* PLL is messed up. What can we do here? */
	BUG();
	return -EBUSY;

pll_locked:
	/* put PLL back on vco */
	data = gk20a_readl(g, trim_sys_sel_vco_r());
	data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
		trim_sys_sel_vco_gpc2clk_out_vco_f());
	gk20a_writel(g, trim_sys_sel_vco_r(), data);
	clk->gpc_pll.enabled = true;

	/* restore out divider 1:1 */
	data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
	data = set_field(data, trim_sys_gpc2clk_out_vcodiv_m(),
		trim_sys_gpc2clk_out_vcodiv_by1_f());
	udelay(2);
	gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);

	/* slide up to target NDIV */
	return clk_slide_gpc_pll(g, clk->gpc_pll.N);
}
/* Issue a compression-bit-cache (CBC) operation — clear, clean, or
 * invalidate — via the broadcast CTRL1 register, then poll every
 * FBP/slice unicast CTRL1 until the operation bit deasserts.
 *
 * @g:   GPU instance
 * @op:  one of gk20a_cbc_op_{clear,clean,invalidate}
 * @min: lower compbit bound (clear only)
 * @max: upper compbit bound (clear only)
 *
 * Returns 0 on success (or trivially when no compbit store exists),
 * -EBUSY on timeout.  Takes g->mm.l2_op_lock for the duration.
 *
 * Fixes: (1) the function unconditionally returned 0, discarding the
 * -EBUSY stored in err on the timeout path; now returns err.
 * (2) timeout was detected with !time_before(jiffies, end_jiffies),
 * which false-positives when the op completes exactly as the deadline
 * passes and never fires on non-silicon (where the loop only exits on
 * completion); now detect timeout by checking whether the hw_op bit is
 * still set — matching the gm20b sibling's completion-state check.
 */
static int gk20a_ltc_cbc_ctrl(struct gk20a *g, enum gk20a_cbc_op op,
			      u32 min, u32 max)
{
	int err = 0;
	struct gr_gk20a *gr = &g->gr;
	u32 fbp, slice, ctrl1, val, hw_op = 0;
	unsigned long end_jiffies = jiffies +
		msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
	u32 delay = GR_IDLE_CHECK_DEFAULT;
	u32 slices_per_fbp =
		ltc_ltcs_ltss_cbc_param_slices_per_fbp_v(
			gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r()));

	gk20a_dbg_fn("");

	/* Nothing to do if compression tag memory was never allocated. */
	if (gr->compbit_store.size == 0)
		return 0;

	mutex_lock(&g->mm.l2_op_lock);

	if (op == gk20a_cbc_op_clear) {
		/* Clear needs the target compbit range programmed first. */
		gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl2_r(),
			ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(min));
		gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl3_r(),
			ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(max));
		hw_op = ltc_ltcs_ltss_cbc_ctrl1_clear_active_f();
	} else if (op == gk20a_cbc_op_clean) {
		hw_op = ltc_ltcs_ltss_cbc_ctrl1_clean_active_f();
	} else if (op == gk20a_cbc_op_invalidate) {
		hw_op = ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f();
	} else {
		BUG_ON(1);
	}

	/* Broadcast write kicks off the operation on every FBP/slice. */
	gk20a_writel(g, ltc_ltcs_ltss_cbc_ctrl1_r(),
		     gk20a_readl(g, ltc_ltcs_ltss_cbc_ctrl1_r()) | hw_op);

	/* Poll each slice's unicast register; the active bit self-clears
	 * when that slice is done. */
	for (fbp = 0; fbp < gr->num_fbps; fbp++) {
		for (slice = 0; slice < slices_per_fbp; slice++) {
			delay = GR_IDLE_CHECK_DEFAULT;

			ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
				fbp * proj_ltc_stride_v() +
				slice * proj_lts_stride_v();

			do {
				val = gk20a_readl(g, ctrl1);
				if (!(val & hw_op))
					break;

				/* Exponential backoff, capped. */
				usleep_range(delay, delay * 2);
				delay = min_t(u32, delay << 1,
					GR_IDLE_CHECK_MAX);

			} while (time_before(jiffies, end_jiffies) ||
					!tegra_platform_is_silicon());

			/* Timed out only if the operation is still
			 * pending on this slice. */
			if (val & hw_op) {
				gk20a_err(dev_from_gk20a(g),
					   "comp tag clear timeout\n");
				err = -EBUSY;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&g->mm.l2_op_lock);
	return err;
}