bool ath9k_hw_set_channel(struct ath_hal *ah, struct ath9k_channel *chan) { u32 channelSel = 0; u32 bModeSynth = 0; u32 aModeRefSel = 0; u32 reg32 = 0; u16 freq; struct chan_centers centers; ath9k_hw_get_channel_centers(ah, chan, ¢ers); freq = centers.synth_center; if (freq < 4800) { u32 txctl; if (((freq - 2192) % 5) == 0) { channelSel = ((freq - 672) * 2 - 3040) / 10; bModeSynth = 0; } else if (((freq - 2224) % 5) == 0) { channelSel = ((freq - 704) * 2 - 3040) / 10; bModeSynth = 1; } else { DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, "Invalid channel %u MHz\n", freq); return false; } channelSel = (channelSel << 2) & 0xff; channelSel = ath9k_hw_reverse_bits(channelSel, 8); txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL); if (freq == 2484) { REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, txctl | AR_PHY_CCK_TX_CTRL_JAPAN); } else { REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN); } } else if ((freq % 20) == 0 && freq >= 5120) { channelSel = ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8); aModeRefSel = ath9k_hw_reverse_bits(1, 2); } else if ((freq % 10) == 0) { channelSel = ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8); if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) aModeRefSel = ath9k_hw_reverse_bits(2, 2); else aModeRefSel = ath9k_hw_reverse_bits(1, 2); } else if ((freq % 5) == 0) { channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8); aModeRefSel = ath9k_hw_reverse_bits(1, 2); } else { DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, "Invalid channel %u MHz\n", freq); return false; } reg32 = (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) | (1 << 5) | 0x1; REG_WRITE(ah, AR_PHY(0x37), reg32); ah->ah_curchan = chan; AH5416(ah)->ah_curchanRadIndex = -1; return true; }
/* Return the current TX descriptor pointer (AR_QTXDP) for queue @q. */
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	u32 txdp = REG_READ(ah, AR_QTXDP(q));

	return txdp;
}
/*
 * Convert a raw 9-bit two's-complement noise-floor reading to a signed
 * value. Factored out of ath9k_hw_do_getnf(), which previously repeated
 * this expression once per chain.
 */
static int16_t ath9k_hw_nf_to_signed(int16_t nf)
{
	if (nf & 0x100)
		nf = 0 - ((nf ^ 0x1ff) + 1);
	return nf;
}

/*
 * ath9k_hw_do_getnf() - read the calibrated noise floor for every chain.
 * @ah:      HAL state
 * @nfarray: receives one reading per chain: [0..2] control channel
 *           chains 0-2, [3..5] extension channel chains 0-2.
 *
 * AR9280+ parts expose the reading under different field masks than
 * older parts, hence the per-register AR_SREV checks. Chain 2 entries
 * are only filled on non-AR9280 hardware.
 */
static void ath9k_hw_do_getnf(struct ath_hal *ah,
			      int16_t nfarray[NUM_NF_READINGS])
{
	int16_t nf;

	/* [ctl] chain 0 */
	if (AR_SREV_9280_10_OR_LATER(ah))
		nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
	else
		nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
	nf = ath9k_hw_nf_to_signed(nf);
	DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
		"NF calibrated [ctl] [chain 0] is %d\n", nf);
	nfarray[0] = nf;

	/* [ctl] chain 1 */
	if (AR_SREV_9280_10_OR_LATER(ah))
		nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
			AR9280_PHY_CH1_MINCCA_PWR);
	else
		nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
	nf = ath9k_hw_nf_to_signed(nf);
	DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
		"NF calibrated [ctl] [chain 1] is %d\n", nf);
	nfarray[1] = nf;

	/* [ctl] chain 2 - not present on AR9280 */
	if (!AR_SREV_9280(ah)) {
		nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
			AR_PHY_CH2_MINCCA_PWR);
		nf = ath9k_hw_nf_to_signed(nf);
		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"NF calibrated [ctl] [chain 2] is %d\n", nf);
		nfarray[2] = nf;
	}

	/* [ext] chain 0 */
	if (AR_SREV_9280_10_OR_LATER(ah))
		nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
			AR9280_PHY_EXT_MINCCA_PWR);
	else
		nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
	nf = ath9k_hw_nf_to_signed(nf);
	DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
		"NF calibrated [ext] [chain 0] is %d\n", nf);
	nfarray[3] = nf;

	/* [ext] chain 1 */
	if (AR_SREV_9280_10_OR_LATER(ah))
		nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
			AR9280_PHY_CH1_EXT_MINCCA_PWR);
	else
		nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
			AR_PHY_CH1_EXT_MINCCA_PWR);
	nf = ath9k_hw_nf_to_signed(nf);
	DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
		"NF calibrated [ext] [chain 1] is %d\n", nf);
	nfarray[4] = nf;

	/* [ext] chain 2 - not present on AR9280 */
	if (!AR_SREV_9280(ah)) {
		nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
			AR_PHY_CH2_EXT_MINCCA_PWR);
		nf = ath9k_hw_nf_to_signed(nf);
		DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
			"NF calibrated [ext] [chain 2] is %d\n", nf);
		nfarray[5] = nf;
	}
}
/*
 * handle_dsi_error() - react to a single DSI interrupt-status error bit.
 * @sender: DSI packet sender whose interrupt fired
 * @mask:   exactly one BIT(n) from the MIPI interrupt status register
 *
 * Most error bits need no action beyond logging; the Gen FIFO "full"
 * bits are acknowledged and drained. Always returns 0.
 *
 * NOTE(review): 'dev' appears unused here, but in this driver family
 * REG_READ()/REG_WRITE() are macros that capture 'dev' from the
 * enclosing scope - confirm against the macro definitions before
 * removing it.
 */
static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
{
	u32 intr_stat_reg = sender->mipi_intr_stat_reg;
	struct drm_device *dev = sender->dev;

	dev_dbg(sender->dev->dev, "Handling error 0x%08x\n", mask);

	switch (mask) {
	case BIT(0):
	case BIT(1):
	case BIT(2):
	case BIT(3):
	case BIT(4):
	case BIT(5):
	case BIT(6):
	case BIT(7):
	case BIT(8):
	case BIT(9):
	case BIT(10):
	case BIT(11):
	case BIT(12):
	case BIT(13):
		dev_dbg(sender->dev->dev, "No Action required\n");
		break;
	case BIT(14):
		/*wait for all fifo empty*/
		/*wait_for_all_fifos_empty(sender)*/;
		break;
	case BIT(15):
		dev_dbg(sender->dev->dev, "No Action required\n");
		break;
	case BIT(16):
		break;
	case BIT(17):
		break;
	case BIT(18):
	case BIT(19):
		dev_dbg(sender->dev->dev, "High/Low contention detected\n");
		/*wait for contention recovery time*/
		/*mdelay(10);*/
		/*wait for all fifo empty*/
		/* deliberately disabled recovery path - kept for reference */
		if (0)
			wait_for_all_fifos_empty(sender);
		break;
	case BIT(20):
		dev_dbg(sender->dev->dev, "No Action required\n");
		break;
	case BIT(21):
		/*wait for all fifo empty*/
		/*wait_for_all_fifos_empty(sender);*/
		break;
	case BIT(22):
		break;
	case BIT(23):
	case BIT(24):
	case BIT(25):
	case BIT(26):
	case BIT(27):
		/* ack the status bit, then drain the high-speed FIFOs */
		dev_dbg(sender->dev->dev, "HS Gen fifo full\n");
		REG_WRITE(intr_stat_reg, mask);
		wait_for_hs_fifos_empty(sender);
		break;
	case BIT(28):
		/* ack the status bit, then drain the low-power FIFOs */
		dev_dbg(sender->dev->dev, "LP Gen fifo full\n");
		REG_WRITE(intr_stat_reg, mask);
		wait_for_lp_fifos_empty(sender);
		break;
	case BIT(29):
	case BIT(30):
	case BIT(31):
		dev_dbg(sender->dev->dev, "No Action required\n");
		break;
	}

	/* status bits are write-1-to-clear; warn if the bit is still set */
	if (mask & REG_READ(intr_stat_reg))
		dev_dbg(sender->dev->dev,
			"Cannot clean interrupt 0x%08x\n", mask);

	return 0;
}
/*
 * ar9002_hw_get_isr() - read and decode the pending interrupt causes.
 * @ah:     hardware state
 * @masked: out-parameter; receives the decoded ATH9K_INT_* bit mask
 *
 * Returns true if an interrupt of interest is pending. The statement
 * order is significant: secondary cause registers (AR_ISR_S2) are read
 * before AR_ISR_RAC, and the shadow "_S" registers are used afterwards.
 */
static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
{
	u32 isr = 0;
	u32 mask2 = 0;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	u32 sync_cause = 0;
	bool fatal_int = false;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!AR_SREV_9100(ah)) {
		/* only sample AR_ISR while the MAC is signalling and awake */
		if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
			if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
			    == AR_RTC_STATUS_ON) {
				isr = REG_READ(ah, AR_ISR);
			}
		}

		sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
			     AR_INTR_SYNC_DEFAULT;

		*masked = 0;

		/* nothing pending at all -> not our interrupt */
		if (!isr && !sync_cause)
			return false;
	} else {
		/* AR9100 has no sync-cause logic; read AR_ISR directly */
		*masked = 0;
		isr = REG_READ(ah, AR_ISR);
	}

	if (isr) {
		if (isr & AR_ISR_BCNMISC) {
			/* decode the secondary beacon/misc causes */
			u32 isr2;
			isr2 = REG_READ(ah, AR_ISR_S2);
			if (isr2 & AR_ISR_S2_TIM)
				mask2 |= ATH9K_INT_TIM;
			if (isr2 & AR_ISR_S2_DTIM)
				mask2 |= ATH9K_INT_DTIM;
			if (isr2 & AR_ISR_S2_DTIMSYNC)
				mask2 |= ATH9K_INT_DTIMSYNC;
			if (isr2 & (AR_ISR_S2_CABEND))
				mask2 |= ATH9K_INT_CABEND;
			if (isr2 & AR_ISR_S2_GTT)
				mask2 |= ATH9K_INT_GTT;
			if (isr2 & AR_ISR_S2_CST)
				mask2 |= ATH9K_INT_CST;
			if (isr2 & AR_ISR_S2_TSFOOR)
				mask2 |= ATH9K_INT_TSFOOR;
		}

		/* NOTE(review): reading the RAC shadow register presumably
		 * also clears the primary status - confirm; ordering here
		 * must not change. */
		isr = REG_READ(ah, AR_ISR_RAC);
		if (isr == 0xffffffff) {
			/* all-ones read: device is gone/unreadable */
			*masked = 0;
			return false;
		}

		*masked = isr & ATH9K_INT_COMMON;

		if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM |
			   AR_ISR_RXOK | AR_ISR_RXERR))
			*masked |= ATH9K_INT_RX;

		if (isr & (AR_ISR_TXOK | AR_ISR_TXDESC |
			   AR_ISR_TXERR | AR_ISR_TXEOL)) {
			u32 s0_s, s1_s;

			*masked |= ATH9K_INT_TX;

			/* record which TX queues triggered */
			s0_s = REG_READ(ah, AR_ISR_S0_S);
			ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
			ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);

			s1_s = REG_READ(ah, AR_ISR_S1_S);
			ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
			ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
		}

		if (isr & AR_ISR_RXORN) {
			ath_dbg(common, ATH_DBG_INTERRUPT,
				"receive FIFO overrun interrupt\n");
		}

		*masked |= mask2;
	}

	/* no generic-timer or sync-cause handling on AR9100 */
	if (AR_SREV_9100(ah))
		return true;

	if (isr & AR_ISR_GENTMR) {
		u32 s5_s;

		s5_s = REG_READ(ah, AR_ISR_S5_S);
		ah->intr_gen_timer_trigger =
			MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);

		ah->intr_gen_timer_thresh =
			MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);

		if (ah->intr_gen_timer_trigger)
			*masked |= ATH9K_INT_GENTIMER;

		if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
		    !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			*masked |= ATH9K_INT_TIM_TIMER;
	}

	if (sync_cause) {
		fatal_int =
			(sync_cause &
			 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
			? true : false;

		if (fatal_int) {
			if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
				ath_dbg(common, ATH_DBG_ANY,
					"received PCI FATAL interrupt\n");
			}
			if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
				ath_dbg(common, ATH_DBG_ANY,
					"received PCI PERR interrupt\n");
			}
			*masked |= ATH9K_INT_FATAL;
		}

		if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
			ath_dbg(common, ATH_DBG_INTERRUPT,
				"AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
			/* pulse the host-interface reset to recover */
			REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
			REG_WRITE(ah, AR_RC, 0);
			*masked |= ATH9K_INT_FATAL;
		}

		if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
			ath_dbg(common, ATH_DBG_INTERRUPT,
				"AR_INTR_SYNC_LOCAL_TIMEOUT\n");
		}

		/* ack the sync causes; readback flushes the posted write */
		REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
		(void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
	}

	return true;
}
/*
 * ath9k_hw_9285_pa_cal() - run PA offset calibration on AR9285.
 * @ah:       hardware state
 * @is_reset: true when called from a chip reset (forces recalibration
 *            even if the offset is unchanged)
 *
 * Saves a list of analog registers, powers down/configures the PA for
 * calibration, measures a 7-bit offset bit-by-bit, applies it, then
 * restores the saved registers. The exact register sequence and delays
 * come from the vendor calibration procedure - do not reorder.
 */
static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
{
	u32 regVal;
	int i, offset, offs_6_1, offs_0;
	u32 ccomp_org, reg_field;
	/* registers saved before and restored after calibration */
	u32 regList[][2] = {
		{ 0x786c, 0 },
		{ 0x7854, 0 },
		{ 0x7820, 0 },
		{ 0x7824, 0 },
		{ 0x7868, 0 },
		{ 0x783c, 0 },
		{ 0x7838, 0 },
	};

	DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "Running PA Calibration\n");

	/* PA CAL is not needed for high power solution */
	if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
	    AR5416_EEP_TXGAIN_HIGH_POWER)
		return;

	if (AR_SREV_9285_11(ah)) {
		REG_WRITE(ah, AR9285_AN_TOP4,
			  (AR9285_AN_TOP4_DEFAULT | 0x14));
		udelay(10);
	}

	/* snapshot the analog registers we are about to disturb */
	for (i = 0; i < ARRAY_SIZE(regList); i++)
		regList[i][1] = REG_READ(ah, regList[i][0]);

	regVal = REG_READ(ah, 0x7834);
	regVal &= (~(0x1));
	REG_WRITE(ah, 0x7834, regVal);
	regVal = REG_READ(ah, 0x9808);
	regVal |= (0x1 << 27);
	REG_WRITE(ah, 0x9808, regVal);

	/* power down / reconfigure the TX chain for calibration */
	REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
	REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
	REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
	REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
	ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf);

	REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
	udelay(30);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);

	/* measure offset bits 6..1, one bit at a time (MSB first) */
	for (i = 6; i > 0; i--) {
		regVal = REG_READ(ah, 0x7834);
		regVal |= (1 << (19 + i));
		REG_WRITE(ah, 0x7834, regVal);
		udelay(1);
		regVal = REG_READ(ah, 0x7834);
		regVal &= (~(0x1 << (19 + i)));
		reg_field = MS(REG_READ(ah, 0x7840),
			       AR9285_AN_RXTXBB1_SPARE9);
		regVal |= (reg_field << (19 + i));
		REG_WRITE(ah, 0x7834, regVal);
	}

	/* measure offset bit 0 */
	REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
	udelay(1);
	/* NOTE(review): SPARE9 mask applied to RF2G9 - looks intentional
	 * (comparator output routed via spare bit) but verify against the
	 * AR9285 analog register map. */
	reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9),
		       AR9285_AN_RXTXBB1_SPARE9);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP,
		      reg_field);
	offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
	offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);

	/* combine into a 7-bit offset; the "- 0" is a no-op kept from the
	 * original adjustment hook */
	offset = (offs_6_1<<1) | offs_0;
	offset = offset - 0;
	offs_6_1 = offset>>1;
	offs_0 = offset & 1;

	/* back off the calibration rate while the offset stays stable */
	if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) {
		if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
			ah->pacal_info.max_skipcount =
				2 * ah->pacal_info.max_skipcount;
		ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
	} else {
		ah->pacal_info.max_skipcount = 1;
		ah->pacal_info.skipcount = 0;
		ah->pacal_info.prev_offset = offset;
	}

	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);

	/* leave calibration mode */
	regVal = REG_READ(ah, 0x7834);
	regVal |= 0x1;
	REG_WRITE(ah, 0x7834, regVal);
	regVal = REG_READ(ah, 0x9808);
	regVal &= (~(0x1 << 27));
	REG_WRITE(ah, 0x9808, regVal);

	/* restore the snapshotted registers */
	for (i = 0; i < ARRAY_SIZE(regList); i++)
		REG_WRITE(ah, regList[i][0], regList[i][1]);

	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);

	if (AR_SREV_9285_11(ah))
		REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
}
static int mdfld_dpu_update_fb(struct drm_device * dev) { struct drm_crtc * crtc; struct psb_intel_crtc * psb_crtc; struct mdfld_dsi_dbi_output ** dbi_output; struct drm_psb_private * dev_priv = dev->dev_private; struct mdfld_dbi_dpu_info * dpu_info = dev_priv->dbi_dpu_info; bool pipe_updated[2]; unsigned long irq_flags; u32 dpll_reg = MRST_DPLL_A; u32 dspcntr_reg = DSPACNTR; u32 pipeconf_reg = PIPEACONF; u32 dsplinoff_reg = DSPALINOFF; u32 dspsurf_reg = DSPASURF; u32 mipi_state_reg = MIPIA_INTR_STAT_REG; u32 reg_offset = 0; int pipe; int i; int ret; dbi_output = dpu_info->dbi_outputs; pipe_updated[0] = pipe_updated[1] = false; if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true)) return -EAGAIN; /*try to prevent any new damage reports*/ if(!spin_trylock_irqsave(&dpu_info->dpu_update_lock, irq_flags)) { return -EAGAIN; } for(i=0; i<dpu_info->dbi_output_num; i++) { crtc = dbi_output[i]->base.base.crtc; psb_crtc = (crtc) ? to_psb_intel_crtc(crtc) : NULL; pipe = dbi_output[i]->channel_num ? 2 : 0; if(pipe == 2) { dspcntr_reg = DSPCCNTR; pipeconf_reg = PIPECCONF; dsplinoff_reg = DSPCLINOFF; dspsurf_reg = DSPCSURF; reg_offset = MIPIC_REG_OFFSET; } if(!(REG_READ((MIPIA_GEN_FIFO_STAT_REG + reg_offset)) & BIT27) || !(REG_READ(dpll_reg) & DPLL_VCO_ENABLE) || !(REG_READ(dspcntr_reg) & DISPLAY_PLANE_ENABLE) || !(REG_READ(pipeconf_reg) & DISPLAY_PLANE_ENABLE)) { PSB_DEBUG_ENTRY("DBI FIFO is busy, DSI %d state %x\n", pipe, REG_READ(mipi_state_reg + reg_offset)); continue; } /*if dbi output is in a exclusive state, pipe change won't be updated*/ if(dbi_output[i]->dbi_panel_on && !(dbi_output[i]->mode_flags & MODE_SETTING_ON_GOING) && !(psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING) && !(dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)) { ret = mdfld_dpu_update_pipe(dbi_output[i], dpu_info, dbi_output[i]->channel_num ? 
2 : 0); if(!ret) { pipe_updated[i] = true; } } } for(i=0; i<dpu_info->dbi_output_num; i++) { if(pipe_updated[i]) { mdfld_dbi_flush_cb(dbi_output[i], dbi_output[i]->channel_num ? 2 : 0); } } spin_unlock_irqrestore(&dpu_info->dpu_update_lock, irq_flags); ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); return 0; }
/* setup stream encoder in dp mode */
/*
 * Programs the DP stream attributes: pixel encoding, component depth,
 * dynamic range / colorimetry (MSA MISC0/MISC1) and, on DCN1.0, the
 * MSA timing parameter registers derived from the VESA timing in
 * @crtc_timing. Interlaced timings are halved vertically first.
 */
static void dce110_stream_encoder_dp_set_stream_attribute(
	struct stream_encoder *enc,
	struct dc_crtc_timing *crtc_timing,
	enum dc_color_space output_color_space,
	uint32_t enable_sdp_splitting)
{
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	uint32_t h_active_start;
	uint32_t v_active_start;
	uint32_t misc0 = 0;
	uint32_t misc1 = 0;
	uint32_t h_blank;
	uint32_t h_back_porch;
	uint8_t synchronous_clock = 0; /* asynchronous mode */
	uint8_t colorimetry_bpc;
	uint8_t dynamic_range_rgb = 0; /*full range*/
	uint8_t dynamic_range_ycbcr = 1; /*bt709*/
#endif

	struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
	/* work on a local copy so the caller's timing is not modified */
	struct dc_crtc_timing hw_crtc_timing = *crtc_timing;

	if (hw_crtc_timing.flags.INTERLACE) {
		/*the input timing is in VESA spec format with Interlace
		  flag =1*/
		hw_crtc_timing.v_total /= 2;
		hw_crtc_timing.v_border_top /= 2;
		hw_crtc_timing.v_addressable /= 2;
		hw_crtc_timing.v_border_bottom /= 2;
		hw_crtc_timing.v_front_porch /= 2;
		hw_crtc_timing.v_sync_width /= 2;
	}

	/* set pixel encoding */
	switch (hw_crtc_timing.pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
				DP_PIXEL_ENCODING_TYPE_YCBCR422);
		break;
	case PIXEL_ENCODING_YCBCR444:
		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
				DP_PIXEL_ENCODING_TYPE_YCBCR444);

		if (hw_crtc_timing.flags.Y_ONLY)
			if (hw_crtc_timing.display_color_depth !=
					COLOR_DEPTH_666)
				/* HW testing only, no use case yet.
				 * Color depth of Y-only could be
				 * 8, 10, 12, 16 bits */
				REG_UPDATE(DP_PIXEL_FORMAT,
						DP_PIXEL_ENCODING,
						DP_PIXEL_ENCODING_TYPE_Y_ONLY);
		/* Note: DP_MSA_MISC1 bit 7 is the indicator
		 * of Y-only mode.
		 * This bit is set in HW if register
		 * DP_PIXEL_ENCODING is programmed to 0x4 */
		break;
	case PIXEL_ENCODING_YCBCR420:
		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
				DP_PIXEL_ENCODING_TYPE_YCBCR420);
		if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
			REG_UPDATE(DP_VID_TIMING,
					DP_VID_M_DOUBLE_VALUE_EN, 1);
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
		if (enc110->se_mask->DP_VID_N_MUL)
			REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
#endif
		break;
	default:
		REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
				DP_PIXEL_ENCODING_TYPE_RGB444);
		break;
	}

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	/* preserve the current MISC1 bits we do not own */
	if (REG(DP_MSA_MISC))
		misc1 = REG_READ(DP_MSA_MISC);
#endif

	/* set color depth */
	switch (hw_crtc_timing.display_color_depth) {
	case COLOR_DEPTH_666:
		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, 0);
		break;
	case COLOR_DEPTH_888:
		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
				DP_COMPONENT_PIXEL_DEPTH_8BPC);
		break;
	case COLOR_DEPTH_101010:
		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
				DP_COMPONENT_PIXEL_DEPTH_10BPC);
		break;
	case COLOR_DEPTH_121212:
		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
				DP_COMPONENT_PIXEL_DEPTH_12BPC);
		break;
	default:
		REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
				DP_COMPONENT_PIXEL_DEPTH_6BPC);
		break;
	}

	/* set dynamic range and YCbCr range */
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	/* MISC0 bits [7:5] encode the color depth per the DP spec */
	switch (hw_crtc_timing.display_color_depth) {
	case COLOR_DEPTH_666:
		colorimetry_bpc = 0;
		break;
	case COLOR_DEPTH_888:
		colorimetry_bpc = 1;
		break;
	case COLOR_DEPTH_101010:
		colorimetry_bpc = 2;
		break;
	case COLOR_DEPTH_121212:
		colorimetry_bpc = 3;
		break;
	default:
		colorimetry_bpc = 0;
		break;
	}

	/* NOTE(review): the second assignment overwrites the first;
	 * harmless today because synchronous_clock is 0 (asynchronous
	 * mode), but "misc0 |= colorimetry_bpc << 5" was probably meant. */
	misc0 = misc0 | synchronous_clock;
	misc0 = colorimetry_bpc << 5;

	if (REG(DP_MSA_TIMING_PARAM1)) {
		/* MISC0 bits [4:1]: colorimetry / dynamic range per the
		 * DP MSA encoding */
		switch (output_color_space) {
		case COLOR_SPACE_SRGB:
			misc0 = misc0 | 0x0;
			misc1 = misc1 & ~0x80; /* bit7 = 0*/
			dynamic_range_rgb = 0; /*full range*/
			break;
		case COLOR_SPACE_SRGB_LIMITED:
			misc0 = misc0 | 0x8; /* bit3=1 */
			misc1 = misc1 & ~0x80; /* bit7 = 0*/
			dynamic_range_rgb = 1; /*limited range*/
			break;
		case COLOR_SPACE_YCBCR601:
		case COLOR_SPACE_YCBCR601_LIMITED:
			misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
			misc1 = misc1 & ~0x80; /* bit7 = 0*/
			dynamic_range_ycbcr = 0; /*bt601*/
			if (hw_crtc_timing.pixel_encoding ==
					PIXEL_ENCODING_YCBCR422)
				misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
			else if (hw_crtc_timing.pixel_encoding ==
					PIXEL_ENCODING_YCBCR444)
				misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
			break;
		case COLOR_SPACE_YCBCR709:
		case COLOR_SPACE_YCBCR709_LIMITED:
		case COLOR_SPACE_YCBCR709_BLACK:
			misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
			misc1 = misc1 & ~0x80; /* bit7 = 0*/
			dynamic_range_ycbcr = 1; /*bt709*/
			if (hw_crtc_timing.pixel_encoding ==
					PIXEL_ENCODING_YCBCR422)
				misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
			else if (hw_crtc_timing.pixel_encoding ==
					PIXEL_ENCODING_YCBCR444)
				misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
			break;
		case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
			dynamic_range_rgb = 1; /*limited range*/
			break;
		case COLOR_SPACE_2020_RGB_FULLRANGE:
		case COLOR_SPACE_2020_YCBCR:
		case COLOR_SPACE_XR_RGB:
		case COLOR_SPACE_MSREF_SCRGB:
		case COLOR_SPACE_ADOBERGB:
		case COLOR_SPACE_DCIP3:
		case COLOR_SPACE_XV_YCC_709:
		case COLOR_SPACE_XV_YCC_601:
		case COLOR_SPACE_DISPLAYNATIVE:
		case COLOR_SPACE_DOLBYVISION:
		case COLOR_SPACE_APPCTRL:
		case COLOR_SPACE_CUSTOMPOINTS:
		case COLOR_SPACE_UNKNOWN:
			/* do nothing */
			break;
		}

		if (enc110->se_mask->DP_DYN_RANGE &&
				enc110->se_mask->DP_YCBCR_RANGE)
			REG_UPDATE_2(
				DP_PIXEL_FORMAT,
				DP_DYN_RANGE, dynamic_range_rgb,
				DP_YCBCR_RANGE, dynamic_range_ycbcr);

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
		if (REG(DP_MSA_COLORIMETRY))
			REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);

		if (REG(DP_MSA_MISC))
			REG_WRITE(DP_MSA_MISC, misc1);   /* MSA_MISC1 */

		/* dcn new register
		 * dc_crtc_timing is vesa dmt struct. data from edid */
		if (REG(DP_MSA_TIMING_PARAM1))
			REG_SET_2(DP_MSA_TIMING_PARAM1, 0,
					DP_MSA_HTOTAL, hw_crtc_timing.h_total,
					DP_MSA_VTOTAL, hw_crtc_timing.v_total);
#endif

		/* calcuate from vesa timing parameters
		 * h_active_start related to leading edge of sync
		 */
		h_blank = hw_crtc_timing.h_total -
				hw_crtc_timing.h_border_left -
				hw_crtc_timing.h_addressable -
				hw_crtc_timing.h_border_right;

		h_back_porch = h_blank - hw_crtc_timing.h_front_porch -
				hw_crtc_timing.h_sync_width;

		/* start at begining of left border */
		h_active_start = hw_crtc_timing.h_sync_width + h_back_porch;

		v_active_start = hw_crtc_timing.v_total -
				hw_crtc_timing.v_border_top -
				hw_crtc_timing.v_addressable -
				hw_crtc_timing.v_border_bottom -
				hw_crtc_timing.v_front_porch;

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
		/* start at begining of left border */
		if (REG(DP_MSA_TIMING_PARAM2))
			REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
				DP_MSA_HSTART, h_active_start,
				DP_MSA_VSTART, v_active_start);

		if (REG(DP_MSA_TIMING_PARAM3))
			REG_SET_4(DP_MSA_TIMING_PARAM3, 0,
				DP_MSA_HSYNCWIDTH,
				hw_crtc_timing.h_sync_width,
				DP_MSA_HSYNCPOLARITY,
				!hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY,
				DP_MSA_VSYNCWIDTH,
				hw_crtc_timing.v_sync_width,
				DP_MSA_VSYNCPOLARITY,
				!hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY);

		/* HWDITH include border or overscan */
		if (REG(DP_MSA_TIMING_PARAM4))
			REG_SET_2(DP_MSA_TIMING_PARAM4, 0,
				DP_MSA_HWIDTH, hw_crtc_timing.h_border_left +
				hw_crtc_timing.h_addressable +
				hw_crtc_timing.h_border_right,
				DP_MSA_VHEIGHT, hw_crtc_timing.v_border_top +
				hw_crtc_timing.v_addressable +
				hw_crtc_timing.v_border_bottom);
#endif
	}
#endif
}
/*
 * dce110_update_generic_info_packet() - write one generic (GSP) info
 * packet into the AFMT packet memory and arm its frame update.
 * @enc110:       stream encoder instance
 * @packet_index: generic packet slot 0-7 (slots >= 8 assert on DCN)
 * @info_packet:  header bytes hb0-hb3 and 32-byte payload sb[]
 *
 * The AFMT clock must be on before touching the block; on hardware with
 * AFMT_VBI_PACKET_CONTROL1 the function first waits until HW is no
 * longer reading GSP memory, then selects the slot, writes header and
 * payload, and finally requests a per-slot frame update.
 */
static void dce110_update_generic_info_packet(
	struct dce110_stream_encoder *enc110,
	uint32_t packet_index,
	const struct dc_info_packet *info_packet)
{
	uint32_t regval;
	/* TODOFPGA Figure out a proper number for max_retries polling for lock
	 * use 50 for now.
	 */
	uint32_t max_retries = 50;

	/*we need turn on clock before programming AFMT block*/
	if (REG(AFMT_CNTL))
		REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);

	if (REG(AFMT_VBI_PACKET_CONTROL1)) {
		if (packet_index >= 8)
			ASSERT(0);

		/* poll dig_update_lock is not locked -> asic internal signal
		 * assume otg master lock will unlock it
		 */
		/*		REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS,
				0, 10, max_retries);*/

		/* check if HW reading GSP memory */
		REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
				0, 10, max_retries);

		/* HW does is not reading GSP memory not reading too long ->
		 * something wrong. clear GPS memory access and notify?
		 * hw SW is writing to GSP memory
		 */
		REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
				AFMT_GENERIC_CONFLICT_CLR, 1);
	}

	/* choose which generic packet to use */
	{
		/* NOTE(review): regval is read but not used afterwards -
		 * presumably the read itself latches state; confirm before
		 * removing. */
		regval = REG_READ(AFMT_VBI_PACKET_CONTROL);
		REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
				AFMT_GENERIC_INDEX, packet_index);
	}

	/* write generic packet header
	 * (4th byte is for GENERIC0 only) */
	{
		REG_SET_4(AFMT_GENERIC_HDR, 0,
				AFMT_GENERIC_HB0, info_packet->hb0,
				AFMT_GENERIC_HB1, info_packet->hb1,
				AFMT_GENERIC_HB2, info_packet->hb2,
				AFMT_GENERIC_HB3, info_packet->hb3);
	}

	/* write generic packet contents
	 * (we never use last 4 bytes)
	 * there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers */
	{
		const uint32_t *content =
			(const uint32_t *) &info_packet->sb[0];

		REG_WRITE(AFMT_GENERIC_0, *content++);
		REG_WRITE(AFMT_GENERIC_1, *content++);
		REG_WRITE(AFMT_GENERIC_2, *content++);
		REG_WRITE(AFMT_GENERIC_3, *content++);
		REG_WRITE(AFMT_GENERIC_4, *content++);
		REG_WRITE(AFMT_GENERIC_5, *content++);
		REG_WRITE(AFMT_GENERIC_6, *content++);
		REG_WRITE(AFMT_GENERIC_7, *content);
	}

	if (!REG(AFMT_VBI_PACKET_CONTROL1)) {
		/* force double-buffered packet update */
		REG_UPDATE_2(AFMT_VBI_PACKET_CONTROL,
			AFMT_GENERIC0_UPDATE, (packet_index == 0),
			AFMT_GENERIC2_UPDATE, (packet_index == 2));
	}
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	/* DCN: request a frame update for the specific slot */
	if (REG(AFMT_VBI_PACKET_CONTROL1)) {
		switch (packet_index) {
		case 0:
			REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
					AFMT_GENERIC0_FRAME_UPDATE, 1);
			break;
		case 1:
			REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
					AFMT_GENERIC1_FRAME_UPDATE, 1);
			break;
		case 2:
			REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
					AFMT_GENERIC2_FRAME_UPDATE, 1);
			break;
		case 3:
			REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
					AFMT_GENERIC3_FRAME_UPDATE, 1);
			break;
		case 4:
			REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
					AFMT_GENERIC4_FRAME_UPDATE, 1);
			break;
		case 5:
			REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
					AFMT_GENERIC5_FRAME_UPDATE, 1);
			break;
		case 6:
			REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
					AFMT_GENERIC6_FRAME_UPDATE, 1);
			break;
		case 7:
			REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
					AFMT_GENERIC7_FRAME_UPDATE, 1);
			break;
		default:
			break;
		}
	}
#endif
}
/*
 * FlashReadResource() - execute the FTFx Read Resource command.
 * @pSSDConfig:            flash driver configuration (register base,
 *                         P-Flash/D-Flash base and size)
 * @dest:                  source address in words; converted to bytes
 *                         and relocated into FTFx command address space
 * @pDataArray:            receives PGM_SIZE_BYTE bytes of resource data
 * @resourceSelectCode:    which resource area to read (IFR / version)
 * @pFlashCommandSequence: callback that launches the FCCOB command
 *
 * Returns FTFx_OK, FTFx_ERR_ACCERR for an out-of-range address, or the
 * error code from the command sequence.
 */
uint32_t SIZE_OPTIMIZATION FlashReadResource(PFLASH_SSD_CONFIG pSSDConfig, \
                                      uint32_t dest, \
                                      uint8_t* pDataArray, \
                                      uint8_t resourceSelectCode, \
                                      pFLASHCOMMANDSEQUENCE pFlashCommandSequence)
{
    uint8_t i;
    uint32_t ret = FTFx_OK;      /* return code variable */
    uint32_t temp;

    /* convert to byte address */
    dest = WORD2BYTE(dest);

    /* check if the destination is aligned or not */
    /* (actually a range check: map D-Flash addresses into the 0x800000
     * command window, P-Flash addresses to offset 0) */
#if (DEBLOCK_SIZE)
    temp = WORD2BYTE(pSSDConfig->DFlashBase);
    if((dest >= temp) && (dest < (temp + pSSDConfig->DFlashSize)))
    {
        dest = dest - temp + 0x800000U;
    }
    else
#endif
    {
        temp = WORD2BYTE(pSSDConfig->PFlashBase);
        if((dest >= temp) && (dest < (temp + pSSDConfig->PFlashSize)))
        {
            dest -= temp;
        }
        else
        {
            ret = FTFx_ERR_ACCERR;
        }
    }
    if(ret == FTFx_OK)
    {
        /* clear RDCOLERR & ACCERR & FPVIOL flag in flash status register.
           Write 1 to clear */
        temp = pSSDConfig->ftfxRegBase + FTFx_SSD_FSTAT_OFFSET;
        REG_WRITE(temp, FTFx_SSD_FSTAT_ERROR_BITS);

        /* passing parameter to the command */
        temp = pSSDConfig->ftfxRegBase + FTFx_SSD_FCCOB0_OFFSET;
        REG_WRITE(temp, FTFx_READ_RESOURCE);
        temp = pSSDConfig->ftfxRegBase + FTFx_SSD_FCCOB1_OFFSET;
        REG_WRITE(temp, GET_BIT_16_23(dest));
        temp = pSSDConfig->ftfxRegBase + FTFx_SSD_FCCOB2_OFFSET;
        REG_WRITE(temp, GET_BIT_8_15(dest));
        temp = pSSDConfig->ftfxRegBase + FTFx_SSD_FCCOB3_OFFSET;
        REG_WRITE(temp, GET_BIT_0_7(dest));
        temp = pSSDConfig->ftfxRegBase + RSRC_CODE_OFSSET;
        REG_WRITE(temp, resourceSelectCode);

        /* calling flash command sequence function to execute the command */
        ret = pFlashCommandSequence(pSSDConfig);

        if (FTFx_OK == ret)
        {
            /* Read the data from the FCCOB registers into the pDataArray */
            /* payload starts at register base + 0x08 */
            for (i = 0x0U; i < PGM_SIZE_BYTE; i ++)
            {
                temp = pSSDConfig->ftfxRegBase + i + 0x08U;
                pDataArray[i] = REG_READ(temp);
            }
        }
    }
#if C90TFS_ENABLE_DEBUG
    /* Enter Debug state if enabled */
    if (TRUE == (pSSDConfig->DebugEnable))
    {
        ENTER_DEBUG_MODE;
    }
#endif
    return(ret);
}
/*
 * initialise_flash_encryption() - one-time efuse setup before the first
 * flash encryption pass.
 *
 * Generates and protects the encryption key (unless one is already
 * burned and fully protected), burns CRYPT_CONFIG, and disables the
 * UART bootloader encrypt/decrypt/cache paths, JTAG and ROM BASIC
 * according to the build configuration. Each REG_WRITE stages new efuse
 * values which esp_efuse_burn_new_values() then commits permanently.
 */
static esp_err_t initialise_flash_encryption(void)
{
    uint32_t coding_scheme = REG_GET_FIELD(EFUSE_BLK0_RDATA6_REG,
                                           EFUSE_CODING_SCHEME);
    if (coding_scheme != EFUSE_CODING_SCHEME_VAL_NONE &&
        coding_scheme != EFUSE_CODING_SCHEME_VAL_34) {
        ESP_LOGE(TAG, "Unknown/unsupported CODING_SCHEME value 0x%x",
                 coding_scheme);
        return ESP_ERR_NOT_SUPPORTED;
    }

    /* Before first flash encryption pass, need to initialise key &
       crypto config */

    /* Generate key */
    uint32_t dis_reg = REG_READ(EFUSE_BLK0_RDATA0_REG);
    bool efuse_key_read_protected = dis_reg & EFUSE_RD_DIS_BLK1;
    bool efuse_key_write_protected = dis_reg & EFUSE_WR_DIS_BLK1;
    /* key block must be entirely empty and unprotected before we
       generate a fresh key into it */
    if (efuse_key_read_protected == false
        && efuse_key_write_protected == false
        && REG_READ(EFUSE_BLK1_RDATA0_REG) == 0
        && REG_READ(EFUSE_BLK1_RDATA1_REG) == 0
        && REG_READ(EFUSE_BLK1_RDATA2_REG) == 0
        && REG_READ(EFUSE_BLK1_RDATA3_REG) == 0
        && REG_READ(EFUSE_BLK1_RDATA4_REG) == 0
        && REG_READ(EFUSE_BLK1_RDATA5_REG) == 0
        && REG_READ(EFUSE_BLK1_RDATA6_REG) == 0
        && REG_READ(EFUSE_BLK1_RDATA7_REG) == 0) {
        ESP_LOGI(TAG, "Generating new flash encryption key...");
        esp_efuse_write_random_key(EFUSE_BLK1_WDATA0_REG);
        esp_efuse_burn_new_values();

        /* protect the key from software read-out and further writes */
        ESP_LOGI(TAG, "Read & write protecting new key...");
        REG_WRITE(EFUSE_BLK0_WDATA0_REG,
                  EFUSE_WR_DIS_BLK1 | EFUSE_RD_DIS_BLK1);
        esp_efuse_burn_new_values();
    } else {
        /* a pre-loaded key is only acceptable if fully protected */
        if(!(efuse_key_read_protected && efuse_key_write_protected)) {
            ESP_LOGE(TAG, "Flash encryption key has to be either unset or both read and write protected");
            return ESP_ERR_INVALID_STATE;
        }
        ESP_LOGW(TAG, "Using pre-loaded flash encryption key in EFUSE block 1");
    }
    /* CRYPT_CONFIG determines which bits of the AES block key are XORed
       with bits from the flash address, to provide the key tweak.

       CRYPT_CONFIG == 0 is effectively AES ECB mode (NOT SUPPORTED)

       For now this is hardcoded to XOR all 256 bits of the key.

       If you need to override it, you can pre-burn this efuse to the
       desired value and then write-protect it, in which case this
       operation does nothing. Please note this is not recommended! */
    ESP_LOGI(TAG, "Setting CRYPT_CONFIG efuse to 0xF");
    REG_WRITE(EFUSE_BLK0_WDATA5_REG, EFUSE_FLASH_CRYPT_CONFIG_M);
    esp_efuse_burn_new_values();

    /* accumulate the WDATA6 disable bits, then burn them in one pass */
    uint32_t new_wdata6 = 0;

#ifndef CONFIG_FLASH_ENCRYPTION_UART_BOOTLOADER_ALLOW_ENCRYPT
    ESP_LOGI(TAG, "Disable UART bootloader encryption...");
    new_wdata6 |= EFUSE_DISABLE_DL_ENCRYPT;
#else
    ESP_LOGW(TAG, "Not disabling UART bootloader encryption");
#endif

#ifndef CONFIG_FLASH_ENCRYPTION_UART_BOOTLOADER_ALLOW_DECRYPT
    ESP_LOGI(TAG, "Disable UART bootloader decryption...");
    new_wdata6 |= EFUSE_DISABLE_DL_DECRYPT;
#else
    ESP_LOGW(TAG, "Not disabling UART bootloader decryption - SECURITY COMPROMISED");
#endif

#ifndef CONFIG_FLASH_ENCRYPTION_UART_BOOTLOADER_ALLOW_CACHE
    ESP_LOGI(TAG, "Disable UART bootloader MMU cache...");
    new_wdata6 |= EFUSE_DISABLE_DL_CACHE;
#else
    ESP_LOGW(TAG, "Not disabling UART bootloader MMU cache - SECURITY COMPROMISED");
#endif

#ifndef CONFIG_SECURE_BOOT_ALLOW_JTAG
    ESP_LOGI(TAG, "Disable JTAG...");
    new_wdata6 |= EFUSE_RD_DISABLE_JTAG;
#else
    ESP_LOGW(TAG, "Not disabling JTAG - SECURITY COMPROMISED");
#endif

#ifndef CONFIG_SECURE_BOOT_ALLOW_ROM_BASIC
    ESP_LOGI(TAG, "Disable ROM BASIC interpreter fallback...");
    new_wdata6 |= EFUSE_RD_CONSOLE_DEBUG_DISABLE;
#else
    ESP_LOGW(TAG, "Not disabling ROM BASIC fallback - SECURITY COMPROMISED");
#endif

    if (new_wdata6 != 0) {
        REG_WRITE(EFUSE_BLK0_WDATA6_REG, new_wdata6);
        esp_efuse_burn_new_values();
    }

    return ESP_OK;
}
/*
 * MrvResGetSelfResize() - read back the self-path scaler configuration.
 * @ptScale: receives one scale value per component/direction; a slot is
 *           set to RSZ_SCALER_BYPASS when that scaler is disabled, and
 *           gets RSZ_UPSCALE_ENABLE OR'ed in when the hardware is in
 *           upscale mode (upscale-capable builds only).
 *
 * Without MARVIN_FEATURE_SSCALE the output is simply zeroed.
 */
void MrvResGetSelfResize(tsMrvScale *ptScale)
{
    // FIXME: phase registers ???
#if (MARVIN_FEATURE_SSCALE == MARVIN_FEATURE_EXIST)
    volatile tsMrvRegister *ptMrvReg = (tsMrvRegister*)MEM_MRV_REG_BASE;
    UINT32 srsz_ctrl = REG_READ(ptMrvReg->srsz_ctrl);

    memset(ptScale, 0, sizeof(tsMrvScale));

    /* horizontal luminance */
    if (REG_GET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HY_ENABLE))
    {
        ptScale->ulScaleHY = REG_GET_SLICE(ptMrvReg->srsz_scale_hy, MRV_SRSZ_SCALE_HY);
#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        if (REG_GET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HY_UP) == MRV_SRSZ_SCALE_HY_UP_UPSCALE)
        {
            ptScale->ulScaleHY |= RSZ_UPSCALE_ENABLE;
        }
#endif // #if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
    }
    else
    {
        ptScale->ulScaleHY = RSZ_SCALER_BYPASS;
    }

    /* horizontal chrominance; without upscale support Cb and Cr share
       one register */
    if (REG_GET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HC_ENABLE))
    {
#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        ptScale->ulScaleHCB = REG_GET_SLICE(ptMrvReg->srsz_scale_hcb, MRV_SRSZ_SCALE_HCB);
        ptScale->ulScaleHCR = REG_GET_SLICE(ptMrvReg->srsz_scale_hcr, MRV_SRSZ_SCALE_HCR);
        if (REG_GET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HC_UP) == MRV_SRSZ_SCALE_HC_UP_UPSCALE)
        {
            ptScale->ulScaleHCB |= RSZ_UPSCALE_ENABLE;
            ptScale->ulScaleHCR |= RSZ_UPSCALE_ENABLE;
        }
#else // #if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        ptScale->ulScaleHCB = REG_GET_SLICE(ptMrvReg->srsz_scale_hc, MRV_SRSZ_SCALE_HC);
        ptScale->ulScaleHCR = ptScale->ulScaleHCB;
#endif // #if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
    }
    else
    {
        ptScale->ulScaleHCB = RSZ_SCALER_BYPASS;
        ptScale->ulScaleHCR = RSZ_SCALER_BYPASS;
    }

    /* vertical luminance */
    if (REG_GET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VY_ENABLE))
    {
        ptScale->ulScaleVY = REG_GET_SLICE(ptMrvReg->srsz_scale_vy, MRV_SRSZ_SCALE_VY);
#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        if (REG_GET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VY_UP) == MRV_SRSZ_SCALE_VY_UP_UPSCALE)
        {
            ptScale->ulScaleVY |= RSZ_UPSCALE_ENABLE;
        }
#endif // #if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
    }
    else
    {
        ptScale->ulScaleVY = RSZ_SCALER_BYPASS;
    }

    /* vertical chrominance */
    if (REG_GET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VC_ENABLE))
    {
        ptScale->ulScaleVC = REG_GET_SLICE(ptMrvReg->srsz_scale_vc, MRV_SRSZ_SCALE_VC);
#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        if (REG_GET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VC_UP) == MRV_SRSZ_SCALE_VC_UP_UPSCALE)
        {
            ptScale->ulScaleVC |= RSZ_UPSCALE_ENABLE;
        }
#endif // #if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
    }
    else
    {
        ptScale->ulScaleVC = RSZ_SCALER_BYPASS;
    }
#else // #if (MARVIN_FEATURE_SSCALE == MARVIN_FEATURE_EXIST)
    memset(ptScale, 0, sizeof(tsMrvScale));
#endif // #if (MARVIN_FEATURE_SSCALE == MARVIN_FEATURE_EXIST)
}
/*
 * MrvResSetSelfResize - program the self-path resize (SRSZ) unit.
 *
 * ptScale:     scale factors and phases for all four scaler channels. Each
 *              ulScale* value either carries a scale factor (<= MRV_RSZ_SCALE_MAX,
 *              optionally OR-ed with RSZ_UPSCALE_ENABLE) or RSZ_SCALER_BYPASS.
 * eUpdateTime: when the shadowed srsz_ctrl should take effect (frame-sync,
 *              immediate, or deferred to a later explicit update).
 * ptMrvRszLut: upscaling lookup table; required whenever any channel enables
 *              upscaling (upscale-capable hardware only).
 *
 * Fixes vs. previous revision:
 *  - srsz_scale_hcr was programmed through the MRV_SRSZ_SCALE_HCB slice;
 *    it now uses MRV_SRSZ_SCALE_HCR, matching MrvResGetSelfResize().
 *  - In the horizontal-luma section the RSZ_UPSCALE_ENABLE test was inside
 *    the #if SSCALE_UP block, so non-upscale builds asserted unconditionally
 *    whenever the HY scaler was enabled; the test is now outside the #if,
 *    consistent with the HC/VY/VC sections (assert only if upscaling is
 *    actually requested on hardware that cannot do it).
 */
void MrvResSetSelfResize(const tsMrvScale *ptScale, teMrvConfUpdateTime eUpdateTime, const tsMrvRszLut *ptMrvRszLut)
{
#if (MARVIN_FEATURE_SSCALE == MARVIN_FEATURE_EXIST)
    volatile tsMrvRegister *ptMrvReg = (tsMrvRegister*)MEM_MRV_REG_BASE;
    UINT32 srsz_ctrl = REG_READ(ptMrvReg->srsz_ctrl);
    BOOL iUpscaling = FALSE;

    ASSERT((RSZ_FLAGS_MASK & MRV_RSZ_SCALE_MASK) == 0); // flags must be "outside" scaler value
    ASSERT((ptScale->ulScaleHY  & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX);
    ASSERT((ptScale->ulScaleHCB & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX);
    ASSERT((ptScale->ulScaleHCR & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX);
    ASSERT((ptScale->ulScaleVY  & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX);
    ASSERT((ptScale->ulScaleVC  & ~RSZ_FLAGS_MASK) <= MRV_RSZ_SCALE_MAX);

    // horizontal luminance scale factor
    if (ptScale->ulScaleHY & RSZ_SCALER_BYPASS)
    {   // disable (bypass) scaler
        REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HY_ENABLE, DISABLE);
    }
    else
    {   // enable scaler
        REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HY_ENABLE, ENABLE);
        // program scale factor and phase
        REG_SET_SLICE(ptMrvReg->srsz_scale_hy, MRV_SRSZ_SCALE_HY, (UINT32)ptScale->ulScaleHY);
        REG_SET_SLICE(ptMrvReg->srsz_phase_hy, MRV_SRSZ_PHASE_HY, (UINT32)ptScale->usPhaseHY);
        if (ptScale->ulScaleHY & RSZ_UPSCALE_ENABLE)
#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        {   // enable upscaling mode
            REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HY_UP, MRV_SRSZ_SCALE_HY_UP_UPSCALE);
            iUpscaling = TRUE; // scaler and upscaling enabled
        }
        else
        {   // disable upscaling mode
            REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HY_UP, MRV_SRSZ_SCALE_HY_UP_DOWNSCALE);
        }
#else // #if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        {   // upscaling mode -> not supported
            ASSERT("MrvResSetSelfResize()" == "Upscaling not supported!");
        }
#endif //#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
    }

    // horizontal chrominance scale factors; Cb and Cr must carry
    // identical flags (non-upscale hardware: identical values)
#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
    ASSERT((ptScale->ulScaleHCB & RSZ_FLAGS_MASK) == (ptScale->ulScaleHCR & RSZ_FLAGS_MASK));
#else // #if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
    ASSERT( ptScale->ulScaleHCB == ptScale->ulScaleHCR );
#endif //#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
    if (ptScale->ulScaleHCB & RSZ_SCALER_BYPASS)
    {   // disable (bypass) scaler
        REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HC_ENABLE, DISABLE);
    }
    else
    {   // enable scaler
        REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HC_ENABLE, ENABLE);
        // program scale factor and phase
#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        REG_SET_SLICE(ptMrvReg->srsz_scale_hcb, MRV_SRSZ_SCALE_HCB, (UINT32)ptScale->ulScaleHCB);
        // FIX: use the HCR slice for the HCR register (was MRV_SRSZ_SCALE_HCB)
        REG_SET_SLICE(ptMrvReg->srsz_scale_hcr, MRV_SRSZ_SCALE_HCR, (UINT32)ptScale->ulScaleHCR);
#else // #if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        REG_SET_SLICE(ptMrvReg->srsz_scale_hc, MRV_SRSZ_SCALE_HC, (UINT32)ptScale->ulScaleHCB);
#endif //#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        REG_SET_SLICE(ptMrvReg->srsz_phase_hc, MRV_SRSZ_PHASE_HC, (UINT32)ptScale->usPhaseHC);
        if (ptScale->ulScaleHCB & RSZ_UPSCALE_ENABLE)
#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        {   // enable upscaling mode
            REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HC_UP, MRV_SRSZ_SCALE_HC_UP_UPSCALE);
            iUpscaling = TRUE; // scaler and upscaling enabled
        }
        else
        {   // disable upscaling mode
            REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_HC_UP, MRV_SRSZ_SCALE_HC_UP_DOWNSCALE);
        }
#else // #if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        {   // upscaling mode -> not supported
            UNUSED_PARAM(iUpscaling);
            ASSERT("MrvResSetSelfResize()" == "Upscaling not supported!");
        }
#endif //#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
    }

    // vertical luminance scale factor
    if (ptScale->ulScaleVY & RSZ_SCALER_BYPASS)
    {   // disable (bypass) scaler
        REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VY_ENABLE, DISABLE);
    }
    else
    {   // enable scaler
        REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VY_ENABLE, ENABLE);
        // program scale factor and phase
        REG_SET_SLICE(ptMrvReg->srsz_scale_vy, MRV_SRSZ_SCALE_VY, (UINT32)ptScale->ulScaleVY);
        REG_SET_SLICE(ptMrvReg->srsz_phase_vy, MRV_SRSZ_PHASE_VY, (UINT32)ptScale->usPhaseVY);
        if (ptScale->ulScaleVY & RSZ_UPSCALE_ENABLE)
#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        {   // enable upscaling mode
            REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VY_UP, MRV_SRSZ_SCALE_VY_UP_UPSCALE);
            iUpscaling = TRUE; // scaler and upscaling enabled
        }
        else
        {   // disable upscaling mode
            REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VY_UP, MRV_SRSZ_SCALE_VY_UP_DOWNSCALE);
        }
#else // #if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        {   // upscaling mode -> not supported
            ASSERT("MrvResSetSelfResize()" == "Upscaling not supported!");
        }
#endif //#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
    }

    // vertical chrominance scale factor
    if (ptScale->ulScaleVC & RSZ_SCALER_BYPASS)
    {   // disable (bypass) scaler
        REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VC_ENABLE, DISABLE);
    }
    else
    {   // enable scaler
        REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VC_ENABLE, ENABLE);
        // program scale factor and phase
        REG_SET_SLICE(ptMrvReg->srsz_scale_vc, MRV_SRSZ_SCALE_VC, (UINT32)ptScale->ulScaleVC);
        REG_SET_SLICE(ptMrvReg->srsz_phase_vc, MRV_SRSZ_PHASE_VC, (UINT32)ptScale->usPhaseVC);
        if (ptScale->ulScaleVC & RSZ_UPSCALE_ENABLE)
#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        {   // enable upscaling mode
            REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VC_UP, MRV_SRSZ_SCALE_VC_UP_UPSCALE);
            iUpscaling = TRUE; // scaler and upscaling enabled
        }
        else
        {   // disable upscaling mode
            REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_SCALE_VC_UP, MRV_SRSZ_SCALE_VC_UP_DOWNSCALE);
        }
#else // #if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        {   // upscaling mode -> not supported
            ASSERT("MrvResSetSelfResize()" == "Upscaling not supported!");
        }
#endif //#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
    }

#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
    // apply upscaling lookup table
    if (ptMrvRszLut)
    {
        UINT32 i;
        for (i = 0; i <= MRV_SRSZ_SCALE_LUT_ADDR_MASK; i++)
        {
            REG_SET_SLICE(ptMrvReg->srsz_scale_lut_addr, MRV_SRSZ_SCALE_LUT_ADDR, i);
            REG_SET_SLICE(ptMrvReg->srsz_scale_lut, MRV_SRSZ_SCALE_LUT, ptMrvRszLut->ucMrvRszLut[i]);
        }
    }
    else if (iUpscaling)
    {
        ASSERT("MrvResSetSelfResize()" == "Upscaling requires lookup table!");
    }
#else //#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
    UNUSED_PARAM(ptMrvRszLut);
#endif //#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)

    // handle immediate update flag and write mrsz_ctrl
    switch (eUpdateTime)
    {
    case eMrvCfgUpdateFrameSync: // frame synchronous update of shadow registers
        REG_WRITE(ptMrvReg->srsz_ctrl, srsz_ctrl);
        REG_SET_SLICE(ptMrvReg->isp_ctrl, MRV_ISP_ISP_GEN_CFG_UPD, ON);
        break;
    case eMrvCfgUpdateImmediate: // immediate update of shadow registers
#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_CFG_UPD, ON);
#else // #if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_CFG_UPD_C, ON);
        REG_SET_SLICE(srsz_ctrl, MRV_SRSZ_CFG_UPD_Y, ON);
#endif //#if (MARVIN_FEATURE_SSCALE_UP == MARVIN_FEATURE_EXIST)
        REG_WRITE(ptMrvReg->srsz_ctrl, srsz_ctrl);
        break;
    case eMrvCfgUpdateLater:
    default: // no update from within this function
        REG_WRITE(ptMrvReg->srsz_ctrl, srsz_ctrl);
        break;
    }
#else //#if (MARVIN_FEATURE_SSCALE == MARVIN_FEATURE_EXIST)
    UNUSED_PARAM(ptScale);
    UNUSED_PARAM(eUpdateTime);
    UNUSED_PARAM1(ptMrvRszLut);
#endif //#if (MARVIN_FEATURE_SSCALE == MARVIN_FEATURE_EXIST)
}
static bool ath9k_hw_ani_read_counters(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); struct ar5416AniState *aniState = &ah->curchan->ani; u32 ofdm_base = 0; u32 cck_base = 0; u32 ofdmPhyErrCnt, cckPhyErrCnt; u32 phyCnt1, phyCnt2; int32_t listenTime; ath_hw_cycle_counters_update(common); listenTime = ath_hw_get_listen_time(common); if (listenTime <= 0) { ah->stats.ast_ani_lneg_or_lzero++; ath9k_ani_restart(ah); return false; } if (!use_new_ani(ah)) { ofdm_base = AR_PHY_COUNTMAX - ah->config.ofdm_trig_high; cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high; } aniState->listenTime += listenTime; ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) { if (phyCnt1 < ofdm_base) { ath_dbg(common, ANI, "phyCnt1 0x%x, resetting counter value to 0x%x\n", phyCnt1, ofdm_base); REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base); REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); } if (phyCnt2 < cck_base) { ath_dbg(common, ANI, "phyCnt2 0x%x, resetting counter value to 0x%x\n", phyCnt2, cck_base); REG_WRITE(ah, AR_PHY_ERR_2, cck_base); REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); } return false; } ofdmPhyErrCnt = phyCnt1 - ofdm_base; ah->stats.ast_ani_ofdmerrs += ofdmPhyErrCnt - aniState->ofdmPhyErrCount; aniState->ofdmPhyErrCount = ofdmPhyErrCnt; cckPhyErrCnt = phyCnt2 - cck_base; ah->stats.ast_ani_cckerrs += cckPhyErrCnt - aniState->cckPhyErrCount; aniState->cckPhyErrCount = cckPhyErrCnt; return true; }
void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath9k_nfcal_hist *h; int i, j; int32_t val; const u32 ar5416_cca_regs[6] = { AR_PHY_CCA, AR_PHY_CH1_CCA, AR_PHY_CH2_CCA, AR_PHY_EXT_CCA, AR_PHY_CH1_EXT_CCA, AR_PHY_CH2_EXT_CCA }; u8 chainmask, rx_chain_status; rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK); if (AR_SREV_9285(ah)) chainmask = 0x9; else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) { if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4)) chainmask = 0x1B; else chainmask = 0x09; } else { if (rx_chain_status & 0x4) chainmask = 0x3F; else if (rx_chain_status & 0x2) chainmask = 0x1B; else chainmask = 0x09; } h = ah->nfCalHist; for (i = 0; i < NUM_NF_READINGS; i++) { if (chainmask & (1 << i)) { val = REG_READ(ah, ar5416_cca_regs[i]); val &= 0xFFFFFE00; val |= (((u32) (h[i].privNF) << 1) & 0x1ff); REG_WRITE(ah, ar5416_cca_regs[i], val); } } REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_ENABLE_NF); REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NO_UPDATE_NF); REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); for (j = 0; j < 1000; j++) { if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) == 0) break; udelay(10); } for (i = 0; i < NUM_NF_READINGS; i++) { if (chainmask & (1 << i)) { val = REG_READ(ah, ar5416_cca_regs[i]); val &= 0xFFFFFE00; val |= (((u32) (-50) << 1) & 0x1ff); REG_WRITE(ah, ar5416_cca_regs[i], val); } } }
/** * cdv_save_display_registers - save registers lost on suspend * @dev: our DRM device * * Save the state we need in order to be able to restore the interface * upon resume from suspend */ static int cdv_save_display_registers(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct psb_save_area *regs = &dev_priv->regs; struct drm_connector *connector; dev_dbg(dev->dev, "Saving GPU registers.\n"); pci_read_config_byte(dev->pdev, 0xF4, ®s->cdv.saveLBB); regs->cdv.saveDSPCLK_GATE_D = REG_READ(DSPCLK_GATE_D); regs->cdv.saveRAMCLK_GATE_D = REG_READ(RAMCLK_GATE_D); regs->cdv.saveDSPARB = REG_READ(DSPARB); regs->cdv.saveDSPFW[0] = REG_READ(DSPFW1); regs->cdv.saveDSPFW[1] = REG_READ(DSPFW2); regs->cdv.saveDSPFW[2] = REG_READ(DSPFW3); regs->cdv.saveDSPFW[3] = REG_READ(DSPFW4); regs->cdv.saveDSPFW[4] = REG_READ(DSPFW5); regs->cdv.saveDSPFW[5] = REG_READ(DSPFW6); regs->cdv.saveADPA = REG_READ(ADPA); regs->cdv.savePP_CONTROL = REG_READ(PP_CONTROL); regs->cdv.savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS); regs->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL); regs->saveBLC_PWM_CTL2 = REG_READ(BLC_PWM_CTL2); regs->cdv.saveLVDS = REG_READ(LVDS); regs->cdv.savePFIT_CONTROL = REG_READ(PFIT_CONTROL); regs->cdv.savePP_ON_DELAYS = REG_READ(PP_ON_DELAYS); regs->cdv.savePP_OFF_DELAYS = REG_READ(PP_OFF_DELAYS); regs->cdv.savePP_CYCLE = REG_READ(PP_CYCLE); regs->cdv.saveVGACNTRL = REG_READ(VGACNTRL); regs->cdv.saveIER = REG_READ(PSB_INT_ENABLE_R); regs->cdv.saveIMR = REG_READ(PSB_INT_MASK_R); list_for_each_entry(connector, &dev->mode_config.connector_list, head) connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); return 0; }
static void ath9k_hw_9271_pa_cal(struct ath_hw *ah) { u32 regVal; unsigned int i; u32 regList [][2] = { { 0x786c, 0 }, { 0x7854, 0 }, { 0x7820, 0 }, { 0x7824, 0 }, { 0x7868, 0 }, { 0x783c, 0 }, { 0x7838, 0 } , { 0x7828, 0 } , }; for (i = 0; i < ARRAY_SIZE(regList); i++) regList[i][1] = REG_READ(ah, regList[i][0]); regVal = REG_READ(ah, 0x7834); regVal &= (~(0x1)); REG_WRITE(ah, 0x7834, regVal); regVal = REG_READ(ah, 0x9808); regVal |= (0x1 << 27); REG_WRITE(ah, 0x9808, regVal); /* 786c,b23,1, pwddac=1 */ REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1); /* 7854, b5,1, pdrxtxbb=1 */ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1); /* 7854, b7,1, pdv2i=1 */ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1); /* 7854, b8,1, pddacinterface=1 */ REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1); /* 7824,b12,0, offcal=0 */ REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0); /* 7838, b1,0, pwddb=0 */ REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0); /* 7820,b11,0, enpacal=0 */ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0); /* 7820,b25,1, pdpadrv1=0 */ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0); /* 7820,b24,0, pdpadrv2=0 */ REG_RMW_FIELD(ah, AR9285_AN_RF2G1,AR9285_AN_RF2G1_PDPADRV2,0); /* 7820,b23,0, pdpaout=0 */ REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0); /* 783c,b14-16,7, padrvgn2tab_0=7 */ REG_RMW_FIELD(ah, AR9285_AN_RF2G8,AR9285_AN_RF2G8_PADRVGN2TAB0, 7); /* * 7838,b29-31,0, padrvgn1tab_0=0 * does not matter since we turn it off */ REG_RMW_FIELD(ah, AR9285_AN_RF2G7,AR9285_AN_RF2G7_PADRVGN2TAB0, 0); REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff); /* Set: * localmode=1,bmode=1,bmoderxtx=1,synthon=1, * txon=1,paon=1,oscon=1,synthon_force=1 */ REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0); udelay(30); REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0); /* find off_6_1; */ for (i = 6; i >= 0; i--) { regVal = 
REG_READ(ah, 0x7834); regVal |= (1 << (20 + i)); REG_WRITE(ah, 0x7834, regVal); udelay(1); //regVal = REG_READ(ah, 0x7834); regVal &= (~(0x1 << (20 + i))); regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9) << (20 + i)); REG_WRITE(ah, 0x7834, regVal); } /* Empirical offset correction */ #if 0 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0x20); #endif regVal = REG_READ(ah, 0x7834); regVal |= 0x1; REG_WRITE(ah, 0x7834, regVal); regVal = REG_READ(ah, 0x9808); regVal &= (~(0x1 << 27)); REG_WRITE(ah, 0x9808, regVal); for (i = 0; i < ARRAY_SIZE(regList); i++) REG_WRITE(ah, regList[i][0], regList[i][1]); }
/** * cdv_restore_display_registers - restore lost register state * @dev: our DRM device * * Restore register state that was lost during suspend and resume. * * FIXME: review */ static int cdv_restore_display_registers(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct psb_save_area *regs = &dev_priv->regs; struct drm_connector *connector; u32 temp; pci_write_config_byte(dev->pdev, 0xF4, regs->cdv.saveLBB); REG_WRITE(DSPCLK_GATE_D, regs->cdv.saveDSPCLK_GATE_D); REG_WRITE(RAMCLK_GATE_D, regs->cdv.saveRAMCLK_GATE_D); /* BIOS does below anyway */ REG_WRITE(DPIO_CFG, 0); REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N); temp = REG_READ(DPLL_A); if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) { REG_WRITE(DPLL_A, temp | DPLL_SYNCLOCK_ENABLE); REG_READ(DPLL_A); } temp = REG_READ(DPLL_B); if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) { REG_WRITE(DPLL_B, temp | DPLL_SYNCLOCK_ENABLE); REG_READ(DPLL_B); } udelay(500); REG_WRITE(DSPFW1, regs->cdv.saveDSPFW[0]); REG_WRITE(DSPFW2, regs->cdv.saveDSPFW[1]); REG_WRITE(DSPFW3, regs->cdv.saveDSPFW[2]); REG_WRITE(DSPFW4, regs->cdv.saveDSPFW[3]); REG_WRITE(DSPFW5, regs->cdv.saveDSPFW[4]); REG_WRITE(DSPFW6, regs->cdv.saveDSPFW[5]); REG_WRITE(DSPARB, regs->cdv.saveDSPARB); REG_WRITE(ADPA, regs->cdv.saveADPA); REG_WRITE(BLC_PWM_CTL2, regs->saveBLC_PWM_CTL2); REG_WRITE(LVDS, regs->cdv.saveLVDS); REG_WRITE(PFIT_CONTROL, regs->cdv.savePFIT_CONTROL); REG_WRITE(PFIT_PGM_RATIOS, regs->cdv.savePFIT_PGM_RATIOS); REG_WRITE(BLC_PWM_CTL, regs->saveBLC_PWM_CTL); REG_WRITE(PP_ON_DELAYS, regs->cdv.savePP_ON_DELAYS); REG_WRITE(PP_OFF_DELAYS, regs->cdv.savePP_OFF_DELAYS); REG_WRITE(PP_CYCLE, regs->cdv.savePP_CYCLE); REG_WRITE(PP_CONTROL, regs->cdv.savePP_CONTROL); REG_WRITE(VGACNTRL, regs->cdv.saveVGACNTRL); REG_WRITE(PSB_INT_ENABLE_R, regs->cdv.saveIER); REG_WRITE(PSB_INT_MASK_R, regs->cdv.saveIMR); /* Fix arbitration bug */ cdv_errata(dev); drm_mode_config_reset(dev); list_for_each_entry(connector, 
&dev->mode_config.connector_list, head) connector->funcs->dpms(connector, DRM_MODE_DPMS_ON); /* Resume the modeset for every activated CRTC */ drm_helper_resume_force_mode(dev); return 0; }
/*
 * ar9003_mci_prep_interface - run the initial WLAN<->BT MCI wake handshake.
 *
 * With MCI interrupts masked, resets the remote (BT) side, requests wake,
 * and waits for BT's SYS_WAKING reply. On success it acknowledges with
 * sys_waking, raises the BT priority thresholds, optionally hands the
 * shared LNA to WLAN on 2 GHz, clears any redundant SYS_WAKING left in the
 * raw status, and finally restores the caller's interrupt enables.
 */
static void ar9003_mci_prep_interface(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
	u32 saved_mci_int_en;
	u32 mci_timeout = 150;

	mci->bt_state = MCI_BT_SLEEP;

	/* Mask MCI interrupts while the handshake runs; restored at the end */
	saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);

	REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
	/*
	 * Write the raw status back to itself — presumably these are
	 * write-1-to-clear registers, so this acks everything pending.
	 * TODO(review): confirm W1C semantics against the register spec.
	 */
	REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
		  REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
	REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
		  REG_READ(ah, AR_MCI_INTERRUPT_RAW));

	ar9003_mci_remote_reset(ah, true);
	ar9003_mci_send_req_wake(ah, true);

	/* BT never woke up — leave it asleep, just clean up and restore */
	if (!ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
					   AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500))
		goto clear_redunt;

	mci->bt_state = MCI_BT_AWAKE;

	/*
	 * we don't need to send more remote_reset at this moment.
	 * If BT receive first remote_reset, then BT HW will
	 * be cleaned up and will be able to receive req_wake
	 * and BT HW will respond sys_waking.
	 * In this case, WLAN will receive BT's HW sys_waking.
	 * Otherwise, if BT SW missed initial remote_reset,
	 * that remote_reset will still clean up BT MCI RX,
	 * and the req_wake will wake BT up,
	 * and BT SW will respond this req_wake with a remote_reset and
	 * sys_waking. In this case, WLAN will receive BT's SW
	 * sys_waking. In either case, BT's RX is cleaned up. So we
	 * don't need to reply BT's remote_reset now, if any.
	 * Similarly, if in any case, WLAN can receive BT's sys_waking,
	 * that means WLAN's RX is also fine.
	 */
	ar9003_mci_send_sys_waking(ah, true);
	udelay(10);

	/*
	 * Set BT priority interrupt value to be 0xff to
	 * avoid having too many BT PRIORITY interrupts.
	 */
	REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
	REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
	REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
	REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
	REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);

	/*
	 * A contention reset will be received after send out
	 * sys_waking. Also BT priority interrupt bits will be set.
	 * Clear those bits before the next step.
	 */
	REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
		  AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
	REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_BT_PRI);

	/* On 2 GHz with a shared PA/LNA, hand the LNA over to WLAN */
	if (mci->is_2g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
		ar9003_mci_send_lna_transfer(ah, true);
		udelay(5);
	}

	if (mci->is_2g && !mci->update_2g5g && MCI_ANT_ARCH_PA_LNA_SHARED(mci)) {
		if (ar9003_mci_wait_for_interrupt(ah,
						  AR_MCI_INTERRUPT_RX_MSG_RAW,
						  AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
						  mci_timeout))
			ath_dbg(common, MCI,
				"MCI WLAN has control over the LNA & BT obeys it\n");
		else
			ath_dbg(common, MCI,
				"MCI BT didn't respond to LNA_TRANS\n");
	}

clear_redunt:
	/* Clear the extra redundant SYS_WAKING from BT */
	if ((mci->bt_state == MCI_BT_AWAKE) &&
	    (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
			    AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
	    (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
			    AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
		REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
			  AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING);
		REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
			  AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
	}

	/* Restore the interrupt enables we saved on entry */
	REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
}
static int cdv_backlight_combination_mode(struct drm_device *dev) { return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE; }
static int __mdfld_dbi_exit_dsr(struct mdfld_dsi_dbi_output * dbi_output, int pipe) { struct drm_device * dev = dbi_output->dev; struct drm_crtc * crtc = dbi_output->base.base.crtc; struct psb_intel_crtc * psb_crtc = (crtc) ? to_psb_intel_crtc(crtc) : NULL; u32 reg_val; u32 dpll_reg = MRST_DPLL_A; u32 pipeconf_reg = PIPEACONF; u32 dspcntr_reg = DSPACNTR; u32 dspbase_reg = DSPABASE; u32 dspsurf_reg = DSPASURF; u32 reg_offset = 0; PSB_DEBUG_ENTRY("\n"); if(!dbi_output) { return 0; } /*if mode setting on-going, back off*/ if((dbi_output->mode_flags & MODE_SETTING_ON_GOING) || (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING)) return -EAGAIN; if(pipe == 2) { dpll_reg = MRST_DPLL_A; pipeconf_reg = PIPECCONF; dspcntr_reg = DSPCCNTR; dspbase_reg = MDFLD_DSPCBASE; dspsurf_reg = DSPCSURF; reg_offset = MIPIC_REG_OFFSET; } if (!ospm_power_using_hw_begin(OSPM_DISPLAY_ISLAND, true)) return -EAGAIN; /*enable DPLL*/ reg_val = REG_READ(dpll_reg); if(!(reg_val & DPLL_VCO_ENABLE)) { if(reg_val & MDFLD_PWR_GATE_EN) { reg_val &= ~MDFLD_PWR_GATE_EN; REG_WRITE(dpll_reg, reg_val); REG_READ(dpll_reg); udelay(500); } reg_val |= DPLL_VCO_ENABLE; REG_WRITE(dpll_reg, reg_val); REG_READ(dpll_reg); udelay(500); /*FIXME: add timeout*/ while (!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)); } /*enable pipe*/ reg_val = REG_READ(pipeconf_reg); if(!(reg_val & PIPEACONF_ENABLE)) { reg_val |= PIPEACONF_ENABLE; REG_WRITE(pipeconf_reg, reg_val); REG_READ(pipeconf_reg); udelay(500); mdfldWaitForPipeEnable(dev, pipe); } /*enable plane*/ reg_val = REG_READ(dspcntr_reg); if(!(reg_val & DISPLAY_PLANE_ENABLE)) { reg_val |= DISPLAY_PLANE_ENABLE; REG_WRITE(dspcntr_reg, reg_val); REG_READ(dspcntr_reg); udelay(500); } ospm_power_using_hw_end(OSPM_DISPLAY_ISLAND); /*clean IN_DSR flag*/ dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR; return 0; }
uint8_t pm8x41_get_pmic_rev() { return REG_READ(REVID_REVISION4); }
int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector, int pipe) { struct mdfld_dsi_pkg_sender *pkg_sender; struct mdfld_dsi_config *dsi_config = mdfld_dsi_get_config(dsi_connector); struct drm_device *dev = dsi_config->dev; struct drm_psb_private *dev_priv = dev->dev_private; const struct psb_offset *map = &dev_priv->regmap[pipe]; u32 mipi_val = 0; if (!dsi_connector) { DRM_ERROR("Invalid parameter\n"); return -EINVAL; } pkg_sender = dsi_connector->pkg_sender; if (!pkg_sender || IS_ERR(pkg_sender)) { pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender), GFP_KERNEL); if (!pkg_sender) { DRM_ERROR("Create DSI pkg sender failed\n"); return -ENOMEM; } dsi_connector->pkg_sender = (void *)pkg_sender; } pkg_sender->dev = dev; pkg_sender->dsi_connector = dsi_connector; pkg_sender->pipe = pipe; pkg_sender->pkg_num = 0; pkg_sender->panel_mode = 0; pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE; /*init regs*/ /* FIXME: should just copy the regmap ptr ? */ pkg_sender->dpll_reg = map->dpll; pkg_sender->dspcntr_reg = map->cntr; pkg_sender->pipeconf_reg = map->conf; pkg_sender->dsplinoff_reg = map->linoff; pkg_sender->dspsurf_reg = map->surf; pkg_sender->pipestat_reg = map->status; pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe); pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe); pkg_sender->mipi_hs_gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe); pkg_sender->mipi_lp_gen_ctrl_reg = MIPI_LP_GEN_CTRL_REG(pipe); pkg_sender->mipi_hs_gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe); pkg_sender->mipi_gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe); pkg_sender->mipi_data_addr_reg = MIPI_DATA_ADD_REG(pipe); pkg_sender->mipi_data_len_reg = MIPI_DATA_LEN_REG(pipe); pkg_sender->mipi_cmd_addr_reg = MIPI_CMD_ADD_REG(pipe); pkg_sender->mipi_cmd_len_reg = MIPI_CMD_LEN_REG(pipe); /*init lock*/ spin_lock_init(&pkg_sender->lock); if (mdfld_get_panel_type(dev, pipe) != TC35876X) { /** * For video mode, don't enable DPI timing output here, * will init the DPI timing 
output during mode setting. */ mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX; if (pipe == 0) mipi_val |= 0x2; REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi_val); REG_READ(MIPI_PORT_CONTROL(pipe)); /* do dsi controller init */ mdfld_dsi_controller_init(dsi_config, pipe); } return 0; }
/** * ar9002_hw_set_channel - set channel on single-chip device * @ah: atheros hardware structure * @chan: * * This is the function to change channel on single-chip devices, that is * all devices after ar9280. * * This function takes the channel value in MHz and sets * hardware channel value. Assumes writes have been enabled to analog bus. * * Actual Expression, * * For 2GHz channel, * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17) * (freq_ref = 40MHz) * * For 5GHz channel, * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10) * (freq_ref = 40MHz/(24>>amodeRefSel)) */ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) { u16 bMode, fracMode, aModeRefSel = 0; u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0; struct chan_centers centers; u32 refDivA = 24; ath9k_hw_get_channel_centers(ah, chan, ¢ers); freq = centers.synth_center; reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL); reg32 &= 0xc0000000; if (freq < 4800) { /* 2 GHz, fractional mode */ u32 txctl; int regWrites = 0; bMode = 1; fracMode = 1; aModeRefSel = 0; channelSel = CHANSEL_2G(freq); if (AR_SREV_9287_11_OR_LATER(ah)) { if (freq == 2484) { /* Enable channel spreading for channel 14 */ REG_WRITE_ARRAY(&ah->iniCckfirJapan2484, 1, regWrites); } else { REG_WRITE_ARRAY(&ah->iniCckfirNormal, 1, regWrites); } } else { txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL); if (freq == 2484) { /* Enable channel spreading for channel 14 */ REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, txctl | AR_PHY_CCK_TX_CTRL_JAPAN); } else { REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN); } } } else { bMode = 0; fracMode = 0; switch (ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) { case 0: if (IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan)) aModeRefSel = 0; else if ((freq % 20) == 0) aModeRefSel = 3; else if ((freq % 10) == 0) aModeRefSel = 2; if (aModeRefSel) break; case 1: default: aModeRefSel = 0; /* * Enable 2G (fractional) mode for 
channels * which are 5MHz spaced. */ fracMode = 1; refDivA = 1; channelSel = CHANSEL_5G(freq); /* RefDivA setting */ ath9k_hw_analog_shift_rmw(ah, AR_AN_SYNTH9, AR_AN_SYNTH9_REFDIVA, AR_AN_SYNTH9_REFDIVA_S, refDivA); } if (!fracMode) { ndiv = (freq * (refDivA >> aModeRefSel)) / 60; channelSel = ndiv & 0x1ff; channelFrac = (ndiv & 0xfffffe00) * 2; channelSel = (channelSel << 17) | channelFrac; } }
/*
 * mt_hotplug_mechanism_write_test1 - procfs write handler for the test1 knob.
 *
 * Parses a single decimal value from user space into g_test1 and acts on it:
 *   0/1 - power the debug MTCMOS domain up/down
 *   2   - dump the CA7/CA15 power-control and L1/L2 power-down registers
 *   3   - reserved (power-down path is commented out)
 *
 * Returns @count on success, -EFAULT when the user buffer cannot be copied
 * (FIX: previously returned 0, which tells the VFS "0 bytes consumed" instead
 * of reporting the fault), and -EINVAL on an unparsable argument.
 */
static int mt_hotplug_mechanism_write_test1(struct file *file, const char __user *buffer, size_t count, loff_t *pos)
{
	int test1 = 0;
	char desc[32];
	size_t len;

	/* clamp to the buffer, leaving room for the terminator */
	len = (count < (sizeof(desc) - 1)) ? count : (sizeof(desc) - 1);
	if (copy_from_user(desc, buffer, len))
		return -EFAULT;
	desc[len] = '\0';

	if (sscanf(desc, "%d", &test1) == 1) {
		g_test1 = test1;

		switch (g_test1) {
		case 0:
			spm_mtcmos_ctrl_dbg0(STA_POWER_ON);
			break;
		case 1:
			spm_mtcmos_ctrl_dbg0(STA_POWER_DOWN);
			break;
		case 2:
			/* spm_mtcmos_ctrl_dbg1(STA_POWER_ON); */
			pr_emerg("SPM_CA7_CPU0_PWR_CON: 0x%08x\n", REG_READ(SPM_CA7_CPU0_PWR_CON));
			pr_emerg("SPM_CA7_CPU1_PWR_CON: 0x%08x\n", REG_READ(SPM_CA7_CPU1_PWR_CON));
			pr_emerg("SPM_CA7_CPU2_PWR_CON: 0x%08x\n", REG_READ(SPM_CA7_CPU2_PWR_CON));
			pr_emerg("SPM_CA7_CPU3_PWR_CON: 0x%08x\n", REG_READ(SPM_CA7_CPU3_PWR_CON));
			pr_emerg("SPM_CA7_DBG_PWR_CON: 0x%08x\n", REG_READ(SPM_CA7_DBG_PWR_CON));
			pr_emerg("SPM_CA7_CPUTOP_PWR_CON: 0x%08x\n", REG_READ(SPM_CA7_CPUTOP_PWR_CON));
			pr_emerg("SPM_CA7_CPU0_L1_PDN: 0x%08x\n", REG_READ(SPM_CA7_CPU0_L1_PDN));
			pr_emerg("SPM_CA7_CPU1_L1_PDN: 0x%08x\n", REG_READ(SPM_CA7_CPU1_L1_PDN));
			pr_emerg("SPM_CA7_CPU2_L1_PDN: 0x%08x\n", REG_READ(SPM_CA7_CPU2_L1_PDN));
			pr_emerg("SPM_CA7_CPU3_L1_PDN: 0x%08x\n", REG_READ(SPM_CA7_CPU3_L1_PDN));
			pr_emerg("SPM_CA7_CPUTOP_L2_PDN: 0x%08x\n", REG_READ(SPM_CA7_CPUTOP_L2_PDN));
			pr_emerg("SPM_CA15_CPU0_PWR_CON: 0x%08x\n", REG_READ(SPM_CA15_CPU0_PWR_CON));
			pr_emerg("SPM_CA15_CPU1_PWR_CON: 0x%08x\n", REG_READ(SPM_CA15_CPU1_PWR_CON));
			pr_emerg("SPM_CA15_CPU2_PWR_CON: 0x%08x\n", REG_READ(SPM_CA15_CPU2_PWR_CON));
			pr_emerg("SPM_CA15_CPU3_PWR_CON: 0x%08x\n", REG_READ(SPM_CA15_CPU3_PWR_CON));
			pr_emerg("SPM_CA15_CPUTOP_PWR_CON: 0x%08x\n", REG_READ(SPM_CA15_CPUTOP_PWR_CON));
			pr_emerg("SPM_CA15_L1_PWR_CON: 0x%08x\n", REG_READ(SPM_CA15_L1_PWR_CON));
			pr_emerg("SPM_CA15_L2_PWR_CON: 0x%08x\n", REG_READ(SPM_CA15_L2_PWR_CON));
			break;
		case 3:
			/* spm_mtcmos_ctrl_dbg1(STA_POWER_DOWN); */
			break;
		default:
			/* other values are stored in g_test1 but trigger no action */
			break;
		}

		return count;
	}

	HOTPLUG_INFO("mt_hotplug_mechanism_write_test1, bad argument\n");

	return -EINVAL;
}
/*
 * ath9k_hw_loadnf - load the software NF history (or the channel default)
 * into the baseband's minCCApwr registers, trigger an NF load, and restore
 * the -50 dBm cap once the load completes. Bails out early on a load
 * timeout to avoid fighting an in-progress load (see comment below).
 */
void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath9k_nfcal_hist *h = NULL;
	unsigned i, j;
	int32_t val;
	/* lower nibble: ctl chains, upper: ext chains */
	u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_conf *conf = &common->hw->conf;
	s16 default_nf = ath9k_hw_get_default_nf(ah, chan);

	if (ah->caldata)
		h = ah->caldata->nfCalHist;

	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			s16 nfval;

			/* ext-channel readings only matter in HT40 */
			if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
				continue;

			if (h)
				nfval = h[i].privNF;
			else
				nfval = default_nf;

			/* NF lives in the low 9 bits (signed, shifted by 1) */
			val = REG_READ(ah, ah->nf_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) nfval << 1) & 0x1ff);
			REG_WRITE(ah, ah->nf_regs[i], val);
		}
	}

	/*
	 * Load software filtered NF value into baseband internal minCCApwr
	 * variable.
	 */
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_ENABLE_NF);
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);

	/*
	 * Wait for load to complete, should be fast, a few 10s of us.
	 * The max delay was changed from an original 250us to 10000us
	 * since 250us often results in NF load timeout and causes deaf
	 * condition during stress testing 12/12/2009
	 */
	for (j = 0; j < 10000; j++) {
		if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
		     AR_PHY_AGC_CONTROL_NF) == 0)
			break;
		udelay(10);
	}

	/*
	 * We timed out waiting for the noisefloor to load, probably due to an
	 * in-progress rx. Simply return here and allow the load plenty of time
	 * to complete before the next calibration interval. We need to avoid
	 * trying to load -50 (which happens below) while the previous load is
	 * still in progress as this can cause rx deafness. Instead by returning
	 * here, the baseband nf cal will just be capped by our present
	 * noisefloor until the next calibration timer.
	 */
	if (j == 10000) {
		ath_dbg(common, ATH_DBG_ANY,
			"Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
			REG_READ(ah, AR_PHY_AGC_CONTROL));
		return;
	}

	/*
	 * Restore maxCCAPower register parameter again so that we're not capped
	 * by the median we just loaded. This will be initial (and max) value
	 * of next noise floor calibration the baseband does.
	 */
	ENABLE_REGWRITE_BUFFER(ah);
	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
				continue;

			val = REG_READ(ah, ah->nf_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) (-50) << 1) & 0x1ff);
			REG_WRITE(ah, ah->nf_regs[i], val);
		}
	}
	REGWRITE_BUFFER_FLUSH(ah);
}
/*
 * ath9k_hw_9285_pa_cal - PA (power amplifier) offset calibration for AR9285.
 *
 * Saves a set of analog registers, powers down the PA path, forces a
 * successive-approximation search of the 7-bit offset (bits 6:1 in RF2G6_OFFS
 * plus bit 0 in RF2G3_PDVCCOMP), writes the result back, and then restores
 * every register that was touched. The exact write order and the udelay()s
 * between steps are part of the hardware procedure — do not reorder.
 */
static inline void ath9k_hw_9285_pa_cal(struct ath_hal *ah)
{
	u32 regVal;
	int i, offset, offs_6_1, offs_0;
	u32 ccomp_org, reg_field;
	/* Analog registers saved before and restored after calibration. */
	u32 regList[][2] = {
		{ 0x786c, 0 },
		{ 0x7854, 0 },
		{ 0x7820, 0 },
		{ 0x7824, 0 },
		{ 0x7868, 0 },
		{ 0x783c, 0 },
		{ 0x7838, 0 },
	};

	/* AR9285 v1.1 needs TOP4 tweaked for the duration of the cal. */
	if (AR_SREV_9285_11(ah)) {
		REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
		udelay(10);
	}

	/* Snapshot the registers we are about to clobber. */
	for (i = 0; i < ARRAY_SIZE(regList); i++)
		regList[i][1] = REG_READ(ah, regList[i][0]);

	/* Clear bit 0 of 0x7834 and set bit 27 of 0x9808 to enter cal mode. */
	regVal = REG_READ(ah, 0x7834);
	regVal &= (~(0x1));
	REG_WRITE(ah, 0x7834, regVal);
	regVal = REG_READ(ah, 0x9808);
	regVal |= (0x1 << 27);
	REG_WRITE(ah, 0x9808, regVal);

	/* Power down / reconfigure the analog chain for the measurement. */
	REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
	REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
	REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
	REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 1);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
	/* Save CCOMP, force it to 7 for the cal, restored at the end. */
	ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 7);

	REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
	udelay(30);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);

	/*
	 * Successive approximation over bits 25..20 of 0x7834: pulse each bit
	 * high, then keep it only if the comparator (SPARE9 field read from
	 * 0x7840) says so.
	 * NOTE(review): the MS() here applies AR9285_AN_RXTXBB1_SPARE9 to a
	 * read of raw address 0x7840 — presumably 0x7840 is AR9285_AN_RF2G9;
	 * confirm against the register header.
	 */
	for (i = 6; i > 0; i--) {
		regVal = REG_READ(ah, 0x7834);
		regVal |= (1 << (19 + i));
		REG_WRITE(ah, 0x7834, regVal);
		udelay(1);
		regVal = REG_READ(ah, 0x7834);
		regVal &= (~(0x1 << (19 + i)));
		reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
		regVal |= (reg_field << (19 + i));
		REG_WRITE(ah, 0x7834, regVal);
	}

	/* Resolve the final LSB of the offset the same way. */
	REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
	udelay(1);
	reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
	offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
	offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);

	/* Recombine the 7-bit offset and split it back into its two fields.
	 * NOTE(review): "offset = offset - 0" is a no-op; it appears to be a
	 * placeholder for a correction term — kept as-is, do not remove
	 * without confirming against the reference calibration procedure. */
	offset = (offs_6_1<<1) | offs_0;
	offset = offset - 0;
	offs_6_1 = offset>>1;
	offs_0 = offset & 1;

	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
	REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);

	/* Leave calibration mode (undo the 0x7834/0x9808 changes above). */
	regVal = REG_READ(ah, 0x7834);
	regVal |= 0x1;
	REG_WRITE(ah, 0x7834, regVal);
	regVal = REG_READ(ah, 0x9808);
	regVal &= (~(0x1 << 27));
	REG_WRITE(ah, 0x9808, regVal);

	/* Restore the snapshotted analog registers. */
	for (i = 0; i < ARRAY_SIZE(regList); i++)
		REG_WRITE(ah, regList[i][0], regList[i][1]);

	REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);

	if (AR_SREV_9285_11(ah))
		REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
}
/*
 * ath9k_hw_init_cal - run the initial hardware calibrations after reset.
 *
 * For AR9285 v1.2+ parts this delegates to ar9285_clc(); for older parts it
 * runs the AGC offset calibration directly (with the ADC kept powered and the
 * filter calibration enabled on AR9280+, except AR9287 v1.0+). It then kicks
 * off PA calibration, starts a hardware NF calibration, and registers the
 * periodic calibrations (ADC gain / ADC DC / IQ mismatch) that the chip
 * supports.
 *
 * @ah:   hardware state
 * @chan: channel being calibrated; CalValid is cleared on success
 *
 * Returns true on success, false if the chip-specific cal or the 1ms AGC
 * offset-calibration poll fails.
 */
bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
{
	if (AR_SREV_9285_12_OR_LATER(ah)) {
		/* Newer AR9285: carrier-leak calibration path. */
		if (!ar9285_clc(ah, chan))
			return false;
	} else {
		if (AR_SREV_9280_10_OR_LATER(ah)) {
			/* Keep the ADC powered during cal (not on AR9287+)
			 * and enable the baseband filter calibration. */
			if (!AR_SREV_9287_10_OR_LATER(ah))
				REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
					    AR_PHY_ADC_CTL_OFF_PWDADC);
			REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
				    AR_PHY_AGC_CONTROL_FLTR_CAL);
		}

		/* Calibrate the AGC */
		REG_WRITE(ah, AR_PHY_AGC_CONTROL,
			  REG_READ(ah, AR_PHY_AGC_CONTROL) |
			  AR_PHY_AGC_CONTROL_CAL);

		/* Poll for offset calibration complete */
		if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
				   AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
				"offset calibration failed to complete in 1ms; "
				"noisy environment?\n");
			return false;
		}

		if (AR_SREV_9280_10_OR_LATER(ah)) {
			/* Undo the pre-cal ADC/filter settings. */
			if (!AR_SREV_9287_10_OR_LATER(ah))
				REG_SET_BIT(ah, AR_PHY_ADC_CTL,
					    AR_PHY_ADC_CTL_OFF_PWDADC);
			REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
				    AR_PHY_AGC_CONTROL_FLTR_CAL);
		}
	}

	/* Do PA Calibration.
	 * NOTE(review): this call passes two arguments, but the definition of
	 * ath9k_hw_9285_pa_cal() earlier in this file takes only (ah) — the
	 * two appear to come from different driver versions; confirm which
	 * prototype is the intended one. */
	if (AR_SREV_9285_11_OR_LATER(ah))
		ath9k_hw_9285_pa_cal(ah, true);

	/* Do NF Calibration after DC offset and other calibrations */
	REG_WRITE(ah, AR_PHY_AGC_CONTROL,
		  REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);

	/* Reset the periodic-calibration list before repopulating it. */
	ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;

	/* Enable IQ, ADC Gain and ADC DC offset CALs */
	if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
		if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
			INIT_CAL(&ah->adcgain_caldata);
			INSERT_CAL(ah, &ah->adcgain_caldata);
			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
				"enabling ADC Gain Calibration.\n");
		}
		if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) {
			INIT_CAL(&ah->adcdc_caldata);
			INSERT_CAL(ah, &ah->adcdc_caldata);
			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
				"enabling ADC DC Calibration.\n");
		}
		if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
			INIT_CAL(&ah->iq_caldata);
			INSERT_CAL(ah, &ah->iq_caldata);
			DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
				"enabling IQ Calibration.\n");
		}

		ah->cal_list_curr = ah->cal_list;

		/* Kick off the first registered calibration immediately. */
		if (ah->cal_list_curr)
			ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
	}

	chan->CalValid = 0;

	return true;
}
/*
 * get_reg() - read one CS8900 PacketPage register.
 *
 * The CS8900 exposes its register file through a pointer/data window:
 * the register number is written to the PacketPage pointer port, then the
 * register's contents are read back through the PacketPage data port.
 *
 * @dev:   ethernet device whose priv holds the cs8900 register mapping
 * @regno: PacketPage register offset to read
 *
 * Returns the 16-bit register value.
 */
static u16 get_reg(struct eth_device *dev, int regno)
{
	struct cs8900_priv *priv = dev->priv;
	u16 value;

	/* Select the register, then fetch it through the data window. */
	REG_WRITE(regno, &priv->regs->pptr);
	value = REG_READ(&priv->regs->pdata);

	return value;
}
/**
 * Power off sequence for video mode MIPI panel.
 * NOTE: do NOT modify this function
 *
 * Sequence: save pipe state (pipeconf, color coefficients, gamma palette),
 * disable the display plane / overlays / pipe, wait for the pipe to actually
 * shut down, let the panel driver power the panel off, then disable the MIPI
 * port and DSI controller and enter ULPS. Dual-DSI configurations repeat the
 * port/controller shutdown for the second link. On all exits the display
 * power island is released.
 *
 * Returns 0 on success, -EINVAL on bad config or island release failure,
 * -EAGAIN if the pipe or panel refuses to shut down.
 */
static int __dpi_panel_power_off(struct mdfld_dsi_config *dsi_config,
			struct panel_funcs *p_funcs)
{
	u32 val = 0;
	u32 tmp = 0;
	struct mdfld_dsi_hw_registers *regs;
	struct mdfld_dsi_hw_context *ctx;
	struct drm_device *dev;
	struct drm_psb_private *dev_priv;
	int retry;
	int i;
	int err = 0;
	u32 guit_val = 0;	/* NOTE(review): unused in this function */
	u32 power_island = 0;
	int offset = 0;

	PSB_DEBUG_ENTRY("\n");

	if (!dsi_config)
		return -EINVAL;

	regs = &dsi_config->regs;
	ctx = &dsi_config->dsi_hw_context;
	dev = dsi_config->dev;
	/* NOTE(review): dev_priv is assigned but not read below. */
	dev_priv = dev->dev_private;

	/* Don't reset brightness to 0.*/
	ctx->lastbrightnesslevel = psb_brightness;
	tmp = REG_READ(regs->pipeconf_reg);

	/*save color_coef (chrome) */
	for (i = 0; i < 6; i++)
		ctx->color_coef[i] = REG_READ(regs->color_coef_reg + (i<<2));

	/* save palette (gamma) */
	for (i = 0; i < 256; i++)
		ctx->palette[i] = REG_READ(regs->palette_reg + (i<<2));

	/*
	 * Couldn't disable the pipe until DRM_WAIT_ON signaled by last
	 * vblank event when playing video, otherwise the last vblank event
	 * will lost when pipe disabled before vblank interrupt coming
	 * sometimes.
	 */

	/*Disable panel*/
	val = ctx->dspcntr;
	REG_WRITE(regs->dspcntr_reg, (val & ~BIT31));
	/*Disable overlay & cursor panel assigned to this pipe*/
	REG_WRITE(regs->pipeconf_reg, (tmp | (0x000c0000)));

	/*Disable pipe*/
	val = REG_READ(regs->pipeconf_reg);
	ctx->pipeconf = val;	/* snapshot for the next power-on */
	REG_WRITE(regs->pipeconf_reg, (val & ~BIT31));

	/*wait for pipe disabling,
	pipe synchronization plus , only avaiable when
	timer generator is working*/
	if (REG_READ(regs->mipi_reg) & BIT31) {
		/* Poll BIT30 of pipeconf (pipe active) up to ~500ms. */
		retry = 100000;
		while (--retry && (REG_READ(regs->pipeconf_reg) & BIT30))
			udelay(5);

		if (!retry) {
			DRM_ERROR("Failed to disable pipe\n");
			err = -EAGAIN;
			goto power_off_err;
		}
	}

	/**
	 * Different panel may have different ways to have
	 * panel turned off. Support it!
	 */
	if (p_funcs && p_funcs->power_off) {
		if (p_funcs->power_off(dsi_config)) {
			DRM_ERROR("Failed to power off panel\n");
			err = -EAGAIN;
			goto power_off_err;
		}
	}

	/*Disable MIPI port*/
	REG_WRITE(regs->mipi_reg, (REG_READ(regs->mipi_reg) & ~BIT31));

	/*clear Low power output hold*/
	REG_WRITE(regs->mipi_reg, (REG_READ(regs->mipi_reg) & ~BIT16));

	/*Disable DSI controller*/
	REG_WRITE(regs->device_ready_reg, (ctx->device_ready & ~BIT0));

	/*enter ULPS*/
	__dpi_enter_ulps_locked(dsi_config, offset);

	/*
	 * Second DSI link teardown for dual-DSI panels.
	 * NOTE(review): offset is set to 0x1000 / 0x800 here, yet the MIPI
	 * and device-ready writes below use the same regs->* addresses as
	 * the first link without adding offset — presumably the offset is
	 * consumed only by __dpi_enter_ulps_locked(); confirm against the
	 * register map before relying on this path.
	 */
	if (is_dual_dsi(dev)) {
		offset = 0x1000;
		/*Disable MIPI port*/
		REG_WRITE(regs->mipi_reg, (REG_READ(regs->mipi_reg) & ~BIT31));
		/*clear Low power output hold*/
		REG_WRITE(regs->mipi_reg, (REG_READ(regs->mipi_reg) & ~BIT16));
		offset = 0x800;
		/*Disable DSI controller*/
		REG_WRITE(regs->device_ready_reg, (ctx->device_ready & ~BIT0));
		/*enter ULPS*/
		__dpi_enter_ulps_locked(dsi_config, offset);
		offset = 0x0;
	}

power_off_err:
	/* Release the display power island(s) for this pipe; MIO comes
	 * along with islands A/C, and dual-DSI always needs island C. */
	power_island = pipe_to_island(dsi_config->pipe);

	if (power_island & (OSPM_DISPLAY_A | OSPM_DISPLAY_C))
		power_island |= OSPM_DISPLAY_MIO;

	if (is_dual_dsi(dev))
		power_island |= OSPM_DISPLAY_C;

	if (!power_island_put(power_island))
		return -EINVAL;

	return err;
}