Пример #1
0
/* the mask of disabled rbs of the **selected sh** */
static u32 sh_rbs_dis_get(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	u32 rbs_dis;
	u32 mask;
	u32 backend_dis_shift;

	dd = pci_get_drvdata(dev);

	/* disabling a rb for a sh is done at 2 levels: CC_ and GC_USER_ */
	rbs_dis = rr32(dev, CC_RB_BACKEND_DIS);
	if (rbs_dis & CRBD_BACKEND_DIS_VALID)
		rbs_dis &= CRBD_BACKEND_DIS;
	else
		rbs_dis = 0;
	/* merge in the user-level disable bits, same field layout */
	rbs_dis |= rr32(dev, GC_USER_RB_BACKEND_DIS);

	/* don't use get(), since we may have major bits on top */
	backend_dis_shift = ffs(CRBD_BACKEND_DIS);
	/*
	 * NOTE(review): ffs() returns a 1-based bit index; if the field
	 * starts at bit ffs()-1 this shifts one position too far — confirm
	 * against the CRBD_BACKEND_DIS layout (cus_ena_get() does the same).
	 */
	rbs_dis >>= backend_dis_shift;

	/* get a bit mask for the rbs which are disabled for this sh */
	mask = bitmask_create(dd->cfg.gpu.se_rbs_n / dd->cfg.gpu.se_shs_n);
	return rbs_dis & mask;
}
Пример #2
0
/*
 * Pulse a soft reset of all the gfx blocks: assert the per-block reset
 * bits in GRBM_SOFT_RESET, wait, then de-assert them.
 * (Fixes `tmp=`/`tmp|=` spacing to match the file's style.)
 */
void gpu_soft_reset(struct pci_dev *dev)
{
	u32 grbm_reset;
	u32 tmp;

	/* reset all the gfx blocks */
	grbm_reset = (	GSR_SOFT_RESET_CP |
			GSR_SOFT_RESET_RLC |
			GSR_SOFT_RESET_CB |
			GSR_SOFT_RESET_DB |
			GSR_SOFT_RESET_GDS |
			GSR_SOFT_RESET_PA |
			GSR_SOFT_RESET_SC |
			GSR_SOFT_RESET_BCI |
			GSR_SOFT_RESET_SPI |
			GSR_SOFT_RESET_SX |
			GSR_SOFT_RESET_TC |
			GSR_SOFT_RESET_TA |
			GSR_SOFT_RESET_VGT |
			GSR_SOFT_RESET_IA);

	tmp = rr32(dev, GRBM_SOFT_RESET);
	tmp |= grbm_reset;
	wr32(dev, tmp, GRBM_SOFT_RESET);
	/* read back to post the write before the delay */
	tmp = rr32(dev, GRBM_SOFT_RESET);
	udelay(50);

	tmp &= ~grbm_reset;
	wr32(dev, tmp, GRBM_SOFT_RESET);
	rr32(dev, GRBM_SOFT_RESET);
	udelay(50);
}
Пример #3
0
/* turn on interrupt delivery and the ih ring buffer */
void ih_ena(struct pci_dev *dev)
{
	u32 ctl;
	u32 rb_ctl;

	ctl = rr32(dev, IH_CTL) | IC_ENA_INTR;
	rb_ctl = rr32(dev, IH_RB_CTL) | IRC_IH_RB_ENA;

	wr32(dev, ctl, IH_CTL);
	wr32(dev, rb_ctl, IH_RB_CTL);
}
Пример #4
0
/* turn off the ih ring buffer and interrupt delivery */
void ih_dis(struct pci_dev *dev)
{
	u32 rb_ctl;
	u32 ctl;

	rb_ctl = rr32(dev, IH_RB_CTL) & ~IRC_IH_RB_ENA;
	ctl = rr32(dev, IH_CTL) & ~IC_ENA_INTR;

	/* works even if ucode in not loaded */
	wr32(dev, rb_ctl, IH_RB_CTL);
	wr32(dev, ctl, IH_CTL);
}
Пример #5
0
/* set the load-balancing enable bit in RLC_LB_CTL */
void rlc_lb_pw_ena(struct pci_dev *dev)
{
	u32 ctl;

	ctl = rr32(dev, RLC_LB_CTL);
	wr32(dev, ctl | RLC_LB_ENA, RLC_LB_CTL);
}
Пример #6
0
/*
 * Handle a write pointer with the overflow flag set: warn, advance the
 * read pointer past the last overwritten vector, ack the overflow in
 * the hardware and return the write pointer with the flag cleared.
 * (Fixes the dev_warn format string: the two adjacent literals had no
 * separator, printing "wp=0x%08xrp=0x%08x".)
 */
static u64 wp_overflow(struct pci_dev *dev, u32 wp)
{
	struct dev_drv_data *dd;
	u32 tmp;

	dd = pci_get_drvdata(dev);

	if ((wp & IRW_RB_OVERFLOW) != 0) {
		/*
		 * When a ring buffer overflow happen start parsing interrupt
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catchup.
		 */
		dev_warn(&dev->dev, "ih ring buffer overflow wp=0x%08x, "
				"rp=0x%08x, trying next vector at 0x%08x\n",
				(u32)(wp & (~IRW_RB_OVERFLOW)), dd->ih.rp,
					(wp + VECTOR_SZ) & IH_RING_MASK);

		dd->ih.rp = (wp + VECTOR_SZ) & IH_RING_MASK;

		/* ack the overflow so the hardware clears its flag */
		tmp = rr32(dev, IH_RB_CTL);
		tmp |= IRC_IH_WPTR_OVERFLOW_CLR;
		wr32(dev, tmp, IH_RB_CTL);

		wp &= ~IRW_RB_OVERFLOW;
	}
	return wp;
}
Пример #7
0
/*
 * For each sh of each se, clear in SPI_STATIC_THD_MGMT_2 the bit of the
 * first enabled cu, then restore broadcast se/sh selection and program
 * the vtx done delay.
 */
static void spi_setup(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	u32 se;
	u32 sh;
	u32 cu;
	u32 spi_static_thd_mgmt_2;
	u32 cus_ena;
	u32 mask;

	dd = pci_get_drvdata(dev);

	for (se = 0; se < dd->cfg.gpu.ses_n; ++se) {
		for (sh = 0; sh < dd->cfg.gpu.se_shs_n; ++sh) {
			se_sh_select(dev, se, sh);
			spi_static_thd_mgmt_2 = rr32(dev,
							SPI_STATIC_THD_MGMT_2);
			cus_ena = cus_ena_get(dev);

			/*
			 * find the first enabled cu and clear its bit;
			 * the original cumulative "mask <<= cu" tested
			 * bits 0,1,3,6,... instead of each cu's bit
			 */
			for (cu = 0; cu < CGSAC_SH_CUS_N_MAX; ++cu) {
				mask = 1 << cu;
				if (cus_ena & mask) {
					spi_static_thd_mgmt_2 &= ~mask;
					wr32(dev, spi_static_thd_mgmt_2,
							SPI_STATIC_THD_MGMT_2);
					break;
				}
			}
		}
	}
	/* 0xffffffff restores broadcast to all ses/shs */
	se_sh_select(dev, 0xffffffff, 0xffffffff);

	wr32(dev, set(SCC_VTX_DONE_DELAY, SCC_DELAY_22_CLKS), SPI_CFG_CTL_1);
}
Пример #8
0
/* clear the load-balancing enable bit in RLC_LB_CTL */
void rlc_lb_pw_dis(struct pci_dev *dev)
{
	u32 ctl;

	ctl = rr32(dev, RLC_LB_CTL);
	wr32(dev, ctl & ~RLC_LB_ENA, RLC_LB_CTL);
}
Пример #9
0
/* write prev_rlc_ctl back to RLC_CTL if the hardware value drifted */
void rlc_update_ctl(struct pci_dev *dev, u32 prev_rlc_ctl)
{
	if (rr32(dev, RLC_CTL) != prev_rlc_ctl)
		wr32(dev, prev_rlc_ctl, RLC_CTL);
}
Пример #10
0
/*
 * gpu register defaults which need to be inited only once, most of them
 * are in the cfg space (PA_SC_RASTER_CFG is in the ctx space though)
 */
void gpu_defaults(struct pci_dev *dev, u32 addr_cfg, u32 mem_row_sz_kb)
{
	struct dev_drv_data *dd;
	u32 sx_debug_1;

	dd = pci_get_drvdata(dev);

	/* cap the grbm read timeout, then program the addressing cfg */
	wr32(dev, set(GC_RD_TIMEOUT, 0xff), GRBM_CTL);
	wr32(dev, addr_cfg, GB_ADDR_CFG);

	tiling_modes_tbl_init(dev, mem_row_sz_kb);
	rbs_setup(dev);
	spi_setup(dev);

	/* read-modify-write with no bit changed: re-latches SX_DEBUG_1 */
	sx_debug_1 = rr32(dev, SX_DEBUG_1);
	wr32(dev, sx_debug_1, SX_DEBUG_1);

	/* cfg space */
	/* scan-converter fifo sizes come from the per-asic gpu cfg */
	wr32(dev, set(PSFS_SC_FRONTEND_PRIM_FIFO_SZ,
					dd->cfg.gpu.sc_prim_fifo_sz_frontend)
		| set(PSFS_SC_BACKEND_PRIM_FIFO_SZ,
					dd->cfg.gpu.sc_prim_fifo_sz_backend)
		| set(PSFS_SC_HIZ_TILE_FIFO_SZ, dd->cfg.gpu.sc_hiz_tile_fifo_sz)
		| set(PSFS_SC_EARLYZ_TILE_FIFO_SZ,
					dd->cfg.gpu.sc_earlyz_tile_fifo_sz),
								PA_SC_FIFO_SZ);
	wr32(dev, 1, VGT_INSTS_N);

	wr32(dev, 0, SQ_CFG);

	wr32(dev, set(PSFEMC_FORCE_EOV_MAX_CLK_CNT, 4095)
				| set(PSFEMC_FORCE_EOV_MAX_REZ_CNT, 255),
						PA_SC_FORCE_EOV_MAX_CNTS);


	wr32(dev, set(VCI_CACHE_INVALIDATION, VCI_VC_AND_TC)
				| set(VCI_AUTO_INVLD_ENA, VCI_ES_AND_GS_AUTO),
							VGT_CACHE_INVALIDATION);

	wr32(dev, 16, VGT_GS_VTX_REUSE);
	wr32(dev, 0, PA_SC_LINE_STIPPLE_STATE);

	/* zero all cb performance counter selects */
	wr32(dev, 0, CB_PERF_CTR_0_SEL_0);
	wr32(dev, 0, CB_PERF_CTR_0_SEL_1);
	wr32(dev, 0, CB_PERF_CTR_1_SEL_0);
	wr32(dev, 0, CB_PERF_CTR_1_SEL_1);
	wr32(dev, 0, CB_PERF_CTR_2_SEL_0);
	wr32(dev, 0, CB_PERF_CTR_2_SEL_1);
	wr32(dev, 0, CB_PERF_CTR_3_SEL_0);
	wr32(dev, 0, CB_PERF_CTR_3_SEL_1);

	wr32(dev, PCE_CLIP_VTX_REORDER_ENA | set(PCE_CLIP_SEQ_N, 3),
								PA_CL_ENHANCE);

	/* let the writes settle before anything else touches the gpu */
	udelay(50);
}
Пример #11
0
/* disable rlc mgcg: force the two low override bits on */
void rlc_mgcg_dis(struct pci_dev *dev)
{
	u32 val;

	val = rr32(dev, RLC_CGTT_MGCG_OVERRIDE);
	if ((val | 0x00000003) != val)
		wr32(dev, val | 0x00000003, RLC_CGTT_MGCG_OVERRIDE);
}
Пример #12
0
/* enable rlc mgcg: clear the six low override bits */
void rlc_mgcg_ena(struct pci_dev *dev)
{
	u32 val;

	val = rr32(dev, RLC_CGTT_MGCG_OVERRIDE);
	if ((val & 0xffffffc0) != val)
		wr32(dev, val & 0xffffffc0, RLC_CGTT_MGCG_OVERRIDE);
}
Пример #13
0
/* enable gpu mgcg by programming the fixed CGTS_SM_CTL value */
void gpu_mgcg_ena(struct pci_dev *dev)
{
	u32 cur;

	cur = rr32(dev, CGTS_SM_CTL);
	if (cur != 0x96940200)
		wr32(dev, 0x96940200, CGTS_SM_CTL);
}
Пример #14
0
/* disable gpu mgcg: force the ls and global override bits on */
void gpu_mgcg_dis(struct pci_dev *dev)
{
	u32 cur;
	u32 forced;

	cur = rr32(dev, CGTS_SM_CTL);
	forced = cur | CSC_LS_OVERRIDE | CSC_OVERRIDE;
	if (forced != cur)
		wr32(dev, forced, CGTS_SM_CTL);
}
Пример #15
0
/* ih ring size is 2^IH_RING_LOG2_DWS(=14) dwords or 4096 vectors of 16 bytes */
/*
 * Program the ih ring base, ring control, write-pointer writeback
 * address and default IH_CTL, then reset the software read pointer.
 * Interrupts stay disabled; ih_ena() turns them on.
 * (Fixes the stray ';' after the closing brace — invalid in strict
 * ISO C before C23 — and trailing whitespace.)
 */
void ih_init(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	u32 intr_ctl_0;
	u32 ih_rb_ctl;
	u32 ih_ctl;
	u64 wb_ih_wptr_gpu_addr;

	dd = pci_get_drvdata(dev);

	/*
	 * setup interrupt control
	 * set dummy read gpu address to ring gpu address
	 * 256 bytes block index
	 */
	wr32(dev, dd->ba.ih_ring_map->gpu_addr >> 8, INTR_CTL_1);

	intr_ctl_0 = rr32(dev, INTR_CTL_0);
	/*
	 * IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled
	 *                          without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_ENA
	 */
	intr_ctl_0 &= ~IC_IH_DUMMY_RD_OVERRIDE;

	/* IH_REQ_NONSNOOP_ENA=1 if ring is in non-cacheable mem, e.g. vram */
	intr_ctl_0 &= ~IC_IH_REQ_NONSNOOP_ENA; /* we are in bus aperture */
	wr32(dev, intr_ctl_0, INTR_CTL_0);

	/* ring base, as a 256 bytes block index */
	wr32(dev, dd->ba.ih_ring_map->gpu_addr >> 8, IH_RB_BASE);

	ih_rb_ctl = (IRC_IH_WPTR_OVERFLOW_ENA | IRC_IH_WPTR_OVERFLOW_CLR
				| set(IRC_IH_IB_LOG2_DWS, IH_RING_LOG2_DWS)
					| IRC_IH_WPTR_WRITEBACK_ENA);

	/* writeback address for the write pointer, must be dw aligned */
	wb_ih_wptr_gpu_addr = dd->ba.wb_map->gpu_addr + WB_IH_WPTR_OF;
	wr32(dev, lower_32_bits(wb_ih_wptr_gpu_addr), IH_RB_WPTR_ADDR_LO);
	wr32(dev, upper_32_bits(wb_ih_wptr_gpu_addr), IH_RB_WPTR_ADDR_HI);
	wr32(dev, ih_rb_ctl, IH_RB_CTL);

	ih_reset(dev);

	/* default settings for IH_CTL (disabled at first) */
	ih_ctl = set(IC_MC_WR_REQ_CREDIT, 0x10) | set(IC_MC_WR_CLEAN_CNT, 0x10)
					| set(IC_MC_VM_ID, 0) | IC_RPTR_REARM;
	wr32(dev, ih_ctl, IH_CTL);

	dd->ih.rp = 0;
	spin_lock_init(&dd->ih.lock);
}
Пример #16
0
/* poll until both rlc serdes master busy registers read back zero */
void rlc_serdes_wait(struct pci_dev *dev)
{
	u32 t;

	for (t = 0; t < USEC_TIMEOUT; ++t) {
		if (rr32(dev, RLC_SERDES_MASTER_BUSY_0) == 0)
			break;
		udelay(1);
	}

	for (t = 0; t < USEC_TIMEOUT; ++t) {
		if (rr32(dev, RLC_SERDES_MASTER_BUSY_1) == 0)
			break;
		udelay(1);
	}
}
Пример #17
0
/*
 * Disable the rlc if it is enabled and wait for the serdes to go idle.
 * Returns the RLC_CTL value as written (ena bit cleared), or the value
 * read when the rlc was already disabled.
 */
static u32 rlc_dis(struct pci_dev *dev)
{
	u32 ctl;

	ctl = rr32(dev, RLC_CTL);
	if (!(ctl & RC_RLC_ENA))
		return ctl;

	ctl &= ~RC_RLC_ENA;
	wr32(dev, ctl, RLC_CTL);
	rlc_serdes_wait(dev);
	return ctl;
}
Пример #18
0
/* pulse the rlc soft reset bit in GRBM_SOFT_RESET */
void rlc_reset(struct pci_dev *dev)
{
	u32 soft_reset;

	soft_reset = rr32(dev, GRBM_SOFT_RESET);

	wr32(dev, soft_reset | GSR_SOFT_RESET_RLC, GRBM_SOFT_RESET);
	udelay(50);

	wr32(dev, soft_reset & ~GSR_SOFT_RESET_RLC, GRBM_SOFT_RESET);
	udelay(50);
}
Пример #19
0
/* get enabled cus for the **selected sh** */
static u32 cus_ena_get(struct pci_dev *dev)
{
	struct dev_drv_data *dd;
	u32 cus_dis;
	u32 mask;
	u32 inactive_cus_shift;

	dd = pci_get_drvdata(dev);

	/* disabling a cu for a sh is done at 2 levels: CC_ and GC_USER_ */
	cus_dis = rr32(dev, CC_GC_SHADER_ARRAY_CFG);
	if (cus_dis & CGSAC_INACTIVE_CUS_VALID)
		cus_dis &= CGSAC_INACTIVE_CUS;
	else
		cus_dis = 0;
	/* merge in the user-level disable bits, same field layout */
	cus_dis |= rr32(dev, GC_USER_SHADER_ARRAY_CFG);

	/*
	 * NOTE(review): ffs() returns a 1-based bit index; if the field
	 * starts at bit ffs()-1 this shifts one position too far — confirm
	 * against the CGSAC_INACTIVE_CUS layout (sh_rbs_dis_get() does the
	 * same).
	 */
	inactive_cus_shift = ffs(CGSAC_INACTIVE_CUS);
	cus_dis >>= inactive_cus_shift;

	/* invert the disabled mask to get the enabled cus of this sh */
	mask = bitmask_create(dd->cfg.gpu.sh_cus_n);
	return ~cus_dis & mask;
}
Пример #20
0
/*
 * Poll RLC_STAT until, among the watched bits, only the gfx clock and
 * power status bits are set (rlc idle, no light sleep), or time out.
 */
void rlc_wait(struct pci_dev *dev)
{
	u32 watched;
	u32 want;
	u32 i;

	watched = RS_RLC_BUSY_STATUS | RS_GFX_PWR_STATUS | RS_GFX_CLK_STATUS
							| RS_GFX_LS_STATUS;
	want = RS_GFX_CLK_STATUS | RS_GFX_PWR_STATUS;

	for (i = 0; i < USEC_TIMEOUT; ++i) {
		if ((rr32(dev, RLC_STAT) & watched) == want)
			break;
		udelay(1);
	}
}
Пример #21
0
File: timer.c Project: elinx/os
/* current countdown value of timer 1 (fixes the old-style () declarator) */
u32 timer_cur_value(void)
{
        return rr32(T1_VALUE);
}
Пример #22
0
File: timer.c Project: elinx/os
/* masked interrupt status of timer 1 (fixes the old-style () declarator) */
int timer_msk_int_status(void)
{
        return rr32(T1_MIS) & TIMER_MSK_INTERRUPT_MASK;
}
Пример #23
0
File: timer.c Project: elinx/os
/* raw interrupt status of timer 1 (fixes the old-style () declarator) */
int timer_raw_int_status(void)
{
        return rr32(T1_RIS) & TIMER_RAW_INTERRUPT_MASK;
}