Example #1
/*
 * MC
 */
int rv515_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}
	if (rv515_debugfs_pipes_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for pipes !\n");
	}
	if (rv515_debugfs_ga_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for pipes !\n");
	}

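	/* Put the GPU in a known state and make sure the PCIE GART is
	 * disabled before reprogramming the MC. */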
	rv515_gpu_init(rdev);
	rv370_pcie_gart_disable(rdev);

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	rs600_mc_disable_clients(rdev);
	if (rv515_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	/* Write VRAM size in case we are limiting it */
	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
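	/* Program the VRAM aperture (FB_START/FB_TOP) in the MC and mirror
	 * the base in HDP. */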
	tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32(0x134, tmp);
	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	tmp = REG_SET(MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32_MC(MC_FB_LOCATION, tmp);
	WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
	WREG32(0x310, rdev->mc.vram_location);
	if (rdev->flags & RADEON_IS_AGP) {
		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
		tmp = REG_SET(MC_AGP_TOP, tmp >> 16);
		tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16);
		WREG32_MC(MC_AGP_LOCATION, tmp);
		WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base);
		WREG32_MC(MC_AGP_BASE_2, 0);
	} else {
		WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32_MC(MC_AGP_BASE, 0);
		WREG32_MC(MC_AGP_BASE_2, 0);
	}
	return 0;
}
Example #2
void rs400_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

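	/* Block out-of-GART PCI accesses, then shrink the GART aperture to zero. */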
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
}
Example #3
void rs600_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* FIXME: disable out of gart access */
	WREG32_MC(RS600_MC_PT0_CNTL, 0);
	tmp = RREG32_MC(RS600_MC_CNTL1);
	tmp &= ~RS600_ENABLE_PAGE_TABLES;
	WREG32_MC(RS600_MC_CNTL1, tmp);
	radeon_object_kunmap(rdev->gart.table.vram.robj);
	radeon_object_unpin(rdev->gart.table.vram.robj);
}
Example #4
void rs400_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	unsigned int timeout = rdev->usec_timeout;

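	/* Request a GART cache invalidate and poll until the hardware clears
	 * the bit or the timeout expires. */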
	WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
	do {
		tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
		if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
			break;
		DRM_UDELAY(1);
		timeout--;
	} while (timeout > 0);
	WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
}
Example #5
/*
 * GART.
 */
void rs600_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;

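	/* Pulse the L1 TLB / L2 cache invalidate bits (clear, set, clear)
	 * and read back to post the writes. */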
	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
	tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
	WREG32_MC(RS600_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
	tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
	WREG32_MC(RS600_MC_PT0_CNTL, tmp);

	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
	tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
	WREG32_MC(RS600_MC_PT0_CNTL, tmp);
	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
}
Example #6
int rs600_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	rs600_gpu_init(rdev);
	rs600_gart_disable(rdev);

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	/* Enable bus master */
	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
	WREG32(RADEON_BUS_CNTL, tmp);
	/* FIXME: What does AGP mean for this chipset? */
	WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
	/* FIXME: are these AGP regs in the indirect MC range? */
	WREG32_MC(RS600_MC_AGP_BASE, 0);
	WREG32_MC(RS600_MC_AGP_BASE_2, 0);
	rs600_mc_disable_clients(rdev);
	if (rs600_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
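	/* Program the VRAM aperture (FB_START/FB_TOP) and mirror the base in HDP. */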
	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32_MC(RS600_MC_FB_LOCATION, tmp);
	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
	return 0;
}
Example #7
/*
 * MC functions.
 */
int rs690_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	rs690_gpu_init(rdev);
	rs400_gart_disable(rdev);

	/* Setup GPU memory space */
	rdev->mc.gtt_location = rdev->mc.mc_vram_size;
	rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
	rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	rs600_mc_disable_clients(rdev);
	if (rs690_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
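	/* Program the VRAM aperture and mirror the base in HDP. */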
	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
	/* FIXME: Does this reg exist on RS480,RS740 ? */
	WREG32(0x310, rdev->mc.vram_location);
	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
	return 0;
}
Example #8
void rs690_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	struct rs690_watermark wm0;
	struct rs690_watermark wm1;
	u32 tmp;
	fixed20_12 priority_mark02, priority_mark12, fill_rate;
	fixed20_12 a, b;

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
	/*
	 * Set display0/1 priority up in the memory controller for
	 * modes if the user specifies HIGH for displaypriority
	 * option.
	 */
	if (rdev->disp_priority == 2) {
		tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER);
		tmp &= ~MC_DISP1R_INIT_LAT_MASK;
		tmp &= ~MC_DISP0R_INIT_LAT_MASK;
		if (mode1)
			tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
		if (mode0)
			tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
		WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp);
	}
	rs690_line_buffer_adjust(rdev, mode0, mode1);

	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
		WREG32(DCP_CONTROL, 0);
	if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
		WREG32(DCP_CONTROL, 2);

	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);

	tmp = (wm0.lb_request_fifo_depth - 1);
	tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);

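	/* Derive a priority mark for each active CRTC from its worst-case
	 * latency, its consumption rate, and the fill rate derived from sclk. */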
	if (mode0 && mode1) {
		if (rfixed_trunc(wm0.dbpp) > 64)
			a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
		else
			a.full = wm0.num_line_pair.full;
		if (rfixed_trunc(wm1.dbpp) > 64)
			b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
		else
			b.full = wm1.num_line_pair.full;
		a.full += b.full;
		fill_rate.full = rfixed_div(wm0.sclk, a);
		if (wm0.consumption_rate.full > fill_rate.full) {
			b.full = wm0.consumption_rate.full - fill_rate.full;
			b.full = rfixed_mul(b, wm0.active_time);
			a.full = rfixed_mul(wm0.worst_case_latency,
						wm0.consumption_rate);
			a.full = a.full + b.full;
			b.full = rfixed_const(16 * 1000);
			priority_mark02.full = rfixed_div(a, b);
		} else {
			a.full = rfixed_mul(wm0.worst_case_latency,
						wm0.consumption_rate);
			b.full = rfixed_const(16 * 1000);
			priority_mark02.full = rfixed_div(a, b);
		}
		if (wm1.consumption_rate.full > fill_rate.full) {
			b.full = wm1.consumption_rate.full - fill_rate.full;
			b.full = rfixed_mul(b, wm1.active_time);
			a.full = rfixed_mul(wm1.worst_case_latency,
						wm1.consumption_rate);
			a.full = a.full + b.full;
			b.full = rfixed_const(16 * 1000);
			priority_mark12.full = rfixed_div(a, b);
		} else {
			a.full = rfixed_mul(wm1.worst_case_latency,
						wm1.consumption_rate);
			b.full = rfixed_const(16 * 1000);
			priority_mark12.full = rfixed_div(a, b);
		}
		if (wm0.priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark.full;
		if (rfixed_trunc(priority_mark02) < 0)
			priority_mark02.full = 0;
		if (wm0.priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark_max.full;
		if (wm1.priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark.full;
		if (rfixed_trunc(priority_mark12) < 0)
			priority_mark12.full = 0;
		if (wm1.priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark_max.full;
		WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
		WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
		WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
		WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
	} else if (mode0) {
		if (rfixed_trunc(wm0.dbpp) > 64)
			a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
		else
			a.full = wm0.num_line_pair.full;
		fill_rate.full = rfixed_div(wm0.sclk, a);
		if (wm0.consumption_rate.full > fill_rate.full) {
			b.full = wm0.consumption_rate.full - fill_rate.full;
			b.full = rfixed_mul(b, wm0.active_time);
			a.full = rfixed_mul(wm0.worst_case_latency,
						wm0.consumption_rate);
			a.full = a.full + b.full;
			b.full = rfixed_const(16 * 1000);
			priority_mark02.full = rfixed_div(a, b);
		} else {
			a.full = rfixed_mul(wm0.worst_case_latency,
						wm0.consumption_rate);
			b.full = rfixed_const(16 * 1000);
			priority_mark02.full = rfixed_div(a, b);
		}
		if (wm0.priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark.full;
		if (rfixed_trunc(priority_mark02) < 0)
			priority_mark02.full = 0;
		if (wm0.priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark_max.full;
		WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
		WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
		WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
		WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
	} else {
		if (rfixed_trunc(wm1.dbpp) > 64)
			a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
		else
			a.full = wm1.num_line_pair.full;
		fill_rate.full = rfixed_div(wm1.sclk, a);
		if (wm1.consumption_rate.full > fill_rate.full) {
			b.full = wm1.consumption_rate.full - fill_rate.full;
			b.full = rfixed_mul(b, wm1.active_time);
			a.full = rfixed_mul(wm1.worst_case_latency,
						wm1.consumption_rate);
			a.full = a.full + b.full;
			b.full = rfixed_const(16 * 1000);
			priority_mark12.full = rfixed_div(a, b);
		} else {
			a.full = rfixed_mul(wm1.worst_case_latency,
						wm1.consumption_rate);
			b.full = rfixed_const(16 * 1000);
			priority_mark12.full = rfixed_div(a, b);
		}
		if (wm1.priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark.full;
		if (rfixed_trunc(priority_mark12) < 0)
			priority_mark12.full = 0;
		if (wm1.priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark_max.full;
		WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
		WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
		WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
		WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
	}
}
Example #9
int rs600_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;
	int r;

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
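	/* Each GART page-table entry is 8 bytes. */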
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	r = radeon_gart_table_vram_alloc(rdev);
	if (r) {
		return r;
	}
	/* FIXME: setup default page */
	WREG32_MC(RS600_MC_PT0_CNTL,
		 (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
		  RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
	for (i = 0; i < 19; i++) {
		WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i,
			 (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
			  RS600_SYSTEM_ACCESS_MODE_IN_SYS |
			  RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE |
			  RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
			  RS600_ENABLE_FRAGMENT_PROCESSING |
			  RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
	}

	/* System context map to GART space */
	WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);

	/* enable first context */
	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp);
	WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL,
		 (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT));
	/* disable all other contexts */
	for (i = 1; i < 8; i++) {
		WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
	}

	/* setup the page table */
	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		 rdev->gart.table_addr);
	WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);

	/* enable page tables */
	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
	WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT));
	tmp = RREG32_MC(RS600_MC_CNTL1);
	WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES));
	rs600_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
Example #10
int rs400_gart_enable(struct radeon_device *rdev)
{
	uint32_t size_reg;
	uint32_t tmp;

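	/* Block out-of-GART PCI accesses while the aperture is reprogrammed. */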
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
	/* Check gart size */
	switch (rdev->mc.gtt_size / (1024 * 1024)) {
	case 32:
		size_reg = RS480_VA_SIZE_32MB;
		break;
	case 64:
		size_reg = RS480_VA_SIZE_64MB;
		break;
	case 128:
		size_reg = RS480_VA_SIZE_128MB;
		break;
	case 256:
		size_reg = RS480_VA_SIZE_256MB;
		break;
	case 512:
		size_reg = RS480_VA_SIZE_512MB;
		break;
	case 1024:
		size_reg = RS480_VA_SIZE_1GB;
		break;
	case 2048:
		size_reg = RS480_VA_SIZE_2GB;
		break;
	default:
		return -EINVAL;
	}
	/* It should be fine to program it to max value */
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
		WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
	} else {
		WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
		WREG32(RS480_AGP_BASE_2, 0);
	}
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16);
	tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16);
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
		tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
		WREG32(RADEON_BUS_CNTL, tmp);
	} else {
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
		tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
		WREG32(RADEON_BUS_CNTL, tmp);
	}
	/* The table base is split: page-aligned low 32 bits, with physical
	 * address bits 32-39 packed into register bits 4-11. */
	tmp = (u32)rdev->gart.table_addr & 0xfffff000;
	tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;

	WREG32_MC(RS480_GART_BASE, tmp);
	/* TODO: more tweaking here */
	WREG32_MC(RS480_GART_FEATURE_ID,
		  (RS480_TLB_ENABLE |
		   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
	/* Disable snooping */
	WREG32_MC(RS480_AGP_MODE_CNTL,
		  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
	/* Disable AGP mode */
	/* FIXME: according to the docs we should set HIDE_MMCFG_BAR=0,
	 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS480_MC_MISC_CNTL,
			  (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
	} else {
		WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
	}
	/* Enable gart */
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
	rs400_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}