/*
 * rs400_gart_init - one-time setup of the RS400 RAM-backed GART.
 *
 * Validates the requested aperture size, initializes the common gart
 * structure and allocates the system-RAM page table.
 * Returns 0 on success (including when already initialized) or a
 * negative error code.
 */
int rs400_gart_init(struct radeon_device *rdev)
{
	int ret;

	/* Nothing to do if the RAM-backed table was already set up. */
	if (rdev->gart.table.ram.ptr) {
		WARN(1, "RS400 GART already initialized.\n");
		return 0;
	}
	/*
	 * Only a fixed set of power-of-two aperture sizes (in MB) is
	 * acceptable; reject anything else before touching common state.
	 */
	switch (rdev->mc.gtt_size / (1024 * 1024)) {
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		return -EINVAL;
	}
	/* Set up the common gart bookkeeping shared by all asics. */
	ret = radeon_gart_init(rdev);
	if (ret)
		return ret;
	/* A debugfs registration failure is non-fatal: log and carry on. */
	if (rs400_debugfs_pcie_gart_info_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
	/* Four bytes of table space per GPU page on this asic. */
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	return radeon_gart_table_ram_alloc(rdev);
}
/*
 * rs600_gart_enable - allocate and switch on the RS600 VRAM-backed GART.
 *
 * Initializes the common gart structure, allocates the page table in VRAM,
 * then programs the MC page-table registers and enables translation.
 * Returns 0 on success or a negative error code from the init/alloc steps.
 *
 * NOTE(review): unlike rs400_gart_init()/r600_pcie_gart_init() there is no
 * "already initialized" guard here, so a second call (e.g. on a resume
 * path) would re-run radeon_gart_init() and the VRAM table allocation —
 * verify callers only invoke this once, or split init from enable.
 */
int rs600_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;
	int r;

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	/* Eight bytes of table space per GPU page on this asic. */
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	r = radeon_gart_table_vram_alloc(rdev);
	if (r) {
		return r;
	}
	/* FIXME: setup default page */
	/* Global page-table L2 cache/queue sizing. */
	WREG32_MC(RS600_MC_PT0_CNTL,
		  (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
		   RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
	/*
	 * Program all 19 client control registers with the same translation
	 * override / L1 sizing settings (registers are consecutive, hence
	 * the "+ i" addressing).
	 */
	for (i = 0; i < 19; i++) {
		WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i,
			  (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
			   RS600_SYSTEM_ACCESS_MODE_IN_SYS |
			   RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE |
			   RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
			   RS600_ENABLE_FRAGMENT_PROCESSING |
			   RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
	}
	/* System context map to GART space: aperture spans the whole
	 * GTT region [gtt_location, gtt_location + gtt_size - 1]. */
	WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);
	/* enable first context, flat-mapped over the same GTT range */
	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp);
	WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL,
		  (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT));
	/* disable all other contexts (1..7; control regs are consecutive) */
	for (i = 1; i < 8; i++) {
		WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
	}
	/* setup the page table: point context 0 at the VRAM table */
	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
		  rdev->gart.table_addr);
	WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
	/* enable page tables: read-modify-write so other CNTL bits are kept */
	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
	WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT));
	tmp = RREG32_MC(RS600_MC_CNTL1);
	WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES));
	/* Flush stale translations before declaring the gart usable. */
	rs600_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
/*
 * r600_pcie_gart_init - one-time setup of the R600 PCIE GART.
 *
 * Initializes the common gart structure and allocates the page table
 * object in VRAM. Returns 0 on success (including when already
 * initialized) or a negative error code.
 */
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int ret;

	/* Refuse to set up twice: the VRAM table object already exists. */
	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Set up the common gart bookkeeping shared by all asics. */
	ret = radeon_gart_init(rdev);
	if (ret)
		return ret;
	/* Eight bytes of table space per GPU page on this asic. */
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}