static int sfbhack_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_bo *nvbo = NULL; uint32_t tile_flags = dev_priv->card_type == NV_50 ? 0x7000 : 0x0000; int ret, size; if (dev_priv->sfb_gem) return 0; size = nouveau_mem_fb_amount(dev); if (size > drm_get_resource_len(dev, 1)) size = drm_get_resource_len(dev, 1); size >>= 1; ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM, 0, tile_flags, false, true, &nvbo); if (ret) return ret; ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM); if (ret) { nouveau_bo_ref(NULL, &nvbo); return ret; } dev_priv->sfb_gem = nvbo->gem; return 0; }
static int nouveau_init_card_mappings(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; /* resource 0 is mmio regs */ /* resource 1 is linear FB */ /* resource 2 is RAMIN (mmio regs + 0x1000000) */ /* resource 6 is bios */ /* map the mmio regs */ ret = drm_addmap(dev, drm_get_resource_start(dev, 0), drm_get_resource_len(dev, 0), _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); if (ret) { DRM_ERROR("Unable to initialize the mmio mapping (%d). " "Please report your setup to " DRIVER_EMAIL "\n", ret); return -EINVAL; } DRM_DEBUG("regs mapped ok at 0x%lx\n", dev_priv->mmio->offset); /* map larger RAMIN aperture on NV40 cards */ dev_priv->ramin = NULL; if (dev_priv->card_type >= NV_40) { int ramin_resource = 2; if (drm_get_resource_len(dev, ramin_resource) == 0) ramin_resource = 3; ret = drm_addmap(dev, drm_get_resource_start(dev, ramin_resource), drm_get_resource_len(dev, ramin_resource), _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->ramin); if (ret) { DRM_ERROR("Failed to init RAMIN mapping, " "limited instance memory available\n"); dev_priv->ramin = NULL; } } /* On older cards (or if the above failed), create a map covering * the BAR0 PRAMIN aperture */ if (!dev_priv->ramin) { ret = drm_addmap(dev, drm_get_resource_start(dev, 0) + NV_RAMIN, (1*1024*1024), _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->ramin); if (ret) { DRM_ERROR("Failed to map BAR0 PRAMIN: %d\n", ret); return ret; } } return 0; }
void rs690_vram_info(struct radeon_device *rdev) { uint32_t tmp; fixed20_12 a; rs400_gart_adjust_size(rdev); /* DDR for all card after R300 & IGP */ rdev->mc.vram_is_ddr = true; /* FIXME: is this correct for RS690/RS740 ? */ tmp = RREG32(RADEON_MEM_CNTL); if (tmp & R300_MEM_NUM_CHANNELS_MASK) { rdev->mc.vram_width = 128; } else { rdev->mc.vram_width = 64; } rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); rdev->mc.mc_vram_size = rdev->mc.real_vram_size; rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); rs690_pm_info(rdev); /* FIXME: we should enforce default clock in case GPU is not in * default setup */ a.full = rfixed_const(100); rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); a.full = rfixed_const(16); /* core_bandwidth = sclk(Mhz) * 16 */ rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); }
static int mali_driver_load(struct drm_device *dev, unsigned long chipset) { int ret; unsigned long base, size; drm_mali_private_t *dev_priv; printk(KERN_ERR "DRM: mali_driver_load start\n"); dev_priv = drm_calloc(1, sizeof(drm_mali_private_t), DRM_MEM_DRIVER); if ( dev_priv == NULL ) return -ENOMEM; dev->dev_private = (void *)dev_priv; if ( NULL == dev->platformdev ) { dev->platformdev = platform_device_register_simple(mali_drm_device_name, 0, NULL, 0); pdev = dev->platformdev; } #if 0 base = drm_get_resource_start(dev, 1 ); size = drm_get_resource_len(dev, 1 ); #endif ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); if ( ret ) drm_free(dev_priv, sizeof(dev_priv), DRM_MEM_DRIVER); //if ( ret ) kfree( dev_priv ); printk(KERN_ERR "DRM: mali_driver_load done\n"); return ret; }
/*
 * Record the VRAM type and the BAR0 aperture base/size for RS780.
 * (Size probing itself is still marked FIXME upstream.)
 */
void rs780_vram_info(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;

	rs780_vram_get_type(rdev);
	/* FIXME: implement */

	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(ddev, 0);
}
/*
 * Tear down the VRAM allocator and remove the write-combining MTRR
 * that pscnv_mem_init() installed over the BAR1 framebuffer aperture.
 */
void pscnv_mem_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	dev_priv->vram->takedown(dev);

	if (dev_priv->fb_mtrr >= 0) {
		drm_mtrr_del(dev_priv->fb_mtrr,
			     drm_get_resource_start(dev, 1),
			     drm_get_resource_len(dev, 1), DRM_MTRR_WC);
		/* BUGFIX: was reset to 0 — but 0 is a valid MTRR handle and
		 * still satisfies the >= 0 guard, so a second takedown would
		 * delete someone else's region.  -1 marks "no MTRR held". */
		dev_priv->fb_mtrr = -1;
	}
}
int pscnv_mem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; int dma_bits = 32; #ifdef __linux__ if (dev_priv->card_type >= NV_50 && pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) dma_bits = 40; ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits)); if (ret) { NV_ERROR(dev, "Error setting DMA mask: %d\n", ret); return ret; } #else if (dev_priv->card_type >= NV_50) dma_bits = 40; #endif dev_priv->dma_mask = DMA_BIT_MASK(dma_bits); spin_lock_init(&dev_priv->pramin_lock); mutex_init(&dev_priv->vram_mutex); switch (dev_priv->card_type) { case NV_50: ret = nv50_vram_init(dev); break; case NV_D0: case NV_C0: ret = nvc0_vram_init(dev); break; default: NV_ERROR(dev, "No VRAM allocator for NV%02x!\n", dev_priv->chipset); ret = -ENOSYS; } if (ret) return ret; dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), drm_get_resource_len(dev, 1), DRM_MTRR_WC); return 0; }
/*
 * Initialize mappings. On Savage4 and SavageIX the alignment
 * and size of the aperture is not suitable for automatic MTRR setup
 * in drm_addmap. Therefore we add them manually before the maps are
 * initialized, and tear them down on last close.
 */
int savage_driver_firstopen(drm_device_t *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	unsigned long mmio_base, fb_base, fb_size, aperture_base;
	/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
	 * in case we decide we need information on the BAR for BSD in the
	 * future.
	 */
	unsigned int fb_rsrc, aper_rsrc;
	int ret = 0;

	/* -1 marks "no MTRR added" so teardown can skip unused slots
	 * (per the header comment, they are removed on last close). */
	dev_priv->mtrr[0].handle = -1;
	dev_priv->mtrr[1].handle = -1;
	dev_priv->mtrr[2].handle = -1;
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		/* Savage3D series: FB, MMIO and aperture all live in BAR0. */
		fb_rsrc = 0;
		fb_base = drm_get_resource_start(dev, 0);
		fb_size = SAVAGE_FB_SIZE_S3;
		mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
		aper_rsrc = 0;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (drm_get_resource_len(dev, 0) == 0x08000000) {
			/* Don't make MMIO write-combining! We need 3
			 * MTRRs. */
			dev_priv->mtrr[0].base = fb_base;
			dev_priv->mtrr[0].size = 0x01000000;
			dev_priv->mtrr[0].handle = mtrr_add(
				dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
				MTRR_TYPE_WRCOMB, 1);
			dev_priv->mtrr[1].base = fb_base+0x02000000;
			dev_priv->mtrr[1].size = 0x02000000;
			dev_priv->mtrr[1].handle = mtrr_add(
				dev_priv->mtrr[1].base, dev_priv->mtrr[1].size,
				MTRR_TYPE_WRCOMB, 1);
			dev_priv->mtrr[2].base = fb_base+0x04000000;
			dev_priv->mtrr[2].size = 0x04000000;
			dev_priv->mtrr[2].handle = mtrr_add(
				dev_priv->mtrr[2].base, dev_priv->mtrr[2].size,
				MTRR_TYPE_WRCOMB, 1);
		} else {
			DRM_ERROR("strange pci_resource_len %08lx\n",
				  drm_get_resource_len(dev, 0));
		}
	} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
		   dev_priv->chipset != S3_SAVAGE2000) {
		/* Savage4/SavageIX: MMIO in BAR0, FB plus aperture in BAR1. */
		mmio_base = drm_get_resource_start(dev, 0);
		fb_rsrc = 1;
		fb_base = drm_get_resource_start(dev, 1);
		fb_size = SAVAGE_FB_SIZE_S4;
		aper_rsrc = 1;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (drm_get_resource_len(dev, 1) == 0x08000000) {
			/* Can use one MTRR to cover both fb and
			 * aperture. */
			dev_priv->mtrr[0].base = fb_base;
			dev_priv->mtrr[0].size = 0x08000000;
			dev_priv->mtrr[0].handle = mtrr_add(
				dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
				MTRR_TYPE_WRCOMB, 1);
		} else {
			DRM_ERROR("strange pci_resource_len %08lx\n",
				  drm_get_resource_len(dev, 1));
		}
	} else {
		/* SuperSavage/Savage2000: MMIO, FB and aperture are in
		 * separate BARs. */
		mmio_base = drm_get_resource_start(dev, 0);
		fb_rsrc = 1;
		fb_base = drm_get_resource_start(dev, 1);
		fb_size = drm_get_resource_len(dev, 1);
		aper_rsrc = 2;
		aperture_base = drm_get_resource_start(dev, 2);
		/* Automatic MTRR setup will do the right thing. */
	}

	ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
			 _DRM_READ_ONLY, &dev_priv->mmio);
	if (ret)
		return ret;

	ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
			 _DRM_WRITE_COMBINING, &dev_priv->fb);
	if (ret)
		return ret;

	ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
			 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
			 &dev_priv->aperture);
	if (ret)
		return ret;

	/* NOTE(review): MTRRs added above are not rolled back if a
	 * drm_addmap fails; presumably last-close teardown handles
	 * them — confirm against savage_driver_lastclose. */
	return ret;
}
int nouveau_load(struct drm_device *dev, unsigned long flags) { struct drm_pscnv_virt_private *dev_priv; resource_size_t mmio_start_offs, call_start_offset; int ret; /* allocate the private device data */ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); if (!dev_priv) return -ENOMEM; dev->dev_private = dev_priv; dev_priv->dev = dev; dev_priv->flags = flags; /* resource 0 is mmio regs */ /* resource 1 is hypercall buffer */ /* resource 2 is mapped vram */ /* map the mmio regs */ mmio_start_offs = drm_get_resource_start(dev, 0); ret = drm_addmap(dev, mmio_start_offs, PSCNV_VIRT_MMIO_SIZE, _DRM_REGISTERS, _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio); if (ret) { NV_ERROR(dev, "Unable to initialize the mmio mapping.\n"); return ret; } /* map the ring buffer */ call_start_offset = drm_get_resource_start(dev, 1); ret = drm_addmap(dev, call_start_offset, PSCNV_CALL_AREA_SIZE, _DRM_REGISTERS, _DRM_KERNEL | _DRM_DRIVER, &dev_priv->call_data); if (ret) { NV_ERROR(dev, "Unable to initialize the call data mapping.\n"); return ret; } dev_priv->vram_base = drm_get_resource_start(dev, 2); dev_priv->vram_size = drm_get_resource_len(dev, 2); /* initialize the hypercall interface */ pscnv_virt_call_init(dev_priv); ret = drm_irq_install(dev); if (ret) { NV_ERROR(dev, "Unable to register the call interrupt.\n"); return ret; } memset(dev_priv->vspaces, 0, sizeof(dev_priv->vspaces)); memset(dev_priv->chans, 0, sizeof(dev_priv->chans)); /* the channels are directly mapped to the fourth BAR */ dev_priv->chan_base = drm_get_resource_start(dev, 3); dev_priv->chan_size = drm_get_resource_len(dev, 3); dev_priv->is_nv50 = dev_priv->chan_size == 128 * 0x2000 ? 
1 : 0; #if 0 struct drm_nouveau_private *dev_priv; uint32_t reg0, strap; resource_size_t mmio_start_offs; int ret; dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); if (!dev_priv) return -ENOMEM; dev->dev_private = dev_priv; dev_priv->dev = dev; dev_priv->flags = flags/* & NOUVEAU_FLAGS*/; dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; NV_DEBUG(dev, "vendor: 0x%X device: 0x%X\n", dev->pci_vendor, dev->pci_device); dev_priv->wq = create_workqueue("nouveau"); if (!dev_priv->wq) return -EINVAL; /* resource 0 is mmio regs */ /* resource 1 is linear FB */ /* resource 2 is RAMIN (mmio regs + 0x1000000) */ /* resource 6 is bios */ /* map the mmio regs */ mmio_start_offs = drm_get_resource_start(dev, 0); ret = drm_addmap(dev, mmio_start_offs, 0x00800000, _DRM_REGISTERS, _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio); if (ret) { NV_ERROR(dev, "Unable to initialize the mmio mapping. " "Please report your setup to " DRIVER_EMAIL "\n"); return ret; } NV_DEBUG(dev, "regs mapped ok at 0x%llx\n", (unsigned long long)mmio_start_offs); #ifdef __BIG_ENDIAN /* Put the card in BE mode if it's not */ if (nv_rd32(dev, NV03_PMC_BOOT_1)) nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001); DRM_MEMORYBARRIER(); #endif /* Time to determine the card architecture */ reg0 = nv_rd32(dev, NV03_PMC_BOOT_0); /* We're dealing with >=NV10 */ if ((reg0 & 0x0f000000) > 0) { /* Bit 27-20 contain the architecture in hex */ dev_priv->chipset = (reg0 & 0xff00000) >> 20; /* NV04 or NV05 */ } else if ((reg0 & 0xff00fff0) == 0x20004000) {
/*
 * Bring up the PGRAPH engine on NV20-class hardware: reset the engine,
 * allocate the channel context pointer table, load default register
 * state, mirror the PFB tiling configuration into PGRAPH, and program
 * the RAM configuration and clipping limits.
 *
 * Returns 0 on success or a negative errno from the context-table
 * allocation.
 */
int nv20_graph_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv =
		(struct drm_nouveau_private *)dev->dev_private;
	uint32_t tmp, vramsz;
	int ret, i;

	/* Toggle the PGRAPH enable bit in PMC off and back on. */
	NV_WRITE(NV03_PMC_ENABLE,
		 NV_READ(NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
	NV_WRITE(NV03_PMC_ENABLE,
		 NV_READ(NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);

	/* Create Context Pointer Table: 32 entries of 4 bytes each,
	 * zero-filled, 16-byte aligned. */
	dev_priv->ctx_table_size = 32 * 4;
	if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
					  dev_priv->ctx_table_size, 16,
					  NVOBJ_FLAG_ZERO_ALLOC,
					  &dev_priv->ctx_table)))
		return ret;

	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE,
		 dev_priv->ctx_table->instance >> 4);

	nv20_graph_rdi(dev);

	/* Ack any pending interrupts, then enable all of them. */
	NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
	NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);

	NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
	NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700);
	NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
	NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);
	NV_WRITE(0x40009C           , 0x00000040);

	/* NV25+ and older NV20 take different magic defaults. */
	if (dev_priv->chipset >= 0x25) {
		NV_WRITE(0x400890, 0x00080000);
		NV_WRITE(0x400610, 0x304B1FB6);
		NV_WRITE(0x400B80, 0x18B82880);
		NV_WRITE(0x400B84, 0x44000000);
		NV_WRITE(0x400098, 0x40000080);
		NV_WRITE(0x400B88, 0x000000ff);
	} else {
		NV_WRITE(0x400880, 0x00080000); /* 0x0008c7df */
		NV_WRITE(0x400094, 0x00000005);
		NV_WRITE(0x400B80, 0x45CAA208); /* 0x45eae20e */
		NV_WRITE(0x400B84, 0x24000000);
		NV_WRITE(0x400098, 0x00000040);
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00038);
		NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030);
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E10038);
		NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030);
	}

	/* copy tile info from PFB */
	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
		NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i)));
			/* which is NV40_PGRAPH_TLIMIT0(i) ?? */
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0030+i*4);
		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TLIMIT(i)));
		NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i)));
			/* which is NV40_PGRAPH_TSIZE0(i) ?? */
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0050+i*4);
		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TSIZE(i)));
		NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i)));
			/* which is NV40_PGRAPH_TILE0(i) ?? */
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0010+i*4);
		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TILE(i)));
	}

	/* Mirror eight more PFB words (0x100300..) into PGRAPH/RDI. */
	for (i = 0; i < 8; i++) {
		NV_WRITE(0x400980+i*4, NV_READ(0x100300+i*4));
		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0090+i*4);
		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100300+i*4));
	}
	NV_WRITE(0x4009a0, NV_READ(0x100324));
	NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
	NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100324));

	NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100);
	NV_WRITE(NV10_PGRAPH_STATE      , 0xFFFFFFFF);
	NV_WRITE(NV04_PGRAPH_FIFO       , 0x00000001);

	/* Read-modify-write SURFACE twice: first keep only bits 8..18,
	 * then set bits 0x00020100 on top of the result. */
	tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
	NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
	tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
	NV_WRITE(NV10_PGRAPH_SURFACE, tmp);

	/* begin RAM config */
	/* NOTE(review): vramsz is already len-1, and vramsz-1 (i.e. len-2)
	 * is what reaches 0x400864/0x400868 below — looks like a double
	 * off-by-one; confirm against the nv10 PGRAPH init code. */
	vramsz = drm_get_resource_len(dev, 0) - 1;
	NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0));
	NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1));
	NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
	NV_WRITE(NV10_PGRAPH_RDI_DATA , NV_READ(NV04_PFB_CFG0));
	NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
	NV_WRITE(NV10_PGRAPH_RDI_DATA , NV_READ(NV04_PFB_CFG1));
	NV_WRITE(0x400820, 0);
	NV_WRITE(0x400824, 0);
	NV_WRITE(0x400864, vramsz-1);
	NV_WRITE(0x400868, vramsz-1);

	/* interesting.. the below overwrites some of the tile setup above.. */
	NV_WRITE(0x400B20, 0x00000000);
	NV_WRITE(0x400B04, 0xFFFFFFFF);

	/* Open the user clip rectangle to the full coordinate range. */
	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
	NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);

	return 0;
}