/*
 * mtrr_setup - replace any BIOS-configured MTRR covering
 * [mem_start, mem_start + mem_size) with a write-combining one.
 *
 * Each mtrr_add()/mtrr_del() pair below relies on mtrr_add() returning the
 * register index of an already-present matching entry, so the paired
 * mtrr_del() removes a pre-existing MTRR of that type over the region.
 * Finally a WRCOMB MTRR is installed for the region (best effort).
 *
 * Returns 0 on success or a negative MTRR error code.
 */
static int mtrr_setup(struct pci_dev *pdev, resource_size_t mem_start,
		      resource_size_t mem_size)
{
	int err = 0;
	int mtrr;

	/* Remove any UNCACHABLE MTRR covering the region. */
	mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_UNCACHABLE, 0);
	if (mtrr < 0) {
		dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
			__LINE__, __func__, mtrr);
		err = mtrr;
		goto err_out;
	}

	err = mtrr_del(mtrr, mem_start, mem_size);
	if (err < 0) {
		dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n",
			__LINE__, __func__, err);
		goto err_out;
	}

	/* Remove any WRBACK MTRR covering the region. */
	mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRBACK, 0);
	if (mtrr < 0) {
		/*
		 * Stop, but not an error, as this may already be set up.
		 * BUGFIX: the original message said "mtrr_del failed" and
		 * printed the stale 'err' value instead of 'mtrr'.
		 */
		dev_dbg(&pdev->dev,
			"%d - %s: mtrr_add failed (%d) - probably means the mtrr is already setup\n",
			__LINE__, __func__, mtrr);
		err = 0;
		goto err_out;
	}

	err = mtrr_del(mtrr, mem_start, mem_size);
	if (err < 0) {
		dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n",
			__LINE__, __func__, err);
		goto err_out;
	}

	if (mtrr == 0) {
		/*
		 * The deleted WRBACK entry occupied register 0; re-add a
		 * WRBACK MTRR covering [0, mem_start) so low memory keeps
		 * write-back caching without overlapping the device region.
		 */
		err = mtrr_add(0, mem_start, MTRR_TYPE_WRBACK, 0);
		if (err < 0) {
			dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
				__LINE__, __func__, err);
			goto err_out;
		}
	}

	/* Map the device region write-combining; failure is only logged. */
	mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRCOMB, 0);
	if (mtrr < 0)
		dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
			__LINE__, __func__, mtrr);
	err = 0;

err_out:
	return err;
}
/*
 * Initialize mappings. On Savage4 and SavageIX the alignment
 * and size of the aperture is not suitable for automatic MTRR setup
 * in drm_addmap. Therefore we add them manually before the maps are
 * initialized, and tear them down on last close.
 */
int savage_driver_firstopen(drm_device_t *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	unsigned long mmio_base, fb_base, fb_size, aperture_base;
	/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
	 * in case we decide we need information on the BAR for BSD in the
	 * future.
	 */
	unsigned int fb_rsrc, aper_rsrc;
	int ret = 0;

	/* -1 marks "no MTRR allocated" so teardown can skip unused slots. */
	dev_priv->mtrr[0].handle = -1;
	dev_priv->mtrr[1].handle = -1;
	dev_priv->mtrr[2].handle = -1;
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		/* Savage3D series: FB, MMIO and aperture all share BAR 0. */
		fb_rsrc = 0;
		fb_base = drm_get_resource_start(dev, 0);
		fb_size = SAVAGE_FB_SIZE_S3;
		mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
		aper_rsrc = 0;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (drm_get_resource_len(dev, 0) == 0x08000000) {
			/* Don't make MMIO write-combining! We need 3
			 * MTRRs: the range fb_base+0x01000000 ..
			 * fb_base+0x02000000 is deliberately left
			 * uncovered (presumably because the MMIO
			 * registers live there -- TODO confirm). */
			dev_priv->mtrr[0].base = fb_base;
			dev_priv->mtrr[0].size = 0x01000000;
			dev_priv->mtrr[0].handle =
			    mtrr_add(dev_priv->mtrr[0].base,
				     dev_priv->mtrr[0].size,
				     MTRR_TYPE_WRCOMB, 1);
			dev_priv->mtrr[1].base = fb_base + 0x02000000;
			dev_priv->mtrr[1].size = 0x02000000;
			dev_priv->mtrr[1].handle =
			    mtrr_add(dev_priv->mtrr[1].base,
				     dev_priv->mtrr[1].size,
				     MTRR_TYPE_WRCOMB, 1);
			dev_priv->mtrr[2].base = fb_base + 0x04000000;
			dev_priv->mtrr[2].size = 0x04000000;
			dev_priv->mtrr[2].handle =
			    mtrr_add(dev_priv->mtrr[2].base,
				     dev_priv->mtrr[2].size,
				     MTRR_TYPE_WRCOMB, 1);
		} else {
			DRM_ERROR("strange pci_resource_len %08lx\n",
				  drm_get_resource_len(dev, 0));
		}
	} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
		   dev_priv->chipset != S3_SAVAGE2000) {
		/* Savage4/SavageIX: MMIO in BAR 0, FB + aperture in BAR 1. */
		mmio_base = drm_get_resource_start(dev, 0);
		fb_rsrc = 1;
		fb_base = drm_get_resource_start(dev, 1);
		fb_size = SAVAGE_FB_SIZE_S4;
		aper_rsrc = 1;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (drm_get_resource_len(dev, 1) == 0x08000000) {
			/* Can use one MTRR to cover both fb and
			 * aperture. */
			dev_priv->mtrr[0].base = fb_base;
			dev_priv->mtrr[0].size = 0x08000000;
			dev_priv->mtrr[0].handle =
			    mtrr_add(dev_priv->mtrr[0].base,
				     dev_priv->mtrr[0].size,
				     MTRR_TYPE_WRCOMB, 1);
		} else {
			DRM_ERROR("strange pci_resource_len %08lx\n",
				  drm_get_resource_len(dev, 1));
		}
	} else {
		/* SuperSavage / Savage2000: three separate BARs whose
		 * alignment suits drm_addmap's automatic MTRR handling. */
		mmio_base = drm_get_resource_start(dev, 0);
		fb_rsrc = 1;
		fb_base = drm_get_resource_start(dev, 1);
		fb_size = drm_get_resource_len(dev, 1);
		aper_rsrc = 2;
		aperture_base = drm_get_resource_start(dev, 2);
		/* Automatic MTRR setup will do the right thing. */
	}

	/* Register the three maps; any drm_addmap failure aborts the open. */
	ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
			 _DRM_READ_ONLY, &dev_priv->mmio);
	if (ret)
		return ret;

	ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
			 _DRM_WRITE_COMBINING, &dev_priv->fb);
	if (ret)
		return ret;

	ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
			 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
			 &dev_priv->aperture);
	if (ret)
		return ret;

	return ret;
}
static int goldfish_audio_probe(struct platform_device *pdev) { int ret; struct resource *r; struct goldfish_audio *data; dma_addr_t buf_addr; printk("goldfish_audio_probe\n"); data = kzalloc(sizeof(*data), GFP_KERNEL); if(data == NULL) { ret = -ENOMEM; goto err_data_alloc_failed; } spin_lock_init(&data->lock); init_waitqueue_head(&data->wait); platform_set_drvdata(pdev, data); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if(r == NULL) { printk("platform_get_resource failed\n"); ret = -ENODEV; goto err_no_io_base; } #if defined(CONFIG_ARM) data->reg_base = (char __iomem *)IO_ADDRESS(r->start - IO_START); #elif defined(CONFIG_X86) || defined(CONFIG_MIPS) data->reg_base = ioremap(r->start, PAGE_SIZE); #else #error NOT SUPPORTED #endif data->irq = platform_get_irq(pdev, 0); if(data->irq < 0) { printk("platform_get_irq failed\n"); ret = -ENODEV; goto err_no_irq; } #if defined(CONFIG_ARM) data->buffer_virt = dma_alloc_writecombine(&pdev->dev, COMBINED_BUFFER_SIZE, &buf_addr, GFP_KERNEL); #elif defined(CONFIG_X86) || defined(CONFIG_MIPS) data->buffer_virt = dma_alloc_coherent(NULL, COMBINED_BUFFER_SIZE, &buf_addr, GFP_KERNEL); #else #error NOT SUPPORTED #endif if(data->buffer_virt == 0) { ret = -ENOMEM; goto err_alloc_write_buffer_failed; } #ifdef CONFIG_X86 mtrr_add(buf_addr, COMBINED_BUFFER_SIZE, MTRR_TYPE_WRBACK, 1); #endif data->buffer_phys = buf_addr; data->write_buffer1 = data->buffer_virt; data->write_buffer2 = data->buffer_virt + WRITE_BUFFER_SIZE; data->read_buffer = data->buffer_virt + 2 * WRITE_BUFFER_SIZE; ret = request_irq(data->irq, goldfish_audio_interrupt, IRQF_SHARED, pdev->name, data); if(ret) goto err_request_irq_failed; if((ret = misc_register(&goldfish_audio_device))) { printk("misc_register returned %d in goldfish_audio_init\n", ret); goto err_misc_register_failed; } GOLDFISH_AUDIO_WRITE(data, AUDIO_SET_WRITE_BUFFER_1, buf_addr); GOLDFISH_AUDIO_WRITE(data, AUDIO_SET_WRITE_BUFFER_2, buf_addr + WRITE_BUFFER_SIZE); data->read_supported = 
GOLDFISH_AUDIO_READ(data, AUDIO_READ_SUPPORTED); if (data->read_supported) GOLDFISH_AUDIO_WRITE(data, AUDIO_SET_READ_BUFFER, buf_addr + 2 * WRITE_BUFFER_SIZE); audio_data = data; return 0; err_misc_register_failed: err_request_irq_failed: #if defined(CONFIG_ARM) dma_free_writecombine(&pdev->dev, COMBINED_BUFFER_SIZE, data->buffer_virt, data->buffer_phys); err_alloc_write_buffer_failed: err_no_irq: #elif defined(CONFIG_X86) || defined(CONFIG_MIPS) dma_free_coherent(NULL, COMBINED_BUFFER_SIZE, data->buffer_virt, data->buffer_phys); err_alloc_write_buffer_failed: err_no_irq: iounmap(data->reg_base); #else #error NOT SUPPORTED #endif err_no_io_base: kfree(data); err_data_alloc_failed: return ret; }
/*************************************************************************/ /*!
@Function       OSPCIClearResourceMTRRs
@Description    Clear any BIOS-configured MTRRs for a PCI memory region
@Input          hPVRPCI                 PCI device handle
@Input          ui32Index               Address range index
@Return         PVRSRV_ERROR            Services error code
*/ /**************************************************************************/
PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
{
    PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
    resource_size_t start, end;
    int err;

    start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
    end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1;

    /*
     * Add and immediately delete an UNCACHABLE MTRR over the BAR:
     * mtrr_add() returns the register index of an existing matching entry,
     * so the paired mtrr_del() removes any BIOS-configured UNCACHABLE MTRR
     * covering the region.
     */
    err = mtrr_add(start, end - start, MTRR_TYPE_UNCACHABLE, 0);
    if (err < 0)
    {
        /* BUGFIX: every error message below was missing its '\n'. */
        printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)\n", err);
        return PVRSRV_ERROR_PCI_CALL_FAILED;
    }

    err = mtrr_del(err, start, end - start);
    if (err < 0)
    {
        printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)\n", err);
        return PVRSRV_ERROR_PCI_CALL_FAILED;
    }

#if 1
    /* Workaround for overlapping MTRRs. */
    {
        IMG_BOOL bGotMTRR0 = IMG_FALSE;

        /* Current mobo BIOSes will normally set up a WRBACK MTRR spanning
         * 0->4GB, and then another 4GB->6GB. If the PCI card's automatic &
         * overlapping UNCACHABLE MTRR is deleted, we see WRBACK behaviour.
         *
         * WRBACK is incompatible with some PCI devices, so try to split
         * the UNCACHABLE regions up and insert a WRCOMB region instead.
         */
        err = mtrr_add(start, end - start, MTRR_TYPE_WRBACK, 0);
        if (err < 0)
        {
            /* If this fails, services has probably run before and created
             * a write-combined MTRR for the test chip. Assume it has, and
             * don't return an error here.
             */
            return PVRSRV_OK;
        }

        /* Register 0 means the BAR was covered by the boot-time WRBACK MTRR. */
        if (err == 0)
            bGotMTRR0 = IMG_TRUE;

        err = mtrr_del(err, start, end - start);
        if (err < 0)
        {
            printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)\n", err);
            return PVRSRV_ERROR_PCI_CALL_FAILED;
        }

        if (bGotMTRR0)
        {
            /* Replace 0 with a non-overlapping WRBACK MTRR */
            err = mtrr_add(0, start, MTRR_TYPE_WRBACK, 0);
            if (err < 0)
            {
                printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)\n", err);
                return PVRSRV_ERROR_PCI_CALL_FAILED;
            }

            /* Add a WRCOMB MTRR for the PCI device memory bar */
            err = mtrr_add(start, end - start, MTRR_TYPE_WRCOMB, 0);
            if (err < 0)
            {
                printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)\n", err);
                return PVRSRV_ERROR_PCI_CALL_FAILED;
            }
        }
    }
#endif

    return PVRSRV_OK;
}
/*
 * Populate a freshly-allocated drm_device_t bound to @pdev: locks and
 * timers, PCI identity, the map list, statistics counters, the driver
 * hook table, then the driver's load hook, optional AGP/MTRR setup and
 * the context bitmap.  Returns 0 on success or a negative errno; once
 * the driver load hook has run, failures unwind through drm_lastclose().
 */
static int fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
		       const struct pci_device_id *ent,
		       struct drm_driver *driver)
{
	int err;

	spin_lock_init(&dev->count_lock);
	init_timer(&dev->timer);
	sema_init(&dev->struct_sem, 1);
	sema_init(&dev->ctxlist_sem, 1);

	/* Record the device's PCI identity. */
	dev->pdev = pdev;
#ifdef __alpha__
	dev->hose = pdev->sysdata;
	dev->pci_domain = dev->hose->bus->number;
#else
	dev->pci_domain = 0;
#endif
	dev->pci_bus = pdev->bus->number;
	dev->pci_slot = PCI_SLOT(pdev->devfn);
	dev->pci_func = PCI_FUNC(pdev->devfn);
	dev->irq = pdev->irq;

	dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
	if (!dev->maplist)
		return -ENOMEM;
	INIT_LIST_HEAD(&dev->maplist->head);

	/* the DRM has 6 counters */
	dev->counters = 6;
	dev->types[0] = _DRM_STAT_LOCK;
	dev->types[1] = _DRM_STAT_OPENS;
	dev->types[2] = _DRM_STAT_CLOSES;
	dev->types[3] = _DRM_STAT_IOCTLS;
	dev->types[4] = _DRM_STAT_LOCKS;
	dev->types[5] = _DRM_STAT_UNLOCKS;

	dev->driver = driver;

	/* Let the driver hook in before the core finishes setup. */
	if (dev->driver->load) {
		err = dev->driver->load(dev, ent->driver_data);
		if (err)
			goto error_out_unreg;
	}

	if (drm_core_has_AGP(dev)) {
		if (drm_device_is_agp(dev))
			dev->agp = drm_agp_init(dev);
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
		    && dev->agp == NULL) {
			DRM_ERROR("Cannot initialize the agpgart module.\n");
			err = -EINVAL;
			goto error_out_unreg;
		}
		if (drm_core_has_MTRR(dev) && dev->agp)
			dev->agp->agp_mtrr =
			    mtrr_add(dev->agp->agp_info.aper_base,
				     dev->agp->agp_info.aper_size *
				     1024 * 1024, MTRR_TYPE_WRCOMB, 1);
	}

	err = drm_ctxbitmap_init(dev);
	if (err) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto error_out_unreg;
	}

	return 0;

error_out_unreg:
	drm_lastclose(dev);
	return err;
}
static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver) { int retcode; INIT_LIST_HEAD(&dev->filelist); INIT_LIST_HEAD(&dev->ctxlist); INIT_LIST_HEAD(&dev->vmalist); INIT_LIST_HEAD(&dev->maplist); spin_lock_init(&dev->count_lock); spin_lock_init(&dev->drw_lock); spin_lock_init(&dev->tasklet_lock); spin_lock_init(&dev->lock.spinlock); init_timer(&dev->timer); mutex_init(&dev->struct_mutex); mutex_init(&dev->ctxlist_mutex); idr_init(&dev->drw_idr); dev->pdev = pdev; dev->pci_device = pdev->device; dev->pci_vendor = pdev->vendor; #ifdef __alpha__ dev->hose = pdev->sysdata; #endif dev->irq = pdev->irq; if (drm_ht_create(&dev->map_hash, 12)) { return -ENOMEM; } /* the DRM has 6 basic counters */ dev->counters = 6; dev->types[0] = _DRM_STAT_LOCK; dev->types[1] = _DRM_STAT_OPENS; dev->types[2] = _DRM_STAT_CLOSES; dev->types[3] = _DRM_STAT_IOCTLS; dev->types[4] = _DRM_STAT_LOCKS; dev->types[5] = _DRM_STAT_UNLOCKS; dev->driver = driver; if (drm_core_has_AGP(dev)) { if (drm_device_is_agp(dev)) dev->agp = drm_agp_init(dev); if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) { DRM_ERROR("Cannot initialize the agpgart module.\n"); retcode = -EINVAL; goto error_out_unreg; } if (drm_core_has_MTRR(dev)) { if (dev->agp) dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base, dev->agp->agp_info.aper_size * 1024 * 1024, MTRR_TYPE_WRCOMB, 1); } } if (dev->driver->load) if ((retcode = dev->driver->load(dev, ent->driver_data))) goto error_out_unreg; retcode = drm_ctxbitmap_init(dev); if (retcode) { DRM_ERROR("Cannot allocate memory for context bitmap.\n"); goto error_out_unreg; } return 0; error_out_unreg: drm_lastclose(dev); return retcode; }
/*
 * Initialize AGP through the Linux agpgart backend.
 *
 * On success, *ap_phys_base receives the aperture's physical base address
 * and *ap_limit the offset of its last byte, and RM_OK is returned.
 * When AGPGART support is compiled out, this is a stub returning RM_ERROR.
 */
RM_STATUS KernInitAGP(
    nv_stack_t *sp,
    nv_state_t *nv,
    NvU64 *ap_phys_base,
    NvU64 *ap_limit
)
{
#ifndef AGPGART
    return RM_ERROR;
#else
    RM_STATUS status = RM_ERROR;
    nv_linux_state_t *nvl;
    void *bitmap;
    agp_kern_info agp_info;
    NvU32 bitmap_size;
    /* Default rate mask advertises 1x/2x/4x/8x until the registry overrides it. */
    NvU32 agp_rate = (8 | 4 | 2 | 1);
    NvU32 enable_sba = 0;
    NvU32 enable_fw = 0;
    NvU32 agp_mode = 0;

#if defined(KERNEL_2_4)
    if (!(drm_agp_p = inter_module_get_request("drm_agp", "agpgart")))
        return RM_ERR_NOT_SUPPORTED;
#endif

    /* NOTE: from here down, return an error code of '-1'
     * that indicates that agpgart is loaded, but we failed to use it
     * in some way. This is so we don't try to use nvagp and lock up
     * the memory controller.
     */

    nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if (NV_AGPGART_BACKEND_ACQUIRE(drm_agp_p, nvl->agp_bridge, nvl->dev))
    {
        nv_printf(NV_DBG_INFO, "NVRM: AGPGART: no backend available\n");
        status = RM_ERR_NOT_SUPPORTED;
        goto bailout;
    }

    if (NV_AGPGART_COPY_INFO(drm_agp_p, nvl->agp_bridge, &agp_info))
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: AGPGART: kernel reports chipset as unsupported\n");
        goto release;
    }

    if (nv_pat_mode == NV_PAT_MODE_DISABLED)
    {
#ifdef CONFIG_MTRR
        /*
         * Failure to set a write-combining range on the AGP aperture may
         * be due to the presence of other memory ranges with conflicting
         * caching attributes. Play safe and fail AGP initialization.
         *
         * Note: without CONFIG_MTRR the braced block below executes
         * unconditionally -- no PAT and no MTRR means write-combining is
         * unavailable, so AGP init fails.
         */
        if (mtrr_add(agp_info.aper_base, agp_info.aper_size << 20,
                MTRR_TYPE_WRCOMB, 0) < 0)
#endif
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: AGPGART: unable to set MTRR write-combining\n");
            goto release;
        }
    }

    // allocate and set the bitmap for tracking agp allocations
    bitmap_size = (agp_info.aper_size << 20)/PAGE_SIZE/8;
    if (os_alloc_mem(&bitmap, bitmap_size))
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: AGPGART: unable to allocate bitmap\n");
        goto failed;
    }

    /* Mark every aperture page as free and hand the bitmap to the RM. */
    os_mem_set(bitmap, 0xff, bitmap_size);
    status = rm_set_agp_bitmap(sp, nv, bitmap);
    if (status != RM_OK)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: AGPGART: unable to set bitmap\n");
        os_free_mem(bitmap);
        goto failed;
    }

    agp_mode = agp_info.mode;
    rm_read_registry_dword(sp, NULL, "NVreg", "ReqAGPRate", &agp_rate);
    agp_mode = NV_AGPGART_MODE_BITS_RATE(agp_mode, agp_rate);
    agp_mode |= 1; /* avoid 0x mode request */
    /* Keep only the highest requested rate bit (4x beats 2x beats 1x). */
    if (agp_mode & 2)
        agp_mode &= ~1;
    if (agp_mode & 4)
        agp_mode &= ~2;

    /* Fold in the registry's sideband-addressing and fast-write requests. */
    rm_read_registry_dword(sp, NULL, "NVreg", "EnableAGPSBA", &enable_sba);
    agp_mode |= NV_AGPGART_MODE_BITS_SBA(enable_sba);

    rm_read_registry_dword(sp, NULL, "NVreg", "EnableAGPFW", &enable_fw);
    agp_mode |= NV_AGPGART_MODE_BITS_FW(enable_fw);

    /* Restrict the chipset-advertised mode to what was requested above. */
    agp_info.mode &= (0xff000000 | agp_mode);
    NV_AGPGART_BACKEND_ENABLE(drm_agp_p, nvl->agp_bridge, agp_info.mode);

    /* NOTE(review): the (unsigned) casts truncate to 32 bits, which assumes
     * the aperture sits below 4GB -- TODO confirm for 64-bit platforms. */
    *ap_phys_base = (unsigned)agp_info.aper_base;
    *ap_limit = (unsigned)((agp_info.aper_size << 20) - 1);

    return RM_OK;

failed:
#ifdef CONFIG_MTRR
    /* A handle of -1 asks mtrr_del() to look the entry up by base/size. */
    if (nv_pat_mode == NV_PAT_MODE_DISABLED)
        mtrr_del(-1, agp_info.aper_base, agp_info.aper_size << 20);
#endif
release:
    NV_AGPGART_BACKEND_RELEASE(drm_agp_p, nvl->agp_bridge);
bailout:
#if defined(KERNEL_2_4)
    inter_module_put("drm_agp");
#endif
    return status;
#endif /* AGPGART */
}