struct intel_register_map
intel_get_register_map(uint32_t devid)
{
	struct intel_register_map map;
	const int gen = intel_gen(devid);

	if (gen >= 6) {
		map.map = gen6_gt_register_map;
		map.top = 0x180000;
	} else if (IS_BROADWATER(devid) || IS_CRESTLINE(devid)) {
		map.map = gen_bwcl_register_map;
		map.top = 0x80000;
	} else if (gen >= 4) {
		map.map = gen4_register_map;
		map.top = 0x80000;
	} else {
		igt_fail_on("Gen2/3 Ranges are not supported. Please use "
			    "unsafe access.");
	}

	map.alignment_mask = 0x3;

	return map;
}
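/*
 * Illustrative usage only, not part of intel-gpu-tools: a caller is expected
 * to check a candidate register offset against the map.top bound and the
 * map.alignment_mask returned above before touching MMIO space. The helper
 * below is a hypothetical sketch of that check (it assumes <stdint.h>,
 * <stdbool.h> and the intel_register_map declarations are in scope) and it
 * ignores the finer-grained per-range lookup that map.map would allow.
 */
static bool register_offset_ok(uint32_t devid, uint32_t offset)
{
	struct intel_register_map map = intel_get_register_map(devid);

	if (offset & map.alignment_mask)	/* offsets must be dword aligned */
		return false;
	if (offset >= map.top)			/* beyond the last mapped range */
		return false;
	return true;
}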
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
		return -ENODEV;
	}

	/* UMS needs agp support. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
		return -EINVAL;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	gpu_perf_dev_priv = (void *)dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines.
	 */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	if (i915_start_vgt(dev->pdev))
		i915_host_mediate = true;
	printk("i915_start_vgt: %s\n", i915_host_mediate ? "success" : "fail");

	i915_check_vgt(dev_priv);
	if (USES_VGT(dev))
		i915.enable_fbc = 0;

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_regs;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* WARNING: Apparently we must kick fbdev drivers before vgacon,
		 * otherwise the vga fbdev driver falls over.
		 */
		ret = i915_kick_out_firmware_fb(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
			goto out_gtt;
		}

		ret = i915_kick_out_vgacon(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting VGA console\n");
			goto out_gtt;
		}
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->dp_wq == NULL) {
		DRM_ERROR("Failed to create our dp workqueue.\n");
		ret = -ENOMEM;
		goto out_freewq;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_device_info_runtime_init(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_power_well;
		}
#ifdef DRM_I915_VGT_SUPPORT
		if (USES_VGT(dev)) {
			/*
			 * Tell VGT that we have a valid surface to show
			 * after modesetting. We don't distinguish DOM0 and
			 * a Linux guest here; the PVINFO write handler will
			 * handle this.
			 */
			I915_WRITE(vgt_info_off(display_ready), 1);
		}
#endif
	}

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->dp_wq);
out_freewq:
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_regs:
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
	kfree(dev_priv);
	return ret;
}
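/*
 * Toy userspace sketch (not driver code) of the error-unwind pattern used by
 * i915_driver_load() above: each successfully acquired resource gets a label
 * in the cleanup ladder, and every later failure jumps to the label that
 * releases everything acquired so far, in reverse order. The names and the
 * malloc() stand-ins below are purely illustrative.
 */
#include <errno.h>
#include <stdlib.h>

static int load_example(void)
{
	void *regs, *gtt, *wq;
	int ret;

	regs = malloc(64);		/* stands in for pci_iomap() */
	if (!regs)
		return -ENOMEM;

	gtt = malloc(64);		/* stands in for i915_gem_gtt_init() */
	if (!gtt) {
		ret = -ENOMEM;
		goto out_regs;
	}

	wq = malloc(64);		/* stands in for alloc_ordered_workqueue() */
	if (!wq) {
		ret = -ENOMEM;
		goto out_gtt;
	}

	/* success: ownership of regs/gtt/wq passes to the rest of the driver */
	return 0;

out_gtt:
	free(gtt);
out_regs:
	free(regs);
	return ret;
}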
static struct sg_table *
i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int npages = obj->base.size / PAGE_SIZE;
	struct sg_table *st;
	struct scatterlist *sg;
	int max_order;
	gfp_t gfp;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	st->nents = 0;

	max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
		max_order = min(max_order, ilog2(IO_TLB_SEGPAGES));
#endif

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : 0), order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	/* Mark the pages as dontneed whilst they are still pinned. As soon
	 * as they are unpinned they are allowed to be reaped by the shrinker,
	 * and the caller is expected to repopulate - the contents of this
	 * object are only valid whilst active and pinned.
	 */
	obj->mm.madv = I915_MADV_DONTNEED;
	return st;

err:
	sg_mark_end(sg);
	internal_free_pages(st);
	return ERR_PTR(-ENOMEM);
}
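/*
 * Userspace analogue (illustrative only; fill() and try_alloc() are
 * hypothetical names, and malloc() stands in for alloc_pages()) of the
 * allocation strategy above: request the largest order that still fits the
 * remaining page count, and whenever an order fails, clamp max_order so every
 * later attempt starts smaller as well, mirroring the "Limit subsequent
 * allocations" step in the kernel loop.
 */
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_PAGE_SIZE 4096u

static void *try_alloc(unsigned int order)
{
	/* stands in for alloc_pages(); NULL models an allocation failure */
	return malloc((size_t)SKETCH_PAGE_SIZE << order);
}

static int fill(unsigned int npages, unsigned int max_order)
{
	while (npages) {
		unsigned int order = max_order;
		void *chunk;

		/* never request more pages than are still needed */
		while (order && (1u << order) > npages)
			order--;

		for (;;) {
			chunk = try_alloc(order);
			if (chunk)
				break;
			if (!order--)
				return -1;	/* even a single page failed */
			max_order = order;	/* limit subsequent allocations too */
		}

		printf("allocated %u page(s) at order %u\n", 1u << order, order);
		npages -= 1u << order;
		free(chunk);			/* a real caller would keep the chunk */
	}
	return 0;
}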