Example #1
/*
 * Initialize mappings. On Savage4 and SavageIX the alignment
 * and size of the aperture are not suitable for automatic MTRR setup
 * in drm_addmap. Therefore we add them manually before the maps are
 * initialized, and tear them down on last close.
 */
int savage_driver_firstopen(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	unsigned long mmio_base, fb_base, fb_size, aperture_base;
	/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
	 * in case we decide we need information on the BAR for BSD in the
	 * future.
	 */
	unsigned int fb_rsrc, aper_rsrc;
	int ret = 0;

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		fb_rsrc = 0;
		fb_base = pci_resource_start(dev->pdev, 0);
		fb_size = SAVAGE_FB_SIZE_S3;
		mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
		aper_rsrc = 0;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (pci_resource_len(dev->pdev, 0) == 0x08000000) {
			/* Don't make MMIO write-combining! We need 3
			 * MTRRs to leave out the MMIO window that sits
			 * between the framebuffer and the aperture. */
			dev_priv->mtrr_handles[0] =
				arch_phys_wc_add(fb_base, 0x01000000);
			dev_priv->mtrr_handles[1] =
				arch_phys_wc_add(fb_base + 0x02000000,
						 0x02000000);
			dev_priv->mtrr_handles[2] =
				arch_phys_wc_add(fb_base + 0x04000000,
						0x04000000);
		} else {
			DRM_ERROR("strange pci_resource_len %08llx\n",
				  (unsigned long long)
				  pci_resource_len(dev->pdev, 0));
		}
	} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
		   dev_priv->chipset != S3_SAVAGE2000) {
		mmio_base = pci_resource_start(dev->pdev, 0);
		fb_rsrc = 1;
		fb_base = pci_resource_start(dev->pdev, 1);
		fb_size = SAVAGE_FB_SIZE_S4;
		aper_rsrc = 1;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (pci_resource_len(dev->pdev, 1) == 0x08000000) {
			/* Can use one MTRR to cover both fb and
			 * aperture. */
			dev_priv->mtrr_handles[0] =
				arch_phys_wc_add(fb_base,
						 0x08000000);
		} else {
			DRM_ERROR("strange pci_resource_len %08llx\n",
				  (unsigned long long)
				  pci_resource_len(dev->pdev, 1));
		}
	} else {
		mmio_base = pci_resource_start(dev->pdev, 0);
		fb_rsrc = 1;
		fb_base = pci_resource_start(dev->pdev, 1);
		fb_size = pci_resource_len(dev->pdev, 1);
		aper_rsrc = 2;
		aperture_base = pci_resource_start(dev->pdev, 2);
		/* Automatic MTRR setup will do the right thing. */
	}

	ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
			 _DRM_READ_ONLY, &dev_priv->mmio);
	if (ret)
		return ret;

	ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
			 _DRM_WRITE_COMBINING, &dev_priv->fb);
	if (ret)
		return ret;

	ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
			 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
			 &dev_priv->aperture);
	return ret;
}
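As the header comment promises, the MTRRs added here by hand have to be removed again on last close; a minimal sketch of that counterpart, assuming the usual savage_driver_lastclose() hook (the maps themselves are torn down by the DRM core), could look like this:

void savage_driver_lastclose(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	int i;

	/* Drop the write-combining MTRRs added in firstopen.
	 * arch_phys_wc_del() is a no-op for handles that were never set. */
	for (i = 0; i < 3; ++i) {
		arch_phys_wc_del(dev_priv->mtrr_handles[i]);
		dev_priv->mtrr_handles[i] = 0;
	}
}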
Example #2
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
		return -ENODEV;
	}

	/* UMS needs agp support. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
		return -EINVAL;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	gpu_perf_dev_priv = (void *)dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we restrict this ioremap so that it does not
	 * clobber the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	if (i915_start_vgt(dev->pdev))
		i915_host_mediate = true;
	printk(KERN_INFO "i915_start_vgt: %s\n", i915_host_mediate ? "success" : "fail");

	i915_check_vgt(dev_priv);
	if (USES_VGT(dev))
		i915.enable_fbc = 0;

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_regs;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* WARNING: Apparently we must kick fbdev drivers before vgacon,
		 * otherwise the vga fbdev driver falls over. */
		ret = i915_kick_out_firmware_fb(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
			goto out_gtt;
		}

		ret = i915_kick_out_vgacon(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting VGA console\n");
			goto out_gtt;
		}
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->dp_wq == NULL) {
		DRM_ERROR("Failed to create our dp workqueue.\n");
		ret = -ENOMEM;
		goto out_freewq;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_device_info_runtime_init(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_power_well;
		}
#ifdef DRM_I915_VGT_SUPPORT
		if (USES_VGT(dev)) {
			/*
			 * Tell VGT that we have a valid surface to show
			 * after modesetting. We don't distinguish DOM0 from
			 * a Linux guest here; the PVINFO write handler will
			 * handle this.
			 */
			I915_WRITE(vgt_info_off(display_ready), 1);
		}
#endif
	}

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->dp_wq);
out_freewq:
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_regs:
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
	kfree(dev_priv);
	return ret;
}
Example #3
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nvkm_pci *pci = device->pci;
	struct nvif_mmu *mmu = &drm->client.mmu;
	struct drm_device *dev = drm->dev;
	int typei, ret;

	ret = nouveau_ttm_init_host(drm, 0);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    drm->client.device.info.chipset != 0x50) {
		ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
		if (ret)
			return ret;
	}

	if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
	    drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
					   NVIF_MEM_KIND |
					   NVIF_MEM_COMP |
					   NVIF_MEM_DISP);
		if (typei < 0)
			return -ENOSYS;

		drm->ttm.type_vram = typei;
	} else {
		drm->ttm.type_vram = -1;
	}

	if (pci && pci->agp.bridge) {
		drm->agp.bridge = pci->agp.bridge;
		drm->agp.base = pci->agp.base;
		drm->agp.size = pci->agp.size;
		drm->agp.cma = pci->agp.cma;
	}

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				  drm->ttm.bo_global_ref.ref.object,
				  &nouveau_bo_driver,
				  dev->anon_inode->i_mapping,
				  DRM_FILE_PAGE_OFFSET,
				  drm->client.mmu.dmabits <= 32);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->client.device.info.ram_user;

	arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
				   device->func->resource_size(device, 1));

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			      drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
					 device->func->resource_size(device, 1));

	/* GART init */
	if (!drm->agp.bridge) {
		drm->gem.gart_available = drm->client.vmm.vmm.limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			      drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}
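The WC reservation and MTRR taken on BAR 1 above need a matching cleanup when TTM is torn down; a minimal sketch of that counterpart, assuming a nouveau_ttm_fini() hook and the usual ttm_bo_clean_mm()/arch_io_free_memtype_wc() calls, could look like this:

void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);

	/* Undo nouveau_ttm_init() in reverse order. */
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
	arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
				device->func->resource_size(device, 1));
}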
Example #4
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
    struct vmw_private *dev_priv;
    int ret;
    uint32_t svga_id;
    enum vmw_res_type i;
    bool refuse_dma = false;

    dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
    if (unlikely(dev_priv == NULL)) {
        DRM_ERROR("Failed allocating a device private struct.\n");
        return -ENOMEM;
    }

    pci_set_master(dev->pdev);

    dev_priv->dev = dev;
    dev_priv->vmw_chipset = chipset;
    dev_priv->last_read_seqno = (uint32_t) -100;
    mutex_init(&dev_priv->cmdbuf_mutex);
    mutex_init(&dev_priv->release_mutex);
    mutex_init(&dev_priv->binding_mutex);
    rwlock_init(&dev_priv->resource_lock);
    ttm_lock_init(&dev_priv->reservation_sem);
    spin_lock_init(&dev_priv->hw_lock);
    spin_lock_init(&dev_priv->waiter_lock);
    spin_lock_init(&dev_priv->cap_lock);
    spin_lock_init(&dev_priv->svga_lock);

    for (i = vmw_res_context; i < vmw_res_max; ++i) {
        idr_init(&dev_priv->res_idr[i]);
        INIT_LIST_HEAD(&dev_priv->res_lru[i]);
    }

    mutex_init(&dev_priv->init_mutex);
    init_waitqueue_head(&dev_priv->fence_queue);
    init_waitqueue_head(&dev_priv->fifo_queue);
    dev_priv->fence_queue_waiters = 0;
    atomic_set(&dev_priv->fifo_queue_waiters, 0);

    dev_priv->used_memory_size = 0;

    dev_priv->io_start = pci_resource_start(dev->pdev, 0);
    dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
    dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

    dev_priv->enable_fb = enable_fbdev;

    vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
    svga_id = vmw_read(dev_priv, SVGA_REG_ID);
    if (svga_id != SVGA_ID_2) {
        ret = -ENOSYS;
        DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
        goto out_err0;
    }

    dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
    ret = vmw_dma_select_mode(dev_priv);
    if (unlikely(ret != 0)) {
        DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
        refuse_dma = true;
    }

    dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
    dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
    dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
    dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

    vmw_get_initial_size(dev_priv);

    if (dev_priv->capabilities & SVGA_CAP_GMR2) {
        dev_priv->max_gmr_ids =
            vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
        dev_priv->max_gmr_pages =
            vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
        dev_priv->memory_size =
            vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
        dev_priv->memory_size -= dev_priv->vram_size;
    } else {
        /*
         * An arbitrary limit of 512MiB on surface
         * memory. But all HWV8 hardware supports GMR2.
         */
        dev_priv->memory_size = 512*1024*1024;
    }
    dev_priv->max_mob_pages = 0;
    dev_priv->max_mob_size = 0;
    if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
        uint64_t mem_size =
            vmw_read(dev_priv,
                     SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

        dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
        dev_priv->prim_bb_mem =
            vmw_read(dev_priv,
                     SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
        dev_priv->max_mob_size =
            vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
        dev_priv->stdu_max_width =
            vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
        dev_priv->stdu_max_height =
            vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

        vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
        dev_priv->texture_max_width = vmw_read(dev_priv,
                                               SVGA_REG_DEV_CAP);
        vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
        dev_priv->texture_max_height = vmw_read(dev_priv,
                                                SVGA_REG_DEV_CAP);
    } else {
        dev_priv->texture_max_width = 8192;
        dev_priv->texture_max_height = 8192;
        dev_priv->prim_bb_mem = dev_priv->vram_size;
    }

    vmw_print_capabilities(dev_priv->capabilities);

    ret = vmw_dma_masks(dev_priv);
    if (unlikely(ret != 0))
        goto out_err0;

    if (dev_priv->capabilities & SVGA_CAP_GMR2) {
        DRM_INFO("Max GMR ids is %u\n",
                 (unsigned)dev_priv->max_gmr_ids);
        DRM_INFO("Max number of GMR pages is %u\n",
                 (unsigned)dev_priv->max_gmr_pages);
        DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                 (unsigned)dev_priv->memory_size / 1024);
    }
    DRM_INFO("Maximum display memory size is %u kiB\n",
             dev_priv->prim_bb_mem / 1024);
    DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
             dev_priv->vram_start, dev_priv->vram_size / 1024);
    DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
             dev_priv->mmio_start, dev_priv->mmio_size / 1024);

    ret = vmw_ttm_global_init(dev_priv);
    if (unlikely(ret != 0))
        goto out_err0;


    vmw_master_init(&dev_priv->fbdev_master);
    ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
    dev_priv->active_master = &dev_priv->fbdev_master;


    dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
                                           dev_priv->mmio_size);

    dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
                                     dev_priv->mmio_size);

    if (unlikely(dev_priv->mmio_virt == NULL)) {
        ret = -ENOMEM;
        DRM_ERROR("Failed mapping MMIO.\n");
        goto out_err3;
    }

    /* Need mmio memory to check for fifo pitchlock cap. */
    if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
        ret = -ENOSYS;
        DRM_ERROR("Hardware has no pitchlock\n");
        goto out_err4;
    }

    dev_priv->tdev = ttm_object_device_init
                     (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

    if (unlikely(dev_priv->tdev == NULL)) {
        DRM_ERROR("Unable to initialize TTM object management.\n");
        ret = -ENOMEM;
        goto out_err4;
    }

    dev->dev_private = dev_priv;

    ret = pci_request_regions(dev->pdev, "vmwgfx probe");
    dev_priv->stealth = (ret != 0);
    if (dev_priv->stealth) {
        /**
         * Request at least the mmio PCI resource.
         */

        DRM_INFO("It appears like vesafb is loaded. "
                 "Ignore above error if any.\n");
        ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
        if (unlikely(ret != 0)) {
            DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
            goto out_no_device;
        }
    }

    if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
        ret = drm_irq_install(dev, dev->pdev->irq);
        if (ret != 0) {
            DRM_ERROR("Failed installing irq: %d\n", ret);
            goto out_no_irq;
        }
    }

    dev_priv->fman = vmw_fence_manager_init(dev_priv);
    if (unlikely(dev_priv->fman == NULL)) {
        ret = -ENOMEM;
        goto out_no_fman;
    }

    ret = ttm_bo_device_init(&dev_priv->bdev,
                             dev_priv->bo_global_ref.ref.object,
                             &vmw_bo_driver,
                             dev->anon_inode->i_mapping,
                             VMWGFX_FILE_PAGE_OFFSET,
                             false);
    if (unlikely(ret != 0)) {
        DRM_ERROR("Failed initializing TTM buffer object driver.\n");
        goto out_no_bdev;
    }

    /*
     * Enable VRAM, but initially don't use it until SVGA is enabled and
     * unhidden.
     */
    ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                         (dev_priv->vram_size >> PAGE_SHIFT));
    if (unlikely(ret != 0)) {
        DRM_ERROR("Failed initializing memory manager for VRAM.\n");
        goto out_no_vram;
    }
    dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

    dev_priv->has_gmr = true;
    if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
            refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                                         VMW_PL_GMR) != 0) {
        DRM_INFO("No GMR memory available. "
                 "Graphics memory resources are very limited.\n");
        dev_priv->has_gmr = false;
    }

    if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
        dev_priv->has_mob = true;
        if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
                           VMW_PL_MOB) != 0) {
            DRM_INFO("No MOB memory available. "
                     "3D will be disabled.\n");
            dev_priv->has_mob = false;
        }
    }

    if (dev_priv->has_mob) {
        spin_lock(&dev_priv->cap_lock);
        vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
        dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
        spin_unlock(&dev_priv->cap_lock);
    }


    ret = vmw_kms_init(dev_priv);
    if (unlikely(ret != 0))
        goto out_no_kms;
    vmw_overlay_init(dev_priv);

    ret = vmw_request_device(dev_priv);
    if (ret)
        goto out_no_fifo;

    DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

    if (dev_priv->enable_fb) {
        vmw_fifo_resource_inc(dev_priv);
        vmw_svga_enable(dev_priv);
        vmw_fb_init(dev_priv);
    }

    dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
    register_pm_notifier(&dev_priv->pm_nb);

    return 0;

out_no_fifo:
    vmw_overlay_close(dev_priv);
    vmw_kms_close(dev_priv);
out_no_kms:
    if (dev_priv->has_mob)
        (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
    if (dev_priv->has_gmr)
        (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
    (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
    (void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
    vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
    if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
        drm_irq_uninstall(dev_priv->dev);
out_no_irq:
    if (dev_priv->stealth)
        pci_release_region(dev->pdev, 2);
    else
        pci_release_regions(dev->pdev);
out_no_device:
    ttm_object_device_release(&dev_priv->tdev);
out_err4:
    iounmap(dev_priv->mmio_virt);
out_err3:
    arch_phys_wc_del(dev_priv->mmio_mtrr);
    vmw_ttm_global_release(dev_priv);
out_err0:
    for (i = vmw_res_context; i < vmw_res_max; ++i)
        idr_destroy(&dev_priv->res_idr[i]);

    if (dev_priv->ctx.staged_bindings)
        vmw_binding_state_free(dev_priv->ctx.staged_bindings);
    kfree(dev_priv);
    return ret;
}