Example #1
/*
 * Delete the write-combining MTRRs and reset the saved handles.
 */
void savage_driver_lastclose(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < 3; ++i) {
		arch_phys_wc_del(dev_priv->mtrr_handles[i]);
		dev_priv->mtrr_handles[i] = 0;
	}
}
Example #2
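/*
 * vmwgfx unload: releases resources in roughly the reverse order of
 * vmw_driver_load() below (Example #6), including the MMIO mapping,
 * its write-combining MTRR, and the device-private struct.
 */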
static int vmw_driver_unload(struct drm_device *dev)
{
    struct vmw_private *dev_priv = vmw_priv(dev);
    enum vmw_res_type i;

    unregister_pm_notifier(&dev_priv->pm_nb);

    if (dev_priv->ctx.res_ht_initialized)
        drm_ht_remove(&dev_priv->ctx.res_ht);
    vfree(dev_priv->ctx.cmd_bounce);
    if (dev_priv->enable_fb) {
        vmw_fb_off(dev_priv);
        vmw_fb_close(dev_priv);
        vmw_fifo_resource_dec(dev_priv);
        vmw_svga_disable(dev_priv);
    }

    vmw_kms_close(dev_priv);
    vmw_overlay_close(dev_priv);

    if (dev_priv->has_gmr)
        (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
    (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

    vmw_release_device_early(dev_priv);
    if (dev_priv->has_mob)
        (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
    (void) ttm_bo_device_release(&dev_priv->bdev);
    vmw_release_device_late(dev_priv);
    vmw_fence_manager_takedown(dev_priv->fman);
    if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
        drm_irq_uninstall(dev_priv->dev);
    if (dev_priv->stealth)
        pci_release_region(dev->pdev, 2);
    else
        pci_release_regions(dev->pdev);

    ttm_object_device_release(&dev_priv->tdev);
    iounmap(dev_priv->mmio_virt);
    arch_phys_wc_del(dev_priv->mmio_mtrr);
    if (dev_priv->ctx.staged_bindings)
        vmw_binding_state_free(dev_priv->ctx.staged_bindings);
    vmw_ttm_global_release(dev_priv);

    for (i = vmw_res_context; i < vmw_res_max; ++i)
        idr_destroy(&dev_priv->res_idr[i]);

    kfree(dev_priv);

    return 0;
}
Example #3
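/*
 * nouveau TTM teardown: after the memory managers and the bo device are
 * released, the VRAM write-combining MTRR and the memtype reservation
 * taken at init time are dropped.
 */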
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);

	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
	arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
				device->func->resource_size(device, 1));
}
Example #4
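/*
 * i915 unload: the GPU is idled first; the aperture io_mapping and its
 * write-combining MTRR are then freed before interrupts, workqueues and
 * the GTT are torn down.
 */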
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_fbdev_fini(dev);

	drm_vblank_cleanup(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_modeset_cleanup(dev);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
			kfree(dev_priv->vbt.child_dev);
			dev_priv->vbt.child_dev = NULL;
			dev_priv->vbt.child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_work_sync(&dev_priv->gpu_error.work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		mutex_unlock(&dev->struct_mutex);
		i915_gem_cleanup_stolen(dev);
	}

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->dp_wq);
	destroy_workqueue(dev_priv->wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev_priv);

	return 0;
}
Example #5
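/*
 * i915 load: note the error-path labels at the bottom; out_mtrrfree
 * undoes the arch_phys_wc_add()/io_mapping_create_wc() pair from the
 * aperture setup above it.
 */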
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
		return -ENODEV;
	}

	/* UMS needs agp support. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
		return -EINVAL;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	gpu_perf_dev_priv = (void *)dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we restrict this ioremap so that it does not
	 * clobber the GTT, which we map with ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	if (i915_start_vgt(dev->pdev))
		i915_host_mediate = true;
	printk("i915_start_vgt: %s\n", i915_host_mediate ? "success" : "fail");

	i915_check_vgt(dev_priv);
	if (USES_VGT(dev))
		i915.enable_fbc = 0;

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_regs;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* WARNING: Apparently we must kick fbdev drivers before vgacon,
		 * otherwise the vga fbdev driver falls over. */
		ret = i915_kick_out_firmware_fb(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
			goto out_gtt;
		}

		ret = i915_kick_out_vgacon(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting VGA console\n");
			goto out_gtt;
		}
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->dp_wq == NULL) {
		DRM_ERROR("Failed to create our dp workqueue.\n");
		ret = -ENOMEM;
		goto out_freewq;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_device_info_runtime_init(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_power_well;
		}
#ifdef DRM_I915_VGT_SUPPORT
		if (USES_VGT(dev)) {
			/*
			 * Tell VGT that we have a valid surface to show
			 * after modesetting. We don't distinguish Dom0 from a
			 * Linux guest here; the PVINFO write handler takes
			 * care of that.
			 */
			I915_WRITE(vgt_info_off(display_ready), 1);
		}
#endif
	}

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->dp_wq);
out_freewq:
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_regs:
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
	kfree(dev_priv);
	return ret;
}
Example #6
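/*
 * vmwgfx load: each error label unwinds exactly the resources acquired
 * before the failing step; out_err3 drops the MMIO write-combining MTRR
 * added just before ioremap_wc().
 */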
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
    struct vmw_private *dev_priv;
    int ret;
    uint32_t svga_id;
    enum vmw_res_type i;
    bool refuse_dma = false;

    dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
    if (unlikely(dev_priv == NULL)) {
        DRM_ERROR("Failed allocating a device private struct.\n");
        return -ENOMEM;
    }

    pci_set_master(dev->pdev);

    dev_priv->dev = dev;
    dev_priv->vmw_chipset = chipset;
    dev_priv->last_read_seqno = (uint32_t) -100;
    mutex_init(&dev_priv->cmdbuf_mutex);
    mutex_init(&dev_priv->release_mutex);
    mutex_init(&dev_priv->binding_mutex);
    rwlock_init(&dev_priv->resource_lock);
    ttm_lock_init(&dev_priv->reservation_sem);
    spin_lock_init(&dev_priv->hw_lock);
    spin_lock_init(&dev_priv->waiter_lock);
    spin_lock_init(&dev_priv->cap_lock);
    spin_lock_init(&dev_priv->svga_lock);

    for (i = vmw_res_context; i < vmw_res_max; ++i) {
        idr_init(&dev_priv->res_idr[i]);
        INIT_LIST_HEAD(&dev_priv->res_lru[i]);
    }

    mutex_init(&dev_priv->init_mutex);
    init_waitqueue_head(&dev_priv->fence_queue);
    init_waitqueue_head(&dev_priv->fifo_queue);
    dev_priv->fence_queue_waiters = 0;
    atomic_set(&dev_priv->fifo_queue_waiters, 0);

    dev_priv->used_memory_size = 0;

    dev_priv->io_start = pci_resource_start(dev->pdev, 0);
    dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
    dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

    dev_priv->enable_fb = enable_fbdev;

    vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
    svga_id = vmw_read(dev_priv, SVGA_REG_ID);
    if (svga_id != SVGA_ID_2) {
        ret = -ENOSYS;
        DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
        goto out_err0;
    }

    dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
    ret = vmw_dma_select_mode(dev_priv);
    if (unlikely(ret != 0)) {
        DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
        refuse_dma = true;
    }

    dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
    dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
    dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
    dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

    vmw_get_initial_size(dev_priv);

    if (dev_priv->capabilities & SVGA_CAP_GMR2) {
        dev_priv->max_gmr_ids =
            vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
        dev_priv->max_gmr_pages =
            vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
        dev_priv->memory_size =
            vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
        dev_priv->memory_size -= dev_priv->vram_size;
    } else {
        /*
         * An arbitrary limit of 512MiB on surface
         * memory. But all HWV8 hardware supports GMR2.
         */
        dev_priv->memory_size = 512*1024*1024;
    }
    dev_priv->max_mob_pages = 0;
    dev_priv->max_mob_size = 0;
    if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
        uint64_t mem_size =
            vmw_read(dev_priv,
                     SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

        dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
        dev_priv->prim_bb_mem =
            vmw_read(dev_priv,
                     SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
        dev_priv->max_mob_size =
            vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
        dev_priv->stdu_max_width =
            vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
        dev_priv->stdu_max_height =
            vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

        vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
        dev_priv->texture_max_width = vmw_read(dev_priv,
                                               SVGA_REG_DEV_CAP);
        vmw_write(dev_priv, SVGA_REG_DEV_CAP,
                  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
        dev_priv->texture_max_height = vmw_read(dev_priv,
                                                SVGA_REG_DEV_CAP);
    } else {
        dev_priv->texture_max_width = 8192;
        dev_priv->texture_max_height = 8192;
        dev_priv->prim_bb_mem = dev_priv->vram_size;
    }

    vmw_print_capabilities(dev_priv->capabilities);

    ret = vmw_dma_masks(dev_priv);
    if (unlikely(ret != 0))
        goto out_err0;

    if (dev_priv->capabilities & SVGA_CAP_GMR2) {
        DRM_INFO("Max GMR ids is %u\n",
                 (unsigned)dev_priv->max_gmr_ids);
        DRM_INFO("Max number of GMR pages is %u\n",
                 (unsigned)dev_priv->max_gmr_pages);
        DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                 (unsigned)dev_priv->memory_size / 1024);
    }
    DRM_INFO("Maximum display memory size is %u kiB\n",
             dev_priv->prim_bb_mem / 1024);
    DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
             dev_priv->vram_start, dev_priv->vram_size / 1024);
    DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
             dev_priv->mmio_start, dev_priv->mmio_size / 1024);

    ret = vmw_ttm_global_init(dev_priv);
    if (unlikely(ret != 0))
        goto out_err0;

    vmw_master_init(&dev_priv->fbdev_master);
    ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
    dev_priv->active_master = &dev_priv->fbdev_master;

    dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
                                           dev_priv->mmio_size);

    dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
                                     dev_priv->mmio_size);

    if (unlikely(dev_priv->mmio_virt == NULL)) {
        ret = -ENOMEM;
        DRM_ERROR("Failed mapping MMIO.\n");
        goto out_err3;
    }

    /* Need mmio memory to check for fifo pitchlock cap. */
    if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
        ret = -ENOSYS;
        DRM_ERROR("Hardware has no pitchlock\n");
        goto out_err4;
    }

    dev_priv->tdev = ttm_object_device_init
                     (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

    if (unlikely(dev_priv->tdev == NULL)) {
        DRM_ERROR("Unable to initialize TTM object management.\n");
        ret = -ENOMEM;
        goto out_err4;
    }

    dev->dev_private = dev_priv;

    ret = pci_request_regions(dev->pdev, "vmwgfx probe");
    dev_priv->stealth = (ret != 0);
    if (dev_priv->stealth) {
        /**
         * Request at least the mmio PCI resource.
         */

        DRM_INFO("It appears like vesafb is loaded. "
                 "Ignore above error if any.\n");
        ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
        if (unlikely(ret != 0)) {
            DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
            goto out_no_device;
        }
    }

    if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
        ret = drm_irq_install(dev, dev->pdev->irq);
        if (ret != 0) {
            DRM_ERROR("Failed installing irq: %d\n", ret);
            goto out_no_irq;
        }
    }

    dev_priv->fman = vmw_fence_manager_init(dev_priv);
    if (unlikely(dev_priv->fman == NULL)) {
        ret = -ENOMEM;
        goto out_no_fman;
    }

    ret = ttm_bo_device_init(&dev_priv->bdev,
                             dev_priv->bo_global_ref.ref.object,
                             &vmw_bo_driver,
                             dev->anon_inode->i_mapping,
                             VMWGFX_FILE_PAGE_OFFSET,
                             false);
    if (unlikely(ret != 0)) {
        DRM_ERROR("Failed initializing TTM buffer object driver.\n");
        goto out_no_bdev;
    }

    /*
     * Enable VRAM, but initially don't use it until SVGA is enabled and
     * unhidden.
     */
    ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                         (dev_priv->vram_size >> PAGE_SHIFT));
    if (unlikely(ret != 0)) {
        DRM_ERROR("Failed initializing memory manager for VRAM.\n");
        goto out_no_vram;
    }
    dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

    dev_priv->has_gmr = true;
    if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
            refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                                         VMW_PL_GMR) != 0) {
        DRM_INFO("No GMR memory available. "
                 "Graphics memory resources are very limited.\n");
        dev_priv->has_gmr = false;
    }

    if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
        dev_priv->has_mob = true;
        if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
                           VMW_PL_MOB) != 0) {
            DRM_INFO("No MOB memory available. "
                     "3D will be disabled.\n");
            dev_priv->has_mob = false;
        }
    }

    if (dev_priv->has_mob) {
        spin_lock(&dev_priv->cap_lock);
        vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
        dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
        spin_unlock(&dev_priv->cap_lock);
    }

    ret = vmw_kms_init(dev_priv);
    if (unlikely(ret != 0))
        goto out_no_kms;
    vmw_overlay_init(dev_priv);

    ret = vmw_request_device(dev_priv);
    if (ret)
        goto out_no_fifo;

    DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

    if (dev_priv->enable_fb) {
        vmw_fifo_resource_inc(dev_priv);
        vmw_svga_enable(dev_priv);
        vmw_fb_init(dev_priv);
    }

    dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
    register_pm_notifier(&dev_priv->pm_nb);

    return 0;

out_no_fifo:
    vmw_overlay_close(dev_priv);
    vmw_kms_close(dev_priv);
out_no_kms:
    if (dev_priv->has_mob)
        (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
    if (dev_priv->has_gmr)
        (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
    (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
    (void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
    vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
    if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
        drm_irq_uninstall(dev_priv->dev);
out_no_irq:
    if (dev_priv->stealth)
        pci_release_region(dev->pdev, 2);
    else
        pci_release_regions(dev->pdev);
out_no_device:
    ttm_object_device_release(&dev_priv->tdev);
out_err4:
    iounmap(dev_priv->mmio_virt);
out_err3:
    arch_phys_wc_del(dev_priv->mmio_mtrr);
    vmw_ttm_global_release(dev_priv);
out_err0:
    for (i = vmw_res_context; i < vmw_res_max; ++i)
        idr_destroy(&dev_priv->res_idr[i]);

    if (dev_priv->ctx.staged_bindings)
        vmw_binding_state_free(dev_priv->ctx.staged_bindings);
    kfree(dev_priv);
    return ret;
}
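
All six examples follow the same discipline: an arch_phys_wc_add() in the load path is paired with an arch_phys_wc_del() in the unload path and in every error path reached after it. The sketch below distills that pattern into a minimal form; struct foo_priv, the BAR index, and the function names are hypothetical, used only to illustrate the pairing.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>

/* Hypothetical device-private state, standing in for drm_savage_private_t,
 * vmw_private, etc. in the examples above. */
struct foo_priv {
	void __iomem *mmio;
	int mtrr;	/* handle from arch_phys_wc_add(), <= 0 if none */
};

static int foo_map_mmio(struct foo_priv *priv, struct pci_dev *pdev)
{
	resource_size_t base = pci_resource_start(pdev, 0);
	resource_size_t size = pci_resource_len(pdev, 0);

	/*
	 * Best effort: a failed arch_phys_wc_add() returns a value that
	 * arch_phys_wc_del() ignores, so no error check is needed here.
	 * None of the drivers above check it either.
	 */
	priv->mtrr = arch_phys_wc_add(base, size);

	priv->mmio = ioremap_wc(base, size);
	if (!priv->mmio) {
		arch_phys_wc_del(priv->mtrr);	/* unwind, as in out_err3 above */
		return -ENOMEM;
	}
	return 0;
}

static void foo_unmap_mmio(struct foo_priv *priv)
{
	iounmap(priv->mmio);
	arch_phys_wc_del(priv->mtrr);	/* reverse order of setup */
	priv->mtrr = 0;
}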