Example no. 1
0
/**
 * vmw_kms_stdu_init_display - Initializes a Screen Target based display
 *
 * @dev_priv: VMW DRM device
 *
 * This function initializes a Screen Target based display device.  It checks
 * the capability bits to make sure the underlying hardware can support
 * screen targets, and then creates the maximum number of CRTCs, a.k.a Display
 * Units, as supported by the display hardware.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
{
    struct drm_device *dev = dev_priv->dev;
    int i, ret;


    /* Do nothing if Screen Target support is turned off */
    if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
        return -ENOSYS;

    if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
        return -ENOSYS;

    ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
    if (unlikely(ret != 0))
        return ret;

    dev_priv->active_display_unit = vmw_du_screen_target;

    vmw_kms_create_implicit_placement_property(dev_priv, false);

    for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
        ret = vmw_stdu_init(dev_priv, i);

        if (unlikely(ret != 0)) {
            DRM_ERROR("Failed to initialize STDU %d", i);
            goto err_vblank_cleanup;
        }
    }

    DRM_INFO("Screen Target Display device initialized\n");

    return 0;

err_vblank_cleanup:
    drm_vblank_cleanup(dev);
    return ret;
}
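The examples collected in this list all revolve around the same call: drm_vblank_init() is invoked once with the number of CRTCs the driver exposes, its return value is checked, and any later failure unwinds with drm_vblank_cleanup(). The sketch below condenses that shape; foo_load() and foo_setup_outputs() are hypothetical placeholders rather than functions from any of the drivers shown, and only DRM calls that appear in the examples are used.

/*
 * Minimal sketch of the pattern shared by the examples in this list:
 * initialize vblank handling for every CRTC and undo it if a later
 * setup step fails.  foo_load() and foo_setup_outputs() are
 * hypothetical placeholders, not part of any real driver.
 */
static int foo_setup_outputs(struct drm_device *dev);	/* hypothetical */

static int foo_load(struct drm_device *dev, unsigned int num_crtc)
{
    int ret;

    ret = drm_vblank_init(dev, num_crtc);
    if (ret) {
        DRM_ERROR("failed to initialize vblank\n");
        return ret;
    }

    ret = foo_setup_outputs(dev);   /* hypothetical follow-up step */
    if (ret)
        goto err_vblank;

    return 0;

err_vblank:
    drm_vblank_cleanup(dev);        /* undo drm_vblank_init() */
    return ret;
}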
Example no. 2
0
int qxl_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct qxl_device *qdev;
	int r;

	/* require kms */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
	if (qdev == NULL)
		return -ENOMEM;

	dev->dev_private = qdev;

	r = qxl_device_init(qdev, dev, dev->pdev, flags);
	if (r)
		goto out;

	r = drm_vblank_init(dev, 1);
	if (r)
		goto unload;

	r = qxl_modeset_init(qdev);
	if (r)
		goto unload;

	drm_kms_helper_poll_init(qdev->ddev);

	return 0;
unload:
	qxl_driver_unload(dev);

out:
	kfree(qdev);
	return r;
}
Example no. 3
0
int radeon_irq_kms_init(struct radeon_device *rdev)
{
	int i;
	int r = 0;

	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);

	spin_lock_init(&rdev->irq.sw_lock);
	for (i = 0; i < rdev->num_crtc; i++)
		spin_lock_init(&rdev->irq.pflip_lock[i]);
	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
	if (r) {
		return r;
	}
	/* enable msi */
	rdev->msi_enabled = 0;
	/* MSIs don't seem to work reliably on all IGP
	 * chips.  Disable MSI on them for now.
	 */
	if ((rdev->family >= CHIP_RV380) &&
	    ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) &&
	    (!(rdev->flags & RADEON_IS_AGP))) {
		int ret = pci_enable_msi(rdev->pdev);
		if (!ret) {
			rdev->msi_enabled = 1;
			dev_info(rdev->dev, "radeon: using MSI.\n");
		}
	}
	rdev->irq.installed = true;
	r = drm_irq_install(rdev->ddev);
	if (r) {
		rdev->irq.installed = false;
		return r;
	}
	DRM_INFO("radeon: irq initialized.\n");
	return 0;
}
Example no. 4
0
static int fsl_dcu_load(struct drm_device *drm, unsigned long flags)
{
	struct device *dev = drm->dev;
	struct fsl_dcu_drm_device *fsl_dev = drm->dev_private;
	int ret;

	ret = fsl_dcu_drm_modeset_init(fsl_dev);
	if (ret < 0) {
		dev_err(dev, "failed to initialize mode setting\n");
		return ret;
	}

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev, "failed to initialize vblank\n");
		goto done;
	}
	drm->vblank_disable_allowed = true;

	ret = fsl_dcu_drm_irq_init(drm);
	if (ret < 0)
		goto done;
	drm->irq_enabled = true;

	fsl_dcu_fbdev_init(drm);

	return 0;
done:
	if (ret) {
		drm_mode_config_cleanup(drm);
		drm_vblank_cleanup(drm);
		drm_irq_uninstall(drm);
		drm->dev_private = NULL;
	}

	return ret;
}
Example no. 5
0
int via_driver_load(struct drm_device *dev, unsigned long chipset)
{
	drm_via_private_t *dev_priv;
	int ret = 0;

	dev_priv = kzalloc(sizeof(drm_via_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	idr_init(&dev_priv->object_idr);
	dev->dev_private = (void *)dev_priv;

	dev_priv->chipset = chipset;

	pci_set_master(dev->pdev);

	ret = drm_vblank_init(dev, 1);
	if (ret) {
		kfree(dev_priv);
		return ret;
	}

	return 0;
}
Example no. 6
0
/**
 * radeon_irq_kms_init - init driver interrupt info
 *
 * @rdev: radeon device pointer
 *
 * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
 * Returns 0 for success, error for failure.
 */
int radeon_irq_kms_init(struct radeon_device *rdev)
{
	int r = 0;

	spin_lock_init(&rdev->irq.lock);
	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
	if (r) {
		return r;
	}

	/* enable msi */
	rdev->msi_enabled = 0;

	if (radeon_msi_ok(rdev)) {
		int ret = pci_enable_msi(rdev->pdev);
		if (!ret) {
			rdev->msi_enabled = 1;
			dev_info(rdev->dev, "radeon: using MSI.\n");
		}
	}

	INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
	INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
	INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);

	rdev->irq.installed = true;
	r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
	if (r) {
		rdev->irq.installed = false;
		flush_delayed_work(&rdev->hotplug_work);
		return r;
	}

	DRM_INFO("radeon: irq initialized.\n");
	return 0;
}
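Examples no. 3 and 6, and the radeon/amdgpu variants that follow, add a second step after drm_vblank_init(): optionally enabling MSI and then installing the IRQ handler, rolling the installed flag back if that fails. The condensed sketch below uses hypothetical baz_* names and the single-argument drm_irq_install() form seen in Example no. 3; it is an illustration of the sequence, not code from any of these drivers.

/*
 * Condensed sketch of the vblank + MSI + IRQ-install sequence from the
 * radeon/amdgpu examples.  struct baz_device and baz_msi_ok() are
 * hypothetical stand-ins for the driver-private structures above.
 */
static int baz_irq_init(struct baz_device *bdev)
{
    int r;

    r = drm_vblank_init(bdev->ddev, bdev->num_crtc);
    if (r)
        return r;

    /* enable MSI only where the hardware is known to handle it */
    bdev->msi_enabled = 0;
    if (baz_msi_ok(bdev) && !pci_enable_msi(bdev->pdev))
        bdev->msi_enabled = 1;

    bdev->irq.installed = true;
    r = drm_irq_install(bdev->ddev);
    if (r) {
        bdev->irq.installed = false;
        return r;
    }

    return 0;
}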
Example no. 7
0
int radeon_irq_kms_init(struct radeon_device *rdev)
{
	int r = 0;
	int num_crtc = 2;

	if (rdev->flags & RADEON_SINGLE_CRTC)
		num_crtc = 1;

	r = drm_vblank_init(rdev->ddev, num_crtc);
	if (r) {
		return r;
	}
	
	rdev->msi_enabled = 0;
	if (rdev->family >= CHIP_RV380) {
		int ret = pci_enable_msi(rdev->pdev);
		if (!ret)
			rdev->msi_enabled = 1;
	}
	drm_irq_install(rdev->ddev);
	rdev->irq.installed = true;
	DRM_INFO("radeon: irq initialized.\n");
	return 0;
}
Example no. 8
0
/**
 * amdgpu_irq_init - init driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;

	spin_lock_init(&adev->irq.lock);
	r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
	if (r) {
		return r;
	}

	/* enable msi */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int ret = pci_enable_msi(adev->pdev);
		if (!ret) {
			adev->irq.msi_enabled = true;
			dev_info(adev->dev, "amdgpu: using MSI.\n");
		}
	}

	INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
	INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);

	adev->irq.installed = true;
	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
	if (r) {
		adev->irq.installed = false;
		flush_work(&adev->hotplug_work);
		cancel_work_sync(&adev->reset_work);
		return r;
	}

	DRM_INFO("amdgpu: irq initialized.\n");
	return 0;
}
Example no. 9
0
int radeon_irq_kms_init(struct radeon_device *rdev)
{
	int r = 0;
	int num_crtc = 2;

	if (rdev->flags & RADEON_SINGLE_CRTC)
		num_crtc = 1;
	spin_lock_init(&rdev->irq.sw_lock);
	r = drm_vblank_init(rdev->ddev, num_crtc);
	if (r) {
		return r;
	}
	/* enable msi */
	rdev->msi_enabled = 0;
	/* MSIs don't seem to work on my rs780;
	 * not sure about rs880 or other rs780s.
	 * Needs more investigation.
	 */
	if ((rdev->family >= CHIP_RV380) &&
	    (rdev->family != CHIP_RS780) &&
	    (rdev->family != CHIP_RS880)) {
		int ret = pci_enable_msi(rdev->pdev);
		if (!ret) {
			rdev->msi_enabled = 1;
			DRM_INFO("radeon: using MSI.\n");
		}
	}
	rdev->irq.installed = true;
	r = drm_irq_install(rdev->ddev);
	if (r) {
		rdev->irq.installed = false;
		return r;
	}
	DRM_INFO("radeon: irq initialized.\n");
	return 0;
}
Example no. 10
0
static int sun4i_drv_bind(struct device *dev)
{
	struct drm_device *drm;
	struct sun4i_drv *drv;
	int ret;

	drm = drm_dev_alloc(&sun4i_drv_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
	if (!drv) {
		ret = -ENOMEM;
		goto free_drm;
	}

	dev_set_drvdata(dev, drm);
	drm->dev_private = drv;
	INIT_LIST_HEAD(&drv->frontend_list);
	INIT_LIST_HEAD(&drv->engine_list);
	INIT_LIST_HEAD(&drv->tcon_list);

	ret = of_reserved_mem_device_init(dev);
	if (ret && ret != -ENODEV) {
		dev_err(drm->dev, "Couldn't claim our memory region\n");
		goto free_drm;
	}

	drm_mode_config_init(drm);
	drm->mode_config.allow_fb_modifiers = true;

	ret = component_bind_all(drm->dev, drm);
	if (ret) {
		dev_err(drm->dev, "Couldn't bind all pipelines components\n");
		goto cleanup_mode_config;
	}

	/* drm_vblank_init calls kcalloc, which can fail */
	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret)
		goto cleanup_mode_config;

	drm->irq_enabled = true;

	/* Remove early framebuffers (ie. simplefb) */
	drm_fb_helper_remove_conflicting_framebuffers(NULL, "sun4i-drm-fb", false);

	sun4i_framebuffer_init(drm);

	/* Enable connectors polling */
	drm_kms_helper_poll_init(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto finish_poll;

	drm_fbdev_generic_setup(drm, 32);

	return 0;

finish_poll:
	drm_kms_helper_poll_fini(drm);
cleanup_mode_config:
	drm_mode_config_cleanup(drm);
	of_reserved_mem_device_release(dev);
free_drm:
	drm_dev_put(drm);
	return ret;
}
Example no. 11
0
static int shmob_drm_load(struct drm_device *dev, unsigned long flags)
{
	struct shmob_drm_platform_data *pdata = dev->dev->platform_data;
	struct platform_device *pdev = dev->platformdev;
	struct shmob_drm_device *sdev;
	struct resource *res;
	unsigned int i;
	int ret;

	if (pdata == NULL) {
		dev_err(dev->dev, "no platform data\n");
		return -EINVAL;
	}

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (sdev == NULL) {
		dev_err(dev->dev, "failed to allocate private data\n");
		return -ENOMEM;
	}

	sdev->dev = &pdev->dev;
	sdev->pdata = pdata;
	spin_lock_init(&sdev->irq_lock);

	sdev->ddev = dev;
	dev->dev_private = sdev;

	/* I/O resources and clocks */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "failed to get memory resource\n");
		ret = -EINVAL;
		goto done;
	}

	sdev->mmio = ioremap_nocache(res->start, resource_size(res));
	if (sdev->mmio == NULL) {
		dev_err(&pdev->dev, "failed to remap memory resource\n");
		ret = -ENOMEM;
		goto done;
	}

	ret = shmob_drm_setup_clocks(sdev, pdata->clk_source);
	if (ret < 0)
		goto done;

	ret = shmob_drm_init_interface(sdev);
	if (ret < 0)
		goto done;

	ret = shmob_drm_modeset_init(sdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to initialize mode setting\n");
		goto done;
	}

	for (i = 0; i < 4; ++i) {
		ret = shmob_drm_plane_create(sdev, i);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to create plane %u\n", i);
			goto done;
		}
	}

	ret = drm_vblank_init(dev, 1);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to initialize vblank\n");
		goto done;
	}

	ret = drm_irq_install(dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to install IRQ handler\n");
		goto done;
	}

	platform_set_drvdata(pdev, sdev);

done:
	if (ret)
		shmob_drm_unload(dev);

	return ret;
}
Example no. 12
0
static int rcar_du_probe(struct platform_device *pdev)
{
    struct device_node *np = pdev->dev.of_node;
    struct rcar_du_device *rcdu;
    struct drm_connector *connector;
    struct drm_device *ddev;
    struct resource *mem;
    int ret;

    if (np == NULL) {
        dev_err(&pdev->dev, "no device tree node\n");
        return -ENODEV;
    }

    /* Allocate and initialize the DRM and R-Car device structures. */
    rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
    if (rcdu == NULL)
        return -ENOMEM;

    init_waitqueue_head(&rcdu->commit.wait);

    rcdu->dev = &pdev->dev;
    rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;

    ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
    if (!ddev)
        return -ENOMEM;

    drm_dev_set_unique(ddev, dev_name(&pdev->dev));

    rcdu->ddev = ddev;
    ddev->dev_private = rcdu;

    platform_set_drvdata(pdev, rcdu);

    /* I/O resources */
    mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
    if (IS_ERR(rcdu->mmio)) {
        ret = PTR_ERR(rcdu->mmio);
        goto error;
    }

    /* Initialize vertical blanking interrupts handling. Start with vblank
     * disabled for all CRTCs.
     */
    ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1);
    if (ret < 0) {
        dev_err(&pdev->dev, "failed to initialize vblank\n");
        goto error;
    }

    /* DRM/KMS objects */
    ret = rcar_du_modeset_init(rcdu);
    if (ret < 0) {
        dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);
        goto error;
    }

    ddev->irq_enabled = 1;

    /* Register the DRM device with the core and the connectors with
     * sysfs.
     */
    ret = drm_dev_register(ddev, 0);
    if (ret)
        goto error;

    mutex_lock(&ddev->mode_config.mutex);
    drm_for_each_connector(connector, ddev) {
        ret = drm_connector_register(connector);
        if (ret < 0)
            break;
    }
Example no. 13
0
static int armada_drm_load(struct drm_device *dev, unsigned long flags)
{
	struct armada_private *priv;
	struct resource *mem = NULL;
	int ret, n;

	for (n = 0; ; n++) {
		struct resource *r = platform_get_resource(dev->platformdev,
							   IORESOURCE_MEM, n);
		if (!r)
			break;

		/* Resources above 64K are graphics memory */
		if (resource_size(r) > SZ_64K)
			mem = r;
		else
			return -EINVAL;
	}

	if (!mem)
		return -ENXIO;

	if (!devm_request_mem_region(dev->dev, mem->start,
			resource_size(mem), "armada-drm"))
		return -EBUSY;

	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		DRM_ERROR("failed to allocate private\n");
		return -ENOMEM;
	}

	platform_set_drvdata(dev->platformdev, dev);
	dev->dev_private = priv;

	INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
	INIT_KFIFO(priv->fb_unref);

	/* Mode setting support */
	drm_mode_config_init(dev);
	dev->mode_config.min_width = 320;
	dev->mode_config.min_height = 200;

	/*
	 * With vscale enabled, the maximum width is 1920 due to the
	 * 1920 by 3 lines RAM
	 */
	dev->mode_config.max_width = 1920;
	dev->mode_config.max_height = 2048;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.funcs = &armada_drm_mode_config_funcs;
	drm_mm_init(&priv->linear, mem->start, resource_size(mem));
	mutex_init(&priv->linear_lock);

	ret = component_bind_all(dev->dev, dev);
	if (ret)
		goto err_kms;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		goto err_comp;

	dev->irq_enabled = true;
	dev->vblank_disable_allowed = 1;

	ret = armada_fbdev_init(dev);
	if (ret)
		goto err_comp;

	drm_kms_helper_poll_init(dev);

	return 0;

 err_comp:
	component_unbind_all(dev->dev, dev);
 err_kms:
	drm_mode_config_cleanup(dev);
	drm_mm_takedown(&priv->linear);
	flush_work(&priv->fb_unref_work);

	return ret;
}
Example no. 14
0
int
nouveau_card_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine;
	int ret;

	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);

	if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
		return 0;

	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
	vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
				       nouveau_switcheroo_can_switch);

	/* Initialise internal driver API hooks */
	ret = nouveau_init_engine_ptrs(dev);
	if (ret)
		goto out;
	engine = &dev_priv->engine;
	dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
	spin_lock_init(&dev_priv->context_switch_lock);

	/* Parse BIOS tables / Run init tables if card not POSTed */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = nouveau_bios_init(dev);
		if (ret)
			goto out;
	}

	ret = nouveau_mem_detect(dev);
	if (ret)
		goto out_bios;

	ret = nouveau_gpuobj_early_init(dev);
	if (ret)
		goto out_bios;

	/* Initialise instance memory, must happen before mem_init so we
	 * know exactly how much VRAM we're able to use for "normal"
	 * purposes.
	 */
	ret = engine->instmem.init(dev);
	if (ret)
		goto out_gpuobj_early;

	/* Setup the memory manager */
	ret = nouveau_mem_init(dev);
	if (ret)
		goto out_instmem;

	ret = nouveau_gpuobj_init(dev);
	if (ret)
		goto out_mem;

	/* PMC */
	ret = engine->mc.init(dev);
	if (ret)
		goto out_gpuobj;

	/* PTIMER */
	ret = engine->timer.init(dev);
	if (ret)
		goto out_mc;

	/* PFB */
	ret = engine->fb.init(dev);
	if (ret)
		goto out_timer;

	if (nouveau_noaccel)
		engine->graph.accel_blocked = true;
	else {
		/* PGRAPH */
		ret = engine->graph.init(dev);
		if (ret)
			goto out_fb;

		/* PFIFO */
		ret = engine->fifo.init(dev);
		if (ret)
			goto out_graph;
	}

	/* this calls irq_preinstall, registers the irq handler and
	 * calls irq_postinstall
	 */
	ret = drm_irq_install(dev);
	if (ret)
		goto out_fifo;

	ret = drm_vblank_init(dev, 0);
	if (ret)
		goto out_irq;

	/* what about PVIDEO/PCRTC/PRAMDAC etc? */

	if (!engine->graph.accel_blocked) {
		ret = nouveau_card_init_channel(dev);
		if (ret)
			goto out_irq;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (dev_priv->card_type >= NV_50)
			ret = nv50_display_create(dev);
		else
			ret = nv04_display_create(dev);
		if (ret)
			goto out_channel;
	}

	ret = nouveau_backlight_init(dev);
	if (ret)
		NV_ERROR(dev, "Error %d registering backlight\n", ret);

	dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		nouveau_fbcon_init(dev);
		drm_kms_helper_poll_init(dev);
	}

	return 0;

out_channel:
	if (dev_priv->channel) {
		nouveau_channel_free(dev_priv->channel);
		dev_priv->channel = NULL;
	}
out_irq:
	drm_irq_uninstall(dev);
out_fifo:
	if (!nouveau_noaccel)
		engine->fifo.takedown(dev);
out_graph:
	if (!nouveau_noaccel)
		engine->graph.takedown(dev);
out_fb:
	engine->fb.takedown(dev);
out_timer:
	engine->timer.takedown(dev);
out_mc:
	engine->mc.takedown(dev);
out_gpuobj:
	nouveau_gpuobj_takedown(dev);
out_mem:
	nouveau_sgdma_takedown(dev);
	nouveau_mem_close(dev);
out_instmem:
	engine->instmem.takedown(dev);
out_gpuobj_early:
	nouveau_gpuobj_late_takedown(dev);
out_bios:
	nouveau_bios_takedown(dev);
out:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
	return ret;
}
Example no. 15
0
int
nouveau_card_init(struct drm_device *dev)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nouveau_engine *engine;
    int ret;

    vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
    vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
                                   nouveau_switcheroo_reprobe,
                                   nouveau_switcheroo_can_switch);

    /* Initialise internal driver API hooks */
    ret = nouveau_init_engine_ptrs(dev);
    if (ret)
        goto out;
    engine = &dev_priv->engine;
    spin_lock_init(&dev_priv->channels.lock);
    spin_lock_init(&dev_priv->tile.lock);
    spin_lock_init(&dev_priv->context_switch_lock);
    spin_lock_init(&dev_priv->vm_lock);

    /* Make the CRTCs and I2C buses accessible */
    ret = engine->display.early_init(dev);
    if (ret)
        goto out;

    /* Parse BIOS tables / Run init tables if card not POSTed */
    ret = nouveau_bios_init(dev);
    if (ret)
        goto out_display_early;

    nouveau_pm_init(dev);

    ret = nouveau_mem_vram_init(dev);
    if (ret)
        goto out_bios;

    ret = nouveau_gpuobj_init(dev);
    if (ret)
        goto out_vram;

    ret = engine->instmem.init(dev);
    if (ret)
        goto out_gpuobj;

    ret = nouveau_mem_gart_init(dev);
    if (ret)
        goto out_instmem;

    /* PMC */
    ret = engine->mc.init(dev);
    if (ret)
        goto out_gart;

    /* PGPIO */
    ret = engine->gpio.init(dev);
    if (ret)
        goto out_mc;

    /* PTIMER */
    ret = engine->timer.init(dev);
    if (ret)
        goto out_gpio;

    /* PFB */
    ret = engine->fb.init(dev);
    if (ret)
        goto out_timer;

    if (nouveau_noaccel)
        engine->graph.accel_blocked = true;
    else {
        /* PGRAPH */
        ret = engine->graph.init(dev);
        if (ret)
            goto out_fb;

        /* PCRYPT */
        ret = engine->crypt.init(dev);
        if (ret)
            goto out_graph;

        /* PFIFO */
        ret = engine->fifo.init(dev);
        if (ret)
            goto out_crypt;
    }

    ret = engine->display.create(dev);
    if (ret)
        goto out_fifo;

    ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1);
    if (ret)
        goto out_vblank;

    ret = nouveau_irq_init(dev);
    if (ret)
        goto out_vblank;

    /* what about PVIDEO/PCRTC/PRAMDAC etc? */

    if (!engine->graph.accel_blocked) {
        ret = nouveau_fence_init(dev);
        if (ret)
            goto out_irq;

        ret = nouveau_card_init_channel(dev);
        if (ret)
            goto out_fence;
    }

    nouveau_fbcon_init(dev);
    drm_kms_helper_poll_init(dev);
    return 0;

out_fence:
    nouveau_fence_fini(dev);
out_irq:
    nouveau_irq_fini(dev);
out_vblank:
    drm_vblank_cleanup(dev);
    engine->display.destroy(dev);
out_fifo:
    if (!nouveau_noaccel)
        engine->fifo.takedown(dev);
out_crypt:
    if (!nouveau_noaccel)
        engine->crypt.takedown(dev);
out_graph:
    if (!nouveau_noaccel)
        engine->graph.takedown(dev);
out_fb:
    engine->fb.takedown(dev);
out_timer:
    engine->timer.takedown(dev);
out_gpio:
    engine->gpio.takedown(dev);
out_mc:
    engine->mc.takedown(dev);
out_gart:
    nouveau_mem_gart_fini(dev);
out_instmem:
    engine->instmem.takedown(dev);
out_gpuobj:
    nouveau_gpuobj_takedown(dev);
out_vram:
    nouveau_mem_vram_fini(dev);
out_bios:
    nouveau_pm_fini(dev);
    nouveau_bios_takedown(dev);
out_display_early:
    engine->display.late_takedown(dev);
out:
    vga_client_register(dev->pdev, NULL, NULL, NULL);
    return ret;
}
Example no. 16
0
static int mxsfb_load(struct drm_device *drm, unsigned long flags)
{
	struct platform_device *pdev = to_platform_device(drm->dev);
	struct mxsfb_drm_private *mxsfb;
	struct resource *res;
	int ret;

	mxsfb = devm_kzalloc(&pdev->dev, sizeof(*mxsfb), GFP_KERNEL);
	if (!mxsfb)
		return -ENOMEM;

	drm->dev_private = mxsfb;
	mxsfb->devdata = &mxsfb_devdata[pdev->id_entry->driver_data];

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mxsfb->base = devm_ioremap_resource(drm->dev, res);
	if (IS_ERR(mxsfb->base))
		return PTR_ERR(mxsfb->base);

	mxsfb->clk = devm_clk_get(drm->dev, NULL);
	if (IS_ERR(mxsfb->clk))
		return PTR_ERR(mxsfb->clk);

	mxsfb->clk_axi = devm_clk_get(drm->dev, "axi");
	if (IS_ERR(mxsfb->clk_axi))
		mxsfb->clk_axi = NULL;

	mxsfb->clk_disp_axi = devm_clk_get(drm->dev, "disp_axi");
	if (IS_ERR(mxsfb->clk_disp_axi))
		mxsfb->clk_disp_axi = NULL;

	ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	pm_runtime_enable(drm->dev);

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(drm->dev, "Failed to initialise vblank\n");
		goto err_vblank;
	}

	/* Modeset init */
	drm_mode_config_init(drm);

	ret = mxsfb_create_output(drm);
	if (ret < 0) {
		dev_err(drm->dev, "Failed to create outputs\n");
		goto err_vblank;
	}

	ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs,
			mxsfb_formats, ARRAY_SIZE(mxsfb_formats),
			&mxsfb->connector);
	if (ret < 0) {
		dev_err(drm->dev, "Cannot setup simple display pipe\n");
		goto err_vblank;
	}

	ret = drm_panel_attach(mxsfb->panel, &mxsfb->connector);
	if (ret) {
		dev_err(drm->dev, "Cannot connect panel\n");
		goto err_vblank;
	}

	drm->mode_config.min_width	= MXSFB_MIN_XRES;
	drm->mode_config.min_height	= MXSFB_MIN_YRES;
	drm->mode_config.max_width	= MXSFB_MAX_XRES;
	drm->mode_config.max_height	= MXSFB_MAX_YRES;
	drm->mode_config.funcs		= &mxsfb_mode_config_funcs;

	drm_mode_config_reset(drm);

	pm_runtime_get_sync(drm->dev);
	ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
	pm_runtime_put_sync(drm->dev);

	if (ret < 0) {
		dev_err(drm->dev, "Failed to install IRQ handler\n");
		goto err_irq;
	}

	drm_kms_helper_poll_init(drm);

	mxsfb->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
					  drm->mode_config.num_connector);
	if (IS_ERR(mxsfb->fbdev)) {
		mxsfb->fbdev = NULL;
		dev_err(drm->dev, "Failed to init FB CMA area\n");
		goto err_cma;
	}

	platform_set_drvdata(pdev, drm);

	drm_helper_hpd_irq_event(drm);

	return 0;

err_cma:
	drm_irq_uninstall(drm);
err_irq:
	drm_panel_detach(mxsfb->panel);
err_vblank:
	pm_runtime_disable(drm->dev);

	return ret;
}
Example no. 17
0
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
		return -ENODEV;
	}

	/* UMS needs agp support. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
		return -EINVAL;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	gpu_perf_dev_priv = (void *)dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	if (i915_start_vgt(dev->pdev))
		i915_host_mediate = true;
	printk("i915_start_vgt: %s\n", i915_host_mediate ? "success" : "fail");

	i915_check_vgt(dev_priv);
	if (USES_VGT(dev))
		i915.enable_fbc = 0;

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_regs;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* WARNING: Apparently we must kick fbdev drivers before vgacon,
		 * otherwise the vga fbdev driver falls over. */
		ret = i915_kick_out_firmware_fb(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
			goto out_gtt;
		}

		ret = i915_kick_out_vgacon(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting VGA console\n");
			goto out_gtt;
		}
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->dp_wq == NULL) {
		DRM_ERROR("Failed to create our dp workqueue.\n");
		ret = -ENOMEM;
		goto out_freewq;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_device_info_runtime_init(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_power_well;
		}
#ifdef DRM_I915_VGT_SUPPORT
		if (USES_VGT(dev)) {
			/*
			 * Tell VGT that we have a valid surface to show
			 * after modesetting. We don't distinguish DOM0 and
			 * Linux guests here; the PVINFO write handler will
			 * handle this.
			 */
			I915_WRITE(vgt_info_off(display_ready), 1);
		}
#endif
	}

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->dp_wq);
out_freewq:
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_regs:
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
	kfree(dev_priv);
	return ret;
}
Example no. 18
0
static int malidp_bind(struct device *dev)
{
	struct resource *res;
	struct drm_device *drm;
	struct device_node *ep;
	struct malidp_drm *malidp;
	struct malidp_hw_device *hwdev;
	struct platform_device *pdev = to_platform_device(dev);
	/* number of lines for the R, G and B output */
	u8 output_width[MAX_OUTPUT_CHANNELS];
	int ret = 0, i;
	u32 version, out_depth = 0;

	malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL);
	if (!malidp)
		return -ENOMEM;

	hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL);
	if (!hwdev)
		return -ENOMEM;

	/*
	 * copy the associated data from malidp_drm_of_match to avoid
	 * having to keep a reference to the OF node after binding
	 */
	memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
	malidp->dev = hwdev;

	INIT_LIST_HEAD(&malidp->event_list);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hwdev->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hwdev->regs))
		return PTR_ERR(hwdev->regs);

	hwdev->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(hwdev->pclk))
		return PTR_ERR(hwdev->pclk);

	hwdev->aclk = devm_clk_get(dev, "aclk");
	if (IS_ERR(hwdev->aclk))
		return PTR_ERR(hwdev->aclk);

	hwdev->mclk = devm_clk_get(dev, "mclk");
	if (IS_ERR(hwdev->mclk))
		return PTR_ERR(hwdev->mclk);

	hwdev->pxlclk = devm_clk_get(dev, "pxlclk");
	if (IS_ERR(hwdev->pxlclk))
		return PTR_ERR(hwdev->pxlclk);

	/* Get the optional framebuffer memory resource */
	ret = of_reserved_mem_device_init(dev);
	if (ret && ret != -ENODEV)
		return ret;

	drm = drm_dev_alloc(&malidp_driver, dev);
	if (IS_ERR(drm)) {
		ret = PTR_ERR(drm);
		goto alloc_fail;
	}

	/* Enable APB clock in order to get access to the registers */
	clk_prepare_enable(hwdev->pclk);
	/*
	 * Enable AXI clock and main clock so that prefetch can start once
	 * the registers are set
	 */
	clk_prepare_enable(hwdev->aclk);
	clk_prepare_enable(hwdev->mclk);

	ret = hwdev->query_hw(hwdev);
	if (ret) {
		DRM_ERROR("Invalid HW configuration\n");
		goto query_hw_fail;
	}

	version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID);
	DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
		 (version >> 12) & 0xf, (version >> 8) & 0xf);

	/* set the number of lines used for output of RGB data */
	ret = of_property_read_u8_array(dev->of_node,
					"arm,malidp-output-port-lines",
					output_width, MAX_OUTPUT_CHANNELS);
	if (ret)
		goto query_hw_fail;

	for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
		out_depth = (out_depth << 8) | (output_width[i] & 0xf);
	malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);

	drm->dev_private = malidp;
	dev_set_drvdata(dev, drm);
	atomic_set(&malidp->config_valid, 0);
	init_waitqueue_head(&malidp->wq);

	ret = malidp_init(drm);
	if (ret < 0)
		goto init_fail;

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto register_fail;

	/* Set the CRTC's port so that the encoder component can find it */
	ep = of_graph_get_next_endpoint(dev->of_node, NULL);
	if (!ep) {
		ret = -EINVAL;
		goto port_fail;
	}
	malidp->crtc.port = of_get_next_parent(ep);

	ret = component_bind_all(dev, drm);
	if (ret) {
		DRM_ERROR("Failed to bind all components\n");
		goto bind_fail;
	}

	ret = malidp_irq_init(pdev);
	if (ret < 0)
		goto irq_init_fail;

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret < 0) {
		DRM_ERROR("failed to initialise vblank\n");
		goto vblank_fail;
	}

	drm_mode_config_reset(drm);

	malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
					   drm->mode_config.num_connector);

	if (IS_ERR(malidp->fbdev)) {
		ret = PTR_ERR(malidp->fbdev);
		malidp->fbdev = NULL;
		goto fbdev_fail;
	}

	drm_kms_helper_poll_init(drm);
	return 0;

fbdev_fail:
	drm_vblank_cleanup(drm);
vblank_fail:
	malidp_se_irq_fini(drm);
	malidp_de_irq_fini(drm);
irq_init_fail:
	component_unbind_all(dev, drm);
bind_fail:
	of_node_put(malidp->crtc.port);
	malidp->crtc.port = NULL;
port_fail:
	drm_dev_unregister(drm);
register_fail:
	malidp_de_planes_destroy(drm);
	drm_mode_config_cleanup(drm);
init_fail:
	drm->dev_private = NULL;
	dev_set_drvdata(dev, NULL);
query_hw_fail:
	clk_disable_unprepare(hwdev->mclk);
	clk_disable_unprepare(hwdev->aclk);
	clk_disable_unprepare(hwdev->pclk);
	drm_dev_unref(drm);
alloc_fail:
	of_reserved_mem_device_release(dev);

	return ret;
}
Example no. 19
0
static int sun4i_drv_bind(struct device *dev)
{
	struct drm_device *drm;
	struct sun4i_drv *drv;
	int ret;

	drm = drm_dev_alloc(&sun4i_drv_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
	if (!drv) {
		ret = -ENOMEM;
		goto free_drm;
	}
	drm->dev_private = drv;

	ret = of_reserved_mem_device_init(dev);
	if (ret && ret != -ENODEV) {
		dev_err(drm->dev, "Couldn't claim our memory region\n");
		goto free_drm;
	}

	/* drm_vblank_init calls kcalloc, which can fail */
	ret = drm_vblank_init(drm, 1);
	if (ret)
		goto free_mem_region;

	drm_mode_config_init(drm);

	ret = component_bind_all(drm->dev, drm);
	if (ret) {
		dev_err(drm->dev, "Couldn't bind all pipelines components\n");
		goto cleanup_mode_config;
	}

	drm->irq_enabled = true;

	/* Remove early framebuffers (ie. simplefb) */
	sun4i_remove_framebuffers();

	/* Create our framebuffer */
	drv->fbdev = sun4i_framebuffer_init(drm);
	if (IS_ERR(drv->fbdev)) {
		dev_err(drm->dev, "Couldn't create our framebuffer\n");
		ret = PTR_ERR(drv->fbdev);
		goto cleanup_mode_config;
	}

	/* Enable connectors polling */
	drm_kms_helper_poll_init(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto finish_poll;

	return 0;

finish_poll:
	drm_kms_helper_poll_fini(drm);
	sun4i_framebuffer_free(drm);
cleanup_mode_config:
	drm_mode_config_cleanup(drm);
	drm_vblank_cleanup(drm);
free_mem_region:
	of_reserved_mem_device_release(dev);
free_drm:
	drm_dev_unref(drm);
	return ret;
}
Example no. 20
0
int
nouveau_card_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine;
	struct nouveau_gpuobj *gpuobj;
	int ret;

	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);

	if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
		return 0;

	/* Determine exact chipset we're running on */
	if (dev_priv->card_type < NV_10)
		dev_priv->chipset = dev_priv->card_type;
	else
		dev_priv->chipset =
			(nv_rd32(dev, NV03_PMC_BOOT_0) & 0x0ff00000) >> 20;

	/* Initialise internal driver API hooks */
	ret = nouveau_init_engine_ptrs(dev);
	if (ret)
		return ret;
	engine = &dev_priv->engine;
	dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;

	/* Parse BIOS tables / Run init tables if card not POSTed */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = nouveau_bios_init(dev);
		if (ret)
			return ret;
	}

	ret = nouveau_gpuobj_early_init(dev);
	if (ret)
		return ret;

	/* Initialise instance memory, must happen before mem_init so we
	 * know exactly how much VRAM we're able to use for "normal"
	 * purposes.
	 */
	ret = engine->instmem.init(dev);
	if (ret)
		return ret;

	/* Setup the memory manager */
	ret = nouveau_mem_init(dev);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_init(dev);
	if (ret)
		return ret;

	/* PMC */
	ret = engine->mc.init(dev);
	if (ret)
		return ret;

	/* PTIMER */
	ret = engine->timer.init(dev);
	if (ret)
		return ret;

	/* PFB */
	ret = engine->fb.init(dev);
	if (ret)
		return ret;

	/* PGRAPH */
	ret = engine->graph.init(dev);
	if (ret)
		return ret;

	/* PFIFO */
	ret = engine->fifo.init(dev);
	if (ret)
		return ret;

	/* this calls irq_preinstall, registers the irq handler and
	 * calls irq_postinstall
	 */
	ret = drm_irq_install(dev);
	if (ret)
		return ret;

	ret = drm_vblank_init(dev, 0);
	if (ret)
		return ret;

	/* what about PVIDEO/PCRTC/PRAMDAC etc? */

	ret = nouveau_channel_alloc(dev, &dev_priv->channel,
				    (struct drm_file *)-2,
				    NvDmaFB, NvDmaTT);
	if (ret)
		return ret;

	gpuobj = NULL;
	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
				     0, nouveau_mem_fb_amount(dev),
				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
				     &gpuobj);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
				     gpuobj, NULL);
	if (ret) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return ret;
	}

	gpuobj = NULL;
	ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
					  dev_priv->gart_info.aper_size,
					  NV_DMA_ACCESS_RW, &gpuobj, NULL);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
				     gpuobj, NULL);
	if (ret) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return ret;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (dev_priv->card_type >= NV_50) {
			ret = nv50_display_create(dev);
			if (ret)
				return ret;
		} else {
			ret = nv04_display_create(dev);
			if (ret)
				return ret;
		}
	}

	ret = nouveau_backlight_init(dev);
	if (ret)
		NV_ERROR(dev, "Error %d registering backlight\n", ret);

	dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_helper_initial_config(dev);

	return 0;
}
Example no. 21
0
static int armada_drm_load(struct drm_device *dev, unsigned long flags)
{
    const struct platform_device_id *id;
    const struct armada_variant *variant;
    struct armada_private *priv;
    struct resource *res[ARRAY_SIZE(priv->dcrtc)];
    struct resource *mem = NULL;
    int ret, n, i;

    memset(res, 0, sizeof(res));

    for (n = i = 0; ; n++) {
        struct resource *r = platform_get_resource(dev->platformdev,
                             IORESOURCE_MEM, n);
        if (!r)
            break;

        /* Resources above 64K are graphics memory */
        if (resource_size(r) > SZ_64K)
            mem = r;
        else if (i < ARRAY_SIZE(priv->dcrtc))
            res[i++] = r;
        else
            return -EINVAL;
    }

    if (!mem)
        return -ENXIO;

    if (!devm_request_mem_region(dev->dev, mem->start,
                                 resource_size(mem), "armada-drm"))
        return -EBUSY;

    priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
    if (!priv) {
        DRM_ERROR("failed to allocate private\n");
        return -ENOMEM;
    }

    platform_set_drvdata(dev->platformdev, dev);
    dev->dev_private = priv;

    /* Get the implementation specific driver data. */
    id = platform_get_device_id(dev->platformdev);
    if (!id)
        return -ENXIO;

    variant = (const struct armada_variant *)id->driver_data;

    INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
    INIT_KFIFO(priv->fb_unref);

    /* Mode setting support */
    drm_mode_config_init(dev);
    dev->mode_config.min_width = 320;
    dev->mode_config.min_height = 200;

    /*
     * With vscale enabled, the maximum width is 1920 due to the
     * 1920 by 3 lines RAM
     */
    dev->mode_config.max_width = 1920;
    dev->mode_config.max_height = 2048;

    dev->mode_config.preferred_depth = 24;
    dev->mode_config.funcs = &armada_drm_mode_config_funcs;
    drm_mm_init(&priv->linear, mem->start, resource_size(mem));

    /* Create all LCD controllers */
    for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
        int irq;

        if (!res[n])
            break;

        irq = platform_get_irq(dev->platformdev, n);
        if (irq < 0)
            goto err_kms;

        ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq,
                                     variant, NULL);
        if (ret)
            goto err_kms;
    }

    if (is_componentized(dev->dev)) {
        ret = component_bind_all(dev->dev, dev);
        if (ret)
            goto err_kms;
    } else {
#ifdef CONFIG_DRM_ARMADA_TDA1998X
        ret = armada_drm_connector_slave_create(dev, &tda19988_config);
        if (ret)
            goto err_kms;
#endif
    }

    ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
    if (ret)
        goto err_comp;

    dev->irq_enabled = true;
    dev->vblank_disable_allowed = 1;

    ret = armada_fbdev_init(dev);
    if (ret)
        goto err_comp;

    drm_kms_helper_poll_init(dev);

    return 0;

err_comp:
    if (is_componentized(dev->dev))
        component_unbind_all(dev->dev, dev);
err_kms:
    drm_mode_config_cleanup(dev);
    drm_mm_takedown(&priv->linear);
    flush_work(&priv->fb_unref_work);

    return ret;
}
Example no. 22
0
static int xylon_drm_load(struct drm_device *dev, unsigned long flags)
{
	struct platform_device *pdev = dev->platformdev;
	struct xylon_drm_device *xdev;
	unsigned int bpp;
	int ret;

	xdev = devm_kzalloc(dev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;
	xdev->dev = dev;

	dev->dev_private = xdev;

	drm_mode_config_init(dev);

	drm_kms_helper_poll_init(dev);

	xdev->crtc = xylon_drm_crtc_create(dev);
	if (IS_ERR(xdev->crtc)) {
		DRM_ERROR("failed create xylon crtc\n");
		ret = PTR_ERR(xdev->crtc);
		goto err_out;
	}

	xylon_drm_mode_config_init(dev);

	xdev->encoder = xylon_drm_encoder_create(dev);
	if (IS_ERR(xdev->encoder)) {
		DRM_ERROR("failed create xylon encoder\n");
		ret = PTR_ERR(xdev->encoder);
		goto err_out;
	}

	xdev->connector = xylon_drm_connector_create(dev, xdev->encoder);
	if (IS_ERR(xdev->connector)) {
		DRM_ERROR("failed create xylon connector\n");
		ret = PTR_ERR(xdev->connector);
		goto err_out;
	}

	ret = drm_vblank_init(dev, 1);
	if (ret) {
		DRM_ERROR("failed initialize vblank\n");
		goto err_out;
	}
	dev->vblank_disable_allowed = 1;

	ret = xylon_drm_irq_install(dev);
	if (ret < 0) {
		DRM_ERROR("failed install irq\n");
		goto err_irq;
	}

	ret = xylon_drm_crtc_get_param(xdev->crtc, &bpp,
				       XYLON_DRM_CRTC_BUFF_BPP);
	if (ret) {
		DRM_ERROR("failed get bpp\n");
		goto err_fbdev;
	}
	xdev->fbdev = xylon_drm_fbdev_init(dev, bpp, 1, 1);
	if (IS_ERR(xdev->fbdev)) {
		DRM_ERROR("failed initialize fbdev\n");
		ret = PTR_ERR(xdev->fbdev);
		goto err_fbdev;
	}

	drm_helper_disable_unused_functions(dev);

	platform_set_drvdata(pdev, xdev);

	return 0;

err_fbdev:
	xylon_drm_irq_uninstall(dev);
err_irq:
	drm_vblank_cleanup(dev);
err_out:
	drm_mode_config_cleanup(dev);

	if (ret == -EPROBE_DEFER)
		DRM_INFO("driver load deferred, will be called again\n");

	return ret;
}
Example no. 23
0
int r128_driver_load(struct drm_device *dev, unsigned long flags)
{
	pci_set_master(dev->pdev);
	return drm_vblank_init(dev, 1);
}
Example no. 24
0
static int sti_compositor_bind(struct device *dev,
                               struct device *master,
                               void *data)
{
    struct sti_compositor *compo = dev_get_drvdata(dev);
    struct drm_device *drm_dev = data;
    unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0;
    struct sti_private *dev_priv = drm_dev->dev_private;
    struct drm_plane *cursor = NULL;
    struct drm_plane *primary = NULL;
    struct sti_compositor_subdev_descriptor *desc = compo->data.subdev_desc;
    unsigned int array_size = compo->data.nb_subdev;

    dev_priv->compo = compo;

    /* Register mixer subdev and video subdev first */
    for (i = 0; i < array_size; i++) {
        switch (desc[i].type) {
        case STI_VID_SUBDEV:
            compo->vid[vid_id++] =
                sti_vid_create(compo->dev, desc[i].id,
                               compo->regs + desc[i].offset);
            break;
        case STI_MIXER_MAIN_SUBDEV:
        case STI_MIXER_AUX_SUBDEV:
            compo->mixer[mixer_id++] =
                sti_mixer_create(compo->dev, desc[i].id,
                                 compo->regs + desc[i].offset);
            break;
        case STI_GPD_SUBDEV:
        case STI_CURSOR_SUBDEV:
            /* Nothing to do, wait for the second round */
            break;
        default:
            DRM_ERROR("Unknow subdev compoment type\n");
            return 1;
        }
    }

    /* Register the other subdevs, create crtc and planes */
    for (i = 0; i < array_size; i++) {
        enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;

        if (crtc_id < mixer_id)
            plane_type = DRM_PLANE_TYPE_PRIMARY;

        switch (desc[i].type) {
        case STI_MIXER_MAIN_SUBDEV:
        case STI_MIXER_AUX_SUBDEV:
        case STI_VID_SUBDEV:
            /* Nothing to do, already done at the first round */
            break;
        case STI_CURSOR_SUBDEV:
            cursor = sti_cursor_create(drm_dev, compo->dev,
                                       desc[i].id,
                                       compo->regs + desc[i].offset,
                                       1);
            if (!cursor) {
                DRM_ERROR("Can't create CURSOR plane\n");
                break;
            }
            break;
        case STI_GPD_SUBDEV:
            primary = sti_gdp_create(drm_dev, compo->dev,
                                     desc[i].id,
                                     compo->regs + desc[i].offset,
                                     (1 << mixer_id) - 1,
                                     plane_type);
            if (!primary) {
                DRM_ERROR("Can't create GDP plane\n");
                break;
            }
            break;
        default:
            DRM_ERROR("Unknown subdev compoment type\n");
            return 1;
        }

        /* The first planes are reserved for primary planes */
        if (crtc_id < mixer_id && primary) {
            sti_crtc_init(drm_dev, compo->mixer[crtc_id],
                          primary, cursor);
            crtc_id++;
            cursor = NULL;
            primary = NULL;
        }
    }

    drm_vblank_init(drm_dev, crtc_id);
    /* Allow usage of vblank without having to call drm_irq_install */
    drm_dev->irq_enabled = 1;

    return 0;
}
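The example above takes a different route from most of the others: after drm_vblank_init() it sets irq_enabled directly ("Allow usage of vblank without having to call drm_irq_install"), since the compositor manages its interrupts elsewhere. A minimal, hedged sketch of that variant follows; bar_bind() and num_crtc are assumptions for illustration, not part of the sti driver.

/*
 * Minimal sketch of the variant in the example above: initialize vblank
 * support and flag irq_enabled by hand instead of calling
 * drm_irq_install().  bar_bind() is a hypothetical placeholder.
 */
static int bar_bind(struct drm_device *drm_dev, unsigned int num_crtc)
{
    int ret;

    ret = drm_vblank_init(drm_dev, num_crtc);
    if (ret)
        return ret;

    /* allow vblank usage without a DRM-managed interrupt handler */
    drm_dev->irq_enabled = 1;

    return 0;
}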
Example no. 25
0
static int atmel_hlcdc_dc_load(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	const struct of_device_id *match;
	struct atmel_hlcdc_dc *dc;
	int ret;

	match = of_match_node(atmel_hlcdc_of_match, dev->dev->parent->of_node);
	if (!match) {
		dev_err(&pdev->dev, "invalid compatible string\n");
		return -ENODEV;
	}

	if (!match->data) {
		dev_err(&pdev->dev, "invalid hlcdc description\n");
		return -EINVAL;
	}

	dc = devm_kzalloc(dev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0);
	if (!dc->wq)
		return -ENOMEM;

	init_waitqueue_head(&dc->commit.wait);
	dc->desc = match->data;
	dc->hlcdc = dev_get_drvdata(dev->dev->parent);
	dev->dev_private = dc;

	ret = clk_prepare_enable(dc->hlcdc->periph_clk);
	if (ret) {
		dev_err(dev->dev, "failed to enable periph_clk\n");
		goto err_destroy_wq;
	}

	pm_runtime_enable(dev->dev);

	ret = drm_vblank_init(dev, 1);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto err_periph_clk_disable;
	}

	ret = atmel_hlcdc_dc_modeset_init(dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize mode setting\n");
		goto err_periph_clk_disable;
	}

	drm_mode_config_reset(dev);

	pm_runtime_get_sync(dev->dev);
	ret = drm_irq_install(dev, dc->hlcdc->irq);
	pm_runtime_put_sync(dev->dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to install IRQ handler\n");
		goto err_periph_clk_disable;
	}

	platform_set_drvdata(pdev, dev);

	drm_kms_helper_poll_init(dev);

	/* force connectors detection */
	drm_helper_hpd_irq_event(dev);

	return 0;

err_periph_clk_disable:
	pm_runtime_disable(dev->dev);
	clk_disable_unprepare(dc->hlcdc->periph_clk);

err_destroy_wq:
	destroy_workqueue(dc->wq);

	return ret;
}
Example no. 26
0
int r128_driver_irq_postinstall(struct drm_device * dev)
{
	return drm_vblank_init(dev, 1);
}
Example no. 27
0
int
mach64_driver_load(struct drm_device * dev, unsigned long flags)
{
        return drm_vblank_init(dev, 1);
}
Example no. 28
0
static int pdev_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *soc;
	struct omap_drm_private *priv;
	struct drm_device *ddev;
	unsigned int i;
	int ret;

	DBG("%s", pdev->name);

	if (omapdss_is_initialized() == false)
		return -EPROBE_DEFER;

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "Failed to set the DMA mask\n");
		return ret;
	}

	omap_crtc_pre_init();

	ret = omap_connect_dssdevs();
	if (ret)
		goto err_crtc_uninit;

	/* Allocate and initialize the driver private structure. */
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_disconnect_dssdevs;
	}

	priv->dispc_ops = dispc_get_ops();

	soc = soc_device_match(omapdrm_soc_devices);
	priv->omaprev = soc ? (unsigned int)soc->data : 0;
	priv->wq = alloc_ordered_workqueue("omapdrm", 0);

	spin_lock_init(&priv->list_lock);
	INIT_LIST_HEAD(&priv->obj_list);

	/* Allocate and initialize the DRM device. */
	ddev = drm_dev_alloc(&omap_drm_driver, &pdev->dev);
	if (IS_ERR(ddev)) {
		ret = PTR_ERR(ddev);
		goto err_free_priv;
	}

	ddev->dev_private = priv;
	platform_set_drvdata(pdev, ddev);

	/* Get memory bandwidth limits */
	if (priv->dispc_ops->get_memory_bandwidth_limit)
		priv->max_bandwidth =
				priv->dispc_ops->get_memory_bandwidth_limit();

	omap_gem_init(ddev);

	ret = omap_modeset_init(ddev);
	if (ret) {
		dev_err(&pdev->dev, "omap_modeset_init failed: ret=%d\n", ret);
		goto err_free_drm_dev;
	}

	/* Initialize vblank handling, start with all CRTCs disabled. */
	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret) {
		dev_err(&pdev->dev, "could not init vblank\n");
		goto err_cleanup_modeset;
	}

	for (i = 0; i < priv->num_crtcs; i++)
		drm_crtc_vblank_off(priv->crtcs[i]);

	priv->fbdev = omap_fbdev_init(ddev);

	drm_kms_helper_poll_init(ddev);
	omap_modeset_enable_external_hpd();

	/*
	 * Register the DRM device with the core and the connectors with
	 * sysfs.
	 */
	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_cleanup_helpers;

	return 0;

err_cleanup_helpers:
	omap_modeset_disable_external_hpd();
	drm_kms_helper_poll_fini(ddev);
	if (priv->fbdev)
		omap_fbdev_free(ddev);
err_cleanup_modeset:
	drm_mode_config_cleanup(ddev);
	omap_drm_irq_uninstall(ddev);
err_free_drm_dev:
	omap_gem_deinit(ddev);
	drm_dev_unref(ddev);
err_free_priv:
	destroy_workqueue(priv->wq);
	kfree(priv);
err_disconnect_dssdevs:
	omap_disconnect_dssdevs();
err_crtc_uninit:
	omap_crtc_pre_uninit();
	return ret;
}
Example no. 29
0
static int pl111_modeset_init(struct drm_device *dev)
{
	struct drm_mode_config *mode_config;
	struct pl111_drm_dev_private *priv = dev->dev_private;
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	int ret = 0;

	drm_mode_config_init(dev);
	mode_config = &dev->mode_config;
	mode_config->funcs = &mode_config_funcs;
	mode_config->min_width = 1;
	mode_config->max_width = 1024;
	mode_config->min_height = 1;
	mode_config->max_height = 768;

	ret = drm_of_find_panel_or_bridge(dev->dev->of_node,
					  0, 0, &panel, &bridge);
	if (ret && ret != -ENODEV)
		return ret;
	if (panel) {
		bridge = drm_panel_bridge_add(panel,
					      DRM_MODE_CONNECTOR_Unknown);
		if (IS_ERR(bridge)) {
			ret = PTR_ERR(bridge);
			goto out_config;
		}
		/*
		 * TODO: when we are using a different bridge than a panel
		 * (such as a dumb VGA connector) we need to devise a different
		 * method to get the connector out of the bridge.
		 */
	}

	ret = pl111_display_init(dev);
	if (ret != 0) {
		dev_err(dev->dev, "Failed to init display\n");
		goto out_bridge;
	}

	ret = drm_simple_display_pipe_attach_bridge(&priv->pipe,
						    bridge);
	if (ret)
		return ret;

	priv->bridge = bridge;
	priv->panel = panel;
	priv->connector = panel->connector;

	ret = drm_vblank_init(dev, 1);
	if (ret != 0) {
		dev_err(dev->dev, "Failed to init vblank\n");
		goto out_bridge;
	}

	drm_mode_config_reset(dev);

	drm_fb_cma_fbdev_init(dev, 32, 0);

	drm_kms_helper_poll_init(dev);

	goto finish;

out_bridge:
	if (panel)
		drm_panel_bridge_remove(bridge);
out_config:
	drm_mode_config_cleanup(dev);
finish:
	return ret;
}
Example no. 30
0
static int hdlcd_drm_bind(struct device *dev)
{
	struct drm_device *drm;
	struct hdlcd_drm_private *hdlcd;
	int ret;

	hdlcd = devm_kzalloc(dev, sizeof(*hdlcd), GFP_KERNEL);
	if (!hdlcd)
		return -ENOMEM;

	drm = drm_dev_alloc(&hdlcd_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	drm->dev_private = hdlcd;
	dev_set_drvdata(dev, drm);

	hdlcd_setup_mode_config(drm);
	ret = hdlcd_load(drm, 0);
	if (ret)
		goto err_free;

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto err_unload;

	ret = component_bind_all(dev, drm);
	if (ret) {
		DRM_ERROR("Failed to bind all components\n");
		goto err_unregister;
	}

	ret = pm_runtime_set_active(dev);
	if (ret)
		goto err_pm_active;

	pm_runtime_enable(dev);

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret < 0) {
		DRM_ERROR("failed to initialise vblank\n");
		goto err_vblank;
	}

	drm_mode_config_reset(drm);
	drm_kms_helper_poll_init(drm);

	hdlcd->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
					  drm->mode_config.num_connector);

	if (IS_ERR(hdlcd->fbdev)) {
		ret = PTR_ERR(hdlcd->fbdev);
		hdlcd->fbdev = NULL;
		goto err_fbdev;
	}

	return 0;

err_fbdev:
	drm_kms_helper_poll_fini(drm);
	drm_mode_config_cleanup(drm);
	drm_vblank_cleanup(drm);
err_vblank:
	pm_runtime_disable(drm->dev);
err_pm_active:
	component_unbind_all(dev, drm);
err_unregister:
	drm_dev_unregister(drm);
err_unload:
	drm_irq_uninstall(drm);
	of_reserved_mem_device_release(drm->dev);
err_free:
	dev_set_drvdata(dev, NULL);
	drm_dev_unref(drm);

	return ret;
}