int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
{
	int ret;

	drm_mode_config_init(fsl_dev->drm);
	fsl_dev->drm->mode_config.min_width = 0;
	fsl_dev->drm->mode_config.min_height = 0;
	fsl_dev->drm->mode_config.max_width = 2031;
	fsl_dev->drm->mode_config.max_height = 2047;
	fsl_dev->drm->mode_config.funcs = &fsl_dcu_drm_mode_config_funcs;

	ret = fsl_dcu_drm_crtc_create(fsl_dev);
	if (ret)
		goto err;

	ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
	if (ret)
		goto err;

	ret = fsl_dcu_create_outputs(fsl_dev);
	if (ret)
		goto err;

	drm_mode_config_reset(fsl_dev->drm);
	drm_kms_helper_poll_init(fsl_dev->drm);

	return 0;

err:
	drm_mode_config_cleanup(fsl_dev->drm);
	return ret;
}
static int kirin_drm_kms_init(struct drm_device *dev)
{
	struct kirin_drm_private *priv;
	int ret;

	priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev->dev_private = priv;
	dev_set_drvdata(dev->dev, dev);

	/* dev->mode_config initialization */
	drm_mode_config_init(dev);
	kirin_drm_mode_config_init(dev);

	/* display controller init */
	ret = dc_ops->init(to_platform_device(dev->dev));
	if (ret)
		goto err_mode_config_cleanup;

	/* bind and init sub drivers */
	ret = component_bind_all(dev->dev, dev);
	if (ret) {
		DRM_ERROR("failed to bind all components.\n");
		goto err_dc_cleanup;
	}

	/* vblank init */
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret) {
		DRM_ERROR("failed to initialize vblank.\n");
		goto err_unbind_all;
	}
	/* with irq_enabled = true, we can use the vblank feature. */
	dev->irq_enabled = true;

	/* reset all the states of crtc/plane/encoder/connector */
	drm_mode_config_reset(dev);

	/* init kms poll for handling hpd */
	drm_kms_helper_poll_init(dev);

	/* force detection after connectors init */
	(void)drm_helper_hpd_irq_event(dev);

	return 0;

err_unbind_all:
	component_unbind_all(dev->dev, dev);
err_dc_cleanup:
	dc_ops->cleanup(to_platform_device(dev->dev));
err_mode_config_cleanup:
	drm_mode_config_cleanup(dev);
	devm_kfree(dev->dev, priv);
	dev->dev_private = NULL;

	return ret;
}
static int pl111_modeset_init(struct drm_device *dev)
{
	struct drm_mode_config *mode_config;
	struct pl111_drm_dev_private *priv = dev->dev_private;
	int ret = 0;

	drm_mode_config_init(dev);
	mode_config = &dev->mode_config;
	mode_config->funcs = &mode_config_funcs;
	mode_config->min_width = 1;
	mode_config->max_width = 1024;
	mode_config->min_height = 1;
	mode_config->max_height = 768;

	ret = pl111_connector_init(dev);
	if (ret) {
		dev_err(dev->dev, "Failed to create pl111_drm_connector\n");
		goto out_config;
	}

	/* Don't actually attach if we didn't find a drm_panel
	 * attached to us.  This will allow a kernel to include both
	 * the fbdev pl111 driver and this one, and choose between
	 * them based on which subsystem has support for the panel.
	 */
	if (!priv->connector.panel) {
		dev_info(dev->dev,
			 "Disabling due to lack of DRM panel device.\n");
		ret = -ENODEV;
		goto out_config;
	}

	ret = pl111_display_init(dev);
	if (ret != 0) {
		dev_err(dev->dev, "Failed to init display\n");
		goto out_config;
	}

	ret = drm_vblank_init(dev, 1);
	if (ret != 0) {
		dev_err(dev->dev, "Failed to init vblank\n");
		goto out_config;
	}

	drm_mode_config_reset(dev);

	priv->fbdev = drm_fbdev_cma_init(dev, 32,
					 dev->mode_config.num_connector);

	drm_kms_helper_poll_init(dev);

	goto finish;

out_config:
	drm_mode_config_cleanup(dev);
finish:
	return ret;
}
static int vbox_drm_thaw(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

	drm_mode_config_reset(dev);
	drm_helper_resume_force_mode(dev);
	drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, false);

	return 0;
}
static int hx8357d_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct drm_device *drm;
	struct mipi_dbi *mipi;
	struct gpio_desc *dc;
	u32 rotation = 0;
	int ret;

	mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
	if (!mipi)
		return -ENOMEM;

	drm = &mipi->drm;
	ret = devm_drm_dev_init(dev, drm, &hx8357d_driver);
	if (ret) {
		kfree(mipi);
		return ret;
	}

	drm_mode_config_init(drm);

	dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
	if (IS_ERR(dc)) {
		DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
		return PTR_ERR(dc);
	}

	mipi->backlight = devm_of_find_backlight(dev);
	if (IS_ERR(mipi->backlight))
		return PTR_ERR(mipi->backlight);

	device_property_read_u32(dev, "rotation", &rotation);

	ret = mipi_dbi_spi_init(spi, mipi, dc);
	if (ret)
		return ret;

	ret = mipi_dbi_init(mipi, &hx8357d_pipe_funcs, &yx350hv15_mode, rotation);
	if (ret)
		return ret;

	drm_mode_config_reset(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	spi_set_drvdata(spi, drm);

	drm_fbdev_generic_setup(drm, 0);

	return 0;
}
static int vbox_drm_thaw(struct drm_device *dev)
{
	int error = 0;

	drm_mode_config_reset(dev);
	drm_helper_resume_force_mode(dev);

	console_lock();
	vbox_fbdev_set_suspend(dev, 0);
	console_unlock();

	return error;
}
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
{
	struct komeda_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL);
	struct drm_device *drm;
	int err;

	if (!kms)
		return ERR_PTR(-ENOMEM);

	drm = &kms->base;
	err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev);
	if (err)
		goto free_kms;

	drm->dev_private = mdev;

	komeda_kms_mode_config_init(kms, mdev);

	err = komeda_kms_add_private_objs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_planes(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = drm_vblank_init(drm, kms->n_crtcs);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_crtcs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = component_bind_all(mdev->dev, kms);
	if (err)
		goto cleanup_mode_config;

	drm_mode_config_reset(drm);

	err = drm_dev_register(drm, 0);
	if (err)
		goto cleanup_mode_config;

	return kms;

cleanup_mode_config:
	drm_mode_config_cleanup(drm);
free_kms:
	kfree(kms);
	return ERR_PTR(err);
}
struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm)
{
	drm_mode_config_reset(drm);

	drm->mode_config.max_width = 8192;
	drm->mode_config.max_height = 8192;

	drm->mode_config.funcs = &sun4i_de_mode_config_funcs;

	return drm_fbdev_cma_init(drm, 32,
				  drm->mode_config.num_crtc,
				  drm->mode_config.num_connector);
}
static int ast_drm_thaw(struct drm_device *dev)
{
	int error = 0;

	ast_post_gpu(dev);

	drm_mode_config_reset(dev);
	drm_modeset_lock_all(dev);
	drm_helper_resume_force_mode(dev);
	drm_modeset_unlock_all(dev);

	console_lock();
	ast_fbdev_set_suspend(dev, 0);
	console_unlock();

	return error;
}
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		if (HAS_PCH_SPLIT(dev))
			ironlake_init_pch_refclk(dev);

		drm_mode_config_reset(dev);
		drm_irq_install(dev);

		/* Resume the modeset for every activated CRTC */
		mutex_lock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		mutex_unlock(&dev->mode_config.mutex);

		if (IS_IRONLAKE_M(dev))
			ironlake_enable_rc6(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	console_lock();
	intel_fbdev_set_suspend(dev, 0);
	console_unlock();

	return error;
}
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	DRM_LOCK(dev);
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		i915_gem_restore_gtt_mappings(dev);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);

		if (HAS_PCH_SPLIT(dev))
			ironlake_init_pch_refclk(dev);

		DRM_UNLOCK(dev);
		lockmgr(&dev->mode_config.mutex, LK_EXCLUSIVE);
		drm_mode_config_reset(dev);
		lockmgr(&dev->mode_config.mutex, LK_RELEASE);

		drm_irq_install(dev);

		lockmgr(&dev->mode_config.mutex, LK_EXCLUSIVE);
		/* Resume the modeset for every activated CRTC */
		drm_helper_resume_force_mode(dev);
		lockmgr(&dev->mode_config.mutex, LK_RELEASE);

		if (IS_IRONLAKE_M(dev))
			ironlake_enable_rc6(dev);
		DRM_LOCK(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;
	DRM_UNLOCK(dev);

	return error;
}
int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
{
	drm_mode_config_init(fsl_dev->drm);

	fsl_dev->drm->mode_config.min_width = 0;
	fsl_dev->drm->mode_config.min_height = 0;
	fsl_dev->drm->mode_config.max_width = 2031;
	fsl_dev->drm->mode_config.max_height = 2047;
	fsl_dev->drm->mode_config.funcs = &fsl_dcu_drm_mode_config_funcs;

	drm_kms_helper_poll_init(fsl_dev->drm);

	fsl_dcu_drm_crtc_create(fsl_dev);
	fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
	fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
	drm_mode_config_reset(fsl_dev->drm);

	return 0;
}
int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
{
	int i;

	drm_mode_config_init(vgdev->ddev);
	vgdev->ddev->mode_config.funcs = (void *)&virtio_gpu_mode_funcs;

	/* modes will be validated against the framebuffer size */
	vgdev->ddev->mode_config.min_width = XRES_MIN;
	vgdev->ddev->mode_config.min_height = YRES_MIN;
	vgdev->ddev->mode_config.max_width = XRES_MAX;
	vgdev->ddev->mode_config.max_height = YRES_MAX;

	for (i = 0 ; i < vgdev->num_scanouts; ++i)
		vgdev_output_init(vgdev, i);

	drm_mode_config_reset(vgdev->ddev);
	return 0;
}
static int __i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		intel_modeset_init_hw(dev);
		drm_mode_config_reset(dev);
		intel_modeset_setup_hw_state(dev, false);
		drm_irq_install(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, 0);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	return error;
}
void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
{
	int i;

	drm_mode_config_init(vgdev->ddev);
	vgdev->ddev->mode_config.quirk_addfb_prefer_host_byte_order = true;
	vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;
	vgdev->ddev->mode_config.helper_private = &virtio_mode_config_helpers;

	/* modes will be validated against the framebuffer size */
	vgdev->ddev->mode_config.min_width = XRES_MIN;
	vgdev->ddev->mode_config.min_height = YRES_MIN;
	vgdev->ddev->mode_config.max_width = XRES_MAX;
	vgdev->ddev->mode_config.max_height = YRES_MAX;

	for (i = 0 ; i < vgdev->num_scanouts; ++i)
		vgdev_output_init(vgdev, i);

	drm_mode_config_reset(vgdev->ddev);
}
static int drv_load(struct drm_device *ddev)
{
	struct platform_device *pdev = to_platform_device(ddev->dev);
	struct ltdc_device *ldev;
	int ret;

	DRM_DEBUG("%s\n", __func__);

	ldev = devm_kzalloc(ddev->dev, sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return -ENOMEM;

	ddev->dev_private = (void *)ldev;

	drm_mode_config_init(ddev);

	/*
	 * Set default maximum width and height; these values are
	 * used to check framebuffer size limits in drm_mode_addfb().
	 */
	ddev->mode_config.min_width = 0;
	ddev->mode_config.min_height = 0;
	ddev->mode_config.max_width = STM_MAX_FB_WIDTH;
	ddev->mode_config.max_height = STM_MAX_FB_HEIGHT;
	ddev->mode_config.funcs = &drv_mode_config_funcs;

	ret = ltdc_load(ddev);
	if (ret)
		goto err;

	drm_mode_config_reset(ddev);
	drm_kms_helper_poll_init(ddev);

	platform_set_drvdata(pdev, ddev);

	return 0;
err:
	drm_mode_config_cleanup(ddev);
	return ret;
}
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
			ironlake_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		intel_modeset_init_hw(dev);
		intel_modeset_setup_hw_state(dev, false);
		drm_mode_config_reset(dev);
		drm_irq_install(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	console_lock();
	intel_fbdev_set_suspend(dev, 0);
	console_unlock();

	return error;
}
static int atmel_hlcdc_dc_load(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	const struct of_device_id *match;
	struct atmel_hlcdc_dc *dc;
	int ret;

	match = of_match_node(atmel_hlcdc_of_match, dev->dev->parent->of_node);
	if (!match) {
		dev_err(&pdev->dev, "invalid compatible string\n");
		return -ENODEV;
	}

	if (!match->data) {
		dev_err(&pdev->dev, "invalid hlcdc description\n");
		return -EINVAL;
	}

	dc = devm_kzalloc(dev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0);
	if (!dc->wq)
		return -ENOMEM;

	init_waitqueue_head(&dc->commit.wait);
	dc->desc = match->data;
	dc->hlcdc = dev_get_drvdata(dev->dev->parent);
	dev->dev_private = dc;

	ret = clk_prepare_enable(dc->hlcdc->periph_clk);
	if (ret) {
		dev_err(dev->dev, "failed to enable periph_clk\n");
		goto err_destroy_wq;
	}

	pm_runtime_enable(dev->dev);

	ret = drm_vblank_init(dev, 1);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto err_periph_clk_disable;
	}

	ret = atmel_hlcdc_dc_modeset_init(dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize mode setting\n");
		goto err_periph_clk_disable;
	}

	drm_mode_config_reset(dev);

	pm_runtime_get_sync(dev->dev);
	ret = drm_irq_install(dev, dc->hlcdc->irq);
	pm_runtime_put_sync(dev->dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to install IRQ handler\n");
		goto err_periph_clk_disable;
	}

	platform_set_drvdata(pdev, dev);

	drm_kms_helper_poll_init(dev);

	/* force connectors detection */
	drm_helper_hpd_irq_event(dev);

	return 0;

err_periph_clk_disable:
	pm_runtime_disable(dev->dev);
	clk_disable_unprepare(dc->hlcdc->periph_clk);

err_destroy_wq:
	destroy_workqueue(dc->wq);

	return ret;
}
static int arcpgu_load(struct drm_device *drm)
{
	struct platform_device *pdev = to_platform_device(drm->dev);
	struct arcpgu_drm_private *arcpgu;
	struct device_node *encoder_node;
	struct resource *res;
	int ret;

	arcpgu = devm_kzalloc(&pdev->dev, sizeof(*arcpgu), GFP_KERNEL);
	if (arcpgu == NULL)
		return -ENOMEM;

	drm->dev_private = arcpgu;

	arcpgu->clk = devm_clk_get(drm->dev, "pxlclk");
	if (IS_ERR(arcpgu->clk))
		return PTR_ERR(arcpgu->clk);

	arcpgu_setup_mode_config(drm);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(arcpgu->regs))
		return PTR_ERR(arcpgu->regs);

	dev_info(drm->dev, "arc_pgu ID: 0x%x\n",
		 arc_pgu_read(arcpgu, ARCPGU_REG_ID));

	/* Get the optional framebuffer memory resource */
	ret = of_reserved_mem_device_init(drm->dev);
	if (ret && ret != -ENODEV)
		return ret;

	if (dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	if (arc_pgu_setup_crtc(drm) < 0)
		return -ENODEV;

	/* find the encoder node and initialize it */
	encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
	if (encoder_node) {
		ret = arcpgu_drm_hdmi_init(drm, encoder_node);
		of_node_put(encoder_node);
		if (ret < 0)
			return ret;
	} else {
		ret = arcpgu_drm_sim_init(drm, NULL);
		if (ret < 0)
			return ret;
	}

	drm_mode_config_reset(drm);
	drm_kms_helper_poll_init(drm);

	arcpgu->fbdev = drm_fbdev_cma_init(drm, 16,
					   drm->mode_config.num_crtc,
					   drm->mode_config.num_connector);
	if (IS_ERR(arcpgu->fbdev)) {
		/* propagate the real error instead of masking it as -ENODEV */
		ret = PTR_ERR(arcpgu->fbdev);
		arcpgu->fbdev = NULL;
		return ret;
	}

	platform_set_drvdata(pdev, arcpgu);
	return 0;
}
static int malidp_bind(struct device *dev)
{
	struct resource *res;
	struct drm_device *drm;
	struct device_node *ep;
	struct malidp_drm *malidp;
	struct malidp_hw_device *hwdev;
	struct platform_device *pdev = to_platform_device(dev);
	/* number of lines for the R, G and B output */
	u8 output_width[MAX_OUTPUT_CHANNELS];
	int ret = 0, i;
	u32 version, out_depth = 0;

	malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL);
	if (!malidp)
		return -ENOMEM;

	hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL);
	if (!hwdev)
		return -ENOMEM;

	/*
	 * copy the associated data from malidp_drm_of_match to avoid
	 * having to keep a reference to the OF node after binding
	 */
	memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
	malidp->dev = hwdev;

	INIT_LIST_HEAD(&malidp->event_list);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hwdev->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hwdev->regs))
		return PTR_ERR(hwdev->regs);

	hwdev->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(hwdev->pclk))
		return PTR_ERR(hwdev->pclk);

	hwdev->aclk = devm_clk_get(dev, "aclk");
	if (IS_ERR(hwdev->aclk))
		return PTR_ERR(hwdev->aclk);

	hwdev->mclk = devm_clk_get(dev, "mclk");
	if (IS_ERR(hwdev->mclk))
		return PTR_ERR(hwdev->mclk);

	hwdev->pxlclk = devm_clk_get(dev, "pxlclk");
	if (IS_ERR(hwdev->pxlclk))
		return PTR_ERR(hwdev->pxlclk);

	/* Get the optional framebuffer memory resource */
	ret = of_reserved_mem_device_init(dev);
	if (ret && ret != -ENODEV)
		return ret;

	drm = drm_dev_alloc(&malidp_driver, dev);
	if (IS_ERR(drm)) {
		ret = PTR_ERR(drm);
		goto alloc_fail;
	}

	/* Enable APB clock in order to get access to the registers */
	clk_prepare_enable(hwdev->pclk);
	/*
	 * Enable AXI clock and main clock so that prefetch can start once
	 * the registers are set
	 */
	clk_prepare_enable(hwdev->aclk);
	clk_prepare_enable(hwdev->mclk);

	ret = hwdev->query_hw(hwdev);
	if (ret) {
		DRM_ERROR("Invalid HW configuration\n");
		goto query_hw_fail;
	}

	version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID);
	DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
		 (version >> 12) & 0xf, (version >> 8) & 0xf);

	/* set the number of lines used for output of RGB data */
	ret = of_property_read_u8_array(dev->of_node,
					"arm,malidp-output-port-lines",
					output_width, MAX_OUTPUT_CHANNELS);
	if (ret)
		goto query_hw_fail;

	for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
		out_depth = (out_depth << 8) | (output_width[i] & 0xf);
	malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);

	drm->dev_private = malidp;
	dev_set_drvdata(dev, drm);
	atomic_set(&malidp->config_valid, 0);
	init_waitqueue_head(&malidp->wq);

	ret = malidp_init(drm);
	if (ret < 0)
		goto init_fail;

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto register_fail;

	/* Set the CRTC's port so that the encoder component can find it */
	ep = of_graph_get_next_endpoint(dev->of_node, NULL);
	if (!ep) {
		ret = -EINVAL;
		goto port_fail;
	}
	malidp->crtc.port = of_get_next_parent(ep);

	ret = component_bind_all(dev, drm);
	if (ret) {
		DRM_ERROR("Failed to bind all components\n");
		goto bind_fail;
	}

	ret = malidp_irq_init(pdev);
	if (ret < 0)
		goto irq_init_fail;

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret < 0) {
		DRM_ERROR("failed to initialise vblank\n");
		goto vblank_fail;
	}

	drm_mode_config_reset(drm);

	malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
					   drm->mode_config.num_connector);
	if (IS_ERR(malidp->fbdev)) {
		ret = PTR_ERR(malidp->fbdev);
		malidp->fbdev = NULL;
		goto fbdev_fail;
	}

	drm_kms_helper_poll_init(drm);
	return 0;

fbdev_fail:
	drm_vblank_cleanup(drm);
vblank_fail:
	malidp_se_irq_fini(drm);
	malidp_de_irq_fini(drm);
irq_init_fail:
	component_unbind_all(dev, drm);
bind_fail:
	of_node_put(malidp->crtc.port);
	malidp->crtc.port = NULL;
port_fail:
	drm_dev_unregister(drm);
register_fail:
	malidp_de_planes_destroy(drm);
	drm_mode_config_cleanup(drm);
init_fail:
	drm->dev_private = NULL;
	dev_set_drvdata(dev, NULL);
query_hw_fail:
	clk_disable_unprepare(hwdev->mclk);
	clk_disable_unprepare(hwdev->aclk);
	clk_disable_unprepare(hwdev->pclk);
	drm_dev_unref(drm);
alloc_fail:
	of_reserved_mem_device_release(dev);

	return ret;
}
int i915_reset(struct drm_device *dev, u8 flags)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool need_display = true;
	int ret;

	if (!i915_try_reset)
		return 0;

	if (!mutex_trylock(&dev->struct_mutex))
		return -EBUSY;

	i915_gem_reset(dev);

	ret = -ENODEV;
	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	} else switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		ret = gen6_do_reset(dev, flags);
		break;
	case 5:
		ret = ironlake_do_reset(dev, flags);
		break;
	case 4:
		ret = i965_do_reset(dev, flags);
		break;
	case 2:
		ret = i8xx_do_reset(dev, flags);
		break;
	}
	dev_priv->last_gpu_reset = get_seconds();
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
		if (HAS_BSD(dev))
			dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
		if (HAS_BLT(dev))
			dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);

		i915_gem_init_ppgtt(dev);

		mutex_unlock(&dev->struct_mutex);
		drm_irq_uninstall(dev);
		drm_mode_config_reset(dev);
		drm_irq_install(dev);
		mutex_lock(&dev->struct_mutex);
	}

	mutex_unlock(&dev->struct_mutex);

	if (need_display) {
		mutex_lock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		mutex_unlock(&dev->mode_config.mutex);
	}

	return 0;
}
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	intel_uncore_early_sanitize(dev);

	intel_uncore_sanitize(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	intel_power_domains_init_hw(dev);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);

		mutex_lock(&dev->struct_mutex);
		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		drm_mode_config_reset(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		intel_resume_hotplug(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	/*
	 * Undo what we did at i915_drm_freeze so the refcount goes back
	 * to the expected level.
	 */
	hsw_enable_package_c8(dev_priv);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_runtime_pm_put(dev_priv);
	return error;
}
int vkms_output_init(struct vkms_device *vkmsdev)
{
	struct vkms_output *output = &vkmsdev->output;
	struct drm_device *dev = &vkmsdev->drm;
	struct drm_connector *connector = &output->connector;
	struct drm_encoder *encoder = &output->encoder;
	struct drm_crtc *crtc = &output->crtc;
	struct drm_plane *primary, *cursor = NULL;
	int ret;

	primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
	if (IS_ERR(primary))
		return PTR_ERR(primary);

	if (enable_cursor) {
		cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
		if (IS_ERR(cursor)) {
			ret = PTR_ERR(cursor);
			goto err_cursor;
		}
	}

	ret = vkms_crtc_init(dev, crtc, primary, cursor);
	if (ret)
		goto err_crtc;

	ret = drm_connector_init(dev, connector, &vkms_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to init connector\n");
		goto err_connector;
	}

	drm_connector_helper_add(connector, &vkms_conn_helper_funcs);

	ret = drm_connector_register(connector);
	if (ret) {
		DRM_ERROR("Failed to register connector\n");
		goto err_connector_register;
	}

	ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
			       DRM_MODE_ENCODER_VIRTUAL, NULL);
	if (ret) {
		DRM_ERROR("Failed to init encoder\n");
		goto err_encoder;
	}
	encoder->possible_crtcs = 1;

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("Failed to attach connector to encoder\n");
		goto err_attach;
	}

	drm_mode_config_reset(dev);

	return 0;

err_attach:
	drm_encoder_cleanup(encoder);

err_encoder:
	drm_connector_unregister(connector);

err_connector_register:
	drm_connector_cleanup(connector);

err_connector:
	drm_crtc_cleanup(crtc);

err_crtc:
	if (enable_cursor)
		drm_plane_cleanup(cursor);

err_cursor:
	drm_plane_cleanup(primary);

	return ret;
}
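Distilled from the init paths above, a minimal sketch of the pattern they share; the foo_* names are hypothetical stand-ins rather than any one driver's API: initialize the mode configuration, create the CRTC/encoder/connector objects, and only then call drm_mode_config_reset() so every object starts from a known software state.

/*
 * Hedged sketch only: foo_modeset_init(), foo_create_pipeline() and
 * foo_mode_config_funcs are hypothetical; the drm_* calls are the ones
 * the drivers above use.
 */
static int foo_modeset_init(struct drm_device *dev)
{
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4096;	/* assumed hardware limit */
	dev->mode_config.max_height = 4096;	/* assumed hardware limit */
	dev->mode_config.funcs = &foo_mode_config_funcs;

	/* Create CRTCs, encoders and connectors (driver-specific). */
	ret = foo_create_pipeline(dev);
	if (ret)
		goto err_cleanup;

	/*
	 * Reset only after every object exists, so each CRTC, plane,
	 * encoder and connector gets an initial software state.
	 */
	drm_mode_config_reset(dev);
	drm_kms_helper_poll_init(dev);

	return 0;

err_cleanup:
	drm_mode_config_cleanup(dev);
	return ret;
}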
static int mxsfb_load(struct drm_device *drm, unsigned long flags)
{
	struct platform_device *pdev = to_platform_device(drm->dev);
	struct mxsfb_drm_private *mxsfb;
	struct resource *res;
	int ret;

	mxsfb = devm_kzalloc(&pdev->dev, sizeof(*mxsfb), GFP_KERNEL);
	if (!mxsfb)
		return -ENOMEM;

	drm->dev_private = mxsfb;
	mxsfb->devdata = &mxsfb_devdata[pdev->id_entry->driver_data];

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mxsfb->base = devm_ioremap_resource(drm->dev, res);
	if (IS_ERR(mxsfb->base))
		return PTR_ERR(mxsfb->base);

	mxsfb->clk = devm_clk_get(drm->dev, NULL);
	if (IS_ERR(mxsfb->clk))
		return PTR_ERR(mxsfb->clk);

	mxsfb->clk_axi = devm_clk_get(drm->dev, "axi");
	if (IS_ERR(mxsfb->clk_axi))
		mxsfb->clk_axi = NULL;

	mxsfb->clk_disp_axi = devm_clk_get(drm->dev, "disp_axi");
	if (IS_ERR(mxsfb->clk_disp_axi))
		mxsfb->clk_disp_axi = NULL;

	ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	pm_runtime_enable(drm->dev);

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(drm->dev, "Failed to initialise vblank\n");
		goto err_vblank;
	}

	/* Modeset init */
	drm_mode_config_init(drm);

	ret = mxsfb_create_output(drm);
	if (ret < 0) {
		dev_err(drm->dev, "Failed to create outputs\n");
		goto err_vblank;
	}

	ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs,
					   mxsfb_formats,
					   ARRAY_SIZE(mxsfb_formats),
					   &mxsfb->connector);
	if (ret < 0) {
		dev_err(drm->dev, "Cannot setup simple display pipe\n");
		goto err_vblank;
	}

	ret = drm_panel_attach(mxsfb->panel, &mxsfb->connector);
	if (ret) {
		dev_err(drm->dev, "Cannot connect panel\n");
		goto err_vblank;
	}

	drm->mode_config.min_width = MXSFB_MIN_XRES;
	drm->mode_config.min_height = MXSFB_MIN_YRES;
	drm->mode_config.max_width = MXSFB_MAX_XRES;
	drm->mode_config.max_height = MXSFB_MAX_YRES;
	drm->mode_config.funcs = &mxsfb_mode_config_funcs;

	drm_mode_config_reset(drm);

	pm_runtime_get_sync(drm->dev);
	ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
	pm_runtime_put_sync(drm->dev);

	if (ret < 0) {
		dev_err(drm->dev, "Failed to install IRQ handler\n");
		goto err_irq;
	}

	drm_kms_helper_poll_init(drm);

	mxsfb->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
					  drm->mode_config.num_connector);
	if (IS_ERR(mxsfb->fbdev)) {
		mxsfb->fbdev = NULL;
		dev_err(drm->dev, "Failed to init FB CMA area\n");
		goto err_cma;
	}

	platform_set_drvdata(pdev, drm);

	drm_helper_hpd_irq_event(drm);

	return 0;

err_cma:
	drm_irq_uninstall(drm);
err_irq:
	drm_panel_detach(mxsfb->panel);
err_vblank:
	pm_runtime_disable(drm->dev);

	return ret;
}
/**
 * cdv_restore_display_registers - restore lost register state
 * @dev: our DRM device
 *
 * Restore register state that was lost during suspend and resume.
 *
 * FIXME: review
 */
static int cdv_restore_display_registers(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_save_area *regs = &dev_priv->regs;
	struct drm_connector *connector;
	u32 temp;

	pci_write_config_byte(dev->pdev, 0xF4, regs->cdv.saveLBB);

	REG_WRITE(DSPCLK_GATE_D, regs->cdv.saveDSPCLK_GATE_D);
	REG_WRITE(RAMCLK_GATE_D, regs->cdv.saveRAMCLK_GATE_D);

	/* BIOS does below anyway */
	REG_WRITE(DPIO_CFG, 0);
	REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);

	temp = REG_READ(DPLL_A);
	if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) {
		REG_WRITE(DPLL_A, temp | DPLL_SYNCLOCK_ENABLE);
		REG_READ(DPLL_A);
	}

	temp = REG_READ(DPLL_B);
	if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) {
		REG_WRITE(DPLL_B, temp | DPLL_SYNCLOCK_ENABLE);
		REG_READ(DPLL_B);
	}

	udelay(500);

	REG_WRITE(DSPFW1, regs->cdv.saveDSPFW[0]);
	REG_WRITE(DSPFW2, regs->cdv.saveDSPFW[1]);
	REG_WRITE(DSPFW3, regs->cdv.saveDSPFW[2]);
	REG_WRITE(DSPFW4, regs->cdv.saveDSPFW[3]);
	REG_WRITE(DSPFW5, regs->cdv.saveDSPFW[4]);
	REG_WRITE(DSPFW6, regs->cdv.saveDSPFW[5]);

	REG_WRITE(DSPARB, regs->cdv.saveDSPARB);
	REG_WRITE(ADPA, regs->cdv.saveADPA);

	REG_WRITE(BLC_PWM_CTL2, regs->saveBLC_PWM_CTL2);
	REG_WRITE(LVDS, regs->cdv.saveLVDS);
	REG_WRITE(PFIT_CONTROL, regs->cdv.savePFIT_CONTROL);
	REG_WRITE(PFIT_PGM_RATIOS, regs->cdv.savePFIT_PGM_RATIOS);
	REG_WRITE(BLC_PWM_CTL, regs->saveBLC_PWM_CTL);
	REG_WRITE(PP_ON_DELAYS, regs->cdv.savePP_ON_DELAYS);
	REG_WRITE(PP_OFF_DELAYS, regs->cdv.savePP_OFF_DELAYS);
	REG_WRITE(PP_CYCLE, regs->cdv.savePP_CYCLE);
	REG_WRITE(PP_CONTROL, regs->cdv.savePP_CONTROL);

	REG_WRITE(VGACNTRL, regs->cdv.saveVGACNTRL);

	REG_WRITE(PSB_INT_ENABLE_R, regs->cdv.saveIER);
	REG_WRITE(PSB_INT_MASK_R, regs->cdv.saveIMR);

	/* Fix arbitration bug */
	cdv_errata(dev);

	drm_mode_config_reset(dev);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
		connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

	/* Resume the modeset for every activated CRTC */
	drm_helper_resume_force_mode(dev);
	return 0;
}
static int pl111_modeset_init(struct drm_device *dev)
{
	struct drm_mode_config *mode_config;
	struct pl111_drm_dev_private *priv = dev->dev_private;
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	int ret = 0;

	drm_mode_config_init(dev);
	mode_config = &dev->mode_config;
	mode_config->funcs = &mode_config_funcs;
	mode_config->min_width = 1;
	mode_config->max_width = 1024;
	mode_config->min_height = 1;
	mode_config->max_height = 768;

	ret = drm_of_find_panel_or_bridge(dev->dev->of_node,
					  0, 0, &panel, &bridge);
	if (ret && ret != -ENODEV)
		return ret;
	if (panel) {
		bridge = drm_panel_bridge_add(panel,
					      DRM_MODE_CONNECTOR_Unknown);
		if (IS_ERR(bridge)) {
			ret = PTR_ERR(bridge);
			goto out_config;
		}
		/*
		 * TODO: when we are using a different bridge than a panel
		 * (such as a dumb VGA connector) we need to devise a different
		 * method to get the connector out of the bridge.
		 */
	}

	ret = pl111_display_init(dev);
	if (ret != 0) {
		dev_err(dev->dev, "Failed to init display\n");
		goto out_bridge;
	}

	ret = drm_simple_display_pipe_attach_bridge(&priv->pipe, bridge);
	if (ret)
		goto out_bridge;	/* don't leak the panel bridge on error */

	priv->bridge = bridge;
	priv->panel = panel;
	/* Only a panel provides a connector here; see the TODO above. */
	if (panel)
		priv->connector = panel->connector;

	ret = drm_vblank_init(dev, 1);
	if (ret != 0) {
		dev_err(dev->dev, "Failed to init vblank\n");
		goto out_bridge;
	}

	drm_mode_config_reset(dev);

	drm_fb_cma_fbdev_init(dev, 32, 0);

	drm_kms_helper_poll_init(dev);

	goto finish;

out_bridge:
	if (panel)
		drm_panel_bridge_remove(bridge);
out_config:
	drm_mode_config_cleanup(dev);
finish:
	return ret;
}
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	disable_rpm_wakeref_asserts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
	mutex_unlock(&dev->struct_mutex);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}
static int omap_modeset_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_dss_device *dssdev = NULL;
	int num_ovls = priv->dispc_ops->get_num_ovls();
	int num_mgrs = priv->dispc_ops->get_num_mgrs();
	int num_crtcs, crtc_idx, plane_idx;
	int ret;
	u32 plane_crtc_mask;

	drm_mode_config_init(dev);

	ret = omap_modeset_init_properties(dev);
	if (ret < 0)
		return ret;

	/*
	 * This function creates exactly one connector, encoder, crtc,
	 * and primary plane per each connected dss-device. Each
	 * connector->encoder->crtc chain is expected to be separate
	 * and each crtc is connected to a single dss-channel. If the
	 * configuration does not match the expectations or exceeds
	 * the available resources, the configuration is rejected.
	 */
	num_crtcs = 0;
	for_each_dss_dev(dssdev)
		if (omapdss_device_is_connected(dssdev))
			num_crtcs++;

	if (num_crtcs > num_mgrs || num_crtcs > num_ovls ||
	    num_crtcs > ARRAY_SIZE(priv->crtcs) ||
	    num_crtcs > ARRAY_SIZE(priv->planes) ||
	    num_crtcs > ARRAY_SIZE(priv->encoders) ||
	    num_crtcs > ARRAY_SIZE(priv->connectors)) {
		dev_err(dev->dev, "%s(): Too many connected displays\n",
			__func__);
		return -EINVAL;
	}

	/* All planes can be put to any CRTC */
	plane_crtc_mask = (1 << num_crtcs) - 1;

	dssdev = NULL;

	crtc_idx = 0;
	plane_idx = 0;
	for_each_dss_dev(dssdev) {
		struct drm_connector *connector;
		struct drm_encoder *encoder;
		struct drm_plane *plane;
		struct drm_crtc *crtc;

		if (!omapdss_device_is_connected(dssdev))
			continue;

		encoder = omap_encoder_init(dev, dssdev);
		if (!encoder)
			return -ENOMEM;

		connector = omap_connector_init(dev,
				get_connector_type(dssdev), dssdev, encoder);
		if (!connector)
			return -ENOMEM;

		plane = omap_plane_init(dev, plane_idx, DRM_PLANE_TYPE_PRIMARY,
					plane_crtc_mask);
		if (IS_ERR(plane))
			return PTR_ERR(plane);

		crtc = omap_crtc_init(dev, plane, dssdev);
		if (IS_ERR(crtc))
			return PTR_ERR(crtc);

		drm_mode_connector_attach_encoder(connector, encoder);
		encoder->possible_crtcs = (1 << crtc_idx);

		priv->crtcs[priv->num_crtcs++] = crtc;
		priv->planes[priv->num_planes++] = plane;
		priv->encoders[priv->num_encoders++] = encoder;
		priv->connectors[priv->num_connectors++] = connector;

		plane_idx++;
		crtc_idx++;
	}

	/*
	 * Create normal planes for the remaining overlays:
	 */
	for (; plane_idx < num_ovls; plane_idx++) {
		struct drm_plane *plane;

		if (WARN_ON(priv->num_planes >= ARRAY_SIZE(priv->planes)))
			return -EINVAL;

		plane = omap_plane_init(dev, plane_idx, DRM_PLANE_TYPE_OVERLAY,
					plane_crtc_mask);
		if (IS_ERR(plane))
			return PTR_ERR(plane);

		priv->planes[priv->num_planes++] = plane;
	}

	DBG("registered %d planes, %d crtcs, %d encoders and %d connectors\n",
	    priv->num_planes, priv->num_crtcs, priv->num_encoders,
	    priv->num_connectors);

	dev->mode_config.min_width = 8;
	dev->mode_config.min_height = 2;

	/*
	 * Note: eventually we will need some cpu_is_omapXYZ() type stuff
	 * here to fill in these limits properly on different OMAP
	 * generations.
	 */
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;

	dev->mode_config.funcs = &omap_mode_config_funcs;
	dev->mode_config.helper_private = &omap_mode_config_helper_funcs;

	drm_mode_config_reset(dev);

	omap_drm_irq_install(dev);

	return 0;
}
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 * @flags: reset domains
 *
 * Reset the chip. Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev, u8 flags)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/*
	 * We really should only reset the display subsystem if we actually
	 * need to
	 */
	bool need_display = true;
	int ret;

	if (!i915_try_reset)
		return 0;

	if (!mutex_trylock(&dev->struct_mutex))
		return -EBUSY;

	i915_gem_reset(dev);

	ret = -ENODEV;
	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	} else switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		ret = gen6_do_reset(dev, flags);
		break;
	case 5:
		ret = ironlake_do_reset(dev, flags);
		break;
	case 4:
		ret = i965_do_reset(dev, flags);
		break;
	case 2:
		ret = i8xx_do_reset(dev, flags);
		break;
	}
	dev_priv->last_gpu_reset = get_seconds();
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		dev_priv->mm.suspended = 0;

		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
		if (HAS_BSD(dev))
			dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
		if (HAS_BLT(dev))
			dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);

		mutex_unlock(&dev->struct_mutex);
		drm_irq_uninstall(dev);
		drm_mode_config_reset(dev);
		drm_irq_install(dev);
		mutex_lock(&dev->struct_mutex);
	}

	mutex_unlock(&dev->struct_mutex);

	/*
	 * Perform a full modeset: on later generations, e.g. Ironlake, we
	 * may need to retrain the display link and cannot just restore the
	 * register values.
	 */
	if (need_display) {
		mutex_lock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		mutex_unlock(&dev->mode_config.mutex);
	}

	return 0;
}
int i915_reset(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/*
	 * We really should only reset the display subsystem if we actually
	 * need to
	 */
	bool need_display = true;
	int ret;

	if (!i915_try_reset)
		return (0);

	if (!sx_try_xlock(&dev->dev_struct_lock))
		return (-EBUSY);

	i915_gem_reset(dev);

	ret = -ENODEV;
	if (time_second - dev_priv->last_gpu_reset < 5) {
		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
	} else
		ret = intel_gpu_reset(dev);

	dev_priv->last_gpu_reset = time_second;
	if (ret) {
		DRM_ERROR("Failed to reset chip.\n");
		DRM_UNLOCK(dev);
		return (ret);
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->mm.suspended) {
		dev_priv->mm.suspended = 0;

		i915_gem_init_swizzling(dev);

		dev_priv->rings[RCS].init(&dev_priv->rings[RCS]);
		if (HAS_BSD(dev))
			dev_priv->rings[VCS].init(&dev_priv->rings[VCS]);
		if (HAS_BLT(dev))
			dev_priv->rings[BCS].init(&dev_priv->rings[BCS]);

		i915_gem_context_init(dev);
		i915_gem_init_ppgtt(dev);

		drm_irq_uninstall(dev);
		drm_mode_config_reset(dev);
		DRM_UNLOCK(dev);
		drm_irq_install(dev);
		DRM_LOCK(dev);
	}
	DRM_UNLOCK(dev);

	if (need_display) {
		sx_xlock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		sx_xunlock(&dev->mode_config.mutex);
	}

	return (0);
}
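The thaw and reset paths above reduce to a smaller shared pattern; a hedged sketch with hypothetical foo_drm_thaw() and foo_hw_repost() stand-ins: re-post the hardware, let drm_mode_config_reset() rewrite the software state to match the freshly reset hardware, then use the legacy drm_helper_resume_force_mode() helper (as these kernels do) to re-apply the last mode on every active CRTC.

static int foo_drm_thaw(struct drm_device *dev)
{
	/*
	 * foo_hw_repost() is a hypothetical stand-in for the driver's
	 * hardware re-initialization, e.g. ast_post_gpu() above.
	 */
	foo_hw_repost(dev);

	/* Reset software state first, so the forced modeset starts clean. */
	drm_mode_config_reset(dev);

	drm_modeset_lock_all(dev);
	drm_helper_resume_force_mode(dev);
	drm_modeset_unlock_all(dev);

	return 0;
}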