void mali_driver_exit(void)
{
	MALI_DEBUG_PRINT(2, ("\n"));
	MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n", _MALI_API_VERSION));

#if MALI_LICENSE_IS_GPL && defined(CONFIG_MALI_UMP_R3P1_DEBUG_MEM_USAGE_FOR_OOM)
	unregister_oom_notifier(&mali_oom_notifier);
#endif

	/* No need to terminate sysfs, this will be done automatically along with device termination */

#if MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
	_mali_internal_profiling_term();
#endif

	mali_terminate_subsystems();
	mali_osk_low_level_mem_term();
	mali_platform_deinit();
	terminate_kernel_device();
	_mali_dev_platform_unregister();

#if MALI_LICENSE_IS_GPL
	/* @@@@ clean up the work queues! This should not be terminated here,
	 * since it isn't inited in the function above! */
	flush_workqueue(mali_wq);
	destroy_workqueue(mali_wq);
	mali_wq = NULL;
#endif

	MALI_PRINT(("Mali device driver unloaded\n"));
}
/**
 * cmm_init - Module initialization
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int cmm_init(void)
{
	int rc = -ENOMEM;

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return -EOPNOTSUPP;

	if ((rc = register_oom_notifier(&cmm_oom_nb)) < 0)
		return rc;

	if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
		goto out_oom_notifier;

	if ((rc = cmm_sysfs_register(&cmm_sysdev)))
		goto out_reboot_notifier;

	if (cmm_disabled)
		return rc;

	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
	if (IS_ERR(cmm_thread_ptr)) {
		rc = PTR_ERR(cmm_thread_ptr);
		goto out_unregister_sysfs;
	}

	return rc;

out_unregister_sysfs:
	cmm_unregister_sysfs(&cmm_sysdev);
out_reboot_notifier:
	unregister_reboot_notifier(&cmm_reboot_nb);
out_oom_notifier:
	unregister_oom_notifier(&cmm_oom_nb);
	return rc;
}
/**
 * cmm_exit - Module exit
 *
 * Return value:
 *	nothing
 **/
static void cmm_exit(void)
{
	if (cmm_thread_ptr)
		kthread_stop(cmm_thread_ptr);
	unregister_oom_notifier(&cmm_oom_nb);
	cmm_free_pages(loaned_pages);
	cmm_unregister_sysfs(&cmm_sysdev);
}
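/*
 * Illustrative sketch (not taken from any of the drivers quoted here): the
 * init/exit pairs above register and unregister notifier_blocks such as
 * cmm_oom_nb, but never show the callback side of the OOM-notifier protocol.
 * A handler typically looks like the sketch below: it receives the opaque
 * parm argument as an unsigned long * running total, adds the number of
 * pages it managed to free, and returns NOTIFY_OK. The names
 * example_oom_notify, example_oom_nb and example_release_pages() are
 * hypothetical stand-ins, not symbols from the code in this file.
 */
#include <linux/notifier.h>
#include <linux/oom.h>

static unsigned long example_release_pages(void);	/* hypothetical helper */

static int example_oom_notify(struct notifier_block *self,
			      unsigned long dummy, void *parm)
{
	unsigned long *freed = parm;

	/* Give memory back to the kernel and report how many pages were freed. */
	*freed += example_release_pages();
	return NOTIFY_OK;
}

static struct notifier_block example_oom_nb = {
	.notifier_call = example_oom_notify,
	/* .priority left at 0; the drivers above set an explicit priority when needed */
};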
static int virtballoon_probe(struct virtio_device *vdev)
{
	struct virtio_balloon *vb;
	int err;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
	if (!vb) {
		err = -ENOMEM;
		goto out;
	}

	vb->num_pages = 0;
	mutex_init(&vb->balloon_lock);
	init_waitqueue_head(&vb->config_change);
	init_waitqueue_head(&vb->acked);
	vb->vdev = vdev;
	vb->need_stats_update = 0;

	balloon_devinfo_init(&vb->vb_dev_info);
#ifdef CONFIG_BALLOON_COMPACTION
	vb->vb_dev_info.migratepage = virtballoon_migratepage;
#endif

	err = init_vqs(vb);
	if (err)
		goto out_free_vb;

	vb->nb.notifier_call = virtballoon_oom_notify;
	vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
	err = register_oom_notifier(&vb->nb);
	if (err < 0)
		goto out_oom_notify;

	virtio_device_ready(vdev);

	vb->thread = kthread_run(balloon, vb, "vballoon");
	if (IS_ERR(vb->thread)) {
		err = PTR_ERR(vb->thread);
		goto out_del_vqs;
	}

	return 0;

out_del_vqs:
	unregister_oom_notifier(&vb->nb);
out_oom_notify:
	vdev->config->del_vqs(vdev);
out_free_vb:
	kfree(vb);
out:
	return err;
}
static void virtballoon_remove(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;

	unregister_oom_notifier(&vb->nb);
	kthread_stop(vb->thread);
	remove_common(vb);
	kfree(vb);
}
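/*
 * Sketch of the handler virtballoon_probe() wires into vb->nb above. The
 * actual body lives elsewhere in the balloon driver and is not part of this
 * section, so treat this as an illustrative reconstruction: leak_balloon(),
 * update_balloon_size() and oom_pages are assumed helpers/parameters of that
 * driver. On an OOM notification the balloon deflates by a bounded number of
 * pages and credits them to the *freed running total before returning
 * NOTIFY_OK.
 */
static int virtballoon_oom_notify(struct notifier_block *self,
				  unsigned long dummy, void *parm)
{
	struct virtio_balloon *vb = container_of(self, struct virtio_balloon, nb);
	unsigned long *freed = parm;
	unsigned int num_freed_pages;

	num_freed_pages = leak_balloon(vb, oom_pages);	/* deflate the balloon */
	update_balloon_size(vb);			/* report the new size to the host */
	*freed += num_freed_pages;

	return NOTIFY_OK;
}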
/**
 * cmm_exit - Module exit
 *
 * Return value:
 *	nothing
 **/
static void cmm_exit(void)
{
	if (cmm_thread_ptr)
		kthread_stop(cmm_thread_ptr);
	unregister_oom_notifier(&cmm_oom_nb);
	unregister_reboot_notifier(&cmm_reboot_nb);
	unregister_memory_notifier(&cmm_mem_nb);
	unregister_memory_isolate_notifier(&cmm_mem_isolate_nb);
	cmm_free_pages(loaned_pages);
	cmm_unregister_sysfs(&cmm_dev);
}
static void cmm_exit(void)
{
	kthread_stop(cmm_thread_ptr);
	unregister_oom_notifier(&cmm_oom_nb);
	cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
	cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
#ifdef CONFIG_CMM_PROC
	unregister_sysctl_table(cmm_sysctl_header);
#endif
#ifdef CONFIG_CMM_IUCV
	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
}
static void virtballoon_remove(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;

	unregister_oom_notifier(&vb->nb);

	spin_lock_irq(&vb->stop_update_lock);
	vb->stop_update = true;
	spin_unlock_irq(&vb->stop_update_lock);
	cancel_work_sync(&vb->update_balloon_size_work);
	cancel_work_sync(&vb->update_balloon_stats_work);

	remove_common(vb);
#ifdef CONFIG_BALLOON_COMPACTION
	if (vb->vb_dev_info.inode)
		iput(vb->vb_dev_info.inode);
	kern_unmount(balloon_mnt);
#endif
	kfree(vb);
}
static int cmm_init(void)
{
	int rc = -ENOMEM;

#ifdef CONFIG_CMM_PROC
	cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
	if (!cmm_sysctl_header)
		goto out;
#endif
#ifdef CONFIG_CMM_IUCV
	rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
	if (rc < 0)
		goto out_smsg;
#endif
	rc = register_oom_notifier(&cmm_oom_nb);
	if (rc < 0)
		goto out_oom_notify;
	init_waitqueue_head(&cmm_thread_wait);
	init_timer(&cmm_timer);
	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
	rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
	if (!rc)
		goto out;
	/*
	 * kthread_create failed. undo all the stuff from above again.
	 */
	unregister_oom_notifier(&cmm_oom_nb);

out_oom_notify:
#ifdef CONFIG_CMM_IUCV
	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
out_smsg:
#endif
#ifdef CONFIG_CMM_PROC
	unregister_sysctl_table(cmm_sysctl_header);
#endif
out:
	return rc;
}
static int cmm_init(void)
{
	int rc = -ENOMEM;

#ifdef CONFIG_CMM_PROC
	cmm_sysctl_header = register_sysctl_table(cmm_dir_table, 1);
	if (!cmm_sysctl_header)
		goto out;
#endif
#ifdef CONFIG_CMM_IUCV
	rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
	if (rc < 0)
		goto out_smsg;
#endif
	rc = register_oom_notifier(&cmm_oom_nb);
	if (rc < 0)
		goto out_oom_notify;
	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
	rc = IS_ERR(cmm_thread_ptr) ? PTR_ERR(cmm_thread_ptr) : 0;
	if (rc)
		goto out_kthread;
	return 0;

out_kthread:
	unregister_oom_notifier(&cmm_oom_nb);
out_oom_notify:
#ifdef CONFIG_CMM_IUCV
	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
out_smsg:
#endif
#ifdef CONFIG_CMM_PROC
	unregister_sysctl_table(cmm_sysctl_header);
out:
#endif
	del_timer_sync(&cmm_timer);
	return rc;
}
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_fbdev_fini(dev);

	drm_vblank_cleanup(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_modeset_cleanup(dev);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
			kfree(dev_priv->vbt.child_dev);
			dev_priv->vbt.child_dev = NULL;
			dev_priv->vbt.child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_work_sync(&dev_priv->gpu_error.work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		mutex_unlock(&dev->struct_mutex);
		i915_gem_cleanup_stolen(dev);
	}

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->dp_wq);
	destroy_workqueue(dev_priv->wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev_priv);

	return 0;
}
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
		return -ENODEV;
	}

	/* UMS needs agp support. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
		return -EINVAL;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	gpu_perf_dev_priv = (void *)dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	if (i915_start_vgt(dev->pdev))
		i915_host_mediate = true;
	printk("i915_start_vgt: %s\n", i915_host_mediate ? "success" : "fail");

	i915_check_vgt(dev_priv);
	if (USES_VGT(dev))
		i915.enable_fbc = 0;

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_regs;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* WARNING: Apparently we must kick fbdev drivers before vgacon,
		 * otherwise the vga fbdev driver falls over. */
		ret = i915_kick_out_firmware_fb(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
			goto out_gtt;
		}

		ret = i915_kick_out_vgacon(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting VGA console\n");
			goto out_gtt;
		}
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->dp_wq == NULL) {
		DRM_ERROR("Failed to create our dp workqueue.\n");
		ret = -ENOMEM;
		goto out_freewq;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_device_info_runtime_init(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_power_well;
		}
#ifdef DRM_I915_VGT_SUPPORT
		if (USES_VGT(dev)) {
			/*
			 * Tell VGT that we have a valid surface to show
			 * after modesetting. We don't distinguish Dom0 and
			 * a Linux guest here; the PVINFO write handler will
			 * handle this.
			 */
			I915_WRITE(vgt_info_off(display_ready), 1);
		}
#endif
	}

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->dp_wq);
out_freewq:
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_regs:
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
	kfree(dev_priv);
	return ret;
}
static int virtballoon_probe(struct virtio_device *vdev)
{
	struct virtio_balloon *vb;
	int err;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
	if (!vb) {
		err = -ENOMEM;
		goto out;
	}

	INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
	INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
	spin_lock_init(&vb->stop_update_lock);
	vb->stop_update = false;
	vb->num_pages = 0;
	mutex_init(&vb->balloon_lock);
	init_waitqueue_head(&vb->acked);
	vb->vdev = vdev;

	balloon_devinfo_init(&vb->vb_dev_info);

	err = init_vqs(vb);
	if (err)
		goto out_free_vb;

	vb->nb.notifier_call = virtballoon_oom_notify;
	vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
	err = register_oom_notifier(&vb->nb);
	if (err < 0)
		goto out_del_vqs;

#ifdef CONFIG_BALLOON_COMPACTION
	balloon_mnt = kern_mount(&balloon_fs);
	if (IS_ERR(balloon_mnt)) {
		err = PTR_ERR(balloon_mnt);
		unregister_oom_notifier(&vb->nb);
		goto out_del_vqs;
	}

	vb->vb_dev_info.migratepage = virtballoon_migratepage;
	vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
	if (IS_ERR(vb->vb_dev_info.inode)) {
		err = PTR_ERR(vb->vb_dev_info.inode);
		kern_unmount(balloon_mnt);
		unregister_oom_notifier(&vb->nb);
		vb->vb_dev_info.inode = NULL;
		goto out_del_vqs;
	}
	vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
#endif

	virtio_device_ready(vdev);

	if (towards_target(vb))
		virtballoon_changed(vdev);
	return 0;

out_del_vqs:
	vdev->config->del_vqs(vdev);
out_free_vb:
	kfree(vb);
out:
	return err;
}
void oom_exit(void)
{
	unregister_oom_notifier(&mc_oom_nb);
}