static void hsic_pm_runtime_start(struct work_struct *work) { struct link_pm_data *pm_data = container_of(work, struct link_pm_data, hsic_pm_start.work); struct usb_device *usbdev = pm_data->usb_ld->usbdev; struct device *dev, *ppdev; if (!pm_data->usb_ld->if_usb_connected) { MIF_DEBUG("disconnect status, ignore\n"); return; } dev = &usbdev->dev; /* wait interface driver resumming */ if (dev->power.runtime_status == RPM_SUSPENDED) { MIF_ERR("suspended yet, delayed work\n"); queue_delayed_work(pm_data->wq, &pm_data->hsic_pm_start, msecs_to_jiffies(10)); return; } if (usbdev && dev->parent) { MIF_DEBUG("rpm_status: %d\n", dev->power.runtime_status); ppdev = dev->parent->parent; pm_runtime_set_autosuspend_delay(&usbdev->dev, 200); pm_runtime_allow(dev); pm_runtime_allow(ppdev); pm_data->resume_requested = false; pm_data->resume_retry_cnt = 0; pm_data->rpm_suspending_cnt = 0; } }
/*
 * usbsvn_runtime_start() - one-shot work that enables runtime PM on the
 * usbsvn USB device and its grandparent (EHCI), once after boot.
 *
 * Fix: the original computed dev = &svn->usbdev->dev before the
 * "if (svn->usbdev && ...)" NULL test; test first, then dereference.
 */
static void usbsvn_runtime_start(struct work_struct *work)
{
    struct usbsvn *svn =
        container_of(work, struct usbsvn, pm_runtime_work.work);
    struct device *dev, *ppdev;

    /* NULL-check before taking &svn->usbdev->dev. */
    if (!svn->usbdev)
        return;

    dev = &svn->usbdev->dev;
    if (dev->parent) {
        ppdev = dev->parent->parent;
        /*enable runtime feature - once after boot*/
        pm_runtime_allow(dev);
        dev_dbg(dev, "usbsvn Runtime PM Start!!\n");
        pm_runtime_allow(ppdev); /*ehci*/
    }
}
/*
 * request_autopm_lock() - take (status != 0) or release (status == 0) a
 * refcounted runtime-PM "forbid" lock on the diag bridge USB device.
 *
 * On the first take, a runtime-PM reference is taken and autosuspend is
 * forbidden; on the last release, autosuspend is allowed again and the
 * reference dropped.  The count lives in dev->pmlock_cnt.
 *
 * NOTE(review): the atomic_read()-then-atomic_inc() pairs are not a single
 * atomic operation, so two concurrent callers could both observe count 0 —
 * confirm callers are serialized.
 */
void request_autopm_lock(int status)
{
    struct diag_bridge *dev = __dev;

    /* Nothing to do when the bridge or its USB device is gone. */
    if (!dev || !dev->udev)
        return;

    pr_info("%s: set runtime pm lock : %d\n", __func__, status);

    if (status) {
        if (!atomic_read(&dev->pmlock_cnt)) {
            /* First taker: pin the device and forbid autosuspend. */
            atomic_inc(&dev->pmlock_cnt);
            pr_info("get lock\n");
            pm_runtime_get(&dev->udev->dev);
            pm_runtime_forbid(&dev->udev->dev);
        } else
            atomic_inc(&dev->pmlock_cnt);
    } else {
        if (!atomic_read(&dev->pmlock_cnt))
            pr_info("unbalanced release\n");
        else if (atomic_dec_and_test(&dev->pmlock_cnt)) {
            /* Last release: re-allow autosuspend, drop our reference. */
            pr_info("release lock\n");
            pm_runtime_allow(&dev->udev->dev);
            pm_runtime_put(&dev->udev->dev);
        }
    }
}
static int intel_lpss_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct intel_lpss_platform_info *info; int ret; ret = pcim_enable_device(pdev); if (ret) return ret; info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->mem = &pdev->resource[0]; info->irq = pdev->irq; /* Probably it is enough to set this for iDMA capable devices only */ pci_set_master(pdev); ret = intel_lpss_probe(&pdev->dev, info); if (ret) return ret; pm_runtime_put(&pdev->dev); pm_runtime_allow(&pdev->dev); return 0; }
static void smdhsic_pm_runtime_start(struct work_struct *work) { if (g_usbdev.usbdev) { pr_info("%s(udev:0x%p)\n", __func__, g_usbdev.usbdev); pm_runtime_allow(&g_usbdev.usbdev->dev); } }
/*
 * serial_omap_pm() - uart_ops .pm hook for the OMAP UART.
 *
 * Programs the sleep-mode IER bit for the requested power state.  EFR_ECB
 * must be set (via config mode B) before UART_IERX_SLEEP is writable, and
 * the original EFR value is restored afterwards — the register sequence
 * below is order-sensitive.
 */
static void serial_omap_pm(struct uart_port *port, unsigned int state,
           unsigned int oldstate)
{
    struct uart_omap_port *up = (struct uart_omap_port *)port;
    unsigned char efr;

    dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->port.line);

    /* Keep the device powered while we touch its registers. */
    pm_runtime_get_sync(&up->pdev->dev);

    /* Enter config mode B so the enhanced-feature bit is writable. */
    serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
    efr = serial_in(up, UART_EFR);
    serial_out(up, UART_EFR, efr | UART_EFR_ECB);
    serial_out(up, UART_LCR, 0);

    /* state != 0 means a low-power state: enable sleep mode. */
    serial_out(up, UART_IER, (state != 0) ? UART_IERX_SLEEP : 0);

    /* Restore the saved EFR value. */
    serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
    serial_out(up, UART_EFR, efr);
    serial_out(up, UART_LCR, 0);

    if (!device_may_wakeup(&up->pdev->dev)) {
        /* Without wakeup support, forbid runtime PM while active. */
        if (!state)
            pm_runtime_forbid(&up->pdev->dev);
        else
            pm_runtime_allow(&up->pdev->dev);
    }

    pm_runtime_put(&up->pdev->dev);
}
/*
 * dwc3_remove() - platform remove: undo the resource-start offset applied
 * at probe time, tear down debugfs/mode/core/ULPI, then shut runtime PM
 * down and free the event and scratch buffers.
 */
static int dwc3_remove(struct platform_device *pdev)
{
    struct dwc3 *dwc = platform_get_drvdata(pdev);
    struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

    /* Make sure the hardware is awake for teardown. */
    pm_runtime_get_sync(&pdev->dev);

    /*
     * restore res->start back to its original value so that, in case the
     * probe is deferred, we don't end up getting error in request the
     * memory region the next time probe is called.
     */
    res->start -= DWC3_GLOBALS_REGS_START;

    dwc3_debugfs_exit(dwc);
    dwc3_core_exit_mode(dwc);
    dwc3_core_exit(dwc);
    dwc3_ulpi_exit(dwc);

    /* Balance the get_sync above, return control to the PM core, then
     * disable runtime PM entirely. */
    pm_runtime_put_sync(&pdev->dev);
    pm_runtime_allow(&pdev->dev);
    pm_runtime_disable(&pdev->dev);

    dwc3_free_event_buffers(dwc);
    dwc3_free_scratch_buffers(dwc);

    return 0;
}
/*
 * store_autosuspend() - sysfs store hook toggling USB autosuspend for the
 * link device.  Writing "on" allows runtime PM on the USB device, "off"
 * forbids it; any other input only updates nothing and returns count.
 *
 * NOTE(review): the log format string ends with a stray "'" — looks like
 * a typo, but it is runtime text so it is left untouched here.
 */
static ssize_t store_autosuspend(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
    struct miscdevice *miscdev = dev_get_drvdata(dev);
    struct link_pm_data *pm_data =
        container_of(miscdev, struct link_pm_data, miscdev);
    struct usb_link_device *usb_ld = pm_data->usb_ld;
    struct task_struct *task = get_current();
    char taskname[TASK_COMM_LEN];

    /* Log which task requested the change. */
    mif_info("autosuspend: %s: %s(%d)'\n", buf,
        get_task_comm(taskname, task), task->pid);

    if (!strncmp(buf, "on", 2)) {
        pm_data->autosuspend = true;
        if (usb_ld->usbdev)
            pm_runtime_allow(&usb_ld->usbdev->dev);
    } else if (!strncmp(buf, "off", 3)) {
        pm_data->autosuspend = false;
        if (usb_ld->usbdev)
            pm_runtime_forbid(&usb_ld->usbdev->dev);
    }

    return count;
}
static void link_pm_runtime_start(struct work_struct *work) { struct link_pm_data *pm_data = container_of(work, struct link_pm_data, link_pm_start.work); struct usb_device *usbdev = pm_data->usb_ld->usbdev; struct device *dev, *ppdev; struct link_device *ld = &pm_data->usb_ld->ld; if (!pm_data->usb_ld->if_usb_connected || pm_data->usb_ld->ld.com_state == COM_NONE) { mif_debug("disconnect status, ignore\n"); return; } dev = &pm_data->usb_ld->usbdev->dev; /* wait interface driver resumming */ if (dev->power.runtime_status == RPM_SUSPENDED) { if (pm_data->usb_ld->ld.com_state != COM_ONLINE) { mif_info("com_state is not online (%d)\n", pm_data->usb_ld->ld.com_state); return; } mif_info("suspended yet, delayed work, com_state(%d)\n", pm_data->usb_ld->ld.com_state); queue_delayed_work(pm_data->wq, &pm_data->link_pm_start, msecs_to_jiffies(20)); return; } if (pm_data->usb_ld->usbdev && dev->parent) { mif_info("rpm_status: %d\n", dev->power.runtime_status); usb_set_autosuspend_delay(usbdev, 200); ppdev = dev->parent->parent; pm_runtime_allow(dev); pm_runtime_allow(ppdev);/*ehci*/ pm_data->link_pm_active = true; pm_data->resume_requested = false; pm_data->link_reconnect_cnt = 2; pm_data->resume_retry_cnt = 0; /* retry prvious link tx q */ queue_delayed_work(ld->tx_wq, &ld->tx_delayed_work, 0); } }
/*
 * store_ehci_power() - sysfs store hook powering the S5P EHCI controller
 * on or off.  Writing 0 removes the HCD and shuts the PHY down; writing
 * non-zero (re-)adds the HCD and allows runtime PM.
 */
static ssize_t store_ehci_power(struct device *dev,
            struct device_attribute *attr,
            const char *buf, size_t count)
{
    struct platform_device *pdev = to_platform_device(dev);
    struct usb_hcd *hcd = dev_get_drvdata(dev);
    struct s5p_ehci_hcd *s5p_ehci = to_s5p_ehci(hcd);
    int power_on;
    int irq;
    int retval;

    if (sscanf(buf, "%d", &power_on) != 1)
        return -EINVAL;

    device_lock(dev);

    if (!power_on && s5p_ehci->power_on) {
        dev_info(dev, "EHCI turn off\n");

        /* Keep the controller awake while it is torn down. */
        pm_runtime_forbid(dev);
        s5p_ehci->power_on = 0;
        usb_remove_hcd(hcd);

        if (s5p_ehci->phy) {
            /* Shutdown PHY only if it wasn't shutdown before */
            if (!s5p_ehci->post_lpa_resume)
                usb_phy_shutdown(s5p_ehci->phy);
        } else if (s5p_ehci->pdata->phy_exit) {
            s5p_ehci->pdata->phy_exit(pdev, USB_PHY_TYPE_HOST);
        }
    } else if (power_on) {
        dev_info(dev, "EHCI turn on\n");

        if (s5p_ehci->power_on) {
            /* Already on: tear down first, re-add below. */
            pm_runtime_forbid(dev);
            usb_remove_hcd(hcd);
        } else {
            s5p_ehci_phy_init(pdev);
        }

        irq = platform_get_irq(pdev, 0);
        retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
        if (retval < 0) {
            dev_err(dev, "Power On Fail\n");
            goto exit;
        }

        /*
         * EHCI root hubs are expected to handle remote wakeup.
         * So, wakeup flag init defaults for root hubs.
         */
        device_wakeup_enable(&hcd->self.root_hub->dev);

        s5p_ehci->power_on = 1;
        pm_runtime_allow(dev);
    }

exit:
    device_unlock(dev);
    return count;
}
/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
    struct amdgpu_device *adev;
    int r, acpi_status;

    adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
    if (adev == NULL) {
        return -ENOMEM;
    }
    dev->dev_private = (void *)adev;

    /* Mark the device as PX only when runtime PM is requested, ATPX is
     * present and this is not an APU. */
    if ((amdgpu_runtime_pm != 0) &&
        amdgpu_has_atpx() &&
        ((flags & AMD_IS_APU) == 0))
        flags |= AMD_IS_PX;

    /* amdgpu_device_init should report only fatal error
     * like memory allocation failure or iomapping failure,
     * or memory manager initialization failure, it must
     * properly initialize the GPU MC controller and permit
     * VRAM allocation
     */
    r = amdgpu_device_init(adev, dev, dev->pdev, flags);
    if (r) {
        dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
        goto out;
    }

    /* Call ACPI methods: require modeset init
     * but failure is not fatal
     */
    if (!r) {
        acpi_status = amdgpu_acpi_init(adev);
        if (acpi_status)
            dev_dbg(&dev->pdev->dev,
                "Error during ACPI methods call\n");
    }

    /* Bring up the amdkfd (compute) side of the driver. */
    amdgpu_amdkfd_load_interface(adev);
    amdgpu_amdkfd_device_probe(adev);
    amdgpu_amdkfd_device_init(adev);

    if (amdgpu_device_is_px(dev)) {
        /* Hand the PX device to runtime PM with a 5 s autosuspend
         * delay and drop the initial reference. */
        pm_runtime_use_autosuspend(dev->dev);
        pm_runtime_set_autosuspend_delay(dev->dev, 5000);
        pm_runtime_set_active(dev->dev);
        pm_runtime_allow(dev->dev);
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
    }

out:
    if (r)
        amdgpu_driver_unload_kms(dev);

    return r;
}
/*
 * wil_pm_runtime_allow() - opt the wil6210 device into runtime PM with
 * autosuspend, dropping the usage count taken at init.
 */
void wil_pm_runtime_allow(struct wil6210_priv *wil)
{
    struct device *d = wil_to_dev(wil);

    pm_runtime_put_noidle(d);
    pm_runtime_set_autosuspend_delay(d, WIL6210_AUTOSUSPEND_DELAY_MS);
    pm_runtime_use_autosuspend(d);
    pm_runtime_allow(d);
}
/*
 * store_ohci_power() - sysfs store hook powering the Exynos OHCI
 * controller on or off (0 = off, non-zero = on).
 *
 * Fix: the debug messages wrongly said "EHCI"; this is the OHCI driver.
 */
static ssize_t store_ohci_power(struct device *dev,
            struct device_attribute *attr,
            const char *buf, size_t count)
{
    struct platform_device *pdev = to_platform_device(dev);
    struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
    struct exynos_ohci_hcd *exynos_ohci = platform_get_drvdata(pdev);
    struct usb_hcd *hcd = exynos_ohci->hcd;
    int power_on;
    int irq;
    int retval;

    if (sscanf(buf, "%d", &power_on) != 1)
        return -EINVAL;

    device_lock(dev);

    if (!power_on && exynos_ohci->power_on) {
        printk(KERN_DEBUG "%s: OHCI turns off\n", __func__);
        /* Keep the controller awake while it is torn down. */
        pm_runtime_forbid(dev);
        exynos_ohci->power_on = 0;
        usb_remove_hcd(hcd);
        if (pdata && pdata->phy_exit)
            pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
    } else if (power_on) {
        printk(KERN_DEBUG "%s: OHCI turns on\n", __func__);
        if (exynos_ohci->power_on) {
            /* Already on: tear down first, re-add below. */
            pm_runtime_forbid(dev);
            usb_remove_hcd(hcd);
        } else {
            if (pdata && pdata->phy_init)
                pdata->phy_init(pdev, S5P_USB_PHY_HOST);
        }

        irq = platform_get_irq(pdev, 0);
        retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
        if (retval < 0) {
            dev_err(dev, "Power On Fail\n");
            goto exit;
        }

        /*
         * OHCI root hubs are expected to handle remote wakeup.
         * So, wakeup flag init defaults for root hubs.
         */
        device_wakeup_enable(&hcd->self.root_hub->dev);

        exynos_ohci->power_on = 1;
        pm_runtime_allow(dev);
    }

exit:
    device_unlock(dev);
    return count;
}
static void link_pm_runtime_start(struct work_struct *work) { struct link_pm_data *pm_data = container_of(work, struct link_pm_data, link_pm_start.work); struct usb_device *usbdev = pm_data->usb_ld->usbdev; struct device *dev, *hdev; struct link_device *ld = &pm_data->usb_ld->ld; if (!pm_data->usb_ld->if_usb_connected || pm_data->usb_ld->ld.com_state == COM_NONE) { mif_err("disconnect status, ignore\n"); return; } dev = &pm_data->usb_ld->usbdev->dev; /* wait interface driver resumming */ if (dev->power.runtime_status == RPM_SUSPENDED) { mif_info("suspended yet, delayed work\n"); queue_delayed_work(pm_data->wq, &pm_data->link_pm_start, msecs_to_jiffies(20)); return; } if (pm_data->usb_ld->usbdev && dev->parent) { mif_info("rpm_status: %d\n", dev->power.runtime_status); pm_runtime_set_autosuspend_delay(dev, 200); hdev = usbdev->bus->root_hub->dev.parent; mif_info("EHCI runtime %s, %s\n", dev_driver_string(hdev), dev_name(hdev)); pm_runtime_allow(dev); pm_runtime_allow(hdev);/*ehci*/ pm_data->link_pm_active = true; pm_data->resume_requested = false; pm_data->link_reconnect_cnt = 5; pm_data->resume_retry_cnt = 0; /* retry prvious link tx q */ queue_delayed_work(ld->tx_wq, &ld->tx_delayed_work, 0); } }
/*
 * i915_rpm_enable() - enable and allow runtime PM on @dev, but only when
 * runtime PM is not already enabled.
 */
void i915_rpm_enable(struct device *dev)
{
    if (pm_runtime_enabled(dev))
        return;

    pm_runtime_enable(dev);
    pm_runtime_allow(dev);
}
/*
 * store_ohci_power() - sysfs store hook powering the S5P OHCI controller
 * on or off (0 = off, non-zero = on).
 *
 * Fixes:
 *  - the debug messages wrongly said "EHCI"; this is the OHCI driver.
 *  - pdata->phy_init was dereferenced without the NULL check that the
 *    power-off path applies to pdata; both paths now check "pdata &&".
 */
static ssize_t store_ohci_power(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
    struct platform_device *pdev = to_platform_device(dev);
    struct s5p_ohci_platdata *pdata = pdev->dev.platform_data;
    struct s5p_ohci_hcd *s5p_ohci = platform_get_drvdata(pdev);
    struct usb_hcd *hcd = s5p_ohci->hcd;
    int power_on;
    int irq;
    int retval;

    if (sscanf(buf, "%d", &power_on) != 1)
        return -EINVAL;

    device_lock(dev);
    if (!power_on && s5p_ohci->power_on) {
        printk(KERN_DEBUG "%s: OHCI turns off\n", __func__);
        pm_runtime_forbid(dev);
        s5p_ohci->power_on = 0;
        usb_remove_hcd(hcd);
        if (pdata && pdata->phy_exit)
            pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
    } else if (power_on) {
        printk(KERN_DEBUG "%s: OHCI turns on\n", __func__);
        if (s5p_ohci->power_on) {
            /* NOTE(review): unlike the sibling power hooks, this
             * path does not pm_runtime_forbid() before removing
             * the HCD — confirm whether that is intentional. */
            usb_remove_hcd(hcd);
        }
        if (pdata && pdata->phy_init)
            pdata->phy_init(pdev, S5P_USB_PHY_HOST);

        irq = platform_get_irq(pdev, 0);
        retval = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
        if (retval < 0) {
            dev_err(dev, "Power On Fail\n");
            goto exit;
        }

        s5p_ohci->power_on = 1;
        pm_runtime_allow(dev);
    }
exit:
    device_unlock(dev);
    return count;
}
/*
 * control_store() - sysfs "power/control" store: "auto" allows runtime
 * PM, "on" forbids it; anything else is -EINVAL.  A trailing newline in
 * the input is ignored.
 */
static ssize_t control_store(struct device * dev, struct device_attribute *attr,
        const char * buf, size_t n)
{
    int len = n;
    char *nl = memchr(buf, '\n', n);

    if (nl)
        len = nl - buf;

    if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0) {
        pm_runtime_allow(dev);
        return n;
    }

    if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0) {
        pm_runtime_forbid(dev);
        return n;
    }

    return -EINVAL;
}
/*
 * xhci_exit_test_mode() - leave xHCI port test mode.
 *
 * Halts the controller first if TEST_FORCE_EN was active and it is not
 * already halted, re-allows runtime PM on the controller, clears the
 * test-mode flag and resets the controller.
 */
static int xhci_exit_test_mode(struct xhci_hcd *xhci)
{
    int ret;

    if (!xhci->test_mode) {
        xhci_err(xhci, "Not in test mode, do nothing.\n");
        return 0;
    }

    if (xhci->test_mode == TEST_FORCE_EN &&
        !(xhci->xhc_state & XHCI_STATE_HALTED)) {
        ret = xhci_halt(xhci);
        if (ret)
            return ret;
    }

    pm_runtime_allow(xhci_to_hcd(xhci)->self.controller);
    xhci->test_mode = 0;

    return xhci_reset(xhci);
}
static int smsc_hub_enable(struct hsic_hub *hub) { struct smsc_hub_platform_data *pdata = hub->pdata; struct of_dev_auxdata *hsic_host_auxdata = dev_get_platdata(hub->dev); struct device_node *node = hub->dev->of_node; int ret; ret = gpio_direction_output(pdata->xo_clk_gpio, 1); if (ret < 0) { dev_err(hub->dev, "fail to enable xo clk\n"); return ret; } ret = gpio_direction_output(pdata->hub_reset, 0); if (ret < 0) { dev_err(hub->dev, "fail to assert reset\n"); goto disable_xo; } udelay(5); ret = gpio_direction_output(pdata->hub_reset, 1); if (ret < 0) { dev_err(hub->dev, "fail to de-assert reset\n"); goto disable_xo; } ret = of_platform_populate(node, NULL, hsic_host_auxdata, hub->dev); if (ret < 0) { dev_err(smsc_hub->dev, "fail to add child with %d\n", ret); goto reset; } pm_runtime_allow(hub->dev); return 0; reset: gpio_direction_output(pdata->hub_reset, 0); disable_xo: gpio_direction_output(pdata->xo_clk_gpio, 0); return ret; }
/**
 * ufshcd_pci_probe - probe routine of the driver
 * @pdev: pointer to PCI device handle
 * @id: PCI device id
 *
 * Returns 0 on success, non-zero value on failure
 */
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    struct ufs_hba *hba;
    void __iomem *mmio_base;
    int err;

    err = pcim_enable_device(pdev);
    if (err) {
        dev_err(&pdev->dev, "pcim_enable_device failed\n");
        return err;
    }

    pci_set_master(pdev);

    /* Map BAR 0; pcim_* variants are released automatically on detach. */
    err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
    if (err < 0) {
        dev_err(&pdev->dev, "request and iomap failed\n");
        return err;
    }

    mmio_base = pcim_iomap_table(pdev)[0];

    err = ufshcd_alloc_host(&pdev->dev, &hba);
    if (err) {
        dev_err(&pdev->dev, "Allocation failed\n");
        return err;
    }

    INIT_LIST_HEAD(&hba->clk_list_head);

    err = ufshcd_init(hba, mmio_base, pdev->irq);
    if (err) {
        dev_err(&pdev->dev, "Initialization failed\n");
        ufshcd_dealloc_host(hba);
        return err;
    }

    pci_set_drvdata(pdev, hba);

    /* Drop the probe-time usage count and opt in to runtime PM. */
    pm_runtime_put_noidle(&pdev->dev);
    pm_runtime_allow(&pdev->dev);

    return 0;
}
/*
 * drm_enable_locked() - flip the global DRM on/off state for @info.
 * Toggling to the already-current state only logs an error.  Caller
 * holds the relevant lock.
 */
static void drm_enable_locked(struct secmem_info *info, bool enable)
{
    if (drm_onoff == enable) {
        pr_err("%s: DRM is already %s\n", __func__,
               drm_onoff ? "on" : "off");
        return;
    }

#ifdef CONFIG_EXYNOS5_DEV_GSC
    if (enable)
        pm_runtime_forbid(info->dev->parent);
    else
        pm_runtime_allow(info->dev->parent);
#endif
    drm_onoff = enable;
    /*
     * this will only allow this instance to turn drm_off either by
     * calling the ioctl or by closing the fd
     */
    info->drm_enabled = enable;
}
static int serial_hsu_pci_port_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct uart_hsu_port *up; int ret, port, hw_type; resource_size_t start, len; start = pci_resource_start(pdev, 0); len = pci_resource_len(pdev, 0); dev_info(&pdev->dev, "FUNC: %d driver: %ld addr:%lx len:%lx\n", PCI_FUNC(pdev->devfn), ent->driver_data, (unsigned long) start, (unsigned long) len); port = intel_mid_hsu_func_to_port(PCI_FUNC(pdev->devfn)); if (port == -1) return 0; ret = pci_enable_device(pdev); if (ret) return ret; ret = pci_request_region(pdev, 0, "hsu"); if (ret) goto err; up = serial_hsu_port_setup(&pdev->dev, port, start, len, pdev->irq); if (IS_ERR(up)) goto err; pci_set_drvdata(pdev, up); pm_runtime_put_noidle(&pdev->dev); pm_runtime_allow(&pdev->dev); return 0; err: pci_disable_device(pdev); return ret; }
/*
 * i915_rpm_init() - RPM init: set up the procfs entry, mark the device
 * runtime-active, allow runtime PM and enable autosuspend.  Returns the
 * result of pm_runtime_set_active(); a procfs failure is only logged.
 */
int i915_rpm_init(struct drm_device *drm_dev)
{
    int ret = 0;
    struct device *dev = drm_dev->dev;
    struct drm_i915_private *dev_priv = drm_dev->dev_private;

    ret = i915_rpm_procfs_init(drm_dev);
    if (ret) {
        /* Non-fatal: continue without the procfs entry. */
        DRM_ERROR("unable to initialize procfs entry");
    }
    /* ret is overwritten here; only this status reaches the caller. */
    ret = pm_runtime_set_active(dev);
    dev_priv->rpm.ring_active = false;
    atomic_set(&dev_priv->rpm.procfs_count, 0);
    pm_runtime_allow(dev);
    /* enable Auto Suspend */
    pm_runtime_set_autosuspend_delay(dev, RPM_AUTOSUSPEND_DELAY);
    pm_runtime_use_autosuspend(dev);

    if (dev->power.runtime_error)
        DRM_ERROR("rpm init: error = %d\n", dev->power.runtime_error);

    return ret;
}
/*
 * dwc3_remove() - platform remove: tear down debugfs, the active mode,
 * event buffers, core and ULPI, shut runtime PM down, then free buffers
 * and release the bulk clocks.
 */
static int dwc3_remove(struct platform_device *pdev)
{
    struct dwc3 *dwc = platform_get_drvdata(pdev);

    /* Make sure the hardware is awake for teardown. */
    pm_runtime_get_sync(&pdev->dev);

    dwc3_debugfs_exit(dwc);
    dwc3_core_exit_mode(dwc);
    dwc3_event_buffers_cleanup(dwc);
    dwc3_core_exit(dwc);
    dwc3_ulpi_exit(dwc);

    /* Balance the get_sync above, return control to the PM core, then
     * disable runtime PM entirely. */
    pm_runtime_put_sync(&pdev->dev);
    pm_runtime_allow(&pdev->dev);
    pm_runtime_disable(&pdev->dev);

    dwc3_free_event_buffers(dwc);
    dwc3_free_scratch_buffers(dwc);
    clk_bulk_put(dwc->num_clks, dwc->clks);

    return 0;
}
/*
 * pwm_lpss_probe_pci() - PCI probe for the LPSS PWM: run the common
 * probe against BAR 0 with the board info from driver_data, then hand
 * the device over to runtime PM.
 */
static int pwm_lpss_probe_pci(struct pci_dev *pdev,
                  const struct pci_device_id *id)
{
    const struct pwm_lpss_boardinfo *info;
    struct pwm_lpss_chip *lpwm;
    int ret;

    ret = pcim_enable_device(pdev);
    if (ret < 0)
        return ret;

    info = (struct pwm_lpss_boardinfo *)id->driver_data;
    lpwm = pwm_lpss_probe(&pdev->dev, &pdev->resource[0], info);
    if (IS_ERR(lpwm))
        return PTR_ERR(lpwm);

    pci_set_drvdata(pdev, lpwm);

    pm_runtime_put(&pdev->dev);
    pm_runtime_allow(&pdev->dev);

    return 0;
}
/*
 * ehci_hsic_probe() - PCI probe for the Intel EHCI HSIC host controller.
 *
 * Sets up wake locks and PM notifiers, initializes the AUX/wakeup GPIOs,
 * creates and registers the HCD, then (unless a sysfs enable/disable
 * cycle is in flight) drops the runtime-PM usage count and allows
 * runtime PM.  The error ladder at the bottom unwinds in reverse order;
 * note the release_mem_region label deliberately sits INSIDE the
 * HCD_MEMORY branch so only memory-mapped controllers unmap/release.
 *
 * NOTE(review): the "!driver" check returns -EINVAL directly, leaking the
 * notifiers/wake locks registered above it — confirm intent.
 */
static int ehci_hsic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    struct hc_driver *driver;
    struct usb_hcd *hcd;
    struct ehci_hcd *ehci;
    int irq, retval;

    pr_debug("initializing Intel EHCI HSIC Host Controller\n");

    if (usb_disabled())
        return -ENODEV;

    if (!id)
        return -EINVAL;
    pci_dev = pdev;
    if (pci_enable_device(pdev) < 0)
        return -ENODEV;
    pdev->current_state = PCI_D0;

    wake_lock_init(&hsic.resume_wake_lock,
        WAKE_LOCK_SUSPEND, "hsic_aux2_wlock");
    wake_lock_init(&hsic.s3_wake_lock,
        WAKE_LOCK_SUSPEND, "hsic_s3_wlock");

    /* Register USB and suspend-entry notifiers. */
    hsic.hsic_pm_nb.notifier_call = hsic_pm_notify;
    usb_register_notify(&hsic.hsic_pm_nb);
    hsic.hsic_s3_entry_nb.notifier_call = hsic_s3_entry_notify;
    register_pm_notifier(&hsic.hsic_s3_entry_nb);

    /* we need not call pci_enable_dev since otg transceiver already take
     * the control of this device and this probe actaully gets called by
     * otg transceiver driver with HNP protocol.
     */
    irq = pdev->irq;
    if (!pdev->irq) {
        dev_dbg(&pdev->dev, "No IRQ.\n");
        retval = -ENODEV;
        goto disable_pci;
    }

    driver = (struct hc_driver *)id->driver_data;
    if (!driver)
        return -EINVAL;

    /* AUX GPIO init */
    retval = hsic_aux_gpio_init();
    if (retval < 0) {
        dev_err(&pdev->dev, "AUX GPIO init fail\n");
        retval = -ENODEV;
        goto disable_pci;
    }

    /* AUX GPIO init */
    retval = hsic_wakeup_gpio_init();
    if (retval < 0) {
        dev_err(&pdev->dev, "Wakeup GPIO init fail\n");
        retval = -ENODEV;
        goto disable_pci;
    }

    hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
    if (!hcd) {
        retval = -ENOMEM;
        goto disable_pci;
    }

    ehci = hcd_to_ehci(hcd);
    hcd->rsrc_start = pci_resource_start(pdev, 0);
    hcd->rsrc_len = pci_resource_len(pdev, 0);
    if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
            driver->description)) {
        dev_dbg(&pdev->dev, "controller already in use\n");
        retval = -EBUSY;
        goto clear_companion;
    }

    hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
    if (hcd->regs == NULL) {
        dev_dbg(&pdev->dev, "error mapping memory\n");
        retval = -EFAULT;
        goto release_mem_region;
    }

    pci_set_master(pdev);

    /* One-time lazy init of sysfs files, mutexes, waitqueue. */
    if (hsic.hsic_enable_created == 0) {
        retval = create_device_files();
        if (retval < 0) {
            dev_dbg(&pdev->dev, "error create device files\n");
            goto release_mem_region;
        }
        hsic.hsic_enable_created = 1;
    }
    if (hsic.hsic_mutex_init == 0) {
        mutex_init(&hsic.hsic_mutex);
        mutex_init(&hsic.wlock_mutex);
        hsic.hsic_mutex_init = 1;
    }
    if (hsic.aux_wq_init == 0) {
        init_waitqueue_head(&hsic.aux_wq);
        hsic.aux_wq_init = 1;
    }

    hsic.work_queue = create_singlethread_workqueue("hsic");
    INIT_DELAYED_WORK(&hsic.wakeup_work, wakeup_work);
    INIT_DELAYED_WORK(&(hsic.hsic_aux), hsic_aux_work);

    hcd->hsic_notify = hsic_notify;

    retval = usb_add_hcd(hcd, irq,
            IRQF_DISABLED | IRQF_SHARED | IRQF_NO_SUSPEND);
    if (retval != 0)
        goto unmap_registers;
    dev_set_drvdata(&pdev->dev, hcd);

    /* Clear phy low power mode, enable phy clock */
    ehci_hsic_phy_power(ehci, 0);

    if (pci_dev_run_wake(pdev))
        pm_runtime_put_noidle(&pdev->dev);

    if (!enabling_disabling) {
        /* Check here to avoid to call pm_runtime_put_noidle() twice */
        if (!pci_dev_run_wake(pdev))
            pm_runtime_put_noidle(&pdev->dev);

        pm_runtime_allow(&pdev->dev);
    }
    hsic.hsic_stopped = 0;
    hsic_enable = 1;
    hsic.s3_rt_state = RESUMED;
    s3_wake_lock();
    hsic_debugfs_init(hcd);

    return retval;

unmap_registers:
    destroy_workqueue(hsic.work_queue);
    if (driver->flags & HCD_MEMORY) {
        iounmap(hcd->regs);
release_mem_region:
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
    } else
        release_region(hcd->rsrc_start, hcd->rsrc_len);
clear_companion:
    dev_set_drvdata(&pdev->dev, NULL);
    usb_put_hcd(hcd);
disable_pci:
    pci_disable_device(pdev);
    dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
    wake_lock_destroy(&(hsic.resume_wake_lock));
    wake_lock_destroy(&hsic.s3_wake_lock);
    return retval;
}
/*
 * We need to register our own PCI probe function (instead of the USB core's
 * function) in order to create a second roothub under xHCI.
 *
 * Registers the USB 2.0 roothub via usb_hcd_pci_probe(), then creates and
 * adds the shared USB 3.0 HCD, performs one-time lazy init of the hsic
 * state, and finally enables wakeup and runtime PM on the PCI device.
 *
 * NOTE(review): the two GPIO-init failure branches set retval = -ENODEV
 * but do NOT return or jump, so probing continues and retval is then
 * overwritten — confirm whether these failures were meant to abort.
 */
static int xhci_ush_pci_probe(struct pci_dev *dev,
        const struct pci_device_id *id)
{
    int retval;
    struct xhci_hcd *xhci;
    struct hc_driver *driver;
    struct usb_hcd *hcd;

    driver = (struct hc_driver *)id->driver_data;
    pci_dev = dev;

    /* AUX GPIO init */
    retval = hsic_aux_gpio_init();
    if (retval < 0) {
        dev_err(&dev->dev, "AUX GPIO init fail\n");
        retval = -ENODEV;
    }

    /* AUX GPIO init */
    retval = hsic_wakeup_gpio_init();
    if (retval < 0) {
        dev_err(&dev->dev, "Wakeup GPIO init fail\n");
        retval = -ENODEV;
    }

    /* Register the USB 2.0 roothub.
     * FIXME: USB core must know to register the USB 2.0 roothub first.
     * This is sort of silly, because we could just set the HCD driver flags
     * to say USB 2.0, but I'm not sure what the implications would be in
     * the other parts of the HCD code.
     */
    retval = usb_hcd_pci_probe(dev, id);

    if (retval)
        return retval;

    /* USB 2.0 roothub is stored in the PCI device now. */
    hcd = dev_get_drvdata(&dev->dev);
    xhci = hcd_to_xhci(hcd);
    xhci->shared_hcd = usb_create_shared_hcd(driver, &dev->dev,
                pci_name(dev), hcd);
    if (!xhci->shared_hcd) {
        retval = -ENOMEM;
        goto dealloc_usb2_hcd;
    }

    /* Set the xHCI pointer before xhci_pci_setup() (aka hcd_driver.reset)
     * is called by usb_add_hcd().
     */
    *((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;

    /* One-time lazy init of sysfs files, mutex, waitqueue. */
    if (hsic.hsic_enable_created == 0) {
        retval = create_device_files();
        if (retval < 0) {
            dev_dbg(&dev->dev, "error create device files\n");
            goto dealloc_usb2_hcd;
        }
        hsic.hsic_enable_created = 1;
    }
    if (hsic.hsic_mutex_init == 0) {
        mutex_init(&hsic.hsic_mutex);
        hsic.hsic_mutex_init = 1;
    }
    if (hsic.aux_wq_init == 0) {
        init_waitqueue_head(&hsic.aux_wq);
        hsic.aux_wq_init = 1;
    }
    hsic.work_queue = create_singlethread_workqueue("hsic");
    INIT_WORK(&hsic.wakeup_work, wakeup_work);
    INIT_DELAYED_WORK(&(hsic.hsic_aux), hsic_aux_work);

    retval = usb_add_hcd(xhci->shared_hcd, dev->irq, IRQF_SHARED);
    if (retval)
        goto put_usb3_hcd;
    /* Roothub already marked as USB 3.0 speed */

    /* Enable Controller wakeup capability */
    device_set_wakeup_enable(&dev->dev, true);

    /* Enable runtime pm ability */
    hcd->rpm_control = 1;
    hcd->rpm_resume = 0;
    pm_runtime_set_active(&dev->dev);

    /* Check here to avoid to call pm_runtime_put_noidle() twice */
    if (!pci_dev_run_wake(dev))
        pm_runtime_put_noidle(&dev->dev);
    pm_runtime_allow(&dev->dev);
    hsic.hsic_stopped = 0;
    hsic_enable = 1;

    return 0;

put_usb3_hcd:
    usb_put_hcd(xhci->shared_hcd);
dealloc_usb2_hcd:
    usb_hcd_pci_remove(dev);
    return retval;
}
/*
 * sunxi_probe() - platform probe for the Allwinner (sunxi) OTG controller.
 *
 * Maps registers, parses the "maximum_speed" module parameter, brings up
 * clocks (and pins/regulators off-FPGA), initializes the core and gadget,
 * sets up debugfs, and finally allows runtime PM.
 */
static int __devinit sunxi_probe(struct platform_device *pdev)
{
    struct device_node *node = pdev->dev.of_node;
    struct resource *res;
    struct sunxi_otgc *otgc;
    struct device *dev = &pdev->dev;
    int ret = -ENOMEM;
    int irq;
    void __iomem *regs;
    void *mem;

    /* Over-allocate so the otgc struct itself can be aligned. */
    mem = devm_kzalloc(dev, sizeof(*otgc) + SUNXI_ALIGN_MASK, GFP_KERNEL);
    if (!mem) {
        dev_err(dev, "not enough memory\n");
        return -ENOMEM;
    }
    otgc = PTR_ALIGN(mem, SUNXI_ALIGN_MASK + 1);
    otgc->mem = mem;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res) {
        dev_err(dev, "missing resource\n");
        return -ENODEV;
    }
    otgc->res = res;

    res = devm_request_mem_region(dev, res->start, resource_size(res),
            dev_name(dev));
    if (!res) {
        dev_err(dev, "can't request mem region\n");
        return -ENOMEM;
    }

    regs = devm_ioremap(dev, res->start, resource_size(res));
    if (!regs) {
        dev_err(dev, "ioremap failed\n");
        return -ENOMEM;
    }

    irq = platform_get_irq(pdev, 0);
    if (irq < 0) {
        dev_err(dev, "missing IRQ\n");
        return -ENODEV;
    }

    spin_lock_init(&otgc->lock);
    otgc->regs = regs;
    otgc->regs_size = resource_size(res);
    otgc->dev = dev;
    otgc->irq = irq;

    /* Map the "maximum_speed" module parameter onto DCFG speed values;
     * anything unrecognized falls back to super speed. */
    if (!strncmp("super", maximum_speed, 5))
        otgc->maximum_speed = SUNXI_DCFG_SUPERSPEED;
    else if (!strncmp("high", maximum_speed, 4))
        otgc->maximum_speed = SUNXI_DCFG_HIGHSPEED;
    else if (!strncmp("full", maximum_speed, 4))
        otgc->maximum_speed = SUNXI_DCFG_FULLSPEED1;
    else if (!strncmp("low", maximum_speed, 3))
        otgc->maximum_speed = SUNXI_DCFG_LOWSPEED;
    else
        otgc->maximum_speed = SUNXI_DCFG_SUPERSPEED;

    if (of_get_property(node, "tx-fifo-resize", NULL))
        otgc->needs_fifo_resize = true;

    /* Hold the device active (runtime PM forbidden) until init is done. */
    pm_runtime_enable(dev);
    pm_runtime_get_sync(dev);
    pm_runtime_forbid(dev);

    sunxi_open_usb_clock(otgc);

#ifndef SUNXI_USB_FPGA
    sunxi_pin_init(otgc);
    request_usb_regulator_io(otgc);
#endif

    ret = sunxi_core_init(otgc);
    if (ret) {
        dev_err(dev, "failed to initialize core\n");
        /* NOTE(review): runtime PM is left enabled/forbidden on this
         * error path — confirm whether cleanup is required. */
        return ret;
    }

    sunxi_set_mode(otgc, SUNXI_GCTL_PRTCAP_DEVICE);

    sunxi_gadget_init(otgc);

    ret = sunxi_debugfs_init(otgc);
    if (ret) {
        dev_err(dev, "failed to initialize debugfs\n");
        goto err1;
    }

    pm_runtime_allow(dev);
    platform_set_drvdata(pdev, otgc);
    sunxi_otg_pdev = pdev;
    sunxi_usb_device_disable();

    return 0;

err1:
    sunxi_core_exit(otgc);
    return ret;
}
/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
    struct radeon_device *rdev;
    int r, acpi_status;

    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
    if (rdev == NULL) {
        return -ENOMEM;
    }
    dev->dev_private = (void *)rdev;

    /* update BUS flag */
    if (drm_pci_device_is_agp(dev)) {
        DRM_INFO("RADEON_IS_AGP\n");
        flags |= RADEON_IS_AGP;
    } else if (pci_is_pcie(dev->dev->bsddev)) {
        DRM_INFO("RADEON_IS_PCIE\n");
        flags |= RADEON_IS_PCIE;
    } else {
        DRM_INFO("RADEON_IS_PCI\n");
        flags |= RADEON_IS_PCI;
    }

#ifdef PM_TODO
    /*
     * NOTE(review): this "if" has no body inside the #ifdef.  If PM_TODO
     * is ever defined, the condition will silently attach itself to the
     * radeon_device_init() assignment below, making it conditional.  It
     * needs a body (presumably setting a PX flag) before PM_TODO can be
     * enabled.
     */
    if ((radeon_runtime_pm != 0) &&
        radeon_has_atpx() &&
        ((flags & RADEON_IS_IGP) == 0))
#endif

    /* radeon_device_init should report only fatal error
     * like memory allocation failure or iomapping failure,
     * or memory manager initialization failure, it must
     * properly initialize the GPU MC controller and permit
     * VRAM allocation
     */
    r = radeon_device_init(rdev, dev, dev->pdev, flags);
    if (r) {
        dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
        goto out;
    }

    /* Again modeset_init should fail only on fatal error
     * otherwise it should provide enough functionalities
     * for shadowfb to run
     */
    r = radeon_modeset_init(rdev);
    if (r)
        dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");

    /* Call ACPI methods: require modeset init
     * but failure is not fatal
     */
    if (!r) {
        acpi_status = radeon_acpi_init(rdev);
        if (acpi_status)
            dev_dbg(&dev->pdev->dev,
                "Error during ACPI methods call\n");
    }

#ifdef PM_TODO
    if (radeon_is_px(dev)) {
        /* Hand the PX device to runtime PM with a 5 s autosuspend
         * delay and drop the initial reference. */
        pm_runtime_use_autosuspend(dev->dev);
        pm_runtime_set_autosuspend_delay(dev->dev, 5000);
        pm_runtime_set_active(dev->dev);
        pm_runtime_allow(dev->dev);
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
    }
#endif

out:
    if (r)
        radeon_driver_unload_kms(dev);

    return r;
}
/*
 * ksb_fs_read() - character-device read for the ks_bridge.
 *
 * Blocks until packets are queued on to_ks_list (or the USB device
 * disconnects), then drains them into the user buffer.  Fully-consumed
 * packets are re-initialized and re-submitted as URBs; a partially read
 * packet is pushed back onto the list when the user buffer fills up.
 *
 * NOTE(review): the "ks_bridge:2" special cases below dereference the
 * __user pointer `buf` directly (*buf == 0x1 / 0x8) instead of examining
 * the copied kernel-side data — verify; this is unsafe where user memory
 * is not directly addressable.
 */
static ssize_t ksb_fs_read(struct file *fp, char __user *buf,
                size_t count, loff_t *pos)
{
    int ret;
    unsigned long flags;
    struct ks_bridge *ksb = fp->private_data;
    struct data_pkt *pkt = NULL;
    size_t space, copied;

read_start:
    if (!test_bit(USB_DEV_CONNECTED, &ksb->flags))
        return -ENODEV;

    spin_lock_irqsave(&ksb->lock, flags);
    if (list_empty(&ksb->to_ks_list)) {
        spin_unlock_irqrestore(&ksb->lock, flags);
        /* Sleep until data arrives or the device goes away. */
        ret = wait_event_interruptible(ksb->ks_wait_q,
                !list_empty(&ksb->to_ks_list) ||
                !test_bit(USB_DEV_CONNECTED, &ksb->flags));
        if (ret < 0)
            return ret;
        goto read_start;
    }

    space = count;
    copied = 0;
    while (!list_empty(&ksb->to_ks_list) && space &&
            test_bit(USB_DEV_CONNECTED, &ksb->flags)) {
        size_t len;

        pkt = list_first_entry(&ksb->to_ks_list, struct data_pkt, list);
        list_del_init(&pkt->list);
        len = min_t(size_t, space, pkt->len - pkt->n_read);
        /* Drop the lock across the (possibly faulting) user copy. */
        spin_unlock_irqrestore(&ksb->lock, flags);

        ret = copy_to_user(buf + copied, pkt->buf + pkt->n_read, len);
        if (ret) {
            pr_err("copy_to_user failed err:%d\n", ret);
            ksb_free_data_pkt(pkt);
            return -EFAULT;
        }

        pkt->n_read += len;
        space -= len;
        copied += len;

        if (pkt->n_read == pkt->len) {
            /*
             * re-init the packet and queue it
             * for more data.
             */
            pkt->n_read = 0;
            pkt->len = MAX_DATA_PKT_SIZE;
            submit_one_urb(ksb, GFP_KERNEL, pkt);
            pkt = NULL;
        }
        spin_lock_irqsave(&ksb->lock, flags);
    }

    /* put the partial packet back in the list */
    if (!space && pkt && pkt->n_read != pkt->len) {
        if (test_bit(USB_DEV_CONNECTED, &ksb->flags))
            list_add(&pkt->list, &ksb->to_ks_list);
        else
            ksb_free_data_pkt(pkt);
    }
    spin_unlock_irqrestore(&ksb->lock, flags);

    dbg_log_event(ksb, "KS_READ", copied, 0);

    /* EFS-sync handshake detection on the second bridge channel. */
    if (!strcmp(ksb->name, "ks_bridge:2") &&
            count == 48 && *buf == 0x1) {
        pr_info("%s, HELLO COMMAND = 0x%x\n", __func__, *buf);
        if (!atomic_read(&ksb->pmlock_cnt)) {
            atomic_inc(&ksb->pmlock_cnt);
            pr_info("<cnt = %d> efs sync\n",
                    atomic_read(&ksb->pmlock_cnt));
        } else if (atomic_read(&ksb->pmlock_cnt)) {
            atomic_inc(&ksb->pmlock_cnt);
            pr_info("<cnt = %d> get efs lock\n",
                    atomic_read(&ksb->pmlock_cnt));
            /* Pin the device awake for the duration of the sync. */
            pm_runtime_get(&ksb->udev->dev);
            pm_runtime_forbid(&ksb->udev->dev);
        }
    } else if (!strcmp(ksb->name, "ks_bridge:2") &&
            count == 8 && *buf == 0x8) {
        pr_info("%s, RESET_RESPONSE = 0x%x\n", __func__, *buf);
        if (atomic_read(&ksb->pmlock_cnt) == 2) {
            atomic_dec(&ksb->pmlock_cnt);
            pr_info("<cnt = %d> release efs lock\n",
                    atomic_read(&ksb->pmlock_cnt));
            pm_runtime_allow(&ksb->udev->dev);
            pm_runtime_put(&ksb->udev->dev);
        }
    }

    /* NOTE(review): %d used for size_t arguments — should be %zu. */
    if (!strcmp(ksb->name, "ks_bridge:2"))
        pr_info("count:%d space:%d copied:%d", count, space, copied);
    else
        pr_debug("count:%d space:%d copied:%d", count, space, copied);

    return copied;
}