static int u8500_hsem_probe(struct platform_device *pdev) { struct hwspinlock_pdata *pdata = pdev->dev.platform_data; struct hwspinlock_device *bank; struct hwspinlock *hwlock; struct resource *res; void __iomem *io_base; int i, ret, num_locks = U8500_MAX_SEMAPHORE; ulong val; if (!pdata) return -ENODEV; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; io_base = ioremap(res->start, resource_size(res)); if (!io_base) return -ENOMEM; /* make sure protocol 1 is selected */ val = readl(io_base + HSEM_CTRL_REG); writel((val & ~HSEM_PROTOCOL_1), io_base + HSEM_CTRL_REG); /* clear all interrupts */ writel(0xFFFF, io_base + HSEM_ICRALL); bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL); if (!bank) { ret = -ENOMEM; goto iounmap_base; } platform_set_drvdata(pdev, bank); for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++) hwlock->priv = io_base + HSEM_REGISTER_OFFSET + sizeof(u32) * i; /* no pm needed for HSem but required to comply with hwspilock core */ pm_runtime_enable(&pdev->dev); ret = hwspin_lock_register(bank, &pdev->dev, &u8500_hwspinlock_ops, pdata->base_id, num_locks); if (ret) goto reg_fail; return 0; reg_fail: pm_runtime_disable(&pdev->dev); kfree(bank); iounmap_base: iounmap(io_base); return ret; }
static int sprd_hwspinlock_probe(struct platform_device *pdev) { struct sprd_hwspinlock_dev *sprd_hwlock; struct hwspinlock *lock; struct resource *res; int i, ret; if (!pdev->dev.of_node) return -ENODEV; sprd_hwlock = devm_kzalloc(&pdev->dev, sizeof(struct sprd_hwspinlock_dev) + SPRD_HWLOCKS_NUM * sizeof(*lock), GFP_KERNEL); if (!sprd_hwlock) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); sprd_hwlock->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(sprd_hwlock->base)) return PTR_ERR(sprd_hwlock->base); sprd_hwlock->clk = devm_clk_get(&pdev->dev, "enable"); if (IS_ERR(sprd_hwlock->clk)) { dev_err(&pdev->dev, "get hwspinlock clock failed!\n"); return PTR_ERR(sprd_hwlock->clk); } clk_prepare_enable(sprd_hwlock->clk); /* set the hwspinlock to record user id to identify subsystems */ writel(HWSPINLOCK_USER_BITS, sprd_hwlock->base + HWSPINLOCK_RECCTRL); for (i = 0; i < SPRD_HWLOCKS_NUM; i++) { lock = &sprd_hwlock->bank.lock[i]; lock->priv = sprd_hwlock->base + HWSPINLOCK_TOKEN(i); } platform_set_drvdata(pdev, sprd_hwlock); pm_runtime_enable(&pdev->dev); ret = hwspin_lock_register(&sprd_hwlock->bank, &pdev->dev, &sprd_hwspinlock_ops, 0, SPRD_HWLOCKS_NUM); if (ret) { pm_runtime_disable(&pdev->dev); clk_disable_unprepare(sprd_hwlock->clk); return ret; } return 0; }
static int omap_hwspinlock_probe(struct platform_device *pdev) { struct hwspinlock_pdata *pdata = pdev->dev.platform_data; struct hwspinlock_device *bank; struct hwspinlock *hwlock; struct resource *res; void __iomem *io_base; int num_locks, i, ret; if (!pdata) return -ENODEV; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; io_base = ioremap(res->start, resource_size(res)); if (!io_base) return -ENOMEM; /* Determine number of locks */ i = readl(io_base + SYSSTATUS_OFFSET); i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET; /* one of the four lsb's must be set, and nothing else */ if (hweight_long(i & 0xf) != 1 || i > 8) { ret = -EINVAL; goto iounmap_base; } num_locks = i * 32; /* actual number of locks in this device */ bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL); if (!bank) { ret = -ENOMEM; goto iounmap_base; } platform_set_drvdata(pdev, bank); for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++) hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i; /* * runtime PM will make sure the clock of this module is * enabled iff at least one lock is requested */ pm_runtime_enable(&pdev->dev); ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops, pdata->base_id, num_locks); if (ret) goto reg_fail; return 0; reg_fail: pm_runtime_disable(&pdev->dev); kfree(bank); iounmap_base: iounmap(io_base); return ret; }
/*
 * Probe the legacy Spreadtrum hardware spinlock block: enable its AHB
 * clock gate, and allocate + register one hwspinlock object per hardware
 * lock (pre-banked hwspinlock core API, one register call per lock).
 *
 * Returns 0 on success or a negative errno on failure; on partial
 * failure every already-registered lock is unregistered and freed.
 */
static int __devinit sprd_hwspinlock_probe(struct platform_device *pdev)
{
	struct sprd_hwspinlock *sprd_lock;
	struct sprd_hwspinlock_state *state;
	struct hwspinlock *lock;
	struct resource *res;
	int i, ret;

	sci_glb_set(REG_AHB_AHB_CTL0, BIT_SPINLOCK_EB);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* reject a zero base address before allocating anything */
	if (!res->start)
		return -ENOMEM;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->num_locks = HWSPINLOCK_MAX_NUM;
	/*
	 * NOTE(review): res->start is cast straight to __iomem with no
	 * ioremap() — presumably this platform passes a pre-mapped
	 * (virtual) address in the resource; verify against the board code.
	 */
	state->io_base = (void __iomem *)res->start;
	hwspinlock_base = state->io_base;

	platform_set_drvdata(pdev, state);

	/*
	 * runtime PM will make sure the clock of this module is
	 * enabled if at least one lock is requested
	 */
	pm_runtime_enable(&pdev->dev);

	for (i = 0; i < state->num_locks; i++) {
		sprd_lock = kzalloc(sizeof(*sprd_lock), GFP_KERNEL);
		if (!sprd_lock) {
			ret = -ENOMEM;
			goto free_locks;
		}

		sprd_lock->lock.dev = &pdev->dev;
		sprd_lock->lock.owner = THIS_MODULE;
		sprd_lock->lock.id = i;
		sprd_lock->lock.ops = &sprd_hwspinlock_ops;
		sprd_lock->addr = (void __iomem *)(res->start +
						   HWSPINLOCK_TOKEN(i));

		ret = hwspin_lock_register(&sprd_lock->lock);
		if (ret) {
			kfree(sprd_lock);
			goto free_locks;
		}
	}

	/* was a bare printk() with no log level; keep the message text */
	printk(KERN_INFO "sprd_hwspinlock_probe ok\n");
	return 0;

free_locks:
	/* unwind: unregister and free every lock registered so far */
	while (--i >= 0) {
		lock = hwspin_lock_unregister(i);
		/* this shouldn't happen, but let's give our best effort */
		if (!lock) {
			dev_err(&pdev->dev, "%s: cleanups failed\n",
				__func__);
			continue;
		}
		sprd_lock = to_sprd_hwspinlock(lock);
		kfree(sprd_lock);
	}
	pm_runtime_disable(&pdev->dev);
	kfree(state);
	return ret;
}