/*
 * Probe the Spreadtrum hwspinlock controller: map its registers, enable
 * its gate clock, configure user-id recording, and register one hwspinlock
 * bank containing SPRD_HWLOCKS_NUM locks.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int sprd_hwspinlock_probe(struct platform_device *pdev)
{
	struct sprd_hwspinlock_dev *sprd_hwlock;
	struct hwspinlock *lock;
	struct resource *res;
	int i, ret;

	if (!pdev->dev.of_node)
		return -ENODEV;

	/* One allocation for the device struct plus its trailing lock array. */
	sprd_hwlock = devm_kzalloc(&pdev->dev,
				   sizeof(struct sprd_hwspinlock_dev) +
				   SPRD_HWLOCKS_NUM * sizeof(*lock),
				   GFP_KERNEL);
	if (!sprd_hwlock)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sprd_hwlock->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(sprd_hwlock->base))
		return PTR_ERR(sprd_hwlock->base);

	sprd_hwlock->clk = devm_clk_get(&pdev->dev, "enable");
	if (IS_ERR(sprd_hwlock->clk)) {
		dev_err(&pdev->dev, "get hwspinlock clock failed!\n");
		return PTR_ERR(sprd_hwlock->clk);
	}

	/*
	 * The controller registers are dead without its gate clock; do not
	 * proceed to MMIO accesses if enabling it fails.
	 */
	ret = clk_prepare_enable(sprd_hwlock->clk);
	if (ret) {
		dev_err(&pdev->dev, "enable hwspinlock clock failed!\n");
		return ret;
	}

	/* set the hwspinlock to record user id to identify subsystems */
	writel(HWSPINLOCK_USER_BITS, sprd_hwlock->base + HWSPINLOCK_RECCTRL);

	/* Each lock's priv points at its own token register. */
	for (i = 0; i < SPRD_HWLOCKS_NUM; i++) {
		lock = &sprd_hwlock->bank.lock[i];
		lock->priv = sprd_hwlock->base + HWSPINLOCK_TOKEN(i);
	}

	platform_set_drvdata(pdev, sprd_hwlock);
	pm_runtime_enable(&pdev->dev);

	ret = hwspin_lock_register(&sprd_hwlock->bank, &pdev->dev,
				   &sprd_hwspinlock_ops, 0, SPRD_HWLOCKS_NUM);
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		clk_disable_unprepare(sprd_hwlock->clk);
		return ret;
	}

	return 0;
}
/*
 * Legacy probe path: enables the spinlock block in the AHB control
 * register, then allocates and registers HWSPINLOCK_MAX_NUM individual
 * hwspinlock instances, each pointing at its own token register.
 *
 * On registration failure, every lock registered so far is unregistered
 * and freed before the state is released.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __devinit sprd_hwspinlock_probe(struct platform_device *pdev)
{
	struct sprd_hwspinlock *sprd_lock;
	struct sprd_hwspinlock_state *state;
	struct hwspinlock *lock;
	struct resource *res;
	int i, ret;

	/* Ungate the spinlock hardware block on the AHB bus. */
	sci_glb_set(REG_AHB_AHB_CTL0, BIT_SPINLOCK_EB);

	/*
	 * A missing or zero MMIO base means there is no usable device;
	 * check before allocating anything.
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res || !res->start)
		return -ENODEV;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->num_locks = HWSPINLOCK_MAX_NUM;
	/*
	 * NOTE(review): res->start is used directly as an __iomem address
	 * (no ioremap) — presumably the resource already carries a virtual
	 * address on this platform; confirm against the board files.
	 */
	state->io_base = (void __iomem *)res->start;
	hwspinlock_base = state->io_base;

	platform_set_drvdata(pdev, state);

	/*
	 * runtime PM will make sure the clock of this module is
	 * enabled if at least one lock is requested
	 */
	pm_runtime_enable(&pdev->dev);

	for (i = 0; i < state->num_locks; i++) {
		sprd_lock = kzalloc(sizeof(*sprd_lock), GFP_KERNEL);
		if (!sprd_lock) {
			ret = -ENOMEM;
			goto free_locks;
		}

		sprd_lock->lock.dev = &pdev->dev;
		sprd_lock->lock.owner = THIS_MODULE;
		sprd_lock->lock.id = i;
		sprd_lock->lock.ops = &sprd_hwspinlock_ops;
		/* Each lock gets its own token register inside the block. */
		sprd_lock->addr =
		    (void __iomem *)(res->start + HWSPINLOCK_TOKEN(i));

		ret = hwspin_lock_register(&sprd_lock->lock);
		if (ret) {
			kfree(sprd_lock);
			goto free_locks;
		}
	}

	dev_info(&pdev->dev, "sprd_hwspinlock_probe ok\n");
	return 0;

free_locks:
	/* Unwind: unregister and free every lock registered so far. */
	while (--i >= 0) {
		lock = hwspin_lock_unregister(i);
		/* this shouldn't happen, but let's give our best effort */
		if (!lock) {
			dev_err(&pdev->dev, "%s: cleanups failed\n", __func__);
			continue;
		}
		sprd_lock = to_sprd_hwspinlock(lock);
		kfree(sprd_lock);
	}

	pm_runtime_disable(&pdev->dev);

	kfree(state);
	return ret;
}