/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
				   HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}
/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
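/*
 * Illustrative sketch (not part of the driver): a typical client-side
 * lifecycle built on hwspin_lock_request_specific(). MY_HWLOCK_ID and the
 * 100 ms timeout are made-up values for the example; the calls themselves
 * (hwspin_lock_timeout/hwspin_unlock/hwspin_lock_free) are the standard
 * <linux/hwspinlock.h> interface.
 */
#if 0	/* example only, never compiled */
static int example_use_specific_lock(void)
{
	struct hwspinlock *lock;
	int ret;

	/* reserve a well-known lock id agreed upon with the remote side */
	lock = hwspin_lock_request_specific(MY_HWLOCK_ID);
	if (!lock)
		return -EBUSY;

	/* take the lock, giving up after 100 ms */
	ret = hwspin_lock_timeout(lock, 100);
	if (ret == 0) {
		/* ... access the shared resource ... */
		hwspin_unlock(lock);
	}

	/* release the reservation when done */
	hwspin_lock_free(lock);
	return ret;
}
#endif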
/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock_to_id(hwlock);
}
/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	/* only dereference @hwlock after the NULL check above */
	dev = hwlock->bank->dev;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
				 HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	ret = pm_runtime_put(dev);
	if (ret < 0)
		goto out;

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
				 HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
static int debugfs_hwspinlock_lock_free(struct hwspinlock *_hwlock)
{
	int ret;

	ret = hwspin_lock_free(_hwlock);
	if (ret)
		pr_err("[debug] hwspinlock free failed, ret = [%d]\n", ret);
	else
		pr_info("[debug] hwspinlock %d freed successfully!\n",
			hwlock_to_id(_hwlock));

	return ret;
}
/**
 * __hwspin_lock_reset() - reset/unlock a specific hwspinlock
 * @id: index of the specific hwspinlock that needs to be reset
 *
 * This function resets a specific hwspinlock. It _should_ only be called
 * in very special circumstances, to forcibly release a lock that a user
 * wants to acquire but cannot, and thereby avoid a deadlock.
 *
 * An example would be a case where a remote processor has crashed while
 * holding a lock.
 */
void __hwspin_lock_reset(int id)
{
	struct hwspinlock *hwlock;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	if (hwlock_to_id(hwlock) != id) {
		pr_warn("id %u does not match hwspinlock id %u\n",
			id, hwlock_to_id(hwlock));
		goto out;
	}

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 */
	mb();

	pr_warn("hwspinlock %u forced to unlock\n", id);
	hwlock->bank->ops->unlock(hwlock);

out:
	mutex_unlock(&hwspinlock_tree_lock);
}
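/*
 * Illustrative sketch (not part of the driver): forcing a lock open after a
 * remote-core crash, the scenario described in the kerneldoc above. The
 * recovery hook and CRASHED_CORE_LOCK_ID are hypothetical names for this
 * example; only __hwspin_lock_reset() itself comes from this file.
 */
#if 0	/* example only, never compiled */
static void example_recover_after_remote_crash(void)
{
	/*
	 * The remote processor died while holding this lock; nobody will
	 * ever release it, so force it open to avoid deadlocking the host.
	 */
	__hwspin_lock_reset(CRASHED_CORE_LOCK_ID);
}
#endif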
/* try to lock the hardware spinlock */
static int sprd_hwspinlock_trylock(struct hwspinlock *lock)
{
	struct sprd_hwspinlock_dev *sprd_hwlock =
		dev_get_drvdata(lock->bank->dev);
	void __iomem *addr = lock->priv;
	int user_id, lock_id;

	if (!readl(addr))
		return 1;

	lock_id = hwlock_to_id(lock);
	/* get the hardware spinlock master/user id */
	user_id = readl(sprd_hwlock->base + HWSPINLOCK_MASTERID(lock_id));
	dev_warn(sprd_hwlock->bank.dev,
		 "hwspinlock [%d] lock failed and master/user id = %d!\n",
		 lock_id, user_id);

	return 0;
}
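/*
 * Context sketch (assumption, not shown in this excerpt): the trylock
 * callback above is normally plugged into a struct hwspinlock_ops alongside
 * unlock/relax callbacks defined elsewhere in the Spreadtrum driver, roughly
 * along these lines.
 */
#if 0	/* example only, never compiled */
static const struct hwspinlock_ops sprd_hwspinlock_ops = {
	.trylock = sprd_hwspinlock_trylock,
	.unlock  = sprd_hwspinlock_unlock,	/* assumed, defined elsewhere */
	.relax   = sprd_hwspinlock_relax,	/* assumed, defined elsewhere */
};
#endif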
static ssize_t debugfs_write(struct file *filp, const char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	struct hwspinlock *__hwlock;
	char buf[128];
	char *cmd = NULL;
	char *_cmd = NULL;
	int id;
	int timeout;
	int unlock;
	int ret;

	if (!ubuf || !cnt) {
		pr_err("buf is null!\n");
		return -EINVAL;
	}

	if (cnt > sizeof(buf)) {
		pr_err("input count too large!\n");
		return -ENOMEM;
	}

	if (copy_from_user(buf, ubuf, cnt - 1)) {
		pr_err("[Hwspinlock Debugfs] can not copy from user\n");
		cnt = -EINVAL;
		goto out;
	}
	buf[cnt - 1] = '\0';
	cmd = buf;
	_cmd = buf;

	pr_debug("[Hwspinlock Debugfs] [cmd: %s[cnt: %d]]\n", cmd, (int)cnt);

	if (!strncmp("request ", _cmd, strlen("request "))) {
		cmd = cmd + strlen("request ");
		_cmd = cmd;
		while ((' ' != *_cmd) && ('\0' != *_cmd))
			_cmd++;
		*_cmd = '\0';

		if (kstrtos32(cmd, 10, &id)) {
			pr_err("Hwspinlock Debugfs cmd error\n");
			cnt = -EINVAL;
			goto out;
		}

		hwlock = hwspin_lock_request_specific(id);
		if (!hwlock)
			pr_err("hwspinlock %u is already in use\n", id);
		else
			pr_info("[debug] Request hwspinlock %d success!\n", id);
	} else if (!strncmp("free_lock ", _cmd, strlen("free_lock "))) {
		cmd = cmd + strlen("free_lock ");
		_cmd = cmd;
		while ((' ' != *_cmd) && ('\0' != *_cmd))
			_cmd++;
		*_cmd = '\0';

		if (kstrtos32(cmd, 10, &id)) {
			pr_err("Hwspinlock Debugfs cmd error\n");
			cnt = -EINVAL;
			goto out;
		}

		if (!hwlock) {
			__hwlock = hwspin_lock_lookup(id);
			if (__hwlock) {
				debugfs_hwspinlock_lock_free(__hwlock);
				__hwlock = NULL;
			} else {
				pr_err("hwspinlock %d has not been requested yet\n", id);
				goto out;
			}
		} else if (hwlock_to_id(hwlock) == id) {
			debugfs_hwspinlock_lock_free(hwlock);
			hwlock = NULL;
		} else {
			pr_err("[debug] please free the correct lock!\n");
		}
	} else if (!strncmp("trylock ", _cmd, strlen("trylock "))) {
		cmd = cmd + strlen("trylock ");
		_cmd = cmd;
		while ((' ' != *_cmd) && ('\0' != *_cmd))
			_cmd++;
		if ('\0' == *_cmd) {
			pr_err("[debug] cmd error\n");
			cnt = -EINVAL;
			goto out;
		}
		*_cmd = '\0';

		if (kstrtos32(cmd, 10, &id)) {
			pr_err("[debug] cmd error\n");
			cnt = -EINVAL;
			goto out;
		}

		cmd = _cmd + 1;
		_cmd = cmd;
		while ((' ' != *_cmd) && ('\0' != *_cmd))
			_cmd++;
		*_cmd = '\0';

		if (kstrtos32(cmd, 10, &timeout)) {
			pr_err("[debug] cmd error\n");
			cnt = -EINVAL;
			goto out;
		}

		cmd = _cmd + 1;
		_cmd = cmd;
		while ((' ' != *_cmd) && ('\0' != *_cmd))
			_cmd++;
		*_cmd = '\0';

		if (kstrtos32(cmd, 10, &unlock)) {
			pr_err("[debug] cmd error\n");
			cnt = -EINVAL;
			goto out;
		}

		if (!hwlock) {
			pr_err("hwspinlock %d has not been requested yet\n", id);
			goto out;
		} else if (hwlock_to_id(hwlock) == id && timeout >= 0) {
			ret = debugfs_hwspinlock_trylock_timeout(id, timeout, unlock);
			if (!ret) {
				if (0 == timeout)
					pr_info("[debug] hwspin_trylock %d success!\n", id);
				else
					pr_info("[debug] hwspin_trylock_timeout %d success!\n", id);
			}
		} else if (timeout < 0) {
			pr_err("[debug] cmd error! timeout must be >= 0\n");
		} else {
			pr_err("[debug] please trylock the correct lock!\n");
		}
	} else if (!strncmp("unlock ", _cmd, strlen("unlock "))) {
		cmd = cmd + strlen("unlock ");
		_cmd = cmd;
		while ((' ' != *_cmd) && ('\0' != *_cmd))
			_cmd++;
		*_cmd = '\0';

		if (kstrtos32(cmd, 10, &id)) {
			pr_err("Hwspinlock Debugfs cmd error\n");
			cnt = -EINVAL;
			goto out;
		}

		if (!hwlock) {
			pr_err("hwspinlock %d has not been requested yet\n", id);
			goto out;
		} else if (hwlock_to_id(hwlock) == id && locked == 1) {
			hwspin_unlock(hwlock);
			pr_info("[debug] hwspin_unlock id=%d success!\n", id);
		} else {
			pr_err("[debug] please unlock the correct requested lock!\n");
		}
	} else {
		pr_err("Hwspinlock Debugfs cmd error\n");
		cnt = -EINVAL;
	}

out:
	return cnt;
}
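/*
 * Usage sketch (assumption: the exact debugfs node path depends on how the
 * file is created elsewhere in this driver). The commands below are the ones
 * parsed by debugfs_write() above:
 *
 *   echo "request <id>"                    > <debugfs>/hwspinlock/<node>
 *   echo "trylock <id> <timeout> <unlock>" > <debugfs>/hwspinlock/<node>
 *   echo "unlock <id>"                     > <debugfs>/hwspinlock/<node>
 *   echo "free_lock <id>"                  > <debugfs>/hwspinlock/<node>
 *
 * "request" must come first; "trylock", "unlock" and "free_lock" then operate
 * on the lock that was previously requested through this interface.
 */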