/*
 * Clear the stored target module name.
 *
 * Returns 0 on success, -EINTR if a fatal signal arrived while waiting
 * for either mutex, otherwise the result of set_target_name_internal().
 */
int kedr_target_detector_clear_target_name(void)
{
	int result;
	/*
	 * Only this order of mutex locking is correct.
	 *
	 * Otherwise deadlock is possible, because
	 * detector_notifier_call() is called with module_mutex locked.
	 */
	result = mutex_lock_killable(&module_mutex);
	if(result)
	{
		KEDR_MSG(COMPONENT_STRING
			"failed to lock module_mutex\n");
		return -EINTR;
	}
	result = mutex_lock_killable(&target_module_mutex);
	if(result)
	{
		KEDR_MSG(COMPONENT_STRING
			"failed to lock target_module_mutex\n");
		/* Drop the outer lock on this failure path as well. */
		mutex_unlock(&module_mutex);
		return -EINTR;
	}
	result = set_target_name_internal(NULL);
	/* Unlock in reverse order of acquisition. */
	mutex_unlock(&target_module_mutex);
	mutex_unlock(&module_mutex);
	return result;
}
/*
 * iterate_dir() - feed the directory entries of @file into @ctx.
 *
 * Checks that the file is a directory with an ->iterate op, that the
 * caller has read permission, and that the directory has not been
 * removed, then runs the filesystem's iterator with i_mutex held.
 *
 * Returns 0 (or the iterator's result) on success, -ENOTDIR, -ENOENT,
 * a security-layer error, or -EINTR if killed while waiting for i_mutex.
 */
int iterate_dir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	int res = -ENOTDIR;
	if (!file->f_op->iterate)
		goto out;
	res = security_file_permission(file, MAY_READ);
	if (res)
		goto out;
	res = mutex_lock_killable(&inode->i_mutex);
	if (res)
		goto out;
	res = -ENOENT;
	/* Directory may have been rmdir'ed while we waited for the lock. */
	if (!IS_DEADDIR(inode)) {
		/* Sync position in and out of the context around ->iterate. */
		ctx->pos = file->f_pos;
		res = file->f_op->iterate(file, ctx);
		file->f_pos = ctx->pos;
		fsnotify_access(file);
		file_accessed(file);
	}
	mutex_unlock(&inode->i_mutex);
out:
	return res;
}
/*
 * sleepy_read() - wake every process sleeping on this device.
 *
 * Sets the device's wake flag under the device mutex and wakes all
 * waiters on the device wait queue. No data is transferred.
 *
 * Returns 0, or -EINTR if killed while waiting for the mutex.
 */
ssize_t sleepy_read(struct file *filp, char __user *buf, size_t count,
	loff_t *f_pos)
{
	struct sleepy_dev *dev = filp->private_data;
	int dev_minor;

	if (mutex_lock_killable(&dev->sleepy_mutex))
		return -EINTR;

	/* Report when a process is waking everyone up. */
	dev_minor = (int)iminor(filp->f_path.dentry->d_inode);
	printk("SLEEPY_READ DEVICE (%d): Process is waking everyone up. \n", dev_minor);

	/* Flag the device, then wake all processes waiting on it. */
	dev->wakeytime = 1;
	wake_up_interruptible(&dev->wqueue);

	mutex_unlock(&dev->sleepy_mutex);
	return 0;
}
/*
 * ticket_read() - hand out the next ticket number to user space.
 *
 * The caller must request exactly 4 bytes (the size of the ticket
 * counter). On success the current number is copied to @buf and the
 * counter is incremented, both under the device mutex so concurrent
 * readers get distinct tickets.
 *
 * Returns 4 on success, -EINVAL for a wrong @count, -EFAULT if the
 * user buffer cannot be written, -EINTR if killed while waiting for
 * the mutex.
 */
ssize_t ticket_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct ticket_dev *dev = (struct ticket_dev *)filp->private_data;
	ssize_t retval = 0;
	int errCheck;

	/*
	 * If count != 4, we return -EINVAL. This is outside of the mutex
	 * since it doesn't need to be atomic; rejecting before taking the
	 * lock simplifies cleanup.
	 */
	if (count != 4)
		return -EINVAL;

	if (mutex_lock_killable(&dev->ticket_mutex))
		return -EINTR;

	errCheck = copy_to_user(buf, &(dev->ticket_number), count);
	if (errCheck != 0)
		/* FIX: a failed copy_to_user() is -EFAULT, not -EINVAL. */
		retval = -EFAULT;
	else {
		retval = 4;
		dev->ticket_number++;
	}

	mutex_unlock(&dev->ticket_mutex);
	return retval;
}
/* A callback function to catch loading and unloading of module.
 * Sets target_module pointer among other things.
 *
 * Registered as a module notifier, so it runs with module_mutex held
 * by the caller (see the lock-ordering comment in
 * kedr_target_detector_clear_target_name()).
 *
 * Always returns 0 (notifier "done"), even when the inner lock cannot
 * be taken.
 */
static int detector_notifier_call(struct notifier_block *nb, unsigned long mod_state, void *vmod)
{
	struct module* mod = (struct module *)vmod;
	BUG_ON(mod == NULL);

	/* handle module state change */
	switch(mod_state)
	{
	case MODULE_STATE_COMING: /* the module has just loaded */
		if(mutex_lock_killable(&target_module_mutex))
		{
			KEDR_MSG(COMPONENT_STRING
				"failed to lock target_module_mutex\n");
			return 0;
		}
		/* Only react if the loaded module's name matches the target. */
		if((target_name != NULL) && (strcmp(target_name, module_name(mod)) == 0))
		{
			/* Only one target module may be tracked at a time. */
			BUG_ON(target_module != NULL);
			/*
			 * Pin the notifier's owner module (if any) for as long
			 * as the target is tracked; drop it again if the load
			 * callback rejects the module.
			 */
			if((notifier->mod == NULL) || try_module_get(notifier->mod))
			{
				if(!notifier->on_target_load(notifier, mod))
				{
					target_module = mod;
				}
				else
				{
					if(notifier->mod) module_put(notifier->mod);
				}
			}
			else
			{
				pr_err("Fail to fix module of notifier.");
			}
		}
		mutex_unlock(&target_module_mutex);
		break;
	case MODULE_STATE_GOING: /* the module is going to unload */
		/* if the target module has already been unloaded,
		 * target_module is NULL, so (mod != target_module) will
		 * be true. */
		mutex_lock(&target_module_mutex);
		if(mod == target_module)
		{
			notifier->on_target_unload(notifier, mod);
			target_module = NULL;
			/* Release the reference taken in the COMING branch. */
			if(notifier->mod != NULL) module_put(notifier->mod);
		}
		mutex_unlock(&target_module_mutex);
		break;
	}
	return 0;
}
/*
 * debug_read() - copy up to one block of device data to user space.
 *
 * Reads start at *f_pos, are clamped to the end of the device buffer
 * and to dev->block_size, and advance *f_pos on success.
 *
 * Returns the number of bytes copied, 0 at EOF, -EFAULT if the user
 * buffer cannot be written, -EINTR if killed while waiting for the
 * device mutex.
 */
ssize_t debug_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct debug_dev *dev = filp->private_data;
	ssize_t result = 0;
	loff_t pos;

	if (mutex_lock_killable(&dev->debug_mutex))
		return -EINTR;

	pos = *f_pos;
	if (pos < dev->buffer_size) {
		size_t nbytes = count;

		/* Clamp to the end of the buffer, then to one block. */
		if (pos + nbytes > dev->buffer_size)
			nbytes = dev->buffer_size - pos;
		if (nbytes > dev->block_size)
			nbytes = dev->block_size;

		if (copy_to_user(buf, &dev->data[pos], nbytes) != 0) {
			result = -EFAULT;
		} else {
			*f_pos = pos + nbytes;
			result = nbytes;
		}
	}
	/* pos >= buffer_size is EOF: result stays 0. */

	mutex_unlock(&dev->debug_mutex);
	return result;
}
/*
 * sleepy_write() - put the writer to sleep for the number of seconds
 * given as a 4-byte integer in @buf, or until a reader wakes it.
 *
 * Returns the remaining whole seconds on a normal wake-up, -EINVAL for
 * a wrong byte count, -EINTR if killed while waiting for the mutex,
 * the copy_from_user() residue on a failed copy, or a negative value
 * from wait_event_interruptible_timeout() on an interrupted wait.
 */
ssize_t sleepy_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
{
	struct sleepy_dev *dev = (struct sleepy_dev *)filp->private_data;
	ssize_t retval = OK;
	s64 timeout = 0;
	s64 start_time = jiffies;
	int minor = (int)iminor(filp->f_path.dentry->d_inode);

	if (mutex_lock_killable(&dev->sleepy_mutex))
		return -EINTR;
	/* YOUR CODE HERE */
	/* Exactly COUNT_NUM bytes expected (presumably 4 -- TODO confirm). */
	if(COUNT_NUM != count)
	{
		printk(KERN_WARNING "Please input 4 bytes integer\n");
		retval = -EINVAL;
		goto EXIT_LOCK;
	}
	retval = copy_from_user(dev->data, buf, count);
	if(OK != retval)
	{
		printk(KERN_WARNING "Copy to kernel space failed, error code: %d\n", (int)retval);
		goto EXIT_LOCK;
	}
	/* Interpret the copied bytes as a signed seconds value. */
	timeout = *(int*)dev->data;
	if(0 > timeout)
	{
		printk(KERN_WARNING "Negative number or None-number input, no sleep is performed\n");
		goto EXIT_LOCK;
	}
	timeout *= HZ; /* seconds -> jiffies */
	printk(KERN_DEBUG "sleep %i (%s) in queue /dev/sleepy%d\n", current->pid, current->comm, minor);
	gWakeFlag[minor] = 0;
	/* Must not hold the mutex while sleeping, or readers could never
	 * take it to wake us up. */
	mutex_unlock(&dev->sleepy_mutex);
	retval = wait_event_interruptible_timeout(gWaitQueue[minor], 0 != gWakeFlag[minor], timeout);
	if(OK > retval)
	{
		/* Interrupted by a signal: propagate the error, no relock. */
		printk(KERN_WARNING "Wait event time error\n");
		goto EXIT_NOLOCK;
	}
	retval = OK;
	/* Relock to serialize the "remaining time" report. */
	mutex_lock(&dev->sleepy_mutex);
	retval = (start_time + timeout - jiffies) / HZ;
	printk("SLEEPY_WRITE DEVICE (%d): remaining = %zd \n", minor, retval);
	/* END YOUR CODE */
EXIT_LOCK:
	mutex_unlock(&dev->sleepy_mutex);
EXIT_NOLOCK:
	return retval;
}
/*
 * debug_write() - copy up to one block of data from user space into the
 * device buffer at *f_pos.
 *
 * Writes are clamped to the end of the fixed-size buffer and to
 * dev->block_size; *f_pos advances on success. Writing at or beyond
 * the end of the buffer is rejected.
 *
 * Returns the number of bytes written, -EINVAL for an out-of-range
 * position, -EFAULT if the user buffer cannot be read, -EINTR if
 * killed while waiting for the device mutex.
 */
ssize_t debug_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
{
	struct debug_dev *dev = (struct debug_dev *)filp->private_data;
	ssize_t retval = 0;

	if (mutex_lock_killable(&dev->debug_mutex))
		return -EINTR;

	if (*f_pos >= dev->buffer_size) {
		/* Writing beyond the end of the buffer is not allowed. */
		retval = -EINVAL;
		goto out;
	}

	/* Clamp to the end of the buffer, then to a single block. */
	if (*f_pos + count > dev->buffer_size)
		count = dev->buffer_size - *f_pos;
	if (count > dev->block_size)
		count = dev->block_size;

	if (copy_from_user(&(dev->data[*f_pos]), buf, count) != 0)
	{
		retval = -EFAULT;
		goto out;
	}

	*f_pos += count;
	retval = count;

out:
	mutex_unlock(&dev->debug_mutex);
	return retval;
}
/*
 * sleepy_read() - wake the writers sleeping on this device and copy the
 * device data buffer back to user space.
 *
 * Returns OK (presumably 0 -- TODO confirm) on success or when there is
 * nothing to wake; the copy_to_user() residue on a failed copy; -EINTR
 * if killed while waiting for the mutex.
 */
ssize_t sleepy_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct sleepy_dev *dev = (struct sleepy_dev *)filp->private_data;
	ssize_t retval = OK;
	int minor = (int)iminor(filp->f_path.dentry->d_inode);

	if (mutex_lock_killable(&dev->sleepy_mutex))
		return -EINTR;
	/* YOUR CODE HERE */
	printk("SLEEPY_READ DEVICE (%d): Process is waking everyone up. \n", minor);
	/*
	 * waitqueue_active() is nonzero when there are waiters, so with
	 * OK == 0 this bails out early (skipping the copy as well) when
	 * nobody is sleeping on this device's queue.
	 */
	if(OK == waitqueue_active(&gWaitQueue[minor]))
	{
		goto EXIT_LOCK;
	}
	/* Set the wake flag, then wake everyone waiting on the device. */
	gWakeFlag[minor] = 1;
	wake_up_interruptible(&gWaitQueue[minor]);
	retval = copy_to_user(buf, dev->data, count);
	if(OK != retval)
	{
		printk(KERN_WARNING "Copy to user space failed\n");
	}
	/* END YOUR CODE */
EXIT_LOCK:
	mutex_unlock(&dev->sleepy_mutex);
	return retval;
}
/*
 * sleepy_read() - wake sleepers on this device and copy the device data
 * buffer to user space.
 *
 * Returns 0 on success, -EFAULT if the user buffer cannot be written,
 * -EINTR if killed while waiting for the device mutex.
 */
ssize_t sleepy_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct sleepy_dev *dev = filp->private_data;
	unsigned int uncopied;

	if (mutex_lock_killable(&dev->sleepy_mutex))
		return -EINTR;

	/* Flag the device and wake everyone waiting on its queue. */
	dev->flag = 1;
	wake_up_interruptible(&dev->my_queue);

	uncopied = copy_to_user(buf, dev->data, count);
	if (uncopied != 0) {
		printk(KERN_INFO "read fails!\n");
		mutex_unlock(&dev->sleepy_mutex);
		return -EFAULT;
	}
	printk(KERN_INFO "read successes!\n");

	mutex_unlock(&dev->sleepy_mutex);
	return 0;
}
/*
 * ww_test_normal() - verify that plain mutex operations on a ww_mutex
 * base never touch the wound/wait context.
 *
 * Each section poisons o.ctx with ~0UL, performs one normal mutex
 * operation pair, and WARNs if o.ctx changed.
 */
static void ww_test_normal(void)
{
	int ret;

	WWAI(&t);

	/*
	 * None of the ww_mutex codepaths should be taken in the 'normal'
	 * mutex calls. The easiest way to verify this is by using the
	 * normal mutex calls, and making sure o.ctx is unmodified.
	 */

	/* mutex_lock (and indirectly, mutex_lock_nested) */
	o.ctx = (void *)~0UL;
	mutex_lock(&o.base);
	mutex_unlock(&o.base);
	WARN_ON(o.ctx != (void *)~0UL);

	/* mutex_lock_interruptible (and *_nested) */
	o.ctx = (void *)~0UL;
	ret = mutex_lock_interruptible(&o.base);
	if (!ret)
		mutex_unlock(&o.base);
	else
		WARN_ON(1);
	WARN_ON(o.ctx != (void *)~0UL);

	/* mutex_lock_killable (and *_nested) */
	o.ctx = (void *)~0UL;
	ret = mutex_lock_killable(&o.base);
	if (!ret)
		mutex_unlock(&o.base);
	else
		WARN_ON(1);
	WARN_ON(o.ctx != (void *)~0UL);

	/* trylock, succeeding */
	o.ctx = (void *)~0UL;
	ret = mutex_trylock(&o.base);
	WARN_ON(!ret);
	if (ret)
		mutex_unlock(&o.base);
	else
		WARN_ON(1);
	WARN_ON(o.ctx != (void *)~0UL);

	/* trylock, failing */
	o.ctx = (void *)~0UL;
	mutex_lock(&o.base);
	ret = mutex_trylock(&o.base);
	WARN_ON(ret);
	mutex_unlock(&o.base);
	WARN_ON(o.ctx != (void *)~0UL);

	/* nest_lock */
	o.ctx = (void *)~0UL;
	mutex_lock_nest_lock(&o.base, &t);
	mutex_unlock(&o.base);
	WARN_ON(o.ctx != (void *)~0UL);
}
// Condition function that acquires lock, checks flag, and releases lock int myFlagSet(struct sleepy_dev * dev, int devNum) { mutex_lock_killable(&dev->sleepy_mutex); int result = flags[devNum] == 0 ? 1 : 0; mutex_unlock(&dev->sleepy_mutex); return result; }
/*
 * pci_vpd_write() - write @count bytes from @arg into the device's
 * Vital Product Data area starting at @pos, one dword at a time.
 *
 * Returns @count on success, -EINVAL for misaligned or out-of-range
 * requests, -EIO if the device has no VPD, -EINTR if killed while
 * waiting for the VPD mutex, or a config-space access error.
 */
static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count, const void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	/* Hardware accesses VPD one dword at a time: require 4-byte alignment. */
	if (pos < 0 || (pos & 3) || (count & 3))
		return -EINVAL;

	/* Lazily determine the real VPD size on first access. */
	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	if (end > vpd->len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	/* Wait for any previous VPD transaction to complete. */
	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		/* Assemble a little-endian dword from the source buffer. */
		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		/* Writing the address with PCI_VPD_ADDR_F set starts the write. */
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = 1;
		vpd->flag = 0;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	/* Partial progress is not reported: success returns the full count. */
	return ret ? ret : count;
}
/*
 * wmt_plat_wake_lock_ctrl() - reference-counted wakelock control.
 *
 * WL_OP_GET increments the counter and takes the wakelock on the 0->1
 * transition; WL_OP_PUT decrements it and releases the wakelock on the
 * 1->0 transition; any other combination just logs the current status.
 *
 * FIX: @counter was re-read after gOsSLock was dropped, so a concurrent
 * caller could change it between the update and the comparison (and the
 * log lines could print a stale value). Snapshot the post-update value
 * while still holding the lock and use the snapshot throughout.
 *
 * Returns 0, or the mutex_lock_killable() error if interrupted.
 */
INT32 wmt_plat_wake_lock_ctrl(ENUM_WL_OP opId)
{
#ifdef CFG_WMT_WAKELOCK_SUPPORT
	static INT32 counter;
	INT32 wl_counter;
	INT32 status;
	INT32 ret = 0;

	ret = mutex_lock_killable(&gOsSLock);
	if (ret) {
		WMT_PLAT_ERR_FUNC("--->lock gOsSLock failed, ret=%d\n", ret);
		return ret;
	}
	if (WL_OP_GET == opId)
		++counter;
	else if (WL_OP_PUT == opId)
		--counter;
	wl_counter = counter; /* snapshot under the lock */
	mutex_unlock(&gOsSLock);

	if (WL_OP_GET == opId && wl_counter == 1) {
#ifdef CONFIG_PM_WAKELOCKS
		__pm_stay_awake(&wmtWakeLock);
		status = wmtWakeLock.active;
#else
		wake_lock(&wmtWakeLock);
		status = wake_lock_active(&wmtWakeLock);
#endif
		WMT_PLAT_DBG_FUNC("WMT-PLAT: after wake_lock(%d), counter(%d)\n", status, wl_counter);
	} else if (WL_OP_PUT == opId && wl_counter == 0) {
#ifdef CONFIG_PM_WAKELOCKS
		__pm_relax(&wmtWakeLock);
		status = wmtWakeLock.active;
#else
		wake_unlock(&wmtWakeLock);
		status = wake_lock_active(&wmtWakeLock);
#endif
		WMT_PLAT_DBG_FUNC("WMT-PLAT: after wake_unlock(%d), counter(%d)\n", status, wl_counter);
	} else {
#ifdef CONFIG_PM_WAKELOCKS
		status = wmtWakeLock.active;
#else
		status = wake_lock_active(&wmtWakeLock);
#endif
		WMT_PLAT_WARN_FUNC("WMT-PLAT: wakelock status(%d), counter(%d)\n", status, wl_counter);
	}
	return 0;
#else
	WMT_PLAT_WARN_FUNC("WMT-PLAT: host awake function is not supported.\n");
	return 0;
#endif
}
/*
 * shady_write() - no-op write handler.
 *
 * Takes and releases the device mutex but transfers no data.
 *
 * Returns 0, or -EINTR if killed while waiting for the mutex.
 */
ssize_t shady_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
{
	struct shady_dev *dev = filp->private_data;

	if (mutex_lock_killable(&dev->shady_mutex))
		return -EINTR;

	/* Intentionally empty: nothing is written. */

	mutex_unlock(&dev->shady_mutex);
	return 0;
}
/*
 * mc_poll() - poll/select support: report readable when the one-shot
 * mc_poll_flag is set, consuming the flag under the device mutex.
 *
 * NOTE(review): returning -EINTR from a poll handler is suspect -- the
 * return type is an unsigned event mask, so the error value becomes a
 * huge mask. Verify against the kernel version / callers in use.
 */
unsigned int mc_poll(struct file *filp, poll_table *wait)
{
	struct mc_dev *dev = filp->private_data;
	unsigned int mask = 0;

	if (mutex_lock_killable(&dev->mc_mutex))
		return -EINTR;

	poll_wait(filp, &mc_queue, wait);

	/* One-shot readiness flag: report readable and consume it. */
	if (mc_poll_flag){
		mask = POLLIN | POLLRDNORM;
		mc_poll_flag = 0;
	}

	mutex_unlock(&dev->mc_mutex);
	return mask;
}
/*
 * klc_read_common() - read from an output buffer attached to the file.
 *
 * Standard bounded read: positions past the current data length are
 * rejected, requests are clamped to the available data, and *f_pos is
 * advanced by the number of bytes copied.
 *
 * Returns the number of bytes copied, 0 at EOF or for a zero-byte
 * request, -EINVAL for a bad file or position, -EFAULT if the user
 * buffer cannot be written, -EINTR if killed while waiting for the
 * buffer lock.
 */
static ssize_t klc_read_common(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	ssize_t ret = 0;
	size_t data_len;
	loff_t pos = *f_pos;
	struct klc_output_buffer *ob = (struct klc_output_buffer *)filp->private_data;

	if (ob == NULL)
		return -EINVAL;

	if (mutex_lock_killable(&ob->lock) != 0)
	{
		pr_warning(KEDR_LC_MSG_PREFIX "klc_read_common(): "
			"got a signal while trying to acquire a mutex.\n");
		return -EINTR;
	}

	/* data_len is read under the lock; it may grow between reads. */
	data_len = ob->data_len;

	/* Reading outside of the data buffer is not allowed */
	if ((pos < 0) || (pos > data_len)) {
		ret = -EINVAL;
		goto out;
	}

	/* EOF reached or 0 bytes requested */
	if ((count == 0) || (pos == data_len)) {
		ret = 0;
		goto out;
	}

	if (pos + count > data_len)
		count = data_len - pos;

	if (copy_to_user(buf, &(ob->buf[pos]), count) != 0)
	{
		ret = -EFAULT;
		goto out;
	}

	/* Success path: drop the lock before advancing the position. */
	mutex_unlock(&ob->lock);

	*f_pos += count;
	return count;

out:
	mutex_unlock(&ob->lock);
	return ret;
}
static ssize_t stats_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvhost_device *ndev = to_nvhost_device(dev); struct tegra_dc *dc = nvhost_get_drvdata(ndev); bool enabled; if (mutex_lock_killable(&dc->lock)) return -EINTR; enabled = tegra_dc_stats_get(dc); mutex_unlock(&dc->lock); return snprintf(buf, PAGE_SIZE, "%d", enabled); }
/*
 * kcmp_lock() - take two mutexes in a canonical (address) order so that
 * concurrent callers with the pair swapped cannot deadlock.
 *
 * When both arguments name the same mutex it is locked only once.
 *
 * Returns 0 with both locks held, or a mutex_lock_killable() error with
 * neither held.
 */
static int kcmp_lock(struct mutex *m1, struct mutex *m2)
{
	int err;

	/* Canonical order: lock the higher address first. */
	if (m2 > m1)
		swap(m1, m2);

	err = mutex_lock_killable(m1);
	if (err)
		return err;

	if (likely(m1 != m2)) {
		err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
		if (err)
			mutex_unlock(m1);
	}
	return err;
}
/* * Fix all payloads and return array of functions with information * how them should be intercepted. * * Last element in the array should contain NULL in 'orig' field. * * On error, return ERR_PTR. * * Returning array is freed by the callee * at kedr_target_unload_callback() call. */ const struct kedr_base_interception_info* kedr_base_target_load_callback(struct module* m) { /* 0 - return info_array_current, otherwise return ERR_PTR(result)*/ int result; result = mutex_lock_killable(&base_mutex); if(result) return ERR_PTR(result); result = kedr_base_target_load_callback_internal(m); mutex_unlock(&base_mutex); return result ? ERR_PTR(result) : info_array_current; }
/*
 * stats_enable_store() - sysfs "store": enable or disable DC stats
 * collection from a decimal value written by user space.
 *
 * Returns @count on success, -EINVAL for unparsable input, -EINTR if
 * killed while waiting for the display-controller lock.
 */
static ssize_t stats_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvhost_device *ndev = to_nvhost_device(dev);
	struct tegra_dc *dc = nvhost_get_drvdata(ndev);
	unsigned long val = 0;

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (mutex_lock_killable(&dc->lock))
		return -EINTR;
	/* Any nonzero value enables, zero disables. */
	tegra_dc_stats_enable(dc, !!val);
	mutex_unlock(&dc->lock);

	return count;
}
/*
 * sleepy_write() - sleep for the number of seconds written by user
 * space, or until the device flag changes (a reader wakes us).
 *
 * Fixes over the previous version:
 *  - the return value of copy_from_user() was ignored; a faulting user
 *    buffer now yields -EFAULT instead of sleeping on garbage;
 *  - the sign check ran after the `HZ *` multiply, which can overflow
 *    int for large inputs; the seconds value is validated first and
 *    the multiply is done in a long.
 *
 * Returns the wait_event_interruptible_timeout() result (0 on timeout,
 * remaining jiffies if woken early, -ERESTARTSYS on a signal), 0 for a
 * negative seconds value, -EINVAL for a wrong byte count, -EFAULT for
 * a bad user buffer, -EINTR if killed while waiting for the mutex.
 */
ssize_t sleepy_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
{
	struct sleepy_dev *dev = (struct sleepy_dev *)filp->private_data;
	int seconds;
	long sleep_jiffies;
	int saved_flag;

	/* Anything other than a standard integer is rejected. */
	if (count != sizeof(int))
		return -EINVAL;

	if (copy_from_user(&seconds, buf, sizeof(seconds)) != 0)
		return -EFAULT;

	/* Negative durations: just return without sleeping. */
	if (seconds < 0)
		return 0;

	sleep_jiffies = (long)seconds * HZ;

	if (mutex_lock_killable(&dev->sleepy_mutex))
		return -EINTR;

	/*
	 * Save the flag as it is now; once it changes we know to wake
	 * from our slumber early.
	 */
	saved_flag = dev->flag;
	mutex_unlock(&dev->sleepy_mutex);

	return wait_event_interruptible_timeout(dev->queue,
		dev->flag != saved_flag, sleep_jiffies);
}
/*
 * wmt_plat_wake_lock_ctrl() - reference-counted wakelock control.
 *
 * WL_OP_GET increments the counter and takes the wakelock on the 0->1
 * transition; WL_OP_PUT decrements it and releases the wakelock on the
 * 1->0 transition; anything else just logs the current status.
 *
 * FIX: @counter was re-read after gOsSLock was dropped, so a concurrent
 * caller could change it between the update and the comparison (and the
 * log lines could print a stale value). Snapshot the post-update value
 * while still holding the lock and use the snapshot throughout.
 *
 * Returns 0, or the mutex_lock_killable() error if interrupted.
 */
INT32 wmt_plat_wake_lock_ctrl(ENUM_WL_OP opId)
{
#ifdef CFG_WMT_WAKELOCK_SUPPORT
	static INT32 counter = 0;
	INT32 wl_counter;
	INT32 ret = 0;

	ret = mutex_lock_killable(&gOsSLock);
	if (ret) {
		WMT_PLAT_ERR_FUNC("--->lock gOsSLock failed, ret=%d\n", ret);
		return ret;
	}
	if (WL_OP_GET == opId) {
		++counter;
	} else if (WL_OP_PUT == opId) {
		--counter;
	}
	wl_counter = counter; /* snapshot under the lock */
	mutex_unlock(&gOsSLock);

	if (WL_OP_GET == opId && wl_counter == 1) {
		wake_lock(&wmtWakeLock);
		WMT_PLAT_DBG_FUNC("WMT-PLAT: after wake_lock(%d), counter(%d)\n",
			wake_lock_active(&wmtWakeLock), wl_counter);
	} else if (WL_OP_PUT == opId && wl_counter == 0) {
		wake_unlock(&wmtWakeLock);
		WMT_PLAT_DBG_FUNC("WMT-PLAT: after wake_unlock(%d), counter(%d)\n",
			wake_lock_active(&wmtWakeLock), wl_counter);
	} else {
		WMT_PLAT_WARN_FUNC("WMT-PLAT: wakelock status(%d), counter(%d)\n",
			wake_lock_active(&wmtWakeLock), wl_counter);
	}
	return 0;
#else
	WMT_PLAT_WARN_FUNC("WMT-PLAT: host awake function is not supported.");
	return 0;
#endif
}
/*
 * command_open() - open the escore command character device.
 *
 * Validates the device numbers, recovers the escore_priv instance from
 * the cdev, takes api_mutex (giving the opener exclusive access to the
 * command stream -- presumably released in the matching release()
 * handler, TODO confirm), and resets the command parser state.
 *
 * Returns 0 on success with api_mutex held, -ENODEV for a bad device,
 * -EBUSY if the mutex could not be taken.
 */
static int command_open(struct inode *inode, struct file *filp)
{
	struct escore_priv *escore;
	int err;
	unsigned major;
	unsigned minor;

	pr_debug("called: %s\n", __func__);

	major = imajor(inode);
	minor = iminor(inode);
	if (major != cdev_major || minor < 0 || minor >= CDEV_COUNT) {
		pr_warn("escore: no such device major=%u minor=%u\n", major, minor);
		err = -ENODEV;
		goto OPEN_ERR;
	}

	escore = container_of((inode)->i_cdev, struct escore_priv, cdev_command);

	/* Sanity check: the cdev embedded in escore must be this one. */
	if (inode->i_cdev != &escore->cdev_command) {
		dev_err(escore->dev, "open: error bad cdev field\n");
		err = -ENODEV;
		goto OPEN_ERR;
	}

	err = mutex_lock_killable(&escore->api_mutex);
	if (err) {
		dev_dbg(escore->dev, "did not get lock: %d\n", err);
		err = -EBUSY;
		goto OPEN_ERR;
	}

	filp->private_data = escore;

	/* Initialize parser. */
	last_token = PT_NIL;
	parse_have = 0;
	parse_cb_preset = macro_preset_id;
	parse_cb_cmd = macro_cmd;

OPEN_ERR:
	return err;
}
/*
 * mm_access() - get a reference to @task's mm_struct if the caller is
 * permitted (in ptrace terms, @mode) to access it.
 *
 * Returns the mm with an extra reference (caller must mmput()), NULL if
 * the task has no mm, or an ERR_PTR: -EACCES when the ptrace check
 * fails, or the mutex_lock_killable() error if interrupted.
 */
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	/* Serialize against credential changes (exec, ptrace attach). */
	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	/* Accessing our own mm needs no ptrace permission check. */
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	mutex_unlock(&task->signal->cred_guard_mutex);

	return mm;
}
/*
 * sleepy_read() - wake all writers sleeping on this device.
 *
 * Bumps the device's generation flag under the mutex so that sleepers'
 * saved copies differ when they re-evaluate their wake-up condition,
 * then wakes the queue. No data is transferred.
 *
 * Returns 0, or -EINTR if killed while waiting for the mutex.
 */
ssize_t sleepy_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct sleepy_dev *dev = filp->private_data;

	if (mutex_lock_killable(&dev->sleepy_mutex))
		return -EINTR;

	dev->flag++;
	wake_up_interruptible(&dev->queue);

	mutex_unlock(&dev->sleepy_mutex);
	return 0;
}
/*
 * sleepy_write() - sleep for the 4-byte seconds value written by user
 * space, or until the device flag is set by a reader.
 *
 * Fixes over the previous version:
 *  - the bad-count path returned positive EINVAL *without unlocking
 *    sleepy_mutex*, deadlocking every later open/read/write;
 *  - the value was stored in an unsigned variable, making the
 *    `value < 0` negative-input check dead code.
 *
 * Returns the remaining seconds if woken early, 0 on a normal timeout
 * or for negative input, -EINVAL for a wrong byte count, -EFAULT for a
 * bad user buffer, -EINTR if killed while waiting for the mutex.
 */
ssize_t sleepy_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
{
	struct sleepy_dev *dev = (struct sleepy_dev *)filp->private_data;
	ssize_t retval = 0;
	int value;
	unsigned long start;
	long wait_ret;

	if (mutex_lock_killable(&dev->sleepy_mutex))
		return -EINTR;

	if (count != 4) {
		printk(KERN_INFO "process should writes a 4-byte integer!\n");
		mutex_unlock(&dev->sleepy_mutex); /* FIX: was returned with the lock held */
		return -EINVAL;                   /* FIX: was positive EINVAL */
	}

	if (copy_from_user(dev->data, buf, count) != 0) {
		printk(KERN_INFO "write fails!\n");
		mutex_unlock(&dev->sleepy_mutex);
		return -EFAULT;
	}

	value = *(int *)dev->data; /* FIX: signed, so the negative check works */
	if (value < 0) {
		mutex_unlock(&dev->sleepy_mutex);
		return retval;
	}
	printk(KERN_INFO "write successes!\n");

	/* Must not hold the mutex while sleeping. */
	mutex_unlock(&dev->sleepy_mutex);

	start = jiffies;
	wait_ret = wait_event_interruptible_timeout(dev->my_queue,
		dev->flag != 0, (long)value * HZ);
	if (wait_ret != 0) {
		/* Woken early (or interrupted): report remaining seconds. */
		retval = value - (jiffies - start) / HZ;
		printk(KERN_INFO "sleep ended abnormally!\n");
	} else
		printk(KERN_INFO "sleep ended normally!\n");

	/* Re-arm the flag for the next sleeper. */
	mutex_lock(&dev->sleepy_mutex);
	dev->flag = 0;
	mutex_unlock(&dev->sleepy_mutex);
	return retval;
}
/*
 * sleepy_read() - clear this device's flag and wake all sleepers.
 *
 * No data is transferred; the read call is used purely as a wake-up
 * signal.
 *
 * Returns 0, or -EINTR if killed while waiting for the device mutex.
 */
ssize_t sleepy_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct sleepy_dev *dev = filp->private_data;
	int minor = iminor(filp->f_path.dentry->d_inode);

	if (mutex_lock_killable(&dev->sleepy_mutex))
		return -EINTR;

	printk(KERN_DEBUG "Read from sleepy device %d\n", minor);
	flags[minor] = 0;
	wake_up_interruptible(&myQueue);

	mutex_unlock(&dev->sleepy_mutex);
	return 0;
}
/*
 * kedr_payload_register() - register a payload module with the KEDR
 * base, serialized by base_mutex.
 *
 * Returns 0 on success, the mutex_lock_killable() error if interrupted,
 * or the result of kedr_payload_register_internal().
 */
int kedr_payload_register(struct kedr_payload *payload)
{
	int ret;

	BUG_ON(payload == NULL);

	ret = mutex_lock_killable(&base_mutex);
	if (ret != 0) {
		KEDR_MSG(COMPONENT_STRING "failed to lock base_mutex\n");
		return ret;
	}

	ret = kedr_payload_register_internal(payload);
	mutex_unlock(&base_mutex);
	return ret;
}
/*
 * klc_print_string() - append string @s (plus a newline) to the output
 * buffer selected by @output_type, optionally mirroring it to syslog.
 *
 * Unknown output types and failure to take the buffer lock are logged
 * and silently dropped (best-effort output, no error is returned).
 */
void klc_print_string(struct kedr_lc_output *output, enum klc_output_type output_type, const char *s)
{
	struct klc_output_buffer *ob = NULL;

	BUG_ON(s == NULL);

	/* Route the message to the buffer for its category. */
	switch (output_type) {
	case KLC_UNFREED_ALLOC:
		ob = &output->ob_leaks;
		break;
	case KLC_BAD_FREE:
		ob = &output->ob_bad_frees;
		break;
	case KLC_OTHER:
		ob = &output->ob_other;
		break;
	default:
		pr_warning(KEDR_LC_MSG_PREFIX "unknown output type: %d\n",
			(int)output_type);
		return;
	}
	BUG_ON(ob->buf == NULL);

	if (mutex_lock_killable(&ob->lock) != 0)
	{
		pr_warning(KEDR_LC_MSG_PREFIX "klc_print_string(): "
			"got a signal while trying to acquire a mutex.\n");
		return;
	}

	klc_output_buffer_append(ob, s);
	klc_output_buffer_append(ob, "\n");

	/* Optionally duplicate the line to the kernel log. */
	if (syslog_output)
		pr_warning(KEDR_LC_MSG_PREFIX "%s\n", s);

	mutex_unlock(&ob->lock);
}