void quota_send_warning(struct kqid qid, dev_t dev, const char warntype)
{
	static atomic_unchecked_t seq;
	struct sk_buff *skb;
	void *msg_head;
	int ret;
	int msg_size = 4 * nla_total_size(sizeof(u32)) +
		       2 * nla_total_size(sizeof(u64));

	/* We have to allocate using GFP_NOFS as we are called from a
	 * filesystem performing a write and thus further recursion into
	 * the fs to free some data could cause deadlocks. */
	skb = genlmsg_new(msg_size, GFP_NOFS);
	if (!skb) {
		printk(KERN_ERR
		       "VFS: Not enough memory to send quota warning.\n");
		return;
	}
	msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
			       &quota_genl_family, 0, QUOTA_NL_C_WARNING);
	if (!msg_head) {
		printk(KERN_ERR
		       "VFS: Cannot store netlink header in quota warning.\n");
		goto err_out;
	}
	ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID,
			  from_kqid_munged(&init_user_ns, qid));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
	if (ret)
		goto attr_err_out;
	ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID,
			  from_kuid_munged(&init_user_ns, current_uid()));
	if (ret)
		goto attr_err_out;
	genlmsg_end(skb, msg_head);

	genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
	return;
attr_err_out:
	printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
err_out:
	kfree_skb(skb);
}
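/*
 * Note: atomic_unchecked_t and the *_unchecked() helpers used here and in
 * the rtmutex-tester code below come from the PaX/grsecurity refcount
 * hardening, not from mainline. They mark counters that may legitimately
 * wrap (such as the netlink sequence number above), so the overflow
 * detection applied to plain atomic_t is deliberately bypassed for them.
 * A minimal sketch of the fallback mapping on a tree without
 * CONFIG_PAX_REFCOUNT (illustrative, not the exact patch text):
 */
#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_set_unchecked(v, i)		atomic_set((v), (i))
#define atomic_add_return_unchecked(i, v)	atomic_add_return((i), (v))
#endif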
static int handle_op(struct test_thread_data *td, int lockwakeup)
{
	int i, id, ret = -EINVAL;

	switch(td->opcode) {

	case RTTEST_NOP:
		return 0;

	case RTTEST_LOCKCONT:
		td->mutexes[td->opdata] = 1;
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		return 0;

	case RTTEST_RESET:
		for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
			if (td->mutexes[i] == 4) {
				rt_mutex_unlock(&mutexes[i]);
				td->mutexes[i] = 0;
			}
		}
		return 0;

	case RTTEST_RESETEVENT:
		atomic_set_unchecked(&rttest_event, 0);
		return 0;

	default:
		if (lockwakeup)
			return ret;
	}

	switch(td->opcode) {

	case RTTEST_LOCK:
	case RTTEST_LOCKNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		rt_mutex_lock(&mutexes[id]);
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		td->mutexes[id] = 4;
		return 0;

	case RTTEST_LOCKINT:
	case RTTEST_LOCKINTNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		td->mutexes[id] = ret ? 0 : 4;
		return ret ? -EINTR : 0;

	case RTTEST_UNLOCK:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
			return ret;

		td->event = atomic_add_return_unchecked(1, &rttest_event);
		rt_mutex_unlock(&mutexes[id]);
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		td->mutexes[id] = 0;
		return 0;

	default:
		break;
	}
	return ret;
}
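/*
 * The td->mutexes[] values used by handle_op() above and
 * schedule_rt_mutex_test() below form a small per-mutex state machine
 * (inferred from the handlers in this file):
 *
 *	0 - mutex is free (or an interruptible lock attempt failed)
 *	1 - lock requested; also set by RTTEST_LOCKCONT to resume a waiter
 *	2 - the lock attempt blocked in schedule_rt_mutex_test()
 *	3 - the blocked waiter was woken and awaits RTTEST_LOCKCONT
 *	4 - mutex is held (released via RTTEST_UNLOCK or RTTEST_RESET)
 */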
/*
 * Schedule replacement for rtsem_down(). Only called for threads with
 * PF_MUTEX_TESTER set.
 *
 * This allows us to have fine-grained control over the event flow.
 */
void schedule_rt_mutex_test(struct rt_mutex *mutex)
{
	int tid, op, dat;
	struct test_thread_data *td;

	/* We have to look up the task */
	for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
		if (threads[tid] == current)
			break;
	}

	BUG_ON(tid == MAX_RT_TEST_THREADS);

	td = &thread_data[tid];

	op = td->opcode;
	dat = td->opdata;

	switch (op) {
	case RTTEST_LOCK:
	case RTTEST_LOCKINT:
	case RTTEST_LOCKNOWAIT:
	case RTTEST_LOCKINTNOWAIT:
		if (mutex != &mutexes[dat])
			break;

		if (td->mutexes[dat] != 1)
			break;

		td->mutexes[dat] = 2;
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		break;

	default:
		break;
	}

	schedule();

	switch (op) {
	case RTTEST_LOCK:
	case RTTEST_LOCKINT:
		if (mutex != &mutexes[dat])
			return;

		if (td->mutexes[dat] != 2)
			return;

		td->mutexes[dat] = 3;
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		break;

	case RTTEST_LOCKNOWAIT:
	case RTTEST_LOCKINTNOWAIT:
		if (mutex != &mutexes[dat])
			return;

		if (td->mutexes[dat] != 2)
			return;

		td->mutexes[dat] = 1;
		td->event = atomic_add_return_unchecked(1, &rttest_event);
		return;

	default:
		return;
	}

	td->opcode = 0;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (td->opcode > 0) {
			int ret;

			set_current_state(TASK_RUNNING);
			ret = handle_op(td, 1);
			set_current_state(TASK_INTERRUPTIBLE);
			if (td->opcode == RTTEST_LOCKCONT)
				break;
			td->opcode = ret;
		}

		/* Wait for the next command to be executed */
		schedule();
	}

	/* Restore previous command and data */
	td->opcode = op;
	td->opdata = dat;
}
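/*
 * Illustrative (hypothetical) trace of a contended RTTEST_LOCK on
 * mutexes[0], pieced together from the two functions above:
 *
 *	handle_op():               mutexes[0] = 1, rt_mutex_lock() blocks
 *	schedule_rt_mutex_test():  1 -> 2 before schedule(), 2 -> 3 on
 *	                           wakeup, then waits for RTTEST_LOCKCONT
 *	wait loop, handle_op():    RTTEST_LOCKCONT sets mutexes[0] = 1 and
 *	                           the loop exits, resuming the acquisition
 *	handle_op() (resumed):     rt_mutex_lock() returns, mutexes[0] = 4
 *
 * Every transition also bumps rttest_event via
 * atomic_add_return_unchecked(), giving the test harness a global ordering
 * of events to assert against.
 */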