/*
 * Sysctl handler for vm.dirty_bytes.
 *
 * Runs the value through the generic unsigned-long proc helper; if a
 * write actually changed vm_dirty_bytes, the writeback completion
 * period is recomputed and vm_dirty_ratio is cleared so the byte-count
 * threshold takes effect instead of the ratio-based one.
 *
 * Returns 0 on success or the error from proc_doulongvec_minmax().
 */
int dirty_bytes_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long prev_bytes = vm_dirty_bytes;
	int err;

	err = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);

	/* Only react to a successful write that changed the value. */
	if (err != 0 || !write || vm_dirty_bytes == prev_bytes)
		return err;

	update_completion_period();
	vm_dirty_ratio = 0;	/* byte threshold now in force */
	return 0;
}
/*
 * Sysctl handler for fs.xfs.stats_clear: writing a non-zero value
 * zeroes all XFS statistics counters (except vn_active, which tracks
 * currently-active vnodes and must survive the reset).
 *
 * Returns 0 on success or the error from the generic proc helper.
 */
STATIC int
xfs_stats_clear_proc_handler(
	ctl_table	*ctl,
	int		write,
	struct file	*filp,
	void		*buffer,
	size_t		*lenp)
{
	int		ret, *valp = ctl->data;
	__uint32_t	vn_active;

	/*
	 * ctl->data points at an int, so the int-sized helper must be
	 * used.  proc_doulongvec_minmax() would read and write
	 * sizeof(unsigned long) bytes, corrupting the field (and its
	 * neighbour on 64-bit) and misparsing user input.
	 */
	ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp);

	if (!ret && write && *valp) {
		printk("XFS Clearing xfsstats\n");
		/* save vn_active, it's a universal truth! */
		vn_active = xfsstats.vn_active;
		memset(&xfsstats, 0, sizeof(xfsstats));
		xfsstats.vn_active = vn_active;
		/* the trigger is one-shot: re-arm it after clearing */
		xfs_params.stats_clear = 0;
	}

	return ret;
}
/*
 * Sysctl handler for the XFS refcache size: when a write changes the
 * configured size, resize the refcache and clamp the purge count so we
 * never try to purge more entries than the cache can hold.
 *
 * Returns 0 on success or the error from the generic proc helper.
 */
STATIC int
xfs_refcache_resize_proc_handler(
	ctl_table	*ctl,
	int		write,
	struct file	*filp,
	void		*buffer,
	size_t		*lenp)
{
	int		ret, *valp = ctl->data;
	int		xfs_refcache_new_size;
	int		xfs_refcache_old_size = *valp;

	/*
	 * ctl->data is an int, so use the int-sized helper.  The
	 * unsigned-long vector variant would access
	 * sizeof(unsigned long) bytes and corrupt adjacent data on
	 * 64-bit platforms.
	 */
	ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp);

	xfs_refcache_new_size = *valp;

	if (!ret && write && xfs_refcache_new_size != xfs_refcache_old_size) {
		xfs_refcache_resize(xfs_refcache_new_size);
		/* Don't purge more than size of the cache */
		if (xfs_refcache_new_size < xfs_params.refcache_purge)
			xfs_params.refcache_purge = xfs_refcache_new_size;
	}

	return ret;
}
/*
 * Sysctl handler for the open-files counter: refresh the cached
 * files_stat.nr_files from get_nr_files() before delegating to the
 * generic unsigned-long vector handler, so readers always see a
 * current count.
 */
int proc_nr_files(ctl_table *table, int write,
	void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	/* Pull a fresh count into the table's backing storage. */
	files_stat.nr_files = get_nr_files();

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	return ret;
}
/*
 * Sysctl handler for the per-netns net.ipv4.tcp_mem triple.
 *
 * Read path: point ctl->data at this netns' sysctl_tcp_mem array and
 * let the generic handler format it.  (NOTE(review): this mutates the
 * shared ctl_table in place — looks racy if two netns read
 * concurrently; confirm against the registration code.)
 *
 * Write path: parse into a local vec[3] through a temporary ctl_table
 * so the live values are only updated after a fully successful parse;
 * then propagate the new limits to the current task's memcg tcp
 * accounting (when CONFIG_MEMCG_KMEM) and to the netns array.
 */
static int ipv4_tcp_mem(struct ctl_table *ctl, int write,
			void __user *buffer, size_t *lenp,
			loff_t *ppos)
{
	int ret;
	unsigned long vec[3];
	struct net *net = current->nsproxy->net_ns;
#ifdef CONFIG_MEMCG_KMEM
	struct mem_cgroup *memcg;
#endif
	/* Temporary table: parse into vec[], not the live values. */
	struct ctl_table tmp = {
		.data = &vec,
		.maxlen = sizeof(vec),
		.mode = ctl->mode,
	};

	if (!write) {
		ctl->data = &net->ipv4.sysctl_tcp_mem;
		return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
	}

	ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (ret)
		return ret;

#ifdef CONFIG_MEMCG_KMEM
	/* Push the three limits into the caller's memcg under RCU. */
	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);

	tcp_prot_mem(memcg, vec[0], 0);
	tcp_prot_mem(memcg, vec[1], 1);
	tcp_prot_mem(memcg, vec[2], 2);
	rcu_read_unlock();
#endif

	/* Commit the parsed values to this netns. */
	net->ipv4.sysctl_tcp_mem[0] = vec[0];
	net->ipv4.sysctl_tcp_mem[1] = vec[1];
	net->ipv4.sysctl_tcp_mem[2] = vec[2];

	return 0;
}

/*
 * Sysctl handler for the TCP Fast Open key.
 *
 * The key is exposed as a hex string "xxxxxxxx-xxxxxxxx-xxxxxxxx-xxxxxxxx"
 * (8 hex digits per 32-bit word, '-' separators).  Reads snapshot the
 * current context key under RCU; writes parse the same format and
 * install a new cipher via tcp_fastopen_reset_cipher().
 */
static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	/* 2 hex chars per byte + separators/NUL slack */
	struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
	struct tcp_fastopen_context *ctxt;
	int ret;
	u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */

	tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
	if (!tbl.data)
		return -ENOMEM;

	/* Snapshot the current key (all-zero if no context installed). */
	rcu_read_lock();
	ctxt = rcu_dereference(tcp_fastopen_ctx);
	if (ctxt)
		memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
	else
		memset(user_key, 0, sizeof(user_key));
	rcu_read_unlock();

	snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
		user_key[0], user_key[1], user_key[2], user_key[3]);
	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		/* All four words must parse, or the key is rejected. */
		if (sscanf(tbl.data, "%x-%x-%x-%x", user_key, user_key + 1,
			   user_key + 2, user_key + 3) != 4) {
			ret = -EINVAL;
			goto bad_key;
		}
		tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
	}

bad_key:
	pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
	       user_key[0], user_key[1], user_key[2], user_key[3],
	       (char *)tbl.data, ret);
	kfree(tbl.data);
	return ret;
}

static struct ctl_table
ipv4_table[] = { { .procname = "tcp_timestamps", .data = &sysctl_tcp_timestamps, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .procname = "tcp_window_scaling", .data = &sysctl_tcp_window_scaling, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { .procname = "tcp_sack",