Example #1
/* Validate changes from /proc interface. */
static int ipv4_local_port_range(ctl_table *table, int write,
				 void __user *buffer,
				 size_t *lenp, loff_t *ppos)
{
	int ret;
	int range[2];
	ctl_table tmp = {
		.data = &range,
		.maxlen = sizeof(range),
		.mode = table->mode,
		.extra1 = &ip_local_port_range_min,
		.extra2 = &ip_local_port_range_max,
	};

	inet_get_local_port_range(range, range + 1);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		if (range[1] < range[0])
			ret = -EINVAL;
		else
			set_local_port_range(range);
	}

	return ret;
}


static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
{
	kgid_t *data = table->data;
	unsigned int seq;
	do {
		seq = read_seqbegin(&sysctl_local_ports.lock);

		*low = data[0];
		*high = data[1];
	} while (read_seqretry(&sysctl_local_ports.lock, seq));
}
static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
	void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table ipc_table;
	size_t lenp_bef = *lenp;
	int rc;

	memcpy(&ipc_table, table, sizeof(ipc_table));
	ipc_table.data = get_ipc(table);

	rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);

	if (write && !rc && lenp_bef == *lenp)
		/*
		 * Tunable has successfully been changed by hand. Disable its
		 * automatic adjustment. This simply requires unregistering
		 * the notifiers that trigger recalculation.
		 */
		unregister_ipcns_notifier(current->nsproxy->ipc_ns);

	return rc;
}
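Most of the handlers collected here follow the same shape: copy the ctl_table entry into a temporary whose .data points at a local buffer, let proc_dointvec_minmax() do the parsing and bounds checking, and commit the new value only when a write succeeds. The sketch below condenses that pattern; my_value, my_min, my_max and apply_my_value() are hypothetical names, not identifiers from the files above.

/* Minimal sketch of the "temporary ctl_table" pattern; all names here
 * are hypothetical. */
static int my_value;
static int my_min = 1;
static int my_max = 100;

static void apply_my_value(int val)
{
	my_value = val;			/* real code would often take a lock here */
}

static int proc_my_value(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int val = my_value;		/* work on a local copy */
	struct ctl_table tmp = *table;	/* shallow copy of the table entry */
	int ret;

	tmp.data = &val;
	tmp.extra1 = &my_min;
	tmp.extra2 = &my_max;

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;		/* read, or parse/range error */

	apply_my_value(val);		/* commit only after validation */
	return 0;
}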
Example #3
/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}
Example #4
File: fork.c  Project: lovejavaee/linux-2
int sysctl_max_threads(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int ret;
	int threads = max_threads;
	int min = MIN_THREADS;
	int max = MAX_THREADS;

	t = *table;
	t.data = &threads;
	t.extra1 = &min;
	t.extra2 = &max;

	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	set_max_threads(threads);

	return 0;
}
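For reference, a handler like sysctl_max_threads is hooked into the sysctl tree through a struct ctl_table entry. The entry below is only a sketch of what that wiring looks like; the actual entry lives in kernel/sysctl.c and may differ in detail.

/* Sketch of a table entry for the handler above; the real entry in
 * kernel/sysctl.c may differ. .data is left NULL because the handler
 * reads and writes max_threads itself. */
{
	.procname	= "threads-max",
	.data		= NULL,
	.maxlen		= sizeof(int),
	.mode		= 0644,
	.proc_handler	= sysctl_max_threads,
},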
Example #5
static int proc_local_port_range(ctl_table *table, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos)
{
	int ret;
	int range[2] = {local_port_range[0], local_port_range[1]};
	ctl_table tmp = {
		.data = &range,
		.maxlen = sizeof(range),
		.mode = table->mode,
		.extra1 = &local_port_range_min,
		.extra2 = &local_port_range_max,
	};

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		if (range[1] < range[0])
			ret = -EINVAL;
		else
			set_local_port_range(range);
	}

	return ret;
}

static struct ctl_table phonet_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "local_port_range",
		.data		= &local_port_range,
		.maxlen		= sizeof(local_port_range),
		.mode		= 0644,
		.proc_handler	= proc_local_port_range,
		.strategy	= NULL,
	},
	{ .ctl_name = 0 }
};
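A table like phonet_table is then registered with the sysctl core. On current kernels (where the legacy .ctl_name/.strategy fields no longer exist) that would typically be done with register_sysctl(); the original phonet code predates that helper and likely used register_sysctl_paths() or register_sysctl_table() instead. A sketch under that assumption:

/* Sketch only: registering such a table on a modern kernel. The path
 * string and error handling are illustrative, not taken from phonet. */
static struct ctl_table_header *phonet_sysctl_header;

static int __init phonet_sysctl_init(void)
{
	phonet_sysctl_header = register_sysctl("net/phonet", phonet_table);
	return phonet_sysctl_header ? 0 : -ENOMEM;
}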
Example #6
/* Validate changes from /proc interface. */
static int ipv4_ping_group_range(ctl_table *table, int write,
				 void __user *buffer,
				 size_t *lenp, loff_t *ppos)
{
	int ret;
	gid_t range[2];
	ctl_table tmp = {
		.data = &range,
		.maxlen = sizeof(range),
		.mode = table->mode,
		.extra1 = &ip_ping_group_range_min,
		.extra2 = &ip_ping_group_range_max,
	};

	inet_get_ping_group_range_table(table, range, range + 1);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0)
		set_ping_group_range(table, range);

	return ret;
}

/* Validate changes from /proc interface. */
static int proc_tcp_default_init_rwnd(ctl_table *ctl, int write,
				      void __user *buffer,
				      size_t *lenp, loff_t *ppos)
{
	int old_value = *(int *)ctl->data;
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	int new_value = *(int *)ctl->data;

	if (write && ret == 0 && (new_value < 3 || new_value > 100))
		*(int *)ctl->data = old_value;

	return ret;
}
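The handler above enforces its 3..100 bound by restoring the previous value after the fact, so an out-of-range write is silently dropped while still reporting success. The more common variant in this listing lets proc_dointvec_minmax() reject such writes through extra1/extra2; a sketch of that alternative follows (the bounds variables and function name are hypothetical), with the behavioural difference that invalid input now fails with -EINVAL.

/* Sketch: same 3..100 bound via extra1/extra2; out-of-range writes are
 * rejected with -EINVAL instead of being silently reverted. Names are
 * hypothetical. */
static int tcp_default_init_rwnd_min = 3;
static int tcp_default_init_rwnd_max = 100;

static int proc_tcp_default_init_rwnd_minmax(struct ctl_table *ctl, int write,
					     void __user *buffer,
					     size_t *lenp, loff_t *ppos)
{
	struct ctl_table tmp = *ctl;

	tmp.extra1 = &tcp_default_init_rwnd_min;
	tmp.extra2 = &tcp_default_init_rwnd_max;
	return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
}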
Example #7
static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
					   void __user *buffer, size_t *lenp,
					   loff_t *ppos)
{
	int ret, jit_enable = *(int *)table->data;
	struct ctl_table tmp = *table;

	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	tmp.data = &jit_enable;
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	if (write && !ret) {
		if (jit_enable < 2 ||
		    (jit_enable == 2 && bpf_dump_raw_ok())) {
			*(int *)table->data = jit_enable;
			if (jit_enable == 2)
				pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
		} else {
			ret = -EPERM;
		}
	}
	return ret;
}
STATIC int
xfs_stats_clear_proc_handler(
	ctl_table	*ctl,
	int		write,
	struct file	*filp,
	void		*buffer,
	size_t		*lenp)
{
	int		ret, *valp = ctl->data;
	__uint32_t	vn_active;

	ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp);

	if (!ret && write && *valp) {
		printk("XFS Clearing xfsstats\n");
		/* save vn_active, it's a universal truth! */
		vn_active = xfsstats.vn_active;
		memset(&xfsstats, 0, sizeof(xfsstats));
		xfsstats.vn_active = vn_active;
		xfs_stats_clear = 0;
	}

	return ret;
}
STATIC int
xfs_refcache_resize_proc_handler(
	ctl_table	*ctl,
	int		write,
	struct file	*filp,
	void		*buffer,
	size_t		*lenp)
{
	int		ret, *valp = ctl->data;
	int		xfs_refcache_new_size;
	int		xfs_refcache_old_size = *valp;

	ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp);
	xfs_refcache_new_size = *valp;

	if (!ret && write && xfs_refcache_new_size != xfs_refcache_old_size) {
		xfs_refcache_resize(xfs_refcache_new_size);
		/* Don't purge more than size of the cache */
		if (xfs_refcache_new_size < xfs_refcache_purge_count)
			xfs_refcache_purge_count = xfs_refcache_new_size;
	}

	return ret;
}
Example #10
/* Validate changes from /proc interface. */
static int ipv4_local_port_range(ctl_table *table, int write,
				 void __user *buffer,
				 size_t *lenp, loff_t *ppos)
{
	int ret;
	int range[2];
	ctl_table tmp = {
		.data = &range,
		.maxlen = sizeof(range),
		.mode = table->mode,
		.extra1 = &ip_local_port_range_min,
		.extra2 = &ip_local_port_range_max,
	};

	inet_get_local_port_range(range, range + 1);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		if (range[1] < range[0])
			ret = -EINVAL;
		else
			set_local_port_range(range);
	}

	return ret;
}

/* Validate changes from sysctl interface. */
static int ipv4_sysctl_local_port_range(ctl_table *table,
					 void __user *oldval,
					 size_t __user *oldlenp,
					void __user *newval, size_t newlen)
{
	int ret;
	int range[2];
	ctl_table tmp = {
		.data = &range,
		.maxlen = sizeof(range),
		.mode = table->mode,
		.extra1 = &ip_local_port_range_min,
		.extra2 = &ip_local_port_range_max,
	};

	inet_get_local_port_range(range, range + 1);
	ret = sysctl_intvec(&tmp, oldval, oldlenp, newval, newlen);
	if (ret == 0 && newval && newlen) {
		if (range[1] < range[0])
			ret = -EINVAL;
		else
			set_local_port_range(range);
	}
	return ret;
}


static int proc_tcp_congestion_control(ctl_table *ctl, int write,
				       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char val[TCP_CA_NAME_MAX];
	ctl_table tbl = {
		.data = val,
		.maxlen = TCP_CA_NAME_MAX,
	};
	int ret;

	tcp_get_default_congestion_control(val);

	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
	if (write && ret == 0)
		ret = tcp_set_default_congestion_control(val);
	return ret;
}
Example #11
/* Validate changes from /proc interface. */
static int ipv4_local_port_range(struct ctl_table *table, int write,
				 void __user *buffer,
				 size_t *lenp, loff_t *ppos)
{
	struct net *net =
		container_of(table->data, struct net, ipv4.ip_local_ports.range);
	int ret;
	int range[2];
	struct ctl_table tmp = {
		.data = &range,
		.maxlen = sizeof(range),
		.mode = table->mode,
		.extra1 = &ip_local_port_range_min,
		.extra2 = &ip_local_port_range_max,
	};

	inet_get_local_port_range(net, &range[0], &range[1]);

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		/* Ensure that the upper limit is not smaller than the lower,
		 * and that the lower does not encroach upon the privileged
		 * port limit.
		 */
		if ((range[1] < range[0]) ||
		    (range[0] < net->ipv4.sysctl_ip_prot_sock))
			ret = -EINVAL;
		else
			set_local_port_range(net, range);
	}

	return ret;
}

/* Validate changes from /proc interface. */
static int ipv4_privileged_ports(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = container_of(table->data, struct net,
	    ipv4.sysctl_ip_prot_sock);
	int ret;
	int pports;
	int range[2];
	struct ctl_table tmp = {
		.data = &pports,
		.maxlen = sizeof(pports),
		.mode = table->mode,
		.extra1 = &ip_privileged_port_min,
		.extra2 = &ip_privileged_port_max,
	};

	pports = net->ipv4.sysctl_ip_prot_sock;

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		inet_get_local_port_range(net, &range[0], &range[1]);
		/* Ensure that the local port range doesn't overlap with the
		 * privileged port range.
		 */
		if (range[0] < pports)
			ret = -EINVAL;
		else
			net->ipv4.sysctl_ip_prot_sock = pports;
	}

	return ret;
}

static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
{
	kgid_t *data = table->data;
	struct net *net =
		container_of(table->data, struct net, ipv4.ping_group_range.range);
	unsigned int seq;
	do {
		seq = read_seqbegin(&net->ipv4.ping_group_range.lock);

		*low = data[0];
		*high = data[1];
	} while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
}

/* Update system visible IP port range */
static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high)
{
	kgid_t *data = table->data;
	struct net *net =
		container_of(table->data, struct net, ipv4.ping_group_range.range);
	write_seqlock(&net->ipv4.ping_group_range.lock);
	data[0] = low;
	data[1] = high;
	write_sequnlock(&net->ipv4.ping_group_range.lock);
}
Example #12
/* Validate changes from /proc interface. */
static int ipv4_ping_group_range(struct ctl_table *table, int write,
				 void __user *buffer,
				 size_t *lenp, loff_t *ppos)
{
	struct user_namespace *user_ns = current_user_ns();
	int ret;
	gid_t urange[2];
	kgid_t low, high;
	struct ctl_table tmp = {
		.data = &urange,
		.maxlen = sizeof(urange),
		.mode = table->mode,
		.extra1 = &ip_ping_group_range_min,
		.extra2 = &ip_ping_group_range_max,
	};

	inet_get_ping_group_range_table(table, &low, &high);
	urange[0] = from_kgid_munged(user_ns, low);
	urange[1] = from_kgid_munged(user_ns, high);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		low = make_kgid(user_ns, urange[0]);
		high = make_kgid(user_ns, urange[1]);
		if (!gid_valid(low) || !gid_valid(high))
			return -EINVAL;
		if (urange[1] < urange[0] || gid_lt(high, low)) {
			low = make_kgid(&init_user_ns, 1);
			high = make_kgid(&init_user_ns, 0);
		}
		set_ping_group_range(table, low, high);
	}

	return ret;
}

static int ipv4_fwd_update_priority(struct ctl_table *table, int write,
				    void __user *buffer,
				    size_t *lenp, loff_t *ppos)
{
	struct net *net;
	int ret;

	net = container_of(table->data, struct net,
			   ipv4.sysctl_ip_fwd_update_priority);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (write && ret == 0)
		call_netevent_notifiers(NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE,
					net);

	return ret;
}

static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
				       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = container_of(ctl->data, struct net,
				       ipv4.tcp_congestion_control);
	char val[TCP_CA_NAME_MAX];
	struct ctl_table tbl = {
		.data = val,
		.maxlen = TCP_CA_NAME_MAX,
	};
	int ret;

	tcp_get_default_congestion_control(net, val);

	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
	if (write && ret == 0)
		ret = tcp_set_default_congestion_control(net, val);
	return ret;
}

static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
						 int write,
						 void __user *buffer, size_t *lenp,
						 loff_t *ppos)
{
	struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
	int ret;

	tbl.data = kmalloc(tbl.maxlen, GFP_USER);
	if (!tbl.data)
		return -ENOMEM;
	tcp_get_available_congestion_control(tbl.data, TCP_CA_BUF_MAX);
	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
	kfree(tbl.data);
	return ret;
}

static int proc_allowed_congestion_control(struct ctl_table *ctl,
					   int write,
					   void __user *buffer, size_t *lenp,
					   loff_t *ppos)
{
	struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
	int ret;

	tbl.data = kmalloc(tbl.maxlen, GFP_USER);
	if (!tbl.data)
		return -ENOMEM;

	tcp_get_allowed_congestion_control(tbl.data, tbl.maxlen);
	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
	if (write && ret == 0)
		ret = tcp_set_allowed_congestion_control(tbl.data);
	kfree(tbl.data);
	return ret;
}

static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	struct net *net = container_of(table->data, struct net,
	    ipv4.sysctl_tcp_fastopen);
	struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
	struct tcp_fastopen_context *ctxt;
	u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
	__le32 key[4];
	int ret, i;

	tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
	if (!tbl.data)
		return -ENOMEM;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt)
		memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
	else
		memset(key, 0, sizeof(key));
	rcu_read_unlock();

	for (i = 0; i < ARRAY_SIZE(key); i++)
		user_key[i] = le32_to_cpu(key[i]);

	snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
		user_key[0], user_key[1], user_key[2], user_key[3]);
	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		if (sscanf(tbl.data, "%x-%x-%x-%x", user_key, user_key + 1,
			   user_key + 2, user_key + 3) != 4) {
			ret = -EINVAL;
			goto bad_key;
		}

		for (i = 0; i < ARRAY_SIZE(user_key); i++)
			key[i] = cpu_to_le32(user_key[i]);

		tcp_fastopen_reset_cipher(net, NULL, key,
					  TCP_FASTOPEN_KEY_LENGTH);
	}

bad_key:
	pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
		user_key[0], user_key[1], user_key[2], user_key[3],
	       (char *)tbl.data, ret);
	kfree(tbl.data);
	return ret;
}

static void proc_configure_early_demux(int enabled, int protocol)
{
	struct net_protocol *ipprot;
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_protocol *ip6prot;
#endif

	rcu_read_lock();

	ipprot = rcu_dereference(inet_protos[protocol]);
	if (ipprot)
		ipprot->early_demux = enabled ? ipprot->early_demux_handler :
						NULL;

#if IS_ENABLED(CONFIG_IPV6)
	ip6prot = rcu_dereference(inet6_protos[protocol]);
	if (ip6prot)
		ip6prot->early_demux = enabled ? ip6prot->early_demux_handler :
						 NULL;
#endif
	rcu_read_unlock();
}

static int proc_tcp_early_demux(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = 0;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write && !ret) {
		int enabled = init_net.ipv4.sysctl_tcp_early_demux;

		proc_configure_early_demux(enabled, IPPROTO_TCP);
	}

	return ret;
}

static int proc_udp_early_demux(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = 0;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write && !ret) {
		int enabled = init_net.ipv4.sysctl_udp_early_demux;

		proc_configure_early_demux(enabled, IPPROTO_UDP);
	}

	return ret;
}
Example #13
/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_suspend_centisecs
 */
int dirty_writeback_suspend_centisecs_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	/* The return value of proc_dointvec_minmax() is discarded here;
	 * this handler always reports success to its caller. */
	proc_dointvec_minmax(table, write, buffer, length, ppos);
	return 0;
}