Example #1
static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
				    struct nlattr **attrs)
{
	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
	struct tipc_link_info link_info;

	nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
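	/* Unlike Example #4 below, this version neither checks
	 * attrs[TIPC_NLA_LINK] nor the nla_parse_nested() return value,
	 * and it copies the link name with an unbounded strcpy(). */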

	link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
	link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
	strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));

	return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
			    &link_info, sizeof(link_info));
}
Example #2
static int iwl_testmode_notifications(struct ieee80211_hw *hw,
	struct nlattr **tb)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	bool enable;

	enable = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
	if (enable)
		priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
	else
		priv->pre_rx_handler = NULL;
	return 0;
}
Example #3
static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg,
				    struct nlattr **attrs)
{
	struct tipc_node_info node_info;
	struct nlattr *node[TIPC_NLA_NODE_MAX + 1];

	nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL);

	node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR]));
	node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP]));

	return tipc_add_tlv(msg->rep, TIPC_TLV_NODE_INFO, &node_info,
			    sizeof(node_info));
}
Example #4
static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
				    struct nlattr **attrs)
{
	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
	struct tipc_link_info link_info;
	int err;

	if (!attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
			       NULL);
	if (err)
		return err;

	link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
	link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
	nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
		    TIPC_MAX_LINK_NAME);

	return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
			    &link_info, sizeof(link_info));
}
Example #5
static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt *qopt;
	struct nlattr *tb[TCA_PRIO_MAX + 1] = {0};
	int err;
	int i;

	qopt = nla_data(opt);
	if (nla_len(opt) < sizeof(*qopt))
		return -1;

	if (nla_len(opt) >= sizeof(*qopt) + sizeof(struct nlattr)) {
		err = nla_parse_nested(tb, TCA_PRIO_MAX,
				       (struct nlattr *) (qopt + 1), NULL);
		if (err < 0)
			return err;
	}

	q->bands = qopt->bands;
	/* If we're multiqueue, make sure the number of incoming bands
	 * matches the number of queues on the device we're associating with.
	 * If the number of bands requested is zero, then set q->bands to
	 * dev->egress_subqueue_count.  Also, the root qdisc must be the
	 * only one that is enabled for multiqueue, since it's the only one
	 * that interacts with the underlying device.
	 */
	q->mq = nla_get_flag(tb[TCA_PRIO_MQ]);
	if (q->mq) {
		if (sch->parent != TC_H_ROOT)
			return -EINVAL;
		if (netif_is_multiqueue(sch->dev)) {
			if (q->bands == 0)
				q->bands = sch->dev->egress_subqueue_count;
			else if (q->bands != sch->dev->egress_subqueue_count)
				return -EINVAL;
		} else
			return -EOPNOTSUPP;
	}

	if (q->bands > TCQ_PRIO_BANDS || q->bands < 2)
		return -EINVAL;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= q->bands)
			return -EINVAL;
	}

	sch_tree_lock(sch);
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
		struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
		if (child != &noop_qdisc) {
			qdisc_tree_decrease_qlen(child, child->q.qlen);
			qdisc_destroy(child);
		}
	}
	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child;
			child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle, i + 1));
			if (child) {
				sch_tree_lock(sch);
				child = xchg(&q->queues[i], child);

				if (child != &noop_qdisc) {
					qdisc_tree_decrease_qlen(child,
								 child->q.qlen);
					qdisc_destroy(child);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}
Example #6
static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
{
	u32 tunnel_id;
	u32 peer_tunnel_id;
	int proto_version;
	int fd;
	int ret = 0;
	struct l2tp_tunnel_cfg cfg = { 0, };
	struct l2tp_tunnel *tunnel;
	struct net *net = genl_info_net(info);

	if (!info->attrs[L2TP_ATTR_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);

	if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]);

	if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) {
		ret = -EINVAL;
		goto out;
	}
	proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]);

	if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) {
		ret = -EINVAL;
		goto out;
	}
	cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]);

	fd = -1;
	if (info->attrs[L2TP_ATTR_FD]) {
		fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]);
	} else {
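		/* Example #8 below extends this branch: it requires a
		 * matching source/destination address pair (-EINVAL
		 * otherwise), adds IPv6 addresses and the UDP zero-checksum
		 * flags, and reads the IPv4 addresses with nla_get_in_addr()
		 * instead of nla_get_be32(). */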
		if (info->attrs[L2TP_ATTR_IP_SADDR])
			cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]);
		if (info->attrs[L2TP_ATTR_IP_DADDR])
			cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]);
		if (info->attrs[L2TP_ATTR_UDP_SPORT])
			cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]);
		if (info->attrs[L2TP_ATTR_UDP_DPORT])
			cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]);
		if (info->attrs[L2TP_ATTR_UDP_CSUM])
			cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]);
	}

	if (info->attrs[L2TP_ATTR_DEBUG])
		cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);

	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (tunnel != NULL) {
		ret = -EEXIST;
		goto out;
	}

	ret = -EINVAL;
	switch (cfg.encap) {
	case L2TP_ENCAPTYPE_UDP:
	case L2TP_ENCAPTYPE_IP:
		ret = l2tp_tunnel_create(net, fd, proto_version, tunnel_id,
					 peer_tunnel_id, &cfg, &tunnel);
		break;
	}

out:
	return ret;
}
Example #7
/*
 * This function handles the user application commands to the ucode.
 *
 * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_CMD_ID and
 * IWL_TM_ATTR_UCODE_CMD_DATA and calls the handler to send the
 * host command to the ucode.
 *
 * If any mandatory field is missing, -ENOMSG is replied to the user space
 * application; otherwise, it waits for the host command to be sent and
 * checks the return code.  In case of error, the error code is returned;
 * otherwise a reply skb is allocated and the RX packet is returned in it.
 *
 * @hw: ieee80211_hw object that represents the device
 * @tb: genl message fields from the user space
 */
static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct iwl_host_cmd cmd;
	struct iwl_rx_packet *pkt;
	struct sk_buff *skb;
	void *reply_buf;
	u32 reply_len;
	int ret;
	bool cmd_want_skb;

	memset(&cmd, 0, sizeof(struct iwl_host_cmd));

	if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
	    !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
		IWL_ERR(priv, "Missing ucode command mandatory fields\n");
		return -ENOMSG;
	}

	cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
	cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
	if (cmd_want_skb)
		cmd.flags |= CMD_WANT_SKB;

	cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
	cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
	cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
	IWL_DEBUG_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
				" len %d\n", cmd.id, cmd.flags, cmd.len[0]);

	ret = iwl_dvm_send_cmd(priv, &cmd);
	if (ret) {
		IWL_ERR(priv, "Failed to send hcmd\n");
		return ret;
	}
	if (!cmd_want_skb)
		return ret;

	/* Handling return of SKB to the user */
	pkt = cmd.resp_pkt;
	if (!pkt) {
		IWL_ERR(priv, "HCMD received a null response packet\n");
		return ret;
	}

	reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, reply_len + 20);
	reply_buf = kmalloc(reply_len, GFP_KERNEL);
	if (!skb || !reply_buf) {
		kfree_skb(skb);
		kfree(reply_buf);
		return -ENOMEM;
	}

	/* The reply is in a page, that we cannot send to user space. */
	memcpy(reply_buf, &(pkt->hdr), reply_len);
	iwl_free_resp(&cmd);

	NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT);
	NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf);
	return cfg80211_testmode_reply(skb);

nla_put_failure:
	IWL_DEBUG_INFO(priv, "Failed creating NL attributes\n");
	return -ENOMSG;
}
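The reply above is built with the NLA_PUT_U32/NLA_PUT macros, which jump to the nla_put_failure label on overflow and were removed from later kernels. On such kernels the same reply would be assembled with the nla_put_*() helpers and explicit error handling. A minimal sketch of that shape, reusing the attribute IDs from the example (the helper name is made up for illustration and is not part of the driver):

static int iwl_testmode_send_reply(struct ieee80211_hw *hw,
				   void *reply_buf, u32 reply_len)
{
	struct sk_buff *skb;

	/* 20 bytes of headroom for the attribute headers, as above. */
	skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, reply_len + 20);
	if (!skb)
		return -ENOMEM;

	/* nla_put_*() return -EMSGSIZE instead of jumping to a label. */
	if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
			IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
	    nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf)) {
		kfree_skb(skb);
		return -ENOMSG;
	}

	return cfg80211_testmode_reply(skb);
}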
Example #8
static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
{
	u32 tunnel_id;
	u32 peer_tunnel_id;
	int proto_version;
	int fd;
	int ret = 0;
	struct l2tp_tunnel_cfg cfg = { 0, };
	struct l2tp_tunnel *tunnel;
	struct net *net = genl_info_net(info);

	if (!info->attrs[L2TP_ATTR_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);

	if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]);

	if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) {
		ret = -EINVAL;
		goto out;
	}
	proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]);

	if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) {
		ret = -EINVAL;
		goto out;
	}
	cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]);

	fd = -1;
	if (info->attrs[L2TP_ATTR_FD]) {
		fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]);
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		if (info->attrs[L2TP_ATTR_IP6_SADDR] &&
		    info->attrs[L2TP_ATTR_IP6_DADDR]) {
			cfg.local_ip6 = nla_data(
				info->attrs[L2TP_ATTR_IP6_SADDR]);
			cfg.peer_ip6 = nla_data(
				info->attrs[L2TP_ATTR_IP6_DADDR]);
		} else
#endif
		if (info->attrs[L2TP_ATTR_IP_SADDR] &&
		    info->attrs[L2TP_ATTR_IP_DADDR]) {
			cfg.local_ip.s_addr = nla_get_in_addr(
				info->attrs[L2TP_ATTR_IP_SADDR]);
			cfg.peer_ip.s_addr = nla_get_in_addr(
				info->attrs[L2TP_ATTR_IP_DADDR]);
		} else {
			ret = -EINVAL;
			goto out;
		}
		if (info->attrs[L2TP_ATTR_UDP_SPORT])
			cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]);
		if (info->attrs[L2TP_ATTR_UDP_DPORT])
			cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]);
		if (info->attrs[L2TP_ATTR_UDP_CSUM])
			cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]);

#if IS_ENABLED(CONFIG_IPV6)
		if (info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX])
			cfg.udp6_zero_tx_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX]);
		if (info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX])
			cfg.udp6_zero_rx_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX]);
#endif
	}

	if (info->attrs[L2TP_ATTR_DEBUG])
		cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);

	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (tunnel != NULL) {
		ret = -EEXIST;
		goto out;
	}

	ret = -EINVAL;
	switch (cfg.encap) {
	case L2TP_ENCAPTYPE_UDP:
	case L2TP_ENCAPTYPE_IP:
		ret = l2tp_tunnel_create(net, fd, proto_version, tunnel_id,
					 peer_tunnel_id, &cfg, &tunnel);
		break;
	}

	if (ret >= 0)
		ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
					 tunnel, L2TP_CMD_TUNNEL_CREATE);
out:
	return ret;
}
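Every example above reads optional NLA_FLAG attributes with nla_get_flag(). A flag attribute carries no payload, so the accessor only reports whether the attribute was present in the parsed table; simplified, it behaves like the sketch below (the real helper in include/net/netlink.h is an equivalent one-liner):

static inline int nla_get_flag(const struct nlattr *nla)
{
	/* A flag attribute is "true" simply by being present. */
	return nla ? 1 : 0;
}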