Ejemplo n.º 1
0
/* Build an OVS_PACKET_CMD_ACTION upcall for 'skb' from the nested
 * attributes in 'attr' and hand it to userspace via ovs_dp_upcall().
 * Returns ovs_dp_upcall()'s result.
 */
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{
	struct dp_upcall_info upcall;
	const struct nlattr *nla;
	int remaining;

	/* Defaults until overridden by nested attributes below. */
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = &OVS_CB(skb)->flow->key;
	upcall.userdata = NULL;
	upcall.pid = 0;

	/* Walk the nested attributes; a later attribute of the same
	 * type overrides an earlier one.
	 */
	nla = nla_data(attr);
	remaining = nla_len(attr);
	while (remaining > 0) {
		int type = nla_type(nla);

		if (type == OVS_USERSPACE_ATTR_USERDATA)
			upcall.userdata = nla;
		else if (type == OVS_USERSPACE_ATTR_PID)
			upcall.pid = nla_get_u32(nla);

		nla = nla_next(nla, &remaining);
	}

	return ovs_dp_upcall(dp, skb, &upcall);
}
Ejemplo n.º 2
0
/* called from BPF program, therefore rcu_read_lock is held
 * bpf_check() verified that 'buf' pointer to BPF's stack
 * and it has 'len' bytes for us to read
 */
/* Called from a BPF program, therefore rcu_read_lock is held.
 * bpf_check() verified that 'buf' points into the BPF program's stack
 * and has 'len' bytes for us to read.
 *
 * Packs [struct_id][len bytes of buf] into a temporary
 * OVS_PACKET_ATTR_USERDATA nlattr and sends it to userspace via
 * ovs_dp_upcall() with no packet attached (skb == NULL).
 * Silently drops the message on any allocation/lookup failure.
 */
void bpf_channel_push_struct(struct bpf_context *pctx, u32 struct_id,
			     const void *buf, u32 len)
{
	struct bpf_dp_context *ctx = container_of(pctx, struct bpf_dp_context,
						  context);
	struct dp_upcall_info upcall;
	struct plum *plum;
	struct nlattr *nla;

	if (unlikely(!ctx->skb))
		return;

	plum = rcu_dereference(ctx->dp->plums[pctx->plum_id]);
	if (unlikely(!plum))
		return;

	/* Allocate temp nlattr to pass into ovs_dp_upcall().
	 * GFP_ATOMIC: we are in BPF/RCU context and must not sleep.
	 * The payload is the u32 struct_id followed by the caller's buffer;
	 * sizeof(struct_id) replaces the former magic constant 4.
	 */
	nla = kzalloc(nla_total_size(sizeof(struct_id) + len), GFP_ATOMIC);
	if (unlikely(!nla))
		return;

	nla->nla_type = OVS_PACKET_ATTR_USERDATA;
	nla->nla_len = nla_attr_size(sizeof(struct_id) + len);
	memcpy(nla_data(nla), &struct_id, sizeof(struct_id));
	memcpy(nla_data(nla) + sizeof(struct_id), buf, len);

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = NULL;
	upcall.userdata = nla;
	upcall.portid = plum->upcall_pid;
	/* skb == NULL: this upcall carries only the userdata attribute. */
	ovs_dp_upcall(ctx->dp, NULL, &upcall);
	kfree(nla);
}
Ejemplo n.º 3
0
/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct dp_stats_percpu *stats;
	u64 *stats_counter;
	int error;

	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	if (!OVS_CB(skb)->flow) {
		struct sw_flow_key key;
		int key_len;

		/* Extract flow from 'skb' into 'key'. */
		error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
		if (unlikely(error)) {
			kfree_skb(skb);
			return;
		}

		/* Look up flow. */
		flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table),
					   &key, key_len);
		if (unlikely(!flow)) {
			struct dp_upcall_info upcall;

			upcall.cmd = OVS_PACKET_CMD_MISS;
			upcall.key = &key;
			upcall.userdata = NULL;
			upcall.pid = p->upcall_pid;
			ovs_dp_upcall(dp, skb, &upcall);
			consume_skb(skb);
			stats_counter = &stats->n_missed;
			goto out;
		}

		OVS_CB(skb)->flow = flow;
	}

	stats_counter = &stats->n_hit;
	ovs_flow_used(OVS_CB(skb)->flow, skb);
	ovs_execute_actions(dp, skb);

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->sync);
	(*stats_counter)++;
	u64_stats_update_end(&stats->sync);
}
Ejemplo n.º 4
0
/* called from BPF program, therefore rcu_read_lock is held */
void bpf_channel_push_packet(struct bpf_context *pctx)
{
	struct bpf_dp_context *ctx = container_of(pctx, struct bpf_dp_context,
						  context);
	struct dp_upcall_info upcall;
	struct sk_buff *nskb;
	struct plum *plum;

	if (unlikely(!ctx->skb))
		return;

	plum = rcu_dereference(ctx->dp->plums[pctx->plum_id]);
	if (unlikely(!plum))
		return;

	/* queue_gso_packets() inside ovs_dp_upcall() changes skb,
	 * so copy it here, since BPF program might still be using it
	 */
	nskb = skb_clone(ctx->skb, GFP_ATOMIC);
	if (unlikely(!nskb))
		return;

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = NULL;
	upcall.userdata = NULL;
	upcall.portid = plum->upcall_pid;
	/* don't exit earlier even if upcall_pid is invalid,
	 * since we want 'lost' count to be incremented
	 */
	/* disable softirq to make sure that genlmsg_unicast()->gfp_any() picks
	 * GFP_ATOMIC flag
	 * note that bpf_channel_push_struct() doesn't need to do it,
	 * since skb==NULL
	 */
	local_bh_disable();
	ovs_dp_upcall(ctx->dp, nskb, &upcall);
	local_bh_enable();
	consume_skb(nskb);
}