static struct vport *vxlan_tnl_create(const struct vport_parms *parms) { struct net *net = ovs_dp_get_net(parms->dp); struct nlattr *options = parms->options; struct vxlan_port *vxlan_port; struct vxlan_sock *vs; struct vport *vport; struct nlattr *a; u16 dst_port; int err; if (!options) { err = -EINVAL; goto error; } a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT); if (a && nla_len(a) == sizeof(u16)) { dst_port = nla_get_u16(a); } else { /* Require destination port from userspace. */ err = -EINVAL; goto error; } vport = ovs_vport_alloc(sizeof(struct vxlan_port), &ovs_vxlan_vport_ops, parms); if (IS_ERR(vport)) return vport; vxlan_port = vxlan_vport(vport); strncpy(vxlan_port->name, parms->name, IFNAMSIZ); a = nla_find_nested(options, OVS_TUNNEL_ATTR_EXTENSION); if (a) { err = vxlan_configure_exts(vport, a); if (err) { ovs_vport_free(vport); goto error; } } vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true, vxlan_port->exts); if (IS_ERR(vs)) { ovs_vport_free(vport); return (void *)vs; } vxlan_port->vs = vs; return vport; error: return ERR_PTR(err); }
/*
 * Look up attribute @attrtype inside the nested attribute @nla.
 *
 * The nest is validated first: if any nested attribute carries the
 * DRBD_GENLA_F_MANDATORY flag but is unknown to us (beyond @maxtype),
 * the whole nest is rejected and an ERR_PTR() is returned instead of
 * searching it.
 */
struct nlattr *drbd_nla_find_nested(int maxtype, struct nlattr *nla, int attrtype)
{
	int ret = drbd_nla_check_mandatory(maxtype, nla);

	return ret ? ERR_PTR(ret) : nla_find_nested(nla, attrtype);
}
/*
 * geneve_tnl_create - create an Open vSwitch Geneve tunnel vport.
 *
 * The mandatory OVS_TUNNEL_ATTR_DST_PORT option (u16) selects the UDP
 * port of the backing fallback geneve net_device, which is created
 * under RTNL and brought up before the vport is returned.
 *
 * Returns the new vport, or an ERR_PTR() on failure (the vport is
 * freed on every error path after allocation).
 */
static struct vport *geneve_tnl_create(const struct vport_parms *parms)
{
	struct net *net = ovs_dp_get_net(parms->dp);
	struct nlattr *options = parms->options;
	struct geneve_port *geneve_port;
	struct net_device *dev;
	struct vport *vport;
	struct nlattr *a;
	u16 dst_port;
	int err;

	if (!options) {
		err = -EINVAL;
		goto error;
	}

	a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
	if (a && nla_len(a) == sizeof(u16)) {
		dst_port = nla_get_u16(a);
	} else {
		/* Require destination port from userspace. */
		err = -EINVAL;
		goto error;
	}

	vport = ovs_vport_alloc(sizeof(struct geneve_port),
				&ovs_geneve_vport_ops, parms);
	if (IS_ERR(vport))
		return vport;

	geneve_port = geneve_vport(vport);
	geneve_port->dst_port = dst_port;

	/* Device creation and IFF_UP must happen under RTNL. */
	rtnl_lock();
	dev = geneve_dev_create_fb(net, parms->name, NET_NAME_USER, dst_port);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		ovs_vport_free(vport);
		return ERR_CAST(dev);
	}

	/* NOTE(review): dev_change_flags() return value is ignored here. */
	dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return vport;
error:
	return ERR_PTR(err);
}
/*
 * stt_tnl_create - create an Open vSwitch STT tunnel vport.
 *
 * Requires the OVS_TUNNEL_ATTR_DST_PORT option (u16) in @parms->options
 * and binds an STT socket on that TCP destination port.
 *
 * Returns the new vport, or an ERR_PTR() on failure (the vport is
 * freed on every error path after allocation).
 *
 * NOTE(review): strncpy() does not NUL-terminate stt_port->name if
 * parms->name is IFNAMSIZ bytes or longer -- presumably the caller
 * validates the name length; confirm.
 */
static struct vport *stt_tnl_create(const struct vport_parms *parms)
{
	struct net *net = ovs_dp_get_net(parms->dp);
	struct nlattr *options = parms->options;
	struct stt_port *stt_port;
	struct stt_sock *stt_sock;
	struct vport *vport;
	struct nlattr *a;
	int err;
	u16 dst_port;

	if (!options) {
		err = -EINVAL;
		goto error;
	}

	a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
	if (a && nla_len(a) == sizeof(u16)) {
		dst_port = nla_get_u16(a);
	} else {
		/* Require destination port from userspace. */
		err = -EINVAL;
		goto error;
	}

	vport = ovs_vport_alloc(sizeof(struct stt_port),
				&ovs_stt_vport_ops, parms);
	if (IS_ERR(vport))
		return vport;

	stt_port = stt_vport(vport);
	strncpy(stt_port->name, parms->name, IFNAMSIZ);

	stt_sock = stt_sock_add(net, htons(dst_port), stt_rcv, vport);
	if (IS_ERR(stt_sock)) {
		ovs_vport_free(vport);
		return ERR_CAST(stt_sock);
	}
	stt_port->stt_sock = stt_sock;

	return vport;
error:
	return ERR_PTR(err);
}
/*
 * __skb_get_nlattr_nest - BPF SKF_AD_NLATTR_NEST ancillary load helper.
 *
 * @ctx: the socket buffer (cast from the BPF context register)
 * @a:   accumulator value = byte offset of the outer netlink attribute
 *       within skb->data
 * @x:   nested attribute type to search for
 * @r4, @r5: unused
 *
 * Returns the offset of the matching nested attribute within skb->data,
 * or 0 when the skb is non-linear, the offset/length checks fail, or no
 * such nested attribute exists.
 *
 * The ordering of the three checks matters: the skb->len guard runs
 * first so that the unsigned subtractions below cannot wrap, and the
 * nla_len check bounds the outer attribute against the bytes actually
 * remaining in the packet before nla_find_nested() walks it.
 */
static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;
	if (skb->len < sizeof(struct nlattr))
		return 0;
	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}
/** * sk_run_filter - run a filter on a socket * @skb: buffer to run the filter on * @filter: filter to apply * * Decode and apply filter instructions to the skb->data. * Return length to keep, 0 for none. @skb is the data we are * filtering, @filter is the array of filter instructions. * Because all jumps are guaranteed to be before last instruction, * and last instruction guaranteed to be a RET, we dont need to check * flen. (We used to pass to this function the length of filter) */ unsigned int sk_run_filter(const struct sk_buff *skb, const struct sock_filter *fentry) { void *ptr; u32 A = 0; /* Accumulator */ u32 X = 0; /* Index Register */ u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ unsigned long memvalid = 0; u32 tmp; int k; BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG); /* * Process array of filter instructions. */ for (;; fentry++) { #if defined(CONFIG_X86_32) #define K (fentry->k) #else const u32 K = fentry->k; #endif switch (fentry->code) { case BPF_S_ALU_ADD_X: A += X; continue; case BPF_S_ALU_ADD_K: A += K; continue; case BPF_S_ALU_SUB_X: A -= X; continue; case BPF_S_ALU_SUB_K: A -= K; continue; case BPF_S_ALU_MUL_X: A *= X; continue; case BPF_S_ALU_MUL_K: A *= K; continue; case BPF_S_ALU_DIV_X: if (X == 0) return 0; A /= X; continue; case BPF_S_ALU_DIV_K: A /= K; continue; case BPF_S_ALU_AND_X: A &= X; continue; case BPF_S_ALU_AND_K: A &= K; continue; case BPF_S_ALU_OR_X: A |= X; continue; case BPF_S_ALU_OR_K: A |= K; continue; case BPF_S_ALU_LSH_X: A <<= X; continue; case BPF_S_ALU_LSH_K: A <<= K; continue; case BPF_S_ALU_RSH_X: A >>= X; continue; case BPF_S_ALU_RSH_K: A >>= K; continue; case BPF_S_ALU_NEG: A = -A; continue; case BPF_S_JMP_JA: fentry += K; continue; case BPF_S_JMP_JGT_K: fentry += (A > K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGE_K: fentry += (A >= K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JEQ_K: fentry += (A == K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JSET_K: fentry += (A & K) ? 
fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGT_X: fentry += (A > X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGE_X: fentry += (A >= X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JEQ_X: fentry += (A == X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JSET_X: fentry += (A & X) ? fentry->jt : fentry->jf; continue; case BPF_S_LD_W_ABS: k = K; load_w: ptr = load_pointer(skb, k, 4, &tmp); if (ptr != NULL) { A = get_unaligned_be32(ptr); continue; } break; case BPF_S_LD_H_ABS: k = K; load_h: ptr = load_pointer(skb, k, 2, &tmp); if (ptr != NULL) { A = get_unaligned_be16(ptr); continue; } break; case BPF_S_LD_B_ABS: k = K; load_b: ptr = load_pointer(skb, k, 1, &tmp); if (ptr != NULL) { A = *(u8 *)ptr; continue; } break; case BPF_S_LD_W_LEN: A = skb->len; continue; case BPF_S_LDX_W_LEN: X = skb->len; continue; case BPF_S_LD_W_IND: k = X + K; goto load_w; case BPF_S_LD_H_IND: k = X + K; goto load_h; case BPF_S_LD_B_IND: k = X + K; goto load_b; case BPF_S_LDX_B_MSH: ptr = load_pointer(skb, K, 1, &tmp); if (ptr != NULL) { X = (*(u8 *)ptr & 0xf) << 2; continue; } return 0; case BPF_S_LD_IMM: A = K; continue; case BPF_S_LDX_IMM: X = K; continue; case BPF_S_LD_MEM: A = (memvalid & (1UL << K)) ? mem[K] : 0; continue; case BPF_S_LDX_MEM: X = (memvalid & (1UL << K)) ? mem[K] : 0; continue; case BPF_S_MISC_TAX: X = A; continue; case BPF_S_MISC_TXA: A = X; continue; case BPF_S_RET_K: return K; case BPF_S_RET_A: return A; case BPF_S_ST: memvalid |= 1UL << K; mem[K] = A; continue; case BPF_S_STX: memvalid |= 1UL << K; mem[K] = X; continue; default: WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n", fentry->code, fentry->jt, fentry->jf, fentry->k); return 0; } /* * Handle ancillary data, which are impossible * (or very difficult) to get parsing packet contents. 
*/ switch (k-SKF_AD_OFF) { case SKF_AD_PROTOCOL: A = ntohs(skb->protocol); continue; case SKF_AD_PKTTYPE: A = skb->pkt_type; continue; case SKF_AD_IFINDEX: if (!skb->dev) return 0; A = skb->dev->ifindex; continue; case SKF_AD_MARK: A = skb->mark; continue; case SKF_AD_QUEUE: A = skb->queue_mapping; continue; case SKF_AD_HATYPE: if (!skb->dev) return 0; A = skb->dev->type; continue; #if 0 case SKF_AD_RXHASH: A = skb->rxhash; continue; #endif case SKF_AD_CPU: A = raw_smp_processor_id(); continue; case SKF_AD_NLATTR: { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = nla_find((struct nlattr *)&skb->data[A], skb->len - A, X); if (nla) A = (void *)nla - (void *)skb->data; else A = 0; continue; } case SKF_AD_NLATTR_NEST: { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = (struct nlattr *)&skb->data[A]; if (nla->nla_len > A - skb->len) return 0; nla = nla_find_nested(nla, X); if (nla) A = (void *)nla - (void *)skb->data; else A = 0; continue; } default: return 0; } } return 0; }
/** * sk_run_filter - run a filter on a socket * @skb: buffer to run the filter on * @fentry: filter to apply * * Decode and apply filter instructions to the skb->data. * Return length to keep, 0 for none. @skb is the data we are * filtering, @filter is the array of filter instructions. * Because all jumps are guaranteed to be before last instruction, * and last instruction guaranteed to be a RET, we dont need to check * flen. (We used to pass to this function the length of filter) */ unsigned int sk_run_filter(const struct sk_buff *skb, const struct sock_filter *fentry) { void *ptr; u32 A = 0; /* Accumulator */ u32 X = 0; /* Index Register */ u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ u32 tmp; int k; /* * Process array of filter instructions. */ for (;; fentry++) { #if defined(CONFIG_X86_32) #define K (fentry->k) #else const u32 K = fentry->k; #endif switch (fentry->code) { case BPF_S_ALU_ADD_X: A += X; continue; case BPF_S_ALU_ADD_K: A += K; continue; case BPF_S_ALU_SUB_X: A -= X; continue; case BPF_S_ALU_SUB_K: A -= K; continue; case BPF_S_ALU_MUL_X: A *= X; continue; case BPF_S_ALU_MUL_K: A *= K; continue; case BPF_S_ALU_DIV_X: if (X == 0) return 0; A /= X; continue; case BPF_S_ALU_DIV_K: A = reciprocal_divide(A, K); continue; case BPF_S_ALU_AND_X: A &= X; continue; case BPF_S_ALU_AND_K: A &= K; continue; case BPF_S_ALU_OR_X: A |= X; continue; case BPF_S_ALU_OR_K: A |= K; continue; case BPF_S_ALU_LSH_X: A <<= X; continue; case BPF_S_ALU_LSH_K: A <<= K; continue; case BPF_S_ALU_RSH_X: A >>= X; continue; case BPF_S_ALU_RSH_K: A >>= K; continue; case BPF_S_ALU_NEG: A = -A; continue; case BPF_S_JMP_JA: fentry += K; continue; case BPF_S_JMP_JGT_K: fentry += (A > K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGE_K: fentry += (A >= K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JEQ_K: fentry += (A == K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JSET_K: fentry += (A & K) ? 
fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGT_X: fentry += (A > X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGE_X: fentry += (A >= X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JEQ_X: fentry += (A == X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JSET_X: fentry += (A & X) ? fentry->jt : fentry->jf; continue; case BPF_S_LD_W_ABS: k = K; load_w: ptr = load_pointer(skb, k, 4, &tmp); if (ptr != NULL) { A = get_unaligned_be32(ptr); continue; } return 0; case BPF_S_LD_H_ABS: k = K; load_h: ptr = load_pointer(skb, k, 2, &tmp); if (ptr != NULL) { A = get_unaligned_be16(ptr); continue; } return 0; case BPF_S_LD_B_ABS: k = K; load_b: ptr = load_pointer(skb, k, 1, &tmp); if (ptr != NULL) { A = *(u8 *)ptr; continue; } return 0; case BPF_S_LD_W_LEN: A = skb->len; continue; case BPF_S_LDX_W_LEN: X = skb->len; continue; case BPF_S_LD_W_IND: k = X + K; goto load_w; case BPF_S_LD_H_IND: k = X + K; goto load_h; case BPF_S_LD_B_IND: k = X + K; goto load_b; case BPF_S_LDX_B_MSH: ptr = load_pointer(skb, K, 1, &tmp); if (ptr != NULL) { X = (*(u8 *)ptr & 0xf) << 2; continue; } return 0; case BPF_S_LD_IMM: A = K; continue; case BPF_S_LDX_IMM: X = K; continue; case BPF_S_LD_MEM: A = mem[K]; continue; case BPF_S_LDX_MEM: X = mem[K]; continue; case BPF_S_MISC_TAX: X = A; continue; case BPF_S_MISC_TXA: A = X; continue; case BPF_S_RET_K: return K; case BPF_S_RET_A: return A; case BPF_S_ST: mem[K] = A; continue; case BPF_S_STX: mem[K] = X; continue; case BPF_S_ANC_PROTOCOL: A = ntohs(skb->protocol); continue; case BPF_S_ANC_PKTTYPE: A = skb->pkt_type; continue; case BPF_S_ANC_IFINDEX: if (!skb->dev) return 0; A = skb->dev->ifindex; continue; case BPF_S_ANC_MARK: A = skb->mark; continue; case BPF_S_ANC_QUEUE: A = skb->queue_mapping; continue; case BPF_S_ANC_HATYPE: if (!skb->dev) return 0; A = skb->dev->type; continue; case BPF_S_ANC_RXHASH: A = skb->rxhash; continue; case BPF_S_ANC_CPU: A = raw_smp_processor_id(); continue; case BPF_S_ANC_NLATTR: { struct 
nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = nla_find((struct nlattr *)&skb->data[A], skb->len - A, X); if (nla) A = (void *)nla - (void *)skb->data; else A = 0; continue; } case BPF_S_ANC_NLATTR_NEST: { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = (struct nlattr *)&skb->data[A]; if (nla->nla_len > A - skb->len) return 0; nla = nla_find_nested(nla, X); if (nla) A = (void *)nla - (void *)skb->data; else A = 0; continue; } #ifdef CONFIG_SECCOMP_FILTER case BPF_S_ANC_SECCOMP_LD_W: A = seccomp_bpf_load(fentry->k); continue; #endif default: WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n", fentry->code, fentry->jt, fentry->jf, fentry->k); return 0; } } return 0; }
/** * sk_run_filter - run a filter on a socket * @skb: buffer to run the filter on * @filter: filter to apply * @flen: length of filter * * Decode and apply filter instructions to the skb->data. * Return length to keep, 0 for none. skb is the data we are * filtering, filter is the array of filter instructions, and * len is the number of filter blocks in the array. */ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) { void *ptr; u32 A = 0; /* Accumulator */ u32 X = 0; /* Index Register */ u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ unsigned long memvalid = 0; u32 tmp; int k; int pc; BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG); /* * Process array of filter instructions. */ for (pc = 0; pc < flen; pc++) { const struct sock_filter *fentry = &filter[pc]; u32 f_k = fentry->k; switch (fentry->code) { case BPF_ALU|BPF_ADD|BPF_X: A += X; continue; case BPF_ALU|BPF_ADD|BPF_K: A += f_k; continue; case BPF_ALU|BPF_SUB|BPF_X: A -= X; continue; case BPF_ALU|BPF_SUB|BPF_K: A -= f_k; continue; case BPF_ALU|BPF_MUL|BPF_X: A *= X; continue; case BPF_ALU|BPF_MUL|BPF_K: A *= f_k; continue; case BPF_ALU|BPF_DIV|BPF_X: if (X == 0) return 0; A /= X; continue; case BPF_ALU|BPF_DIV|BPF_K: A /= f_k; continue; case BPF_ALU|BPF_AND|BPF_X: A &= X; continue; case BPF_ALU|BPF_AND|BPF_K: A &= f_k; continue; case BPF_ALU|BPF_OR|BPF_X: A |= X; continue; case BPF_ALU|BPF_OR|BPF_K: A |= f_k; continue; case BPF_ALU|BPF_LSH|BPF_X: A <<= X; continue; case BPF_ALU|BPF_LSH|BPF_K: A <<= f_k; continue; case BPF_ALU|BPF_RSH|BPF_X: A >>= X; continue; case BPF_ALU|BPF_RSH|BPF_K: A >>= f_k; continue; case BPF_ALU|BPF_NEG: A = -A; continue; case BPF_JMP|BPF_JA: pc += f_k; continue; case BPF_JMP|BPF_JGT|BPF_K: pc += (A > f_k) ? fentry->jt : fentry->jf; continue; case BPF_JMP|BPF_JGE|BPF_K: pc += (A >= f_k) ? fentry->jt : fentry->jf; continue; case BPF_JMP|BPF_JEQ|BPF_K: pc += (A == f_k) ? fentry->jt : fentry->jf; continue; case BPF_JMP|BPF_JSET|BPF_K: pc += (A & f_k) ? 
fentry->jt : fentry->jf; continue; case BPF_JMP|BPF_JGT|BPF_X: pc += (A > X) ? fentry->jt : fentry->jf; continue; case BPF_JMP|BPF_JGE|BPF_X: pc += (A >= X) ? fentry->jt : fentry->jf; continue; case BPF_JMP|BPF_JEQ|BPF_X: pc += (A == X) ? fentry->jt : fentry->jf; continue; case BPF_JMP|BPF_JSET|BPF_X: pc += (A & X) ? fentry->jt : fentry->jf; continue; case BPF_LD|BPF_W|BPF_ABS: k = f_k; load_w: ptr = load_pointer(skb, k, 4, &tmp); if (ptr != NULL) { A = get_unaligned_be32(ptr); continue; } break; case BPF_LD|BPF_H|BPF_ABS: k = f_k; load_h: ptr = load_pointer(skb, k, 2, &tmp); if (ptr != NULL) { A = get_unaligned_be16(ptr); continue; } break; case BPF_LD|BPF_B|BPF_ABS: k = f_k; load_b: ptr = load_pointer(skb, k, 1, &tmp); if (ptr != NULL) { A = *(u8 *)ptr; continue; } break; case BPF_LD|BPF_W|BPF_LEN: A = skb->len; continue; case BPF_LDX|BPF_W|BPF_LEN: X = skb->len; continue; case BPF_LD|BPF_W|BPF_IND: k = X + f_k; goto load_w; case BPF_LD|BPF_H|BPF_IND: k = X + f_k; goto load_h; case BPF_LD|BPF_B|BPF_IND: k = X + f_k; goto load_b; case BPF_LDX|BPF_B|BPF_MSH: ptr = load_pointer(skb, f_k, 1, &tmp); if (ptr != NULL) { X = (*(u8 *)ptr & 0xf) << 2; continue; } return 0; case BPF_LD|BPF_IMM: A = f_k; continue; case BPF_LDX|BPF_IMM: X = f_k; continue; case BPF_LD|BPF_MEM: A = (memvalid & (1UL << f_k)) ? mem[f_k] : 0; continue; case BPF_LDX|BPF_MEM: X = (memvalid & (1UL << f_k)) ? mem[f_k] : 0; continue; case BPF_MISC|BPF_TAX: X = A; continue; case BPF_MISC|BPF_TXA: A = X; continue; case BPF_RET|BPF_K: return f_k; case BPF_RET|BPF_A: return A; case BPF_ST: memvalid |= 1UL << f_k; mem[f_k] = A; continue; case BPF_STX: memvalid |= 1UL << f_k; mem[f_k] = X; continue; default: WARN_ON(1); return 0; } /* * Handle ancillary data, which are impossible * (or very difficult) to get parsing packet contents. 
*/ switch (k-SKF_AD_OFF) { case SKF_AD_PROTOCOL: A = ntohs(skb->protocol); continue; case SKF_AD_PKTTYPE: A = skb->pkt_type; continue; case SKF_AD_IFINDEX: A = skb->dev->ifindex; continue; case SKF_AD_NLATTR: { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = nla_find((struct nlattr *)&skb->data[A], skb->len - A, X); if (nla) A = (void *)nla - (void *)skb->data; else A = 0; continue; } case SKF_AD_NLATTR_NEST: { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = (struct nlattr *)&skb->data[A]; if (nla->nla_len > A - skb->len) return 0; nla = nla_find_nested(nla, X); if (nla) A = (void *)nla - (void *)skb->data; else A = 0; continue; } default: return 0; } } return 0; }
/** * sk_run_filter - run a filter on a socket * @skb: buffer to run the filter on * @filter: filter to apply * @flen: length of filter * * Decode and apply filter instructions to the skb->data. * Return length to keep, 0 for none. skb is the data we are * filtering, filter is the array of filter instructions, and * len is the number of filter blocks in the array. */ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) { struct sock_filter *fentry; /* We walk down these */ void *ptr; u32 A = 0; /* Accumulator */ u32 X = 0; /* Index Register */ u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ u32 tmp; int k; int pc; /* * Process array of filter instructions. */ for (pc = 0; pc < flen; pc++) { fentry = &filter[pc]; switch (fentry->code) { case BPF_S_ALU_ADD_X: A += X; continue; case BPF_S_ALU_ADD_K: A += fentry->k; continue; case BPF_S_ALU_SUB_X: A -= X; continue; case BPF_S_ALU_SUB_K: A -= fentry->k; continue; case BPF_S_ALU_MUL_X: A *= X; continue; case BPF_S_ALU_MUL_K: A *= fentry->k; continue; case BPF_S_ALU_DIV_X: if (X == 0) return 0; A /= X; continue; case BPF_S_ALU_DIV_K: A /= fentry->k; continue; case BPF_S_ALU_AND_X: A &= X; continue; case BPF_S_ALU_AND_K: A &= fentry->k; continue; case BPF_S_ALU_OR_X: A |= X; continue; case BPF_S_ALU_OR_K: A |= fentry->k; continue; case BPF_S_ALU_LSH_X: A <<= X; continue; case BPF_S_ALU_LSH_K: A <<= fentry->k; continue; case BPF_S_ALU_RSH_X: A >>= X; continue; case BPF_S_ALU_RSH_K: A >>= fentry->k; continue; case BPF_S_ALU_NEG: A = -A; continue; case BPF_S_JMP_JA: pc += fentry->k; continue; case BPF_S_JMP_JGT_K: pc += (A > fentry->k) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGE_K: pc += (A >= fentry->k) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JEQ_K: pc += (A == fentry->k) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JSET_K: pc += (A & fentry->k) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGT_X: pc += (A > X) ? 
fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGE_X: pc += (A >= X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JEQ_X: pc += (A == X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JSET_X: pc += (A & X) ? fentry->jt : fentry->jf; continue; case BPF_S_LD_W_ABS: k = fentry->k; load_w: ptr = load_pointer(skb, k, 4, &tmp); if (ptr != NULL) { A = get_unaligned_be32(ptr); continue; } break; case BPF_S_LD_H_ABS: k = fentry->k; load_h: ptr = load_pointer(skb, k, 2, &tmp); if (ptr != NULL) { A = get_unaligned_be16(ptr); continue; } break; case BPF_S_LD_B_ABS: k = fentry->k; load_b: ptr = load_pointer(skb, k, 1, &tmp); if (ptr != NULL) { A = *(u8 *)ptr; continue; } break; case BPF_S_LD_W_LEN: A = skb->len; continue; case BPF_S_LDX_W_LEN: X = skb->len; continue; case BPF_S_LD_W_IND: k = X + fentry->k; goto load_w; case BPF_S_LD_H_IND: k = X + fentry->k; goto load_h; case BPF_S_LD_B_IND: k = X + fentry->k; goto load_b; case BPF_S_LDX_B_MSH: ptr = load_pointer(skb, fentry->k, 1, &tmp); if (ptr != NULL) { X = (*(u8 *)ptr & 0xf) << 2; continue; } return 0; case BPF_S_LD_IMM: A = fentry->k; continue; case BPF_S_LDX_IMM: X = fentry->k; continue; case BPF_S_LD_MEM: A = mem[fentry->k]; continue; case BPF_S_LDX_MEM: X = mem[fentry->k]; continue; case BPF_S_MISC_TAX: X = A; continue; case BPF_S_MISC_TXA: A = X; continue; case BPF_S_RET_K: return fentry->k; case BPF_S_RET_A: return A; case BPF_S_ST: mem[fentry->k] = A; continue; case BPF_S_STX: mem[fentry->k] = X; continue; default: WARN_ON(1); return 0; } /* * Handle ancillary data, which are impossible * (or very difficult) to get parsing packet contents. 
*/ switch (k-SKF_AD_OFF) { case SKF_AD_PROTOCOL: A = ntohs(skb->protocol); continue; case SKF_AD_PKTTYPE: A = skb->pkt_type; continue; case SKF_AD_IFINDEX: A = skb->dev->ifindex; continue; case SKF_AD_MARK: A = skb->mark; continue; case SKF_AD_NLATTR: { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = nla_find((struct nlattr *)&skb->data[A], skb->len - A, X); if (nla) A = (void *)nla - (void *)skb->data; else A = 0; continue; } case SKF_AD_NLATTR_NEST: { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (A > skb->len - sizeof(struct nlattr)) return 0; nla = (struct nlattr *)&skb->data[A]; if (nla->nla_len > A - skb->len) return 0; nla = nla_find_nested(nla, X); if (nla) A = (void *)nla - (void *)skb->data; else A = 0; continue; } default: return 0; } } return 0; }
static struct vport *vxlan_tnl_create(const struct vport_parms *parms) { struct net *net = ovs_dp_get_net(parms->dp); struct nlattr *options = parms->options; struct net_device *dev; struct vport *vport; struct nlattr *a; int err; struct vxlan_config conf = { .no_share = true, .flags = VXLAN_F_COLLECT_METADATA, }; if (!options) { err = -EINVAL; goto error; } a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT); if (a && nla_len(a) == sizeof(u16)) { conf.dst_port = htons(nla_get_u16(a)); } else { /* Require destination port from userspace. */ err = -EINVAL; goto error; } vport = ovs_vport_alloc(0, &ovs_vxlan_netdev_vport_ops, parms); if (IS_ERR(vport)) return vport; a = nla_find_nested(options, OVS_TUNNEL_ATTR_EXTENSION); if (a) { err = vxlan_configure_exts(vport, a, &conf); if (err) { ovs_vport_free(vport); goto error; } } rtnl_lock(); dev = vxlan_dev_create(net, parms->name, NET_NAME_USER, &conf); if (IS_ERR(dev)) { rtnl_unlock(); ovs_vport_free(vport); return ERR_CAST(dev); } dev_change_flags(dev, dev->flags | IFF_UP); rtnl_unlock(); return vport; error: return ERR_PTR(err); } static struct vport *vxlan_create(const struct vport_parms *parms) { struct vport *vport; vport = vxlan_tnl_create(parms); if (IS_ERR(vport)) return vport; return ovs_netdev_link(vport, parms->name); } static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, struct dp_upcall_info *upcall) { struct vxlan_dev *vxlan = netdev_priv(vport->dev); struct net *net = ovs_dp_get_net(vport->dp); unsigned short family = ip_tunnel_info_af(upcall->egress_tun_info); __be16 dst_port = vxlan_dev_dst_port(vxlan, family); __be16 src_port; int port_min; int port_max; inet_get_local_port_range(net, &port_min, &port_max); src_port = udp_flow_src_port(net, skb, 0, 0, true); return ovs_tunnel_get_egress_info(upcall, net, skb, IPPROTO_UDP, src_port, dst_port); } static struct vport_ops ovs_vxlan_netdev_vport_ops = { .type = OVS_VPORT_TYPE_VXLAN, .create = vxlan_create, .destroy = 
ovs_netdev_tunnel_destroy, .get_options = vxlan_get_options, .send = ovs_netdev_send, .get_egress_tun_info = vxlan_get_egress_tun_info, }; static int __init ovs_vxlan_tnl_init(void) { return ovs_vport_ops_register(&ovs_vxlan_netdev_vport_ops); } static void __exit ovs_vxlan_tnl_exit(void) { ovs_vport_ops_unregister(&ovs_vxlan_netdev_vport_ops); }