static void vr_flow_init_close(struct vrouter *router, struct vr_flow_entry *flow_e, struct vr_packet *pkt, struct vr_forwarding_md *fmd) { unsigned int flow_index; unsigned int head_room = sizeof(struct agent_hdr) + sizeof(struct vr_eth); struct vr_packet *pkt_c; pkt_c = vr_pclone(pkt); if (!pkt_c) return; vr_preset(pkt_c); if (vr_pcow(pkt_c, head_room)) { vr_pfree(pkt_c, VP_DROP_PCOW_FAIL); return; } flow_index = fmd->fmd_flow_index; vr_trap(pkt_c, fmd->fmd_dvrf, AGENT_TRAP_SESSION_CLOSE, (void *)&flow_index); return; }
/*
 * vr_mirror - clone 'pkt' and send the clone toward the mirror destination
 * identified by 'mirror_id'.
 *
 * The clone is flagged VP_FLAG_FROM_DP so it will never be re-mirrored.
 * Forwarding metadata is copied into a local 'new_fmd' so the caller's fmd
 * is never modified. For dynamic mirrors, per-flow or per-interface mirror
 * metadata is prepended to the clone (a 2-byte default if none is
 * configured); for static mirrors, a VXLAN header's worth of headroom is
 * reserved and the mirror VNI becomes the label. Packets seen on physical
 * interfaces are mirrored without their tunnel headers; ACL mirroring of
 * NH_ENCAP/AF_INET packets re-applies the L2 rewrite first.
 *
 * Returns 0 always; on internal failure the clone is freed with an
 * appropriate drop reason and the original packet is unaffected.
 */
int
vr_mirror(struct vrouter *router, uint8_t mirror_id, struct vr_packet *pkt,
        struct vr_forwarding_md *fmd, mirror_type_t mtype)
{
    bool reset = true;
    /* NULL until a metadata source is chosen; only dereferenced when
     * mirror_md_len is non-zero (defensive init, CERT EXP33-C). */
    void *mirror_md = NULL;
    unsigned char *buf, default_mme[2] = {0xff, 0x0};
    unsigned int clone_len = 0;
    unsigned int mirror_md_len = 0, drop_reason;
    struct vr_nexthop *nh, *pkt_nh;
    struct vr_mirror_entry *mirror;
    struct vr_mirror_meta_entry *mme;
    struct vr_forwarding_md new_fmd;

    /* If the packet is already mirrored, dont mirror again */
    if (pkt->vp_flags & VP_FLAG_FROM_DP)
        return 0;

    if (mtype <= MIRROR_TYPE_UNKNOWN || mtype >= MIRROR_TYPE_MAX)
        return 0;

    mirror = router->vr_mirrors[mirror_id];
    if (!mirror)
        return 0;

    /* Hardware-assisted mirroring: just tag the vlan and let hw do it. */
    if (mirror->mir_flags & VR_MIRROR_FLAG_HW_ASSISTED) {
        vr_fmd_put_mirror_vlan(fmd, mirror->mir_vlan_id);
        return 0;
    }

    /* Work on a private copy of the metadata; never touch the caller's. */
    memcpy(&new_fmd, fmd, sizeof(*fmd));
    new_fmd.fmd_ecmp_nh_index = -1;
    fmd = &new_fmd;
    vr_fmd_put_mirror_type(fmd, mtype);

    nh = mirror->mir_nh;
    if (!nh || !(nh->nh_flags & NH_FLAG_VALID))
        return 0;

    pkt = vr_pclone(pkt);
    if (!pkt)
        return 0;

    /* Mark as mirrored */
    pkt->vp_flags |= VP_FLAG_FROM_DP;

    /* Set the GSO and partial checksum flag */
    pkt->vp_flags |= (VP_FLAG_FLOW_SET | VP_FLAG_GSO);
    vr_pkt_unset_gro(pkt);

    if (mirror->mir_flags & VR_MIRROR_FLAG_DYNAMIC) {
        /* Pick the mirror metadata source based on the mirror type. */
        if (mtype == MIRROR_TYPE_ACL) {
            if (fmd->fmd_flow_index >= 0) {
                mme = (struct vr_mirror_meta_entry *)
                    vr_itable_get(router->vr_mirror_md, fmd->fmd_flow_index);
                if (mme) {
                    mirror_md_len = mme->mirror_md_len;
                    mirror_md = mme->mirror_md;
                }
            }
        } else if (mtype == MIRROR_TYPE_PORT_RX) {
            if (!pkt->vp_if) {
                drop_reason = VP_DROP_INVALID_IF;
                goto fail;
            }
            mirror_md_len = pkt->vp_if->vif_in_mirror_md_len;
            mirror_md = pkt->vp_if->vif_in_mirror_md;
        } else {
            if (!pkt->vp_nh || !pkt->vp_nh->nh_dev) {
                drop_reason = VP_DROP_INVALID_NH;
                goto fail;
            }
            mirror_md_len = pkt->vp_nh->nh_dev->vif_out_mirror_md_len;
            mirror_md = pkt->vp_nh->nh_dev->vif_out_mirror_md;
        }

        /* No configured metadata: fall back to the 2-byte default. */
        if (!mirror_md_len) {
            mirror_md = default_mme;
            mirror_md_len = sizeof(default_mme);
        }

        clone_len += mirror_md_len;
        clone_len += VR_MIRROR_PKT_HEAD_SPACE;
    } else {
        clone_len += VR_VXLAN_HDR_LEN;
        fmd->fmd_label = mirror->mir_vni;
    }

    if (pkt->vp_if && (pkt->vp_if->vif_type == VIF_TYPE_PHYSICAL)) {
        /* No need to mirror the Tunnel headers. So packet cant be reset */
        reset = false;

        /* Identify whether the packet currently has L2 header. If not a
         * port mirroring, we need to add the extra L2 header */
        if (mtype == MIRROR_TYPE_ACL) {
            pkt_nh = pkt->vp_nh;
            if (pkt_nh && (pkt_nh->nh_flags & NH_FLAG_VALID) &&
                    (pkt_nh->nh_type == NH_ENCAP) &&
                    (pkt_nh->nh_family == AF_INET)) {

                clone_len += pkt_nh->nh_encap_len;
                if (vr_pcow(&pkt, clone_len)) {
                    drop_reason = VP_DROP_PCOW_FAIL;
                    goto fail;
                }
                clone_len = 0;

                if (pkt_nh->nh_dev->vif_set_rewrite(pkt_nh->nh_dev, pkt, fmd,
                            pkt_nh->nh_data, pkt_nh->nh_encap_len) < 0) {
                    drop_reason = VP_DROP_REWRITE_FAIL;
                    goto fail;
                }
            }
        }
    }

    if (reset)
        vr_preset(pkt);

    if (clone_len) {
        if (vr_pcow(&pkt, clone_len)) {
            drop_reason = VP_DROP_PCOW_FAIL;
            goto fail;
        }
    }

    /* Prepend the mirror metadata, if any was selected above. */
    if (mirror_md_len) {
        buf = pkt_push(pkt, mirror_md_len);
        if (!buf) {
            drop_reason = VP_DROP_PUSH;
            goto fail;
        }
        memcpy(buf, mirror_md, mirror_md_len);
    }

    if (nh->nh_vrf >= 0)
        fmd->fmd_dvrf = nh->nh_vrf;

    /*
     * we are now in the mirroring context and there isn't a flow for this
     * mirror packet. hence, set the flow index to -1.
     */
    fmd->fmd_flow_index = -1;
    fmd->fmd_outer_src_ip = 0;
    nh_output(pkt, nh, fmd);

    return 0;

fail:
    vr_pfree(pkt, drop_reason);
    return 0;
}
static flow_result_t vr_flow_action(struct vrouter *router, struct vr_flow_entry *fe, unsigned int index, struct vr_packet *pkt, struct vr_forwarding_md *fmd) { int valid_src; flow_result_t result; struct vr_forwarding_md mirror_fmd; struct vr_nexthop *src_nh; struct vr_packet *pkt_clone; fmd->fmd_dvrf = fe->fe_vrf; /* * for now, we will not use dvrf if VRFT is set, because the RPF * check needs to happen in the source vrf */ src_nh = __vrouter_get_nexthop(router, fe->fe_src_nh_index); if (!src_nh) { vr_pfree(pkt, VP_DROP_INVALID_NH); return FLOW_CONSUMED; } if (src_nh->nh_validate_src) { valid_src = src_nh->nh_validate_src(pkt, src_nh, fmd, NULL); if (valid_src == NH_SOURCE_INVALID) { vr_pfree(pkt, VP_DROP_INVALID_SOURCE); return FLOW_CONSUMED; } if (valid_src == NH_SOURCE_MISMATCH) { pkt_clone = vr_pclone(pkt); if (pkt_clone) { vr_preset(pkt_clone); if (vr_pcow(pkt_clone, sizeof(struct vr_eth) + sizeof(struct agent_hdr))) { vr_pfree(pkt_clone, VP_DROP_PCOW_FAIL); } else { vr_trap(pkt_clone, fmd->fmd_dvrf, AGENT_TRAP_ECMP_RESOLVE, &fmd->fmd_flow_index); } } } } if (fe->fe_flags & VR_FLOW_FLAG_VRFT) { if (fmd->fmd_dvrf != fe->fe_dvrf) { fmd->fmd_dvrf = fe->fe_dvrf; fmd->fmd_to_me = 1; } } if (fe->fe_flags & VR_FLOW_FLAG_MIRROR) { if (fe->fe_mirror_id < VR_MAX_MIRROR_INDICES) { mirror_fmd = *fmd; mirror_fmd.fmd_ecmp_nh_index = -1; vr_mirror(router, fe->fe_mirror_id, pkt, &mirror_fmd); } if (fe->fe_sec_mirror_id < VR_MAX_MIRROR_INDICES) { mirror_fmd = *fmd; mirror_fmd.fmd_ecmp_nh_index = -1; vr_mirror(router, fe->fe_sec_mirror_id, pkt, &mirror_fmd); } } switch (fe->fe_action) { case VR_FLOW_ACTION_DROP: vr_pfree(pkt, VP_DROP_FLOW_ACTION_DROP); result = FLOW_CONSUMED; break; case VR_FLOW_ACTION_FORWARD: result = FLOW_FORWARD; break; case VR_FLOW_ACTION_NAT: result = vr_flow_nat(fe, pkt, fmd); break; default: vr_pfree(pkt, VP_DROP_FLOW_ACTION_INVALID); result = FLOW_CONSUMED; break; } return result; }