static int eth_tx(struct vr_interface *vif, struct vr_packet *pkt) { int ret; struct vr_forwarding_md fmd; struct vr_interface_stats *stats = vif_get_stats(vif, pkt->vp_cpu); /* * GRO packets come here twice - once with VP_FLAG_GRO set and * once without the flag set. Don't count them twice. */ if (((pkt->vp_flags & VP_FLAG_GRO) == 0) || (vif->vif_type != VIF_TYPE_VIRTUAL)) { stats->vis_obytes += pkt_len(pkt); stats->vis_opackets++; } if (vif->vif_flags & VIF_FLAG_MIRROR_TX) { vr_init_forwarding_md(&fmd); fmd.fmd_dvrf = vif->vif_vrf; vr_mirror(vif->vif_router, vif->vif_mirror_id, pkt, &fmd); } ret = hif_ops->hif_tx(vif, pkt); if (ret != 0) { ret = 0; stats->vis_oerrors++; } return ret; }
/* * vr_interface_input() is invoked if a packet ingresses an interface. * This function demultiplexes the packet to right input * function depending on the protocols enabled on the VIF */ static unsigned int vr_interface_input(unsigned short vrf, struct vr_interface *vif, struct vr_packet *pkt, unsigned short vlan_id) { struct vr_forwarding_md fmd; unsigned int ret; vr_init_forwarding_md(&fmd); if (vif->vif_flags & VIF_FLAG_MIRROR_RX) { fmd.fmd_dvrf = vif->vif_vrf; vr_mirror(vif->vif_router, vif->vif_mirror_id, pkt, &fmd); } /* If vlan tagged from VM, packet needs to be treated as L2 packet */ if ((vif->vif_type == VIF_TYPE_PHYSICAL) || (vlan_id == VLAN_ID_INVALID)) { if (vif->vif_flags & VIF_FLAG_L3_ENABLED) { ret = vr_l3_input(vrf, pkt, &fmd); if (ret != PKT_RET_FALLBACK_BRIDGING) return ret; } } if (vif->vif_flags & VIF_FLAG_L2_ENABLED) return vr_l2_input(vrf, pkt, &fmd, vlan_id); vif_drop_pkt(vif, pkt, 1); return 0; }
/*
 * vr_virtual_input() handles a packet ingressing a virtual (VM-facing)
 * interface: applies RX mirroring, classifies the packet, blocks
 * multicast from transparent-service-chain interfaces, runs flow
 * processing, and finally bridges the packet.
 *
 * (The original comment here was copy-pasted from vr_interface_input().)
 *
 * Always returns 0: the packet is consumed on every path.
 */
unsigned int vr_virtual_input(unsigned short vrf, struct vr_interface *vif, struct vr_packet *pkt, unsigned short vlan_id) { struct vr_forwarding_md fmd; vr_init_forwarding_md(&fmd); fmd.fmd_vlan = vlan_id; fmd.fmd_dvrf = vrf; if (vif->vif_flags & VIF_FLAG_MIRROR_RX) { /* NOTE(review): fmd_dvrf is left as vif_vrf after mirroring (not
 * restored to the vrf argument) for the rest of this function —
 * confirm this is intended. */ fmd.fmd_dvrf = vif->vif_vrf; vr_mirror(vif->vif_router, vif->vif_mirror_id, pkt, &fmd); } /* classify the packet; unparseable packets are dropped */ if (vr_pkt_type(pkt, 0, &fmd) < 0) { vif_drop_pkt(vif, pkt, 1); return 0; } /*
 * we really do not allow any broadcast packets from interfaces
 * that are part of transparent service chain, since transparent
 * service chain bridges packets across vrf (and hence loops can
 * happen)
 */ if ((pkt->vp_flags & VP_FLAG_MULTICAST) && (vif_is_service(vif))) { vif_drop_pkt(vif, pkt, 1); return 0; } /* flow processing may consume the packet (drop/hold/trap) */ if (!vr_flow_forward(pkt->vp_if->vif_router, pkt, &fmd)) return 0; vr_bridge_input(vif->vif_router, pkt, &fmd); return 0; }
/*
 * Apply a matched flow entry's policy to a packet: RPF/source
 * validation, optional VRF translation, optional mirroring (primary
 * and secondary), and finally the flow action (drop / forward / NAT).
 *
 * Returns FLOW_CONSUMED when the packet was freed or trapped here,
 * FLOW_FORWARD when the caller should continue forwarding, or the
 * result of NAT processing.
 */
static flow_result_t vr_flow_action(struct vrouter *router, struct vr_flow_entry *fe, unsigned int index, struct vr_packet *pkt, struct vr_forwarding_md *fmd) { int valid_src; flow_result_t result; struct vr_forwarding_md mirror_fmd; struct vr_nexthop *src_nh; struct vr_packet *pkt_clone; fmd->fmd_dvrf = fe->fe_vrf; /*
 * for now, we will not use dvrf if VRFT is set, because the RPF
 * check needs to happen in the source vrf
 */ src_nh = __vrouter_get_nexthop(router, fe->fe_src_nh_index); if (!src_nh) { vr_pfree(pkt, VP_DROP_INVALID_NH); return FLOW_CONSUMED; } if (src_nh->nh_validate_src) { valid_src = src_nh->nh_validate_src(pkt, src_nh, fmd, NULL); if (valid_src == NH_SOURCE_INVALID) { /* RPF failure: source is not reachable via this nexthop */ vr_pfree(pkt, VP_DROP_INVALID_SOURCE); return FLOW_CONSUMED; } /*
 * on an ECMP member mismatch, trap a clone to the agent so it can
 * re-resolve the ECMP member; the original packet continues below
 */ if (valid_src == NH_SOURCE_MISMATCH) { pkt_clone = vr_pclone(pkt); if (pkt_clone) { vr_preset(pkt_clone); if (vr_pcow(pkt_clone, sizeof(struct vr_eth) + sizeof(struct agent_hdr))) { vr_pfree(pkt_clone, VP_DROP_PCOW_FAIL); } else { vr_trap(pkt_clone, fmd->fmd_dvrf, AGENT_TRAP_ECMP_RESOLVE, &fmd->fmd_flow_index); } } } } /* VRF translation: switch to the destination vrf after RPF */ if (fe->fe_flags & VR_FLOW_FLAG_VRFT) { if (fmd->fmd_dvrf != fe->fe_dvrf) { fmd->fmd_dvrf = fe->fe_dvrf; fmd->fmd_to_me = 1; } } /* mirror on a copy of fmd so the ECMP index is not disturbed */ if (fe->fe_flags & VR_FLOW_FLAG_MIRROR) { if (fe->fe_mirror_id < VR_MAX_MIRROR_INDICES) { mirror_fmd = *fmd; mirror_fmd.fmd_ecmp_nh_index = -1; vr_mirror(router, fe->fe_mirror_id, pkt, &mirror_fmd); } if (fe->fe_sec_mirror_id < VR_MAX_MIRROR_INDICES) { mirror_fmd = *fmd; mirror_fmd.fmd_ecmp_nh_index = -1; vr_mirror(router, fe->fe_sec_mirror_id, pkt, &mirror_fmd); } } switch (fe->fe_action) { case VR_FLOW_ACTION_DROP: vr_pfree(pkt, VP_DROP_FLOW_ACTION_DROP); result = FLOW_CONSUMED; break; case VR_FLOW_ACTION_FORWARD: result = FLOW_FORWARD; break; case VR_FLOW_ACTION_NAT: result = vr_flow_nat(fe, pkt, fmd); break; default: /* unknown action: treat as a drop with its own counter */ vr_pfree(pkt, VP_DROP_FLOW_ACTION_INVALID); result = FLOW_CONSUMED; break; } return result; }
/* * vr_input is called from linux(host) ingress path. we are not allowed to * sleep here. return value should indicate whether the router consumed the * packet or not. if the router did not consume, host will continue with * its packet processing with the same packet. if the router did consume, * host will not touch the packet again. a return of 0 will tell the handler * that router consumed it, while all other return values are passed as is. * maybe we need a return value to host return mapping, but at a later time ? */ unsigned int vr_input(unsigned short vrf, struct vr_interface *vif, struct vr_packet *pkt) { unsigned char *data = pkt_data(pkt); unsigned char *eth = data; unsigned char *dmac = ð[VR_ETHER_DMAC_OFF]; unsigned short eth_proto; struct vr_vlan_hdr *vlan; struct vrouter *router = vif->vif_router; struct vr_forwarding_md fmd; int reason; if (vif->vif_flags & VIF_FLAG_MIRROR_RX) { vr_init_forwarding_md(&fmd); fmd.fmd_dvrf = vif->vif_vrf; vr_mirror(vif->vif_router, vif->vif_mirror_id, pkt, &fmd); } /* * we will optimise for the most likely case i.e that of IPv4. 
need * to see what needs to happen for v6 when it comes */ data = pkt_pull(pkt, VR_ETHER_HLEN); if (!data) { vif_drop_pkt(vif, pkt, 1); return 0; } eth_proto = ntohs(*(unsigned short *)(eth + VR_ETHER_PROTO_OFF)); while (eth_proto == VR_ETH_PROTO_VLAN) { vlan = (struct vr_vlan_hdr *)data; eth_proto = ntohs(vlan->vlan_proto); data = pkt_pull(pkt, sizeof(*vlan)); if (!data) { vif_drop_pkt(vif, pkt, 1); return 0; } } vr_init_forwarding_md(&fmd); pkt_set_network_header(pkt, pkt->vp_data); pkt_set_inner_network_header(pkt, pkt->vp_data); if (eth_proto == VR_ETH_PROTO_IP) { if (vr_from_vm_mss_adj && vr_pkt_from_vm_tcp_mss_adj && (vif->vif_type == VIF_TYPE_VIRTUAL)) { if ((reason = vr_pkt_from_vm_tcp_mss_adj(pkt))) { vr_pfree(pkt, reason); return 0; } } return vr_flow_inet_input(router, vrf, pkt, eth_proto, &fmd); } else if (eth_proto == VR_ETH_PROTO_ARP) return vr_arp_input(router, vrf, pkt); /* rest of the stuff is for slow path and we should be ok doing this */ if (well_known_mac(dmac)) return vr_trap(pkt, vrf, AGENT_TRAP_L2_PROTOCOLS, NULL); return vr_default_input(pkt); }