Example #1
static int
eth_tx(struct vr_interface *vif, struct vr_packet *pkt)
{
    int ret;
    struct vr_forwarding_md fmd;
    struct vr_interface_stats *stats = vif_get_stats(vif, pkt->vp_cpu);

    /*
     * GRO packets come here twice - once with VP_FLAG_GRO set and
     * once without the flag set. Don't count them twice.
     */
    if (((pkt->vp_flags & VP_FLAG_GRO) == 0) ||
             (vif->vif_type != VIF_TYPE_VIRTUAL)) {
        stats->vis_obytes += pkt_len(pkt);
        stats->vis_opackets++;
    }
    if (vif->vif_flags & VIF_FLAG_MIRROR_TX) {
        vr_init_forwarding_md(&fmd);
        fmd.fmd_dvrf = vif->vif_vrf;
        vr_mirror(vif->vif_router, vif->vif_mirror_id, pkt, &fmd);
    }

    ret = hif_ops->hif_tx(vif, pkt);
    if (ret != 0) {
        ret = 0;
        stats->vis_oerrors++;
    }

    return ret;
}
Example #2
unsigned int
vr_fabric_input(struct vr_interface *vif, struct vr_packet *pkt,
                unsigned short vlan_id)
{
    int handled = 0;
    unsigned short pull_len;
    struct vr_forwarding_md fmd;

    vr_init_forwarding_md(&fmd);
    fmd.fmd_vlan = vlan_id;
    fmd.fmd_dvrf = vif->vif_vrf;

    if (vr_pkt_type(pkt, 0, &fmd) < 0) {
        vif_drop_pkt(vif, pkt, 1);
        return 0;
    }

    if (pkt->vp_type == VP_TYPE_IP6)
        return vif_xconnect(vif, pkt, &fmd);

    pull_len = pkt_get_network_header_off(pkt) - pkt_head_space(pkt);
    pkt_pull(pkt, pull_len);

    if (pkt->vp_type == VP_TYPE_IP || pkt->vp_type == VP_TYPE_IP6)
        handled = vr_l3_input(pkt, &fmd);
    else if (pkt->vp_type == VP_TYPE_ARP)
        handled = vr_arp_input(pkt, &fmd);

    if (!handled) {
        pkt_push(pkt, pull_len);
        return vif_xconnect(vif, pkt, &fmd);
    }

    return 0;
}
Example #3
static void
vr_fragment_flush_queue_element(struct vr_fragment_queue_element *vfqe)
{
    struct vrouter *router;
    struct vr_packet *pkt;

    struct vr_forwarding_md fmd;
    struct vr_packet_node *pnode;

    if (!vfqe)
        goto exit_flush;

    router = vfqe->fqe_router;
    pnode = &vfqe->fqe_pnode;
    pkt = pnode->pl_packet;
    if (!pkt)
        goto exit_flush;

    vr_init_forwarding_md(&fmd);
    fmd.fmd_vlan = pnode->pl_vlan;
    fmd.fmd_dvrf = pnode->pl_vrf;
    vr_flow_flush_pnode(router, pnode, NULL, &fmd);

exit_flush:
    vr_fragment_queue_element_free(vfqe, VP_DROP_CLONED_ORIGINAL);
    return;
}
Example #4
/*
 * vr_interface_input() is invoked when a packet ingresses an interface.
 * This function demultiplexes the packet to the right input
 * function depending on the protocols enabled on the VIF.
 */
static unsigned int
vr_interface_input(unsigned short vrf, struct vr_interface *vif,
                       struct vr_packet *pkt, unsigned short vlan_id)
{
    struct vr_forwarding_md fmd;
    unsigned int ret;

    vr_init_forwarding_md(&fmd);

    if (vif->vif_flags & VIF_FLAG_MIRROR_RX) {
        fmd.fmd_dvrf = vif->vif_vrf;
        vr_mirror(vif->vif_router, vif->vif_mirror_id, pkt, &fmd);
    }

    /* If vlan tagged from VM, packet needs to be treated as L2 packet */
    if ((vif->vif_type == VIF_TYPE_PHYSICAL) || (vlan_id == VLAN_ID_INVALID)) {
        if (vif->vif_flags & VIF_FLAG_L3_ENABLED) {
            ret = vr_l3_input(vrf, pkt, &fmd);
            if (ret != PKT_RET_FALLBACK_BRIDGING)
                return ret;
        }
    }

    if (vif->vif_flags & VIF_FLAG_L2_ENABLED)
        return vr_l2_input(vrf, pkt, &fmd, vlan_id);

    vif_drop_pkt(vif, pkt, 1);
    return 0;
}
Example #5
static void
vr_flow_flush(void *arg)
{
    struct vrouter *router;
    struct vr_flow_entry *fe;
    struct vr_forwarding_md fmd;
    struct vr_flow_md *flmd = (struct vr_flow_md *)arg;

    router = flmd->flmd_router;
    if (!router)
        return;

    fe = vr_get_flow_entry(router, flmd->flmd_index);
    if (!fe)
        return;

    vr_init_forwarding_md(&fmd);
    vr_flow_set_forwarding_md(router, fe, flmd->flmd_index, &fmd);

    vr_flush_entry(router, fe, flmd, &fmd);

    if (!(flmd->flmd_flags & VR_FLOW_FLAG_ACTIVE)) {
        vr_reset_flow_entry(router, fe, flmd->flmd_index);
    }

    return;
}
Example #6
static void
vr_arp_proxy(struct vr_arp *sarp, struct vr_packet *pkt,
        struct vr_forwarding_md *fmd, unsigned char *dmac)
{
    struct vr_eth *eth;
    struct vr_arp *arp;
    struct vr_forwarding_md fmd_new;
    struct vr_interface *vif = pkt->vp_if;

    eth = (struct vr_eth *)pkt_push(pkt, sizeof(*eth));
    if (!eth) {
        vr_pfree(pkt, VP_DROP_PUSH);
        return;
    }

    memcpy(eth->eth_dmac, sarp->arp_sha, VR_ETHER_ALEN);
    memcpy(eth->eth_smac, dmac, VR_ETHER_ALEN);
    eth->eth_proto = htons(VR_ETH_PROTO_ARP);

    arp = (struct vr_arp *)(pkt_data(pkt) + sizeof(*eth));
    arp->arp_hw = htons(VR_ARP_HW_TYPE_ETHER);
    arp->arp_proto = htons(VR_ETH_PROTO_IP);
    arp->arp_hwlen = VR_ETHER_ALEN;
    arp->arp_protolen = VR_IP_ADDRESS_LEN;
    arp->arp_op = htons(VR_ARP_OP_REPLY);
    memcpy(arp->arp_sha, dmac, VR_ETHER_ALEN);
    memcpy(arp->arp_dha, sarp->arp_sha, VR_ETHER_ALEN);
    memcpy(&arp->arp_dpa, &sarp->arp_spa, sizeof(sarp->arp_spa));
    memcpy(&arp->arp_spa, &sarp->arp_dpa, sizeof(sarp->arp_dpa));

    vr_init_forwarding_md(&fmd_new);
    fmd_new.fmd_dvrf = fmd->fmd_dvrf;
    vr_pkt_type(pkt, 0, &fmd_new);

    /*
     * XXX: for vcp ports, there won't be bridge table entries. to avoid
     * doing vr_bridge_input, we check for the flag NO_ARP_PROXY and,
     * if set, directly send out on that interface
     */
    if (vif_is_vhost(vif) ||
            (vif->vif_flags & VIF_FLAG_NO_ARP_PROXY)) {
        vif->vif_tx(vif, pkt, fmd);
    } else {
        vr_bridge_input(vif->vif_router, pkt, &fmd_new);
    }

    return;
}
Example #7
unsigned int
vr_bridge_input(struct vrouter *router, unsigned short vrf,
                struct vr_packet *pkt, struct vr_forwarding_md *fmd)
{
    struct vr_route_req rt;
    struct vr_nexthop *nh;
    struct vr_forwarding_md cmd;
    char bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    char *mac;

    /* First mark the packet as L2 */
    pkt->vp_type = VP_TYPE_L2;

    mac = (char *)pkt_data(pkt);
    rt.rtr_req.rtr_mac_size = VR_ETHER_ALEN;
    rt.rtr_req.rtr_mac = (int8_t *)mac;
    /* If multicast L2 packet, use broadcast composite nexthop */
    if (IS_MAC_BMCAST(mac)) {
        rt.rtr_req.rtr_mac = (int8_t *)bcast_mac;
        pkt->vp_flags |= VP_FLAG_MULTICAST;
    }

    rt.rtr_req.rtr_vrf_id = vrf;
    nh = vr_bridge_lookup(vrf, &rt, pkt);
    if (nh) {

        /*
         * If there is a label attached to this bridge entry add the
         * label
         */
        if (rt.rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG) {
            if (!fmd) {
                vr_init_forwarding_md(&cmd);
                fmd = &cmd;
            }
            fmd->fmd_label = rt.rtr_req.rtr_label;
        }

        return nh_output(vrf, pkt, nh, fmd);
    }

    vr_pfree(pkt, VP_DROP_INVALID_NH);
    return 0;
}
Example #8
/*
 * vr_virtual_input() is invoked when a packet ingresses a virtual interface.
 * This function demultiplexes the packet to the right input
 * function depending on the protocols enabled on the VIF.
 */
unsigned int
vr_virtual_input(unsigned short vrf, struct vr_interface *vif,
                 struct vr_packet *pkt, unsigned short vlan_id)
{
    struct vr_forwarding_md fmd;

    vr_init_forwarding_md(&fmd);
    fmd.fmd_vlan = vlan_id;
    fmd.fmd_dvrf = vrf;

    if (vif->vif_flags & VIF_FLAG_MIRROR_RX) {
        fmd.fmd_dvrf = vif->vif_vrf;
        vr_mirror(vif->vif_router, vif->vif_mirror_id, pkt, &fmd);
    }

    if (vr_pkt_type(pkt, 0, &fmd) < 0) {
        vif_drop_pkt(vif, pkt, 1);
        return 0;
    }

    /*
     * we really do not allow any broadcast packets from interfaces
     * that are part of transparent service chain, since transparent
     * service chain bridges packets across vrf (and hence loops can
     * happen)
     */
    if ((pkt->vp_flags & VP_FLAG_MULTICAST) &&
            (vif_is_service(vif))) {
        vif_drop_pkt(vif, pkt, 1);
        return 0;
    }

    if (!vr_flow_forward(pkt->vp_if->vif_router, pkt, &fmd))
        return 0;

    vr_bridge_input(vif->vif_router, pkt, &fmd);
    return 0;
}
Example #9
static void
vr_flow_queue_free(struct vrouter *router, void *arg)
{
    struct vr_forwarding_md fmd;
    struct vr_defer_data *defer;
    struct vr_flow_entry *fe;
    struct vr_flow_queue *vfq;

    defer = (struct vr_defer_data *)arg;
    if (!defer)
        return;

    vr_init_forwarding_md(&fmd);

    vfq = (struct vr_flow_queue *)defer->vdd_data;
    fe = vr_get_flow_entry(router, vfq->vfq_index);
    if (fe) {
        vr_flow_set_forwarding_md(router, fe, vfq->vfq_index, &fmd);
        vr_flush_flow_queue(router, fe, &fmd, vfq);
    }
    vr_free(vfq);
    return;
}
Example #10
static void
vr_flow_table_reset(struct vrouter *router)
{
    unsigned int start, end, i;
    struct vr_flow_entry *fe;
    struct vr_forwarding_md fmd;
    struct vr_flow_md flmd;

    start = end = 0;
    if (router->vr_flow_table)
        end = vr_btable_entries(router->vr_flow_table);

    if (router->vr_oflow_table) {
        if (!end)
            start = vr_flow_entries;
        end += vr_btable_entries(router->vr_oflow_table);
    }

    if (end) {
        vr_init_forwarding_md(&fmd);
        flmd.flmd_action = VR_FLOW_ACTION_DROP;
        for (i = start; i < end; i++) {
            fe = vr_get_flow_entry(router, i);
            if (fe) {
                flmd.flmd_index = i;
                flmd.flmd_flags = fe->fe_flags;
                fe->fe_action = VR_FLOW_ACTION_DROP;
                vr_flush_entry(router, fe, &flmd, &fmd);
                vr_reset_flow_entry(router, fe, i);
            }
        }
    }

    vr_flow_table_info_reset(router);

    return;
}
Example #11
unsigned int
vr_bridge_input(struct vrouter *router, struct vr_packet *pkt,
                struct vr_forwarding_md *fmd)
{
    struct vr_route_req rt;
    struct vr_forwarding_md cmd;
    struct vr_nexthop *nh;
    unsigned short pull_len, overlay_len = VROUTER_L2_OVERLAY_LEN;
    int reason;

    rt.rtr_req.rtr_label_flags = 0;
    rt.rtr_req.rtr_index = VR_BE_INVALID_INDEX;
    rt.rtr_req.rtr_mac_size = VR_ETHER_ALEN;
    rt.rtr_req.rtr_mac = (int8_t *)pkt_data(pkt);
    /* If multicast L2 packet, use broadcast composite nexthop */
    if (IS_MAC_BMCAST(rt.rtr_req.rtr_mac))
        rt.rtr_req.rtr_mac = (int8_t *)vr_bcast_mac;
    rt.rtr_req.rtr_vrf_id = fmd->fmd_dvrf;

    nh = vr_bridge_lookup(fmd->fmd_dvrf, &rt);
    if (!nh) {
        vr_pfree(pkt, VP_DROP_L2_NO_ROUTE);
        return 0;
    }

    if (nh->nh_type == NH_L2_RCV)
        overlay_len = VROUTER_OVERLAY_LEN;

    if (pkt->vp_type == VP_TYPE_IP || pkt->vp_type == VP_TYPE_IP6) {

        if (vif_is_virtual(pkt->vp_if) &&
                vr_from_vm_mss_adj && vr_pkt_from_vm_tcp_mss_adj) {

            pull_len = pkt_get_network_header_off(pkt) - pkt_head_space(pkt);
            if (!pkt_pull(pkt, pull_len)) {
                vr_pfree(pkt, VP_DROP_PULL);
                return 0;
            }

            if ((reason = vr_pkt_from_vm_tcp_mss_adj(pkt, overlay_len))) {
                vr_pfree(pkt, reason);
                return 0;
            }
            if (!pkt_push(pkt, pull_len)) {
                vr_pfree(pkt, VP_DROP_PUSH);
                return 0;
            }
        }
    }

    /*
     * If there is a label attached to this bridge entry add the
     * label
     */
    if (rt.rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG) {
        if (!fmd) {
            vr_init_forwarding_md(&cmd);
            fmd = &cmd;
        }
        fmd->fmd_label = rt.rtr_req.rtr_label;
    }

    nh_output(pkt, nh, fmd);
    return 0;
}
Example #12
int
vr_mpls_input(struct vrouter *router, struct vr_packet *pkt,
        struct vr_forwarding_md *fmd)
{
    int ttl, l2_offset = 0;
    unsigned int label;
    unsigned short drop_reason;

    struct vr_nexthop *nh;
    struct vr_ip *ip;
    struct vr_forwarding_md c_fmd;

    if (!fmd) {
        vr_init_forwarding_md(&c_fmd);
        fmd = &c_fmd;
    }

    label = ntohl(*(unsigned int *)pkt_data(pkt));
    ttl = label & 0xFF;
    label >>= VR_MPLS_LABEL_SHIFT;
    if (label >= router->vr_max_labels) {
        drop_reason = VP_DROP_INVALID_LABEL;
        goto dropit;
    }

    if (--ttl <= 0) {
        drop_reason = VP_DROP_TTL_EXCEEDED;
        goto dropit;
    }

    ip = (struct vr_ip *)pkt_network_header(pkt);
    fmd->fmd_outer_src_ip = ip->ip_saddr;
    vr_forwarding_md_set_label(fmd, label, VR_LABEL_TYPE_MPLS);

    /* Store the TTL in packet. Will be used for multicast replication */
    pkt->vp_ttl = ttl;

    /* drop the TOStack label */
    if (!pkt_pull(pkt, VR_MPLS_HDR_LEN)) {
        drop_reason = VP_DROP_PULL;
        goto dropit;
    }

    nh = __vrouter_get_label(router, label);
    if (!nh) {
        drop_reason = VP_DROP_INVALID_LABEL;
        goto dropit;
    }

    /*
     * Mark it for GRO. Diag, L2 and multicast nexthops unmark if
     * required
     */
    if (vr_perfr)
        pkt->vp_flags |= VP_FLAG_GRO;

    /* Reset the flags which get defined below */
    pkt->vp_flags &= ~VP_FLAG_MULTICAST;
    fmd->fmd_vlan = VLAN_ID_INVALID;

    if (nh->nh_family == AF_INET) {
        ip = (struct vr_ip *)pkt_data(pkt);
        if (vr_ip_is_ip4(ip)) {
            pkt->vp_type = VP_TYPE_IP;
        } else if (vr_ip_is_ip6(ip)) {
            pkt->vp_type = VP_TYPE_IP6;
        } else {
            drop_reason = VP_DROP_INVALID_PROTOCOL;
            goto dropit;
        }

        pkt_set_network_header(pkt, pkt->vp_data);
        pkt_set_inner_network_header(pkt, pkt->vp_data);

    } else if (nh->nh_family == AF_BRIDGE) {

        if (nh->nh_type == NH_COMPOSITE) {
            if (label >= VR_MAX_UCAST_LABELS)
                l2_offset = VR_L2_MCAST_CTRL_DATA_LEN + VR_VXLAN_HDR_LEN;
        }

        if (vr_pkt_type(pkt, l2_offset, fmd) < 0) {
            drop_reason = VP_DROP_INVALID_PACKET;
            goto dropit;
        }

    } else {
        drop_reason = VP_DROP_INVALID_NH;
        goto dropit;
    }

    /*
     * We are typically looking at interface nexthops, and hence we will
     * hit the vrf of the destination device. But, labels can also point
     * to composite nexthops (ECMP being case in point), in which case we
     * will take the vrf from the nexthop. When everything else fails, we
     * will forward the packet in the vrf in which it came i.e fabric
     */
    if (nh->nh_vrf >= 0)
        fmd->fmd_dvrf = nh->nh_vrf;
    else if (nh->nh_dev)
        fmd->fmd_dvrf = nh->nh_dev->vif_vrf;
    else
        fmd->fmd_dvrf = pkt->vp_if->vif_vrf;

    nh_output(pkt, nh, fmd);

    return 0;

dropit:
    vr_pfree(pkt, drop_reason);
    return 0;
}
Example #13
/*
 * vr_input is called from the linux (host) ingress path. we are not allowed
 * to sleep here. the return value should indicate whether the router consumed
 * the packet or not. if the router did not consume it, the host will continue
 * its packet processing with the same packet; if the router did consume it,
 * the host will not touch the packet again. a return of 0 tells the handler
 * that the router consumed it, while all other return values are passed as
 * is. maybe we need a mapping from router return values to host return
 * values, but at a later time?
 */
unsigned int
vr_input(unsigned short vrf, struct vr_interface *vif, struct vr_packet *pkt)
{
    unsigned char *data = pkt_data(pkt);
    unsigned char *eth = data;
    unsigned char *dmac = &eth[VR_ETHER_DMAC_OFF];
    unsigned short eth_proto;
    struct vr_vlan_hdr *vlan;
    struct vrouter *router = vif->vif_router;
    struct vr_forwarding_md fmd;
    int reason;

    if (vif->vif_flags & VIF_FLAG_MIRROR_RX) {
        vr_init_forwarding_md(&fmd);
        fmd.fmd_dvrf = vif->vif_vrf;
        vr_mirror(vif->vif_router, vif->vif_mirror_id, pkt, &fmd);
    }

    /*
     * we will optimise for the most likely case i.e that of IPv4. need
     * to see what needs to happen for v6 when it comes
     */
    data = pkt_pull(pkt, VR_ETHER_HLEN);
    if (!data) {
        vif_drop_pkt(vif, pkt, 1);
        return 0;
    }

    eth_proto = ntohs(*(unsigned short *)(eth + VR_ETHER_PROTO_OFF));
    while (eth_proto == VR_ETH_PROTO_VLAN) {
        vlan = (struct vr_vlan_hdr *)data;
        eth_proto = ntohs(vlan->vlan_proto);
        data = pkt_pull(pkt, sizeof(*vlan));
        if (!data) {
            vif_drop_pkt(vif, pkt, 1);
            return 0;
        }
    }

    vr_init_forwarding_md(&fmd);

    pkt_set_network_header(pkt, pkt->vp_data);
    pkt_set_inner_network_header(pkt, pkt->vp_data);
    if (eth_proto == VR_ETH_PROTO_IP) {
        if (vr_from_vm_mss_adj && vr_pkt_from_vm_tcp_mss_adj &&
                         (vif->vif_type == VIF_TYPE_VIRTUAL)) {
            if ((reason = vr_pkt_from_vm_tcp_mss_adj(pkt))) {
                vr_pfree(pkt, reason);
                return 0;
            }
        }

        return vr_flow_inet_input(router, vrf, pkt, eth_proto, &fmd);
    } else if (eth_proto == VR_ETH_PROTO_ARP)
        return vr_arp_input(router, vrf, pkt);

    /* rest of the stuff is for slow path and we should be ok doing this */
    if (well_known_mac(dmac))
        return vr_trap(pkt, vrf, AGENT_TRAP_L2_PROTOCOLS, NULL);

    return vr_default_input(pkt);
}