Example #1
uint32_t
vr_fragment_get_hash(unsigned int vrf, struct vr_packet *pkt)
{
    struct vr_ip *ip;

    ip = (struct vr_ip *)pkt_network_header(pkt);
    return __vr_fragment_get_hash(vrf, ip->ip_saddr, ip->ip_daddr, pkt);
}
Example #2
uint32_t
__vr_fragment_get_hash(unsigned int vrf, uint32_t sip,
        uint32_t dip, struct vr_packet *pkt)
{
    struct vr_fragment_key vfk;
    struct vr_ip *ip;

    ip = (struct vr_ip *)pkt_network_header(pkt);
    __fragment_key(&vfk, vrf, sip, dip, ip->ip_id);

    return vr_hash(&vfk, sizeof(vfk), 0);
}
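Examples #1 and #2 build a per-VRF fragment key from the outer IP header (__fragment_key presumably packs the VRF, source address, destination address and IP identification) and hash it with vr_hash. Below is a minimal, self-contained sketch of the same idea; the frag_key layout and the FNV-1a stand-in for vr_hash are assumptions for illustration, not the vrouter API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical key layout mirroring the idea of vr_fragment_key. */
struct frag_key {
    uint32_t vrf;
    uint32_t sip;
    uint32_t dip;
    uint16_t ip_id;
    uint16_t pad;               /* explicit padding so the key hashes cleanly */
};

/* Stand-in for vr_hash: FNV-1a over the raw key bytes. */
static uint32_t fnv1a(const void *data, size_t len, uint32_t seed)
{
    const unsigned char *p = data;
    uint32_t h = 2166136261u ^ seed;

    while (len--) {
        h ^= *p++;
        h *= 16777619u;
    }
    return h;
}

static uint32_t frag_hash(uint32_t vrf, uint32_t sip, uint32_t dip, uint16_t id)
{
    struct frag_key key;

    memset(&key, 0, sizeof(key));   /* zero the padding: deterministic input */
    key.vrf = vrf;
    key.sip = sip;
    key.dip = dip;
    key.ip_id = id;
    return fnv1a(&key, sizeof(key), 0);
}

int main(void)
{
    printf("hash = 0x%08x\n", frag_hash(2, 0x0a000001, 0x0a000002, 0x1234));
    return 0;
}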
Example #3
/*
 * Handle unicast ARP requests and neighbor refreshes. In many cases we
 * do not want unicast ARP requests from the gateway (such as an MX) to
 * reach the VMs and change the gateway MAC to IP(6) binding, since for
 * VMs the gateway is always the agent. Such requests should go through
 * only if the mode is L2.
 */
int
vif_plug_mac_request(struct vr_interface *vif, struct vr_packet *pkt,
        struct vr_forwarding_md *fmd)
{
    int handled = 1;
    int nheader;

    struct vr_arp *sarp;

    if (pkt->vp_flags & VP_FLAG_MULTICAST)
        goto unhandled;

    nheader = pkt_network_header(pkt) - pkt_data(pkt);
    if (nheader < 0 || (pkt->vp_data + nheader > pkt->vp_end))
        goto unhandled;

    if (pkt->vp_type == VP_TYPE_ARP) {
        if (pkt->vp_len < (nheader + sizeof(*sarp)))
            goto unhandled;

        sarp = (struct vr_arp *)(pkt_data(pkt) + nheader);
        if (ntohs(sarp->arp_op) != VR_ARP_OP_REQUEST)
            goto unhandled;

        pkt_pull(pkt, nheader);

        handled = vr_arp_input(pkt, fmd);
        if (!handled) {
            pkt_push(pkt, nheader);
        }
        return handled;
    } else if (pkt->vp_type == VP_TYPE_IP6) {
        if (pkt->vp_len < (nheader + sizeof(struct vr_ip6) +
                    sizeof(struct vr_icmp) + VR_IP6_ADDRESS_LEN +
                    sizeof(struct vr_neighbor_option) + VR_ETHER_ALEN))
            goto unhandled;

        pkt_pull(pkt, nheader);

        handled = vr_neighbor_input(pkt, fmd);
        if (!handled) {
            pkt_push(pkt, nheader);
        }
        return handled;
    }

unhandled:
    return !handled;
}
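Example #3 strips the L2 header with pkt_pull() before handing the frame to vr_arp_input()/vr_neighbor_input(), and restores it with pkt_push() when the handler declines the packet. A minimal sketch of that pull/push bookkeeping on a toy packet descriptor follows; the struct and helpers are simplified stand-ins, not the vrouter packet API.

#include <stdio.h>

/* Toy packet: a byte buffer plus a cursor to the current start of data. */
struct toy_pkt {
    unsigned char buf[128];
    unsigned int data;          /* offset of the first valid byte */
    unsigned int end;           /* offset one past the last valid byte */
};

/* Advance the data cursor, as pkt_pull() does; NULL on underrun. */
static unsigned char *toy_pull(struct toy_pkt *p, unsigned int len)
{
    if (p->data + len > p->end)
        return NULL;
    p->data += len;
    return p->buf + p->data;
}

/* Move the cursor back, as pkt_push() does; NULL if there is no headroom. */
static unsigned char *toy_push(struct toy_pkt *p, unsigned int len)
{
    if (len > p->data)
        return NULL;
    p->data -= len;
    return p->buf + p->data;
}

int main(void)
{
    struct toy_pkt p = { .data = 0, .end = 64 };
    unsigned int l2_len = 14;                /* pretend Ethernet header */

    toy_pull(&p, l2_len);                    /* expose the network header */
    printf("after pull: data=%u\n", p.data);

    /* Handler declined the packet: restore the original framing. */
    toy_push(&p, l2_len);
    printf("after push: data=%u\n", p.data);
    return 0;
}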
Example #4
static int
vr_mcast_mpls_input(struct vrouter *router, struct vr_packet *pkt,
        struct vr_forwarding_md *fmd) 
{
    unsigned int ttl;
    unsigned int label;
    unsigned short drop_reason = 0;
    int i;
    int found;
    struct vr_nexthop *nh;
    struct vr_nexthop *dir_nh;
    struct vr_ip *ip;

    label = ntohl(*(unsigned int *)pkt_data(pkt));
    ttl = label & 0xFF;
    label >>= VR_MPLS_LABEL_SHIFT;

    if (ttl == 0 || --ttl == 0) {
        drop_reason = VP_DROP_TTL_EXCEEDED;
        goto dropit;
    }

    nh = router->vr_ilm[label];
    if (!nh || nh->nh_type != NH_COMPOSITE) {
        drop_reason = VP_DROP_INVALID_NH;
        goto dropit;
    }

    if (!pkt_pull(pkt, VR_MPLS_HDR_LEN)) {
        drop_reason = VP_DROP_PUSH;
        goto dropit;
    }

    ip = (struct vr_ip *)pkt_network_header(pkt);

    /* Ensure that the packet is received from one of the tree descendants */
    for (i = 0, found = 0; i < nh->nh_component_cnt; i++) {
        dir_nh = nh->nh_component_nh[i].cnh;
        if (dir_nh->nh_type == NH_TUNNEL) {
            if (ip->ip_saddr == dir_nh->nh_gre_tun_dip) {
                found = 1;
                break;
            }
        }
    }

    if (found == 0) {
        drop_reason = VP_DROP_INVALID_MCAST_SOURCE;
        goto dropit;
    }

    /* Update the ttl to be used for the subsequent nh processing */
    pkt->vp_ttl = ttl;

    /* If from a valid descendant, start replicating */
    nh_output(pkt->vp_if->vif_vrf, pkt, nh, fmd);
    return 0;

dropit:
    vr_pfree(pkt, drop_reason);
    return 0;
}
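Example #4 reads the 32-bit MPLS shim at the start of the packet: the low 8 bits carry the TTL and, after the shift by VR_MPLS_LABEL_SHIFT (12), the top 20 bits are the label. A small self-contained sketch of that arithmetic, following the RFC 3032 field layout; the struct and helper names are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* RFC 3032 shim layout: label(20) | exp(3) | bottom-of-stack(1) | ttl(8). */
#define MPLS_LABEL_SHIFT 12

struct mpls_fields {
    uint32_t label;
    uint8_t  exp;
    uint8_t  bos;
    uint8_t  ttl;
};

/* Parse one network-order MPLS shim word, mirroring the ntohl/mask/shift
 * sequence in vr_mcast_mpls_input(). */
static struct mpls_fields mpls_parse(uint32_t shim_be)
{
    uint32_t shim = ntohl(shim_be);
    struct mpls_fields f;

    f.ttl   = shim & 0xFF;
    f.bos   = (shim >> 8) & 0x1;
    f.exp   = (shim >> 9) & 0x7;
    f.label = shim >> MPLS_LABEL_SHIFT;
    return f;
}

int main(void)
{
    /* label 100, exp 0, bottom-of-stack set, ttl 64 */
    uint32_t shim_be = htonl((100u << MPLS_LABEL_SHIFT) | (1u << 8) | 64u);
    struct mpls_fields f = mpls_parse(shim_be);

    printf("label=%u exp=%u bos=%u ttl=%u\n", f.label, f.exp, f.bos, f.ttl);
    return 0;
}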
Example #5
static void
vr_flow_tcp_digest(struct vrouter *router, struct vr_flow_entry *flow_e,
        struct vr_packet *pkt, struct vr_forwarding_md *fmd)
{
    uint16_t tcp_offset_flags;
    unsigned int length;

    struct vr_ip *iph;
    struct vr_ip6 *ip6h;
    struct vr_tcp *tcph = NULL;
    struct vr_flow_entry *rflow_e = NULL;

    iph = (struct vr_ip *)pkt_network_header(pkt);
    if (!vr_ip_transport_header_valid(iph))
        return;

    if (pkt->vp_type == VP_TYPE_IP) {
        if (iph->ip_proto != VR_IP_PROTO_TCP)
            return;

        length = ntohs(iph->ip_len) - (iph->ip_hl * 4);
        tcph = (struct vr_tcp *)((unsigned char *)iph + (iph->ip_hl * 4));
    } else if (pkt->vp_type == VP_TYPE_IP6) {
        ip6h = (struct vr_ip6 *)iph;
        if (ip6h->ip6_nxt != VR_IP_PROTO_TCP)
            return;

        length = ntohs(ip6h->ip6_plen);
        tcph = (struct vr_tcp *)((unsigned char *)iph + sizeof(struct vr_ip6));
    }

    if (tcph) {
        /*
         * there are some optimizations here that make the code slightly
         * less frugal. For example, the *_R flags are used to make sure
         * that for a packet that contains an ACK, we do not fetch the
         * reverse flow unless we are interested in it, saving some
         * execution time.
         */
        tcp_offset_flags = ntohs(tcph->tcp_offset_r_flags);
        /* if we get a reset, session has to be closed */
        if (tcp_offset_flags & VR_TCP_FLAG_RST) {
            (void)__sync_fetch_and_or(&flow_e->fe_tcp_flags,
                    VR_FLOW_TCP_RST);
            if (flow_e->fe_flags & VR_RFLOW_VALID) {
                rflow_e = vr_get_flow_entry(router, flow_e->fe_rflow);
                if (rflow_e) {
                    (void)__sync_fetch_and_or(&rflow_e->fe_tcp_flags,
                            VR_FLOW_TCP_RST);
                }
            }
            vr_flow_init_close(router, flow_e, pkt, fmd);
            return;
        } else if (tcp_offset_flags & VR_TCP_FLAG_SYN) {
            /* if only a SYN... */
            flow_e->fe_tcp_seq = ntohl(tcph->tcp_seq);
            (void)__sync_fetch_and_or(&flow_e->fe_tcp_flags, VR_FLOW_TCP_SYN);
            if (flow_e->fe_flags & VR_RFLOW_VALID) {
                rflow_e = vr_get_flow_entry(router, flow_e->fe_rflow);
                if (rflow_e) {
                    (void)__sync_fetch_and_or(&rflow_e->fe_tcp_flags,
                            VR_FLOW_TCP_SYN_R);
                    if ((flow_e->fe_tcp_flags & VR_FLOW_TCP_SYN_R) &&
                            (tcp_offset_flags & VR_TCP_FLAG_ACK)) {
                        if (ntohl(tcph->tcp_ack) == (rflow_e->fe_tcp_seq + 1)) {
                            (void)__sync_fetch_and_or(&rflow_e->fe_tcp_flags,
                                    VR_FLOW_TCP_ESTABLISHED);
                            (void)__sync_fetch_and_or(&flow_e->fe_tcp_flags,
                                     VR_FLOW_TCP_ESTABLISHED_R);
                        }
                    }
                }
            }
        } else if (tcp_offset_flags & VR_TCP_FLAG_FIN) {
            /*
             * when a FIN is received, update the sequence of the FIN and set
             * the flow FIN flag. It is possible that the FIN packet came with
             * some data, in which case the sequence number of the FIN is one
             * more than the last data byte in the sequence
             */
            length -= (((tcp_offset_flags) >> 12) * 4);
            flow_e->fe_tcp_seq = ntohl(tcph->tcp_seq) + length;
            (void)__sync_fetch_and_or(&flow_e->fe_tcp_flags, VR_FLOW_TCP_FIN);
            /*
             * when an ack for a FIN is sent, we need to take some actions
             * on the reverse flow (since FIN came in the reverse flow). to
             * avoid looking up the reverse flow for all acks, we mark the
             * reverse flow's reverse flow with a flag (FIN_R). we will
             * lookup the reverse flow only if this flag is set and the
             * tcp header has an ack bit set
             */
            if (flow_e->fe_flags & VR_RFLOW_VALID) {
                rflow_e = vr_get_flow_entry(router, flow_e->fe_rflow);
                if (rflow_e) {
                    (void)__sync_fetch_and_or(&rflow_e->fe_tcp_flags,
                            VR_FLOW_TCP_FIN_R);
                }
            }
        }

        /*
         * if FIN_R is set in the flow and if the ACK bit is set in the
         * tcp header, then we need to mark the reverse flow as dead.
         *
         * OR
         *
         * if the SYN_R is set and ESTABLISHED_R is not set and if this
         * is an ack packet, if this ack completes the connection, we
         * need to set ESTABLISHED
         */
        if (((flow_e->fe_tcp_flags & VR_FLOW_TCP_FIN_R) ||
                (!(flow_e->fe_tcp_flags & VR_FLOW_TCP_ESTABLISHED_R) &&
                 (flow_e->fe_tcp_flags & VR_FLOW_TCP_SYN_R))) &&
                (tcp_offset_flags & VR_TCP_FLAG_ACK)) {
            if (flow_e->fe_flags & VR_RFLOW_VALID) {
                if (!rflow_e) {
                    rflow_e = vr_get_flow_entry(router, flow_e->fe_rflow);
                }

                if (rflow_e) {
                    if ((ntohl(tcph->tcp_ack) == (rflow_e->fe_tcp_seq + 1)) &&
                            (flow_e->fe_tcp_flags & VR_FLOW_TCP_FIN_R)) {
                        (void)__sync_fetch_and_or(&rflow_e->fe_tcp_flags,
                                VR_FLOW_TCP_HALF_CLOSE);
                        /*
                         * both the forward and the reverse flows are
                         * now dead
                         */
                        if (flow_e->fe_tcp_flags & VR_FLOW_TCP_HALF_CLOSE) {
                            vr_flow_init_close(router, flow_e, pkt, fmd);
                        }
                    } else if (ntohl(tcph->tcp_ack) != rflow_e->fe_tcp_seq) {
                        if (!(flow_e->fe_tcp_flags &
                                    VR_FLOW_TCP_ESTABLISHED_R)) {
                            (void)__sync_fetch_and_or(&rflow_e->fe_tcp_flags,
                                    VR_FLOW_TCP_ESTABLISHED);
                            (void)__sync_fetch_and_or(&flow_e->fe_tcp_flags,
                                     VR_FLOW_TCP_ESTABLISHED_R);
                        }
                    }
                }
            }
        }
    }
}
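Example #5 tracks TCP session state by OR-ing flag bits (SYN, SYN_R, ESTABLISHED, ESTABLISHED_R, FIN, RST, ...) into the forward and reverse flow entries as segments are digested. Below is a minimal sketch of that bookkeeping on two toy flow records, driven by a SYN / SYN-ACK / ACK exchange; the flag names and the digest helper are simplified stand-ins for the vrouter flow entry and cover only handshake establishment.

#include <stdint.h>
#include <stdio.h>

/* Simplified per-direction session flags, in the spirit of VR_FLOW_TCP_*. */
#define F_SYN            0x01   /* SYN seen on this flow */
#define F_SYN_R          0x02   /* SYN seen on the reverse flow */
#define F_ESTABLISHED    0x04   /* handshake completed towards this flow */
#define F_ESTABLISHED_R  0x08   /* handshake completed on the reverse flow */

struct toy_flow {
    uint16_t flags;
    uint32_t seq;               /* last interesting sequence number */
};

/* Digest one segment sent on `fwd` (with `rev` as its reverse flow). */
static void digest(struct toy_flow *fwd, struct toy_flow *rev,
                   int syn, int ack, uint32_t seq, uint32_t ack_no)
{
    if (syn) {
        fwd->seq = seq;
        fwd->flags |= F_SYN;
        rev->flags |= F_SYN_R;
        /* SYN-ACK completing a handshake that the reverse flow started. */
        if ((fwd->flags & F_SYN_R) && ack && ack_no == rev->seq + 1) {
            rev->flags |= F_ESTABLISHED;
            fwd->flags |= F_ESTABLISHED_R;
        }
    } else if (ack && (fwd->flags & F_SYN_R) &&
               !(fwd->flags & F_ESTABLISHED_R) && ack_no == rev->seq + 1) {
        /* Final ACK of the three-way handshake. */
        rev->flags |= F_ESTABLISHED;
        fwd->flags |= F_ESTABLISHED_R;
    }
}

int main(void)
{
    struct toy_flow client = { 0, 0 }, server = { 0, 0 };

    digest(&client, &server, 1, 0, 1000, 0);        /* SYN     */
    digest(&server, &client, 1, 1, 5000, 1001);     /* SYN-ACK */
    digest(&client, &server, 0, 1, 1001, 5001);     /* ACK     */

    printf("client flags=0x%x server flags=0x%x\n",
           client.flags, server.flags);
    return 0;
}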
Example #6
int
vr_fragment_assembler(struct vr_fragment **head_p,
        struct vr_fragment_queue_element *vfqe)
{
    int ret = 0;
    unsigned long sec, nsec;
    unsigned int list_length = 0, drop_reason;
    bool found = false, frag_head = false;

    struct vrouter *router;
    struct vr_ip *ip;
    struct vr_packet *pkt;
    struct vr_packet_node *pnode;
    struct vr_fragment *frag, *frag_flow, **prev = NULL;
    struct vr_fragment_queue_element *fqe;
    struct vr_fragment_key vfk;


    router = vfqe->fqe_router;
    pnode = &vfqe->fqe_pnode;
    pkt = pnode->pl_packet;
    ip = (struct vr_ip *)pkt_network_header(pkt);

    if (pnode->pl_flags & PN_FLAG_FRAGMENT_HEAD)
        frag_head = true;

    __fragment_key(&vfk, pnode->pl_vrf, pnode->pl_inner_src_ip,
            pnode->pl_inner_dst_ip, ip->ip_id);

    frag = *head_p;
    prev = head_p;
    while (frag) {
        list_length++;
        if (!memcmp(&frag->f_key, &vfk, sizeof(vfk))) {
            found = true;
            break;
        }

        prev = &frag->f_next;
        frag = frag->f_next;
    }

    if (!frag_head) {
        frag_flow = vr_fragment_get(router, pnode->pl_vrf, ip);
        if (frag_flow) {
            vr_fragment_flush_queue_element(vfqe);
            return 0;
        }
    }

    if (!found) {
        if (frag_head) {
            drop_reason = VP_DROP_CLONED_ORIGINAL;
            goto exit_assembly;
        }

        if (list_length > VR_MAX_FRAGMENTS_PER_ASSEMBLER_QUEUE) {
            drop_reason = VP_DROP_FRAGMENT_QUEUE_FAIL;
            goto exit_assembly;
        }

        frag = vr_zalloc(sizeof(*frag), VR_FRAGMENT_OBJECT);
        if (!frag) {
            ret = -ENOMEM;
            drop_reason = VP_DROP_NO_MEMORY;
            goto exit_assembly;
        }

        memcpy(&frag->f_key, &vfk, sizeof(vfk));
        frag->f_port_info_valid = false;
    }

    vr_get_mono_time(&sec, &nsec);
    frag->f_time = sec;
    if (!found) {
        prev = head_p;
        frag->f_next = *head_p;
        *head_p = frag;
    }

    if (!frag_head) {
        vfqe->fqe_next = NULL;
        fqe = frag->f_qe;
        if (!fqe) {
            frag->f_qe = vfqe;
        } else {
            while (fqe) {
                if (fqe->fqe_next) {
                    fqe = fqe->fqe_next;
                } else {
                    break;
                }
            }

            fqe->fqe_next = vfqe;
        }
    } else {
        frag->f_port_info_valid = true;
        vr_fragment_queue_element_free(vfqe, VP_DROP_CLONED_ORIGINAL);
    }


    if (frag->f_port_info_valid) {
        while ((fqe = frag->f_qe)) {
            frag->f_qe = fqe->fqe_next;
            vr_fragment_flush_queue_element(fqe);
        }

        fragment_unlink_frag(prev, frag);
        fragment_free_frag(frag);
    }

    return 0;

exit_assembly:
    vr_fragment_queue_element_free(vfqe, drop_reason);
    return ret;
}
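Example #6 walks the per-bucket fragment list while keeping a pointer-to-pointer (prev) so the matching entry can later be unlinked in place (fragment_unlink_frag(prev, frag)). A minimal sketch of that search-and-unlink idiom on a generic singly linked list; the node type and key are hypothetical.

#include <stdio.h>

struct node {
    int key;
    struct node *next;
};

/* Search for `key`, remembering the pointer that links to the match so the
 * node can be spliced out without re-walking the list. */
static struct node *find_and_unlink(struct node **head_p, int key)
{
    struct node **prev = head_p;
    struct node *cur = *head_p;

    while (cur) {
        if (cur->key == key) {
            *prev = cur->next;      /* unlink through the remembered slot */
            cur->next = NULL;
            return cur;
        }
        prev = &cur->next;
        cur = cur->next;
    }
    return NULL;
}

int main(void)
{
    struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    struct node *head = &a;

    struct node *hit = find_and_unlink(&head, 2);
    printf("unlinked key=%d, list now: %d -> %d\n",
           hit ? hit->key : -1, head->key, head->next->key);
    return 0;
}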
Example #7
unsigned int
vr_flow_inet_input(struct vrouter *router, unsigned short vrf,
        struct vr_packet *pkt, unsigned short proto,
        struct vr_forwarding_md *fmd)
{
    struct vr_flow_key key, *key_p = &key;
    struct vr_ip *ip, *icmp_pl_ip = NULL;
    struct vr_fragment *frag;
    unsigned int flow_parse_res;
    unsigned int trap_res  = 0;
    unsigned int sip, dip;
    unsigned short *t_hdr, sport, dport;
    unsigned char ip_proto;
    struct vr_icmp *icmph;

    /*
     * interface is in a mode where it wants all packets to be received
     * without doing lookups to figure out whether packets were destined
     * to me or not
     */
    if (pkt->vp_flags & VP_FLAG_TO_ME)
        return vr_ip_rcv(router, pkt, fmd);

    ip = (struct vr_ip *)pkt_network_header(pkt);
    ip_proto = ip->ip_proto;

    /* if the packet is not a fragment, we easily know the sport and dport */
    if (vr_ip_transport_header_valid(ip)) {
        t_hdr = (unsigned short *)((char *)ip + (ip->ip_hl * 4));
        if (ip_proto == VR_IP_PROTO_ICMP) {
            icmph = (struct vr_icmp *)t_hdr;
            if (vr_icmp_error(icmph)) {
                icmp_pl_ip = (struct vr_ip *)(icmph + 1);
                ip_proto = icmp_pl_ip->ip_proto;
                t_hdr = (unsigned short *)((char *)icmp_pl_ip +
                        (icmp_pl_ip->ip_hl * 4));
                if (ip_proto == VR_IP_PROTO_ICMP)
                    icmph = (struct vr_icmp *)t_hdr;
            }
        }

        if (ip_proto == VR_IP_PROTO_ICMP) {
            if (icmph->icmp_type == VR_ICMP_TYPE_ECHO ||
                    icmph->icmp_type == VR_ICMP_TYPE_ECHO_REPLY) {
                sport = icmph->icmp_eid;
                dport = VR_ICMP_TYPE_ECHO_REPLY;
            } else {
                sport = 0;
                dport = icmph->icmp_type;
            }
        } else {
            if (icmp_pl_ip) {
                sport = *(t_hdr + 1);
                dport = *t_hdr;
            } else {
                sport = *t_hdr;
                dport = *(t_hdr + 1);
            }
        }
    } else {
        /* ...else, we need to get it from somewhere */
        flow_parse_res = vr_flow_parse(router, NULL, pkt, &trap_res);
        /* ...and it really matters only if we need to do a flow lookup */
        if (flow_parse_res == VR_FLOW_LOOKUP) {
            frag = vr_fragment_get(router, vrf, ip);
            if (!frag) {
                vr_pfree(pkt, VP_DROP_FRAGMENTS);
                return 0;
            }
            sport = frag->f_sport;
            dport = frag->f_dport;
            if (vr_ip_fragment_tail(ip))
                vr_fragment_del(frag);
        } else {
            /* 
             * since there is no other way of deriving a key, set the
             * key_p to NULL, indicating to code below that there is
             * indeed no need for flow lookup
             */
            key_p = NULL;
        }
    }

    if (key_p) {
        /* we have everything to make a key */

        if (icmp_pl_ip) {
            sip = icmp_pl_ip->ip_daddr;
            dip = icmp_pl_ip->ip_saddr;
        } else {
            sip = ip->ip_saddr;
            dip = ip->ip_daddr;
        }

        vr_get_flow_key(key_p, fmd->fmd_vlan, pkt,
                sip, dip, ip_proto, sport, dport);

        flow_parse_res = vr_flow_parse(router, key_p, pkt, &trap_res);
        if (flow_parse_res == VR_FLOW_LOOKUP && vr_ip_fragment_head(ip))
            vr_fragment_add(router, vrf, ip, key_p->key_src_port,
                    key_p->key_dst_port);

        if (flow_parse_res == VR_FLOW_BYPASS) {
            return vr_flow_forward(vrf, pkt, proto, fmd);
        } else if (flow_parse_res == VR_FLOW_TRAP) {
            return vr_trap(pkt, vrf, trap_res, NULL);
        }

        return vr_flow_lookup(router, vrf, key_p, pkt, proto, fmd);
    }

    /* 
     * ...come here, when there is not enough information to do a
     * flow lookup
     */
    return vr_flow_forward(vrf, pkt, proto, fmd);
}
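Example #7 derives the flow key from the transport header and, for ICMP errors, descends into the embedded IP header and swaps the source and destination ports so the key matches the original flow. A small sketch of just that port extraction and swap, assuming the transport header starts with two 16-bit ports in network order; buffer and function names are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Pull source/destination ports out of the first four bytes of a transport
 * header.  For the header embedded inside an ICMP error, the roles are
 * swapped so the key maps to the original (offending) flow, mirroring the
 * icmp_pl_ip handling in vr_flow_inet_input(). */
static void flow_ports(const unsigned char *t_hdr, int inside_icmp_error,
                       uint16_t *sport, uint16_t *dport)
{
    uint16_t words[2];

    memcpy(words, t_hdr, sizeof(words));    /* avoid unaligned access */
    if (inside_icmp_error) {
        *sport = words[1];
        *dport = words[0];
    } else {
        *sport = words[0];
        *dport = words[1];
    }
}

int main(void)
{
    /* A fake UDP header start: sport 53, dport 33000, in network order. */
    unsigned char hdr[4];
    uint16_t s = htons(53), d = htons(33000);
    uint16_t sport, dport;

    memcpy(hdr, &s, 2);
    memcpy(hdr + 2, &d, 2);

    flow_ports(hdr, 0, &sport, &dport);
    printf("direct:     sport=%u dport=%u\n", ntohs(sport), ntohs(dport));

    flow_ports(hdr, 1, &sport, &dport);
    printf("icmp error: sport=%u dport=%u\n", ntohs(sport), ntohs(dport));
    return 0;
}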
Example #8
int
vr_mpls_input(struct vrouter *router, struct vr_packet *pkt,
        struct vr_forwarding_md *fmd)
{
    int ttl, l2_offset = 0;
    unsigned int label;
    unsigned short drop_reason;

    struct vr_nexthop *nh;
    struct vr_ip *ip;
    struct vr_forwarding_md c_fmd;

    if (!fmd) {
        vr_init_forwarding_md(&c_fmd);
        fmd = &c_fmd;
    }

    label = ntohl(*(unsigned int *)pkt_data(pkt));
    ttl = label & 0xFF;
    label >>= VR_MPLS_LABEL_SHIFT;
    if (label >= router->vr_max_labels) {
        drop_reason = VP_DROP_INVALID_LABEL;
        goto dropit;
    }

    if (--ttl <= 0) {
        drop_reason = VP_DROP_TTL_EXCEEDED;
        goto dropit;
    }

    ip = (struct vr_ip *)pkt_network_header(pkt);
    fmd->fmd_outer_src_ip = ip->ip_saddr;
    vr_forwarding_md_set_label(fmd, label, VR_LABEL_TYPE_MPLS);

    /* Store the TTL in packet. Will be used for multicast replication */
    pkt->vp_ttl = ttl;

    /* drop the TOStack label */
    if (!pkt_pull(pkt, VR_MPLS_HDR_LEN)) {
        drop_reason = VP_DROP_PULL;
        goto dropit;
    }

    nh = __vrouter_get_label(router, label);
    if (!nh) {
        drop_reason = VP_DROP_INVALID_LABEL;
        goto dropit;
    }

    /*
     * Mark it for GRO. Diag, L2 and multicast nexthops unmark if
     * required
     */
    if (vr_perfr)
        pkt->vp_flags |= VP_FLAG_GRO;

    /* Reset the flags which get defined below */
    pkt->vp_flags &= ~VP_FLAG_MULTICAST;
    fmd->fmd_vlan = VLAN_ID_INVALID;

    if (nh->nh_family == AF_INET) {
        ip = (struct vr_ip *)pkt_data(pkt);
        if (vr_ip_is_ip4(ip)) {
            pkt->vp_type = VP_TYPE_IP;
        } else if (vr_ip_is_ip6(ip)) {
            pkt->vp_type = VP_TYPE_IP6;
        } else {
            drop_reason = VP_DROP_INVALID_PROTOCOL;
            goto dropit;
        }

        pkt_set_network_header(pkt, pkt->vp_data);
        pkt_set_inner_network_header(pkt, pkt->vp_data);

    } else if (nh->nh_family == AF_BRIDGE) {

        if (nh->nh_type == NH_COMPOSITE) {
            if (label >= VR_MAX_UCAST_LABELS)
                l2_offset = VR_L2_MCAST_CTRL_DATA_LEN + VR_VXLAN_HDR_LEN;
        }

        if (vr_pkt_type(pkt, l2_offset, fmd) < 0) {
            drop_reason = VP_DROP_INVALID_PACKET;
            goto dropit;
        }

    } else {
        drop_reason = VP_DROP_INVALID_NH;
        goto dropit;
    }

    /*
     * We are typically looking at interface nexthops, and hence we will
     * hit the vrf of the destination device. But labels can also point
     * to composite nexthops (ECMP being a case in point), in which case
     * we will take the vrf from the nexthop. When everything else fails,
     * we will forward the packet in the vrf in which it came, i.e. the
     * fabric.
     */
    if (nh->nh_vrf >= 0)
        fmd->fmd_dvrf = nh->nh_vrf;
    else if (nh->nh_dev)
        fmd->fmd_dvrf = nh->nh_dev->vif_vrf;
    else
        fmd->fmd_dvrf = pkt->vp_if->vif_vrf;

    nh_output(pkt, nh, fmd);

    return 0;

dropit:
    vr_pfree(pkt, drop_reason);
    return 0;
}
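Example #8 decides whether the payload behind the label is IPv4 or IPv6 by inspecting the version nibble of the inner header (vr_ip_is_ip4()/vr_ip_is_ip6() in the source). A minimal sketch of that check; the enum and helper name are stand-ins.

#include <stdio.h>

enum payload_type { PAYLOAD_UNKNOWN, PAYLOAD_IPV4, PAYLOAD_IPV6 };

/* The IP version lives in the high nibble of the first header byte:
 * 4 for IPv4, 6 for IPv6.  Anything else is treated as invalid, matching
 * the VP_DROP_INVALID_PROTOCOL branch in vr_mpls_input(). */
static enum payload_type classify_inner(const unsigned char *data)
{
    switch (data[0] >> 4) {
    case 4:
        return PAYLOAD_IPV4;
    case 6:
        return PAYLOAD_IPV6;
    default:
        return PAYLOAD_UNKNOWN;
    }
}

int main(void)
{
    unsigned char v4_hdr[] = { 0x45, 0x00 };   /* version 4, IHL 5 */
    unsigned char v6_hdr[] = { 0x60, 0x00 };   /* version 6 */

    printf("v4 -> %d, v6 -> %d\n",
           classify_inner(v4_hdr), classify_inner(v6_hdr));
    return 0;
}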
Example #9
unsigned int
vr_bridge_input(struct vrouter *router, struct vr_packet *pkt,
                struct vr_forwarding_md *fmd)
{
    int reason, handled;
    l4_pkt_type_t l4_type = L4_TYPE_UNKNOWN;
    unsigned short pull_len, overlay_len = VROUTER_OVERLAY_LEN;

    int8_t *dmac;
    struct vr_bridge_entry *be;
    struct vr_nexthop *nh = NULL;
    struct vr_vrf_stats *stats;

    dmac = (int8_t *) pkt_data(pkt);

    if (pkt->vp_if->vif_flags & VIF_FLAG_MAC_LEARN) {
        if (vr_bridge_learn(router, pkt, fmd)) {
            return 0;
        }
    }

    pull_len = 0;
    if ((pkt->vp_type == VP_TYPE_IP) || (pkt->vp_type == VP_TYPE_IP6) ||
            (pkt->vp_type == VP_TYPE_ARP)) {
        pull_len = pkt_get_network_header_off(pkt) - pkt_head_space(pkt);
        if (pull_len && !pkt_pull(pkt, pull_len)) {
            vr_pfree(pkt, VP_DROP_PULL);
            return 0;
        }
    }

    if ((pkt->vp_type == VP_TYPE_IP) || (pkt->vp_type == VP_TYPE_IP6)) {
        if (fmd->fmd_dscp < 0) {
            if (pkt->vp_type == VP_TYPE_IP) {
                fmd->fmd_dscp =
                    vr_inet_get_tos((struct vr_ip *)pkt_network_header(pkt));
            } else if (pkt->vp_type == VP_TYPE_IP6) {
                fmd->fmd_dscp =
                    vr_inet6_get_tos((struct vr_ip6 *)pkt_network_header(pkt));
            }
        }
    } else {
        if (fmd->fmd_dotonep < 0) {
            fmd->fmd_dotonep = vr_vlan_get_tos(pkt_data(pkt));
        }
    }

    /* Do the bridge lookup for the packets not meant for "me" */
    if (!fmd->fmd_to_me) {
        /*
         * If a DHCP packet is coming from a VM, trap it to the agent
         * before even doing the bridge lookup
         */
        if (vif_is_virtual(pkt->vp_if)) {
            if (pkt->vp_type == VP_TYPE_IP)
                l4_type = vr_ip_well_known_packet(pkt);
            else if (pkt->vp_type == VP_TYPE_IP6)
                l4_type = vr_ip6_well_known_packet(pkt);

            if (l4_type == L4_TYPE_DHCP_REQUEST) {
                if (pkt->vp_if->vif_flags & VIF_FLAG_DHCP_ENABLED) {
                    vr_trap(pkt, fmd->fmd_dvrf, AGENT_TRAP_L3_PROTOCOLS, NULL);
                    return 0;
                }
            }

            /*
             * Handle the unicast ARP, coming from VM, not
             * destined to us. Broadcast ARP requests would be handled
             * in L2 multicast nexthop. Multicast ARP on fabric
             * interface also would be handled in L2 multicast nexthop.
             * Unicast ARP packets on fabric interface would be handled
             * in plug routines of interface.
             */
            if (!IS_MAC_BMCAST(dmac)) {
                handled = 0;
                if (pkt->vp_type == VP_TYPE_ARP) {
                    handled = vr_arp_input(pkt, fmd, dmac);
                } else if (l4_type == L4_TYPE_NEIGHBOUR_SOLICITATION) {
                    handled = vr_neighbor_input(pkt, fmd, dmac);
                }

                if (handled)
                    return 0;
            }
        }

        be = bridge_lookup(dmac, fmd);
        if (be)
            nh = be->be_nh;

        if (!nh || nh->nh_type == NH_DISCARD) {

            /* If flooding of unknown unicast is not allowed, drop the packet */
            if (!vr_unknown_uc_flood(pkt->vp_if, pkt->vp_nh) ||
                                 IS_MAC_BMCAST(dmac)) {
                vr_pfree(pkt, VP_DROP_L2_NO_ROUTE);
                return 0;
            }

            be = bridge_lookup(vr_bcast_mac, fmd);
            nh = be ? be->be_nh : NULL;
            if (!nh) {
                vr_pfree(pkt, VP_DROP_L2_NO_ROUTE);
                return 0;
            }
            stats = vr_inet_vrf_stats(fmd->fmd_dvrf, pkt->vp_cpu);
            if (stats)
                stats->vrf_uuc_floods++;

            /* Treat this unknown unicast packet as multicast */
            pkt->vp_flags |= VP_FLAG_MULTICAST;
        }

        if (be)
            __sync_fetch_and_add(&be->be_packets, 1);

        if (nh->nh_type != NH_L2_RCV)
            overlay_len = VROUTER_L2_OVERLAY_LEN;
    }


    /* Adjust MSS for V4 and V6 packets */
    if ((pkt->vp_type == VP_TYPE_IP) || (pkt->vp_type == VP_TYPE_IP6)) {

        if (vif_is_virtual(pkt->vp_if) &&
                vr_from_vm_mss_adj && vr_pkt_from_vm_tcp_mss_adj) {

            if ((reason = vr_pkt_from_vm_tcp_mss_adj(pkt, overlay_len))) {
                vr_pfree(pkt, reason);
                return 0;
            }
        }

        if (fmd->fmd_to_me) {
            handled = vr_l3_input(pkt, fmd);
            if (!handled) {
                vr_pfree(pkt, VP_DROP_NOWHERE_TO_GO);
            }
            return 0;
        }
    }

    if (pull_len && !pkt_push(pkt, pull_len)) {
        vr_pfree(pkt, VP_DROP_PUSH);
        return 0;
    }

    nh_output(pkt, nh, fmd);
    return 0;
}
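Example #9 skips the unknown-unicast flooding path when the destination MAC is broadcast or multicast (IS_MAC_BMCAST(dmac)). A small sketch of that test: Ethernet marks group addresses with the I/G bit, the least significant bit of the first octet, and broadcast is just a special case of it; the helper name here is illustrative, not the vrouter macro.

#include <stdio.h>
#include <stdint.h>

/* Group (multicast) Ethernet addresses have the I/G bit set in the first
 * octet; ff:ff:ff:ff:ff:ff (broadcast) is covered by the same test, which
 * is all a check like IS_MAC_BMCAST effectively needs. */
static int mac_is_bmcast(const uint8_t *mac)
{
    return mac[0] & 0x01;
}

int main(void)
{
    uint8_t ucast[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
    uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
    uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

    printf("unicast=%d broadcast=%d multicast=%d\n",
           mac_is_bmcast(ucast), mac_is_bmcast(bcast), mac_is_bmcast(mcast));
    return 0;
}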