Example #1
int
vr_arp_input(struct vr_packet *pkt, struct vr_forwarding_md *fmd)
{
    int handled = 1;
    struct vr_arp sarp;

    /* If the packet is VLAN tagged, let the VM handle the ARP packets */
    if ((pkt->vp_type != VP_TYPE_ARP) || (fmd->fmd_vlan != VLAN_ID_INVALID))
        return !handled;

    if (pkt->vp_len < sizeof(struct vr_arp)) {
        vr_pfree(pkt, VP_DROP_INVALID_ARP);
        return handled;
    }

    memcpy(&sarp, pkt_data(pkt), sizeof(struct vr_arp));

    switch (ntohs(sarp.arp_op)) {
    case VR_ARP_OP_REQUEST:
        return vr_handle_arp_request(&sarp, pkt, fmd);

    case VR_ARP_OP_REPLY:
        vr_handle_arp_reply(&sarp, pkt, fmd);
        break;

    default:
        vr_pfree(pkt, VP_DROP_INVALID_ARP);
    }

    return handled;
}
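/*
 * A minimal sketch of the same parse-and-dispatch pattern, with
 * hypothetical "toy_" types standing in for the vRouter ones: the
 * header is bounds-checked and copied into a local struct before use,
 * so the dispatch never reads unaligned packet memory directly.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct toy_arp {
    uint16_t arp_op;                   /* network byte order on the wire */
    /* remaining ARP fields elided */
};

enum { TOY_ARP_OP_REQUEST = 1, TOY_ARP_OP_REPLY = 2 };

static int
toy_arp_input(const uint8_t *data, size_t len)
{
    struct toy_arp sarp;

    if (len < sizeof(sarp))
        return -1;                     /* caller drops the packet */

    memcpy(&sarp, data, sizeof(sarp)); /* aligned local copy */

    switch (ntohs(sarp.arp_op)) {
    case TOY_ARP_OP_REQUEST:
    case TOY_ARP_OP_REPLY:
        return 0;                      /* handled */
    default:
        return -1;                     /* invalid opcode */
    }
}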
static int
agent_rx(struct vr_interface *vif, struct vr_packet *pkt,
        unsigned short vlan_id __attribute__((unused)))
{
    struct agent_hdr *hdr;
    struct vr_interface *agent_vif;
    struct vr_interface_stats *stats = vif_get_stats(vif, pkt->vp_cpu);

    stats->vis_ibytes += pkt_len(pkt);
    stats->vis_ipackets++;

    hdr = (struct agent_hdr *)pkt_pull(pkt, sizeof(struct vr_eth));
    if (!hdr || !pkt_pull(pkt, sizeof(*hdr))) {
        stats->vis_ierrors++;
        vr_pfree(pkt, VP_DROP_PULL);
        return 0;
    }

    /*
     * Update the original (OS visible) packet to point to the
     * l2 header of the injected packet
     */
    vr_pset_data(pkt, pkt->vp_data);
    if (ntohs(hdr->hdr_cmd) & AGENT_CMD_ROUTE) {
        /*
         * XXX
         * A packet with the "route" command from the agent may
         * result in flow setup; this breaks the assumption that
         * all packets for a flow will reach the same CPU. Need a
         * better way to handle this.
         */
        agent_vif = __vrouter_get_interface(vrouter_get(0), 
                                            ntohs(hdr->hdr_ifindex));
        if (!agent_vif) {
            agent_vif = vif;
        }
        pkt->vp_if = agent_vif;
        vr_interface_input(ntohs(hdr->hdr_vrf), agent_vif, pkt, VLAN_ID_INVALID);
    } else {
        vif = __vrouter_get_interface(vrouter_get(0), ntohs(hdr->hdr_ifindex));
        if (!vif) {
            stats->vis_ierrors++;
            vr_pfree(pkt, VP_DROP_INVALID_IF);
            return 0;
        }

        pkt->vp_type = VP_TYPE_AGENT;
        pkt_set_network_header(pkt, pkt->vp_data + sizeof(struct vr_eth));
        pkt_set_inner_network_header(pkt, 
                                     pkt->vp_data + sizeof(struct vr_eth));
        return vif->vif_tx(vif, pkt);
    }

    return 0;
}
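/*
 * The two-stage pull above, sketched with a plain cursor (hypothetical
 * types): the agent header sits immediately after the ethernet header,
 * and each pull both validates the remaining length and advances the
 * data pointer, returning the new start the way pkt_pull() appears to.
 */
#include <stdint.h>
#include <stddef.h>

struct toy_cursor {
    const uint8_t *data;
    size_t len;
};

static const uint8_t *
toy_pull(struct toy_cursor *c, size_t n)
{
    if (c->len < n)
        return NULL;                 /* not enough bytes: caller drops */
    c->data += n;
    c->len -= n;
    return c->data;                  /* new start of the packet data */
}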
struct vr_packet *
pkt_copy(struct vr_packet *pkt, unsigned short off, unsigned short len)
{
    struct vr_packet *pkt_c;
    unsigned short head_space;

    /*
     * one eth header for agent, and one more for packets from
     * tun interfaces
     */
    head_space = (2 * sizeof(struct vr_eth)) + sizeof(struct agent_hdr);
    pkt_c = vr_palloc(head_space + len);
    if (!pkt_c)
        return pkt_c;

    pkt_c->vp_data += head_space;
    pkt_c->vp_tail += head_space;
    if (vr_pcopy(pkt_data(pkt_c), pkt, off, len) < 0) {
        vr_pfree(pkt_c, VP_DROP_MISC);
        return NULL;
    }
    pkt_pull_tail(pkt_c, len);

    pkt_c->vp_if = pkt->vp_if;
    pkt_c->vp_flags = pkt->vp_flags;
    pkt_c->vp_cpu = pkt->vp_cpu;
    pkt_c->vp_network_h = 0;

    return pkt_c;
}
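/*
 * The headroom arithmetic above, reduced to a plain byte buffer
 * (hypothetical helper, not vRouter API): allocate head_space extra
 * bytes and park the data pointer past them, so later stages can push
 * headers in front of the copy without reallocating.
 */
#include <stdlib.h>
#include <string.h>

struct toy_buf {
    unsigned char *base;             /* start of the allocation */
    unsigned char *data;             /* first valid byte */
    size_t len;                      /* valid bytes at data */
};

static struct toy_buf *
toy_copy_with_headroom(const void *src, size_t len, size_t head_space)
{
    struct toy_buf *b = malloc(sizeof(*b));
    if (!b)
        return NULL;

    b->base = malloc(head_space + len);
    if (!b->base) {
        free(b);
        return NULL;
    }

    b->data = b->base + head_space;  /* headroom stays in front */
    memcpy(b->data, src, len);
    b->len = len;
    return b;
}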
Example #4
static int
vif_discard_rx(struct vr_interface *vif, struct vr_packet *pkt,
        unsigned short vlan_id __attribute__((unused)))
{
    vr_pfree(pkt, VP_DROP_INTERFACE_RX_DISCARD);
    return 0;
}
/**
 * Enqueue a packet to the assembler.
 *
 * Executed only from the forwarding lcores.
 */
int
dpdk_fragment_assembler_enqueue(struct vrouter *router, struct vr_packet *pkt,
                                struct vr_forwarding_md *fmd)
{
    int ret;
    unsigned int cpu;
    struct vr_dpdk_lcore *lcore;

    cpu = vr_get_cpu();
    if (cpu >= vr_num_cpus || cpu < VR_DPDK_FWD_LCORE_ID) {
        RTE_LOG(ERR, VROUTER, "%s:%d Enqueue to the assembler can only be "
                "done on forwarding lcores, not on cpu %u\n",
                __FUNCTION__, __LINE__, cpu);
        vr_pfree(pkt, VP_DROP_FRAGMENTS);
        return -EINVAL;
    }

    ret = vr_fragment_enqueue(router,
                              &per_cpu_queues[cpu - VR_DPDK_FWD_LCORE_ID].queue, pkt, fmd);

    if (!ret) {
        lcore = vr_dpdk.lcores[cpu];
        vr_dpdk_lcore_schedule_assembler_work(lcore, dpdk_fragment_assembler,
                                              &per_cpu_queues[cpu - VR_DPDK_FWD_LCORE_ID].queue);
    }

    return 0;
}
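/*
 * The per-CPU queue indexing above, in isolation: forwarding lcores
 * are numbered upward from VR_DPDK_FWD_LCORE_ID, so the queue array is
 * indexed by (cpu - VR_DPDK_FWD_LCORE_ID) and anything below that base
 * (the service lcores) owns no queue. Constants here are illustrative.
 */
#define TOY_FWD_LCORE_ID 2           /* assumed first forwarding lcore */
#define TOY_NUM_CPUS     6

static int
toy_queue_index(unsigned int cpu)
{
    if (cpu >= TOY_NUM_CPUS || cpu < TOY_FWD_LCORE_ID)
        return -1;                   /* no per-CPU assembler queue */
    return (int)(cpu - TOY_FWD_LCORE_ID);
}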
Example #6
/*
 * ARP responses from vhostX need to be cross connected. Nothing
 * needs to be done for ARP responses from VMs, while responses
 * from the fabric need to be cross connected and sent to the agent.
 */
static int
vr_handle_arp_reply(struct vr_arp *sarp, struct vr_packet *pkt,
                    struct vr_forwarding_md *fmd)
{
    struct vr_interface *vif = pkt->vp_if;
    struct vr_packet *cloned_pkt;

    if (vif_mode_xconnect(vif) || vif->vif_type == VIF_TYPE_HOST)
        return vif_xconnect(vif, pkt, fmd);

    if (vif->vif_type != VIF_TYPE_PHYSICAL) {
        if (vif_is_virtual(vif)) {
            vr_preset(pkt);
            return vr_trap(pkt, fmd->fmd_dvrf, AGENT_TRAP_ARP, NULL);
        }
        vr_pfree(pkt, VP_DROP_INVALID_IF);
        return 0;
    }

    cloned_pkt = vr_pclone(pkt);
    if (cloned_pkt) {
        vr_preset(cloned_pkt);
        vif_xconnect(vif, cloned_pkt, fmd);
    }

    return vr_trap(pkt, fmd->fmd_dvrf, AGENT_TRAP_ARP, NULL);
}
Example #7
unsigned int
vr_reinject_packet(struct vr_packet *pkt, struct vr_forwarding_md *fmd)
{
    struct vr_interface *vif = pkt->vp_if;
    int handled;

    if (pkt->vp_nh) {
        /* If nexthop does not have valid data, drop it */
        if (!(pkt->vp_nh->nh_flags & NH_FLAG_VALID)) {
            vr_pfree(pkt, VP_DROP_INVALID_NH);
            return 0;
        }

        return pkt->vp_nh->nh_reach_nh(pkt, pkt->vp_nh, fmd);
    }

    if (vif_is_vhost(vif)) {
        handled = vr_l3_input(pkt, fmd);
        if (!handled)
            vif_drop_pkt(vif, pkt, 1);
        return 0;
    }

    return vr_bridge_input(vif->vif_router, pkt, fmd);
}
Example #8
static int
vr_do_flow_action(struct vrouter *router, struct vr_flow_entry *fe,
        unsigned int index, struct vr_packet *pkt,
        unsigned short proto, struct vr_forwarding_md *fmd)
{
    uint32_t new_stats;

    new_stats = __sync_add_and_fetch(&fe->fe_stats.flow_bytes, pkt_len(pkt));
    if (new_stats < pkt_len(pkt))
        fe->fe_stats.flow_bytes_oflow++;

    new_stats = __sync_add_and_fetch(&fe->fe_stats.flow_packets, 1);
    if (!new_stats) 
        fe->fe_stats.flow_packets_oflow++;

    if (fe->fe_action == VR_FLOW_ACTION_HOLD) {
        if (vr_flow_queue_is_empty(router, fe)) {
            vr_trap_flow(router, fe, pkt, index);
            return vr_enqueue_flow(fe, pkt, proto, fmd);
        } else {
            vr_pfree(pkt, VP_DROP_FLOW_UNUSABLE);
            return 0;
        }
    }

    return vr_flow_action(router, fe, index, pkt, proto, fmd);
}
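/*
 * Self-contained sketch of the counter/overflow-counter pattern above
 * (names hypothetical): the atomic add returns the post-add value, and
 * a 2^32 wrap is detected by comparing that value with what was added,
 * since old + len wraps exactly when the new value is less than len.
 */
#include <stdint.h>

struct toy_flow_stats {
    uint32_t bytes;
    uint16_t bytes_oflow;            /* number of 2^32 wraps */
};

static void
toy_stats_add_bytes(struct toy_flow_stats *s, uint32_t len)
{
    uint32_t new_bytes = __sync_add_and_fetch(&s->bytes, len);
    if (new_bytes < len)             /* sum smaller than addend: wrapped */
        s->bytes_oflow++;
}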
Example #9
static void
vr_flow_init_close(struct vrouter *router, struct vr_flow_entry *flow_e,
        struct vr_packet *pkt, struct vr_forwarding_md *fmd)
{
    unsigned int flow_index;
    unsigned int head_room = sizeof(struct agent_hdr) + sizeof(struct vr_eth);

    struct vr_packet *pkt_c;

    pkt_c = vr_pclone(pkt);
    if (!pkt_c)
        return;

    vr_preset(pkt_c);
    if (vr_pcow(pkt_c, head_room)) {
        vr_pfree(pkt_c, VP_DROP_PCOW_FAIL);
        return;
    }

    flow_index = fmd->fmd_flow_index;
    vr_trap(pkt_c, fmd->fmd_dvrf, AGENT_TRAP_SESSION_CLOSE,
            (void *)&flow_index);

    return;
}
Example #10
static int
vr_enqueue_flow(struct vrouter *router, struct vr_flow_entry *fe,
        struct vr_packet *pkt, unsigned int index,
        struct vr_forwarding_md *fmd)
{
    unsigned int i;
    unsigned short drop_reason = 0;
    struct vr_flow_queue *vfq = fe->fe_hold_list;
    struct vr_packet_node *pnode;

    if (!vfq) {
        drop_reason = VP_DROP_FLOW_UNUSABLE;
        goto drop;
    }

    i = __sync_fetch_and_add(&vfq->vfq_entries, 1);
    if (i >= VR_MAX_FLOW_QUEUE_ENTRIES) {
        drop_reason = VP_DROP_FLOW_QUEUE_LIMIT_EXCEEDED;
        goto drop;
    }

    pnode = &vfq->vfq_pnodes[i];
    /*
     * We cannot cache the nexthop here: caching requires holding a
     * reference to the nexthop, and holding a reference requires a
     * lock, which we cannot take. The only known case that misbehaves
     * without caching is ECMP: when the packet comes from the fabric,
     * the nexthop actually points to a local composite, whereas a
     * route lookup returns a different nexthop, so the ECMP index
     * would pick a bad nexthop. To avoid that, we cache the label and
     * reuse it.
     */
    if (pkt->vp_nh &&
            (pkt->vp_nh->nh_type == NH_VRF_TRANSLATE) &&
            (pkt->vp_nh->nh_flags & NH_FLAG_VNID))
        pnode->pl_flags |= PN_FLAG_LABEL_IS_VNID;

    pkt->vp_nh = NULL;

    pnode->pl_vif_idx = pkt->vp_if->vif_idx;
    if (fmd) {
        pnode->pl_outer_src_ip = fmd->fmd_outer_src_ip;
        pnode->pl_label = fmd->fmd_label;
        if (fmd->fmd_to_me)
            pnode->pl_flags |= PN_FLAG_TO_ME;
    }

    __sync_synchronize();
    pnode->pl_packet = pkt;

    if (!i)
        vr_trap_flow(router, fe, pkt, index);

    return 0;
drop:
    vr_pfree(pkt, drop_reason);
    return 0;
}
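/*
 * The reserve-then-publish idiom above, reduced to its essentials
 * (types hypothetical): each producer atomically reserves a distinct
 * slot index, fills the slot, then issues a full barrier before the
 * store of the packet pointer, so a consumer that observes a non-NULL
 * payload also observes every field written before it.
 */
#define TOY_MAX_ENTRIES 64

struct toy_pnode {
    unsigned int meta;               /* written before publication */
    void *payload;                   /* non-NULL only once published */
};

struct toy_flow_queue {
    unsigned int entries;
    struct toy_pnode nodes[TOY_MAX_ENTRIES];
};

static int
toy_flow_enqueue(struct toy_flow_queue *q, void *payload, unsigned int meta)
{
    unsigned int i = __sync_fetch_and_add(&q->entries, 1);
    if (i >= TOY_MAX_ENTRIES)
        return -1;                   /* caller drops the packet */

    q->nodes[i].meta = meta;         /* fill the slot ... */
    __sync_synchronize();            /* ... fence ... */
    q->nodes[i].payload = payload;   /* ... then publish */
    return 0;
}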
Example #11
static flow_result_t
vr_flow_nat(struct vr_flow_entry *fe,
        struct vr_packet *pkt, struct vr_forwarding_md *fmd)
{
    if (pkt->vp_type == VP_TYPE_IP)
        return vr_inet_flow_nat(fe, pkt, fmd);

    vr_pfree(pkt, VP_DROP_FLOW_ACTION_INVALID);
    return FLOW_CONSUMED;
}
Example #12
static void
vr_fragment_queue_element_free(struct vr_fragment_queue_element *vfqe,
        unsigned int drop_reason)
{
    if (vfqe->fqe_pnode.pl_packet) {
        vr_pfree(vfqe->fqe_pnode.pl_packet, drop_reason);
    }

    vr_free(vfqe, VR_FRAGMENT_QUEUE_ELEMENT_OBJECT);
    return;
}
Example #13
void
vif_drop_pkt(struct vr_interface *vif, struct vr_packet *pkt, bool input)
{
    struct vr_interface_stats *stats = vif_get_stats(vif, pkt->vp_cpu);

    if (input)
        stats->vis_ierrors++;
    else
        stats->vis_oerrors++;
    vr_pfree(pkt, VP_DROP_INTERFACE_DROP);
    return;
}
Example #14
static int
vr_enqueue_flow(struct vr_flow_entry *fe, struct vr_packet *pkt,
        unsigned short proto, struct vr_forwarding_md *fmd)
{
    unsigned int i = 0;
    unsigned short drop_reason = 0;
    struct vr_list_node **head = &fe->fe_hold_list.node_p;
    struct vr_packet_node *pnode;

    while (*head && ++i) {
        head = &(*head)->node_n;
    }

    if (i >= VR_MAX_FLOW_QUEUE_ENTRIES) {
        drop_reason = VP_DROP_FLOW_QUEUE_LIMIT_EXCEEDED;
        goto drop;
    }

    pnode = (struct vr_packet_node *)vr_zalloc(sizeof(struct vr_packet_node));
    if (!pnode) {
        drop_reason = VP_DROP_FLOW_NO_MEMORY;
        goto drop;
    }

    /*
     * We cannot cache the nexthop here: caching requires holding a
     * reference to the nexthop, and holding a reference requires a
     * lock, which we cannot take. The only known case that misbehaves
     * without caching is ECMP: when the packet comes from the fabric,
     * the nexthop actually points to a local composite, whereas a
     * route lookup returns a different nexthop, so the ECMP index
     * would pick a bad nexthop. To avoid that, we cache the label and
     * reuse it.
     */
    pkt->vp_nh = NULL;

    pnode->pl_packet = pkt;
    pnode->pl_proto = proto;
    pnode->pl_vif_idx = pkt->vp_if->vif_idx;
    if (fmd) {
        pnode->pl_outer_src_ip = fmd->fmd_outer_src_ip;
        pnode->pl_label = fmd->fmd_label;
    }

    *head = &pnode->pl_node;

    return 0;

drop:
    vr_pfree(pkt, drop_reason);
    return 0;
}
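/*
 * The pointer-to-pointer tail append used above, as a toy list
 * (hypothetical types): walking with a struct node ** means the final
 * *head = n works identically for empty and non-empty lists, with the
 * node count taken on the way to the tail.
 */
#include <stdlib.h>

struct toy_list_node {
    struct toy_list_node *next;
    int value;
};

static int
toy_list_append(struct toy_list_node **list, int value, unsigned int max)
{
    unsigned int i = 0;
    struct toy_list_node **head = list;
    struct toy_list_node *n;

    while (*head && ++i)             /* count nodes while seeking the tail */
        head = &(*head)->next;

    if (i >= max)
        return -1;                   /* queue limit exceeded */

    n = calloc(1, sizeof(*n));
    if (!n)
        return -1;                   /* no memory */

    n->value = value;
    *head = n;                       /* empty and non-empty lists alike */
    return 0;
}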
static int
vr_default_input(struct vr_packet *pkt)
{
    struct vr_interface *vif = pkt->vp_if;
    struct vrouter *router = vif->vif_router;

    if (router->vr_host_if && (vif != router->vr_host_if)) {
        vr_preset(pkt);
        return router->vr_host_if->vif_tx(router->vr_host_if, pkt);
    }
    
    vr_pfree(pkt, VP_DROP_NOWHERE_TO_GO);
    return 0;
}
Example #16
static int
vr_flow_lookup(struct vrouter *router, unsigned short vrf,
        struct vr_flow_key *key, struct vr_packet *pkt, unsigned short proto,
        struct vr_forwarding_md *fmd)
{
    unsigned int fe_index;
    struct vr_flow_entry *flow_e;

    pkt->vp_flags |= VP_FLAG_FLOW_SET;

    flow_e = vr_find_flow(router, key, &fe_index);
    if (!flow_e) {
        if (pkt->vp_nh &&
            (pkt->vp_nh->nh_flags & NH_FLAG_RELAXED_POLICY))
            return vr_flow_forward(vrf, pkt, proto, fmd);

        if (vr_flow_table_hold_count(router) > VR_MAX_FLOW_TABLE_HOLD_COUNT) {
            vr_pfree(pkt, VP_DROP_FLOW_UNUSABLE);
            return 0;
        }

        flow_e = vr_find_free_entry(router, key, &fe_index);
        if (!flow_e) {
            vr_pfree(pkt, VP_DROP_FLOW_TABLE_FULL);
            return 0;
        }

        flow_e->fe_vrf = vrf;
        /* mark as hold */
        vr_flow_entry_set_hold(router, flow_e);
        vr_do_flow_action(router, flow_e, fe_index, pkt, proto, fmd);
        return 0;
    }

    return vr_do_flow_action(router, flow_e, fe_index, pkt, proto, fmd);
}
Example #17
static void
vr_arp_proxy(struct vr_arp *sarp, struct vr_packet *pkt,
        struct vr_forwarding_md *fmd, unsigned char *dmac)
{
    struct vr_eth *eth;
    struct vr_arp *arp;
    struct vr_forwarding_md fmd_new;
    struct vr_interface *vif = pkt->vp_if;

    eth = (struct vr_eth *)pkt_push(pkt, sizeof(*eth));
    if (!eth) {
        vr_pfree(pkt, VP_DROP_PUSH);
        return;
    }

    memcpy(eth->eth_dmac, sarp->arp_sha, VR_ETHER_ALEN);
    memcpy(eth->eth_smac, dmac, VR_ETHER_ALEN);
    eth->eth_proto = htons(VR_ETH_PROTO_ARP);

    arp = (struct vr_arp *)(pkt_data(pkt) + sizeof(*eth));
    arp->arp_hw = htons(VR_ARP_HW_TYPE_ETHER);
    arp->arp_proto = htons(VR_ETH_PROTO_IP);
    arp->arp_hwlen = VR_ETHER_ALEN;
    arp->arp_protolen = VR_IP_ADDRESS_LEN;
    arp->arp_op = htons(VR_ARP_OP_REPLY);
    memcpy(arp->arp_sha, dmac, VR_ETHER_ALEN);
    memcpy(arp->arp_dha, sarp->arp_sha, VR_ETHER_ALEN);
    memcpy(&arp->arp_dpa, &sarp->arp_spa, sizeof(sarp->arp_spa));
    memcpy(&arp->arp_spa, &sarp->arp_dpa, sizeof(sarp->arp_dpa));

    vr_init_forwarding_md(&fmd_new);
    fmd_new.fmd_dvrf = fmd->fmd_dvrf;
    vr_pkt_type(pkt, 0, &fmd_new);

    /*
     * XXX: for vcp ports, there won't be bridge table entries. To avoid
     * doing vr_bridge_input, we check for the flag NO_ARP_PROXY and,
     * if set, send directly out on that interface.
     */
    if (vif_is_vhost(vif) ||
            (vif->vif_flags & VIF_FLAG_NO_ARP_PROXY)) {
        vif->vif_tx(vif, pkt, fmd);
    } else {
        vr_bridge_input(vif->vif_router, pkt, &fmd_new);
    }

    return;
}
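/*
 * For reference, a sketch of the ARP layout the proxy code above
 * assumes (field names mirror the vr_arp usage; the real definition
 * lives in the vRouter headers). The reply swaps the request's sender
 * fields into the target fields: sha/spa become dha/dpa.
 */
#include <stdint.h>

struct toy_arp {
    uint16_t arp_hw;                 /* hardware type: ethernet */
    uint16_t arp_proto;              /* protocol type: IPv4 */
    uint8_t  arp_hwlen;              /* 6 for ethernet */
    uint8_t  arp_protolen;           /* 4 for IPv4 */
    uint16_t arp_op;                 /* request = 1, reply = 2 */
    uint8_t  arp_sha[6];             /* sender hardware address */
    uint32_t arp_spa;                /* sender protocol address */
    uint8_t  arp_dha[6];             /* target hardware address */
    uint32_t arp_dpa;                /* target protocol address */
} __attribute__((packed));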
Example #18
static void
vr_flush_entry(struct vrouter *router, struct vr_flow_entry *fe,
        struct vr_flow_md *flmd, struct vr_forwarding_md *fmd)
{
    struct vr_list_node *head;
    struct vr_packet_node *pnode;
    struct vr_packet *pkt;
    struct vr_interface *vif;

    head = fe->fe_hold_list.node_p;
    fe->fe_hold_list.node_p = NULL;

    while (head) {
        pnode = (struct vr_packet_node *)head;
        if (fmd) {
            fmd->fmd_outer_src_ip = pnode->pl_outer_src_ip;
            fmd->fmd_label = pnode->pl_label;
        }

        pkt = pnode->pl_packet;
        /*
         * This is only a security check, not a catch-all check. One
         * note of caution: do not access pkt->vp_if until the if block
         * is successfully bypassed.
         */
        vif = __vrouter_get_interface(router, pnode->pl_vif_idx);
        if (!vif || (pkt->vp_if != vif)) {
            vr_pfree(pkt, VP_DROP_INVALID_IF);
            goto loop_continue;
        }

        if (!pkt->vp_nh) {
            if (vif_is_fabric(pkt->vp_if) && fmd &&
                    (fmd->fmd_label >= 0)) {
                pkt->vp_nh = __vrouter_get_label(router, fmd->fmd_label);
            }
        }

        vr_flow_action(router, fe, flmd->flmd_index, pkt,
                pnode->pl_proto, fmd);

loop_continue:
        head = pnode->pl_node.node_n;
        vr_free(pnode);
    }

    return;
}
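/*
 * The walk above frees each node after dispatching its packet; the
 * essential discipline, shown as a toy (hypothetical type), is reading
 * the next pointer before the free, since the node memory is invalid
 * afterwards.
 */
#include <stdlib.h>

struct toy_hold_node {
    struct toy_hold_node *next;
};

static void
toy_flush_list(struct toy_hold_node *head)
{
    struct toy_hold_node *next;

    while (head) {
        next = head->next;           /* read before the node is freed */
        free(head);
        head = next;
    }
}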
Example #19
static int
vr_handle_arp_request(struct vr_arp *sarp, struct vr_packet *pkt,
                      struct vr_forwarding_md *fmd)
{
    bool handled = true;
    unsigned char dmac[VR_ETHER_ALEN];
    mac_response_t arp_result;

    struct vr_packet *pkt_c;
    struct vr_interface *vif = pkt->vp_if;

    arp_result = vif->vif_mac_request(vif, pkt, fmd, dmac);
    switch (arp_result) {
    case MR_PROXY:
        vr_arp_proxy(sarp, pkt, fmd, dmac);
        break;

    case MR_XCONNECT:
        vif_xconnect(pkt->vp_if, pkt, fmd);
        break;

    case MR_TRAP_X:
        pkt_c = vr_pclone(pkt);
        if (pkt_c)
            vif_xconnect(pkt->vp_if, pkt_c, fmd);

        vr_trap(pkt, fmd->fmd_dvrf, AGENT_TRAP_ARP, NULL);
        break;

    case MR_TRAP:
        vr_trap(pkt, fmd->fmd_dvrf, AGENT_TRAP_ARP, NULL);
        break;

    case MR_DROP:
        vr_pfree(pkt, VP_DROP_INVALID_ARP);
        break;

    case MR_FLOOD:
    default:
        handled = false;
        break;
    }

    return handled;
}
Example #20
static int
vr_flow_forward(unsigned short vrf, struct vr_packet *pkt,
        unsigned short proto, struct vr_forwarding_md *fmd)
{
    struct vr_interface *vif = pkt->vp_if;
    struct vrouter *router = vif->vif_router;

    if (proto != VR_ETH_PROTO_IP) {
        vr_pfree(pkt, VP_DROP_FLOW_INVALID_PROTOCOL);
        return 0;
    }

    if (pkt->vp_nh)
        return nh_output(vrf, pkt, pkt->vp_nh, fmd);

    pkt_set_data(pkt, pkt->vp_network_h);
    return vr_ip_input(router, vrf, pkt, fmd);
}
Example #21
void
vr_fragment_queue_free(struct vr_fragment_queue *queue)
{
    struct vr_fragment_queue_element *vfqe, *next;

    vfqe = queue->vfq_tail;
    queue->vfq_tail = NULL;
    while (vfqe) {
        next = vfqe->fqe_next;
        if (vfqe->fqe_pnode.pl_packet)
            vr_pfree(vfqe->fqe_pnode.pl_packet, VP_DROP_MISC);
        vfqe->fqe_pnode.pl_packet = NULL;
        vr_free(vfqe, VR_FRAGMENT_QUEUE_ELEMENT_OBJECT);
        vfqe = next;
    }

    return;
}
Example #22
unsigned int
vr_bridge_input(struct vrouter *router, unsigned short vrf,
                struct vr_packet *pkt, struct vr_forwarding_md *fmd)
{
    struct vr_route_req rt;
    struct vr_nexthop *nh;
    struct vr_forwarding_md cmd;
    char bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    char *mac;

    /* First mark the packet as L2 */
    pkt->vp_type = VP_TYPE_L2;

    mac = (char *)pkt_data(pkt);
    rt.rtr_req.rtr_mac_size = VR_ETHER_ALEN;
    rt.rtr_req.rtr_mac = (int8_t *)mac;
    /* If multicast L2 packet, use broadcast composite nexthop */
    if (IS_MAC_BMCAST(mac)) {
        rt.rtr_req.rtr_mac = (int8_t *)bcast_mac;
        pkt->vp_flags |= VP_FLAG_MULTICAST;
    }

    rt.rtr_req.rtr_vrf_id = vrf;
    nh = vr_bridge_lookup(vrf, &rt, pkt);
    if (nh) {

        /*
         * If there is a label attached to this bridge entry, add the
         * label.
         */
        if (rt.rtr_req.rtr_label_flags & VR_RT_LABEL_VALID_FLAG) {
            if (!fmd) {
                vr_init_forwarding_md(&cmd);
                fmd = &cmd;
            }
            fmd->fmd_label = rt.rtr_req.rtr_label;
        }
 
        return nh_output(vrf, pkt, nh, fmd);
    }

    vr_pfree(pkt, VP_DROP_INVALID_NH);
    return 0;
}
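/*
 * A plausible expansion of the IS_MAC_BMCAST() test above
 * (illustrative; the real macro is defined in the vRouter headers):
 * a MAC is multicast when the group bit, the least-significant bit of
 * the first octet, is set; broadcast is the all-ones special case.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
toy_mac_bmcast(const uint8_t mac[6])
{
    return (mac[0] & 0x01) != 0;     /* group bit covers broadcast too */
}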
Example #23
int
vr_gro_input(struct vr_packet *pkt, struct vr_nexthop *nh)
{
    unsigned short *nh_id;
    int handled = 1;

    if (!vr_gro_process)
        return !handled;

    nh_id = (unsigned short *)pkt_push(pkt, sizeof(*nh_id));
    if (!nh_id) {
        vr_pfree(pkt, VP_DROP_PUSH);
        return handled;
    }

    *nh_id = nh->nh_id;
    handled = vr_gro_process(pkt, nh->nh_dev, (nh->nh_family == AF_BRIDGE));
    return handled;
}
Example #24
int
vr_trap(struct vr_packet *pkt, unsigned short trap_vrf,
        unsigned short trap_reason, void *trap_param)
{
    struct vr_interface *vif = pkt->vp_if;
    struct vrouter *router = vif->vif_router;
    struct agent_send_params params;

    if (router->vr_agent_if && router->vr_agent_if->vif_send) {
        params.trap_vrf = trap_vrf;
        params.trap_reason = trap_reason;
        params.trap_param = trap_param;
        return router->vr_agent_if->vif_send(router->vr_agent_if, pkt,
                        &params);
    } else {
        vr_pfree(pkt, VP_DROP_TRAP_NO_IF);
    }

    return 0;
}
int
lh_enqueue_to_assembler(struct vrouter *router, struct vr_packet *pkt,
        struct vr_forwarding_md *fmd)
{
    int ret;
    unsigned int cpu;

    cpu = vr_get_cpu();
    if (cpu >= vr_num_cpus) {
        printk("cpu is %u, but max cpu is only %u\n", cpu, vr_num_cpus);
        vr_pfree(pkt, VP_DROP_FRAGMENTS);
        return -EINVAL;
    }

    ret = vr_fragment_enqueue(router, &vr_lfq_pcpu_queues[cpu].vrlfq_queue,
            pkt, fmd);
    if (!ret)
        queue_work(vr_linux_assembler_wq, &vr_lfq_pcpu_queues[cpu].vrlfq_work);

    return 0;
}
unsigned int
vr_arp_input(struct vrouter *router, unsigned short vrf,
        struct vr_packet *pkt)
{
    struct vr_arp sarp;

    memcpy(&sarp, pkt_data(pkt), sizeof(struct vr_arp));
    switch (ntohs(sarp.arp_op)) {
    case VR_ARP_OP_REQUEST:
        vr_handle_arp_request(router, vrf, &sarp, pkt);
        break;

    case VR_ARP_OP_REPLY:
        vr_handle_arp_reply(router, vrf, &sarp, pkt);
        break;

    default:
        vr_pfree(pkt, VP_DROP_INVALID_ARP);
    }

    return 0;
}
/*
 * ARP responses from vhostX need to be cross connected. Nothing
 * needs to be done for ARP responses from VMs, while responses
 * from the fabric need to be cross connected and sent to the agent.
 */
static int
vr_handle_arp_reply(struct vrouter *router, unsigned short vrf,
        struct vr_arp *sarp, struct vr_packet *pkt)
{
    struct vr_interface *vif = pkt->vp_if;
    struct vr_packet *cloned_pkt;

    if (vif_mode_xconnect(vif) || vif->vif_type == VIF_TYPE_HOST)
        return vif_xconnect(vif, pkt);

    if (vif->vif_type != VIF_TYPE_PHYSICAL) {
        vr_pfree(pkt, VP_DROP_INVALID_IF);
        return 0;
    }

    cloned_pkt = vr_pclone(pkt);
    if (cloned_pkt) {
        vr_preset(cloned_pkt);
        vif_xconnect(vif, cloned_pkt);
    }

    return vr_trap(pkt, vrf, AGENT_TRAP_ARP, NULL);
}
int
vr_mpls_input(struct vrouter *router, struct vr_packet *pkt,
        struct vr_forwarding_md *fmd)
{
    unsigned int label;
    unsigned short vrf;
    struct vr_nexthop *nh;
    unsigned char *data;
    struct vr_ip *ip;
    unsigned short drop_reason = 0;

    label = ntohl(*(unsigned int *)pkt_data(pkt));
    label >>= VR_MPLS_LABEL_SHIFT;
    if (label >= router->vr_max_labels) {
        drop_reason = VP_DROP_INVALID_LABEL;
        goto dropit;
    }

    /* Set network header to inner ip header only if unicast */
    if (vr_mpls_is_label_mcast(label) == true) {
        vr_mcast_mpls_input(router, pkt, fmd);
        return 0;
    }

    /* drop the TOStack label */
    data = pkt_pull(pkt, VR_MPLS_HDR_LEN);
    if (!data) {
        drop_reason = VP_DROP_PULL;
        goto dropit;
    }

    /* this is the new network header and the inner network header too */
    pkt_set_network_header(pkt, pkt->vp_data);
    pkt_set_inner_network_header(pkt, pkt->vp_data);
    pkt->vp_type = VP_TYPE_IP;

    nh = router->vr_ilm[label];
    if (!nh) {
        drop_reason = VP_DROP_INVALID_NH;
        goto dropit;
    }

    /*
     * We are typically looking at interface nexthops, and hence we will
     * hit the vrf of the destination device. But labels can also point
     * to composite nexthops (ECMP being a case in point), in which case
     * we will take the vrf from the nexthop. When everything else
     * fails, we forward the packet in the vrf in which it came, i.e.
     * the fabric.
     */
    if (nh->nh_vrf >= 0)
        vrf = nh->nh_vrf;
    else if (nh->nh_dev)
        vrf = nh->nh_dev->vif_vrf;
    else
        vrf = pkt->vp_if->vif_vrf;

    ip = (struct vr_ip *)pkt_data(pkt);
    if (ip->ip_csum == VR_DIAG_IP_CSUM) {
        pkt->vp_flags |= VP_FLAG_DIAG;
    } else if (vr_perfr) {
        pkt->vp_flags |= VP_FLAG_GRO;
    }

    nh_output(vrf, pkt, nh, fmd);

    return 0;

dropit:
    vr_pfree(pkt, drop_reason);
    return 0;
}
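/*
 * The label extraction above, in isolation: a standard MPLS shim word
 * carries 20 bits of label, 3 bits of traffic class, a bottom-of-stack
 * bit and 8 bits of TTL, so after ntohl() the label occupies the top
 * 20 bits and the shift is 12 (the assumed value of
 * VR_MPLS_LABEL_SHIFT). Helper names here are hypothetical.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define TOY_MPLS_LABEL_SHIFT 12

static uint32_t
toy_mpls_label(const void *shim)     /* first 4 bytes of the MPLS header */
{
    uint32_t word;

    memcpy(&word, shim, sizeof(word)); /* avoid an unaligned load */
    return ntohl(word) >> TOY_MPLS_LABEL_SHIFT;
}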
Example #29
static int
vif_discard_tx(struct vr_interface *vif, struct vr_packet *pkt)
{
    vr_pfree(pkt, VP_DROP_INTERFACE_TX_DISCARD);
    return 0;
}
Example #30
static int
agent_send(struct vr_interface *vif, struct vr_packet *pkt,
                void *ifspecific)
{
    int len;
    struct agent_hdr *hdr;
    unsigned char *rewrite;
    struct vr_interface_stats *stats = vif_get_stats(vif, pkt->vp_cpu);
    struct vr_packet *pkt_c;
    struct agent_send_params *params =
        (struct agent_send_params *)ifspecific;

    vr_preset(pkt);

    if (pkt_head_space(pkt) < AGENT_PKT_HEAD_SPACE) {
        len = pkt_len(pkt);

        if (agent_trap_may_truncate(params->trap_reason)) {
            len = MINIMUM(len, VR_AGENT_MIN_PACKET_LEN);
        }

        pkt_c = pkt_copy(pkt, 0, len);
        if (pkt_c) {
            vr_pfree(pkt, VP_DROP_DUPLICATED);
            pkt = pkt_c;
        }
    }

    hdr = (struct agent_hdr *)pkt_push(pkt, sizeof(struct agent_hdr));
    if (!hdr)
        goto drop;

    hdr->hdr_ifindex = htons(pkt->vp_if->vif_idx);
    hdr->hdr_vrf = htons(params->trap_vrf);
    hdr->hdr_cmd = htons(params->trap_reason);

    switch (params->trap_reason) {
    case AGENT_TRAP_FLOW_MISS:
    case AGENT_TRAP_ECMP_RESOLVE:
    case AGENT_TRAP_SOURCE_MISMATCH:
        if (params->trap_param)
            hdr->hdr_cmd_param = htonl(*(unsigned int *)(params->trap_param));
        break;

    case AGENT_TRAP_DIAG:
        if (params->trap_param)
            hdr->hdr_cmd_param = htonl(*(unsigned int *)(params->trap_param));
        break;

    default:
        hdr->hdr_cmd_param = 0;
        break;
    }

    rewrite = pkt_push(pkt, VR_ETHER_HLEN);
    if (!rewrite)
        goto drop;

    memcpy(rewrite, vif->vif_rewrite, VR_ETHER_HLEN);
    return vif->vif_tx(vif, pkt);

drop:
    stats->vis_oerrors++;
    vr_pfree(pkt, VP_DROP_PUSH);
    return 0;
}