static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
					struct sk_buff *nskb)
{
	struct ethhdr *eth;

	eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
	skb_reset_mac_header(nskb);
	ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
	ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
	eth->h_proto = eth_hdr(oldskb)->h_proto;
	skb_pull(nskb, ETH_HLEN);
}
/**
 * Removes the cloud filter. Ensures that nothing is adding buffers to the RX
 * queue before disabling RX on the device.
 */
void vxlan_unlink(struct vhost_dev *vdev)
{
	unsigned i = 0, rx_count;
	int ret;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_eth_tunnel_filter_conf tunnel_filter_conf;

	if (vdev->ready == DEVICE_RX) {
		memset(&tunnel_filter_conf, 0,
			sizeof(struct rte_eth_tunnel_filter_conf));

		ether_addr_copy(&ports_eth_addr[0],
				&tunnel_filter_conf.outer_mac);
		ether_addr_copy(&vdev->mac_address,
				&tunnel_filter_conf.inner_mac);
		tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q];
		tunnel_filter_conf.filter_type = tep_filter_type[filter_idx];

		if (tep_filter_type[filter_idx] ==
			RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID)
			tunnel_filter_conf.inner_vlan = INNER_VLAN_ID;

		tunnel_filter_conf.queue_id = vdev->rx_q;
		tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;

		ret = rte_eth_dev_filter_ctrl(ports[0],
				RTE_ETH_FILTER_TUNNEL,
				RTE_ETH_FILTER_DELETE,
				&tunnel_filter_conf);
		if (ret) {
			RTE_LOG(ERR, VHOST_DATA,
				"%d Failed to remove device MAC address from cloud filter\n",
				vdev->rx_q);
			return;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			vdev->mac_address.addr_bytes[i] = 0;

		/* Clear out the receive buffers */
		rx_count = rte_eth_rx_burst(ports[0],
				(uint16_t)vdev->rx_q,
				pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)vdev->rx_q,
					pkts_burst, MAX_PKT_BURST);
		}
		vdev->ready = DEVICE_MAC_LEARNING;
	}
}
void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
{
	struct sk_buff *skb;
	char *eth_fr;
	int fr_len;
	struct fip_vlan *vlan;
#define MY_FIP_ALL_FCF_MACS	((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
	static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS;

	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	fr_len = sizeof(*vlan);
	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	memset(vlan, 0, sizeof(*vlan));
	ether_addr_copy(vlan->eth.h_source, qedf->mac);
	ether_addr_copy(vlan->eth.h_dest, my_fcoe_all_fcfs);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	ether_addr_copy(vlan->desc.mac.fd_mac, qedf->mac);

	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(qedf->lport->wwnn, &vlan->desc.wwnn.fd_wwn);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "Sending FIP VLAN request.");

	if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
		QEDF_WARN(&(qedf->dbg_ctx),
			  "Cannot send vlan request because link is not up.\n");
		kfree_skb(skb);
		return;
	}
	qed_ops->ll2->start_xmit(qedf->cdev, skb);
}
static inline void app_pkt_metadata_flush(struct rte_mbuf *pkt)
{
	struct app_pkt_metadata *pkt_meta =
		(struct app_pkt_metadata *)RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
	struct ether_hdr *ether_hdr = (struct ether_hdr *)
		rte_pktmbuf_prepend(pkt, (uint16_t)sizeof(struct ether_hdr));

	ether_addr_copy(&pkt_meta->nh_arp, &ether_hdr->d_addr);
	ether_addr_copy(&local_ether_addr, &ether_hdr->s_addr);
	ether_hdr->ether_type = rte_bswap16(ETHER_TYPE_IPv4);
	pkt->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
}
static void
nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
		       struct nfp_flower_mac_mpls *msk,
		       struct tc_cls_flower_offload *flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);

	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		/* Populate mac frame. */
		ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
		ether_addr_copy(ext->mac_src, &match.key->src[0]);
		ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
		ether_addr_copy(msk->mac_src, &match.mask->src[0]);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match;
		u32 t_mpls;

		flow_rule_match_mpls(rule, &match);
		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;
		ext->mpls_lse = cpu_to_be32(t_mpls);
		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) |
			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) |
			 NFP_FLOWER_MASK_MPLS_Q;
		msk->mpls_lse = cpu_to_be32(t_mpls);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
		 * bit, which indicates an mpls ether type but without any
		 * mpls fields.
		 */
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
		    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
			ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
			msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
		}
	}
}
static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
				    struct ba_record *pBA,
				    enum tr_select TxRxSelect, u16 ReasonCode)
{
	union delba_param_set DelbaParamSet;
	struct sk_buff *skb = NULL;
	struct rtllib_hdr_3addr *Delba = NULL;
	u8 *tag = NULL;
	u16 len = 6 + ieee->tx_headroom;

	if (net_ratelimit())
		netdev_dbg(ieee->dev, "%s(): ReasonCode(%d) sent to: %pM\n",
			   __func__, ReasonCode, dst);

	memset(&DelbaParamSet, 0, 2);

	DelbaParamSet.field.Initiator = (TxRxSelect == TX_DIR) ? 1 : 0;
	DelbaParamSet.field.TID = pBA->BaParamSet.field.TID;

	skb = dev_alloc_skb(len + sizeof(struct rtllib_hdr_3addr));
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, ieee->tx_headroom);

	Delba = (struct rtllib_hdr_3addr *)
		skb_put(skb, sizeof(struct rtllib_hdr_3addr));

	ether_addr_copy(Delba->addr1, dst);
	ether_addr_copy(Delba->addr2, ieee->dev->dev_addr);
	ether_addr_copy(Delba->addr3, ieee->current_network.bssid);
	Delba->frame_ctl = cpu_to_le16(RTLLIB_STYPE_MANAGE_ACT);

	tag = (u8 *)skb_put(skb, 6);

	*tag++ = ACT_CAT_BA;
	*tag++ = ACT_DELBA;

	put_unaligned_le16(DelbaParamSet.shortData, tag);
	tag += 2;

	put_unaligned_le16(ReasonCode, tag);
	tag += 2;

#ifdef VERBOSE_DEBUG
	print_hex_dump_bytes("rtllib_DELBA(): ", DUMP_PREFIX_NONE, skb->data,
			     skb->len);
#endif
	return skb;
}
/**
 * batadv_send_skb_packet() - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the broadcast packet
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given neighbor or broadcast it
 * using the specified interface. Either hard_iface or neigh_node must be not
 * NULL.
 * If neigh_node is NULL, then the packet is broadcasted using hard_iface,
 * otherwise it is sent as unicast to the given neighbor.
 *
 * Regardless of the return value, the skb is consumed.
 *
 * Return: A negative errno code is returned on a failure. A success does not
 * guarantee the frame will be transmitted as it may be dropped due
 * to congestion or traffic shaping.
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;
	int ret;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	ret = dev_queue_xmit(skb);
	return net_xmit_eval(ret);

send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_skbmod *d = to_skbmod(a);
	int action;
	struct tcf_skbmod_params *p;
	u64 flags;
	int err;

	tcf_lastuse_update(&d->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);

	/* XXX: if you are going to edit more fields beyond ethernet header
	 * (example when you add IP header replacement or vlan swap)
	 * then MAX_EDIT_LEN needs to change appropriately
	 */
	err = skb_ensure_writable(skb, MAX_EDIT_LEN);
	if (unlikely(err)) {
		/* best policy is to drop on the floor */
		qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	rcu_read_lock();
	action = READ_ONCE(d->tcf_action);
	if (unlikely(action == TC_ACT_SHOT)) {
		qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
		rcu_read_unlock();
		return action;
	}

	p = rcu_dereference(d->skbmod_p);
	flags = p->flags;
	if (flags & SKBMOD_F_DMAC)
		ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst);
	if (flags & SKBMOD_F_SMAC)
		ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src);
	if (flags & SKBMOD_F_ETYPE)
		eth_hdr(skb)->h_proto = p->eth_type;
	rcu_read_unlock();

	if (flags & SKBMOD_F_SWAPMAC) {
		u16 tmpaddr[ETH_ALEN / 2]; /* ether_addr_copy() requirement */
		/*XXX: I am sure we can come up with more efficient swapping*/
		ether_addr_copy((u8 *)tmpaddr, eth_hdr(skb)->h_dest);
		ether_addr_copy(eth_hdr(skb)->h_dest, eth_hdr(skb)->h_source);
		ether_addr_copy(eth_hdr(skb)->h_source, (u8 *)tmpaddr);
	}

	return action;
}
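/* Sketch only, prompted by the XXX comment above: instead of three
 * ether_addr_copy() calls through a temporary buffer, the swap could exchange
 * the three 16-bit words of each address in place. The helper name
 * eth_hdr_swap_addrs() is hypothetical (not a kernel API); it relies on the
 * same 16-bit alignment of the Ethernet header that ether_addr_copy()
 * already requires.
 */
static inline void eth_hdr_swap_addrs(struct ethhdr *eth)
{
	u16 *d = (u16 *)eth->h_dest;
	u16 *s = (u16 *)eth->h_source;
	int i;

	for (i = 0; i < ETH_ALEN / 2; i++)
		swap(d[i], s[i]);
}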
static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
				       struct ip_tunnel_key *encap_key,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 *encap_record_handle)
{
	struct hwrm_cfa_encap_record_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_encap_record_alloc_input req = { 0 };
	struct hwrm_cfa_encap_data_vxlan *encap =
			(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
	struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
				(struct hwrm_vxlan_ipv4_hdr *)encap->l3;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);

	req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;

	ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
	ether_addr_copy(encap->src_mac_addr, l2_info->smac);
	if (l2_info->num_vlans) {
		encap->num_vlan_tags = l2_info->num_vlans;
		encap->ovlan_tci = l2_info->inner_vlan_tci;
		encap->ovlan_tpid = l2_info->inner_vlan_tpid;
	}

	encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
	encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
	encap_ipv4->ttl = encap_key->ttl;

	encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
	encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
	encap_ipv4->protocol = IPPROTO_UDP;

	encap->dst_port = encap_key->tp_dst;
	encap->vni = tunnel_id_to_key32(encap_key->tun_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*encap_record_handle = resp->encap_record_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc)
		rc = -EIO;
	return rc;
}
static int
qed_configure_filter_ucast(struct ecore_dev *edev,
			   struct qed_filter_ucast_params *params)
{
	struct ecore_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(edev, true,
			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = ECORE_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = ECORE_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = ECORE_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(edev, true, "Unknown unicast filter type %d\n",
			  params->type);
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = ECORE_FILTER_MAC_VLAN;
		ether_addr_copy((struct ether_addr *)&params->mac,
				(struct ether_addr *)&ucast.mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = ECORE_FILTER_MAC;
		ether_addr_copy((struct ether_addr *)&params->mac,
				(struct ether_addr *)&ucast.mac);
	} else {
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
}
static void
__mlxsw_sp_dpipe_table_host_entry_fill(struct devlink_dpipe_entry *entry,
				       struct mlxsw_sp_rif *rif,
				       unsigned char *ha, void *dip)
{
	struct devlink_dpipe_value *value;
	u32 *rif_value;
	u8 *ha_value;

	/* Set Match RIF index */
	value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_RIF];
	rif_value = value->value;
	*rif_value = mlxsw_sp_rif_index(rif);
	value->mapping_value = mlxsw_sp_rif_dev_ifindex(rif);
	value->mapping_valid = true;

	/* Set Match DIP */
	value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_HOST_MATCH_DIP];
	memcpy(value->value, dip, value->value_size);

	/* Set Action DMAC */
	value = entry->action_values;
	ha_value = value->value;
	ether_addr_copy(ha_value, ha);
}
/**
 * batadv_backbone_hash_find - looks for a backbone gateway in the hash
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * Returns backbone gateway if found or NULL otherwise.
 */
static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv, uint8_t *addr,
			  unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	ether_addr_copy(search_entry.orig, addr);
	search_entry.vid = vid;

	index = batadv_choose_backbone_gw(&search_entry, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
						&search_entry))
			continue;

		if (!atomic_inc_not_zero(&backbone_gw->refcount))
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}
/*
 * This function initializes the private structure parameters.
 *
 * The following wait queues are initialized -
 *      - IOCTL wait queue
 *      - Command wait queue
 *      - Statistics wait queue
 *
 * ...and the following default parameters are set -
 *      - Current key index     : Set to 0
 *      - Rate index            : Set to auto
 *      - Media connected       : Set to disconnected
 *      - Adhoc link sensed     : Set to false
 *      - Nick name             : Set to null
 *      - Number of Tx timeout  : Set to 0
 *      - Device address        : Set to current address
 *      - Rx histogram statistic : Set to 0
 *
 * In addition, the CFG80211 work queue is also created.
 */
void mwifiex_init_priv_params(struct mwifiex_private *priv,
			      struct net_device *dev)
{
	dev->netdev_ops = &mwifiex_netdev_ops;
	dev->destructor = free_netdev;
	/* Initialize private structure */
	priv->current_key_index = 0;
	priv->media_connected = false;
	memset(priv->mgmt_ie, 0,
	       sizeof(struct mwifiex_ie) * MAX_MGMT_IE_INDEX);
	priv->beacon_idx = MWIFIEX_AUTO_IDX_MASK;
	priv->proberesp_idx = MWIFIEX_AUTO_IDX_MASK;
	priv->assocresp_idx = MWIFIEX_AUTO_IDX_MASK;
	priv->gen_idx = MWIFIEX_AUTO_IDX_MASK;
	priv->num_tx_timeout = 0;
	ether_addr_copy(priv->curr_addr, priv->adapter->perm_addr);
	memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);

	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
	    GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
		priv->hist_data = kmalloc(sizeof(*priv->hist_data), GFP_KERNEL);
		if (priv->hist_data)
			mwifiex_hist_data_reset(priv);
	}
}
static inline void handle_unmpls(struct task_unmpls *task, struct rte_mbuf *mbuf)
{
	struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
	switch (peth->ether_type) {
	case ETYPE_MPLSU:
		/* MPLS Decapsulation */
		mpls_decap(mbuf);
		peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
		ether_addr_copy(&task->edaddr, &peth->d_addr);
		break;
	case ETYPE_LLDP:
		INCR_TX_DROP_COUNT(task->base.stats, 1);
		rte_pktmbuf_free(mbuf);
		return;
	case ETYPE_IPv6:
		tx_buf_pkt_single(&task->base, mbuf, 0);
		break;
	case ETYPE_IPv4:
		tx_buf_pkt_single(&task->base, mbuf, 0);
		break;
	default:
		mprintf("Core %u Error Removing MPLS: ether_type = %#06x\n",
			task->lconf->id, peth->ether_type);
		rte_pktmbuf_free(mbuf);
	}
}
static int ath9k_of_init(struct ath_softc *sc)
{
	struct device_node *np = sc->dev->of_node;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	enum ath_bus_type bus_type = common->bus_ops->ath_bus_type;
	const char *mac;
	char eeprom_name[100];
	int ret;

	if (!of_device_is_available(np))
		return 0;

	ath_dbg(common, CONFIG, "parsing configuration from OF node\n");

	if (of_property_read_bool(np, "qca,no-eeprom")) {
		/* ath9k-eeprom-<bus>-<id>.bin */
		scnprintf(eeprom_name, sizeof(eeprom_name),
			  "ath9k-eeprom-%s-%s.bin",
			  ath_bus_type_to_string(bus_type), dev_name(ah->dev));

		ret = ath9k_eeprom_request(sc, eeprom_name);
		if (ret)
			return ret;

		ah->ah_flags &= ~AH_USE_EEPROM;
		ah->ah_flags |= AH_NO_EEP_SWAP;
	}

	mac = of_get_mac_address(np);
	if (!IS_ERR(mac))
		ether_addr_copy(common->macaddr, mac);

	return 0;
}
int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
				     u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	u8 *mac_ptr;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
		nic_vport_context.permanent_address.mac_addr_47_32);
	ether_addr_copy(mac_ptr, addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
/* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
 * seq_out is used to initialize filtering of outgoing duplicate frames
 * originating from the newly added node.
 */
struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
			      u16 seq_out)
{
	struct hsr_node *node;
	unsigned long now;
	int i;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (!node)
		return NULL;

	ether_addr_copy(node->MacAddressA, addr);

	/* We are only interested in time diffs here, so use current jiffies
	 * as initialization. (0 could trigger a spurious ring error warning).
	 */
	now = jiffies;
	for (i = 0; i < HSR_PT_PORTS; i++)
		node->time_in[i] = now;
	for (i = 0; i < HSR_PT_PORTS; i++)
		node->seq_out[i] = seq_out;

	list_add_tail_rcu(&node->mac_list, node_db);

	return node;
}
static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) {
		err = dev_uc_add(real_dev, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr))
		dev_uc_del(real_dev, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);
	return 0;
}
int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;
	u16 vlan_id;
	int i = 0;

	spin_lock_irqsave(&cq->cq_lock, flags);

	while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc[i].opcode = IB_WC_RECV;
		wc[i].pkey_index = 0;
		wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
			       IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
		/* 0 - currently only one recv sg is supported */
		wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
		wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
		ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
		wc[i].wc_flags |= IB_WC_WITH_SMAC;

		vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan & VLAN_VID_MASK;
		if (vlan_id) {
			wc[i].wc_flags |= IB_WC_WITH_VLAN;
			wc[i].vlan_id = vlan_id;
			wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
				    VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		}

		qedr_inc_sw_cons(&qp->rq);
		i++;
	}

	while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc[i].opcode = IB_WC_SEND;
		wc[i].status = IB_WC_SUCCESS;

		qedr_inc_sw_cons(&qp->sq);
		i++;
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
		 num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
		 qp->sq.gsi_cons, qp->ibqp.qp_num);

	return i;
}
u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key)
{
	struct cmd_obj *ph2c;
	struct set_stakey_parm *psetstakey_para;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
	struct set_stakey_rsp *psetstakey_rsp = NULL;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	struct sta_info *sta = (struct sta_info *)psta;
	u8 res = _SUCCESS;

	ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
	if (ph2c == NULL) {
		res = _FAIL;
		goto exit;
	}

	psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_KERNEL);
	if (psetstakey_para == NULL) {
		kfree(ph2c);
		res = _FAIL;
		goto exit;
	}

	psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp), GFP_KERNEL);
	if (psetstakey_rsp == NULL) {
		kfree(ph2c);
		kfree(psetstakey_para);
		res = _FAIL;
		goto exit;
	}

	init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
	ph2c->rsp = (u8 *)psetstakey_rsp;
	ph2c->rspsz = sizeof(struct set_stakey_rsp);

	ether_addr_copy(psetstakey_para->addr, sta->hwaddr);

	if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
		psetstakey_para->algorithm =
			(unsigned char)psecuritypriv->dot11PrivacyAlgrthm;
	else
		GET_ENCRY_ALGO(psecuritypriv, sta,
			       psetstakey_para->algorithm, false);

	if (unicast_key)
		memcpy(&psetstakey_para->key, &sta->dot118021x_UncstKey, 16);
	else
		memcpy(&psetstakey_para->key,
		       &psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey,
		       16);

	/* jeff: set this because at least sw key is ready */
	padapter->securitypriv.busetkipkey = true;

	res = rtw_enqueue_cmd(pcmdpriv, ph2c);

exit:
	return res;
}
int mg_table_lpm_apply_route(
	struct rte_mbuf **pkts,
	struct mg_bitmask *pkts_mask,
	void **entries,
	uint16_t offset_entry,
	uint16_t offset_pkt,
	uint16_t size)
{
	uint16_t i;
	for (i = 0; i < pkts_mask->size; i++) {
		if (mg_bitmask_get_bit(pkts_mask, i)) {
			// TODO: check if just 6 byte direct assignment is faster here (more parallel)
			// TODO: we could also do this in LUA, check if performance is affected...
			// TODO: we could also do this already on lookup. Check if performance is affected
			// copy data to packet
			//rte_memcpy((*pkts)->buf_addr + offset_pkt, *entries + offset_entry, size);
			struct ether_hdr *ethhdr =
				rte_pktmbuf_mtod(*pkts, struct ether_hdr *);
			ether_addr_copy(
				(struct ether_addr *)(*entries + offset_entry),
				&ethhdr->d_addr);
		}
		pkts++;
		entries++;
	}
	return 0;
}
static int qtnf_dump_station(struct wiphy *wiphy, struct net_device *dev,
			     int idx, u8 *mac, struct station_info *sinfo)
{
	struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
	const struct qtnf_sta_node *sta_node;
	int ret;

	sta_node = qtnf_sta_list_lookup_index(&vif->sta_list, idx);

	if (unlikely(!sta_node))
		return -ENOENT;

	ether_addr_copy(mac, sta_node->mac_addr);

	ret = qtnf_cmd_get_sta_info(vif, sta_node->mac_addr, sinfo);

	if (unlikely(ret == -ENOENT)) {
		qtnf_sta_list_del(&vif->sta_list, mac);
		cfg80211_del_sta(vif->netdev, mac, GFP_KERNEL);
		sinfo->filled = 0;
	}

	return ret;
}
/*
 * Store at least the advertising router's MAC address
 * plus the possible MAC address(es) to mpc->mps_macs.
 * For a freshly allocated MPOA client mpc->mps_macs == 0.
 */
static const uint8_t *copy_macs(struct mpoa_client *mpc,
				const uint8_t *router_mac,
				const uint8_t *tlvs, uint8_t mps_macs,
				uint8_t device_type)
{
	int num_macs;
	num_macs = (mps_macs > 1) ? mps_macs : 1;

	if (mpc->number_of_mps_macs != num_macs) { /* need to reallocate? */
		if (mpc->number_of_mps_macs != 0)
			kfree(mpc->mps_macs);
		mpc->number_of_mps_macs = 0;
		mpc->mps_macs = kmalloc(num_macs * ETH_ALEN, GFP_KERNEL);
		if (mpc->mps_macs == NULL) {
			pr_info("(%s) out of mem\n", mpc->dev->name);
			return NULL;
		}
	}
	ether_addr_copy(mpc->mps_macs, router_mac);
	tlvs += 20;
	if (device_type == MPS_AND_MPC)
		tlvs += 20;
	if (mps_macs > 0)
		memcpy(mpc->mps_macs, tlvs, mps_macs * ETH_ALEN);
	tlvs += mps_macs * ETH_ALEN;
	mpc->number_of_mps_macs = num_macs;

	return tlvs;
}
static int qtnf_connect(struct wiphy *wiphy, struct net_device *dev,
			struct cfg80211_connect_params *sme)
{
	struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
	int ret;

	if (vif->wdev.iftype != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	if (vif->sta_state != QTNF_STA_DISCONNECTED)
		return -EBUSY;

	if (sme->bssid)
		ether_addr_copy(vif->bssid, sme->bssid);
	else
		eth_zero_addr(vif->bssid);

	ret = qtnf_cmd_send_connect(vif, sme);
	if (ret) {
		pr_err("VIF%u.%u: failed to connect\n", vif->mac->macid,
		       vif->vifid);
		return ret;
	}

	vif->sta_state = QTNF_STA_CONNECTING;
	return 0;
}
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u8 *out_addr;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	ether_addr_copy(addr, &out_addr[2]);

out:
	kvfree(out);
	return err;
}
static int dsa_slave_port_fdb_dump(struct net_device *dev,
				   struct switchdev_obj_port_fdb *fdb,
				   switchdev_obj_dump_cb_t *cb)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->parent;
	unsigned char addr[ETH_ALEN] = { 0 };
	u16 vid = 0;
	int ret;

	if (!ds->drv->port_fdb_getnext)
		return -EOPNOTSUPP;

	for (;;) {
		bool is_static;

		ret = ds->drv->port_fdb_getnext(ds, p->port, addr, &vid,
						&is_static);
		if (ret < 0)
			break;

		ether_addr_copy(fdb->addr, addr);
		fdb->vid = vid;
		fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;

		ret = cb(&fdb->obj);
		if (ret < 0)
			break;
	}

	return ret == -ENOENT ? 0 : ret;
}
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
/**
 * on_netinfo - callback for HDM to be informed about HW's MAC
 * @iface: most interface instance
 * @link_stat: link status
 * @mac_addr: MAC address
 */
static void on_netinfo(struct most_interface *iface,
		       unsigned char link_stat, unsigned char *mac_addr)
{
	struct net_dev_context *nd;
	struct net_device *dev;
	const u8 *m = mac_addr;

	nd = get_net_dev_hold(iface);
	if (!nd)
		return;

	dev = nd->dev;

	if (link_stat)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	if (m && is_valid_ether_addr(m)) {
		if (!is_valid_ether_addr(dev->dev_addr)) {
			netdev_info(dev, "set mac %02x-%02x-%02x-%02x-%02x-%02x\n",
				    m[0], m[1], m[2], m[3], m[4], m[5]);
			ether_addr_copy(dev->dev_addr, m);
			netif_dormant_off(dev);
		} else if (!ether_addr_equal(dev->dev_addr, m)) {
			netdev_warn(dev, "reject mac %02x-%02x-%02x-%02x-%02x-%02x\n",
				    m[0], m[1], m[2], m[3], m[4], m[5]);
		}
	}

	dev_put(nd->dev);
}
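/* Illustrative sketch only: the hand-rolled "%02x-..." formatting above could
 * lean on the kernel's %pM printk specifier, which prints a 6-byte MAC
 * address (colon-separated, a cosmetic difference from the dashes used
 * above). The helper name report_set_mac() is hypothetical and not part of
 * the driver.
 */
static void report_set_mac(struct net_device *dev, const u8 *mac_addr)
{
	netdev_info(dev, "set mac %pM\n", mac_addr);
}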
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct net_device *master = p->parent->dst->master_netdev;
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
/* Responds to VF's READY message with VF's
 * ID, node, MAC address etc.
 * @vf: VF which sent READY message
 */
static void nic_mbx_send_ready(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	int bgx_idx, lmac;
	const char *mac;

	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
	mbx.nic_cfg.vf_id = vf;

	mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	if (vf < MAX_LMAC) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac)
			ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
	}
	mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;

	nic_send_msg_to_vf(nic, vf, &mbx);
}