/* clear the ring's pending TX/RX interrupt flag, then update its
 * interrupt mask in hardware
 */
void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
{
	u32 flag;

	if (is_tx_ring(ring))
		flag = RCB_INT_FLAG_TX;
	else
		flag = RCB_INT_FLAG_RX;

	hns_rcb_int_clr_hw(ring->q, flag);
	hns_rcb_int_ctrl_hw(ring->q, flag, mask);
}
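/**
 * __lb_other_process - handle one step of the ethtool loopback self-test
 * @ring_data: ring the frame was seen on, or NULL to only build the test
 *	       frame in @skb
 * @skb: buffer to fill with the test pattern, or the frame to process
 *
 * With @ring_data == NULL the function fills @skb with the self-test
 * pattern (0xFF fill, 0xAA/0xBE/0xAF markers in the second half).
 * For a tx ring it resets the corresponding netdev tx queue; for an rx
 * ring it validates the received frame against the markers, updating rx
 * statistics and hex-dumping mismatches. Consumes @skb only in the rx
 * path.
 */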
static void __lb_other_process(struct hns_nic_ring_data *ring_data,
			       struct sk_buff *skb)
{
	struct net_device *ndev;
	struct hns_nic_priv *priv;
	struct hnae_ring *ring;
	struct netdev_queue *dev_queue;
	struct sk_buff *new_skb;
	unsigned int frame_size;
	int check_ok;
	u32 i;
	char buff[33]; /* 32B data and the last character '\0' */

	if (!ring_data) { /* just build the test frame */
		ndev = skb->dev;
		priv = netdev_priv(ndev);

		frame_size = skb->len;
		memset(skb->data, 0xFF, frame_size);
		if ((!AE_IS_VER1(priv->enet_ver)) &&
		    (priv->ae_handle->port_type == HNAE_PORT_SERVICE)) {
			memcpy(skb->data, ndev->dev_addr, 6);
			skb->data[5] += 0x1f;
		}

		frame_size &= ~1ul;
		memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
		memset(&skb->data[frame_size / 2 + 10], 0xBE,
		       frame_size / 2 - 11);
		memset(&skb->data[frame_size / 2 + 12], 0xAF,
		       frame_size / 2 - 13);
		return;
	}

	ring = ring_data->ring;
	ndev = ring_data->napi.dev;
	if (is_tx_ring(ring)) { /* for tx queue reset */
		dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
		netdev_tx_reset_queue(dev_queue);
		return;
	}

	frame_size = skb->len;
	frame_size &= ~1ul;

	/* linearize a possibly multi-buffer frame; skb_copy() can fail
	 * under GFP_ATOMIC, so check the copy before dereferencing it
	 */
	new_skb = skb_copy(skb, GFP_ATOMIC);
	dev_kfree_skb_any(skb);
	if (!new_skb) {
		netdev_err(ndev, "skb alloc failed\n");
		return;
	}
	skb = new_skb;

	check_ok = 0;
	if (*(skb->data + 10) == 0xFF) { /* rx: check the test pattern */
		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
		    (*(skb->data + frame_size / 2 + 12) == 0xAF))
			check_ok = 1;
	}

	if (check_ok) {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += skb->len;
	} else { /* pattern mismatch: count the error, hex-dump the frame */
		ndev->stats.rx_frame_errors++;
		for (i = 0; i < skb->len; i++) {
			snprintf(buff + i % 16 * 2, 3, /* trailing '\0' */
				 "%02x", *(skb->data + i));
			if ((i % 16 == 15) || (i == skb->len - 1))
				pr_info("%s\n", buff);
		}
	}

	dev_kfree_skb_any(skb);
}
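/*
 * A minimal sketch (not driver code) of how a self-test harness could
 * drive __lb_other_process; the real callers live in hns_ethtool.c.
 * example_lb_build_frame() is hypothetical and only illustrates the
 * ring_data == NULL "create frame" calling convention.
 */
static struct sk_buff *example_lb_build_frame(struct net_device *ndev,
					      unsigned int frame_size)
{
	struct sk_buff *skb;

	skb = alloc_skb(frame_size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_put(skb, frame_size);
	skb->dev = ndev;

	/* ring_data == NULL: fill skb->data with the test pattern */
	__lb_other_process(NULL, skb);

	return skb;	/* caller transmits it on a loopback-enabled ring */
}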