int fjes_hw_init(struct fjes_hw *hw) { int ret; hw->base = fjes_hw_iomap(hw); if (!hw->base) return -EIO; ret = fjes_hw_reset(hw); if (ret) return ret; fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true); INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task); INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task); mutex_init(&hw->hw_info.lock); spin_lock_init(&hw->rx_status_lock); hw->max_epid = fjes_hw_get_max_epid(hw); hw->my_epid = fjes_hw_get_my_epid(hw); if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid)) return -ENXIO; ret = fjes_hw_setup(hw); hw->hw_info.trace = vzalloc(FJES_DEBUG_BUFFER_SIZE); hw->hw_info.trace_size = FJES_DEBUG_BUFFER_SIZE; return ret; }
/* fjes_hw_init - bring up the FJES hardware (map registers, reset,
 * mask interrupts, init work items/lock, validate EP ids, set up
 * shared memory). Returns 0 or a negative errno.
 *
 * NOTE(review): this duplicates the fjes_hw_init() definition above,
 * minus the rx_status_lock initialization and the debug trace buffer
 * allocation. Two definitions of the same external symbol cannot
 * coexist in one translation unit — presumably this is a stale copy
 * from an earlier revision; confirm and remove one of the two.
 */
int fjes_hw_init(struct fjes_hw *hw)
{
	int ret;

	hw->base = fjes_hw_iomap(hw);
	if (!hw->base)
		return -EIO;

	ret = fjes_hw_reset(hw);
	if (ret)
		return ret;

	/* All interrupt sources stay masked until the netdev is opened. */
	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

	INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task);
	INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);

	mutex_init(&hw->hw_info.lock);

	hw->max_epid = fjes_hw_get_max_epid(hw);
	hw->my_epid = fjes_hw_get_my_epid(hw);

	/* Our own EP id must fall inside the advertised range. */
	if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
		return -ENXIO;

	ret = fjes_hw_setup(hw);

	return ret;
}
static irqreturn_t fjes_intr(int irq, void *data) { struct fjes_adapter *adapter = data; struct fjes_hw *hw = &adapter->hw; irqreturn_t ret; u32 icr; icr = fjes_hw_capture_interrupt_status(hw); if (icr & REG_IS_MASK_IS_ASSERT) { if (icr & REG_ICTL_MASK_RX_DATA) fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID); if (icr & REG_ICTL_MASK_DEV_STOP_REQ) fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID); if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID); if (icr & REG_ICTL_MASK_TXRX_STOP_DONE) fjes_hw_set_irqmask(hw, REG_ICTL_MASK_TXRX_STOP_DONE, true); if (icr & REG_ICTL_MASK_INFO_UPDATE) fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID); ret = IRQ_HANDLED; } else { ret = IRQ_NONE; } return ret; }
/* fjes_rx_irq - RX-data interrupt: mask further RX interrupts and
 * hand the receive work to NAPI polling.
 */
static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
{
	fjes_hw_set_irqmask(&adapter->hw, REG_ICTL_MASK_RX_DATA, true);

	/* Restart the poll-idle timestamp on the next fjes_poll() pass. */
	adapter->unset_rx_last = true;
	napi_schedule(&adapter->napi);
}
static void fjes_free_irq(struct fjes_adapter *adapter) { struct fjes_hw *hw = &adapter->hw; adapter->interrupt_watch_enable = false; cancel_delayed_work_sync(&adapter->interrupt_watch_task); fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true); if (adapter->irq_registered) { free_irq(adapter->hw.hw_res.irq, adapter); adapter->irq_registered = false; } }
/* fjes_open - Called when a network interface is made active */ static int fjes_open(struct net_device *netdev) { struct fjes_adapter *adapter = netdev_priv(netdev); struct fjes_hw *hw = &adapter->hw; int result; if (adapter->open_guard) return -ENXIO; result = fjes_setup_resources(adapter); if (result) goto err_setup_res; hw->txrx_stop_req_bit = 0; hw->epstop_req_bit = 0; napi_enable(&adapter->napi); fjes_hw_capture_interrupt_status(hw); result = fjes_request_irq(adapter); if (result) goto err_req_irq; fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false); netif_tx_start_all_queues(netdev); netif_carrier_on(netdev); return 0; err_req_irq: fjes_free_irq(adapter); napi_disable(&adapter->napi); err_setup_res: fjes_free_resources(adapter); return result; }
/* fjes_poll - NAPI poll routine.
 *
 * Advertises FJES_RX_POLL_WORK to all shared partner EPs, then drains
 * up to @budget received frames into the stack, updating the per-EP
 * and aggregate statistics. When the budget is not exhausted, polling
 * either continues (if frames arrived within the last few jiffies) or
 * stops: the POLL_WORK bits are cleared and the RX interrupt is
 * re-enabled.
 *
 * Returns the number of frames processed (<= budget).
 */
static int fjes_poll(struct napi_struct *napi, int budget)
{
	struct fjes_adapter *adapter =
			container_of(napi, struct fjes_adapter, napi);
	struct net_device *netdev = napi->dev;
	struct fjes_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	int work_done = 0;
	int cur_epid = 0;
	int epidx;
	size_t frame_len;
	void *frame;

	/* Tell every shared partner EP we are actively polling; the
	 * rx_status field lives in shared memory, hence the lock.
	 */
	spin_lock(&hw->rx_status_lock);
	for (epidx = 0; epidx < hw->max_epid; epidx++) {
		if (epidx == hw->my_epid)
			continue;

		if (fjes_hw_get_partner_ep_status(hw, epidx) ==
		    EP_PARTNER_SHARED)
			adapter->hw.ep_shm_info[epidx]
				   .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
	}
	spin_unlock(&hw->rx_status_lock);

	while (work_done < budget) {
		prefetch(&adapter->hw);
		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);

		if (frame) {
			skb = napi_alloc_skb(napi, frame_len);
			if (!skb) {
				/* Allocation failure: count the frame as
				 * dropped and errored, but still release
				 * it back to the shared buffer below.
				 */
				adapter->stats64.rx_dropped += 1;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_dropped += 1;
				adapter->stats64.rx_errors += 1;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_errors += 1;
			} else {
				memcpy(skb_put(skb, frame_len),
				       frame, frame_len);
				skb->protocol = eth_type_trans(skb, netdev);
				skb->ip_summed = CHECKSUM_UNNECESSARY;

				netif_receive_skb(skb);

				work_done++;
				adapter->stats64.rx_packets += 1;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_packets += 1;
				adapter->stats64.rx_bytes += frame_len;
				hw->ep_shm_info[cur_epid].net_stats
					.rx_bytes += frame_len;

				if (is_multicast_ether_addr(
					((struct ethhdr *)frame)->h_dest)) {
					adapter->stats64.multicast += 1;
					hw->ep_shm_info[cur_epid].net_stats
						.multicast += 1;
				}
			}

			fjes_rxframe_release(adapter, cur_epid);
			adapter->unset_rx_last = true;
		} else {
			/* No more pending frames. */
			break;
		}
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		if (adapter->unset_rx_last) {
			adapter->rx_last_jiffies = jiffies;
			adapter->unset_rx_last = false;
		}

		/* Keep polling while frames were seen within the last
		 * ~3 jiffies; the signed cast makes the comparison safe
		 * across jiffies wrap-around.
		 */
		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
			napi_reschedule(napi);
		} else {
			/* Going idle: withdraw the POLL_WORK advertisement
			 * and fall back to interrupt-driven RX.
			 */
			spin_lock(&hw->rx_status_lock);
			for (epidx = 0; epidx < hw->max_epid; epidx++) {
				if (epidx == hw->my_epid)
					continue;
				if (fjes_hw_get_partner_ep_status(hw, epidx) ==
				    EP_PARTNER_SHARED)
					adapter->hw.ep_shm_info[epidx].tx
						   .info->v1i.rx_status &=
						~FJES_RX_POLL_WORK;
			}
			spin_unlock(&hw->rx_status_lock);

			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
		}
	}

	return work_done;
}