static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len, bool *discard,
				     bool *leak_packet)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	*discard = true;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		EFX_ERR_RL(efx, " RX queue %d seriously overlength "
			   "RX event (0x%x > 0x%x+0x%x). Leaking\n",
			   rx_queue->queue, len, max_len,
			   efx->type->rx_buffer_padding);
		/* If this buffer was skb-allocated, then the meta
		 * data at the end of the skb will be trashed. So
		 * we have no choice but to leak the fragment.
		 */
		*leak_packet = (rx_buf->skb != NULL);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		EFX_ERR_RL(efx, " RX queue %d overlength RX event "
			   "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
	}

	rx_queue->channel->n_rx_overlength++;
}
static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * There's a race here with efx_mcdi_rpc(), because we might receive
	 * a REBOOT event *before* the request has been copied out. In polled
	 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
	 * is ignored. In event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI request. Did the mc
	 * reboot before or after the copyout? The best we can do always is
	 * just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resplen = 0;
			++mcdi->credits;
		}
	} else
		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
	spin_unlock(&mcdi->iface_lock);
}
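The credit bookkeeping above is subtle: terminating a request that is still in flight leaves a stale CMDDONE event in the queue, and the banked credit lets the completion path swallow that event instead of completing the *next* request. Below is a minimal, single-threaded sketch of the pattern; mini_mcdi, mini_ev_death() and mini_ev_cpl() are invented stand-ins, and the real efx_mcdi_ev_cpl() (not shown in this section) is assumed to consume credits this way.

/* Hypothetical miniature of the MCDI credits pattern. */
#include <stdbool.h>
#include <stdio.h>

struct mini_mcdi {
	unsigned int seqno;   /* sequence number of the current request */
	unsigned int credits; /* stale completions still in flight */
	bool pending;         /* a caller is waiting for a completion */
};

/* Terminate the outstanding request, as efx_mcdi_ev_death() does. */
static void mini_ev_death(struct mini_mcdi *m)
{
	if (m->pending) {
		m->pending = false;
		++m->credits; /* its CMDDONE event is now stale */
	}
}

/* Handle a CMDDONE event, as efx_mcdi_ev_cpl() is assumed to. */
static void mini_ev_cpl(struct mini_mcdi *m, unsigned int seqno)
{
	if (m->credits) {
		--m->credits; /* discard the stale completion */
		printf("dropped stale completion (seqno %u)\n", seqno);
	} else if (m->pending && seqno == m->seqno) {
		m->pending = false;
		printf("completed request %u\n", seqno);
	}
}

int main(void)
{
	struct mini_mcdi m = { .seqno = 1, .pending = true };

	mini_ev_death(&m);  /* reboot terminates request 1 */
	mini_ev_cpl(&m, 1); /* late CMDDONE for request 1 is dropped */

	m.seqno = 2;
	m.pending = true;
	mini_ev_cpl(&m, 2); /* request 2 completes normally */
	return 0;
}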
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}
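Both efx_rx_packet__check_len() variants above classify a reported length against two thresholds: the buffer length minus the hardware padding (acceptable), and the full buffer length (anything beyond it means the DMA wrote past the buffer, hence the leak-and-reset path). A standalone sketch of just that arithmetic, with invented names and sample sizes:

/* Hypothetical restatement of the overlength classification. */
#include <stdio.h>

enum rx_verdict { RX_OK, RX_OVERLENGTH, RX_SERIOUS_OVERLENGTH };

static enum rx_verdict check_len(unsigned int len, unsigned int buf_len,
				 unsigned int padding)
{
	unsigned int max_len = buf_len - padding;

	if (len <= max_len)
		return RX_OK;
	if (len > buf_len)		/* wrote past the buffer itself */
		return RX_SERIOUS_OVERLENGTH;
	return RX_OVERLENGTH;		/* only ate into the padding */
}

int main(void)
{
	/* 2048-byte buffer with 16 bytes of padding (values invented) */
	printf("%d\n", check_len(2000, 2048, 16)); /* RX_OK */
	printf("%d\n", check_len(2040, 2048, 16)); /* RX_OVERLENGTH */
	printf("%d\n", check_len(4096, 2048, 16)); /* RX_SERIOUS_OVERLENGTH */
	return 0;
}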
static bool falcon_handle_global_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
		return true;

	if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		nic_data->xmac_poll_required = true;
		return true;
	}

	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen global RX_RESET event. Resetting.\n",
			  channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		return true;
	}

	return false;
}
int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
{
	bool reset;

	if (flow_control & EFX_FC_AUTO) {
		EFX_LOG(efx, "10G does not support flow control "
			"autonegotiation\n");
		return -EINVAL;
	}

	if ((flow_control & EFX_FC_TX) && !(flow_control & EFX_FC_RX))
		return -EINVAL;

	/* TX flow control may automatically turn itself off if the
	 * link partner (intermittently) stops responding to pause
	 * frames. There isn't any indication that this has happened,
	 * so the best we can do is leave it up to the user to spot this
	 * and fix it by cycling transmit flow control on this end.
	 */
	reset = ((flow_control & EFX_FC_TX) &&
		 !(efx->flow_control & EFX_FC_TX));
	if (EFX_WORKAROUND_11482(efx) && reset) {
		if (falcon_rev(efx) >= FALCON_REV_B0) {
			/* Recover by resetting the EM block */
			if (efx->link_up)
				falcon_drain_tx_fifo(efx);
		} else {
			/* Schedule a reset to recover */
			efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
		}
	}

	efx->flow_control = flow_control;

	return 0;
}
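The reset decision above is an edge detection: recovery is only needed when TX pause goes from off to on, because a partner that stopped honouring pause frames may have silently disabled it on our side. A tiny sketch of that test, using FC_RX/FC_TX as invented stand-ins for the EFX_FC_* flags:

/* Hypothetical sketch of the TX-pause edge detection. */
#include <stdbool.h>
#include <stdio.h>

#define FC_RX (1u << 0)
#define FC_TX (1u << 1)

static bool tx_pause_newly_enabled(unsigned int old_fc, unsigned int new_fc)
{
	return (new_fc & FC_TX) && !(old_fc & FC_TX);
}

int main(void)
{
	/* off -> on: needs the recovery workaround */
	printf("%d\n", tx_pause_newly_enabled(FC_RX, FC_RX | FC_TX));
	/* already on: no reset required */
	printf("%d\n", tx_pause_newly_enabled(FC_RX | FC_TX, FC_RX | FC_TX));
	return 0;
}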
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = true;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}
/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				       unsigned int index)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;
	unsigned int mask = tx_queue->efx->type->txd_ring_mask;

	stop_index = (index + 1) & mask;
	read_ptr = tx_queue->read_count & mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
		if (unlikely(buffer->len == 0)) {
			EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
				"completion id %x\n", tx_queue->queue,
				read_ptr);
			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer);
		buffer->continuation = 1;
		buffer->len = 0;

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & mask;
	}
}
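Both efx_dequeue_buffers() variants rely on the same ring idiom: read_count is a free-running counter that is masked down to a ring index (the ring size being a power of two), so wrap-around needs no special-casing. A self-contained sketch with invented sizes:

/* Hypothetical demonstration of power-of-two ring indexing. */
#include <stdio.h>

#define RING_SIZE 8			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

int main(void)
{
	unsigned int read_count = 6;	/* free-running counter */
	unsigned int index = 9;		/* completion up to entry 9 */
	unsigned int stop = (index + 1) & RING_MASK;
	unsigned int read_ptr = read_count & RING_MASK;

	while (read_ptr != stop) {
		printf("dequeue slot %u (count %u)\n", read_ptr, read_count);
		++read_count;
		read_ptr = read_count & RING_MASK;
	}
	/* prints slots 6, 7, 0, 1 -- the counter wrapped transparently */
	return 0;
}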
/* Issue the given command by writing the data into the shared memory PDU,
 * ring the doorbell and wait for completion. Copyout the result. */
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
		 const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
		 size_t *outlen_actual)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);

	efx_mcdi_acquire(mcdi);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	spin_unlock_bh(&mcdi->iface_lock);

	efx_mcdi_copyin(efx, cmd, inbuf, inlen);

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);

		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);
	} else {
		size_t resplen;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock. */
		spin_lock_bh(&mcdi->iface_lock);
		rc = -mcdi->resprc;
		resplen = mcdi->resplen;
		spin_unlock_bh(&mcdi->iface_lock);

		if (rc == 0) {
			/* Use the resplen snapshot taken under the lock rather
			 * than re-reading mcdi->resplen unlocked. */
			efx_mcdi_copyout(efx, outbuf,
					 min(outlen, resplen + 3) & ~0x3);
			if (outlen_actual != NULL)
				*outlen_actual = resplen;
		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
			; /* Don't reset if MC_CMD_REBOOT returns EIO */
		else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else
			netif_dbg(efx, hw, efx->net_dev,
				  "MC command 0x%x inlen %d failed rc=%d\n",
				  cmd, (int)inlen, -rc);
	}

	efx_mcdi_release(mcdi);
	return rc;
}
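The copyout length in efx_mcdi_rpc() is worth unpacking: min(outlen, resplen + 3) & ~0x3 rounds the response length up to whole 32-bit words (the PDU is read in dwords) while never exceeding the caller's buffer rounded down to a word boundary. A standalone sketch of that computation, with copy_len() as an invented name:

/* Hypothetical restatement of the dword-rounded copy length. */
#include <stdio.h>
#include <stddef.h>

static size_t copy_len(size_t outlen, size_t resplen)
{
	size_t n = outlen < resplen + 3 ? outlen : resplen + 3;

	return n & ~(size_t)0x3;
}

int main(void)
{
	printf("%zu\n", copy_len(64, 5)); /* 8: 5 rounded up to 2 words */
	printf("%zu\n", copy_len(64, 8)); /* 8: already word-aligned */
	printf("%zu\n", copy_len(6, 32)); /* 4: capped by caller's buffer */
	return 0;
}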
int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
			       u8 qos)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct ef10_vf *vf;
	u16 old_vlan, new_vlan;
	int rc = 0, rc2 = 0;

	if (vf_i >= efx->vf_count)
		return -EINVAL;
	if (qos != 0)
		return -EINVAL;

	vf = nic_data->vf + vf_i;

	new_vlan = (vlan == 0) ? EFX_EF10_NO_VLAN : vlan;
	if (new_vlan == vf->vlan)
		return 0;

	if (vf->efx) {
		efx_device_detach_sync(vf->efx);
		efx_net_stop(vf->efx->net_dev);

		down_write(&vf->efx->filter_sem);
		vf->efx->type->filter_table_remove(vf->efx);

		rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
		if (rc)
			goto restore_filters;
	}

	if (vf->vport_assigned) {
		rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
		if (rc) {
			netif_warn(efx, drv, efx->net_dev,
				   "Failed to change vlan on VF %d.\n", vf_i);
			netif_warn(efx, drv, efx->net_dev,
				   "This is likely because the VF is bound to a driver in a VM.\n");
			netif_warn(efx, drv, efx->net_dev,
				   "Please unload the driver in the VM.\n");
			goto restore_vadaptor;
		}
		vf->vport_assigned = 0;
	}

	if (!is_zero_ether_addr(vf->mac)) {
		rc = efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac);
		if (rc)
			goto restore_evb_port;
	}

	if (vf->vport_id) {
		rc = efx_ef10_vport_free(efx, vf->vport_id);
		if (rc)
			goto restore_mac;
		vf->vport_id = 0;
	}

	/* Do the actual vlan change */
	old_vlan = vf->vlan;
	vf->vlan = new_vlan;

	/* Restore everything in reverse order */
	rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
				  MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
				  vf->vlan, &vf->vport_id);
	if (rc)
		goto reset_nic;

restore_mac:
	if (!is_zero_ether_addr(vf->mac)) {
		rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
		if (rc2) {
			eth_zero_addr(vf->mac);
			goto reset_nic;
		}
	}

restore_evb_port:
	rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
	if (rc2)
		goto reset_nic;
	else
		vf->vport_assigned = 1;

restore_vadaptor:
	if (vf->efx) {
		rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
		if (rc2)
			goto reset_nic;
	}

restore_filters:
	if (vf->efx) {
		rc2 = vf->efx->type->filter_table_probe(vf->efx);
		if (rc2)
			goto reset_nic;

		rc2 = efx_net_open(vf->efx->net_dev);
		if (rc2)
			goto reset_nic;

		up_write(&vf->efx->filter_sem);
		netif_device_attach(vf->efx->net_dev);
	}
	return rc;

reset_nic:
	if (vf->efx) {
		up_write(&vf->efx->filter_sem);
		netif_err(efx, drv, efx->net_dev,
			  "Failed to restore the VF - scheduling reset.\n");
		efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH);
	} else {
		netif_err(efx, drv, efx->net_dev,
			  "Failed to restore the VF and cannot reset the VF "
			  "- VF is not functional.\n");
		netif_err(efx, drv, efx->net_dev,
			  "Please reload the driver attached to the VF.\n");
	}
	return rc ? rc : rc2;
}
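efx_ef10_sriov_set_vf_vlan() is built around a restore ladder: resources are torn down top-to-bottom, the VLAN is changed, and the restore_* labels double as both the normal rebuild path and the re-entry points for a teardown failure, so exactly what was already freed gets rebuilt. A miniature of that control flow, with two invented resources A and B and hypothetical helper names:

/* Hypothetical miniature of the teardown/restore-ladder idiom. */
#include <stdio.h>

static int do_step(const char *what, int fail)
{
	printf("%s%s\n", what, fail ? ": failed" : ": ok");
	return fail ? -1 : 0;
}

/* Tear down B then A, change the setting, then rebuild A then B.
 * If tearing down A fails, only B (already removed) needs to be
 * restored, which the restore_b entry point arranges. */
static int change_setting(int fail_remove_a)
{
	int rc, rc2 = 0;

	rc = do_step("remove B", 0);
	if (rc)
		return rc;
	rc = do_step("remove A", fail_remove_a);
	if (rc)
		goto restore_b;	/* A untouched; only rebuild B */

	do_step("apply new setting", 0);

	rc2 = do_step("re-add A", 0);	/* normal rebuild starts here */
	if (rc2)
		goto fail;
restore_b:
	rc2 = do_step("re-add B", 0);
	if (rc2)
		goto fail;
	return rc;	/* 0 on success, else the teardown error */
fail:
	printf("could not restore: device needs a reset\n");
	return rc ? rc : rc2;
}

int main(void)
{
	change_setting(0);	/* clean run */
	change_setting(1);	/* teardown fails; only B is restored */
	return 0;
}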