/* Older variant: takes the CQ directly and can keep the EQ vector reserved
 * instead of releasing it. */
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                        bool reserve_vectors)
{
        struct mlx4_en_dev *mdev = priv->mdev;

        mlx4_en_unmap_buffer(&cq->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
        if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors)
                mlx4_release_eq(priv->mdev->dev, cq->vector);
        cq->buf_size = 0;
        cq->buf = NULL;
}
/* Variant with a per-CQ taskqueue: drain and free it before releasing the
 * hardware queue resources, then free the struct and clear the caller's
 * reference. */
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_cq *cq = *pcq;

        taskqueue_drain(cq->tq, &cq->cq_task);
        taskqueue_free(cq->tq);
        mlx4_en_unmap_buffer(&cq->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
        if (priv->mdev->dev->caps.comp_pool && cq->vector)
                mlx4_release_eq(priv->mdev->dev, cq->vector);
        kfree(cq);
        *pcq = NULL;
}
/* Variant that releases the EQ vector only for RX CQs with a valid vector;
 * TX CQs reuse the RX vector, so releasing it here would be a double free. */
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_cq *cq = *pcq;

        mlx4_en_unmap_buffer(&cq->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
        if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
            cq->is_tx == RX)
                mlx4_release_eq(priv->mdev->dev, cq->vector);
        cq->vector = 0;
        cq->buf_size = 0;
        cq->buf = NULL;
        kfree(cq);
        *pcq = NULL;
}
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_en_cq *cq = *pcq;

        mlx4_en_unmap_buffer(&cq->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
        if (priv->mdev->dev->caps.comp_pool && cq->vector)
                mlx4_release_eq(priv->mdev->dev, cq->vector);
        cq->vector = 0;
        cq->buf_size = 0;
        cq->buf = NULL;
        kfree(cq);
        *pcq = NULL;
}
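/*
 * All of the double-pointer variants above follow the same teardown order:
 * unmap and free the hardware queue resources, release the EQ vector when
 * this CQ owns one, then free the struct and clear the caller's reference.
 * A minimal caller sketch under that contract; mlx4_en_free_rx_cqs() is a
 * hypothetical helper name, while priv->rx_cq and priv->rx_ring_num are the
 * fields used by the activate functions below:
 */
static void mlx4_en_free_rx_cqs(struct mlx4_en_priv *priv)
{
        int i;

        for (i = 0; i < priv->rx_ring_num; i++)
                if (priv->rx_cq[i])
                        mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
        /* every entry is now NULL, so repeating the loop is a no-op */
}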
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                        int cq_idx)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        int err = 0;
        int timestamp_en = 0;
        bool assigned_eq = false;

        cq->dev = mdev->pndev[priv->port];
        cq->mcq.set_ci_db  = cq->wqres.db.db;
        cq->mcq.arm_db     = cq->wqres.db.db + 1;
        *cq->mcq.set_ci_db = 0;
        *cq->mcq.arm_db    = 0;
        memset(cq->buf, 0, cq->buf_size);

        if (cq->is_tx == RX) {
                if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
                                             cq->vector)) {
                        cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);

                        err = mlx4_assign_eq(mdev->dev, priv->port,
                                             MLX4_EQ_ID_TO_UUID(MLX4_EQ_ID_EN,
                                                                priv->port,
                                                                cq_idx),
                                             mlx4_en_cq_eq_cb,
                                             &priv->rx_cq[cq_idx],
                                             &cq->vector);
                        if (err) {
                                mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
                                         cq->vector);
                                goto free_eq;
                        }

                        assigned_eq = true;
                }

                /* Set IRQ for specific name (per ring) */
                err = mlx4_rename_eq(mdev->dev, priv->port, cq->vector,
                                     MLX4_EN_EQ_NAME_PRIORITY, "%s-%d",
                                     priv->dev->name, cq->ring);
                if (err) {
                        mlx4_warn(mdev, "Failed to rename EQ, continuing with default name\n");
                        err = 0;
                }

#if defined(HAVE_IRQ_DESC_GET_IRQ_DATA) && defined(HAVE_IRQ_TO_DESC_EXPORTED)
                cq->irq_desc =
                        irq_to_desc(mlx4_eq_get_irq(mdev->dev,
                                                    cq->vector));
#endif
        } else {
                /* For TX we use the same irq per ring we assigned for the RX */
                struct mlx4_en_cq *rx_cq;

                cq_idx = cq_idx % priv->rx_ring_num;
                rx_cq = priv->rx_cq[cq_idx];
                cq->vector = rx_cq->vector;
        }

        if (!cq->is_tx)
                cq->size = priv->rx_ring[cq->ring]->actual_size;

        if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
            (!cq->is_tx && priv->hwtstamp_config.rx_filter))
                timestamp_en = 1;

        err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
                            &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
                            cq->vector, 0, timestamp_en, &cq->wqres.buf,
                            false);
        if (err)
                goto free_eq;

        cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
        cq->mcq.event = mlx4_en_cq_event;

        if (cq->is_tx) {
                netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
                               NAPI_POLL_WEIGHT);
        } else {
                netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
#ifdef HAVE_NAPI_HASH_ADD
                napi_hash_add(&cq->napi);
#endif
        }

        napi_enable(&cq->napi);

        return 0;

free_eq:
        if (assigned_eq)
                mlx4_release_eq(mdev->dev,
                                MLX4_EQ_ID_TO_UUID(MLX4_EQ_ID_EN, priv->port,
                                                   cq_idx),
                                cq->vector);
        cq->vector = mdev->dev->caps.num_comp_vectors;
        return err;
}
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                        int cq_idx)
{
        struct mlx4_en_dev *mdev = priv->mdev;
        int err = 0;
        int timestamp_en = 0;
        bool assigned_eq = false;

        cq->dev = mdev->pndev[priv->port];
        cq->mcq.set_ci_db  = cq->wqres.db.db;
        cq->mcq.arm_db     = cq->wqres.db.db + 1;
        *cq->mcq.set_ci_db = 0;
        *cq->mcq.arm_db    = 0;
        memset(cq->buf, 0, cq->buf_size);

        if (cq->is_tx == RX) {
                if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
                                             cq->vector)) {
                        cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);

                        err = mlx4_assign_eq(mdev->dev, priv->port,
                                             &cq->vector);
                        if (err) {
                                mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
                                         cq->vector);
                                goto free_eq;
                        }

                        assigned_eq = true;
                }

                cq->irq_desc =
                        irq_to_desc(mlx4_eq_get_irq(mdev->dev,
                                                    cq->vector));
        } else {
                /* For TX we use the same irq per ring we assigned for the RX */
                struct mlx4_en_cq *rx_cq;

                cq_idx = cq_idx % priv->rx_ring_num;
                rx_cq = priv->rx_cq[cq_idx];
                cq->vector = rx_cq->vector;
        }

        if (!cq->is_tx)
                cq->size = priv->rx_ring[cq->ring]->actual_size;

        if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
            (!cq->is_tx && priv->hwtstamp_config.rx_filter))
                timestamp_en = 1;

        err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
                            &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
                            cq->vector, 0, timestamp_en);
        if (err)
                goto free_eq;

        cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
        cq->mcq.event = mlx4_en_cq_event;

        if (cq->is_tx)
                netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
                                  NAPI_POLL_WEIGHT);
        else
                netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);

        napi_enable(&cq->napi);

        return 0;

free_eq:
        if (assigned_eq)
                mlx4_release_eq(mdev->dev, cq->vector);
        cq->vector = mdev->dev->caps.num_comp_vectors;
        return err;
}
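/*
 * A minimal sketch of how mlx4_en_activate_cq() is driven on the RX path,
 * with unwinding on failure. The loop shape follows the driver's port
 * bring-up, but mlx4_en_activate_rx_cqs() is a hypothetical helper and the
 * mlx4_en_deactivate_cq() signature used here is an assumption, not code
 * from this file:
 */
static int mlx4_en_activate_rx_cqs(struct mlx4_en_priv *priv)
{
        int err, i;

        for (i = 0; i < priv->rx_ring_num; i++) {
                /* for RX CQs, cq_idx doubles as the ring index */
                err = mlx4_en_activate_cq(priv, priv->rx_cq[i], i);
                if (err)
                        goto err_unwind;
        }
        return 0;

err_unwind:
        /* deactivate only the CQs that were successfully activated */
        while (--i >= 0)
                mlx4_en_deactivate_cq(priv, priv->rx_cq[i]);
        return err;
}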