static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn, struct mlx5_fpga_dma_buf *buf) { struct device *dma_device; int err = 0; if (unlikely(!buf->sg[0].data)) goto out; dma_device = &conn->fdev->mdev->pdev->dev; buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data, buf->sg[0].size, buf->dma_dir); err = dma_mapping_error(dma_device, buf->sg[0].dma_addr); if (unlikely(err)) { mlx5_fpga_warn(conn->fdev, "DMA error on sg 0: %d\n", err); err = -ENOMEM; goto out; } if (!buf->sg[1].data) goto out; buf->sg[1].dma_addr = dma_map_single(dma_device, buf->sg[1].data, buf->sg[1].size, buf->dma_dir); err = dma_mapping_error(dma_device, buf->sg[1].dma_addr); if (unlikely(err)) { mlx5_fpga_warn(conn->fdev, "DMA error on sg 1: %d\n", err); dma_unmap_single(dma_device, buf->sg[0].dma_addr, buf->sg[0].size, buf->dma_dir); err = -ENOMEM; } out: return err; }
static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf) { struct mlx5_ipsec_sadb_resp *resp = buf->sg[0].data; struct mlx5_ipsec_command_context *context; enum mlx5_ipsec_response_syndrome syndrome; struct mlx5_fpga_device *fdev = cb_arg; unsigned long flags; if (buf->sg[0].size < sizeof(*resp)) { mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n", buf->sg[0].size, sizeof(*resp)); return; } mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x sa_id %x\n", ntohl(resp->syndrome), ntohl(resp->sw_sa_handle)); spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); context = list_first_entry_or_null(&fdev->ipsec->pending_cmds, struct mlx5_ipsec_command_context, list); if (context) list_del(&context->list); spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); if (!context) { mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n"); return; } mlx5_fpga_dbg(fdev, "Handling response for %p\n", context); if (context->sa.sw_sa_handle != resp->sw_sa_handle) { mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n", ntohl(context->sa.sw_sa_handle), ntohl(resp->sw_sa_handle)); return; } syndrome = ntohl(resp->syndrome); context->status_code = syndrome_to_errno(syndrome); context->status = MLX5_FPGA_IPSEC_SACMD_COMPLETE; if (context->status_code) mlx5_fpga_warn(fdev, "IPSec SADB command failed with syndrome %08x\n", syndrome); complete(&context->complete); }
/*
 * Handle a receive-queue completion on an FPGA connection.
 *
 * Detaches the buffer the CQE refers to, unmaps it, and either hands it to
 * the connection's recv_cb and re-posts it, or frees it when the completion
 * carries an error status or the QP is no longer active.
 */
static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn,
				  struct mlx5_cqe64 *cqe, u8 status)
{
	struct mlx5_fpga_dma_buf *rx_buf;
	int idx, rc;

	/* Locate and detach the posted buffer this completion refers to */
	idx = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
	rx_buf = conn->qp.rq.bufs[idx];
	conn->qp.rq.bufs[idx] = NULL;

	/* byte_cnt is only valid on a successful completion */
	if (!status)
		rx_buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
	conn->qp.rq.cc++;

	/* Flush errors during teardown are expected; only warn otherwise */
	if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
		mlx5_fpga_warn(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
			       rx_buf, conn->fpga_qpn, status);
	else
		mlx5_fpga_dbg(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
			      rx_buf, conn->fpga_qpn, status);

	mlx5_fpga_conn_unmap_buf(conn, rx_buf);

	if (unlikely(status || !conn->qp.active)) {
		/* Stop the QP on any error and drop the buffer */
		conn->qp.active = false;
		kfree(rx_buf);
		return;
	}

	mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n",
		      rx_buf->sg[0].size);
	conn->recv_cb(conn->cb_arg, rx_buf);

	/* Recycle the buffer back onto the receive queue at full size */
	rx_buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
	rc = mlx5_fpga_conn_post_recv(conn, rx_buf);
	if (unlikely(rc)) {
		mlx5_fpga_warn(conn->fdev, "Failed to re-post recv buf: %d\n",
			       rc);
		kfree(rx_buf);
	}
}
/*
 * Send-completion callback for IPSec command buffers.
 *
 * Successful sends are ignored (the response path completes the command);
 * a failed send marks the command context SEND_FAIL and wakes the waiter.
 */
static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
					  struct mlx5_fpga_device *fdev,
					  struct mlx5_fpga_dma_buf *buf,
					  u8 status)
{
	struct mlx5_ipsec_command_context *context;

	if (!status)
		return;

	/* The DMA buffer is embedded in the command context */
	context = container_of(buf, struct mlx5_ipsec_command_context, buf);
	mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
		       status);
	context->status = MLX5_FPGA_IPSEC_SACMD_SEND_FAIL;
	complete(&context->complete);
}
/*
 * Write a block of FPGA memory.
 *
 * Only I2C access is supported; any other access type is rejected with
 * -EACCES.
 *
 * Return: number of bytes written (== size) on success, negative errno
 * on failure.
 */
int mlx5_fpga_mem_write(struct mlx5_fpga_device *fdev, size_t size, u64 addr,
			void *buf, enum mlx5_fpga_access_type access_type)
{
	int ret;

	if (access_type != MLX5_FPGA_ACCESS_TYPE_I2C) {
		mlx5_fpga_warn(fdev, "Unexpected write access_type %u\n",
			       access_type);
		return -EACCES;
	}

	ret = mlx5_fpga_mem_write_i2c(fdev, size, addr, buf);
	if (ret)
		return ret;

	return size;
}
/*
 * Wait for an IPSec SA command (issued by mlx5_fpga_ipsec_sa_cmd_exec) to
 * complete, then free its context.
 *
 * Return: the command's status code on completion, -EIO if the command
 * finished in a non-COMPLETE state (e.g. send failure), or -EINTR if the
 * wait was killed.
 *
 * NOTE(review): on the killed path the context is deliberately not freed —
 * the response path may still complete it later, and freeing here would
 * risk a use-after-free.  The context is leaked instead; confirm this
 * matches the lifetime rules of the response handler.
 */
int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx)
{
	struct mlx5_ipsec_command_context *context = ctx;
	int res;

	if (wait_for_completion_killable(&context->complete)) {
		mlx5_fpga_warn(context->dev,
			       "Failure waiting for IPSec command response\n");
		return -EINTR;
	}

	res = (context->status == MLX5_FPGA_IPSEC_SACMD_COMPLETE) ?
	      context->status_code : -EIO;

	kfree(context);
	return res;
}
/*
 * Allocate an IPSec SA command context and send the command to the FPGA
 * over the SBU connection.
 *
 * The context is queued on fdev->ipsec->pending_cmds BEFORE the message is
 * sent, so that the response handler (which pops the oldest pending entry)
 * can always find it; on a send failure it is unlinked again under the same
 * lock.  Ownership of the returned context passes to the caller, who must
 * reap it with mlx5_fpga_ipsec_sa_cmd_wait() — that function frees it after
 * completion (see the comment before the final return).
 *
 * Return: the command context on success, or an ERR_PTR:
 *   -EOPNOTSUPP if the device has no IPSec FPGA support,
 *   -ENOMEM on allocation failure,
 *   the sendmsg error if transmission fails.
 */
void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
				  struct mlx5_accel_ipsec_sa *cmd)
{
	struct mlx5_ipsec_command_context *context;
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned long flags;
	int res = 0;

	/* The SA command is copied onto the wire as-is; keep it 32-bit aligned */
	BUILD_BUG_ON((sizeof(struct mlx5_accel_ipsec_sa) & 3) != 0);
	if (!fdev || !fdev->ipsec)
		return ERR_PTR(-EOPNOTSUPP);

	/* GFP_ATOMIC: may be called from a context that cannot sleep */
	context = kzalloc(sizeof(*context), GFP_ATOMIC);
	if (!context)
		return ERR_PTR(-ENOMEM);

	memcpy(&context->sa, cmd, sizeof(*cmd));
	/* Single-fragment DMA buffer pointing at the embedded SA command */
	context->buf.complete = mlx5_fpga_ipsec_send_complete;
	context->buf.sg[0].size = sizeof(context->sa);
	context->buf.sg[0].data = &context->sa;
	init_completion(&context->complete);
	context->dev = fdev;
	/* Must be on the pending list before the send, so the response
	 * handler can match the (FIFO-ordered) reply to this command.
	 */
	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	context->status = MLX5_FPGA_IPSEC_SACMD_PENDING;
	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
	if (res) {
		mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n",
			       res);
		/* Send failed: no response will arrive — unlink and free */
		spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
		list_del(&context->list);
		spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
		kfree(context);
		return ERR_PTR(res);
	}
	/* Context will be freed by wait func after completion */
	return context;
}