int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
	struct qm_fd fd;
	int ret;
	const size_t size = 2 * sizeof(struct qm_sg_entry);
	int num_retries = 0;

	/* Build a compound frame descriptor pointing at the request's two-entry
	 * S/G table; the congestion weight is the length of the input entry
	 * (fd_sgt[1]).
	 */
	fd.cmd = 0;
	fd.format = qm_fd_compound;
	fd.cong_weight = req->fd_sgt[1].length;

	fd.addr = dma_map_single(qidev, req->fd_sgt, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, fd.addr)) {
		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
		return -EIO;
	}

	/* Retry while the portal is busy (-EBUSY); bail out on any other error */
	do {
		ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
		if (likely(!ret))
			return 0;

		if (ret != -EBUSY)
			break;
		num_retries++;
	} while (num_retries < 10000);

	dev_err(qidev, "qman_enqueue failed: %d\n", ret);

	return ret;
}
int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
	struct qm_fd fd;
	dma_addr_t addr;
	int ret;
	int num_retries = 0;

	/* Mark the FD as a compound frame whose length is that of the input
	 * S/G entry (fd_sgt[1]).
	 */
	qm_fd_clear_fd(&fd);
	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));

	/* Map the S/G table (output + input entries) for the device */
	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
			      DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, addr)) {
		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
		return -EIO;
	}
	qm_fd_addr_set64(&fd, addr);

	/* Retry while the portal is busy (-EBUSY); bail out on any other error */
	do {
		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
		if (likely(!ret))
			return 0;

		if (ret != -EBUSY)
			break;
		num_retries++;
	} while (num_retries < CAAM_QI_ENQUEUE_RETRIES);

	dev_err(qidev, "qman_enqueue failed: %d\n", ret);

	return ret;
}
static void do_enqueues(struct qman_fq *fq)
{
	unsigned int loop;

	for (loop = 0; loop < NUM_ENQUEUES; loop++) {
		if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT |
				 (((loop + 1) == NUM_ENQUEUES) ?
				  QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0)))
			panic("qman_enqueue() failed\n");
		fd_inc(&fd);
	}
}
/* Transmit a frame */
static inline void send_frame(u32 fqid, const struct qm_fd *fd)
{
	int ret;

	local_fq.fqid = fqid;
retry:
	ret = qman_enqueue(&local_fq, fd, 0);
	if (ret) {
		cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
		goto retry;
	}
}
/* test */
static int do_enqueues(struct qman_fq *fq)
{
	unsigned int loop;
	int err = 0;

	for (loop = 0; loop < NUM_ENQUEUES; loop++) {
		if (qman_enqueue(fq, &fd)) {
			pr_crit("qman_enqueue() failed\n");
			err = -EIO;
		}
		fd_inc(&fd);
	}

	return err;
}
static void do_enqueues(struct qman_fq *fq)
{
	unsigned int loop = test_frames;

	dcbt_rw(eq_capture);
	while (loop) {
		int err;

		if (loop == test_start)
			eq_capture[0] = mfatb();
retry:
		err = qman_enqueue(fq, &fd, 0);
		if (err) {
			eq_jam++;
			cpu_spin(ENQUEUE_BACKOFF);
			goto retry;
		}
#ifdef TEST_FD
		fd_inc(&fd);
#endif
		loop--;
	}
	eq_capture[1] = mfatb();
}
enum IP_STATUS ipsec_encap_send(const struct ppam_rx_hash *ctxt,
				struct annotations_t *notes, void *ip_hdr_ptr)
{
	struct iphdr *ip_hdr = ip_hdr_ptr;
	struct ipsec_tunnel_t *entry = notes->dest->tunnel;
	const struct qm_fd *fd = &notes->dqrr->fd;
	struct qm_fd fd2;
	uint32_t ret;
	struct qman_fq *fq_to_sec;
	static int to_sec_fq_index;

	if (false == simple_fd_mode) {
		ipsec_create_compound_fd(&fd2, fd, ip_hdr, ENCRYPT);
	} else {
		/* Simple FD mode: reuse the received FD as a contiguous frame,
		 * skip the Ethernet header, and release the buffer to the
		 * buffer pool used by SEC.
		 */
		fd2 = *fd;
		fd2.cmd = 0;
		fd2._format1 = qm_fd_contig;
		fd2.length20 = ip_hdr->tot_len;
		fd2.offset = fd->offset + ETHER_HDR_LEN;
		fd2.bpid = sec_bpid;
	}
	fd = &fd2;

#ifdef STATS_TBD
	decorated_notify_inc_32(&(ctxt->stats->encap_pre_sec));
#endif

	/* Lazily initialise the tunnel's SEC frame queues on first use */
	if (unlikely(entry->fq_state == PARKED)) {
		mutex_lock(&entry->tlock);
		if (entry->fq_state == PARKED) {
			if (init_sec_fqs(entry, ENCRYPT, entry->ctxtA,
					 entry->tunnel_id)) {
				fprintf(stderr, "error: %s: Failed to init"
					" encap context\n", __func__);
				mutex_unlock(&entry->tlock);
				return IP_STATUS_DROP;
			}
			entry->fq_state = SCHEDULED;
		}
		mutex_unlock(&entry->tlock);
	}

	/* High-bandwidth tunnels round-robin across the to-SEC frame queues */
	if (entry->hb_tunnel) {
		fq_to_sec = entry->qm_fq_to_sec[to_sec_fq_index++];
		to_sec_fq_index = to_sec_fq_index % NUM_TO_SEC_FQ;
	} else {
		fq_to_sec = entry->qm_fq_to_sec[0];
	}

loop:
	ret = qman_enqueue(fq_to_sec, fd, 0);
	if (unlikely(ret)) {
		/* Enqueue failed: busy-wait ~1000 timebase ticks and retry */
		uint64_t now, then = mfatb();

		do {
			now = mfatb();
		} while (now < (then + 1000));
		goto loop;
	}

	return IP_STATUS_STOLEN;
}