/*
 * bman_acquire - pull up to 8 buffers from a BMan pool.
 * @pool: the buffer pool to acquire from
 * @bufs: output array for the acquired buffers; may be NULL if the caller
 *        only wants the buffers drained (the copy is skipped in that case)
 * @num:  number of buffers requested, 1..8 (enforced by DPAA_ASSERT)
 *
 * Issues an ACQUIRE management command through the affine portal and polls
 * for the result. Returns the number of buffers actually acquired when it
 * equals @num, -ETIMEDOUT if the portal never produced a result, or -ENOMEM
 * if the hardware handed back fewer buffers than requested (the partial
 * result is still copied into @bufs before the error is returned).
 */
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p = get_affine_portal();
	struct bm_mc_command *mcc;
	union bm_mc_result *mcr;
	int ret;

	DPAA_ASSERT(num > 0 && num <= 8);

	/* Build and submit the ACQUIRE command; the bufcount is encoded in
	 * the low bits of the verb. */
	mcc = bm_mc_start(&p->p);
	mcc->bpid = pool->bpid;
	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
		     (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
	if (!bm_mc_result_timeout(&p->p, &mcr)) {
		put_affine_portal();
		pr_crit("BMan Acquire Timeout\n");
		return -ETIMEDOUT;
	}
	/* The result verb echoes how many buffers were actually returned */
	ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
	if (bufs)
		memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));

	put_affine_portal();
	if (ret != num)
		ret = -ENOMEM;
	return ret;
}
/*
 * Prefetch (read-only touch) the cache-enabled RCR consumer-index cacheline
 * ahead of a subsequent bm_rcr_cce_update(). Only valid when the portal's
 * RCR consumption mode is bm_rcr_cce; the local is only referenced by the
 * (possibly compiled-out) assertion, hence __maybe_unused.
 */
static void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
	__maybe_unused struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
}
/*
 * Commit the in-progress RCR entry (claimed earlier via bm_rcr_start) to
 * hardware using the "PVB" (valid-bit) production mode: writing the verb
 * byte with the current valid-bit is what hands the entry to hardware, so
 * it must be the last thing made visible.
 * @myverb: command verb for the entry; the ring's valid-bit is OR'd in here.
 */
static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_rcr *rcr = &portal->rcr;
	struct bm_rcr_entry *rcursor;

	DPAA_ASSERT(rcr->busy);
	DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
	DPAA_ASSERT(rcr->available >= 1);
	/* Order: the entry payload must be observable before the verb write
	 * that flips ownership to hardware. */
	dma_wmb();
	rcursor = rcr->cursor;
	rcursor->_ncw_verb = myverb | rcr->vbit;
	/* Push the completed entry out of the cache toward the portal */
	dpaa_flush(rcursor);
	/* Advance the software producer cursor (rcr_inc presumably also
	 * toggles rcr->vbit on ring wrap — behavior defined elsewhere). */
	rcr_inc(rcr);
	rcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
#endif
}
/*
 * Tear-down hook for the portal's management-command (MC) interface.
 * Compiles to an empty function unless CONFIG_FSL_DPAA_CHECKING is set,
 * in which case it diagnoses a portal being destroyed while an MC command
 * is still in flight (state not back to mc_idle).
 */
static void bm_mc_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	DPAA_ASSERT(portal->mc.state == mc_idle);
	if (portal->mc.state != mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}
/*
 * Begin a management command: hands the caller a zeroed command buffer to
 * fill in, to be submitted with bm_mc_commit(). Must only be called when no
 * other MC command is in progress on this portal (state == mc_idle); under
 * CONFIG_FSL_DPAA_CHECKING the state machine advances to mc_user to catch
 * misuse.
 */
static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_user;
#endif
	/* Zero the command register area so stale fields can't leak into
	 * the new command. */
	dpaa_zero(mc->cr);
	return mc->cr;
}
/*
 * Refresh the RCR consumer index from the cache-enabled portal region and
 * credit back any ring entries hardware has consumed since the last update.
 * Only valid in bm_rcr_cce consumption mode. Returns the number of entries
 * newly freed (also added to rcr->available).
 */
static u8 bm_rcr_cce_update(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;
	u8 diff, old_ci = rcr->ci;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	/* Read the hardware CI snapshot, then invalidate the cacheline so
	 * the next update fetches fresh data. */
	rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
	bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
	/* Modular distance the consumer moved around the ring */
	diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
	rcr->available += diff;
	return diff;
}
/*
 * Claim the next free RCR (release command ring) entry for the caller to
 * fill in, to be committed with bm_rcr_pvb_commit(). Returns NULL if the
 * ring has no available entries (caller should update the consumer index
 * and retry). Must not be called while another entry is already claimed
 * (rcr->busy, checked under CONFIG_FSL_DPAA_CHECKING).
 */
static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(!rcr->busy);
	if (!rcr->available)
		return NULL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 1;
#endif
	/* Hand out a zeroed entry so no stale command fields survive */
	dpaa_zero(rcr->cursor);
	return rcr->cursor;
}
/*
 * Submit the management command previously obtained from bm_mc_start().
 * Writing the verb byte (with the current valid-bit) is what launches the
 * command, so all other command fields must be visible to hardware first —
 * hence the dma_wmb() before the verb store. The response register for this
 * command is prefetched read-only so the result poll hits warm cache.
 * @myverb: command verb; the MC valid-bit is OR'd in here.
 */
static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_user);
	/* Order: command body before the verb/valid-bit that triggers it */
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_hw;
#endif
}
/*
 * Encode an absolute threshold value into the hardware's floating-point-like
 * "mantissa | (exponent << 8)" format: the value is repeatedly halved until
 * the mantissa fits in 8 bits, counting the shifts in the exponent.
 * @roundup: when non-zero, round the mantissa up whenever a set bit is
 *           shifted out, so the encoded threshold never under-represents
 *           the requested value.
 * The exponent must fit in 4 bits (asserted).
 */
static u32 __generate_thresh(u32 val, int roundup)
{
	u32 exponent;

	for (exponent = 0; val > 0xff; exponent++) {
		int dropped = val & 1;

		val >>= 1;
		if (roundup && dropped)
			val++;
	}
	DPAA_ASSERT(exponent < 0x10);
	return val | (exponent << 8);
}
/*
 * Slow-path handler for portal interrupt status bits. For an RCR-interrupt
 * (BM_PIRQ_RCRI): credit back consumed release-ring entries, drop the
 * interrupt threshold to 0, and acknowledge the bit in the ISR. Returns the
 * status bits that were passed in (i.e. those handled here).
 */
static u32 poll_portal_slow(struct bman_portal *p, u32 is)
{
	const u32 handled = is;

	if (is & BM_PIRQ_RCRI) {
		bm_rcr_cce_update(&p->p);
		bm_rcr_set_ithresh(&p->p, 0);
		bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
		is &= ~BM_PIRQ_RCRI;
	}

	/* Every defined status bit must have been consumed above */
	DPAA_ASSERT(!is);
	return handled;
}
/*
 * bman_release - push up to 8 buffers back into a BMan pool.
 * @pool: the buffer pool to release into
 * @bufs: array of @num buffers to release
 * @num:  number of buffers, 1..8 (enforced by DPAA_ASSERT)
 *
 * Busy-waits (up to ~1ms, in 1us steps) for a free RCR entry on the affine
 * portal, fills it in and commits it with the valid-bit protocol. Returns 0
 * on success or -ETIMEDOUT if no ring entry became available.
 */
int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p;
	struct bm_rcr_entry *r;
	unsigned long irqflags;
	int avail, timeout = 1000; /* 1ms */
	int i = num - 1;

	DPAA_ASSERT(num > 0 && num <= 8);

	/* Claim a free RCR entry; if the ring looks nearly full, refresh the
	 * consumer index first to credit back entries hardware has consumed. */
	do {
		p = get_affine_portal();
		local_irq_save(irqflags);
		avail = bm_rcr_get_avail(&p->p);
		if (avail < 2)
			update_rcr_ci(p, avail);
		r = bm_rcr_start(&p->p);
		local_irq_restore(irqflags);
		put_affine_portal();
		if (likely(r))
			break;

		udelay(1);
	} while (--timeout);

	if (unlikely(!timeout))
		return -ETIMEDOUT;

	/* Re-take the affine portal to fill in and commit the claimed entry.
	 * NOTE(review): r is assumed to remain valid across the portal
	 * put/get above — presumably guaranteed by the portal affinity model;
	 * confirm against the portal implementation. */
	p = get_affine_portal();
	local_irq_save(irqflags);
	/*
	 * we can copy all but the first entry, as this can trigger badness
	 * with the valid-bit (so bufs[0] is set field-by-field instead of
	 * being memcpy'd wholesale)
	 */
	bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
	bm_buffer_set_bpid(r->bufs, pool->bpid);
	if (i)
		memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));

	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
			  (num & BM_RCR_VERB_BUFCOUNT_MASK));

	local_irq_restore(irqflags);
	put_affine_portal();
	return 0;
}
/*
 * Tear-down hook for the portal's release command ring (RCR). Compiles to
 * an empty function unless CONFIG_FSL_DPAA_CHECKING is set, in which case
 * it cross-checks the software producer/consumer state against the
 * hardware's PI/CI registers and reports any inconsistency: uncommitted
 * entries, unharvested completions, or a ring destroyed before quiescing.
 */
static void bm_rcr_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_rcr *rcr = &portal->rcr;
	int hw_idx;

	DPAA_ASSERT(!rcr->busy);

	/* Hardware producer index vs our cursor position */
	hw_idx = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	if (hw_idx != rcr_ptr2idx(rcr->cursor))
		pr_crit("losing uncommitted RCR entries\n");

	/* Hardware consumer index vs our cached CI */
	hw_idx = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	if (hw_idx != rcr->ci)
		pr_crit("missing existing RCR completions\n");

	/* Ring should be fully drained before destruction */
	if (rcr->ci != rcr_ptr2idx(rcr->cursor))
		pr_crit("RCR destroyed unquiesced\n");
#endif
}
/*
 * Poll for the result of a committed management command. Returns NULL while
 * the command is still pending, or a pointer to the completed response once
 * hardware has written it. On completion the response slot and valid-bit
 * are toggled for the next command, and (under CONFIG_FSL_DPAA_CHECKING)
 * the MC state machine returns to mc_idle.
 */
static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!__raw_readb(&rr->verb)) {
		/* Not done yet — re-prefetch so the next poll is cheap */
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	/* Flip to the other response slot and valid-bit for the next command */
	mc->rridx ^= 1;
	mc->vbit ^= BM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_idle;
#endif
	return rr;
}