/*
 * Compare two buffer descriptors by address, qsort-style:
 * returns <0, 0 or >0 for a < b, a == b, a > b respectively.
 *
 * On SoCs with Bman revision 2.0/2.1, Bman only respects the 40
 * LS-bits of buffer addresses, masking off the upper 8-bits on
 * release commands. The API provides for 48-bit addresses because
 * some SoCs support all 48-bits. When generating garbage addresses
 * for testing, we either need to zero the upper 8-bits when
 * releasing to Bman (otherwise we'll be disappointed when the
 * buffers we acquire back from Bman don't match), or we need to
 * mask the upper 8-bits off when comparing. We do the latter.
 */
static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
{
	uint64_t addr_a = bm_buffer_get64(a);
	uint64_t addr_b = bm_buffer_get64(b);

	if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) {
		/* Rev 2.x: compare only the bits Bman actually stored */
		addr_a &= BMAN_TOKEN_MASK;
		addr_b &= BMAN_TOKEN_MASK;
	}

	if (addr_a < addr_b)
		return -1;
	if (addr_a > addr_b)
		return 1;
	return 0;
}
/**
 * bman_release - release buffers to a Bman buffer pool
 * @pool: the pool to release the buffers into
 * @bufs: array of buffer descriptors to release
 * @num:  number of entries in @bufs; must be 1..8 (asserted below)
 *
 * Reserves one RCR (release command ring) entry on the affine portal,
 * copies the buffers into it with the pool's BPID, and commits it.
 * Spins for up to ~1ms (1000 x udelay(1)) waiting for ring space.
 *
 * Return: 0 on success, -ETIMEDOUT if no RCR entry became available.
 */
int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p;
	struct bm_rcr_entry *r;
	unsigned long irqflags;
	int avail, timeout = 1000; /* 1ms */
	int i = num - 1;

	DPAA_ASSERT(num > 0 && num <= 8);

	do {
		/* Portal access is bracketed by get/put and done with
		 * local interrupts disabled. */
		p = get_affine_portal();
		local_irq_save(irqflags);
		avail = bm_rcr_get_avail(&p->p);
		/* Running low on producer-visible space: refresh the
		 * consumer index from hardware before trying to start. */
		if (avail < 2)
			update_rcr_ci(p, avail);
		r = bm_rcr_start(&p->p);
		local_irq_restore(irqflags);
		put_affine_portal();
		/* r != NULL means an RCR entry was reserved */
		if (likely(r))
			break;

		udelay(1);
	} while (--timeout);

	if (unlikely(!timeout))
		return -ETIMEDOUT;

	/* NOTE(review): the portal is dropped and re-acquired between
	 * reserving @r and filling it — presumably the caller's context
	 * prevents migration to a different affine portal; confirm. */
	p = get_affine_portal();
	local_irq_save(irqflags);
	/*
	 * we can copy all but the first entry, as this can trigger badness
	 * with the valid-bit
	 */
	bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
	bm_buffer_set_bpid(r->bufs, pool->bpid);
	if (i)
		memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));

	/* Commit: single-BPID release verb plus the buffer count */
	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
			  (num & BM_RCR_VERB_BUFCOUNT_MASK));

	local_irq_restore(irqflags);
	put_affine_portal();
	return 0;
}