void bman_test_high(void)
{
	struct bman_pool_params pparams = {
		.flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID,
		.cb = depletion_cb,
		.cb_ctx = POOL_OPAQUE,
	};
	int i, loops = LOOPS;

	bufs_init();
	pr_info("BMAN: --- starting high-level test ---\n");
	pool = bman_new_pool(&pparams);
	BUG_ON(!pool);

	/*******************/
	/* Release buffers */
	/*******************/
do_loop:
	i = 0;
	while (i < NUM_BUFS) {
		u32 flags = BMAN_RELEASE_FLAG_WAIT;
		int num = 8;

		if ((i + num) > NUM_BUFS)
			num = NUM_BUFS - i;
		if ((i + num) == NUM_BUFS)
			flags |= BMAN_RELEASE_FLAG_WAIT_SYNC;
		if (bman_release(pool, bufs_in + i, num, flags))
			panic("bman_release() failed\n");
		i += num;
	}

	/*******************/
	/* Acquire buffers */
	/*******************/
	while (i > 0) {
		int tmp, num = 8;

		if (num > i)
			num = i;
		tmp = bman_acquire(pool, bufs_out + i - num, num, 0);
		BUG_ON(tmp != num);
		i -= num;
	}
	i = bman_acquire(pool, NULL, 1, 0);
	BUG_ON(i > 0);

	bufs_confirm();
	if (--loops)
		goto do_loop;

	/************/
	/* Clean up */
	/************/
	bman_free_pool(pool);
	pr_info("BMAN: --- finished high-level test ---\n");
}
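/* A minimal sketch of the depletion callback the test wires up through
 * pparams.cb; the listing above does not show the real depletion_cb or
 * POOL_OPAQUE. The (pool, cb_ctx, depleted) signature follows the
 * bman_cb_depletion type used by the Linux BMan driver - treat it as an
 * assumption if your headers differ.
 */
static void depletion_cb(struct bman_pool *pool, void *cb_ctx, int depleted)
{
	/* cb_ctx is the pointer passed as pparams.cb_ctx (POOL_OPAQUE in
	 * the test); depleted is non-zero when the pool enters the
	 * depleted state and zero when it recovers.
	 */
	pr_info("BMAN: pool %p %s depletion (ctx %p)\n",
		pool, depleted ? "entered" : "exited", cb_ctx);
}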
void fsl_buffer_pool_fill(struct packet_pool_cfg *pool_cfg)
{
	int err;
	int loop;
	struct bman_pool *p;
	struct packet_pool_cfg *bp = pool_cfg;

	for (; bp->bpid != -1; bp++) {
		struct bm_buffer bufs[8];
		int num_bufs = 0;

		p = helper_pool[bp->bpid];
		err = 0;

		/* Drain the pool of anything already in it. */
		if (bp->num > 0) {
			do {
				/* Acquire is all-or-nothing, so drain in 8s,
				 * then in 1s for the remainder.
				 */
				if (err != 1)
					err = bman_acquire(p, bufs, 8, 0);
				if (err < 8)
					err = bman_acquire(p, bufs, 1, 0);
				if (err > 0)
					num_bufs += err;
			} while (err > 0);
		}
		if (num_bufs)
			TRACE("warn: drained %u bufs from BPID %d\n",
			      num_bufs, bp->bpid);

		/* Fill the pool */
		for (num_bufs = 0; num_bufs < bp->num;) {
			int rel = (bp->num - num_bufs) > 8 ? 8 :
				  (bp->num - num_bufs);

			for (loop = 0; loop < rel; loop++) {
				void *ptr = DMA_MEM_ALLOC(L1_CACHE_BYTES,
							  bp->size);

				if (!ptr) {
					TRACE("error: no space for bpid %d\n",
					      bp->bpid);
					return;
				}
				bm_buffer_set64(&bufs[loop], vtop(ptr));
			}
			do {
				err = bman_release(p, bufs, rel, 0);
			} while (err == -EBUSY);
			if (err)
				TRACE("error: release failure\n");
			num_bufs += rel;
		}
		TRACE("Released %u bufs to BPID %d\n", num_bufs, bp->bpid);
	}
}
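/* A hedged usage sketch for fsl_buffer_pool_fill(): the loop above walks
 * the table until it reaches bpid == -1, so the configuration array must
 * end with that sentinel entry. The BPIDs, buffer counts and sizes below
 * are illustrative values only, not ones taken from this listing.
 */
static struct packet_pool_cfg example_pool_cfg[] = {
	{ .bpid = 7, .num = 256, .size = 1728 },  /* hypothetical small-frame pool */
	{ .bpid = 8, .num = 64,  .size = 9600 },  /* hypothetical jumbo pool */
	{ .bpid = -1 },                           /* sentinel: stops the fill loop */
};

/* fsl_buffer_pool_fill(example_pool_cfg); */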
void *fsl_buffer_alloc(u32 pool_id)
{
	struct bm_buffer buf;
	int ret = bman_acquire(helper_pool[pool_id], &buf, 1, 0);

	if (ret < 0) {
		TRACE("bman acquire failure, pool %u, ret %d\n",
		      pool_id, ret);
		return NULL;
	}
	return (void *)ptov(buf.addr);
}
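/* Hypothetical release-side counterpart to fsl_buffer_alloc(); no such
 * helper appears in this listing, so take this as a sketch built only
 * from the calls already shown (vtop(), bm_buffer_set64() and
 * bman_release()). It inverts the alloc path: translate the virtual
 * pointer back to a physical address, pack it into a bm_buffer, and
 * release it to the same pool, retrying while the release ring is busy.
 */
void fsl_buffer_free(u32 pool_id, void *buf)
{
	struct bm_buffer bmb;
	int ret;

	bm_buffer_set64(&bmb, vtop(buf));
	do {
		ret = bman_release(helper_pool[pool_id], &bmb, 1, 0);
	} while (ret == -EBUSY);
	if (ret)
		TRACE("bman release failure, pool %u, ret %d\n",
		      pool_id, ret);
}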
static int dpaa_mbuf_create_pool(struct rte_mempool *mp)
{
	struct bman_pool *bp;
	struct bm_buffer bufs[8];
	struct dpaa_bp_info *bp_info;
	uint8_t bpid;
	int num_bufs = 0, ret = 0;
	struct bman_pool_params params = {
		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID
	};

	MEMPOOL_INIT_FUNC_TRACE();

	bp = bman_new_pool(&params);
	if (!bp) {
		DPAA_MEMPOOL_ERR("bman_new_pool() failed");
		return -ENODEV;
	}
	bpid = bman_get_params(bp)->bpid;

	/* Drain the pool of anything already in it. */
	do {
		/* Acquire is all-or-nothing, so we drain in 8s,
		 * then in 1s for the remainder.
		 */
		if (ret != 1)
			ret = bman_acquire(bp, bufs, 8, 0);
		if (ret < 8)
			ret = bman_acquire(bp, bufs, 1, 0);
		if (ret > 0)
			num_bufs += ret;
	} while (ret > 0);
	if (num_bufs)
		DPAA_MEMPOOL_WARN("drained %u bufs from BPID %d",
				  num_bufs, bpid);

	rte_dpaa_bpid_info[bpid].mp = mp;
	rte_dpaa_bpid_info[bpid].bpid = bpid;
	rte_dpaa_bpid_info[bpid].size = mp->elt_size;
	rte_dpaa_bpid_info[bpid].bp = bp;
	rte_dpaa_bpid_info[bpid].meta_data_size =
		sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
	rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
	rte_dpaa_bpid_info[bpid].ptov_off = 0;
	rte_dpaa_bpid_info[bpid].flags = 0;

	bp_info = rte_malloc(NULL, sizeof(struct dpaa_bp_info),
			     RTE_CACHE_LINE_SIZE);
	if (!bp_info) {
		DPAA_MEMPOOL_WARN("Memory allocation failed for bp_info");
		bman_free_pool(bp);
		return -ENOMEM;
	}

	rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid],
		   sizeof(struct dpaa_bp_info));
	mp->pool_data = (void *)bp_info;

	DPAA_MEMPOOL_INFO("BMAN pool created for bpid=%d", bpid);
	return 0;
}
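/* A hedged sketch of the matching teardown; the real driver's
 * dpaa_mbuf_free_pool() is not part of this listing. The point is the
 * symmetry with dpaa_mbuf_create_pool() above: release the BMan pool,
 * clear the rte_dpaa_bpid_info[] slot, and free the bp_info copy that
 * was stored in mp->pool_data.
 */
static void dpaa_mbuf_free_pool(struct rte_mempool *mp)
{
	struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);

	MEMPOOL_INIT_FUNC_TRACE();

	if (!bp_info)
		return;

	bman_free_pool(bp_info->bp);	/* return the BPID to BMan */
	rte_dpaa_bpid_info[bp_info->bpid].bp = NULL;
	rte_free(mp->pool_data);	/* the rte_malloc'd bp_info copy */
	mp->pool_data = NULL;
}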
static int dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
				void **obj_table, unsigned int count)
{
	struct rte_mbuf **m = (struct rte_mbuf **)obj_table;
	struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL];
	struct dpaa_bp_info *bp_info;
	void *bufaddr;
	int i, ret;
	unsigned int n = 0;

	bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
	DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d",
			     count, bp_info->bpid);

	if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) {
		DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers",
				 count);
		return -1;
	}

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
					 ret);
			return -1;
		}
	}

	while (n < count) {
		/* Acquire is all-or-nothing, so acquire in chunks of
		 * DPAA_MBUF_MAX_ACQ_REL, then the remainder.
		 */
		if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) {
			ret = bman_acquire(bp_info->bp, bufs,
					   DPAA_MBUF_MAX_ACQ_REL, 0);
		} else {
			ret = bman_acquire(bp_info->bp, bufs, count - n, 0);
		}
		/* If fewer buffers than requested are available in the
		 * pool, bman_acquire() returns 0.
		 */
		if (ret <= 0) {
			DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)", ret);
			/* The API expects the exact number of requested
			 * buffers, so release everything allocated so far.
			 */
			dpaa_mbuf_free_bulk(pool, obj_table, n);
			return -ENOBUFS;
		}
		/* Assign mbufs from the acquired objects. */
		for (i = 0; (i < ret) && bufs[i].addr; i++) {
			/* TODO: errata - observed that bufs may contain NULL
			 * addresses, i.e. the first buffer is valid but the
			 * remaining ones may be NULL.
			 */
			bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
			m[n] = (struct rte_mbuf *)((char *)bufaddr -
						   bp_info->meta_data_size);
			DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
					     (void *)bufaddr, (void *)m[n]);
			n++;
		}
	}

	DPAA_MEMPOOL_DPDEBUG("Allocated %d buffers from bpid=%d",
			     n, bp_info->bpid);
	return 0;
}
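/* A hedged sketch of the release path that the error handling above falls
 * back to; the real dpaa_mbuf_free_bulk() is not shown in this listing
 * (and, being static, it would need to be declared before
 * dpaa_mbuf_alloc_bulk() in a real source file). The sketch inverts the
 * alloc arithmetic: take the object's IOVA via DPDK's
 * rte_mempool_virt2iova(), skip past meta_data_size to reach the buffer
 * BMan originally handed out, and return it with bman_release(), retrying
 * while the release ring is busy. Portal-init checks are omitted for
 * brevity.
 */
static int dpaa_mbuf_free_bulk(struct rte_mempool *pool,
			       void *const *obj_table, unsigned int n)
{
	struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool);
	struct bm_buffer buf;
	unsigned int i;
	int ret;

	for (i = 0; i < n; i++) {
		/* The mbuf metadata sits in front of the BMan buffer, so
		 * add meta_data_size back before releasing to the pool.
		 */
		bm_buffer_set64(&buf, rte_mempool_virt2iova(obj_table[i]) +
				bp_info->meta_data_size);
		do {
			ret = bman_release(bp_info->bp, &buf, 1, 0);
		} while (ret == -EBUSY);
		if (ret)
			return ret;
	}
	return 0;
}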