/*
 * Seed the bufs_in[] test array with distinct 48-bit tokens (index 0 gets
 * token 0) and reset the received-buffer counter.
 */
static void bufs_init(void)
{
	int loop;

	for (loop = 0; loop < NUM_BUFS; loop++)
		bm_buffer_set64(&bufs_in[loop], 0xfedc01234567LLU * loop);
	bufs_received = 0;
}
/*
 * Seed every buffer pool in the pool_cfg table (terminated by bpid == -1):
 * first drain any stale buffers already sitting in the pool, then allocate
 * and release bp->num fresh buffers of bp->size bytes into it.
 *
 * NOTE(review): on allocation failure this returns immediately; buffers
 * already allocated for the current batch (and any drained buffers) are not
 * reclaimed — confirm that is acceptable for this init-time path.
 */
void fsl_buffer_pool_fill(struct packet_pool_cfg *pool_cfg)
{
	int err;
	int loop;
	struct bman_pool *p;
	struct packet_pool_cfg *bp = pool_cfg;

	for (; bp->bpid != -1; bp++) {
		struct bm_buffer bufs[8];
		int num_bufs = 0;

		p = helper_pool[bp->bpid];
		err = 0;
		/* Drain the pool of anything already in it. */
		if (bp->num > 0) {
			do {
				/*
				 * Acquire is tried 8-wide first, then 1-wide
				 * for the remainder; once a 1-wide acquire
				 * succeeded (err == 1) the 8-wide attempt is
				 * skipped on later iterations.
				 */
				if (err != 1)
					err = bman_acquire(p, bufs, 8, 0);
				if (err < 8)
					err = bman_acquire(p, bufs, 1, 0);
				if (err > 0)
					num_bufs += err;
			} while (err > 0);
		}
		if (num_bufs)
			TRACE("warn: drained %u bufs from BPID %d\n",
			      num_bufs, bp->bpid);
		/* Fill the pool */
		for (num_bufs = 0; num_bufs < bp->num;) {
			/* Release in batches of at most 8 buffers. */
			int rel = (bp->num - num_bufs) > 8 ? 8 :
				  (bp->num - num_bufs);
			for (loop = 0; loop < rel; loop++) {
				void *ptr = DMA_MEM_ALLOC(L1_CACHE_BYTES,
							  bp->size);
				if (!ptr) {
					TRACE("error: no space for bpid %d\n",
					      bp->bpid);
					return;
				}
				/* The pool stores physical addresses. */
				bm_buffer_set64(&bufs[loop], vtop(ptr));
			}
			/* bman_release is retried while the portal is busy. */
			do {
				err = bman_release(p, bufs, rel, 0);
			} while (err == -EBUSY);
			if (err)
				TRACE("error: release failure\n");
			num_bufs += rel;
		}
		TRACE("Release %u bufs to BPID %d\n", num_bufs, bp->bpid);
	}
}
/* Drop a frame (releases buffers to Bman) */
static inline void drop_frame(const struct qm_fd *fd)
{
	struct bm_buffer buf;

	/* Only contiguous-format frames are handled here. */
	BUG_ON(fd->format != qm_fd_contig);
	bm_buffer_set64(&buf, qm_fd_addr(fd));
	/*
	 * Bman can be transiently busy; back off and retry until the
	 * buffer is accepted.
	 */
	while (bman_release(pool[fd->bpid], &buf, 1, 0) != 0)
		cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
}
/*
 * Release @num (1..8) buffers from @bufs into @pool via the affine
 * portal's RCR ring.
 *
 * Returns 0 on success, or -ETIMEDOUT if no RCR entry became available
 * after ~1ms of 1us-spaced retries.
 */
int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p;
	struct bm_rcr_entry *r;
	unsigned long irqflags;
	int avail, timeout = 1000; /* 1ms */
	int i = num - 1;

	DPAA_ASSERT(num > 0 && num <= 8);

	/* Poll (IRQs off per attempt) until an RCR entry can be started. */
	do {
		p = get_affine_portal();
		local_irq_save(irqflags);
		avail = bm_rcr_get_avail(&p->p);
		/* Running low: refresh the cached consumer index first. */
		if (avail < 2)
			update_rcr_ci(p, avail);
		r = bm_rcr_start(&p->p);
		local_irq_restore(irqflags);
		put_affine_portal();
		if (likely(r))
			break;

		udelay(1);
	} while (--timeout);
	if (unlikely(!timeout))
		return -ETIMEDOUT;

	/*
	 * NOTE(review): the portal is dropped and re-acquired between
	 * reserving @r and filling it below — this assumes the caller
	 * context stays bound to the same affine portal; confirm.
	 */
	p = get_affine_portal();
	local_irq_save(irqflags);
	/*
	 * we can copy all but the first entry, as this can trigger badness
	 * with the valid-bit
	 */
	bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
	bm_buffer_set_bpid(r->bufs, pool->bpid);
	if (i)
		memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));

	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
			  (num & BM_RCR_VERB_BUFCOUNT_MASK));

	local_irq_restore(irqflags);
	put_affine_portal();
	return 0;
}
/*
 * Return one buffer at physical address @addr to the Bman pool described
 * by @bp_info, spinning (with backoff) while Bman reports busy.
 */
static void dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
{
	struct bm_buffer buf;
	int ret;

	/*
	 * Fix: @addr is uint64_t, so "%lx" mismatches on ILP32 targets
	 * (undefined behavior per C11 fprintf); print via a portable
	 * "%llx" with an explicit widening cast.
	 */
	DPAA_MEMPOOL_DEBUG("Free 0x%llx to bpid: %d",
			   (unsigned long long)addr, bp_info->bpid);

	bm_buffer_set64(&buf, addr);
retry:
	ret = bman_release(bp_info->bp, &buf, 1, 0);
	if (ret) {
		/* Portal busy: back off briefly, then try again. */
		DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying...");
		cpu_spin(CPU_SPIN_BACKOFF_CYCLES);
		goto retry;
	}
}
int dpa_bp_shared_port_seed(struct dpa_bp *bp) { /* In MAC-less and Shared-MAC scenarios the physical * address of the buffer pool in device tree is set * to 0 to specify that another entity (USDPAA) will * allocate and seed the buffers */ if (!bp->paddr) return 0; /* allocate memory region for buffers */ devm_request_mem_region(bp->dev, bp->paddr, bp->size * bp->config_count, KBUILD_MODNAME); bp->vaddr = devm_ioremap_prot(bp->dev, bp->paddr, bp->size * bp->config_count, 0); if (bp->vaddr == NULL) { pr_err("Could not map memory for pool %d\n", bp->bpid); return -EIO; } /* seed pool with buffers from that memory region */ if (bp->seed_pool) { int count = bp->target_count; size_t addr = bp->paddr; while (count) { struct bm_buffer bufs[8]; int num_bufs = 0; do { BUG_ON(addr > 0xffffffffffffull); bufs[num_bufs].bpid = bp->bpid; bm_buffer_set64(&bufs[num_bufs++], addr); addr += bp->size; } while (--count && (num_bufs < 8)); while (bman_release(bp->pool, bufs, num_bufs, 0)) cpu_relax(); } } return 0; }