/*
 * ptlrpc_fill_bulk_md - describe one MD-sized slice of a bulk descriptor.
 *
 * Populates @md with the @mdidx'th chunk of @desc, where each chunk covers
 * at most LNET_MAX_IOV fragments.  Sets md->length to the number of
 * fragments in this chunk and md->start to the first fragment, choosing
 * the encrypted vector when one is attached, otherwise the plain one.
 * Caller must not have set any iov-type flags in md->options yet.
 */
void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
			 int mdidx)
{
	int offset = mdidx * LNET_MAX_IOV;
	int nfrags;

	CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON);

	LASSERT(mdidx < desc->bd_md_max_brw);
	LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
	LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
				 LNET_MD_PHYS)));

	/* fragments left after the preceding MDs, capped at one MD's worth */
	nfrags = max(0, desc->bd_iov_count - offset);
	md->length = min_t(unsigned int, LNET_MAX_IOV, nfrags);

	if (!ptlrpc_is_bulk_desc_kiov(desc->bd_type)) {
		md->options |= LNET_MD_IOVEC;
		md->start = GET_ENC_KVEC(desc) ?
			    &BD_GET_ENC_KVEC(desc, offset) :
			    &BD_GET_KVEC(desc, offset);
	} else {
		md->options |= LNET_MD_KIOV;
		md->start = GET_ENC_KIOV(desc) ?
			    &BD_GET_ENC_KIOV(desc, offset) :
			    &BD_GET_KIOV(desc, offset);
	}
}
/*
 * sptlrpc_enc_pool_put_pages - return a descriptor's encryption pages to
 * the shared page pool.
 *
 * Releases every page referenced by @desc's encrypted kiov array back into
 * page_pools, then frees the kiov array itself.  A descriptor with no
 * encrypted kiov attached is a no-op.  The pool bookkeeping (epp_free_pages
 * and the epp_pools slots) is updated under epp_lock; the kiov array is
 * freed after the lock is dropped since it is private to @desc.
 */
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
	int p_idx, g_idx;
	int i;

	/* only kiov-type descriptors carry pool-backed encryption pages */
	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

	if (!GET_ENC_KIOV(desc))
		return;

	LASSERT(desc->bd_iov_count > 0);

	spin_lock(&page_pools.epp_lock);

	/* first free slot: pool index and index within that pool */
	p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
	g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

	/* returning these pages must not overflow the pool's capacity */
	LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
		page_pools.epp_total_pages);
	LASSERT(page_pools.epp_pools[p_idx]);

	for (i = 0; i < desc->bd_iov_count; i++) {
		LASSERT(BD_GET_ENC_KIOV(desc, i).bv_page);
		/* when we wrap into a new pool its array must exist */
		LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
		/* the target slot must currently be empty */
		LASSERT(!page_pools.epp_pools[p_idx][g_idx]);

		/* hand the page back to the pool slot */
		page_pools.epp_pools[p_idx][g_idx] =
			BD_GET_ENC_KIOV(desc, i).bv_page;

		/* advance to the next slot, wrapping to the next pool */
		if (++g_idx == PAGES_PER_POOL) {
			p_idx++;
			g_idx = 0;
		}
	}

	page_pools.epp_free_pages += desc->bd_iov_count;

	/* pages became available: wake anyone waiting to allocate */
	enc_pools_wakeup();

	spin_unlock(&page_pools.epp_lock);

	/* the kiov array is per-descriptor; free it outside the lock */
	kfree(GET_ENC_KIOV(desc));

	GET_ENC_KIOV(desc) = NULL;
}