static void oo_iobufset_resource_free(struct oo_iobufset *rs, int reset_pending)
{
  /* Undo the DMA mapping set up at allocation time.  All pages in the set
   * share one compound order, so the order of the first page is
   * representative of the whole set. */
  efrm_pd_dma_unmap(rs->pd, rs->pages->n_bufs,
                    EFHW_GFP_ORDER_TO_NIC_ORDER(
                                        compound_order(rs->pages->pages[0])),
                    &rs->dma_addrs[0], sizeof(rs->dma_addrs[0]),
                    &rs->buf_tbl_alloc, reset_pending);

  if (rs->pd != NULL)
    efrm_pd_release(rs->pd);
  oo_iobufset_pages_release(rs->pages);
  oo_iobufset_free_memory(rs);
}
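/* For context: a minimal sketch of how a free routine like this is typically
 * reached in refcounted resource code.  The wrapper name
 * oo_iobufset_resource_release() and the ref_count field (an atomic_t, with
 * atomic_dec_and_test() from <linux/atomic.h>) are assumptions for
 * illustration, not taken from the source above. */
static void oo_iobufset_resource_release(struct oo_iobufset *rs,
                                         int reset_pending)
{
  /* Hypothetical: free only when the last reference is dropped. */
  if (atomic_dec_and_test(&rs->ref_count))
    oo_iobufset_resource_free(rs, reset_pending);
}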
static int vi_set_rm_alloc(ci_resource_alloc_t* alloc_,
                           ci_resource_table_t* priv_opt,
                           efch_resource_t* rs, int intf_ver_id)
{
  struct efch_vi_set_alloc* alloc = &alloc_->u.vi_set;
  struct efrm_client *client;
  struct efrm_vi_set* vi_set;
  struct efrm_pd* pd;
  unsigned vi_props;
  int rc;

  if( intf_ver_id >= 1 && alloc->in_pd_fd >= 0 ) {
    /* Caller supplied an existing PD: look it up and take a reference on
     * its client.  (Named pd_rs so it does not shadow the efch_resource_t*
     * parameter.) */
    struct efrm_resource* pd_rs;
    rc = efch_lookup_rs(alloc->in_pd_fd, alloc->in_pd_rs_id,
                        EFRM_RESOURCE_PD, &pd_rs);
    if( rc < 0 ) {
      EFCH_ERR("%s: ERROR: could not find PD fd=%d id="EFCH_RESOURCE_ID_FMT
               " rc=%d", __FUNCTION__, alloc->in_pd_fd,
               EFCH_RESOURCE_ID_PRI_ARG(alloc->in_pd_rs_id), rc);
      goto fail1;
    }
    pd = efrm_pd_from_resource(pd_rs);
    client = pd_rs->rs_client;
    efrm_client_add_ref(client);
  }
  else {
    /* No PD supplied: resolve the client from the ifindex and allocate a
     * fresh PD in buffer-table (non-physical) addressing mode. */
    rc = efrm_client_get(alloc->in_ifindex, NULL, NULL, &client);
    if( rc != 0 ) {
      EFCH_ERR("%s: ERROR: ifindex=%d not found rc=%d", __FUNCTION__,
               alloc->in_ifindex, rc);
      goto fail1;
    }
    rc = efrm_pd_alloc(&pd, client, NULL/*vf_opt*/, 0/*phys_addr_mode*/);
    if( rc != 0 ) {
      EFCH_ERR("%s: ERROR: efrm_pd_alloc(ifindex=%d) failed (rc=%d)",
               __FUNCTION__, alloc->in_ifindex, rc);
      goto fail2;
    }
  }

  vi_props = 0;
  rc = efrm_vi_set_alloc(pd, alloc->in_n_vis, vi_props, &vi_set);
  if( rc != 0 )
    goto fail3;

  /* The VI set now holds its own references; drop ours. */
  efrm_client_put(client);
  efrm_pd_release(pd);

  efch_filter_list_init(&rs->vi_set.fl);
  rs->vi_set.sniff_flags = 0;
  rs->rs_base = efrm_vi_set_to_resource(vi_set);
  return 0;

 fail3:
  efrm_pd_release(pd);
 fail2:
  efrm_client_put(client);
 fail1:
  return rc;
}
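/* A minimal sketch of building the request that reaches vi_set_rm_alloc().
 * Only the efch_vi_set_alloc fields referenced above (in_pd_fd, in_pd_rs_id,
 * in_n_vis, in_ifindex) are taken from the source; the function name, the
 * efch_resource_id_t type name, and the constants are illustrative
 * assumptions, and the ioctl plumbing that carries the request is omitted. */
static void example_fill_vi_set_request(ci_resource_alloc_t* ra,
                                        int pd_fd,
                                        efch_resource_id_t pd_rs_id)
{
  memset(ra, 0, sizeof(*ra));
  ra->u.vi_set.in_n_vis = 4;           /* VIs to spread load across */
  /* Either reuse an existing PD (the intf_ver_id >= 1 path above)... */
  ra->u.vi_set.in_pd_fd = pd_fd;
  ra->u.vi_set.in_pd_rs_id = pd_rs_id;
  /* ...or set in_pd_fd = -1 and fill in_ifindex to have a PD allocated. */
}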
int efrm_pd_alloc(struct efrm_pd **pd_out, struct efrm_client *client_opt,
		  struct efrm_vf *vf_opt, int flags)
{
	struct efrm_pd *pd;
	int rc, instance;
	struct efrm_pd_owner_ids *owner_ids;
	int orders_num = 0;

	EFRM_ASSERT((client_opt != NULL) || (vf_opt != NULL));

	if ((flags &
	     ~(EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE |
	       EFRM_PD_ALLOC_FLAG_HW_LOOPBACK)) != 0) {
		rc = -EINVAL;
		goto fail1;
	}

	/* Buffer-table mode needs one bt_manager per buffer-table order
	 * supported by the NIC; size the allocation accordingly. */
	if (!(flags & EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE)) {
		orders_num = efhw_nic_buffer_table_orders_num(
						client_opt->nic);
		EFRM_ASSERT(orders_num);
		EFRM_ASSERT(efhw_nic_buffer_table_orders(
						client_opt->nic)[0] == 0);
	}
	pd = kmalloc(sizeof(*pd) + orders_num * sizeof(pd->bt_managers[0]),
		     GFP_KERNEL);
	if (pd == NULL) {
		rc = -ENOMEM;
		goto fail1;
	}
	pd->stack_id = 0;

	spin_lock_bh(&pd_manager->rm.rm_lock);
	instance = pd_manager->next_instance++;
	if (flags & EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE) {
		pd->owner_id = OWNER_ID_PHYS_MODE;
	}
	else {
#ifdef CONFIG_SFC_RESOURCE_VF
		if (vf_opt != NULL)
			owner_ids = vf_opt->owner_ids;
		else
#endif
			owner_ids = efrm_nic_from_client(client_opt)->owner_ids;
		EFRM_ASSERT(owner_ids != NULL);
		pd->owner_id = efrm_pd_owner_id_alloc(owner_ids);
	}
	spin_unlock_bh(&pd_manager->rm.rm_lock);
	if (pd->owner_id == OWNER_ID_ALLOC_FAIL) {
		rc = -EBUSY;
		goto fail2;
	}
#ifdef CONFIG_SFC_RESOURCE_VF
	pd->vf = vf_opt;
	if (pd->vf != NULL) {
		struct efrm_resource *vfrs = efrm_vf_to_resource(pd->vf);
		efrm_resource_ref(vfrs);
		client_opt = vfrs->rs_client;
	}
#endif
	if (!(flags & EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE)) {
		int ord;
		for (ord = 0; ord < orders_num; ord++) {
			efrm_bt_manager_ctor(
				&pd->bt_managers[ord], pd->owner_id,
				efhw_nic_buffer_table_orders(
						client_opt->nic)[ord]);
		}
	}
	efrm_resource_init(&pd->rs, EFRM_RESOURCE_PD, instance);
	efrm_client_add_resource(client_opt, &pd->rs);

	pd->os_data = efrm_pd_os_stats_ctor(pd);
	pd->min_nic_order = 0;

#if EFX_DRIVERLINK_API_VERSION < 25
	pd->vport_id = EFRM_PD_VPORT_ID_NONE;
#else
	pd->vport_handle = EFRM_PD_VPORT_ID_NONE;
#endif

	mutex_init(&pd->remap_lock);

	if (flags & EFRM_PD_ALLOC_FLAG_HW_LOOPBACK) {
		if ((rc = efrm_pd_stack_id_alloc(pd)) != 0) {
			/* The PD is fully constructed by this point, so
			 * release it rather than kfree it. */
			efrm_pd_release(pd);
			return rc;
		}
	}

	*pd_out = pd;
	return 0;

fail2:
	kfree(pd);
fail1:
	return rc;
}
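/* A minimal usage sketch pairing efrm_pd_alloc() with efrm_pd_release(),
 * using only the signature shown above.  The function name and the "client"
 * parameter are illustrative; a real caller obtains the efrm_client via
 * efrm_client_get() as in vi_set_rm_alloc() above. */
static int example_pd_user(struct efrm_client *client)
{
	struct efrm_pd *pd;
	int rc;

	/* flags == 0 selects buffer-table addressing; pass
	 * EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE to skip owner-id and
	 * buffer-table-manager setup. */
	rc = efrm_pd_alloc(&pd, client, NULL /*vf_opt*/, 0);
	if (rc != 0)
		return rc;

	/* ... map buffers and allocate VIs against this pd ... */

	efrm_pd_release(pd);
	return 0;
}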
/* Allocate a new cluster.
 *
 * You need to hold the thc_mutex before calling this.
 */
static int thc_alloc(const char* cluster_name, int protocol, int port_be16,
                     uid_t euid, int cluster_size, unsigned flags,
                     tcp_helper_cluster_t** thc_out)
{
  int rc, i;
  int rss_flags;
  struct efrm_pd* pd;
  int packet_buffer_mode = flags & THC_FLAG_PACKET_BUFFER_MODE;
  int tproxy = flags & THC_FLAG_TPROXY;
  int hw_loopback_enable = flags & THC_FLAG_HW_LOOPBACK_ENABLE;

  tcp_helper_cluster_t* thc = kmalloc(sizeof(*thc), GFP_KERNEL);
  if( thc == NULL )
    return -ENOMEM;
  memset(thc, 0, sizeof(*thc));
  ci_dllist_init(&thc->thc_tlos);
  /* NB: assumes the caller has already checked that cluster_name fits in
   * thc_name. */
  strcpy(thc->thc_name, cluster_name);
  thc->thc_cluster_size = cluster_size;
  thc->thc_euid = euid;
  thc->thc_flags = flags;

  /* Needed to protect against oo_nics changes */
  rtnl_lock();

  for( i = 0; i < CI_CFG_MAX_REGISTER_INTERFACES; ++i ) {
    if( oo_nics[i].efrm_client == NULL ||
        ! oo_check_nic_suitable_for_onload(&(oo_nics[i])) )
      continue;
    if( (rc = efrm_pd_alloc(&pd, oo_nics[i].efrm_client, NULL,
                (packet_buffer_mode ? EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE : 0) |
                (hw_loopback_enable ? EFRM_PD_ALLOC_FLAG_HW_LOOPBACK : 0))) )
      goto fail;
    /* Currently we move on with default RSS mode if we fail to get the
     * special tproxy RSS mode on an interface (e.g. Siena, Huntington with
     * old firmware, or when we have run out of RSS contexts). */
    rss_flags = tproxy ? EFRM_RSS_MODE_DST | EFRM_RSS_MODE_SRC :
                EFRM_RSS_MODE_DEFAULT;
 redo:
    rc = efrm_vi_set_alloc(pd, thc->thc_cluster_size,
                           0, rss_flags, &thc->thc_vi_set[i]);
    if( rc != 0 && rss_flags ) {
      LOG_E(ci_log("Installing special RSS mode filter failed on hwport %d, "
                   "falling back to default mode.  Transparent proxy will not "
                   "work with this interface.", i));
      rss_flags = 0;
      goto redo;
    }
    efrm_pd_release(pd);
    if( rc != 0 )
      goto fail;
  }
  rtnl_unlock();

  thc->thc_next = thc_head;
  thc_head = thc;
  *thc_out = thc;
  return 0;

 fail:
  rtnl_unlock();
  for( i = 0; i < CI_CFG_MAX_REGISTER_INTERFACES; ++i )
    if( thc->thc_vi_set[i] != NULL )
      efrm_vi_set_release(thc->thc_vi_set[i]);
  kfree(thc);
  return rc;
}
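/* A minimal sketch of the lookup-before-allocate pattern thc_alloc() is part
 * of: callers walk the thc_head list under thc_mutex and call thc_alloc()
 * only on a miss.  The helper name thc_search_by_name() and matching on name
 * alone are assumptions for illustration; a real lookup would also compare
 * protocol, port, and euid. */
static tcp_helper_cluster_t* thc_search_by_name(const char* cluster_name)
{
  tcp_helper_cluster_t* thc;

  /* thc_mutex must be held, as for thc_alloc() itself. */
  for( thc = thc_head; thc != NULL; thc = thc->thc_next )
    if( strcmp(thc->thc_name, cluster_name) == 0 )
      return thc;
  return NULL;
}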