int efrm_filter_resource_alloc(struct vi_resource *vi_parent, struct filter_resource **frs_out) { struct filter_resource *frs; int rc, instance; EFRM_ASSERT(frs_out); EFRM_ASSERT(efrm_filter_manager); EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_filter_manager->rm); EFRM_ASSERT(vi_parent != NULL); EFRM_ASSERT(EFRM_RESOURCE_TYPE(vi_parent->rs.rs_handle) == EFRM_RESOURCE_VI); /* Allocate resource data structure. This is called in atomic * context by the onload driver. */ frs = kmalloc(sizeof(struct filter_resource), GFP_ATOMIC); if (!frs) return -ENOMEM; /* Allocate an instance. */ rc = kfifo_get(efrm_filter_manager->free_ids, (unsigned char *)&instance, sizeof(instance)); if (rc != sizeof(instance)) { EFRM_TRACE("%s: out of instances", __FUNCTION__); EFRM_ASSERT(rc == 0); rc = -EBUSY; goto fail1; } /* Initialise the resource DS. */ efrm_resource_init(&frs->rs, EFRM_RESOURCE_FILTER, instance); frs->pt = vi_parent; efrm_resource_ref(&frs->pt->rs); frs->filter_idx = -1; EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " VI %d", __FUNCTION__, EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle), EFRM_RESOURCE_INSTANCE(vi_parent->rs.rs_handle)); efrm_client_add_resource(vi_parent->rs.rs_client, &frs->rs); *frs_out = frs; return 0; fail1: memset(frs, 0, sizeof(*frs)); kfree(frs); return rc; }
int efrm_vf_resource_alloc(struct efrm_client *client, struct efrm_vf *linked, int use_iommu, struct efrm_vf **vf_out) { struct efrm_vf_nic_params *nic = &efrm_vf_manager->nic[client->nic->index]; struct efrm_vf *vf; int rc = 0; struct efrm_pd_owner_ids *owner_ids; if (nic->vf_count != nic->vfs_probed) { EFRM_ERR("%s: not all VFs for NIC %d are discovered yet: " "%d out of %d", __func__, client->nic->index, nic->vfs_probed, nic->vf_count); return -EBUSY; } spin_lock_bh(&efrm_vf_manager->rm.rm_lock); if (list_empty(&nic->free_list)) { spin_unlock_bh(&efrm_vf_manager->rm.rm_lock); return rc == 0 ? -ENOBUFS : rc; } vf = list_entry(nic->free_list.next, struct efrm_vf, link); list_del(&vf->link); spin_unlock_bh(&efrm_vf_manager->rm.rm_lock); rc = efrm_vf_alloc_init(vf, linked, use_iommu); if (rc != 0) { /* Scary warnings are already printed, just return */ /* Add to the tail of the list in hope another function * is better. */ list_add_tail(&vf->link, &efrm_vf_manager->nic[vf->nic_index].free_list); return rc; } EFRM_ASSERT(vf); EFRM_ASSERT(vf->irq_count); EFRM_ASSERT(vf->vi_count); rc = efrm_buddy_range_ctor(&vf->vi_instances, vf->vi_base, vf->vi_base + vf->vi_count); if (rc < 0) { EFRM_ERR("NIC %d VF %d: efrm_buddy_range_ctor(%d, %d) failed", client->nic->index, vf->pci_dev_fn, vf->vi_base, vf->vi_base + vf->vi_count); spin_lock_bh(&efrm_vf_manager->rm.rm_lock); list_add(&vf->link, &efrm_vf_manager->nic[vf->nic_index].free_list); spin_unlock_bh(&efrm_vf_manager->rm.rm_lock); return -ENOMEM; } EFRM_ASSERT(vf->rs.rs_ref_count == 0); efrm_resource_init(&vf->rs, EFRM_RESOURCE_VF, vf->pci_dev_fn); efrm_client_add_resource(client, &vf->rs); owner_ids = efrm_pd_owner_ids_ctor( /* On falcon owner_ids are global, so base this block on the * base vi id to avoid overlap. */ client->nic->devtype.arch == EFHW_ARCH_EF10 ? 
1 : vf->vi_base, (1 << vf->vi_scale)); if (!owner_ids) return -ENOMEM; vf->owner_ids = owner_ids; EFRM_TRACE("NIC %d VF %d allocated", client->nic->index, vf->pci_dev_fn); *vf_out = vf; return 0; }
/* Allocate a protection domain (PD).
 *
 * @pd_out:     on success, receives the new PD
 * @client_opt: client to attach the PD to; may be NULL if @vf_opt is given
 *              (in which case the VF's client is used — CONFIG_SFC_RESOURCE_VF
 *              builds only)
 * @vf_opt:     optional VF whose owner-id block the PD allocates from
 * @flags:      EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE and/or
 *              EFRM_PD_ALLOC_FLAG_HW_LOOPBACK; any other bit is -EINVAL
 *
 * Returns 0 on success; -EINVAL for bad flags, -ENOMEM on allocation
 * failure, -EBUSY when no owner id is free, or the error from
 * efrm_pd_stack_id_alloc().
 *
 * NOTE(review): when @vf_opt is supplied without @client_opt and
 * PHYS_ADDR_MODE is not set, client_opt->nic is dereferenced below while
 * still NULL — presumably such callers always pass PHYS_ADDR_MODE; confirm
 * against callers.
 */
int efrm_pd_alloc(struct efrm_pd **pd_out, struct efrm_client *client_opt,
		  struct efrm_vf *vf_opt, int flags)
{
	struct efrm_pd *pd;
	int rc, instance;
	struct efrm_pd_owner_ids *owner_ids;
	int orders_num = 0;

	/* At least one of the client / VF handles must identify the NIC. */
	EFRM_ASSERT((client_opt != NULL) || (vf_opt != NULL));
	if ((flags &
	     ~(EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE |
	       EFRM_PD_ALLOC_FLAG_HW_LOOPBACK)) != 0) {
		rc = -EINVAL;
		goto fail1;
	}

	/* Buffer-table managers are only needed when the PD translates
	 * addresses (i.e. not in physical-address mode); orders_num sizes
	 * the trailing bt_managers[] array. */
	if (!(flags & EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE)) {
		orders_num = efhw_nic_buffer_table_orders_num(
						client_opt->nic);
		EFRM_ASSERT(orders_num);
		EFRM_ASSERT(efhw_nic_buffer_table_orders(
						client_opt->nic)[0] == 0);
	}
	pd = kmalloc(sizeof(*pd) +
		     orders_num * sizeof(pd->bt_managers[0]), GFP_KERNEL);
	if (pd == NULL) {
		rc = -ENOMEM;
		goto fail1;
	}
	pd->stack_id = 0;

	/* Instance counter and owner-id pool are both guarded by the PD
	 * manager lock. */
	spin_lock_bh(&pd_manager->rm.rm_lock);
	instance = pd_manager->next_instance++;
	if (flags & EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE) {
		/* Phys mode needs no owner id; use the sentinel.
		 * NOTE(review): assumes OWNER_ID_PHYS_MODE !=
		 * OWNER_ID_ALLOC_FAIL, or the check below would misfire. */
		pd->owner_id = OWNER_ID_PHYS_MODE;
	} else {
#ifdef CONFIG_SFC_RESOURCE_VF
		/* Prefer the VF's private owner-id block when present. */
		if (vf_opt != NULL)
			owner_ids = vf_opt->owner_ids;
		else
#endif
			owner_ids = efrm_nic_from_client(client_opt)->owner_ids;
		EFRM_ASSERT(owner_ids != NULL);
		pd->owner_id = efrm_pd_owner_id_alloc(owner_ids);
	}
	spin_unlock_bh(&pd_manager->rm.rm_lock);
	if (pd->owner_id == OWNER_ID_ALLOC_FAIL) {
		rc = -EBUSY;
		goto fail2;
	}
#ifdef CONFIG_SFC_RESOURCE_VF
	pd->vf = vf_opt;
	if (pd->vf != NULL) {
		struct efrm_resource *vfrs = efrm_vf_to_resource(pd->vf);
		efrm_resource_ref(vfrs);
		/* From here on, the VF's client stands in for client_opt. */
		client_opt = vfrs->rs_client;
	}
#endif
	/* One buffer-table manager per supported page order. */
	if (!(flags & EFRM_PD_ALLOC_FLAG_PHYS_ADDR_MODE)) {
		int ord;
		for (ord = 0; ord < orders_num; ord++) {
			efrm_bt_manager_ctor(
				&pd->bt_managers[ord], pd->owner_id,
				efhw_nic_buffer_table_orders(
						client_opt->nic)[ord]
				);
		}
	}
	efrm_resource_init(&pd->rs, EFRM_RESOURCE_PD, instance);
	efrm_client_add_resource(client_opt, &pd->rs);

	/* NOTE(review): efrm_pd_os_stats_ctor() result is stored unchecked;
	 * presumably a NULL os_data is tolerated downstream — confirm. */
	pd->os_data = efrm_pd_os_stats_ctor(pd);
	pd->min_nic_order = 0;

#if EFX_DRIVERLINK_API_VERSION < 25
	pd->vport_id = EFRM_PD_VPORT_ID_NONE;
#else
	pd->vport_handle = EFRM_PD_VPORT_ID_NONE;
#endif

	mutex_init(&pd->remap_lock);

	/* HW loopback needs a stack id; on failure the PD is already
	 * published, so tear down via the normal release path. */
	if (flags & EFRM_PD_ALLOC_FLAG_HW_LOOPBACK) {
		if ((rc = efrm_pd_stack_id_alloc(pd)) != 0) {
			efrm_pd_release(pd);
			return rc;
		}
	}

	*pd_out = pd;
	return 0;

fail2:
	kfree(pd);
fail1:
	return rc;
}