static void filter_rm_dtor(struct efrm_resource_manager *rm)
{
        EFRM_TRACE("%s:", __func__);

        EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_filter_manager->rm);
        EFRM_ASSERT(&efrm_filter_manager->rm == rm);

        kfifo_vfree(efrm_filter_manager->free_ids);
        EFRM_TRACE("%s: done", __func__);
}
int efrm_filter_resource_alloc(struct vi_resource *vi_parent,
                               struct filter_resource **frs_out)
{
        struct filter_resource *frs;
        int rc, instance;

        EFRM_ASSERT(frs_out);
        EFRM_ASSERT(efrm_filter_manager);
        EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_filter_manager->rm);
        EFRM_ASSERT(vi_parent != NULL);
        EFRM_ASSERT(EFRM_RESOURCE_TYPE(vi_parent->rs.rs_handle) ==
                    EFRM_RESOURCE_VI);

        /* Allocate resource data structure.  This is called in atomic
         * context by the onload driver. */
        frs = kmalloc(sizeof(struct filter_resource), GFP_ATOMIC);
        if (!frs)
                return -ENOMEM;

        /* Allocate an instance. */
        rc = kfifo_get(efrm_filter_manager->free_ids,
                       (unsigned char *)&instance, sizeof(instance));
        if (rc != sizeof(instance)) {
                EFRM_TRACE("%s: out of instances", __func__);
                EFRM_ASSERT(rc == 0);
                rc = -EBUSY;
                goto fail1;
        }

        /* Initialise the resource DS. */
        efrm_resource_init(&frs->rs, EFRM_RESOURCE_FILTER, instance);
        frs->pt = vi_parent;
        efrm_resource_ref(&frs->pt->rs);
        frs->filter_idx = -1;

        EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " VI %d", __func__,
                   EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle),
                   EFRM_RESOURCE_INSTANCE(vi_parent->rs.rs_handle));

        efrm_client_add_resource(vi_parent->rs.rs_client, &frs->rs);
        *frs_out = frs;
        return 0;

fail1:
        memset(frs, 0, sizeof(*frs));
        kfree(frs);
        return rc;
}
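/*
 * Illustrative sketch (not part of the driver): one way a caller might pair
 * efrm_filter_resource_alloc() above with efrm_filter_resource_free()
 * (defined later in this file).  The wrapper name and the elided middle
 * section are hypothetical.
 */
#if 0
static int example_filter_use(struct vi_resource *vi)
{
        struct filter_resource *frs;
        int rc;

        rc = efrm_filter_resource_alloc(vi, &frs);
        if (rc < 0)
                return rc;      /* -ENOMEM or -EBUSY from the alloc above */

        /* ... program and use the hardware filter ... */

        /* Drops the VI and client references taken by the alloc. */
        efrm_filter_resource_free(frs);
        return 0;
}
#endif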
void efrm_vf_init_nic_params(struct efhw_nic *nic,
                             const struct vi_resource_dimensions *res_dim)
{
        struct efrm_vf_nic_params *nic_params =
                &efrm_vf_manager->nic[nic->index];
#ifndef NDEBUG
        int nic_index;

        /* Sanity check the nic index to ensure it's been initialised before
         * we got here. */
        for (nic_index = 0; nic_index < EFHW_MAX_NR_DEVS; ++nic_index)
                if (efrm_nic_tablep->nic[nic_index] != NULL &&
                    memcmp(efrm_nic_tablep->nic[nic_index]->mac_addr,
                           nic->mac_addr, ETH_ALEN) == 0)
                        break;
        EFRM_ASSERT(nic_index < EFHW_MAX_NR_DEVS);
#endif

        EFRM_TRACE("vf_vi_base=%u vf_vi_scale=%u vf_count=%u",
                   res_dim->vf_vi_base, res_dim->vf_vi_scale,
                   res_dim->vf_count);

        nic_params->vi_base = res_dim->vf_vi_base;
        nic_params->vi_scale = res_dim->vf_vi_scale;
        nic_params->vf_count = res_dim->vf_count;
}
static int efrm_pd_stack_id_alloc(struct efrm_pd *pd)
{
        struct efrm_nic *nic = efrm_nic(pd->rs.rs_client->nic);
        const int word_bitcount = sizeof(*nic->stack_id_usage) * 8;
        int i, v, bitno, id;

        spin_lock(&nic->lock);
        /* Find the first usage word that still has a clear bit. */
        for (i = 0;
             i < sizeof(nic->stack_id_usage) / sizeof(*nic->stack_id_usage) &&
             ((v = nic->stack_id_usage[i]) == ~0u);
             ++i)
                ;
        bitno = v ? ci_ffs64(~v) - 1 : 0;
        id = i * word_bitcount + bitno + 1;
        if (id <= EFRM_MAX_STACK_ID)
                nic->stack_id_usage[i] |= 1 << bitno;
        spin_unlock(&nic->lock);

        if (id > EFRM_MAX_STACK_ID) {
                /* We have run out of stack ids, so suppression of self
                 * traffic is not possible. */
                EFRM_TRACE("%s: WARNING: no free stack ids", __func__);
                pd->stack_id = 0;
                return -ENOMEM;
        }
        pd->stack_id = id;
        return 0;
}
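/*
 * Illustrative sketch (not part of the driver): the inverse of the id
 * arithmetic above.  Since id = word * word_bitcount + bit + 1, releasing a
 * previously allocated id amounts to clearing one bit.  The helper name is
 * hypothetical and the driver's real release path is not shown here.
 */
#if 0
static void example_stack_id_clear(struct efrm_nic *nic, int id)
{
        const int word_bitcount = sizeof(*nic->stack_id_usage) * 8;
        int word = (id - 1) / word_bitcount;
        int bit = (id - 1) % word_bitcount;

        spin_lock(&nic->lock);
        nic->stack_id_usage[word] &= ~(1 << bit);
        spin_unlock(&nic->lock);
}
#endif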
void efrm_vf_resource_free(struct efrm_vf *vf)
{
        EFRM_TRACE("NIC %d VF %d free", vf->rs.rs_client->nic->index,
                   vf->pci_dev_fn);
        EFRM_ASSERT(vf->rs.rs_ref_count == 0);

        efrm_buddy_dtor(&vf->vi_instances);
        efrm_vf_free_reset(vf);

        spin_lock_bh(&efrm_vf_manager->rm.rm_lock);
        list_add(&vf->link, &efrm_vf_manager->nic[vf->nic_index].free_list);
        spin_unlock_bh(&efrm_vf_manager->rm.rm_lock);

        efrm_client_put(vf->rs.rs_client);
        efrm_pd_owner_ids_dtor(vf->owner_ids);
}
int efrm_create_filter_resource_manager(struct efrm_resource_manager **rm_out)
{
        int rc;

        EFRM_ASSERT(rm_out);

        efrm_filter_manager =
                kmalloc(sizeof(struct filter_resource_manager), GFP_KERNEL);
        if (efrm_filter_manager == NULL)
                return -ENOMEM;
        memset(efrm_filter_manager, 0, sizeof(*efrm_filter_manager));

        rc = efrm_resource_manager_ctor(&efrm_filter_manager->rm,
                                        filter_rm_dtor, "FILTER",
                                        EFRM_RESOURCE_FILTER);
        if (rc < 0)
                goto fail1;

        /* Create a pool of free instances. */
        rc = efrm_kfifo_id_ctor(&efrm_filter_manager->free_ids,
                                0, EFHW_IP_FILTER_NUM,
                                &efrm_filter_manager->rm.rm_lock);
        if (rc != 0)
                goto fail2;

        *rm_out = &efrm_filter_manager->rm;
        EFRM_TRACE("%s: filter resources created - %d IDs", __func__,
                   kfifo_len(efrm_filter_manager->free_ids));
        return 0;

fail2:
        efrm_resource_manager_dtor(&efrm_filter_manager->rm);
fail1:
        memset(efrm_filter_manager, 0, sizeof(*efrm_filter_manager));
        kfree(efrm_filter_manager);
        return rc;
}
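/*
 * Illustrative sketch (not part of the driver): init/teardown pairing for
 * the manager created above, assuming teardown goes through
 * efrm_resource_manager_dtor(), which presumably invokes the filter_rm_dtor()
 * hook registered with the ctor.  The function and variable names below are
 * hypothetical.
 */
#if 0
static struct efrm_resource_manager *example_filter_rm;

static int example_filter_rm_init(void)
{
        return efrm_create_filter_resource_manager(&example_filter_rm);
}

static void example_filter_rm_fini(void)
{
        efrm_resource_manager_dtor(example_filter_rm);
}
#endif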
void efrm_filter_resource_free(struct filter_resource *frs)
{
        struct efhw_nic *nic = frs->rs.rs_client->nic;
        int id;

        EFRM_RESOURCE_ASSERT_VALID(&frs->rs, 1);
        EFRM_TRACE("%s: " EFRM_RESOURCE_FMT, __func__,
                   EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle));

        efhw_nic_ipfilter_clear(nic, frs->filter_idx);
        frs->filter_idx = -1;
        efrm_vi_resource_release(frs->pt);

        /* Return this filter's instance id to the free pool. */
        id = EFRM_RESOURCE_INSTANCE(frs->rs.rs_handle);
        EFRM_VERIFY_EQ(kfifo_put(efrm_filter_manager->free_ids,
                                 (unsigned char *)&id, sizeof(id)),
                       sizeof(id));

        efrm_client_put(frs->rs.rs_client);
        EFRM_DO_DEBUG(memset(frs, 0, sizeof(*frs)));
        kfree(frs);
}
int efrm_vf_resource_alloc(struct efrm_client *client,
                           struct efrm_vf *linked, int use_iommu,
                           struct efrm_vf **vf_out)
{
        struct efrm_vf_nic_params *nic =
                &efrm_vf_manager->nic[client->nic->index];
        struct efrm_vf *vf;
        int rc = 0;
        struct efrm_pd_owner_ids *owner_ids;

        if (nic->vf_count != nic->vfs_probed) {
                EFRM_ERR("%s: not all VFs for NIC %d are discovered yet: "
                         "%d out of %d", __func__, client->nic->index,
                         nic->vfs_probed, nic->vf_count);
                return -EBUSY;
        }

        spin_lock_bh(&efrm_vf_manager->rm.rm_lock);
        if (list_empty(&nic->free_list)) {
                spin_unlock_bh(&efrm_vf_manager->rm.rm_lock);
                return rc == 0 ? -ENOBUFS : rc;
        }
        vf = list_entry(nic->free_list.next, struct efrm_vf, link);
        list_del(&vf->link);
        spin_unlock_bh(&efrm_vf_manager->rm.rm_lock);

        rc = efrm_vf_alloc_init(vf, linked, use_iommu);
        if (rc != 0) {
                /* Scary warnings are already printed, just return.  Put the
                 * VF back at the tail of the free list in the hope that
                 * another PCI function is in better shape. */
                spin_lock_bh(&efrm_vf_manager->rm.rm_lock);
                list_add_tail(&vf->link,
                              &efrm_vf_manager->nic[vf->nic_index].free_list);
                spin_unlock_bh(&efrm_vf_manager->rm.rm_lock);
                return rc;
        }

        EFRM_ASSERT(vf);
        EFRM_ASSERT(vf->irq_count);
        EFRM_ASSERT(vf->vi_count);

        rc = efrm_buddy_range_ctor(&vf->vi_instances,
                                   vf->vi_base, vf->vi_base + vf->vi_count);
        if (rc < 0) {
                EFRM_ERR("NIC %d VF %d: efrm_buddy_range_ctor(%d, %d) failed",
                         client->nic->index, vf->pci_dev_fn,
                         vf->vi_base, vf->vi_base + vf->vi_count);
                spin_lock_bh(&efrm_vf_manager->rm.rm_lock);
                list_add(&vf->link,
                         &efrm_vf_manager->nic[vf->nic_index].free_list);
                spin_unlock_bh(&efrm_vf_manager->rm.rm_lock);
                return -ENOMEM;
        }

        EFRM_ASSERT(vf->rs.rs_ref_count == 0);
        efrm_resource_init(&vf->rs, EFRM_RESOURCE_VF, vf->pci_dev_fn);
        efrm_client_add_resource(client, &vf->rs);

        owner_ids = efrm_pd_owner_ids_ctor(
                /* On Falcon owner ids are global, so base this block on the
                 * base VI id to avoid overlap. */
                client->nic->devtype.arch == EFHW_ARCH_EF10 ?
                        1 : vf->vi_base,
                1 << vf->vi_scale);
        if (!owner_ids)
                return -ENOMEM;
        vf->owner_ids = owner_ids;

        EFRM_TRACE("NIC %d VF %d allocated",
                   client->nic->index, vf->pci_dev_fn);
        *vf_out = vf;
        return 0;
}
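/*
 * Illustrative sketch (not part of the driver): a minimal caller of
 * efrm_vf_resource_alloc() above.  The wrapper name is hypothetical, and
 * linked/use_iommu are left at their simplest values.
 */
#if 0
static int example_vf_alloc(struct efrm_client *client, struct efrm_vf **vf_out)
{
        /* No linked VF, no IOMMU, for simplicity. */
        int rc = efrm_vf_resource_alloc(client, NULL, 0, vf_out);

        /* Expected failures from the alloc above: -EBUSY while VF discovery
         * is still in progress, -ENOBUFS when the per-NIC free list is empty,
         * -ENOMEM on ctor failure (other codes can come back from
         * efrm_vf_alloc_init()). */
        if (rc < 0)
                return rc;

        /* The VF goes back on the free list via efrm_vf_resource_free()
         * (earlier in this file) once its last reference is dropped; how
         * that release is reached is not shown here. */
        return 0;
}
#endif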