int efab_vi_resource_mmap(struct efrm_vi *virs, unsigned long *bytes, void *opaque, int *map_num, unsigned long *offset, int index) { int rc = -EINVAL; EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0); ci_assert_equal((*bytes &~ CI_PAGE_MASK), 0); switch( index ) { case EFCH_VI_MMAP_IO: rc = efab_vi_rm_mmap_io(virs, bytes, opaque, map_num, offset); break; case EFCH_VI_MMAP_MEM: rc = efab_vi_rm_mmap_mem(virs, bytes, opaque, map_num, offset); break; case EFCH_VI_MMAP_PIO: rc = efab_vi_rm_mmap_pio(virs, bytes, opaque, map_num, offset); break; case EFCH_VI_MMAP_CTPIO: rc = efab_vi_rm_mmap_ctpio(virs, bytes, opaque, map_num, offset); break; default: ci_assert(0); } return rc; }
int efrm_eventq_register_callback(struct efrm_vi *virs, efrm_evq_callback_fn handler, void *arg) { struct efrm_nic_per_vi *cb_info; int instance; int bit; int rc = 0; EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0); EFRM_ASSERT(virs->q[EFHW_EVQ].capacity != 0); EFRM_ASSERT(handler != NULL); mutex_lock(®ister_evq_cb_mutex); if (virs->evq_callback_fn != NULL) { rc = -EBUSY; goto unlock_and_out; } virs->evq_callback_arg = arg; wmb(); virs->evq_callback_fn = handler; instance = virs->rs.rs_instance; cb_info = &efrm_nic(virs->rs.rs_client->nic)->vis[instance]; cb_info->vi = virs; bit = test_and_set_bit(VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED, &cb_info->state); EFRM_ASSERT(bit == 0); unlock_and_out: mutex_unlock(®ister_evq_cb_mutex); return rc; }
void efrm_eventq_kill_callback(struct efrm_vi *virs) { struct efrm_nic_per_vi *cb_info; int32_t evq_state; int instance; int bit; EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0); EFRM_ASSERT(virs->q[EFHW_EVQ].capacity != 0); EFRM_ASSERT(virs->rs.rs_client != NULL); mutex_lock(®ister_evq_cb_mutex); instance = virs->rs.rs_instance; cb_info = &efrm_nic(virs->rs.rs_client->nic)->vis[instance]; cb_info->vi = NULL; /* Disable the callback. */ #ifdef CONFIG_SFC_RESOURCE_VF if (virs->allocation.vf) spin_lock(&virs->allocation.vf->vf_evq_cb_lock); #endif bit = test_and_clear_bit(VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED, &cb_info->state); EFRM_ASSERT(bit); /* do not call me twice! */ #ifdef CONFIG_SFC_RESOURCE_VF if (virs->allocation.vf) spin_unlock(&virs->allocation.vf->vf_evq_cb_lock); #endif /* If the vi had been primed, unset it. */ test_and_clear_bit(VI_RESOURCE_EVQ_STATE_WAKEUP_PENDING, &cb_info->state); /* Spin until the callback is complete. */ do { rmb(); udelay(1); evq_state = cb_info->state; } while ((evq_state & VI_RESOURCE_EVQ_STATE(BUSY))); virs->evq_callback_fn = NULL; mutex_unlock(®ister_evq_cb_mutex); }
void efrm_vi_get_mappings(struct efrm_vi* vi, struct efrm_vi_mappings* vm) { struct efhw_nic *nic = vi->rs.rs_client->nic; EFRM_RESOURCE_ASSERT_VALID(&vi->rs, 0); common_vi_get_mappings(vi, nic, vm); switch( nic->devtype.arch ) { case EFHW_ARCH_FALCON: falcon_vi_get_mappings(vi, nic, vm); break; case EFHW_ARCH_EF10: ef10_vi_get_mappings(vi, nic, vm); break; default: EFRM_ASSERT(0); break; } }
int efrm_port_sniff(struct efrm_resource *rs, int enable, int promiscuous, int rss_context) { int rc; ci_int32 owner; struct efhw_nic *nic = rs->rs_client->nic; if( enable && !capable(CAP_NET_ADMIN) ) return -EPERM; /* Check that the current sniff owner is valid for the operation we're * doing, and mark the op as in progress. */ if( enable ) { if( ci_cas32_fail(&efrm_nic(nic)->rx_sniff_rxq, EFRM_PORT_SNIFF_NO_OWNER, EFRM_PORT_SNIFF_OP_IN_PROGRESS) ) return -EBUSY; } else { if( ci_cas32_fail(&efrm_nic(nic)->rx_sniff_rxq, rs->rs_instance, EFRM_PORT_SNIFF_OP_IN_PROGRESS) ) return -EBUSY; } EFRM_RESOURCE_ASSERT_VALID(rs, 0); rc = efhw_nic_set_port_sniff(nic, rs->rs_instance, enable, promiscuous, rss_context); if( (enable && rc == 0) || (!enable && rc != 0) ) owner = rs->rs_instance; else owner = EFRM_PORT_SNIFF_NO_OWNER; EFRM_VERIFY_EQ(ci_cas32_fail(&efrm_nic(nic)->rx_sniff_rxq, EFRM_PORT_SNIFF_OP_IN_PROGRESS, owner), 0); return rc; }
int efab_vi_resource_mmap_bytes(struct efrm_vi* virs, int map_type) { int bytes = 0; EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0); if( map_type == 0 ) { /* I/O mapping. */ bytes += CI_PAGE_SIZE; } else { /* Memory mapping. */ if( virs->q[EFHW_EVQ].capacity != 0 ) bytes += efhw_iopages_size(&virs->q[EFHW_EVQ].pages); if( virs->q[EFHW_TXQ].capacity ) bytes += efhw_iopages_size(&virs->q[EFHW_TXQ].pages); if( virs->q[EFHW_RXQ].capacity ) bytes += efhw_iopages_size(&virs->q[EFHW_RXQ].pages); } /* Round up to whole number of pages. */ return bytes; }
extern int efch_resource_id_lookup(efch_resource_id_t id, ci_resource_table_t *rt, efch_resource_t **out) { uint32_t index = id.index; efch_resource_t *rs; ci_assert(rt); ci_assert(out); if( id.index >= rt->resource_table_highwater ) return -EINVAL; /* NB. This needs no lock because resources cannot be detached from * a ci_resource_table_t. They can only go away when * the ci_resource_table_t is destroyed. */ if ((rs = rt->resource_table[index]) == NULL || rs->rs_base == NULL) return -ENOENT; EFRM_RESOURCE_ASSERT_VALID(rs->rs_base, 0); *out = rs; return 0; }
void efrm_filter_resource_free(struct filter_resource *frs) { struct efhw_nic *nic = frs->rs.rs_client->nic; int id; EFRM_RESOURCE_ASSERT_VALID(&frs->rs, 1); EFRM_TRACE("%s: " EFRM_RESOURCE_FMT, __func__, EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle)); efhw_nic_ipfilter_clear(nic, frs->filter_idx); frs->filter_idx = -1; efrm_vi_resource_release(frs->pt); /* Free this filter. */ id = EFRM_RESOURCE_INSTANCE(frs->rs.rs_handle); EFRM_VERIFY_EQ(kfifo_put(efrm_filter_manager->free_ids, (unsigned char *)&id, sizeof(id)), sizeof(id)); efrm_client_put(frs->rs.rs_client); EFRM_DO_DEBUG(memset(frs, 0, sizeof(*frs))); kfree(frs); }