/* Release all resources owned by a VI.
 *
 * Unmaps the I/O window and queue-memory mappings (when present), frees
 * the queue state block and, in debug builds, poisons the ef_vi struct.
 *
 * Returns 0 on success, or the negative error from ci_resource_munmap()
 * (in which case later resources are deliberately left untouched).
 */
int ef_vi_free(ef_vi* ep, ef_driver_handle fd)
{
  int ret;

  if( ep->vi_io_mmap_ptr != NULL ) {
    ret = ci_resource_munmap(fd, ep->vi_io_mmap_ptr, ep->vi_io_mmap_bytes);
    if( ret < 0 ) {
      LOGV(ef_log("%s: ci_resource_munmap %d", __FUNCTION__, ret));
      return ret;
    }
  }

  if( ep->vi_mem_mmap_ptr != NULL ) {
    /* TODO: support variable sized DMAQ and evq */
    ret = ci_resource_munmap(fd, ep->vi_mem_mmap_ptr, ep->vi_mem_mmap_bytes);
    if( ret < 0 ) {
      LOGVV(ef_log("%s: ci_resource_munmap iobuffer %d", __FUNCTION__, ret));
      return ret;
    }
  }

  free(ep->ep_state);
  EF_VI_DEBUG(memset(ep, 0, sizeof(*ep)));
  LOGVVV(ef_log("%s: DONE", __FUNCTION__));
  return 0;
}
/* Certain VI functionalities are only supported on certain NIC types. * This function validates that the requested functionality is present * on the selected NIC. */ static int check_nic_compatibility(unsigned vi_flags, unsigned ef_vi_arch) { switch (ef_vi_arch) { case EFHW_ARCH_FALCON: if (vi_flags & EF_VI_TX_PUSH_ALWAYS) { LOGVV(ef_log("%s: ERROR: TX PUSH ALWAYS flag not supported" " on FALCON architecture", __FUNCTION__)); return -EOPNOTSUPP; } if (vi_flags & EF_VI_RX_TIMESTAMPS) { LOGVV(ef_log("%s: ERROR: RX TIMESTAMPS flag not supported" " on FALCON architecture", __FUNCTION__)); return -EOPNOTSUPP; } if (vi_flags & EF_VI_TX_TIMESTAMPS) { LOGVV(ef_log("%s: ERROR: TX TIMESTAMPS flag not supported" " on FALCON architecture", __FUNCTION__)); return -EOPNOTSUPP; } return 0; case EFHW_ARCH_EF10: return 0; default: return -EINVAL; } }
/* Install a hardware filter directing matching traffic to this VI.
 *
 * Filters cannot be attached to an individual VI that belongs to a
 * cluster; in that case the request is ignored (with a warning) and 0
 * is returned so callers need not special-case clustered VIs.
 */
int ef_vi_filter_add(ef_vi *vi, ef_driver_handle dh, const ef_filter_spec *fs,
                     ef_filter_cookie *filter_cookie_out)
{
  if( vi->vi_clustered ) {
    ef_log("%s: WARNING: Ignored attempt to set a filter on a cluster",
           __FUNCTION__);
    return 0;
  }
  return ef_filter_add(dh, vi->vi_resource_id, fs, filter_cookie_out);
}
/* Return the MTU of the interface underlying this VI.
 *
 * Issues CI_RSOP_VI_GET_MTU to the driver.  On success returns the MTU
 * the driver reports.  On failure logs the error and returns 0 (never a
 * valid MTU): the previous code returned op.u.vi_get_mtu.out_mtu even
 * when the ioctl failed, i.e. uninitialized stack data, since op is not
 * zeroed before the call.
 */
unsigned ef_vi_mtu(ef_vi* vi, ef_driver_handle fd)
{
  ci_resource_op_t op;
  int rc;

  op.op = CI_RSOP_VI_GET_MTU;
  op.id = efch_make_resource_id(vi->vi_resource_id);
  rc = ci_resource_op(fd, &op);
  if( rc < 0 ) {
    LOGV(ef_log("%s: ci_resource_op %d", __FUNCTION__, rc));
    return 0;  /* out_mtu was never written by the driver */
  }
  return op.u.vi_get_mtu.out_mtu;
}
/* Fetch the MAC address of the interface underlying this VI.
 *
 * On success copies 6 bytes into mac_out and returns 0 (the ioctl's
 * return value).  On failure logs and returns the negative error code,
 * leaving mac_out untouched: the previous code performed the memcpy
 * unconditionally, copying uninitialized stack data on error since op
 * is not zeroed before the call.
 */
int ef_vi_get_mac(ef_vi* vi, ef_driver_handle dh, void* mac_out)
{
  ci_resource_op_t op;
  int rc;

  op.op = CI_RSOP_VI_GET_MAC;
  op.id = efch_make_resource_id(vi->vi_resource_id);
  rc = ci_resource_op(dh, &op);
  if( rc < 0 ) {
    LOGV(ef_log("%s: ci_resource_op %d", __FUNCTION__, rc));
    return rc;  /* don't copy garbage into mac_out */
  }
  memcpy(mac_out, op.u.vi_get_mac.out_mac, 6);
  return rc;
}
/* Initiate a flush of this VI's endpoint queues in the driver.
 *
 * Returns 0 on success or the negative error from ci_resource_op().
 */
int ef_vi_flush(ef_vi* ep, ef_driver_handle fd)
{
  ci_resource_op_t op;
  int ret;

  op.op = CI_RSOP_PT_ENDPOINT_FLUSH;
  op.id = efch_make_resource_id(ep->vi_resource_id);
  ret = ci_resource_op(fd, &op);
  if( ret >= 0 )
    return 0;

  LOGV(ef_log("ef_vi_flush: ci_resource_op %d", ret));
  return ret;
}
/* Set the transmit pacing value for this VI.
 *
 * 'val' is passed through to the driver unchanged (its units/semantics
 * are defined by the CI_RSOP_PT_ENDPOINT_PACE operation).
 * Returns 0 on success or the negative error from ci_resource_op().
 */
int ef_vi_pace(ef_vi* ep, ef_driver_handle fd, int val)
{
  ci_resource_op_t op;
  int ret;

  op.op = CI_RSOP_PT_ENDPOINT_PACE;
  op.id = efch_make_resource_id(ep->vi_resource_id);
  op.u.pt.pace = val;
  ret = ci_resource_op(fd, &op);
  if( ret >= 0 )
    return 0;

  LOGV(ef_log("ef_vi_pace: ci_resource_op %d", ret));
  return ret;
}
/* Allocate a VI resource from the driver, map its I/O window and queue
 * memory, and initialise the ef_vi structure.
 *
 * Capacities of -1 mean "driver default", optionally overridden by the
 * EF_VI_EVQ_SIZE / EF_VI_TXQ_SIZE / EF_VI_RXQ_SIZE environment variables
 * (EF_VI_IFINDEX similarly overrides a negative ifindex).  If 'evq' is
 * NULL the VI uses its own event queue.
 *
 * Returns the queue label (>= 0) on success or a negative error code.
 *
 * Fixes relative to the previous revision:
 *  - On a failure after queue-memory has been carved up (the
 *    get_ts_correction path), unmap from the original mapping base
 *    (mem_mmap_ptr_orig): mem_mmap_ptr is a cursor that has already been
 *    advanced past the event queue region, so unmapping it passed the
 *    wrong address to ci_resource_munmap().
 *  - Do not decrement evq->vi_qs_n at fail1: that counter is only
 *    incremented by ef_vi_add_queue(), which is never reached on an
 *    error path, so the decrement corrupted the evq's queue count.
 */
int __ef_vi_alloc(ef_vi* vi, ef_driver_handle vi_dh,
                  efch_resource_id_t pd_or_vi_set_id,
                  ef_driver_handle pd_or_vi_set_dh,
                  int index_in_vi_set, int ifindex, int evq_capacity,
                  int rxq_capacity, int txq_capacity,
                  ef_vi* evq, ef_driver_handle evq_dh,
                  int vi_clustered, enum ef_vi_flags vi_flags)
{
  struct ef_vi_nic_type nic_type;
  ci_resource_alloc_t ra;
  char *mem_mmap_ptr_orig, *mem_mmap_ptr;
  char *io_mmap_ptr, *io_mmap_base;
  ef_vi_state* state;
  int rc;
  const char* s;
  uint32_t* ids;
  void* p;
  int q_label;

  EF_VI_BUG_ON((evq == NULL) != (evq_capacity != 0));
  EF_VI_BUG_ON(! evq_capacity && ! rxq_capacity && ! txq_capacity);

  /* Ensure ef_vi_free() only frees what we allocate. */
  io_mmap_ptr = NULL;
  io_mmap_base = NULL;
  mem_mmap_ptr = mem_mmap_ptr_orig = NULL;

  /* The queue label is the next free slot in the event queue's table. */
  if( evq == NULL )
    q_label = 0;
  else if( (q_label = evq->vi_qs_n) == EF_VI_MAX_QS )
    return -EBUSY;

  /* Environment overrides, applied only where the caller asked for the
   * default. */
  if( ifindex < 0 && (s = getenv("EF_VI_IFINDEX")) )
    ifindex = atoi(s);
  if( evq_capacity == -1 )
    evq_capacity = (s = getenv("EF_VI_EVQ_SIZE")) ? atoi(s) : -1;
  if( txq_capacity == -1 )
    txq_capacity = (s = getenv("EF_VI_TXQ_SIZE")) ? atoi(s) : -1;
  if( rxq_capacity == -1 )
    rxq_capacity = (s = getenv("EF_VI_RXQ_SIZE")) ? atoi(s) : -1;

  if( evq_capacity == -1 && (vi_flags & EF_VI_RX_PACKED_STREAM) )
    /* At time of writing we're doing this at user-level as well as in
     * driver.  Ultimately we want this default to be applied in the
     * driver so we don't have to know this magic number (which may change
     * in future).  For now we also apply it here so that the default will
     * be applied when running against a 201405-u1 driver.  This can be
     * removed once the driver ABI changes. */
    evq_capacity = 32768;

  /* Allocate resource and mmap. */
  memset(&ra, 0, sizeof(ra));
  ef_vi_set_intf_ver(ra.intf_ver, sizeof(ra.intf_ver));
  ra.ra_type = EFRM_RESOURCE_VI;
  ra.u.vi_in.ifindex = ifindex;
  ra.u.vi_in.pd_or_vi_set_fd = pd_or_vi_set_dh;
  ra.u.vi_in.pd_or_vi_set_rs_id = pd_or_vi_set_id;
  ra.u.vi_in.vi_set_instance = index_in_vi_set;
  ra.u.vi_in.ps_buf_size_kb = (vi_flags & EF_VI_RX_PS_BUF_SIZE_64K) ?
                              64 : 1024;
  if( evq != NULL ) {
    ra.u.vi_in.evq_fd = evq_dh;
    ra.u.vi_in.evq_rs_id = efch_make_resource_id(evq->vi_resource_id);
  }
  else {
    ra.u.vi_in.evq_fd = -1;
    evq = vi;  /* this VI supplies its own event queue */
  }
  ra.u.vi_in.evq_capacity = evq_capacity;
  ra.u.vi_in.txq_capacity = txq_capacity;
  ra.u.vi_in.rxq_capacity = rxq_capacity;
  ra.u.vi_in.tx_q_tag = q_label;
  ra.u.vi_in.rx_q_tag = q_label;
  ra.u.vi_in.flags = vi_flags_to_efab_flags(vi_flags);

  rc = ci_resource_alloc(vi_dh, &ra);
  if( rc < 0 ) {
    LOGVV(ef_log("%s: ci_resource_alloc %d", __FUNCTION__, rc));
    goto fail1;
  }

  /* The driver may have adjusted the capacities; use what it gave us. */
  evq_capacity = ra.u.vi_out.evq_capacity;
  txq_capacity = ra.u.vi_out.txq_capacity;
  rxq_capacity = ra.u.vi_out.rxq_capacity;

  rc = -ENOMEM;
  state = malloc(ef_vi_calc_state_bytes(rxq_capacity, txq_capacity));
  if( state == NULL )
    goto fail1;

  if( ra.u.vi_out.io_mmap_bytes ) {
    rc = ci_resource_mmap(vi_dh, ra.out_id.index, EFCH_VI_MMAP_IO,
                          ra.u.vi_out.io_mmap_bytes, &p);
    if( rc < 0 ) {
      LOGVV(ef_log("%s: ci_resource_mmap (io) %d", __FUNCTION__, rc));
      goto fail2;
    }
    {
      /* On systems with large pages, multiple VI windows are mapped into
       * each system page.  Therefore the VI window may not appear at the
       * start of the I/O mapping. */
      int inst_in_iopage = 0;
      int vi_windows_per_page = CI_PAGE_SIZE / 8192;
      if( vi_windows_per_page > 1 )
        inst_in_iopage = ra.u.vi_out.instance & (vi_windows_per_page - 1);
      io_mmap_base = (char*) p;
      io_mmap_ptr = io_mmap_base + inst_in_iopage * 8192;
    }
  }

  if( ra.u.vi_out.mem_mmap_bytes ) {
    rc = ci_resource_mmap(vi_dh, ra.out_id.index, EFCH_VI_MMAP_MEM,
                          ra.u.vi_out.mem_mmap_bytes, &p);
    if( rc < 0 ) {
      LOGVV(ef_log("%s: ci_resource_mmap (mem) %d", __FUNCTION__, rc));
      goto fail3;
    }
    mem_mmap_ptr = mem_mmap_ptr_orig = (char*) p;
  }

  rc = ef_vi_arch_from_efhw_arch(ra.u.vi_out.nic_arch);
  EF_VI_BUG_ON(rc < 0);
  nic_type.arch = (unsigned char) rc;
  nic_type.variant = ra.u.vi_out.nic_variant;
  nic_type.revision = ra.u.vi_out.nic_revision;

  rc = check_nic_compatibility(vi_flags, nic_type.arch);
  if( rc != 0 )
    goto fail4;

  /* The DMA-id array lives immediately after the VI state block. */
  ids = (void*) (state + 1);

  ef_vi_init(vi, nic_type.arch, nic_type.variant, nic_type.revision,
             vi_flags, state);
  ef_vi_init_out_flags(vi, (ra.u.vi_out.out_flags &
                            EFHW_VI_CLOCK_SYNC_STATUS) ?
                           EF_VI_OUT_CLOCK_SYNC_STATUS : 0);
  ef_vi_init_io(vi, io_mmap_ptr);

  /* Carve the event queue, RX ring and TX ring out of the single memory
   * mapping, advancing the mem_mmap_ptr cursor past each page-aligned
   * region.  mem_mmap_ptr_orig keeps the base for later cleanup. */
  if( evq_capacity ) {
    ef_vi_init_evq(vi, evq_capacity, mem_mmap_ptr);
    mem_mmap_ptr += ((evq_capacity * sizeof(efhw_event_t) +
                      CI_PAGE_SIZE - 1) & CI_PAGE_MASK);
  }

  if( rxq_capacity ) {
    ef_vi_init_rxq(vi, rxq_capacity, mem_mmap_ptr, ids,
                   ra.u.vi_out.rx_prefix_len);
    mem_mmap_ptr += (ef_vi_rx_ring_bytes(vi) + CI_PAGE_SIZE-1) & CI_PAGE_MASK;
    ids += rxq_capacity;
    if( vi_flags & EF_VI_RX_TIMESTAMPS ) {
      int rx_ts_correction;
      rc = get_ts_correction(vi_dh, ra.out_id.index, &rx_ts_correction);
      if( rc < 0 )
        goto fail4;
      ef_vi_init_rx_timestamping(vi, rx_ts_correction);
    }
  }

  if( txq_capacity )
    ef_vi_init_txq(vi, txq_capacity, mem_mmap_ptr, ids);

  vi->vi_io_mmap_ptr = io_mmap_base;
  vi->vi_mem_mmap_ptr = mem_mmap_ptr_orig;
  vi->vi_io_mmap_bytes = ra.u.vi_out.io_mmap_bytes;
  vi->vi_mem_mmap_bytes = ra.u.vi_out.mem_mmap_bytes;
  vi->vi_resource_id = ra.out_id.index;
  if( ra.u.vi_out.out_flags & EFHW_VI_PS_BUF_SIZE_SET )
    vi->vi_ps_buf_size = ra.u.vi_out.ps_buf_size;
  else
    vi->vi_ps_buf_size = 1024 * 1024;
  BUG_ON(vi->vi_ps_buf_size != 64*1024 &&
         vi->vi_ps_buf_size != 1024*1024);
  vi->vi_clustered = vi_clustered;
  vi->vi_i = ra.u.vi_out.instance;
  ef_vi_init_state(vi);
  rc = ef_vi_add_queue(evq, vi);
  BUG_ON(rc != q_label);

  vi->vi_is_packed_stream = !! (vi_flags & EF_VI_RX_PACKED_STREAM);
  if( vi->vi_is_packed_stream )
    ef_vi_packed_stream_update_credit(vi);

  return q_label;

 fail4:
  /* Unmap from the original base: mem_mmap_ptr may already have been
   * advanced past the event queue region. */
  if( mem_mmap_ptr_orig != NULL )
    ci_resource_munmap(vi_dh, mem_mmap_ptr_orig, ra.u.vi_out.mem_mmap_bytes);
 fail3:
  if( io_mmap_base != NULL )
    ci_resource_munmap(vi_dh, io_mmap_base, ra.u.vi_out.io_mmap_bytes);
 fail2:
  free(state);
 fail1:
  /* evq->vi_qs_n is only incremented by ef_vi_add_queue(), which is never
   * reached on these paths, so there is nothing to roll back here. */
  return rc;
}
/* Allocate a protection domain on the given interface.
 *
 * 'flags' may be overridden by the EF_VI_PD_FLAGS environment variable
 * ("vf", "phys" or "default").  EF_PD_VF implies EF_PD_PHYS_MODE.
 * On success fills in *pd (including a heap-allocated pd_intf_name that
 * the caller's free path owns) and returns 0; on failure returns a
 * negative error code.
 *
 * Fix: capture errno immediately after if_indextoname() fails — the
 * previous code called free() and the logger first, either of which may
 * clobber errno, and then returned -errno.  Also null pd_intf_name after
 * freeing it so the struct is not left holding a dangling pointer.
 */
static int __ef_pd_alloc(ef_pd* pd, ef_driver_handle pd_dh, int ifindex,
                         enum ef_pd_flags flags, int vlan_id)
{
  ci_resource_alloc_t ra;
  const char* s;
  int rc;

  /* Environment override for experimentation. */
  if( (s = getenv("EF_VI_PD_FLAGS")) != NULL ) {
    if( ! strcmp(s, "vf") )
      flags = EF_PD_VF;
    else if( ! strcmp(s, "phys") )
      flags = EF_PD_PHYS_MODE;
    else if( ! strcmp(s, "default") )
      flags = 0;
  }

  /* A VF can only use physical addressing mode. */
  if( flags & EF_PD_VF )
    flags |= EF_PD_PHYS_MODE;

  memset(&ra, 0, sizeof(ra));
  ef_vi_set_intf_ver(ra.intf_ver, sizeof(ra.intf_ver));
  ra.ra_type = EFRM_RESOURCE_PD;
  ra.u.pd.in_ifindex = ifindex;
  ra.u.pd.in_flags = 0;
  if( flags & EF_PD_VF )
    ra.u.pd.in_flags |= EFCH_PD_FLAG_VF;
  if( flags & EF_PD_PHYS_MODE )
    ra.u.pd.in_flags |= EFCH_PD_FLAG_PHYS_ADDR;
  if( flags & EF_PD_RX_PACKED_STREAM )
    ra.u.pd.in_flags |= EFCH_PD_FLAG_RX_PACKED_STREAM;
  if( flags & EF_PD_VPORT )
    ra.u.pd.in_flags |= EFCH_PD_FLAG_VPORT;
  if( flags & EF_PD_MCAST_LOOP )
    ra.u.pd.in_flags |= EFCH_PD_FLAG_MCAST_LOOP;
  if( flags & EF_PD_MEMREG_64KiB )
    /* FIXME: We're overloading the packed-stream flag here.  The only
     * effect it has is to force ef_memreg to use at least 64KiB buffer
     * table entries.  Unfortunately this won't work if the adapter is not
     * in packed-stream mode. */
    ra.u.pd.in_flags |= EFCH_PD_FLAG_RX_PACKED_STREAM;
  ra.u.pd.in_vlan_id = vlan_id;

  rc = ci_resource_alloc(pd_dh, &ra);
  if( rc < 0 ) {
    LOGVV(ef_log("ef_pd_alloc: ci_resource_alloc %d", rc));
    return rc;
  }

  pd->pd_flags = flags;
  pd->pd_resource_id = ra.out_id.index;

  pd->pd_intf_name = malloc(IF_NAMESIZE);
  if( pd->pd_intf_name == NULL ) {
    LOGVV(ef_log("ef_pd_alloc: malloc failed"));
    return -ENOMEM;
  }
  if( if_indextoname(ifindex, pd->pd_intf_name) == NULL ) {
    /* Capture errno before free()/logging can clobber it. */
    rc = -errno;
    free(pd->pd_intf_name);
    pd->pd_intf_name = NULL;
    LOGVV(ef_log("ef_pd_alloc: if_indextoname failed %d", -rc));
    return rc;
  }

  pd->pd_cluster_name = NULL;
  pd->pd_cluster_sock = -1;
  pd->pd_cluster_dh = 0;
  pd->pd_cluster_viset_resource_id = 0;
  return 0;
}