static void common_vi_get_mappings(struct efrm_vi* vi_rs, struct efhw_nic* nic, struct efrm_vi_mappings* vm) { memset(vm, 0, sizeof(*vm)); vm->evq_size = vi_rs->q[EFHW_EVQ].capacity; if( vm->evq_size != 0 ) vm->evq_base = efhw_iopages_ptr(&vi_rs->q[EFHW_EVQ].pages); vm->timer_quantum_ns = nic->timer_quantum_ns; vm->rxq_prefix_len = vi_rs->rx_prefix_len; vm->rxq_size = vi_rs->q[EFHW_RXQ].capacity; if( vm->rxq_size != 0 ) vm->rxq_descriptors = efhw_iopages_ptr(&vi_rs->q[EFHW_RXQ].pages); vm->rx_ts_correction = vi_rs->rx_ts_correction; vm->txq_size = vi_rs->q[EFHW_TXQ].capacity; if( vm->txq_size != 0 ) vm->txq_descriptors = efhw_iopages_ptr(&vi_rs->q[EFHW_TXQ].pages); vm->out_flags = vi_rs->out_flags; }
/* Construct a kernel event queue: allocate its IObuffer, program the
 * NIC, and initialise the software ring state.
 *
 * \param nic          the NIC the queue lives on (Falcon arch only)
 * \param instance     event-queue instance number
 * \param evq          queue state; evq->hw.capacity must already be set
 * \param ev_handlers  event handlers; non-NULL makes the queue interrupting
 * \return 0 on success, negative error code if buffer allocation fails
 */
int efhw_keventq_ctor(struct efhw_nic *nic, int instance,
		      struct efhw_keventq *evq,
		      struct efhw_ev_handler *ev_handlers)
{
	unsigned int page_order;
	int rc;
	dma_addr_t dma_addrs[EFHW_BUFFER_TABLE_BLOCK_SIZE];
	unsigned buf_bytes = evq->hw.capacity * sizeof(efhw_event_t);

	/* Only the Falcon architecture is handled by this constructor. */
	EFHW_ASSERT(nic->devtype.arch == EFHW_ARCH_FALCON);
	EFHW_ASSERT(buf_bytes);
	page_order = get_order(buf_bytes);

	evq->instance = instance;
	evq->ev_handlers = ev_handlers;

	/* allocate an IObuffer for the eventq */
	/* NOTE(review): dma_addrs is presumably filled in by this call for
	 * use in efhw_nic_event_queue_enable() below — confirm that the
	 * helper's prototype matches this 5-argument call and that it
	 * writes 1 << page_order entries (bounded by
	 * EFHW_BUFFER_TABLE_BLOCK_SIZE). */
	rc = efhw_nic_event_queue_alloc_iobuffer(nic, &evq->hw, evq->instance,
						 dma_addrs, page_order);
	if (rc < 0)
		return rc;

	/* Zero the timer-value for this queue. AND Tell the nic about the
	 * event queue. */
	/* NOTE(review): return value ignored — confirm enabling cannot fail
	 * on this path. */
	efhw_nic_event_queue_enable(nic, evq->instance, evq->hw.capacity,
				    evq->hw.bt_block->btb_vaddr >>
				    EFHW_NIC_PAGE_SHIFT,
				    dma_addrs, 1 << page_order,
				    ev_handlers != NULL /* interrupting */,
				    1 /* dos protection enable*/,
				    0 /* not used on falcon */,
				    0 /* not used on falcon */,
				    0 /* not used on falcon */,
				    NULL /* not used on falcon */,
				    NULL /* not used on falcon */);

	/* Software ring state. */
	evq->lock = KEVQ_UNLOCKED;
	evq->evq_base = efhw_iopages_ptr(&evq->hw.iobuff);
	evq->evq_ptr = 0;
	/* Ring mask: assumes capacity * sizeof(efhw_event_t) is a power of
	 * two — TODO confirm; get_order() above only rounds to whole pages
	 * and does not itself guarantee this. */
	evq->evq_mask = (evq->hw.capacity * sizeof(efhw_event_t)) - 1u;

	EFHW_TRACE("%s: [%d] base=%p end=%p", __FUNCTION__, evq->instance,
		   evq->evq_base, evq->evq_base + buf_bytes);

	return 0;
}
/*! Helper function to allocate the iobuffer needed by an eventq * - it ensures the eventq has the correct alignment for the NIC * * \param rm Event-queue resource manager * \param instance Event-queue instance (index) * \param buf_bytes Requested size of eventq * \return < 0 if iobuffer allocation fails */ int efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic, struct eventq_resource_hardware *h, int evq_instance, unsigned buf_bytes) { unsigned int page_order; int rc; /* Allocate an iobuffer. */ page_order = get_order(buf_bytes); EFHW_TRACE("allocating eventq size %x", 1u << (page_order + PAGE_SHIFT)); rc = efhw_iopages_alloc(nic, &h->iobuff, page_order); if (rc < 0) { EFHW_WARN("%s: failed to allocate %u pages", __FUNCTION__, 1u << page_order); return rc; } /* Set the eventq pages to match EFHW_CLEAR_EVENT() */ if (EFHW_CLEAR_EVENT_VALUE) memset(efhw_iopages_ptr(&h->iobuff), EFHW_CLEAR_EVENT_VALUE, (1u << page_order) * PAGE_SIZE); EFHW_TRACE("%s: allocated %u pages", __FUNCTION__, 1u << (page_order)); /* For Falcon the NIC is programmed with the base buffer address of a * contiguous region of buffer space. This means that larger than a * PAGE event queues can be expected to allocate even when the host's * physical memory is fragmented */ EFHW_ASSERT(efhw_nic_have_hw(nic)); EFHW_ASSERT(page_order <= h->buf_tbl_alloc.order); /* Initialise the buffer table entries. */ falcon_nic_buffer_table_set_n(nic, h->buf_tbl_alloc.base, efhw_iopages_dma_addr(&h->iobuff), EFHW_NIC_PAGE_SIZE, 0, 1 << page_order, 0); falcon_nic_buffer_table_confirm(nic); return 0; }
static inline efhw_event_t * efrm_eventq_base(struct efrm_vi *virs) { return (efhw_event_t *) efhw_iopages_ptr(&virs->q[EFHW_EVQ].pages); }