/** Allocate queue n and return handle for queue manager */
static void setup_queue(void)
{
    struct e10k_queue_ops ops = {
        .update_txtail = update_txtail,
        .update_rxtail = update_rxtail
    };
    size_t tx_size, txhwb_size, rx_size;
    void *tx_virt, *txhwb_virt, *rx_virt;
    vregion_flags_t flags;
    uint8_t vector, core;
    errval_t err;

    INITDEBUG("setup_queue\n");

    // Decide on which flags to use for the mappings
    flags = (cache_coherence ? VREGION_FLAGS_READ_WRITE :
                               VREGION_FLAGS_READ_WRITE_NOCACHE);

    // Allocate memory for descriptor rings
    tx_size = e10k_q_tdesc_legacy_size * NTXDESCS;
    tx_virt = alloc_map_frame(flags, tx_size, &tx_frame);
    assert(tx_virt != NULL);

    rx_size = e10k_q_rdesc_legacy_size * NRXDESCS;
    rx_virt = alloc_map_frame(flags, rx_size, &rx_frame);
    assert(rx_virt != NULL);
#ifdef PRINT_QUEUES
    glbl_rx_virt = rx_virt;
    glbl_rx_size = rx_size;
#endif

    // Allocate memory for the TX head write-back location, if enabled
    txhwb_virt = NULL;
    if (use_txhwb) {
        INITDEBUG("Using transmit write-back\n");
        txhwb_size = BASE_PAGE_SIZE;
        txhwb_virt = alloc_map_frame(flags, txhwb_size, &txhwb_frame);
        assert(txhwb_virt != NULL);
        // The device only writes back a single 32-bit head index
        memset(txhwb_virt, 0, sizeof(uint32_t));
    }

    // Initialize queue manager
    q = e10k_queue_init(tx_virt, NTXDESCS, txhwb_virt, rx_virt, NRXDESCS,
                        &ops, NULL);

    if (use_interrupts && use_msix) {
        INITDEBUG("Enabling MSI-X interrupts\n");
        err = pci_setup_inthandler(interrupt_handler, NULL, &vector);
        assert(err_is_ok(err));
        core = disp_get_core_id();
    } else {
        if (use_interrupts) {
            INITDEBUG("Enabling legacy interrupts\n");
        }
        vector = 0;
        core = 0;
    }

    // Register memory with device manager
    idc_register_queue_memory(qi, tx_frame, txhwb_frame, rx_frame, RXBUFSZ,
                              vector, core);
}
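/*
 * Illustrative sketch (not from the driver source): the e10k_queue_ops
 * struct above hands the generic queue manager a pair of callbacks that it
 * invokes whenever the TX or RX tail index advances, so this driver can
 * mirror the new index to the device. The exact callback signature and the
 * idc_set_txtail() helper are assumptions for illustration; 'qi' is the
 * queue index used elsewhere in this file.
 */
static void example_update_txtail(void *opaque, size_t tail)
{
    // Forward the new tail index to the card driver, which owns the
    // device registers for this queue (hypothetical helper).
    idc_set_txtail(qi, tail);
}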
/**
 * \brief globally enables the interrupts for the given device
 *
 * \param dev  IOAT DMA device
 * \param type the interrupt type to enable
 */
errval_t ioat_dma_device_irq_setup(struct ioat_dma_device *dev,
                                   dma_irq_t type)
{
    errval_t err;

    ioat_dma_intrctrl_t intcrtl = 0;
    intcrtl = ioat_dma_intrctrl_intp_en_insert(intcrtl, 1);

    dev->common.irq_type = type;
    switch (type) {
        case DMA_IRQ_MSIX:
            /* The number of MSI-X vectors should equal the number of channels */
            IOATDEV_DEBUG("MSI-X interrupt setup for device (%u, %u, %u)\n",
                          dev->common.id, dev->pci_addr.bus,
                          dev->pci_addr.device, dev->pci_addr.function);

            err = pci_msix_enable_addr(&dev->pci_addr, &dev->irq_msix_count);
            if (err_is_fail(err)) {
                return err;
            }
            assert(dev->irq_msix_count > 0);

            IOATDEV_DEBUG("MSI-X enabled #vecs=%d\n", dev->common.id,
                          dev->irq_msix_count);

            err = pci_setup_inthandler(ioat_dma_device_irq_handler, dev,
                                       &dev->irq_msix_vector);
            assert(err_is_ok(err));

            uint8_t dest = get_local_apic_id();

            IOATDEV_DEBUG("MSI-X routing to apic=%u\n", dev->common.id, dest);

            err = pci_msix_vector_init_addr(&dev->pci_addr, 0, dest,
                                            dev->irq_msix_vector);
            assert(err_is_ok(err));

            /* enable the interrupts */
            intcrtl = ioat_dma_intrctrl_msix_vec_insert(intcrtl, 1);
            intcrtl = ioat_dma_intrctrl_intp_en_insert(intcrtl, 1);
            break;
        case DMA_IRQ_MSI:
            IOATDEV_DEBUG("Initializing MSI interrupts\n", dev->common.id);
            assert(!"NYI");
            break;
        case DMA_IRQ_INTX:
            IOATDEV_DEBUG("Initializing INTx interrupts\n", dev->common.id);
            assert(!"NYI");
            break;
        default:
            /* disabled */
            intcrtl = 0;
            IOATDEV_DEBUG("Disabling interrupts\n", dev->common.id);
            break;
    }

    ioat_dma_intrctrl_wr(&dev->device, intcrtl);

#if IOAT_DEBUG_INTR_ENABLED
    /*
     * check if interrupts are working.
     */
    msix_intr_happened = 0;

    struct ioat_dma_channel *chan;
    chan = (struct ioat_dma_channel *) dev->common.channels.c[0];

    ioat_dma_request_nop_chan(chan);
    err = ioat_dma_channel_issue_pending(chan);
    if (err_is_fail(err)) {
        return err;
    }

    while (msix_intr_happened == 0) {
        uint64_t status = ioat_dma_channel_get_status(chan);
        err = event_dispatch_non_block(get_default_waitset());

        if (!ioat_dma_channel_is_active(status)
            && !ioat_dma_channel_is_idle(status)) {
            USER_PANIC("DMA request turned channel into erroneous state.");
        }

        switch (err_no(err)) {
            case LIB_ERR_NO_EVENT:
                thread_yield();
                break;
            case SYS_ERR_OK:
                continue;
            default:
                USER_PANIC_ERR(err, "dispatching event");
        }
    }
#endif
    return SYS_ERR_OK;
}
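/*
 * Usage sketch (assumption, not part of the driver): a caller that has
 * already initialized an IOAT device would enable MSI-X delivery like this
 * and fall back to polling if the setup fails. Only
 * ioat_dma_device_irq_setup() above is taken from this file; the
 * surrounding function is hypothetical.
 */
static errval_t example_enable_irq(struct ioat_dma_device *dev)
{
    errval_t err = ioat_dma_device_irq_setup(dev, DMA_IRQ_MSIX);
    if (err_is_fail(err)) {
        // MSI-X could not be enabled; the device remains usable in
        // polled mode, so report the error and let the caller decide.
        DEBUG_ERR(err, "enabling MSI-X interrupts failed");
        return err;
    }
    return SYS_ERR_OK;
}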