Example #1
0
/**
 * \brief globally enables the interrupts for the given device
 *
 * \param dev   IOAT DMA device
 * \param type  the interrupt type to enable
 *
 * \returns SYS_ERR_OK (MSI-X/MSI/INTx paths are not yet implemented and
 *          trap on an assertion; any other type disables interrupts)
 */
errval_t ioat_dma_device_irq_setup(struct ioat_dma_device *dev,
                                   dma_irq_t type)
{
    /* control word with the global interrupt-enable bit set */
    ioat_dma_intrctrl_t intctrl_val = ioat_dma_intrctrl_intp_en_insert(0, 1);

    dev->common.irq_type = type;

    switch (type) {
    case DMA_IRQ_MSIX:
        IOATDEV_DEBUG("Initializing MSI-X interrupts \n", dev->common.id);
        assert(!"NYI");
        break;
    case DMA_IRQ_MSI:
        IOATDEV_DEBUG("Initializing MSI interrupts \n", dev->common.id);
        assert(!"NYI");
        break;
    case DMA_IRQ_INTX:
        IOATDEV_DEBUG("Initializing INTx interrupts \n", dev->common.id);
        assert(!"NYI");
        break;
    default:
        /* unknown / disabled: write back an all-zero control word */
        IOATDEV_DEBUG("Disabling interrupts \n", dev->common.id);
        intctrl_val = 0;
        break;
    }

    /* commit the interrupt control settings to the hardware register */
    ioat_dma_intrctrl_wr(&dev->device, intctrl_val);

    return SYS_ERR_OK;
}
Example #2
0
/*
 * \brief interrupt handler for the IOAT DMA device; polls all channels
 *        for completed transfers when the device signals an interrupt
 *
 * \param arg  opaque handler argument, the struct dma_device of the device
 */
static void ioat_dma_device_irq_handler(void* arg)
{
    struct dma_device *dev = arg;
    errval_t err;

    IOATDEV_DEBUG("############ MSIX INTERRUPT HAPPENED.\n", dev->id);

#if IOAT_DEBUG_INTR_ENABLED
    msix_intr_happened=1;
#endif

    /* drain completed requests on all channels of this device */
    err = ioat_dma_device_poll_channels(dev);
    if (err_is_ok(err)) {
        return;
    }

    /* an idle device may still fire an interrupt; this is only a warning */
    if (err_no(err) == DMA_ERR_DEVICE_IDLE) {
        IOATDEV_DEBUG("WARNING: MSI-X interrupt on idle device\n", dev->id);
        return;
    }

    USER_PANIC_ERR(err, "dma poll device returned an error\n");
}
Example #3
0
/*
 * \brief device-specific initialization for Crystal Beach version 3 devices
 *
 * \param dev  the IOAT DMA device to initialize (version already read)
 *
 * \returns SYS_ERR_OK on success
 *          DMA_ERR_DEVICE_UNSUPPORTED for Crystal Beach 3.3 hardware
 *          errval on allocation or channel initialization failures
 */
static errval_t device_init_ioat_v3(struct ioat_dma_device *dev)
{
    errval_t err;

    IOATDEV_DEBUG("initialize Crystal Beach 3 DMA device\n", dev->common.id);

    ioat_dma_dmacapability_t cap = ioat_dma_dmacapability_rd(&dev->device);

    if (ioat_dma_cbver_minor_extract(dev->version) == 2) {
        IOATDEV_DEBUG("disabling XOR and PQ opcodes for Crystal Beach 3.2\n",
                      dev->common.id);
        cap = ioat_dma_dmacapability_xor_insert(cap, 0x0);
        cap = ioat_dma_dmacapability_pq_insert(cap, 0x0);
    } else if (ioat_dma_cbver_minor_extract(dev->version) == 3) {
        IOATDEV_DEBUG("devices of Crystal Beach Version 3.3 are not supported.\n",
                      dev->common.id);
        return DMA_ERR_DEVICE_UNSUPPORTED;
    }

    /* if DCA is enabled, we cannot support the RAID functions */
    if (ioat_dma_dca_is_enabled()) {
        IOATDEV_DEBUG("Disabling XOR and PQ while DCA is enabled\n", dev->common.id);
        cap = ioat_dma_dmacapability_xor_insert(cap, 0x0);
        cap = ioat_dma_dmacapability_pq_insert(cap, 0x0);
    }

    if (ioat_dma_dmacapability_xor_extract(cap)) {
        IOATDEV_DEBUG("device supports XOR RAID.\n", dev->common.id);

        dev->flags |= IOAT_DMA_DEV_F_RAID;

        /*
         * this may need some additional functions to prepare
         * the specific transfers...
         *
         * max_xor = 8;
         * prepare_xor, prepare_xor_val
         */
    }

    if (ioat_dma_dmacapability_pq_extract(cap)) {
        IOATDEV_DEBUG("device supports PQ RAID.\n", dev->common.id);

        dev->flags |= IOAT_DMA_DEV_F_RAID;

        /*
         * this may need some additional functions to prepare the
         * DMA descriptors
         *
         * max_xor = 8;
         * max_pq = 8;
         * prepare_pq, perpare_pq_val
         *
         * also set the prepare_xor pointers...
         *
         */
    }

    /* set the interrupt type to disabled*/
    dev->common.irq_type = DMA_IRQ_DISABLED;
    dev->common.type = DMA_DEV_TYPE_IOAT;

    /* allocate memory for completion status writeback */
    err = dma_mem_alloc(IOAT_DMA_COMPLSTATUS_SIZE, IOAT_DMA_COMPLSTATUS_FLAGS,
                        &dev->complstatus);
    if (err_is_fail(err)) {
        return err;
    }

    dev->common.channels.count = ioat_dma_chancnt_num_rdf(&dev->device);

    dev->common.channels.c = calloc(dev->common.channels.count,
                                    sizeof(*dev->common.channels.c));
    if (dev->common.channels.c == NULL) {
        dma_mem_free(&dev->complstatus);
        return LIB_ERR_MALLOC_FAIL;
    }

    /* channel enumeration */

    IOATDEV_DEBUG("channel enumeration. discovered %u channels\n", dev->common.id,
                  dev->common.channels.count);

    /* the transfer-size capability register encodes log2 of the maximum */
    uint32_t max_xfer_size = (1 << ioat_dma_xfercap_max_rdf(&dev->device));

    for (uint8_t i = 0; i < dev->common.channels.count; ++i) {
        struct dma_channel **chan = &dev->common.channels.c[i];
        err = ioat_dma_channel_init(dev, i, max_xfer_size,
                                    (struct ioat_dma_channel **) chan);
        if (err_is_fail(err)) {
            /* BUGFIX: the result of ioat_dma_channel_init was previously
             * ignored, leaving a partially initialized device behind.
             * Release the resources acquired in this function and report
             * the failure to the caller.
             * NOTE(review): channels initialized in earlier iterations are
             * not torn down here — confirm whether a channel cleanup
             * routine exists and should be invoked. */
            free(dev->common.channels.c);
            dev->common.channels.c = NULL;
            dev->common.channels.count = 0;
            dma_mem_free(&dev->complstatus);
            return err;
        }
    }

    if (dev->flags & IOAT_DMA_DEV_F_DCA) {
        /*TODO: DCA initialization device->dca = ioat3_dca_init(pdev, device->reg_base);*/
    }

    return SYS_ERR_OK;
}
Example #4
0
/*
 * \brief initialization stub for Crystal Beach version 2 devices
 *
 * \param dev  the IOAT DMA device (only its id is used, for debug output)
 *
 * \returns DMA_ERR_DEVICE_UNSUPPORTED always: version 2.xx hardware
 *          has no driver support yet
 */
static errval_t device_init_ioat_v2(struct ioat_dma_device *dev)
{
    /* report the unsupported hardware revision and reject the device */
    IOATDEV_DEBUG("devices of Crystal Beach Version 2.xx are currently not supported.\n",
                  dev->common.id);

    return DMA_ERR_DEVICE_UNSUPPORTED;
}
Example #5
0
/**
 * \brief initializes a IOAT DMA device with the giving capability
 *
 * \param mmio capability representing the device's MMIO registers
 * \param dev  returns a pointer to the device structure
 *
 * \returns SYS_ERR_OK on success
 *          errval on error
 *
 * Allocates the device structure, maps the MMIO registers uncached, reads
 * the hardware version and dispatches to the version-specific init routine.
 * On any failure all resources acquired here are released again; on success
 * ownership of *dev passes to the caller.
 */
errval_t ioat_dma_device_init(struct capref mmio,
                              struct ioat_dma_device **dev)
{
    errval_t err;

    /* zero-initialized device state; freed on every error path below */
    struct ioat_dma_device *ioat_device = calloc(1, sizeof(*ioat_device));
    if (ioat_device == NULL) {
        return LIB_ERR_MALLOC_FAIL;
    }

#if DMA_BENCH_ENABLED
     bench_init();
#endif

    struct dma_device *dma_dev = &ioat_device->common;

    /* obtain the physical base address and size of the MMIO frame */
    struct frame_identity mmio_id;
    err = invoke_frame_identify(mmio, &mmio_id);
    if (err_is_fail(err)) {
        free(ioat_device);
        return err;
    }

    /* assign the next free id from the global device counter */
    dma_dev->id = device_id++;
    dma_dev->mmio.paddr = mmio_id.base;
    dma_dev->mmio.bytes = (1UL << mmio_id.bits);  /* frame size is 2^bits bytes */
    dma_dev->mmio.frame = mmio;

    /* NOTE(review): 1 << mmio_id.bits is the size in bytes, but the message
     * labels it "kB" — confirm which unit is intended */
    IOATDEV_DEBUG("init device with mmio range: {paddr=0x%016lx, size=%u kB}\n",
                  dma_dev->id, mmio_id.base, 1 << mmio_id.bits);

    /* map the registers non-cached so accesses go straight to the device */
    err = vspace_map_one_frame_attr((void**) &dma_dev->mmio.vaddr,
                                    dma_dev->mmio.bytes, dma_dev->mmio.frame,
                                    VREGION_FLAGS_READ_WRITE_NOCACHE,
                                    NULL,
                                    NULL);
    if (err_is_fail(err)) {
        free(ioat_device);
        return err;
    }

    /* bind the register accessors to the freshly mapped base address */
    ioat_dma_initialize(&ioat_device->device, NULL, (void *) dma_dev->mmio.vaddr);

    /* read the Crystal Beach version register to pick the init routine */
    ioat_device->version = ioat_dma_cbver_rd(&ioat_device->device);

    IOATDEV_DEBUG("device registers mapped at 0x%016lx. IOAT version: %u.%u\n",
                  dma_dev->id, dma_dev->mmio.vaddr,
                  ioat_dma_cbver_major_extract(ioat_device->version),
                  ioat_dma_cbver_minor_extract(ioat_device->version));

    /* dispatch on the major hardware version */
    switch (ioat_dma_cbver_major_extract(ioat_device->version)) {
        case ioat_dma_cbver_1x:
            err = device_init_ioat_v1(ioat_device);
            break;
        case ioat_dma_cbver_2x:
            err = device_init_ioat_v2(ioat_device);
            break;
        case ioat_dma_cbver_3x:
            err = device_init_ioat_v3(ioat_device);
            break;
        default:
            err = DMA_ERR_DEVICE_UNSUPPORTED;
    }

    if (err_is_fail(err)) {
        /* roll back the register mapping and the allocation */
        vspace_unmap((void*) dma_dev->mmio.vaddr);
        free(ioat_device);
        return err;
    }

    /* no explicit memory (de)registration needed; only poll is provided */
    dma_dev->f.deregister_memory = NULL;
    dma_dev->f.register_memory = NULL;
    dma_dev->f.poll = ioat_dma_device_poll_channels;

    *dev = ioat_device;

    return err;
}
Example #6
0
/**
 * \brief globally enables the interrupts for the given device
 *
 * \param dev   IOAT DMA device
 * \param type  the interrupt type to enable
 *
 * \returns SYS_ERR_OK on success
 *          errval on MSI-X enable or vector-setup failure
 *
 * Only DMA_IRQ_MSIX is implemented; MSI and INTx trap on an assertion.
 * Any other type disables interrupts on the device.
 */
errval_t ioat_dma_device_irq_setup(struct ioat_dma_device *dev,
                                   dma_irq_t type)
{
    errval_t err;

    /* build the interrupt control word with the global enable bit set */
    ioat_dma_intrctrl_t intcrtl = 0;
    intcrtl = ioat_dma_intrctrl_intp_en_insert(intcrtl, 1);

    dev->common.irq_type = type;
    switch (type) {
        case DMA_IRQ_MSIX:
            /* The number of MSI-X vectors should equal the number of channels */
            IOATDEV_DEBUG("MSI-X interrupt setup for device (%u, %u, %u)\n",
                          dev->common.id, dev->pci_addr.bus, dev->pci_addr.device,
                          dev->pci_addr.function);

            /* enable MSI-X on the PCI function; fills in the vector count */
            err = pci_msix_enable_addr(&dev->pci_addr, &dev->irq_msix_count);
            if (err_is_fail(err)) {
                return err;
            }

            assert(dev->irq_msix_count > 0);

            IOATDEV_DEBUG("MSI-X enabled #vecs=%d\n", dev->common.id,
                          dev->irq_msix_count);

            /* register the handler and obtain the interrupt vector */
            err = pci_setup_inthandler(ioat_dma_device_irq_handler, dev,
                                       &dev->irq_msix_vector);
            assert(err_is_ok(err));

            /* route the interrupt to the local core's APIC */
            uint8_t dest = get_local_apic_id();

            IOATDEV_DEBUG("MSI-X routing to apic=%u\n", dev->common.id,
                          dest);

            /* NOTE(review): only vector 0 is configured although
             * irq_msix_count vectors may be available — confirm that a
             * single shared vector is intended */
            err = pci_msix_vector_init_addr(&dev->pci_addr, 0, dest,
                                            dev->irq_msix_vector);
            assert(err_is_ok(err));

            /* enable the interrupts */
            intcrtl = ioat_dma_intrctrl_msix_vec_insert(intcrtl, 1);
            intcrtl = ioat_dma_intrctrl_intp_en_insert(intcrtl, 1);
            break;
        case DMA_IRQ_MSI:
            IOATDEV_DEBUG("Initializing MSI interrupts \n", dev->common.id);
            assert(!"NYI");
            break;
        case DMA_IRQ_INTX:
            IOATDEV_DEBUG("Initializing INTx interrupts \n", dev->common.id);
            assert(!"NYI");
            break;
        default:
            /* disabled */
            intcrtl = 0;
            IOATDEV_DEBUG("Disabling interrupts \n", dev->common.id);
            break;
    }

    /* commit the interrupt control settings to the device register */
    ioat_dma_intrctrl_wr(&dev->device, intcrtl);


#if IOAT_DEBUG_INTR_ENABLED
    /*
     * check if interrupts are working.
     */
    msix_intr_happened = 0;

    struct ioat_dma_channel *chan;
    chan = (struct ioat_dma_channel *)dev->common.channels.c[0];

    /* issue a NOP request; its completion should raise the interrupt,
     * which sets msix_intr_happened in the handler */
    ioat_dma_request_nop_chan(chan);
    err = ioat_dma_channel_issue_pending(chan);
    if (err_is_fail(err)) {
        return err;
    }

    /* spin until the handler observed the interrupt, dispatching events */
    while(msix_intr_happened == 0) {
        uint64_t status = ioat_dma_channel_get_status(chan);
        err = event_dispatch_non_block(get_default_waitset());

        if (!ioat_dma_channel_is_active(status) && !ioat_dma_channel_is_idle(status)) {
            /* NOTE(review): no trailing ';' here — relies on USER_PANIC
             * expanding to a complete statement; confirm the macro */
            USER_PANIC("DMA request turned channel into erroneous state.")
        }

        switch(err_no(err)) {
            case LIB_ERR_NO_EVENT:
                /* nothing pending: give other threads a chance to run */
                thread_yield();
                break;
            case SYS_ERR_OK:
                continue;
            default:
                USER_PANIC_ERR(err, "dispatching event");
        }
    }
#endif

    return SYS_ERR_OK;
}