/*
 * Function: _cmic_interrupt
 *
 * Purpose:
 *    Interrupt Handler for classic CMIC devices.
 *    Mask all interrupts on the device and wake up the user-mode
 *    interrupt thread. It is assumed that the interrupt thread
 *    unmasks interrupts again when interrupt handling is complete.
 *    If a secondary (kernel) interrupt handler is registered
 *    (fmask != 0) and none of the pending interrupts belong to
 *    user mode, only the user-mode sources are masked and the
 *    thread is not woken.
 * Parameters:
 *    ctrl - BDE control structure for this device.
 * Returns:
 *    Nothing
 */
static void
_cmic_interrupt(bde_ctrl_t *ctrl)
{
    int d;
    uint32_t mask = 0, stat, imask = 0, fmask = 0;

    /* Device number is the index of ctrl within the _devices table */
    d = (((uint8 *)ctrl - (uint8 *)_devices) / sizeof (bde_ctrl_t));

    /* Check for secondary interrupt handler; on failure treat as absent */
    if (lkbde_irq_mask_get(d, &mask, &fmask) < 0) {
        fmask = 0;
    }

    if (fmask != 0) {
        /* imask = interrupt sources owned by user mode */
        imask = mask & ~fmask;
        /* Check for pending user mode interrupts */
        stat = user_bde->read(d, CMIC_IRQ_STAT);
        if ((stat & imask) == 0) {
            /* All handled in kernel mode: leave user sources enabled */
            lkbde_irq_mask_set(d, CMIC_IRQ_MASK, imask, 0);
            return;
        }
    }

    /* Mask everything; user mode re-enables when done */
    lkbde_irq_mask_set(d, CMIC_IRQ_MASK, 0, 0);

    atomic_set(&_interrupt_has_taken_place, 1);
#ifdef BDE_LINUX_NON_INTERRUPTIBLE
    wake_up(&_interrupt_wq);
#else
    wake_up_interruptible(&_interrupt_wq);
#endif
}
/*
 * Function: _bcm88750_interrupt
 *
 * Purpose:
 *    BCM88750 interrupt handler. Masks all three CMIC interrupt
 *    mask registers on the device, records that an interrupt
 *    occurred, and wakes the user-mode interrupt thread, which is
 *    expected to unmask interrupts when handling is complete.
 *    NOTE(review): another definition of _bcm88750_interrupt (using
 *    per-instance resources) appears later in this file — presumably
 *    one of the two is stale; confirm which version should remain.
 * Parameters:
 *    ctrl - BDE control structure for this device.
 * Returns:
 *    Nothing
 */
static void
_bcm88750_interrupt(bde_ctrl_t *ctrl)
{
    /* Device number is ctrl's index in the _devices table */
    int dev = ctrl - _devices;

    /* Silence every interrupt source; user mode re-enables later */
    lkbde_irq_mask_set(dev, CMIC_IRQ_MASK, 0, 0);
    lkbde_irq_mask_set(dev, CMIC_IRQ_MASK_1, 0, 0);
    lkbde_irq_mask_set(dev, CMIC_IRQ_MASK_2, 0, 0);

    /* Flag the event, then wake the waiting interrupt thread */
    atomic_set(&_interrupt_has_taken_place, 1);
#ifdef BDE_LINUX_NON_INTERRUPTIBLE
    wake_up(&_interrupt_wq);
#else
    wake_up_interruptible(&_interrupt_wq);
#endif
}
/*
 * Function: _bcm88750_interrupt
 *
 * Purpose:
 *    BCM88750 interrupt handler (multi-instance variant). Masks all
 *    three CMIC interrupt mask registers on the device, records the
 *    interrupt in the per-instance resource block, and wakes that
 *    instance's interrupt thread, which is expected to unmask
 *    interrupts when handling is complete.
 *    NOTE(review): an earlier definition of _bcm88750_interrupt (using
 *    the global wait queue) also appears in this file — presumably one
 *    of the two is stale; confirm which version should remain.
 * Parameters:
 *    ctrl - BDE control structure for this device.
 * Returns:
 *    Nothing
 */
static void
_bcm88750_interrupt(bde_ctrl_t *ctrl)
{
    /* Device number is ctrl's index in the _devices table */
    int dev = ctrl - _devices;
    /* Per-instance interrupt bookkeeping for this device's instance */
    bde_inst_resource_t *inst_res = &_bde_inst_resource[ctrl->inst];

    /* Silence every interrupt source; user mode re-enables later */
    lkbde_irq_mask_set(dev, CMIC_IRQ_MASK, 0, 0);
    lkbde_irq_mask_set(dev, CMIC_IRQ_MASK_1, 0, 0);
    lkbde_irq_mask_set(dev, CMIC_IRQ_MASK_2, 0, 0);

    /* Flag the event, then wake this instance's interrupt thread */
    atomic_set(&inst_res->intr, 1);
#ifdef BDE_LINUX_NON_INTERRUPTIBLE
    wake_up(&inst_res->intr_wq);
#else
    wake_up_interruptible(&inst_res->intr_wq);
#endif
}
/*
 * Function: _ioctl
 *
 * Purpose:
 *    Handle IOCTL commands from user mode. The request structure is
 *    copied in from user space, dispatched on cmd, and copied back
 *    out with result fields (rc, d0..d3, p0, dx) filled in.
 * Parameters:
 *    cmd - IOCTL cmd
 *    arg - IOCTL parameters (user-space pointer to lubde_ioctl_t)
 * Returns:
 *    0 on success, <0 on error
 */
static int
_ioctl(unsigned int cmd, unsigned long arg)
{
    lubde_ioctl_t io;
    uint32 pbase, size;
    const ibde_dev_t *bde_dev;
    int inst_id;
    bde_inst_resource_t *res;

    if (copy_from_user(&io, (void *)arg, sizeof(io))) {
        return -EFAULT;
    }

    io.rc = LUBDE_SUCCESS;

    switch(cmd) {
    case LUBDE_VERSION:
        io.d0 = 0;
        break;
    case LUBDE_GET_NUM_DEVICES:
        io.d0 = user_bde->num_devices(io.dev);
        break;
    case LUBDE_GET_DEVICE:
        /* Report device ID/revision and, for memory-mapped devices,
           the physical address range user mode needs to mmap. */
        bde_dev = user_bde->get_dev(io.dev);
        if (bde_dev) {
            io.d0 = bde_dev->device;
            io.d1 = bde_dev->rev;
            if (BDE_DEV_MEM_MAPPED(_devices[io.dev].dev_type)) {
                /* Get physical address to map */
                io.d2 = lkbde_get_dev_phys(io.dev);
                io.d3 = lkbde_get_dev_phys_hi(io.dev);
            }
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_GET_DEVICE_TYPE:
        io.d0 = _devices[io.dev].dev_type;
        break;
    case LUBDE_GET_BUS_FEATURES:
        user_bde->pci_bus_features(io.dev, (int *) &io.d0, (int *) &io.d1,
                                   (int *) &io.d2);
        break;
    case LUBDE_PCI_CONFIG_PUT32:
        /* PCI config space access is only valid for PCI devices */
        if (_devices[io.dev].dev_type & BDE_PCI_DEV_TYPE) {
            user_bde->pci_conf_write(io.dev, io.d0, io.d1);
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_PCI_CONFIG_GET32:
        if (_devices[io.dev].dev_type & BDE_PCI_DEV_TYPE) {
            io.d0 = user_bde->pci_conf_read(io.dev, io.d0);
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_GET_DMA_INFO:
        /* In multi-instance mode the DMA pool is per-instance;
           otherwise the single global DMA pool is reported. */
        inst_id = io.dev;
        if (_bde_multi_inst){
            _dma_resource_get(inst_id, &pbase, &size);
        } else {
            lkbde_get_dma_info(&pbase, &size);
        }
        io.d0 = pbase;
        io.d1 = size;
        /* Optionally enable DMA mmap via /dev/linux-kernel-bde */
        io.d2 = USE_LINUX_BDE_MMAP;
        break;
    case LUBDE_ENABLE_INTERRUPTS:
        if (_devices[io.dev].dev_type & BDE_SWITCH_DEV_TYPE) {
            /* Connect the device-specific ISR once; enabled flag
               prevents double connection. */
            if (_devices[io.dev].isr && !_devices[io.dev].enabled) {
                user_bde->interrupt_connect(io.dev,
                                            _devices[io.dev].isr,
                                            _devices+io.dev);
                _devices[io.dev].enabled = 1;
            }
        } else {
            /* Process ethernet device interrupt */
            /* FIXME: for multiple chips */
            if (!_devices[io.dev].enabled) {
                user_bde->interrupt_connect(io.dev,
                                            (void(*)(void *))_ether_interrupt,
                                            _devices+io.dev);
                _devices[io.dev].enabled = 1;
            }
        }
        break;
    case LUBDE_DISABLE_INTERRUPTS:
        if (_devices[io.dev].enabled) {
            user_bde->interrupt_disconnect(io.dev);
            _devices[io.dev].enabled = 0;
        }
        break;
    case LUBDE_WAIT_FOR_INTERRUPT:
        /* Block until the ISR signals; switch devices wait on their
           instance's queue, ethernet devices on the global one. */
        if (_devices[io.dev].dev_type & BDE_SWITCH_DEV_TYPE) {
            res = &_bde_inst_resource[_devices[io.dev].inst];
#ifdef BDE_LINUX_NON_INTERRUPTIBLE
            wait_event_timeout(res->intr_wq,
                               atomic_read(&res->intr) != 0, 100);
#else
            wait_event_interruptible(res->intr_wq,
                                     atomic_read(&res->intr) != 0);
#endif
            /*
             * Even if we get multiple interrupts, we
             * only run the interrupt handler once.
             */
            atomic_set(&res->intr, 0);
        } else {
#ifdef BDE_LINUX_NON_INTERRUPTIBLE
            wait_event_timeout(_ether_interrupt_wq,
                               atomic_read(&_ether_interrupt_has_taken_place) != 0,
                               100);
#else
            wait_event_interruptible(_ether_interrupt_wq,
                                     atomic_read(&_ether_interrupt_has_taken_place) != 0);
#endif
            /*
             * Even if we get multiple interrupts, we
             * only run the interrupt handler once.
             */
            atomic_set(&_ether_interrupt_has_taken_place, 0);
        }
        break;
    case LUBDE_USLEEP:
        sal_usleep(io.d0);
        break;
    case LUBDE_UDELAY:
        sal_udelay(io.d0);
        break;
    case LUBDE_SEM_OP:
        /* Kernel-backed semaphore ops; io.p0 carries the handle */
        switch (io.d0) {
        case LUBDE_SEM_OP_CREATE:
            io.p0 = (bde_kernel_addr_t)sal_sem_create("", io.d1, io.d2);
            break;
        case LUBDE_SEM_OP_DESTROY:
            sal_sem_destroy((sal_sem_t)io.p0);
            break;
        case LUBDE_SEM_OP_TAKE:
            /* d2 holds the timeout for take */
            io.rc = sal_sem_take((sal_sem_t)io.p0, io.d2);
            break;
        case LUBDE_SEM_OP_GIVE:
            io.rc = sal_sem_give((sal_sem_t)io.p0);
            break;
        default:
            io.rc = LUBDE_FAIL;
            break;
        }
        break;
    case LUBDE_WRITE_IRQ_MASK:
        io.rc = lkbde_irq_mask_set(io.dev, io.d0, io.d1, 0);
        break;
    case LUBDE_SPI_READ_REG:
        if (user_bde->spi_read(io.dev, io.d0, io.dx.buf, io.d1) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_SPI_WRITE_REG:
        if (user_bde->spi_write(io.dev, io.d0, io.dx.buf, io.d1) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_READ_REG_16BIT_BUS:
        io.d1 = user_bde->read(io.dev, io.d0);
        break;
    case LUBDE_WRITE_REG_16BIT_BUS:
        io.rc = user_bde->write(io.dev, io.d0, io.d1);
        break;
#if (defined(BCM_PETRA_SUPPORT) || defined(BCM_DFE_SUPPORT))
    case LUBDE_CPU_WRITE_REG:
    {
        if (lkbde_cpu_write(io.dev, io.d0, (uint32*)io.dx.buf) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    }
    case LUBDE_CPU_READ_REG:
    {
        if (lkbde_cpu_read(io.dev, io.d0, (uint32*)io.dx.buf) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    }
    case LUBDE_CPU_PCI_REGISTER:
    {
        if (lkbde_cpu_pci_register(io.dev) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    }
#endif
    case LUBDE_DEV_RESOURCE:
        bde_dev = user_bde->get_dev(io.dev);
        if (bde_dev) {
            if (BDE_DEV_MEM_MAPPED(_devices[io.dev].dev_type)) {
                /* Get physical address to map */
                io.rc = lkbde_get_dev_resource(io.dev, io.d0,
                                               &io.d1, &io.d2, &io.d3);
            }
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_IPROC_READ_REG:
        io.d1 = user_bde->iproc_read(io.dev, io.d0);
        if (io.d1 == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_IPROC_WRITE_REG:
        if (user_bde->iproc_write(io.dev, io.d0, io.d1) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_ATTACH_INSTANCE:
        io.rc = _instance_attach(io.d0, io.d1);
        break;
    default:
        gprintk("Error: Invalid ioctl (%08x)\n", cmd);
        io.rc = LUBDE_FAIL;
        break;
    }

    if (copy_to_user((void *)arg, &io, sizeof(io))) {
        return -EFAULT;
    }

    return 0;
}
/*
 * Function: _cmicd_interrupt
 *
 * Purpose:
 *    Interrupt handler for CMICd devices. If a secondary (kernel)
 *    interrupt handler is registered (fmask != 0), the IRQ status
 *    registers are scanned; when no user-mode interrupt is pending,
 *    the handler returns without waking user mode. Otherwise all
 *    interrupt mask registers are cleared and the per-instance
 *    interrupt thread is woken; it unmasks interrupts when done.
 * Parameters:
 *    ctrl - BDE control structure for this device.
 * Returns:
 *    Nothing
 */
static void
_cmicd_interrupt(bde_ctrl_t *ctrl)
{
    int d;
    int cmc = BDE_CMICD_PCIE_CMC;
    uint32 stat, mask = 0, fmask = 0, imask = 0;
    bde_inst_resource_t *res;

    /* Device number is the index of ctrl within the _devices table */
    d = (((uint8 *)ctrl - (uint8 *)_devices) / sizeof (bde_ctrl_t));
    res = &_bde_inst_resource[ctrl->inst];
    lkbde_irq_mask_get(d, &mask, &fmask);

    /* Single-pass "loop": entered only when a secondary kernel handler
       exists; each break means a user-mode interrupt is pending, the
       final return means everything was handled in kernel mode. */
    while (fmask) {
        stat = user_bde->read(d, CMIC_CMCx_IRQ_STAT0_OFFSET(cmc));
        /* imask = IRQ0 sources owned by user mode */
        imask = mask & ~fmask;
        if (stat & imask) {
            break;
        }
        stat = user_bde->read(d, CMIC_CMCx_IRQ_STAT1_OFFSET(cmc));
        mask = user_bde->read(d, CMIC_CMCx_PCIE_IRQ_MASK1_OFFSET(cmc));
        if (stat & mask) {
            break;
        }
        stat = user_bde->read(d, CMIC_CMCx_IRQ_STAT2_OFFSET(cmc));
        mask = user_bde->read(d, CMIC_CMCx_PCIE_IRQ_MASK2_OFFSET(cmc));
        if (stat & mask) {
            break;
        }
        stat = user_bde->read(d, CMIC_CMCx_IRQ_STAT3_OFFSET(cmc));
        mask = user_bde->read(d, CMIC_CMCx_PCIE_IRQ_MASK3_OFFSET(cmc));
        if (stat & mask) {
            break;
        }
        stat = user_bde->read(d, CMIC_CMCx_IRQ_STAT4_OFFSET(cmc));
        mask = user_bde->read(d, CMIC_CMCx_PCIE_IRQ_MASK4_OFFSET(cmc));
        if (stat & mask) {
            break;
        }
        stat = user_bde->read(d, CMIC_CMCx_IRQ_STAT5_OFFSET(cmc));
        mask = user_bde->read(d, CMIC_CMCx_PCIE_IRQ_MASK5_OFFSET(cmc));
        if (stat & mask) {
            break;
        }
        stat = user_bde->read(d, CMIC_CMCx_IRQ_STAT6_OFFSET(cmc));
        mask = user_bde->read(d, CMIC_CMCx_PCIE_IRQ_MASK6_OFFSET(cmc));
        if (stat & mask) {
            break;
        }
        /* No user-mode interrupt pending: all handled in kernel mode */
        return;
    }

    /* Mask everything; MASK0 goes through lkbde_irq_mask_set so the
       kernel-side fmask bookkeeping stays consistent. */
    lkbde_irq_mask_set(d, CMIC_CMCx_PCIE_IRQ_MASK0_OFFSET(cmc), 0, 0);
    user_bde->write(d, CMIC_CMCx_PCIE_IRQ_MASK1_OFFSET(cmc), 0);
    user_bde->write(d, CMIC_CMCx_PCIE_IRQ_MASK2_OFFSET(cmc), 0);
    user_bde->write(d, CMIC_CMCx_PCIE_IRQ_MASK3_OFFSET(cmc), 0);
    user_bde->write(d, CMIC_CMCx_PCIE_IRQ_MASK4_OFFSET(cmc), 0);
    user_bde->write(d, CMIC_CMCx_PCIE_IRQ_MASK5_OFFSET(cmc), 0);
    user_bde->write(d, CMIC_CMCx_PCIE_IRQ_MASK6_OFFSET(cmc), 0);

    atomic_set(&res->intr, 1);
#ifdef BDE_LINUX_NON_INTERRUPTIBLE
    wake_up(&res->intr_wq);
#else
    wake_up_interruptible(&res->intr_wq);
#endif
}
/*
 * Function: _cmicm_interrupt
 *
 * Purpose:
 *    Interrupt handler for CMICm devices. If a secondary (kernel)
 *    interrupt handler is registered (fmask != 0), the IRQ status
 *    registers are scanned; when no user-mode interrupt is pending,
 *    the handler returns without waking user mode. Otherwise all
 *    interrupt mask registers are cleared (the UC0 register bank for
 *    AXI-attached devices, the PCIe bank otherwise) and the global
 *    interrupt thread is woken; it unmasks interrupts when done.
 * Parameters:
 *    ctrl - BDE control structure for this device.
 * Returns:
 *    Nothing
 */
static void
_cmicm_interrupt(bde_ctrl_t *ctrl)
{
    int d;
    int cmc = BDE_CMICM_PCIE_CMC;
    uint32 stat, mask = 0, fmask = 0, imask = 0;

    /* Device number is the index of ctrl within the _devices table */
    d = (((uint8 *)ctrl - (uint8 *)_devices) / sizeof (bde_ctrl_t));
    lkbde_irq_mask_get(d, &mask, &fmask);

    /* Single-pass "loop": entered only when a secondary kernel handler
       exists; each break means a user-mode interrupt is pending, the
       final return means everything was handled in kernel mode. */
    while (fmask) {
        stat = user_bde->read(d, CMIC_CMCx_IRQ_STAT0_OFFSET(cmc));
        /* imask = IRQ0 sources owned by user mode */
        imask = mask & ~fmask;
        if (stat & imask) {
            break;
        }
        stat = user_bde->read(d, CMIC_CMCx_IRQ_STAT1_OFFSET(cmc));
        mask = user_bde->read(d, CMIC_CMCx_PCIE_IRQ_MASK1_OFFSET(cmc));
        if (stat & mask) {
            break;
        }
        stat = user_bde->read(d, CMIC_CMCx_IRQ_STAT2_OFFSET(cmc));
        mask = user_bde->read(d, CMIC_CMCx_PCIE_IRQ_MASK2_OFFSET(cmc));
        if (stat & mask) {
            break;
        }
        stat = user_bde->read(d, CMIC_CMCx_IRQ_STAT3_OFFSET(cmc));
        mask = user_bde->read(d, CMIC_CMCx_PCIE_IRQ_MASK3_OFFSET(cmc));
        if (stat & mask) {
            break;
        }
        stat = user_bde->read(d, CMIC_CMCx_IRQ_STAT4_OFFSET(cmc));
        mask = user_bde->read(d, CMIC_CMCx_PCIE_IRQ_MASK4_OFFSET(cmc));
        if (stat & mask) {
            break;
        }
        /* No user-mode interrupt pending: all handled in kernel mode */
        return;
    }

    if (ctrl->dev_type & BDE_AXI_DEV_TYPE) {
        /* AXI-attached: mask via the UC0 register bank; MASK0 goes
           through lkbde_irq_mask_set so kernel-side fmask bookkeeping
           stays consistent. Also clear MASK0 of CMCs 1 and 2. */
        lkbde_irq_mask_set(d, CMIC_CMCx_UC0_IRQ_MASK0_OFFSET(cmc), 0, 0);
        user_bde->write(d, CMIC_CMCx_UC0_IRQ_MASK1_OFFSET(cmc), 0);
        user_bde->write(d, CMIC_CMCx_UC0_IRQ_MASK2_OFFSET(cmc), 0);
        user_bde->write(d, CMIC_CMCx_UC0_IRQ_MASK3_OFFSET(cmc), 0);
        user_bde->write(d, CMIC_CMCx_UC0_IRQ_MASK4_OFFSET(cmc), 0);
        user_bde->write(d, CMIC_CMCx_UC0_IRQ_MASK0_OFFSET(1), 0);
        user_bde->write(d, CMIC_CMCx_UC0_IRQ_MASK0_OFFSET(2), 0);
    } else {
        /* PCIe-attached: same masking via the PCIe register bank */
        lkbde_irq_mask_set(d, CMIC_CMCx_PCIE_IRQ_MASK0_OFFSET(cmc), 0, 0);
        user_bde->write(d, CMIC_CMCx_PCIE_IRQ_MASK1_OFFSET(cmc), 0);
        user_bde->write(d, CMIC_CMCx_PCIE_IRQ_MASK2_OFFSET(cmc), 0);
        user_bde->write(d, CMIC_CMCx_PCIE_IRQ_MASK3_OFFSET(cmc), 0);
        user_bde->write(d, CMIC_CMCx_PCIE_IRQ_MASK4_OFFSET(cmc), 0);
        user_bde->write(d, CMIC_CMCx_PCIE_IRQ_MASK0_OFFSET(1), 0);
        user_bde->write(d, CMIC_CMCx_PCIE_IRQ_MASK0_OFFSET(2), 0);
    }

    atomic_set(&_interrupt_has_taken_place, 1);
#ifdef BDE_LINUX_NON_INTERRUPTIBLE
    wake_up(&_interrupt_wq);
#else
    wake_up_interruptible(&_interrupt_wq);
#endif
}