static void clear_nps_pkt_err_intr(struct nitrox_device *ndev) { union nps_pkt_int pkt_int; unsigned long value, offset; int i; pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT); dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT 0x%016llx\n", pkt_int.value); if (pkt_int.s.slc_err) { offset = NPS_PKT_SLC_ERR_TYPE; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); dev_err_ratelimited(DEV(ndev), "NPS_PKT_SLC_ERR_TYPE 0x%016lx\n", value); offset = NPS_PKT_SLC_RERR_LO; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); /* enable the solicit ports */ for_each_set_bit(i, &value, BITS_PER_LONG) enable_pkt_solicit_port(ndev, i); dev_err_ratelimited(DEV(ndev), "NPS_PKT_SLC_RERR_LO 0x%016lx\n", value); offset = NPS_PKT_SLC_RERR_HI; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); dev_err_ratelimited(DEV(ndev), "NPS_PKT_SLC_RERR_HI 0x%016lx\n", value); } if (pkt_int.s.in_err) { offset = NPS_PKT_IN_ERR_TYPE; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); dev_err_ratelimited(DEV(ndev), "NPS_PKT_IN_ERR_TYPE 0x%016lx\n", value); offset = NPS_PKT_IN_RERR_LO; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); /* enable the input ring */ for_each_set_bit(i, &value, BITS_PER_LONG) enable_pkt_input_ring(ndev, i); dev_err_ratelimited(DEV(ndev), "NPS_PKT_IN_RERR_LO 0x%016lx\n", value); offset = NPS_PKT_IN_RERR_HI; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); dev_err_ratelimited(DEV(ndev), "NPS_PKT_IN_RERR_HI 0x%016lx\n", value); } }
/**
 * Resets syncpoint and waitbase values of a
 * single client to sw shadows
 */
void nvhost_syncpt_reset_client(struct platform_device *pdev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	struct nvhost_master *nvhost_master = nvhost_get_host(pdev);
	u32 id;

	/* both ops are required below; fail loudly if the host lacks them */
	BUG_ON(!(syncpt_op().reset && syncpt_op().reset_wait_base));
	/*
	 * NOTE(review): syncpts/waitbases are accessed through an
	 * (unsigned long *) cast and scanned over BITS_PER_LONG bits,
	 * which assumes each field is at least sizeof(long) wide —
	 * confirm against the nvhost_device_data definition.
	 */
	for_each_set_bit(id, (unsigned long *)&pdata->syncpts, BITS_PER_LONG)
		syncpt_op().reset(&nvhost_master->syncpt, id);
	for_each_set_bit(id, (unsigned long *)&pdata->waitbases, BITS_PER_LONG)
		syncpt_op().reset_wait_base(&nvhost_master->syncpt, id);
	/* make sure the resets above are observed before we return */
	wmb();
}
/*
 * Build the SPI command sequence for the channels enabled in
 * @active_scan_mask and (re)initialize the single-transfer ring message.
 */
static int adc108s102_update_scan_mode(struct iio_dev *indio_dev,
		unsigned long const *active_scan_mask)
{
	struct adc108s102_state *st = iio_priv(indio_dev);
	unsigned int chan, nr_cmds = 0;

	/* One conversion command per channel enabled for buffered capture */
	for_each_set_bit(chan, active_scan_mask, ADC108S102_MAX_CHANNELS)
		st->tx_buf[nr_cmds++] = cpu_to_be16(ADC108S102_CMD(chan));

	/* Trailing dummy command clocks in the final conversion result */
	st->tx_buf[nr_cmds++] = 0x00;

	st->ring_xfer.tx_buf = &st->tx_buf[0];
	st->ring_xfer.rx_buf = &st->rx_buf[0];
	st->ring_xfer.len = nr_cmds * sizeof(st->tx_buf[0]);

	spi_message_init_with_transfers(&st->ring_msg, &st->ring_xfer, 1);

	return 0;
}
/* Dispatch each event flagged in the low 8 bits of @events. */
void xadc_handle_events(struct iio_dev *indio_dev, unsigned long events)
{
	unsigned int pos;

	for_each_set_bit(pos, &events, 8)
		xadc_handle_event(indio_dev, pos);
}
/* Grant ISR: completes waiters for every arbitration semaphore granted. */
static irqreturn_t arb_gnt_isr(int irq, void *dev_id)
{
	struct tegra_arb_dev *dev = dev_id;
	unsigned long status;
	u32 cpu_int_en;
	unsigned int bit;
	unsigned long flags;

	/*
	 * NOTE(review): the lock is taken through 'arb', not the local
	 * 'dev' — presumably a file-scope pointer to the same device;
	 * confirm it is always valid here and refers to this device.
	 */
	spin_lock_irqsave(&arb->lock, flags);

	status = arb_sema_read(ARB_GRANT_STATUS);
	pr_debug("%s: 0x%lx\n", __func__, status);

	/* disable the arb semaphores which were signalled */
	cpu_int_en = arb_gnt_read(ARB_CPU_INT_EN);
	arb_gnt_write((cpu_int_en & ~(status & cpu_int_en)), ARB_CPU_INT_EN);

	/* only wake waiters whose interrupt was actually enabled */
	status &= cpu_int_en;
	for_each_set_bit(bit, &status, BITS_PER_LONG)
		complete(&dev->arb_gnt_complete[bit]);

	spin_unlock_irqrestore(&arb->lock, flags);
	return IRQ_HANDLED;
}
int nvhost_job_pin(struct nvhost_job *job, struct nvhost_syncpt *sp) { int err = 0, i = 0; phys_addr_t gather_phys = 0; void *gather_addr = NULL; unsigned long waitchk_mask = job->waitchk_mask; /* get current syncpt values for waitchk */ for_each_set_bit(i, &waitchk_mask, sizeof(job->waitchk_mask)) nvhost_syncpt_update_min(sp, i); /* pin gathers */ for (i = 0; i < job->num_gathers; i++) { struct nvhost_job_gather *g = &job->gathers[i]; /* process each gather mem only once */ if (!g->ref) { g->ref = mem_op().get(job->memmgr, job->gathers[i].mem_id); if (IS_ERR(g->ref)) { err = PTR_ERR(g->ref); g->ref = NULL; break; } gather_phys = mem_op().pin(job->memmgr, g->ref); if (IS_ERR((void *)gather_phys)) { mem_op().put(job->memmgr, g->ref); err = gather_phys; break; } /* store the gather ref into unpin array */ job->unpins[job->num_unpins++] = g->ref; gather_addr = mem_op().mmap(g->ref); if (!gather_addr) { err = -ENOMEM; break; } err = do_relocs(job, g->mem_id, gather_addr); if (!err) err = do_waitchks(job, sp, g->mem_id, gather_addr); mem_op().munmap(g->ref, gather_addr); if (err) break; } g->mem = gather_phys + g->offset; } wmb(); return err; }
/* Raise a level-asserted interrupt for every pin set in @irr. */
static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
	u32 pin;

	rtc_irq_eoi_tracking_reset(ioapic);
	for_each_set_bit(pin, &irr, IOAPIC_NUM_PINS)
		ioapic_set_irq(ioapic, pin, 1, true);

	kvm_rtc_eoi_tracking_restore_all(ioapic);
}
/* function is called when global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	/* bits that toggled between the old and new control values */
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	/*
	 * Reprogram only the counters whose enable bit changed.
	 * NOTE(review): &diff is cast to (unsigned long *); on 32-bit
	 * builds this walks diff as two 32-bit words — confirm
	 * X86_PMC_IDX_MAX stays within the valid range there.
	 */
	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}
/*
 * Expand per-TC user-priority bitmaps into a priority -> traffic-class
 * lookup table: map[prio] holds the (last) TC that claims that priority.
 */
void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
{
	unsigned long prio_mask;
	int tc, prio;

	for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
		prio_mask = cfg->tc_config[tc].path[direction].up_to_tc_bitmap;
		for_each_set_bit(prio, &prio_mask, MAX_USER_PRIORITY)
			map[prio] = tc;
	}
}
/* Demux the FPGA summary interrupt to the per-source virtual irqs. */
static irqreturn_t cplds_irq_handler(int in_irq, void *d)
{
	struct cplds *fpga = d;
	unsigned long status;
	unsigned int n;

	/* service only sources that are both latched and unmasked */
	status = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;

	for_each_set_bit(n, &status, CPLDS_NB_IRQ)
		generic_handle_irq(irq_find_mapping(fpga->irqdomain, n));

	return IRQ_HANDLED;
}
/* sysfs: space-separated list of DSM commands supported by the bus. */
static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int len = 0;
	int cmd;

	for_each_set_bit(cmd, &nd_desc->dsm_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));

	len += sprintf(buf + len, "\n");
	return len;
}
/* Cascade handler: forward every changed+enabled GPIO line. */
static irqreturn_t tb10x_gpio_irq_cascade(int irq, void *data)
{
	struct tb10x_gpio *tb10x_gpio = data;
	u32 changed = tb10x_reg_read(tb10x_gpio, OFFSET_TO_REG_CHANGE);
	u32 enabled = tb10x_reg_read(tb10x_gpio, OFFSET_TO_REG_INT_EN);
	const unsigned long pending = changed & enabled;
	int line;

	for_each_set_bit(line, &pending, 32)
		generic_handle_irq(irq_find_mapping(tb10x_gpio->domain, line));

	return IRQ_HANDLED;
}
static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev) { unsigned long abort_source = dev->abort_source; int i; if (abort_source & DW_IC_TX_ABRT_NOACK) { for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]); return -EREMOTEIO; } for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]); if (abort_source & DW_IC_TX_ARB_LOST) return -EAGAIN; else if (abort_source & DW_IC_TX_ABRT_GCALL_READ) return -EINVAL; /* wrong msgs[] data */ else return -EIO; }
/** * input_ff_create() - create force-feedback device * @dev: input device supporting force-feedback * @max_effects: maximum number of effects supported by the device * * This function allocates all necessary memory for a force feedback * portion of an input device and installs all default handlers. * @dev->ffbit should be already set up before calling this function. * Once ff device is created you need to setup its upload, erase, * playback and other handlers before registering input device */ int input_ff_create(struct input_dev *dev, unsigned int max_effects) { struct ff_device *ff; size_t ff_dev_size; int i; if (!max_effects) { dev_err(&dev->dev, "cannot allocate device without any effects\n"); return -EINVAL; } if (max_effects > FF_MAX_EFFECTS) { dev_err(&dev->dev, "cannot allocate more than FF_MAX_EFFECTS effects\n"); return -EINVAL; } ff_dev_size = sizeof(struct ff_device) + max_effects * sizeof(struct file *); if (ff_dev_size < max_effects) /* overflow */ return -EINVAL; ff = kzalloc(ff_dev_size, GFP_KERNEL); if (!ff) return -ENOMEM; ff->effects = kcalloc(max_effects, sizeof(struct ff_effect), GFP_KERNEL); if (!ff->effects) { kfree(ff); return -ENOMEM; } ff->max_effects = max_effects; mutex_init(&ff->mutex); dev->ff = ff; dev->flush = flush_effects; dev->event = input_ff_event; __set_bit(EV_FF, dev->evbit); /* Copy "true" bits into ff device bitmap */ for_each_set_bit(i, dev->ffbit, FF_CNT) __set_bit(i, ff->ffbit); /* we can emulate RUMBLE with periodic effects */ if (test_bit(FF_PERIODIC, ff->ffbit)) __set_bit(FF_RUMBLE, dev->ffbit); return 0; }
/* sysfs: space-separated list of commands supported by this DIMM. */
static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int len = 0;
	int cmd;

	/* no commands advertised: emit just the newline */
	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));

	len += sprintf(buf + len, "\n");
	return len;
}
/* Demux the PMC interrupt: one virtual irq per raised+unmasked bit. */
static irqreturn_t pmc_irq_handler(int irq, void *data)
{
	struct at91_pmc *pmc = data;
	unsigned long pending;
	int bit;

	pending = pmc_read(pmc, AT91_PMC_SR) & pmc_read(pmc, AT91_PMC_IMR);
	if (!pending)
		return IRQ_NONE;

	for_each_set_bit(bit, &pending, BITS_PER_LONG)
		generic_handle_irq(irq_find_mapping(pmc->irqdomain, bit));

	return IRQ_HANDLED;
}
/* Chained handler for the baseboard key-detect GPIOs. */
static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	unsigned long detected;
	int pin;

	/* mask+ack the parent while the per-pin irqs are dispatched */
	chip->irq_mask_ack(data);

	detected = __raw_readw(KEYDETR);
	for_each_set_bit(pin, &detected, NR_BASEBOARD_GPIOS)
		generic_handle_irq(irq_linear_revmap(x3proto_irq_domain, pin));

	chip->irq_unmask(data);
}
/* Delete every VLAN configured on @port and release its vlan info. */
void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_port_vlans *pv;
	u16 vid;

	ASSERT_RTNL();

	pv = rtnl_dereference(port->vlan_info);
	if (!pv)
		return;

	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
		vlan_vid_del(port->dev, htons(ETH_P_8021Q), vid);

	__vlan_flush(pv);
}
static void oxnas_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct oxnas_gpio_bank *bank = gpiochip_get_data(gc); struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long stat; unsigned int pin; chained_irq_enter(chip, desc); stat = readl(bank->reg_base + IRQ_PENDING); for_each_set_bit(pin, &stat, BITS_PER_LONG) generic_handle_irq(irq_linear_revmap(gc->irqdomain, pin)); chained_irq_exit(chip, desc); }
/* Build a cpu_map listing every CPU whose bit is set in @mask. */
static struct cpu_map *cpu_map__from_mask(struct cpu_map_mask *mask)
{
	int nbits = mask->nr * mask->long_size * BITS_PER_BYTE;
	struct cpu_map *map;
	int weight;

	weight = bitmap_weight(mask->mask, nbits);
	map = cpu_map__empty_new(weight);
	if (map) {
		int idx = 0;
		int cpu;

		for_each_set_bit(cpu, mask->mask, nbits)
			map->map[idx++] = cpu;
	}

	return map;
}
static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) { unsigned long pending; int offset; struct pl061_gpio *chip = irq_desc_get_handler_data(desc); struct irq_chip *irqchip = irq_desc_get_chip(desc); chained_irq_enter(irqchip, desc); pending = readb(chip->base + GPIOMIS); writeb(pending, chip->base + GPIOIC); if (pending) { for_each_set_bit(offset, &pending, PL061_GPIO_NR) generic_handle_irq(pl061_to_irq(&chip->gc, offset)); } chained_irq_exit(irqchip, desc); }
static void ftgpio_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct ftgpio_gpio *g = gpiochip_get_data(gc); struct irq_chip *irqchip = irq_desc_get_chip(desc); int offset; unsigned long stat; chained_irq_enter(irqchip, desc); stat = readl(g->base + GPIO_INT_STAT_RAW); if (stat) for_each_set_bit(offset, &stat, gc->ngpio) generic_handle_irq(irq_find_mapping(gc->irq.domain, offset)); chained_irq_exit(irqchip, desc); }
static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id) { struct bcma_drv_cc *cc = dev_id; u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN); u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ); u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL); unsigned long irqs = (val ^ pol) & mask; int gpio; if (!irqs) return IRQ_NONE; for_each_set_bit(gpio, &irqs, cc->gpio.ngpio) generic_handle_irq(bcma_gpio_to_irq(&cc->gpio, gpio)); bcma_chipco_gpio_polarity(cc, irqs, val & irqs); return IRQ_HANDLED; }
static irqreturn_t ssb_gpio_irq_extif_handler(int irq, void *dev_id) { struct ssb_bus *bus = dev_id; struct ssb_extif *extif = &bus->extif; u32 val = ssb_read32(extif->dev, SSB_EXTIF_GPIO_IN); u32 mask = ssb_read32(extif->dev, SSB_EXTIF_GPIO_INTMASK); u32 pol = ssb_read32(extif->dev, SSB_EXTIF_GPIO_INTPOL); unsigned long irqs = (val ^ pol) & mask; int gpio; if (!irqs) return IRQ_NONE; for_each_set_bit(gpio, &irqs, bus->gpio.ngpio) generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio)); ssb_extif_gpio_polarity(extif, irqs, val & irqs); return IRQ_HANDLED; }
static irqreturn_t ssb_gpio_irq_chipco_handler(int irq, void *dev_id) { struct ssb_bus *bus = dev_id; struct ssb_chipcommon *chipco = &bus->chipco; u32 val = chipco_read32(chipco, SSB_CHIPCO_GPIOIN); u32 mask = chipco_read32(chipco, SSB_CHIPCO_GPIOIRQ); u32 pol = chipco_read32(chipco, SSB_CHIPCO_GPIOPOL); unsigned long irqs = (val ^ pol) & mask; int gpio; if (!irqs) return IRQ_NONE; for_each_set_bit(gpio, &irqs, bus->gpio.ngpio) generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio)); ssb_chipco_gpio_polarity(chipco, irqs, val & irqs); return IRQ_HANDLED; }
static irqreturn_t keyscan_isr(int irq, void *dev_id) { struct st_keyscan *keypad = dev_id; unsigned short *keycode = keypad->input_dev->keycode; unsigned long state, change; int bit_nr; state = readl(keypad->base + KEYSCAN_MATRIX_STATE_OFF) & 0xffff; change = keypad->last_state ^ state; keypad->last_state = state; for_each_set_bit(bit_nr, &change, BITS_PER_LONG) input_report_key(keypad->input_dev, keycode[bit_nr], state & BIT(bit_nr)); input_sync(keypad->input_dev); return IRQ_HANDLED; }
static irqreturn_t dio48e_irq_handler(int irq, void *dev_id) { struct dio48e_gpio *const dio48egpio = dev_id; struct gpio_chip *const chip = &dio48egpio->chip; const unsigned long irq_mask = dio48egpio->irq_mask; unsigned long gpio; for_each_set_bit(gpio, &irq_mask, 2) generic_handle_irq(irq_find_mapping(chip->irqdomain, 19 + gpio*24)); raw_spin_lock(&dio48egpio->lock); outb(0x00, dio48egpio->base + 0xF); raw_spin_unlock(&dio48egpio->lock); return IRQ_HANDLED; }
/* Self-test: bitmap_set must set exactly bits [3,4] and the iterator
 * must visit them in ascending order. */
int main()
{
	DEFINE_BITMAP(bm, 1000);
	int expected, bit;

	bitmap_zero(bm, 1000);
	bitmap_set(bm, 3, 2);

	/* exactly bits 3 and 4 are set */
	assert(!test_bit(2, bm));
	assert(test_bit(3, bm));
	assert(test_bit(4, bm));
	assert(!test_bit(5, bm));

	/* the iterator visits 3 then 4, in order */
	expected = 3;
	for_each_set_bit(bit, bm, 1000)
		assert(bit == expected++);

	printf("passed\n");
	return 0;
}
static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); struct msic_gpio *mg = irq_data_get_irq_handler_data(data); struct irq_chip *chip = irq_data_get_irq_chip(data); struct intel_msic *msic = pdev_to_intel_msic(mg->pdev); int i; int bitnr; u8 pin; unsigned long pending = 0; for (i = 0; i < (mg->chip.ngpio / BITS_PER_BYTE); i++) { intel_msic_irq_read(msic, INTEL_MSIC_GPIO0LVIRQ + i, &pin); pending = pin; if (pending) { for_each_set_bit(bitnr, &pending, BITS_PER_BYTE) generic_handle_irq(mg->irq_base + (i * BITS_PER_BYTE) + bitnr); } } chip->irq_eoi(data); }
/* Deferred worker: reads, acks and dispatches HI6401 codec interrupts. */
void hi6401_irq_work_func(struct work_struct *work)
{
	struct hi6401_irq *hi6401_irq = container_of(work, struct hi6401_irq,
			hi6401_irq_delay_work.work);
	unsigned long pending = 0;
	int offset;
	unsigned long flags = 0;

	BUG_ON(NULL == hi6401_irq);

	mutex_lock(&hi6401_irq->sr_mutex);
	/* Collect pending, in-field, unmasked bits and ack them at source. */
	pending = hi6401_irq_read(hi6401_irq, HI6401_REG_IRQ_0);
	pending &= HI6401_MASK_FIELD;
	pending &= (~hi6401_irq_read(hi6401_irq, HI6401_REG_IRQM_0));
	hi6401_irq_write(hi6401_irq, HI6401_REG_IRQ_0, pending);
	/* re-arm the upstream line before dispatching the per-bit handlers */
	enable_irq(hi6401_irq->irq);

	/* handle each irq */
	spin_lock_irqsave(&hi6401_irq->lock, flags);
	if (pending) {
		for_each_set_bit(offset, &pending, HI6401_BITS)
			generic_handle_irq(hi6401_irq->irqs[offset]);
	} else {
		/* nothing pending: clear both irq registers and set IRQM_1
		 * to all-ones, per the "clr all" recovery path below */
		pr_err("clr all hi6401 irq\n");
		hi6401_irq_write(hi6401_irq, HI6401_REG_IRQ_0, 0xFF);
		hi6401_irq_write(hi6401_irq, HI6401_REG_IRQ_1, 0xFF);
		hi6401_irq_write(hi6401_irq, HI6401_REG_IRQM_1, 0xFF);
	}
	spin_unlock_irqrestore(&hi6401_irq->lock, flags);

	/* hold a wakelock briefly so the events can be consumed */
	wake_lock_timeout(&hi6401_irq->wake_lock, 3000);
	mutex_unlock(&hi6401_irq->sr_mutex);
}