static irqreturn_t yas_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct yas_state *st = iio_priv(indio_dev);
	int len = 0, i, j;
	int32_t *acc;

	acc = (int32_t *)kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
	if (acc == NULL) {
		E("%s: memory alloc failed in buffer bh\n", __func__);
		goto done;
	}
	if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
		j = 0;
		for (i = 0; i < 3; i++) {
			if (test_bit(i, indio_dev->active_scan_mask)) {
				acc[j] = st->accel_data[i];
				j++;
			}
		}
		len = j * 4;
	}

	
	if (indio_dev->scan_timestamp)
		*(s64 *)((u8 *)acc + ALIGN(len, sizeof(s64))) = pf->timestamp;

	iio_push_to_buffers(indio_dev, (u8 *)acc);
	kfree(acc);
done:
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
/**
 * ade7758_ring_preenable() setup the parameters of the ring before enabling
 *
 * The complex nature of the setting of the number of bytes per datum is due
 * to this driver currently ensuring that the timestamp is stored at an 8
 * byte boundary.
 **/
static int ade7758_ring_preenable(struct iio_dev *indio_dev)
{
	struct ade7758_state *st = iio_priv(indio_dev);
	struct iio_buffer *ring = indio_dev->buffer;
	size_t d_size;
	unsigned channel;

	/*
	 * Refuse an empty scan mask: find_first_bit() below would return
	 * masklength and index out of bounds.  (The original test was
	 * inverted — `!bitmap_empty` — rejecting every valid scan mask and
	 * letting the empty one through; compare the later driver version.)
	 */
	if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
		return -EINVAL;

	channel = find_first_bit(indio_dev->active_scan_mask,
				 indio_dev->masklength);

	d_size = st->ade7758_ring_channels[channel].scan_type.storagebits / 8;

	if (ring->scan_timestamp) {
		d_size += sizeof(s64);

		/* round up so the appended timestamp is 8-byte aligned */
		if (d_size % sizeof(s64))
			d_size += sizeof(s64) - (d_size % sizeof(s64));
	}

	if (indio_dev->buffer->access->set_bytes_per_datum)
		indio_dev->buffer->access->
			set_bytes_per_datum(indio_dev->buffer, d_size);

	ade7758_write_waveform_type(&indio_dev->dev,
		st->ade7758_ring_channels[channel].address);

	return 0;
}
Example #3
0
/*
 * Print one IRQ resource descriptor: the comma-separated list of possible
 * lines (or "<none>"), followed by any trigger-mode flags.
 */
static void pnp_print_irq(pnp_info_buffer_t *buffer, char *space, struct pnp_irq *irq)
{
	int printed = 0;
	int bit;

	pnp_printf(buffer, "%sirq ", space);
	for (bit = 0; bit < PNP_IRQ_NR; bit++) {
		if (!test_bit(bit, irq->map))
			continue;
		if (printed)
			pnp_printf(buffer, ",");
		printed = 1;
		/* IRQ 2 and 9 are the same line on PC hardware */
		if (bit == 2 || bit == 9)
			pnp_printf(buffer, "2/9");
		else
			pnp_printf(buffer, "%i", bit);
	}
	if (bitmap_empty(irq->map, PNP_IRQ_NR))
		pnp_printf(buffer, "<none>");
	if (irq->flags & IORESOURCE_IRQ_HIGHEDGE)
		pnp_printf(buffer, " High-Edge");
	if (irq->flags & IORESOURCE_IRQ_LOWEDGE)
		pnp_printf(buffer, " Low-Edge");
	if (irq->flags & IORESOURCE_IRQ_HIGHLEVEL)
		pnp_printf(buffer, " High-Level");
	if (irq->flags & IORESOURCE_IRQ_LOWLEVEL)
		pnp_printf(buffer, " Low-Level");
	pnp_printf(buffer, "\n");
}
Example #4
0
/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too
 * device specific to be rolled into the core.
 */
static irqreturn_t adis16209_trigger_handler(int irq, void *p)
{
    struct iio_poll_func *pf = p;
    struct iio_dev *indio_dev = pf->indio_dev;
    struct adis16209_state *st = iio_priv(indio_dev);
    struct iio_buffer *ring = indio_dev->buffer;

    int i = 0;
    s16 *data;
    size_t datasize = ring->access->get_bytes_per_datum(ring);

    data = kmalloc(datasize, GFP_KERNEL);
    if (data == NULL) {
        dev_err(&st->us->dev, "memory alloc failed in ring bh");
        /*
         * An irqreturn_t handler must not return -ENOMEM; notify the
         * trigger core we are done and report the IRQ as handled instead
         * (the old early return also leaked the notify_done call).
         */
        goto done;
    }

    if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
            adis16209_read_ring_data(&indio_dev->dev, st->rx) >= 0)
        for (; i < bitmap_weight(indio_dev->active_scan_mask,
                                 indio_dev->masklength); i++)
            data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));

    /* Guaranteed to be aligned with 8 byte boundary */
    if (ring->scan_timestamp)
        *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;

    ring->access->store_to(ring, (u8 *)data, pf->timestamp);

    kfree(data);
done:
    iio_trigger_notify_done(indio_dev->trig);

    return IRQ_HANDLED;
}
/*
 * __vlan_del() - remove @vid from the bridge/port VLAN set @v.
 *
 * Returns 0 on success, -EINVAL when @vid was never configured.  When the
 * last vid is removed, the whole vlan_info structure is unpublished and
 * freed after an RCU grace period.
 */
static int __vlan_del(struct net_port_vlans *v, u16 vid)
{
	if (!test_bit(vid, v->vlan_bitmap))
		return -EINVAL;

	/* drop dependent state first: PVID marker and untagged flag */
	__vlan_delete_pvid(v, vid);
	clear_bit(vid, v->untagged_bitmap);

	/* port context (port_idx != 0): let hardware filters forget the vid */
	if (v->port_idx && vid) {
		struct net_device *dev = v->parent.port->dev;
		const struct net_device_ops *ops = dev->netdev_ops;

		if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
			ops->ndo_vlan_rx_kill_vid(dev, htons(ETH_P_8021Q), vid);
	}

	clear_bit(vid, v->vlan_bitmap);
	v->num_vlans--;
	if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
		/* last vid gone: unpublish from port or bridge, free via RCU */
		if (v->port_idx)
			rcu_assign_pointer(v->parent.port->vlan_info, NULL);
		else
			rcu_assign_pointer(v->parent.br->vlan_info, NULL);
		kfree_rcu(v, rcu);
	}
	return 0;
}
static irqreturn_t yas_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct yas_state *st = iio_priv(indio_dev);
	int len = 0, i, j;
	int32_t *mag;

	mag = (int32_t *) kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
	if (mag == NULL)
		goto done;
	if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
		j = 0;
		for (i = 0; i < 3; i++) {
			if (test_bit(i, indio_dev->active_scan_mask)) {
				mag[j] = st->compass_data[i];
				j++;
			}
		}
		len = j * 4;
	}

	/* Guaranteed to be aligned with 8 byte boundary */
	if (indio_dev->scan_timestamp)
		*(s64 *)((u8 *)mag + ALIGN(len, sizeof(s64))) = pf->timestamp;
	iio_push_to_buffers(indio_dev, (u8 *)mag);
	kfree(mag);
done:
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
Example #7
0
/*
 * Verify that @model can run under the configuration described by
 * @max_model; on incompatibility, set *@errp with a message naming either
 * the maximum supported model or the missing features.
 */
static void check_compatibility(const S390CPUModel *max_model,
                                const S390CPUModel *model, Error **errp)
{
    S390FeatBitmap missing;
    const S390CPUDef *want = model->def;
    const S390CPUDef *have = max_model->def;

    /* reject models from a newer generation, or a newer GA of the same one */
    if (want->gen > have->gen) {
        error_setg(errp, "Selected CPU generation is too new. Maximum "
                   "supported model in the configuration: \'%s\'",
                   have->name);
        return;
    }
    if (want->gen == have->gen && want->ec_ga > have->ec_ga) {
        error_setg(errp, "Selected CPU GA level is too new. Maximum "
                   "supported model in the configuration: \'%s\'",
                   have->name);
        return;
    }

    /* detect the missing features to properly report them */
    bitmap_andnot(missing, model->features, max_model->features, S390_FEAT_MAX);
    if (bitmap_empty(missing, S390_FEAT_MAX)) {
        return;
    }

    error_setg(errp, " ");
    s390_feat_bitmap_to_ascii(missing, errp, error_prepend_missing_feat);
    error_prepend(errp, "Some features requested in the CPU model are not "
                  "available in the configuration: ");
}
static irqreturn_t yas_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct yas_state *st = iio_priv(indio_dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	size_t datasize = buffer->access->get_bytes_per_datum(buffer);
	int len = 0, i, j;
	int32_t *mag;
	s64 timestamp;

	mag = (int32_t *) kmalloc(datasize, GFP_KERNEL);
	if (mag == NULL)
		goto done;
	if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
		j = 0;
		for (i = 0; i < 3; i++) {
			if (test_bit(i, indio_dev->active_scan_mask)) {
				mag[j] = st->compass_data[i];
				j++;
			}
		}
		len = j * 4;
	}

	timestamp = yas_iio_get_boottime_ns();
	*(s64 *)((u8 *)mag + ALIGN(len, sizeof(s64))) = timestamp;

	iio_push_to_buffer(indio_dev->buffer, (u8 *)mag, 0);
	kfree(mag);
done:
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
Example #9
0
/*
 * pnp_assign_irq() - auto-assign an IRQ line to resource slot @idx of @dev
 * according to @rule.
 *
 * Returns 1 when the slot needs no further work (assigned, disabled, or
 * already configured manually) and 0 when no usable IRQ was found.
 */
static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
{
    struct pnp_resource *pnp_res;
    struct resource *res;
    int i;

    /* IRQ priority: this table is good for i386 */
    static unsigned short xtab[16] = {
        5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2
    };

    pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, idx);
    if (!pnp_res) {
        dev_err(&dev->dev, "too many IRQ resources\n");
        /* pretend we were successful so at least the manager won't try again */
        return 1;
    }

    res = &pnp_res->res;

    /* check if this resource has been manually set, if so skip */
    if (!(res->flags & IORESOURCE_AUTO)) {
        dev_dbg(&dev->dev, "  irq %d already set to %d flags %#lx\n",
            idx, (int) res->start, res->flags);
        return 1;
    }

    /* set the initial values */
    pnp_res->index = idx;
    res->flags |= rule->flags | IORESOURCE_IRQ;
    res->flags &= ~IORESOURCE_UNSET;

    /* an empty rule map means "no IRQ": mark disabled rather than fail */
    if (bitmap_empty(rule->map, PNP_IRQ_NR)) {
        res->flags |= IORESOURCE_DISABLED;
        dev_dbg(&dev->dev, "  irq %d disabled\n", idx);
        return 1;    /* skip disabled resource requests */
    }

    /* TBD: need check for >16 IRQ */
    /* prefer an IRQ above the legacy 16, if the rule offers one */
    res->start = find_next_bit(rule->map, PNP_IRQ_NR, 16);
    if (res->start < PNP_IRQ_NR) {
        res->end = res->start;
        dev_dbg(&dev->dev, "  assign irq %d %d\n", idx,
            (int) res->start);
        return 1;
    }
    /* otherwise walk the prioritized legacy IRQs, checking availability */
    for (i = 0; i < 16; i++) {
        if (test_bit(xtab[i], rule->map)) {
            res->start = res->end = xtab[i];
            if (pnp_check_irq(dev, res)) {
                dev_dbg(&dev->dev, "  assign irq %d %d\n", idx,
                    (int) res->start);
                return 1;
            }
        }
    }
    dev_dbg(&dev->dev, "  couldn't assign irq %d\n", idx);
    return 0;
}
/**
 * iio_simple_dummy_trigger_h() - the trigger handler function
 * @irq: the interrupt number
 * @p: private data - always a pointer to the poll func.
 *
 * This is the guts of buffered capture. On a trigger event occurring,
 * if the pollfunc is attached then this handler is called as a threaded
 * interrupt (and hence may sleep). It is responsible for grabbing data
 * from the device and pushing it into the associated buffer.
 */
static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
{
    struct iio_poll_func *pf = p;
    struct iio_dev *indio_dev = pf->indio_dev;
    struct iio_buffer *buffer = indio_dev->buffer;
    int len = 0;
    u16 *data;

    data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
    if (data == NULL)
        return -ENOMEM;

    if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
        /*
         * Three common options here:
         * hardware scans: certain combinations of channels make
         *   up a fast read.  The capture will consist of all of them.
         *   Hence we just call the grab data function and fill the
         *   buffer without processing.
         * software scans: can be considered to be random access
         *   so efficient reading is just a case of minimal bus
         *   transactions.
         * software culled hardware scans:
         *   occasionally a driver may process the nearest hardware
         *   scan to avoid storing elements that are not desired. This
         *   is the fidliest option by far.
         * Here lets pretend we have random access. And the values are
         * in the constant table fakedata.
         */
        int i, j;
        for (i = 0, j = 0;
                i < bitmap_weight(indio_dev->active_scan_mask,
                                  indio_dev->masklength);
                i++) {
            j = find_next_bit(buffer->scan_mask,
                              indio_dev->masklength, j + 1);
            /* random access read form the 'device' */
            data[i] = fakedata[j];
            len += 2;
        }
    }
    /* Store a timestampe at an 8 byte boundary */
    if (indio_dev->scan_timestamp)
        *(s64 *)(((phys_addr_t)data + len
                  + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
            = iio_get_time_ns();
    buffer->access->store_to(buffer, (u8 *)data, pf->timestamp);

    kfree(data);

    /*
     * Tell the core we are done with this trigger and ready for the
     * next one.
     */
    iio_trigger_notify_done(indio_dev->trig);

    return IRQ_HANDLED;
}
Example #11
0
/*
 * Return request id @id to the free pool.  Reports whether the caller
 * should kick waiters: true when the pool was previously exhausted or
 * someone is waiting for ring space.
 */
static int _scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
{
	int was_exhausted;

	was_exhausted = bitmap_empty(info->shadow_free_bitmap,
				     VSCSIIF_MAX_REQS);
	__set_bit(id, info->shadow_free_bitmap);
	info->shadow[id] = NULL;

	return was_exhausted || info->wait_ring_available;
}
Example #12
0
/*
 * s390_find_cpu_def() - look up the best matching CPU definition.
 * @type: desired cpu type (0 acts as a wildcard)
 * @gen: desired generation (0 acts as a wildcard)
 * @ec_ga: desired GA level (cleared when @gen is 0)
 * @features: minimum feature set the model must provide (may be NULL)
 *
 * Returns the exact definition when type and GA level match; otherwise
 * the newest compatible definition seen, preferring one with the same
 * cpu type (so a BC is not handed out in place of an EC); NULL when
 * nothing is compatible.
 */
const S390CPUDef *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga,
                                    S390FeatBitmap features)
{
    const S390CPUDef *last_compatible = NULL;
    const S390CPUDef *matching_cpu_type = NULL;
    int i;

    if (!gen) {
        ec_ga = 0;
    }
    if (!gen && type) {
        gen = s390_get_gen_for_cpu_type(type);
    }

    /* s390_cpu_defs is scanned in order; newer entries come later */
    for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
        const S390CPUDef *def = &s390_cpu_defs[i];
        S390FeatBitmap missing;

        /* don't even try newer generations if we know the generation */
        if (gen) {
            if (def->gen > gen) {
                break;
            } else if (def->gen == gen && ec_ga && def->ec_ga > ec_ga) {
                break;
            }
        }

        if (features) {
            /* see if the model satisfies the minimum features */
            bitmap_andnot(missing, def->base_feat, features, S390_FEAT_MAX);
            /*
             * Ignore certain features that are in the base model, but not
             * relevant for the search (esp. MSA subfunctions).
             */
            bitmap_andnot(missing, missing, ignored_base_feat, S390_FEAT_MAX);
            if (!bitmap_empty(missing, S390_FEAT_MAX)) {
                break;
            }
        }

        /* stop the search if we found the exact model */
        if (def->type == type && def->ec_ga == ec_ga) {
            return def;
        }
        /* remember if we've at least seen one with the same cpu type */
        if (def->type == type) {
            matching_cpu_type = def;
        }
        last_compatible = def;
    }
    /* prefer the model with the same cpu type, esp. don't take the BC for EC */
    if (matching_cpu_type) {
        return matching_cpu_type;
    }
    return last_compatible;
}
void wa_rpipes_destroy(struct wahc *wa)
{
	struct device *dev = &wa->usb_iface->dev;

	if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) {
		WARN_ON(1);
		dev_err(dev, "BUG: pipes not released on exit: %*pb\n",
			wa->rpipes, wa->rpipe_bm);
	}
	kfree(wa->rpipe_bm);
}
Example #14
0
/* Check if all specified nodes are online */
static int nodes_online(unsigned long *nodes)
{
	DECLARE_BITMAP(online2, MAX_NUMNODES);

	/* snapshot the online map; an all-offline map falls back to node 0 */
	bitmap_copy(online2, nodes_addr(node_online_map), MAX_NUMNODES);
	if (bitmap_empty(online2, MAX_NUMNODES))
		set_bit(0, online2);

	return bitmap_subset(nodes, online2, MAX_NUMNODES) ? 0 : -EINVAL;
}
Example #15
0
/*
 * wa_rpipes_destroy() - free the rpipe bookkeeping bitmap.
 *
 * If any rpipes are still marked allocated at teardown, format the
 * bitmap into a stack buffer and log it as a bug before freeing.
 */
void wa_rpipes_destroy(struct wahc *wa)
{
	struct device *dev = &wa->usb_iface->dev;

	if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) {
		char buf[256];
		WARN_ON(1);
		bitmap_scnprintf(buf, sizeof(buf), wa->rpipe_bm, wa->rpipes);
		dev_err(dev, "BUG: pipes not released on exit: %s\n", buf);
	}
	kfree(wa->rpipe_bm);
}
Example #16
0
/*
 * Module exit: clear cluster id 0 (NOTE(review): presumably reserved by
 * this module at init time — confirm), warn if any other cluster ids are
 * still allocated, then tear down the usb notifier, the workqueue and
 * the crypto support.
 */
static void __exit wusbcore_exit(void)
{
	clear_bit(0, wusb_cluster_id_table);
	if (!bitmap_empty(wusb_cluster_id_table, CLUSTER_IDS)) {
		printk(KERN_ERR "BUG: WUSB Cluster IDs not released on exit: %*pb\n",
		       CLUSTER_IDS, wusb_cluster_id_table);
		WARN_ON(1);
	}
	usb_unregister_notify(&wusb_usb_notifier);
	destroy_workqueue(wusbd);
	wusb_crypto_exit();
}
Example #17
0
/*
 * Find the first entry in the zero-terminated table @av_masks that is a
 * superset of @mask.  NULL doubles as the error indicator (an empty
 * request mask can never match anything).
 */
static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
					  unsigned int masklength,
					  unsigned long *mask)
{
	unsigned long *m;

	if (bitmap_empty(mask, masklength))
		return NULL;

	/* each table entry occupies BITS_TO_LONGS(masklength) words */
	for (m = av_masks; *m; m += BITS_TO_LONGS(masklength)) {
		if (bitmap_subset(mask, m, masklength))
			return m;
	}
	return NULL;
}
Example #18
0
/*
 * pnp_assign_irq() - auto-assign an IRQ to resource slot @idx of @dev per
 * @rule.
 *
 * Returns 1 when the slot needs no further attention (assigned, disabled,
 * or manually configured), 0 when no acceptable IRQ was found, and a
 * negative errno on invalid arguments.
 */
static int pnp_assign_irq(struct pnp_dev * dev, struct pnp_irq *rule, int idx)
{
	unsigned long *start, *end, *flags;
	int i;

	/* IRQ priority: this table is good for i386 */
	static unsigned short xtab[16] = {
		5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2
	};

	if (!dev || !rule)
		return -EINVAL;

	if (idx >= PNP_MAX_IRQ) {
		pnp_err("More than 2 irqs is incompatible with pnp specifications.");
		/* pretend we were successful so at least the manager won't try again */
		return 1;
	}

	/* check if this resource has been manually set, if so skip */
	if (!(dev->res.irq_resource[idx].flags & IORESOURCE_AUTO))
		return 1;

	start = &dev->res.irq_resource[idx].start;
	end = &dev->res.irq_resource[idx].end;
	flags = &dev->res.irq_resource[idx].flags;

	/* set the initial values */
	*flags |= rule->flags | IORESOURCE_IRQ;
	*flags &=  ~IORESOURCE_UNSET;

	/* an empty rule map means "no IRQ": mark disabled rather than fail */
	if (bitmap_empty(rule->map, PNP_IRQ_NR)) {
		*flags |= IORESOURCE_DISABLED;
		return 1; /* skip disabled resource requests */
	}

	/* TBD: need check for >16 IRQ */
	/* prefer an IRQ above the legacy 16, if the rule offers one */
	*start = find_next_bit(rule->map, PNP_IRQ_NR, 16);
	if (*start < PNP_IRQ_NR) {
		*end = *start;
		return 1;
	}
	/* otherwise walk the prioritized legacy IRQs, checking availability */
	for (i = 0; i < 16; i++) {
		if(test_bit(xtab[i], rule->map)) {
			*start = *end = xtab[i];
			if(pnp_check_irq(dev, idx))
				return 1;
		}
	}
	return 0;
}
Example #19
0
/*
 * convert S390CPUDef into a static CpuModelInfo
 *
 * @delta_changes: when true, only the differences from the base feature
 * set are reported as properties; otherwise every feature is listed
 * explicitly as enabled or disabled.
 */
static void cpu_info_from_model(CpuModelInfo *info, const S390CPUModel *model,
                                bool delta_changes)
{
    QDict *qdict = qdict_new();
    S390FeatBitmap bitmap;

    /* always fallback to the static base model */
    info->name = g_strdup_printf("%s-base", model->def->name);

    if (delta_changes) {
        /* features deleted from the base feature set */
        bitmap_andnot(bitmap, model->def->base_feat, model->features,
                      S390_FEAT_MAX);
        if (!bitmap_empty(bitmap, S390_FEAT_MAX)) {
            s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat);
        }

        /* features added to the base feature set */
        bitmap_andnot(bitmap, model->features, model->def->base_feat,
                      S390_FEAT_MAX);
        if (!bitmap_empty(bitmap, S390_FEAT_MAX)) {
            s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_enabled_feat);
        }
    } else {
        /* expand all features */
        s390_feat_bitmap_to_ascii(model->features, qdict,
                                  qdict_add_enabled_feat);
        bitmap_complement(bitmap, model->features, S390_FEAT_MAX);
        s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat);
    }

    /* only attach the property dict when it ended up non-empty */
    if (!qdict_size(qdict)) {
        QDECREF(qdict);
    } else {
        info->props = QOBJECT(qdict);
        info->has_props = true;
    }
}
Example #20
0
/*
 * Run software resends of IRQ's
 *
 * Drain the pending-resend bitmap, replaying each interrupt's handler
 * with local interrupts disabled.
 */
static void resend_irqs(unsigned long arg)
{
	int irq;

	while (!bitmap_empty(irqs_resend, NR_IRQS)) {
		struct irq_desc *desc;

		irq = find_first_bit(irqs_resend, NR_IRQS);
		clear_bit(irq, irqs_resend);
		desc = irq_desc + irq;

		local_irq_disable();
		desc->handle_irq(irq, desc, NULL);
		local_irq_enable();
	}
}
Example #21
0
/*
 * Run software resends of IRQ's
 *
 * Drains the irqs_resend bitmap, replaying each pending interrupt's
 * handler with local interrupts disabled.  NOTE(review): caller context
 * (tasklet vs. timer) is not visible here — confirm before relying on it.
 */
static void resend_irqs(unsigned long arg)
{
	struct irq_desc *desc;
	int irq;

	while (!bitmap_empty(irqs_resend, nr_irqs)) {
		irq = find_first_bit(irqs_resend, nr_irqs);
		clear_bit(irq, irqs_resend);
		desc = irq_to_desc(irq);
		local_irq_disable();
		desc->handle_irq(desc);
		local_irq_enable();
	}
}
Example #22
0
/*
 * balloon_deflate_page() - the guest is taking a previously ballooned
 * page at @mr/@offset back into use.
 *
 * If the page belongs to the partially-ballooned host page currently
 * tracked in balloon->pbp, clear its subpage bits and drop the tracking
 * state once no discarded subpages remain.  Finally MADV_WILLNEED the
 * whole host page, since hinting cannot be done at a finer grain.
 */
static void balloon_deflate_page(VirtIOBalloon *balloon,
                                 MemoryRegion *mr, hwaddr offset)
{
    void *addr = memory_region_get_ram_ptr(mr) + offset;
    RAMBlock *rb;
    size_t rb_page_size;
    ram_addr_t ram_offset, host_page_base;
    void *host_addr;
    int ret;

    /* XXX is there a better way to get to the RAMBlock than via a
     * host address? */
    rb = qemu_ram_block_from_host(addr, false, &ram_offset);
    rb_page_size = qemu_ram_pagesize(rb);
    host_page_base = ram_offset & ~(rb_page_size - 1);

    if (balloon->pbp
        && rb == balloon->pbp->rb
        && host_page_base == balloon->pbp->base) {
        int subpages = rb_page_size / BALLOON_PAGE_SIZE;

        /*
         * This means the guest has asked to discard some of the 4kiB
         * subpages of a host page, but then changed its mind and
         * asked to keep them after all.  It's exceedingly unlikely
         * for a guest to do this in practice, but handle it anyway,
         * since getting it wrong could mean discarding memory the
         * guest is still using. */
        bitmap_clear(balloon->pbp->bitmap,
                     (ram_offset - balloon->pbp->base) / BALLOON_PAGE_SIZE,
                     subpages);

        /* no subpages left discarded: stop tracking this host page */
        if (bitmap_empty(balloon->pbp->bitmap, subpages)) {
            g_free(balloon->pbp);
            balloon->pbp = NULL;
        }
    }

    host_addr = (void *)((uintptr_t)addr & ~(rb_page_size - 1));

    /* When a page is deflated, we hint the whole host page it lives
     * on, since we can't do anything smaller */
    ret = qemu_madvise(host_addr, rb_page_size, QEMU_MADV_WILLNEED);
    if (ret != 0) {
        warn_report("Couldn't MADV_WILLNEED on balloon deflate: %s",
                    strerror(errno));
        /* Otherwise ignore, failing to page hint shouldn't be fatal */
    }
}
Example #23
0
/**
 * ade7758_ring_preenable() setup the parameters of the ring before enabling
 *
 * Verifies that at least one channel is selected, then programs the
 * waveform register for the first active channel.
 **/
static int ade7758_ring_preenable(struct iio_dev *indio_dev)
{
	unsigned first_chan;

	/* refuse to start a capture with an empty scan mask */
	if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
		return -EINVAL;

	first_chan = find_first_bit(indio_dev->active_scan_mask,
				    indio_dev->masklength);
	ade7758_write_waveform_type(&indio_dev->dev,
		indio_dev->channels[first_chan].address);

	return 0;
}
Example #24
0
/* Check whether the current CPU supports all VQs in the committed set */
int sve_verify_vq_map(void)
{
	/*
	 * Probe this CPU's vector lengths, then compute which committed
	 * lengths it does NOT support (committed AND NOT probed).
	 */
	sve_probe_vqs(sve_secondary_vq_map);
	bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map,
		      SVE_VQ_MAX);
	if (bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX))
		return 0;

	pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
		smp_processor_id());
	return -EINVAL;
}
Example #25
0
/*
 * Module exit: clear cluster id 0 (NOTE(review): presumably reserved by
 * this module at init time — confirm), format and log any cluster ids
 * still allocated, then tear down the usb notifier, the workqueue and
 * the crypto support.
 */
static void __exit wusbcore_exit(void)
{
	clear_bit(0, wusb_cluster_id_table);
	if (!bitmap_empty(wusb_cluster_id_table, CLUSTER_IDS)) {
		char buf[256];
		bitmap_scnprintf(buf, sizeof(buf), wusb_cluster_id_table,
				 CLUSTER_IDS);
		printk(KERN_ERR "BUG: WUSB Cluster IDs not released "
		       "on exit: %s\n", buf);
		WARN_ON(1);
	}
	usb_unregister_notify(&wusb_usb_notifier);
	destroy_workqueue(wusbd);
	wusb_crypto_exit();
}
Example #26
0
/*
 * Module exit: stop the optional test thread, destroy every remaining
 * black-board device while holding pdeviceslock, then release the
 * chardev region and the device class.
 */
static void __exit bb_cleanup_module(void)
{
	if (run_test && test_running) {
		kthread_stop(test_thread);
	}

	spin_lock(&pdeviceslock);
	/* loop terminates because bb_destroy() clears the device's bit */
	while(!bitmap_empty(present_devices, BB_DEV_MAX)) {
		int i = find_first_bit(present_devices, BB_DEV_MAX);
		/* bb_destroy clears the bit in present_devices */
		bb_destroy(&present_bbs[i]);
	}
	spin_unlock(&pdeviceslock);
	unregister_chrdev_region(bb_devt, BB_DEV_MAX);
	class_destroy(bb_dev_class);
	printk("Kernel black board unloaded.\n");
}
Example #27
0
/*
 * Trigger bottom half: grab one burst sample (24 bits, big endian) when
 * any channel is active, then push it with the poll-function timestamp.
 * Too device specific to be rolled into the IIO core.
 */
static irqreturn_t ade7758_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct ade7758_state *st = iio_priv(indio_dev);
	s64 scan[2];
	u32 *sample = (u32 *)scan;

	if (!bitmap_empty(indio_dev->active_scan_mask,
			  indio_dev->masklength) &&
	    ade7758_spi_read_burst(indio_dev) >= 0)
		*sample = get_unaligned_be32(&st->rx_buf[5]) & 0xFFFFFF;

	iio_push_to_buffers_with_timestamp(indio_dev, scan, pf->timestamp);

	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}
Example #28
0
/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, unsigned long *nodes)
{
	int is_empty = bitmap_empty(nodes, MAX_NUMNODES);

	/*
	 * MPOL_DEFAULT must carry no nodes; BIND and INTERLEAVE need at
	 * least one.  (Preferred will only use the first bit, but allow
	 * more for now.)
	 */
	if (mode == MPOL_DEFAULT) {
		if (!is_empty)
			return -EINVAL;
	} else if (mode == MPOL_BIND || mode == MPOL_INTERLEAVE) {
		if (is_empty)
			return -EINVAL;
	}
	return nodes_online(nodes);
}
/*
 * evtchn_set_pending() - mark event channel @port pending for @v's domain.
 *
 * Returns 1 when the event was already pending (nothing more to do),
 * 0 otherwise.  Kicks the target vcpu when the port is unmasked, and
 * wakes any vcpus polling on this port.
 */
static int evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    int vcpuid;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
        return 1;

    /* only notify when the port is unmasked and the selector was clear */
    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
                           &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }
    
    /* Check if some VCPU might be polling for this event. */
    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
        return 0;

    /* Wake any interested (or potentially interested) pollers. */
    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
          vcpuid < d->max_vcpus;
          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
    {
        v = d->vcpu[vcpuid];
        /* poll_evtchn <= 0 means "polling any port" for this vcpu */
        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
             test_and_clear_bit(vcpuid, d->poll_mask) )
        {
            v->poll_evtchn = 0;
            vcpu_unblock(v);
        }
    }

    return 0;
}
Example #30
0
/*
 * s390_find_cpu_def() - look up the best matching CPU definition.
 * @type: desired cpu type (0 acts as a wildcard)
 * @gen: desired generation (0 acts as a wildcard)
 * @ec_ga: desired GA level (cleared when @gen is 0)
 * @features: minimum feature set the model must provide (may be NULL)
 *
 * Returns the exact definition when type and GA level match, otherwise
 * the newest compatible definition seen, or NULL when nothing fits.
 */
const S390CPUDef *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga,
                                    S390FeatBitmap features)
{
    const S390CPUDef *last_compatible = NULL;
    int i;

    if (!gen) {
        ec_ga = 0;
    }
    if (!gen && type) {
        gen = s390_get_gen_for_cpu_type(type);
    }

    /* s390_cpu_defs is scanned in order; newer entries come later */
    for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
        const S390CPUDef *def = &s390_cpu_defs[i];
        S390FeatBitmap missing;

        /* don't even try newer generations if we know the generation */
        if (gen) {
            if (def->gen > gen) {
                break;
            } else if (def->gen == gen && ec_ga && def->ec_ga > ec_ga) {
                break;
            }
        }

        if (features) {
            /* see if the model satisfies the minimum features */
            bitmap_andnot(missing, def->base_feat, features, S390_FEAT_MAX);
            if (!bitmap_empty(missing, S390_FEAT_MAX)) {
                break;
            }
        }

        /* stop the search if we found the exact model */
        if (def->type == type && def->ec_ga == ec_ga) {
            return def;
        }
        last_compatible = def;
    }
    return last_compatible;
}