Example #1
static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe_smp_map *smp_map,
	size_t n)
{
	u32 i, mmb;
	u32 fixed_cnt = bitmap_weight(smp_map->fixed, SMP_MB_CNT);
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (n <= fixed_cnt)
		return fixed_cnt;
	else
		n -= fixed_cnt;

	i = bitmap_weight(smp_map->allocated, SMP_MB_CNT);

	if (i != 0 && n != i) {
		pr_debug("Can't change mmb config, num_blks: %zu alloc: %u\n",
			n, i);
		return 0;
	}

	mdss_mdp_smp_mmb_free(smp_map->reserved, false);

	/* reserve more blocks if needed, but can't free mmb at this point */
	for (; i < n; i++) {
		if (bitmap_full(mdata->mmb_alloc_map, SMP_MB_CNT))
			break;

		mmb = find_first_zero_bit(mdata->mmb_alloc_map, SMP_MB_CNT);
		set_bit(mmb, smp_map->reserved);
		set_bit(mmb, mdata->mmb_alloc_map);
	}

	return i + fixed_cnt;
}
Example #2
static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe_smp_map *smp_map,
                                    size_t n, bool force_alloc)
{
    u32 i, mmb;
    u32 fixed_cnt = bitmap_weight(smp_map->fixed, SMP_MB_CNT);
    struct mdss_data_type *mdata = mdss_mdp_get_mdata();

    if (n <= fixed_cnt)
        return fixed_cnt;
    else
        n -= fixed_cnt;

    i = bitmap_weight(smp_map->allocated, SMP_MB_CNT);

    /*
     * SMP programming is not double buffered. Fail the request,
     * that calls for change in smp configuration (addition/removal
     * of smp blocks), so that fallback solution happens.
     */
#if defined(CONFIG_ARCH_MSM8226) || defined(CONFIG_ARCH_MSM8974)
    if (i != 0 && n != i && !force_alloc) {
        pr_debug("Can't change mmb config, num_blks: %zu alloc: %u\n",
                 n, i);
        return 0;
    }
#else
    if (i != 0 && n != i) {
        pr_debug("Can't change mmb config, num_blks: %zu alloc: %u\n",
                 n, i);
        return 0;
    }
#endif
    /*
     * Clear previous SMP reservations and reserve according to the
     * latest configuration
     */
    mdss_mdp_smp_mmb_free(smp_map->reserved, false);

    /* Reserve mmb blocks*/
    for (; i < n; i++) {
        if (bitmap_full(mdata->mmb_alloc_map, SMP_MB_CNT))
            break;

        mmb = find_first_zero_bit(mdata->mmb_alloc_map, SMP_MB_CNT);
        set_bit(mmb, smp_map->reserved);
        set_bit(mmb, mdata->mmb_alloc_map);
    }

    return i + fixed_cnt;
}
Example #3
/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(unsigned long *nodes)
{
	struct zonelist *zl;
	int num, max, nd;

	max = 1 + MAX_NR_ZONES * bitmap_weight(nodes, MAX_NUMNODES);
	zl = kmalloc(sizeof(void *) * max, GFP_KERNEL);
	if (!zl)
		return NULL;
	num = 0;
	for (nd = find_first_bit(nodes, MAX_NUMNODES);
	     nd < MAX_NUMNODES;
	     nd = find_next_bit(nodes, MAX_NUMNODES, 1+nd)) {
		int k;
		for (k = MAX_NR_ZONES-1; k >= 0; k--) {
			struct zone *z = &NODE_DATA(nd)->node_zones[k];
			if (!z->present_pages)
				continue;
			zl->zones[num++] = z;
			if (k > policy_zone)
				policy_zone = k;
		}
	}
	BUG_ON(num >= max);
	zl->zones[num] = NULL;
	return zl;
}
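The function above uses bitmap_weight() once to bound a worst-case allocation: one slot per possible zone of every set node, plus a terminator. A minimal userspace sketch of that sizing idiom follows; the constant and helper names are assumptions for illustration, and __builtin_popcountl() stands in for bitmap_weight().

#include <stdio.h>
#include <stdlib.h>

#define MAX_ZONES_PER_NODE 4	/* illustrative stand-in for MAX_NR_ZONES */

/* Size a NULL-terminated pointer array from the number of set node bits,
 * mirroring "max = 1 + MAX_NR_ZONES * bitmap_weight(nodes, MAX_NUMNODES)". */
static void **alloc_zone_array(unsigned long node_mask)
{
	int max = 1 + MAX_ZONES_PER_NODE * __builtin_popcountl(node_mask);

	return calloc(max, sizeof(void *));	/* zeroed, so already terminated */
}

int main(void)
{
	void **zl = alloc_zone_array(0x5UL);	/* nodes 0 and 2 -> room for 9 entries */

	printf("%s\n", zl ? "allocated" : "failed");
	free(zl);
	return 0;
}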
Example #4
static irqreturn_t ad7298_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct ad7298_state *st = iio_priv(indio_dev);
	struct iio_buffer *ring = indio_dev->buffer;
	s64 time_ns;
	__u16 buf[16];
	int b_sent, i;

	b_sent = spi_sync(st->spi, &st->ring_msg);
	if (b_sent)
		return b_sent;

	if (ring->scan_timestamp) {
		time_ns = iio_get_time_ns();
		memcpy((u8 *)buf + st->d_size - sizeof(s64),
			&time_ns, sizeof(time_ns));
	}

	for (i = 0; i < bitmap_weight(indio_dev->active_scan_mask,
						 indio_dev->masklength); i++)
		buf[i] = be16_to_cpu(st->rx_buf[i]);

	indio_dev->buffer->access->store_to(ring, (u8 *)buf, time_ns);
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}
Example #5
void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	unsigned long *word;
	uint32_t i;
	size_t word_index;
	uint8_t *w;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			word_index = hard_iface->if_num * BATADV_NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			batadv_bit_get_packet(bat_priv, word, 1, 0);
			w = &orig_node->bcast_own_sum[hard_iface->if_num];
			*w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);
		}
		rcu_read_unlock();
	}
}
Example #6
static ssize_t read_file_slot(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct ath9k_htc_priv *priv = file->private_data;
	char buf[512];
	unsigned int len = 0;

	spin_lock_bh(&priv->tx.tx_lock);

	len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");

	len += bitmap_scnprintf(buf + len, sizeof(buf) - len,
			       priv->tx.tx_slot, MAX_TX_BUF_NUM);

	len += snprintf(buf + len, sizeof(buf) - len, "\n");

	len += snprintf(buf + len, sizeof(buf) - len,
			"Used slots     : %d\n",
			bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));

	spin_unlock_bh(&priv->tx.tx_lock);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
Example #7
static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	u64 written_secs = 0;
	int valid_chunks = 0;
	int i;

	for (i = 0; i < lm->blk_per_line; i++) {
		struct nvm_chk_meta *chunk = &line->chks[i];

		if (chunk->state & NVM_CHK_ST_OFFLINE)
			continue;

		written_secs += chunk->wp;
		valid_chunks++;
	}

	if (lm->blk_per_line - nr_bb != valid_chunks)
		pblk_err(pblk, "recovery line %d is bad\n", line->id);

	pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);

	return written_secs;
}
Example #8
/**
 * ad799x_ring_preenable() - setup the parameters of the ring before enabling
 *
 * The complex nature of the setting of the number of bytes per datum is due
 * to this driver currently ensuring that the timestamp is stored at an 8
 * byte boundary.
 **/
static int ad799x_ring_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *ring = indio_dev->buffer;
	struct ad799x_state *st = iio_priv(indio_dev);

	/*
	 * Need to figure out the current mode based upon the requested
	 * scan mask in iio_dev
	 */

	if (st->id == ad7997 || st->id == ad7998)
		ad7997_8_set_scan_mode(st, *indio_dev->active_scan_mask);

	st->d_size = bitmap_weight(indio_dev->active_scan_mask,
				   indio_dev->masklength) * 2;

	if (ring->scan_timestamp) {
		st->d_size += sizeof(s64);

		if (st->d_size % sizeof(s64))
			st->d_size += sizeof(s64) - (st->d_size % sizeof(s64));
	}

	if (indio_dev->buffer->access->set_bytes_per_datum)
		indio_dev->buffer->access->
			set_bytes_per_datum(indio_dev->buffer, st->d_size);

	return 0;
}
Example #9
/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
 * specific to be rolled into the core.
 */
static irqreturn_t adis16209_trigger_handler(int irq, void *p)
{
    struct iio_poll_func *pf = p;
    struct iio_dev *indio_dev = pf->indio_dev;
    struct adis16209_state *st = iio_priv(indio_dev);
    struct iio_buffer *ring = indio_dev->buffer;

    int i = 0;
    s16 *data;
    size_t datasize = ring->access->get_bytes_per_datum(ring);

    data = kmalloc(datasize, GFP_KERNEL);
    if (data == NULL) {
        dev_err(&st->us->dev, "memory alloc failed in ring bh");
        return -ENOMEM;
    }

    if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
            adis16209_read_ring_data(&indio_dev->dev, st->rx) >= 0)
        for (; i < bitmap_weight(indio_dev->active_scan_mask,
                                 indio_dev->masklength); i++)
            data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));

    /* Guaranteed to be aligned with 8 byte boundary */
    if (ring->scan_timestamp)
        *((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;

    ring->access->store_to(ring, (u8 *)data, pf->timestamp);

    iio_trigger_notify_done(indio_dev->trig);
    kfree(data);

    return IRQ_HANDLED;
}
Example #10
/**
 * iio_simple_dummy_trigger_h() - the trigger handler function
 * @irq: the interrupt number
 * @p: private data - always a pointer to the poll func.
 *
 * This is the guts of buffered capture. On a trigger event occurring,
 * if the pollfunc is attached then this handler is called as a threaded
 * interrupt (and hence may sleep). It is responsible for grabbing data
 * from the device and pushing it into the associated buffer.
 */
static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
{
    struct iio_poll_func *pf = p;
    struct iio_dev *indio_dev = pf->indio_dev;
    struct iio_buffer *buffer = indio_dev->buffer;
    int len = 0;
    u16 *data;

    data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
    if (data == NULL)
        return -ENOMEM;

    if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
        /*
         * Three common options here:
         * hardware scans: certain combinations of channels make
         *   up a fast read.  The capture will consist of all of them.
         *   Hence we just call the grab data function and fill the
         *   buffer without processing.
         * software scans: can be considered to be random access
         *   so efficient reading is just a case of minimal bus
         *   transactions.
         * software culled hardware scans:
         *   occasionally a driver may process the nearest hardware
         *   scan to avoid storing elements that are not desired. This
         *   is the fiddliest option by far.
         * Here let's pretend we have random access. And the values are
         * in the constant table fakedata.
         */
        int i, j;
        for (i = 0, j = 0;
                i < bitmap_weight(indio_dev->active_scan_mask,
                                  indio_dev->masklength);
                i++) {
            j = find_next_bit(buffer->scan_mask,
                              indio_dev->masklength, j + 1);
            /* random access read from the 'device' */
            data[i] = fakedata[j];
            len += 2;
        }
    }
    /* Store a timestamp at an 8 byte boundary */
    if (indio_dev->scan_timestamp)
        *(s64 *)(((phys_addr_t)data + len
                  + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
            = iio_get_time_ns();
    buffer->access->store_to(buffer, (u8 *)data, pf->timestamp);

    kfree(data);

    /*
     * Tell the core we are done with this trigger and ready for the
     * next one.
     */
    iio_trigger_notify_done(indio_dev->trig);

    return IRQ_HANDLED;
}
Example #11
static irqreturn_t ad799x_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct ad799x_state *st = iio_priv(indio_dev);
	struct iio_buffer *ring = indio_dev->buffer;
	s64 time_ns;
	__u8 *rxbuf;
	int b_sent;
	u8 cmd;

	rxbuf = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
	if (rxbuf == NULL)
		goto out;

	switch (st->id) {
	case ad7991:
	case ad7995:
	case ad7999:
		cmd = st->config |
			(*indio_dev->active_scan_mask << AD799X_CHANNEL_SHIFT);
		break;
	case ad7992:
	case ad7993:
	case ad7994:
		cmd = (*indio_dev->active_scan_mask << AD799X_CHANNEL_SHIFT) |
			AD7998_CONV_RES_REG;
		break;
	case ad7997:
	case ad7998:
		cmd = AD7997_8_READ_SEQUENCE | AD7998_CONV_RES_REG;
		break;
	default:
		cmd = 0;
	}

	b_sent = i2c_smbus_read_i2c_block_data(st->client,
			cmd, bitmap_weight(indio_dev->active_scan_mask,
					   indio_dev->masklength) * 2, rxbuf);
	if (b_sent < 0)
		goto done;

	time_ns = iio_get_time_ns();

	if (indio_dev->scan_timestamp)
		memcpy(rxbuf + indio_dev->scan_bytes - sizeof(s64),
			&time_ns, sizeof(time_ns));

	ring->access->store_to(indio_dev->buffer, rxbuf, time_ns);
done:
	kfree(rxbuf);
	if (b_sent < 0)
		return b_sent;
out:
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}
Example #12
void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	int max_secs = nvm_max_phys_sects(dev);
	struct bio *bio;
	unsigned int nr_rec_secs;
	unsigned int pgs_read;
	int ret;

	nr_rec_secs = bitmap_weight((unsigned long int *)&rqd->ppa_status,
								max_secs);

	bio = bio_alloc(GFP_KERNEL, nr_rec_secs);
	if (!bio) {
		pr_err("pblk: not able to create recovery bio\n");
		return;
	}

	bio->bi_iter.bi_sector = 0;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;
	rqd->nr_ppas = nr_rec_secs;

	pgs_read = pblk_rb_read_to_bio_list(&pblk->rwb, bio, &recovery->failed,
								nr_rec_secs);
	if (pgs_read != nr_rec_secs) {
		pr_err("pblk: could not read recovery entries\n");
		goto err;
	}

	if (pblk_setup_w_rec_rq(pblk, rqd, c_ctx)) {
		pr_err("pblk: could not setup recovery request\n");
		goto err;
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_rec_secs, &pblk->recov_writes);
#endif

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		goto err;
	}

	mempool_free(recovery, pblk->rec_pool);
	return;

err:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, WRITE);
}
Example #13
/**
 * Conveniently returns the number of defined subpackets in a given register.
 * Note that this is different from nr_subpackets, which is the maximum number
 * of subpackets that could be defined in the descriptor.
 */
static int rmi_register_subpackets(struct rmi_reg_descriptor *desc, int reg)
{
	struct rmi_register_desc *rdesc;

	rdesc = flex_array_get(desc->structure, reg);
	if (rdesc)
		return bitmap_weight(rdesc->subpackets, rdesc->nr_subpackets);
	return 0;
}
Example #14
static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe *pipe,
	struct mdss_mdp_pipe_smp_map *smp_map, size_t n)
{
	u32 i, mmb;
	u32 fixed_cnt = bitmap_weight(smp_map->fixed, SMP_MB_CNT);
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (n <= fixed_cnt)
		return fixed_cnt;
	else
		n -= fixed_cnt;

	i = bitmap_weight(smp_map->allocated, SMP_MB_CNT);

	/*
	 * SMP programming is not double buffered. Fail the request,
	 * that calls for change in smp configuration (addition/removal
	 * of smp blocks), so that fallback solution happens.
	 */
	if (pipe->src_fmt->is_yuv || (pipe->flags & MDP_BACKEND_COMPOSITION)) {
		if (i != 0 && n != i) {
			pr_debug("Can't change mmb configuration in set call\n");
			return 0;
		}
	}

	/*
	 * Clear previous SMP reservations and reserve according to the
	 * latest configuration
	 */
	mdss_mdp_smp_mmb_free(smp_map->reserved, false);

	/* reserve more blocks if needed, but can't free mmb at this point */
	for (; i < n; i++) {
		if (bitmap_full(mdata->mmb_alloc_map, SMP_MB_CNT))
			break;

		mmb = find_first_zero_bit(mdata->mmb_alloc_map, SMP_MB_CNT);
		set_bit(mmb, smp_map->reserved);
		set_bit(mmb, mdata->mmb_alloc_map);
	}

	return i + fixed_cnt;
}
Example #15
static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);

	return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
				nr_bb * geo->sec_per_blk;
}
Example #16
unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		weight += bitmap_weight(&word->word, word->depth);
	}
	return weight;
}
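A userspace sketch of the same per-word summation, with invented field names (the real struct sbitmap_word differs); the point is that each word contributes only its low 'depth' bits to the total weight. __builtin_popcountl() stands in for bitmap_weight().

#include <stdio.h>

/* Illustrative stand-in, with assumed field names. */
struct word {
	unsigned long bits;
	unsigned int depth;	/* number of valid bits in this word */
};

/* Weight of one word, counting only its low 'depth' bits, mirroring
 * bitmap_weight(&word->word, word->depth). */
static unsigned int word_weight(const struct word *w)
{
	unsigned long mask = (w->depth >= sizeof(unsigned long) * 8) ?
			~0UL : ((1UL << w->depth) - 1);

	return (unsigned int)__builtin_popcountl(w->bits & mask);
}

int main(void)
{
	struct word map[2] = { { 0xF0F0UL, 16 }, { 0xFFUL, 4 } };
	unsigned int i, weight = 0;

	for (i = 0; i < 2; i++)
		weight += word_weight(&map[i]);

	printf("weight = %u\n", weight);	/* 8 + 4 = 12 */
	return 0;
}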
Example #17
/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared resources.
 */
static int ccp_assign_lsbs(struct ccp_device *ccp)
{
	DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
	DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
	int n_lsbs = 0;
	int bitno;
	int i, lsb_cnt;
	int rc = 0;

	bitmap_zero(lsb_pub, MAX_LSB_CNT);

	/* Create an aggregate bitmap to get a total count of available LSBs */
	for (i = 0; i < ccp->cmd_q_count; i++)
		bitmap_or(lsb_pub,
			  lsb_pub, ccp->cmd_q[i].lsbmask,
			  MAX_LSB_CNT);

	n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);

	if (n_lsbs >= ccp->cmd_q_count) {
		/* We have enough LSBs to give every queue a private LSB.
		 * Brute force search to start with the queues that are more
		 * constrained in LSB choice. When an LSB is privately
		 * assigned, it is removed from the public mask.
		 * This is an ugly N squared algorithm with some optimization.
		 */
		for (lsb_cnt = 1;
		     n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
		     lsb_cnt++) {
			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
							  lsb_pub);
			if (rc < 0)
				return -EINVAL;
			n_lsbs = rc;
		}
	}

	rc = 0;
	/* What's left of the LSBs, according to the public mask, now become
	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
	 * that can't be used as a shared resource, so mark the LSB slots for
	 * them as "in use".
	 */
	bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);

	bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
	while (bitno < MAX_LSB_CNT) {
		bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
		bitmap_set(qlsb, bitno, 1);
		bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
	}

	return rc;
}
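A sketch of the decision this function starts with: OR the per-queue masks into one aggregate, then compare its weight against the queue count; private assignment is only attempted when there are at least as many distinct regions as queues. The names and widths here are assumptions for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Decide whether every queue can get a private region: possible only if the
 * union of all per-queue access masks has at least one region per queue. */
static bool can_assign_private(const unsigned long *qmask, int q_count)
{
	unsigned long pub = 0;
	int i;

	for (i = 0; i < q_count; i++)
		pub |= qmask[i];

	return __builtin_popcountl(pub) >= q_count;
}

int main(void)
{
	unsigned long masks[3] = { 0x3UL, 0x6UL, 0x4UL };	/* regions {0,1}, {1,2}, {2} */

	printf("private possible: %d\n", can_assign_private(masks, 3));
	return 0;
}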
Example #18
static int ad7298_ring_preenable(struct iio_dev *indio_dev)
{
	struct ad7298_state *st = iio_priv(indio_dev);
	struct iio_buffer *ring = indio_dev->buffer;
	size_t d_size;
	int i, m;
	unsigned short command;
	int scan_count = bitmap_weight(indio_dev->active_scan_mask,
				       indio_dev->masklength);
	d_size = scan_count * (AD7298_STORAGE_BITS / 8);

	if (ring->scan_timestamp) {
		d_size += sizeof(s64);

		if (d_size % sizeof(s64))
			d_size += sizeof(s64) - (d_size % sizeof(s64));
	}

	if (ring->access->set_bytes_per_datum)
		ring->access->set_bytes_per_datum(ring, d_size);

	st->d_size = d_size;

	command = AD7298_WRITE | st->ext_ref;

	for (i = 0, m = AD7298_CH(0); i < AD7298_MAX_CHAN; i++, m >>= 1)
		if (test_bit(i, indio_dev->active_scan_mask))
			command |= m;

	st->tx_buf[0] = cpu_to_be16(command);

	/* build spi ring message */
	st->ring_xfer[0].tx_buf = &st->tx_buf[0];
	st->ring_xfer[0].len = 2;
	st->ring_xfer[0].cs_change = 1;
	st->ring_xfer[1].tx_buf = &st->tx_buf[1];
	st->ring_xfer[1].len = 2;
	st->ring_xfer[1].cs_change = 1;

	spi_message_init(&st->ring_msg);
	spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg);
	spi_message_add_tail(&st->ring_xfer[1], &st->ring_msg);

	for (i = 0; i < scan_count; i++) {
		st->ring_xfer[i + 2].rx_buf = &st->rx_buf[i];
		st->ring_xfer[i + 2].len = 2;
		st->ring_xfer[i + 2].cs_change = 1;
		spi_message_add_tail(&st->ring_xfer[i + 2], &st->ring_msg);
	}
	/* make sure last transfer cs_change is not set */
	st->ring_xfer[i + 1].cs_change = 0;

	return 0;
}
Example #19
unsigned long omfs_count_free(struct super_block *sb)
{
	unsigned int i;
	unsigned long sum = 0;
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int nbits = sb->s_blocksize * 8;

	for (i = 0; i < sbi->s_imap_size; i++)
		sum += nbits - bitmap_weight(sbi->s_imap[i], nbits);

	return sum;
}
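Counting free blocks this way is just the complement of bitmap_weight(): total bits minus set bits, summed per word. A small userspace sketch, with __builtin_popcountl() standing in for the kernel helper:

#include <stdio.h>

#define BITS_PER_WORD (sizeof(unsigned long) * 8)

/* Count clear bits across an array of allocation words: each word covers
 * BITS_PER_WORD blocks, and free blocks are total bits minus set bits. */
static unsigned long count_free(const unsigned long *map, unsigned int nwords)
{
	unsigned long sum = 0;
	unsigned int i;

	for (i = 0; i < nwords; i++)
		sum += BITS_PER_WORD - __builtin_popcountl(map[i]);
	return sum;
}

int main(void)
{
	unsigned long map[2] = { ~0UL, 0x1UL };

	printf("free = %lu\n", count_free(map, 2));	/* 0 + (BITS_PER_WORD - 1) */
	return 0;
}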
Example #20
unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
{
	struct quad_buffer_head qbh;
	unsigned long *bits;
	unsigned count;

	bits = hpfs_map_4sectors(s, secno, &qbh, 4);
	if (!bits)
		return 0;
	count = bitmap_weight(bits, 2048 * BITS_PER_BYTE);
	hpfs_brelse4(&qbh);
	return count;
}
Example #21
static int cma_used_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long used;

	mutex_lock(&cma->lock);
	/* pages counter is smaller than sizeof(int) */
	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
	mutex_unlock(&cma->lock);
	*val = (u64)used << cma->order_per_bit;

	return 0;
}
Example #22
static irqreturn_t max1363_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct max1363_state *st = iio_priv(indio_dev);
	s64 time_ns;
	__u8 *rxbuf;
	int b_sent;
	size_t d_size;
	unsigned long numvals = bitmap_weight(st->current_mode->modemask,
					      MAX1363_MAX_CHANNELS);

	/* Ensure the timestamp is 8 byte aligned */
	if (st->chip_info->bits != 8)
		d_size = numvals*2;
	else
		d_size = numvals;
	if (indio_dev->buffer->scan_timestamp) {
		d_size += sizeof(s64);
		if (d_size % sizeof(s64))
			d_size += sizeof(s64) - (d_size % sizeof(s64));
	}
	/* Monitor mode prevents reading. Whilst not currently implemented
	 * might as well have this test in here in the meantime as it does
	 * no harm.
	 */
	if (numvals == 0)
		return IRQ_HANDLED;

	rxbuf = kmalloc(d_size, GFP_KERNEL);
	if (rxbuf == NULL)
		return -ENOMEM;
	if (st->chip_info->bits != 8)
		b_sent = i2c_master_recv(st->client, rxbuf, numvals*2);
	else
		b_sent = i2c_master_recv(st->client, rxbuf, numvals);
	if (b_sent < 0)
		goto done;

	time_ns = iio_get_time_ns();

	if (indio_dev->buffer->scan_timestamp)
		memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));
	iio_push_to_buffer(indio_dev->buffer, rxbuf, time_ns);

done:
	iio_trigger_notify_done(indio_dev->trig);
	kfree(rxbuf);

	return IRQ_HANDLED;
}
Example #23
static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
					int lsb_cnt, int n_lsbs,
					unsigned long *lsb_pub)
{
	DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
	int bitno;
	int qlsb_wgt;
	int i;

	/* For each queue:
	 * If the count of potential LSBs available to a queue matches the
	 * ordinal given to us in lsb_cnt:
	 * Copy the mask of possible LSBs for this queue into "qlsb";
	 * For each bit in qlsb, see if the corresponding bit in the
	 * aggregation mask is set; if so, we have a match.
	 *     If we have a match, clear the bit in the aggregation to
	 *     mark it as no longer available.
	 *     If there is no match, clear the bit in qlsb and keep looking.
	 */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

		qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);

		if (qlsb_wgt == lsb_cnt) {
			bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);

			bitno = find_first_bit(qlsb, MAX_LSB_CNT);
			while (bitno < MAX_LSB_CNT) {
				if (test_bit(bitno, lsb_pub)) {
					/* We found an available LSB
					 * that this queue can access
					 */
					cmd_q->lsb = bitno;
					bitmap_clear(lsb_pub, bitno, 1);
					dev_info(ccp->dev,
						 "Queue %d gets LSB %d\n",
						 i, bitno);
					break;
				}
				bitmap_clear(qlsb, bitno, 1);
				bitno = find_first_bit(qlsb, MAX_LSB_CNT);
			}
			if (bitno >= MAX_LSB_CNT)
				return -EINVAL;
			n_lsbs--;
		}
	}
	return n_lsbs;
}
Example #24
static u32 mdss_mdp_smp_mmb_reserve(unsigned long *smp, size_t n)
{
	u32 i, mmb;

	/* reserve more blocks if needed, but can't free mmb at this point */
	for (i = bitmap_weight(smp, SMP_MB_CNT); i < n; i++) {
		if (bitmap_full(mdss_mdp_smp_mmb_pool, SMP_MB_CNT))
			break;

		mmb = find_first_zero_bit(mdss_mdp_smp_mmb_pool, SMP_MB_CNT);
		set_bit(mmb, smp);
		set_bit(mmb, mdss_mdp_smp_mmb_pool);
	}
	return i;
}
Example #25
static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe_smp_map *smp_map,
	size_t n)
{
	u32 i, mmb;
	u32 fixed_cnt = bitmap_weight(smp_map->fixed, SMP_MB_CNT);
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (n <= fixed_cnt)
		return fixed_cnt;
	else
		n -= fixed_cnt;

	/* reserve more blocks if needed, but can't free mmb at this point */
	for (i = bitmap_weight(smp_map->allocated, SMP_MB_CNT); i < n; i++) {
		if (bitmap_full(mdata->mmb_alloc_map, SMP_MB_CNT))
			break;

		mmb = find_first_zero_bit(mdata->mmb_alloc_map, SMP_MB_CNT);
		set_bit(mmb, smp_map->reserved);
		set_bit(mmb, mdata->mmb_alloc_map);
	}

	return i + fixed_cnt;
}
Example #26
static struct cpu_map *cpu_map__from_mask(struct cpu_map_mask *mask)
{
	struct cpu_map *map;
	int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;

	nr = bitmap_weight(mask->mask, nbits);

	map = cpu_map__empty_new(nr);
	if (map) {
		int cpu, i = 0;

		for_each_set_bit(cpu, mask->mask, nbits)
			map->map[i++] = cpu;
	}
	return map;
}
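Here the weight tells the function how large a dense array to allocate, and for_each_set_bit() fills it. A userspace sketch of that expansion, using the clear-lowest-bit idiom in place of the kernel iterator:

#include <stdio.h>

/* Expand a CPU mask into a dense index array: the resulting length always
 * equals the popcount of the original mask. */
static int mask_to_list(unsigned long mask, int *out)
{
	int n = 0;

	while (mask) {
		out[n++] = __builtin_ctzl(mask);	/* index of lowest set bit */
		mask &= mask - 1;			/* clear that bit */
	}
	return n;
}

int main(void)
{
	int cpus[64], i, nr = mask_to_list(0x15UL, cpus);	/* bits 0, 2, 4 */

	for (i = 0; i < nr; i++)
		printf("cpu %d\n", cpus[i]);
	return 0;
}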
Example #27
/*
 * Fill a DRP IE's allocation fields from a MAS bitmap.
 */
static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
			       struct uwb_mas_bm *mas)
{
	int z, i, num_fields = 0, next = 0;
	struct uwb_drp_alloc *zones;
	__le16 current_bmp;
	DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS);
	DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE);

	zones = drp_ie->allocs;

	bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS);

	/* Determine unique MAS bitmaps in zones from bitmap. */
	for (z = 0; z < UWB_NUM_ZONES; z++) {
		bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE);
		if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) {
			bool found = false;
			current_bmp = (__le16) *tmp_mas_bm;
			for (i = 0; i < next; i++) {
				if (current_bmp == zones[i].mas_bm) {
					zones[i].zone_bm |= 1 << z;
					found = true;
					break;
				}
			}
			if (!found)  {
				num_fields++;
				zones[next].zone_bm = 1 << z;
				zones[next].mas_bm = current_bmp;
				next++;
			}
		}
		bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
	}

	/* Store in format ready for transmission (le16). */
	for (i = 0; i < num_fields; i++) {
		drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm);
		drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm);
	}

	drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr)
		+ num_fields * sizeof(struct uwb_drp_alloc);
}
Example #28
/**
 * read_ec_accel_data_unsafe() - Read acceleration data from EC shared memory.
 * @st:        Pointer to state information for device.
 * @scan_mask: Bitmap of the sensor indices to scan.
 * @data:      Location to store data.
 *
 * This is the unsafe function for reading the EC data. It does not guarantee
 * that the EC will not modify the data as it is being read in.
 */
static void read_ec_accel_data_unsafe(struct cros_ec_accel_legacy_state *st,
				      unsigned long scan_mask, s16 *data)
{
	int i = 0;
	int num_enabled = bitmap_weight(&scan_mask, MAX_AXIS);

	/* Read all sensors enabled in scan_mask. Each value is 2 bytes. */
	while (num_enabled--) {
		i = find_next_bit(&scan_mask, MAX_AXIS, i);
		ec_cmd_read_u16(st->ec,
				EC_MEMMAP_ACC_DATA +
				sizeof(s16) *
				(1 + i + st->sensor_num * MAX_AXIS),
				data);
		*data *= st->sign[i];
		i++;
		data++;
	}
}
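The loop above takes the weight up front and then uses it as a countdown while find_next_bit() locates each enabled sensor. A userspace sketch of the same pattern, with an open-coded search standing in for find_next_bit() and invented buffer contents:

#include <stdio.h>

#define MAX_AXES 3

/* Copy one sample per enabled axis into a packed output buffer, walking the
 * set bits of scan_mask; the popcount bounds the iteration exactly. */
static int read_enabled(unsigned long scan_mask, const short *raw, short *out)
{
	int i = 0, n = 0;
	int num_enabled = __builtin_popcountl(scan_mask);

	while (num_enabled--) {
		while (i < MAX_AXES && !(scan_mask & (1UL << i)))
			i++;			/* find_next_bit() stand-in */
		out[n++] = raw[i++];
	}
	return n;
}

int main(void)
{
	short raw[MAX_AXES] = { 100, 200, 300 }, out[MAX_AXES];
	int n = read_enabled(0x5UL, raw, out);	/* axes 0 and 2 enabled */

	printf("n=%d first=%d second=%d\n", n, out[0], out[1]);
	return 0;
}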
Example #29
/**
 * ad7298_update_scan_mode() - setup the spi transfer buffer for the new scan mask
 **/
static int ad7298_update_scan_mode(struct iio_dev *indio_dev,
	const unsigned long *active_scan_mask)
{
	struct ad7298_state *st = iio_priv(indio_dev);
	int i, m;
	unsigned short command;
	int scan_count;

	/* Now compute overall size */
	scan_count = bitmap_weight(active_scan_mask, indio_dev->masklength);

	command = AD7298_WRITE | st->ext_ref;

	for (i = 0, m = AD7298_CH(0); i < AD7298_MAX_CHAN; i++, m >>= 1)
		if (test_bit(i, active_scan_mask))
			command |= m;

	st->tx_buf[0] = cpu_to_be16(command);

	/* build spi ring message */
	st->ring_xfer[0].tx_buf = &st->tx_buf[0];
	st->ring_xfer[0].len = 2;
	st->ring_xfer[0].cs_change = 1;
	st->ring_xfer[1].tx_buf = &st->tx_buf[1];
	st->ring_xfer[1].len = 2;
	st->ring_xfer[1].cs_change = 1;

	spi_message_init(&st->ring_msg);
	spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg);
	spi_message_add_tail(&st->ring_xfer[1], &st->ring_msg);

	for (i = 0; i < scan_count; i++) {
		st->ring_xfer[i + 2].rx_buf = &st->rx_buf[i];
		st->ring_xfer[i + 2].len = 2;
		st->ring_xfer[i + 2].cs_change = 1;
		spi_message_add_tail(&st->ring_xfer[i + 2], &st->ring_msg);
	}
	/* make sure last transfer cs_change is not set */
	st->ring_xfer[i + 1].cs_change = 0;

	return 0;
}
Example #30
static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
{
	int q_mask = 1 << cmd_q->id;
	int queues = 0;
	int j;

	/* Build a bit mask to know which LSBs this queue has access to.
	 * Don't bother with segment 0 as it has special privileges.
	 */
	for (j = 1; j < MAX_LSB_CNT; j++) {
		if (status & q_mask)
			bitmap_set(cmd_q->lsbmask, j, 1);
		status >>= LSB_REGION_WIDTH;
	}
	queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
	dev_info(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n",
		 cmd_q->id, queues);

	return queues ? 0 : -EINVAL;
}
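A sketch of the mask construction in this last example: walk a packed status word one fixed-width field at a time, recording which regions contain this queue's bit, then take the weight of the result. The field width and region count here are assumptions, not the real LSB_REGION_WIDTH or MAX_LSB_CNT values.

#include <stdio.h>

#define REGION_WIDTH 6	/* assumed bits of status per region */
#define REGION_CNT   8	/* assumed number of regions */

/* Set a bit in the access mask for every region whose status field contains
 * this queue's bit; region 0 is skipped, as in the function above. */
static unsigned long regions_for_queue(unsigned long long status, int queue_id)
{
	unsigned long long q_bit = 1ULL << queue_id;
	unsigned long mask = 0;
	int j;

	for (j = 1; j < REGION_CNT; j++) {
		if (status & q_bit)
			mask |= 1UL << j;
		status >>= REGION_WIDTH;
	}
	return mask;
}

int main(void)
{
	unsigned long m = regions_for_queue(0x041ULL, 0);	/* queue 0 in regions 1 and 2 */

	printf("mask=0x%lx weight=%d\n", m, __builtin_popcountl(m));
	return 0;
}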