Example #1
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj;

	spin_lock(&bitmap->lock);

	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (obj < bitmap->max) {
		set_bit(obj, bitmap->table);
		bitmap->last = (obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		--bitmap->avail;

	spin_unlock(&bitmap->lock);

	return obj;
}
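All of the snippets in this collection rely on the same contract: find_first_zero_bit(map, size) and find_next_zero_bit(map, size, offset) return the index of the first clear bit, or size when every bit in range is set; they never return a negative value. Below is a minimal, self-contained sketch of the wrap-around (round-robin) search used in Example #1 and Example #4; the toy_* names are hypothetical and not taken from any snippet:

#include <linux/bitmap.h>
#include <linux/spinlock.h>

struct toy_bitmap {
	spinlock_t lock;
	unsigned long table[BITS_TO_LONGS(64)];
	u32 max;	/* number of usable bits */
	u32 last;	/* hint: where the next search starts */
};

static int toy_alloc(struct toy_bitmap *bm)
{
	u32 obj;

	spin_lock(&bm->lock);
	/* search [last, max); returns max when no zero bit is found */
	obj = find_next_zero_bit(bm->table, bm->max, bm->last);
	if (obj >= bm->max)
		/* wrap around and retry over the full range [0, max) */
		obj = find_first_zero_bit(bm->table, bm->max);
	if (obj < bm->max) {
		set_bit(obj, bm->table);
		bm->last = (obj + 1 == bm->max) ? 0 : obj + 1;
	}
	spin_unlock(&bm->lock);
	return obj < bm->max ? (int)obj : -1;
}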
Example #2
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
				  dma_addr_t cpu_addr,
				  enum dw_pcie_as_type as_type)
{
	int ret;
	u32 free_win;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	/* the size argument is in bits; sizeof() would give bytes */
	free_win = find_first_zero_bit(&ep->ib_window_map,
				       ep->num_ib_windows);
	if (free_win >= ep->num_ib_windows) {
		dev_err(pci->dev, "no free inbound window\n");
		return -EINVAL;
	}

	ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr,
				       as_type);
	if (ret < 0) {
		dev_err(pci->dev, "Failed to program IB window\n");
		return ret;
	}

	ep->bar_to_atu[bar] = free_win;
	set_bit(free_win, &ep->ib_window_map);

	return 0;
}
Example #3
static int camera_v4l2_fh_open(struct file *filep)
{
	struct msm_video_device *pvdev = video_drvdata(filep);
	struct camera_v4l2_private *sp;
	unsigned int stream_id;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp) {
		pr_err("%s : memory not available\n", __func__);
		return -ENOMEM;
	}

	filep->private_data = &sp->fh;

	
	/* pvdev->opened doubles as a bitmask of active streams; pick the
	 * first free stream id from it
	 */
	stream_id = atomic_read(&pvdev->opened);
	sp->stream_id = find_first_zero_bit(
		(const unsigned long *)&stream_id, MSM_CAMERA_STREAM_CNT_BITS);
	pr_debug("%s: Found stream_id=%d\n", __func__, sp->stream_id);

	v4l2_fh_init(&sp->fh, pvdev->vdev);
	v4l2_fh_add(&sp->fh);

	return 0;
}
Example #4
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
{
	int ret = 0;

	spin_lock(&bitmap->lock);
	*obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (*obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
			       & bitmap->mask;
		*obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (*obj < bitmap->max) {
		set_bit(*obj, bitmap->table);
		bitmap->last = (*obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		*obj |= bitmap->top;
	} else {
		ret = -1;
	}

	spin_unlock(&bitmap->lock);

	return ret;
}
Example #5
int pc_dimm_get_free_slot(const int *hint, int max_slots, Error **errp)
{
    unsigned long *bitmap = bitmap_new(max_slots);
    int slot = 0;

    object_child_foreach(qdev_get_machine(), pc_dimm_slot2bitmap, bitmap);

    /* check if requested slot is not occupied */
    if (hint) {
        if (*hint >= max_slots) {
            error_setg(errp, "invalid slot# %d, should be less than %d",
                       *hint, max_slots);
        } else if (!test_bit(*hint, bitmap)) {
            slot = *hint;
        } else {
            error_setg(errp, "slot %d is busy", *hint);
        }
        goto out;
    }

    /* search for free slot */
    slot = find_first_zero_bit(bitmap, max_slots);
    if (slot == max_slots) {
        error_setg(errp, "no free slots available");
    }
out:
    g_free(bitmap);
    return slot;
}
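QEMU's bitmap helpers share the kernel convention, which is why pc_dimm_get_free_slot() above detects a full map with slot == max_slots. A hedged caller sketch follows; the machine->ram_slots field and the surrounding error handling are assumptions for illustration, not taken from the snippet:

    /* hypothetical caller: ask for any free slot (no hint) */
    Error *err = NULL;
    int slot = pc_dimm_get_free_slot(NULL, machine->ram_slots, &err);

    if (err) {
        error_report_err(err);	/* reports and frees err */
        return;
    }
    /* ... go on to plug the DIMM into 'slot' ... */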
Example #6
/**
 * ir_register_class() - creates the sysfs for /sys/class/irrcv/irrcv?
 * @input_dev:	the struct input_dev descriptor of the device
 *
 * This routine is used to register the sysfs code for the IR class
 */
int ir_register_class(struct input_dev *input_dev)
{
	int rc;
	struct kobject *kobj;

	struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
	int devno = find_first_zero_bit(&ir_core_dev_number,
					IRRCV_NUM_DEVICES);

	/* find_first_zero_bit() returns IRRCV_NUM_DEVICES when the map is full,
	 * never a negative value */
	if (unlikely(devno >= IRRCV_NUM_DEVICES))
		return -ENOSPC;

	ir_dev->attr.attrs = ir_dev_attrs;
	ir_dev->class_dev = device_create(ir_input_class, NULL,
					  input_dev->dev.devt, ir_dev,
					  "irrcv%d", devno);
	kobj = &ir_dev->class_dev->kobj;

	printk(KERN_WARNING "Creating IR device %s\n", kobject_name(kobj));
	rc = sysfs_create_group(kobj, &ir_dev->attr);
	if (unlikely(rc < 0)) {
		device_destroy(ir_input_class, input_dev->dev.devt);
		return -ENOMEM;
	}

	ir_dev->devno = devno;
	set_bit(devno, &ir_core_dev_number);

	return 0;
}
Example #7
static struct card_blk_data *card_blk_alloc(struct memory_card *card)
{
	struct card_blk_data *card_data;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, CARD_NUM_MINORS);

	if (card->card_type == CARD_INAND)
		devidx = CARD_INAND_START_MINOR >> CARD_SHIFT;

	if (devidx >= CARD_NUM_MINORS)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	card_data = kzalloc(sizeof(*card_data), GFP_KERNEL);
	if (!card_data) {
		/* drop the minor index claimed above */
		__clear_bit(devidx, dev_use);
		return ERR_PTR(-ENOMEM);
	}

	card_data->block_bits = 9;

	card_data->disk = alloc_disk(1 << CARD_SHIFT);
	if (card_data->disk == NULL) {
		__clear_bit(devidx, dev_use);
		kfree(card_data);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&card_data->lock);
	card_data->usage = 1;

	ret = card_init_queue(&card_data->queue, card, &card_data->lock);
	if (ret) {
		put_disk(card_data->disk);
		__clear_bit(devidx, dev_use);
		kfree(card_data);
		return ERR_PTR(ret);
	}

	card_data->queue.prep_fn = card_blk_prep_rq;
	card_data->queue.issue_fn = card_blk_issue_rq;
	card_data->queue.data = card_data;

	card_data->disk->major = major;
	card_data->disk->minors = 1 << CARD_SHIFT;
	card_data->disk->first_minor = devidx << CARD_SHIFT;
	card_data->disk->fops = &card_ops;
	card_data->disk->private_data = card_data;
	card_data->disk->queue = card_data->queue.queue;
	card_data->disk->driverfs_dev = &card->dev;

	sprintf(card_data->disk->disk_name, "cardblk%s", card->name);

	blk_queue_logical_block_size(card_data->queue.queue, 1 << card_data->block_bits);

	set_capacity(card_data->disk, card->capacity);

	return card_data;
}
Example #8
int cxgb4_get_free_ftid(struct net_device *dev, int family)
{
	struct adapter *adap = netdev2adap(dev);
	struct tid_info *t = &adap->tids;
	int ftid;

	spin_lock_bh(&t->ftid_lock);
	if (family == PF_INET) {
		ftid = find_first_zero_bit(t->ftid_bmap, t->nftids);
		if (ftid >= t->nftids)
			ftid = -1;
	} else {
		if (is_t6(adap->params.chip)) {
			ftid = bitmap_find_free_region(t->ftid_bmap,
						       t->nftids, 1);
			if (ftid < 0)
				goto out_unlock;

			/* this is only a lookup, keep the found region
			 * unallocated
			 */
			bitmap_release_region(t->ftid_bmap, ftid, 1);
		} else {
			ftid = bitmap_find_free_region(t->ftid_bmap,
						       t->nftids, 2);
			if (ftid < 0)
				goto out_unlock;

			bitmap_release_region(t->ftid_bmap, ftid, 2);
		}
	}
out_unlock:
	spin_unlock_bh(&t->ftid_lock);
	return ftid;
}
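Example #8 uses bitmap_find_free_region() purely as a lookup: the region is released again right away, so the caller can later claim the returned ftid and its neighbours under the same lock. When the pair is used for a real allocation it looks like this sketch (alloc_quad/free_quad are hypothetical helper names):

#include <linux/bitmap.h>

/* claim a contiguous, naturally aligned run of 1 << 2 = 4 bits; returns
 * the first bit of the region, or a negative errno when nothing fits */
static int alloc_quad(unsigned long *map, unsigned int bits)
{
	return bitmap_find_free_region(map, bits, 2);
}

static void free_quad(unsigned long *map, int start)
{
	bitmap_release_region(map, start, 2);
}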
Example #9
static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe_smp_map *smp_map,
	size_t n)
{
	u32 i, mmb;
	u32 fixed_cnt = bitmap_weight(smp_map->fixed, SMP_MB_CNT);
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (n <= fixed_cnt)
		return fixed_cnt;
	else
		n -= fixed_cnt;

	i = bitmap_weight(smp_map->allocated, SMP_MB_CNT);

	if (i != 0 && n != i) {
		pr_debug("Can't change mmb config, num_blks: %d alloc: %d\n",
			n, i);
		return 0;
	}

	/* clear previous SMP reservations and reserve per the new request */
	mdss_mdp_smp_mmb_free(smp_map->reserved, false);

	/* reserve mmb blocks */
	for (; i < n; i++) {
		if (bitmap_full(mdata->mmb_alloc_map, SMP_MB_CNT))
			break;

		mmb = find_first_zero_bit(mdata->mmb_alloc_map, SMP_MB_CNT);
		set_bit(mmb, smp_map->reserved);
		set_bit(mmb, mdata->mmb_alloc_map);
	}

	return i + fixed_cnt;
}
Example #10
/*
 * nfs4_alloc_slot - efficiently look for a free slot
 *
 * nfs4_alloc_slot looks for an unset bit in the used_slots bitmap.
 * If found, we mark the slot as used, update the highest_used_slotid,
 * and set up the sequence operation args accordingly.
 *
 * Note: must be called while holding the slot_tbl_lock.
 */
struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl)
{
	struct nfs4_slot *ret = ERR_PTR(-EBUSY);
	u32 slotid;

	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		tbl->max_slotid + 1);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1);
	if (slotid > tbl->max_slotid)
		goto out;
	ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
	if (IS_ERR(ret))
		goto out;
	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid ||
			tbl->highest_used_slotid == NFS4_NO_SLOT)
		tbl->highest_used_slotid = slotid;
	ret->generation = tbl->generation;

out:
	dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		!IS_ERR(ret) ? ret->slot_nr : -1);
	return ret;
}
Example #11
/**
 * hpsb_get_tlabel - allocate a transaction label
 * @packet: the packet whose tlabel/tpool we set
 *
 * Every asynchronous transaction on the 1394 bus needs a transaction
 * label to match the response to the request.  This label has to be
 * different from any other transaction label in an outstanding request to
 * the same node to make matching possible without ambiguity.
 *
 * There are 64 different tlabels, so an allocated tlabel has to be freed
 * with hpsb_free_tlabel() after the transaction is complete (unless it's
 * reused again for the same target node).
 *
 * Return value: Zero on success, otherwise non-zero. A non-zero return
 * generally means there are no available tlabels. If this is called out
 * of interrupt or atomic context, then it will sleep until it can return
 * a tlabel.
 */
int hpsb_get_tlabel(struct hpsb_packet *packet)
{
	unsigned long flags;
	struct hpsb_tlabel_pool *tp;
	int n = NODEID_TO_NODE(packet->node_id);

	if (unlikely(n == ALL_NODES))
		return 0;
	tp = &packet->host->tpool[n];

	if (irqs_disabled() || in_atomic()) {
		if (down_trylock(&tp->count))
			return 1;
	} else {
		down(&tp->count);
	}

	spin_lock_irqsave(&tp->lock, flags);

	packet->tlabel = find_next_zero_bit(tp->pool, 64, tp->next);
	if (packet->tlabel > 63)
		packet->tlabel = find_first_zero_bit(tp->pool, 64);
	tp->next = (packet->tlabel + 1) % 64;
	/* Should _never_ happen */
	BUG_ON(test_and_set_bit(packet->tlabel, tp->pool));
	tp->allocations++;
	spin_unlock_irqrestore(&tp->lock, flags);

	return 0;
}
Example #12
static bool hasFullReservation(TicketSystem *_this)
{
    TicketSystemBitImpl *__this = container_of(_this, TicketSystemBitImpl, parent);

    /* the system is fully reserved when no zero bit is left in flags */
    return find_first_zero_bit(&__this->flags, BITS_PER_LONG) >= BITS_PER_LONG;
}
Example #13
static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	struct v2m_data *v2m = domain->host_data;
	int hwirq, offset, err = 0;

	spin_lock(&v2m->msi_cnt_lock);
	offset = find_first_zero_bit(v2m->bm, v2m->nr_spis);
	if (offset < v2m->nr_spis)
		__set_bit(offset, v2m->bm);
	else
		err = -ENOSPC;
	spin_unlock(&v2m->msi_cnt_lock);

	if (err)
		return err;

	hwirq = v2m->spi_start + offset;

	err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq);
	if (err) {
		gicv2m_unalloc_msi(v2m, hwirq);
		return err;
	}

	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
				      &gicv2m_irq_chip, v2m);

	return 0;
}
Example #14
/* same as hpsb_get_tlabel, except that it returns immediately */
static int hpsb_get_tlabel_atomic(struct hpsb_packet *packet)
{
	unsigned long flags, *tp;
	u8 *next;
	int tlabel, n = NODEID_TO_NODE(packet->node_id);

	/* Broadcast transactions are complete once the request has been sent.
	 * Use the same transaction label for all broadcast transactions. */
	if (unlikely(n == ALL_NODES)) {
		packet->tlabel = 0;
		return 0;
	}
	tp = packet->host->tl_pool[n].map;
	next = &packet->host->next_tl[n];

	spin_lock_irqsave(&hpsb_tlabel_lock, flags);
	tlabel = find_next_zero_bit(tp, 64, *next);
	if (tlabel > 63)
		tlabel = find_first_zero_bit(tp, 64);
	if (tlabel > 63) {
		spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
		return -EAGAIN;
	}
	__set_bit(tlabel, tp);
	*next = (tlabel + 1) & 63;
	spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);

	packet->tlabel = tlabel;
	return 0;
}
Example #15
static int genl_allocate_reserve_groups(int n_groups, int *first_id)
{
	unsigned long *new_groups;
	int start = 0;
	int i;
	int id;
	bool fits;

	do {
		if (start == 0)
			id = find_first_zero_bit(mc_groups,
						 mc_groups_longs *
						 BITS_PER_LONG);
		else
			id = find_next_zero_bit(mc_groups,
						mc_groups_longs * BITS_PER_LONG,
						start);

		fits = true;
		for (i = id;
		     i < min_t(int, id + n_groups,
			       mc_groups_longs * BITS_PER_LONG);
		     i++) {
			if (test_bit(i, mc_groups)) {
				start = i;
				fits = false;
				break;
			}
		}

		if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
			unsigned long new_longs = mc_groups_longs +
						  BITS_TO_LONGS(n_groups);
			size_t nlen = new_longs * sizeof(unsigned long);

			if (mc_groups == &mc_group_start) {
				new_groups = kzalloc(nlen, GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				*mc_groups = mc_group_start;
			} else {
				new_groups = krealloc(mc_groups, nlen,
						      GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
					mc_groups[mc_groups_longs + i] = 0;
			}
			mc_groups_longs = new_longs;
		}
	} while (!fits);

	for (i = id; i < id + n_groups; i++)
		set_bit(i, mc_groups);
	*first_id = id;
	return 0;
}
Example #16
static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
    struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
    struct autofs_dirhash *dh = &sbi->dirhash;
    struct autofs_dir_ent *ent;
    unsigned int n;
    int slsize;
    struct autofs_symlink *sl;

    DPRINTK(("autofs_root_symlink: %s <- ", symname));
    autofs_say(dentry->d_name.name,dentry->d_name.len);

    if ( !autofs_oz_mode(sbi) )
        return -EACCES;

    if ( autofs_hash_lookup(dh, &dentry->d_name) )
        return -EEXIST;

    n = find_first_zero_bit(sbi->symlink_bitmap,AUTOFS_MAX_SYMLINKS);
    if ( n >= AUTOFS_MAX_SYMLINKS )
        return -ENOSPC;

    set_bit(n,sbi->symlink_bitmap);
    sl = &sbi->symlink[n];
    sl->len = strlen(symname);
    sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
    if ( !sl->data ) {
        clear_bit(n,sbi->symlink_bitmap);
        return -ENOSPC;
    }

    ent = kmalloc(sizeof(struct autofs_dir_ent), GFP_KERNEL);
    if ( !ent ) {
        kfree(sl->data);
        clear_bit(n,sbi->symlink_bitmap);
        return -ENOSPC;
    }

    ent->name = kmalloc(dentry->d_name.len+1, GFP_KERNEL);
    if ( !ent->name ) {
        kfree(sl->data);
        kfree(ent);
        clear_bit(n,sbi->symlink_bitmap);
        return -ENOSPC;
    }

    memcpy(sl->data,symname,slsize);
    sl->mtime = CURRENT_TIME;

    ent->ino = AUTOFS_FIRST_SYMLINK + n;
    ent->hash = dentry->d_name.hash;
    memcpy(ent->name, dentry->d_name.name, 1+(ent->len = dentry->d_name.len));
    ent->dentry = NULL;	/* We don't keep the dentry for symlinks */

    autofs_hash_insert(dh,ent);
    d_instantiate(dentry, iget(dir->i_sb,ent->ino));

    return 0;
}
Example #17
static int make_inode_number(void)
{
	int i = find_first_zero_bit((void *) proc_alloc_map, PROC_NDYNAMIC);

	/* the return value is never negative; PROC_NDYNAMIC means "map full" */
	if (i >= PROC_NDYNAMIC)
		return -1;
	set_bit(i, (void *) proc_alloc_map);
	return PROC_DYNAMIC_FIRST + i;
}
Example #18
int find_and_set_bit_appMemInfo(volatile unsigned long *bitmap)
{
    int nr;
    nr = find_first_zero_bit((void *)bitmap, sizeof(int) * 8);
    if (nr >= (int)(sizeof(int) * 8))
        return -1;
    set_bit(nr, bitmap);
    return nr;
}
Example #19
File: xpad.c Project: coolmint/xpad
/*
 * Light up the segment corresponding to the pad number on Xbox 360 Controllers
 */
static void xpad_identify_controller(struct usb_xpad *xpad)
{
	if (xpad->xtype != XTYPE_XBOX360 && xpad->xtype != XTYPE_XBOX360W)
		return;

	xpad->pad_nr = find_first_zero_bit(&xpad_pad_seq, 32);
	set_bit(xpad->pad_nr, &xpad_pad_seq);

	xpad_send_led_command(xpad, (xpad->pad_nr % 4) + 2);
}
Example #20
File: cmd.c Project: AMouri/linux
static int wl12xx_allocate_link(struct wl1271 *wl, u8 *hlid)
{
	u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
	if (link >= WL12XX_MAX_LINKS)
		return -EBUSY;

	__set_bit(link, wl->links_map);
	*hlid = link;
	return 0;
}
Example #21
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it is the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		switch (max_depth) {
		case 2:
			max_depth = 1;
			break;
		case 3:
			max_depth = 2;
			break;
		default:
			max_depth -= 2;
		}
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		tag = find_first_zero_bit(bqt->tag_map, max_depth);
		if (tag >= max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
Example #22
static uint8_t ufshc_get_xfer_free_slot(ufs_hba_t* hba) {
    int32_t free_slot;

    free_slot = find_first_zero_bit(&hba->outstanding_xfer_reqs,
                                    hba->nutrs);
    /* find_first_zero_bit() returns hba->nutrs when every slot is in use,
     * never -1 */
    if (free_slot >= hba->nutrs) {
        UFS_ERROR("UFS no free transfer slot available.\n");
        return BAD_SLOT;
    }

    return (uint8_t)free_slot;
}
Example #23
struct page *alloc_foreign_page(void)
{
    ulong bit;
    do {
        bit = find_first_zero_bit(foreign_map_bitmap,
                                  foreign_map_pgs);
        if (bit >= foreign_map_pgs)
            return NULL;
    } while (test_and_set_bit(bit, foreign_map_bitmap) == 1);

    return pfn_to_page(foreign_map_pfn + bit);
}
Example #24
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}
Example #25
/**
 * xilinx_pcie_assign_msi - Allocate MSI number
 *
 * Return: A valid IRQ on success and error value on failure.
 */
static int xilinx_pcie_assign_msi(void)
{
	int pos;

	pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
	if (pos < XILINX_NUM_MSI_IRQS)
		set_bit(pos, msi_irq_in_use);
	else
		return -ENOSPC;

	return pos;
}
Example #26
static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
{
	int err = 0;

	*ix = find_first_zero_bit(l2_table->bitmap, l2_table->size);
	if (*ix >= l2_table->size)
		err = -ENOSPC;
	else
		__set_bit(*ix, l2_table->bitmap);

	return err;
}
Example #27
/*
 * find_coupled_state - Find the maximum state the platform can enter
 *
 * @index: pointer to variable which stores the maximum state
 * @cluster: cluster number
 *
 * Must be called while holding mmp_lpm_lock
 */
static void find_coupled_state(int *index, int cluster)
{
    int i;
    int platform_lpm = DEFAULT_LPM_FLAG;

    for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++)
        platform_lpm &= mmp_enter_lpm[cluster][i];

    *index = min(find_first_zero_bit((void *)&platform_lpm, LPM_NUM),
                 pm_qos_request(PM_QOS_CPUIDLE_BLOCK)) - 1;

}
Example #28
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}
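get_tag() above and blk_queue_start_tag() in Example #21 share a lockless idiom: find a candidate bit without holding a lock, then claim it atomically and retry if another CPU won the race. A minimal sketch of the allocate/free pairing (tag_alloc/tag_free are hypothetical names); test_and_set_bit_lock() provides acquire semantics and clear_bit_unlock() the matching release, which is the lock ordering the blk_queue_end_tag comment refers to:

#include <linux/bitops.h>

static int tag_alloc(unsigned long *map, unsigned int depth)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(map, depth);
		if (tag >= depth)
			return -1;	/* map is full */
	} while (test_and_set_bit_lock(tag, map));	/* retry on a lost race */

	return tag;
}

static void tag_free(unsigned long *map, unsigned int tag)
{
	clear_bit_unlock(tag, map);	/* release: pairs with the acquire above */
}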
Example #29
int tegra30_ahub_allocate_rx_fifo(enum tegra30_ahub_rxcif *rxcif,
				  char *dmachan, int dmachan_len,
				  dma_addr_t *fiforeg)
{
	int channel;
	u32 reg, val;
	struct tegra30_ahub_cif_conf cif_conf;

	channel = find_first_zero_bit(ahub->rx_usage,
				      TEGRA30_AHUB_CHANNEL_CTRL_COUNT);
	if (channel >= TEGRA30_AHUB_CHANNEL_CTRL_COUNT)
		return -EBUSY;

	__set_bit(channel, ahub->rx_usage);

	*rxcif = TEGRA30_AHUB_RXCIF_APBIF_RX0 + channel;
	snprintf(dmachan, dmachan_len, "rx%d", channel);
	*fiforeg = ahub->apbif_addr + TEGRA30_AHUB_CHANNEL_RXFIFO +
		   (channel * TEGRA30_AHUB_CHANNEL_RXFIFO_STRIDE);

	pm_runtime_get_sync(ahub->dev);

	reg = TEGRA30_AHUB_CHANNEL_CTRL +
	      (channel * TEGRA30_AHUB_CHANNEL_CTRL_STRIDE);
	val = tegra30_apbif_read(reg);
	val &= ~(TEGRA30_AHUB_CHANNEL_CTRL_RX_THRESHOLD_MASK |
		 TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_MASK);
	val |= (7 << TEGRA30_AHUB_CHANNEL_CTRL_RX_THRESHOLD_SHIFT) |
	       TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_EN |
	       TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_16;
	tegra30_apbif_write(reg, val);

	cif_conf.threshold = 0;
	cif_conf.audio_channels = 2;
	cif_conf.client_channels = 2;
	cif_conf.audio_bits = TEGRA30_AUDIOCIF_BITS_16;
	cif_conf.client_bits = TEGRA30_AUDIOCIF_BITS_16;
	cif_conf.expand = 0;
	cif_conf.stereo_conv = 0;
	cif_conf.replicate = 0;
	cif_conf.direction = TEGRA30_AUDIOCIF_DIRECTION_RX;
	cif_conf.truncate = 0;
	cif_conf.mono_conv = 0;

	reg = TEGRA30_AHUB_CIF_RX_CTRL +
	      (channel * TEGRA30_AHUB_CIF_RX_CTRL_STRIDE);
	ahub->soc_data->set_audio_cif(ahub->regmap_apbif, reg, &cif_conf);

	pm_runtime_put(ahub->dev);

	return 0;
}
Example #30
static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe_smp_map *smp_map,
                                    size_t n, bool force_alloc)
{
    u32 i, mmb;
    u32 fixed_cnt = bitmap_weight(smp_map->fixed, SMP_MB_CNT);
    struct mdss_data_type *mdata = mdss_mdp_get_mdata();

    if (n <= fixed_cnt)
        return fixed_cnt;
    else
        n -= fixed_cnt;

    i = bitmap_weight(smp_map->allocated, SMP_MB_CNT);

    /*
     * SMP programming is not double buffered. Fail the request,
     * that calls for change in smp configuration (addition/removal
     * of smp blocks), so that fallback solution happens.
     */
#if defined(CONFIG_ARCH_MSM8226) || defined(CONFIG_ARCH_MSM8974)
    if (i != 0 && n != i && !force_alloc) {
        pr_debug("Can't change mmb config, num_blks: %d alloc: %d\n",
                 n, i);
        pr_debug("Can't change mmb configuration in set call\n");
        return 0;
    }
#else
    if (i != 0 && n != i) {
        pr_debug("Can't change mmb config, num_blks: %d alloc: %d\n",
                 n, i);
        pr_debug("Can't change mmb configuration in set call\n");
        return 0;
    }
#endif
    /*
     * Clear previous SMP reservations and reserve according to the
     * latest configuration
     */
    mdss_mdp_smp_mmb_free(smp_map->reserved, false);

    /* Reserve mmb blocks*/
    for (; i < n; i++) {
        if (bitmap_full(mdata->mmb_alloc_map, SMP_MB_CNT))
            break;

        mmb = find_first_zero_bit(mdata->mmb_alloc_map, SMP_MB_CNT);
        set_bit(mmb, smp_map->reserved);
        set_bit(mmb, mdata->mmb_alloc_map);
    }

    return i + fixed_cnt;
}