Example #1
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
	u32 obj;

	if (likely(cnt == 1 && align == 1))
		return mlx4_bitmap_alloc(bitmap);

	spin_lock(&bitmap->lock);

	obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
				bitmap->last, cnt, align - 1);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
						0, cnt, align - 1);
	}

	if (obj < bitmap->max) {
		bitmap_set(bitmap->table, obj, cnt);
		if (obj == bitmap->last) {
			bitmap->last = (obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		obj |= bitmap->top;
	} else
		obj = -1;

	spin_unlock(&bitmap->lock);

	return obj;
}
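Every example in this list follows the same shape: search for a run of zero bits, treat a return value greater than or equal to the searched size as "not found", and mark the run with bitmap_set(), usually under a lock that also covered the search; the run is later released with bitmap_clear(). A minimal, self-contained sketch of that pattern (the demo_* names are invented for illustration and do not come from any of the drivers shown here):
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

#define DEMO_BITS 256

static DECLARE_BITMAP(demo_map, DEMO_BITS);
static DEFINE_SPINLOCK(demo_lock);

/* Reserve a contiguous run of @nr bits, returning its start index. */
static int demo_alloc(unsigned int nr)
{
	unsigned long start;

	spin_lock(&demo_lock);
	start = bitmap_find_next_zero_area(demo_map, DEMO_BITS, 0, nr, 0);
	if (start >= DEMO_BITS) {
		/* No run of @nr consecutive zero bits exists. */
		spin_unlock(&demo_lock);
		return -ENOMEM;
	}
	bitmap_set(demo_map, start, nr);
	spin_unlock(&demo_lock);

	return start;
}

/* Release a run previously handed out by demo_alloc(). */
static void demo_free(unsigned int start, unsigned int nr)
{
	spin_lock(&demo_lock);
	bitmap_clear(demo_map, start, nr);
	spin_unlock(&demo_lock);
}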
Example #2
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
{
	unsigned long flags;
	int index;
	int desc_start;
	int desc_end;
	struct cpdma_desc __iomem *desc = NULL;

	spin_lock_irqsave(&pool->lock, flags);

	if (is_rx) {
		desc_start = 0;
		desc_end = pool->num_desc/2;
	} else {
		desc_start = pool->num_desc/2;
		desc_end = pool->num_desc;
	}

	index = bitmap_find_next_zero_area(pool->bitmap,
				desc_end, desc_start, num_desc, 0);
	if (index < desc_end) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}
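The matching release path is not shown above; presumably it converts the descriptor pointer back into a bit index (the pool is laid out as index * desc_size from pool->iomap) and clears the same number of bits under pool->lock. A sketch under that assumption, with the cpdma_desc_free name chosen to mirror the allocator:
static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	unsigned long flags, index;

	/* Recover the bit index from the descriptor's offset in the pool. */
	index = ((unsigned long)desc - (unsigned long)pool->iomap) /
		pool->desc_size;

	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, index, num_desc);
	pool->used_desc--;
	spin_unlock_irqrestore(&pool->lock, flags);
}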
Example #3
static int ep_fifo_alloc(struct mtu3_ep *mep, u32 seg_size)
{
	struct mtu3_fifo_info *fifo = mep->fifo;
	u32 num_bits = DIV_ROUND_UP(seg_size, MTU3_EP_FIFO_UNIT);
	u32 start_bit;

	/* ensure that @mep->fifo_seg_size is power of two */
	num_bits = roundup_pow_of_two(num_bits);
	if (num_bits > fifo->limit)
		return -EINVAL;

	mep->fifo_seg_size = num_bits * MTU3_EP_FIFO_UNIT;
	num_bits = num_bits * (mep->slot + 1);
	start_bit = bitmap_find_next_zero_area(fifo->bitmap,
			fifo->limit, 0, num_bits, 0);
	if (start_bit >= fifo->limit)
		return -EOVERFLOW;

	bitmap_set(fifo->bitmap, start_bit, num_bits);
	mep->fifo_size = num_bits * MTU3_EP_FIFO_UNIT;
	mep->fifo_addr = fifo->base + MTU3_EP_FIFO_UNIT * start_bit;

	dev_dbg(mep->mtu->dev, "%s fifo:%#x/%#x, start_bit: %d\n",
		__func__, mep->fifo_seg_size, mep->fifo_size, start_bit);

	return mep->fifo_addr;
}
Example #4
/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	ret = -ENOMEM;
	if (start >= nr_irqs)
		goto err;

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
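The inverse operation clears the same range of allocated_irqs once the descriptors have been torn down. A sketch of what irq_free_descs() might look like for this kernel version (the free_desc() helper is assumed to release a single descriptor):
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	unsigned int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	/* Tear down each descriptor, then give the irq numbers back. */
	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}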
Example #5
/*
 * reserve a port number.  if "0" specified, we will try to pick one
 * starting at roce_next_port.  roce_next_port will take on the values
 * 1..4096
 */
u16 usnic_transport_rsrv_port(enum usnic_transport_type type, u16 port_num)
{
	if (type == USNIC_TRANSPORT_ROCE_CUSTOM) {
		spin_lock(&roce_bitmap_lock);
		if (!port_num) {
			port_num = bitmap_find_next_zero_area(roce_bitmap,
						ROCE_BITMAP_SZ,
						roce_next_port /* start */,
						1 /* nr */,
						0 /* align */);
			roce_next_port = (port_num & 4095) + 1;
		} else if (test_bit(port_num, roce_bitmap)) {
			usnic_err("Failed to allocate port for %s\n",
					usnic_transport_to_str(type));
			spin_unlock(&roce_bitmap_lock);
			goto out_fail;
		}
		bitmap_set(roce_bitmap, port_num, 1);
		spin_unlock(&roce_bitmap_lock);
	} else {
		usnic_err("Failed to allocate port - transport %s unsupported\n",
				usnic_transport_to_str(type));
		goto out_fail;
	}

	usnic_dbg("Allocating port %hu for %s\n", port_num,
			usnic_transport_to_str(type));
	return port_num;

out_fail:
	return 0;
}
Example #6
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific get_dev_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;


	for (;;) {
		mutex_lock(&cma->lock);
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, pageno, count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + pageno;
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			clear_cma_bitmap(cma, pfn, count);
			break;
		}
		clear_cma_bitmap(cma, pfn, count);
		pr_debug("%s(): memory range at %p is busy, retrying\n",
				 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
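This variant calls a clear_cma_bitmap() helper that is not shown. Given that the allocation path marks the range under cma->lock, the helper presumably re-takes that lock and clears the bits again; a sketch under that assumption:
static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	unsigned long start = pfn - cma->base_pfn;

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, start, count);
	mutex_unlock(&cma->lock);
}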
Example #7
static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
	int start;
	struct ccp_device *ccp = cmd_q->ccp;

	for (;;) {
		mutex_lock(&ccp->sb_mutex);

		start = (u32)bitmap_find_next_zero_area(ccp->sb,
							ccp->sb_count,
							ccp->sb_start,
							count, 0);
		if (start <= ccp->sb_count) {
			bitmap_set(ccp->sb, start, count);

			mutex_unlock(&ccp->sb_mutex);
			break;
		}

		ccp->sb_avail = 0;

		mutex_unlock(&ccp->sb_mutex);

		/* Wait for KSB entries to become available */
		if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
			return 0;
	}

	return KSB_START + start;
}
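The allocator blocks on ccp->sb_queue until ccp->sb_avail becomes true, so the release side has to clear the bits (minus the KSB_START offset added to the return value), set sb_avail and wake the queue. A sketch of that counterpart; the ccp_free_ksb name and exact details are assumed:
static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
			 unsigned int count)
{
	struct ccp_device *ccp = cmd_q->ccp;

	if (!start)
		return;

	mutex_lock(&ccp->sb_mutex);

	/* Undo the KSB_START bias applied by ccp_alloc_ksb(). */
	bitmap_clear(ccp->sb, start - KSB_START, count);

	/* Let any waiter in ccp_alloc_ksb() retry its search. */
	ccp->sb_avail = 1;

	mutex_unlock(&ccp->sb_mutex);

	wake_up_interruptible_all(&ccp->sb_queue);
}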
Example #8
// ARM10C 20141122
// irq: 16, 1
// ARM10C 20141122
// irq: 32, 1
// ARM10C 20141213
// irq: 160, 1
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;
	// ret: 0
	// ret: 0
	// ret: 0

	// cnt: 1, from: 16, nr_irqs: 160
	// cnt: 1, from: 32, nr_irqs: 160
	// cnt: 1, from: 160, nr_irqs: 416
	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);

	// mutex lock performed using sparse_irq_lock
	// mutex lock performed using sparse_irq_lock
	// mutex lock performed using sparse_irq_lock

	// nr_irqs: 160, from: 16, cnt: 1
	// bitmap_find_next_zero_area(allocated_irqs, 160, 16, 1, 0): 161
	// nr_irqs: 160, from: 32, cnt: 1
	// bitmap_find_next_zero_area(allocated_irqs, 160, 32, 1, 0): 161
	// nr_irqs: 416, from: 160, cnt: 1
	// bitmap_find_next_zero_area(allocated_irqs, 416, 160, 1, 0): 417
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	// start: 161
	// start: 161
	// start: 417

	// start: 161, from: 16
	// start: 161, from: 32
	// start: 417, from: 160
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		// ret: 0, EEXIST: 17
		// ret: 0, EEXIST: 17
		// ret: 0, EEXIST: 17
		ret = -EEXIST;
		// ret: -17 (EEXIST)
		// ret: -17 (EEXIST)
		// ret: -17 (EEXIST)

	mutex_unlock(&sparse_irq_lock);

	// mutex unlock performed using sparse_irq_lock
	// mutex unlock performed using sparse_irq_lock
	// mutex unlock performed using sparse_irq_lock

	// ret: -17 (EEXIST)
	// ret: -17 (EEXIST)
	// ret: -17 (EEXIST)
	return ret;
	// return -17 (EEXIST)
	// return -17 (EEXIST)
	// return -17 (EEXIST)
}
Example #9
static struct page *__dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			printk(KERN_ERR "%s : cma->count is %lu, "
					"pageno is %lu\n", __func__,
					cma->count, pageno);
			ret = -ENOMEM;
			goto error;
		}

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			break;
		} else if (ret != -EBUSY && ret != -EAGAIN) {
			goto error;
		}
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);

	pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
	return pfn_to_page(pfn);
error:
	pr_err("%s(): returned error (%d)\n", __func__, ret);
	mutex_unlock(&cma_mutex);
	return NULL;
}
Example #10
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific get_dev_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	/* HACK for MFC context buffer */
	if (!strncmp(dev_name(dev), "ion_video", strlen("ion_video")) && (count > 8))
		start = 16;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count)
			break;

		pfn = cma->base_pfn + pageno;
		ret = cma->isolated ?
			0 : alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			cma->free_count -= count;
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
Example #11
u32 _mali_osk_bitmap_alloc_range(struct _mali_osk_bitmap *bitmap, int cnt)
{
	u32 obj;

	MALI_DEBUG_ASSERT_POINTER(bitmap);

	if (0 >= cnt) {
		return -1;
	}

	if (1 == cnt) {
		return _mali_osk_bitmap_alloc(bitmap);
	}

	_mali_osk_spinlock_lock(bitmap->lock);
	obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
					 bitmap->last, cnt, 0);

	if (obj >= bitmap->max) {
		obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
						 bitmap->reserve, cnt, 0);
	}

	if (obj < bitmap->max) {
		bitmap_set(bitmap->table, obj, cnt);

		bitmap->last = (obj + cnt);
		if (bitmap->last >= bitmap->max) {
			bitmap->last = bitmap->reserve;
		}
	} else {
		obj = -1;
	}

	if (obj != -1) {
		bitmap->avail -= cnt;
	}

	_mali_osk_spinlock_unlock(bitmap->lock);

	return obj;
}
Example #12
/* Allocate the requested number of contiguous LSB slots
 * from the LSB bitmap. Look in the private range for this
 * queue first; failing that, check the public area.
 * If no space is available, wait around.
 * Return: first slot number
 */
static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
	struct ccp_device *ccp;
	int start;

	/* First look at the map for the queue */
	if (cmd_q->lsb >= 0) {
		start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap,
							LSB_SIZE,
							0, count, 0);
		if (start < LSB_SIZE) {
			bitmap_set(cmd_q->lsbmap, start, count);
			return start + cmd_q->lsb * LSB_SIZE;
		}
	}

	/* No joy; try to get an entry from the shared blocks */
	ccp = cmd_q->ccp;
	for (;;) {
		mutex_lock(&ccp->sb_mutex);

		start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
							MAX_LSB_CNT * LSB_SIZE,
							0,
							count, 0);
		if (start <= MAX_LSB_CNT * LSB_SIZE) {
			bitmap_set(ccp->lsbmap, start, count);

			mutex_unlock(&ccp->sb_mutex);
			return start;
		}

		ccp->sb_avail = 0;

		mutex_unlock(&ccp->sb_mutex);

		/* Wait for KSB entries to become available */
		if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
			return 0;
	}
}
Example #13
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific get_dev_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn = 0, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count)
			break;

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		pr_debug("%s(): memory range at %p (%lx, %lx) is busy,"\
			" retrying\n", __func__, pfn_to_page(pfn),
			pfn, pfn + count);
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	if (!page)
		cma_bitmap_show(dev);

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p pfn(%lx)\n", __func__, page, pfn);
	return page;
}
Example #14
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
{
	unsigned long flags;
	int index;
	struct cpdma_desc __iomem *desc = NULL;
	static int last_index = 4096;

	spin_lock_irqsave(&pool->lock, flags);

	if (is_rx) {
		index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc/2, 0,
					num_desc, 0);
	} else {
		if (last_index >= pool->num_desc)
			last_index = pool->num_desc / 2;

		index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc,
					last_index, num_desc, 0);

		if (index >= pool->num_desc) {
			index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc,
						pool->num_desc/2, num_desc, 0);
		}

		if (index < pool->num_desc)
			last_index = index + 1;
		else
			last_index = pool->num_desc / 2;
	}

	if (index < pool->num_desc) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}
Example #15
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
				int align, unsigned long *obj)
{
	int ret = 0;
	int i;

	if (likely(cnt == 1 && align == 1))
		return hns_roce_bitmap_alloc(bitmap, obj);

	spin_lock(&bitmap->lock);

	*obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
					  bitmap->last, cnt, align - 1);
	if (*obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
			       & bitmap->mask;
		*obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0,
						  cnt, align - 1);
	}

	if (*obj < bitmap->max) {
		for (i = 0; i < cnt; i++)
			set_bit(*obj + i, bitmap->table);

		if (*obj == bitmap->last) {
			bitmap->last = (*obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		*obj |= bitmap->top;
	} else {
		ret = -1;
	}

	spin_unlock(&bitmap->lock);

	return ret;
}
Example #16
static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
				      unsigned int num, bool managed)
{
	unsigned int area, start = m->alloc_start;
	unsigned int end = m->alloc_end;

	bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
	bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
	area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
	if (area >= end)
		return area;
	if (managed)
		bitmap_set(cm->managed_map, area, num);
	else
		bitmap_set(cm->alloc_map, area, num);
	return area;
}
Example #17
ssize_t rbmp_allocate(struct rbmp * b)
{
        ssize_t id;

        if (!b)
                return bad_id(b);

        id = (ssize_t) bitmap_find_next_zero_area(b->bitmap,
                                                  BITS_IN_BITMAP,
                                                  0, 1, 0);
        if (id < 0)
                return bad_id(b);

        bitmap_set(b->bitmap, id, 1);

        return id + b->offset;
}
Example #18
/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
Example #19
static unsigned long __alloc_dma_pages(unsigned int pages)
{
	unsigned long ret = 0, flags;
	unsigned long start;

	if (dma_initialized == 0)
		dma_alloc_init(_ramend - DMA_UNCACHED_REGION, _ramend);

	spin_lock_irqsave(&dma_page_lock, flags);

	start = bitmap_find_next_zero_area(dma_page, dma_pages, 0, pages, 0);
	if (start < dma_pages) {
		ret = dma_base + (start << PAGE_SHIFT);
		bitmap_set(dma_page, start, pages);
	}
	spin_unlock_irqrestore(&dma_page_lock, flags);
	return ret;
}
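The corresponding free path converts the returned address back into a page index relative to dma_base and clears the bits under the same lock; a sketch (the __free_dma_pages name is assumed):
static void __free_dma_pages(unsigned long addr, unsigned int pages)
{
	unsigned long page = (addr - dma_base) >> PAGE_SHIFT;
	unsigned long flags;

	/* Ignore ranges that __alloc_dma_pages() could never have returned. */
	if (page + pages > dma_pages)
		return;

	spin_lock_irqsave(&dma_page_lock, flags);
	bitmap_clear(dma_page, page, pages);
	spin_unlock_irqrestore(&dma_page_lock, flags);
}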
Example #20
/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
Example #21
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific get_dev_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count)
			break;

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	return page;
}
Example #22
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
{
	rtdm_lockctx_t context;
	int index;
	struct cpdma_desc __iomem *desc = NULL;

	rtdm_lock_get_irqsave(&pool->lock, context);

	index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
					   num_desc, 0);
	if (index < pool->num_desc) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	rtdm_lock_put_irqrestore(&pool->lock, context);
	return desc;
}
Example #23
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
{
	unsigned long flags;
	int index;
	struct cpdma_desc __iomem *desc = NULL;

	spin_lock_irqsave(&pool->lock, flags);

	index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
					   num_desc, 0);
	if (index < pool->num_desc) {
		bitmap_set(pool->bitmap, index, num_desc);
		desc = pool->iomap + pool->desc_size * index;
		pool->used_desc++;
	}

	spin_unlock_irqrestore(&pool->lock, flags);
	return desc;
}
Example #24
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	unsigned long flags;
	int index;
	bool ret;
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;

	spin_lock_irqsave(&pool->lock, flags);

	index = bitmap_find_next_zero_area(pool->bitmap,
				pool->num_desc, pool->num_desc/2, 1, 0);

	if (index < pool->num_desc)
		ret = true;
	else
		ret = false;

	spin_unlock_irqrestore(&pool->lock, flags);
	return ret;
}
Example #25
unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
			       unsigned long start, unsigned int nr,
			       unsigned long shift, unsigned long boundary_size,
			       unsigned long align_mask)
{
	unsigned long index;

	/* We don't want the last of the limit */
	size -= 1;
again:
	index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
	if (index < size) {
		if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
			start = ALIGN(shift + index, boundary_size) - shift;
			goto again;
		}
		bitmap_set(map, index, nr);
		return index;
	}
	return -1;
}
Example #26
unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
			       unsigned long start, unsigned int nr,
			       unsigned long shift, unsigned long boundary_size,
			       unsigned long align_mask)
{
	unsigned long index;

	
	size -= 1;
again:
	index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
	if (index < size) {
		if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
			
			start = index + 1;
			goto again;
		}
		bitmap_set(map, index, nr);
		return index;
	}
	return -1;
}
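Both variants only ever set bits; the caller is expected to hand the index and length back to a matching free helper, which reduces to a single bitmap_clear(). A sketch with an assumed name and signature:
void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
{
	bitmap_clear(map, start, nr);
}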
Example #27
int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num)
{
    unsigned long flags;
    int offset, order = get_count_order(num);

    spin_lock_irqsave(&bmp->lock, flags);

    offset = bitmap_find_next_zero_area(bmp->bitmap, bmp->irq_count, 0,
                                        num, (1 << order) - 1);
    if (offset > bmp->irq_count)
        goto err;

    bitmap_set(bmp->bitmap, offset, num);
    spin_unlock_irqrestore(&bmp->lock, flags);

    pr_debug("msi_bitmap: allocated 0x%x at offset 0x%x\n", num, offset);

    return offset;
err:
    spin_unlock_irqrestore(&bmp->lock, flags);
    return -ENOMEM;
}
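Note that only the allocation needs the power-of-two alignment mask ((1 << order) - 1); releasing the hwirqs is just a bitmap_clear() of num bits at the returned offset, under the same lock. A sketch of the counterpart (name and signature assumed):
void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset,
                            unsigned int num)
{
    unsigned long flags;

    spin_lock_irqsave(&bmp->lock, flags);
    bitmap_clear(bmp->bitmap, offset, num);
    spin_unlock_irqrestore(&bmp->lock, flags);
}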
Example #28
/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}
	
	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
Example #29
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
			     struct isp1362_ep *ep, u16 len)
{
	int ptd_offset = -EINVAL;
	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
	int found;

	BUG_ON(len > epq->buf_size);

	if (!epq->buf_avail)
		return -ENOMEM;

	if (ep->num_ptds)
		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
		    epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
	BUG_ON(ep->num_ptds != 0);

	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
						num_ptds, 0);
	if (found >= epq->buf_count)
		return -EOVERFLOW;

	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
	ptd_offset = get_ptd_offset(epq, found);
	WARN_ON(ptd_offset < 0);
	ep->ptd_offset = ptd_offset;
	ep->num_ptds += num_ptds;
	epq->buf_avail -= num_ptds;
	BUG_ON(epq->buf_avail > epq->buf_count);
	ep->ptd_index = found;
	bitmap_set(&epq->buf_map, found, num_ptds);
	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);

	return found;
}
Example #30
int spapr_irq_msi_alloc(sPAPRMachineState *spapr, uint32_t num, bool align,
                        Error **errp)
{
    int irq;

    /*
     * The 'align_mask' parameter of bitmap_find_next_zero_area()
     * should be one less than a power of 2; 0 means no
     * alignment. Adapt the 'align' value of the former allocator
     * to fit the requirements of bitmap_find_next_zero_area()
     */
    align -= 1;

    irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num,
                                     align);
    if (irq == spapr->irq_map_nr) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    bitmap_set(spapr->irq_map, irq, num);

    return irq + SPAPR_IRQ_MSI;
}
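Since the allocator adds SPAPR_IRQ_MSI to the bitmap index before returning it, the matching free has to subtract it again before clearing. A sketch of that counterpart (name and signature assumed):
void spapr_irq_msi_free(sPAPRMachineState *spapr, int irq, uint32_t num)
{
    bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
}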