/**
 * @ingroup trans
 * @anchor hpsb_get_tlabel
 * allocate a transaction label
 *
 * Every asynchronous transaction on the 1394 bus needs a transaction
 * label to match the response to the request.  This label has to be
 * different from any other transaction label in an outstanding request to
 * the same node to make matching possible without ambiguity.
 *
 * There are 64 different tlabels, so an allocated tlabel has to be freed
 * with hpsb_free_tlabel() after the transaction is complete (unless it's
 * reused again for the same target node).
 *
 * @param packet - the packet whose tlabel/tpool we set
 * @return Zero on success, otherwise non-zero. A non-zero return
 * generally means there are no available tlabels.
 */
int hpsb_get_tlabel(struct hpsb_packet *packet)
{
	unsigned long flags;
	struct hpsb_tlabel_pool *tp;

	tp = &packet->host->tpool[packet->node_id & NODE_MASK];

	//~ if (irqs_disabled() || in_atomic()) {
	//~ if(in_interrupt()) {
		//~ if (down_trylock(&tp->count))
			//~ return 1;
	//~ } else {
		//~ down(&tp->count);
	//~ }
	//~ rtos_res_lock(&tp->count); //64 tasks can call here without blocking
	if (atomic_read(&tp->count) < 0) {
		HPSB_ERR("ran out of tlabels\n");
		return 1;
	}
	atomic_dec(&tp->count);

	rtos_spin_lock_irqsave(&tp->lock, flags);

	packet->tlabel = find_next_zero_bit(tp->pool, 64, tp->next);
	if (packet->tlabel > 63)
		packet->tlabel = find_first_zero_bit(tp->pool, 64);
	tp->next = (packet->tlabel + 1) % 64;
	/* Should _never_ happen */
	RTOS_ASSERT(!test_and_set_bit(packet->tlabel, tp->pool),;);

	rtos_spin_unlock_irqrestore(&tp->lock, flags);

	return 0;
}
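For illustration, the same allocate/free pattern reduces to a self-contained user-space sketch (hypothetical names, no locking or blocking, a single 64-entry pool):

#include <stdint.h>
#include <stdio.h>

static uint64_t pool;		/* one bit per tlabel, 1 = in use */
static unsigned int next_hint;	/* rotating search start, like tp->next */

/* Allocate the first free tlabel at or after next_hint, wrapping once. */
static int tlabel_alloc(void)
{
	for (unsigned int i = 0; i < 64; i++) {
		unsigned int t = (next_hint + i) % 64;
		if (!(pool & ((uint64_t)1 << t))) {
			pool |= (uint64_t)1 << t;
			next_hint = (t + 1) % 64;
			return (int)t;
		}
	}
	return -1;	/* all 64 labels outstanding to this node */
}

static void tlabel_free(int t)
{
	pool &= ~((uint64_t)1 << t);
}

int main(void)
{
	int a = tlabel_alloc(), b = tlabel_alloc();
	printf("%d %d\n", a, b);		/* 0 1 */
	tlabel_free(a);
	printf("%d\n", tlabel_alloc());		/* 2: the hint keeps rotating */
	return 0;
}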
Example #2
File: id_table.c Project: coyizumi/cs111
/*
 * Trivial bitmap-based allocator. If the random flag is set, the
 * allocator is designed to:
 * - pseudo-randomize the id returned such that it is not trivially predictable.
 * - avoid reuse of recently used id (at the expense of predictability)
 */
u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
{
	unsigned long flags;
	u32 obj;

	spin_lock_irqsave(&alloc->lock, flags);

	obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
	if (obj >= alloc->max)
		obj = find_first_zero_bit(alloc->table, alloc->max);

	if (obj < alloc->max) {
		if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
			alloc->last += arc4random() % RANDOM_SKIP;
		else
			alloc->last = obj + 1;
		if (alloc->last >= alloc->max)
			alloc->last = 0;
		set_bit(obj, alloc->table);
		obj += alloc->start;
	} else
		obj = -1;

	spin_unlock_irqrestore(&alloc->lock, flags);
	return obj;
}
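The release side is the mirror image: subtract the base offset and clear the bit under the same lock. A sketch reconstructed from the allocation path above (the in-tree c4iw_id_free should look essentially like this):

void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
{
	unsigned long flags;

	obj -= alloc->start;	/* undo the offset added on allocation */

	spin_lock_irqsave(&alloc->lock, flags);
	clear_bit(obj, alloc->table);
	spin_unlock_irqrestore(&alloc->lock, flags);
}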
Example #3
File: bitmap.c Project: pmem/ndctl
int bitmap_full(const unsigned long *src, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));

	return find_next_zero_bit(src, nbits, 0UL) == nbits;
}
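A typical caller treats the full check as a fast path before searching; reserve_slot() below is a hypothetical helper, not part of ndctl, and assumes the usual set_bit() companion from the same bitmap header:

static int reserve_slot(unsigned long *map, unsigned int nbits)
{
	unsigned int slot;

	if (bitmap_full(map, nbits))
		return -1;	/* find_next_zero_bit() would just return nbits */

	slot = find_next_zero_bit(map, nbits, 0);
	set_bit(slot, map);
	return slot;
}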
static int cma_bitmap_show(struct device *dev)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long start = 0, set = 0, end = 0, sum = 0;

	pr_debug("cma free list pfn[%lx %lx]: dev(%s)\n", cma->base_pfn,
		cma->base_pfn + cma->count - 1, dev ? dev_name(dev) : "");

	while (1) {
		set = find_next_bit(cma->bitmap, cma->count, start);
		if (set >= cma->count)
			break;
		end = find_next_zero_bit(cma->bitmap, cma->count, set);

		if (set > 0)
			pr_debug("[%6lx:%6lx] %6lx %6lx",
				cma->base_pfn + start, cma->base_pfn + set - 1,
				set - start, end - set);
		start = end;
		sum += (end - set);
	}

	if (start < cma->count)
		pr_debug("[%6lx:%6lx] %6lx ",
			cma->base_pfn + start, cma->base_pfn + cma->count - 1,
			cma->count - start);

	pr_info("Total: free(%lx) set(%lx) all(%lx)\n",
		cma->count - sum, sum, cma->count);
	return 0;
}
Example #5
static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
			      bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(&word->word, word->depth, hint);
		if (unlikely(nr >= word->depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit(nr, &word->word))
			break;

		hint = nr + 1;
		if (hint >= word->depth - 1)
			hint = 0;
	}

	return nr;
}
Example #6
static void dma_contiguous_deisolate_until(struct device *dev, int idx_until)
{
	struct cma *cma = dev_get_cma_area(dev);
	int idx;

	if (!cma || !idx_until)
		return;

	mutex_lock(&cma_mutex);

	if (!cma->isolated) {
		mutex_unlock(&cma_mutex);
		dev_err(dev, "Not isolated!\n");
		return;
	}

	idx = find_first_zero_bit(cma->bitmap, idx_until);
	while (idx < idx_until) {
		int idx_set;

		idx_set = find_next_bit(cma->bitmap, idx_until, idx);

		free_contig_range(cma->base_pfn + idx, idx_set - idx);

		idx = find_next_zero_bit(cma->bitmap, idx_until, idx_set);
	}

	cma->isolated = false;

	mutex_unlock(&cma_mutex);
}
/**
 * hpsb_get_tlabel - allocate a transaction label
 * @packet: the packet whose tlabel/tpool we set
 *
 * Every asynchronous transaction on the 1394 bus needs a transaction
 * label to match the response to the request.  This label has to be
 * different from any other transaction label in an outstanding request to
 * the same node to make matching possible without ambiguity.
 *
 * There are 64 different tlabels, so an allocated tlabel has to be freed
 * with hpsb_free_tlabel() after the transaction is complete (unless it's
 * reused again for the same target node).
 *
 * Return value: Zero on success, otherwise non-zero. A non-zero return
 * generally means there are no available tlabels. If this is called
 * outside of interrupt or atomic context, it will sleep until it can
 * return a tlabel.
 */
int hpsb_get_tlabel(struct hpsb_packet *packet)
{
	unsigned long flags;
	struct hpsb_tlabel_pool *tp;
	int n = NODEID_TO_NODE(packet->node_id);

	if (unlikely(n == ALL_NODES))
		return 0;
	tp = &packet->host->tpool[n];

	if (irqs_disabled() || in_atomic()) {
		if (down_trylock(&tp->count))
			return 1;
	} else {
		down(&tp->count);
	}

	spin_lock_irqsave(&tp->lock, flags);

	packet->tlabel = find_next_zero_bit(tp->pool, 64, tp->next);
	if (packet->tlabel > 63)
		packet->tlabel = find_first_zero_bit(tp->pool, 64);
	tp->next = (packet->tlabel + 1) % 64;
	/* Should _never_ happen */
	BUG_ON(test_and_set_bit(packet->tlabel, tp->pool));
	tp->allocations++;
	spin_unlock_irqrestore(&tp->lock, flags);

	return 0;
}
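The counterpart, hpsb_free_tlabel(), clears the bit and releases the semaphore slot so a sleeper in hpsb_get_tlabel() can proceed. A sketch reconstructed from the allocation path above (the exact in-tree body may differ):

void hpsb_free_tlabel(struct hpsb_packet *packet)
{
	unsigned long flags;
	struct hpsb_tlabel_pool *tp;
	int n = NODEID_TO_NODE(packet->node_id);

	if (unlikely(n == ALL_NODES))
		return;		/* broadcasts never took a label above */
	tp = &packet->host->tpool[n];

	BUG_ON(packet->tlabel > 63);

	spin_lock_irqsave(&tp->lock, flags);
	BUG_ON(!test_and_clear_bit(packet->tlabel, tp->pool));
	spin_unlock_irqrestore(&tp->lock, flags);

	up(&tp->count);		/* wake anyone sleeping in hpsb_get_tlabel() */
}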
Example #8
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
{
	int ret = 0;

	spin_lock(&bitmap->lock);
	*obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (*obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
			       & bitmap->mask;
		*obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (*obj < bitmap->max) {
		set_bit(*obj, bitmap->table);
		bitmap->last = (*obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		*obj |= bitmap->top;
	} else {
		ret = -1;
	}

	spin_unlock(&bitmap->lock);

	return ret;
}
Example #9
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj;

	spin_lock(&bitmap->lock);

	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (obj < bitmap->max) {
		set_bit(obj, bitmap->table);
		bitmap->last = (obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		--bitmap->avail;

	spin_unlock(&bitmap->lock);

	return obj;
}
/* same as hpsb_get_tlabel, except that it returns immediately */
static int hpsb_get_tlabel_atomic(struct hpsb_packet *packet)
{
	unsigned long flags, *tp;
	u8 *next;
	int tlabel, n = NODEID_TO_NODE(packet->node_id);

	/* Broadcast transactions are complete once the request has been sent.
	 * Use the same transaction label for all broadcast transactions. */
	if (unlikely(n == ALL_NODES)) {
		packet->tlabel = 0;
		return 0;
	}
	tp = packet->host->tl_pool[n].map;
	next = &packet->host->next_tl[n];

	spin_lock_irqsave(&hpsb_tlabel_lock, flags);
	tlabel = find_next_zero_bit(tp, 64, *next);
	if (tlabel > 63)
		tlabel = find_first_zero_bit(tp, 64);
	if (tlabel > 63) {
		spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);
		return -EAGAIN;
	}
	__set_bit(tlabel, tp);
	*next = (tlabel + 1) & 63;
	spin_unlock_irqrestore(&hpsb_tlabel_lock, flags);

	packet->tlabel = tlabel;
	return 0;
}
Example #11
static int mlxsw_sp2_kvdl_part_find_zero_bits(struct mlxsw_sp2_kvdl_part *part,
					      unsigned int bit_count,
					      unsigned int *p_bit)
{
	unsigned int start_bit;
	unsigned int bit;
	unsigned int i;
	bool wrap = false;

	start_bit = part->last_allocated_bit + 1;
	if (start_bit == part->usage_bit_count)
		start_bit = 0;
	bit = start_bit;
again:
	bit = find_next_zero_bit(part->usage, part->usage_bit_count, bit);
	if (!wrap && bit + bit_count >= part->usage_bit_count) {
		wrap = true;
		bit = 0;
		goto again;
	}
	if (wrap && bit + bit_count >= start_bit)
		return -ENOBUFS;
	for (i = 0; i < bit_count; i++) {
		if (test_bit(bit + i, part->usage)) {
			bit += bit_count;
			goto again;
		}
	}
	*p_bit = bit;
	return 0;
}
Example #12
File: pdx.c Project: CPFL/xen
void __init pfn_pdx_hole_setup(unsigned long mask)
{
    unsigned int i, j, bottom_shift = 0, hole_shift = 0;

    /*
     * We skip the first MAX_ORDER bits, as we never want to compress them.
     * This guarantees that page-pointer arithmetic remains valid within
     * contiguous aligned ranges of 2^MAX_ORDER pages. Among others, our
     * buddy allocator relies on this assumption.
     */
    for ( j = MAX_ORDER-1; ; )
    {
        i = find_next_zero_bit(&mask, BITS_PER_LONG, j);
        j = find_next_bit(&mask, BITS_PER_LONG, i);
        if ( j >= BITS_PER_LONG )
            break;
        if ( j - i > hole_shift )
        {
            hole_shift = j - i;
            bottom_shift = i;
        }
    }
    if ( !hole_shift )
        return;

    printk(KERN_INFO "PFN compression on bits %u...%u\n",
           bottom_shift, bottom_shift + hole_shift - 1);

    pfn_pdx_hole_shift  = hole_shift;
    pfn_pdx_bottom_mask = (1UL << bottom_shift) - 1;
    ma_va_bottom_mask   = (PAGE_SIZE << bottom_shift) - 1;
    pfn_hole_mask       = ((1UL << hole_shift) - 1) << bottom_shift;
    pfn_top_mask        = ~(pfn_pdx_bottom_mask | pfn_hole_mask);
    ma_top_mask         = pfn_top_mask << PAGE_SHIFT;
}
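As a concrete run-through, take a hypothetical valid-PFN mask with bits 0..23 and 28..39 set and MAX_ORDER taken as 20: the scan picks the 24..27 hole, so hole_shift = 4 and bottom_shift = 24, and PFN bits 28 and up get shifted down by four. A user-space rerun of the same scan (assuming 64-bit unsigned long):

#include <stdio.h>

int main(void)
{
    /* bits 0..23 and 28..39 set; bits 24..27 form the hole */
    unsigned long mask = (0xFFFUL << 28) | ((1UL << 24) - 1);
    unsigned int bottom_shift = 0, hole_shift = 0;

    for ( unsigned int i = 20; i < 64; )    /* skip the first MAX_ORDER bits */
    {
        while ( i < 64 && (mask & (1UL << i)) )
            i++;                            /* i = next zero bit */
        unsigned int j = i;
        while ( j < 64 && !(mask & (1UL << j)) )
            j++;                            /* j = next set bit */
        if ( j >= 64 )
            break;                          /* trailing zeros are not a hole */
        if ( j - i > hole_shift )
        {
            hole_shift = j - i;
            bottom_shift = i;
        }
        i = j;
    }

    printf("hole on bits %u...%u\n", bottom_shift,
           bottom_shift + hole_shift - 1);  /* prints: hole on bits 24...27 */
    return 0;
}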
Example #13
File: genetlink.c Project: 020gzh/linux
static int genl_allocate_reserve_groups(int n_groups, int *first_id)
{
	unsigned long *new_groups;
	int start = 0;
	int i;
	int id;
	bool fits;

	do {
		if (start == 0)
			id = find_first_zero_bit(mc_groups,
						 mc_groups_longs *
						 BITS_PER_LONG);
		else
			id = find_next_zero_bit(mc_groups,
						mc_groups_longs * BITS_PER_LONG,
						start);

		fits = true;
		for (i = id;
		     i < min_t(int, id + n_groups,
			       mc_groups_longs * BITS_PER_LONG);
		     i++) {
			if (test_bit(i, mc_groups)) {
				start = i;
				fits = false;
				break;
			}
		}

		if (id + n_groups > mc_groups_longs * BITS_PER_LONG) {
			unsigned long new_longs = mc_groups_longs +
						  BITS_TO_LONGS(n_groups);
			size_t nlen = new_longs * sizeof(unsigned long);

			if (mc_groups == &mc_group_start) {
				new_groups = kzalloc(nlen, GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				*mc_groups = mc_group_start;
			} else {
				new_groups = krealloc(mc_groups, nlen,
						      GFP_KERNEL);
				if (!new_groups)
					return -ENOMEM;
				mc_groups = new_groups;
				for (i = 0; i < BITS_TO_LONGS(n_groups); i++)
					mc_groups[mc_groups_longs + i] = 0;
			}
			mc_groups_longs = new_longs;
		}
	} while (!fits);

	for (i = id; i < id + n_groups; i++)
		set_bit(i, mc_groups);
	*first_id = id;
	return 0;
}
Example #14
File: open.c Project: Rick33/freevms
/*
 * Find an empty file descriptor entry, and mark it busy.
 */
int get_unused_fd(void)
{
	struct files_struct * files = current->files;
	int fd, error;

  	error = -EMFILE;
	write_lock(&files->file_lock);

repeat:
 	fd = find_next_zero_bit(files->open_fds, 
				files->max_fdset, 
				files->next_fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	if (fd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

	/* Do we need to expand the fdset array? */
	if (fd >= files->max_fdset) {
		error = expand_fdset(files, fd);
		if (!error) {
			error = -EMFILE;
			goto repeat;
		}
		goto out;
	}
	
	/* 
	 * Check whether we need to expand the fd array.
	 */
	if (fd >= files->max_fds) {
		error = expand_fd_array(files, fd);
		if (!error) {
			error = -EMFILE;
			goto repeat;
		}
		goto out;
	}

	FD_SET(fd, files->open_fds);
	FD_CLR(fd, files->close_on_exec);
	files->next_fd = fd + 1;
#if 1
	/* Sanity check */
	if (files->fd[fd] != NULL) {
		printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
		files->fd[fd] = NULL;
	}
#endif
	error = fd;

out:
	write_unlock(&files->file_lock);
	return error;
}
Example #15
/*
 * Register a cec device node
 *
 * The registration code assigns minor numbers and registers the new device node
 * with the kernel. An error is returned if no free minor number can be found,
 * or if the registration of the device node fails.
 *
 * Zero is returned on success.
 *
 * Note that if the cec_devnode_register call fails, the release() callback of
 * the cec_devnode structure is *not* called, so the caller is responsible for
 * freeing any data.
 */
static int __must_check cec_devnode_register(struct cec_devnode *devnode,
					     struct module *owner)
{
	int minor;
	int ret;

	/* Initialization */
	INIT_LIST_HEAD(&devnode->fhs);
	mutex_init(&devnode->lock);

	/* Part 1: Find a free minor number */
	mutex_lock(&cec_devnode_lock);
	minor = find_next_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES, 0);
	if (minor == CEC_NUM_DEVICES) {
		mutex_unlock(&cec_devnode_lock);
		pr_err("could not get a free minor\n");
		return -ENFILE;
	}

	set_bit(minor, cec_devnode_nums);
	mutex_unlock(&cec_devnode_lock);

	devnode->minor = minor;
	devnode->dev.bus = &cec_bus_type;
	devnode->dev.devt = MKDEV(MAJOR(cec_dev_t), minor);
	devnode->dev.release = cec_devnode_release;
	dev_set_name(&devnode->dev, "cec%d", devnode->minor);
	device_initialize(&devnode->dev);

	/* Part 2: Initialize and register the character device */
	cdev_init(&devnode->cdev, &cec_devnode_fops);
	devnode->cdev.kobj.parent = &devnode->dev.kobj;
	devnode->cdev.owner = owner;

	ret = cdev_add(&devnode->cdev, devnode->dev.devt, 1);
	if (ret < 0) {
		pr_err("%s: cdev_add failed\n", __func__);
		goto clr_bit;
	}

	ret = device_add(&devnode->dev);
	if (ret)
		goto cdev_del;

	devnode->registered = true;
	return 0;

cdev_del:
	cdev_del(&devnode->cdev);
clr_bit:
	mutex_lock(&cec_devnode_lock);
	clear_bit(devnode->minor, cec_devnode_nums);
	mutex_unlock(&cec_devnode_lock);
	return ret;
}
Example #16
static inline int dupfd(unsigned int fd, unsigned int start)
{
	struct files_struct * files = current->files;
	struct file * file;
	unsigned int newfd;
	int error;

	error = -EINVAL;
	if (start >= NR_OPEN)
		goto out;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

repeat:
	error = -EMFILE;
	if (start < files->next_fd)
		start = files->next_fd;
	/* At this point, start MUST be <= max_fdset */
#if 1
	if (start > files->max_fdset)
		printk (KERN_ERR "dupfd: fd %d, max %d\n", 
			start, files->max_fdset);
#endif
	newfd = find_next_zero_bit(files->open_fds->fds_bits, 
				files->max_fdset,
				start);
	if (newfd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out_putf;

	error = expand_files(files, newfd);
	if (error < 0)
		goto out_putf;
	if (error) /* If we might have blocked, try again. */
		goto repeat;

	FD_SET(newfd, files->open_fds);
	FD_CLR(newfd, files->close_on_exec);
	if (start <= files->next_fd)
		files->next_fd = newfd + 1;
	fd_install(newfd, file);
	error = newfd;
out:
#ifdef FDSET_DEBUG	
	if (error < 0)
		printk (KERN_ERR __FUNCTION__ ": return %d\n", error);
#endif
	return error;

out_putf:
	fput(file);
	goto out;
}
Example #17
/* Install a given filp in a given files_struct, with CLOEXEC set.
 * Safe for files != current->files.
 * Mostly cut-and-paste from linux-2.6.0/fs/fcntl.c:locate_fd()
 */
int cr_dup_other(struct files_struct *files, struct file *filp)
{
    unsigned int newfd;
    unsigned int start;
    unsigned int max_fds;
    int error;
    cr_fdtable_t *fdt;

    spin_lock(&files->file_lock);

repeat: 
    fdt = cr_fdtable(files);
    start = CR_NEXT_FD(files, fdt);
    newfd = start;
    max_fds = CR_MAX_FDS(fdt);
    if (start < max_fds) {
	newfd = find_next_zero_bit(CR_OPEN_FDS_BITS(fdt),
				   max_fds, start);
    }

    /* XXX: Really shouldn't be using current here.
     * However, I haven't bothered to figure out the locking
     * requirements for using anything else.
     * XXX: Probably could just pass the limit in.
     * XXX: Later kernels push this into expand_files()
     */
    error = -EMFILE;
    if (newfd >= CR_RLIM(current)[RLIMIT_NOFILE].rlim_cur) {
	goto out;
    }

    error = expand_files(files, newfd);
    if (error < 0) {
	goto out;
    } else if (error) {
	/* grew - search again (also reacquires fdt) */
	goto repeat;
    }

    CR_NEXT_FD(files, fdt) = newfd + 1;

    /* Claim */
    cr_set_open_fd(newfd, fdt);
    cr_set_close_on_exec(newfd, fdt);

    /* Install */
    get_file(filp);
    rcu_assign_pointer(fdt->fd[newfd], filp);

    error = newfd;
    
out:
    spin_unlock(&files->file_lock);
    return error;
}
Example #18
File: open.c Project: 274914765/C
/*
 * Find an empty file descriptor entry, and mark it busy.
 */
int get_unused_fd_flags(int flags)
{
    struct files_struct * files = current->files;
    int fd, error;
    struct fdtable *fdt;

      error = -EMFILE;
    spin_lock(&files->file_lock);

repeat:
    fdt = files_fdtable(files);
    fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
                files->next_fd);

    /*
     * N.B. For clone tasks sharing a files structure, this test
     * will limit the total number of files that can be opened.
     */
    if (fd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
        goto out;

    /* Do we need to expand the fd array or fd set?  */
    error = expand_files(files, fd);
    if (error < 0)
        goto out;

    if (error) {
        /*
         * If we needed to expand the fd array we
         * might have blocked - try again.
         */
        error = -EMFILE;
        goto repeat;
    }

    FD_SET(fd, fdt->open_fds);
    if (flags & O_CLOEXEC)
        FD_SET(fd, fdt->close_on_exec);
    else
        FD_CLR(fd, fdt->close_on_exec);
    files->next_fd = fd + 1;
#if 1
    /* Sanity check */
    if (fdt->fd[fd] != NULL) {
        printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
        fdt->fd[fd] = NULL;
    }
#endif
    error = fd;

out:
    spin_unlock(&files->file_lock);
    return error;
}
Example #19
File: vmap.c Project: CrazyXen/XEN_CODE
static unsigned int vm_size(const void *va)
{
    unsigned int start = vm_index(va), end;

    if ( !start )
        return 0;

    end = find_next_zero_bit(vm_bitmap, vm_top, start + 1);

    return min(end, vm_top) - start;
}
static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx)
{
	unsigned long flags;

	spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
	rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx);
	if (rpipe_idx < wa->rpipes)
		set_bit(rpipe_idx, wa->rpipe_bm);
	spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);

	return rpipe_idx;
}
/**
 * media_devnode_register - register a media device node
 * @mdev: media device node structure we want to register
 *
 * The registration code assigns minor numbers and registers the new device node
 * with the kernel. An error is returned if no free minor number can be found,
 * or if the registration of the device node fails.
 *
 * Zero is returned on success.
 *
 * Note that if the media_devnode_register call fails, the release() callback of
 * the media_devnode structure is *not* called, so the caller is responsible for
 * freeing any data.
 */
int __must_check media_devnode_register(struct media_devnode *mdev)
{
	int minor;
	int ret;

	/* Part 1: Find a free minor number */
	mutex_lock(&media_devnode_lock);
	minor = find_next_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES, 0);
	if (minor == MEDIA_NUM_DEVICES) {
		mutex_unlock(&media_devnode_lock);
		printk(KERN_ERR "could not get a free minor\n");
		return -ENFILE;
	}

	set_bit(minor, media_devnode_nums);
	mutex_unlock(&media_devnode_lock);

	mdev->minor = minor;

	/* Part 2: Initialize and register the character device */
	cdev_init(&mdev->cdev, &media_devnode_fops);
	mdev->cdev.owner = mdev->fops->owner;

	ret = cdev_add(&mdev->cdev, MKDEV(MAJOR(media_dev_t), mdev->minor), 1);
	if (ret < 0) {
		printk(KERN_ERR "%s: cdev_add failed\n", __func__);
		goto error;
	}

	/* Part 3: Register the media device */
	mdev->dev.bus = &media_bus_type;
	mdev->dev.devt = MKDEV(MAJOR(media_dev_t), mdev->minor);
	mdev->dev.release = media_devnode_release;
	if (mdev->parent)
		mdev->dev.parent = mdev->parent;
	dev_set_name(&mdev->dev, "media%d", mdev->minor);
	ret = device_register(&mdev->dev);
	if (ret < 0) {
		printk(KERN_ERR "%s: device_register failed\n", __func__);
		goto error;
	}

	/* Part 4: Activate this minor. The char device can now be used. */
	set_bit(MEDIA_FLAG_REGISTERED, &mdev->flags);

	return 0;

error:
	cdev_del(&mdev->cdev);
	clear_bit(mdev->minor, media_devnode_nums);
	return ret;
}
Example #22
static int __init test_find_next_zero_bit(const void *bitmap, unsigned long len)
{
	unsigned long i, cnt;
	ktime_t time;

	time = ktime_get();
	for (cnt = i = 0; i < BITMAP_LEN; cnt++)
		i = find_next_zero_bit(bitmap, len, i) + 1;
	time = ktime_get() - time;
	pr_err("find_next_zero_bit: %18llu ns, %6ld iterations\n", time, cnt);

	return 0;
}
Example #23
static u32 alloc_index(struct rxe_pool *pool)
{
    u32 index;
    u32 range = pool->max_index - pool->min_index + 1;

    index = find_next_zero_bit(pool->table, range, pool->last);
    if (index >= range)
        index = find_first_zero_bit(pool->table, range);

    set_bit(index, pool->table);
    pool->last = index;
    return index + pool->min_index;
}
unsigned long __init_new_context(void)
{
	unsigned long ctx = next_mmu_context;

	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;

	return ctx;
}
static uint32_t stm_channel_alloc(uint32_t off)
{
	struct stm_drvdata *drvdata = stmdrvdata;
	uint32_t ch;

	do {
		ch = find_next_zero_bit(drvdata->chs.bitmap,
					NR_STM_CHANNEL, off);
	} while ((ch < NR_STM_CHANNEL) &&
		 test_and_set_bit(ch, drvdata->chs.bitmap));

	return ch;
}
Example #26
File: bbuild.c Project: jebtang/NOVA
static int __nova_build_blocknode_map(struct super_block *sb,
	unsigned long *bitmap, unsigned long bsize, unsigned long scale)
{
	struct nova_sb_info *sbi = NOVA_SB(sb);
	struct free_list *free_list;
	unsigned long next = 0;
	unsigned long low = 0;
	unsigned long start, end;
	int cpuid = 0;

	free_list = nova_get_free_list(sb, cpuid);
	start = free_list->block_start;
	end = free_list->block_end + 1;
	while (1) {
		next = find_next_zero_bit(bitmap, end, start);
		if (next == bsize)
			break;
		if (next == end) {
			if (cpuid == sbi->cpus - 1)
				cpuid = SHARED_CPU;
			else
				cpuid++;
			free_list = nova_get_free_list(sb, cpuid);
			start = free_list->block_start;
			end = free_list->block_end + 1;
			continue;
		}

		low = next;
		next = find_next_bit(bitmap, end, next);
		if (nova_insert_blocknode_map(sb, cpuid,
				low << scale , (next << scale) - 1)) {
			nova_dbg("Error: could not insert %lu - %lu\n",
				low << scale, ((next << scale) - 1));
		}
		start = next;
		if (next == bsize)
			break;
		if (next == end) {
			if (cpuid == sbi->cpus - 1)
				cpuid = SHARED_CPU;
			else
				cpuid++;
			free_list = nova_get_free_list(sb, cpuid);
			start = free_list->block_start;
			end = free_list->block_end + 1;
		}
	}
	return 0;
}
Example #27
/**
 * dma_contiguous_isolate() - isolate contiguous memory from the page allocator
 * @dev: Pointer to device which owns the contiguous memory
 *
 * This function isolates contiguous memory from the page allocator. If some of
 * the contiguous memory is allocated, it is reclaimed.
 */
int dma_contiguous_isolate(struct device *dev)
{
	struct cma *cma = dev_get_cma_area(dev);
	int ret;
	int idx;

	if (!cma)
		return -ENODEV;

	if (cma->count == 0)
		return 0;

	mutex_lock(&cma_mutex);

	if (cma->isolated) {
		mutex_unlock(&cma_mutex);
		dev_err(dev, "Already isolated!\n");
		return 0;
	}

	idx = find_first_zero_bit(cma->bitmap, cma->count);
	while (idx < cma->count) {
		int idx_set;

		idx_set = find_next_bit(cma->bitmap, cma->count, idx);
		do {
			ret = alloc_contig_range(cma->base_pfn + idx,
						cma->base_pfn + idx_set,
						MIGRATE_CMA);
		} while (ret == -EBUSY);

		if (ret < 0) {
			mutex_unlock(&cma_mutex);
			dma_contiguous_deisolate_until(dev, idx_set);
			dev_err(dev, "Failed to isolate %#lx@%#010llx (%d).\n",
				(idx_set - idx) * PAGE_SIZE,
				PFN_PHYS(cma->base_pfn + idx), ret);
			return ret;
		}

		idx = find_next_zero_bit(cma->bitmap, cma->count, idx_set);
	}

	cma->isolated = true;

	mutex_unlock(&cma_mutex);

	return 0;
}
Example #28
int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid)
{
	int ret __maybe_unused;
	if (pool->used == pool->total)
		return -ENOMEM;
	*fqid = pool->fqid_base + pool->next;
	ret = test_and_set_bit(pool->next, pool->bits);
	BUG_ON(ret);
	if (++pool->used == pool->total)
		return 0;
	pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next);
	if (pool->next >= pool->total)
		pool->next = find_first_zero_bit(pool->bits, pool->total);
	BUG_ON(pool->next >= pool->total);
	return 0;
}
Example #29
File: blk-tag.c Project: E-LLP/n900
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it is the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth, offset;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (rq_is_sync(rq))
		offset = 0;
	else
		offset = max_depth >> 2;

	do {
		tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
		if (tag >= max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blkdev_dequeue_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
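The sync reservation is done purely by the search offset: async requests start a quarter of the way into the map, so they can never occupy tags 0 .. max_depth/4 - 1. A toy user-space rerun of that idea (hypothetical numbers, a single word of tag bits):

#include <stdio.h>

int main(void)
{
	unsigned int max_depth = 32, offset = max_depth >> 2;	/* 8 */
	unsigned long map = 0;

	/* async flood: async allocations search upward from 'offset',
	 * so they can only ever occupy tags 8..31 */
	for (unsigned int t = offset; t < max_depth; t++)
		map |= 1UL << t;

	/* a sync request searches from 0 and still finds a free tag */
	for (unsigned int t = 0; t < max_depth; t++) {
		if (!(map & (1UL << t))) {
			printf("sync tag %u\n", t);	/* prints: sync tag 0 */
			break;
		}
	}
	return 0;
}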
Example #30
File: ipack.c Project: AllenWeb/linux
static int ipack_assign_bus_number(void)
{
	int busnum;

	mutex_lock(&ipack_mutex);
	busnum = find_next_zero_bit(busmap.busmap, IPACK_MAXBUS, 1);

	if (busnum >= IPACK_MAXBUS) {
		pr_err("too many buses\n");
		busnum = -1;
		goto error_find_busnum;
	}

	set_bit(busnum, busmap.busmap);

error_find_busnum:
	mutex_unlock(&ipack_mutex);
	return busnum;
}