Example #1
static void __init test_zero_clear(void)
{
	DECLARE_BITMAP(bmap, 1024);

	/* Known way to set all bits */
	memset(bmap, 0xff, 128);

	expect_eq_pbl("0-22", bmap, 23);
	expect_eq_pbl("0-1023", bmap, 1024);

	/* single-word bitmaps */
	bitmap_clear(bmap, 0, 9);
	expect_eq_pbl("9-1023", bmap, 1024);

	bitmap_zero(bmap, 35);
	expect_eq_pbl("64-1023", bmap, 1024);

	/* cross-boundary operations */
	bitmap_clear(bmap, 79, 19);
	expect_eq_pbl("64-78,98-1023", bmap, 1024);

	bitmap_zero(bmap, 115);
	expect_eq_pbl("128-1023", bmap, 1024);

	/* Zeroing entire area */
	bitmap_zero(bmap, 1024);
	expect_eq_pbl("", bmap, 1024);
}
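A minimal userspace sketch (my code, not from the kernel; names are mine) of why the test above expects "64-1023" after bitmap_zero(bmap, 35): bitmap_zero() clears BITS_TO_LONGS(nbits) whole words with memset(), so 35 bits round up to one full 64-bit word and 115 bits round up to two (bits 0-127), whereas bitmap_clear() is bit-exact, as the "9-1023" expectation shows.

#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Word-granular zeroing, modeled on the kernel's bitmap_zero() */
static void zero_words(unsigned long *dst, unsigned int nbits)
{
	memset(dst, 0, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}

static int test_bit(const unsigned long *map, unsigned int bit)
{
	return (map[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1;
}

int main(void)
{
	unsigned long bmap[BITS_TO_LONGS(1024)];

	memset(bmap, 0xff, sizeof(bmap));	/* set all bits */
	zero_words(bmap, 35);
	/* On a 64-bit machine: bits 35 and 63 cleared, bit 64 still set */
	printf("bit35=%d bit63=%d bit64=%d\n",
	       test_bit(bmap, 35), test_bit(bmap, 63), test_bit(bmap, 64));
	return 0;
}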
Example #2
/* receive and process one packet within the sequence number window.
 *
 * returns:
 *  1 if the window was moved (either new or very old)
 *  0 if the window was not moved/shifted.
 */
int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
			  int32_t seq_num_diff, int set_mark)
{
	struct batadv_priv *bat_priv = priv;

	/* sequence number is slightly older. We already got a sequence number
	 * higher than this one, so we just mark it.
	 */
	if (seq_num_diff <= 0 && seq_num_diff > -BATADV_TQ_LOCAL_WINDOW_SIZE) {
		if (set_mark)
			batadv_set_bit(seq_bits, -seq_num_diff);
		return 0;
	}

	/* sequence number is slightly newer, so we shift the window and
	 * set the mark if required
	 */
	if (seq_num_diff > 0 && seq_num_diff < BATADV_TQ_LOCAL_WINDOW_SIZE) {
		batadv_bitmap_shift_left(seq_bits, seq_num_diff);

		if (set_mark)
			batadv_set_bit(seq_bits, 0);
		return 1;
	}

	/* sequence number is much newer, probably missed a lot of packets */
	if (seq_num_diff >= BATADV_TQ_LOCAL_WINDOW_SIZE &&
	    seq_num_diff < BATADV_EXPECTED_SEQNO_RANGE) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "We missed a lot of packets (%i) !\n",
			   seq_num_diff - 1);
		bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
		if (set_mark)
			batadv_set_bit(seq_bits, 0);
		return 1;
	}

	/* received a much older packet. The other host either restarted
	 * or the old packet got delayed somewhere in the network. The
	 * packet should be dropped without calling this function if the
	 * seqno window is protected.
	 */
	if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE ||
	    seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Other host probably restarted!\n");

		bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
		if (set_mark)
			batadv_set_bit(seq_bits, 0);

		return 1;
	}

	/* never reached */
	return 0;
}
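A self-contained userspace model of the four seq_num_diff regimes handled above. My simplifications: the window is assumed to be 64 bits wide so one uint64_t holds the whole bitmap, and the two "far out of range" branches are folded into a single reset, since both zero the window and return 1; all names are stand-ins, not batman-adv API.

#include <stdio.h>
#include <stdint.h>

#define WINDOW_SIZE 64

static int window_get_packet(uint64_t *bits, int32_t diff, int set_mark)
{
	/* slightly older: we already saw something newer, just mark it */
	if (diff <= 0 && diff > -WINDOW_SIZE) {
		if (set_mark)
			*bits |= 1ULL << -diff;
		return 0;
	}
	/* slightly newer: shift the window, mark the newest slot */
	if (diff > 0 && diff < WINDOW_SIZE) {
		*bits <<= diff;
		if (set_mark)
			*bits |= 1ULL;
		return 1;
	}
	/* much newer, or much older (peer restart): reset the window */
	*bits = set_mark ? 1ULL : 0;
	return 1;
}

int main(void)
{
	uint64_t bits = 0;

	window_get_packet(&bits, 1, 1);		/* in-order packet */
	window_get_packet(&bits, 3, 1);		/* two packets missed */
	window_get_packet(&bits, -1, 1);	/* late duplicate of seqno-1 */
	printf("window = %#llx\n", (unsigned long long)bits);	/* 0xb */
	return 0;
}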
Example #3
static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	return 0;
}
Example #4
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_tgt_dev *dev = rrpc->dev;
	struct rrpc_block *rblk;
	int is_gc = flags & NVM_IOTYPE_GC;

	spin_lock(&rlun->lock);
	if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
		pr_err("nvm: rrpc: cannot give block to non GC request\n");
		spin_unlock(&rlun->lock);
		return NULL;
	}

	rblk = __rrpc_get_blk(rrpc, rlun);
	if (!rblk) {
		pr_err("nvm: rrpc: cannot get new block\n");
		spin_unlock(&rlun->lock);
		return NULL;
	}
	spin_unlock(&rlun->lock);

	bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}
Example #5
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->max_index = max;
	pool->min_index = min;

	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->table = kmalloc(size, GFP_KERNEL);
	if (!pool->table) {
		err = -ENOMEM;
		goto out;
	}

	pool->table_size = size;
	bitmap_zero(pool->table, max - min + 1);

out:
	return err;
}
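A userspace sketch of the allocate-then-zero pattern above (my code and index values, not the rxe driver): kmalloc() returns uninitialized memory, so a freshly allocated bitmap must be cleared before use, and BITS_TO_LONGS() converts a bit count into the number of longs backing it. Newer kernels fold the two steps into bitmap_zalloc().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int min = 16, max = 271;	/* hypothetical index range */
	size_t size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	unsigned long *table = malloc(size);	/* kmalloc(size, GFP_KERNEL) */

	if (!table)
		return 1;
	memset(table, 0, size);			/* bitmap_zero(table, ...) */
	printf("%u indices -> %zu bytes of bitmap\n", max - min + 1, size);
	free(table);
	return 0;
}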
Example #6
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
	if (ioc) {
		atomic_long_set(&ioc->refcount, 1);
		atomic_set(&ioc->nr_tasks, 1);
		spin_lock_init(&ioc->lock);
		bitmap_zero(ioc->ioprio_changed, IOC_IOPRIO_CHANGED_BITS);
		ioc->ioprio = 0;
		ioc->last_waited = 0; /* doesn't matter... */
		ioc->nr_batch_requests = 0; /* because this is 0 */
		INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ioc->cic_list);
		INIT_RADIX_TREE(&ioc->bfq_radix_root, GFP_ATOMIC | __GFP_HIGH);
		INIT_HLIST_HEAD(&ioc->bfq_cic_list);
		ioc->ioc_data = NULL;
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
		ioc->cgroup_changed = 0;
#endif
	}

	return ioc;
}
Example #7
int regcache_set_reg_present(struct regmap *map, unsigned int reg)
{
	unsigned long *cache_present;
	unsigned int cache_present_size;
	unsigned int nregs;
	int i;

	nregs = reg + 1;
	cache_present_size = BITS_TO_LONGS(nregs);
	cache_present_size *= sizeof(long);

	if (!map->cache_present) {
		cache_present = kmalloc(cache_present_size, GFP_KERNEL);
		if (!cache_present)
			return -ENOMEM;
		bitmap_zero(cache_present, nregs);
		map->cache_present = cache_present;
		map->cache_present_nbits = nregs;
	}

	if (nregs > map->cache_present_nbits) {
		cache_present = krealloc(map->cache_present,
					 cache_present_size, GFP_KERNEL);
		if (!cache_present)
			return -ENOMEM;
		for (i = 0; i < nregs; i++)
			if (i >= map->cache_present_nbits)
				clear_bit(i, cache_present);
		map->cache_present = cache_present;
		map->cache_present_nbits = nregs;
	}

	set_bit(reg, map->cache_present);
	return 0;
}
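A hedged userspace sketch (my names and types) of the grow-and-clear-tail pattern in the krealloc() branch above: realloc() preserves the old contents but leaves the new tail uninitialized, so only the bits beyond the old size are cleared. Like the kernel loop, this leaves bits past nbits in the last word unspecified; they are never consulted.

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct bitmap {
	unsigned long *bits;
	unsigned int nbits;
};

static void clear_bit(unsigned int bit, unsigned long *map)
{
	map[bit / BITS_PER_LONG] &= ~(1UL << (bit % BITS_PER_LONG));
}

/* Grow the map to cover at least nbits, clearing only the new bits */
static int bitmap_grow(struct bitmap *bm, unsigned int nbits)
{
	size_t size = BITS_TO_LONGS(nbits) * sizeof(long);
	unsigned long *p;
	unsigned int i;

	if (nbits <= bm->nbits)
		return 0;
	p = realloc(bm->bits, size);	/* krealloc(..., GFP_KERNEL) */
	if (!p)
		return -1;
	for (i = bm->nbits; i < nbits; i++)
		clear_bit(i, p);
	bm->bits = p;
	bm->nbits = nbits;
	return 0;
}

int main(void)
{
	struct bitmap bm = { .bits = NULL, .nbits = 0 };

	if (bitmap_grow(&bm, 100) || bitmap_grow(&bm, 300))
		return 1;
	printf("grown to %u bits\n", bm.nbits);
	free(bm.bits);
	return 0;
}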
Example #8
static int __init bb_init_module(void)
{
	int err;

	bb_dev_class = class_create(THIS_MODULE, "bb-dev");
	if (IS_ERR(bb_dev_class))
		return PTR_ERR(bb_dev_class);

	err = alloc_chrdev_region(&bb_devt, 0, BB_DEV_MAX, "bb");
	bb_minor = 0;
	bb_major = MAJOR(bb_devt);

	if (err < 0) {
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
			__FILE__);
		goto err_destroy_class;
	}

	bitmap_zero(present_devices, BB_DEV_MAX);

	/* This is here for demo purposes; activate it with the run_test param */
	if (run_test)
		test_thread = kthread_run(bb_test, NULL, "bb_test");

	printk("Kernel black board loaded %d %d.\n", bb_major, bb_minor);
	printk("See http://savannah.nongnu.org/projects/tsp for details.\n");
	return 0;

err_destroy_class:
	class_destroy(bb_dev_class);

	return err;
}
Example #9
int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits)
{
	unsigned a, b;

	bitmap_zero(maskp, nmaskbits);
	do {
		if (!isdigit(*bp))
			return -EINVAL;
		a = simple_strtoul(bp, (char **)&bp, 10);
		b = a;
		if (*bp == '-') {
			bp++;
			if (!isdigit(*bp))
				return -EINVAL;
			b = simple_strtoul(bp, (char **)&bp, 10);
		}
		if (!(a <= b))
			return -EINVAL;
		if (b >= nmaskbits)
			b = nmaskbits-1;
		while (a <= b) {
			set_bit(a, maskp);
			a++;
		}
		if (*bp == ',')
			bp++;
	} while (*bp != '\0' && *bp != '\n');
	return 0;
}
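A runnable userspace model of the parser above (my code; it returns -1 instead of -EINVAL and uses a single unsigned long as the mask for brevity). The "crop" behavior shows in the second range: any part of a range past nmaskbits is silently truncated rather than rejected.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

static int parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits)
{
	unsigned long a, b;

	*maskp = 0;
	do {
		if (!isdigit((unsigned char)*bp))
			return -1;
		a = strtoul(bp, (char **)&bp, 10);
		b = a;
		if (*bp == '-') {
			bp++;
			if (!isdigit((unsigned char)*bp))
				return -1;
			b = strtoul(bp, (char **)&bp, 10);
		}
		if (a > b)
			return -1;
		if (b >= (unsigned long)nmaskbits)
			b = nmaskbits - 1;	/* crop, don't reject */
		for (; a <= b; a++)
			*maskp |= 1UL << a;
		if (*bp == ',')
			bp++;
	} while (*bp != '\0' && *bp != '\n');
	return 0;
}

int main(void)
{
	unsigned long mask;

	parselist_crop("2-4,5-12", &mask, 8);	/* 5-12 cropped to 5-7 */
	printf("mask = %#lx\n", mask);		/* 0xfc */
	return 0;
}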
Example #10
static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
					unsigned int option_flags,
					struct acpi_resource_extended_irq *p)
{
	int i;
	pnp_irq_mask_t map;
	unsigned char flags;

	if (p->interrupt_count == 0)
		return;

	bitmap_zero(map.bits, PNP_IRQ_NR);
	for (i = 0; i < p->interrupt_count; i++) {
		if (p->interrupts[i]) {
			if (p->interrupts[i] < PNP_IRQ_NR)
				__set_bit(p->interrupts[i], map.bits);
			else
				dev_err(&dev->dev, "ignoring IRQ %d option "
					"(too large for %d entry bitmap)\n",
					p->interrupts[i], PNP_IRQ_NR);
		}
	}

	flags = irq_flags(p->triggering, p->polarity, p->sharable);
	pnp_register_irq_resource(dev, option_flags, &map, flags);
}
Example #11
void heci_device_init(struct heci_device *dev)
{
	/* setup our list array */
	INIT_LIST_HEAD(&dev->file_list);
	INIT_LIST_HEAD(&dev->device_list);
	mutex_init(&dev->device_lock);
	init_waitqueue_head(&dev->wait_hw_ready);
	init_waitqueue_head(&dev->wait_recvd_msg);
	init_waitqueue_head(&dev->wait_dma_ready);
	dev->dev_state = HECI_DEV_INITIALIZING;

	/* We need to reserve something, because client #0
	 * is reserved for HECI bus messages
	 */
	bitmap_zero(dev->host_clients_map, HECI_CLIENTS_MAX);
	dev->open_handle_count = 0;

	/*
	 * Reserving the first three client IDs
	 * 0: Reserved for HECI Bus Message communications
	 * 1: Reserved for Watchdog
	 * 2: Reserved for AMTHI
	 */
	bitmap_set(dev->host_clients_map, 0, 3);

	heci_io_list_init(&dev->read_list);
	heci_io_list_init(&dev->write_list);
	heci_io_list_init(&dev->write_waiting_list);
	heci_io_list_init(&dev->ctrl_wr_list);
	heci_io_list_init(&dev->ctrl_rd_list);

	INIT_DELAYED_WORK(&dev->timer_work, heci_timer);
	INIT_WORK(&dev->init_work, heci_host_client_init);
}
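A small userspace sketch (my code and constants; CLIENTS_MAX is a stand-in for HECI_CLIENTS_MAX) of the reserve-then-allocate pattern above: zero the client map, mark the reserved low IDs with bitmap_set(), and hand out the next free ID. The linear scan below is what the kernel's find_first_zero_bit() does, word-optimized.

#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define CLIENTS_MAX	256

static void set_bit(unsigned int bit, unsigned long *map)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int test_bit(const unsigned long *map, unsigned int bit)
{
	return (map[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1;
}

int main(void)
{
	unsigned long map[BITS_TO_LONGS(CLIENTS_MAX)];
	unsigned int i;

	memset(map, 0, sizeof(map));	/* bitmap_zero(map, CLIENTS_MAX) */
	for (i = 0; i < 3; i++)		/* bitmap_set(map, 0, 3) */
		set_bit(i, map);

	for (i = 0; i < CLIENTS_MAX && test_bit(map, i); i++)
		;
	printf("first free client id = %u\n", i);	/* 3 */
	return 0;
}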
Example #12
static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_lun *lun = rlun->parent;
	struct nvm_block *blk;
	struct rrpc_block *rblk;

	spin_lock(&lun->lock);
	blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
	if (!blk) {
		pr_err("nvm: rrpc: cannot get new block from media manager\n");
		spin_unlock(&lun->lock);
		return NULL;
	}

	rblk = rrpc_get_rblk(rlun, blk->id);
	list_add_tail(&rblk->list, &rlun->open_list);
	spin_unlock(&lun->lock);

	blk->priv = rblk;
	bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}
Example #13
int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
		     u32 reserved)
{
	int i;

	/* num must be a power of 2 */
	if (num != 1 << (ffs(num) - 1))
		return -EINVAL;

	alloc->last = 0;
	alloc->top  = 0;
	alloc->max  = num;
	alloc->mask = mask;
	spin_lock_init(&alloc->lock);
	alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof (long),
			       GFP_KERNEL);
	if (!alloc->table)
		return -ENOMEM;

	bitmap_zero(alloc->table, num);
	for (i = 0; i < reserved; ++i)
		set_bit(i, alloc->table);

	return 0;
}
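The power-of-two guard above is worth unpacking: ffs() returns the 1-based index of the lowest set bit, so 1 << (ffs(num) - 1) reconstructs num only when exactly one bit is set. A standalone sketch (my code; note ffs(0) == 0 makes the expression undefined for zero, so the loop avoids it, and num && !(num & (num - 1)) is the more common idiom):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int vals[] = { 1, 2, 3, 64, 100, 1024 };
	unsigned int i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		unsigned int num = vals[i];
		int pow2 = num == 1u << (ffs((int)num) - 1);

		printf("%4u: %s\n", num, pow2 ? "power of 2" : "rejected");
	}
	return 0;
}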
Example #14
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
			u32 reserved, u32 flags)
{
	int i;

	alloc->start = start;
	alloc->flags = flags;
	if (flags & C4IW_ID_TABLE_F_RANDOM)
		alloc->last = arc4random() % RANDOM_SKIP;
	else
		alloc->last = 0;
	alloc->max  = num;
	spin_lock_init(&alloc->lock);
	alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof(long),
				GFP_KERNEL);
	if (!alloc->table)
		return -ENOMEM;

	bitmap_zero(alloc->table, num);
	if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY))
		for (i = 0; i < reserved; ++i)
			set_bit(i, alloc->table);

	return 0;
}
Example #15
void xnsched_initq(struct xnsched_mlq *q)
{
	int prio;

	q->elems = 0;
	bitmap_zero(q->prio_map, XNSCHED_MLQ_LEVELS);

	for (prio = 0; prio < XNSCHED_MLQ_LEVELS; prio++)
		INIT_LIST_HEAD(q->heads + prio);
}
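A sketch (my names, not Xenomai API) of how a multi-level queue initialized as above is consumed: a level's bit stays set in the priority map while its list is non-empty, so picking the next level to serve is a single find-first-set instead of scanning every list. Here the lowest-numbered level wins, matching a find_first_bit()-style scan; whether that means highest or lowest priority is the scheduler's convention.

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define LEVELS 32	/* stand-in for XNSCHED_MLQ_LEVELS, fits in an int */

int main(void)
{
	unsigned int prio_map = 0;

	prio_map |= 1u << 7;	/* enqueue at level 7 */
	prio_map |= 1u << 3;	/* enqueue at level 3 */

	if (prio_map)
		printf("next level to serve: %d\n", ffs((int)prio_map) - 1);
	return 0;
}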
Example #16
static int __init find_bit_test(void)
{
	unsigned long nbits = BITMAP_LEN / SPARSE;

	pr_err("\nStart testing find_bit() with random-filled bitmap\n");

	get_random_bytes(bitmap, sizeof(bitmap));
	get_random_bytes(bitmap2, sizeof(bitmap2));

	test_find_next_bit(bitmap, BITMAP_LEN);
	test_find_next_zero_bit(bitmap, BITMAP_LEN);
	test_find_last_bit(bitmap, BITMAP_LEN);

	/*
	 * test_find_first_bit() may take some time, so
	 * traverse only part of bitmap to avoid soft lockup.
	 */
	test_find_first_bit(bitmap, BITMAP_LEN / 10);
	test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);

	pr_err("\nStart testing find_bit() with sparse bitmap\n");

	bitmap_zero(bitmap, BITMAP_LEN);
	bitmap_zero(bitmap2, BITMAP_LEN);

	while (nbits--) {
		__set_bit(prandom_u32() % BITMAP_LEN, bitmap);
		__set_bit(prandom_u32() % BITMAP_LEN, bitmap2);
	}

	test_find_next_bit(bitmap, BITMAP_LEN);
	test_find_next_zero_bit(bitmap, BITMAP_LEN);
	test_find_last_bit(bitmap, BITMAP_LEN);
	test_find_first_bit(bitmap, BITMAP_LEN);
	test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);

	/*
	 * Everything is OK. Return error just to let user run benchmark
	 * again without annoying rmmod.
	 */
	return -EINVAL;
}
Example #17
static void __vlan_flush(struct net_port_vlans *v)
{
	smp_wmb();
	v->pvid = 0;
	bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
	if (v->port_idx)
		rcu_assign_pointer(v->parent.port->vlan_info, NULL);
	else
		rcu_assign_pointer(v->parent.br->vlan_info, NULL);
	kfree_rcu(v, rcu);
}
Example #18
void
igmp_sn_init(void)
{
	memset(&g_mtb, 0, sizeof(struct mcast_table));
#if defined(CONFIG_RTL8367_API_8370)
	bitmap_zero(g_l2t_cache, RTK_MAX_NUM_OF_LEARN_LIMIT);
#endif
	setup_timer(&g_membership_expired_timer, on_membership_timer, 0);

	br_mcast_group_event_hook = rtl8367_mcast_group_event;
}
Example #19
/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared resources.
 */
static int ccp_assign_lsbs(struct ccp_device *ccp)
{
	DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
	DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
	int n_lsbs = 0;
	int bitno;
	int i, lsb_cnt;
	int rc = 0;

	bitmap_zero(lsb_pub, MAX_LSB_CNT);

	/* Create an aggregate bitmap to get a total count of available LSBs */
	for (i = 0; i < ccp->cmd_q_count; i++)
		bitmap_or(lsb_pub,
			  lsb_pub, ccp->cmd_q[i].lsbmask,
			  MAX_LSB_CNT);

	n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);

	if (n_lsbs >= ccp->cmd_q_count) {
		/* We have enough LSBs to give every queue a private LSB.
		 * Brute force search to start with the queues that are more
		 * constrained in LSB choice. When an LSB is privately
		 * assigned, it is removed from the public mask.
		 * This is an ugly N squared algorithm with some optimization.
		 */
		for (lsb_cnt = 1;
		     n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
		     lsb_cnt++) {
			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
							  lsb_pub);
			if (rc < 0)
				return -EINVAL;
			n_lsbs = rc;
		}
	}

	rc = 0;
	/* What's left of the LSBs, according to the public mask, now become
	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
	 * that can't be used as a shared resource, so mark the LSB slots for
	 * them as "in use".
	 */
	bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);

	bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
	while (bitno < MAX_LSB_CNT) {
		bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
		bitmap_set(qlsb, bitno, 1);
		bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
	}

	return rc;
}
Example #20
void
igmp_init(void)
{
	spin_lock_init(&g_lut_lock);
	spin_lock_init(&g_mtb_lock);
	memset(&g_mtb, 0, sizeof(struct mcast_table));
#if defined(CONFIG_RTL8367_API_8370)
	bitmap_zero(g_l2t_cache, RTK_MAX_NUM_OF_LEARN_LIMIT);
#endif
	setup_timer(&g_membership_expired_timer, on_membership_timer, 0);
}
Example #21
static void __vlan_flush(struct net_port_vlans *v)
{
	smp_wmb();
	v->pvid = 0;
	bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
	if (v->port_idx)
		RCU_INIT_POINTER(v->parent.port->vlan_info, NULL);
	else
		RCU_INIT_POINTER(v->parent.br->vlan_info, NULL);
	kfree_rcu(v, rcu);
}
Example #22
void mifintrbit_init(struct mifintrbit *intr, struct scsc_mif_abs *mif)
{
	int i;

	spin_lock_init(&intr->spinlock);
	/* Set all handlers to the default before hooking the hardware interrupt */
	for (i = 0; i < MIFINTRBIT_NUM_INT; i++) {
		intr->mifintrbit_irq_handler[i] = mifintrbit_default_handler;
	}

	/* reset bitmaps */
	bitmap_zero(intr->bitmap_tohost, MIFINTRBIT_NUM_INT);
	bitmap_zero(intr->bitmap_fromhost_r4, MIFINTRBIT_NUM_INT);
	bitmap_zero(intr->bitmap_fromhost_m4, MIFINTRBIT_NUM_INT);

	/* register isr with mif abstraction */
	mif->irq_reg_handler(mif, mifiintrman_isr, (void *)intr);

	/* cache mif */
	intr->mif = mif;
}
Example #23
/*
 * hub_pio_init  -  PIO-related hub initialization
 *
 * @cnode:	compact node ID of the hub to initialize
 */
void hub_pio_init(cnodeid_t cnode)
{
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
	unsigned i;

	/* initialize big window piomaps for this hub */
	bitmap_zero(hub_data(cnode)->h_bigwin_used, HUB_NUM_BIG_WINDOW);
	for (i = 0; i < HUB_NUM_BIG_WINDOW; i++)
		IIO_ITTE_DISABLE(nasid, i);

	hub_set_piomode(nasid);
}
Example #24
void input_key_get_status(unsigned long *keys, int bits)
{
	struct input_device *idev;

	bitmap_zero(keys, bits);

	if (bits > KEY_MAX)
		bits = KEY_MAX;

	list_for_each_entry(idev, &input_devices, list)
		bitmap_or(keys, keys, idev->keys, bits);
}
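A userspace sketch (my code) of the zero-then-accumulate pattern above: start from an all-clear result bitmap and OR in each device's bitmap word by word, which is what bitmap_or() does under the hood.

#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define NBITS 128

static void bitmap_or_words(unsigned long *dst, const unsigned long *src,
			    unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < BITS_TO_LONGS(nbits); i++)
		dst[i] |= src[i];
}

int main(void)
{
	unsigned long keys[BITS_TO_LONGS(NBITS)];
	unsigned long dev_a[BITS_TO_LONGS(NBITS)] = { 0x5 };	/* bits 0,2 */
	unsigned long dev_b[BITS_TO_LONGS(NBITS)] = { 0x9 };	/* bits 0,3 */

	memset(keys, 0, sizeof(keys));	/* bitmap_zero(keys, NBITS) */
	bitmap_or_words(keys, dev_a, NBITS);
	bitmap_or_words(keys, dev_b, NBITS);
	printf("aggregate word 0 = %#lx\n", keys[0]);	/* 0xd */
	return 0;
}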
Example #25
int __devinit c2_init_pd_table(struct c2_dev *c2dev)
{
	c2dev->pd_table.last = 0;
	c2dev->pd_table.max = c2dev->props.max_pd;
	spin_lock_init(&c2dev->pd_table.lock);
	c2dev->pd_table.table = kmalloc(BITS_TO_LONGS(c2dev->props.max_pd) *
					sizeof(long), GFP_KERNEL);
	if (!c2dev->pd_table.table)
		return -ENOMEM;
	bitmap_zero(c2dev->pd_table.table, c2dev->props.max_pd);
	return 0;
}
Example #26
static void test_size(int nbits)
{
	BITMAP_DECLARE(bitmap, nbits);
	int i;

	bitmap_zero(bitmap, nbits);
	ok_eq(bitmap_ffs(bitmap, 0, nbits), nbits);

	for (i = 0; i < nbits; i++) {
		bitmap_zero(bitmap, nbits);
		bitmap_set_bit(bitmap, i);

		ok_eq(bitmap_ffs(bitmap, 0, nbits), i);
		ok_eq(bitmap_ffs(bitmap, i, nbits), i);
		ok_eq(bitmap_ffs(bitmap, i + 1, nbits), nbits);

		bitmap_zero(bitmap, nbits);
		bitmap_fill_range(bitmap, i, nbits);

		ok_eq(bitmap_ffs(bitmap, 0, nbits), i);
		ok_eq(bitmap_ffs(bitmap, i, nbits), i);
		ok_eq(bitmap_ffs(bitmap, i + 1, nbits), (i + 1));
		ok_eq(bitmap_ffs(bitmap, nbits - 1, nbits), (nbits - 1));

		if (i > 0) {
			ok_eq(bitmap_ffs(bitmap, 0, i), i);
			ok_eq(bitmap_ffs(bitmap, 0, i - 1), (i - 1));
		}

		if (i > 0) {
			bitmap_zero(bitmap, nbits);
			bitmap_fill_range(bitmap, 0, i);

			ok_eq(bitmap_ffs(bitmap, 0, nbits), 0);
			ok_eq(bitmap_ffs(bitmap, i - 1, nbits), (i - 1));
			ok_eq(bitmap_ffs(bitmap, i, nbits), nbits);
		}
	}
}
Example #27
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_arm_reset_debug_ptr(vcpu);

	return 0;
}
Example #28
/**
 * media_entity_graph_walk_start - Start walking the media graph at a given entity
 * @graph: Media graph structure that will be used to walk the graph
 * @entity: Starting entity
 *
 * This function initializes the graph traversal structure to walk the entities
 * graph starting at the given entity. The traversal structure must not be
 * modified by the caller during graph traversal. When done the structure can
 * safely be freed.
 */
void media_entity_graph_walk_start(struct media_entity_graph *graph,
				   struct media_entity *entity)
{
	graph->top = 0;
	graph->stack[graph->top].entity = NULL;
	bitmap_zero(graph->entities, MEDIA_ENTITY_ENUM_MAX_ID);

	if (WARN_ON(entity->id >= MEDIA_ENTITY_ENUM_MAX_ID))
		return;

	__set_bit(entity->id, graph->entities);
	stack_push(graph, entity);
}
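The structure initialized above (explicit stack plus a zeroed "visited" bitmap) is a plain iterative depth-first search in which each entity is pushed at most once. A minimal userspace analogue over a made-up adjacency matrix (my graph and names, not the media framework):

#include <stdio.h>

#define NENT 6	/* number of graph entities */

int main(void)
{
	static const int adj[NENT][NENT] = {
		{0,1,1,0,0,0}, {1,0,0,1,0,0}, {1,0,0,0,1,0},
		{0,1,0,0,0,0}, {0,0,1,0,0,1}, {0,0,0,0,1,0},
	};
	int stack[NENT], top = 0;
	unsigned int visited = 0;	/* one bit per entity */
	int start = 0, v, i;

	visited |= 1u << start;		/* __set_bit(entity->id, ...) */
	stack[top++] = start;		/* stack_push(graph, entity) */

	while (top) {
		v = stack[--top];
		printf("visit %d\n", v);
		for (i = 0; i < NENT; i++)
			if (adj[v][i] && !(visited & (1u << i))) {
				visited |= 1u << i;	/* push only once */
				stack[top++] = i;
			}
	}
	return 0;
}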
Example #29
static void __init test_copy(void)
{
	DECLARE_BITMAP(bmap1, 1024);
	DECLARE_BITMAP(bmap2, 1024);

	bitmap_zero(bmap1, 1024);
	bitmap_zero(bmap2, 1024);

	/* single-word bitmaps */
	bitmap_set(bmap1, 0, 19);
	bitmap_copy(bmap2, bmap1, 23);
	expect_eq_pbl("0-18", bmap2, 1024);

	bitmap_set(bmap2, 0, 23);
	bitmap_copy(bmap2, bmap1, 23);
	expect_eq_pbl("0-18", bmap2, 1024);

	/* multi-word bitmaps */
	bitmap_set(bmap1, 0, 109);
	bitmap_copy(bmap2, bmap1, 1024);
	expect_eq_pbl("0-108", bmap2, 1024);

	bitmap_fill(bmap2, 1024);
	bitmap_copy(bmap2, bmap1, 1024);
	expect_eq_pbl("0-108", bmap2, 1024);

	/* the following tests assume a 32- or 64-bit arch (even 128b
	 * if we care)
	 */

	bitmap_fill(bmap2, 1024);
	bitmap_copy(bmap2, bmap1, 109);  /* ... but 0-padded til word length */
	expect_eq_pbl("0-108,128-1023", bmap2, 1024);

	bitmap_fill(bmap2, 1024);
	bitmap_copy(bmap2, bmap1, 97);  /* ... but aligned on word length */
	expect_eq_pbl("0-108,128-1023", bmap2, 1024);
}
Example #30
/*
 * apic_is_clustered_box() -- Check if we can expect good TSC
 *
 * Thus far, the major user of this is IBM's Summit2 series:
 *
 * Clustered boxes may have unsynced TSC problems if they are
 * multi-chassis. Use available data to take a good guess.
 * If in doubt, go HPET.
 */
__cpuinit int apic_is_clustered_box(void)
{
	int i, clusters, zeros;
	unsigned id;
	u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

	for (i = 0; i < NR_CPUS; i++) {
		/* are we being called early in kernel startup? */
		if (bios_cpu_apicid) {
			id = bios_cpu_apicid[i];
		}
		else if (i < nr_cpu_ids) {
			if (cpu_present(i))
				id = per_cpu(x86_bios_cpu_apicid, i);
			else
				continue;
		}
		else
			break;

		if (id != BAD_APICID)
			__set_bit(APIC_CLUSTERID(id), clustermap);
	}

	/* Problem:  Partially populated chassis may not have CPUs in some of
	 * the APIC clusters they have been allocated.  Only present CPUs have
	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
	 * Since clusters are allocated sequentially, count zeros only if
	 * they are bounded by ones.
	 */
	clusters = 0;
	zeros = 0;
	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
		if (test_bit(i, clustermap)) {
			clusters += 1 + zeros;
			zeros = 0;
		} else
			++zeros;
	}

	/*
	 * If clusters > 2, then should be multi-chassis.
	 * May have to revisit this when multi-core + hyperthreaded CPUs come
	 * out, but AFAIK this will work even for them.
	 */
	return (clusters > 2);
}
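A userspace model (my code and sample data) of the gap-tolerant counting loop above: a run of zero bits is added to the total only once a later one-bit bounds it, so interior gaps count as allocated clusters while trailing empty clusters are ignored.

#include <stdio.h>

int main(void)
{
	const int map[] = { 1, 0, 1, 1, 0, 0, 0 };	/* sample clustermap */
	int clusters = 0, zeros = 0;
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (map[i]) {
			clusters += 1 + zeros;	/* count bounded zeros */
			zeros = 0;
		} else {
			++zeros;
		}
	}
	/* 4: three set bits plus the interior gap; trailing zeros dropped */
	printf("clusters = %d\n", clusters);
	return 0;
}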