Example #1
/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(unsigned long *nodes)
{
	struct zonelist *zl;
	int num, max, nd;

	max = 1 + MAX_NR_ZONES * bitmap_weight(nodes, MAX_NUMNODES);
	zl = kmalloc(sizeof(void *) * max, GFP_KERNEL);
	if (!zl)
		return NULL;
	num = 0;
	for (nd = find_first_bit(nodes, MAX_NUMNODES);
	     nd < MAX_NUMNODES;
	     nd = find_next_bit(nodes, MAX_NUMNODES, 1+nd)) {
		int k;
		for (k = MAX_NR_ZONES-1; k >= 0; k--) {
			struct zone *z = &NODE_DATA(nd)->node_zones[k];
			if (!z->present_pages)
				continue;
			zl->zones[num++] = z;
			if (k > policy_zone)
				policy_zone = k;
		}
	}
	BUG_ON(num >= max);
	zl->zones[num] = NULL;
	return zl;
}
Example #2
File: dma_lib.c Project: VizXu/linux
static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type)
{
	int bit;
	int start, limit;

	switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) {
	case TXCHAN_EVT0:
		start = 0;
		limit = 10;
		break;
	case TXCHAN_EVT1:
		start = 10;
		limit = MAX_TXCH;
		break;
	default:
		start = 0;
		limit = MAX_TXCH;
		break;
	}
retry:
	bit = find_next_bit(txch_free, MAX_TXCH, start);
	if (bit >= limit)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, txch_free))
		goto retry;

	return bit;
}
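The retry loop above is a common lock-free claim pattern: find_next_bit() scans the bitmap without holding any lock, so another CPU may win the race for the same bit; test_and_clear_bit() is the atomic arbiter, and the loser simply rescans. A minimal user-space sketch of the same idea, using the GCC/Clang __atomic builtins on a single word (the names and the single-word limit are illustrative, not from the driver):

#include <limits.h>

#define NCHANS (sizeof(unsigned long) * CHAR_BIT)

static unsigned long chan_free = ~0UL;	/* one bit per free channel */

/* Claim any free channel; returns its index, or -1 if none is left. */
static int alloc_chan(void)
{
	for (;;) {
		unsigned long snap = __atomic_load_n(&chan_free, __ATOMIC_RELAXED);
		if (snap == 0)
			return -1;		/* nothing free */
		int bit = __builtin_ctzl(snap);	/* first set bit, as find_next_bit would find */
		unsigned long mask = 1UL << bit;
		/* Atomic test-and-clear: exactly one caller wins this bit. */
		if (__atomic_fetch_and(&chan_free, ~mask, __ATOMIC_ACQUIRE) & mask)
			return bit;
		/* Lost the race; rescan, like the driver's 'goto retry'. */
	}
}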
Example #3
static void
host_memory_backend_get_host_nodes(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *host_nodes = NULL;
    uint16List **node = &host_nodes;
    unsigned long value;

    value = find_first_bit(backend->host_nodes, MAX_NODES);
    if (value == MAX_NODES) {
        return;
    }

    *node = g_malloc0(sizeof(**node));
    (*node)->value = value;
    node = &(*node)->next;

    do {
        value = find_next_bit(backend->host_nodes, MAX_NODES, value + 1);
        if (value == MAX_NODES) {
            break;
        }

        *node = g_malloc0(sizeof(**node));
        (*node)->value = value;
        node = &(*node)->next;
    } while (true);

    visit_type_uint16List(v, &host_nodes, name, errp);
}
/* "unused": the lockres has no locks, is not on the dirty list,
 * has no inflight locks (in the gap between mastery and acquiring
 * the first lock), and has no bits in its refmap.
 * truly ready to be freed. */
int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
	int bit;

	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_has_locks(res))
		return 0;

	/* Locks are in the process of being created */
	if (res->inflight_locks)
		return 0;

	if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
		return 0;

	if (res->state & DLM_LOCK_RES_RECOVERING)
		return 0;

	/* Another node has this resource with this node as the master */
	bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
	if (bit < O2NM_MAX_NODES)
		return 0;

	return 1;
}
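The refmap scan above is the standard "any bit set?" idiom: find_next_bit() returns the bitmap size when it runs off the end, so a result below O2NM_MAX_NODES means some node still holds a reference. A short sketch of the equivalence (bitmap_empty() is the kernel helper that expresses the same test directly):

/* Sketch: true exactly when no bit in the first 'nbits' bits is set. */
static inline int map_is_empty(const unsigned long *map, unsigned int nbits)
{
	return find_next_bit(map, nbits, 0) >= nbits;
	/* equivalently: return bitmap_empty(map, nbits); */
}

Examples #9 and #22 below use the same idiom.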
Example #5
File: hostmem.c Project: AmesianX/panda
static void
host_memory_backend_get_host_nodes(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *host_nodes = NULL;
    uint16List **node = &host_nodes;
    unsigned long value;

    value = find_first_bit(backend->host_nodes, MAX_NODES);

    node = host_memory_append_node(node, value);

    if (value == MAX_NODES) {
        goto out;
    }

    do {
        value = find_next_bit(backend->host_nodes, MAX_NODES, value + 1);
        if (value == MAX_NODES) {
            break;
        }

        node = host_memory_append_node(node, value);
    } while (true);

out:
    visit_type_uint16List(v, name, &host_nodes, errp);
}
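host_memory_append_node() is not shown in this snippet, but Example #3 open-codes the same append step, so the helper presumably wraps the allocate-link-advance sequence along these lines (a sketch, not verified against that QEMU tree):

static uint16List **host_memory_append_node(uint16List **node,
                                            unsigned long value)
{
    *node = g_malloc0(sizeof(**node));
    (*node)->value = value;
    return &(*node)->next;
}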
Example #6
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	if (vcpu->requests) {
		if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
			smp_mb();
			update_timer_ints(vcpu);
		}
	}

	priority = __ffs(*pending);
	while (priority <= BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}
Example #7
File: mpm.c Project: Arunvasu/taoshan
void msm_mpm_exit_sleep(bool from_idle)
{
	unsigned long pending;
	int i;
	int k;

	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
		pending = msm_mpm_read(MSM_MPM_STATUS_REG_PENDING, i);

		if (MSM_MPM_DEBUG_PENDING_IRQ & msm_mpm_debug_mask)
			pr_info("%s: pending.%d: 0x%08lx", __func__,
					i, pending);

		k = find_first_bit(&pending, 32);
		while (k < 32) {
			unsigned int mpm_irq = 32 * i + k;
			unsigned int apps_irq = msm_mpm_get_irq_m2a(mpm_irq);
			struct irq_desc *desc = apps_irq ?
				irq_to_desc(apps_irq) : NULL;

			if (desc && !irqd_is_level_type(&desc->irq_data)) {
				irq_set_pending(apps_irq);
				if (from_idle)
					check_irq_resend(desc, apps_irq);
			}

			k = find_next_bit(&pending, 32, k + 1);
		}
	}

	msm_mpm_clear();
}
Example #8
File: manager.c Project: 274914765/C
static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
{
    struct pnp_resource *pnp_res;
    struct resource *res;
    int i;

    /* IRQ priority: this table is good for i386 */
    static unsigned short xtab[16] = {
        5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2
    };

    pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, idx);
    if (!pnp_res) {
        dev_err(&dev->dev, "too many IRQ resources\n");
        /* pretend we were successful so at least the manager won't try again */
        return 1;
    }

    res = &pnp_res->res;

    /* check if this resource has been manually set, if so skip */
    if (!(res->flags & IORESOURCE_AUTO)) {
        dev_dbg(&dev->dev, "  irq %d already set to %d flags %#lx\n",
            idx, (int) res->start, res->flags);
        return 1;
    }

    /* set the initial values */
    pnp_res->index = idx;
    res->flags |= rule->flags | IORESOURCE_IRQ;
    res->flags &= ~IORESOURCE_UNSET;

    if (bitmap_empty(rule->map, PNP_IRQ_NR)) {
        res->flags |= IORESOURCE_DISABLED;
        dev_dbg(&dev->dev, "  irq %d disabled\n", idx);
        return 1;    /* skip disabled resource requests */
    }

    /* TBD: need check for >16 IRQ */
    res->start = find_next_bit(rule->map, PNP_IRQ_NR, 16);
    if (res->start < PNP_IRQ_NR) {
        res->end = res->start;
        dev_dbg(&dev->dev, "  assign irq %d %d\n", idx,
            (int) res->start);
        return 1;
    }
    for (i = 0; i < 16; i++) {
        if (test_bit(xtab[i], rule->map)) {
            res->start = res->end = xtab[i];
            if (pnp_check_irq(dev, res)) {
                dev_dbg(&dev->dev, "  assign irq %d %d\n", idx,
                    (int) res->start);
                return 1;
            }
        }
    }
    dev_dbg(&dev->dev, "  couldn't assign irq %d\n", idx);
    return 0;
}
Example #9
static inline int __ocfs2_node_map_is_empty(struct ocfs2_node_map *map)
{
	int bit;
	bit = find_next_bit(map->map, map->num_nodes, 0);
	if (bit < map->num_nodes)
		return 0;
	return 1;
}
Example #10
/**
 * iio_simple_dummy_trigger_h() - the trigger handler function
 * @irq: the interrupt number
 * @p: private data - always a pointer to the poll func.
 *
 * This is the guts of buffered capture. On a trigger event occurring,
 * if the pollfunc is attached then this handler is called as a threaded
 * interrupt (and hence may sleep). It is responsible for grabbing data
 * from the device and pushing it into the associated buffer.
 */
static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
{
    struct iio_poll_func *pf = p;
    struct iio_dev *indio_dev = pf->indio_dev;
    struct iio_buffer *buffer = indio_dev->buffer;
    int len = 0;
    u16 *data;

    data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
    if (data == NULL)
        goto done;

    if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
        /*
         * Three common options here:
         * hardware scans: certain combinations of channels make
         *   up a fast read.  The capture will consist of all of them.
         *   Hence we just call the grab data function and fill the
         *   buffer without processing.
         * software scans: can be considered to be random access
         *   so efficient reading is just a case of minimal bus
         *   transactions.
         * software culled hardware scans:
         *   occasionally a driver may process the nearest hardware
         *   scan to avoid storing elements that are not desired. This
         *   is the fiddliest option by far.
         * Here let's pretend we have random access. And the values are
         * in the constant table fakedata.
         */
        int i, j;
        for (i = 0, j = 0;
                i < bitmap_weight(indio_dev->active_scan_mask,
                                  indio_dev->masklength);
                i++, j++) {
            j = find_next_bit(buffer->scan_mask,
                              indio_dev->masklength, j);
            /* random access read from the 'device' */
            data[i] = fakedata[j];
            len += 2;
        }
    }
    /* Store a timestamp at an 8-byte boundary */
    if (indio_dev->scan_timestamp)
        *(s64 *)(((phys_addr_t)data + len
                  + sizeof(s64) - 1) & ~(sizeof(s64) - 1))
            = iio_get_time_ns();
    buffer->access->store_to(buffer, (u8 *)data, pf->timestamp);

    kfree(data);

done:
    /*
     * Tell the core we are done with this trigger and ready for the
     * next one.
     */
    iio_trigger_notify_done(indio_dev->trig);

    return IRQ_HANDLED;
}
Example #11
static u16 fm10k_find_next_vlan(struct fm10k_intfc *interface, u16 vid)
{
	struct fm10k_hw *hw = &interface->hw;
	u16 default_vid = hw->mac.default_vid;
	u16 vid_limit = vid < default_vid ? default_vid : VLAN_N_VID;

	vid = find_next_bit(interface->active_vlans, vid_limit, ++vid);

	return vid;
}
Example #12
File: bridge.c Project: Domikk/libnl
static void dump_bitmap(struct nl_dump_params *p, const uint32_t *b)
{
	int i = -1, j, k;
	int start = -1, prev = -1;
	int done, found = 0;

	for (k = 0; k < RTNL_LINK_BRIDGE_VLAN_BITMAP_LEN; k++) {
		int base_bit;
		uint32_t a = b[k];

		base_bit = k * 32;
		i = -1;
		done = 0;
		while (!done) {
			j = find_next_bit(i, a);
			if (j > 0) {
				/* first hit of any bit */
				if (start < 0 && prev < 0) {
					start = prev = j - 1 + base_bit;
					goto next;
				}
				/* this bit is a continuation of prior bits */
				if (j - 2 + base_bit == prev) {
					prev++;
					goto next;
				}
			} else
				done = 1;

			if (start >= 0) {
				found++;
				if (done && k < RTNL_LINK_BRIDGE_VLAN_BITMAP_LEN - 1)
					break;

				nl_dump(p, " %d", start);
				if (start != prev)
					nl_dump(p, "-%d", prev);

				if (done)
					break;
			}
			if (j > 0)
				start = prev = j - 1 + base_bit;
next:
			i = j;
		}
	}
	if (!found)
		nl_dump(p, " <none>");

	return;
}
Example #13
/* pasemi_dma_alloc_fun - Allocate a function engine
 *
 * Allocates a function engine to use for crypto/checksum offload
 * Returns allocated engine (0-8), < 0 on error.
 */
int pasemi_dma_alloc_fun(void)
{
	int bit;

retry:
	bit = find_next_bit(fun_free, MAX_FLAGS, 0);
	if (bit >= MAX_FLAGS)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, fun_free))
		goto retry;

	return bit;
}
Example #14
static int __init test_find_next_bit(const void *bitmap, unsigned long len)
{
	unsigned long i, cnt;
	ktime_t time;

	time = ktime_get();
	for (cnt = i = 0; i < BITMAP_LEN; cnt++)
		i = find_next_bit(bitmap, BITMAP_LEN, i) + 1;
	time = ktime_get() - time;
	pr_err("find_next_bit:      %18llu ns, %6ld iterations\n", time, cnt);

	return 0;
}
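For readers without the kernel tree at hand, the contract being benchmarked is: return the index of the first set bit at or after offset, or size if no set bit remains. A naive bit-at-a-time reference implementation (the kernel's real one skips whole zero words at a time, but the semantics match):

#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static unsigned long ref_find_next_bit(const unsigned long *addr,
                                       unsigned long size,
                                       unsigned long offset)
{
	while (offset < size) {
		unsigned long word = addr[offset / BITS_PER_LONG];

		if (word & (1UL << (offset % BITS_PER_LONG)))
			return offset;	/* found a set bit */
		offset++;
	}
	return size;			/* none left */
}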
Example #15
static int pnp_assign_irq(struct pnp_dev * dev, struct pnp_irq *rule, int idx)
{
	unsigned long *start, *end, *flags;
	int i;

	/* IRQ priority: this table is good for i386 */
	static unsigned short xtab[16] = {
		5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2
	};

	if (!dev || !rule)
		return -EINVAL;

	if (idx >= PNP_MAX_IRQ) {
		pnp_err("More than 2 irqs is incompatible with pnp specifications.");
		/* pretend we were successful so at least the manager won't try again */
		return 1;
	}

	/* check if this resource has been manually set, if so skip */
	if (!(dev->res.irq_resource[idx].flags & IORESOURCE_AUTO))
		return 1;

	start = &dev->res.irq_resource[idx].start;
	end = &dev->res.irq_resource[idx].end;
	flags = &dev->res.irq_resource[idx].flags;

	/* set the initial values */
	*flags |= rule->flags | IORESOURCE_IRQ;
	*flags &=  ~IORESOURCE_UNSET;

	if (bitmap_empty(rule->map, PNP_IRQ_NR)) {
		*flags |= IORESOURCE_DISABLED;
		return 1; /* skip disabled resource requests */
	}

	/* TBD: need check for >16 IRQ */
	*start = find_next_bit(rule->map, PNP_IRQ_NR, 16);
	if (*start < PNP_IRQ_NR) {
		*end = *start;
		return 1;
	}
	for (i = 0; i < 16; i++) {
		if(test_bit(xtab[i], rule->map)) {
			*start = *end = xtab[i];
			if(pnp_check_irq(dev, idx))
				return 1;
		}
	}
	return 0;
}
Example #16
/*
 * When the summary IRQ is raised, any number of GPIO lines may be high.
 * It is the job of the summary handler to find all those GPIO lines
 * which have been set as summary IRQ lines and which are triggered,
 * and to call their interrupt handlers.
 */
static void msm_summary_irq_handler(unsigned int irq,
				struct irq_desc *desc)
{
	unsigned long i;

	for (i = find_first_bit(enabled_irqs, NR_MSM_GPIOS);
	     i < NR_MSM_GPIOS;
	     i = find_next_bit(enabled_irqs, NR_MSM_GPIOS, i + 1)) {
		if (readl(GPIO_INTR_STATUS(i)) & 0x01) {
			generic_handle_irq(gpio_to_irq(i));
		}
	}
	desc->chip->ack(irq);
}
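The find_first_bit()/find_next_bit() for-loop used here (and in Example #1) is common enough that the kernel wraps it in the for_each_set_bit() macro, which expands to exactly this shape:

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size)); \
	     (bit) < (size); \
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

With it, the handler's loop could be written as for_each_set_bit(i, enabled_irqs, NR_MSM_GPIOS) { ... } with no change in behavior.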
Example #17
File: bbuild.c Project: jebtang/NOVA
static int __nova_build_blocknode_map(struct super_block *sb,
	unsigned long *bitmap, unsigned long bsize, unsigned long scale)
{
	struct nova_sb_info *sbi = NOVA_SB(sb);
	struct free_list *free_list;
	unsigned long next = 0;
	unsigned long low = 0;
	unsigned long start, end;
	int cpuid = 0;

	free_list = nova_get_free_list(sb, cpuid);
	start = free_list->block_start;
	end = free_list->block_end + 1;
	while (1) {
		next = find_next_zero_bit(bitmap, end, start);
		if (next == bsize)
			break;
		if (next == end) {
			if (cpuid == sbi->cpus - 1)
				cpuid = SHARED_CPU;
			else
				cpuid++;
			free_list = nova_get_free_list(sb, cpuid);
			start = free_list->block_start;
			end = free_list->block_end + 1;
			continue;
		}

		low = next;
		next = find_next_bit(bitmap, end, next);
		if (nova_insert_blocknode_map(sb, cpuid,
				low << scale, (next << scale) - 1)) {
			nova_dbg("Error: could not insert %lu - %lu\n",
				low << scale, ((next << scale) - 1));
		}
		start = next;
		if (next == bsize)
			break;
		if (next == end) {
			if (cpuid == sbi->cpus - 1)
				cpuid = SHARED_CPU;
			else
				cpuid++;
			free_list = nova_get_free_list(sb, cpuid);
			start = free_list->block_start;
			end = free_list->block_end + 1;
		}
	}
	return 0;
}
Example #18
/*
 * Check if this node is heartbeating and is connected to all other
 * heartbeating nodes.
 */
static int o2cb_cluster_check(void)
{
	u8 node_num;
	int i;
	unsigned long hbmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long netmap[BITS_TO_LONGS(O2NM_MAX_NODES)];

	node_num = o2nm_this_node();
	if (node_num == O2NM_MAX_NODES) {
		printk(KERN_ERR "o2cb: This node has not been configured.\n");
		return -EINVAL;
	}

	/*
	 * o2dlm expects o2net sockets to be created. If not, then
	 * dlm_join_domain() fails with a stack of errors which are both cryptic
	 * and incomplete. The idea here is to detect upfront whether we have
	 * managed to connect to all nodes or not. If not, then list the nodes
	 * to allow the user to check the configuration (incorrect IP, firewall,
	 * etc.) Yes, this is racy. But it's not the end of the world.
	 */
#define	O2CB_MAP_STABILIZE_COUNT	60
	for (i = 0; i < O2CB_MAP_STABILIZE_COUNT; ++i) {
		o2hb_fill_node_map(hbmap, sizeof(hbmap));
		if (!test_bit(node_num, hbmap)) {
			printk(KERN_ERR "o2cb: %s heartbeat has not been "
			       "started.\n", (o2hb_global_heartbeat_active() ?
					      "Global" : "Local"));
			return -EINVAL;
		}
		o2net_fill_node_map(netmap, sizeof(netmap));
		/* Force set the current node to allow easy compare */
		set_bit(node_num, netmap);
		if (!memcmp(hbmap, netmap, sizeof(hbmap)))
			return 0;
		if (i < O2CB_MAP_STABILIZE_COUNT)
			msleep(1000);
	}

	printk(KERN_ERR "o2cb: This node could not connect to nodes:");
	i = -1;
	while ((i = find_next_bit(hbmap, O2NM_MAX_NODES,
				  i + 1)) < O2NM_MAX_NODES) {
		if (!test_bit(i, netmap))
			printk(" %u", i);
	}
	printk(".\n");

	return -ENOTCONN;
}
Example #19
/**
 * dma_contiguous_isolate() - isolate contiguous memory from the page allocator
 * @dev: Pointer to device which owns the contiguous memory
 *
 * This function isolates contiguous memory from the page allocator. If some of
 * the contiguous memory is allocated, it is reclaimed.
 */
int dma_contiguous_isolate(struct device *dev)
{
	struct cma *cma = dev_get_cma_area(dev);
	int ret;
	int idx;

	if (!cma)
		return -ENODEV;

	if (cma->count == 0)
		return 0;

	mutex_lock(&cma_mutex);

	if (cma->isolated) {
		mutex_unlock(&cma_mutex);
		dev_err(dev, "Already isolated!\n");
		return 0;
	}

	idx = find_first_zero_bit(cma->bitmap, cma->count);
	while (idx < cma->count) {
		int idx_set;

		idx_set = find_next_bit(cma->bitmap, cma->count, idx);
		do {
			ret = alloc_contig_range(cma->base_pfn + idx,
						cma->base_pfn + idx_set,
						MIGRATE_CMA);
		} while (ret == -EBUSY);

		if (ret < 0) {
			mutex_unlock(&cma_mutex);
			dma_contiguous_deisolate_until(dev, idx_set);
			dev_err(dev, "Failed to isolate %#lx@%#010llx (%d).\n",
				(idx_set - idx) * PAGE_SIZE,
				PFN_PHYS(cma->base_pfn + idx), ret);
			return ret;
		}

		idx = find_next_zero_bit(cma->bitmap, cma->count, idx_set);
	}

	cma->isolated = true;

	mutex_unlock(&cma_mutex);

	return 0;
}
Example #20
/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority <= BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}
}
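One subtlety shared with Example #6: __ffs() is undefined when its argument is zero, so both loops rely on the caller guaranteeing at least one pending exception. A defensive variant (a sketch, not what the KVM code does) would seed the loop with find_first_bit(), which safely returns the bitmap size when nothing is set:

	priority = find_first_bit(pending, BITS_PER_BYTE * sizeof(*pending));
	while (priority <= BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}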
Example #21
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VF's and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}
Example #22
/* "unused": the lockres has no locks, is not on the dirty list,
 * has no inflight locks (in the gap between mastery and acquiring
 * the first lock), and has no bits in its refmap.
 * truly ready to be freed. */
int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
	if (!__dlm_lockres_has_locks(res) &&
	    (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
		/* try not to scan the bitmap unless the first two
		 * conditions are already true */
		int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES) {
			/* since the bit for dlm->node_num is not
			 * set, inflight_locks better be zero */
			BUG_ON(res->inflight_locks != 0);
			return 1;
		}
	}
	return 0;
}
Example #23
static int next_pidmap(struct pid_namespace *pid_ns, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}
Example #24
/*
 *  Counts the run of zero bits starting at bit up to max.
 *  It handles the case where a run might spill over a buffer.
 *  Called with bitmap lock.
 */
static int count_run(unsigned long **addr, int nbits,
		int addrlen, int bit, int max)
{
	int count = 0;
	int x;

	for (; addrlen > 0; addrlen--, addr++) {
		x = find_next_bit(*addr, nbits, bit);
		count += x - bit;

		if (x < nbits || count > max)
			return min(count, max);

		bit = 0;
	}
	return min(count, max);
}
Example #25
File: fpsimd.c Project: 0x7f454c46/linux
/*
 * All vector length selection from userspace comes through here.
 * We're on a slow path, so some sanity-checks are included.
 * If things go wrong there's a bug somewhere, but try to fall back to a
 * safe choice.
 */
static unsigned int find_supported_vector_length(unsigned int vl)
{
	int bit;
	int max_vl = sve_max_vl;

	if (WARN_ON(!sve_vl_valid(vl)))
		vl = SVE_VL_MIN;

	if (WARN_ON(!sve_vl_valid(max_vl)))
		max_vl = SVE_VL_MIN;

	if (vl > max_vl)
		vl = max_vl;

	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
			    vq_to_bit(sve_vq_from_vl(vl)));
	return sve_vl_from_vq(bit_to_vq(bit));
}
Example #26
File: cpu_features.c Project: pfliu/qemu
void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type,
                          uint8_t *data)
{
    S390Feat feat;
    int bit_nr;

    switch (type) {
    case S390_FEAT_TYPE_STFL:
        if (test_bit(S390_FEAT_ZARCH, features)) {
            /* Features that are always active */
            set_be_bit(2, data);   /* z/Architecture */
            set_be_bit(138, data); /* Configuration-z-architectural-mode */
        }
        break;
    case S390_FEAT_TYPE_PTFF:
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KM:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_KMCTR:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_PCC:
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMA:
        set_be_bit(0, data); /* query is always available */
        break;
    default:
        break;
    };

    feat = find_first_bit(features, S390_FEAT_MAX);
    while (feat < S390_FEAT_MAX) {
        if (s390_features[feat].type == type) {
            bit_nr = s390_features[feat].bit;
            /* big endian on uint8_t array */
            set_be_bit(bit_nr, data);
        }
        feat = find_next_bit(features, S390_FEAT_MAX, feat + 1);
    }
}
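set_be_bit() sets a bit counted MSB-first within a byte array; Example #30 below is an older revision of this same function with the helper still open-coded, so it presumably reduces to:

static inline void set_be_bit(unsigned int bit_nr, uint8_t *data)
{
    data[bit_nr / 8] |= 0x80 >> (bit_nr % 8);
}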
Example #27
/**
 * read_ec_accel_data_unsafe() - Read acceleration data from EC shared memory.
 * @st:        Pointer to state information for device.
 * @scan_mask: Bitmap of the sensor indices to scan.
 * @data:      Location to store data.
 *
 * This is the unsafe function for reading the EC data. It does not guarantee
 * that the EC will not modify the data as it is being read in.
 */
static void read_ec_accel_data_unsafe(struct cros_ec_accel_legacy_state *st,
				      unsigned long scan_mask, s16 *data)
{
	int i = 0;
	int num_enabled = bitmap_weight(&scan_mask, MAX_AXIS);

	/* Read all sensors enabled in scan_mask. Each value is 2 bytes. */
	while (num_enabled--) {
		i = find_next_bit(&scan_mask, MAX_AXIS, i);
		ec_cmd_read_u16(st->ec,
				EC_MEMMAP_ACC_DATA +
				sizeof(s16) *
				(1 + i + st->sensor_num * MAX_AXIS),
				data);
		*data *= st->sign[i];
		i++;
		data++;
	}
}
Example #28
static inline void trace_exit_reason(u32 *irq_traced)
{
    if ( unlikely(tb_init_done) )
    {
        int i, curbit;
        u32 irr_status[8] = { 0 };

        /* Get local apic IRR register */
        for ( i = 0; i < 8; i++ )
            irr_status[i] = apic_read(APIC_IRR + (i << 4));
        i = 0;
        curbit = find_first_bit((const unsigned long *)irr_status, 256);
        while ( i < 4 && curbit < 256 )
        {
            irq_traced[i++] = curbit;
            curbit = find_next_bit((const unsigned long *)irr_status, 256, curbit + 1);
        }
    }
}
Example #29
static int evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    int vcpuid;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
        return 1;

    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
                           &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }
    
    /* Check if some VCPU might be polling for this event. */
    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
        return 0;

    /* Wake any interested (or potentially interested) pollers. */
    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
          vcpuid < d->max_vcpus;
          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
    {
        v = d->vcpu[vcpuid];
        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
             test_and_clear_bit(vcpuid, d->poll_mask) )
        {
            v->poll_evtchn = 0;
            vcpu_unblock(v);
        }
    }

    return 0;
}
Example #30
void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type,
                          uint8_t *data)
{
    S390Feat feat;
    int bit_nr;

    if (type == S390_FEAT_TYPE_STFL && test_bit(S390_FEAT_ZARCH, features)) {
        /* z/Architecture is always active if around */
        data[0] |= 0x20;
    }

    feat = find_first_bit(features, S390_FEAT_MAX);
    while (feat < S390_FEAT_MAX) {
        if (s390_features[feat].type == type) {
            bit_nr = s390_features[feat].bit;
            /* big endian on uint8_t array */
            data[bit_nr / 8] |= 0x80 >> (bit_nr % 8);
        }
        feat = find_next_bit(features, S390_FEAT_MAX, feat + 1);
    }
}