Example #1
/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared resources.
 */
static int ccp_assign_lsbs(struct ccp_device *ccp)
{
	DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
	DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
	int n_lsbs = 0;
	int bitno;
	int i, lsb_cnt;
	int rc = 0;

	bitmap_zero(lsb_pub, MAX_LSB_CNT);

	/* Create an aggregate bitmap to get a total count of available LSBs */
	for (i = 0; i < ccp->cmd_q_count; i++)
		bitmap_or(lsb_pub,
			  lsb_pub, ccp->cmd_q[i].lsbmask,
			  MAX_LSB_CNT);

	n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);

	if (n_lsbs >= ccp->cmd_q_count) {
		/* We have enough LSBs to give every queue a private LSB.
		 * Brute force search to start with the queues that are more
		 * constrained in LSB choice. When an LSB is privately
		 * assigned, it is removed from the public mask.
		 * This is an ugly N squared algorithm with some optimization.
		 */
		for (lsb_cnt = 1;
		     n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
		     lsb_cnt++) {
			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
							  lsb_pub);
			if (rc < 0)
				return -EINVAL;
			n_lsbs = rc;
		}
	}

	rc = 0;
	/* What's left of the LSBs, according to the public mask, now become
	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
	 * that can't be used as a shared resource, so mark the LSB slots for
	 * them as "in use".
	 */
	bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);

	bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
	while (bitno < MAX_LSB_CNT) {
		bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
		bitmap_set(qlsb, bitno, 1);
		bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
	}

	return rc;
}
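All of the examples on this page use the same helper macro; in the kernel it is defined in include/linux/types.h (paraphrased here, together with BITS_TO_LONGS from the bitops headers):

#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#define DECLARE_BITMAP(name, bits) \
	unsigned long name[BITS_TO_LONGS(bits)]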
Example #2
static void __init test_zero_clear(void)
{
	DECLARE_BITMAP(bmap, 1024);

	/* Known way to set all bits */
	memset(bmap, 0xff, 128);

	expect_eq_pbl("0-22", bmap, 23);
	expect_eq_pbl("0-1023", bmap, 1024);

	/* single-word bitmaps */
	bitmap_clear(bmap, 0, 9);
	expect_eq_pbl("9-1023", bmap, 1024);

	bitmap_zero(bmap, 35);
	expect_eq_pbl("64-1023", bmap, 1024);

	/* cross boundaries operations */
	bitmap_clear(bmap, 79, 19);
	expect_eq_pbl("64-78,98-1023", bmap, 1024);

	bitmap_zero(bmap, 115);
	expect_eq_pbl("128-1023", bmap, 1024);

	/* Zeroing entire area */
	bitmap_zero(bmap, 1024);
	expect_eq_pbl("", bmap, 1024);
}
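Note the granularity difference these expected strings encode: bitmap_clear() clears exactly the requested bits, while bitmap_zero() is a memset() underneath and rounds the length up to whole unsigned longs, which is why bitmap_zero(bmap, 35) above wipes bits 0-63. A minimal sketch, assuming a 64-bit arch:

	bitmap_zero(bmap, 35);		/* clears bits 0..63: one full word */
	bitmap_clear(bmap, 0, 35);	/* clears exactly bits 0..34 */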
Example #3
static void __init test_fill_set(void)
{
	DECLARE_BITMAP(bmap, 1024);

	/* Known way to clear all bits */
	memset(bmap, 0x00, 128);

	expect_eq_pbl("", bmap, 23);
	expect_eq_pbl("", bmap, 1024);

	/* single-word bitmaps */
	bitmap_set(bmap, 0, 9);
	expect_eq_pbl("0-8", bmap, 1024);

	bitmap_fill(bmap, 35);
	expect_eq_pbl("0-63", bmap, 1024);

	/* cross boundaries operations */
	bitmap_set(bmap, 79, 19);
	expect_eq_pbl("0-63,79-97", bmap, 1024);

	bitmap_fill(bmap, 115);
	expect_eq_pbl("0-127", bmap, 1024);

	/* Filling entire area */
	bitmap_fill(bmap, 1024);
	expect_eq_pbl("0-1023", bmap, 1024);
}
Example #4
static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
Example #5
int snd_usb_clock_find_source(struct snd_usb_audio *chip,
			      struct usb_host_interface *host_iface,
			      int entity_id)
{
	DECLARE_BITMAP(visited, 256);
	memset(visited, 0, sizeof(visited));
	return __uac_clock_find_source(chip, host_iface, entity_id, visited);
}
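Since the storage behind DECLARE_BITMAP() is a plain unsigned long array, the memset() here is interchangeable with the dedicated helper; an equivalent sketch:

	bitmap_zero(visited, 256);	/* same effect as the memset() above */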
Example #6
/*
 * Fill a DRP IE's allocation fields from a MAS bitmap.
 */
static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
			       struct uwb_mas_bm *mas)
{
	int z, i, num_fields = 0, next = 0;
	struct uwb_drp_alloc *zones;
	__le16 current_bmp;
	DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS);
	DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE);

	zones = drp_ie->allocs;

	bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS);

	/* Determine unique MAS bitmaps in zones from bitmap. */
	for (z = 0; z < UWB_NUM_ZONES; z++) {
		bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE);
		if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) {
			bool found = false;
			current_bmp = (__le16) *tmp_mas_bm;
			for (i = 0; i < next; i++) {
				if (current_bmp == zones[i].mas_bm) {
					zones[i].zone_bm |= 1 << z;
					found = true;
					break;
				}
			}
			if (!found)  {
				num_fields++;
				zones[next].zone_bm = 1 << z;
				zones[next].mas_bm = current_bmp;
				next++;
			}
		}
		bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
	}

	/* Store in format ready for transmission (le16). */
	for (i = 0; i < num_fields; i++) {
		drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm);
		drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm);
	}

	drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr)
		+ num_fields * sizeof(struct uwb_drp_alloc);
}
Example #7
static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
{
	DECLARE_BITMAP(values, BITS_PER_TYPE(val));

	values[0] = val;

	gpiod_set_array_value_cansleep(mux->data.n_gpios, mux->gpios, NULL,
				       values);
}
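Storing val directly into values[0] works because BITS_PER_TYPE(val) never exceeds BITS_PER_LONG, so the whole bitmap fits in a single word. BITS_PER_TYPE() itself is (from include/linux/bits.h):

#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)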
Example #8
static int dsa_slave_port_vlan_dump(struct net_device *dev,
				    struct switchdev_obj_port_vlan *vlan,
				    switchdev_obj_dump_cb_t *cb)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->parent;
	DECLARE_BITMAP(members, DSA_MAX_PORTS);
	DECLARE_BITMAP(untagged, DSA_MAX_PORTS);
	u16 pvid, vid = 0;
	int err;

	if (!ds->drv->vlan_getnext || !ds->drv->port_pvid_get)
		return -EOPNOTSUPP;

	err = ds->drv->port_pvid_get(ds, p->port, &pvid);
	if (err)
		return err;

	for (;;) {
		err = ds->drv->vlan_getnext(ds, &vid, members, untagged);
		if (err)
			break;

		if (!test_bit(p->port, members))
			continue;

		memset(vlan, 0, sizeof(*vlan));
		vlan->vid_begin = vlan->vid_end = vid;

		if (vid == pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;

		if (test_bit(p->port, untagged))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		err = cb(&vlan->obj);
		if (err)
			break;
	}

	return err == -ENOENT ? 0 : err;
}
Example #9
/* Check if all specified nodes are online */
static int nodes_online(unsigned long *nodes)
{
	DECLARE_BITMAP(online2, MAX_NUMNODES);

	bitmap_copy(online2, nodes_addr(node_online_map), MAX_NUMNODES);
	if (bitmap_empty(online2, MAX_NUMNODES))
		set_bit(0, online2);
	if (!bitmap_subset(nodes, online2, MAX_NUMNODES))
		return -EINVAL;
	return 0;
}
Example #10
static void update_handled_vectors(struct kvm_ioapic *ioapic)
{
	DECLARE_BITMAP(handled_vectors, 256);
	int i;

	memset(handled_vectors, 0, sizeof(handled_vectors));
	for (i = 0; i < IOAPIC_NUM_PINS; ++i)
		__set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
	memcpy(ioapic->handled_vectors, handled_vectors,
	       sizeof(handled_vectors));
	smp_wmb();
}
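A sketch of the same routine written with the bitmap helpers instead of raw memset()/memcpy(); bitmap_copy() over 256 bits moves the same bytes, so behavior is unchanged:

	bitmap_zero(handled_vectors, 256);
	for (i = 0; i < IOAPIC_NUM_PINS; ++i)
		__set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
	bitmap_copy(ioapic->handled_vectors, handled_vectors, 256);
	smp_wmb();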
Example #11
static int dsa_bridge_check_vlan_range(struct dsa_switch *ds,
				       const struct net_device *bridge,
				       u16 vid_begin, u16 vid_end)
{
	struct dsa_slave_priv *p;
	struct net_device *dev, *vlan_br;
	DECLARE_BITMAP(members, DSA_MAX_PORTS);
	DECLARE_BITMAP(untagged, DSA_MAX_PORTS);
	u16 vid;
	int member, err;

	if (!ds->drv->vlan_getnext || !vid_begin)
		return -EOPNOTSUPP;

	vid = vid_begin - 1;

	do {
		err = ds->drv->vlan_getnext(ds, &vid, members, untagged);
		if (err)
			break;

		if (vid > vid_end)
			break;

		member = find_first_bit(members, DSA_MAX_PORTS);
		if (member == DSA_MAX_PORTS)
			continue;

		dev = ds->ports[member];
		p = netdev_priv(dev);
		vlan_br = p->bridge_dev;
		if (vlan_br == bridge)
			continue;

		netdev_dbg(vlan_br, "hardware VLAN %d already in use\n", vid);
		return -EOPNOTSUPP;
	} while (vid < vid_end);

	return err == -ENOENT ? 0 : err;
}
Example #12
static void __init test_copy(void)
{
	DECLARE_BITMAP(bmap1, 1024);
	DECLARE_BITMAP(bmap2, 1024);

	bitmap_zero(bmap1, 1024);
	bitmap_zero(bmap2, 1024);

	/* single-word bitmaps */
	bitmap_set(bmap1, 0, 19);
	bitmap_copy(bmap2, bmap1, 23);
	expect_eq_pbl("0-18", bmap2, 1024);

	bitmap_set(bmap2, 0, 23);
	bitmap_copy(bmap2, bmap1, 23);
	expect_eq_pbl("0-18", bmap2, 1024);

	/* multi-word bitmaps */
	bitmap_set(bmap1, 0, 109);
	bitmap_copy(bmap2, bmap1, 1024);
	expect_eq_pbl("0-108", bmap2, 1024);

	bitmap_fill(bmap2, 1024);
	bitmap_copy(bmap2, bmap1, 1024);
	expect_eq_pbl("0-108", bmap2, 1024);

	/* the following tests assume a 32- or 64-bit arch (even 128b
	 * if we care)
	 */

	bitmap_fill(bmap2, 1024);
	bitmap_copy(bmap2, bmap1, 109);  /* ... but 0-padded to word length */
	expect_eq_pbl("0-108,128-1023", bmap2, 1024);

	bitmap_fill(bmap2, 1024);
	bitmap_copy(bmap2, bmap1, 97);  /* ... but aligned on word length */
	expect_eq_pbl("0-108,128-1023", bmap2, 1024);
}
Example #13
static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
					int lsb_cnt, int n_lsbs,
					unsigned long *lsb_pub)
{
	DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
	int bitno;
	int qlsb_wgt;
	int i;

	/* For each queue:
	 * If the count of potential LSBs available to a queue matches the
	 * ordinal given to us in lsb_cnt:
	 * Copy the mask of possible LSBs for this queue into "qlsb";
	 * For each bit in qlsb, see if the corresponding bit in the
	 * aggregation mask is set; if so, we have a match.
	 *     If we have a match, clear the bit in the aggregation to
	 *     mark it as no longer available.
	 *     If there is no match, clear the bit in qlsb and keep looking.
	 */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

		qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);

		if (qlsb_wgt == lsb_cnt) {
			bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);

			bitno = find_first_bit(qlsb, MAX_LSB_CNT);
			while (bitno < MAX_LSB_CNT) {
				if (test_bit(bitno, lsb_pub)) {
					/* We found an available LSB
					 * that this queue can access
					 */
					cmd_q->lsb = bitno;
					bitmap_clear(lsb_pub, bitno, 1);
					dev_info(ccp->dev,
						 "Queue %d gets LSB %d\n",
						 i, bitno);
					break;
				}
				bitmap_clear(qlsb, bitno, 1);
				bitno = find_first_bit(qlsb, MAX_LSB_CNT);
			}
			if (bitno >= MAX_LSB_CNT)
				return -EINVAL;
			n_lsbs--;
		}
	}
	return n_lsbs;
}
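The destructive find_first_bit()/bitmap_clear() walk over the qlsb copy above can be written non-destructively with for_each_set_bit(), avoiding the copy entirely; a sketch of an equivalent inner loop under the same semantics:

	for_each_set_bit(bitno, cmd_q->lsbmask, MAX_LSB_CNT) {
		if (test_bit(bitno, lsb_pub)) {
			cmd_q->lsb = bitno;
			bitmap_clear(lsb_pub, bitno, 1);
			break;
		}
	}
	/* bitno == MAX_LSB_CNT here iff no available LSB was found */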
Example #14
/*
 * apic_is_clustered_box() -- Check if we can expect good TSC
 *
 * Thus far, the major user of this is IBM's Summit2 series:
 *
 * Clustered boxes may have unsynced TSC problems if they are
 * multi-chassis. Use available data to take a good guess.
 * If in doubt, go HPET.
 */
__cpuinit int apic_is_clustered_box(void)
{
	int i, clusters, zeros;
	unsigned id;
	u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

	for (i = 0; i < NR_CPUS; i++) {
		/* are we being called early in kernel startup? */
		if (bios_cpu_apicid) {
			id = bios_cpu_apicid[i];
		} else if (i < nr_cpu_ids) {
			if (cpu_present(i))
				id = per_cpu(x86_bios_cpu_apicid, i);
			else
				continue;
		} else
			break;

		if (id != BAD_APICID)
			__set_bit(APIC_CLUSTERID(id), clustermap);
	}

	/* Problem:  Partially populated chassis may not have CPUs in some of
	 * the APIC clusters they have been allocated.  Only present CPUs have
	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
	 * Since clusters are allocated sequentially, count zeros only if
	 * they are bounded by ones: e.g. the map 1,0,0,1 counts as four
	 * clusters, while zeros trailing the last one are never counted.
	 */
	clusters = 0;
	zeros = 0;
	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
		if (test_bit(i, clustermap)) {
			clusters += 1 + zeros;
			zeros = 0;
		} else
			++zeros;
	}

	/*
	 * If clusters > 2, then should be multi-chassis.
	 * May have to revisit this when multi-core + hyperthreaded CPUs come
	 * out, but AFAIK this will work even for them.
	 */
	return (clusters > 2);
}
Example #15
static void __init test_of_node(void)
{
	u32 prop_data[] = { 10, 10, 25, 3, 40, 1, 100, 100, 200, 20 };
	const char *expected_str = "0-9,20-24,28-39,41-99,220-255";
	char *prop_name = "msi-available-ranges";
	char *node_name = "/fakenode";
	struct device_node of_node;
	struct property prop;
	struct msi_bitmap bmp;
#define SIZE_EXPECTED 256
	DECLARE_BITMAP(expected, SIZE_EXPECTED);

	/* There should really be a struct device_node allocator */
	memset(&of_node, 0, sizeof(of_node));
	of_node_init(&of_node);
	of_node.full_name = node_name;

	WARN_ON(msi_bitmap_alloc(&bmp, SIZE_EXPECTED, &of_node));

	/* No msi-available-ranges, so expect > 0 */
	WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0);

	/* Should all still be free */
	WARN_ON(bitmap_find_free_region(bmp.bitmap, SIZE_EXPECTED,
					get_count_order(SIZE_EXPECTED)));
	bitmap_release_region(bmp.bitmap, 0, get_count_order(SIZE_EXPECTED));

	/* Now create a fake msi-available-ranges property */

	/* There should really .. oh whatever */
	memset(&prop, 0, sizeof(prop));
	prop.name = prop_name;
	prop.value = &prop_data;
	prop.length = sizeof(prop_data);

	of_node.properties = &prop;

	/* msi-available-ranges, so expect == 0 */
	WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp));

	/* Check we got the expected result */
	WARN_ON(bitmap_parselist(expected_str, expected, SIZE_EXPECTED));
	WARN_ON(!bitmap_equal(expected, bmp.bitmap, SIZE_EXPECTED));

	msi_bitmap_free(&bmp);
	kfree(bmp.bitmap);
}
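bitmap_parselist() accepts exactly the range-list syntax used by expected_str above ("0-9,20-24,..."); a minimal usage sketch:

	DECLARE_BITMAP(dst, 64);

	/* Returns 0 on success, negative errno on malformed input. */
	if (bitmap_parselist("0-3,8,12-15", dst, 64))
		pr_err("parse failed\n");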
Example #16
/*
 * For all kinds of sample rate settings and other device queries,
 * the clock source (end-leaf) must be used. However, clock selectors,
 * clock multipliers and sample rate converters may be specified as
 * clock source input to terminal. This functions walks the clock path
 * to its end and tries to find the source.
 *
 * The 'visited' bitfield is used internally to detect recursive loops.
 *
 * Returns the clock source UnitID (>=0) on success, or an error.
 */
int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol,
			      int entity_id, bool validate)
{
	DECLARE_BITMAP(visited, 256);
	memset(visited, 0, sizeof(visited));

	switch (protocol) {
	case UAC_VERSION_2:
		return __uac_clock_find_source(chip, entity_id, visited,
					       validate);
	case UAC_VERSION_3:
		return __uac3_clock_find_source(chip, entity_id, visited,
					       validate);
	default:
		return -EINVAL;
	}
}
Example #17
int main()
{
	DECLARE_BITMAP(bm, 1000);

	bitmap_zero(bm, 1000);
	bitmap_set(bm, 3, 2);
	assert(!test_bit(2, bm));
	assert(test_bit(3, bm));
	assert(test_bit(4, bm));
	assert(!test_bit(5, bm));

	int bit;
	int start = 3;
	for_each_set_bit(bit, bm, 1000)
		assert(bit == start++);

	printf("passed\n");

	return 0;
}
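The assert loop relies on for_each_set_bit() visiting set bits in ascending order, which follows from its definition in the kernel's bitops headers:

#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))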
Example #18
static void test_smbios_structs(test_data *data)
{
    DECLARE_BITMAP(struct_bitmap, SMBIOS_MAX_TYPE+1) = { 0 };
    struct smbios_entry_point *ep_table = &data->smbios_ep_table;
    uint32_t addr = ep_table->structure_table_address;
    int i, len, max_len = 0;
    uint8_t type, prv, crt;
    uint8_t required_struct_types[] = {0, 1, 3, 4, 16, 17, 19, 32, 127};

    /* walk the smbios tables */
    for (i = 0; i < ep_table->number_of_structures; i++) {

        /* grab type and formatted area length from struct header */
        type = readb(addr);
        g_assert_cmpuint(type, <=, SMBIOS_MAX_TYPE);
        len = readb(addr + 1);

        /* single-instance structs must not have been encountered before */
        if (smbios_single_instance(type)) {
            g_assert(!test_bit(type, struct_bitmap));
        }
        set_bit(type, struct_bitmap);

        /* seek to end of unformatted string area of this struct ("\0\0") */
        prv = crt = 1;
        while (prv || crt) {
            prv = crt;
            crt = readb(addr + len);
            len++;
        }

        /* keep track of max. struct size */
        if (max_len < len) {
            max_len = len;
            g_assert_cmpuint(max_len, <=, ep_table->max_structure_size);
        }

        /* start of next structure */
        addr += len;
    }

    /* required struct types must all be present */
    for (i = 0; i < ARRAY_SIZE(required_struct_types); i++) {
        g_assert(test_bit(required_struct_types[i], struct_bitmap));
    }
}
Example #19
static int __cpuinit apic_cluster_num(void)
{
    int i, clusters, zeros;
    unsigned id;
    u16 *bios_cpu_apicid;
    DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

    bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
    bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

    for (i = 0; i < nr_cpu_ids; i++) {

        if (bios_cpu_apicid) {
            id = bios_cpu_apicid[i];
        } else if (i < nr_cpu_ids) {
            if (cpu_present(i))
                id = per_cpu(x86_bios_cpu_apicid, i);
            else
                continue;
        } else
            break;

        if (id != BAD_APICID)
            __set_bit(APIC_CLUSTERID(id), clustermap);
    }
    clusters = 0;
    zeros = 0;
    for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
        if (test_bit(i, clustermap)) {
            clusters += 1 + zeros;
            zeros = 0;
        } else
            ++zeros;
    }

    return clusters;
}
Example #20
/*
 * oem_force_hpet_timer -- force HPET mode for some boxes.
 *
 * Thus far, the major user of this is IBM's Summit2 series:
 *
 * Clustered boxes may have unsynced TSC problems if they are
 * multi-chassis. Use available data to take a good guess.
 * If in doubt, go HPET.
 */
__cpuinit int oem_force_hpet_timer(void)
{
	int i, clusters, zeros;
	unsigned id;
	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

	for (i = 0; i < NR_CPUS; i++) {
		id = bios_cpu_apicid[i];
		if (id != BAD_APICID)
			__set_bit(APIC_CLUSTERID(id), clustermap);
	}

	/* Problem:  Partially populated chassis may not have CPUs in some of
	 * the APIC clusters they have been allocated.  Only present CPUs have
	 * bios_cpu_apicid entries, thus causing zeroes in the bitmap.  Since
	 * clusters are allocated sequentially, count zeros only if they are
	 * bounded by ones.
	 */
	clusters = 0;
	zeros = 0;
	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
		if (test_bit(i, clustermap)) {
			clusters += 1 + zeros;
			zeros = 0;
		} else
			++zeros;
	}

	/*
	 * If clusters > 2, then should be multi-chassis.  Return 1 for HPET.
	 * Else return 0 to use TSC.
	 * May have to revisit this when multi-core + hyperthreaded CPUs come
	 * out, but AFAIK this will work even for them.
	 */
	return (clusters > 2);
}
Example #21
bool msm_mpm_gpio_irq_enabled(bool from_idle)
{
	unsigned long *apps_irq_bitmap = from_idle ?
			msm_mpm_enabled_apps_irqs : msm_mpm_wake_apps_irqs;
	int gpio_irq_enabled = 0;

	gpio_irq_enabled = __bitmap_intersects(msm_mpm_gpio_irqs_mask, apps_irq_bitmap,
			MSM_MPM_NR_APPS_IRQS);

	if (gpio_irq_enabled && (!from_idle ||
		(MSM_MPM_DEBUG_ILDE_BLOCK & msm_mpm_debug_mask))) {
		/* 9 bytes per 32-bit word: 8 hex digits plus a separator */
		char buf[DIV_ROUND_UP(MSM_MPM_NR_APPS_IRQS, 32)*9+1];
		DECLARE_BITMAP(msm_mpm_chk_gpio_irqs, MSM_MPM_NR_APPS_IRQS);

		__bitmap_and(msm_mpm_chk_gpio_irqs, msm_mpm_gic_irqs_mask,
			apps_irq_bitmap, MSM_MPM_NR_APPS_IRQS);
		bitmap_scnprintf(buf, sizeof(buf), msm_mpm_chk_gpio_irqs,
			MSM_MPM_NR_APPS_IRQS);
		buf[sizeof(buf) - 1] = '\0';

		pr_info("%s: cannot monitor %s\n", __func__, buf);
	}
	return gpio_irq_enabled;
}
Example #22
/*H:205
 * Virtual Interrupts.
 *
 * interrupt_pending() returns the first pending interrupt which isn't blocked
 * by the Guest.  It is called before every entry to the Guest, and just before
 * we go to sleep when the Guest has halted itself.
 */
unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more)
{
	unsigned int irq;
	DECLARE_BITMAP(blk, LGUEST_IRQS);

	/* If the Guest hasn't even initialized yet, we can do nothing. */
	if (!cpu->lg->lguest_data)
		return LGUEST_IRQS;

	/*
	 * Take our "irqs_pending" array and remove any interrupts the Guest
	 * wants blocked: the result ends up in "blk".
	 */
	if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
			   sizeof(blk)))
		return LGUEST_IRQS;
	bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);

	/* Find the first interrupt. */
	irq = find_first_bit(blk, LGUEST_IRQS);
	*more = find_next_bit(blk, LGUEST_IRQS, irq+1);

	return irq;
}
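find_first_bit() returns its size argument when no bit is set, so LGUEST_IRQS doubles as the "nothing pending" sentinel here. A hypothetical caller (deliver_irq() is illustrative, not part of the source):

	bool more;
	unsigned int irq = interrupt_pending(cpu, &more);

	if (irq < LGUEST_IRQS)
		deliver_irq(cpu, irq);	/* illustrative helper */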
Example #23
/*
 * apic_is_clustered_box() -- Check if we can expect good TSC
 *
 * Thus far, the major user of this is IBM's Summit2 series:
 *
 * Clustered boxes may have unsynced TSC problems if they are
 * multi-chassis. Use available data to take a good guess.
 * If in doubt, go HPET.
 */
__cpuinit int apic_is_clustered_box(void)
{
    int i, clusters, zeros;
    unsigned id;
    u16 *bios_cpu_apicid;
    DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

    /*
     * There is no such box with AMD CPUs yet. Some AMD boxes with
     * quad-core CPUs and 8 sockets have APIC IDs like [4, 0x23] or
     * [8, 0x27] and could be mistaken for a vSMP box; this still
     * needs checking...
     */
    if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
        return 0;

    bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
    bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

    for (i = 0; i < NR_CPUS; i++) {
        /* are we being called early in kernel startup? */
        if (bios_cpu_apicid) {
            id = bios_cpu_apicid[i];
        } else if (i < nr_cpu_ids) {
            if (cpu_present(i))
                id = per_cpu(x86_bios_cpu_apicid, i);
            else
                continue;
        } else
            break;

        if (id != BAD_APICID)
            __set_bit(APIC_CLUSTERID(id), clustermap);
    }

    /* Problem:  Partially populated chassis may not have CPUs in some of
     * the APIC clusters they have been allocated.  Only present CPUs have
     * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
     * Since clusters are allocated sequentially, count zeros only if
     * they are bounded by ones.
     */
    clusters = 0;
    zeros = 0;
    for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
        if (test_bit(i, clustermap)) {
            clusters += 1 + zeros;
            zeros = 0;
        } else
            ++zeros;
    }

    /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
     * not guaranteed to be synced between boards
     */
    if (is_vsmp_box() && clusters > 1)
        return 1;

    /*
     * If clusters > 2, then should be multi-chassis.
     * May have to revisit this when multi-core + hyperthreaded CPUs come
     * out, but AFAIK this will work even for them.
     */
    return (clusters > 2);
}
Example #24
File: regd.c  Project: E-LLP/n900
bool
ath9k_regd_init_channels(struct ath_hal *ah,
			 u32 maxchans,
			 u32 *nchans, u8 *regclassids,
			 u32 maxregids, u32 *nregids, u16 cc,
			 bool enableOutdoor,
			 bool enableExtendedChannels)
{
	u16 maxChan = 7000;
	struct country_code_to_enum_rd *country = NULL;
	struct regDomain rd5GHz, rd2GHz;
	const struct cmode *cm;
	struct ath9k_channel *ichans = &ah->ah_channels[0];
	int next = 0, b;
	u8 ctl;
	int regdmn;
	u16 chanSep;
	unsigned long *modes_avail;
	DECLARE_BITMAP(modes_allowed, ATH9K_MODE_MAX);

	DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: cc %u %s %s\n",
		 __func__, cc,
		 enableOutdoor ? "Enable outdoor" : "",
		 enableExtendedChannels ? "Enable ecm" : "");

	if (!ath9k_regd_is_ccode_valid(ah, cc)) {
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"%s: invalid country code %d\n", __func__, cc);
		return false;
	}

	if (!ath9k_regd_is_eeprom_valid(ah)) {
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"%s: invalid EEPROM contents\n", __func__);
		return false;
	}

	ah->ah_countryCode = ath9k_regd_get_default_country(ah);

	if (ah->ah_countryCode == CTRY_DEFAULT) {
		ah->ah_countryCode = cc & COUNTRY_CODE_MASK;
		if ((ah->ah_countryCode == CTRY_DEFAULT) &&
		    (ath9k_regd_get_eepromRD(ah) == CTRY_DEFAULT)) {
			ah->ah_countryCode = CTRY_UNITED_STATES;
		}
	}

#ifdef AH_SUPPORT_11D
	if (ah->ah_countryCode == CTRY_DEFAULT) {
		regdmn = ath9k_regd_get_eepromRD(ah);
		country = NULL;
	} else {
#endif
		country = ath9k_regd_find_country(ah->ah_countryCode);
		if (country == NULL) {
			DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
				"Country is NULL!!!!, cc= %d\n",
				ah->ah_countryCode);
			return false;
		} else {
			regdmn = country->regDmnEnum;
#ifdef AH_SUPPORT_11D
			if (((ath9k_regd_get_eepromRD(ah) &
			      WORLD_SKU_MASK) == WORLD_SKU_PREFIX) &&
			    (cc == CTRY_UNITED_STATES)) {
				if (!isWwrSKU_NoMidband(ah)
				    && ath9k_regd_is_fcc_midband_supported(ah))
					regdmn = FCC3_FCCA;
				else
					regdmn = FCC1_FCCA;
			}
#endif
		}
#ifdef AH_SUPPORT_11D
	}
#endif
	if (!ath9k_regd_get_wmode_regdomain(ah,
					    regdmn,
					    ~CHANNEL_2GHZ,
					    &rd5GHz)) {
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"%s: couldn't find unitary "
			"5GHz reg domain for country %u\n",
			__func__, ah->ah_countryCode);
		return false;
	}
	if (!ath9k_regd_get_wmode_regdomain(ah,
					    regdmn,
					    CHANNEL_2GHZ,
					    &rd2GHz)) {
		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
			"%s: couldn't find unitary 2GHz "
			"reg domain for country %u\n",
			__func__, ah->ah_countryCode);
		return false;
	}

	if (!isWwrSKU(ah) && ((rd5GHz.regDmnEnum == FCC1) ||
			      (rd5GHz.regDmnEnum == FCC2))) {
		if (ath9k_regd_is_fcc_midband_supported(ah)) {
			if (!ath9k_regd_get_wmode_regdomain(ah,
							    FCC3_FCCA,
							    ~CHANNEL_2GHZ,
							    &rd5GHz)) {
				DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
					"%s: couldn't find unitary 5GHz "
					"reg domain for country %u\n",
					__func__, ah->ah_countryCode);
				return false;
			}
		}
	}

	if (country == NULL) {
		modes_avail = ah->ah_caps.wireless_modes;
	} else {
		ath9k_regd_get_wmodes_nreg(ah, country, &rd5GHz, modes_allowed);
		modes_avail = modes_allowed;

		if (!enableOutdoor)
			maxChan = country->outdoorChanStart;
	}

	next = 0;

	if (maxchans > ARRAY_SIZE(ah->ah_channels))
		maxchans = ARRAY_SIZE(ah->ah_channels);

	for (cm = modes; cm < &modes[ARRAY_SIZE(modes)]; cm++) {
		u16 c, c_hi, c_lo;
		u64 *channelBM = NULL;
		struct regDomain *rd = NULL;
		struct RegDmnFreqBand *fband = NULL, *freqs;
		int8_t low_adj = 0, hi_adj = 0;

		if (!test_bit(cm->mode, modes_avail)) {
			DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
				"%s: !avail mode %d flags 0x%x\n",
				__func__, cm->mode, cm->flags);
			continue;
		}
		if (!ath9k_get_channel_edges(ah, cm->flags, &c_lo, &c_hi)) {
			DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
				"%s: channels 0x%x not supported "
				"by hardware\n",
				__func__, cm->flags);
			continue;
		}

		switch (cm->mode) {
		case ATH9K_MODE_11A:
		case ATH9K_MODE_11NA_HT20:
		case ATH9K_MODE_11NA_HT40PLUS:
		case ATH9K_MODE_11NA_HT40MINUS:
			rd = &rd5GHz;
			channelBM = rd->chan11a;
			freqs = &regDmn5GhzFreq[0];
			ctl = rd->conformanceTestLimit;
			break;
		case ATH9K_MODE_11B:
			rd = &rd2GHz;
			channelBM = rd->chan11b;
			freqs = &regDmn2GhzFreq[0];
			ctl = rd->conformanceTestLimit | CTL_11B;
			break;
		case ATH9K_MODE_11G:
		case ATH9K_MODE_11NG_HT20:
		case ATH9K_MODE_11NG_HT40PLUS:
		case ATH9K_MODE_11NG_HT40MINUS:
			rd = &rd2GHz;
			channelBM = rd->chan11g;
			freqs = &regDmn2Ghz11gFreq[0];
			ctl = rd->conformanceTestLimit | CTL_11G;
			break;
		default:
			DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
				"%s: Unknown HAL mode 0x%x\n", __func__,
				cm->mode);
			continue;
		}

		if (ath9k_regd_is_chan_bm_zero(channelBM))
			continue;

		if ((cm->mode == ATH9K_MODE_11NA_HT40PLUS) ||
		    (cm->mode == ATH9K_MODE_11NG_HT40PLUS)) {
			hi_adj = -20;
		}

		if ((cm->mode == ATH9K_MODE_11NA_HT40MINUS) ||
		    (cm->mode == ATH9K_MODE_11NG_HT40MINUS)) {
			low_adj = 20;
		}

		/* XXX: Add a helper here instead */
		for (b = 0; b < 64 * BMLEN; b++) {
			if (ath9k_regd_is_bit_set(b, channelBM)) {
				fband = &freqs[b];
				if (rd5GHz.regDmnEnum == MKK1
				    || rd5GHz.regDmnEnum == MKK2) {
					if (ath9k_regd_japan_check(ah,
								   b,
								   &rd5GHz))
						continue;
				}

				ath9k_regd_add_reg_classid(regclassids,
							   maxregids,
							   nregids,
							   fband->
							   regClassId);

				if (IS_HT40_MODE(cm->mode) && (rd == &rd5GHz)) {
					chanSep = 40;
					if (fband->lowChannel == 5280)
						low_adj += 20;

					if (fband->lowChannel == 5170)
						continue;
				} else
					chanSep = fband->channelSep;

				for (c = fband->lowChannel + low_adj;
				     ((c <= (fband->highChannel + hi_adj)) &&
				      (c >= (fband->lowChannel + low_adj)));
				     c += chanSep) {
					if (next >= maxchans) {
						DPRINTF(ah->ah_sc,
							ATH_DBG_REGULATORY,
							"%s: too many channels "
							"for channel table\n",
							__func__);
						goto done;
					}
					if (ath9k_regd_add_channel(ah,
						   c, c_lo, c_hi,
						   maxChan, ctl,
						   next,
						   rd5GHz,
						   fband, rd, cm,
						   ichans,
						   enableExtendedChannels))
						next++;
				}
				if (IS_HT40_MODE(cm->mode) &&
				    (fband->lowChannel == 5280)) {
					low_adj -= 20;
				}
			}
		}
	}
done:
	if (next != 0) {
		int i;

		if (next > ARRAY_SIZE(ah->ah_channels)) {
			DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
				"%s: too many channels %u; truncating to %u\n",
				__func__, next,
				(int) ARRAY_SIZE(ah->ah_channels));
			next = ARRAY_SIZE(ah->ah_channels);
		}
#ifdef ATH_NF_PER_CHAN
		ath9k_regd_init_rf_buffer(ichans, next);
#endif
		ath9k_regd_sort(ichans, next,
				sizeof(struct ath9k_channel),
				ath9k_regd_chansort);

		ah->ah_nchan = next;

		DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "Channel list:\n");
		for (i = 0; i < next; i++) {
			DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
				"chan: %d flags: 0x%x\n",
				ah->ah_channels[i].channel,
				ah->ah_channels[i].channelFlags);
		}
	}
	*nchans = next;

	ah->ah_currentRDInUse = regdmn;
	ah->ah_currentRD5G = rd5GHz.regDmnEnum;
	ah->ah_currentRD2G = rd2GHz.regDmnEnum;
	if (country == NULL) {
		ah->ah_iso[0] = 0;
		ah->ah_iso[1] = 0;
	} else {
		ah->ah_iso[0] = country->isoName[0];
		ah->ah_iso[1] = country->isoName[1];
	}

	return next != 0;
}
Example #25
/**
 * media_entity_pipeline_start - Mark a pipeline as streaming
 * @entity: Starting entity
 * @pipe: Media pipeline to be assigned to all entities in the pipeline.
 *
 * Mark all entities connected to a given entity through enabled links, either
 * directly or indirectly, as streaming. The given pipeline object is assigned to
 * every entity in the pipeline and stored in the media_entity pipe field.
 *
 * Calls to this function can be nested, in which case the same number of
 * media_entity_pipeline_stop() calls will be required to stop streaming. The
 * pipeline pointer must be identical for all nested calls to
 * media_entity_pipeline_start().
 */
__must_check int media_entity_pipeline_start(struct media_entity *entity,
					     struct media_pipeline *pipe)
{
	struct media_device *mdev = entity->parent;
	struct media_entity_graph graph;
	struct media_entity *entity_err = entity;
	int ret;

	mutex_lock(&mdev->graph_mutex);

	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		DECLARE_BITMAP(active, entity->num_pads);
		DECLARE_BITMAP(has_no_links, entity->num_pads);
		unsigned int i;

		entity->stream_count++;
		WARN_ON(entity->pipe && entity->pipe != pipe);
		entity->pipe = pipe;

		/* Already streaming --- no need to check. */
		if (entity->stream_count > 1)
			continue;

		if (!entity->ops || !entity->ops->link_validate)
			continue;

		bitmap_zero(active, entity->num_pads);
		bitmap_fill(has_no_links, entity->num_pads);

		for (i = 0; i < entity->num_links; i++) {
			struct media_link *link = &entity->links[i];
			struct media_pad *pad = link->sink->entity == entity
						? link->sink : link->source;

			/* Mark that a pad is connected by a link. */
			bitmap_clear(has_no_links, pad->index, 1);

			/*
			 * Pads that either do not need to connect or
			 * are connected through an enabled link are
			 * fine.
			 */
			if (!(pad->flags & MEDIA_PAD_FL_MUST_CONNECT) ||
			    link->flags & MEDIA_LNK_FL_ENABLED)
				bitmap_set(active, pad->index, 1);

			/*
			 * Link validation will only take place for
			 * sink ends of the link that are enabled.
			 */
			if (link->sink != pad ||
			    !(link->flags & MEDIA_LNK_FL_ENABLED))
				continue;

			ret = entity->ops->link_validate(link);
			if (ret < 0 && ret != -ENOIOCTLCMD)
				goto error;
		}

		/* Either no links or validated links are fine. */
		bitmap_or(active, active, has_no_links, entity->num_pads);

		if (!bitmap_full(active, entity->num_pads)) {
			ret = -EPIPE;
			goto error;
		}
	}

	mutex_unlock(&mdev->graph_mutex);

	return 0;

error:
	/*
	 * Link validation on graph failed. We revert what we did and
	 * return the error.
	 */
	media_entity_graph_walk_start(&graph, entity_err);

	while ((entity_err = media_entity_graph_walk_next(&graph))) {
		entity_err->stream_count--;
		if (entity_err->stream_count == 0)
			entity_err->pipe = NULL;

		/*
		 * We haven't increased stream_count further than this
		 * so we quit here.
		 */
		if (entity_err == entity)
			break;
	}

	mutex_unlock(&mdev->graph_mutex);

	return ret;
}
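Note that entity->num_pads is not a compile-time constant, so the two DECLARE_BITMAP() uses in the walk above expand to variable-length arrays on the stack, i.e. effectively:

	unsigned long active[BITS_TO_LONGS(entity->num_pads)];		/* VLA */
	unsigned long has_no_links[BITS_TO_LONGS(entity->num_pads)];	/* VLA */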
Example #26
/*H:205
 * Virtual Interrupts.
 *
 * maybe_do_interrupt() gets called before every entry to the Guest, to see if
 * we should divert the Guest to running an interrupt handler. */
void maybe_do_interrupt(struct lg_cpu *cpu)
{
	unsigned int irq;
	DECLARE_BITMAP(blk, LGUEST_IRQS);
	struct desc_struct *idt;

	/* If the Guest hasn't even initialized yet, we can do nothing. */
	if (!cpu->lg->lguest_data)
		return;

	/* Take our "irqs_pending" array and remove any interrupts the Guest
	 * wants blocked: the result ends up in "blk". */
	if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
			   sizeof(blk)))
		return;
	bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);

	/* Find the first interrupt. */
	irq = find_first_bit(blk, LGUEST_IRQS);
	/* None?  Nothing to do */
	if (irq >= LGUEST_IRQS)
		return;

	/* They may be in the middle of an iret, where they asked us never to
	 * deliver interrupts. */
	if (cpu->regs->eip >= cpu->lg->noirq_start &&
	   (cpu->regs->eip < cpu->lg->noirq_end))
		return;

	/* If they're halted, interrupts restart them. */
	if (cpu->halted) {
		/* Re-enable interrupts. */
		if (put_user(X86_EFLAGS_IF, &cpu->lg->lguest_data->irq_enabled))
			kill_guest(cpu, "Re-enabling interrupts");
		cpu->halted = 0;
	} else {
		/* Otherwise we check if they have interrupts disabled. */
		u32 irq_enabled;
		if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
			irq_enabled = 0;
		if (!irq_enabled)
			return;
	}

	/* Look at the IDT entry the Guest gave us for this interrupt.  The
	 * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip
	 * over them. */
	idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
	/* If they don't have a handler (yet?), we just ignore it */
	if (idt_present(idt->a, idt->b)) {
		/* OK, mark it no longer pending and deliver it. */
		clear_bit(irq, cpu->irqs_pending);
		/* set_guest_interrupt() takes the interrupt descriptor and a
		 * flag to say whether this interrupt pushes an error code onto
		 * the stack as well: virtual interrupts never do. */
		set_guest_interrupt(cpu, idt->a, idt->b, 0);
	}

	/* Every time we deliver an interrupt, we update the timestamp in the
	 * Guest's lguest_data struct.  It would be better for the Guest if we
	 * did this more often, but it can actually be quite slow: doing it
	 * here is a compromise which means at least it gets updated every
	 * timer interrupt. */
	write_timestamp(cpu);
}
Example #27
static void __init
struct_rtc_time(void)
{
	/* 1543210543 */
	const struct rtc_time tm = {
		.tm_sec = 43,
		.tm_min = 35,
		.tm_hour = 5,
		.tm_mday = 26,
		.tm_mon = 10,
		.tm_year = 118,
	};

	test("(%ptR?)", "%pt", &tm);
	test("2018-11-26T05:35:43", "%ptR", &tm);
	test("0118-10-26T05:35:43", "%ptRr", &tm);
	test("05:35:43|2018-11-26", "%ptRt|%ptRd", &tm, &tm);
	test("05:35:43|0118-10-26", "%ptRtr|%ptRdr", &tm, &tm);
	test("05:35:43|2018-11-26", "%ptRttr|%ptRdtr", &tm, &tm);
	test("05:35:43 tr|2018-11-26 tr", "%ptRt tr|%ptRd tr", &tm, &tm);
}

static void __init
struct_clk(void)
{
}

static void __init
large_bitmap(void)
{
	const int nbits = 1 << 16;
	unsigned long *bits = bitmap_zalloc(nbits, GFP_KERNEL);
	if (!bits)
		return;

	bitmap_set(bits, 1, 20);
	bitmap_set(bits, 60000, 15);
	test("1-20,60000-60014", "%*pbl", nbits, bits);
	bitmap_free(bits);
}

static void __init
bitmap(void)
{
	DECLARE_BITMAP(bits, 20);
	const int primes[] = {2,3,5,7,11,13,17,19};
	int i;

	bitmap_zero(bits, 20);
	test("00000|00000", "%20pb|%*pb", bits, 20, bits);
	test("|", "%20pbl|%*pbl", bits, 20, bits);

	for (i = 0; i < ARRAY_SIZE(primes); ++i)
		set_bit(primes[i], bits);
	test("a28ac|a28ac", "%20pb|%*pb", bits, 20, bits);
	test("2-3,5,7,11,13,17,19|2-3,5,7,11,13,17,19", "%20pbl|%*pbl", bits, 20, bits);

	bitmap_fill(bits, 20);
	test("fffff|fffff", "%20pb|%*pb", bits, 20, bits);
	test("0-19|0-19", "%20pbl|%*pbl", bits, 20, bits);

	large_bitmap();
}
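The %*pb / %*pbl specifiers exercised here take the bit count as the field width argument; a minimal usage sketch:

	DECLARE_BITMAP(mask, 16);

	bitmap_zero(mask, 16);
	bitmap_set(mask, 0, 3);
	printk("%*pb %*pbl\n", 16, mask, 16, mask);	/* prints "0007 0-2" */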
Example #28
static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
			      struct inode *dir, const char *name, int namelen,
			      struct inode *inode, int visible)
{
	struct cpu_key entry_key;
	struct reiserfs_de_head *deh;
	INITIALIZE_PATH(path);
	struct reiserfs_dir_entry de;
	DECLARE_BITMAP(bit_string, MAX_GENERATION_NUMBER + 1);
	int gen_number;
	char small_buf[32 + DEH_SIZE];	/* 48 bytes now; we avoid kmalloc
					   if we create a file with a short name */
	char *buffer;
	int buflen, paste_size;
	int retval;

	BUG_ON(!th->t_trans_id);

	/* cannot allow items to be added into a busy deleted directory */
	if (!namelen)
		return -EINVAL;

	if (namelen > REISERFS_MAX_NAME(dir->i_sb->s_blocksize))
		return -ENAMETOOLONG;

	/* each entry has unique key. compose it */
	make_cpu_key(&entry_key, dir,
		     get_third_component(dir->i_sb, name, namelen),
		     TYPE_DIRENTRY, 3);

	/* get memory for composing the entry */
	buflen = DEH_SIZE + ROUND_UP(namelen);
	if (buflen > sizeof(small_buf)) {
		buffer = kmalloc(buflen, GFP_NOFS);
		if (!buffer)
			return -ENOMEM;
	} else
		buffer = small_buf;

	paste_size =
	    (get_inode_sd_version(dir) ==
	     STAT_DATA_V1) ? (DEH_SIZE + namelen) : buflen;

	/* fill buffer : directory entry head, name[, dir objectid | , stat data | ,stat data, dir objectid ] */
	deh = (struct reiserfs_de_head *)buffer;
	deh->deh_location = 0;	/* JDM Endian safe if 0 */
	put_deh_offset(deh, cpu_key_k_offset(&entry_key));
	deh->deh_state = 0;	/* JDM Endian safe if 0 */
	/* put key (ino analog) to de */
	deh->deh_dir_id = INODE_PKEY(inode)->k_dir_id;	/* safe: k_dir_id is le */
	deh->deh_objectid = INODE_PKEY(inode)->k_objectid;	/* safe: k_objectid is le */

	/* copy name */
	memcpy((char *)(deh + 1), name, namelen);
	/* pad with 0s to the 4 byte boundary */
	padd_item((char *)(deh + 1), ROUND_UP(namelen), namelen);

	/* entry is ready to be pasted into tree, set 'visibility' and 'stat data in entry' attributes */
	mark_de_without_sd(deh);
	visible ? mark_de_visible(deh) : mark_de_hidden(deh);

	/* find the proper place for the new entry */
	memset(bit_string, 0, sizeof(bit_string));
	de.de_gen_number_bit_string = bit_string;
	retval = reiserfs_find_entry(dir, name, namelen, &path, &de);
	if (retval != NAME_NOT_FOUND) {
		if (buffer != small_buf)
			kfree(buffer);
		pathrelse(&path);

		if (retval == IO_ERROR) {
			return -EIO;
		}

		if (retval != NAME_FOUND) {
			reiserfs_error(dir->i_sb, "zam-7002",
				       "reiserfs_find_entry() returned "
				       "unexpected value (%d)", retval);
		}

		return -EEXIST;
	}

	gen_number =
	    find_first_zero_bit(bit_string,
				MAX_GENERATION_NUMBER + 1);
	if (gen_number > MAX_GENERATION_NUMBER) {
		/* there is no free generation number */
		reiserfs_warning(dir->i_sb, "reiserfs-7010",
				 "Congratulations! we have got hash function "
				 "screwed up");
		if (buffer != small_buf)
			kfree(buffer);
		pathrelse(&path);
		return -EBUSY;
	}
	/* adjust offset of directory entry */
	put_deh_offset(deh, SET_GENERATION_NUMBER(deh_offset(deh), gen_number));
	set_cpu_key_k_offset(&entry_key, deh_offset(deh));

	/* update max-hash-collisions counter in reiserfs_sb_info */
	PROC_INFO_MAX(th->t_super, max_hash_collisions, gen_number);

	if (gen_number != 0) {	/* we need to re-search for the insertion point */
		if (search_by_entry_key(dir->i_sb, &entry_key, &path, &de) !=
		    NAME_NOT_FOUND) {
			reiserfs_warning(dir->i_sb, "vs-7032",
					 "entry with this key (%K) already "
					 "exists", &entry_key);

			if (buffer != small_buf)
				kfree(buffer);
			pathrelse(&path);
			return -EBUSY;
		}
	}

	/* perform the insertion of the entry that we have prepared */
	retval =
	    reiserfs_paste_into_item(th, &path, &entry_key, dir, buffer,
				     paste_size);
	if (buffer != small_buf)
		kfree(buffer);
	if (retval) {
		reiserfs_check_path(&path);
		return retval;
	}

	dir->i_size += paste_size;
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	if (!S_ISDIR(inode->i_mode) && visible)
		/* reiserfs_mkdir or reiserfs_rename will do that by itself */
		reiserfs_update_sd(th, dir);

	reiserfs_check_path(&path);
	return 0;
}
Example #29
static irqreturn_t sh_keysc_isr(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct sh_keysc_priv *priv = platform_get_drvdata(pdev);
	struct sh_keysc_info *pdata = &priv->pdata;
	int keyout_nr = sh_keysc_mode[pdata->mode].keyout;
	int keyin_nr = sh_keysc_mode[pdata->mode].keyin;
	DECLARE_BITMAP(keys, SH_KEYSC_MAXKEYS);
	DECLARE_BITMAP(keys0, SH_KEYSC_MAXKEYS);
	DECLARE_BITMAP(keys1, SH_KEYSC_MAXKEYS);
	unsigned char keyin_set, tmp;
	int i, k, n;

	dev_dbg(&pdev->dev, "isr!\n");

	bitmap_fill(keys1, SH_KEYSC_MAXKEYS);
	bitmap_zero(keys0, SH_KEYSC_MAXKEYS);

	do {
		bitmap_zero(keys, SH_KEYSC_MAXKEYS);
		keyin_set = 0;

		sh_keysc_write(priv, KYCR2, KYCR2_IRQ_DISABLED);

		for (i = 0; i < keyout_nr; i++) {
			n = keyin_nr * i;

			/* drive one KEYOUT pin low, read KEYIN pins */
			sh_keysc_write(priv, KYOUTDR, 0xffff ^ (3 << (i * 2)));
			udelay(pdata->delay);
			tmp = sh_keysc_read(priv, KYINDR);

			/* set bit if key press has been detected */
			for (k = 0; k < keyin_nr; k++) {
				if (tmp & (1 << k))
					__set_bit(n + k, keys);
			}

			/* keep track of which KEYIN bits that have been set */
			keyin_set |= tmp ^ ((1 << keyin_nr) - 1);
		}

		sh_keysc_level_mode(priv, keyin_set);

		bitmap_complement(keys, keys, SH_KEYSC_MAXKEYS);
		bitmap_and(keys1, keys1, keys, SH_KEYSC_MAXKEYS);
		bitmap_or(keys0, keys0, keys, SH_KEYSC_MAXKEYS);

		sh_keysc_map_dbg(&pdev->dev, keys, "keys");

	} while (sh_keysc_read(priv, KYCR2) & 0x01);

	sh_keysc_map_dbg(&pdev->dev, priv->last_keys, "last_keys");
	sh_keysc_map_dbg(&pdev->dev, keys0, "keys0");
	sh_keysc_map_dbg(&pdev->dev, keys1, "keys1");

	for (i = 0; i < SH_KEYSC_MAXKEYS; i++) {
		k = pdata->keycodes[i];
		if (!k)
			continue;

		if (test_bit(i, keys0) == test_bit(i, priv->last_keys))
			continue;

		if (test_bit(i, keys1) || test_bit(i, keys0)) {
			input_event(priv->input, EV_KEY, k, 1);
			__set_bit(i, priv->last_keys);
		}

		if (!test_bit(i, keys1)) {
			input_event(priv->input, EV_KEY, k, 0);
			__clear_bit(i, priv->last_keys);
		}

	}
	input_sync(priv->input);

	return IRQ_HANDLED;
}
Example #30
static void build_pci_bus_end(PCIBus *bus, void *bus_state)
{
    AcpiBuildPciBusHotplugState *child = bus_state;
    AcpiBuildPciBusHotplugState *parent = child->parent;
    GArray *bus_table = build_alloc_array();
    DECLARE_BITMAP(slot_hotplug_enable, PCI_SLOT_MAX);
    DECLARE_BITMAP(slot_device_present, PCI_SLOT_MAX);
    DECLARE_BITMAP(slot_device_system, PCI_SLOT_MAX);
    DECLARE_BITMAP(slot_device_vga, PCI_SLOT_MAX);
    DECLARE_BITMAP(slot_device_qxl, PCI_SLOT_MAX);
    uint8_t op;
    int i;
    QObject *bsel;
    GArray *method;
    bool bus_hotplug_support = false;

    if (bus->parent_dev) {
        op = 0x82; /* DeviceOp */
        build_append_nameseg(bus_table, "S%.02X_",
                             bus->parent_dev->devfn);
        build_append_byte(bus_table, 0x08); /* NameOp */
        build_append_nameseg(bus_table, "_SUN");
        build_append_value(bus_table, PCI_SLOT(bus->parent_dev->devfn), 1);
        build_append_byte(bus_table, 0x08); /* NameOp */
        build_append_nameseg(bus_table, "_ADR");
        build_append_value(bus_table, (PCI_SLOT(bus->parent_dev->devfn) << 16) |
                           PCI_FUNC(bus->parent_dev->devfn), 4);
    } else {
        op = 0x10; /* ScopeOp */
        build_append_nameseg(bus_table, "PCI0");
    }

    bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL);
    if (bsel) {
        build_append_byte(bus_table, 0x08); /* NameOp */
        build_append_nameseg(bus_table, "BSEL");
        build_append_int(bus_table, qint_get_int(qobject_to_qint(bsel)));
        memset(slot_hotplug_enable, 0xff, sizeof slot_hotplug_enable);
    } else {
        /* No bsel - no slots are hot-pluggable */
        memset(slot_hotplug_enable, 0x00, sizeof slot_hotplug_enable);
    }

    memset(slot_device_present, 0x00, sizeof slot_device_present);
    memset(slot_device_system, 0x00, sizeof slot_device_system);
    memset(slot_device_vga, 0x00, sizeof slot_device_vga);
    memset(slot_device_qxl, 0x00, sizeof slot_device_qxl);

    for (i = 0; i < ARRAY_SIZE(bus->devices); i += PCI_FUNC_MAX) {
        DeviceClass *dc;
        PCIDeviceClass *pc;
        PCIDevice *pdev = bus->devices[i];
        int slot = PCI_SLOT(i);

        if (!pdev) {
            continue;
        }

        set_bit(slot, slot_device_present);
        pc = PCI_DEVICE_GET_CLASS(pdev);
        dc = DEVICE_GET_CLASS(pdev);

        if (pc->class_id == PCI_CLASS_BRIDGE_ISA) {
            set_bit(slot, slot_device_system);
        }

        if (pc->class_id == PCI_CLASS_DISPLAY_VGA) {
            set_bit(slot, slot_device_vga);

            if (object_dynamic_cast(OBJECT(pdev), "qxl-vga")) {
                set_bit(slot, slot_device_qxl);
            }
        }

        if (!dc->hotpluggable || pc->is_bridge) {
            clear_bit(slot, slot_hotplug_enable);
        }
    }

    /* Append Device object for each slot */
    for (i = 0; i < PCI_SLOT_MAX; i++) {
        bool can_eject = test_bit(i, slot_hotplug_enable);
        bool present = test_bit(i, slot_device_present);
        bool vga = test_bit(i, slot_device_vga);
        bool qxl = test_bit(i, slot_device_qxl);
        bool system = test_bit(i, slot_device_system);
        if (can_eject) {
            void *pcihp = acpi_data_push(bus_table,
                                         ACPI_PCIHP_SIZEOF);
            memcpy(pcihp, ACPI_PCIHP_AML, ACPI_PCIHP_SIZEOF);
            patch_pcihp(i, pcihp);
            bus_hotplug_support = true;
        } else if (qxl) {
            void *pcihp = acpi_data_push(bus_table,
                                         ACPI_PCIQXL_SIZEOF);
            memcpy(pcihp, ACPI_PCIQXL_AML, ACPI_PCIQXL_SIZEOF);
            patch_pciqxl(i, pcihp);
        } else if (vga) {
            void *pcihp = acpi_data_push(bus_table,
                                         ACPI_PCIVGA_SIZEOF);
            memcpy(pcihp, ACPI_PCIVGA_AML, ACPI_PCIVGA_SIZEOF);
            patch_pcivga(i, pcihp);
        } else if (system) {
            /* Nothing to do: system devices are in DSDT. */
        } else if (present) {
            void *pcihp = acpi_data_push(bus_table,
                                         ACPI_PCINOHP_SIZEOF);
            memcpy(pcihp, ACPI_PCINOHP_AML, ACPI_PCINOHP_SIZEOF);
            patch_pcinohp(i, pcihp);
        }
    }

    if (bsel) {
        method = build_alloc_method("DVNT", 2);

        for (i = 0; i < PCI_SLOT_MAX; i++) {
            GArray *notify;
            uint8_t op;

            if (!test_bit(i, slot_hotplug_enable)) {
                continue;
            }

            notify = build_alloc_array();
            op = 0xA0; /* IfOp */

            build_append_byte(notify, 0x7B); /* AndOp */
            build_append_byte(notify, 0x68); /* Arg0Op */
            build_append_int(notify, 0x1 << i);
            build_append_byte(notify, 0x00); /* NullName */
            build_append_byte(notify, 0x86); /* NotifyOp */
            build_append_nameseg(notify, "S%.02X_", PCI_DEVFN(i, 0));
            build_append_byte(notify, 0x69); /* Arg1Op */

            /* Pack it up */
            build_package(notify, op, 0);

            build_append_array(method, notify);

            build_free_array(notify);
        }

        build_append_and_cleanup_method(bus_table, method);
    }

    /* Append PCNT method to notify about events on local and child buses.
     * Add unconditionally for root since DSDT expects it.
     */
    if (bus_hotplug_support || child->notify_table->len || !bus->parent_dev) {
        method = build_alloc_method("PCNT", 0);

        /* If bus supports hotplug select it and notify about local events */
        if (bsel) {
            build_append_byte(method, 0x70); /* StoreOp */
            build_append_int(method, qint_get_int(qobject_to_qint(bsel)));
            build_append_nameseg(method, "BNUM");
            build_append_nameseg(method, "DVNT");
            build_append_nameseg(method, "PCIU");
            build_append_int(method, 1); /* Device Check */
            build_append_nameseg(method, "DVNT");
            build_append_nameseg(method, "PCID");
            build_append_int(method, 3); /* Eject Request */
        }

        /* Notify about child bus events in any case */
        build_append_array(method, child->notify_table);

        build_append_and_cleanup_method(bus_table, method);

        /* Append description of child buses */
        build_append_array(bus_table, child->device_table);

        /* Pack it up */
        if (bus->parent_dev) {
            build_extop_package(bus_table, op);
        } else {
            build_package(bus_table, op, 0);
        }

        /* Append our bus description to parent table */
        build_append_array(parent->device_table, bus_table);

        /* Also tell parent how to notify us, invoking PCNT method.
         * At the moment this is not needed for root as we have a single root.
         */
        if (bus->parent_dev) {
            build_append_byte(parent->notify_table, '^'); /* ParentPrefixChar */
            build_append_byte(parent->notify_table, 0x2E); /* DualNamePrefix */
            build_append_nameseg(parent->notify_table, "S%.02X_",
                                 bus->parent_dev->devfn);
            build_append_nameseg(parent->notify_table, "PCNT");
        }
    }

    build_free_array(bus_table);
    build_pci_bus_state_cleanup(child);
    g_free(child);
}