Example #1
int __bitmap_weight(const unsigned long *bitmap, int bits)
{
	int k, w = 0, lim = bits/BITS_PER_LONG;

	for (k = 0; k < lim; k++)
		w += hweight_long(bitmap[k]);

	if (bits % BITS_PER_LONG)
		w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));

	return w;
}
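The subtlety here is the final partial word: BITMAP_LAST_WORD_MASK(bits) clears the unused high bits of the last long so they cannot inflate the count. Below is a minimal userspace sketch of the same idea; BITS_PER_LONG, LAST_WORD_MASK and the use of __builtin_popcountl in place of hweight_long are stand-ins, not the kernel's definitions.

#include <stdio.h>

#define BITS_PER_LONG ((int)(8 * sizeof(unsigned long)))
/* Keep only the low (bits % BITS_PER_LONG) bits of the last word. */
#define LAST_WORD_MASK(bits) (~0UL >> (-(bits) & (BITS_PER_LONG - 1)))

static int bitmap_weight(const unsigned long *bitmap, int bits)
{
	int k, w = 0, lim = bits / BITS_PER_LONG;

	for (k = 0; k < lim; k++)
		w += __builtin_popcountl(bitmap[k]);

	if (bits % BITS_PER_LONG)
		w += __builtin_popcountl(bitmap[k] & LAST_WORD_MASK(bits));

	return w;
}

int main(void)
{
	/* Assumes a 64-bit unsigned long. */
	unsigned long map[2] = { ~0UL, ~0UL };

	/* Only 70 bits belong to the bitmap; the rest of word 1 is masked. */
	printf("%d\n", bitmap_weight(map, 70)); /* prints 70 */
	return 0;
}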
Example #2
static unsigned fd_set_popcount(fd_set *set, unsigned n)
{
	unsigned count = 0, i;

	for (i = 0; i < __FDELT__(n); i++)
		if (set->fds_bits[i])
			count += hweight_long(set->fds_bits[i]);

	if (i < __FDSET_LONGS__ && (set->fds_bits[i] & (__FDMASK__(n) - 1)))
		count += hweight_long(set->fds_bits[i] & (__FDMASK__(n) - 1));

	return count;
}
Example #3
int register_trapped_io(struct trapped_io *tiop)
{
	struct resource *res;
	unsigned long len = 0, flags = 0;
	struct page *pages[TRAPPED_PAGES_MAX];
	int k, n;

	if (unlikely(trapped_io_disable))
		return 0;

	/* structure must be page aligned */
	if ((unsigned long)tiop & (PAGE_SIZE - 1))
		goto bad;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len += roundup((res->end - res->start) + 1, PAGE_SIZE);
		flags |= res->flags;
	}

	/* support IORESOURCE_IO _or_ MEM, not both */
	if (hweight_long(flags) != 1)
		goto bad;

	n = len >> PAGE_SHIFT;

	if (n >= TRAPPED_PAGES_MAX)
		goto bad;

	for (k = 0; k < n; k++)
		pages[k] = virt_to_page(tiop);

	tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
	if (!tiop->virt_base)
		goto bad;

	len = 0;
	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
		       (unsigned long)(tiop->virt_base + len),
		       res->flags & IORESOURCE_IO ? "io" : "mmio",
		       (unsigned long)res->start);
		len += roundup((res->end - res->start) + 1, PAGE_SIZE);
	}

	tiop->magic = IO_TRAPPED_MAGIC;
	INIT_LIST_HEAD(&tiop->list);
	spin_lock_irq(&trapped_lock);
	if (flags & IORESOURCE_IO)
		list_add(&tiop->list, &trapped_io);
	if (flags & IORESOURCE_MEM)
		list_add(&tiop->list, &trapped_mem);
	spin_unlock_irq(&trapped_lock);

	return 0;
 bad:
	pr_warning("unable to install trapped io filter\n");
	return -1;
}
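The hweight_long(flags) != 1 test is a compact way of requiring that exactly one flag bit is set, i.e. the resources are all IO or all MEM but never a mix. A standalone sketch of that check, with made-up flag values standing in for the IORESOURCE_* constants:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for IORESOURCE_IO / IORESOURCE_MEM. */
#define RES_IO  0x1UL
#define RES_MEM 0x2UL

/* True iff exactly one bit is set in flags. */
static bool exactly_one_type(unsigned long flags)
{
	return __builtin_popcountl(flags) == 1;
}

int main(void)
{
	printf("%d\n", exactly_one_type(RES_IO));           /* 1 */
	printf("%d\n", exactly_one_type(RES_IO | RES_MEM)); /* 0, mixed */
	printf("%d\n", exactly_one_type(0));                /* 0, no type */
	return 0;
}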
Example #4
/**
 * max1363_ring_preenable() - setup the parameters of the ring before enabling
 *
 * Setting the number of bytes per datum is somewhat involved because
 * this driver currently ensures that the timestamp is stored at an
 * 8 byte boundary.
 **/
static int max1363_ring_preenable(struct iio_dev *indio_dev)
{
	struct max1363_state *st = indio_dev->dev_data;
	struct iio_ring_buffer *ring = indio_dev->ring;
	size_t d_size;
	unsigned long numvals;

	/*
	 * Need to figure out the current mode based upon the requested
	 * scan mask in iio_dev
	 */
	st->current_mode = max1363_match_mode(ring->scan_mask,
					st->chip_info);
	if (!st->current_mode)
		return -EINVAL;

	max1363_set_scan_mode(st);

	numvals = hweight_long(st->current_mode->modemask);
	if (ring->access.set_bytes_per_datum) {
		if (st->chip_info->bits != 8)
			d_size = numvals*2 + sizeof(s64);
		else
			d_size = numvals + sizeof(s64);
		if (d_size % 8)
			d_size += 8 - (d_size % 8);
		ring->access.set_bytes_per_datum(ring, d_size);
	}

	return 0;
}
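The trailing adjustment pads d_size up to the next multiple of 8 so the s64 timestamp appended after the sample data lands on an 8 byte boundary. A worked sketch of just that arithmetic, outside the driver:

#include <stdio.h>

/* Round len up to the next multiple of 8 so an appended s64 timestamp
 * is naturally aligned. */
static size_t pad_for_timestamp(size_t len)
{
	if (len % 8)
		len += 8 - (len % 8);
	return len;
}

int main(void)
{
	/* Three 12-bit channels at 2 bytes each plus an 8-byte timestamp:
	 * 3*2 + 8 = 14, padded up to 16. */
	printf("%zu\n", pad_for_timestamp(3 * 2 + 8)); /* prints 16 */
	return 0;
}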
Example #5
static const char *
brcmstb_gisb_master_to_str(struct brcmstb_gisb_arb_device *gdev,
						u32 masters)
{
	u32 mask = gdev->valid_mask & masters;

	if (hweight_long(mask) != 1)
		return NULL;

	return gdev->master_names[ffs(mask) - 1];
}
Example #6
static int ps3stor_probe_access(struct ps3_storage_device *dev)
{
	int res, error;
	unsigned int i;
	unsigned long n;

	if (dev->sbd.match_id == PS3_MATCH_ID_STOR_ROM) {
		/* special case: CD-ROM is assumed always accessible */
		dev->accessible_regions = 1;
		return 0;
	}

	error = -EPERM;
	for (i = 0; i < dev->num_regions; i++) {
		dev_dbg(&dev->sbd.core,
			"%s:%u: checking accessibility of region %u\n",
			__func__, __LINE__, i);

		dev->region_idx = i;
		res = ps3stor_read_write_sectors(dev, dev->bounce_lpar, 0, 1,
						 0);
		if (res) {
			dev_dbg(&dev->sbd.core, "%s:%u: read failed, "
				"region %u is not accessible\n", __func__,
				__LINE__, i);
			continue;
		}

		dev_dbg(&dev->sbd.core, "%s:%u: region %u is accessible\n",
			__func__, __LINE__, i);
		set_bit(i, &dev->accessible_regions);

		/* We can access at least one region */
		error = 0;
	}
	if (error)
		return error;

	n = hweight_long(dev->accessible_regions);
	if (n > 1)
		dev_info(&dev->sbd.core,
			 "%s:%u: %lu accessible regions found. Only the first "
			 "one will be used",
			 __func__, __LINE__, n);
	dev->region_idx = __ffs(dev->accessible_regions);
	dev_info(&dev->sbd.core,
		 "First accessible region has index %u start %lu size %lu\n",
		 dev->region_idx, dev->regions[dev->region_idx].start,
		 dev->regions[dev->region_idx].size);

	return 0;
}
Example #7
static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
				      struct ib_gid_table *table)
{
	unsigned int i;
	unsigned long roce_gid_type_mask;
	unsigned int num_default_gids;

	roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	num_default_gids = hweight_long(roce_gid_type_mask);
	/* Reserve starting indices for default GIDs */
	for (i = 0; i < num_default_gids && i < table->sz; i++)
		table->default_gid_indices |= BIT(i);
}
Example #8
/**
 * max1363_poll_bh_to_ring() - bh of trigger launched polling to ring buffer
 * @work_s:	the work struct through which this was scheduled
 *
 * Currently there is no option in this driver to disable the saving of
 * timestamps within the ring.
 * I think the "one copy of this at a time" restriction was to avoid
 * problems if the trigger was set far too high and the reads then
 * locked up the computer.
 **/
static void max1363_poll_bh_to_ring(struct work_struct *work_s)
{
	struct max1363_state *st = container_of(work_s, struct max1363_state,
						  poll_work);
	struct iio_dev *indio_dev = st->indio_dev;
	struct iio_sw_ring_buffer *sw_ring = iio_to_sw_ring(indio_dev->ring);
	s64 time_ns;
	__u8 *rxbuf;
	int b_sent;
	size_t d_size;
	unsigned long numvals = hweight_long(st->current_mode->modemask);

	/* Ensure the timestamp is 8 byte aligned */
	if (st->chip_info->bits != 8)
		d_size = numvals*2 + sizeof(s64);
	else
		d_size = numvals + sizeof(s64);
	if (d_size % sizeof(s64))
		d_size += sizeof(s64) - (d_size % sizeof(s64));

	/* Ensure only one copy of this function running at a time */
	if (atomic_inc_return(&st->protect_ring) > 1)
		return;

	/* Monitor mode prevents reading. Whilst it is not currently
	 * implemented, we might as well have this test here in the
	 * meantime as it does no harm.
	 */
	if (numvals == 0)
		return;

	rxbuf = kmalloc(d_size, GFP_KERNEL);
	if (rxbuf == NULL)
		return;
	if (st->chip_info->bits != 8)
		b_sent = i2c_master_recv(st->client, rxbuf, numvals*2);
	else
		b_sent = i2c_master_recv(st->client, rxbuf, numvals);
	if (b_sent < 0)
		goto done;

	time_ns = iio_get_time_ns();

	memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));

	indio_dev->ring->access.store_to(&sw_ring->buf, rxbuf, time_ns);
done:
	kfree(rxbuf);
	atomic_dec(&st->protect_ring);
}
Example #9
static irqreturn_t max1363_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->private_data;
	struct max1363_state *st = iio_priv(indio_dev);
	s64 time_ns;
	__u8 *rxbuf;
	int b_sent;
	size_t d_size;
	unsigned long numvals = hweight_long(st->current_mode->modemask);

	/* Ensure the timestamp is 8 byte aligned */
	if (st->chip_info->bits != 8)
		d_size = numvals*2 + sizeof(s64);
	else
		d_size = numvals + sizeof(s64);
	if (d_size % sizeof(s64))
		d_size += sizeof(s64) - (d_size % sizeof(s64));

	/* Monitor mode prevents reading. Whilst it is not currently
	 * implemented, we might as well have this test here in the
	 * meantime as it does no harm.
	 */
	if (numvals == 0)
		return IRQ_HANDLED;

	rxbuf = kmalloc(d_size, GFP_KERNEL);
	if (rxbuf == NULL)
		goto done; /* an irq handler cannot return -ENOMEM */
	if (st->chip_info->bits != 8)
		b_sent = i2c_master_recv(st->client, rxbuf, numvals*2);
	else
		b_sent = i2c_master_recv(st->client, rxbuf, numvals);
	if (b_sent < 0)
		goto done;

	time_ns = iio_get_time_ns();

	memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));

	indio_dev->ring->access->store_to(indio_dev->ring, rxbuf, time_ns);
done:
	iio_trigger_notify_done(indio_dev->trig);
	kfree(rxbuf);

	return IRQ_HANDLED;
}
Example #10
static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	loff_t *spos = (loff_t *)v;
	(*pos)++;
	if (*pos > FINAL) {
		kfree(v);
		return NULL;
	}
	while (hweight_long(*pos) != 4) {
		(*pos)++;
		if (*pos > FINAL) {
			kfree(v);
			return NULL;
		}
	}
	*spos = *pos;
	return spos;
}
Example #11
static void *ct_seq_start(struct seq_file *s, loff_t *pos)
{
	loff_t *spos;
	if (*pos > FINAL) {
		return NULL;
	}
	while (hweight_long(*pos) != 4) {
		(*pos)++;
		if (*pos > FINAL) {
			return NULL;
		}
	}
	spos = kmalloc(sizeof *spos, GFP_KERNEL);
	if (!spos)
		return ERR_PTR(-ENOMEM);
	*spos = *pos;
	return spos;
}
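Both seq_file callbacks advance the position until it has exactly four set bits, so the iterator visits 15, 23, 27, 29, 30, ... A small userspace sketch, with __builtin_popcountl standing in for hweight_long, that prints the first positions this iterator would yield:

#include <stdio.h>

int main(void)
{
	unsigned long pos;
	int printed = 0;

	/* First eight positions with exactly four set bits, in the order
	 * the ct_seq iterator would visit them. */
	for (pos = 0; printed < 8; pos++) {
		if (__builtin_popcountl(pos) == 4) {
			printf("%lu ", pos); /* 15 23 27 29 30 39 43 45 */
			printed++;
		}
	}
	printf("\n");
	return 0;
}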
Example #12
static int lis3lv02d_set_odr(int rate)
{
	u8 ctrl;
	int i, len, shift;

	lis3_dev.read(&lis3_dev, CTRL_REG1, &ctrl);
	ctrl &= ~lis3_dev.odr_mask;
	len = 1 << hweight_long(lis3_dev.odr_mask); /* # of possible values */
	shift = ffs(lis3_dev.odr_mask) - 1;

	for (i = 0; i < len; i++)
		if (lis3_dev.odrs[i] == rate) {
			lis3_dev.write(&lis3_dev, CTRL_REG1,
					ctrl | (i << shift));
			return 0;
		}
	return -EINVAL;
}
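The mask handling here is a generic register-field idiom: a field of hweight_long(mask) bits holds 1 << hweight_long(mask) distinct values, and ffs(mask) - 1 is the shift that places an index into the field. A sketch of the same encode step with an illustrative mask (not the real LIS3 register layout):

#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void)
{
	unsigned char ctrl = 0xA5;     /* illustrative register contents */
	unsigned char odr_mask = 0x30; /* hypothetical 2-bit field, bits 5:4 */
	int len = 1 << __builtin_popcount(odr_mask); /* 4 possible rates */
	int shift = ffs(odr_mask) - 1;               /* field starts at bit 4 */
	int i = 1;                                   /* rate index to program */

	ctrl = (ctrl & ~odr_mask) | (i << shift);
	printf("len=%d shift=%d ctrl=0x%02X\n", len, shift, ctrl);
	/* prints len=4 shift=4 ctrl=0x95 */
	return 0;
}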
Example #13
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			      struct sahara_sha_reqctx *rctx)
{
	u32 hdr = 0;

	hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}
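The final test gives the header odd parity: if the bits set so far have even weight, the parity bit is added. A minimal sketch of that invariant; the parity bit position is illustrative, not the SAHARA layout:

#include <inttypes.h>
#include <stdio.h>

#define PARITY_BIT (UINT32_C(1) << 31) /* illustrative bit position */

/* Force hdr to odd overall parity, mirroring the hweight_long % 2 test. */
static uint32_t set_odd_parity(uint32_t hdr)
{
	if (__builtin_popcount(hdr) % 2 == 0)
		hdr |= PARITY_BIT;
	return hdr;
}

int main(void)
{
	uint32_t hdr = set_odd_parity(0x3); /* even weight, parity bit added */

	printf("0x%08" PRIX32 " weight=%d\n", hdr, __builtin_popcount(hdr));
	/* prints 0x80000003 weight=3 */
	return 0;
}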
Example #14
static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* not yet implemented */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}
Example #15
/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid for the current set of owners; each set bit in the
 *            flag indicates a unique owner
 * @newvm:    array having the new owners and corresponding permission
 *            flags
 * @dest_cnt: number of owners in the next set.
 *
 * Return negative errno on failure, 0 on success, with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			struct qcom_scm_vmperm *newvm, int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret;
	int len;
	int i;

	src_sz = hweight_long(*srcvm) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill source vmid detail */
	src = ptr;
	len = hweight_long(*srcvm);
	for (i = 0; i < len; i++) {
		src[i] = cpu_to_le32(ffs(*srcvm) - 1);
		*srcvm ^= 1 << (ffs(*srcvm) - 1);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
	mem_to_map[0].mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of the next set of vmids */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++) {
		destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
		destvm[i].perm = cpu_to_le32(newvm[i].perm);
		destvm[i].ctx = 0;
		destvm[i].ctx_size = 0;
		next_vm |= BIT(newvm[i].vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d.\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
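The source-vmid loop is the standard pattern for flattening a bitmask into a list of bit indices: ffs() locates the lowest set bit, the index is recorded, and the bit is cleared until hweight_long() iterations have consumed the mask. A standalone sketch of just that decomposition:

#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void)
{
	unsigned int srcvm = 0x29; /* bits 0, 3 and 5 set: three owners */
	int len = __builtin_popcount(srcvm);
	int i;

	for (i = 0; i < len; i++) {
		int bit = ffs(srcvm) - 1; /* lowest remaining owner id */

		printf("owner %d: vmid %d\n", i, bit);
		srcvm ^= 1U << bit;       /* clear it and continue */
	}
	return 0;
}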
Example #16
static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct brcmstb_gisb_arb_device *gdev;
	const struct of_device_id *of_id;
	struct resource *r;
	int err, timeout_irq, tea_irq;
	unsigned int num_masters, j = 0;
	int i, first, last;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	timeout_irq = platform_get_irq(pdev, 0);
	tea_irq = platform_get_irq(pdev, 1);

	gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL);
	if (!gdev)
		return -ENOMEM;

	mutex_init(&gdev->lock);
	INIT_LIST_HEAD(&gdev->next);

	gdev->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(gdev->base))
		return PTR_ERR(gdev->base);

	of_id = of_match_node(brcmstb_gisb_arb_of_match, dn);
	if (!of_id) {
		pr_err("failed to look up compatible string\n");
		return -EINVAL;
	}
	gdev->gisb_offsets = of_id->data;
	gdev->big_endian = of_device_is_big_endian(dn);

	err = devm_request_irq(&pdev->dev, timeout_irq,
				brcmstb_gisb_timeout_handler, 0, pdev->name,
				gdev);
	if (err < 0)
		return err;

	err = devm_request_irq(&pdev->dev, tea_irq,
				brcmstb_gisb_tea_handler, 0, pdev->name,
				gdev);
	if (err < 0)
		return err;

	/* If we do not have a valid mask, assume all masters are enabled */
	if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask",
				&gdev->valid_mask))
		gdev->valid_mask = 0xffffffff;

	/* Proceed with reading the literal names if we agree on the
	 * number of masters
	 */
	num_masters = of_property_count_strings(dn,
			"brcm,gisb-arb-master-names");
	if (hweight_long(gdev->valid_mask) == num_masters) {
		first = ffs(gdev->valid_mask) - 1;
		last = fls(gdev->valid_mask) - 1;

		for (i = first; i < last; i++) {
			if (!(gdev->valid_mask & BIT(i)))
				continue;

			of_property_read_string_index(dn,
					"brcm,gisb-arb-master-names", j,
					&gdev->master_names[i]);
			j++;
		}
	}

	err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
	if (err)
		return err;

	platform_set_drvdata(pdev, gdev);

	list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);

#ifdef CONFIG_ARM
	hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
			"imprecise external abort");
#endif

	dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
			gdev->base, timeout_irq, tea_irq);

	return 0;
}
Example #17
static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2, u64 type, u64 regs_user,
			 u64 read_format)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		for (i = 0; i < s1->branch_stack->nr; i++)
			MCOMP(branch_stack->entries[i]);
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(regs_user) * sizeof(u64);

		COMP(user_regs.abi);
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	return true;
}
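The PERF_SAMPLE_REGS_USER branch relies on the sampled register block containing one u64 per set bit of the register mask, hence sz = hweight_long(regs_user) * sizeof(u64). A quick illustration of that sizing with a hypothetical mask:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t regs_user = 0x10F; /* hypothetical mask: 5 registers */
	size_t sz = __builtin_popcountll(regs_user) * sizeof(uint64_t);

	printf("%zu bytes\n", sz); /* 40: one u64 per sampled register */
	return 0;
}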
Example #18
static int msm_dai_slim_dev_probe(struct slim_device *sdev)
{
	int rc, i;
	u8 max_channels;
	u32 apps_ch_pipes;
	struct msm_dai_slim_drv_data *drv_data;
	struct device *dev = &sdev->dev;
	struct snd_soc_dai_driver *dai_drv;

	if (!dev->of_node ||
	    !dev->of_node->parent) {
		dev_err(dev,
			"%s: Invalid %s\n", __func__,
			(!dev->of_node) ? "of_node" : "parent_of_node");
		return -EINVAL;
	}

	rc = of_property_read_u32(dev->of_node->parent,
					 "qcom,apps-ch-pipes",
					 &apps_ch_pipes);
	if (rc) {
		dev_err(dev,
			"%s: Failed to lookup property %s in node %s, err = %d\n",
			__func__, "qcom,apps-ch-pipes",
			dev->of_node->parent->full_name, rc);
		goto err_ret;
	}

	max_channels = hweight_long(apps_ch_pipes);
	if (max_channels <= 0) {
		dev_err(dev,
			"%s: Invalid apps owned ports %d\n",
			__func__, max_channels);
		goto err_ret;
	}

	dev_dbg(dev, "%s: max channels = %u\n",
		__func__, max_channels);

	for (i = 0; i < ARRAY_SIZE(msm_slim_dais); i++) {
		dai_drv = &msm_slim_dais[i];
		dai_drv->capture.channels_max = max_channels;
		dai_drv->playback.channels_max = max_channels;
	}

	drv_data = devm_kzalloc(dev, sizeof(*drv_data),
				GFP_KERNEL);
	if (!drv_data) {
		dev_err(dev, "%s: dai driver struct alloc failed\n",
			__func__);
		rc = -ENOMEM;
		goto err_ret;
	}

	drv_data->sdev = sdev;
	drv_data->num_dais = NUM_SLIM_DAIS;

	rc = msm_dai_slim_populate_dai_data(dev, drv_data);
	if (rc) {
		dev_err(dev,
			"%s: failed to setup dai_data, err = %d\n",
			__func__, rc);
		goto err_populate_dai;
	}

	rc = snd_soc_register_component(&sdev->dev, &msm_dai_slim_component,
					msm_slim_dais, NUM_SLIM_DAIS);

	if (IS_ERR_VALUE(rc)) {
		dev_err(dev, "%s: failed to register DAI, err = %d\n",
			__func__, rc);
		goto err_reg_comp;
	}

	dev_set_drvdata(dev, drv_data);
	return rc;

err_reg_comp:
	msm_dai_slim_remove_dai_data(dev, drv_data);

err_populate_dai:
	devm_kfree(dev, drv_data);

err_ret:
	return rc;
}
Example #19
static int brcmstb_gisb_arb_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct brcmstb_gisb_arb_device *gdev;
	struct resource *r;
	int err, timeout_irq, tea_irq;
	unsigned int num_masters, j = 0;
	int i, first, last;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	timeout_irq = platform_get_irq(pdev, 0);
	tea_irq = platform_get_irq(pdev, 1);

	gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL);
	if (!gdev)
		return -ENOMEM;

	mutex_init(&gdev->lock);
	INIT_LIST_HEAD(&gdev->next);

	gdev->base = devm_request_and_ioremap(&pdev->dev, r);
	if (!gdev->base)
		return -ENOMEM;

	err = devm_request_irq(&pdev->dev, timeout_irq,
				brcmstb_gisb_timeout_handler, 0, pdev->name,
				gdev);
	if (err < 0)
		return err;

	err = devm_request_irq(&pdev->dev, tea_irq,
				brcmstb_gisb_tea_handler, 0, pdev->name,
				gdev);
	if (err < 0)
		return err;

	/* If we do not have a valid mask, assume all masters are enabled */
	if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask",
				&gdev->valid_mask))
		gdev->valid_mask = 0xffffffff;

	/* Proceed with reading the literal names if we agree on the
	 * number of masters
	 */
	num_masters = of_property_count_strings(dn,
			"brcm,gisb-arb-master-names");
	if (hweight_long(gdev->valid_mask) == num_masters) {
		first = ffs(gdev->valid_mask) - 1;
		last = fls(gdev->valid_mask) - 1;

		for (i = first; i < last; i++) {
			if (!(gdev->valid_mask & BIT(i)))
				continue;

			of_property_read_string_index(dn,
					"brcm,gisb-arb-master-names", j,
					&gdev->master_names[i]);
			j++;
		}
	}

	err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
	if (err)
		return err;

	platform_set_drvdata(pdev, gdev);

	list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);

	dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
			gdev->base, timeout_irq, tea_irq);

	return 0;
}
Example #20
static int omap_hwspinlock_probe(struct platform_device *pdev)
{
	struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
	struct hwspinlock_device *bank;
	struct hwspinlock *hwlock;
	struct resource *res;
	void __iomem *io_base;
	int num_locks, i, ret;

	if (!pdata)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	io_base = ioremap(res->start, resource_size(res));
	if (!io_base)
		return -ENOMEM;

	/* Determine number of locks */
	i = readl(io_base + SYSSTATUS_OFFSET);
	i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET;

	/* one of the four lsb's must be set, and nothing else */
	if (hweight_long(i & 0xf) != 1 || i > 8) {
		ret = -EINVAL;
		goto iounmap_base;
	}

	num_locks = i * 32; /* actual number of locks in this device */

	bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
	if (!bank) {
		ret = -ENOMEM;
		goto iounmap_base;
	}

	platform_set_drvdata(pdev, bank);

	for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
		hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;

	/*
	 * runtime PM will make sure the clock of this module is
	 * enabled iff at least one lock is requested
	 */
	pm_runtime_enable(&pdev->dev);

	ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
						pdata->base_id, num_locks);
	if (ret)
		goto reg_fail;

	return 0;

reg_fail:
	pm_runtime_disable(&pdev->dev);
	kfree(bank);
iounmap_base:
	iounmap(io_base);
	return ret;
}
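The sanity check accepts only the NUMLOCKS encodings 1, 2, 4 and 8: exactly one bit set in the low nibble and nothing above 8, after which i * 32 yields the lock count. A userspace sketch of the same validation:

#include <stdbool.h>
#include <stdio.h>

/* Accept only the encodings 1, 2, 4 or 8, as the probe does. */
static bool numlocks_field_valid(unsigned long i)
{
	return __builtin_popcountl(i & 0xf) == 1 && i <= 8;
}

int main(void)
{
	unsigned long i;

	for (i = 0; i <= 9; i++)
		printf("%lu -> %s (%lu locks)\n", i,
		       numlocks_field_valid(i) ? "valid" : "invalid",
		       numlocks_field_valid(i) ? i * 32 : 0);
	/* valid encodings: 1, 2, 4, 8 -> 32, 64, 128, 256 locks */
	return 0;
}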