/* FIXME: if no hwspinlock is available, fall back to a plain spinlock */
static void sci_glb_lock(unsigned long *flags, unsigned long *hw_flags)
{
	struct hwspinlock *lock = arch_get_hwlock(HWLOCK_GLB);

	/* a timeout of -1 wraps to UINT_MAX ms, i.e. wait (nearly) forever */
	if (lock)
		WARN_ON(IS_ERR_VALUE(hwspin_lock_timeout_irqsave(lock, -1, flags)));
	else
		arch_hwlock_fast(HWLOCK_GLB);
}
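For symmetry, a release helper would undo whichever path sci_glb_lock() took. This is a minimal sketch; sci_glb_unlock() and arch_hwunlock_fast() are assumed names, not confirmed by the snippet above:

/* Hypothetical counterpart to sci_glb_lock(): release the same lock. */
static void sci_glb_unlock(unsigned long *flags)
{
	struct hwspinlock *lock = arch_get_hwlock(HWLOCK_GLB);

	if (lock)
		hwspin_unlock_irqrestore(lock, flags);
	else
		arch_hwunlock_fast(HWLOCK_GLB);	/* assumed fast-path unlock */
}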
Example #2
/**
 * qcom_smem_get() - resolve the pointer and size of a smem item
 * @host:	the remote processor, or -1
 * @item:	smem item handle
 * @size:	pointer to be filled out with the size of the item
 *
 * Looks up the smem item and returns a pointer to it. The size of the
 * smem item is returned in @size.
 */
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
	unsigned long flags;
	int ret;
	void *ptr = ERR_PTR(-EPROBE_DEFER);

	if (!__smem)
		return ptr;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ERR_PTR(ret);

	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
		ptr = qcom_smem_get_private(__smem, host, item, size);
	else
		ptr = qcom_smem_get_global(__smem, item, size);

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ptr;
}
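A typical caller resolves an item and validates its size before use. This is a minimal sketch, where remote_host, MY_SMEM_ITEM and struct my_msg are hypothetical names:

/* Hypothetical caller of qcom_smem_get(); MY_SMEM_ITEM and struct my_msg
 * are illustrative, not part of the driver above. */
static struct my_msg *my_driver_get_msg(unsigned remote_host)
{
	struct my_msg *msg;
	size_t size;

	msg = qcom_smem_get(remote_host, MY_SMEM_ITEM, &size);
	if (IS_ERR(msg))
		return msg;	/* e.g. -EPROBE_DEFER while smem is not ready */

	if (size < sizeof(*msg))
		return ERR_PTR(-EINVAL);	/* item smaller than expected */

	return msg;
}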
Example #3
/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
		ret = qcom_smem_alloc_private(__smem, host, item, size);
	else
		ret = qcom_smem_alloc_global(__smem, item, size);

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
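The usual pattern is allocate-then-resolve. A minimal sketch follows; the names are hypothetical, and treating -EEXIST as benign assumes the allocator reports an already-allocated item that way:

/* Hypothetical caller: allocate once, then resolve the pointer. */
ret = qcom_smem_alloc(remote_host, MY_SMEM_ITEM, sizeof(struct my_msg));
if (ret < 0 && ret != -EEXIST)	/* -EEXIST assumed: already allocated */
	return ret;

msg = qcom_smem_get(remote_host, MY_SMEM_ITEM, &size);
if (IS_ERR(msg))
	return PTR_ERR(msg);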
Example #4
static inline void hi3630_srcup_reg_write(struct hi3630_srcup_data *pdata,
					  unsigned int reg, unsigned int value)
{
	unsigned long flag = 0;

	BUG_ON(!pdata);

	if (hwspin_lock_timeout_irqsave(pdata->hwlock, HWLOCK_WAIT_TIME, &flag)) {
		loge("%s: hwspinlock timeout!\n", __func__);
		return;
	}

	hi3630_asp_irq_write(pdata->hi3630_asp_irq, reg, value);

	hwspin_unlock_irqrestore(pdata->hwlock, &flag);
}
Example #5
static inline int hi3630_srcup_reg_read(struct hi3630_srcup_data *pdata,
					unsigned int reg)
{
	unsigned long flag = 0;
	int ret = 0;

	BUG_ON(!pdata);

	if (hwspin_lock_timeout_irqsave(pdata->hwlock, HWLOCK_WAIT_TIME, &flag)) {
		loge("%s: hwspinlock timeout!\n", __func__);
		/* NB: returning 0 here is indistinguishable from a register
		 * that actually reads as 0 */
		return ret;
	}

	ret = hi3630_asp_irq_read(pdata->hi3630_asp_irq, reg);

	hwspin_unlock_irqrestore(pdata->hwlock, &flag);
	return ret;
}
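The two accessors compose into higher-level helpers. A sketch of a set-bits helper (hypothetical name) follows; note that the read and the write each take the hwspinlock separately, so the sequence is not atomic against other masters:

/* Hypothetical helper built on the locked accessors above. Read and write
 * each take the hwspinlock on their own, so another master can modify the
 * register in between; callers needing atomicity need a wider lock. */
static inline void hi3630_srcup_reg_set_bits(struct hi3630_srcup_data *pdata,
					     unsigned int reg, unsigned int mask)
{
	unsigned int val;

	val = (unsigned int)hi3630_srcup_reg_read(pdata, reg);
	hi3630_srcup_reg_write(pdata, reg, val | mask);
}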
Example #6
static void smem_debug_read_mem(struct seq_file *s)
{
	u32 *info;
	size_t size;
	int ret, i;
	unsigned long flags;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HEAP_INFO, &size);

	if (IS_ERR(info))
		seq_printf(s, "Can't get global heap information pool\n");
	else {
		seq_printf(s, "global heap\n");
		seq_printf(s, "   initialized: %d offset: %08x avail: %08x\n",
				info[0], info[1], info[2]);

		for (i = 0; i < 512; i++) {
			info = qcom_smem_get(QCOM_SMEM_HOST_ANY, i, &size);
			if (IS_ERR(info))
				continue;

			seq_printf(s, "      [%d]: p: %p s: %zu\n", i, info,
					size);
		}
	}

	seq_printf(s, "\nSecure partitions accessible from APPS:\n");

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret) {
		seq_printf(s, "Failed to acquire hwspinlock: %d\n", ret);
		return;
	}

	for (i = 0; i < SMEM_HOST_COUNT; i++) {
		struct smem_partition_header *part_hdr = __smem->partitions[i];
		void *p;

		if (!part_hdr)
			continue;

		if (part_hdr->magic != SMEM_PART_MAGIC) {
			seq_printf(s, "   part[%d]: incorrect magic\n", i);
			continue;
		}

		seq_printf(s, "   part[%d]: (%d <-> %d) size: %d off: %08x\n",
			i, part_hdr->host0, part_hdr->host1, part_hdr->size,
			part_hdr->offset_free_uncached);

		p = (void *)part_hdr + sizeof(*part_hdr);
		while (p < (void *)part_hdr + part_hdr->offset_free_uncached) {
			struct smem_private_entry *entry = p;

			seq_printf(s,
				"          [%d]: %s size: %d pd: %d\n",
				entry->item,
				(entry->canary == SMEM_PRIVATE_CANARY) ?
					"valid" : "invalid",
				entry->size,
				entry->padding_data);

			p += sizeof(*entry) + entry->padding_hdr + entry->size;
		}
	}

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
}
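To expose the dump, the function would be wired to debugfs through the standard seq_file single_open() idiom. A minimal sketch, where the debugfs file name and location are illustrative:

/* Hypothetical debugfs wiring for smem_debug_read_mem(); needs
 * <linux/debugfs.h> and <linux/seq_file.h>. */
static int smem_debug_show(struct seq_file *s, void *unused)
{
	smem_debug_read_mem(s);
	return 0;
}

static int smem_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, smem_debug_show, NULL);
}

static const struct file_operations smem_debug_fops = {
	.open		= smem_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* e.g. from probe: */
/* debugfs_create_file("smem", 0444, NULL, NULL, &smem_debug_fops); */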