/**
 * Protects memory if the heap is an unsecured heap. Also ensures that we
 * are in the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static int ion_cp_protect(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value = 0;

	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
		/* Make sure we are in C state when the heap is protected. */
		if (cp_heap->reusable && !cp_heap->allocated_bytes) {
			ret_value = fmem_set_state(FMEM_C_STATE);
			if (ret_value)
				goto out;
		}

		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
				cp_heap->secure_size, cp_heap->permission_type);
		if (ret_value) {
			pr_err("Failed to protect memory for heap %s - "
				"error code: %d\n", heap->name, ret_value);

			if (cp_heap->reusable && !cp_heap->allocated_bytes) {
				if (fmem_set_state(FMEM_T_STATE) != 0)
					pr_err("%s: unable to transition heap to T-state\n",
						__func__);
			}
		} else {
			cp_heap->heap_protected = HEAP_PROTECTED;
			pr_debug("Protected heap %s @ 0x%lx\n",
				heap->name, cp_heap->base);
		}
	}
out:
	return ret_value;
}
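Since ion_cp_protect() must be called with heap->lock held, callers normally wrap it in a locked helper. A minimal sketch of that pattern follows; the wrapper name is illustrative only and not part of the code shown above.

static int ion_cp_secure_heap_sketch(struct ion_heap *heap)
{
	int ret;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	/* Take the heap lock, as ion_cp_protect() requires. */
	mutex_lock(&cp_heap->lock);
	ret = ion_cp_protect(heap);
	mutex_unlock(&cp_heap->lock);

	return ret;
}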
static ssize_t fmem_state_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	int ret = -EINVAL;

	if (!strncmp(buf, "t", 1))
		ret = fmem_set_state(FMEM_T_STATE);
	else if (!strncmp(buf, "c", 1))
		ret = fmem_set_state(FMEM_C_STATE);
	if (ret)
		return ret;
	return 1;
}
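A store hook like this is exposed to user space through sysfs. A minimal wiring sketch, assuming an attribute named fmem_state and a matching fmem_state_show() (both names are assumptions, not taken from the snippet above):

/* Hypothetical sysfs wiring; attribute and show-callback names are assumed. */
static struct kobj_attribute fmem_state_attr =
	__ATTR(fmem_state, 0644, fmem_state_show, fmem_state_store);

static int fmem_create_sysfs_sketch(struct kobject *fmem_kobj)
{
	/* After this, "echo t > .../fmem_state" invokes fmem_state_store(). */
	return sysfs_create_file(fmem_kobj, &fmem_state_attr.attr);
}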
Example 3
/**
 * Unprotects memory if the heap is a secure heap. Also ensures that we
 * are in the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
		int error_code = ion_cp_unprotect_mem(
			cp_heap->secure_base, cp_heap->secure_size,
			cp_heap->permission_type, version, data);
		if (error_code) {
			pr_err("Failed to un-protect memory for heap %s - "
				"error code: %d\n", heap->name, error_code);
		} else {
			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
				(unsigned int) cp_heap->base);

			if (cp_heap->reusable && !cp_heap->allocated_bytes) {
				if (fmem_set_state(FMEM_T_STATE) != 0)
					pr_err("%s: unable to transition heap to T-state",
						__func__);
			}
		}
	}
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
}
/**
 * Unprotects memory if the heap is a secure heap. Also ensures that we
 * are in the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static void ion_cp_unprotect(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (cp_heap->heap_protected == HEAP_PROTECTED) {
		int error_code = ion_cp_unprotect_mem(
			cp_heap->secure_base, cp_heap->secure_size,
			cp_heap->permission_type);
		if (error_code) {
			pr_err("Failed to un-protect memory for heap %s - "
				"error code: %d\n", heap->name, error_code);
		} else {
			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
				(unsigned int) cp_heap->base);

			if (cp_heap->reusable && !cp_heap->allocated_bytes) {
				if (fmem_set_state(FMEM_T_STATE) != 0)
					pr_err("%s: unable to transition heap to T-state",
						__func__);
			}
		}
	}
}
Example 5
static int fmem_mem_going_offline_callback(void *arg)
{
	struct memory_notify *marg = arg;

	if (fmem_is_disjoint(marg->start_pfn, marg->nr_pages))
		return 0;
	return fmem_set_state(FMEM_O_STATE);
}
Example 6
static ssize_t fmem_state_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	int ret = -EINVAL;

	if (!strncmp(buf, "t", 1))
		ret = fmem_set_state(FMEM_T_STATE);
	else if (!strncmp(buf, "c", 1))
		ret = fmem_set_state(FMEM_C_STATE);
#ifdef CONFIG_MEMORY_HOTPLUG
	else if (!strncmp(buf, "o", 1))
		ret = fmem_set_state(FMEM_O_STATE);
#endif
	if (ret)
		return ret;
	return 1;
}
Example 7
/**
 * Protects memory if the heap is an unsecured heap. Also ensures that we
 * are in the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value = 0;

	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
		/* Make sure we are in C state when the heap is protected. */
		if (cp_heap->reusable && !cp_heap->allocated_bytes) {
			ret_value = fmem_set_state(FMEM_C_STATE);
			if (ret_value)
				goto out;
		}

		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
				cp_heap->secure_size, cp_heap->permission_type,
				version, data);
		if (ret_value) {
			pr_err("Failed to protect memory for heap %s - "
				"error code: %d\n", heap->name, ret_value);

			if (cp_heap->reusable && !cp_heap->allocated_bytes) {
				if (fmem_set_state(FMEM_T_STATE) != 0)
					pr_err("%s: unable to transition heap to T-state\n",
						__func__);
			}
			atomic_dec(&cp_heap->protect_cnt);
		} else {
			cp_heap->heap_protected = HEAP_PROTECTED;
			pr_debug("Protected heap %s @ 0x%lx\n",
				heap->name, cp_heap->base);
		}
	}
out:
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
	return ret_value;
}
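Because protect_cnt is a reference count, nested protect/unprotect pairs only program and tear down the protection once. A hypothetical usage sketch with the heap lock held as required (the wrapper name is illustrative, and return values are ignored for brevity):

static void ion_cp_protect_nesting_sketch(struct ion_heap *heap,
					  int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	ion_cp_protect(heap, version, data);	/* count 0 -> 1: protects   */
	ion_cp_protect(heap, version, data);	/* count 1 -> 2: no-op      */
	ion_cp_unprotect(heap, version, data);	/* count 2 -> 1: no-op      */
	ion_cp_unprotect(heap, version, data);	/* count 1 -> 0: unprotects */
	mutex_unlock(&cp_heap->lock);
}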
Example 8
static void fmem_mem_online_callback(void *arg)
{
	struct memory_notify *marg = arg;
	int i;

	section_powered_off[marg->start_pfn >> PFN_SECTION_SHIFT] = 0;

	if (fmem_state != FMEM_O_STATE)
		return;

	for (i = fmem_section_start; i <= fmem_section_end; i++) {
		if (section_powered_off[i])
			return;
	}

	fmem_set_state(FMEM_T_STATE);
}
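Callbacks like the two above are typically driven from a memory hotplug notifier. A minimal dispatch sketch, assuming the surrounding notifier plumbing exists elsewhere (the function name here is hypothetical):

static int fmem_memory_callback_sketch(struct notifier_block *self,
				       unsigned long action, void *arg)
{
	int ret = 0;

	switch (action) {
	case MEM_GOING_OFFLINE:
		/* Move to the O state before the section goes offline. */
		ret = fmem_mem_going_offline_callback(arg);
		break;
	case MEM_ONLINE:
		/* Return to the T state once every fmem section is back up. */
		fmem_mem_online_callback(arg);
		break;
	default:
		break;
	}

	if (ret)
		return notifier_from_errno(ret);
	return NOTIFY_OK;
}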
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (cp_heap->reusable && !cp_heap->allocated_bytes) {
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("%s: unable to transition heap to T-state\n",
				__func__);
	}
	mutex_unlock(&cp_heap->lock);
}
Example 10
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (cp_heap->reusable && !cp_heap->allocated_bytes &&
	    cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("%s: unable to transition heap to T-state\n",
				__func__);
	}

	/* Unmap everything if we previously mapped the whole heap at once. */
	if (!cp_heap->allocated_bytes) {
		unsigned int i;
		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i], i,
						cp_heap->iommu_partition[i],
						vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}
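A hypothetical free-path sketch showing how a heap op might hand a buffer back to ion_cp_free(); the ion_buffer field names follow the common ION pattern and are assumptions here:

static void ion_cp_heap_free_sketch(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;

	/* Return the carved-out physical range to the heap pool. */
	ion_cp_free(heap, buffer->priv_phys, buffer->size);
	buffer->priv_phys = ION_CP_ALLOCATE_FAIL;
}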
Example 11
ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align,
				      unsigned long flags)
{
	unsigned long offset;
	unsigned long secure_allocation = flags & ION_SECURE;

	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate un-secure memory from protected"
			" heap %s\n", heap->name);
		return ION_CP_ALLOCATE_FAIL;
	}

	if (secure_allocation &&
	    (cp_heap->umap_count > 0 || cp_heap->kmap_cached_count > 0)) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate secure memory from heap with "
			"outstanding mappings: User space: %lu, kernel space "
			"(cached): %lu\n", cp_heap->umap_count,
					   cp_heap->kmap_cached_count);
		return ION_CP_ALLOCATE_FAIL;
	}

	/*
	 * If this is the first allocation from a reusable heap,
	 * transition the heap to the C state.
	 */
	if (cp_heap->reusable && !cp_heap->allocated_bytes) {
		if (fmem_set_state(FMEM_C_STATE) != 0) {
			mutex_unlock(&cp_heap->lock);
			return ION_RESERVED_ALLOCATE_FAIL;
		}
	}

	cp_heap->allocated_bytes += size;
	mutex_unlock(&cp_heap->lock);

	offset = gen_pool_alloc_aligned(cp_heap->pool,
					size, ilog2(align));

	if (!offset) {
		mutex_lock(&cp_heap->lock);
		cp_heap->allocated_bytes -= size;
		if ((cp_heap->total_size -
		     cp_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but"
				" the allocation of size %lx still failed."
				" Memory is probably fragmented.\n",
				__func__, heap->name,
				cp_heap->total_size -
				cp_heap->allocated_bytes, size);

		if (cp_heap->reusable && !cp_heap->allocated_bytes &&
		    cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
			if (fmem_set_state(FMEM_T_STATE) != 0)
				pr_err("%s: unable to transition heap to T-state\n",
					__func__);
		}
		mutex_unlock(&cp_heap->lock);

		return ION_CP_ALLOCATE_FAIL;
	}

	return offset;
}
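And the matching allocation-path sketch (again hypothetical, following the usual ION heap-ops shape): the op records the physical offset returned by ion_cp_allocate() and maps a failed allocation to -ENOMEM.

static int ion_cp_heap_allocate_sketch(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       unsigned long size,
				       unsigned long align,
				       unsigned long flags)
{
	/* Field names are assumptions; failure markers are treated as -ENOMEM. */
	buffer->priv_phys = ion_cp_allocate(heap, size, align, flags);
	return buffer->priv_phys == ION_CP_ALLOCATE_FAIL ? -ENOMEM : 0;
}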