Exemplo n.º 1
0
/*
 * Allocate and map KASAN shadow memory covering a module allocation.
 *
 * @addr: start of the module mapping the shadow must cover.
 * @size: size of the module mapping, in bytes.
 *
 * Returns 0 on success, -EINVAL if the computed shadow start address is
 * not page aligned, or -ENOMEM if the shadow region cannot be mapped.
 */
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	/*
	 * One shadow byte tracks 2^KASAN_SHADOW_SCALE_SHIFT bytes of the
	 * module mapping; round the shadow span up to whole pages.
	 */
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	/*
	 * Map the shadow exactly at its fixed address (range is
	 * [shadow_start, shadow_start + shadow_size)); VM_NO_GUARD keeps
	 * adjacent shadow mappings contiguous.
	 */
	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		/*
		 * Mark the module's vm_area with VM_KASAN — presumably so
		 * the free path knows a shadow mapping exists; confirm
		 * against the corresponding teardown code.
		 */
		find_vm_area(addr)->flags |= VM_KASAN;
		/* Long-lived mapping; keep kmemleak from reporting it. */
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}
Exemplo n.º 2
0
/*
 * Stress vmalloc with randomized sizes and alignments for
 * test_loop_count iterations.  Returns 0 on success, -1 as soon as an
 * allocation fails.
 */
static int random_size_align_alloc_test(void)
{
	unsigned long nr_pages, align_order, entropy;
	void *area;
	int iter;

	for (iter = 0; iter < test_loop_count; iter++) {
		get_random_bytes(&entropy, sizeof(entropy));

		/* Alignment up to 1 << 22: 1024 pages when PAGE_SIZE is 4096. */
		align_order = entropy % 23;

		/* Request between one and ten pages. */
		nr_pages = (entropy % 10) + 1;

		area = __vmalloc_node_range(nr_pages * PAGE_SIZE,
		   1 << align_order,
		   VMALLOC_START, VMALLOC_END,
		   GFP_KERNEL | __GFP_ZERO,
		   PAGE_KERNEL,
		   0, 0, __builtin_return_address(0));
		if (!area)
			return -1;

		vfree(area);
	}

	return 0;
}
Exemplo n.º 3
0
/*
 * Allocate executable memory for a module image from the dedicated
 * module VA range.  Returns NULL if the request cannot fit.
 */
void *module_alloc(unsigned long size)
{
	void *region;

	/* Reject requests larger than the whole module area. */
	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	region = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				      GFP_KERNEL | __GFP_HIGHMEM,
				      PAGE_KERNEL_EXEC, -1,
				      __builtin_return_address(0));
	return region;
}
Exemplo n.º 4
0
/*
 * Back a module allocation with zeroed pages from the module VA range,
 * mapped with the caller-supplied protection bits.  Returns NULL when
 * the page-aligned size exceeds the module area.
 */
static inline void *__module_alloc(unsigned long size, pgprot_t prot)
{
	void *mem = NULL;

	if (PAGE_ALIGN(size) <= MODULES_LEN)
		mem = __vmalloc_node_range(size, 1, MODULES_VADDR,
					   MODULES_END,
					   GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
					   prot, -1,
					   __builtin_return_address(0));
	return mem;
}
Exemplo n.º 5
0
/*
 * Allocate (non-executable) kernel memory for a module from the module
 * VA window; NULL when the rounded size exceeds MODULES_LEN.
 */
void *module_alloc(unsigned long size)
{
    unsigned long rounded = PAGE_ALIGN(size);

    if (rounded > MODULES_LEN)
        return NULL;

    return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
                                GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
                                __builtin_return_address(0));
}
Exemplo n.º 6
0
/*
 * Allocate executable module memory.  With CONFIG_RELOCATABLE_KERNEL the
 * base of the module region is randomized once, on the first call.
 */
void *module_alloc(unsigned long size)
{
#ifdef CONFIG_RELOCATABLE_KERNEL
	static unsigned long module_va = 0; 
	/*
	 * The random offset is aligned to 16K (PAGE_SIZE * 4) and can move
	 * the base by up to RANDOMIZE_MODULE_REGION, which shrinks the
	 * address space available for modules.
	 * NOTE(review): the lazy init of this static is unsynchronized —
	 * presumably callers serialize module_alloc(); confirm.
	 */
	if(module_va == 0) {
		module_va = MODULES_VADDR; 
		if (randomize_module_space)
			module_va += ALIGN( get_random_int() %  RANDOMIZE_MODULE_REGION, PAGE_SIZE * 4); 
	}
	return __vmalloc_node_range(size, 1, module_va, MODULES_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
				    __builtin_return_address(0));
	
#else
	/* No randomization: allocate from the fixed module VA range. */
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
				    __builtin_return_address(0));
#endif 

}
Exemplo n.º 7
0
/*
 * arm64-style module allocator: try the dedicated module region first,
 * optionally fall back to a wider 4G window (PLTs enabled, KASAN off),
 * then register the result with KASAN shadow handling.  Returns NULL on
 * failure.
 */
void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation: a failure here may be recovered
	 * by the PLT fallback below, so don't warn about it. */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + MODULES_VSIZE,
				gfp_mask, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + SZ_4G, GFP_KERNEL,
				PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				__builtin_return_address(0));

	/* Set up the KASAN shadow for the mapping; if that fails the
	 * allocation is discarded rather than returned unshadowed. */
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}
Exemplo n.º 8
0
/*
 * Allocate executable module memory and register its KASAN shadow.
 * The region is freed and NULL returned if the shadow setup fails.
 */
void *module_alloc(unsigned long size)
{
	void *region;

	region = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR,
				MODULES_END, GFP_KERNEL, PAGE_KERNEL_EXEC,
				0, NUMA_NO_NODE,
				__builtin_return_address(0));
	if (!region)
		return NULL;

	if (kasan_module_alloc(region, size) < 0) {
		vfree(region);
		return NULL;
	}

	return region;
}
/*
 * Module allocator with a randomized load offset applied to the start
 * of the module range; wires the allocation up to KASAN before
 * returning it.
 */
void *module_alloc(unsigned long size)
{
	void *area;

	/* Too big to ever fit in the module region. */
	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	area = __vmalloc_node_range(size, MODULE_ALIGN,
				    MODULES_VADDR + get_module_load_offset(),
				    MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
				    PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
	if (!area)
		return NULL;

	if (kasan_module_alloc(area, size) < 0) {
		vfree(area);
		area = NULL;
	}

	return area;
}
Exemplo n.º 10
0
/*
 * Repeatedly allocate five pages at a fixed (THREAD_ALIGN << 1)
 * alignment.  Returns 0 on success, -1 on the first failed allocation.
 */
static int fix_align_alloc_test(void)
{
	int iter;

	for (iter = 0; iter < test_loop_count; iter++) {
		void *area = __vmalloc_node_range(5 * PAGE_SIZE,
			THREAD_ALIGN << 1,
			VMALLOC_START, VMALLOC_END,
			GFP_KERNEL | __GFP_ZERO,
			PAGE_KERNEL,
			0, 0, __builtin_return_address(0));

		if (!area)
			return -1;

		vfree(area);
	}

	return 0;
}
Exemplo n.º 11
0
/*
 * Memory-hotplug notifier keeping KASAN shadow in sync with onlined and
 * offlined memory: maps shadow pages when a block goes online and frees
 * them when it goes offline.
 *
 * Returns NOTIFY_OK on success (and for unhandled actions), NOTIFY_BAD
 * when the range is misaligned or the shadow cannot be allocated.
 */
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	/* One shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT bytes. */
	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	/* The hotplugged range must translate to whole shadow pages. */
	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/* Map the shadow at its fixed address, preferring the NUMA
		 * node that owns the memory being onlined. */
		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		/* Long-lived mapping; hide it from kmemleak. */
		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_OFFLINE:
		/* Drop the shadow that was mapped when the block onlined. */
		vfree((void *)shadow_start);
	}

	return NOTIFY_OK;
}
Exemplo n.º 12
0
/*
 * Deliberately expected to fail: walks every power-of-two alignment
 * representable in an unsigned long, and the largest ones cannot be
 * satisfied, so the function is expected to return -1.
 */
static int align_shift_alloc_test(void)
{
	int bit;

	for (bit = 0; bit < BITS_PER_LONG; bit++) {
		unsigned long alignment = ((unsigned long) 1) << bit;
		void *area;

		area = __vmalloc_node_range(PAGE_SIZE, alignment,
			VMALLOC_START, VMALLOC_END,
			GFP_KERNEL | __GFP_ZERO,
			PAGE_KERNEL,
			0, 0, __builtin_return_address(0));
		if (!area)
			return -1;

		vfree(area);
	}

	return 0;
}
Exemplo n.º 13
0
/*
 * Allocate executable memory for a kernel module from the dedicated
 * module VA range [MODULES_VADDR, MODULES_END).  Returns NULL on
 * allocation failure.
 */
void *module_alloc(unsigned long size)
{
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
				    __builtin_return_address(0));
}