Example #1
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (mem_init_done && (p < virt_to_phys(high_memory))) {
		printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
		       __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * BAT mapping.  If the whole area is mapped then we're done,
	 * otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */
	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
		goto out;

	if ((v = p_mapped_by_tlbcam(p)))
		goto out;

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == 0)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v+i, p+i, flags);
	if (err) {
		if (mem_init_done)
			vunmap((void *)v);
		return NULL;
	}

out:
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
Example #2
void PREFIX(free)(void *ptr)
{
	struct liballoc_minor *min;
	struct liballoc_major *maj;

	if ( ptr == NULL )
	{
		l_warningCount += 1;
		#if defined DEBUG || defined INFO
		serial_printf( "liballoc: WARNING: PREFIX(free)( NULL ) called from %x\n",
							__builtin_return_address(0) );
		FLUSH();
		#endif
		return;
	}

	UNALIGN( ptr );

	liballoc_lock();		// lockit


	min = (struct liballoc_minor*)((uintptr_t)ptr - sizeof( struct liballoc_minor ));


	if ( min->magic != LIBALLOC_MAGIC )
	{
		l_errorCount += 1;

		// Check for overrun errors. For all bytes of LIBALLOC_MAGIC
		if (
			((min->magic & 0xFFFFFF) == (LIBALLOC_MAGIC & 0xFFFFFF)) ||
			((min->magic & 0xFFFF) == (LIBALLOC_MAGIC & 0xFFFF)) ||
			((min->magic & 0xFF) == (LIBALLOC_MAGIC & 0xFF))
		   )
		{
			l_possibleOverruns += 1;
			#if defined DEBUG || defined INFO
			serial_printf( "liballoc: ERROR: Possible 1-3 byte overrun for magic %x != %x\n",
								min->magic,
								LIBALLOC_MAGIC );
			FLUSH();
			#endif
		}


		if ( min->magic == LIBALLOC_DEAD )
		{
			#if defined DEBUG || defined INFO
			serial_printf( "liballoc: ERROR: multiple PREFIX(free)() attempt on %x from %x.\n",
									ptr,
									__builtin_return_address(0) );
			FLUSH();
			#endif
		}
		else
		{
			#if defined DEBUG || defined INFO
			serial_printf( "liballoc: ERROR: Bad PREFIX(free)( %x ) called from %x\n",
								ptr,
								__builtin_return_address(0) );
			FLUSH();
			#endif
		}

		// being lied to...
		liballoc_unlock();		// release the lock
		return;
	}

	#ifdef DEBUG
	serial_printf( "liballoc: %x PREFIX(free)( %x ): ",
				__builtin_return_address( 0 ),
				ptr );
	FLUSH();
	#endif


		maj = min->block;

		l_inuse -= min->size;

		maj->usage -= (min->size + sizeof( struct liballoc_minor ));
		min->magic  = LIBALLOC_DEAD;		// No mojo.

		if ( min->next != NULL ) min->next->prev = min->prev;
		if ( min->prev != NULL ) min->prev->next = min->next;

		if ( min->prev == NULL ) maj->first = min->next;
							// Might empty the block. This was the first
							// minor.


	// We need to clean up after the majors now....

	if ( maj->first == NULL )	// Block completely unused.
	{
		if ( l_memRoot == maj ) l_memRoot = maj->next;
		if ( l_bestBet == maj ) l_bestBet = NULL;
		if ( maj->prev != NULL ) maj->prev->next = maj->next;
		if ( maj->next != NULL ) maj->next->prev = maj->prev;
		l_allocated -= maj->size;

		liballoc_free( maj, maj->pages );
	}
	else
	{
		if ( l_bestBet != NULL )
		{
			int bestSize = l_bestBet->size  - l_bestBet->usage;
			int majSize = maj->size - maj->usage;

			if ( majSize > bestSize ) l_bestBet = maj;
		}

	}


	#ifdef DEBUG
	serial_printf( "OK\n");
	FLUSH();
	#endif

	liballoc_unlock();		// release the lock
}
Example #3
NORET_TYPE void panic(const char * fmt, ...)
{
	long i;
	static char buf[1024];
	va_list args;
#if defined(CONFIG_ARCH_S390)
        unsigned long caller = (unsigned long) __builtin_return_address(0);
#endif

	bust_spinlocks(1);
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
	bust_spinlocks(0);

#ifdef CONFIG_SMP
	smp_send_stop();
#endif

	notifier_call_chain(&panic_notifier_list, 0, buf);

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0)
	{
		/*
	 	 * Delay timeout seconds before rebooting the machine. 
		 * We can't use the "normal" timers since we just panicked..
	 	 */
		printk(KERN_EMERG "Rebooting in %d seconds..",panic_timeout);
		for (i = 0; i < panic_timeout*1000; ) {
			touch_nmi_watchdog();
			i += panic_blink(i);
			mdelay(1);
			i++;
		}
		/*
		 *	Should we run the reboot notifier? For the moment I'm
		 *	choosing not to. It might crash, be corrupt or do
		 *	more harm than good for other reasons.
		 */
		machine_restart(NULL);
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_ARCH_S390)
        disabled_wait(caller);
#endif
	local_irq_enable();
	for (i = 0;;) {
		i += panic_blink(i);
		mdelay(1);
		i++;
	}
}
Example #4
static void *
getreturnaddr(int level)
{
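    /*
     * __builtin_return_address() only accepts a compile-time constant
     * argument, so every frame level has to be spelled out explicitly;
     * the +1 offset skips this helper's own frame.
     */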

    switch(level) {
    case 0: return __builtin_return_address(1);
    case 1: return __builtin_return_address(2);
    case 2: return __builtin_return_address(3);
    case 3: return __builtin_return_address(4);
    case 4: return __builtin_return_address(5);
    case 5: return __builtin_return_address(6);
    case 6: return __builtin_return_address(7);
    case 7: return __builtin_return_address(8);
    case 8: return __builtin_return_address(9);
    case 9: return __builtin_return_address(10);
    case 10: return __builtin_return_address(11);
    case 11: return __builtin_return_address(12);
    case 12: return __builtin_return_address(13);
    case 13: return __builtin_return_address(14);
    case 14: return __builtin_return_address(15);
    case 15: return __builtin_return_address(16);
    case 16: return __builtin_return_address(17);
    case 17: return __builtin_return_address(18);
    case 18: return __builtin_return_address(19);
    case 19: return __builtin_return_address(20);
    case 20: return __builtin_return_address(21);
    case 21: return __builtin_return_address(22);
    case 22: return __builtin_return_address(23);
    case 23: return __builtin_return_address(24);
    case 24: return __builtin_return_address(25);
    case 25: return __builtin_return_address(26);
    case 26: return __builtin_return_address(27);
    case 27: return __builtin_return_address(28);
    case 28: return __builtin_return_address(29);
    case 29: return __builtin_return_address(30);
    case 30: return __builtin_return_address(31);
    case 31: return __builtin_return_address(32);
    case 32: return __builtin_return_address(33);
    case 33: return __builtin_return_address(34);
    case 34: return __builtin_return_address(35);
    case 35: return __builtin_return_address(36);
    case 36: return __builtin_return_address(37);
    case 37: return __builtin_return_address(38);
    case 38: return __builtin_return_address(39);
    case 39: return __builtin_return_address(40);
    case 40: return __builtin_return_address(41);
    case 41: return __builtin_return_address(42);
    case 42: return __builtin_return_address(43);
    case 43: return __builtin_return_address(44);
    case 44: return __builtin_return_address(45);
    case 45: return __builtin_return_address(46);
    case 46: return __builtin_return_address(47);
    case 47: return __builtin_return_address(48);
    case 48: return __builtin_return_address(49);
    case 49: return __builtin_return_address(50);
    case 50: return __builtin_return_address(51);
    case 51: return __builtin_return_address(52);
    case 52: return __builtin_return_address(53);
    case 53: return __builtin_return_address(54);
    case 54: return __builtin_return_address(55);
    case 55: return __builtin_return_address(56);
    case 56: return __builtin_return_address(57);
    case 57: return __builtin_return_address(58);
    case 58: return __builtin_return_address(59);
    case 59: return __builtin_return_address(60);
    case 60: return __builtin_return_address(61);
    case 61: return __builtin_return_address(62);
    case 62: return __builtin_return_address(63);
    case 63: return __builtin_return_address(64);
    case 64: return __builtin_return_address(65);
    case 65: return __builtin_return_address(66);
    case 66: return __builtin_return_address(67);
    case 67: return __builtin_return_address(68);
    case 68: return __builtin_return_address(69);
    case 69: return __builtin_return_address(70);
    case 70: return __builtin_return_address(71);
    case 71: return __builtin_return_address(72);
    case 72: return __builtin_return_address(73);
    case 73: return __builtin_return_address(74);
    case 74: return __builtin_return_address(75);
    case 75: return __builtin_return_address(76);
    case 76: return __builtin_return_address(77);
    case 77: return __builtin_return_address(78);
    case 78: return __builtin_return_address(79);
    case 79: return __builtin_return_address(80);
    case 80: return __builtin_return_address(81);
    case 81: return __builtin_return_address(82);
    case 82: return __builtin_return_address(83);
    case 83: return __builtin_return_address(84);
    case 84: return __builtin_return_address(85);
    case 85: return __builtin_return_address(86);
    case 86: return __builtin_return_address(87);
    case 87: return __builtin_return_address(88);
    case 88: return __builtin_return_address(89);
    case 89: return __builtin_return_address(90);
    case 90: return __builtin_return_address(91);
    case 91: return __builtin_return_address(92);
    case 92: return __builtin_return_address(93);
    case 93: return __builtin_return_address(94);
    case 94: return __builtin_return_address(95);
    case 95: return __builtin_return_address(96);
    case 96: return __builtin_return_address(97);
    case 97: return __builtin_return_address(98);
    case 98: return __builtin_return_address(99);
    case 99: return __builtin_return_address(100);
    case 100: return __builtin_return_address(101);
    case 101: return __builtin_return_address(102);
    case 102: return __builtin_return_address(103);
    case 103: return __builtin_return_address(104);
    case 104: return __builtin_return_address(105);
    case 105: return __builtin_return_address(106);
    case 106: return __builtin_return_address(107);
    case 107: return __builtin_return_address(108);
    case 108: return __builtin_return_address(109);
    case 109: return __builtin_return_address(110);
    case 110: return __builtin_return_address(111);
    case 111: return __builtin_return_address(112);
    case 112: return __builtin_return_address(113);
    case 113: return __builtin_return_address(114);
    case 114: return __builtin_return_address(115);
    case 115: return __builtin_return_address(116);
    case 116: return __builtin_return_address(117);
    case 117: return __builtin_return_address(118);
    case 118: return __builtin_return_address(119);
    case 119: return __builtin_return_address(120);
    case 120: return __builtin_return_address(121);
    case 121: return __builtin_return_address(122);
    case 122: return __builtin_return_address(123);
    case 123: return __builtin_return_address(124);
    case 124: return __builtin_return_address(125);
    case 125: return __builtin_return_address(126);
    case 126: return __builtin_return_address(127);
    case 127: return __builtin_return_address(128);
    default: return NULL;
    }
}
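A minimal sketch of how such a helper can be driven, assuming a hypothetical collect_backtrace() wrapper; real backtrace implementations also validate frame addresses before walking deeper, which is omitted here:

/* Hypothetical usage sketch for getreturnaddr(); everything outside the
 * example above is illustrative only. */
static int
collect_backtrace(void **buffer, int max_frames)
{
    int level;

    for (level = 0; level < max_frames; level++) {
        void *pc = getreturnaddr(level);
        if (pc == NULL)
            break;          /* no more frames (or past the cap of 128) */
        buffer[level] = pc;
    }
    return level;           /* number of frames captured */
}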
Example #5
void __iomem *
__uc32_ioremap_cached(unsigned long phys_addr, size_t size)
{
	return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE_CACHED,
			__builtin_return_address(0));
}
Example #6
void *__wrap___real_realloc(void *ptr, size_t size)
{
	void *ret;
	ret = hook_realloc(ptr, size, __builtin_return_address(0));
	return ret;
}
Example #7
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
Example #8
static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD
	 */

	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
		p < __virt_to_phys((phys_addr_t)__bss_stop))) {
		pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
			(unsigned long)p, __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
Example #9
/**
 *	panic - halt the system
 *	@fmt: The text string to print
 *
 *	Display a message, then perform cleanups.
 *
 *	This function never returns.
 */
void panic(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	long i, i_next = 0, len;
	int state = 0;
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
	local_irq_disable();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or will wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
	 * comes here, so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU.  In this case, this is also the 1st CPU.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu  = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers) {
		printk_safe_flush_on_panic();
		__crash_kexec(NULL);

		/*
		 * Note smp_send_stop is the usual smp shutdown function, which
		 * unfortunately means it may not be hardened to work in a
		 * panic situation.
		 */
		smp_send_stop();
	} else {
		/*
		 * If we want to do crash dump after notifier calls and
		 * kmsg_dump, we will need architecture dependent extra
		 * works in addition to stopping other CPUs.
		 */
		crash_smp_send_stop();
	}

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	/* Call flush even twice. It tries harder with a single online CPU */
	printk_safe_flush_on_panic();
	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * If you doubt kdump always works fine in any situation,
	 * "crash_kexec_post_notifiers" offers you a chance to run
	 * panic_notifiers and dumping kmsg before kdump.
	 * Note: since some panic_notifiers can make crashed kernel
	 * more unstable, it can increase risks of the kdump failure too.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

#ifdef CONFIG_VT
	unblank_screen();
#endif
	console_unblank();

	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer.  Try to acquire the lock then release it regardless of the
	 * result.  The release will also print the buffers out.  Locks debug
	 * should be disabled to avoid reporting bad unlock balance when
	 * panic() is not being called from OOPS.
	 */
	debug_locks_off();
	console_flush_on_panic();

	panic_print_sys_info();

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down.  But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
			 "twice on console to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	{
		unsigned long caller;

		caller = (unsigned long)__builtin_return_address(0);
		disabled_wait(caller);
	}
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}
Example #10
extern "C" void __sanitizer_cov_trace_pc() {
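  // Called from -fsanitize-coverage=trace-pc instrumentation: the return
  // address is the PC inside the instrumented code, truncated to 32 bits
  // for the fuzzer's coverage bookkeeping.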
  fuzzer::HandlePC(static_cast<uint32_t>(
      reinterpret_cast<uintptr_t>(__builtin_return_address(0))));
}
Example #11
void *module_alloc(unsigned long size)
{
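	/* The caller's address is stored with the vmalloc area, so the
	 * allocation is attributed to it in /proc/vmallocinfo. */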
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
Example #12
G*
__go_go(void (*fn)(void*), void* arg)
{
	byte *sp;
	size_t spsize;
	G *newg;

	schedlock();

	if((newg = gfget()) != nil){
#ifdef USING_SPLIT_STACK
		int dont_block_signals = 0;

		sp = __splitstack_resetcontext(&newg->stack_context[0],
					       &spsize);
		__splitstack_block_signals_context(&newg->stack_context[0],
						   &dont_block_signals, nil);
#else
		sp = newg->gcinitial_sp;
		spsize = newg->gcstack_size;
		if(spsize == 0)
			runtime_throw("bad spsize in __go_go");
		newg->gcnext_sp = sp;
#endif
	} else {
		newg = runtime_malg(StackMin, &sp, &spsize);
		if(runtime_lastg == nil)
			runtime_allg = newg;
		else
			runtime_lastg->alllink = newg;
		runtime_lastg = newg;
	}
	newg->status = Gwaiting;
	newg->waitreason = "new goroutine";

	newg->entry = (byte*)fn;
	newg->param = arg;
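	// Record the PC of the go statement that created this goroutine;
	// it is reported in tracebacks.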
	newg->gopc = (uintptr)__builtin_return_address(0);

	runtime_sched.gcount++;
	runtime_sched.goidgen++;
	newg->goid = runtime_sched.goidgen;

	if(sp == nil)
		runtime_throw("nil g->stack0");

	{
		// Avoid warnings about variables clobbered by
		// longjmp.
		byte * volatile vsp = sp;
		size_t volatile vspsize = spsize;
		G * volatile vnewg = newg;

		getcontext(&vnewg->context);
		vnewg->context.uc_stack.ss_sp = vsp;
#ifdef MAKECONTEXT_STACK_TOP
		vnewg->context.uc_stack.ss_sp += vspsize;
#endif
		vnewg->context.uc_stack.ss_size = vspsize;
		makecontext(&vnewg->context, kickoff, 0);

		newprocreadylocked(vnewg);
		schedunlock();

		return vnewg;
	}
}
Example #13
void local_bh_disable(void)
{
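	/* Pass the call site so lockdep/irqflags tracing can attribute the
	 * softirq-disable section to our caller. */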
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}
Example #14
void __wrap___cxa_throw(void *pException, std::type_info *pTypeInfo, void (_GLIBCXX_CDTOR_CALLABI *pfnDtor)(void *)){
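	// Reached in place of __cxa_throw() via linker wrapping: report the
	// throw site (our return address) to the hook, then chain to the
	// real __cxa_throw().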
	if(__MCF_CRT_OnException){
		(*__MCF_CRT_OnException)(pException, *pTypeInfo, __builtin_return_address(0));
	}
	::__real___cxa_throw(pException, pTypeInfo, pfnDtor);
}
Example #15
void *__wrap___real_memalign(size_t boundary, size_t size)
{
	void *ret;
	ret = hook_memalign(boundary, size, __builtin_return_address(0));
	return ret;
}
Example #16
void warn_slowpath_null(const char *file, int line)
{
	pr_warn(CUT_HERE);
	__warn(file, line, __builtin_return_address(0), TAINT_WARN, NULL, NULL);
}
Example #17
void __wrap___real_free(void *ptr)
{
	hook_free(ptr, __builtin_return_address(0));
}
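A minimal sketch of what the hook side of such a wrapper might look like, assuming the hook_free(void *ptr, void *caller) signature implied by the call above; the name and the logging are illustrative, and forwarding to the underlying allocator is omitted because it depends on how the wrapping is arranged:

#include <stdio.h>

/* Hypothetical hook: record which call site released the pointer. */
void hook_free(void *ptr, void *caller)
{
	fprintf(stderr, "free(%p) called from %p\n", ptr, caller);
	/* ...forward to the real deallocator here... */
}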
Example #18
/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible void __stack_chk_fail(void)
{
	panic("stack-protector: Kernel stack is corrupted in: %pB",
		__builtin_return_address(0));
}
Example #19
void __iomem *
ioremap_wc(phys_addr_t addr, unsigned long size)
{
	return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
				__builtin_return_address(0));
}
Example #20
/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 s64 tx_total_len,
					 gfp_t gfp)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(srx, gfp);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	call->tx_total_len = tx_total_len;
	trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
			 here, (const void *)user_call_ID);
Example #21
void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
		 void *caller)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from IOREMAP_TOP
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

#ifndef CONFIG_CRASH_DUMP
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (slab_is_available() && (p < virt_to_phys(high_memory)) &&
	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
		printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
		       (unsigned long long)p, __builtin_return_address(0));
		return NULL;
	}
#endif

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * mapping.
	 */
	v = p_block_mapped(p);
	if (v)
		goto out;

	if (slab_is_available()) {
		struct vm_struct *area;
		area = get_vm_area_caller(size, VM_IOREMAP, caller);
		if (area == 0)
			return NULL;
		area->phys_addr = p;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v+i, p+i, flags);
	if (err) {
		if (slab_is_available())
			vunmap((void *)v);
		return NULL;
	}

out:
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
Example #22
void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_DISABLE_OFFSET);
}
Example #23
void 
hal_diag_write_char(char c)
{
    static char line[100];
    static int pos = 0;

    // No need to send CRs
    if( c == '\r' ) return;

    line[pos++] = c;

    if( c == '\n' || pos == sizeof(line) )
    {
        CYG_INTERRUPT_STATE old;

        // Disable interrupts. This prevents GDB trying to interrupt us
        // while we are in the middle of sending a packet. The serial
        // receive interrupt will be seen when we re-enable interrupts
        // later.
        
#ifdef CYGDBG_HAL_DEBUG_GDB_INCLUDE_STUBS
        CYG_HAL_GDB_ENTER_CRITICAL_IO_REGION(old);
#else
        HAL_DISABLE_INTERRUPTS(old);
#endif

        while(1)
        {
            static char hex[] = "0123456789ABCDEF";
            cyg_uint8 csum = 0;
            int i;
            char c1;
        
            cyg_hal_plf_serial_putc(0, '$');
            cyg_hal_plf_serial_putc(0, 'O');
            csum += 'O';
            for( i = 0; i < pos; i++ )
            {
                char ch = line[i];
                char h = hex[(ch>>4)&0xF];
                char l = hex[ch&0xF];
                cyg_hal_plf_serial_putc(0, h);
                cyg_hal_plf_serial_putc(0, l);
                csum += h;
                csum += l;
            }
            cyg_hal_plf_serial_putc(0, '#');
            cyg_hal_plf_serial_putc(0, hex[(csum>>4)&0xF]);
            cyg_hal_plf_serial_putc(0, hex[csum&0xF]);

            // Wait for the ACK character '+' from GDB here and handle
            // receiving a ^C instead.  This is the reason for this clause
            // being a loop.
            c1 = cyg_hal_plf_serial_getc(0);

            if( c1 == '+' )
                break;              // a good acknowledge

#if defined(CYGDBG_HAL_DEBUG_GDB_INCLUDE_STUBS) && \
    defined(CYGDBG_HAL_DEBUG_GDB_BREAK_SUPPORT)
            cyg_drv_interrupt_acknowledge(CYGNUM_HAL_VECTOR_INTCSI1);
            if( c1 == 3 ) {
                // Ctrl-C: breakpoint.
                cyg_hal_gdb_interrupt (__builtin_return_address(0));
                break;
            }
#endif
            // otherwise, loop round again
        }
        
        pos = 0;

        // And re-enable interrupts
#ifdef CYGDBG_HAL_DEBUG_GDB_INCLUDE_STUBS
        CYG_HAL_GDB_LEAVE_CRITICAL_IO_REGION(old);
#else
        HAL_RESTORE_INTERRUPTS(old);
#endif
    }
}
Example #24
void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
Example #25
void *PREFIX(malloc)(size_t req_size)
{
	int startedBet = 0;
	unsigned long long bestSize = 0;
	void *p = NULL;
	uintptr_t diff;
	struct liballoc_major *maj;
	struct liballoc_minor *min;
	struct liballoc_minor *new_min;
	unsigned long size = req_size;

	// For alignment, we adjust size so there's enough space to align.
	if ( ALIGNMENT > 1 )
	{
		size += ALIGNMENT + ALIGN_INFO;
	}
				// So, ideally, we really want an alignment of 0 or 1 in order
				// to save space.

	liballoc_lock();

	if ( size == 0 )
	{
		l_warningCount += 1;
		#if defined DEBUG || defined INFO
		serial_printf( "liballoc: WARNING: alloc( 0 ) called from %x\n",
							__builtin_return_address(0) );
		FLUSH();
		#endif
		liballoc_unlock();
		return PREFIX(malloc)(1);
	}


	if ( l_memRoot == NULL )
	{
		#if defined DEBUG || defined INFO
		#ifdef DEBUG
		serial_printf( "liballoc: initialization of liballoc " VERSION "\n" );
		#endif
		//atexit( liballoc_dump );
		FLUSH();
		#endif

		// This is the first time we are being used.
		l_memRoot = allocate_new_page( size );
		if ( l_memRoot == NULL )
		{
		  liballoc_unlock();
		  #ifdef DEBUG
		  serial_printf( "liballoc: initial l_memRoot initialization failed\n");
		  FLUSH();
		  #endif
		  return NULL;
		}

		#ifdef DEBUG
		serial_printf( "liballoc: set up first memory major %x\n", l_memRoot );
		FLUSH();
		#endif
	}


	#ifdef DEBUG
	serial_printf( "liballoc: %x PREFIX(malloc)( %i ): ",
					__builtin_return_address(0),
					size );
	FLUSH();
	#endif

	// Now we need to bounce through every major and find enough space....

	maj = l_memRoot;
	startedBet = 0;

	// Start at the best bet....
	if ( l_bestBet != NULL )
	{
		bestSize = l_bestBet->size - l_bestBet->usage;

		if ( bestSize > (size + sizeof(struct liballoc_minor)))
		{
			maj = l_bestBet;
			startedBet = 1;
		}
	}

	while ( maj != NULL )
	{
		diff  = maj->size - maj->usage;
										// free memory in the block

		if ( bestSize < diff )
		{
			// Hmm.. this one has more memory than our bestBet. Remember!
			l_bestBet = maj;
			bestSize = diff;
		}


#ifdef USE_CASE1

		// CASE 1:  There is not enough space in this major block.
		if ( diff < (size + sizeof( struct liballoc_minor )) )
		{
			#ifdef DEBUG
			serial_printf( "CASE 1: Insufficient space in block %x\n", maj);
			FLUSH();
			#endif

				// Another major block next to this one?
			if ( maj->next != NULL )
			{
				maj = maj->next;		// Hop to that one.
				continue;
			}

			if ( startedBet == 1 )		// If we started at the best bet,
			{							// let's start all over again.
				maj = l_memRoot;
				startedBet = 0;
				continue;
			}

			// Create a new major block next to this one and...
			maj->next = allocate_new_page( size );	// next one will be okay.
			if ( maj->next == NULL ) break;			// no more memory.
			maj->next->prev = maj;
			maj = maj->next;

			// .. fall through to CASE 2 ..
		}

#endif

#ifdef USE_CASE2

		// CASE 2: It's a brand new block.
		if ( maj->first == NULL )
		{
			maj->first = (struct liballoc_minor*)((uintptr_t)maj + sizeof(struct liballoc_major) );


			maj->first->magic 		= LIBALLOC_MAGIC;
			maj->first->prev 		= NULL;
			maj->first->next 		= NULL;
			maj->first->block 		= maj;
			maj->first->size 		= size;
			maj->first->req_size 	= req_size;
			maj->usage 	+= size + sizeof( struct liballoc_minor );


			l_inuse += size;


			p = (void*)((uintptr_t)(maj->first) + sizeof( struct liballoc_minor ));

			ALIGN( p );

			#ifdef DEBUG
			serial_printf( "CASE 2: returning %x\n", p);
			FLUSH();
			#endif
			liballoc_unlock();		// release the lock
			return p;
		}

#endif

#ifdef USE_CASE3

		// CASE 3: Block in use and enough space at the start of the block.
		diff =  (uintptr_t)(maj->first);
		diff -= (uintptr_t)maj;
		diff -= sizeof(struct liballoc_major);

		if ( diff >= (size + sizeof(struct liballoc_minor)) )
		{
			// Yes, space in front. Squeeze in.
			maj->first->prev = (struct liballoc_minor*)((uintptr_t)maj + sizeof(struct liballoc_major) );
			maj->first->prev->next = maj->first;
			maj->first = maj->first->prev;

			maj->first->magic 	= LIBALLOC_MAGIC;
			maj->first->prev 	= NULL;
			maj->first->block 	= maj;
			maj->first->size 	= size;
			maj->first->req_size 	= req_size;
			maj->usage 			+= size + sizeof( struct liballoc_minor );

			l_inuse += size;

			p = (void*)((uintptr_t)(maj->first) + sizeof( struct liballoc_minor ));
			ALIGN( p );

			#ifdef DEBUG
			serial_printf( "CASE 3: returning %x\n", p);
			FLUSH();
			#endif
			liballoc_unlock();		// release the lock
			return p;
		}

#endif


#ifdef USE_CASE4

		// CASE 4: There is enough space in this block. But is it contiguous?
		min = maj->first;

			// Looping within the block now...
		while ( min != NULL )
		{
				// CASE 4.1: End of minors in a block. Space from last and end?
				if ( min->next == NULL )
				{
					// the rest of this block is free...  is it big enough?
					diff = (uintptr_t)(maj) + maj->size;
					diff -= (uintptr_t)min;
					diff -= sizeof( struct liballoc_minor );
					diff -= min->size;
						// minus already existing usage..

					if ( diff >= (size + sizeof( struct liballoc_minor )) )
					{
						// yay....
						min->next = (struct liballoc_minor*)((uintptr_t)min + sizeof( struct liballoc_minor ) + min->size);
						min->next->prev = min;
						min = min->next;
						min->next = NULL;
						min->magic = LIBALLOC_MAGIC;
						min->block = maj;
						min->size = size;
						min->req_size = req_size;
						maj->usage += size + sizeof( struct liballoc_minor );

						l_inuse += size;

						p = (void*)((uintptr_t)min + sizeof( struct liballoc_minor ));
						ALIGN( p );

						#ifdef DEBUG
						serial_printf( "CASE 4.1: returning %x\n", p);
						FLUSH();
						#endif
						liballoc_unlock();		// release the lock
						return p;
					}
				}



				// CASE 4.2: Is there space between two minors?
				if ( min->next != NULL )
				{
					// is the difference between here and next big enough?
					diff  = (uintptr_t)(min->next);
					diff -= (uintptr_t)min;
					diff -= sizeof( struct liballoc_minor );
					diff -= min->size;
										// minus our existing usage.

					if ( diff >= (size + sizeof( struct liballoc_minor )) )
					{
						// yay......
						new_min = (struct liballoc_minor*)((uintptr_t)min + sizeof( struct liballoc_minor ) + min->size);

						new_min->magic = LIBALLOC_MAGIC;
						new_min->next = min->next;
						new_min->prev = min;
						new_min->size = size;
						new_min->req_size = req_size;
						new_min->block = maj;
						min->next->prev = new_min;
						min->next = new_min;
						maj->usage += size + sizeof( struct liballoc_minor );

						l_inuse += size;

						p = (void*)((uintptr_t)new_min + sizeof( struct liballoc_minor ));
						ALIGN( p );


						#ifdef DEBUG
						serial_printf( "CASE 4.2: returning %x\n", p);
						FLUSH();
						#endif

						liballoc_unlock();		// release the lock
						return p;
					}
				}	// min->next != NULL

				min = min->next;
		} // while min != NULL ...


#endif

#ifdef USE_CASE5

		// CASE 5: Block full! Ensure next block and loop.
		if ( maj->next == NULL )
		{
			#ifdef DEBUG
			serial_printf( "CASE 5: block full\n");
			FLUSH();
			#endif

			if ( startedBet == 1 )
			{
				maj = l_memRoot;
				startedBet = 0;
				continue;
			}

			// we've run out. we need more...
			maj->next = allocate_new_page( size );		// next one guaranteed to be okay
			if ( maj->next == NULL ) break;			//  uh oh,  no more memory.....
			maj->next->prev = maj;

		}

#endif

		maj = maj->next;
	} // while (maj != NULL)



	liballoc_unlock();		// release the lock

	#ifdef DEBUG
	serial_printf( "All cases exhausted. No memory available.\n");
	FLUSH();
	#endif
	#if defined DEBUG || defined INFO
	serial_printf( "liballoc: WARNING: PREFIX(malloc)( %i ) returning NULL.\n", size);
	liballoc_dump();
	FLUSH();
	#endif
	return NULL;
}
Example #26
asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	int cpu;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;

	/*
	 * Mask out PF_MEMALLOC as the current task's context is borrowed for
	 * the softirq. A softirq handler such as network RX might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			h->action(h);
			trace_softirq_exit(vec_nr);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p "
				       "with preempt_count %08x,"
				       " exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count_set(prev_count);
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_exit();

	account_system_vtime(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
Example #27
void*   PREFIX(realloc)(void *p, size_t size)
{
	void *ptr;
	struct liballoc_minor *min;
	unsigned int real_size;

	// Honour the case of size == 0 => free old and return NULL
	if ( size == 0 )
	{
		PREFIX(free)( p );
		return NULL;
	}

	// In the case of a NULL pointer, return a simple malloc.
	if ( p == NULL ) return PREFIX(malloc)( size );

	// Unalign the pointer if required.
	ptr = p;
	UNALIGN(ptr);

	liballoc_lock();		// lockit

		min = (struct liballoc_minor*)((uintptr_t)ptr - sizeof( struct liballoc_minor ));

		// Ensure it is a valid structure.
		if ( min->magic != LIBALLOC_MAGIC )
		{
			l_errorCount += 1;

			// Check for overrun errors. For all bytes of LIBALLOC_MAGIC
			if (
				((min->magic & 0xFFFFFF) == (LIBALLOC_MAGIC & 0xFFFFFF)) ||
				((min->magic & 0xFFFF) == (LIBALLOC_MAGIC & 0xFFFF)) ||
				((min->magic & 0xFF) == (LIBALLOC_MAGIC & 0xFF))
			   )
			{
				l_possibleOverruns += 1;
				#if defined DEBUG || defined INFO
				serial_printf( "liballoc: ERROR: Possible 1-3 byte overrun for magic %x != %x\n",
									min->magic,
									LIBALLOC_MAGIC );
				FLUSH();
				#endif
			}


			if ( min->magic == LIBALLOC_DEAD )
			{
				#if defined DEBUG || defined INFO
				serial_printf( "liballoc: ERROR: multiple PREFIX(free)() attempt on %x from %x.\n",
										ptr,
										__builtin_return_address(0) );
				FLUSH();
				#endif
			}
			else
			{
				#if defined DEBUG || defined INFO
				serial_printf( "liballoc: ERROR: Bad PREFIX(free)( %x ) called from %x\n",
									ptr,
									__builtin_return_address(0) );
				FLUSH();
				#endif
			}

			// being lied to...
			liballoc_unlock();		// release the lock
			return NULL;
		}

		// Definitely a memory block.

		real_size = min->req_size;

		if ( real_size >= size )
		{
			min->req_size = size;
			liballoc_unlock();
			return p;
		}

	liballoc_unlock();

	// If we got here then we're reallocating to a block bigger than us.
	ptr = PREFIX(malloc)( size );					// We need to allocate new memory
	liballoc_memcpy( ptr, p, real_size );
	PREFIX(free)( p );

	return ptr;
}
Example #28
void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return arch_ioremap_caller(phys_addr, size, mtype,
		__builtin_return_address(0));
}
Example #29
void* dlvsym(void* handle, const char* symbol, const char* version) {
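  // bionic's loader uses the caller's address to resolve the versioned
  // symbol relative to the calling library's namespace.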
  const void* caller_addr = __builtin_return_address(0);
  return __loader_dlvsym(handle, symbol, version, caller_addr);
}
Example #30
/*
 * Several applications, such as Quake3, use dlopen("libGL.so.1"), but
 * LD_PRELOAD does not intercept symbols obtained via dlopen/dlsym, therefore
 * we need to intercept the dlopen() call here, and redirect to our wrapper
 * shared object.
 */
extern "C" PUBLIC
void * dlopen(const char *filename, int flag)
{
    void *handle;

    if (!filename) {
        return _dlopen(filename, flag);
    }

    LibClass libClass = classifyLibrary(filename);
    bool intercept = libClass != LIB_UNKNOWN;

    if (intercept) {
        void *caller = __builtin_return_address(0);
        Dl_info info;
        const char *caller_module = "<unknown>";
        if (dladdr(caller, &info)) {
            caller_module = info.dli_fname;
            intercept = classifyLibrary(caller_module) == LIB_UNKNOWN;
        }

        const char * libgl_filename = getenv("TRACE_LIBGL");
        if (libgl_filename) {
            // Don't intercept when using LD_LIBRARY_PATH instead of LD_PRELOAD
            intercept = false;
        }

        os::log("apitrace: %s dlopen(\"%s\", 0x%x) from %s\n",
                intercept ? "redirecting" : "ignoring",
                filename, flag, caller_module);
    }

#ifdef EGLTRACE

    if (intercept) {
        /* The current dispatch implementation relies on core entry-points to be globally available, so force this.
         *
         * TODO: A better approach would be to note down the entry points here
         * and use them later. Another alternative would be to reopen the library
         * with RTLD_NOLOAD | RTLD_GLOBAL.
         */
        flag &= ~RTLD_LOCAL;
        flag |= RTLD_GLOBAL;
    }

#endif

    handle = _dlopen(filename, flag);
    if (!handle) {
        return handle;
    }

    if (intercept) {
        if (libClass == LIB_GL) {
            // Use the true libGL.so handle instead of RTLD_NEXT from now on
            _libGlHandle = handle;
        }

        // Get the file path for our shared object, and use it instead
        static int dummy = 0xdeedbeef;
        Dl_info info;
        if (dladdr(&dummy, &info)) {
            handle = _dlopen(info.dli_fname, flag);
        } else {
            os::log("apitrace: warning: dladdr() failed\n");
        }

#ifdef EGLTRACE
        // SDL will skip dlopen'ing libEGL.so after it spots EGL symbols on our
        // wrapper, so force loading it here.
        // (https://github.com/apitrace/apitrace/issues/291#issuecomment-59734022)
        if (strcmp(filename, "libEGL.so") != 0 &&
            strcmp(filename, "libEGL.so.1") != 0) {
            _dlopen("libEGL.so.1", RTLD_GLOBAL | RTLD_LAZY);
        }
#endif
    }

    return handle;
}