Example #1
void test_priority_preemptible(void)
{
	int old_prio = k_thread_priority_get(k_current_get());

	/* set current thread to a non-negative priority */
	last_prio = 2;
	k_thread_priority_set(k_current_get(), last_prio);

	int spawn_prio = last_prio - 1;

	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      thread_entry, NULL, NULL, NULL,
				      spawn_prio, 0, 0);
	/* checkpoint: current thread is preempted by the higher priority thread */
	zassert_true(last_prio == spawn_prio, NULL);

	k_sleep(100);
	k_thread_abort(tid);

	spawn_prio = last_prio + 1;
	tid = k_thread_create(&tdata, tstack, STACK_SIZE,
			      thread_entry, NULL, NULL, NULL,
			      spawn_prio, 0, 0);
	/* checkpoint: current thread is not preempted by the lower priority thread */
	zassert_false(last_prio == spawn_prio, NULL);
	k_thread_abort(tid);

	/* restore environment */
	k_thread_priority_set(k_current_get(), old_prio);
}
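
These test snippets share fixtures that the listing omits. A minimal sketch of the scaffolding, assuming a ztest-era Zephyr tree; the names are taken from the examples, but the stack size is an assumption and thread_entry() itself appears as Example #16 below:

#define STACK_SIZE 512		/* assumed; the real suite defines its own */

K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
static struct k_thread tdata;

static int last_prio;		/* written by thread_entry(), Example #16 */
static int execute_flag;	/* written by thread_entry_abort(), Example #6 */
static struct k_sem end_sema;	/* signalled by tcoop_ctx(), Example #9 */

static void thread_entry(void *p1, void *p2, void *p3);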
Example #2
static void threads_suspend_resume(int prio)
{
	int old_prio = k_thread_priority_get(k_current_get());

	/* set current thread priority */
	last_prio = prio;
	k_thread_priority_set(k_current_get(), last_prio);

	/* spawn thread with lower priority */
	int spawn_prio = last_prio + 1;

	k_tid_t tid = k_thread_spawn(tstack, STACK_SIZE,
				     thread_entry, NULL, NULL, NULL,
				     spawn_prio, 0, 0);
	/* suspend the spawned thread */
	k_thread_suspend(tid);
	k_sleep(100);
	/* checkpoint: spawned thread shouldn't execute while suspended */
	zassert_false(last_prio == spawn_prio, NULL);
	k_thread_resume(tid);
	k_sleep(100);
	/* checkpoint: spawned thread should execute after resume */
	zassert_true(last_prio == spawn_prio, NULL);

	k_thread_abort(tid);

	/* restore environment */
	k_thread_priority_set(k_current_get(), old_prio);
}
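
The helper above is parameterized on priority so it can be driven once from the cooperative range and once from the preemptible range; a plausible pair of callers (the case names are assumptions):

void test_threads_suspend_resume_cooperative(void)
{
	threads_suspend_resume(-2);	/* negative: cooperative priority */
}

void test_threads_suspend_resume_preemptible(void)
{
	threads_suspend_resume(1);	/* non-negative: preemptible priority */
}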
Example #3
File: context.c Project: bboozzoo/zephyr
static void thread_helper(void *arg1, void *arg2, void *arg3)
{
	k_tid_t self_thread_id;

	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	/*
	 * This thread starts off at a higher priority than thread_entry().
	 * Thus, it should execute immediately.
	 */

	thread_evidence++;

	/* Test that helper will yield to a thread of equal priority */
	self_thread_id = k_current_get();

	/* Lower priority to that of thread_entry() */
	k_thread_priority_set(self_thread_id, self_thread_id->base.prio + 1);

	k_yield();              /* Yield to thread of equal priority */

	thread_evidence++;
	/* <thread_evidence> should now be 2 */

}
Example #4
File: context.c Project: bboozzoo/zephyr
/**
 *
 * @brief Test some context routines from a preemptible thread
 *
 * This routine tests the k_current_get() and
 * k_is_in_isr() routines from both a preemptible thread and an ISR (that
 * interrupted a preemptible thread). Checking those routines with cooperative
 * threads is done elsewhere.
 *
 * @return TC_PASS on success
 * @return TC_FAIL on failure
 */
static int test_kernel_ctx_task(void)
{
	k_tid_t self_thread_id;

	TC_PRINT("Testing k_current_get() from an ISR and task\n");

	self_thread_id = k_current_get();
	isr_info.command = THREAD_SELF_CMD;
	isr_info.error = 0;
	/* isr_info is modified by the isr_handler routine */
	isr_handler_trigger();
	if (isr_info.error || isr_info.data != (void *)self_thread_id) {
		/*
		 * Either the ISR detected an error, or the ISR context ID
		 * does not match the interrupted task's thread ID.
		 */
		return TC_FAIL;
	}

	TC_PRINT("Testing k_is_in_isr() from an ISR\n");
	isr_info.command = EXEC_CTX_TYPE_CMD;
	isr_info.error = 0;
	isr_handler_trigger();
	if (isr_info.error || isr_info.value != K_ISR) {
		return TC_FAIL;
	}

	TC_PRINT("Testing k_is_in_isr() from a preemtible thread\n");
	if (k_is_in_isr() || _current->base.prio < 0) {
		return TC_FAIL;
	}

	return TC_PASS;
}
Example #5
File: context.c Project: bboozzoo/zephyr
/**
 *
 * @brief Handler to perform various actions from within an ISR context
 *
 * This routine is the ISR handler for isr_handler_trigger().  It performs
 * the command requested in <isr_info.command>.
 *
 * @return N/A
 */
static void isr_handler(void *data)
{
	ARG_UNUSED(data);

	switch (isr_info.command) {
	case THREAD_SELF_CMD:
		isr_info.data = (void *)k_current_get();
		break;

	case EXEC_CTX_TYPE_CMD:
		if (k_is_in_isr()) {
			isr_info.value = K_ISR;
			break;
		}

		if (_current->base.prio < 0) {
			isr_info.value = K_COOP_THREAD;
			break;
		}

		isr_info.value = K_PREEMPT_THREAD;

		break;

	default:
		isr_info.error = UNKNOWN_COMMAND;
		break;
	}
}
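
The isr_info block and isr_handler_trigger() are not shown in the listing. A sketch inferred from the fields the examples read and write; the struct layout is a guess, and irq_offload() is assumed to be how the tests synthesize the interrupt:

static struct {
	int command;	/* THREAD_SELF_CMD or EXEC_CTX_TYPE_CMD */
	int error;	/* 0 on success, UNKNOWN_COMMAND otherwise */
	void *data;	/* thread ID captured inside the ISR */
	int value;	/* K_ISR, K_COOP_THREAD or K_PREEMPT_THREAD */
} isr_info;

static void isr_handler_trigger(void)
{
	/* run isr_handler() synchronously in interrupt context */
	irq_offload(isr_handler, NULL);
}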
Example #6
static void thread_entry_abort(void *p1, void *p2, void *p3)
{
	/**TESTPOINT: abort current thread*/
	execute_flag = 1;
	k_thread_abort(k_current_get());
	/*unreachable*/
	execute_flag = 2;
	zassert_true(1 == 0, NULL);
}
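
A driver for thread_entry_abort() could look like the sketch below, assuming the usual spawn/sleep/check pattern (the test name is hypothetical):

void test_threads_abort_self(void)
{
	execute_flag = 0;
	k_thread_create(&tdata, tstack, STACK_SIZE, thread_entry_abort,
			NULL, NULL, NULL, 0, 0, 0);
	k_sleep(100);
	/* the entry ran (flag == 1) but never reached the line after abort */
	zassert_equal(execute_flag, 1, NULL);
}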
Example #7
/**
 *
 * @brief Test some context routines from a preemptible thread
 *
 * This routine tests the k_current_get() and
 * k_is_in_isr() routines from both a preemptible thread and an ISR (that
 * interrupted a preemptible thread). Checking those routines with cooperative
 * threads is done elsewhere.
 *
 * @return TC_PASS on success
 * @return TC_FAIL on failure
 */
static int test_kernel_ctx_task(void)
{
	k_tid_t self_thread_id;

	TC_PRINT("Testing k_current_get() from an ISR and task\n");

	self_thread_id = k_current_get();
	isr_info.command = THREAD_SELF_CMD;
	isr_info.error = 0;
	/* isr_info is modified by the isr_handler routine */
	isr_handler_trigger();

	if (isr_info.error) {
		TC_ERROR("ISR detected an error\n");
		return TC_FAIL;
	}

	if (isr_info.data != (void *)self_thread_id) {
		TC_ERROR("ISR context ID mismatch\n");
		return TC_FAIL;
	}

	TC_PRINT("Testing k_is_in_isr() from an ISR\n");
	isr_info.command = EXEC_CTX_TYPE_CMD;
	isr_info.error = 0;
	isr_handler_trigger();

	if (isr_info.error) {
		TC_ERROR("ISR detected an error\n");
		return TC_FAIL;
	}

	if (isr_info.value != K_ISR) {
		TC_ERROR("isr_info.value was not K_ISR\n");
		return TC_FAIL;
	}

	TC_PRINT("Testing k_is_in_isr() from a preemptible thread\n");
	if (k_is_in_isr()) {
		TC_ERROR("Should not be in ISR context\n");
		return TC_FAIL;
	}
	if (_current->base.prio < 0) {
		TC_ERROR("Current thread should have preemptible priority\n");
		return TC_FAIL;
	}

	return TC_PASS;
}
Example #8
/*test cases*/
void test_priority_cooperative(void)
{
	int old_prio = k_thread_priority_get(k_current_get());

	/* set current thread to a negative priority */
	last_prio = -1;
	k_thread_priority_set(k_current_get(), last_prio);

	/* spawn thread with higher priority */
	int spawn_prio = last_prio - 1;

	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      thread_entry, NULL, NULL, NULL,
				      spawn_prio, 0, 0);
	/* checkpoint: current thread shouldn't be preempted by higher priority thread */
	zassert_true(last_prio == k_thread_priority_get(k_current_get()), NULL);
	k_sleep(100);
	/* checkpoint: spawned thread gets executed once current thread sleeps */
	zassert_true(last_prio == spawn_prio, NULL);
	k_thread_abort(tid);

	/* restore environment */
	k_thread_priority_set(k_current_get(), old_prio);
}
Example #9
static void tcoop_ctx(void *p1, void *p2, void *p3)
{
	/** TESTPOINT: The thread's priority is in the cooperative range.*/
	zassert_false(k_is_preempt_thread(), NULL);
	k_thread_priority_set(k_current_get(), K_PRIO_PREEMPT(1));
	/** TESTPOINT: The thread's priority is in the preemptible range.*/
	zassert_true(k_is_preempt_thread(), NULL);
	k_sched_lock();
	/** TESTPOINT: The thread has locked the scheduler.*/
	zassert_false(k_is_preempt_thread(), NULL);
	k_sched_unlock();
	/** TESTPOINT: The thread has not locked the scheduler.*/
	zassert_true(k_is_preempt_thread(), NULL);
	k_sem_give(&end_sema);
}
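
tcoop_ctx() must start at a cooperative priority for its first checkpoint to hold. A sketch of a plausible driver (the test name and priority are assumptions):

void test_sched_is_preempt_thread(void)
{
	k_sem_init(&end_sema, 0, 1);

	/* spawn cooperative so k_is_preempt_thread() starts out false */
	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE, tcoop_ctx,
				      NULL, NULL, NULL,
				      K_PRIO_COOP(1), 0, 0);

	k_sem_take(&end_sema, K_FOREVER);
	k_thread_abort(tid);
}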
Example #10
File: fatal.c Project: zmole945/zephyr
/**
 *
 * @brief Kernel fatal error handler
 *
 * This routine is called when fatal error conditions are detected by software
 * and is responsible only for reporting the error. Once reported, it then
 * invokes the user provided routine _SysFatalErrorHandler() which is
 * responsible for implementing the error handling policy.
 *
 * The caller is expected to always provide a usable ESF. In the event that the
 * fatal error does not have a hardware generated ESF, the caller should either
 * create its own or use a pointer to the global default ESF <_default_esf>.
 *
 * @return This function does not return.
 */
FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
							const NANO_ESF *pEsf)
{
	switch (reason) {
	case _NANO_ERR_HW_EXCEPTION:
		break;

#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_ARC_STACK_CHECKING)
	case _NANO_ERR_STACK_CHK_FAIL:
		printk("***** Stack Check Fail! *****\n");
		break;
#endif

	case _NANO_ERR_ALLOCATION_FAIL:
		printk("**** Kernel Allocation Failure! ****\n");
		break;

	case _NANO_ERR_KERNEL_OOPS:
		printk("***** Kernel OOPS! *****\n");
		break;

	case _NANO_ERR_KERNEL_PANIC:
		printk("***** Kernel Panic! *****\n");
		break;

	default:
		printk("**** Unknown Fatal Error %d! ****\n", reason);
		break;
	}
	printk("Current thread ID = %p\n"
	       "Faulting instruction address = 0x%lx\n",
	       k_current_get(),
	       _arc_v2_aux_reg_read(_ARC_V2_ERET));

	/*
	 * Now that the error has been reported, call the user implemented
	 * policy
	 * to respond to the error.  The decisions as to what responses are
	 * appropriate to the various errors are something the customer must
	 * decide.
	 */

	_SysFatalErrorHandler(reason, pEsf);

	for (;;)
		;
}
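
The policy hook invoked at the end is application-overridable. A minimal sketch of such a policy, assuming the same-era hook signature; aborting only the offending thread is one common choice:

FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
					 const NANO_ESF *pEsf)
{
	ARG_UNUSED(reason);
	ARG_UNUSED(pEsf);

	/* simplest policy: kill the offending thread, keep the system up */
	k_thread_abort(k_current_get());

	/* k_thread_abort() on the current thread does not return */
	CODE_UNREACHABLE;
}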
Example #11
/*test cases*/
void test_threads_cancel_undelayed(void)
{
	int cur_prio = k_thread_priority_get(k_current_get());

	/* spawn thread with lower priority */
	int spawn_prio = cur_prio + 1;

	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      thread_entry, NULL, NULL, NULL,
				      spawn_prio, 0, 0);

	/**TESTPOINT: check cancel retcode when thread is not delayed*/
	int cancel_ret = k_thread_cancel(tid);

	zassert_equal(cancel_ret, -EINVAL, NULL);
	k_thread_abort(tid);
}
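
The delayed counterpart, where cancellation should succeed because the thread is still on the timeout queue, would look roughly like this (a sketch assuming the legacy k_thread_cancel() semantics used above):

void test_threads_cancel_delayed(void)
{
	/* start with a delay long enough that the thread has not run yet */
	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
				      thread_entry, NULL, NULL, NULL,
				      0, 0, 100);

	/* cancelling a still-delayed thread returns 0 */
	zassert_equal(k_thread_cancel(tid), 0, NULL);
}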
Example #12
File: fatal.c Project: 01org/zephyr
/**
 *
 * @brief Kernel fatal error handler
 *
 * This routine is called when fatal error conditions are detected by software
 * and is responsible only for reporting the error. Once reported, it then
 * invokes the user provided routine _SysFatalErrorHandler() which is
 * responsible for implementing the error handling policy.
 *
 * The caller is expected to always provide a usable ESF. In the event that the
 * fatal error does not have a hardware generated ESF, the caller should either
 * create its own or use a pointer to the global default ESF <_default_esf>.
 *
 * Unlike other arches, this function may return if _SysFatalErrorHandler
 * determines that only the current thread should be aborted and the CPU
 * was in handler mode. PendSV will be asserted in this case and the current
 * thread taken off the run queue. Leaving the exception will immediately
 * trigger a context switch.
 *
 * @param reason the reason that the handler was called
 * @param pEsf pointer to the exception stack frame
 */
void _NanoFatalErrorHandler(unsigned int reason,
					  const NANO_ESF *pEsf)
{
	switch (reason) {
	case _NANO_ERR_INVALID_TASK_EXIT:
		printk("***** Invalid Exit Software Error! *****\n");
		break;

#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_STACK_SENTINEL)
	case _NANO_ERR_STACK_CHK_FAIL:
		printk("***** Stack Check Fail! *****\n");
		break;
#endif /* CONFIG_STACK_CANARIES || CONFIG_STACK_SENTINEL */

	case _NANO_ERR_ALLOCATION_FAIL:
		printk("**** Kernel Allocation Failure! ****\n");
		break;

	case _NANO_ERR_KERNEL_OOPS:
		printk("***** Kernel OOPS! *****\n");
		break;

	case _NANO_ERR_KERNEL_PANIC:
		printk("***** Kernel Panic! *****\n");
		break;

	default:
		printk("**** Unknown Fatal Error %d! ****\n", reason);
		break;
	}
	printk("Current thread ID = %p\n"
	       "Faulting instruction address = 0x%x\n",
	       k_current_get(), pEsf->pc);

	/*
	 * Now that the error has been reported, call the user implemented
	 * policy
	 * to respond to the error.  The decisions as to what responses are
	 * appropriate to the various errors are something the customer must
	 * decide.
	 */

	_SysFatalErrorHandler(reason, pEsf);
}
Example #13
static int shell_cmd_tasks(int argc, char *argv[])
{
	ARG_UNUSED(argc);
	ARG_UNUSED(argv);
	struct k_thread *thread_list = NULL;

	printk("tasks:\n");

	thread_list   = (struct k_thread *)SYS_THREAD_MONITOR_HEAD;
	while (thread_list != NULL) {
		printk("%s%p:   options: 0x%x priority: %d\n",
		       (thread_list == k_current_get()) ? "*" : " ",
		       thread_list,
		       thread_list->base.user_options,
		       k_thread_priority_get(thread_list));
		thread_list = (struct k_thread *)SYS_THREAD_MONITOR_NEXT(thread_list);
	}
	return 0;
}
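
To make the command reachable from the console it has to be registered; a sketch assuming the legacy Zephyr 1.x shell API (command table name and help text are illustrative):

static struct shell_cmd commands[] = {
	{ "tasks", shell_cmd_tasks, "list threads known to the monitor" },
	{ NULL, NULL, NULL }
};

SHELL_REGISTER("kernel", commands);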
Example #14
File: context.c Project: bboozzoo/zephyr
/**
 *
 * @brief Test the various context/thread routines from a cooperative thread
 *
 * This routine tests the k_current_get() and
 * k_is_in_isr() routines from both a thread and an ISR (that interrupted a
 * cooperative thread).  Checking those routines with preemptible threads is
 * done elsewhere.
 *
 * This routine may set <thread_detected_error> to the following values:
 *   1 - if thread ID matches that of the task
 *   2 - if thread ID taken during ISR does not match that of the thread
 *   3 - k_is_in_isr() when called from an ISR is false
 *   4 - k_is_in_isr() when called from a thread is true
 *   5 - if thread is not a cooperative thread
 *
 * @return TC_PASS on success
 * @return TC_FAIL on failure
 */
static int test_kernel_thread(k_tid_t task_thread_id)
{
	k_tid_t self_thread_id;

	self_thread_id = k_current_get();
	if (self_thread_id == task_thread_id) {
		thread_detected_error = 1;
		return TC_FAIL;
	}

	isr_info.command = THREAD_SELF_CMD;
	isr_info.error = 0;
	isr_handler_trigger();
	if (isr_info.error || isr_info.data != (void *)self_thread_id) {
		/*
		 * Either the ISR detected an error, or the ISR context ID
		 * does not match the interrupted thread's thread ID.
		 */
		thread_detected_error = 2;
		return TC_FAIL;
	}

	isr_info.command = EXEC_CTX_TYPE_CMD;
	isr_info.error = 0;
	isr_handler_trigger();
	if (isr_info.error || (isr_info.value != K_ISR)) {
		thread_detected_error = 3;
		return TC_FAIL;
	}

	if (k_is_in_isr()) {
		thread_detected_error = 4;
		return TC_FAIL;
	}

	if (_current->base.prio >= 0) {
		thread_detected_error = 5;
		return TC_FAIL;
	}

	return TC_PASS;
}
Example #15
File: fatal.c Project: zmole945/zephyr
/**
 *
 * @brief Kernel fatal error handler
 *
 * This routine is called when a fatal error condition is detected by either
 * hardware or software.
 *
 * The caller is expected to always provide a usable ESF.  In the event that the
 * fatal error does not have a hardware generated ESF, the caller should either
 * create its own or use a pointer to the global default ESF <_default_esf>.
 *
 * @param reason the reason that the handler was called
 * @param pEsf pointer to the exception stack frame
 *
 * @return This function does not return.
 */
FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
					  const NANO_ESF *pEsf)
{
	_debug_fatal_hook(pEsf);

#ifdef CONFIG_PRINTK

	/* Display diagnostic information about the error */

	switch (reason) {
	case _NANO_ERR_CPU_EXCEPTION:
		break;

	case _NANO_ERR_SPURIOUS_INT: {
		int vector = _irq_controller_isr_vector_get();

		printk("***** Unhandled interrupt vector ");
		if (vector >= 0) {
			printk("%d ", vector);
		}
		printk("*****\n");
		break;
	}
#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_STACK_SENTINEL) || \
		defined(CONFIG_HW_STACK_PROTECTION) || \
		defined(CONFIG_USERSPACE)
	case _NANO_ERR_STACK_CHK_FAIL:
		printk("***** Stack Check Fail! *****\n");
		break;
#endif /* stack protection configs */

	case _NANO_ERR_KERNEL_OOPS:
		printk("***** Kernel OOPS! *****\n");
		break;

	case _NANO_ERR_KERNEL_PANIC:
		printk("***** Kernel Panic! *****\n");
		break;

	case _NANO_ERR_ALLOCATION_FAIL:
		printk("**** Kernel Allocation Failure! ****\n");
		break;

	default:
		printk("**** Unknown Fatal Error %d! ****\n", reason);
		break;
	}

	printk("Current thread ID = %p\n"
	       "Faulting segment:address = 0x%04x:0x%08x\n"
	       "eax: 0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x\n"
	       "esi: 0x%08x, edi: 0x%08x, ebp: 0x%08x, esp: 0x%08x\n"
	       "eflags: 0x%x\n",
	       k_current_get(),
	       pEsf->cs & 0xFFFF, pEsf->eip,
	       pEsf->eax, pEsf->ebx, pEsf->ecx, pEsf->edx,
	       pEsf->esi, pEsf->edi, pEsf->ebp, pEsf->esp,
	       pEsf->eflags);
#endif /* CONFIG_PRINTK */


	/*
	 * Error was fatal to a kernel task or a thread; invoke the system
	 * fatal error handling policy defined for the platform.
	 */

	_SysFatalErrorHandler(reason, pEsf);
}
Example #16
static void thread_entry(void *p1, void *p2, void *p3)
{
	last_prio = k_thread_priority_get(k_current_get());
}
Example #17
File: main.c Project: bboozzoo/zephyr
void load_store_low(void)
{
	unsigned int i;
	unsigned char init_byte;
	unsigned char *store_ptr = (unsigned char *)&float_reg_set_store;
	unsigned char *load_ptr = (unsigned char *)&float_reg_set_load;

	volatile char volatile_stack_var = 0;

	PRINT_DATA("Floating point sharing tests started\n");
	PRINT_LINE;

	/*
	 * The high priority thread has a sleep to get this (low pri) thread
	 * running and here (low priority) we enable slicing and waste cycles
	 * to run hi pri thread in between fp ops.
	 *
	 * Enable round robin scheduling to allow both the low priority pi
	 * computation and load/store tasks to execute. The high priority pi
	 * computation and load/store tasks will preempt the low priority tasks
	 * periodically.
	 */

	k_sched_time_slice_set(10, LO_PRI);

	/*
	 * Initialize floating point load buffer to known values;
	 * these values must be different than the value used in other threads.
	 */

	init_byte = MAIN_FLOAT_REG_CHECK_BYTE;
	for (i = 0; i < SIZEOF_FP_REGISTER_SET; i++) {
		load_ptr[i] = init_byte++;
	}

	/* Keep cranking forever, or until an error is detected. */

	for (load_store_low_count = 0; ; load_store_low_count++) {

		/*
		 * Clear store buffer to erase all traces of any previous
		 * floating point values that have been saved.
		 */

		memset(&float_reg_set_store, 0, SIZEOF_FP_REGISTER_SET);

		/*
		 * Utilize an architecture specific function to load all the
		 * floating point registers with known values.
		 */

		_load_all_float_registers(&float_reg_set_load);

		/*
		 * Waste some cycles to give the high priority load/store
		 * thread an opportunity to run when the low priority thread is
		 * using the floating point registers.
		 *
		 * IMPORTANT: This logic requires that _tick_get_32() not
		 * perform any floating point operations!
		 */

		while ((_tick_get_32() % 5) != 0) {
			/*
			 * Use a volatile variable to prevent compiler
			 * optimizing out the spin loop.
			 */
			++volatile_stack_var;
		}

		/*
		 * Utilize an architecture specific function to dump the
		 * contents of all floating point registers to memory.
		 */

		_store_all_float_registers(&float_reg_set_store);

		/*
		 * Compare each byte of buffer to ensure the expected value is
		 * present, indicating that the floating point registers weren't
		 * impacted by the operation of the high priority thread(s).
		 *
		 * Display error message and terminate if discrepancies are
		 * detected.
		 */

		init_byte = MAIN_FLOAT_REG_CHECK_BYTE;

		for (i = 0; i < SIZEOF_FP_REGISTER_SET; i++) {
			if (store_ptr[i] != init_byte) {
				TC_ERROR("load_store_low found 0x%x instead "
					"of 0x%x @ offset 0x%x\n",
						store_ptr[i],
						init_byte, i);
				TC_ERROR("Discrepancy found during "
						"iteration %d\n",
						load_store_low_count);
				fpu_sharing_error = 1;
			}
			init_byte++;
		}

		/*
		 * Terminate if a test error has been reported.
		 */

		if (fpu_sharing_error) {
			TC_END_RESULT(TC_FAIL);
			TC_END_REPORT(TC_FAIL);
			return;
		}

#if defined(CONFIG_ISA_IA32)
		/*
		 * After every 1000 iterations (arbitrarily chosen), explicitly
		 * disable floating point operations for the task. The
		 * subsequent execution of _load_all_float_registers() will
		 * result in an exception to automatically re-enable
		 * floating point support for the task.
		 *
		 * The purpose of this part of the test is to exercise the
		 * k_float_disable() API, and to also continue exercising
		 * the (exception based) floating point enabling mechanism.
		 */
		if ((load_store_low_count % 1000) == 0) {
			k_float_disable(k_current_get());
		}
#elif defined(CONFIG_CPU_CORTEX_M4)
		/*
		 * The routine k_float_disable() allows for thread-level
		 * granularity for disabling floating point. Furthermore, it
		 * is useful for testing on the fly thread enabling of floating
		 * point. Neither of these capabilities are currently supported
		 * for ARM.
		 */
#endif
	}
}
Example #18
/**
 *
 * @brief Software interrupt generating thread
 *
 * Lower priority thread that waits on a semaphore when it starts. Once the
 * main thread releases the semaphore, it sets up the interrupt handler and
 * generates the software interrupt.
 *
 * @return N/A
 */
void int_thread(void)
{
	k_sem_take(&INTSEMA, K_FOREVER);
	irq_offload(latency_test_isr, NULL);
	k_thread_suspend(k_current_get());
}
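
latency_test_isr() is defined elsewhere in the benchmark; its shape just has to match the irq_offload() callback type. A purely illustrative stub:

static void latency_test_isr(void *unused)
{
	ARG_UNUSED(unused);

	/* the real benchmark timestamps the interrupt here */
}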
Example #19
STATIC mp_obj_t mod_current_tid(void) {
    /* thread IDs are pointers; cast to the small-int type explicitly */
    return MP_OBJ_NEW_SMALL_INT((mp_int_t)k_current_get());
}
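
To expose this as a Python callable, the stock MicroPython glue wraps the C function; MP_DEFINE_CONST_FUN_OBJ_0 is the standard macro for a zero-argument function (the object name follows the usual convention and is assumed here):

STATIC MP_DEFINE_CONST_FUN_OBJ_0(mod_current_tid_obj, mod_current_tid);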
Example #20
File: main.c Project: rsalveti/zephyr
void run_tests(void)
{
	k_thread_priority_set(k_current_get(), K_PRIO_COOP(7));

	test_failed = false;

	struct net_conn_handle *handlers[CONFIG_NET_MAX_CONN];
	struct net_if *iface = net_if_get_default();
	struct net_if_addr *ifaddr;
	struct ud *ud;
	int ret, i = 0;
	bool st;

	struct sockaddr_in6 any_addr6;
	const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;

	struct sockaddr_in6 my_addr6;
	struct in6_addr in6addr_my = { { { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0,
					   0, 0, 0, 0, 0, 0, 0, 0x1 } } };

	struct sockaddr_in6 peer_addr6;
	struct in6_addr in6addr_peer = { { { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0,
					  0, 0, 0, 0x4e, 0x11, 0, 0, 0x2 } } };

	struct sockaddr_in any_addr4;
	const struct in_addr in4addr_any = { { { 0 } } };

	struct sockaddr_in my_addr4;
	struct in_addr in4addr_my = { { { 192, 0, 2, 1 } } };

	struct sockaddr_in peer_addr4;
	struct in_addr in4addr_peer = { { { 192, 0, 2, 9 } } };

	net_ipaddr_copy(&any_addr6.sin6_addr, &in6addr_any);
	any_addr6.sin6_family = AF_INET6;

	net_ipaddr_copy(&my_addr6.sin6_addr, &in6addr_my);
	my_addr6.sin6_family = AF_INET6;

	net_ipaddr_copy(&peer_addr6.sin6_addr, &in6addr_peer);
	peer_addr6.sin6_family = AF_INET6;

	net_ipaddr_copy(&any_addr4.sin_addr, &in4addr_any);
	any_addr4.sin_family = AF_INET;

	net_ipaddr_copy(&my_addr4.sin_addr, &in4addr_my);
	my_addr4.sin_family = AF_INET;

	net_ipaddr_copy(&peer_addr4.sin_addr, &in4addr_peer);
	peer_addr4.sin_family = AF_INET;

	k_sem_init(&recv_lock, 0, UINT_MAX);

	ifaddr = net_if_ipv6_addr_add(iface, &in6addr_my, NET_ADDR_MANUAL, 0);
	if (!ifaddr) {
		printk("Cannot add %s to interface %p\n",
		       net_sprint_ipv6_addr(&in6addr_my), iface);
		zassert_true(0, "exiting");
	}

	ifaddr = net_if_ipv4_addr_add(iface, &in4addr_my, NET_ADDR_MANUAL, 0);
	if (!ifaddr) {
		printk("Cannot add %s to interface %p\n",
		       net_sprint_ipv4_addr(&in4addr_my), iface);
		zassert_true(0, "exiting");
	}

#define REGISTER(family, raddr, laddr, rport, lport)			\
	({								\
		static struct ud user_data;				\
									\
		user_data.remote_addr = (struct sockaddr *)raddr;	\
		user_data.local_addr =  (struct sockaddr *)laddr;	\
		user_data.remote_port = rport;				\
		user_data.local_port = lport;				\
		user_data.test = "DST="#raddr"-SRC="#laddr"-RP="#rport	\
			"-LP="#lport;					\
									\
		set_port(family, (struct sockaddr *)raddr,		\
			 (struct sockaddr *)laddr, rport, lport);	\
									\
		ret = net_udp_register((struct sockaddr *)raddr,	\
				       (struct sockaddr *)laddr,	\
				       rport, lport,			\
				       test_ok, &user_data,		\
				       &handlers[i]);			\
		if (ret) {						\
			printk("UDP register %s failed (%d)\n",		\
			       user_data.test, ret);			\
			zassert_true(0, "exiting");			\
		}							\
		user_data.handle = handlers[i++];			\
		&user_data;						\
	})

#define REGISTER_FAIL(raddr, laddr, rport, lport)			\
	ret = net_udp_register((struct sockaddr *)raddr,		\
			       (struct sockaddr *)laddr,		\
			       rport, lport,				\
			       test_fail, INT_TO_POINTER(0), NULL);	\
	if (!ret) {							\
		printk("UDP register invalid match %s failed\n",	\
		       "DST="#raddr"-SRC="#laddr"-RP="#rport"-LP="#lport); \
		zassert_true(0, "exiting");				\
	}

#define UNREGISTER(ud)							\
	ret = net_udp_unregister(ud->handle);				\
	if (ret) {							\
		printk("UDP unregister %p failed (%d)\n", ud->handle,	\
		       ret);						\
		zassert_true(0, "exiting");				\
	}

#define TEST_IPV6_OK(ud, raddr, laddr, rport, lport)			\
	st = send_ipv6_udp_msg(iface, raddr, laddr, rport, lport, ud,	\
			       false);					\
	if (!st) {							\
		printk("%d: UDP test \"%s\" fail\n", __LINE__,		\
		       ud->test);					\
		zassert_true(0, "exiting");				\
	}

#define TEST_IPV6_LONG_OK(ud, raddr, laddr, rport, lport)		\
	st = send_ipv6_udp_long_msg(iface, raddr, laddr, rport, lport, ud, \
			       false);					\
	if (!st) {							\
		printk("%d: UDP long test \"%s\" fail\n", __LINE__,	\
		       ud->test);					\
		zassert_true(0, "exiting");				\
	}

#define TEST_IPV4_OK(ud, raddr, laddr, rport, lport)			\
	st = send_ipv4_udp_msg(iface, raddr, laddr, rport, lport, ud,	\
			       false);					\
	if (!st) {							\
		printk("%d: UDP test \"%s\" fail\n", __LINE__,		\
		       ud->test);					\
		zassert_true(0, "exiting");				\
	}

#define TEST_IPV6_FAIL(ud, raddr, laddr, rport, lport)			\
	st = send_ipv6_udp_msg(iface, raddr, laddr, rport, lport, ud,	\
			       true);					\
	if (!st) {							\
		printk("%d: UDP neg test \"%s\" fail\n", __LINE__,	\
		       ud->test);					\
		zassert_true(0, "exiting");				\
	}

#define TEST_IPV4_FAIL(ud, raddr, laddr, rport, lport)			\
	st = send_ipv4_udp_msg(iface, raddr, laddr, rport, lport, ud,	\
			       true);					\
	if (!st) {							\
		printk("%d: UDP neg test \"%s\" fail\n", __LINE__,	\
		       ud->test);					\
		zassert_true(0, "exiting");				\
	}

	ud = REGISTER(AF_INET6, &any_addr6, &any_addr6, 1234, 4242);
	TEST_IPV6_OK(ud, &in6addr_peer, &in6addr_my, 1234, 4242);
	TEST_IPV6_OK(ud, &in6addr_peer, &in6addr_my, 1234, 4242);
	TEST_IPV6_LONG_OK(ud, &in6addr_peer, &in6addr_my, 1234, 4242);
	TEST_IPV6_LONG_OK(ud, &in6addr_peer, &in6addr_my, 1234, 4242);
	TEST_IPV6_FAIL(ud, &in6addr_peer, &in6addr_my, 1234, 61400);
	TEST_IPV6_FAIL(ud, &in6addr_peer, &in6addr_my, 1234, 61400);
	UNREGISTER(ud);

	ud = REGISTER(AF_INET, &any_addr4, &any_addr4, 1234, 4242);
	TEST_IPV4_OK(ud, &in4addr_peer, &in4addr_my, 1234, 4242);
	TEST_IPV4_OK(ud, &in4addr_peer, &in4addr_my, 1234, 4242);
	TEST_IPV4_FAIL(ud, &in4addr_peer, &in4addr_my, 1234, 4325);
	TEST_IPV4_FAIL(ud, &in4addr_peer, &in4addr_my, 1234, 4325);
	UNREGISTER(ud);

	ud = REGISTER(AF_INET6, &any_addr6, NULL, 1234, 4242);
	TEST_IPV6_OK(ud, &in6addr_peer, &in6addr_my, 1234, 4242);
	TEST_IPV6_OK(ud, &in6addr_peer, &in6addr_my, 1234, 4242);
	TEST_IPV6_FAIL(ud, &in6addr_peer, &in6addr_my, 1234, 61400);
	TEST_IPV6_FAIL(ud, &in6addr_peer, &in6addr_my, 1234, 61400);
	UNREGISTER(ud);

	ud = REGISTER(AF_INET6, NULL, &any_addr6, 1234, 4242);
	TEST_IPV6_OK(ud, &in6addr_peer, &in6addr_my, 1234, 4242);
	TEST_IPV6_OK(ud, &in6addr_peer, &in6addr_my, 1234, 4242);
	TEST_IPV6_LONG_OK(ud, &in6addr_peer, &in6addr_my, 1234, 4242);
	TEST_IPV6_LONG_OK(ud, &in6addr_peer, &in6addr_my, 1234, 4242);
	TEST_IPV6_FAIL(ud, &in6addr_peer, &in6addr_my, 1234, 61400);
	TEST_IPV6_FAIL(ud, &in6addr_peer, &in6addr_my, 1234, 61400);
	UNREGISTER(ud);

	ud = REGISTER(AF_INET6, &peer_addr6, &my_addr6, 1234, 4242);
	TEST_IPV6_OK(ud, &in6addr_peer, &in6addr_my, 1234, 4242);
	TEST_IPV6_FAIL(ud, &in6addr_peer, &in6addr_my, 1234, 4243);

	ud = REGISTER(AF_INET, &peer_addr4, &my_addr4, 1234, 4242);
	TEST_IPV4_OK(ud, &in4addr_peer, &in4addr_my, 1234, 4242);
	TEST_IPV4_FAIL(ud, &in4addr_peer, &in4addr_my, 1234, 4243);

	ud = REGISTER(AF_UNSPEC, NULL, NULL, 1234, 42423);
	TEST_IPV4_OK(ud, &in4addr_peer, &in4addr_my, 1234, 42423);
	TEST_IPV6_OK(ud, &in6addr_peer, &in6addr_my, 1234, 42423);

	ud = REGISTER(AF_UNSPEC, NULL, NULL, 1234, 0);
	TEST_IPV4_OK(ud, &in4addr_peer, &in4addr_my, 1234, 42422);
	TEST_IPV6_OK(ud, &in6addr_peer, &in6addr_my, 1234, 42422);
	TEST_IPV4_OK(ud, &in4addr_peer, &in4addr_my, 1234, 42422);
	TEST_IPV6_OK(ud, &in6addr_peer, &in6addr_my, 1234, 42422);

	TEST_IPV4_FAIL(ud, &in4addr_peer, &in4addr_my, 12345, 42421);
	TEST_IPV6_FAIL(ud, &in6addr_peer, &in6addr_my, 12345, 42421);

	ud = REGISTER(AF_UNSPEC, NULL, NULL, 0, 0);
	TEST_IPV4_OK(ud, &in4addr_peer, &in4addr_my, 12345, 42421);
	TEST_IPV6_OK(ud, &in6addr_peer, &in6addr_my, 12345, 42421);
	TEST_IPV6_LONG_OK(ud, &in6addr_peer, &in6addr_my, 12345, 42421);

	/* Remote addr same as local addr, these two will never match */
	REGISTER(AF_INET6, &my_addr6, NULL, 1234, 4242);
	REGISTER(AF_INET, &my_addr4, NULL, 1234, 4242);

	/* IPv4 remote addr and IPv6 remote addr, impossible combination */
	REGISTER_FAIL(&my_addr4, &my_addr6, 1234, 4242);

	/**TESTPOINT: Check if tests passed*/
	zassert_false(fail, "Tests failed");

	i--;
	while (i) {
		ret = net_udp_unregister(handlers[i]);
		if (ret < 0 && ret != -ENOENT) {
			printk("Cannot unregister udp %d\n", i);
			zassert_true(0, "exiting");
		}

		i--;
	}

	zassert_true((net_udp_unregister(NULL) < 0), "Unregister udp failed");
	zassert_false(test_failed, "udp tests failed");
}
Example #21
File: context.c Project: bboozzoo/zephyr
/**
 *
 * @brief Test the k_yield() routine
 *
 * This routine tests the k_yield() routine.  It starts another thread
 * (thus also testing k_thread_spawn()) and checks the behaviour of
 * k_yield() against the cases of there being a higher priority thread,
 * a lower priority thread, and another thread of equal priority.
 *
 * On error, it may set <thread_detected_error> to one of the following values:
 *   10 - helper thread ran prematurely
 *   11 - k_yield() did not yield to a higher priority thread
 *   12 - k_yield() did not yield to an equal priority thread
 *   13 - k_yield() yielded to a lower priority thread
 *
 * @return TC_PASS on success
 * @return TC_FAIL on failure
 */
static int test_k_yield(void)
{
	k_tid_t self_thread_id;

	/*
	 * Start a thread of higher priority.  Note that since the new thread is
	 * being started from a thread, it will not automatically switch to the
	 * thread as it would if done from a task.
	 */

	self_thread_id = k_current_get();
	thread_evidence = 0;

	k_thread_spawn(thread_stack2, THREAD_STACKSIZE, thread_helper,
		       NULL, NULL, NULL,
		       K_PRIO_COOP(THREAD_PRIORITY - 1), 0, 0);

	if (thread_evidence != 0) {
		/* ERROR! Helper spawned at higher */
		thread_detected_error = 10;     /* priority ran prematurely. */
		return TC_FAIL;
	}

	/*
	 * Test that the thread will yield to the higher priority helper.
	 * <thread_evidence> is still 0.
	 */

	k_yield();

	if (thread_evidence == 0) {
		/* ERROR! Did not yield to higher */
		thread_detected_error = 11;     /* priority thread. */
		return TC_FAIL;
	}

	if (thread_evidence > 1) {
		/* ERROR! Helper did not yield to */
		thread_detected_error = 12;     /* equal priority thread. */
		return TC_FAIL;
	}

	/*
	 * Raise the priority of thread_entry().  Calling k_yield() should
	 * not result in switching to the helper.
	 */

	k_thread_priority_set(self_thread_id, self_thread_id->base.prio - 1);
	k_yield();

	if (thread_evidence != 1) {
		/* ERROR! Context switched to a lower */
		thread_detected_error = 13;     /* priority thread! */
		return TC_FAIL;
	}

	/*
	 * Block on <sem_thread>.  This will allow the helper thread to
	 * complete. The main task will wake this thread.
	 */

	k_sem_take(&sem_thread, K_FOREVER);

	return TC_PASS;
}
Example #22
File: context.c Project: bboozzoo/zephyr
/**
 * @brief Entry point to the CPU and thread tests
 *
 * This is the entry point to the CPU and thread tests.
 *
 * @return N/A
 */
void main(void)
{
	int rv;                 /* return value from tests */

	thread_detected_error = 0;
	thread_evidence = 0;

	TC_START("Test kernel CPU and thread routines");

	TC_PRINT("Initializing kernel objects\n");
	rv = kernel_init_objects();
	if (rv != TC_PASS) {
		goto tests_done;
	}
#ifdef HAS_POWERSAVE_INSTRUCTION
	TC_PRINT("Testing k_cpu_idle()\n");
	rv = test_kernel_cpu_idle(0);
	if (rv != TC_PASS) {
		goto tests_done;
	}
#ifndef CONFIG_ARM
	TC_PRINT("Testing k_cpu_atomic_idle()\n");
	rv = test_kernel_cpu_idle(1);
	if (rv != TC_PASS) {
		goto tests_done;
	}
#endif
#endif

	TC_PRINT("Testing interrupt locking and unlocking\n");
	rv = test_kernel_interrupts(irq_lock_wrapper, irq_unlock_wrapper, -1);
	if (rv != TC_PASS) {
		goto tests_done;
	}
#ifdef TICK_IRQ
	/* Disable interrupts coming from the timer. */

	TC_PRINT("Testing irq_disable() and irq_enable()\n");
	rv = test_kernel_interrupts(irq_disable_wrapper, irq_enable_wrapper,
				    TICK_IRQ);
	if (rv != TC_PASS) {
		goto tests_done;
	}
#endif

	TC_PRINT("Testing some kernel context routines\n");
	rv = test_kernel_ctx_task();
	if (rv != TC_PASS) {
		goto tests_done;
	}

	TC_PRINT("Spawning a thread from a task\n");
	thread_evidence = 0;

	k_thread_spawn(thread_stack1, THREAD_STACKSIZE, thread_entry,
		       k_current_get(), NULL,
		       NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, 0);

	if (thread_evidence != 1) {
		rv = TC_FAIL;
		TC_ERROR("  - thread did not execute as expected!\n");
		goto tests_done;
	}

	/*
	 * The thread ran, now wake it so it can test k_current_get and
	 * k_is_in_isr.
	 */
	TC_PRINT("Thread to test k_current_get() and " "k_is_in_isr()\n");
	k_sem_give(&sem_thread);

	if (thread_detected_error != 0) {
		rv = TC_FAIL;
		TC_ERROR("  - failure detected in thread; "
			 "thread_detected_error = %d\n", thread_detected_error);
		goto tests_done;
	}

	TC_PRINT("Thread to test k_yield()\n");
	k_sem_give(&sem_thread);

	if (thread_detected_error != 0) {
		rv = TC_FAIL;
		TC_ERROR("  - failure detected in thread; "
			 "thread_detected_error = %d\n", thread_detected_error);
		goto tests_done;
	}

	k_sem_give(&sem_thread);

	rv = test_timeout();
	if (rv != TC_PASS) {
		goto tests_done;
	}

tests_done:
	TC_END_RESULT(rv);
	TC_END_REPORT(rv);
}