Example #1
void fiber_sleep(int32_t timeout_in_ticks)
{
    int key;

    if (timeout_in_ticks == TICKS_NONE) {
        /* zero-length sleep: just let other fibers run */
        fiber_yield();
        return;
    }

    key = irq_lock();
    /* queue a timeout for the current fiber, then swap it out */
    _nano_timeout_add(_nanokernel.current, NULL, timeout_in_ticks);
    _Swap(key);
}
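A caller invokes fiber_sleep() from fiber context with a tick count. A minimal usage sketch; worker_fiber() and do_work() are hypothetical placeholders, not part of the original source:

/* Hypothetical fiber entry point showing a typical fiber_sleep() call. */
static void worker_fiber(int arg1, int arg2)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);

	while (1) {
		do_work();       /* placeholder for the fiber's real work */
		fiber_sleep(10); /* block this fiber for 10 ticks */
	}
}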
Example #2
void stack_fiber3(int par1, int par2)
{
	int i;
	uint32_t data;
	int *pcounter = (int *) par1;

	for (i = 0; i < par2; i++) {
		/* hand the loop counter to the peer via the first stack */
		data = i;
		nano_fiber_stack_push(&nano_stack_1, data);
		/* wait for the same value to come back on the second stack */
		data = 0xffffffff;
		while (!nano_fiber_stack_pop(&nano_stack_2, &data)) {
			fiber_yield();
		}
		if (data != i) {
			break;	/* mismatch: abort the test loop */
		}
		(*pcounter)++;
	}
}
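The loop above only makes progress if a peer fiber services the two stacks in the opposite direction. A hedged sketch of such a peer, mirroring the push/pop signatures used above; stack_fiber_peer() is hypothetical:

/* Hypothetical peer: pop what stack_fiber3() pushed on nano_stack_1 and
 * echo it back on nano_stack_2 so the data != i check above can pass.
 */
static void stack_fiber_peer(int par1, int par2)
{
	int i;
	uint32_t data;

	ARG_UNUSED(par1);

	for (i = 0; i < par2; i++) {
		while (!nano_fiber_stack_pop(&nano_stack_1, &data)) {
			fiber_yield();	/* nothing pushed yet */
		}
		nano_fiber_stack_push(&nano_stack_2, data);
	}
}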
Example #3
/**
 *
 * @brief Lifo test fiber
 *
 * @param par1   Address of the counter.
 * @param par2   Number of test loops.
 *
 * @return N/A
 */
void lifo_fiber3(int par1, int par2)
{
	int i;
	int element[2];	/* element[0] is reserved for the kernel's link word */
	int *pelement;
	int *pcounter = (int *)par1;

	for (i = 0; i < par2; i++) {
		element[1] = i;
		nano_fiber_lifo_put(&nanoLifo1, element);
		while ((pelement = nano_fiber_lifo_get(&nanoLifo2,
							TICKS_NONE)) == NULL) {
			fiber_yield();
		}
		if (pelement[1] != i) {
			break;
		}
		(*pcounter)++;
	}
	/* wait till it is safe to end: */
	nano_fiber_fifo_get(&nanoFifo_sync, TICKS_UNLIMITED);
}
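The two LIFOs and the synchronization FIFO referenced above must be initialized before the fiber runs. A hedged setup sketch using the legacy nanokernel init calls; lifo_test_init() is hypothetical:

/* Assumed one-time setup for the objects used by lifo_fiber3(). */
static void lifo_test_init(void)
{
	nano_lifo_init(&nanoLifo1);
	nano_lifo_init(&nanoLifo2);
	nano_fifo_init(&nanoFifo_sync);
}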
Example #4
static void fiberHelper(int arg1, int arg2)
{
	nano_thread_id_t  self_thread_id;

	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);

	/*
	 * This fiber starts off at a higher priority than fiberEntry().  Thus, it
	 * should execute immediately.
	 */

	fiberEvidence++;

	/* Test that helper will yield to a fiber of equal priority */
	self_thread_id = sys_thread_self_get();
	self_thread_id->prio++;  /* Lower priority to that of fiberEntry() */
	fiber_yield();        /* Yield to fiber of equal priority */

	fiberEvidence++;
	/* <fiberEvidence> should now be 2 */

}
Example #5
/* This is called by contiki/ip/tcpip.c:tcpip_uipcall() when a packet
 * is processed.
 */
PROCESS_THREAD(tcp, ev, data, buf, user_data)
{
	PROCESS_BEGIN();

	while(1) {
		PROCESS_YIELD_UNTIL(ev == tcpip_event);

		if (POINTER_TO_INT(data) == TCP_WRITE_EVENT) {
			/* We want to send data to peer. */
			struct net_context *context = user_data;

			if (!context) {
				continue;
			}

			do {
				context = user_data;
				if (!context || !buf) {
					break;
				}

				if (!context->ps.net_buf ||
				    context->ps.net_buf != buf) {
					NET_DBG("psock init %p buf %p\n",
						&context->ps, buf);
					PSOCK_INIT(&context->ps, buf);
				}

				handle_tcp_connection(&context->ps,
						      POINTER_TO_INT(data),
						      buf);

				PROCESS_WAIT_EVENT_UNTIL(ev == tcpip_event);

				if (POINTER_TO_INT(data) != TCP_WRITE_EVENT) {
					goto read_data;
				}
			} while(!(uip_closed(buf)  ||
				  uip_aborted(buf) ||
				  uip_timedout(buf)));

			context = user_data;

			if (context &&
			    context->tcp_type == NET_TCP_TYPE_CLIENT) {
				NET_DBG("\nConnection closed.\n");
				ip_buf_sent_status(buf) = -ECONNRESET;
			}

			continue;
		}

	read_data:
		/* We are receiving data from peer. */
		if (buf && uip_newdata(buf)) {
			struct net_buf *clone;

			if (!uip_len(buf)) {
				continue;
			}

			/* Note that the uIP stack will reuse the buffer
			 * when sending an ACK to the peer host. The send
			 * happens right after this function returns, so we
			 * cannot use the same buffer to pass data to the
			 * application.
			 */
			clone = net_buf_clone(buf);
			if (!clone) {
				NET_ERR("No enough RX buffers, "
					"packet %p discarded\n", buf);
				continue;
			}

			ip_buf_appdata(clone) = uip_buf(clone) +
				(ip_buf_appdata(buf) - (void *)uip_buf(buf));
			ip_buf_appdatalen(clone) = uip_len(buf);
			ip_buf_len(clone) = ip_buf_len(buf);
			ip_buf_context(clone) = user_data;
			uip_set_conn(clone) = uip_conn(buf);
			uip_flags(clone) = uip_flags(buf);
			uip_flags(clone) |= UIP_CONNECTED;

			NET_DBG("packet received context %p buf %p len %d "
				"appdata %p appdatalen %d\n",
				ip_buf_context(clone),
				clone,
				ip_buf_len(clone),
				ip_buf_appdata(clone),
				ip_buf_appdatalen(clone));

			nano_fifo_put(net_context_get_queue(user_data), clone);

			/* Let the application read the data now */
			fiber_yield();
		}
	}

	PROCESS_END();
}
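On the other side of the nano_fifo_put() above, the application presumably drains the per-context queue. A hedged sketch; app_recv() is hypothetical and assumes the timeout-taking nano_fifo_get() variant:

/* Hypothetical application-side receive: block until the tcp process has
 * queued a cloned buffer on this context's queue.
 */
static struct net_buf *app_recv(struct net_context *context)
{
	return nano_fifo_get(net_context_get_queue(context), TICKS_UNLIMITED);
}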
Example #6
int fiber_yieldTest(void)
{
	nano_thread_id_t  self_thread_id;

	/*
	 * Start a fiber of higher priority.  Note that since the new fiber is
	 * started from a fiber, execution does not automatically switch to it,
	 * as it would if the fiber were started from a task.
	 */

	self_thread_id = sys_thread_self_get();
	fiberEvidence = 0;
	fiber_fiber_start(fiberStack2, FIBER_STACKSIZE, fiberHelper,
		0, 0, FIBER_PRIORITY - 1, 0);

	if (fiberEvidence != 0) {
		/* ERROR! Helper spawned at higher priority ran prematurely. */
		fiberDetectedError = 10;
		return TC_FAIL;
	}

	/*
	 * Test that the fiber will yield to the higher priority helper.
	 * <fiberEvidence> is still 0.
	 */

	fiber_yield();

	if (fiberEvidence == 0) {
		/* ERROR! Did not yield to higher priority fiber. */
		fiberDetectedError = 11;
		return TC_FAIL;
	}

	if (fiberEvidence > 1) {
		/* ERROR! Helper did not yield to equal priority fiber. */
		fiberDetectedError = 12;
		return TC_FAIL;
	}

	/*
	 * Raise the priority of fiberEntry().  Calling fiber_yield() should
	 * not result in switching to the helper.
	 */

	self_thread_id->prio--;
	fiber_yield();

	if (fiberEvidence != 1) {
		/* ERROR! Context switched to a lower priority fiber! */
		fiberDetectedError = 13;
		return TC_FAIL;
	}

	/*
	 * Block on <wakeFiber>.  This will allow the helper fiber to complete.
	 * The main task will wake this fiber.
	 */

	nano_fiber_sem_take(&wakeFiber, TICKS_UNLIMITED);

	return TC_PASS;
}
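The test leans on a few fixtures defined elsewhere in the test file. An assumed sketch; the stack size and priority values are illustrative placeholders:

/* Assumed declarations backing fiber_yieldTest() and fiberHelper(). */
#define FIBER_STACKSIZE 512
#define FIBER_PRIORITY  6

static char fiberStack2[FIBER_STACKSIZE];
static struct nano_sem wakeFiber;
static volatile int fiberEvidence;
static volatile int fiberDetectedError;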
Example #7
void fiber_yieldv(void *v)
{
	FiberYieldValue val;

	val.pvoid = v;
	fiber_yield(val);
}
Example #8
/**
 *
 * @brief The microkernel thread entry point
 *
 * This function implements the microkernel server fiber. It waits for
 * command packets to arrive on its command stack, executes every command
 * on the stack, sets up the next task that is ready to run, and then
 * resumes waiting on the command stack for further input.
 *
 * @return Does not return.
 */
FUNC_NORETURN void _k_server(int unused1, int unused2)
{
	struct k_args *pArgs;
	struct k_task *pNextTask;

	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);

	/*
	 * indicate that failure of this fiber may be fatal to the entire
	 * system
	 */

	_nanokernel.current->flags |= ESSENTIAL;

	while (1) { /* forever */
		(void) nano_fiber_stack_pop(&_k_command_stack, (uint32_t *)&pArgs,
				TICKS_UNLIMITED); /* will schedule */
		do {
			int cmd_type = (int)pArgs & KERNEL_CMD_TYPE_MASK;

			if (cmd_type == KERNEL_CMD_PACKET_TYPE) {

				/* process command packet */

#ifdef CONFIG_TASK_MONITOR
				if (_k_monitor_mask & MON_KSERV) {
					_k_task_monitor_args(pArgs);
				}
#endif
				(*pArgs->Comm)(pArgs);
			} else if (cmd_type == KERNEL_CMD_EVENT_TYPE) {

				/* give event */

#ifdef CONFIG_TASK_MONITOR
				if (_k_monitor_mask & MON_EVENT) {
					_k_task_monitor_args(pArgs);
				}
#endif
				kevent_t event = (int)pArgs & ~KERNEL_CMD_TYPE_MASK;

				_k_do_event_signal(event);
			} else { /* cmd_type == KERNEL_CMD_SEMAPHORE_TYPE */

				/* give semaphore */

#ifdef CONFIG_TASK_MONITOR
				/* task monitoring for giving semaphore not implemented */
#endif
				ksem_t sem = (int)pArgs & ~KERNEL_CMD_TYPE_MASK;

				_k_sem_struct_value_update(1, (struct _k_sem_struct *)sem);
			}

			/*
			 * check if another fiber (of equal or greater priority)
			 * needs to run
			 */

			if (_nanokernel.fiber) {
				fiber_yield();
			}
		} while (nano_fiber_stack_pop(&_k_command_stack, (uint32_t *)&pArgs,
					TICKS_NONE));

		pNextTask = next_task_select();

		if (_k_current_task != pNextTask) {

			/*
			 * switch from currently selected task to a different
			 * one
			 */

#ifdef CONFIG_WORKLOAD_MONITOR
			if (pNextTask->id == 0x00000000) {
				_k_workload_monitor_idle_start();
			} else if (_k_current_task->id == 0x00000000) {
				_k_workload_monitor_idle_end();
			}
#endif

			_k_current_task = pNextTask;
			_nanokernel.task = (struct tcs *)pNextTask->workspace;

#ifdef CONFIG_TASK_MONITOR
			if (_k_monitor_mask & MON_TSWAP) {
				_k_task_monitor(_k_current_task, 0);
			}
#endif
		}
	}

	/*
	 * Code analyzers may complain that _k_server() uses an infinite loop
	 * unless we indicate that this is intentional
	 */

	CODE_UNREACHABLE;
}
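The dispatch above works because the low bits of each value popped from the command stack carry a type tag. A hedged illustration of the encoding side; to_event_cmd() is hypothetical, while KERNEL_CMD_EVENT_TYPE and KERNEL_CMD_TYPE_MASK are the symbols tested above:

/* Hypothetical helper mirroring the untagging in _k_server(): an event
 * handle is OR'ed with its type bits before being pushed on the stack.
 */
static inline uint32_t to_event_cmd(kevent_t event)
{
	return ((uint32_t)event & ~KERNEL_CMD_TYPE_MASK) | KERNEL_CMD_EVENT_TYPE;
}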
Example #9
void fiber_yieldll(uint64_t ll)
{
	FiberYieldValue val;

	val.uint64v = ll;
	fiber_yield(val);
}
Example #10
void fiber_yieldlf(double lf)
{
	FiberYieldValue val;

	val.doublev = lf;
	fiber_yield(val);
}
Example #11
void fiber_yieldf(float f)
{
	FiberYieldValue val;

	val.floatv = f;
	fiber_yield(val);
}
Example #12
void fiber_yieldi(int i)
{
	FiberYieldValue val;

	val.intv = i;
	fiber_yield(val);
}
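Examples #7 and #9 through #12 all funnel through a FiberYieldValue union whose definition is not shown on this page. A plausible reconstruction, inferred purely from the field names used above (pvoid, uint64v, doublev, floatv, intv):

#include <stdint.h>

/* Assumed shape of FiberYieldValue, reconstructed from the wrappers above. */
typedef union {
	void *pvoid;
	uint64_t uint64v;
	double doublev;
	float floatv;
	int intv;
} FiberYieldValue;
Example #13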
/**
 *
 * @brief Interrupt preparation fiber
 *
 * Fiber makes all the test preparations: registers the interrupt handler,
 * gets the first timestamp and invokes the software interrupt.
 *
 * @return N/A
 */
static void fiberInt(void)
{
	irq_offload(latencyTestIsr, NULL);
	fiber_yield();
}
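irq_offload() runs its handler synchronously in interrupt context. A hedged sketch of what latencyTestIsr() might look like; the timestamp variable and TIME_STAMP_DELTA_GET() hook are placeholders, not from the original source:

/* Hypothetical ISR invoked by the irq_offload() call above; it would record
 * the timestamp that the latency measurement compares against.
 */
static void latencyTestIsr(void *unused)
{
	ARG_UNUSED(unused);
	timestamp = TIME_STAMP_DELTA_GET(0);	/* placeholder measurement hook */
}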