Esempio n. 1
0
File: soc.c Progetto: agatti/zephyr
/**
 * @brief Perform basic hardware initialization
 *
 * Initialize the interrupt controller device drivers.
 * Also initialize the timer device driver, if required.
 *
 * @return 0
 */
static int silabs_efm32wg_init(struct device *arg)
{
	ARG_UNUSED(arg);

	int oldLevel; /* old interrupt lock level */

	/* disable interrupts */
	oldLevel = irq_lock();

	/* handle chip errata */
	CHIP_Init();

	/* clear any latched fault state — NOTE(review): _ClearFaults() is
	 * defined elsewhere; confirm its exact semantics
	 */
	_ClearFaults();

	/* Initialize system clock according to CONFIG_CMU settings */
	clkInit();

	/*
	 * install default handler that simply resets the CPU
	 * if configured in the kernel, NOP otherwise
	 */
	NMI_INIT();

	/* restore interrupt state */
	irq_unlock(oldLevel);
	return 0;
}
Esempio n. 2
0
/* Render a byte buffer as a lowercase hex string.
 *
 * Returns a pointer into a small pool of static buffers handed out
 * round-robin, so several calls can coexist within one log statement.
 * Output is truncated to fit one pool buffer (64 bytes of input).
 */
const char *bt_hex(const void *buf, size_t len)
{
	static const char digits[] = "0123456789abcdef";
	static char pool[4][129];
	static u8_t next;
	const u8_t *bytes = buf;
	unsigned int key;
	char *out;
	int i;

	/* Reserve one buffer from the pool atomically */
	key = irq_lock();
	out = pool[next++];
	next %= ARRAY_SIZE(pool);
	irq_unlock(key);

	/* Clamp so the encoding (2 chars per byte plus NUL) fits */
	len = min(len, (sizeof(pool[0]) - 1) / 2);

	for (i = 0; i < len; i++) {
		out[2 * i] = digits[bytes[i] >> 4];
		out[2 * i + 1] = digits[bytes[i] & 0xf];
	}

	out[2 * i] = '\0';

	return out;
}
Esempio n. 3
0
/**
 *
 * @brief Test the k_cpu_idle() routine
 *
 * This tests the k_cpu_idle() routine.  The first thing it does is align to
 * a tick boundary.  The only source of interrupts while the test is running is
 * expected to be the tick clock timer which should wake the CPU.  Thus after
 * each call to k_cpu_idle(), the tick count should be one higher.
 *
 * @return TC_PASS on success
 * @return TC_FAIL on failure
 */
static int test_kernel_cpu_idle(int atomic)
{
	int tms, tms2;          /* current time in milliseconds */
	int i;                  /* loop variable */

	/* Align to a "ms boundary": spin until the uptime value changes. */
	tms = k_uptime_get_32();
	while (tms == k_uptime_get_32()) {
	}

	tms = k_uptime_get_32();
	for (i = 0; i < 5; i++) {       /* Repeat the test five times */
		if (atomic) {
			unsigned int key = irq_lock();

			k_cpu_atomic_idle(key);
		} else {
			k_cpu_idle();
		}
		/* advance the expected time by one tick, in milliseconds */
		tms += sys_clock_us_per_tick / USEC_PER_MSEC;
		tms2 = k_uptime_get_32();
		/* the CPU must not have woken earlier than one tick later */
		if (tms2 < tms) {
			TC_ERROR("Bad ms per tick value computed, got %d which is less than %d\n",
				 tms2, tms);
			return TC_FAIL;
		}
	}
	return TC_PASS;
}
Esempio n. 4
0
/* Start a statically-defined task and trigger a reschedule.
 * NOTE(review): there is no matching irq_unlock() here —
 * _reschedule_threads() appears to consume the IRQ key; confirm it
 * restores the interrupt state.
 */
void task_start(ktask_t task)
{
	int key = irq_lock();

	_k_thread_single_start(task);
	_reschedule_threads(key);
}
Esempio n. 5
0
/* The actual printk hook: buffers one character into the current telnet
 * line buffer and switches buffers (then yields) on newline or when the
 * line is full.
 */
static int telnet_console_out(int c)
{
	unsigned int key = irq_lock();
	struct line_buf *lb = telnet_rb_get_line_in();
	bool yield = false;

	lb->buf[lb->len++] = (char)c;

	if (c == '\n' || lb->len == TELNET_LINE_SIZE - 1) {
		/* replace the last character with the telnet NVT CR/LF pair */
		lb->buf[lb->len - 1] = NVT_CR;
		lb->buf[lb->len++] = NVT_LF;
		telnet_rb_switch();
		yield = true;
	}

	irq_unlock(key);

#ifdef CONFIG_TELNET_CONSOLE_DEBUG_DEEP
	/* This is ugly, but if one wants to debug telnet, it
	 * will also output the character to original console
	 */
	orig_printk_hook(c);
#endif

	if (yield) {
		/* give the thread draining the line buffers a chance to run */
		k_yield();
	}

	return c;
}
Esempio n. 6
0
/* nrfx QDEC event callback: on REPORTRDY, accumulate the report value
 * and invoke the registered data-ready trigger handler, if any.
 */
static void qdec_nrfx_event_handler(nrfx_qdec_event_t event)
{
	sensor_trigger_handler_t handler;
	unsigned int key;

	switch (event.type) {
	case NRF_QDEC_EVENT_REPORTRDY:
		accumulate(&qdec_nrfx_data, event.data.report.acc);

		/* snapshot the handler pointer atomically — it may be
		 * changed concurrently by qdec_nrfx_trigger_set()
		 */
		key = irq_lock();
		handler = qdec_nrfx_data.data_ready_handler;
		irq_unlock(key);

		if (handler) {
			struct sensor_trigger trig = {
				.type = SENSOR_TRIG_DATA_READY,
				.chan = SENSOR_CHAN_ROTATION,
			};

			handler(DEVICE_GET(qdec_nrfx), &trig);
		}
		break;

	default:
		LOG_ERR("unhandled event (0x%x)", event.type);
		break;
	}
}
Esempio n. 7
0
/*
 * Install an exception handler for the current task.
 *
 * NULL can be specified as handler to remove current handler.
 * If handler is removed, all pending exceptions are discarded
 * immediately. In this case, all threads blocked in
 * exception_wait() are unblocked.
 *
 * Only one exception handler can be set per task. If the
 * previous handler exists in task, exception_setup() just
 * override that handler.
 */
int
exception_setup(void (*handler)(int))
{
	task_t self = cur_task();
	list_t head, n;
	thread_t th;

	if (handler != NULL && !user_area(handler))
		return EFAULT;

	sched_lock();
	if (self->handler && handler == NULL) {
		/*
		 * Remove existing exception handler. Do clean up
		 * job for all threads in the target task.
		 */
		head = &self->threads;
		for (n = list_first(head); n != head; n = list_next(n)) {
			/*
			 * Clear pending exceptions.
			 */
			th = list_entry(n, struct thread, task_link);
			irq_lock();
			th->excbits = 0;
			irq_unlock();

			/*
			 * If the thread is waiting for an exception,
			 * cancel it.
			 */
			if (th->slpevt == &exception_event)
				sched_unsleep(th, SLP_BREAK);
		}
	}
Esempio n. 8
0
/*
 * MvicRteSet - write a 32-bit MVIC IO APIC redirection table entry
 *
 * Uses the indirect IOREGSEL/IOWIN register pair; interrupts are locked
 * so the index write and the data write are performed atomically.
 */
static void MvicRteSet(unsigned int irq, /* INTIN number */
	uint32_t value  /* value to be written */
	)
{
	int key; /* interrupt lock level */
	volatile unsigned int *rte;
	volatile unsigned int *index;
	unsigned int low_nibble;
	unsigned int high_nibble;

	index = (unsigned int *)(IOAPIC_BASE_ADRS + IOAPIC_IND);
	rte = (unsigned int *)(IOAPIC_BASE_ADRS + IOAPIC_DATA);

	/* Set index in the IOREGSEL */
	__ASSERT(irq < IOAPIC_NUM_RTES, "INVL");

	/* encode the RTE index into the two shifted nibble fields */
	low_nibble = ((irq & MVIC_LOW_NIBBLE_MASK) << 0x1);
	high_nibble = ((irq & MVIC_HIGH_NIBBLE_MASK) << 0x2);

	/* lock interrupts to ensure indirect addressing works "atomically" */

	key = irq_lock();

	*(index) = high_nibble | low_nibble;
	*(rte) = (value & IOAPIC_LO32_RTE_SUPPORTED_MASK);

	irq_unlock(key);
}
Esempio n. 9
0
/*******************************************************************************
*
* MvicRteGet - read a 32 bit MVIC IO APIC register
*
* Reads the redirection table entry for <irq> through the indirect
* IOREGSEL/IOWIN register pair; interrupts are locked so the index
* write and the data read are performed atomically.
*
* RETURNS: register value
*/
static uint32_t MvicRteGet(unsigned int irq /* INTIN number */
	)
{
	uint32_t value; /* value */
	int key;	/* interrupt lock level */
	volatile unsigned int *rte;
	volatile unsigned int *index;
	unsigned int low_nibble;
	unsigned int high_nibble;

	index = (unsigned int *)(IOAPIC_BASE_ADRS + IOAPIC_IND);
	rte = (unsigned int *)(IOAPIC_BASE_ADRS + IOAPIC_DATA);

	/* Set index in the IOREGSEL */
	__ASSERT(irq < IOAPIC_NUM_RTES, "INVL");

	/* encode the RTE index into the two shifted nibble fields */
	low_nibble = ((irq & MVIC_LOW_NIBBLE_MASK) << 0x1);
	high_nibble = ((irq & MVIC_HIGH_NIBBLE_MASK) << 0x2);

	/* lock interrupts to ensure indirect addressing works "atomically" */

	key = irq_lock();

	*(index) = high_nibble | low_nibble;
	value = *(rte);

	irq_unlock(key);

	return value;
}
Esempio n. 10
0
/**
 * @brief Get the time remaining before a timeout expires, in milliseconds
 *
 * Walks the global timeout queue under an IRQ lock, summing the
 * per-node tick deltas up to and including @a timeout, then converts
 * the result to milliseconds. Returns 0 ms for an inactive timeout.
 */
int32_t _timeout_remaining_get(struct _timeout *timeout)
{
	unsigned int key = irq_lock();
	int32_t remaining_ticks;

	if (timeout->delta_ticks_from_prev == _INACTIVE) {
		remaining_ticks = 0;
	} else {
		/*
		 * compute remaining ticks by walking the timeout list
		 * and summing up the various tick deltas involved
		 */
		struct _timeout *t =
			(struct _timeout *)sys_dlist_peek_head(&_timeout_q);

		remaining_ticks = t->delta_ticks_from_prev;
		while (t != timeout) {
			t = (struct _timeout *)sys_dlist_peek_next(&_timeout_q,
								   &t->node);
			remaining_ticks += t->delta_ticks_from_prev;
		}
	}

	irq_unlock(key);
	return _ticks_to_ms(remaining_ticks);
}
Esempio n. 11
0
/**
 * @brief Mask (disable) a local APIC LVT interrupt
 *
 * Sets the mask bit in the LVT entry corresponding to @a irq.
 */
void _loapic_irq_disable(unsigned int irq /* IRQ number of the
										  interrupt */
										  )
{
	volatile int *pLvt; /* pointer to local vector table */
	int32_t oldLevel;   /* previous interrupt lock level */

	/*
	* irq is actually an index to local APIC LVT register.
	* ASSERT if out of range for MVIC implementation.
	*/
	__ASSERT_NO_MSG(irq < LOAPIC_IRQ_COUNT);

	/*
	* See the comments in _LoApicLvtVecSet() regarding IRQ to LVT mappings
	* and ths assumption concerning LVT spacing.
	*/

	pLvt = (volatile int *)(LOAPIC_BASE_ADRS + LOAPIC_TIMER + (irq * LOAPIC_LVT_REG_SPACING));

	/* set the mask bit in the LVT */

	oldLevel = irq_lock();
	*pLvt = *pLvt | LOAPIC_LVT_MASKED;
	irq_unlock(oldLevel);

}
Esempio n. 12
0
/* Sensor API channel_get: atomically fetch-and-clear the accumulated
 * step count and convert it to rotation — whole units in val1 and a
 * scaled fractional part in val2.
 * NOTE(review): (acc * FULL_ANGLE) is computed in s32; the
 * BUILD_ASSERTs bound STEPS but acc's range is assumed bounded
 * elsewhere — confirm no overflow for large accumulations.
 */
static int qdec_nrfx_channel_get(struct device       *dev,
				 enum sensor_channel  chan,
				 struct sensor_value *val)
{
	struct qdec_nrfx_data *data = &qdec_nrfx_data;
	unsigned int key;
	s32_t acc;

	ARG_UNUSED(dev);
	LOG_DBG("");

	if (chan != SENSOR_CHAN_ROTATION) {
		return -ENOTSUP;
	}

	/* read and reset the accumulator atomically w.r.t. the QDEC ISR */
	key = irq_lock();
	acc = data->acc;
	data->acc = 0;
	irq_unlock(key);

	BUILD_ASSERT_MSG(DT_NORDIC_NRF_QDEC_QDEC_0_STEPS > 0,
			 "only positive number valid");
	BUILD_ASSERT_MSG(DT_NORDIC_NRF_QDEC_QDEC_0_STEPS <= 2148,
			 "overflow possible");

	val->val1 = (acc * FULL_ANGLE) / DT_NORDIC_NRF_QDEC_QDEC_0_STEPS;
	val->val2 = (acc * FULL_ANGLE)
		    - (val->val1 * DT_NORDIC_NRF_QDEC_QDEC_0_STEPS);
	if (val->val2 != 0) {
		/* scale the remainder to millionths */
		val->val2 *= 1000000;
		val->val2 /= DT_NORDIC_NRF_QDEC_QDEC_0_STEPS;
	}

	return 0;
}
Esempio n. 13
0
/*
 * @brief Initialize fake serial port
 * @which: port number
 * @init_info: pointer to initialization information
 *
 * Only records the register base address; the IRQ lock guards the
 * write to the shared uart[] table.
 */
void uart_init(int which, const struct uart_init_info * const init_info)
{
	int key = irq_lock();

	uart[which].regs = init_info->regs;
	irq_unlock(key);
}
Esempio n. 14
0
/* Enable interrupt line @a irq in the ARCv2 interrupt unit; the IRQ
 * lock makes the interrupt-unit update atomic.
 */
void z_arch_irq_enable(unsigned int irq)
{
	unsigned int key = irq_lock();

	z_arc_v2_irq_unit_int_enable(irq);
	irq_unlock(key);
}
Esempio n. 15
0
/**
 * @brief Connect a routine to an interrupt vector in the IDT
 *
 * Writes the IDT entry for @a vector via _IdtEntCreate() under an IRQ
 * lock; with MVIC configured, the IDT register is also reloaded while
 * still locked (see below).
 */
void _IntVecSet(unsigned int vector, void (*routine)(void *), unsigned int dpl)
{
	unsigned long long *pIdtEntry;
	unsigned int key;

	/*
	 * The <vector> parameter must be less than the value of the
	 * CONFIG_IDT_NUM_VECTORS configuration parameter, however,
	 * explicit validation will not be performed in this primitive.
	 */

	pIdtEntry = (unsigned long long *)(_idt_base_address + (vector << 3));

	/*
	 * Lock interrupts to protect the IDT entry to which _IdtEntryCreate()
	 * will write.  They must be locked here because the _IdtEntryCreate()
	 * code is shared with the 'gen_idt' host tool.
	 */

	key = irq_lock();
	_IdtEntCreate(pIdtEntry, routine, dpl);

#ifdef CONFIG_MVIC
	/* Some nonstandard interrupt controllers may be doing some IDT
	 * caching for performance reasons and need the IDT reloaded if
	 * any changes are made to it
	 */
	__asm__ volatile ("lidt _Idt");
#endif

	irq_unlock(key);
}
Esempio n. 16
0
/**
 *
 * @brief Power saving when idle
 *
 * If _sys_power_save_flag is non-zero, this routine keeps the system in a low
 * power state whenever the kernel is idle. If it is zero, this routine will
 * fall through and _k_kernel_idle() will try the next idling mechanism.
 *
 * @return N/A
 *
 */
static void _power_save(void)
{
	if (_sys_power_save_flag) {
		for (;;) {
			/* The lock key is intentionally discarded: the
			 * idle routines are expected to re-enable
			 * interrupts when the CPU wakes.
			 * NOTE(review): confirm against the architecture
			 * idle implementation.
			 */
			irq_lock();
#ifdef CONFIG_ADVANCED_POWER_MANAGEMENT
			_sys_power_save_idle(_get_next_timer_expiry());
#else
			/*
			 * nano_cpu_idle() is invoked here directly only if APM
			 * is disabled. Otherwise the microkernel decides
			 * either to invoke it or to implement advanced idle
			 * functionality
			 */

			nano_cpu_idle();
#endif
		}

		/*
		 * Code analyzers may complain that _power_save() uses an
		 * infinite loop unless we indicate that this is intentional
		 */

		CODE_UNREACHABLE;
	}
}
Esempio n. 17
0
/**
 * Execute the callback of a timer.
 *
 * @param expiredTimer pointer on the timer
 *
 * WARNING: expiredTimer MUST NOT be null (rem: static function )
 */
static void execute_callback(T_TIMER_LIST_ELT *expiredTimer)
{
#ifdef __DEBUG_OS_ABSTRACTION_TIMER
	_log(
		"\nINFO : execute_callback : executing callback of timer 0x%x  (now = %u - expiration = %u)",
		(uint32_t)expiredTimer,
		get_uptime_ms(), expiredTimer->desc.expiration);
#endif
	/* re-arm / remove the timer atomically before running its callback */
	int flags = irq_lock();

	/* if the timer was not stopped by its own callback */
	if (E_TIMER_RUNNING == expiredTimer->desc.status) {
		remove_timer(expiredTimer);
		/* add it again if repeat flag was on */
		if (expiredTimer->desc.repeat) {
			expiredTimer->desc.expiration = get_uptime_ms() +
							expiredTimer->desc.
							delay;
			add_timer(expiredTimer);
		}
	}
	irq_unlock(flags);

	/* call callback back — outside the IRQ lock */
	if (NULL != expiredTimer->desc.callback) {
		expiredTimer->desc.callback(expiredTimer->desc.data);
	} else {
#ifdef __DEBUG_OS_ABSTRACTION_TIMER
		_log("\nERROR : execute_callback : timer callback is null ");
#endif
		/* a running timer without a callback is a fatal error */
		panic(E_OS_ERR);
	}
}
Esempio n. 18
0
/**
 *  Remove the timer in Chained list of timers.
 *     This service may panic if:
 *         tmr parameter is is null, invalid, or timer is not running.
 *
 * Authorized execution levels:  task, fiber, ISR
 *
 * @param tmr : handler on the timer (value returned by timer_create ).
 *
 */
void timer_stop(T_TIMER tmr)
{
	T_TIMER_LIST_ELT *timer = (T_TIMER_LIST_ELT *)tmr;
	bool doSignal = false;

	if (NULL != timer) {
		int flags = irq_lock();
		/* if timer is active */
		if (timer->desc.status == E_TIMER_RUNNING) {
#ifdef __DEBUG_OS_ABSTRACTION_TIMER
			_log(
				"\nINFO : timer_stop : stopping timer at addr = 0x%x",
				(uint32_t)timer);
#endif
			/* remove the timer */

			/* removing the head changes the next expiry, so the
			 * timer task must be told about it (after unlocking)
			 */
			if (g_CurrentTimerHead == timer) {
				doSignal = true;
			}

			remove_timer(timer);

			irq_unlock(flags);

			if (doSignal) {
				/* the next timer to expire was removed, unblock timer_task to assess the change */
				signal_timer_task();
			}
		} else { /* tmr is not running */
			irq_unlock(flags);
		}
	} else { /* tmr is not a timer from g_TimerPool_elements */
		panic(E_OS_ERR);
	}
}
Esempio n. 19
0
/**
 * @brief Append a singly-linked list of nodes to a queue
 *
 * Hands nodes directly to pending waiters first; any remaining nodes
 * are appended to the queue's data list. If a waiter was woken (or a
 * poll event must be handled) and a context switch is required,
 * _Swap() consumes the IRQ key; otherwise interrupts are unlocked here.
 */
void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
	__ASSERT(head && tail, "invalid head or tail");

	struct k_thread *first_thread, *thread;
	unsigned int key;

	key = irq_lock();

	first_thread = _peek_first_pending_thread(&queue->wait_q);
	while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
		prepare_thread_to_run(thread, head);
		/* advance via the node's embedded next pointer */
		head = *(void **)head;
	}

	if (head) {
		sys_slist_append_list(&queue->data_q, head, tail);
	}

	if (first_thread) {
		if (!_is_in_isr() && _must_switch_threads()) {
			(void)_Swap(key);
			return;
		}
	} else {
		if (handle_poll_event(queue)) {
			(void)_Swap(key);
			return;
		}
	}

	irq_unlock(key);
}
Esempio n. 20
0
/**
 * @brief Insert a node into a queue after @a prev
 *
 * If a thread is pending on the queue, the data is handed to it
 * directly instead of being inserted. When a context switch is
 * required, _Swap() consumes the IRQ key; otherwise interrupts are
 * unlocked here.
 */
void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
	struct k_thread *first_pending_thread;
	unsigned int key;

	key = irq_lock();

	first_pending_thread = _unpend_first_thread(&queue->wait_q);

	if (first_pending_thread) {
		prepare_thread_to_run(first_pending_thread, data);
		if (!_is_in_isr() && _must_switch_threads()) {
			(void)_Swap(key);
			return;
		}
	} else {
		sys_slist_insert(&queue->data_q, prev, data);
		if (handle_poll_event(queue)) {
			(void)_Swap(key);
			return;
		}
	}

	irq_unlock(key);
}
Esempio n. 21
0
/**
 * @brief Program the vector field of a local APIC LVT entry (MVIC)
 */
void _loapic_int_vec_set(unsigned int irq, /* IRQ number of the
										   interrupt */
										   unsigned int vector /* vector to copy
															   into the LVT */
															   )
{
	volatile int *pLvt; /* pointer to local vector table */
	int32_t oldLevel;   /* previous interrupt lock level */

	/*
	* irq is actually an index to local APIC LVT register.
	* ASSERT if out of range for MVIC implementation.
	*/
	__ASSERT_NO_MSG(irq < LOAPIC_IRQ_COUNT);

	/*
	* The following mappings are used:
	*
	*   LVT0 -> LOAPIC_TIMER
	*
	* It's assumed that LVTs are spaced by LOAPIC_LVT_REG_SPACING bytes
	*/

	pLvt = (volatile int *)(LOAPIC_BASE_ADRS + LOAPIC_TIMER + (irq * LOAPIC_LVT_REG_SPACING));

	/* update the 'vector' bits in the LVT */

	oldLevel = irq_lock();
	*pLvt = (*pLvt & ~LOAPIC_VECTOR) | vector;
	irq_unlock(oldLevel);

}
Esempio n. 22
0
/**
 * @brief Program the vector field of a local APIC LVT entry
 */
void _loapic_int_vec_set(unsigned int irq, /* IRQ number of the interrupt */
				  unsigned int vector /* vector to copy into the LVT */
				  )
{
	volatile int *pLvt; /* pointer to local vector table */
	s32_t oldLevel;   /* previous interrupt lock level */

	/*
	 * The following mappings are used:
	 *
	 *   IRQ0 -> LOAPIC_TIMER
	 *   IRQ1 -> LOAPIC_THERMAL
	 *   IRQ2 -> LOAPIC_PMC
	 *   IRQ3 -> LOAPIC_LINT0
	 *   IRQ4 -> LOAPIC_LINT1
	 *   IRQ5 -> LOAPIC_ERROR
	 *
	 * It's assumed that LVTs are spaced by 0x10 bytes
	 */

	pLvt = (volatile int *)
			(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_TIMER + (irq * 0x10));

	/* update the 'vector' bits in the LVT */

	oldLevel = irq_lock();
	*pLvt = (*pLvt & ~LOAPIC_VECTOR) | vector;
	irq_unlock(oldLevel);
}
Esempio n. 23
0
/* Sensor API trigger_set: register (or clear, when handler is NULL)
 * the data-ready handler. Only SENSOR_TRIG_DATA_READY on the rotation
 * channel (or SENSOR_CHAN_ALL) is supported.
 */
static int qdec_nrfx_trigger_set(struct device               *dev,
				 const struct sensor_trigger *trig,
				 sensor_trigger_handler_t     handler)
{
	struct qdec_nrfx_data *data = &qdec_nrfx_data;
	unsigned int key;

	ARG_UNUSED(dev);
	LOG_DBG("");

	if (trig->type != SENSOR_TRIG_DATA_READY) {
		return -ENOTSUP;
	}

	if ((trig->chan != SENSOR_CHAN_ALL) &&
	    (trig->chan != SENSOR_CHAN_ROTATION)) {
		return -ENOTSUP;
	}

	/* update atomically w.r.t. the QDEC event handler */
	key = irq_lock();
	data->data_ready_handler = handler;
	irq_unlock(key);

	return 0;
}
Esempio n. 24
0
/* One-time driver init: connect the QDEC IRQ, initialize the nrfx
 * driver with a static configuration, then enable GPIO control and the
 * peripheral. Returns -EBUSY if the QDEC is already in use, -EFAULT on
 * any other nrfx failure.
 */
static int qdec_nrfx_init(struct device *dev)
{
	static const nrfx_qdec_config_t config = {
		.reportper          = NRF_QDEC_REPORTPER_40,
		.sampleper          = NRF_QDEC_SAMPLEPER_2048us,
		.psela              = DT_NORDIC_NRF_QDEC_QDEC_0_A_PIN,
		.pselb              = DT_NORDIC_NRF_QDEC_QDEC_0_B_PIN,
#if defined(DT_NORDIC_NRF_QDEC_QDEC_0_LED_PIN)
		.pselled            = DT_NORDIC_NRF_QDEC_QDEC_0_LED_PIN,
#else
		.pselled            = 0xFFFFFFFF, /* disabled */
#endif
		.ledpre             = DT_NORDIC_NRF_QDEC_QDEC_0_LED_PRE,
		.ledpol             = NRF_QDEC_LEPOL_ACTIVE_HIGH,
		.interrupt_priority = NRFX_QDEC_CONFIG_IRQ_PRIORITY,
		.dbfen              = 0, /* disabled */
		.sample_inten       = 0, /* disabled */
	};

	nrfx_err_t nerr;

	LOG_DBG("");

	IRQ_CONNECT(DT_NORDIC_NRF_QDEC_QDEC_0_IRQ,
		    DT_NORDIC_NRF_QDEC_QDEC_0_IRQ_PRIORITY,
		    nrfx_isr, nrfx_qdec_irq_handler, 0);

	nerr = nrfx_qdec_init(&config, qdec_nrfx_event_handler);
	if (nerr == NRFX_ERROR_INVALID_STATE) {
		LOG_ERR("qdec already in use");
		return -EBUSY;
	} else if (nerr != NRFX_SUCCESS) {
		LOG_ERR("failed to initialize qdec");
		return -EFAULT;
	}

	qdec_nrfx_gpio_ctrl(true);
	nrfx_qdec_enable();

#ifdef CONFIG_DEVICE_POWER_MANAGEMENT
	/* the driver starts out in the active power state */
	struct qdec_nrfx_data *data = &qdec_nrfx_data;

	data->pm_state = DEVICE_PM_ACTIVE_STATE;
#endif

	return 0;
}

#ifdef CONFIG_DEVICE_POWER_MANAGEMENT

/* Read the driver's current power-management state under an IRQ lock.
 * Always returns 0.
 */
static int qdec_nrfx_pm_get_state(struct qdec_nrfx_data *data,
				  u32_t                 *state)
{
	unsigned int key = irq_lock();
	*state = data->pm_state;
	irq_unlock(key);

	return 0;
}
Esempio n. 25
0
/* Resume a suspended thread and trigger a reschedule.
 * NOTE(review): there is no matching irq_unlock() here —
 * _reschedule_threads() appears to consume the IRQ key; confirm it
 * restores the interrupt state.
 */
void k_thread_resume(struct k_thread *thread)
{
	unsigned int  key = irq_lock();

	_k_thread_single_resume(thread);

	_reschedule_threads(key);
}
Esempio n. 26
0
/* Return the current 32-bit cycle count: the last announced count plus
 * the counter delta since then, read under an IRQ lock.
 */
u32_t _timer_cycle_get_32(void)
{
	u32_t key = irq_lock();
	u32_t ret = counter_sub(counter(), last_count) + last_count;

	irq_unlock(key);
	return ret;
}
Esempio n. 27
0
/* Disable interrupts and return the previous interrupt state as an
 * opaque key suitable for a later restore.
 */
uintptr_t HwiP_disable(void)
{
	return irq_lock();
}
Esempio n. 28
0
/**
 * @brief Handle expiration of a kernel timer object.
 *
 * @param t  Timeout used by the timer.
 *
 * @return N/A
 */
void _timer_expiration_handler(struct _timeout *t)
{
	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
	struct k_thread *thread;
	unsigned int key;

	/*
	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
	 * since we're already aligned to a tick boundary
	 */
	if (timer->period > 0) {
		key = irq_lock();
		_add_timeout(NULL, &timer->timeout, &timer->wait_q,
				timer->period);
		irq_unlock(key);
	}

	/* update timer's status */
	timer->status += 1;

	/* invoke timer expiry function */
	if (timer->expiry_fn) {
		timer->expiry_fn(timer);
	}

	/* wake at most one thread waiting on the timer, if any */
	thread = (struct k_thread *)sys_dlist_peek_head(&timer->wait_q);

	if (!thread) {
		return;
	}

	/*
	 * Interrupts _DO NOT_ have to be locked in this specific instance of
	 * calling _unpend_thread() because a) this is the only place a thread
	 * can be taken off this pend queue, and b) the only place a thread
	 * can be put on the pend queue is at thread level, which of course
	 * cannot interrupt the current context.
	 */
	_unpend_thread(thread);

	key = irq_lock();
	_ready_thread(thread);
	irq_unlock(key);

	/* the woken thread's wait completes successfully (returns 0) */
	_set_thread_return_value(thread, 0);
}
Esempio n. 29
0
/**
 *
 * @brief Disable the MVIC Local APIC.
 *
 * This routine disables the MVIC Local APIC.
 *
 * @returns: N/A
 */
/* Disable the MVIC Local APIC by clearing the software-enable bit in
 * the spurious-interrupt vector register, under an IRQ lock.
 */
void _loapic_disable(void)
{
	volatile int *svr =
		(volatile int *)(CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_SVR);
	int32_t key = irq_lock();

	*svr &= ~LOAPIC_ENABLE;

	irq_unlock(key);
}
Esempio n. 30
0
void _loapic_enable(void)
{
	int32_t oldLevel = irq_lock(); /* LOCK INTERRUPTS */

	*(volatile int *)(LOAPIC_BASE_ADRS + LOAPIC_SVR) |= LOAPIC_ENABLE;

	irq_unlock(oldLevel); /* UNLOCK INTERRUPTS */
}