/*-----------------------------------------------------------------------------------*/ caddr_t _sbrk_r(struct _reent *r, size_t incr) { if(incr < 0) { puts("[syscalls] Negative Values for _sbrk_r are not supported"); r->_errno = ENOMEM; return NULL; } uint32_t cpsr = disableIRQ(); /* check all heaps for a chunk of the requested size */ for( ; iUsedHeap < NUM_HEAPS; iUsedHeap++ ) { caddr_t new_heap = heap[iUsedHeap] + incr; if( new_heap <= heap_max[iUsedHeap] ) { caddr_t prev_heap = heap[iUsedHeap]; heap[iUsedHeap] = new_heap; r->_errno = 0; restoreIRQ(cpsr); return prev_heap; } } restoreIRQ(cpsr); r->_errno = ENOMEM; return NULL; }
int pthread_cond_wait(struct pthread_cond_t *cond, struct mutex_t *mutex) { queue_node_t n; n.priority = sched_active_thread->priority; n.data = sched_active_thread->pid; n.next = NULL; /* the signaling thread may not hold the mutex, the queue is not thread safe */ unsigned old_state = disableIRQ(); queue_priority_add(&(cond->queue), &n); restoreIRQ(old_state); mutex_unlock_and_sleep(mutex); if (n.data != -1u) { /* on signaling n.data is set to -1u */ /* if it isn't set, then the wakeup is either spurious or a timer wakeup */ old_state = disableIRQ(); queue_remove(&(cond->queue), &n); restoreIRQ(old_state); } mutex_lock(mutex); return 0; }
void mutex_wait(struct mutex_t *mutex) { int irqstate = disableIRQ(); DEBUG("%s: Mutex in use. %u\n", active_thread->name, mutex->val); if (mutex->val == 0) { /* somebody released the mutex. return. */ mutex->val = thread_pid; DEBUG("%s: mutex_wait early out. %u\n", active_thread->name, mutex->val); restoreIRQ(irqstate); return; } sched_set_status((tcb_t*) active_thread, STATUS_MUTEX_BLOCKED); queue_node_t n; n.priority = (unsigned int) active_thread->priority; n.data = (unsigned int) active_thread; n.next = NULL; DEBUG("%s: Adding node to mutex queue: prio: %" PRIu32 "\n", active_thread->name, n.priority); queue_priority_add(&(mutex->queue), &n); restoreIRQ(irqstate); thread_yield(); /* we were woken up by scheduler. waker removed us from queue. we have the mutex now. */ }
/**
 * @brief   Erase the flash sector that contains @p addr.
 *
 * @param addr  any address inside the sector to erase
 *
 * @return 1 if the sector is blank afterwards (or was already blank)
 * @return 0 on invalid address or when any IAP step fails
 */
uint8_t flashrom_erase(uint8_t *addr)
{
    uint8_t sector = iap_get_sector((uint32_t) addr);

    if (sector == INVALID_ADDRESS) {
        DEBUG("Invalid address\n");
        return 0;
    }

    /* already blank? then there is nothing to erase */
    if (!blank_check_sector(sector, sector)) {
        DEBUG("Sector already blank!\n");
        return 1;
    }

    /* unlock the sector for the erase operation */
    if (prepare_sectors(sector, sector)) {
        DEBUG("-- ERROR: PREPARE_SECTOR_FOR_WRITE_OPERATION --\n");
        return 0;
    }

    /* the erase itself must not be interrupted */
    unsigned irq_state = disableIRQ();
    if (erase_sectors(sector, sector)) {
        DEBUG("-- ERROR: ERASE SECTOR --\n");
        restoreIRQ(irq_state);
        return 0;
    }
    restoreIRQ(irq_state);

    /* verify the erase by blank-checking once more */
    if (blank_check_sector(sector, sector)) {
        DEBUG("-- ERROR: BLANK_CHECK_SECTOR\n");
        return 0;
    }

    DEBUG("Sector successfully erased.\n");
    return 1;
}
/**
 * @brief   Send @p reply to the thread that sent message @p m.
 *
 * The target thread must currently be in STATUS_REPLY_BLOCKED; the reply is
 * copied directly into the target's wait buffer and the target is made
 * runnable again.
 *
 * @return 1 on success
 * @return -1 if the sender no longer exists or is not waiting for a reply
 */
int msg_reply(msg_t *m, msg_t *reply)
{
    unsigned state = disableIRQ();

    tcb_t *target = (tcb_t*) sched_threads[m->sender_pid];

    if (!target) {
        DEBUG("msg_reply(): %s: Target \"%" PRIu16 "\" not existing...dropping msg!\n", sched_active_thread->name, m->sender_pid);
        /* BUGFIX: this early return left interrupts disabled */
        restoreIRQ(state);
        return -1;
    }

    if (target->status != STATUS_REPLY_BLOCKED) {
        DEBUG("msg_reply(): %s: Target \"%s\" not waiting for reply.", sched_active_thread->name, target->name);
        restoreIRQ(state);
        return -1;
    }

    DEBUG("msg_reply(): %s: Direct msg copy.\n", sched_active_thread->name);

    /* copy msg to target */
    msg_t *target_message = (msg_t*) target->wait_data;
    *target_message = *reply;
    sched_set_status(target, STATUS_PENDING);

    restoreIRQ(state);
    thread_yield();

    return 1;
}
/**
 * @brief   Queue one byte for interrupt-driven transmission on UART @p p.
 *
 * If the TX ring buffer is full the byte is silently dropped. TX interrupts
 * on the peripheral are masked while the running-flag/buffer state is
 * manipulated, so the TX ISR cannot observe a half-updated state.
 *
 * @param p     UART peripheral descriptor (reg_addr points at its registers)
 * @param data  byte to transmit
 */
void uart_transmit(struct uart_periph* p, uint8_t data )
{
    uint16_t temp;
    unsigned cpsr;

    /* next insert position in the TX ring buffer */
    temp = (p->tx_insert_idx + 1) % UART_TX_BUFFER_SIZE;

    if (temp == p->tx_extract_idx)
        return;                          // no room

    cpsr = disableIRQ();  // disable global interrupts
    ((uartRegs_t *)(p->reg_addr))->ier &= ~UIER_ETBEI;  // disable TX interrupts
    restoreIRQ(cpsr);     // restore global interrupts

    // check if in process of sending data
    if (p->tx_running) {
        // add to queue
        p->tx_buf[p->tx_insert_idx] = data;
        p->tx_insert_idx = temp;
    }
    else {
        // set running flag and write to output register
        p->tx_running = 1;
        ((uartRegs_t *)(p->reg_addr))->thr = data;
    }

    cpsr = disableIRQ();  // disable global interrupts
    ((uartRegs_t *)(p->reg_addr))->ier |= UIER_ETBEI;  // enable TX interrupts
    restoreIRQ(cpsr);     // restore global interrupts
}
/**
 * @brief   Append transaction @p t to the queue of I2C peripheral @p p.
 *
 * The peripheral's VIC interrupt is masked while the transaction queue is
 * modified. If the peripheral is idle the transaction is started right away;
 * otherwise the interrupt handler picks it up when the current one finishes.
 *
 * @return TRUE when queued, FALSE when the transaction queue is full
 *         (then t->status is set to I2CTransFailed)
 */
bool_t i2c_submit(struct i2c_periph* p, struct i2c_transaction* t)
{
    unsigned cpsr;
    uint8_t idx;

    /* next insert position, wrapping around the circular queue */
    idx = p->trans_insert_idx + 1;
    if (idx >= I2C_TRANSACTION_QUEUE_LEN) idx = 0;
    if (idx == p->trans_extract_idx) {
        t->status = I2CTransFailed;
        return FALSE;  /* queue full */
    }
    t->status = I2CTransPending;

    /* init_struct holds this peripheral's VIC channel number
     * -- presumably set up at init time; confirm against the driver init */
    uint8_t* vic = (uint8_t*)(p->init_struct);

    cpsr = disableIRQ();            // disable global interrupts
    VICIntEnClear = VIC_BIT(*vic);  // mask this peripheral's interrupt
    restoreIRQ(cpsr);               // restore global interrupts

    p->trans[p->trans_insert_idx] = t;
    p->trans_insert_idx = idx;

    /* if peripheral is idle, start the transaction */
    if (p->status == I2CIdle)
        I2cSendStart(p);
    /* else it will be started by the interrupt handler */
    /* when the previous transactions completes */

    //int_enable();
    cpsr = disableIRQ();           // disable global interrupts
    VICIntEnable = VIC_BIT(*vic);  // unmask this peripheral's interrupt again
    restoreIRQ(cpsr);              // restore global interrupts

    return TRUE;
}
/**
 * @brief   Queue an SPI package for transmission.
 *
 * Drops the package silently when the package buffer is full. The RTI
 * interrupt is masked while the queue state is manipulated; if no transfer
 * is currently running, transmission of the queued package is started
 * immediately.
 *
 * @param package  package to copy into the transmit queue
 */
void spi_transmit(spi_package* package)
{
    int temp;
    unsigned cpsr;

    temp = (spi_package_buffer_insert_idx + 1) % SPI_PACKAGE_BUFFER_SIZE;  // calculate the next queue position
    if (temp == spi_package_buffer_extract_idx) {  // check if there is free space in the send queue
        return;  // no room
    }

    cpsr = disableIRQ();  // disable global interrupts
    SpiDisableRti();      // disable RTI interrupts
    restoreIRQ(cpsr);     // restore global interrupts

    spi_package_buffer[spi_package_buffer_insert_idx] = *package;  // add data to queue
    spi_package_buffer_insert_idx = temp;                          // increase insert pointer

    if (spi_transmit_running==0)  // check if in process of sending data
    {
        /* idle: kick off the transfer with the oldest queued package */
        spi_transmit_running = 1;  // set running flag
        spi_transmit_single_package(&spi_package_buffer[spi_package_buffer_extract_idx]);
        spi_package_buffer_extract_idx++;
        spi_package_buffer_extract_idx %= SPI_PACKAGE_BUFFER_SIZE;
    }

    cpsr = disableIRQ();  // disable global interrupts
    SpiEnableRti();       // enable RTI interrupts
    restoreIRQ(cpsr);     // restore global interrupts
}
static void mutex_wait(struct mutex_t *mutex) { unsigned irqstate = disableIRQ(); DEBUG("%s: Mutex in use. %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val)); if (atomic_set_to_one(&mutex->val)) { /* somebody released the mutex. return. */ DEBUG("%s: mutex_wait early out. %u\n", sched_active_thread->name, ATOMIC_VALUE(mutex->val)); restoreIRQ(irqstate); return; } sched_set_status((thread_t*) sched_active_thread, STATUS_MUTEX_BLOCKED); priority_queue_node_t n; n.priority = (unsigned int) sched_active_thread->priority; n.data = (unsigned int) sched_active_thread; n.next = NULL; DEBUG("%s: Adding node to mutex queue: prio: %" PRIu32 "\n", sched_active_thread->name, n.priority); priority_queue_add(&(mutex->queue), &n); restoreIRQ(irqstate); thread_yield_higher(); /* we were woken up by scheduler. waker removed us from queue. we have the mutex now. */ }
void mutex_unlock(struct mutex_t *mutex) { unsigned irqstate = disableIRQ(); DEBUG("mutex_unlock(): val: %u pid: %" PRIkernel_pid "\n", ATOMIC_VALUE(mutex->val), sched_active_pid); if (ATOMIC_VALUE(mutex->val) == 0) { /* the mutex was not locked */ restoreIRQ(irqstate); return; } priority_queue_node_t *next = priority_queue_remove_head(&(mutex->queue)); if (!next) { /* the mutex was locked and no thread was waiting for it */ ATOMIC_VALUE(mutex->val) = 0; restoreIRQ(irqstate); return; } thread_t *process = (thread_t *) next->data; DEBUG("mutex_unlock: waking up waiting thread %" PRIkernel_pid "\n", process->pid); sched_set_status(process, STATUS_PENDING); uint16_t process_priority = process->priority; restoreIRQ(irqstate); sched_switch(process_priority); }
/**
 * @brief   Send @p reply to the thread that sent message @p m.
 *
 * The target thread must currently be in STATUS_REPLY_BLOCKED; the reply is
 * copied directly into the target's wait buffer, the target is made
 * runnable, and a scheduler switch is requested for its priority.
 *
 * @return 1 on success
 * @return -1 if the sender no longer exists or is not waiting for a reply
 */
int msg_reply(msg_t *m, msg_t *reply)
{
    unsigned state = disableIRQ();

    tcb_t *target = (tcb_t*) sched_threads[m->sender_pid];

    if (!target) {
        DEBUG("msg_reply(): %" PRIkernel_pid ": Target \"%" PRIkernel_pid "\" not existing...dropping msg!\n", sched_active_thread->pid, m->sender_pid);
        /* BUGFIX: this early return left interrupts disabled */
        restoreIRQ(state);
        return -1;
    }

    if (target->status != STATUS_REPLY_BLOCKED) {
        DEBUG("msg_reply(): %" PRIkernel_pid ": Target \"%" PRIkernel_pid "\" not waiting for reply.", sched_active_thread->pid, target->pid);
        restoreIRQ(state);
        return -1;
    }

    DEBUG("msg_reply(): %" PRIkernel_pid ": Direct msg copy.\n", sched_active_thread->pid);

    /* copy msg to target */
    msg_t *target_message = (msg_t*) target->wait_data;
    *target_message = *reply;
    sched_set_status(target, STATUS_PENDING);

    uint16_t target_prio = target->priority;
    restoreIRQ(state);
    sched_switch(target_prio);

    return 1;
}
/**
 * @brief   Block the calling thread on this condition variable.
 *
 * The caller must hold @p lock. The thread is enqueued (priority-ordered) on
 * the internal wait queue, atomically releases the mutex and sleeps; on
 * wakeup the mutex is re-acquired before returning.
 *
 * NOTE(review): in the exception-enabled build this throws from a noexcept
 * function when the lock is not held, which calls std::terminate -- confirm
 * this "fail fast" behavior is intended.
 */
void condition_variable::wait(unique_lock<mutex>& lock) noexcept {
#ifdef NOEXCEPTIONS
    /* exceptions disabled: the ownership check result is deliberately ignored */
    lock.owns_lock();
#else
    if (!lock.owns_lock()) {
        throw std::system_error(
            std::make_error_code(std::errc::operation_not_permitted),
            "Mutex not locked.");
    }
#endif
    /* enqueue the calling thread, ordered by its scheduling priority */
    priority_queue_node_t n;
    n.priority = sched_active_thread->priority;
    n.data = sched_active_pid;
    n.next = NULL;

    // the signaling thread may not hold the mutex, the queue is not thread safe
    unsigned old_state = disableIRQ();
    priority_queue_add(&m_queue, &n);
    restoreIRQ(old_state);

    /* atomically release the mutex and go to sleep */
    mutex_unlock_and_sleep(lock.mutex()->native_handle());

    if (n.data != -1u) {
        // on signaling n.data is set to -1u
        // if it isn't set, then the wakeup is either spurious or a timer wakeup
        old_state = disableIRQ();
        priority_queue_remove(&m_queue, &n);
        restoreIRQ(old_state);
    }

    /* re-acquire the mutex before returning, as the contract requires */
    mutex_lock(lock.mutex()->native_handle());
}
/**
 * @brief   Queue one byte for interrupt-driven transmission on UART0.
 *
 * Silently drops the byte when the TX ring buffer is full. TX interrupts are
 * masked around the running-flag/buffer manipulation so the TX ISR cannot
 * observe a half-updated state.
 *
 * @param data  byte to transmit
 */
void uart0_transmit( unsigned char data )
{
    uint16_t temp;
    unsigned cpsr;

    /* next insert position in the TX ring buffer */
    temp = (uart0_tx_insert_idx + 1) % UART0_TX_BUFFER_SIZE;

    if (temp == uart0_tx_extract_idx)
        // return -1; // no room
        return; // no room

    cpsr = disableIRQ();   // disable global interrupts
    U0IER &= ~UIER_ETBEI;  // disable TX interrupts
    restoreIRQ(cpsr);      // restore global interrupts

    // check if in process of sending data
    if (uart0_tx_running) {
        // add to queue
        uart0_tx_buffer[uart0_tx_insert_idx] = (uint8_t)data;
        uart0_tx_insert_idx = temp;
    }
    else {
        // set running flag and write to output register
        uart0_tx_running = 1;
        U0THR = (uint8_t)data;
    }

    cpsr = disableIRQ();  // disable global interrupts
    U0IER |= UIER_ETBEI;  // enable TX interrupts
    restoreIRQ(cpsr);     // restore global interrupts

    // return (uint8_t)ch;
}
/****************************************************************************** * P U B L I C F U N C T I O N S *****************************************************************************/ uint8_t flashrom_write(uint8_t *dst, char *src, size_t size) { char err; unsigned intstate; uint8_t sec; //buffer_vic = VICIntEnable; // save interrupt enable //VICIntEnClr = 0xFFFFFFFF; // clear vic sec = iap_get_sector((uint32_t) dst); if (sec == INVALID_ADDRESS) { DEBUG("Invalid address\n"); return 0; } /* check sector */ if(blank_check_sector(sec, sec) == SECTOR_NOT_BLANK) { DEBUG("Warning: Sector %i not blank\n", sec); } /* prepare sector */ err = prepare_sectors(sec, sec); if (err) { DEBUG("\n-- ERROR: PREPARE_SECTOR_FOR_WRITE_OPERATION: %u\n", err); /* set interrupts back and return */ // VICIntEnable = buffer_vic; return 0; } /* write flash */ else { intstate = disableIRQ(); err = copy_ram_to_flash((uint32_t) dst, (uint32_t) src, 256); restoreIRQ(intstate); if(err) { DEBUG("ERROR: COPY_RAM_TO_FLASH: %u\n", err); /* set interrupts back and return */ restoreIRQ(intstate); // VICIntEnable = buffer_vic; return 0; } /* check result */ else { err = compare((uint32_t) dst, (uint32_t) src, 256); if (err) { DEBUG("ERROR: COMPARE: %i (at position %u)\n", err, iap_result[1]); /* set interrupts back and return */ // VICIntEnable = buffer_vic; return 0; } else { DEBUG("Data successfully written!\n"); /* set interrupts back and return */ // VICIntEnable = buffer_vic; return 1; } } } }
/* Unregister an upper layer thread */
/**
 * @brief   Remove transceiver(s) @p t from the registration of thread @p pid.
 *
 * @return 1 when the thread was found and its registration updated, 0 otherwise
 */
uint8_t transceiver_unregister(transceiver_type_t t, kernel_pid_t pid)
{
    int result = 0;
    unsigned state = disableIRQ();

    for (size_t i = 0; i < TRANSCEIVER_MAX_REGISTERED; ++i) {
        if (reg[i].pid == pid) {
            reg[i].transceivers &= ~t;
            result = 1;
            /* BUGFIX: restoreIRQ() used to be called here AND after the
             * loop; the single restore below is sufficient */
            break;
        }
    }

    restoreIRQ(state);
    return result;
}
/** * @brief Allocate memory from the heap. * * The current heap implementation is very rudimentary, it is only able to allocate * memory. It does not have any means to free memory again. * * @return a pointer to the successfully allocated memory * @return -1 on error, and errno is set to ENOMEM */ caddr_t _sbrk_r(struct _reent *r, size_t incr) { unsigned int state = disableIRQ(); if ((uintptr_t)heap_top + incr > SRAM_BASE + SRAM_LENGTH) { restoreIRQ(state); r->_errno = ENOMEM; return (caddr_t)-1; } else { caddr_t res = heap_top; heap_top += incr; restoreIRQ(state); return res; } }
/****************************************************************************** * P U B L I C F U N C T I O N S *****************************************************************************/ uint8_t flashrom_write(uint8_t *dst, const uint8_t *src, size_t size) { (void) size; /* unused */ char err; uint8_t sec; sec = iap_get_sector((uint32_t) dst); if (sec == INVALID_ADDRESS) { DEBUG("Invalid address\n"); return 0; } /* check sector */ if (blank_check_sector(sec, sec) == SECTOR_NOT_BLANK) { DEBUG("Warning: Sector %i not blank\n", sec); } /* prepare sector */ err = prepare_sectors(sec, sec); if (err) { DEBUG("\n-- ERROR: PREPARE_SECTOR_FOR_WRITE_OPERATION: %u\n", err); return 0; } /* write flash */ unsigned intstate = disableIRQ(); err = copy_ram_to_flash((uint32_t) dst, (uint32_t) src, 256); restoreIRQ(intstate); if (err) { DEBUG("ERROR: COPY_RAM_TO_FLASH: %u\n", err); /* set interrupts back and return */ restoreIRQ(intstate); return 0; } /* check result */ err = compare((uint32_t) dst, (uint32_t) src, 256); if (err) { DEBUG("ERROR: COMPARE: %i (at position %u)\n", err, iap_result[1]); return 0; } DEBUG("Data successfully written!\n"); return 1; }
/**
 * @brief   Configure the CC110x GDO interrupt lines.
 *
 * Enables the GDO2 interrupt and disables the GDO0 interrupt, with the
 * global interrupt state saved and restored around the change.
 */
void cc110x_init_interrupts(void)
{
    /* BUGFIX: disableIRQ() returns the full saved IRQ state; storing it in
     * a uint8_t truncated it, so restoreIRQ() could restore a wrong state */
    unsigned int state = disableIRQ();  /* Disable all interrupts */
    cc110x_gdo2_enable();
    cc110x_gdo0_disable();
    restoreIRQ(state);                  /* Enable all interrupts */
}
/**
 * @brief   Cancel a running xtimer.
 *
 * If the timer is the head of the active list, the low-level timer is
 * re-armed for the new head (or parked when the list becomes empty).
 * Otherwise the timer is searched for in all three timer lists and unlinked.
 *
 * @return result of the list removal; NOTE(review): when the timer was the
 *         list head, res stays 0 even though it was removed -- confirm that
 *         callers treat the return value accordingly.
 */
int xtimer_remove(xtimer_t *timer)
{
    if (!_is_set(timer)) {
        /* timer is not active; nothing to do */
        return 0;
    }

    unsigned state = disableIRQ();

    int res = 0;

    if (timer_list_head == timer) {
        uint32_t next;
        timer_list_head = timer->next;
        if (timer_list_head) {
            /* schedule callback on next timer target time */
            next = timer_list_head->target - XTIMER_OVERHEAD;
        }
        else {
            /* no more timers: park the low-level timer */
            next = _mask(0xFFFFFFFF);
        }
        _lltimer_set(next);
    }
    else {
        /* not the head: unlink from whichever list holds it */
        res = _remove_timer_from_list(&timer_list_head, timer) ||
              _remove_timer_from_list(&overflow_list_head, timer) ||
              _remove_timer_from_list(&long_list_head, timer);
    }

    /* mark the timer as unset */
    timer->target = 0;
    timer->long_target = 0;

    restoreIRQ(state);
    return res;
}
/**
 * @brief   Enable, change, or disable the periodic RTC interrupt.
 *
 * @param hz             RTC rate-divider constant (RTC_REG_A_HZ_*);
 *                       RTC_REG_A_HZ_OFF disables the periodic interrupt
 * @param msg_content    content of the message sent to the subscriber
 * @param target_pid     subscribing thread, or KERNEL_PID_UNDEF to unsubscribe
 * @param allow_replace  permit replacing an existing subscriber
 *
 * @return true when the (un)subscription was applied, false otherwise
 */
bool x86_rtc_set_periodic(uint8_t hz, uint32_t msg_content, kernel_pid_t target_pid, bool allow_replace)
{
    if (!valid) {
        /* RTC hardware not detected/initialized */
        return false;
    }

    unsigned old_status = disableIRQ();

    bool result;
    if (target_pid == KERNEL_PID_UNDEF || hz == RTC_REG_A_HZ_OFF) {
        /* unsubscribe: always succeeds, turn the periodic IRQ off */
        result = true;
        periodic_pid = KERNEL_PID_UNDEF;
        uint8_t old_divider = x86_cmos_read(RTC_REG_A) & ~RTC_REG_A_HZ_MASK;
        x86_cmos_write(RTC_REG_A, old_divider | RTC_REG_A_HZ_OFF);
        x86_cmos_write(RTC_REG_B, x86_cmos_read(RTC_REG_B) & ~RTC_REG_B_INT_PERIODIC);
    }
    else {
        /* subscribe: refuse if another thread owns the slot and replacing
         * was not allowed */
        result = allow_replace || periodic_pid == KERNEL_PID_UNDEF;
        if (result) {
            periodic_msg_content = msg_content;
            periodic_pid = target_pid;
            uint8_t old_divider = x86_cmos_read(RTC_REG_A) & ~RTC_REG_A_HZ_MASK;
            x86_cmos_write(RTC_REG_A, old_divider | hz);
            x86_cmos_write(RTC_REG_B, x86_cmos_read(RTC_REG_B) | RTC_REG_B_INT_PERIODIC);
        }
    }
    /* presumably acknowledges/drains any pending RTC interrupt -- confirm
     * against the handler implementation */
    rtc_irq_handler(0);

    restoreIRQ(old_status);
    return result;
}
/**
 * @brief   Subscribe or unsubscribe a thread to RTC update interrupts.
 *
 * @param msg_content    content of the message sent to the subscriber
 * @param target_pid     subscribing thread, or KERNEL_PID_UNDEF to unsubscribe
 * @param allow_replace  permit replacing an existing subscriber
 *
 * @return true when the (un)subscription was applied, false otherwise
 */
bool x86_rtc_set_update(uint32_t msg_content, kernel_pid_t target_pid, bool allow_replace)
{
    if (!valid) {
        /* RTC hardware not usable */
        return false;
    }

    unsigned irq_state = disableIRQ();
    bool accepted;

    if (target_pid != KERNEL_PID_UNDEF) {
        /* subscribing: only allowed when the slot is free or replacing is
         * explicitly permitted */
        accepted = allow_replace || update_pid == KERNEL_PID_UNDEF;
        if (accepted) {
            update_msg_content = msg_content;
            update_pid = target_pid;
            x86_cmos_write(RTC_REG_B, x86_cmos_read(RTC_REG_B) | RTC_REG_B_INT_UPDATE);
        }
    }
    else {
        /* unsubscribing always succeeds: clear the slot, mask the IRQ */
        accepted = true;
        update_pid = KERNEL_PID_UNDEF;
        x86_cmos_write(RTC_REG_B, x86_cmos_read(RTC_REG_B) & ~RTC_REG_B_INT_UPDATE);
    }

    rtc_irq_handler(0);

    restoreIRQ(irq_state);
    return accepted;
}
/*---------------------------------------------------------------------------*/
/**
 * @brief   Re-enable interrupts after a flash operation.
 *
 * Restores the interrupt-enable registers IE1/IE2 from the file-scope
 * copies ie1/ie2 (presumably saved by the matching "begin" routine --
 * confirm against the caller), then restores the global interrupt state.
 *
 * @param istate  saved global IRQ state from the matching disableIRQ() call
 */
void finish(uint8_t istate)
{
    /* Enable interrupts. */
    IE1 = ie1;
    IE2 = ie2;
    restoreIRQ(istate);
}
/**
 * @brief   Encode a CAN frame as ASCII ("T<id:8 hex><len:1 hex><data hex>\r")
 *          and send it on UART1.
 *
 * While the message drains through the UART, the watchdog is fed (0xAA/0x55
 * sequence to WDFEED) with interrupts masked after each partial write.
 *
 * @param id    CAN identifier, emitted as 8 zero-padded hex digits
 * @param size  number of payload bytes (at most 8)
 * @param pData payload bytes, each emitted as 2 zero-padded hex digits
 *
 * @return true when the frame was sent, false when size > 8
 */
boolean SendCANFrame( uint32_t id, uint8_t size, uint8_t *pData )
{
    unsigned _cpsr;
    int i;
    char wrkbuf[ 10 ];
    char canmsg[ 50 ];
    const char *p;

    if ( size > 8 ) return false;

    _itoa( id, wrkbuf, 16 );
    strcpy( canmsg, "T" );

    // Add zeros in front of id so length always is 8
    for ( i=0; i < ( 8 - strlen( wrkbuf ) ); i++ ) {
        strcat( canmsg, "0" );
    }

    // Copy in id
    strcat( canmsg, wrkbuf );

    // data size
    _itoa( size, wrkbuf, 16 );
    strcat( canmsg, wrkbuf );

    /* payload: each byte as two hex digits, zero-padded on the left */
    for ( i=0; i<size; i++ ) {
        _itoa( pData[ i ], wrkbuf, 16 );
        if ( 1 == strlen( wrkbuf ) ) {
            strcat( canmsg, "0" );
        }
        strcat( canmsg, wrkbuf );
    }

    // add CR at end
    strcat( canmsg, "\r" );

    // Send the frame
    //uart1Puts( canmsg );
    /* NOTE(review): uart1Puts() appears to return a pointer to the unsent
     * remainder of the string -- confirm against its documentation */
    p = canmsg;
    while ( 0 != *p ) {
        p = uart1Puts( p );
        /* feed the watchdog atomically between partial UART writes */
        _cpsr = disableIRQ();
        WDFEED = 0xAA;
        WDFEED = 0x55;
        restoreIRQ( _cpsr );
    }

    //uart0Puts( canmsg );
    //uart0Puts( "\r\n" );

    return true;
}
/**
 * @brief   Leave a wrapped syscall on the native (host-process) port.
 *
 * Decrements the syscall nesting counter. When this was the outermost
 * syscall, signals are pending, interrupts are enabled and the scheduler is
 * running, the current thread's context is saved and execution switches to
 * the ISR context so the pending "interrupts" get handled.
 */
void _native_syscall_leave(void)
{
#if LOCAL_DEBUG
    real_write(STDERR_FILENO, "< _native_in_syscall\n", 21);
#endif
    _native_in_syscall--;
    if (
            (_native_sigpend > 0)
            && (_native_in_isr == 0)
            && (_native_in_syscall == 0)
            && (native_interrupts_enabled == 1)
            && (sched_active_thread != NULL)
       )
    {
        _native_in_isr = 1;
        unsigned int mask = disableIRQ();
        /* remember where to return to and set up a dedicated stack for the
         * ISR context */
        _native_cur_ctx = (ucontext_t *)sched_active_thread->sp;
        native_isr_context.uc_stack.ss_sp = __isr_stack;
        native_isr_context.uc_stack.ss_size = SIGSTKSZ;
        native_isr_context.uc_stack.ss_flags = 0;
        makecontext(&native_isr_context, native_irq_handler, 0);
        /* switch to the ISR context; returns here once the handler is done */
        if (swapcontext(_native_cur_ctx, &native_isr_context) == -1) {
            err(EXIT_FAILURE, "_native_syscall_leave: swapcontext");
        }
        restoreIRQ(mask);
    }
}
int pthread_cond_broadcast(struct pthread_cond_t *cond) { unsigned old_state = disableIRQ(); int other_prio = -1; while (1) { queue_node_t *head = queue_remove_head(&(cond->queue)); if (head == NULL) { break; } tcb_t *other_thread = (tcb_t *) sched_threads[head->data]; if (other_thread) { other_prio = max_prio(other_prio, other_thread->priority); sched_set_status(other_thread, STATUS_PENDING); } head->data = -1u; } restoreIRQ(old_state); if (other_prio >= 0) { sched_switch(sched_active_thread->priority, other_prio); } return 0; }
/**
 * @brief   Common read/write path for pipes.
 *
 * Tries to transfer up to @p n bytes between @p buf and the ring buffer
 * using @p ringbuffer_op. When data was moved, a thread blocked on the
 * opposite operation (if any) is woken. When nothing could be moved, the
 * call returns 0 if issued from an ISR or if a thread is already blocked on
 * this operation; otherwise the caller blocks until the other side makes
 * progress and then retries.
 *
 * @param rb                ring buffer to operate on
 * @param buf               user buffer to read from / write into
 * @param n                 maximum number of bytes to transfer
 * @param other_op_blocked  in/out: thread blocked on the opposite operation
 * @param this_op_blocked   in/out: thread blocked on this operation
 * @param ringbuffer_op     ringbuffer function performing the actual transfer
 *
 * @return number of bytes transferred, or 0 when nothing could be done
 */
static ssize_t pipe_rw(ringbuffer_t *rb,
                       void *buf,
                       size_t n,
                       tcb_t **other_op_blocked,
                       tcb_t **this_op_blocked,
                       ringbuffer_op_t ringbuffer_op)
{
    if (n == 0) {
        return 0;
    }

    while (1) {
        unsigned old_state = disableIRQ();

        unsigned count = ringbuffer_op(rb, buf, n);

        if (count > 0) {
            /* progress was made: wake the thread (if any) blocked on the
             * opposite operation */
            tcb_t *other_thread = *other_op_blocked;
            int other_prio = -1;
            if (other_thread) {
                *other_op_blocked = NULL;
                other_prio = other_thread->priority;
                sched_set_status(other_thread, STATUS_PENDING);
            }

            restoreIRQ(old_state);

            if (other_prio >= 0) {
                sched_switch(other_prio);
            }

            return count;
        }
        else if (*this_op_blocked || inISR()) {
            /* can't block: someone already waits on this op, or we're in an
             * ISR where sleeping is impossible */
            restoreIRQ(old_state);
            return 0;
        }
        else {
            /* no data/space: go to sleep until the other side wakes us */
            *this_op_blocked = (tcb_t *) sched_active_thread;

            sched_set_status((tcb_t *) sched_active_thread, STATUS_SLEEPING);
            restoreIRQ(old_state);
            thread_yield();
        }
    }
}
/* Register an upper layer thread */
/**
 * @brief   Register thread @p pid for transceiver(s) @p t.
 *
 * Reuses the thread's existing slot if present, otherwise takes the first
 * free slot (transceivers == TRANSCEIVER_NONE).
 *
 * @return 1 when a slot was found and updated, 0 when the table is full
 */
uint8_t transceiver_register(transceiver_type_t t, kernel_pid_t pid)
{
    int result = 0;
    unsigned state = disableIRQ();

    for (size_t i = 0; i < TRANSCEIVER_MAX_REGISTERED; i++) {
        if ((reg[i].pid == pid) || (reg[i].transceivers == TRANSCEIVER_NONE)) {
            reg[i].transceivers |= t;
            reg[i].pid = pid;
            DEBUG("transceiver: Thread %i registered for %i\n", reg[i].pid, reg[i].transceivers);
            result = 1;
            /* BUGFIX: restoreIRQ() used to be called here AND after the
             * loop; the single restore below is sufficient */
            break;
        }
    }

    restoreIRQ(state);
    return result;
}
/**
 * @brief Allocate memory from the heap (newlib _sbrk hook).
 *
 * The current heap implementation is very rudimentary: it simply bumps
 * heap_top by @p incr bytes. It does not
 * - check whether the returned address is valid (the memory may not exist)
 * - provide any means to free memory again
 *
 * TODO: check if the requested memory is really available
 *
 * @return start address of the newly allocated region
 */
caddr_t _sbrk_r(struct _reent *r, size_t incr)
{
    unsigned int irq_state = disableIRQ();

    caddr_t allocation = heap_top;
    heap_top += incr;

    restoreIRQ(irq_state);
    return allocation;
}
/**
 * @brief   Queue message @p m on the calling thread's own message queue.
 *
 * Sets the sender pid to the calling thread before queueing, with
 * interrupts disabled around the queue manipulation.
 *
 * @return result of queue_msg() (whether the message could be queued)
 */
int msg_send_to_self(msg_t *m)
{
    unsigned int irq_state = disableIRQ();

    m->sender_pid = sched_active_pid;
    int queued = queue_msg((tcb_t *) sched_active_thread, m);

    restoreIRQ(irq_state);
    return queued;
}
/**
 * @brief   Cancel virtual timer @p t.
 *
 * Removes the timer's node from both the short-term and the long-term
 * queue (removal from a queue it is not in is a no-op) and re-arms the
 * short-term timer, all with interrupts disabled.
 */
void vtimer_remove(vtimer_t *t)
{
    unsigned irq_state = disableIRQ();

    priority_queue_remove(&shortterm_priority_queue_root, timer_get_node(t));
    priority_queue_remove(&longterm_priority_queue_root, timer_get_node(t));
    update_shortterm();

    restoreIRQ(irq_state);
}