/**
 * Toggle the status LED.
 *
 * Keeps the current on/off state in a private static flag and drives
 * the LED to the opposite state on every call.
 */
INLINE void LED_TOGGLE(void)
{
	static int led_status;

	led_status = !led_status;
	if (led_status)
		ATOMIC(LED_ON());
	else
		ATOMIC(LED_OFF());
}
/**
 * Private AVR flush function.
 *
 * Write the currently buffered page to flash memory, but only if it
 * has been modified. The flash page is erased before being rewritten.
 *
 * This function is only used internally in this module.
 */
static void flash_avr_flush(Flash *fd)
{
	if (fd->page_dirty)
	{
		LOG_INFO("Flushing page %d\n", fd->curr_page);

		// Wait while the SPM instruction is busy.
		boot_spm_busy_wait();

		LOG_INFO("Filling temparary page buffer...");

		// Fill the AVR's temporary page buffer one little-endian
		// 16-bit word at a time, as required by boot_page_fill().
		for (page_addr_t page_addr = 0; page_addr < SPM_PAGESIZE; page_addr += 2)
		{
			uint16_t word = ((uint16_t)fd->page_buf[page_addr + 1] << 8) | fd->page_buf[page_addr];
			ATOMIC(boot_page_fill(page_addr, word));
		}
		LOG_INFO("Done.\n");

		// Reset the watchdog before starting the erase/write sequence.
		wdt_reset();

		LOG_INFO("Erasing page, addr %u...", fd->curr_page * SPM_PAGESIZE);

		/* Page erase */
		ATOMIC(boot_page_erase(fd->curr_page * SPM_PAGESIZE));
		/* Wait until the memory is erased. */
		boot_spm_busy_wait();

		LOG_INFO("Done.\n");
		LOG_INFO("Writing page, addr %u...", fd->curr_page * SPM_PAGESIZE);

		/* Store buffer in flash page. */
		ATOMIC(boot_page_write(fd->curr_page * SPM_PAGESIZE));
		boot_spm_busy_wait();  // Wait while the SPM instruction is busy.

		/*
		 * Reenable the RWW section again. We need this if we want to
		 * jump back to the application after bootloading.
		 */
		ATOMIC(boot_rww_enable());

		fd->page_dirty = false;
		LOG_INFO("Done.\n");
	}
}
/** * Change the scheduling priority of a process. * * Process piorities are signed ints, whereas a larger integer value means * higher scheduling priority. The default priority for new processes is 0. * The idle process runs with the lowest possible priority: INT_MIN. * * A process with a higher priority always preempts lower priority processes. * Processes of equal priority share the CPU time according to a simple * round-robin policy. * * As a general rule to maximize responsiveness, compute-bound processes * should be assigned negative priorities and tight, interactive processes * should be assigned positive priorities. * * To avoid interfering with system background activities such as input * processing, application processes should remain within the range -10 * and +10. */ void proc_setPri(struct Process *proc, int pri) { #if CONFIG_KERN_PRI_INHERIT int new_pri; /* * Whatever it will happen below, this is the new * original priority of the process, i.e., the priority * it has without taking inheritance under account. */ proc->orig_pri = pri; /* If not changing anything we can just leave */ if ((new_pri = __prio_proc(proc)) == proc->link.pri) return; /* * Actual process priority is the highest among its * own priority and the one of the top-priority * process that it is blocking (returned by * __prio_proc()). */ proc->link.pri = new_pri; #else if (proc->link.pri == pri) return; proc->link.pri = pri; #endif // CONFIG_KERN_PRI_INHERIT if (proc != current_process) ATOMIC(sched_reenqueue(proc)); }
/*
 * Main idea: as long as no other thread has modified the stack, a
 * thread's modification can proceed.
 *
 * Atomically compare *reg against oldval and, when they match, store
 * newval. Returns the value *reg held on entry, so a caller can tell
 * the swap happened when the return value equals oldval.
 */
int compare_and_swap(int* reg, int oldval, int newval)
{
	int previous;

	ATOMIC();
	previous = *reg;
	if (previous == oldval)
		*reg = newval;
	END_ATOMIC();

	return previous;
}
/*
 * Shut the SPI port down: disable the controller, close the TX bus
 * and release every SPI line back to high-impedance (input) state.
 */
static void spi_cleanup(UNUSED_ARG(struct SerialHardware *, _hw))
{
	/* Turn the SPI controller off. */
	SPCR = 0;

	SER_SPI_BUS_TXCLOSE;

	/* Set all pins as inputs */
	ATOMIC(SPI_DDR &= ~(BV(SPI_SS_BIT) | BV(SPI_SCK_BIT)
	                  | BV(SPI_MOSI_BIT) | BV(SPI_MISO_BIT)));
}
/**
 * Change the scheduling priority of a process.
 *
 * Process priorities are signed ints; a larger value means higher
 * scheduling priority. New processes start at priority 0, and the
 * idle process runs at the lowest possible priority: INT_MIN.
 *
 * A higher-priority process always preempts lower-priority ones,
 * while processes of equal priority share the CPU time with a simple
 * round-robin policy.
 *
 * As a general rule to maximize responsiveness, compute-bound
 * processes should get negative priorities and tight, interactive
 * processes positive ones. Application processes should stay within
 * -10..+10 to avoid interfering with system background activities
 * such as input processing.
 */
void proc_setPri(struct Process *proc, int pri)
{
	/* Nothing changes: we can leave right away. */
	if (pri == proc->link.pri)
		return;

	proc->link.pri = pri;

	/* The current process is requeued by the scheduler itself. */
	if (proc != current_process)
		ATOMIC(sched_reenqueue(proc));
}
/*
 * Dispatch an SVC exception to the matching syscall handler.
 *
 * The syscall number is read from the low byte of the SVC instruction
 * itself, found two bytes behind the stacked return address. Known
 * indices (expanded by SYSCALL_MAP into case labels) are forwarded to
 * the dispatch table, passing the stacked argument area; non-void
 * syscalls write their result back into the first stacked register.
 * Unknown indices are reported and trap into the debugger.
 */
static void __attribute__((used)) svcHandlerDispatch(StackFrame* frame)
{
	ATOMIC(svcInterruptCount++);

	/* SVC immediate: low byte of the instruction preceding the
	 * stacked PC. */
	SyscallIndex idx = (SyscallIndex)(((uint8_t*)frame->pc)[-2]);

	switch (idx)
	{
	SYSCALL_MAP
		if (dispatchTable[idx].isVoid)
			dispatchTable[idx].vfn(frame->a);
		else
			frame->a[0] = dispatchTable[idx].fn(frame->a);
		break;

	default:
		/* Fixed typo in the diagnostic ("Uknown" -> "Unknown"). */
		sysprintln("Unknown SVC: %d", idx);
		__asm("bkpt");
		break;
	}
}
/**
 * Pop one element from a 32-bit FIFO.
 *
 * On an empty buffer, sets RSLT_BUFFER_EMPTY through \a result and
 * returns ERROR; otherwise returns the element and clears the result
 * code. The read pointer wraps back to the start of the storage area
 * when it reaches the end.
 */
uint32 fifo32Read (fifo32 * buf, uint8 * result)
{
	VERIFY_OBJECT (buf, OBJID_FIFO)

	CLEAR_RESULT;

	if (buf->nItems == 0)
	{
		SET_RESULT (RSLT_BUFFER_EMPTY);
		return ERROR;
	}

	uint32 val = *buf->rd_ptr++;
	ATOMIC(buf->nItems--);

	/* Wrap the read pointer at the end of the buffer. */
	if (buf->rd_ptr == buf->end)
		buf->rd_ptr = buf->start;

	return val;
}
/**
 * Pop one element from a 16-bit FIFO.
 *
 * On an empty buffer, sets RSLT_BUFFER_EMPTY through \a result and
 * returns ERROR; otherwise returns the element and clears the result
 * code. The read pointer wraps back to the start of the storage area
 * when it reaches the end.
 */
uint16 fifo16Read (fifo16 * buf, uint8 * result)
{
	/* Element type matches the FIFO width (was uint32, which silently
	 * truncated on return and disagreed with fifo32Read's pattern). */
	uint16 val;

	VERIFY_OBJECT (buf, OBJID_FIFO)

	CLEAR_RESULT;

	if (buf->nItems == 0)
	{
		SET_RESULT (RSLT_BUFFER_EMPTY);
		return ERROR;
	}

	val = * buf->rd_ptr;
	buf->rd_ptr++;
	ATOMIC(buf->nItems--);

	/* Wrap the read pointer at the end of the buffer. */
	if (buf->rd_ptr == buf->end)
		buf->rd_ptr = buf->start;

	return val;
}
/**
 * Insert \a c in the tx FIFO buffer.
 *
 * \note This function will switch out the calling process
 *       if the tx buffer is full. If the buffer is full
 *       and \a port->txtimeout is 0, return EOF immediately.
 *
 * \return EOF on error or timeout, \a c otherwise.
 */
static int ser_putchar(int c, struct Serial *port)
{
	if (fifo_isfull_locked(&port->txfifo))
	{
#if CONFIG_SER_TXTIMEOUT != -1
		/* If timeout == 0 we don't want to wait */
		if (port->txtimeout == 0)
			return EOF;

		ticks_t start_time = timer_clock();
#endif

		/* Wait while buffer is full... */
		do
		{
			cpu_relax();

#if CONFIG_SER_TXTIMEOUT != -1
			if (timer_clock() - start_time >= port->txtimeout)
			{
				/* Record the timeout; status is presumably also
				 * updated from interrupt context, hence ATOMIC. */
				ATOMIC(port->status |= SERRF_TXTIMEOUT);
				return EOF;
			}
#endif /* CONFIG_SER_TXTIMEOUT */
		}
		while (fifo_isfull_locked(&port->txfifo));
	}

	fifo_push_locked(&port->txfifo, (unsigned char)c);

	/* (Re)trigger the tx interrupt so the driver starts draining. */
	port->hw->table->txStart(port->hw);

	/* Avoid returning a sign-extended char */
	return (int)((unsigned char)c);
}
/**
 * Fetch a character from the rx FIFO buffer.
 *
 * \note This function will switch out the calling process
 *       if the rx buffer is empty. If the buffer is empty
 *       and \a port->rxtimeout is 0, return EOF immediately.
 *
 * \return EOF on error or timeout, \a c otherwise.
 */
static int ser_getchar(struct Serial *port)
{
	if (fifo_isempty_locked(&port->rxfifo))
	{
#if CONFIG_SER_RXTIMEOUT != -1
		/* If timeout == 0 we don't want to wait for chars */
		if (port->rxtimeout == 0)
			return EOF;

		ticks_t start_time = timer_clock();
#endif

		/* Wait while buffer is empty */
		do
		{
			cpu_relax();

#if CONFIG_SER_RXTIMEOUT != -1
			if (timer_clock() - start_time >= port->rxtimeout)
			{
				/* Record the timeout; status is presumably also
				 * updated from interrupt context, hence ATOMIC. */
				ATOMIC(port->status |= SERRF_RXTIMEOUT);
				return EOF;
			}
#endif /* CONFIG_SER_RXTIMEOUT */
		}
		/* Also stop waiting if a receive error is flagged. */
		while (fifo_isempty_locked(&port->rxfifo)
			&& (ser_getstatus(port) & SERRF_RX) == 0);
	}

	/*
	 * Get a byte from the FIFO (avoiding sign-extension),
	 * re-enable RTS, then return result.
	 */
	if (ser_getstatus(port) & SERRF_RX)
		return EOF;
	return (int)(unsigned char)fifo_pop_locked(&port->rxfifo);
}
/*
 * Build, on the heap at H, a Prolog list of entries harvested from the
 * global hash table `htable`.
 *
 * NOTE(review): semantics inferred from the visible macros — confirm
 * against the engine's documentation. X(1) must be an integer stamp;
 * X(2) must be an integer minimum arity. Returns NULL on bad arguments,
 * otherwise returns H with the collected list (terminated by PUSH_NIL)
 * built on top.
 */
term hlist(register term H, register term regs, stack wam)
{
	no i;
	cell xval;
	bp_long ival;
	byte stamp;
#if TRACE>0
	fprintf(STD_err,"entering hlist, wam=%d, bboard=%d H=%d\n",
		wam,g.shared[BBoardStk].base,H);
	bbcheck(wam);
#endif
	/* First arg: stamp (must be an integer). */
	if(!INTEGER(X(1))) return NULL;
	stamp=(byte)(OUTPUT_INT(X(1)));

	/* Second arg: starting arity of listed terms (must be an integer). */
	xval=X(2);
	if(!INTEGER(xval)) return NULL;
	ival=OUTPUT_INT(xval);

	/* Scan every hash slot stamped at or after `stamp` that is in use. */
	for(i=0; i<HMAX; i++)
		if(hstamp[i]>=stamp && HUSED())
		{
			term xref=C2T(g.predmark);
			if(hstamp[i]<=RUNTIME)
			{ /* gets preds of arity < ival `represented' as g.predmark */
				if(g.predmark!=htable[i].pred || GETARITY(htable[i].fun)<(no)ival)
					continue;
				xval=g.predmark;
			}
			else
			{ /* gets RUNTIME data of arity > ival */
				cell v=htable[i].val;
				if(NULL==(term)v) continue;
				/* Sanity check: a variable value should live on one of
				 * the known stacks; otherwise it is only reported when
				 * tracing (the skip is deliberately disabled below). */
				if(VAR(v) && !( ONSTACK(g.shared[BBoardStk],v)
					|| ONSTACK(g.shared[InstrStk],v)
					/*|| ON(HeapStk,v) */
					))
				{
#if TRACE>0
					fprintf(STD_err,
						"unexpected data in htable[%d]=>\n<%s,%s>->%s\n",i,
						smartref(htable[i].pred,wam),
						smartref(htable[i].fun,wam),
						smartref(v,wam));
#endif
					/* continue; */
				}
				FDEREF(v);
				/* Filter out: small integers, unbound vars, terms below
				 * the requested arity, and the empty marker. */
				if((INTEGER(xval) && ival>0) || VAR(xval) ||
					(GETARITY(xval) < (no)ival) || xval==g.empty )
					continue;
				if(COMPOUND(xval)) xval=T2C(xref);
			}
			/* Make sure the heap has room before pushing this entry. */
			IF_OVER("COPY_KEYS",(term *)H,HeapStk,bp_halt(9));
			SAVE_FUN(htable[i].pred);
			SAVE_FUN(htable[i].fun);
#if 0
			ASSERT2(( ATOMIC(xval)
				|| ONSTACK(g.shared[BBoardStk],xval)
				|| ON(HeapStk,xval)),
				/* will fail with multiple engines */
				xval);
#endif
			PUSH_LIST(xval);
		}
	PUSH_NIL();
	return H;
}
/*
 * Initialize the SPI hardware as a bus master: configure pin
 * directions, enable the controller with interrupts, and program data
 * order, clock rate, polarity and phase from the CONFIG_SPI_* options.
 */
static void spi_init(UNUSED_ARG(struct SerialHardware *, _hw), UNUSED_ARG(struct Serial *, ser))
{
	/*
	 * Set MOSI and SCK ports out, MISO in.
	 *
	 * The ATmega64/128 datasheet explicitly states that the input/output
	 * state of the SPI pins is not significant, as when the SPI is
	 * active the I/O ports are overridden.
	 * This is *blatantly FALSE*.
	 *
	 * Moreover, the MISO pin on the board_kc *must* be in high impedance
	 * state even when the SPI is off, because the line is wired together
	 * with the KBus serial RX, and the transmitter of the slave boards
	 * would be unable to drive the line.
	 */
	ATOMIC(SPI_DDR |= (BV(SPI_MOSI_BIT) | BV(SPI_SCK_BIT)));

	/*
	 * If the SPI master mode is activated and the SS pin is in input and tied low,
	 * the SPI hardware will automatically switch to slave mode!
	 * For proper communication this pin should therefore be either:
	 * - an output, or
	 * - an input, but tied high forever!
	 * This driver sets the pin as an output.
	 */
#warning FIXME:SPI SS pin set as output for proper operation, check schematics for possible conflicts.
	ATOMIC(SPI_DDR |= BV(SPI_SS_BIT));

	ATOMIC(SPI_DDR &= ~BV(SPI_MISO_BIT));

	/* Enable SPI, IRQ on, Master */
	SPCR = BV(SPE) | BV(SPIE) | BV(MSTR);

	/* Set data order */
#if CONFIG_SPI_DATA_ORDER == SER_LSB_FIRST
	SPCR |= BV(DORD);
#endif

	/* Set SPI clock rate */
#if CONFIG_SPI_CLOCK_DIV == 128
	SPCR |= (BV(SPR1) | BV(SPR0));
#elif (CONFIG_SPI_CLOCK_DIV == 64 || CONFIG_SPI_CLOCK_DIV == 32)
	SPCR |= BV(SPR1);
#elif (CONFIG_SPI_CLOCK_DIV == 16 || CONFIG_SPI_CLOCK_DIV == 8)
	SPCR |= BV(SPR0);
#elif (CONFIG_SPI_CLOCK_DIV == 4 || CONFIG_SPI_CLOCK_DIV == 2)
	// SPR0 & SPR1 both at 0
#else
	#error Unsupported SPI clock division factor.
#endif

	/* Set SPI2X bit (spi double frequency) */
#if (CONFIG_SPI_CLOCK_DIV == 128 || CONFIG_SPI_CLOCK_DIV == 64 \
	|| CONFIG_SPI_CLOCK_DIV == 16 || CONFIG_SPI_CLOCK_DIV == 4)
	SPSR &= ~BV(SPI2X);
#elif (CONFIG_SPI_CLOCK_DIV == 32 || CONFIG_SPI_CLOCK_DIV == 8 || CONFIG_SPI_CLOCK_DIV == 2)
	SPSR |= BV(SPI2X);
#else
	#error Unsupported SPI clock division factor.
#endif

	/* Set clock polarity */
#if CONFIG_SPI_CLOCK_POL == 1
	SPCR |= BV(CPOL);
#endif

	/* Set clock phase */
#if CONFIG_SPI_CLOCK_PHASE == 1
	SPCR |= BV(CPHA);
#endif

	SER_SPI_BUS_TXINIT;

	SER_STROBE_INIT;
}
/**
 * Create a new process, starting at the provided entry point.
 *
 * \param name        Process name (used only when CONFIG_KERN_MONITOR
 *                    or logging is enabled).
 * \param entry       Entry point of the new process.
 * \param data        Opaque user data stored in the process structure.
 * \param stack_size  Size of the stack in bytes (with CONFIG_KERN_HEAP,
 *                    0 means "use KERN_MINSTACKSIZE").
 * \param stack_base  Caller-provided stack, or NULL to allocate one
 *                    from the heap (CONFIG_KERN_HEAP only).
 *
 * \note The function
 * \code
 * proc_new(entry, data, stacksize, stack)
 * \endcode
 * is a more convenient way to create a process, as you don't have to
 * specify the name.
 *
 * \return Process structure of the newly created process
 *         if successful, NULL otherwise.
 */
struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)(void), iptr_t data, size_t stack_size, cpu_stack_t *stack_base)
{
	Process *proc;

	LOG_INFO("name=%s", name);
#if CONFIG_KERN_HEAP
	bool free_stack = false;

	/*
	 * Free up resources of a zombie process.
	 *
	 * We're implementing a kind of lazy garbage collector here for
	 * efficiency reasons: we can avoid to introduce overhead into another
	 * kernel task dedicated to free up resources (e.g., idle) and we're
	 * not introducing any overhead into the scheduler after a context
	 * switch (that would be *very* bad, because the scheduler runs with
	 * IRQ disabled).
	 *
	 * In this way we are able to release the memory of the zombie tasks
	 * without disabling IRQs and without introducing any significant
	 * overhead in any other kernel task.
	 */
	proc_freeZombies();

	/* Did the caller provide a stack for us? */
	if (!stack_base)
	{
		/* Did the caller specify the desired stack size? */
		if (!stack_size)
			stack_size = KERN_MINSTACKSIZE;

		/* Allocate stack dynamically */
		PROC_ATOMIC(stack_base =
			(cpu_stack_t *)heap_allocmem(&proc_heap, stack_size));
		if (stack_base == NULL)
			return NULL;

		free_stack = true;
	}
#else // CONFIG_KERN_HEAP

	/* Stack must have been provided by the user */
	ASSERT_VALID_PTR(stack_base);
	ASSERT(stack_size);

#endif // CONFIG_KERN_HEAP

#if CONFIG_KERN_MONITOR
	/*
	 * Fill-in the stack with a special marker to help debugging.
	 * On 64bit platforms, CONFIG_KERN_STACKFILLCODE is larger
	 * than an int, so the (int) cast is required to silence the
	 * warning for truncating its size.
	 */
	memset(stack_base, (int)CONFIG_KERN_STACKFILLCODE, stack_size);
#endif

	/* Initialize the process control block, carved out of the stack
	 * area itself (at its bottom or top depending on growth direction). */
	if (CPU_STACK_GROWS_UPWARD)
	{
		proc = (Process *)stack_base;
		proc->stack = stack_base + PROC_SIZE_WORDS;
		// On some architectures the stack should be aligned, so we do it.
		proc->stack = (cpu_stack_t *)((uintptr_t)proc->stack +
			(sizeof(cpu_aligned_stack_t) -
				((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t))));
		if (CPU_SP_ON_EMPTY_SLOT)
			proc->stack++;
	}
	else
	{
		proc = (Process *)(stack_base + stack_size / sizeof(cpu_stack_t) - PROC_SIZE_WORDS);
		// On some architectures the stack should be aligned, so we do it.
		proc->stack = (cpu_stack_t *)((uintptr_t)proc -
			((uintptr_t)proc % sizeof(cpu_aligned_stack_t)));
		if (CPU_SP_ON_EMPTY_SLOT)
			proc->stack--;
	}
	/* Ensure stack is aligned */
	ASSERT((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t) == 0);

	/* The PCB ate part of the stack area: account for it. */
	stack_size -= PROC_SIZE_WORDS * sizeof(cpu_stack_t);
	proc_initStruct(proc);
	proc->user_data = data;

#if CONFIG_KERN_HEAP | CONFIG_KERN_MONITOR
	proc->stack_base = stack_base;
	proc->stack_size = stack_size;
	#if CONFIG_KERN_HEAP
	/* Remember to give the stack back to the heap on exit. */
	if (free_stack)
		proc->flags |= PF_FREESTACK;
	#endif
#endif
	proc->user_entry = entry;
	CPU_CREATE_NEW_STACK(proc->stack);

#if CONFIG_KERN_MONITOR
	monitor_add(proc, name);
#endif

	/* Add to ready list */
	ATOMIC(SCHED_ENQUEUE(proc));

	return proc;
}
/**
 * Set the current mask of repeatable keys.
 *
 * \return The mask that was previously active.
 */
keymask_t kbd_setRepeatMask(keymask_t mask)
{
	keymask_t prev = kbd_rpt_mask;

	/* The mask is swapped atomically. */
	ATOMIC(kbd_rpt_mask = mask);

	return prev;
}
/*
 * Unregister a keyboard handler: atomically unlink it from the
 * handler list.
 */
void kbd_remHandler(struct KbdHandler *handler)
{
	ATOMIC(REMOVE(&handler->link));
}