/* Down the semaphore's count or wait for 'timeout' ticks for it to go up if
 * it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may
 * safely be used in an ISR. */
int semaphore_wait(struct semaphore *s, int timeout)
{
    int ret = OBJ_WAIT_TIMEDOUT;

    int oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    int count = s->count;
    if(LIKELY(count > 0))
    {
        /* count is not zero; down it */
        s->count = count - 1;
        ret = OBJ_WAIT_SUCCEEDED;
    }
    else if(timeout != 0)
    {
        ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT, oldlevel);

        /* too many waits - block until count is upped... */
        struct thread_entry *current = __running_self_entry();

        block_thread(current, timeout, &s->queue, NULL);
        corelock_unlock(&s->cl);

        /* ...and turn control over to next thread */
        switch_thread();

        /* if an explicit wake was indicated, do no more */
        if(LIKELY(!wait_queue_ptr(current)))
            return OBJ_WAIT_SUCCEEDED;

        disable_irq();
        corelock_lock(&s->cl);

        /* see if anyone got us after the expired wait */
        if(wait_queue_try_remove(current))
        {
            count = s->count;
            if(count > 0)
            {
                /* belatedly down it */
                s->count = count - 1;
                ret = OBJ_WAIT_SUCCEEDED;
            }
        }
    }
    /* else just polling it */

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

    return ret;
}
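/* A minimal usage sketch (an illustration, not part of the original source):
 * a consumer downs 'data_sem', waiting up to one second for a producer.
 * 'data_sem' and its setup via semaphore_init are assumed to exist elsewhere;
 * HZ is the kernel's ticks-per-second constant. */
static struct semaphore data_sem;

static void consumer_step(void)
{
    /* OBJ_WAIT_SUCCEEDED means the count was downed before the timeout */
    if(semaphore_wait(&data_sem, HZ) == OBJ_WAIT_SUCCEEDED)
    {
        /* an item is available - consume it here */
    }
}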
#ifdef CPU_BOOST_LOGGING
void cpu_boost_(bool on_off, char* location, int line)
{
    corelock_lock(&boostctrl_cl);

    if (cpu_boost_calls_count == MAX_BOOST_LOG)
    {
        cpu_boost_first = (cpu_boost_first+1)%MAX_BOOST_LOG;
        cpu_boost_calls_count--;
        if (cpu_boost_calls_count < 0)
            cpu_boost_calls_count = 0;
    }

    if (cpu_boost_calls_count < MAX_BOOST_LOG)
    {
        int message = (cpu_boost_first+cpu_boost_calls_count)%MAX_BOOST_LOG;
        snprintf(cpu_boost_calls[message], MAX_PATH, "%c %s:%d",
                 on_off ? 'B' : 'U', location, line);
        cpu_boost_calls_count++;
    }
#else
void cpu_boost(bool on_off)
{
    corelock_lock(&boostctrl_cl);
#endif /* CPU_BOOST_LOGGING */

    if(on_off)
    {
        /* Boost the frequency if not already boosted */
        if(++boost_counter == 1)
            set_cpu_frequency(CPUFREQ_MAX);
    }
    else
    {
        /* Lower the frequency if the counter reaches 0 */
        if(--boost_counter <= 0)
        {
            if(cpu_idle)
                set_cpu_frequency(CPUFREQ_DEFAULT);
            else
                set_cpu_frequency(CPUFREQ_NORMAL);

            /* Safety measure */
            if (boost_counter < 0)
                boost_counter = 0;
        }
    }

    corelock_unlock(&boostctrl_cl);
}
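/* Hypothetical usage sketch: boost calls must stay balanced, since the clock
 * only drops back once 'boost_counter' returns to zero. 'cpu_boost' here
 * stands for whichever variant (logging or not) is compiled in. */
static void do_heavy_work(void)
{
    cpu_boost(true);    /* first boost raises the clock to CPUFREQ_MAX */
    /* ...CPU-intensive section... */
    cpu_boost(false);   /* last unboost drops back to NORMAL or DEFAULT */
}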
/* Enables queue_send on the specified queue - the caller allocates the extra
 * data structure. Only queues which are taken to be owned by a thread should
 * enable this; an official owner is not compulsory, but one must be
 * specified for priority inheritance to operate.
 *
 * Use of queue_wait(_w_tmo) by multiple threads on a queue using synchronous
 * messages results in an undefined order of message replies, or possible
 * default replies if two or more waits happen before a reply is done. */
void queue_enable_queue_send(struct event_queue *q,
                             struct queue_sender_list *send,
                             unsigned int owner_id)
{
    int oldlevel = disable_irq_save();
    corelock_lock(&q->cl);

    if(send != NULL && q->send == NULL)
    {
        memset(send, 0, sizeof(*send));
#ifdef HAVE_PRIORITY_SCHEDULING
        send->blocker.wakeup_protocol = wakeup_priority_protocol_release;
        send->blocker.priority = PRIORITY_IDLE;
        if(owner_id != 0)
        {
            send->blocker.thread = thread_id_entry(owner_id);
            q->blocker_p = &send->blocker;
        }
#endif
        q->send = send;
    }

    corelock_unlock(&q->cl);
    restore_irq(oldlevel);

    (void)owner_id;
}
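/* Sketch of a thread enabling synchronous sends on its own queue (names are
 * illustrative; 'thread_self()' is assumed to return the caller's thread id).
 * The sender list must live at least as long as the queue itself. */
static struct event_queue       my_queue;
static struct queue_sender_list my_queue_send;

static void my_thread_init(void)
{
    queue_init(&my_queue, true);
    queue_enable_queue_send(&my_queue, &my_queue_send, thread_self());
}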
/* Gain ownership of a mutex object or block until it becomes free */
void mutex_lock(struct mutex *m)
{
    struct thread_entry *current = __running_self_entry();

    if(current == m->blocker.thread)
    {
        /* current thread already owns this mutex */
        m->recursion++;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* must read thread again inside cs (a multiprocessor concern really) */
    if(LIKELY(m->blocker.thread == NULL))
    {
        /* lock is open */
        m->blocker.thread = current;
        corelock_unlock(&m->cl);
        return;
    }

    /* block until the lock is open... */
    disable_irq();
    block_thread(current, TIMEOUT_BLOCK, &m->queue, &m->blocker);

    corelock_unlock(&m->cl);

    /* ...and turn control over to next thread */
    switch_thread();
}
/* Up the semaphore's count and release any thread waiting at the head of the
 * queue. The count is saturated to the value of the 'max' parameter specified
 * in 'semaphore_init'. */
void semaphore_release(struct semaphore *s)
{
    unsigned int result = THREAD_NONE;

    int oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    struct thread_entry *thread = WQ_THREAD_FIRST(&s->queue);
    if(LIKELY(thread != NULL))
    {
        /* a thread was queued - wake it up and keep the count at 0 */
        KERNEL_ASSERT(s->count == 0,
                      "semaphore_release->threads queued but count=%d!\n",
                      s->count);
        result = wakeup_thread(thread, WAKEUP_DEFAULT);
    }
    else
    {
        int count = s->count;
        if(count < s->max)
        {
            /* nothing waiting - up it */
            s->count = count + 1;
        }
    }

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

#if defined(HAVE_PRIORITY_SCHEDULING) && defined(is_thread_context)
    /* No thread switch if not in thread context */
    if((result & THREAD_SWITCH) && is_thread_context())
        switch_thread();
#endif
    (void)result;
}
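/* Sketch of the producer side matching the consumer sketch above, assuming
 * 'data_sem' was set up with a nonzero 'max' in semaphore_init. Since
 * releasing never blocks, it is safe from interrupt context as well. */
static void producer_isr(void)
{
    /* wakes the head waiter, or ups the count (saturated at 'max') */
    semaphore_release(&data_sem);
}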
static void ata_lock_lock(struct ata_lock *l)
{
    struct thread_entry * const current = thread_self_entry();

    if (current == l->thread)
    {
        /* current thread already owns the lock - just count the recursion */
        l->count++;
        return;
    }

    corelock_lock(&l->cl);

    IF_PRIO( current->skip_count = -1; )

    while (l->locked != 0)
    {
        /* lock is held elsewhere - release the corelock and yield until
         * it becomes free */
        corelock_unlock(&l->cl);
        switch_thread();
        corelock_lock(&l->cl);
    }

    l->locked = 1;
    l->thread = current;
    corelock_unlock(&l->cl);
}
char * cpu_boost_log_getlog_first(void)
{
    char *first;

    corelock_lock(&boostctrl_cl);

    first = NULL;
    if (cpu_boost_calls_count)
    {
        cpu_boost_track_message = 1;
        first = cpu_boost_calls[cpu_boost_first];
    }

    corelock_unlock(&boostctrl_cl);
    return first;
}
/* Performance function that works with an external buffer
   note that bx, bwidth and stride are in 8-pixel units! */
void lcd_blit_mono(const unsigned char *data, int bx, int y, int bwidth,
                   int height, int stride)
{
#if (NUM_CORES > 1) && defined(HAVE_BACKLIGHT_INVERSION)
    corelock_lock(&cl);
#endif
    while (height--)
    {
        lcd_cmd_and_data(R_RAM_ADDR_SET, (y++ << 5) + addr_offset - bx);
        lcd_prepare_cmd(R_RAM_DATA);
        lcd_mono_data(data, bwidth);
        data += stride;
    }
#if (NUM_CORES > 1) && defined(HAVE_BACKLIGHT_INVERSION)
    corelock_unlock(&cl);
#endif
}
/* Release ownership of a mutex object - only the owning thread may call this */
void mutex_unlock(struct mutex *m)
{
    /* unlocker not being the owner is an unlocking violation */
    KERNEL_ASSERT(m->blocker.thread == __running_self_entry(),
                  "mutex_unlock->wrong thread (%s != %s)\n",
                  m->blocker.thread->name,
                  __running_self_entry()->name);

    if(m->recursion > 0)
    {
        /* this thread still owns lock */
        m->recursion--;
        return;
    }

    /* lock out other cores */
    corelock_lock(&m->cl);

    /* transfer to next queued thread if any */
    struct thread_entry *thread = WQ_THREAD_FIRST(&m->queue);
    if(LIKELY(thread == NULL))
    {
        /* no threads waiting - open the lock */
        m->blocker.thread = NULL;
        corelock_unlock(&m->cl);
        return;
    }

    const int oldlevel = disable_irq_save();
    /* Transfer of the owning thread is handled in the wakeup protocol
     * if priorities are enabled; otherwise just set it from the
     * queue head. */
#ifndef HAVE_PRIORITY_SCHEDULING
    m->blocker.thread = thread;
#endif
    unsigned int result = wakeup_thread(thread, WAKEUP_TRANSFER);
    restore_irq(oldlevel);

    corelock_unlock(&m->cl);

#ifdef HAVE_PRIORITY_SCHEDULING
    if(result & THREAD_SWITCH)
        switch_thread();
#endif
    (void)result;
}
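/* Minimal critical-section sketch (illustrative names; 'list_mutex' is
 * assumed to have been set up with mutex_init). Each lock must be paired
 * with an unlock by the same thread; recursive locking by the owner is
 * allowed and merely bumps 'recursion'. */
static struct mutex list_mutex;

static void update_shared_list(void)
{
    mutex_lock(&list_mutex);
    /* ...mutate the shared structure... */
    mutex_unlock(&list_mutex);
}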
/* Performance function that works with an external buffer
   note that bx and bwidth are in 8-pixel units! */
void lcd_blit_grey_phase(unsigned char *values, unsigned char *phases,
                         int bx, int y, int bwidth, int height, int stride)
{
#if (NUM_CORES > 1) && defined(HAVE_BACKLIGHT_INVERSION)
    corelock_lock(&cl);
#endif
    while (height--)
    {
        lcd_cmd_and_data(R_RAM_ADDR_SET, (y++ << 5) + addr_offset - bx);
        lcd_prepare_cmd(R_RAM_DATA);
        lcd_grey_data(values, phases, bwidth);
        values += stride;
        phases += stride;
    }
#if (NUM_CORES > 1) && defined(HAVE_BACKLIGHT_INVERSION)
    corelock_unlock(&cl);
#endif
}
void cpu_idle_mode(bool on_off)
{
    corelock_lock(&boostctrl_cl);

    cpu_idle = on_off;

    /* We need to adjust the frequency immediately if the CPU isn't boosted */
    if(boost_counter == 0)
    {
        if(cpu_idle)
            set_cpu_frequency(CPUFREQ_DEFAULT);
        else
            set_cpu_frequency(CPUFREQ_NORMAL);
    }

    corelock_unlock(&boostctrl_cl);
}
/* Down the semaphore's count or wait for 'timeout' ticks for it to go up if
 * it is already 0. 'timeout' as TIMEOUT_NOBLOCK (0) will not block and may
 * safely be used in an ISR. */
int semaphore_wait(struct semaphore *s, int timeout)
{
    int ret;
    int oldlevel;
    int count;

    oldlevel = disable_irq_save();
    corelock_lock(&s->cl);

    count = s->count;

    if(LIKELY(count > 0))
    {
        /* count is not zero; down it */
        s->count = count - 1;
        ret = OBJ_WAIT_SUCCEEDED;
    }
    else if(timeout == 0)
    {
        /* just polling it */
        ret = OBJ_WAIT_TIMEDOUT;
    }
    else
    {
        /* too many waits - block until count is upped... */
        struct thread_entry * current = thread_self_entry();
        IF_COP( current->obj_cl = &s->cl; )
        current->bqp = &s->queue;
        /* return value will be OBJ_WAIT_SUCCEEDED after the wait if the wake
         * was explicit in semaphore_release */
        current->retval = OBJ_WAIT_TIMEDOUT;

        if(timeout > 0)
            block_thread_w_tmo(current, timeout); /* ...or timed out... */
        else
            block_thread(current);                /* -timeout = infinite */

        corelock_unlock(&s->cl);

        /* ...and turn control over to next thread */
        switch_thread();

        return current->retval;
    }

    corelock_unlock(&s->cl);
    restore_irq(oldlevel);

    return ret;
}
static void invert_display(void)
{
    static bool last_invert = false;
    bool new_invert = lcd_inverted ^ lcd_backlit;

    if (new_invert != last_invert)
    {
        int oldlevel = disable_irq_save();
#if NUM_CORES > 1
        corelock_lock(&cl);
        lcd_cmd_and_data(R_DISPLAY_CONTROL, new_invert ? 0x0027 : 0x0019);
        corelock_unlock(&cl);
#else
        lcd_cmd_and_data(R_DISPLAY_CONTROL, new_invert ? 0x0027 : 0x0019);
#endif
        restore_irq(oldlevel);

        last_invert = new_invert;
    }
}
char * cpu_boost_log_getlog_next(void)
{
    int message;
    char *next;

    corelock_lock(&boostctrl_cl);

    message = (cpu_boost_track_message+cpu_boost_first)%MAX_BOOST_LOG;
    next = NULL;
    if (cpu_boost_track_message < cpu_boost_calls_count)
    {
        cpu_boost_track_message++;
        next = cpu_boost_calls[message];
    }

    corelock_unlock(&boostctrl_cl);
    return next;
}
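/* Sketch of walking the boost log with the two getters above: fetch the
 * oldest entry, then advance until NULL. 'dump_boost_log' and the output
 * step are illustrative only. */
static void dump_boost_log(void)
{
    char *msg = cpu_boost_log_getlog_first();
    while(msg != NULL)
    {
        /* print or copy 'msg' here */
        msg = cpu_boost_log_getlog_next();
    }
}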
/* Queue must not be available for use during this call */
void queue_init(struct event_queue *q, bool register_queue)
{
    int oldlevel = disable_irq_save();

    if(register_queue)
    {
        corelock_lock(&all_queues.cl);
    }

    corelock_init(&q->cl);
    q->queue = NULL;
    /* What garbage is in 'write' is irrelevant because of the masking design -
     * any other functions that empty the queue do this as well so that
     * queue_count and queue_empty return sane values in the case of a
     * concurrent change without locking inside them. */
    q->read = q->write;
#ifdef HAVE_EXTENDED_MESSAGING_AND_NAME
    q->send = NULL;             /* No message sending by default */
    IF_PRIO( q->blocker_p = NULL; )
#endif

    if(register_queue)
    {
        void **queues = (void **)all_queues.queues;
        void **p = find_array_ptr(queues, q);

        if(p - queues >= MAX_NUM_QUEUES)
            panicf("queue_init->out of queues");

        if(*p == NULL)
        {
            /* add it to the all_queues array */
            *p = q;
            corelock_unlock(&all_queues.cl);
        }
    }

    restore_irq(oldlevel);
}
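/* Usage sketch: a statically allocated queue registered in the global list,
 * then serviced in the owning thread's loop. 'ui_queue'/'ui_thread' are
 * illustrative names, and 'struct queue_event' with queue_wait is assumed to
 * be the kernel's usual event type and blocking receive. */
static struct event_queue ui_queue;

static void ui_thread(void)
{
    struct queue_event ev;

    queue_init(&ui_queue, true);

    while(1)
    {
        queue_wait(&ui_queue, &ev);
        /* dispatch on ev.id / ev.data here */
    }
}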