void badapple_sched(void)
{
	// Check whether the next second of video is ready.
	if (!sched_inited)
		return;
	if (!spinlock_trylock(&sched_running)) {
		spinlock_lock(&sched_skip_counter);
		++sched_skip_frames;
		spinlock_unlock(&sched_skip_counter);
		return;
	}
	if (atomic_read(&(buffers[sched_next_buffer].ready)) == 0) {
		//printf("[badapple] warn: decode not finished.\n");
		goto return_unlock;
	}

	// Copy the current frame out to VGA.
	uint8_t *frame = buffers[sched_next_buffer].data +
	    PIC_FRAME_SIZE * sched_next_frame;
#if 0
	va_video_write(va, frame, 0, 0, DST_WIDTH, DST_HEIGHT);
#endif
	// The video is already encoded in planar mode, so each plane can be
	// memcpy'd straight to the output.
	int m;
	for (m = 0; m < 4; ++m, frame += PIC_PLANE_SIZE) {
		va_set_plane_mask(va, (1 << m));
		memcpy(va->va_buffer, frame, PIC_PLANE_SIZE);
	}

	if (++sched_next_frame >= 30) {
		int last_buffer = sched_next_buffer;
		sched_next_buffer = 1 - sched_next_buffer;

		// Deal with skipped frames.
		spinlock_lock(&sched_skip_counter);
		if (sched_skip_frames >= 30) {
			sched_next_frame = 29;
			sched_skip_frames -= 29;
		} else {
			sched_next_frame = sched_skip_frames;
			sched_skip_frames = 0;
		}
		spinlock_unlock(&sched_skip_counter);
		if (sched_next_frame > 0)
			printf("[badapple] warn: Skip %d frames.\n", sched_next_frame);

		// Notify the decoder that the buffer has been used up.
		barrier();
		atomic_set(&(buffers[last_buffer].ready), 0);
	}

return_unlock:
	spinlock_unlock(&sched_running);
}
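The function above uses a classic trylock idiom: if the tick fires while the previous one is still running, the miss is counted instead of blocking, and the count is drained on the next successful run. A minimal sketch of that idiom using C11 atomics; tick_lock, skipped, do_tick, and periodic_tick are hypothetical names, not from the badapple source.

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag tick_lock = ATOMIC_FLAG_INIT;
static atomic_int skipped;

static void do_tick(int behind)
{
	/* Stand-in for the real per-frame work. */
	printf("tick, catching up on %d missed frames\n", behind);
}

void periodic_tick(void)
{
	/* test_and_set returns the previous value: true means another
	 * invocation is still inside, so count the miss and return
	 * instead of blocking. */
	if (atomic_flag_test_and_set(&tick_lock)) {
		atomic_fetch_add(&skipped, 1);
		return;
	}
	do_tick(atomic_exchange(&skipped, 0));
	atomic_flag_clear(&tick_lock);
}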
/**
 * Soft tick.
 *
 * Routine run outside of the clock interrupt handler. The CPU time it
 * consumes delays its own next invocation, not the interrupt.
 */
void clock_softtick()
{
	if (spinlock_trylock(&soft_guard)) {
		spinlock_unlock(&soft_guard);
		sched_action();		// does not actually switch threads
		__handle_callouts();
	}
}
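Note that the trylock/unlock pair here never protects the work that follows; it is a non-blocking probe: if the guard is currently held (by code that wants soft ticks suppressed), the tick is simply skipped. A sketch of the same probe with POSIX mutexes; suppress and soft_tick_allowed are hypothetical names.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t suppress = PTHREAD_MUTEX_INITIALIZER;

/* Returns true if nobody held the suppress lock at the moment of the
 * probe. The answer is only a snapshot, which is acceptable when a
 * skipped soft tick is harmless. */
static bool soft_tick_allowed(void)
{
	if (pthread_mutex_trylock(&suppress) != 0)
		return false;		/* guard held: tick suppressed */
	pthread_mutex_unlock(&suppress);
	return true;
}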
static void ipc_forget_all_active_calls(void)
{
	call_t *call;

restart:
	spinlock_lock(&TASK->active_calls_lock);
	if (list_empty(&TASK->active_calls)) {
		/*
		 * We are done, there are no more active calls.
		 * Nota bene: there may still be answers waiting for pick up.
		 */
		spinlock_unlock(&TASK->active_calls_lock);
		return;
	}

	call = list_get_instance(list_first(&TASK->active_calls), call_t,
	    ta_link);

	if (!spinlock_trylock(&call->forget_lock)) {
		/*
		 * Avoid deadlock and let async_answer() or
		 * _ipc_answer_free_call() win the race to dequeue the first
		 * call on the list.
		 */
		spinlock_unlock(&TASK->active_calls_lock);
		goto restart;
	}

	/*
	 * Forget the call and donate it to the task which holds up the answer.
	 */
	call->forget = true;
	call->sender = NULL;
	list_remove(&call->ta_link);

	/*
	 * The call may be freed by _ipc_answer_free_call() before we are done
	 * with it; to avoid working with a destroyed call_t structure, we
	 * must hold a reference to it.
	 */
	ipc_call_hold(call);

	spinlock_unlock(&call->forget_lock);
	spinlock_unlock(&TASK->active_calls_lock);

	atomic_dec(&call->caller_phone->active_calls);

	SYSIPC_OP(request_forget, call);

	ipc_call_release(call);

	goto restart;
}
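The trylock here is deadlock avoidance: the function already holds active_calls_lock, and taking forget_lock unconditionally could deadlock against code that acquires the two locks in the opposite order, so on failure it releases everything and restarts. The same pattern in a self-contained form, using POSIX mutexes; account_t and transfer are hypothetical names for illustration.

#include <pthread.h>
#include <sched.h>

typedef struct {
	pthread_mutex_t lock;
	long balance;
} account_t;

void transfer(account_t *from, account_t *to, long amount)
{
	for (;;) {
		pthread_mutex_lock(&from->lock);
		if (pthread_mutex_trylock(&to->lock) == 0)
			break;		/* both locks held, safe to proceed */
		/* Lost the race: drop the first lock so the other side
		 * can finish, then restart, mirroring the goto restart
		 * above. */
		pthread_mutex_unlock(&from->lock);
		sched_yield();
	}
	from->balance -= amount;
	to->balance += amount;
	pthread_mutex_unlock(&to->lock);
	pthread_mutex_unlock(&from->lock);
}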
void __spin_pdr_lock(struct spin_pdr_lock *pdr_lock)
{
	uint32_t vcoreid = vcore_id();
	uint32_t ensure_tgt;

	while (spinlock_trylock(&pdr_lock->spinlock)) {
		ensure_tgt = pdr_lock->lockholder;
		/* ensure will make sure *every* vcore runs if you pass it
		 * yourself. */
		if (ensure_tgt == SPINPDR_VCOREID_UNKNOWN)
			ensure_tgt = vcoreid;
		ensure_vcore_runs(ensure_tgt);
		cpu_relax();
	}
	pdr_lock->lockholder = vcoreid;
}
int mutex_trylock(mutex_t *mtx)
{
	if (unlikely(!mtx))
		return E_ERR;

	if (mtx->owner != sys_getpid()) {
		if (spinlock_trylock(&mtx->lock) == E_ERR)
			return E_ERR;
		mtx->owner = sys_getpid();
		mtx->recursion = 0;
	} else if (mtx->kind == MTX_KIND_ERRORCHECK) {
		return E_ERR;
	}

	if (mtx->kind == MTX_KIND_RECURSIVE)
		mtx->recursion += 1;

	return E_OK;
}
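A usage sketch for the recursive case above. mutex_unlock() is assumed to exist and to release one level of recursion; with_mutex is a hypothetical caller.

void with_mutex(mutex_t *mtx)
{
	if (mutex_trylock(mtx) != E_OK)
		return;			/* held by another task: skip */
	/* The owner may re-enter: a second trylock succeeds for
	 * MTX_KIND_RECURSIVE (bumping mtx->recursion), while
	 * MTX_KIND_ERRORCHECK would return E_ERR here. */
	if (mutex_trylock(mtx) == E_OK)
		mutex_unlock(mtx);	/* balance the inner acquisition */
	mutex_unlock(mtx);
}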
/*
 * pthread_spin_trylock - try to lock a spin lock object
 *
 * SEE pthread_spin_lock() for more information.
 */
int pthread_spin_trylock(pthread_spinlock_t *lock)
{
	ipl_t flags;

	flags = interrupts_disable();
	if (spinlock_trylock(&lock->lock)) {
		/* Lock is busy: restore the interrupt state and fail. */
		interrupts_restore(flags);
		return EBUSY;
	} else {
		/* Lock acquired: keep interrupts off until unlock. */
		lock->flags = flags;
		return OK;
	}
}
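For reference, a standard POSIX caller of this function checks for EBUSY rather than the local OK constant; poll_counter is a hypothetical example.

#include <pthread.h>
#include <errno.h>

/* Skip the update when the lock is contended instead of spinning. */
void poll_counter(pthread_spinlock_t *lock, long *counter)
{
	if (pthread_spin_trylock(lock) == EBUSY)
		return;			/* busy: try again on the next poll */
	(*counter)++;
	pthread_spin_unlock(lock);
}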
static inline void parwork_lock(parwork_t *w)
{
	/* Busy-wait until the lock is acquired. */
	while (!spinlock_trylock(&w->lock))
		;
}
int net_trylock()
{
	int res;

	/* This spinlock_trylock variant returns its result through res. */
	spinlock_trylock(&net_spinlock, res);
	return res;
}
void spinlock_lock(spinlock_t *lock)
{
	/* spinlock_trylock() returns nonzero while the lock is contended. */
	while (spinlock_trylock(lock))
		cpu_relax();
}
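The same construction in a self-contained form, using C11 atomics. Note the opposite polarity: this trylock returns true on success, while the one above returns nonzero on failure. myspin_t is a hypothetical type, and the x86 pause builtin stands in for cpu_relax().

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_flag held;
} myspin_t;

static bool myspin_trylock(myspin_t *l)
{
	/* test_and_set returns the previous value: false means we got it. */
	return !atomic_flag_test_and_set_explicit(&l->held,
	    memory_order_acquire);
}

static void myspin_lock(myspin_t *l)
{
	while (!myspin_trylock(l))
		__builtin_ia32_pause();	/* GCC/Clang x86 builtin */
}

static void myspin_unlock(myspin_t *l)
{
	atomic_flag_clear_explicit(&l->held, memory_order_release);
}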