/* Deliver an IPC message from 'sender' into 'receiver''s IPC ring buffer,
 * waking the receiver if it is blocked waiting for a matching message.
 * The receiver handle is taken from ipc->process; both handle and magic are
 * validated first. On ring-buffer overflow the sender gets ERROR_OVERFLOW.
 * NOTE(review): assumes this runs with interrupts enabled on entry — the
 * disable/enable pair below is not nesting-aware; confirm against callers. */
void kipc_post_process(IPC* ipc, KPROCESS* sender)
{
    KPROCESS* receiver = (KPROCESS*)ipc->process;
    int index = -1;           /* ring-buffer slot; -1 means "no slot reserved" */
    bool wake = false;        /* true if receiver was blocked on a matching wait */
    IPC* cur;
    CHECK_HANDLE(receiver, sizeof(KPROCESS));
    CHECK_MAGIC(receiver, MAGIC_PROCESS);
    if (ipc->cmd & HAL_IO_FLAG)
    {
        /* IO-carrying IPC: param2 is an IO* whose kernel-side KIO must be
         * validated and transferred to the receiver before delivery. */
        KIO* kio = (KIO*)(((IO*)ipc->param2)->kio);
        CHECK_HANDLE(kio, sizeof(KIO));
        CHECK_MAGIC(kio, MAGIC_KIO);
        if (!kio_send(kio, receiver))
            return;           /* ownership transfer refused — drop the message */
    }
    disable_interrupts();
    /* Inside the interrupt-off window: atomically (1) decide whether the
     * receiver's pending wait matches this message (sender, cmd and param1
     * each match exactly or via the ANY_* wildcard), clearing the wait if so,
     * and (2) reserve a ring-buffer slot. Both must happen together so a
     * concurrent post cannot steal the slot or the wakeup. */
    if ((wake = ((receiver->kipc.wait_process == sender || receiver->kipc.wait_process == (KPROCESS*)ANY_HANDLE) && (receiver->kipc.cmd == ipc->cmd || receiver->kipc.cmd == ANY_CMD) && ((receiver->kipc.param1 == ipc->param1) || (receiver->kipc.param1 == ANY_HANDLE)))) == true)
    {
        receiver->kipc.wait_process = (KPROCESS*)INVALID_HANDLE;
    }
    if (!rb_is_full(&receiver->process->ipcs))
        index = rb_put(&receiver->process->ipcs);
    enable_interrupts();
    if (index >= 0)
    {
        /* Slot reserved: copy the message payload into the receiver's ring. */
        cur = KIPC_ITEM(receiver, index);
        cur->cmd = ipc->cmd;
        cur->param1 = ipc->param1;
        cur->param2 = ipc->param2;
        cur->param3 = ipc->param3;
        cur->process = (HANDLE)sender;
        /* Receiver was already blocked waiting for this message — wake it up. */
        if (wake)
            kprocess_wakeup(receiver);
    }
    else
    {
        /* Ring full: the message is dropped and the SENDER is flagged. */
        kprocess_error(sender, ERROR_OVERFLOW);
#if (KERNEL_IPC_DEBUG)
        printk("Error: receiver %s IPC overflow!\n", kprocess_name(receiver));
        if (sender == (KPROCESS*)KERNEL_HANDLE)
            printk("Sender: kernel\n");
        else
            printk("Sender: %s\n", kprocess_name((KPROCESS*)sender));
        printk("cmd: %#X, p1: %#X, p2: %#X, p3: %#X\n", ipc->cmd, ipc->param1, ipc->param2, ipc->param3);
#if (KERNEL_DEVELOPER_MODE)
        HALT();
#endif
#endif
    }
}
/* Stop a running soft timer identified by handle 't'.
 * Interrupts are masked around the internal stop so the operation cannot
 * race with the timer firing from IRQ context at that very moment. */
void ksystime_soft_timer_stop(HANDLE t)
{
    SOFT_TIMER* soft = (SOFT_TIMER*)t;

    CHECK_MAGIC(soft, MAGIC_TIMER);
    disable_interrupts();
    ksystime_timer_stop_internal(&soft->timer);
    enable_interrupts();
}
/* Destroy a soft timer: clear its magic (so stale handles are caught later)
 * and release its memory. A caller passing INVALID_HANDLE is a no-op. */
void ksystime_soft_timer_destroy(HANDLE t)
{
    SOFT_TIMER* soft;

    if (t == INVALID_HANDLE)
        return;
    soft = (SOFT_TIMER*)t;
    CHECK_MAGIC(soft, MAGIC_TIMER);
    CLEAR_MAGIC(soft);
    kfree(soft);
}
/* Release the semaphore, delegating the real work to the arch-specific
 * __up(). The two preprocessor blocks are pure debug instrumentation:
 * a magic check and an optional trace line identifying the caller. */
void up(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
    CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
    /* trace who released, and the count before the release */
    printk("%s(%d): up(%p) <count=%d> from %p\n", current->comm, current->pid, sem, atomic_read(&sem->count), __builtin_return_address(0));
#endif
    __up(sem);
}
/* Returns 0 if we acquired the semaphore, 1 if it was queued. */ int wtd_down(struct worktodo *wtd, struct semaphore *sem) { #if WAITQUEUE_DEBUG CHECK_MAGIC(sem->__magic); #endif if (atomic_dec_return(&sem->count) < 0) { __wtd_down(sem, wtd); return 1; } else { return 0; } }
/* Validate a trace header word: the magic pattern must match and the
 * low byte must carry the supported endianness/version tag.
 * Returns 0 on success, 1 (with a message on stderr) on failure. */
int verify_trace(__u32 magic)
{
    __u32 version = magic & 0xff;

    if (!CHECK_MAGIC(magic)) {
        fprintf(stderr, "bad trace magic %x\n", magic);
        return 1;
    }
    if (version != ENDIAN_VERSION) {
        fprintf(stderr, "unsupported trace version %x\n", version);
        return 1;
    }
    return 0;
}
/* Detach 'thread' from whichever of the queue's two waiter lists it sits on
 * (push or pull). A thread found on neither list is an internal invariant
 * violation and trips the ASSERT. Callable from supervisor or IRQ context. */
void svc_queue_lock_release(QUEUE* queue, THREAD* thread)
{
    DLIST** waiters;

    CHECK_CONTEXT(SUPERVISOR_CONTEXT | IRQ_CONTEXT);
    CHECK_MAGIC(queue, MAGIC_QUEUE, QUEUE_NAME);
    if (is_dlist_contains((DLIST**)&queue->push_waiters, (DLIST*)thread))
        waiters = (DLIST**)&queue->push_waiters;
    else if (is_dlist_contains((DLIST**)&queue->pull_waiters, (DLIST*)thread))
        waiters = (DLIST**)&queue->pull_waiters;
    else
    {
        /* not waiting on this queue at all — corrupted bookkeeping */
        ASSERT(false);
        return;
    }
    dlist_remove(waiters, (DLIST*)thread);
}
/* Arm a soft timer for the given interval. Starting a timer that is
 * already running is an error (ERROR_ALREADY_CONFIGURED) rather than a
 * restart. The active flag is sampled with interrupts masked so the
 * snapshot cannot tear against the IRQ-side timer machinery. */
void ksystime_soft_timer_start(HANDLE t, SYSTIME* time)
{
    SOFT_TIMER* soft = (SOFT_TIMER*)t;
    bool running;

    CHECK_MAGIC(soft, MAGIC_TIMER);
    disable_interrupts();
    running = soft->timer.active;
    enable_interrupts();
    if (running)
    {
        error(ERROR_ALREADY_CONFIGURED);
        return;
    }
    ksystime_timer_start_internal(&soft->timer, time);
}
/* Publish a filled buffer to the queue. If a consumer thread is blocked
 * pulling, hand the buffer straight to it (patched in as the return value
 * of its pending syscall) and wake it; otherwise append the buffer to the
 * filled-blocks list for a later pull. */
static inline void svc_queue_push(QUEUE* queue, void* buf)
{
    THREAD* waiter;

    CHECK_MAGIC(queue, MAGIC_QUEUE, QUEUE_NAME);
    waiter = queue->pull_waiters;
    if (waiter == NULL)
    {
        /* nobody waiting — store the block (undo the alignment offset
         * so the raw block address goes on the list) */
        dlist_add_tail(&queue->filled_blocks, (DLIST*)((unsigned int)buf - queue->align_offset));
        return;
    }
    dlist_remove_head((DLIST**)&queue->pull_waiters);
    /* deliver buf as the waiter's syscall return value */
    thread_patch_context(waiter, (unsigned int)buf);
    svc_thread_wakeup(waiter);
}
/* Non-blocking semaphore acquire. Delegates to the arch-specific
 * __down_trylock(); returns its result unchanged (non-zero means the
 * semaphore could not be taken). Debug blocks are instrumentation only. */
int down_trylock(struct semaphore *sem)
{
    int failed;

#ifdef WAITQUEUE_DEBUG
    CHECK_MAGIC(sem->__magic);
#endif
    failed = __down_trylock(sem);
#ifdef CONFIG_DEBUG_SEMAPHORE
    printk("%s(%d): down_trylock %s from %p\n", current->comm, current->pid, failed ? "failed" : "acquired", __builtin_return_address(0));
#endif
    return failed;
}
/* Take a free buffer from the queue, or block the current thread until one
 * becomes available (or 'time' expires). Returns the buffer pointer when a
 * free block exists; returns NULL on the blocking path — presumably the
 * actual buffer is delivered to the sleeper later via a patched context
 * when a block is released (TODO confirm against the release path). */
static inline void* svc_queue_allocate_buffer(QUEUE* queue, TIME* time)
{
    CHECK_MAGIC(queue, MAGIC_QUEUE, QUEUE_NAME);
    void* res = NULL;
    THREAD* thread = svc_thread_get_current();
    if (queue->free_blocks)
    {
        /* free block available: return its payload address (raw block
         * address plus the queue's alignment offset) and unlink it */
        res = (void*)((unsigned int)(queue->free_blocks) + queue->align_offset);
        dlist_remove_head(&queue->free_blocks);
    }
    else
    {
        //first - remove from active list
        //if called from IRQ context, thread_private.c will raise error
        /* NOTE(review): sleep is initiated BEFORE the thread is appended to
         * push_waiters — ordering looks intentional (the actual context
         * switch presumably happens on svc exit), but verify. */
        svc_thread_sleep(time, THREAD_SYNC_QUEUE, queue);
        dlist_add_tail((DLIST**)&queue->push_waiters, (DLIST*)thread);
    }
    return res;
}
/* The queue is "full" when no free blocks remain for producers. */
static inline bool svc_queue_is_full(QUEUE* queue)
{
    CHECK_MAGIC(queue, MAGIC_QUEUE, QUEUE_NAME);
    return (queue->free_blocks == NULL);
}