/*
 * Read one byte from any source: the software keyboard queue, the
 * software aux (mouse) queue, or the controller itself.
 *
 * Queued bytes are drained first (kbd before aux); only when both
 * queues are empty do we block on the hardware.  Returns the byte
 * read, or -1 if the controller produced no data before the wait
 * timed out.
 */
int read_controller_data(KBDC p)
{
	/* Previously buffered keyboard byte, if any. */
	if (availq(&kbdcp(p)->kbd))
		return removeq(&kbdcp(p)->kbd);
	/* Previously buffered aux-device byte, if any. */
	if (availq(&kbdcp(p)->aux))
		return removeq(&kbdcp(p)->aux);
	if (!wait_for_data(kbdcp(p)))
		return -1;	/* timeout */
	return read_data(kbdcp(p));
}
/**
 * Destroy the @a xnselect structure associated with a file descriptor.
 *
 * Any binding with a @a xnselector block is destroyed.  Selectors whose
 * pending set changes as a result are woken up, and a rescheduling is
 * requested once at the end if any wakeup occurred.
 *
 * @param select_block pointer to the @a xnselect structure associated
 * with a file descriptor
 */
void xnselect_destroy(struct xnselect *select_block)
{
	xnholder_t *holder;
	int resched = 0;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	while ((holder = getq(&select_block->bindings))) {
		struct xnselect_binding *binding;
		struct xnselector *selector;

		binding = link2binding(holder, link);
		selector = binding->selector;

		/* The fd is going away: it can no longer be "expected". */
		__FD_CLR__(binding->bit_index,
			   &selector->fds[binding->type].expected);
		if (!__FD_ISSET__(binding->bit_index,
				  &selector->fds[binding->type].pending)) {
			/*
			 * Mark the fd pending so a blocked select() returns
			 * and notices the descriptor's destruction.
			 */
			__FD_SET__(binding->bit_index,
				   &selector->fds[binding->type].pending);
			if (xnselect_wakeup(selector))
				resched = 1;
		}
		removeq(&selector->bindings, &binding->slink);

		/*
		 * xnfree() must not be called with the nklock held, so
		 * drop it around the free and retake it before the next
		 * iteration; getq() re-reads the queue head safely.
		 */
		xnlock_put_irqrestore(&nklock, s);
		xnfree(binding);
		xnlock_get_irqsave(&nklock, s);
	}
	if (resched)
		xnpod_schedule();
	xnlock_put_irqrestore(&nklock, s);
}
/*
 * Thread-deletion hook for the pSOS skin: tears down the pSOS task
 * object wrapped around a dying nucleus thread (registry entry,
 * pending alarms, event group, display, then the task memory itself).
 */
static void psostask_delete_hook(xnthread_t *thread)
{
	/* The scheduler is locked while hooks are running */
	psostask_t *task;
	psostm_t *tm;

	/* Ignore threads that do not belong to this skin. */
	if (xnthread_get_magic(thread) != PSOS_SKIN_MAGIC)
		return;

#ifdef CONFIG_XENO_OPT_REGISTRY
	if (xnthread_handle(thread) != XN_NO_HANDLE)
		xnregistry_remove(xnthread_handle(thread));
#endif /* CONFIG_XENO_OPT_REGISTRY */

	task = thread2psostask(thread);

	removeq(&psostaskq, &task->link);

	/*
	 * Destroy every alarm still attached to the task.
	 * NOTE(review): this calls getgq(), not the usual getq() queue
	 * primitive — confirm getgq() exists in this tree and is not a
	 * typo for getq().
	 */
	while ((tm = (psostm_t *)getgq(&task->alarmq)) != NULL)
		tm_destroy_internal(tm);

	taskev_destroy(&task->evgroup);
	xnarch_delete_display(&task->threadbase);
	psos_mark_deleted(task);

	/* Defer the actual free until it is safe to release the memory. */
	xnheap_schedule_free(&kheap, task, &task->link);
}
/*
 * Deferred destruction loop for selectors queued on xnselectors: for
 * each selector, unlink and free all of its fd bindings, destroy its
 * synchronization object, then free the selector itself.
 *
 * @param cookie unused callback argument.
 */
static void xnselector_destroy_loop(void *cookie)
{
	struct xnselector *selector;
	xnholder_t *holder;
	int resched;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	while ((holder = getq(&xnselectors))) {
		selector = container_of(holder, struct xnselector, destroy_link);
		/* Note: holder is reused for the inner bindings queue. */
		while ((holder = getq(&selector->bindings))) {
			struct xnselect_binding *binding;
			struct xnselect *fd;

			binding = link2binding(holder, slink);
			fd = binding->fd;
			removeq(&fd->bindings, &binding->link);

			/* xnfree() must run unlocked; drop and retake. */
			xnlock_put_irqrestore(&nklock, s);
			xnfree(binding);
			xnlock_get_irqsave(&nklock, s);
		}
		resched =
			xnsynch_destroy(&selector->synchbase) == XNSYNCH_RESCHED;

		xnlock_put_irqrestore(&nklock, s);
		xnfree(selector);
		/* Waiters may have been flushed by xnsynch_destroy(). */
		if (resched)
			xnpod_schedule();
		xnlock_get_irqsave(&nklock, s);
	}
	xnlock_put_irqrestore(&nklock, s);
}
/*wakeup:休眠状態のタスクを実行可能状態にする*/ void wakeup(int ch){ TASK_ID_TYPE wake_task; wake_task = removeq(&semaphore[ch].task_list); //printf("wakeup ::%d\n",wake_task); addq(&ready,wake_task); return ; }
/* read one byte from the keyboard, but return immediately (-1) if
 * no data is waiting
 */
int read_kbd_data_no_wait(KBDC p)
{
	int f;

#if KBDIO_DEBUG >= 2
	/* Periodically dump per-queue statistics. */
	if (++call > 2000) {
		call = 0;
		log(LOG_DEBUG, "kbdc: kbd q: %d calls, max %d chars, "
		    "aux q: %d calls, max %d chars\n",
		    kbdcp(p)->kbd.call_count, kbdcp(p)->kbd.max_qcount,
		    kbdcp(p)->aux.call_count, kbdcp(p)->aux.max_qcount);
	}
#endif

	/* Buffered keyboard byte, if any. */
	if (availq(&kbdcp(p)->kbd))
		return removeq(&kbdcp(p)->kbd);
	f = read_status(kbdcp(p)) & KBDS_BUFFER_FULL;
	if (f == KBDS_AUX_BUFFER_FULL) {
		/*
		 * The pending byte belongs to the aux device: stash it
		 * in the aux queue and look at the status again.
		 */
		DELAY(KBDD_DELAYTIME);
		addq(&kbdcp(p)->aux, read_data(kbdcp(p)));
		f = read_status(kbdcp(p)) & KBDS_BUFFER_FULL;
	}
	if (f == KBDS_KBD_BUFFER_FULL) {
		DELAY(KBDD_DELAYTIME);
		return read_data(kbdcp(p));
	}
	return -1;	/* no data */
}
/*
 * Final stage of RT_HEAP deletion, run once the underlying xnheap has
 * been released: unlink the descriptor, drop its registry entry, flush
 * waiters, and free the wrapper for userland-created heaps.
 */
static void __heap_post_release(struct xnheap *h)
{
	RT_HEAP *heap = container_of(h, RT_HEAP, heap_base);
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	removeq(heap->rqueue, &heap->rlink);

	if (heap->handle)
		xnregistry_remove(heap->handle);

	if (xnsynch_destroy(&heap->synch_base) == XNSYNCH_RESCHED)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();

	xnlock_put_irqrestore(&nklock, s);

#ifndef __XENO_SIM__
	/* cpid != 0 means userland-created: the descriptor is ours to free. */
	if (heap->cpid)
		xnfree(heap);
#endif
}
/*
 * Must be called nklock locked, irq off.
 *
 * Destroy a POSIX shared-memory object: unlink it, release its heap
 * storage, and (when @a force is non-zero) free all remaining mapping
 * descriptors.  The nklock is released on entry and re-acquired on
 * exit so that blocking operations (semaphore, heap teardown) can run;
 * the caller's spl state is deliberately discarded (`ignored`).
 */
static void pse51_shm_destroy(pse51_shm_t * shm, int force)
{
	spl_t ignored;

	removeq(&pse51_shmq, &shm->link);
	/* Drop the caller's nklock so we may sleep below. */
	xnlock_clear_irqon(&nklock);

	down(&shm->maplock);

	if (shm->addr) {
		xnheap_free(&shm->heapbase, shm->addr);
		xnheap_destroy_mapped(&shm->heapbase, NULL, NULL);
		shm->addr = NULL;
		shm->size = 0;
	}

	if (force) {
		xnholder_t *holder;

		/*
		 * Free every leftover mapping; maplock is released
		 * around each xnfree() and getq() restarts from the
		 * current queue head.
		 */
		while ((holder = getq(&shm->mappings))) {
			up(&shm->maplock);
			xnfree(link2map(holder));
			down(&shm->maplock);
		}
	}

	up(&shm->maplock);
	/* Re-acquire nklock for the caller; the saved state is unused. */
	xnlock_get_irqsave(&nklock, ignored);
}
/*
 * Drop one waiter accounted under @a mask for this pipe state; when
 * the last waiter goes away, unlink the state from the global sleep
 * queue and clear the mask bit.
 */
static inline void xnpipe_dequeue_wait(struct xnpipe_state *state, int mask)
{
	/* Nothing to do unless this wait condition is armed. */
	if (!testbits(state->status, mask))
		return;

	/* Only the final waiter performs the unlink. */
	if (--state->wcount != 0)
		return;

	removeq(&xnpipe_sleepq, &state->slink);
	__clrbits(state->status, mask);
}
/*
 * Tear down a VRTX mutex: destroy its synch object (flushing any
 * waiters), unmap its id, unlink it, drop its registry entry and free
 * it.  Returns the xnsynch_destroy() status so the caller can decide
 * whether a rescheduling is needed.
 */
int mx_destroy_internal(vrtxmx_t *mx)
{
	int s = xnsynch_destroy(&mx->synchbase);
	xnmap_remove(vrtx_mx_idmap, mx->mid);
	removeq(&vrtx_mx_q, &mx->link);
	xnregistry_remove(mx->handle);
	xnfree(mx);
	/* NOTE(review): presumably called with nklock held — confirm. */
	return s;
}
/*
 * sched: choose the next task to run from the ready queue.
 * (Original comment in Japanese: "decide which ready task becomes
 * the running task next".)
 *
 * Pops the head of the ready queue into the global `next_task`.  If
 * nothing is ready (NULLTASKID), every task has finished, so halt by
 * spinning forever.
 */
void sched(void)
{
	next_task = removeq(&ready);

	if (next_task == NULLTASKID)
		for (;;)
			;	/* no runnable task left: stop here */
}
/*
 * Remove every waiter accounted under @a mask for this pipe state:
 * reset the waiter count, unlink the state from the global sleep
 * queue and clear the mask bit.  No-op when the condition is not
 * armed or no waiter is registered.
 */
static inline void xnpipe_dequeue_all(struct xnpipe_state *state, int mask)
{
	if (!testbits(state->status, mask) || state->wcount == 0)
		return;

	state->wcount = 0;
	removeq(&xnpipe_sleepq, &state->slink);
	__clrbits(state->status, mask);
}
/*
 * Drop one reference on the shared-memory block named @a name; when
 * the last reference goes away, release its backing heap and the
 * descriptor itself.
 *
 * Returns the freed block's size on success, 0 if no block was
 * released, or a negative xnheap error code if heap destruction
 * failed (in which case the block stays queued).
 */
static int _shm_free(unsigned long name)
{
	int ret = 0;
	xnholder_t *holder;
	xnshm_a_t *p;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		if (p->name == name && --p->ref == 0) {
#ifdef CONFIG_XENO_OPT_REGISTRY
			if (p->handle)
				xnregistry_remove(p->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */
			if (p->heap == &kheap)
				xnheap_free(&kheap, p->chunk);
			else {
				/* Should release lock here?
				 * Can destroy_mapped suspend ?
				 * [YES!]
				 */
				/* NOTE(review): the original author flags that
				 * this may sleep while nklock is held. */
#ifdef CONFIG_XENO_OPT_PERVASIVE
				ret = xnheap_destroy_mapped(p->heap, NULL, NULL);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
				ret = xnheap_destroy(p->heap, &__heap_flush_private, NULL);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
				if (ret)
					goto unlock_and_exit;
				xnheap_free(&kheap, p->heap);
			}
			removeq(&xnshm_allocq, &p->link);
			ret = p->size;
			xnheap_free(&kheap, p);
			break;
		}
		holder = nextq(&xnshm_allocq, holder);
	}

      unlock_and_exit:
	xnlock_put_irqrestore(&nklock, s);

	return ret;
}
/*
 * Tear down a VRTX semaphore: unlink it from the global semaphore
 * queue, drop its id mapping, destroy the underlying synch object
 * (flushing waiters), remove its registry entry and free it.
 *
 * Returns the xnsynch_destroy() status so the caller knows whether
 * a rescheduling is required.
 */
static int sem_destroy_internal(vrtxsem_t *sem)
{
	int ret;

	removeq(&vrtx_sem_q, &sem->link);
	xnmap_remove(vrtx_sem_idmap, sem->semid);
	ret = xnsynch_destroy(&sem->synchbase);
	xnregistry_remove(sem->handle);
	vrtx_mark_deleted(sem);
	xnfree(sem);

	return ret;
}
/*
 * Try to unqueue a message for reading: pop the oldest message off
 * the queue, or return NULL when it is empty.
 */
static inline wind_msg_t *unqueue_msg(wind_msgq_t *queue)
{
	xnholder_t *link = getheadq(&queue->msgq);

	if (link == NULL)
		return NULL;

	removeq(&queue->msgq, link);

	return link2wind_msg(link);
}
/*
 * Destroy a nucleus timer: stop it, mark it killed, detach it from
 * its scheduler slot and (when statistics are enabled) unlink it from
 * its time base's timer queue.
 */
void xntimer_destroy(xntimer_t *timer)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	xntimer_stop(timer);
	__setbits(timer->status, XNTIMER_KILLED);
	timer->sched = NULL;
#ifdef CONFIG_XENO_OPT_STATS
	removeq(&xntimer_base(timer)->timerq, &timer->tblink);
	/* Bump the revision so /proc readers notice the change. */
	xntimer_base(timer)->timerq_rev++;
#endif /* CONFIG_XENO_OPT_STATS */
	/* (fixed: the #endif comment previously named
	 * CONFIG_XENO_OPT_TIMING_PERIODIC, which is not the guard above) */
	xnlock_put_irqrestore(&nklock, s);
}
/*
 * Destroy a POSIX semaphore: unlink it from its kernel-queue set,
 * flush any waiters (rescheduling if some were woken), then release
 * its storage.  Named semaphores are embedded inside a larger
 * container object, so the enclosing structure is what gets freed.
 */
static void sem_destroy_inner(pse51_sem_t * sem, pse51_kqueues_t *q)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	removeq(&q->semq, &sem->link);
	if (xnsynch_destroy(&sem->synchbase) == XNSYNCH_RESCHED)
		xnpod_schedule();
	xnlock_put_irqrestore(&nklock, s);

	/* Free the right allocation unit for named vs anonymous semaphores. */
	xnfree(sem->is_named ? (void *)sem2named_sem(sem) : (void *)sem);
}
/*
 * Tear down a VRTX semaphore: unlink, unmap its id, destroy the synch
 * object (flushing waiters), drop the registry entry when enabled,
 * and free the descriptor.  Returns the xnsynch_destroy() status.
 */
static int sem_destroy_internal(vrtxsem_t *sem)
{
	int s;

	removeq(&vrtx_sem_q, &sem->link);
	xnmap_remove(vrtx_sem_idmap, sem->semid);
	s = xnsynch_destroy(&sem->synchbase);
#ifdef CONFIG_XENO_OPT_REGISTRY
	xnregistry_remove(sem->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */
	vrtx_mark_deleted(sem);
	xnfree(sem);

	/* NOTE(review): presumably called with nklock held — confirm. */
	return s;
}
/*
 * Release a dynamically created time base: destroy its slave machinery
 * and /proc entry, unlink it from the global time-base queue, then
 * free its host memory.  The master time base (nktbase) is static and
 * is never freed.
 */
void xntbase_free(xntbase_t *base)
{
	spl_t s;

	if (base == &nktbase)
		return;

	xntslave_destroy(base2slave(base));
	xntbase_remove_proc(base);

	xnlock_get_irqsave(&nklock, s);
	removeq(&nktimebaseq, &base->link);
	xnlock_put_irqrestore(&nklock, s);

	xnarch_free_host_mem(base, sizeof(*base));
}
/*
 * Thread-deletion hook for the RTAI skin: unlink and mark the RT_TASK
 * wrapped around the dying nucleus thread, then schedule its memory
 * for release when it is a shadow (userland-mapped) task.
 */
static void __task_delete_hook(xnthread_t *thread)
{
	RT_TASK *task;

	/* Ignore threads that do not belong to this skin. */
	if (xnthread_get_magic(thread) != RTAI_SKIN_MAGIC)
		return;

	task = thread2rtask(thread);

	removeq(&__rtai_task_q, &task->link);

	rtai_mark_deleted(task);

	/*
	 * Only shadow tasks are freed here; non-shadow task memory is
	 * presumably owned by the deleting caller — verify against the
	 * task creation path.
	 */
	if (xnthread_test_state(&task->thread_base, XNSHADOW))
		xnheap_schedule_free(&kheap, task, &task->link);
}
/*
 * Drop one reference on the shared-memory block named @a name; when
 * the last reference goes away, unlink it, release its backing heap
 * and free the descriptor.
 *
 * Returns the freed block's size, or 0 if no block was released.
 * Note the block is unlinked and the nklock dropped *before* the heap
 * teardown, so the potentially-sleeping destroy calls run unlocked.
 */
static int _shm_free(unsigned long name)
{
	xnholder_t *holder;
	xnshm_a_t *p;
	int ret;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		if (p->name == name && --p->ref == 0) {
			/* Unlink while still locked, then drop the lock. */
			removeq(&xnshm_allocq, &p->link);
			if (p->handle)
				xnregistry_remove(p->handle);
			xnlock_put_irqrestore(&nklock, s);
			if (p->heap == &kheap)
				xnheap_free(&kheap, p->chunk);
			else {
#ifdef CONFIG_XENO_OPT_PERVASIVE
				xnheap_destroy_mapped(p->heap, __heap_flush_shared, NULL);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
				xnheap_destroy(p->heap, &__heap_flush_private, NULL);
				xnheap_free(&kheap, p->heap);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
			}
			ret = p->size;
			xnheap_free(&kheap, p);
			return ret;
		}
		holder = nextq(&xnshm_allocq, holder);
	}

	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
/*
 * Remove holder @a h from multi-level priority queue @a q.  When the
 * priority bucket it lived in becomes empty, clear the matching bit in
 * the two-level occupancy bitmap (lomap word, then himap summary bit
 * once the whole word drains).
 */
void removemlq(struct xnsched_mlq *q, struct xnpholder *h)
{
	int prio = h->prio;
	struct xnqueue *bucket = &q->queue[prio];

	q->elems--;

	removeq(bucket, &h->plink);

	if (emptyq_p(bucket)) {
		int word = prio / BITS_PER_LONG;
		int bit = prio % BITS_PER_LONG;

		__clrbits(q->lomap[word], 1UL << bit);
		if (q->lomap[word] == 0)
			__clrbits(q->himap, 1UL << word);
	}
}
/*
 * Final stage of RT_HEAP deletion: unlink the descriptor, drop its
 * registry entry when enabled, and flush any waiters.
 */
static void __heap_post_release(struct xnheap *h)	/* nklock held, IRQs off */
{
	RT_HEAP *heap = container_of(h, RT_HEAP, heap_base);

	removeq(heap->rqueue, &heap->rlink);

#ifdef CONFIG_XENO_OPT_REGISTRY
	if (heap->handle)
		xnregistry_remove(heap->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	if (xnsynch_destroy(&heap->synch_base) == XNSYNCH_RESCHED)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();
}
/* read one byte from the aux device, but return immediately (-1) if
 * no data is waiting
 */
int read_aux_data_no_wait(KBDC p)
{
	int f;

	/* Buffered aux byte, if any. */
	if (availq(&kbdcp(p)->aux))
		return removeq(&kbdcp(p)->aux);
	f = read_status(kbdcp(p)) & KBDS_BUFFER_FULL;
	if (f == KBDS_KBD_BUFFER_FULL) {
		/*
		 * The pending byte belongs to the keyboard: stash it in
		 * the kbd queue and look at the status again.
		 */
		DELAY(KBDD_DELAYTIME);
		addq(&kbdcp(p)->kbd, read_data(kbdcp(p)));
		f = read_status(kbdcp(p)) & KBDS_BUFFER_FULL;
	}
	if (f == KBDS_AUX_BUFFER_FULL) {
		DELAY(KBDD_DELAYTIME);
		return read_data(kbdcp(p));
	}
	return -1;	/* no data */
}
/* read one byte from the keyboard; block until data arrives or the
 * wait times out (-1 on timeout)
 */
int read_kbd_data(KBDC p)
{
#if KBDIO_DEBUG >= 2
	/* Periodically dump per-queue statistics. */
	if (++call > 2000) {
		call = 0;
		log(LOG_DEBUG, "kbdc: kbd q: %d calls, max %d chars, "
		    "aux q: %d calls, max %d chars\n",
		    kbdcp(p)->kbd.call_count, kbdcp(p)->kbd.max_qcount,
		    kbdcp(p)->aux.call_count, kbdcp(p)->aux.max_qcount);
	}
#endif

	/* Buffered keyboard byte, if any. */
	if (availq(&kbdcp(p)->kbd))
		return removeq(&kbdcp(p)->kbd);
	if (!wait_for_kbd_data(kbdcp(p)))
		return -1;	/* timeout */
	return read_data(kbdcp(p));
}
/*
 * Delete an interrupt object: unlink it, flush waiters (pervasive
 * builds only), drop its registry entry, then destroy the underlying
 * nucleus interrupt outside the lock.
 *
 * Returns 0 on success, -EPERM when called from asynchronous context,
 * a validation error for a stale handle, or the xnintr_destroy()
 * status.
 */
int rt_intr_delete(RT_INTR *intr)
{
	int err = 0, rc = XNSYNCH_DONE;
	spl_t s;

	/* Deletion is not allowed from interrupt/asynchronous context. */
	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	intr = xeno_h2obj_validate(intr, XENO_INTR_MAGIC, RT_INTR);

	if (!intr) {
		err = xeno_handle_error(intr, XENO_INTR_MAGIC, RT_INTR);
		xnlock_put_irqrestore(&nklock, s);
		return err;
	}

	removeq(intr->rqueue, &intr->rlink);

#ifdef CONFIG_XENO_OPT_PERVASIVE
	rc = xnsynch_destroy(&intr->synch_base);
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	if (intr->handle)
		xnregistry_remove(intr->handle);

	xeno_mark_deleted(intr);

	xnlock_put_irqrestore(&nklock, s);

	/* May not be called with the nklock held. */
	err = xnintr_destroy(&intr->intr_base);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of
		   the deletion: reschedule now. */
		xnpod_schedule();

	return err;
}
/*
 * release() handler for the user-side pipe device: detach all waiters,
 * flush any kernel-side sleepers, notify the input handler of the
 * disconnect, tear down async (SIGIO) registration, then clean up the
 * user connection state.
 */
static int xnpipe_release(struct inode *inode, struct file *file)
{
	struct xnpipe_state *state = file->private_data;
	spl_t s;

	xnlock_get_irqsave(&nklock, s);

	xnpipe_dequeue_all(state, XNPIPE_USER_WREAD);
	xnpipe_dequeue_all(state, XNPIPE_USER_WSYNC);

	if (testbits(state->status, XNPIPE_KERN_CONN)) {
		/* Unblock waiters. */
		if (xnsynch_nsleepers(&state->synchbase) > 0) {
			xnsynch_flush(&state->synchbase, XNRMID);
			xnpod_schedule();
		}
	}

	/* Tell the kernel side its reader has gone away. */
	if (state->ops.input)
		state->ops.input(NULL, -EPIPE, state->xstate);

	if (state->asyncq) {	/* Clear the async queue */
		removeq(&xnpipe_asyncq, &state->alink);
		__clrbits(state->status, XNPIPE_USER_SIGIO);
		/* fasync_helper() may sleep: drop the lock around it. */
		xnlock_put_irqrestore(&nklock, s);
		fasync_helper(-1, file, 0, &state->asyncq);
		xnlock_get_irqsave(&nklock, s);
	}

	xnpipe_cleanup_user_conn(state, s);
	/*
	 * The extra state may not be available from now on, if
	 * xnpipe_disconnect() entered lingering close before we got
	 * there; so calling xnpipe_cleanup_user_conn() should be the
	 * last thing we do.
	 */
	xnlock_put_irqrestore(&nklock, s);

	return 0;
}
/*
 * Delete an event-flag group: unlink it, flush any waiters, drop its
 * registry entry when enabled, and reschedule if tasks were woken.
 *
 * Returns 0 on success, -EPERM when called from asynchronous context,
 * or a validation error for a stale handle.
 */
int rt_event_delete(RT_EVENT *event)
{
	int err = 0, rc;
	spl_t s;

	/* Deletion is not allowed from interrupt/asynchronous context. */
	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	event = xeno_h2obj_validate(event, XENO_EVENT_MAGIC, RT_EVENT);

	if (!event) {
		err = xeno_handle_error(event, XENO_EVENT_MAGIC, RT_EVENT);
		goto unlock_and_exit;
	}

	removeq(event->rqueue, &event->rlink);

	rc = xnsynch_destroy(&event->synch_base);

#ifdef CONFIG_XENO_OPT_REGISTRY
	if (event->handle)
		xnregistry_remove(event->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

	xeno_mark_deleted(event);

	if (rc == XNSYNCH_RESCHED)
		/* Some task has been woken up as a result of the deletion:
		   reschedule now. */
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return err;
}
/*
 * Module-cleanup garbage collector for leaked shared-memory blocks.
 *
 * NOTE: the whole body is compiled out (#if 0) — the author left it
 * disabled because of an unresolved locking problem.  Also note the
 * disabled loop would advance via nextq() on a holder embedded in the
 * just-freed block; fix that if this code is ever re-enabled.
 */
void __rtai_shm_pkg_cleanup(void)
{
#if 0
	xnholder_t *holder;
	xnshm_a_t *p;
	char szName[6];

	// Garbage collector : to be added : lock problem
	holder = getheadq(&xnshm_allocq);

	while (holder != NULL) {
		p = link2shma(holder);

		if (p) {
			num2nam(p->name, szName);
			printk
			    ("[RTAI -SHM] Cleanup of unfreed memory %s( %d ref.)\n",
			     szName, p->ref);
			if (p->heap == &kheap)
				xnheap_free(&kheap, p->chunk);
			else {
				/* FIXME: MUST release lock here. */
#ifdef CONFIG_XENO_OPT_PERVASIVE
				xnheap_destroy_mapped(p->heap, NULL, NULL);
#else /* !CONFIG_XENO_OPT_PERVASIVE */
				xnheap_destroy(p->heap, &__heap_flush_private, NULL);
#endif /* !CONFIG_XENO_OPT_PERVASIVE */
				xnheap_free(&kheap, p->heap);
			}
			removeq(&xnshm_allocq, &p->link);
			xnheap_free(&kheap, p);
		}
		holder = nextq(&xnshm_allocq, holder);
	}
#endif
}
/*
 * Delete a buffer object: release its backing storage, unlink it,
 * flush readers and writers from both synch objects, drop its registry
 * entry, and reschedule if any task was woken.
 *
 * Returns 0 on success, -EPERM when called from asynchronous context,
 * or a validation error for a stale handle.
 */
int rt_buffer_delete(RT_BUFFER *bf)
{
	int ret = 0, resched;
	spl_t s;

	/* Deletion is not allowed from interrupt/asynchronous context. */
	if (xnpod_asynch_p())
		return -EPERM;

	xnlock_get_irqsave(&nklock, s);

	bf = xeno_h2obj_validate(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
	if (bf == NULL) {
		ret = xeno_handle_error(bf, XENO_BUFFER_MAGIC, RT_BUFFER);
		goto unlock_and_exit;
	}

	/*
	 * NOTE(review): the buffer memory is released while nklock is
	 * held with IRQs off — confirm xnarch_free_host_mem() is safe
	 * in that context in this tree.
	 */
	xnarch_free_host_mem(bf->bufmem, bf->bufsz);

	removeq(bf->rqueue, &bf->rlink);

	resched = xnsynch_destroy(&bf->isynch_base) == XNSYNCH_RESCHED;
	resched += xnsynch_destroy(&bf->osynch_base) == XNSYNCH_RESCHED;

	if (bf->handle)
		xnregistry_remove(bf->handle);

	xeno_mark_deleted(bf);

	if (resched)
		/*
		 * Some task has been woken up as a result of the
		 * deletion: reschedule now.
		 */
		xnpod_schedule();

      unlock_and_exit:

	xnlock_put_irqrestore(&nklock, s);

	return ret;
}