/*
 * mbxovrwrput - overwriting put into a mailbox ring buffer.
 *
 * Copies msg_size bytes from *msg into mbx, discarding the oldest buffered
 * data whenever there is not enough free room, so the newest data always
 * wins.  @space selects a kernel-space source (memcpy) versus a user-space
 * source (rt_copy_from_user).  *msg is advanced past the bytes consumed.
 * Always returns 0.
 */
static int mbxovrwrput(MBX *mbx, char **msg, int msg_size, int space)
{
	unsigned long flags;
	int tocpy, n;

	/* A message larger than the whole mailbox can only keep its last
	 * mbx->size bytes anyway: skip the leading excess up front. */
	if ((n = msg_size - mbx->size) > 0) {
		*msg += n;
		msg_size -= n;
	}
	while (msg_size > 0) {
		if (mbx->frbs) {
			/* Copy into the free region, clipped both at the
			 * physical end of the buffer (the ring may wrap at
			 * lbyte) and at the free byte count. */
			if ((tocpy = mbx->size - mbx->lbyte) > msg_size) {
				tocpy = msg_size;
			}
			if (tocpy > mbx->frbs) {
				tocpy = mbx->frbs;
			}
			if (space) {
				memcpy(mbx->bufadr + mbx->lbyte, *msg, tocpy);
			} else {
				rt_copy_from_user(mbx->bufadr + mbx->lbyte, *msg, tocpy);
			}
			/* frbs/avbs are shared with concurrent readers:
			 * update them under the mailbox spinlock. */
			flags = rt_spin_lock_irqsave(&(mbx->lock));
			mbx->frbs -= tocpy;
			mbx->avbs += tocpy;
			rt_spin_unlock_irqrestore(flags, &(mbx->lock));
			msg_size -= tocpy;
			*msg += tocpy;
			mbx->lbyte = MOD_SIZE(mbx->lbyte + tocpy);
		}
		if (msg_size) {
			/* Still data left but no free space: reclaim room by
			 * consuming (dropping) the oldest available bytes at
			 * fbyte until the remainder fits. */
			while ((n = msg_size - mbx->frbs) > 0) {
				if ((tocpy = mbx->size - mbx->fbyte) > n) {
					tocpy = n;
				}
				if (tocpy > mbx->avbs) {
					tocpy = mbx->avbs;
				}
				flags = rt_spin_lock_irqsave(&(mbx->lock));
				mbx->frbs += tocpy;
				mbx->avbs -= tocpy;
				rt_spin_unlock_irqrestore(flags, &(mbx->lock));
				mbx->fbyte = MOD_SIZE(mbx->fbyte + tocpy);
			}
		}
	}
	return 0;
}
/*
 * asgn_min_prio - propagate the minimum (i.e. most urgent) priority of all
 * timers queued on this CPU to the per-CPU timers manager task, requeueing
 * the manager in the ready list if needed so the scheduler honours the new
 * priority.
 */
static inline void asgn_min_prio(int cpuid)
{
	// find minimum priority in timers_struct
	RT_TASK *timer_manager;
	struct rt_tasklet_struct *timer, *timerl;
	spinlock_t *lock;
	unsigned long flags;
	int priority;

	/* Seed with the first list entry's priority.  NOTE(review): this
	 * read happens before the lock is taken and assumes the list head
	 * is never empty-dereferenced — confirm callers guarantee that. */
	priority = (timer = (timerl = &timers_list[LIST_CPUID])->next)->priority;
	flags = rt_spin_lock_irqsave(lock = &timers_lock[LIST_CPUID]);
	while ((timer = timer->next) != timerl) {
		if (timer->priority < priority) {
			priority = timer->priority;
		}
		/* The lock is dropped and immediately reacquired on every
		 * iteration, presumably to bound interrupt latency while
		 * scanning long lists — verify against other users of
		 * timers_lock before changing. */
		rt_spin_unlock_irqrestore(flags, lock);
		flags = rt_spin_lock_irqsave(lock);
	}
	rt_spin_unlock_irqrestore(flags, lock);
	/* Boost the manager only if it is currently worse than the best
	 * queued timer; rem/enq while READY re-sorts it in the ready list. */
	flags = rt_global_save_flags_and_cli();
	if ((timer_manager = &timers_manager[LIST_CPUID])->priority > priority) {
		timer_manager->priority = priority;
		if (timer_manager->state == RT_SCHED_READY) {
			rem_ready_task(timer_manager);
			enq_ready_task(timer_manager);
		}
	}
	rt_global_restore_flags(flags);
}
/*
 * _send - enqueue one message of msg_size bytes on message queue mq with
 * priority msgpri.
 *
 * Messages larger than the per-slot fast buffer (mq->fastsize) get an
 * rt_malloc'ed side buffer recorded in hdr.malloc; smaller ones are copied
 * straight into the slot.  @space selects kernel (memcpy) vs user-space
 * (rt_copy_from_user) source.  Returns 0 on success, -ENOMEM if the side
 * buffer cannot be allocated.
 *
 * NOTE(review): on the ENOMEM path freslots/senders are signalled,
 * presumably undoing semaphore waits performed by the caller before
 * entering here — confirm against the callers of _send.
 */
static int _send(RT_MSGQ *mq, void *msg, int msg_size, int msgpri, int space)
{
	unsigned long flags;
	RT_MSG *msg_ptr;
	void *p;

	if (msg_size > mq->fastsize) {
		if (!(p = rt_malloc(msg_size))) {
			rt_sem_signal(&mq->freslots);
			rt_sem_signal(&mq->senders);
			return -ENOMEM;
		}
	} else {
		p = NULL;
	}
	/* Grab a free slot; only the slot index bump needs the lock. */
	flags = rt_spin_lock_irqsave(&mq->lock);
	msg_ptr = mq->slots[mq->slot++];
	rt_spin_unlock_irqrestore(flags, &mq->lock);
	msg_ptr->hdr.size = msg_size;
	msg_ptr->hdr.priority = msgpri;
	msg_ptr->hdr.malloc = p;
	msg_ptr->hdr.broadcast = 0;
	/* Payload copy happens outside the lock; destination is the side
	 * buffer when one was allocated, the in-slot buffer otherwise. */
	if (space) {
		memcpy(p ? p : msg_ptr->msg, msg, msg_size);
	} else {
		rt_copy_from_user(p ? p : msg_ptr->msg, msg, msg_size);
	}
	/* Publish the message in priority order, then wake a receiver and
	 * release our sender token. */
	flags = rt_spin_lock_irqsave(&mq->lock);
	enq_msg(mq, &msg_ptr->hdr);
	rt_spin_unlock_irqrestore(flags, &mq->lock);
	rt_sem_signal(&mq->received);
	rt_sem_signal(&mq->senders);
	return 0;
}
static void rt_timers_manager(int dummy) { static unsigned long cr0; RTIME now; struct rt_tasklet_struct *tmr, *timer; unsigned long flags; int priority, used_fpu; while (1) { rt_sleep_until((timers_list.next)->firing_time); now = timers_manager.resume_time + tuned.timers_tol[0]; // find all the timers to be fired, in priority order while (1) { used_fpu = 0; tmr = timer = &timers_list; priority = RT_LOWEST_PRIORITY; flags = rt_spin_lock_irqsave(&timers_lock); while ((tmr = tmr->next)->firing_time <= now) { if (tmr->priority < priority) { priority = (timer = tmr)->priority; } } timers_manager.priority = priority; rt_spin_unlock_irqrestore(flags, &timers_lock); if (timer == &timers_list) { break; } if (!timer->period) { flags = rt_spin_lock_irqsave(&timers_lock); (timer->next)->prev = timer->prev; (timer->prev)->next = timer->next; timer->next = timer->prev = timer; rt_spin_unlock_irqrestore(flags, &timers_lock); } else { set_timer_firing_time(timer, timer->firing_time + timer->period); } if (!timer->task) { if (!used_fpu && timer->uses_fpu) { used_fpu = 1; save_cr0_and_clts(cr0); save_fpenv(timers_manager.fpu_reg); } timer->handler(timer->data); } else { rt_task_resume(timer->task); } } if (used_fpu) { restore_fpenv(timers_manager.fpu_reg); restore_cr0(cr0); } // set next timers_manager priority according to the highest priority timer asgn_min_prio(); // if no more timers in timers_struct remove timers_manager from tasks list } }
/***
 * rtskb_queue_tail - append @skb at the tail of @list.
 * @list: queue head
 * @skb: buffer to enqueue
 *
 * Interrupt-safe wrapper around __rtskb_queue_tail(): the queue spinlock
 * is held for the duration of the splice.
 */
void rtskb_queue_tail(struct rtskb_head *list, struct rtskb *skb)
{
	unsigned long lock_ctx = rt_spin_lock_irqsave(&list->lock);

	__rtskb_queue_tail(list, skb);
	rt_spin_unlock_irqrestore(lock_ctx, &list->lock);
}
/*
 * rt_insert_tasklet - initialise @tasklet and link it at the tail of the
 * global tasklets list.
 *
 * @priority: priority recorded in the tasklet (and, for user-space
 *            tasklets, copied into the supporting task).
 * @handler/@data: function and argument fired for this tasklet.
 * @id: caller-chosen non-zero identifier.
 * @pid: 0 for a kernel-space tasklet (no supporting task); non-zero means
 *       a user-space supporting task is already attached in tasklet->task.
 * Returns 0 on success, -EINVAL if handler or id is missing.
 */
RTAI_SYSCALL_MODE int rt_insert_tasklet(struct rt_tasklet_struct *tasklet, int priority, void (*handler)(unsigned long), unsigned long data, unsigned long id, int pid)
{
	unsigned long flags;

	// tasklet initialization
	if (!handler || !id) {
		return -EINVAL;
	}
	tasklet->uses_fpu = 0;
	tasklet->priority = priority;
	tasklet->handler = handler;
	tasklet->data = data;
	tasklet->id = id;
	if (!pid) {
		tasklet->task = 0;
	} else {
		/* User-space tasklet: mirror the priority into the server
		 * task and refresh the user-space shadow structure.
		 * NOTE(review): tasklet->task is dereferenced unchecked —
		 * presumably guaranteed non-NULL when pid != 0; confirm. */
		(tasklet->task)->priority = priority;
		rt_copy_to_user(tasklet->usptasklet, tasklet, sizeof(struct rt_usp_tasklet_struct));
	}
	// tasklet insertion tasklets_list
	/* Splice at the tail of the circular doubly-linked list, under the
	 * list spinlock. */
	flags = rt_spin_lock_irqsave(&tasklets_lock);
	tasklet->next = &tasklets_list;
	tasklet->prev = tasklets_list.prev;
	(tasklets_list.prev)->next = tasklet;
	tasklets_list.prev = tasklet;
	rt_spin_unlock_irqrestore(flags, &tasklets_lock);
	return 0;
}
int rt_get_registry_slot(int slot, struct rt_registry_entry_struct* entry) { unsigned long flags; // check if we got a valid pointer if(entry == 0) return 0; flags = rt_spin_lock_irqsave(&list_lock); // index 0 is reserved for the null slot. if (slot > 0 && slot <= MAX_SLOTS ) { if (lxrt_list[slot].name != 0) { // clear the result memset((char*)entry,0,sizeof(*entry)); // copy the structure entry->name = lxrt_list[slot].name; entry->adr = lxrt_list[slot].adr; entry->tsk = lxrt_list[slot].tsk; entry->pid = lxrt_list[slot].pid; entry->type = lxrt_list[slot].type; rt_spin_unlock_irqrestore(flags, &list_lock); return slot; } } rt_spin_unlock_irqrestore(flags, &list_lock); return 0; }
/*
 * mbxget - fetch up to msg_size bytes from the mailbox ring buffer into
 * *msg, stopping early when the mailbox runs out of available bytes.
 *
 * @space selects a kernel-space destination (memcpy) versus user space
 * (rt_copy_to_user).  *msg is advanced past the bytes written.
 * Returns the number of bytes NOT copied (0 means fully satisfied).
 */
static int mbxget(MBX *mbx, char **msg, int msg_size, int space)
{
	unsigned long flags;
	int tocpy;

	while (msg_size > 0 && mbx->avbs) {
		/* Clip the chunk at the physical end of the buffer (ring may
		 * wrap at fbyte) and at the available byte count. */
		if ((tocpy = mbx->size - mbx->fbyte) > msg_size) {
			tocpy = msg_size;
		}
		if (tocpy > mbx->avbs) {
			tocpy = mbx->avbs;
		}
		if (space) {
			memcpy(*msg, mbx->bufadr + mbx->fbyte, tocpy);
		} else {
			rt_copy_to_user(*msg, mbx->bufadr + mbx->fbyte, tocpy);
		}
		/* frbs/avbs are shared with writers: update under lock. */
		flags = rt_spin_lock_irqsave(&(mbx->lock));
		mbx->frbs += tocpy;
		mbx->avbs -= tocpy;
		rt_spin_unlock_irqrestore(flags, &(mbx->lock));
		msg_size -= tocpy;
		*msg += tocpy;
		mbx->fbyte = MOD_SIZE(mbx->fbyte + tocpy);
	}
	return msg_size;
}
/*
 * rt_set_timer_period - update a timer's period, serialised against the
 * timers list users via timers_lock.
 */
void rt_set_timer_period(struct rt_tasklet_struct *timer, RTIME period)
{
	unsigned long saved_flags = rt_spin_lock_irqsave(&timers_lock);

	timer->period = period;
	rt_spin_unlock_irqrestore(saved_flags, &timers_lock);
}
static inline void asgn_min_prio(void) { // find minimum priority in timers_struct struct rt_tasklet_struct *timer; unsigned long flags; int priority; priority = (timer = timers_list.next)->priority; flags = rt_spin_lock_irqsave(&timers_lock); while ((timer = timer->next) != &timers_list) { if (timer->priority < priority) { priority = timer->priority; } } rt_spin_unlock_irqrestore(flags, &timers_lock); flags = rt_global_save_flags_and_cli(); if (timers_manager.priority > priority) { timers_manager.priority = priority; if (timers_manager.state == READY || timers_manager.state == (READY | RUNNING)) { rt_rem_ready_task(&timers_manager); rt_enq_ready_task(&timers_manager); } } rt_global_restore_flags(flags); }
/*
 * CM_GrabChannel - reserve a playback channel for @sound_id and record the
 * sound<->channel mapping.
 *
 * Preference order: (1) the channel the sound is already mapped to,
 * (2) a channel marked free in freeMask that the hardware reports idle,
 * (3) any channel the hardware reports idle, (4) as a last resort, a
 * round-robin pick off last_chan (possibly stealing a busy channel).
 * Returns the chosen channel index.
 *
 * NOTE(review): sound_id indexes s2cMap unchecked — presumably validated
 * by callers; confirm.  Also last_chan is incremented without a modulo, so
 * it grows until its type wraps — harmless for the modulo pick but worth
 * confirming the field is unsigned.
 */
unsigned CM_GrabChannel(Channels *channels, unsigned sound_id)
{
	unsigned long flags;
	int chan;

	DEBUG("CM_GrabChannel sound_id=%u\n", sound_id);
	flags = rt_spin_lock_irqsave(&channels->spinlock);
	/* if it's already playing, stick to the same channel! */
	chan = CM_LookupSound_Nolock(channels, sound_id);
	/* otherwise, try and find a free channel id: note this code is kind of ugly. */
	if (chan < 0) {
		/* loop to find a free channel as per the freemask */
		for (chan = 0; chan < L22_NUM_CHANS; ++chan)
			if ( (channels->freeMask & (0x1<<chan)) && !L22IsPlaying(channels->dev, chan) )
				break;
		/* if that failed, loop to find any free channel that isn't playing. */
		if (chan >= L22_NUM_CHANS)
			for (chan = 0; chan < L22_NUM_CHANS; ++chan)
				if (!L22IsPlaying(channels->dev, chan) )
					break;
		/* if none of the two above loops found a free channel, try and grab
		   one by arbitrarily picking one off of the last_chan counter. */
		if (chan >= L22_NUM_CHANS)
			chan = ++channels->last_chan % L22_NUM_CHANS;
		else
			channels->last_chan = chan;
	}
	channels->freeMask &= ~(0x1<<chan); /* clear bit. */
	/* Record the bidirectional sound<->channel mapping. */
	channels->s2cMap[sound_id] = chan;
	channels->c2sMap[chan] = sound_id;
	rt_spin_unlock_irqrestore(flags, &channels->spinlock);
	DEBUG("CM_GrabChannel ret=%u\n", chan);
	return chan;
}
/*
 * CM_LookupSound - locked wrapper around CM_LookupSound_Nolock(): return
 * the channel currently mapped to @sound_id, or a negative value when the
 * sound is not mapped.
 */
int CM_LookupSound(Channels *channels, unsigned sound_id)
{
	int channel;
	unsigned long irq_state;

	irq_state = rt_spin_lock_irqsave(&channels->spinlock);
	channel = CM_LookupSound_Nolock(channels, sound_id);
	rt_spin_unlock_irqrestore(irq_state, &channels->spinlock);
	return channel;
}
/*
 * rt_timers_manager - body of the per-CPU timers manager task (SMP/rtdm
 * variant).
 *
 * Sleeps until the earliest queued timer on this CPU is due, then
 * repeatedly selects the highest-priority timer whose firing time has
 * passed (within the tuned tolerance), inherits its priority, dequeues it
 * (re-enqueueing periodic timers at firing_time + period) and runs its
 * handler.  When no more timers are due, the manager priority is restored
 * via TimersManagerPrio / asgn_min_prio().
 */
static void rt_timers_manager(long cpuid)
{
	RTIME now;
	RT_TASK *timer_manager;
	struct rtdm_timer_struct *tmr, *timer, *timerl;
	spinlock_t *lock;
	unsigned long flags, timer_tol;
	int priority;

	/* Cache the per-CPU structures once; LIST_CPUID selects them. */
	timer_manager = &timers_manager[LIST_CPUID];
	timerl = &timers_list[LIST_CPUID];
	lock = &timers_lock[LIST_CPUID];
	timer_tol = tuned.timers_tol[LIST_CPUID];
	while (1) {
		rt_sleep_until((timerl->next)->firing_time);
		now = rt_get_time() + timer_tol;
		while (1) {
			/* Scan, under the lock, for the expired timer with
			 * the best (lowest) priority. */
			tmr = timer = timerl;
			priority = RT_SCHED_LOWEST_PRIORITY;
			flags = rt_spin_lock_irqsave(lock);
			while ((tmr = tmr->next)->firing_time <= now) {
				if (tmr->priority < priority) {
					priority = (timer = tmr)->priority;
				}
			}
			rt_spin_unlock_irqrestore(flags, lock);
			if (timer == timerl) {
				/* Nothing due: drop back to the manager's
				 * base priority and end this batch. */
				if (timer_manager->priority > TimersManagerPrio) {
					timer_manager->priority = TimersManagerPrio;
				}
				break;
			}
			/* Inherit the fired timer's priority, then dequeue it
			 * (re-enqueueing it one period later if periodic). */
			timer_manager->priority = priority;
			flags = rt_spin_lock_irqsave(lock);
			rem_timer(timer);
			if (timer->period) {
				timer->firing_time += timer->period;
				enq_timer(timer);
			}
			rt_spin_unlock_irqrestore(flags, lock);
			timer->handler(timer->data);
		}
		asgn_min_prio(LIST_CPUID);
	}
}
/*
 * rt_set_timer_period - update a timer's period under the per-CPU timers
 * lock selected by TIMER_CPUID.
 */
RTAI_SYSCALL_MODE void rt_set_timer_period(struct rt_tasklet_struct *timer, RTIME period)
{
	spinlock_t *cpu_lock = &timers_lock[TIMER_CPUID];
	unsigned long irq_state = rt_spin_lock_irqsave(cpu_lock);

	timer->period = period;
	rt_spin_unlock_irqrestore(irq_state, cpu_lock);
}
static void alloc_collector(struct rtskb *skb, struct rtsocket *sock) { int i; unsigned int flags; struct ip_collector *p_coll; struct iphdr *iph = skb->nh.iph; /* Find free collector */ for (i = 0; i < COLLECTOR_COUNT; i++) { p_coll = &collector[i]; flags = rt_spin_lock_irqsave(&p_coll->frags.lock); /* * This is a very simple version of a garbage collector. * Whenver the last access to any of the collectors is a while ago, * the collector will be freed... * Under normal conditions, it should never be necessary to collect * the garbage. * */ if (p_coll->in_use && (counter - p_coll->last_accessed > GARBAGE_COLLECT_LIMIT)) { kfree_rtskb(p_coll->frags.first); p_coll->in_use = 0; rt_printk("RTnet: IP fragmentation garbage collection " "(saddr:%x, daddr:%x)\n", p_coll->saddr, p_coll->daddr); } /* Collector (now) free? */ if (!p_coll->in_use) { p_coll->in_use = 1; p_coll->last_accessed = counter; p_coll->buf_size = skb->len; p_coll->frags.first = skb; p_coll->frags.last = skb; p_coll->saddr = iph->saddr; p_coll->daddr = iph->daddr; p_coll->id = iph->id; p_coll->protocol = iph->protocol; p_coll->sock = sock; rt_spin_unlock_irqrestore(flags, &p_coll->frags.lock); return; } rt_spin_unlock_irqrestore(flags, &p_coll->frags.lock); } rt_printk("RTnet: IP fragmentation - no collector available\n"); kfree_rtskb(skb); }
/*
 * rt_remove_tasklet - unlink @tasklet from the tasklets list.
 *
 * A tasklet that was never inserted (NULL links) or is already unlinked
 * (links pointing back at itself) is left untouched.  After removal the
 * links point at the tasklet itself, marking it as detached.
 */
RTAI_SYSCALL_MODE void rt_remove_tasklet(struct rt_tasklet_struct *tasklet)
{
	unsigned long flags;

	/* Guard clause: only unlink tasklets that are actually queued. */
	if (!tasklet->next || !tasklet->prev || tasklet->next == tasklet || tasklet->prev == tasklet) {
		return;
	}

	flags = rt_spin_lock_irqsave(&tasklets_lock);
	tasklet->next->prev = tasklet->prev;
	tasklet->prev->next = tasklet->next;
	tasklet->next = tasklet->prev = tasklet;
	rt_spin_unlock_irqrestore(flags, &tasklets_lock);
}
/*
 * CM_LookupChannel - locked wrapper around CM_LookupChannel_Nolock():
 * return the sound id currently mapped to @chan, or a negative value when
 * the channel carries no sound.
 */
int CM_LookupChannel(Channels *channels, unsigned chan)
{
	int sound_id;
	unsigned long irq_state;

	DEBUG("CM_LookupChannel chan=%d\n", (int)chan);
	irq_state = rt_spin_lock_irqsave(&channels->spinlock);
	sound_id = CM_LookupChannel_Nolock(channels, chan);
	rt_spin_unlock_irqrestore(irq_state, &channels->spinlock);
	DEBUG("CM_LookupChannel ret=%d\n", sound_id);
	return sound_id;
}
/***
 * rtskb_dequeue - pop the buffer at the head of @list.
 * @list: rtskb_head to dequeue from
 *
 * Interrupt-safe wrapper around __rtskb_dequeue(); returns the dequeued
 * rtskb, or whatever __rtskb_dequeue() yields for an empty queue.
 */
struct rtskb *rtskb_dequeue(struct rtskb_head *list)
{
	struct rtskb *skb;
	unsigned long lock_ctx = rt_spin_lock_irqsave(&list->lock);

	skb = __rtskb_dequeue(list);
	rt_spin_unlock_irqrestore(lock_ctx, &list->lock);
	return skb;
}
/*
 * _receive - take the first queued message from mq and copy up to msg_size
 * bytes of it to @msg (kernel space if @space, else user space via
 * rt_copy_to_user/rt_put_user).  *msgpri, when non-NULL, receives the
 * message priority.  Returns the number of payload bytes NOT copied.
 *
 * Broadcast messages are reference-counted: every receiver but the last
 * just signals received/receivers and parks on the broadcast barrier; the
 * last receiver jumps to the slot-release path.  Note the `goto relslot`
 * deliberately jumps into the else block to share the release code.
 */
static int _receive(RT_MSGQ *mq, void *msg, int msg_size, int *msgpri, int space)
{
	int size;
	RT_MSG *msg_ptr;
	void *p;

	/* Copy no more than the message actually holds. */
	size = min((msg_ptr = mq->firstmsg)->hdr.size, msg_size);
	/* Source is the rt_malloc'ed side buffer when present (large
	 * messages), the in-slot buffer otherwise. */
	if (space) {
		memcpy(msg, (p = msg_ptr->hdr.malloc) ? p : msg_ptr->msg, size);
		if (msgpri) {
			*msgpri = msg_ptr->hdr.priority;
		}
	} else {
		rt_copy_to_user(msg, (p = msg_ptr->hdr.malloc) ? p : msg_ptr->msg, size);
		if (msgpri) {
			rt_put_user(msg_ptr->hdr.priority, msgpri);
		}
	}
	if (msg_ptr->hdr.broadcast) {
		if (!--msg_ptr->hdr.broadcast) {
			/* Last broadcast receiver: sync and release the slot. */
			rt_sem_wait_barrier(&mq->broadcast);
			goto relslot;
		} else {
			rt_sem_signal(&mq->received);
			rt_sem_signal(&mq->receivers);
			rt_sem_wait_barrier(&mq->broadcast);
		}
	} else {
		unsigned long flags;
relslot:
		/* Pop the message and return its slot to the free pool. */
		flags = rt_spin_lock_irqsave(&mq->lock);
		mq->firstmsg = msg_ptr->hdr.next;
		mq->slots[--mq->slot] = msg_ptr;
		rt_spin_unlock_irqrestore(flags, &mq->lock);
		rt_sem_signal(&mq->freslots);
		rt_sem_signal(&mq->receivers);
		if (p) {
			rt_free(p);
		}
	}
	return msg_size - size;
}
/*
 * rt_remove_timer - dequeue @timer from its per-CPU timers list and
 * re-evaluate the timers manager priority.
 *
 * Timers that were never inserted (NULL links) or are already detached
 * (links pointing back at themselves) are ignored.
 */
RTAI_SYSCALL_MODE void rt_remove_timer(struct rt_tasklet_struct *timer)
{
	spinlock_t *cpu_lock;
	unsigned long irq_state;

	/* Guard clause: act only on timers that are actually queued. */
	if (!timer->next || !timer->prev || timer->next == timer || timer->prev == timer) {
		return;
	}

	cpu_lock = &timers_lock[TIMER_CPUID];
	irq_state = rt_spin_lock_irqsave(cpu_lock);
	rem_timer(timer);
	rt_spin_unlock_irqrestore(irq_state, cpu_lock);
	asgn_min_prio(TIMER_CPUID);
}
/*
 * rt_insert_timer - initialise @timer and enqueue it, ordered by firing
 * time, on the timers list of the CPU it belongs to.
 *
 * @pid: 0 for a plain kernel timer (handler fired by the manager task);
 *       > 0 for a user-space timer with a supporting task in timer->task;
 *       < 0 selects the "no handler" initialisation branch.
 * Returns 0 on success, -EINVAL when pid >= 0 and no handler is given.
 *
 * Also applies priority inheritance to the per-CPU timers manager and, if
 * the new timer becomes the earliest while the manager is delayed, pulls
 * the manager's resume time forward and reschedules.
 */
RTAI_SYSCALL_MODE int rt_insert_timer(struct rt_tasklet_struct *timer, int priority, RTIME firing_time, RTIME period, void (*handler)(unsigned long), unsigned long data, int pid)
{
	spinlock_t *lock;
	unsigned long flags, cpuid;
	RT_TASK *timer_manager;

	// timer initialization
	timer->uses_fpu = 0;
	if (pid >= 0) {
		if (!handler) {
			return -EINVAL;
		}
		timer->handler = handler;
		timer->data = data;
	} else {
		/* NOTE(review): the second clause is dead — any handler equal
		 * to (void *)1 already satisfies `!= NULL`.  Possibly meant
		 * `== NULL || == (void *)1`; confirm against upstream before
		 * touching, as changing it alters behavior. */
		if (timer->handler != NULL || timer->handler == (void *)1) {
			timer->handler = (void *)1;
			timer->data = data;
		}
	}
	timer->priority = priority;
	/* REALTIME2COUNT presumably converts to timer counts in-place. */
	REALTIME2COUNT(firing_time)
	timer->firing_time = firing_time;
	timer->period = period;
	if (!pid) {
		timer->task = 0;
		timer->cpuid = cpuid = NUM_CPUS > 1 ? rtai_cpuid() : 0;
	} else {
		/* User-space timer: pin to the supporting task's CPU and
		 * refresh the user-space shadow structure. */
		timer->cpuid = cpuid = NUM_CPUS > 1 ? (timer->task)->runnable_on_cpus : 0;
		(timer->task)->priority = priority;
		rt_copy_to_user(timer->usptasklet, timer, sizeof(struct rt_usp_tasklet_struct));
	}
	// timer insertion in timers_list
	flags = rt_spin_lock_irqsave(lock = &timers_lock[LIST_CPUID]);
	enq_timer(timer);
	rt_spin_unlock_irqrestore(flags, lock);
	// timers_manager priority inheritance
	if (timer->priority < (timer_manager = &timers_manager[LIST_CPUID])->priority) {
		timer_manager->priority = timer->priority;
	}
	// timers_task deadline inheritance
	/* If this timer is now the earliest and the manager is sleeping past
	 * it, wake the manager earlier. */
	flags = rt_global_save_flags_and_cli();
	if (timers_list[LIST_CPUID].next == timer && (timer_manager->state & RT_SCHED_DELAYED) && firing_time < timer_manager->resume_time) {
		timer_manager->resume_time = firing_time;
		rem_timed_task(timer_manager);
		enq_timed_task(timer_manager);
		rt_schedule();
	}
	rt_global_restore_flags(flags);
	return 0;
}
/*
 * rt_remove_timer - unlink @timer from the timers list and re-evaluate the
 * timers manager priority.
 *
 * Fix: also reject timers whose links are NULL (never inserted), matching
 * the guards used by the companion removal routines (rt_remove_tasklet and
 * the multi-CPU rt_remove_timer); previously a never-initialised timer
 * dereferenced a NULL next/prev pointer here.
 */
void rt_remove_timer(struct rt_tasklet_struct *timer)
{
	/* Act only on timers that are actually queued: both links set and
	 * not pointing back at the timer itself (self-links mark a timer
	 * as detached). */
	if (timer->next && timer->prev && timer->next != timer && timer->prev != timer) {
		unsigned long flags;

		flags = rt_spin_lock_irqsave(&timers_lock);
		(timer->next)->prev = timer->prev;
		(timer->prev)->next = timer->next;
		timer->next = timer->prev = timer;
		rt_spin_unlock_irqrestore(flags, &timers_lock);
		/* The removed timer may have been the most urgent one. */
		asgn_min_prio();
	}
}
/* ***********************************************************************
 * Returns the next pointer from the ringbuffer or zero if nothing is
 * available
 * ********************************************************************* */
static void *read_from_ringbuffer(skb_exch_ringbuffer_t *pRing)
{
	void *ret = 0;
	/* Fix: the saved interrupt state from rt_spin_lock_irqsave() needs
	 * an unsigned long; the previous `unsigned int` truncated it on
	 * 64-bit targets. */
	unsigned long flags = rt_spin_lock_irqsave(&skb_spinlock);

	/* Ring is non-empty while the read index trails the write index;
	 * advance rd first, then fetch the slot it lands on. */
	if (pRing->rd != pRing->wr) {
		pRing->rd = (pRing->rd + 1) % SKB_RINGBUFFER_SIZE;
		ret = pRing->ptr[pRing->rd];
	}
	rt_spin_unlock_irqrestore(flags, &skb_spinlock);
	return ret;
}
/*
 * get_name - resolve the registry name associated with @adr.
 *
 * With a NULL @adr, hands out the next value of the internal name seed
 * (anonymous registration).  Otherwise scans the registry and returns the
 * name of the matching slot, or 0 when @adr is not registered.
 */
static inline unsigned long get_name(void *adr)
{
	static unsigned long nameseed = 0xfacade;
	unsigned long flags, result = 0;
	int i;

	if (!adr) {
		/* Anonymous: consume one seed value under the lock. */
		flags = rt_spin_lock_irqsave(&list_lock);
		result = nameseed++;
		rt_spin_unlock_irqrestore(flags, &list_lock);
		return result;
	}

	flags = rt_spin_lock_irqsave(&list_lock);
	for (i = 1; i <= MAX_SLOTS; i++) {
		if (lxrt_list[i].adr == adr) {
			result = lxrt_list[i].name;
			break;
		}
	}
	rt_spin_unlock_irqrestore(flags, &list_lock);
	return result;
}
/*
 * CM_ClearChannel - release channel @chan: drop both directions of the
 * sound<->channel mapping and mark the channel free in freeMask.
 */
void CM_ClearChannel(Channels *channels, unsigned chan)
{
	unsigned long irq_state;
	int sound_id;

	DEBUG("CM_ClearSound chan=%u\n", chan);
	irq_state = rt_spin_lock_irqsave(&channels->spinlock);
	sound_id = CM_LookupChannel_Nolock(channels, chan);
	channels->c2sMap[chan] = -1;
	/* Only clear the reverse mapping if a sound was actually attached. */
	if (sound_id >= 0) {
		channels->s2cMap[sound_id] = -1;
	}
	channels->freeMask |= 0x1 << chan;
	rt_spin_unlock_irqrestore(irq_state, &channels->spinlock);
	DEBUG("CM_ClearSound ret=(void)\n");
}
/*
 * set_timer_firing_time - move a queued timer to a new firing time by
 * removing it from, and re-enqueueing it into, the firing-time-ordered
 * per-CPU timers list.  Detached timers (self-linked) are ignored.
 *
 * Fix: the firing_time store now happens inside the locked region.  It
 * was previously written before taking timers_lock, so another CPU
 * traversing the list under the lock could observe an entry whose key no
 * longer matched its position in the ordered list.
 */
static inline void set_timer_firing_time(struct rt_tasklet_struct *timer, RTIME firing_time)
{
	if (timer->next != timer && timer->prev != timer) {
		spinlock_t *lock;
		unsigned long flags;

		flags = rt_spin_lock_irqsave(lock = &timers_lock[TIMER_CPUID]);
		timer->firing_time = firing_time;
		rem_timer(timer);
		enq_timer(timer);
		rt_spin_unlock_irqrestore(flags, lock);
	}
}
/*
 * get_type - look up the registry entry named @name and return its type,
 * or -EINVAL when no slot carries that name.
 */
static inline int get_type(unsigned long name)
{
	unsigned long flags;
	int i, type = -EINVAL;

	flags = rt_spin_lock_irqsave(&list_lock);
	for (i = 1; i <= MAX_SLOTS; i++) {
		if (lxrt_list[i].name == name) {
			type = lxrt_list[i].type;
			break;
		}
	}
	rt_spin_unlock_irqrestore(flags, &list_lock);
	return type;
}
/*
 * get_adr - look up the registry entry named @name and return its address,
 * or 0 when no slot carries that name.
 */
static inline void *get_adr(unsigned long name)
{
	unsigned long flags;
	void *adr = 0;
	int i;

	flags = rt_spin_lock_irqsave(&list_lock);
	for (i = 1; i <= MAX_SLOTS; i++) {
		if (lxrt_list[i].name == name) {
			adr = lxrt_list[i].adr;
			break;
		}
	}
	rt_spin_unlock_irqrestore(flags, &list_lock);
	return adr;
}
/*
 * drg_on_adr - deregister the registry entry whose address is @adr by
 * overwriting its slot with the reserved null slot 0.
 *
 * Returns the index of the cleared slot, or 0 when @adr is not registered.
 */
static inline int drg_on_adr(void *adr)
{
	unsigned long flags;
	int i, cleared = 0;

	flags = rt_spin_lock_irqsave(&list_lock);
	for (i = 1; i <= MAX_SLOTS; i++) {
		if (lxrt_list[i].adr == adr) {
			/* Copy the null slot over it to wipe the entry. */
			lxrt_list[i] = lxrt_list[0];
			cleared = i;
			break;
		}
	}
	rt_spin_unlock_irqrestore(flags, &list_lock);
	return cleared;
}
/*
 * get_ptimer_indx - allocate the next free POSIX-timer descriptor, attach
 * @timer to it, and return its index.
 *
 * Returns 0 when the PosixTimers pool is exhausted.
 */
static inline int get_ptimer_indx(struct rt_tasklet_struct *timer)
{
	unsigned long flags;
	int indx = 0;

	flags = rt_spin_lock_irqsave(&ptimer_lock);
	if (ptimer_index < PosixTimers) {
		struct ptimer_list *entry = posix_timer[ptimer_index++].p_ptr;

		entry->timer = timer;
		indx = entry->t_indx;
	}
	rt_spin_unlock_irqrestore(flags, &ptimer_lock);
	return indx;
}