/*
 * Report a failure detected while running a kernel-space test task and
 * unblock the first user-visible (non kernel-space) task so it can observe
 * ctx->failed and propagate the error.
 *
 * Runs in real-time context. @fp_val is the offending FPU check value
 * recorded for later diagnosis.
 */
static void handle_ktask_error(rtswitch_context_t *ctx, unsigned fp_val)
{
	unsigned i;

	/* Record the failure state first so woken tasks see it. */
	ctx->failed = 1;
	ctx->error.fp_val = fp_val;

	for (i = 0; i < ctx->tasks_count; i++) {
		rtswitch_task_t *task = &ctx->tasks[i];

		/* Find the first non kernel-space task. */
		if ((task->base.flags & RTSWITCH_KERNEL))
			continue;

		/* Unblock it. */
		switch(task->base.flags & RTSWITCH_RT) {
		case RTSWITCH_NRT:
			/* Non-RT task: publish it in the per-CPU slot and let
			   the nrt signal handler wake it from Linux context. */
			rtswitch_utask[ctx->cpu] = task;
			rtdm_nrtsig_pend(&rtswitch_wake_utask);
			break;

		case RTSWITCH_RT:
			/* RT task: wake it directly via its synch event. */
			rtdm_event_signal(&task->rt_synch);
			break;
		}

		/* NOTE(review): suspend is inside the loop — the caller parks
		   itself after each unblocked user task so that task can run;
		   confirm this hand-off order is intended (it matches the
		   upstream switchtest driver). */
		xnpod_suspend_self();
	}
}
/*
 * Switch from RT task @from_idx to task @to_idx, blocking the caller on its
 * own synch event until it is switched back to.
 *
 * Returns 0 on success, 1 if the test context has already failed, or a
 * negative errno (-EINVAL on bad indices/flags, or the rtdm_event_wait
 * error).
 */
static int rtswitch_to_rt(rtswitch_context_t *ctx, unsigned from_idx, unsigned to_idx)
{
	rtswitch_task_t *from, *to;
	int rc;

	if (from_idx > ctx->tasks_count || to_idx > ctx->tasks_count)
		return -EINVAL;

	/* to == from is a special case which means
	   "return to the previous task". */
	if (to_idx == from_idx)
		to_idx = ctx->error.last_switch.from;

	from = &ctx->tasks[from_idx];
	to = &ctx->tasks[to_idx];

	from->base.flags |= RTSWITCH_RT;
	from->last_switch = ++ctx->switches_count;
	ctx->error.last_switch.from = from_idx;
	ctx->error.last_switch.to = to_idx;
	/* Make the bookkeeping visible before the wake-up is triggered. */
	barrier();

	if (ctx->pause_us) {
		/* Delayed hand-off: the timer handler reads next_task, so
		   publish it before arming the timer. */
		ctx->next_task = to_idx;
		barrier();
		rtdm_timer_start(&ctx->wake_up_delay, ctx->pause_us * 1000, 0,
				 RTDM_TIMERMODE_RELATIVE);
		/* Keep the scheduler locked until we are safely blocked in
		   rtdm_event_wait() below. */
		xnpod_lock_sched();
	} else
		switch (to->base.flags & RTSWITCH_RT) {
		case RTSWITCH_NRT:
			/* Destination is a Linux-domain task: publish it, then
			   pend the nrt signal to wake it from Linux context. */
			ctx->utask = to;
			barrier();
			rtdm_nrtsig_pend(&ctx->wake_utask);
			xnpod_lock_sched();
			break;

		case RTSWITCH_RT:
			/* Destination is RT: lock the scheduler BEFORE
			   signalling so the wakee cannot preempt us until we
			   block below. */
			xnpod_lock_sched();
			rtdm_event_signal(&to->rt_synch);
			break;

		default:
			return -EINVAL;
		}

	/* Block until another task switches back to us. */
	rc = rtdm_event_wait(&from->rt_synch);
	xnpod_unlock_sched();

	if (rc < 0)
		return rc;

	if (ctx->failed)
		return 1;

	return 0;
}
/*
 * Append a finished RPC call to the processed-calls list and pend the
 * Linux-domain signal so the non-RT side picks it up.
 */
static inline void rtpc_queue_processed_call(struct rt_proc_call *call)
{
	rtdm_lockctx_t lock_ctx;

	rtdm_lock_get_irqsave(&processed_calls_lock, lock_ctx);
	list_add_tail(&call->list_entry, &processed_calls);
	rtdm_lock_put_irqrestore(&processed_calls_lock, lock_ctx);

	/* Notify Linux context outside the spinlock. */
	rtdm_nrtsig_pend(&rtpc_nrt_signal);
}
/*
 * Timer handler armed by the pause_us switch path: once the programmed
 * delay elapses, wake up the task previously selected in ctx->next_task.
 *
 * Runs in timer (real-time) context.
 */
static void timed_wake_up(rtdm_timer_t *timer)
{
	rtswitch_context_t *ctx =
		container_of(timer, rtswitch_context_t, wake_up_delay);
	rtswitch_task_t *task;

	task = &ctx->tasks[ctx->next_task];

	switch (task->base.flags & RTSWITCH_RT) {
	case RTSWITCH_NRT:
		/* Non-RT task: publish it in the per-CPU slot and wake it
		   from Linux context via the nrt signal. */
		rtswitch_utask[ctx->cpu] = task;
		rtdm_nrtsig_pend(&rtswitch_wake_utask);
		break;

	case RTSWITCH_RT:
		/* RT task: signal its synchronization event directly. */
		rtdm_event_signal(&task->rt_synch);
		break;	/* fix: was missing, the last case fell off the switch */

	default:
		/* Neither NRT nor RT flag set: nothing to wake. */
		break;
	}
}
/*
 * Hand a received VNIC frame over to the non-RT side: re-own the buffer
 * from the per-device VNIC pool, tag its protocol, queue it and notify
 * Linux context. Returns 0 on success, -1 if no pool buffer was available.
 */
int rtmac_vnic_rx(struct rtskb *rtskb, u16 type)
{
	struct rtmac_priv *mac_priv = rtskb->rtdev->mac_priv;

	if (rtskb_acquire(rtskb, &mac_priv->vnic_skb_pool) != 0) {
		/* Pool exhausted: account the drop and release the buffer. */
		mac_priv->vnic_stats.rx_dropped++;
		kfree_rtskb(rtskb);
		return -1;
	}

	rtskb->protocol = type;
	rtskb_queue_tail(&rx_queue, rtskb);
	rtdm_nrtsig_pend(&vnic_signal);

	return 0;
}
/*
 * RTDM interrupt handler for the FEC ethernet controller: acknowledges and
 * services RX, TX and MII events until the event register reads clean.
 *
 * Returns RTDM_IRQ_HANDLED if at least one event was serviced,
 * RTDM_IRQ_NONE otherwise.
 */
static int fec_enet_interrupt(rtdm_irq_t *irq_handle)
{
	struct rtnet_device *ndev =
		rtdm_irq_get_arg(irq_handle, struct rtnet_device); /* RTnet */
	struct fec_enet_private *fep = rtnetdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = RTDM_IRQ_NONE;
	/* RTnet */
	/* Timestamp taken once at IRQ entry and used for all RX packets of
	   this invocation. */
	nanosecs_abs_t time_stamp = rtdm_clock_read();
	int packets = 0;

	do {
		/* Read pending events and acknowledge them by writing the
		   same bits back. */
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = RTDM_IRQ_HANDLED;
			fec_enet_rx(ndev, &packets, &time_stamp);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = RTDM_IRQ_HANDLED;
			fec_enet_tx(ndev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = RTDM_IRQ_HANDLED;
			/* MDIO transfer done: notify the waiter from Linux
			   context. */
			rtdm_nrtsig_pend(&fep->mdio_done_sig);
		}
	} while (int_events); /* loop until no new events are pending */

	/* Received packets are queued; kick the stack manager to process
	   them. */
	if (packets > 0)
		rt_mark_stack_mgr(ndev);

	return ret;
}
/* ************************************************************************ * This function runs in rtai context. * * It is called from inside rtnet whenever a packet has been received that * has to be processed by rtnetproxy. * ************************************************************************ */ static int rtnetproxy_recv(struct rtskb *rtskb) { /* Acquire rtskb (JK) */ if (rtskb_acquire(rtskb, &rtskb_pool) != 0) { rtdm_printk("rtnetproxy_recv: No free rtskb in pool\n"); kfree_rtskb(rtskb); } /* Place the rtskb in the ringbuffer: */ else if (write_to_ringbuffer(&ring_rtskb_rtnet_kernel, rtskb)) { /* Switch over to kernel context: */ rtdm_nrtsig_pend(&rtnetproxy_signal); } else { /* No space in ringbuffer => Free rtskb here... */ rtdm_printk("rtnetproxy_recv: No space in queue\n"); kfree_rtskb(rtskb); } return 0; }
/*
 * IOCTL handler of the RTDM test device: exercises semaphore, event, mutex,
 * nrt-signal and task services depending on the request code.
 *
 * Returns 0 on success, -EFAULT on bad user buffers, -ENOTTY for unknown
 * requests, or the error code of the underlying RTDM service.
 */
static int rtdmtest_ioctl(struct rtdm_dev_context *context,
			  rtdm_user_info_t *user_info,
			  unsigned int request, void *arg)
{
	struct rtdmtest_context *ctx;
	struct rttst_rtdmtest_config config_buf, *config;
	rtdm_toseq_t toseq_local, *toseq = NULL;
	int i, err = 0;

	ctx = (struct rtdmtest_context *)context->dev_private;

	switch (request) {
	case RTTST_RTIOC_RTDMTEST_SEM_TIMEDDOWN:
	case RTTST_RTIOC_RTDMTEST_EVENT_TIMEDWAIT:
	case RTTST_RTIOC_RTDMTEST_MUTEX_TIMEDTEST:
	case RTTST_RTIOC_RTDMTEST_MUTEX_TEST:
		/* These requests carry a config argument; copy it in when the
		   caller is a user-space task, otherwise use it in place. */
		config = arg;
		if (user_info) {
			if (rtdm_safe_copy_from_user
			    (user_info, &config_buf, arg,
			     sizeof(struct rttst_rtdmtest_config)) < 0)
				return -EFAULT;
			config = &config_buf;
		}
		/* Run at least one iteration. */
		if (!config->seqcount)
			config->seqcount = 1;
		/* With a timeout and multiple iterations, use a timeout
		   sequence so the deadline spans the whole run. */
		if (config->timeout && config->seqcount > 1) {
			toseq = &toseq_local;
			rtdm_toseq_init(toseq, config->timeout);
		}
		switch(request) {
		case RTTST_RTIOC_RTDMTEST_SEM_TIMEDDOWN:
			/* Repeatedly take the semaphore with timeout. */
			for (i = 0; i < config->seqcount; i++) {
				err = rtdm_sem_timeddown(&ctx->sem,
							 config->timeout,
							 toseq);
				if (err)
					break;
			}
			break;

		case RTTST_RTIOC_RTDMTEST_EVENT_TIMEDWAIT:
			/* Repeatedly wait on the event with timeout. */
			for (i = 0; i < config->seqcount; i++) {
				err = rtdm_event_timedwait(&ctx->event,
							   config->timeout,
							   toseq);
				if (err)
					break;
			}
			break;

		case RTTST_RTIOC_RTDMTEST_MUTEX_TIMEDTEST:
			/* Lock/unlock cycles with timeout; optionally sleep
			   in Linux context while holding the mutex to create
			   contention. */
			for (i = 0; i < config->seqcount; i++) {
				err = rtdm_mutex_timedlock(&ctx->mutex,
							   config->timeout,
							   toseq);
				if (err)
					break;
				if (config->delay_jiffies) {
					__set_current_state(TASK_INTERRUPTIBLE);
					schedule_timeout(config->delay_jiffies);
				}
				rtdm_lock_count++;
				rtdm_mutex_unlock(&ctx->mutex);
			}
			break;

		case RTTST_RTIOC_RTDMTEST_MUTEX_TEST:
			/* Plain (untimed) lock/unlock cycles. */
			for (i = 0; i < config->seqcount; i++) {
				if ((err = rtdm_mutex_lock(&ctx->mutex)))
					break;
				rtdm_lock_count++;
				rtdm_mutex_unlock(&ctx->mutex);
			}
			break;
		}
		break;

	case RTTST_RTIOC_RTDMTEST_SEM_DOWN:
		err = rtdm_sem_down(&ctx->sem);
		break;

	case RTTST_RTIOC_RTDMTEST_SEM_UP:
		rtdm_sem_up(&ctx->sem);
		break;

	case RTTST_RTIOC_RTDMTEST_SEM_DESTROY:
		rtdm_sem_destroy(&ctx->sem);
		break;

	case RTTST_RTIOC_RTDMTEST_EVENT_WAIT:
		err = rtdm_event_wait(&ctx->event);
		break;

	case RTTST_RTIOC_RTDMTEST_EVENT_SIGNAL:
		rtdm_event_signal(&ctx->event);
		break;

	case RTTST_RTIOC_RTDMTEST_EVENT_DESTROY:
		rtdm_event_destroy(&ctx->event);
		break;

	case RTTST_RTIOC_RTDMTEST_MUTEX_DESTROY:
		rtdm_mutex_destroy(&ctx->mutex);
		break;

	case RTTST_RTIOC_RTDMTEST_MUTEX_GETSTAT:
		printk("RTTST_RTIOC_RTDMTEST_MUTEX_GETSTAT\n");
		/* Report the lock counter; stage it in config_buf for
		   user-space callers. */
		if (user_info)
			config = &config_buf;
		else
			config = arg;
		config->seqcount = rtdm_lock_count;
		if (user_info) {
			if (rtdm_safe_copy_to_user
			    (user_info, arg, &config_buf,
			     sizeof(struct rttst_rtdmtest_config)) < 0)
				return -EFAULT;
		}
		break;

	case RTTST_RTIOC_RTDMTEST_NRTSIG_PEND:
		rtdm_nrtsig_pend(&ctx->nrtsig);
		break;

	case RTTST_RTIOC_RTDMTEST_TASK_CREATE:
	case RTTST_RTIOC_RTDMTEST_TASK_SET_PRIO:
		/* Task requests also carry a config argument. */
		config = arg;
		if (user_info) {
			if (rtdm_safe_copy_from_user
			    (user_info, &config_buf, arg,
			     sizeof(struct rttst_rtdmtest_config)) < 0)
				return -EFAULT;
			config = &config_buf;
		}
		if (request == RTTST_RTIOC_RTDMTEST_TASK_CREATE) {
			task_period = config->timeout;
			rtdm_task_init(&task, "RTDMTEST", rtdmtest_task,
				       (void *)config, config->priority, 0);
		} else {
			rtdm_task_set_priority(&task, config->priority);
		}
		break;

	case RTTST_RTIOC_RTDMTEST_TASK_DESTROY:
		rtdm_task_destroy(&task);
		rtdm_task_join_nrt(&task, 100);
		break;

	default:
		printk("request=%d\n", request);
		err = -ENOTTY;
	}

	return err;
}