int nomac_nrt_packet_tx(struct rtskb *rtskb)
{
    struct nomac_priv   *nomac;
    struct rtnet_device *rtdev;
    int                 ret;


    nomac = (struct nomac_priv *)rtskb->rtdev->mac_priv->disc_priv;

    rtcap_mark_rtmac_enqueue(rtskb);

    /* note: this routine may be called in both RT and non-RT context
     *       => detect the context and, in the non-RT case, hand the
     *          packet over to the RT wrapper task */
    if (!rtos_in_rt_context()) {
        rtskb_queue_tail(&nrt_rtskb_queue, rtskb);
        rtos_event_sem_signal(&wakeup_sem);
        return 0;
    } else {
        rtdev = rtskb->rtdev;

        /* no MAC: we simply transmit the packet under xmit_lock */
        rtos_res_lock(&rtdev->xmit_lock);
        ret = rtmac_xmit(rtskb);
        rtos_res_unlock(&rtdev->xmit_lock);

        return ret;
    }
}
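For reference, the queue and semaphore used above are module-level objects of the NoMAC discipline. Below is a minimal sketch of the declarations this example relies on; the object names are taken from the code, while the exact type names (struct rtskb_queue, rtos_event_sem_t, rtos_task_t) are assumptions based on the rtskb_* and rtos_* calls that appear in these examples.

/* assumed module-level state for the non-RT transmit path */
static struct rtskb_queue   nrt_rtskb_queue;   /* packets handed over from non-RT context */
static rtos_event_sem_t     wakeup_sem;        /* wakes the RT wrapper task */
static rtos_task_t          wrapper_task;      /* runs nrt_xmit_task(), see the last example */
static int                  shutdown;          /* tells the wrapper task to terminate */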
Example #2
static void do_slot_job(struct tdma_priv *tdma, struct tdma_slot *job,
                        rtdm_lockctx_t lockctx)
{
    struct rtskb *rtskb;

    if ((job->period != 1) &&
        (tdma->current_cycle % job->period != job->phasing))
        return;

    /* tdma->lock is held on entry; drop it while sleeping until the
     * slot and while transmitting, retake it before returning */
    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    /* wait for slot begin, then send one pending packet */
    rtdm_task_sleep_abs(tdma->current_cycle_start + SLOT_JOB(job)->offset,
                        RTDM_TIMERMODE_REALTIME);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);
    rtskb = __rtskb_prio_dequeue(SLOT_JOB(job)->queue);
    if (!rtskb)
        return;
    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    rtmac_xmit(rtskb);

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);
}
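do_slot_job() is written to be entered and left with tdma->lock held: it drops the lock only around the absolute sleep until the slot and around the actual transmission. The following hypothetical caller sketch illustrates that hand-off; process_slot() and the way the slot is obtained are inventions for illustration, while the lock calls and fields are those used above.

/* hypothetical caller: takes tdma->lock around the call, while
 * do_slot_job() may temporarily drop and retake it internally */
static void process_slot(struct tdma_priv *tdma, struct tdma_slot *slot)
{
    rtdm_lockctx_t lockctx;

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);
    do_slot_job(tdma, slot, lockctx);
    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
}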
Example #3
static struct tdma_job *do_reply_cal_job(struct tdma_priv *tdma,
                                         struct tdma_reply_cal *job,
                                         rtdm_lockctx_t lockctx)
{
    struct tdma_job *prev_job;

    if (job->reply_cycle > tdma->current_cycle)
        return &job->head;

    /* remove the job */
    __list_del(job->head.entry.prev, job->head.entry.next);
    job->head.ref_count--;
    prev_job = tdma->current_job =
        list_entry(job->head.entry.prev, struct tdma_job, entry);
    prev_job->ref_count++;
    tdma->job_list_revision++;

    /* lock is held on entry; release it for the sleep and transmission */
    rtdm_lock_put_irqrestore(&tdma->lock, lockctx);

    if (job->reply_cycle == tdma->current_cycle) {
        /* send reply in the assigned slot */
        rtdm_task_sleep_abs(tdma->current_cycle_start + job->reply_offset,
                            RTDM_TIMERMODE_REALTIME);
        rtmac_xmit(job->reply_rtskb);
    } else {
        /* cleanup if cycle already passed */
        kfree_rtskb(job->reply_rtskb);
    }

    rtdm_lock_get_irqsave(&tdma->lock, lockctx);

    return prev_job;
}
int tdma_rt_packet_tx(struct rtskb *skb, struct rtnet_device *rtdev)
{
    struct rtmac_tdma *tdma = (struct rtmac_tdma *)rtdev->mac_priv->disc_priv;

    rtcap_mark_rtmac_enqueue(skb);

    if (tdma->flags.mac_active == 0)
        return rtmac_xmit(skb);

    rtskb_prio_queue_tail(&tdma->tx_queue, skb);

    return 0;
}
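In this RT path the caller's rtskb priority is preserved, so the slot job of example #2 later picks the highest-priority pending packet via __rtskb_prio_dequeue(); the non-RT variant below forces QUEUE_MIN_PRIO instead. A hypothetical sender could therefore select a per-packet priority before submitting; QUEUE_MAX_PRIO is an assumption here (the counterpart of QUEUE_MIN_PRIO used below), everything else comes from the examples.

/* hypothetical RT sender: pick a priority, then hand the packet to
 * the TDMA discipline; QUEUE_MAX_PRIO is assumed, not taken from the
 * examples in this listing */
static int send_rt_frame(struct rtskb *skb, struct rtnet_device *rtdev)
{
    skb->priority = QUEUE_MAX_PRIO;
    return tdma_rt_packet_tx(skb, rtdev);
}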
int tdma_nrt_packet_tx(struct rtskb *skb)
{
    struct rtmac_tdma *tdma =
        (struct rtmac_tdma *)skb->rtdev->mac_priv->disc_priv;

    rtcap_mark_rtmac_enqueue(skb);

    if (tdma->flags.mac_active == 0)
        return rtmac_xmit(skb);

    skb->priority = QUEUE_MIN_PRIO;
    rtskb_prio_queue_tail(&tdma->tx_queue, skb);

    return 0;
}
int nomac_rt_packet_tx(struct rtskb *rtskb, struct rtnet_device *rtdev)
{
    struct nomac_priv   *nomac;
    int                 ret;


    nomac = (struct nomac_priv *)rtdev->mac_priv->disc_priv;

    rtcap_mark_rtmac_enqueue(rtskb);

    /* no MAC: we simply transmit the packet under xmit_lock */
    rtos_res_lock(&rtdev->xmit_lock);
    ret = rtmac_xmit(rtskb);
    rtos_res_unlock(&rtdev->xmit_lock);

    return ret;
}
void nrt_xmit_task(int arg)
{
    struct rtskb        *rtskb;
    struct rtnet_device *rtdev;


    while (!shutdown) {
        if ((rtskb = rtskb_dequeue(&nrt_rtskb_queue))) {
            rtdev = rtskb->rtdev;

            /* no MAC: we simply transmit the packet under xmit_lock */
            rtos_res_lock(&rtdev->xmit_lock);
            rtmac_xmit(rtskb);
            rtos_res_unlock(&rtdev->xmit_lock);
        }
        rtos_event_sem_wait(&wakeup_sem);
    }
}
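The wrapper task, queue and semaphore used by nrt_xmit_task() have to be set up when the NoMAC discipline is brought up. The sketch below shows one plausible attach-time initialization; rtskb_queue_init() is a regular RTnet helper, but the rtos_* init calls, their argument lists and RTOS_LOWEST_RT_PRIORITY are assumptions modeled on the signal/wait calls used above, not verified signatures.

/* hypothetical setup of the non-RT transmit path; the rtos_* init
 * calls and the priority constant are assumptions */
static int nomac_nrt_init(void)
{
    rtskb_queue_init(&nrt_rtskb_queue);   /* start with an empty queue */
    rtos_event_sem_init(&wakeup_sem);     /* nothing pending yet */
    shutdown = 0;

    /* spawn the RT task that drains nrt_rtskb_queue */
    return rtos_task_init(&wrapper_task, nrt_xmit_task, 0,
                          RTOS_LOWEST_RT_PRIORITY);
}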