Example #1
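This appears to be taken from the Xenomai switchtest test driver: a kernel task marks itself real-time, records the switch, and then either arms a wake-up timer or wakes the target task directly, using rtdm_nrtsig_pend() for a non-real-time target and rtdm_event_signal() for a real-time one, before blocking on its own rt_synch event.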
static int rtswitch_to_rt(rtswitch_context_t *ctx,
			  unsigned from_idx,
			  unsigned to_idx)
{
	rtswitch_task_t *from, *to;
	int rc;

	if (from_idx > ctx->tasks_count || to_idx > ctx->tasks_count)
		return -EINVAL;

	/* to == from is a special case which means
	   "return to the previous task". */
	if (to_idx == from_idx)
		to_idx = ctx->error.last_switch.from;

	from = &ctx->tasks[from_idx];
	to = &ctx->tasks[to_idx];

	from->base.flags |= RTSWITCH_RT;
	from->last_switch = ++ctx->switches_count;
	ctx->error.last_switch.from = from_idx;
	ctx->error.last_switch.to = to_idx;
	barrier();

	if (ctx->pause_us) {
		ctx->next_task = to_idx;
		barrier();
		rtdm_timer_start(&ctx->wake_up_delay,
				 ctx->pause_us * 1000, 0,
				 RTDM_TIMERMODE_RELATIVE);
		xnpod_lock_sched();
	} else
		switch (to->base.flags & RTSWITCH_RT) {
		case RTSWITCH_NRT:
			ctx->utask = to;
			barrier();
			rtdm_nrtsig_pend(&ctx->wake_utask);
			xnpod_lock_sched();
			break;

		case RTSWITCH_RT:
			xnpod_lock_sched();
			rtdm_event_signal(&to->rt_synch);
			break;

		default:
			return -EINVAL;
		}

	rc = rtdm_event_wait(&from->rt_synch);
	xnpod_unlock_sched();

	if (rc < 0)
		return rc;

	if (ctx->failed)
		return 1;

	return 0;
}
Example #2
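Error path of the same test driver, by the look of it: the context is flagged as failed and the first user-space task found is unblocked, again with rtdm_nrtsig_pend() or rtdm_event_signal() depending on its current mode, after which the caller suspends itself.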
static void handle_ktask_error(rtswitch_context_t *ctx, unsigned fp_val)
{
	unsigned i;
	
	ctx->failed = 1;
	ctx->error.fp_val = fp_val;

	for (i = 0; i < ctx->tasks_count; i++) {
		rtswitch_task_t *task = &ctx->tasks[i];

		/* Find the first non kernel-space task. */
		if ((task->base.flags & RTSWITCH_KERNEL))
			continue;

		/* Unblock it. */
		switch(task->base.flags & RTSWITCH_RT) {
		case RTSWITCH_NRT:
			rtswitch_utask[ctx->cpu] = task;
			rtdm_nrtsig_pend(&rtswitch_wake_utask);
			break;

		case RTSWITCH_RT:
			rtdm_event_signal(&task->rt_synch);
			break;
		}

		xnpod_suspend_self();
	}
}
Example #3
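Apparently from RTnet's RTcfg: the receive hook acquires the incoming rtskb into the module's buffer pool, queues it on rx_queue and signals rx_event so the processing task can pick it up; if the buffer cannot be acquired it is freed instead.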
static int rtcfg_rx_handler(struct rtskb *rtskb, struct rtpacket_type *pt)
{
    if (rtskb_acquire(rtskb, &rtcfg_pool) == 0) {
        rtdev_reference(rtskb->rtdev);
        rtskb_queue_tail(&rx_queue, rtskb);
        rtdm_event_signal(&rx_event);
    } else
        kfree_rtskb(rtskb);

    return 0;
}
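Example #4
RTnet's RTPC dispatcher, presumably: a call descriptor is allocated and copied, queued under pending_calls_lock, and announced with rtdm_event_signal(&dispatch_event); the caller then sleeps on a Linux wait queue until the real-time side marks the call as processed, or the timeout expires.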
int rtnet_rtpc_dispatch_call(rtpc_proc proc, unsigned int timeout,
                             void* priv_data, size_t priv_data_size,
                             rtpc_copy_back_proc copy_back_handler,
                             rtpc_cleanup_proc cleanup_handler)
{
    struct rt_proc_call *call;
    rtdm_lockctx_t      context;
    int                 ret;


    call = kmalloc(sizeof(struct rt_proc_call) + priv_data_size, GFP_KERNEL);
    if (call == NULL) {
        /* The descriptor could not be allocated, so run the caller-supplied
           cleanup handler directly instead of dereferencing the NULL pointer. */
        if (cleanup_handler != NULL)
            cleanup_handler(priv_data);
        return -ENOMEM;
    }

    memcpy(call->priv_data, priv_data, priv_data_size);

    call->processed       = 0;
    call->proc            = proc;
    call->result          = 0;
    call->cleanup_handler = cleanup_handler;
    atomic_set(&call->ref_count, 2);    /* dispatcher + rt-procedure */
    init_waitqueue_head(&call->call_wq);

    rtdm_lock_get_irqsave(&pending_calls_lock, context);
    list_add_tail(&call->list_entry, &pending_calls);
    rtdm_lock_put_irqrestore(&pending_calls_lock, context);

    rtdm_event_signal(&dispatch_event);

    if (timeout > 0) {
        ret = wait_event_interruptible_timeout(call->call_wq,
            call->processed, (timeout * HZ) / 1000);
        if (ret == 0)
            ret = -ETIME;
    } else
        ret = wait_event_interruptible(call->call_wq, call->processed);

    if (ret >= 0) {
        if (copy_back_handler != NULL)
            copy_back_handler(call, priv_data);
        ret = call->result;
    }

    if (atomic_dec_and_test(&call->ref_count)) {
        if (call->cleanup_handler != NULL)
            call->cleanup_handler(&call->priv_data);
        kfree(call);
    }

    return ret;
}
Example #5
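A demo RTDM interrupt handler: it fetches the device context with rtdm_irq_get_arg(), takes the per-device lock and signals irq_event so a waiting task can run; with TIMERINT defined it only signals every EVENT_SIGNAL_COUNT interrupts and propagates the timer interrupt to Linux. The events counter and EVENT_SIGNAL_COUNT are presumably file-scope definitions not shown here.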
static int demo_interrupt(rtdm_irq_t *irq_context)
{
    struct demodrv_context *ctx;
    int           dev_id;
//    timestamp, if needed, can be obtained like this:
//    u64           timestamp = rtdm_clock_read();
    int           ret = RTDM_IRQ_HANDLED; // usual return value

    ctx = rtdm_irq_get_arg(irq_context, struct demodrv_context);
    dev_id    = ctx->dev_id;

    rtdm_lock_get(&ctx->lock);

    // do stuff
#ifdef TIMERINT
    if (events > EVENT_SIGNAL_COUNT) {
        rtdm_event_signal(&ctx->irq_event);
        events = 0;
    }
#else
    rtdm_event_signal(&ctx->irq_event);
#endif
    events++;
        
    rtdm_lock_put(&ctx->lock);
    // those return values were dropped from the RTDM
    // ret = RTDM_IRQ_ENABLE | RTDM_IRQ_PROPAGATE;

#ifdef TIMERINT
    // Only propagate the timer interrupt, so that linux sees it.
    // Forwarding interrupts to the non-realtime domain is not a common
    //     use-case of realtime device drivers, so usually DON'T DO THIS.
    // But here we grab the important timer interrupt, so we need to propagate it.
    return XN_ISR_PROPAGATE;
#else
    // signal interrupt is handled and don't propagate the interrupt to linux
    return RTDM_IRQ_HANDLED;
#endif
}
Example #6
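Timer handler from the switchtest driver, apparently: when the programmed pause expires it wakes the next scheduled task, via rtdm_nrtsig_pend() for a user-space task and rtdm_event_signal() for a kernel-space one.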
static void timed_wake_up(rtdm_timer_t *timer)
{
	rtswitch_context_t *ctx =
		container_of(timer, rtswitch_context_t, wake_up_delay);
	rtswitch_task_t *task;

	task = &ctx->tasks[ctx->next_task];

	switch (task->base.flags & RTSWITCH_RT) {
	case RTSWITCH_NRT:
		rtswitch_utask[ctx->cpu] = task;
		rtdm_nrtsig_pend(&rtswitch_wake_utask);
		break;

	case RTSWITCH_RT:
		rtdm_event_signal(&task->rt_synch);
	}
}
Example #7
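Counterpart of Example #1: the task leaves real-time mode and wakes the target with up() (non-real-time) or rtdm_event_signal() (real-time), then blocks on its own nrt_synch semaphore.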
static int rtswitch_to_nrt(rtswitch_context_t *ctx,
			   unsigned from_idx,
			   unsigned to_idx)
{
	rtswitch_task_t *from, *to;

	if (from_idx > ctx->tasks_count || to_idx > ctx->tasks_count)
		return -EINVAL;

	from = &ctx->tasks[from_idx];
	to = &ctx->tasks[to_idx];

	from->base.flags &= ~RTSWITCH_RT;
	++ctx->switches_count;
	ctx->error.last_switch.from = from_idx;
	ctx->error.last_switch.to = to_idx;

	if (ctx->pause_us) {
		ctx->next_task = to_idx;
		rtdm_timer_start(&ctx->wake_up_delay,
				 ctx->pause_us * 1000, 0,
				 RTDM_TIMERMODE_RELATIVE);
	} else
		switch (to->base.flags & RTSWITCH_RT) {
		case RTSWITCH_NRT:
			up(&to->nrt_synch);
			break;

		case RTSWITCH_RT:
			rtdm_event_signal(&to->rt_synch);
			break;

		default:
			return -EINVAL;
		}

	if (down_interruptible(&from->nrt_synch))
		return -EINTR;

	if (ctx->failed)
		return 1;

	return 0;
}
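Example #8
A UART interrupt handler for an OMAP serial port, apparently from a driver experiment: it measures interrupt latency, walks the interrupt identification register in a loop and signals w_event_tx or w_event_rx so the corresponding transmit and receive paths can proceed. The printk() calls look like debug aids; they would normally be avoided in a real-time interrupt handler.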
static int rtdm_my_isr(rtdm_irq_t *irq_context)
{
	MY_DEV *up = rtdm_irq_get_arg(irq_context, MY_DEV);
	unsigned int iir, lsr;
	unsigned int type;
	irqreturn_t ret = IRQ_NONE;
	int err;
	int max_count = 256;
	rtdm_lockctx_t context1;

	up->systime1 = rtdm_clock_read();
	up->timeout = up->systime1 - up->systime;

	/* rtdm_clock_read() returns 64-bit nanoseconds, so print the
	   latency with a matching format. */
	printk("Interrupt Latency=%llu\n", (unsigned long long)up->timeout);

	up->systime1 = 0;
	up->systime = 0;

	printk("I am in rtdm_my_isr......!!!\n");
	printk("Local struct up=%p\n", up);

	err = rtdm_irq_disable(&up->irq_handle);
	if (err < 0)
		rtdm_printk("error in rtdm_irq_disable\n");
	rtdm_lock_get_irqsave(&up->lock, context1);

	do {
		iir = serial_in(up, UART_IIR);
		if (iir & UART_IIR_NO_INT)
			break;

		ret = IRQ_HANDLED;
		lsr = serial_in(up, UART_LSR);
		type = iir & 0x3e;

		switch (type) {
		case UART_IIR_THRI:
			printk("type of int: UART_IIR_THRI\n");
			transmit_chars(up, lsr);
			rtdm_event_signal(&up->w_event_tx);
			break;

		case UART_IIR_RX_TIMEOUT:
			/* FALLTHROUGH */
		case UART_IIR_RDI:
			printk("type of int: UART_IIR_RDI\n");
			serial_omap_rdi(up, lsr);
			rtdm_event_signal(&up->w_event_rx);
			break;

		case UART_IIR_RLSI:
			printk("type of int: UART_IIR_RLSI\n");
			/* serial_omap_rlsi(up, lsr); */
			break;

		case UART_IIR_CTS_RTS_DSR:
			break;

		case UART_IIR_XOFF:
			/* FALLTHROUGH */
		default:
			break;
		}
	} while (!(iir & UART_IIR_NO_INT) && max_count--);

	rtdm_lock_put_irqrestore(&up->lock, context1);
	err = rtdm_irq_enable(&up->irq_handle);
	if (err < 0)
		rtdm_printk("error in rtdm_irq_enable\n");

	printk("rtdm_irq ended\n");

	up->systime = rtdm_clock_read();

	return RTDM_IRQ_HANDLED;
}
Example #9
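A later variant of Example #7, presumably with FPU checking enabled: around each rtdm_event_signal() the caller loads a known value into the FPU registers and verifies it after the switch back, reporting any corruption through handle_ktask_error().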
static int rtswitch_to_nrt(rtswitch_context_t *ctx,
			   unsigned from_idx,
			   unsigned to_idx)
{
	rtswitch_task_t *from, *to;
	unsigned expected, fp_val;
	int fp_check;

	if (from_idx > ctx->tasks_count || to_idx > ctx->tasks_count)
		return -EINVAL;

	/* to == from is a special case which means
	   "return to the previous task". */
	if (to_idx == from_idx)
		to_idx = ctx->error.last_switch.from;

	from = &ctx->tasks[from_idx];
	to = &ctx->tasks[to_idx];

	fp_check = ctx->switches_count == from->last_switch + 1
		&& ctx->error.last_switch.from == to_idx
		&& ctx->error.last_switch.to == from_idx;

	from->base.flags &= ~RTSWITCH_RT;
	from->last_switch = ++ctx->switches_count;
	ctx->error.last_switch.from = from_idx;
	ctx->error.last_switch.to = to_idx;
	barrier();

	if (ctx->pause_us) {
		ctx->next_task = to_idx;
		barrier();
		rtdm_timer_start(&ctx->wake_up_delay,
				 ctx->pause_us * 1000, 0,
				 RTDM_TIMERMODE_RELATIVE);
	} else
		switch (to->base.flags & RTSWITCH_RT) {
		case RTSWITCH_NRT:
		switch_to_nrt:
			up(&to->nrt_synch);
			break;

		case RTSWITCH_RT:

			if (!fp_check || fp_linux_begin() < 0) {
				fp_check = 0;
				goto signal_nofp;
			}

			expected = from_idx + 500 +
				(ctx->switches_count % 4000000) * 1000;

			fp_regs_set(expected);
			rtdm_event_signal(&to->rt_synch);
			fp_val = fp_regs_check(expected);
			fp_linux_end();

			if(down_interruptible(&from->nrt_synch))
				return -EINTR;
			if (ctx->failed)
				return 1;
			if (fp_val != expected) {
				handle_ktask_error(ctx, fp_val);
				return 1;
			}

			from->base.flags &= ~RTSWITCH_RT;
			from->last_switch = ++ctx->switches_count;
			ctx->error.last_switch.from = from_idx;
			ctx->error.last_switch.to = to_idx;
			if ((to->base.flags & RTSWITCH_RT) == RTSWITCH_NRT)
				goto switch_to_nrt;
			expected = from_idx + 500 +
				(ctx->switches_count % 4000000) * 1000;
			barrier();

			fp_linux_begin();
			fp_regs_set(expected);
			rtdm_event_signal(&to->rt_synch);
			fp_val = fp_regs_check(expected);
			fp_linux_end();

			if (down_interruptible(&from->nrt_synch))
				return -EINTR;
			if (ctx->failed)
				return 1;
			if (fp_val != expected) {
				handle_ktask_error(ctx, fp_val);
				return 1;
			}

			from->base.flags &= ~RTSWITCH_RT;
			from->last_switch = ++ctx->switches_count;
			ctx->error.last_switch.from = from_idx;
			ctx->error.last_switch.to = to_idx;
			barrier();
			if ((to->base.flags & RTSWITCH_RT) == RTSWITCH_NRT)
				goto switch_to_nrt;

		signal_nofp:
			rtdm_event_signal(&to->rt_synch);
			break;

		default:
			return -EINVAL;
		}

	if (down_interruptible(&from->nrt_synch))
		return -EINTR;

	if (ctx->failed)
		return 1;

	return 0;
}
Example #10
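ioctl handler of an RTDM test device, by the look of it: each request exercises one RTDM synchronization service (timed and plain waits on semaphores, events and mutexes, rtdm_event_signal(), rtdm_nrtsig_pend(), task creation and destruction), optionally driven by a timeout sequence set up with rtdm_toseq_init().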
static int rtdmtest_ioctl(struct rtdm_dev_context *context,
			  rtdm_user_info_t *user_info,
			  unsigned int request,
			  void *arg)
{
	struct rtdmtest_context *ctx;
	struct rttst_rtdmtest_config config_buf, *config;
	rtdm_toseq_t toseq_local, *toseq = NULL;
	int i, err = 0;

	ctx = (struct rtdmtest_context *)context->dev_private;

	switch (request) {
	case RTTST_RTIOC_RTDMTEST_SEM_TIMEDDOWN:
	case RTTST_RTIOC_RTDMTEST_EVENT_TIMEDWAIT:
	case RTTST_RTIOC_RTDMTEST_MUTEX_TIMEDTEST:
	case RTTST_RTIOC_RTDMTEST_MUTEX_TEST:
		config = arg;
		if (user_info) {
			if (rtdm_safe_copy_from_user
			    (user_info, &config_buf, arg,
			     sizeof(struct rttst_rtdmtest_config)) < 0)
				return -EFAULT;

			config = &config_buf;
		}
		if (!config->seqcount)
			config->seqcount = 1;
		if (config->timeout && config->seqcount > 1) {
			toseq = &toseq_local;
			rtdm_toseq_init(toseq, config->timeout);
		}
		switch(request) {
		case RTTST_RTIOC_RTDMTEST_SEM_TIMEDDOWN:
			for (i = 0; i < config->seqcount; i++) {
				err = rtdm_sem_timeddown(&ctx->sem,
							 config->timeout,
							 toseq);
				if (err)
					break;
			}
			break;
		case RTTST_RTIOC_RTDMTEST_EVENT_TIMEDWAIT:
			for (i = 0; i < config->seqcount; i++) {
				err = rtdm_event_timedwait(&ctx->event,
							   config->timeout,
							   toseq);
				if (err)
					break;
			}
			break;
		case RTTST_RTIOC_RTDMTEST_MUTEX_TIMEDTEST:
			for (i = 0; i < config->seqcount; i++) {
				err = rtdm_mutex_timedlock(&ctx->mutex,
							   config->timeout,
							   toseq);
				if (err)
					break;
				if (config->delay_jiffies) {
					__set_current_state(TASK_INTERRUPTIBLE);
					schedule_timeout(config->delay_jiffies);
				}
				rtdm_lock_count++;
				rtdm_mutex_unlock(&ctx->mutex);
			}
			break;
		case RTTST_RTIOC_RTDMTEST_MUTEX_TEST:
			for (i = 0; i < config->seqcount; i++) {
				if ((err = rtdm_mutex_lock(&ctx->mutex)))
					break;
				rtdm_lock_count++;
				rtdm_mutex_unlock(&ctx->mutex);
			}
			break;
		}
		break;

	case RTTST_RTIOC_RTDMTEST_SEM_DOWN:
		err = rtdm_sem_down(&ctx->sem);
		break;

	case RTTST_RTIOC_RTDMTEST_SEM_UP:
		rtdm_sem_up(&ctx->sem);
		break;

	case RTTST_RTIOC_RTDMTEST_SEM_DESTROY:
		rtdm_sem_destroy(&ctx->sem);
		break;

	case RTTST_RTIOC_RTDMTEST_EVENT_WAIT:
		err = rtdm_event_wait(&ctx->event);
		break;

	case RTTST_RTIOC_RTDMTEST_EVENT_SIGNAL:
		rtdm_event_signal(&ctx->event);
		break;

	case RTTST_RTIOC_RTDMTEST_EVENT_DESTROY:
		rtdm_event_destroy(&ctx->event);
		break;

	case RTTST_RTIOC_RTDMTEST_MUTEX_DESTROY:
		rtdm_mutex_destroy(&ctx->mutex);
		break;

	case RTTST_RTIOC_RTDMTEST_MUTEX_GETSTAT:
		printk("RTTST_RTIOC_RTDMTEST_MUTEX_GETSTAT\n");
		if (user_info)
			config = &config_buf;
		else
			config = arg;
		config->seqcount = rtdm_lock_count;
		if (user_info) {
			if (rtdm_safe_copy_to_user
			    (user_info, arg, &config_buf,
			     sizeof(struct rttst_rtdmtest_config)) < 0)
				return -EFAULT;
		}
		break;

	case RTTST_RTIOC_RTDMTEST_NRTSIG_PEND:
		rtdm_nrtsig_pend(&ctx->nrtsig);
		break;

	case RTTST_RTIOC_RTDMTEST_TASK_CREATE:
	case RTTST_RTIOC_RTDMTEST_TASK_SET_PRIO:
		config = arg;
		if (user_info) {
			if (rtdm_safe_copy_from_user
			    (user_info, &config_buf, arg,
			     sizeof(struct rttst_rtdmtest_config)) < 0)
				return -EFAULT;

			config = &config_buf;
		}
		if (request == RTTST_RTIOC_RTDMTEST_TASK_CREATE) {
			task_period = config->timeout;
			rtdm_task_init(&task, "RTDMTEST",
				       rtdmtest_task, (void *)config,
				       config->priority, 0);
		} else {
			rtdm_task_set_priority(&task, config->priority);
		}
		break;

	case RTTST_RTIOC_RTDMTEST_TASK_DESTROY:
		rtdm_task_destroy(&task);
		rtdm_task_join_nrt(&task, 100);
		break;

	default:
		printk("request=%d\n", request);
		err = -ENOTTY;
	}

	return err;
}