Code example #1
File: pipe.c Project: JackieXie168/xenomai
int xnpipe_mount(void)
{
	struct xnpipe_state *state;
	int i;

	for (state = &xnpipe_states[0];
	     state < &xnpipe_states[XNPIPE_NDEVS]; state++) {
		inith(&state->slink);
		inith(&state->alink);
		state->status = 0;
		state->asyncq = NULL;
		initq(&state->inq);
		initq(&state->outq);
	}

	initq(&xnpipe_sleepq);
	initq(&xnpipe_asyncq);

	xnpipe_class = class_create(THIS_MODULE, "rtpipe");
	if (IS_ERR(xnpipe_class)) {
		xnlogerr("error creating rtpipe class, err=%ld.\n",
			 PTR_ERR(xnpipe_class));
		return -EBUSY;
	}

	for (i = 0; i < XNPIPE_NDEVS; i++) {
		DECLARE_DEVHANDLE(cldev);
		cldev = wrap_device_create(xnpipe_class, NULL,
					   MKDEV(XNPIPE_DEV_MAJOR, i),
					   NULL, "rtp%d", i);
		if (IS_ERR(cldev)) {
			xnlogerr
			    ("can't add device class, major=%d, minor=%d, err=%ld\n",
			     XNPIPE_DEV_MAJOR, i, PTR_ERR(cldev));
			class_destroy(xnpipe_class);
			return -EBUSY;
		}
	}

	if (register_chrdev(XNPIPE_DEV_MAJOR, "rtpipe", &xnpipe_fops)) {
		xnlogerr
		    ("unable to reserve major #%d for message pipe support.\n",
		     XNPIPE_DEV_MAJOR);
		return -EBUSY;
	}

	xnpipe_wakeup_apc =
	    rthal_apc_alloc("pipe_wakeup", &xnpipe_wakeup_proc, NULL);

	return 0;
}
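
Every excerpt on this page follows the same reporting idiom: attempt a setup step, log a descriptive message through xnlogerr() when it fails, undo whatever was already established, and hand a negative errno back to the caller. The fragment below is a minimal sketch of that shape and is not taken from any of the listed projects; the "rtdemo" device name, the major number, and the empty file_operations table are hypothetical placeholders, and the include for xnlogerr() assumes a Xenomai 2.x nucleus tree.

#include <linux/fs.h>		/* register_chrdev(), unregister_chrdev() */
#include <nucleus/types.h>	/* assumed home of the xnlogerr() helper */

#define DEMO_DEV_MAJOR	150	/* hypothetical major number */

static struct file_operations demo_fops;	/* placeholder; a real driver fills this in */

int demo_mount(void)
{
	/* Reserve the character device major, as xnpipe_mount() does above. */
	if (register_chrdev(DEMO_DEV_MAJOR, "rtdemo", &demo_fops)) {
		/* Report the failure... */
		xnlogerr("unable to reserve major #%d for rtdemo support.\n",
			 DEMO_DEV_MAJOR);
		/* ...and return a negative errno to the caller. */
		return -EBUSY;
	}

	return 0;
}

void demo_umount(void)
{
	unregister_chrdev(DEMO_DEV_MAJOR, "rtdemo");
}
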
Code example #2
File: proc.c Project: ArcEye/RTAI
int rtdm_proc_register_device(struct rtdm_device *device)
{
	int ret;

	ret = xnvfile_init_dir(device->proc_name,
			       &device->vfroot, &rtdm_vfroot);
	if (ret)
		goto err_out;

	memset(&device->info_vfile, 0, sizeof(device->info_vfile));
	device->info_vfile.ops = &devinfo_vfile_ops;

	ret = xnvfile_init_regular("information", &device->info_vfile,
				   &device->vfroot);
	if (ret) {
		xnvfile_destroy_dir(&device->vfroot);
		goto err_out;
	}

	xnvfile_priv(&device->info_vfile) = device;

	return 0;

      err_out:
	xnlogerr("RTDM: error while creating device vfile\n");
	return ret;
}
Code example #3
File: intr.c Project: gongguowang/xenomai-1
static inline int xnintr_irq_detach(xnintr_t *intr)
{
	xnintr_irq_t *shirq = &xnirqs[intr->irq];
	xnintr_t *e, **p = &shirq->handlers;
	int err = 0;

	while ((e = *p) != NULL) {
		if (e == intr) {
			/* Remove the given interrupt object from the list. */
			xnlock_get(&shirq->lock);
			*p = e->next;
			xnlock_put(&shirq->lock);

			xnintr_sync_stat_references(intr);

			/* Release the IRQ line if this was the last user */
			if (shirq->handlers == NULL)
				err = xnarch_release_irq(intr->irq);

			return err;
		}
		p = &e->next;
	}

	xnlogerr("attempted to detach a non previously attached interrupt "
		 "object.\n");
	return err;
}
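
xnintr_irq_detach() above removes the descriptor from a singly linked handler list by walking a pointer to the link itself (xnintr_t *e, **p), so the unlink is a single assignment with no special case for the list head. The stand-alone snippet below reproduces only that idiom with made-up node types; it is an illustration, not Xenomai code.

#include <stddef.h>

struct node {
	int id;
	struct node *next;
};

/* Unlink 'victim' from the list headed at *head, mirroring the
 * shirq->handlers walk above: 'p' always points at the link that
 * would have to change, whether it is the head or an inner 'next'. */
static int list_remove(struct node **head, struct node *victim)
{
	struct node *e, **p = head;

	while ((e = *p) != NULL) {
		if (e == victim) {
			*p = e->next;	/* splice the node out in place */
			return 0;
		}
		p = &e->next;
	}

	return -1;		/* not found: the xnlogerr path in the original */
}
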
Code example #4
File: module.c Project: nosnilwar/rtai-raw-gov-3.9.1
/*
 * Low-level interrupt handler dispatching non-shared ISRs -- Called with
 * interrupts off.
 */
static void xnintr_irq_handler(unsigned irq, void *cookie)
{
	xnintr_t *intr;
	int s;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&xnirqs[irq].lock);

#ifdef CONFIG_SMP
	/* In SMP case, we have to reload the cookie under the per-IRQ lock
	   to avoid racing with xnintr_detach. */
	intr = xnarch_get_irq_cookie(irq);
	if (unlikely(!intr)) {
		s = 0;
		goto unlock_and_exit;
	}
#else
	/* cookie always valid, attach/detach happens with IRQs disabled */
	intr = cookie;
#endif
	s = intr->isr(intr);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++intr->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else {
		xnstat_counter_inc(&intr->stat[xnsched_cpu(sched)].hits);
		intr->unhandled = 0;
	}

#ifdef CONFIG_SMP
 unlock_and_exit:
#endif
	xnlock_put(&xnirqs[irq].lock);

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
Code example #5
File: module.c Project: nosnilwar/rtai-raw-gov-3.9.1
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared interrupts -- Called with interrupts off.
 */
static void xnintr_shirq_handler(unsigned irq, void *cookie)
{
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnintr_t *intr;
	int s = 0;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr) {
		int ret;

		ret = intr->isr(intr);
		s |= ret;

		if (ret & XN_ISR_HANDLED) {
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
		}

		intr = intr->next;
	}

	xnlock_put(&shirq->lock);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
Code example #6
File: module.c Project: JackieXie168/xenomai
int SKIN_INIT(psos)
{
	int err;

	initq(&__psos_global_rholder.smq);
	initq(&__psos_global_rholder.qq);
	initq(&__psos_global_rholder.ptq);
	initq(&__psos_global_rholder.rnq);

	err = xnpod_init();

	if (err != 0)
		return err;

	err = xntbase_alloc("psos", tick_arg * 1000, sync_time ? 0 : XNTBISO,
			    &psos_tbase);

	if (err != 0)
		goto fail;

	xntbase_start(psos_tbase);

	err = psosrn_init(module_param_value(rn0_size_arg));

	if (err != 0) {
	fail:
		xnpod_shutdown(err);
		xnlogerr("pSOS skin init failed, code %d.\n", err);
		return err;
	}

	psossem_init();
	psosqueue_init();
	psospt_init();
	psosasr_init();
	psostm_init();
	psostask_init(module_param_value(time_slice_arg));
#ifdef CONFIG_XENO_OPT_PERVASIVE
	psos_syscall_init();
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	xnprintf("starting pSOS+ services.\n");

	return err;
}
Code example #7
File: module.c Project: JackieXie168/xenomai
int SKIN_INIT(vxworks)
{
	int err;

	initq(&__wind_global_rholder.wdq);
	initq(&__wind_global_rholder.msgQq);
	initq(&__wind_global_rholder.semq);

	/* The following fields are unused in the global holder;
	   still, we initialize them not to leave such data in an
	   invalid state. */
	xnsynch_init(&__wind_global_rholder.wdsynch, XNSYNCH_FIFO, NULL);
	initq(&__wind_global_rholder.wdpending);
	__wind_global_rholder.wdcount = 0;

	err = xnpod_init();

	if (err != 0)
		goto fail_core;

	err = wind_sysclk_init(tick_arg * 1000);

	if (err != 0) {
		xnpod_shutdown(err);

	fail_core:
		xnlogerr("VxWorks skin init failed, code %d.\n", err);
		return err;
	}

	wind_wd_init();
	wind_task_hooks_init();
	wind_sem_init();
	wind_msgq_init();
	wind_task_init();
#ifdef CONFIG_XENO_OPT_PERVASIVE
	wind_syscall_init();
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	xnprintf("starting VxWorks services.\n");

	return 0;
}
Code example #8
File: module.c Project: nosnilwar/rtai-raw-gov-3.9.1
static inline int xnintr_irq_detach(xnintr_t *intr)
{
	xnintr_irq_t *shirq = &xnirqs[intr->irq];
	xnintr_t *e, **p = &shirq->handlers;
	int err = 0;

	if (intr->irq >= RTHAL_NR_IRQS)
		return -EINVAL;

	if (!__testbits(intr->flags, XN_ISR_ATTACHED))
		return -EPERM;

	__clrbits(intr->flags, XN_ISR_ATTACHED);

	while ((e = *p) != NULL) {
		if (e == intr) {
			/* Remove the given interrupt object from the list. */
			xnlock_get(&shirq->lock);
			*p = e->next;
			xnlock_put(&shirq->lock);
			if (shirq->handlers == NULL)
				err = xnarch_release_irq(intr->irq);

			return err;
		}
		p = &e->next;
	}

	xnlogerr("attempted to detach a non previously attached interrupt "
		 "object.\n");
	return err;
}
Code example #9
File: intr.c Project: gongguowang/xenomai-1
/*
 * Low-level interrupt handler dispatching non-shared ISRs -- Called with
 * interrupts off.
 */
static void xnintr_irq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched();
	xnstat_exectime_t *prev;
	struct xnintr *intr;
	xnticks_t start;
	int s;

	prev  = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();
	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&xnirqs[irq].lock);

#ifdef CONFIG_SMP
	/*
	 * In SMP case, we have to reload the cookie under the per-IRQ
	 * lock to avoid racing with xnintr_detach.  However, we
	 * assume that no CPU migration will occur while running the
	 * interrupt service routine, so the scheduler pointer will
	 * remain valid throughout this function.
	 */
	intr = xnarch_get_irq_cookie(irq);
	if (unlikely(!intr)) {
		s = 0;
		goto unlock_and_exit;
	}
#else
	/* cookie always valid, attach/detach happens with IRQs disabled */
	intr = cookie;
#endif
	s = intr->isr(intr);
	if (unlikely(s == XN_ISR_NONE)) {
		if (++intr->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else {
		xnstat_counter_inc(&intr->stat[xnsched_cpu(sched)].hits);
		xnstat_exectime_lazy_switch(sched,
			&intr->stat[xnsched_cpu(sched)].account,
			start);
		intr->unhandled = 0;
	}

#ifdef CONFIG_SMP
 unlock_and_exit:
#endif
	xnlock_put(&xnirqs[irq].lock);

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
}
Code example #10
File: intr.c Project: gongguowang/xenomai-1
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared edge-triggered interrupts -- Called with interrupts off.
 */
static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
{
	const int MAX_EDGEIRQ_COUNTER = 128;
	struct xnsched *sched = xnpod_current_sched();
	xnintr_irq_t *shirq = &xnirqs[irq];
	int s = 0, counter = 0, ret, code;
	struct xnintr *intr, *end = NULL;
	xnstat_exectime_t *prev;
	xnticks_t start;

	prev  = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();
	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->lflags, XNINIRQ);

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr != end) {
		xnstat_exectime_switch(sched,
			&intr->stat[xnsched_cpu(sched)].account);
		/*
		 * NOTE: We assume that no CPU migration will occur
		 * while running the interrupt service routine.
		 */
		ret = intr->isr(intr);
		code = ret & ~XN_ISR_BITMASK;
		s |= ret;

		if (code == XN_ISR_HANDLED) {
			end = NULL;
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
			xnstat_exectime_lazy_switch(sched,
				&intr->stat[xnsched_cpu(sched)].account,
				start);
			start = xnstat_exectime_now();
		} else if (end == NULL)
			end = intr;

		if (counter++ > MAX_EDGEIRQ_COUNTER)
			break;

		if (!(intr = intr->next))
			intr = shirq->handlers;
	}

	xnlock_put(&shirq->lock);

	if (counter > MAX_EDGEIRQ_COUNTER)
		xnlogerr
		    ("xnintr_edge_shirq_handler() : failed to get the IRQ%d line free.\n",
		     irq);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	xnstat_exectime_switch(sched, prev);

	if (--sched->inesting == 0) {
		__clrbits(sched->lflags, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
}
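
The loop in xnintr_edge_shirq_handler() is easiest to read as a generic "rescan until one full quiet pass" traversal: 'end' marks the first handler that reported nothing since the last successful one, the walk wraps around the list, and it stops either when it comes back to 'end' without anyone claiming the interrupt or when the MAX_EDGEIRQ_COUNTER safety limit trips. The user-space toy below reproduces only that control flow; the handler table and return codes are invented for illustration and are unrelated to the Xenomai types.

#include <stdio.h>

#define DEMO_HANDLED	1
#define DEMO_NONE	0
#define DEMO_MAX_PASSES	128	/* plays the role of MAX_EDGEIRQ_COUNTER */

struct demo_handler {
	int (*isr)(void);		/* returns DEMO_HANDLED or DEMO_NONE */
	struct demo_handler *next;
};

/* Keep cycling over the chained handlers until a complete pass yields
 * no DEMO_HANDLED result, or the safety counter gives up on the line. */
static void demo_dispatch(struct demo_handler *head)
{
	struct demo_handler *h = head, *end = NULL;
	int counter = 0;

	while (h != end) {
		if (h->isr() == DEMO_HANDLED)
			end = NULL;		/* progress: restart the quiet-pass check */
		else if (end == NULL)
			end = h;		/* first quiet handler of this pass */

		if (counter++ > DEMO_MAX_PASSES)
			break;			/* stuck edge-triggered line */

		h = h->next ? h->next : head;	/* wrap around the list */
	}
}

static int isr_a(void) { return DEMO_NONE; }
static int isr_b(void) { static int fired; return fired++ ? DEMO_NONE : DEMO_HANDLED; }

int main(void)
{
	struct demo_handler b = { isr_b, NULL };
	struct demo_handler a = { isr_a, &b };

	demo_dispatch(&a);
	puts("dispatch completed after one quiet pass");
	return 0;
}
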
Code example #11
File: module.c Project: gongguowang/xenomai-2.6.2.1
int SKIN_INIT(native)
{
	int err;

	initq(&__native_global_rholder.alarmq);
	initq(&__native_global_rholder.condq);
	initq(&__native_global_rholder.eventq);
	initq(&__native_global_rholder.heapq);
	initq(&__native_global_rholder.intrq);
	initq(&__native_global_rholder.mutexq);
	initq(&__native_global_rholder.pipeq);
	initq(&__native_global_rholder.queueq);
	initq(&__native_global_rholder.semq);
	initq(&__native_global_rholder.ioregionq);
	initq(&__native_global_rholder.bufferq);

	err = xnpod_init();
	if (err)
		goto fail;

	err = xntbase_alloc("native", tick_arg * 1000, 0, &__native_tbase);

	if (err)
		goto fail;

	xntbase_start(__native_tbase);

	err = __native_misc_pkg_init();

	if (err)
		goto cleanup_pod;

	err = __native_task_pkg_init();

	if (err)
		goto cleanup_misc;

	err = __native_sem_pkg_init();

	if (err)
		goto cleanup_task;

	err = __native_event_pkg_init();

	if (err)
		goto cleanup_sem;

	err = __native_mutex_pkg_init();

	if (err)
		goto cleanup_event;

	err = __native_cond_pkg_init();

	if (err)
		goto cleanup_mutex;

	err = __native_pipe_pkg_init();

	if (err)
		goto cleanup_cond;

	err = __native_queue_pkg_init();

	if (err)
		goto cleanup_pipe;

	err = __native_heap_pkg_init();

	if (err)
		goto cleanup_queue;

	err = __native_alarm_pkg_init();

	if (err)
		goto cleanup_heap;

	err = __native_intr_pkg_init();

	if (err)
		goto cleanup_alarm;

	err = __native_syscall_init();

	if (err)
		goto cleanup_intr;

	xnprintf("starting native API services.\n");

	return 0;		/* SUCCESS. */

      cleanup_intr:

	__native_intr_pkg_cleanup();

      cleanup_alarm:

	__native_alarm_pkg_cleanup();

      cleanup_heap:

	__native_heap_pkg_cleanup();

      cleanup_queue:

	__native_queue_pkg_cleanup();

      cleanup_pipe:

	__native_pipe_pkg_cleanup();

      cleanup_cond:

	__native_cond_pkg_cleanup();

      cleanup_mutex:

	__native_mutex_pkg_cleanup();

      cleanup_event:

	__native_event_pkg_cleanup();

      cleanup_sem:

	__native_sem_pkg_cleanup();

      cleanup_task:

	__native_task_pkg_cleanup();

      cleanup_misc:

	__native_misc_pkg_cleanup();

      cleanup_pod:

	xntbase_free(__native_tbase);

	xnpod_shutdown(err);

      fail:

	xnlogerr("native skin init failed, code %d.\n", err);
	return err;
}
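
The native skin init above is the fullest illustration of the goto-unwind convention shared by examples #6, #7, #15 and #16: every successful step gains a matching cleanup label, the labels undo the steps in reverse order, and the final fail label reports the error code through xnlogerr() before returning it. A condensed sketch of the same shape follows; the step_* helpers are hypothetical stand-ins, and xnlogerr() is assumed to be in scope as in the excerpts above.

/* Hypothetical per-package init/cleanup helpers standing in for the
 * __native_*_pkg_init()/__native_*_pkg_cleanup() pairs above. */
static int step_a_init(void)  { return 0; }
static int step_b_init(void)  { return 0; }
static int step_c_init(void)  { return 0; }
static void step_a_cleanup(void) { }
static void step_b_cleanup(void) { }

int demo_skin_init(void)
{
	int err;

	err = step_a_init();
	if (err)
		goto fail;

	err = step_b_init();
	if (err)
		goto cleanup_a;

	err = step_c_init();
	if (err)
		goto cleanup_b;

	return 0;

      cleanup_b:
	step_b_cleanup();

      cleanup_a:
	step_a_cleanup();

      fail:
	xnlogerr("demo skin init failed, code %d.\n", err);
	return err;
}
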
Code example #12
File: module.c Project: nosnilwar/rtai-raw-gov-3.9.1
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared edge-triggered interrupts -- Called with interrupts off.
 */
static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
{
	const int MAX_EDGEIRQ_COUNTER = 128;
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnintr_t *intr, *end = NULL;
	int s = 0, counter = 0;

	RTAI_SCHED_ISR_LOCK();

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr != end) {
		int ret, code;

		ret = intr->isr(intr);
		code = ret & ~XN_ISR_BITMASK;
		s |= ret;

		if (code == XN_ISR_HANDLED) {
			end = NULL;
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
		} else if (end == NULL)
			end = intr;

		if (counter++ > MAX_EDGEIRQ_COUNTER)
			break;

		if (!(intr = intr->next))
			intr = shirq->handlers;
	}

	xnlock_put(&shirq->lock);

	if (counter > MAX_EDGEIRQ_COUNTER)
		xnlogerr
		    ("xnintr_edge_shirq_handler() : failed to get the IRQ%d line free.\n",
		     irq);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	RTAI_SCHED_ISR_UNLOCK();
}
Code example #13
File: thread.c Project: JackieXie168/xenomai
int xnthread_init(struct xnthread *thread,
		  const struct xnthread_init_attr *attr,
		  struct xnsched *sched,
		  struct xnsched_class *sched_class,
		  const union xnsched_policy_param *sched_param)
{
	unsigned int stacksize = attr->stacksize;
	xnflags_t flags = attr->flags;
	struct xnarchtcb *tcb;
	int ret;

	/* Setup the TCB. */
	tcb = xnthread_archtcb(thread);
	xnarch_init_tcb(tcb);

	flags &= ~XNSUSP;
#ifndef CONFIG_XENO_HW_FPU
	flags &= ~XNFPU;
#endif
#ifdef __XENO_SIM__
	flags &= ~XNSHADOW;
#endif
	if (flags & (XNSHADOW|XNROOT))
		stacksize = 0;
	else {
		if (stacksize == 0) /* Pick a reasonable default. */
			stacksize = XNARCH_THREAD_STACKSZ;
		/* Align stack size on a natural word boundary */
		stacksize &= ~(sizeof(long) - 1);
	}

	if (flags & XNROOT)
		thread->idtag = 0;
	else
		thread->idtag = ++idtags ?: 1;

#if CONFIG_XENO_OPT_SYS_STACKPOOLSZ == 0
#ifndef __XENO_SIM__
	if (stacksize > 0) {
		xnlogerr("%s: cannot create kernel thread '%s' (CONFIG_XENO_OPT_SYS_STACKPOOLSZ == 0)\n",
			 __FUNCTION__, attr->name);
		return -ENOMEM;
	}
#endif
#else
	ret = xnarch_alloc_stack(tcb, stacksize);
	if (ret) {
		xnlogerr("%s: no stack for kernel thread '%s' (raise CONFIG_XENO_OPT_SYS_STACKPOOLSZ)\n",
			 __FUNCTION__, attr->name);
		return ret;
	}
#endif
	if (stacksize)
		memset(xnarch_stack_base(tcb), 0, stacksize);

	if (attr->name)
		xnobject_copy_name(thread->name, attr->name);
	else
		snprintf(thread->name, sizeof(thread->name), "%p", thread);

	xntimer_init(&thread->rtimer, attr->tbase, xnthread_timeout_handler);
	xntimer_set_name(&thread->rtimer, thread->name);
	xntimer_set_priority(&thread->rtimer, XNTIMER_HIPRIO);
	xntimer_init(&thread->ptimer, attr->tbase, xnthread_periodic_handler);
	xntimer_set_name(&thread->ptimer, thread->name);
	xntimer_set_priority(&thread->ptimer, XNTIMER_HIPRIO);

	thread->state = flags;
	thread->info = 0;
	thread->schedlck = 0;
	thread->signals = 0;
	thread->asrmode = 0;
	thread->asrimask = 0;
	thread->asr = XNTHREAD_INVALID_ASR;
	thread->asrlevel = 0;

	thread->ops = attr->ops;
	thread->rrperiod = XN_INFINITE;
	thread->rrcredit = XN_INFINITE;
	thread->wchan = NULL;
	thread->wwake = NULL;
	thread->wcontext = NULL;
	thread->hrescnt = 0;
	thread->errcode = 0;
	thread->registry.handle = XN_NO_HANDLE;
	thread->registry.waitkey = NULL;
	memset(&thread->stat, 0, sizeof(thread->stat));

	/* These will be filled by xnpod_start_thread() */
	thread->imask = 0;
	thread->imode = 0;
	thread->entry = NULL;
	thread->cookie = 0;

	inith(&thread->glink);
	initph(&thread->rlink);
	initph(&thread->plink);
#ifdef CONFIG_XENO_OPT_PRIOCPL
	initph(&thread->xlink);
	thread->rpi = NULL;
#endif /* CONFIG_XENO_OPT_PRIOCPL */
#ifdef CONFIG_XENO_OPT_SELECT
	thread->selector = NULL;
#endif /* CONFIG_XENO_OPT_SELECT */
	initpq(&thread->claimq);

	thread->sched = sched;
	thread->init_class = sched_class;
	thread->base_class = NULL; /* xnsched_set_policy() will set it. */
	thread->init_schedparam = *sched_param;
	ret = xnsched_init_tcb(thread);
	if (ret)
		goto fail;

	/*
	 * We must set the scheduling policy last; the scheduling
	 * class implementation code may need the TCB to be fully
	 * initialized to proceed.
	 */
	ret = xnsched_set_policy(thread, sched_class, sched_param);
	if (ret)
		goto fail;

	xnarch_init_display_context(thread);

	return 0;

fail:
#if CONFIG_XENO_OPT_SYS_STACKPOOLSZ > 0
	xnarch_free_stack(tcb);
#endif
	return ret;
}
Code example #14
/*
 * Low-level interrupt handler dispatching the user-defined ISRs for
 * shared interrupts -- Called with interrupts off.
 */
static void xnintr_shirq_handler(unsigned irq, void *cookie)
{
	struct xnsched *sched = xnpod_current_sched();
	xnintr_irq_t *shirq = &xnirqs[irq];
	xnstat_exectime_t *prev;
	xnticks_t start;
	xnintr_t *intr;
	int s = 0, ret;

	prev  = xnstat_exectime_get_current(sched);
	start = xnstat_exectime_now();
	trace_mark(xn_nucleus, irq_enter, "irq %u", irq);

	++sched->inesting;
	__setbits(sched->status, XNINIRQ);

	xnlock_get(&shirq->lock);
	intr = shirq->handlers;

	while (intr) {
		/*
		 * NOTE: We assume that no CPU migration will occur
		 * while running the interrupt service routine.
		 */
		ret = intr->isr(intr);
		s |= ret;

		if (ret & XN_ISR_HANDLED) {
			xnstat_counter_inc(
				&intr->stat[xnsched_cpu(sched)].hits);
			xnstat_exectime_lazy_switch(sched,
				&intr->stat[xnsched_cpu(sched)].account,
				start);
			start = xnstat_exectime_now();
		}

		intr = intr->next;
	}

	xnlock_put(&shirq->lock);

	if (unlikely(s == XN_ISR_NONE)) {
		if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
			xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
				 "line.\n", __FUNCTION__, irq);
			s |= XN_ISR_NOENABLE;
		}
	} else
		shirq->unhandled = 0;

	if (s & XN_ISR_PROPAGATE)
		xnarch_chain_irq(irq);
	else if (!(s & XN_ISR_NOENABLE))
		xnarch_end_irq(irq);

	if (--sched->inesting == 0) {
		__clrbits(sched->status, XNINIRQ);
		xnpod_schedule();
	}

	trace_mark(xn_nucleus, irq_exit, "irq %u", irq);
	xnstat_exectime_switch(sched, prev);
}
Code example #15
File: module.c Project: BhargavKola/xenomai-forge
int __init __xeno_sys_init(void)
{
	int ret;

	xnmod_sysheap_size = module_param_value(sysheap_size_arg) * 1024;

	ret = xnarch_init();
	if (ret)
		goto fail;

#ifndef __XENO_SIM__
	ret = xnheap_init_mapped(&__xnsys_global_ppd.sem_heap,
				 CONFIG_XENO_OPT_GLOBAL_SEM_HEAPSZ * 1024,
				 XNARCH_SHARED_HEAP_FLAGS);
	if (ret)
		goto cleanup_arch;

	xnheap_set_label(&__xnsys_global_ppd.sem_heap, "global sem heap");

	xnheap_init_vdso();
	init_hostrt();
#endif /* !__XENO_SIM__ */

#ifdef __KERNEL__
	xnpod_mount();
	xnintr_mount();

#ifdef CONFIG_XENO_OPT_PIPE
	ret = xnpipe_mount();
	if (ret)
		goto cleanup_proc;
#endif /* CONFIG_XENO_OPT_PIPE */

#ifdef CONFIG_XENO_OPT_SELECT
	ret = xnselect_mount();
	if (ret)
		goto cleanup_pipe;
#endif /* CONFIG_XENO_OPT_SELECT */

	ret = xnshadow_mount();
	if (ret)
		goto cleanup_select;

	ret = xnheap_mount();
	if (ret)
		goto cleanup_shadow;
#endif /* __KERNEL__ */

	xntbase_mount();

	xnloginfo("real-time nucleus v%s (%s) loaded.\n",
		  XENO_VERSION_STRING, XENO_VERSION_NAME);

#ifdef CONFIG_XENO_OPT_DEBUG
	xnloginfo("debug mode enabled.\n");
#endif

	initq(&xnmod_glink_queue);

	xeno_nucleus_status = 0;

	xnarch_cpus_and(nkaffinity, nkaffinity, xnarch_supported_cpus);

	return 0;

#ifdef __KERNEL__

      cleanup_shadow:

	xnshadow_cleanup();

      cleanup_select:

#ifdef CONFIG_XENO_OPT_SELECT
	xnselect_umount();

      cleanup_pipe:
#endif /* CONFIG_XENO_OPT_SELECT */

#ifdef CONFIG_XENO_OPT_PIPE
	xnpipe_umount();

      cleanup_proc:

#endif /* CONFIG_XENO_OPT_PIPE */

	xnpod_umount();

      cleanup_arch:

	xnarch_exit();

#endif /* __KERNEL__ */

      fail:

	xnlogerr("system init failed, code %d.\n", ret);

	xeno_nucleus_status = ret;

	return ret;
}
Code example #16
File: module.c Project: gongguowang/xenomai-2.6.2.1
int SKIN_INIT(uitron)
{
	int err;

	initq(&__ui_global_rholder.flgq);
	initq(&__ui_global_rholder.mbxq);
	initq(&__ui_global_rholder.semq);

	err = xnpod_init();

	if (err)
		goto fail;

	err = xntbase_alloc("uitron", tick_arg * 1000,
			    sync_time ? 0 : XNTBISO, &ui_tbase);

	if (err)
		goto cleanup_pod;

	xntbase_start(ui_tbase);

	err = uitask_init();

	if (err)
		goto cleanup_tbase;

	err = uisem_init();

	if (err)
		goto cleanup_task;

	err = uiflag_init();

	if (err)
		goto cleanup_sem;

	err = uimbx_init();

	if (err)
		goto cleanup_flag;

#ifdef CONFIG_XENO_OPT_PERVASIVE
	ui_syscall_init();
#endif /* CONFIG_XENO_OPT_PERVASIVE */

	xnprintf("starting uITRON services.\n");

	return 0;

cleanup_flag:

	uiflag_cleanup();

cleanup_sem:

	uisem_cleanup();

cleanup_task:

	uitask_cleanup();

cleanup_tbase:

	xntbase_free(ui_tbase);

cleanup_pod:

	xnpod_shutdown(err);

fail:

	xnlogerr("uITRON skin init failed, code %d.\n", err);

	return err;
}