Example 1
File: mbx.c Project: Enextuse/RTAI
static int mbx_wait_until(MBX *mbx, int *fravbs, RTIME time, RT_TASK *rt_current)
{
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if (!(*fravbs))
	{
		void *retp;
		rt_current->blocked_on = (void *)mbx;
		mbx->waiting_task = rt_current;
		if ((rt_current->resume_time = time) > rt_smp_time_h[rtai_cpuid()])
		{
			rt_current->state |= (RT_SCHED_MBXSUSP | RT_SCHED_DELAYED);
			rem_ready_current(rt_current);
			enq_timed_task(rt_current);
			rt_schedule();
		}
		if (unlikely((retp = rt_current->blocked_on) != NULL))
		{
			mbx->waiting_task = NULL;
			rt_global_restore_flags(flags);
			return likely(retp > RTP_HIGERR) ? RTE_TIMOUT : (retp == RTP_UNBLKD ? RTE_UNBLKD : RTE_OBJREM);
		}
	}
	rt_global_restore_flags(flags);
	return 0;
}
Example 2
File: mbx.c Project: Enextuse/RTAI
/**
 * @brief Sends a message, overwriting what is already in the buffer
 * if there is no room for the message.
 *
 * rt_mbx_ovrwr_send sends the message @e msg of @e msg_size bytes
 * to the mailbox @e mbx, overwriting what is already in the mailbox
 * buffer if there is no room for the message. Useful for logging
 * purposes. It returns immediately and the caller is never blocked.
 *
 * @return On success, 0 is returned. On failure a negative value
 * is returned as described below:
 * - @b EINVAL: @e mbx points to an invalid mailbox.
 */
RTAI_SYSCALL_MODE int _rt_mbx_ovrwr_send(MBX *mbx, void *msg, int msg_size, int space)
{
	unsigned long flags;
	RT_TASK *rt_current = RT_CURRENT;

	CHK_MBX_MAGIC;

	flags = rt_global_save_flags_and_cli();
	if (mbx->sndsem.count > 0)
	{
		mbx->sndsem.count = 0;
		if (mbx->sndsem.type > 0)
		{
			mbx->sndsem.owndby = rt_current;
			enqueue_resqel(&mbx->sndsem.resq, rt_current);
		}
		rt_global_restore_flags(flags);
		msg_size = mbxovrwrput(mbx, (char **)(&msg), msg_size, space);
		mbx_signal(mbx);
		rt_sem_signal(&mbx->sndsem);
	}
	else
	{
		rt_global_restore_flags(flags);
	}
	return msg_size;
}
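A minimal usage sketch to go with the comment above: it assumes the usual kernel-side wrappers rt_mbx_init() and rt_mbx_ovrwr_send() declared in rtai_mbx.h (the latter lands in the _rt_mbx_ovrwr_send entry shown here); LOG_MBX_SIZE and struct log_entry are illustrative names, not part of RTAI.

/* Usage sketch (assumptions: rt_mbx_init()/rt_mbx_ovrwr_send() wrappers from
 * rtai_mbx.h; LOG_MBX_SIZE and struct log_entry are illustrative). */
#include <rtai_sched.h>
#include <rtai_mbx.h>

#define LOG_MBX_SIZE 256

static MBX log_mbx;

struct log_entry {
	RTIME stamp;
	int value;
};

static int log_init(void)
{
	/* Allocate a mailbox with a LOG_MBX_SIZE byte buffer. */
	return rt_mbx_init(&log_mbx, LOG_MBX_SIZE);
}

static void log_sample(int value)
{
	struct log_entry e = { rt_get_time(), value };

	/* Never blocks: if the buffer is full, the oldest bytes are overwritten. */
	rt_mbx_ovrwr_send(&log_mbx, &e, sizeof(e));
}

Because the call never blocks, a producer can log from a hard real-time path without waiting on a full buffer, which is exactly the logging use case named in the comment.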
Example 3
static inline int tbx_wait_room_until(TBX *tbx, int *fravbs, int msgsize, RTIME time, RT_TASK *rt_current)
{
	int timed = 0;
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if ((*fravbs) < msgsize) {
		tbx->waiting_nr++;
		rt_current->blocked_on = SOMETHING;
		rt_current->resume_time = time;
		rt_current->state |= DELAYED;
		rt_rem_ready_current(rt_current);
		tbx->waiting_task = rt_current;
		rt_enq_timed_task(rt_current);
		rt_schedule();
		if (rt_current->blocked_on) {
			tbx->waiting_nr--;
			rt_current->blocked_on = NOTHING;
			tbx->waiting_task = NOTHING;
			timed = 1;
		}
	}
	rt_global_restore_flags(flags);
	return timed;
}
Example 4
File: mbx.c Project: ArcEye/RTAI
/**
 * @brief Receives as many bytes as are available, without blocking the
 * calling task.
 *
 * rt_mbx_receive_wp receives at most @e msg_size bytes of message
 * from the mailbox @e mbx, then returns immediately.
 *
 * @param mbx is a pointer to a user allocated mailbox structure.
 *
 * @param msg points to a buffer provided by the caller.
 *
 * @param msg_size corresponds to the size of the message to be received.
 *
 * @return On success, the number of bytes not received is returned. On
 * failure a negative value is returned as described below:
 * - @b EINVAL: @e mbx does not point to a valid mailbox.
 */
RTAI_SYSCALL_MODE int _rt_mbx_receive_wp(MBX *mbx, void *msg, int msg_size, int space)
{
	unsigned long flags;
	RT_TASK *rt_current = RT_CURRENT;
	int size = msg_size;

	CHK_MBX_MAGIC;
	flags = rt_global_save_flags_and_cli();
	if (mbx->rcvsem.count > 0 && mbx->avbs) {
		mbx->rcvsem.count = 0;
		if (mbx->rcvsem.type > 0) {
			mbx->rcvsem.owndby = rt_current;
			enqueue_resqel(&mbx->rcvsem.resq, rt_current);
		}
		rt_global_restore_flags(flags);
		msg_size = mbxget(mbx, (char **)(&msg), msg_size, space);
		mbx_signal(mbx);
		rt_sem_signal(&mbx->rcvsem);
	} else {
		rt_global_restore_flags(flags);
	}
	if (msg_size < size) {
		rt_wakeup_pollers(&mbx->poll_send, 0);
	}
	return msg_size;
}
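To complement the description above, a minimal sketch of a non-blocking drain; it assumes the rt_mbx_receive_wp() wrapper from rtai_mbx.h, and drain_mbx() is an illustrative helper, not an RTAI call.

/* Usage sketch (assumption: rt_mbx_receive_wp() wrapper from rtai_mbx.h;
 * drain_mbx() is an illustrative helper). */
#include <rtai_mbx.h>

static int drain_mbx(MBX *mbx, void *buf, int bufsize)
{
	/* rt_mbx_receive_wp returns the number of bytes it could NOT receive,
	 * so the amount actually copied into buf is the difference.
	 * The call never suspends the caller. */
	int not_received = rt_mbx_receive_wp(mbx, buf, bufsize);

	return bufsize - not_received;
}

The helper simply inverts the return convention documented above, turning "bytes not received" into "bytes actually read".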
Example 5
static inline int tbxput(TBX *tbx, char **msg, int msg_size, unsigned char type)
{
	int tocpy, last_byte;
	unsigned long flags;
	int msgpacksize;

	msgpacksize = msg_size + sizeof(type);
	flags = rt_global_save_flags_and_cli();
	while (tbx->frbs && msgpacksize > 0) {
		last_byte = TBX_MOD_SIZE(tbx->fbyte + tbx->avbs);
		tocpy = tbx->size - last_byte;
		if (tocpy > msgpacksize) {
			tocpy = msgpacksize;
		}
//		rt_global_restore_flags(flags);
		if (type != TYPE_NONE) {
			tocpy = sizeof(type);
			*(tbx->bufadr + last_byte) = type;
			msgpacksize -= tocpy;
			type = TYPE_NONE;
		} else {
			memcpy(tbx->bufadr + last_byte, *msg, tocpy);
			msgpacksize -= tocpy;
			*msg += tocpy;
		}
//		flags = rt_global_save_flags_and_cli();
		tbx->frbs -= tocpy;
		tbx->avbs += tocpy;
	}
	rt_global_restore_flags(flags);
	return msgpacksize;
}
Example 6
static inline int tbxget(TBX *tbx, char **msg, int msg_size)
{
	int tocpy;
	unsigned long flags;
    
	flags = rt_global_save_flags_and_cli();
	while (tbx->avbs && msg_size > 0) {
		tocpy = tbx->size - tbx->fbyte;
		if (tocpy > msg_size) {
			tocpy = msg_size;
		}
		if (tocpy > tbx->avbs) {
			tocpy = tbx->avbs;
		}
//		rt_global_restore_flags(flags);
		memcpy(*msg, tbx->bufadr + tbx->fbyte, tocpy);
		msg_size  -= tocpy;
		*msg      += tocpy;
//		flags = rt_global_save_flags_and_cli();
		tbx->fbyte = TBX_MOD_SIZE(tbx->fbyte + tocpy);
		tbx->frbs += tocpy;
		tbx->avbs -= tocpy;
	}
	rt_global_restore_flags(flags);
	return msg_size;
}
Example 7
RTAI_SYSCALL_MODE unsigned long rt_bits_reset(BITS *bits, unsigned long mask)
{
	unsigned long flags, schedmap, oldmask;
	RT_TASK *task;
	QUEUE *q;

	CHECK_BITS_MAGIC(bits);

	schedmap = 0;
	q = &bits->queue;
	flags = rt_global_save_flags_and_cli();
	oldmask = bits->mask;
	bits->mask = mask;
	while ((q = q->next) != &bits->queue) {
		dequeue_blocked(task = q->task);
		rem_timed_task(task);
		if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
			enq_ready_task(task);
#ifdef CONFIG_SMP
			set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
#endif
		}
	}
	bits->queue.prev = bits->queue.next = &bits->queue;
	RT_SCHEDULE_MAP(schedmap);
	rt_global_restore_flags(flags);
	return oldmask;
}
Example 8
int rt_bits_delete(BITS *bits)
{
	unsigned long flags, schedmap;
	RT_TASK *task;
	QUEUE *q;

	CHECK_BITS_MAGIC(bits);

	schedmap = 0;
	q = &bits->queue;
	flags = rt_global_save_flags_and_cli();
	bits->magic = 0;
	while ((q = q->next) != &bits->queue && (task = q->task)) {
		rem_timed_task(task);
		if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
			task->blocked_on = RTP_OBJREM;
			enq_ready_task(task);
#ifdef CONFIG_SMP
			set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
#endif
		}
	}
	RT_SCHEDULE_MAP(schedmap);
	rt_global_restore_flags(flags);
	return 0;
}
Example 9
RTAI_SYSCALL_MODE int _rt_bits_wait_if(BITS *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, unsigned long *resulting_mask, int space)
{
	unsigned long flags, mask;
	int retval;

	CHECK_BITS_MAGIC(bits);

	flags = rt_global_save_flags_and_cli();
	mask = bits->mask;
	if (test_fun[testfun](bits, testmasks)) {
		exec_fun[exitfun](bits, exitmasks);
		retval = 1;
	} else {
		retval = 0;
	}
	rt_global_restore_flags(flags);
	if (resulting_mask) {
		if (space) {
			*resulting_mask = mask;
		} else {
			rt_copy_to_user(resulting_mask, &mask, sizeof(mask));
		}
	}
	return retval;
}
Example 10
RTAI_SYSCALL_MODE unsigned long rt_bits_signal(BITS *bits, int setfun, unsigned long masks)
{
	unsigned long flags, schedmap;
	RT_TASK *task;
	QUEUE *q;

	CHECK_BITS_MAGIC(bits);

	schedmap = 0;
	q = &bits->queue;
	flags = rt_global_save_flags_and_cli();
	exec_fun[setfun](bits, masks);
	masks = bits->mask;
	while ((q = q->next) != &bits->queue) {
		task = q->task;
		if (test_fun[TEST_FUN(task)](bits, TEST_MASK(task))) {
			dequeue_blocked(task);
			rem_timed_task(task);
			if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
				enq_ready_task(task);
#ifdef CONFIG_SMP
				set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
#endif
			}
		}
	}
	RT_SCHEDULE_MAP(schedmap);
	rt_global_restore_flags(flags);
	return masks;
}
Example 11
static inline void asgn_min_prio(void)
{
// find minimum priority in timers_struct 
	struct rt_tasklet_struct *timer;
	unsigned long flags;
	int priority;

	priority = (timer = timers_list.next)->priority;
	flags = rt_spin_lock_irqsave(&timers_lock);
	while ((timer = timer->next) != &timers_list) {
		if (timer->priority < priority) {
			priority = timer->priority;
		}
	}
	rt_spin_unlock_irqrestore(flags, &timers_lock);
	flags = rt_global_save_flags_and_cli();
	if (timers_manager.priority > priority) {
		timers_manager.priority = priority;
		if (timers_manager.state == READY || timers_manager.state == (READY | RUNNING)) {
			rt_rem_ready_task(&timers_manager);
			rt_enq_ready_task(&timers_manager);
		}
	}
	rt_global_restore_flags(flags);
}
Example 12
static inline void asgn_min_prio(int cpuid)
{
// find minimum priority in timers_struct
	RT_TASK *timer_manager;
	struct rt_tasklet_struct *timer, *timerl;
	spinlock_t *lock;
	unsigned long flags;
	int priority;

	priority = (timer = (timerl = &timers_list[LIST_CPUID])->next)->priority;
	flags = rt_spin_lock_irqsave(lock = &timers_lock[LIST_CPUID]);
	while ((timer = timer->next) != timerl) {
		if (timer->priority < priority) {
			priority = timer->priority;
		}
		rt_spin_unlock_irqrestore(flags, lock);
		flags = rt_spin_lock_irqsave(lock);
	}
	rt_spin_unlock_irqrestore(flags, lock);
	flags = rt_global_save_flags_and_cli();
	if ((timer_manager = &timers_manager[LIST_CPUID])->priority > priority) {
		timer_manager->priority = priority;
		if (timer_manager->state == RT_SCHED_READY) {
			rem_ready_task(timer_manager);
			enq_ready_task(timer_manager);
		}
	}
	rt_global_restore_flags(flags);
}
Example 13
static inline int tbx_smx_wait_until(TBX *tbx, SEM *smx, RTIME time, RT_TASK *rt_current)
{
	int timed = 0;
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if (!(smx->count)) {
		tbx->waiting_nr++;
		rt_current->blocked_on = &smx->queue;
		rt_current->resume_time = time;
		rt_current->state |= (SEMAPHORE | DELAYED);
		rt_rem_ready_current(rt_current);
		enqueue_blocked(rt_current, &smx->queue, smx->qtype);
		rt_enq_timed_task(rt_current);
		rt_schedule();
		if (rt_current->blocked_on) {
			dequeue_blocked(rt_current);
			timed = 1;
			tbx->waiting_nr--;
		}
	} else {
		smx->count = 0;
	}
	rt_global_restore_flags(flags);
	return timed;
}
Example 14
File: bits.c Project: Enextuse/RTAI
RTAI_SYSCALL_MODE int _rt_bits_wait(BITS *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, unsigned long *resulting_mask, int space)
{
	RT_TASK *rt_current;
	unsigned long flags, mask = 0;
	int retval;

	CHECK_BITS_MAGIC(bits);

	flags = rt_global_save_flags_and_cli();
	if (!test_fun[testfun](bits, testmasks))
	{
		void *retpnt;
		long bits_test[2];
		rt_current = RT_CURRENT;
		TEST_BUF(rt_current, bits_test);
		TEST_FUN(rt_current)  = testfun;
		TEST_MASK(rt_current) = testmasks;
		rt_current->state |= RT_SCHED_SEMAPHORE;
		rem_ready_current(rt_current);
		enqueue_blocked(rt_current, &bits->queue, 1);
		rt_schedule();
		if (unlikely((retpnt = rt_current->blocked_on) != NULL))
		{
			if (likely(retpnt != RTP_OBJREM))
			{
				dequeue_blocked(rt_current);
				retval = RTE_UNBLKD;
			}
			else
			{
				rt_current->prio_passed_to = NULL;
				retval = RTE_OBJREM;
			}
			goto retmask;
		}
	}
	retval = 0;
	mask = bits->mask;
	exec_fun[exitfun](bits, exitmasks);
retmask:
	rt_global_restore_flags(flags);
	if (resulting_mask)
	{
		if (space)
		{
			*resulting_mask = mask;
		}
		else
		{
			rt_copy_to_user(resulting_mask, &mask, sizeof(mask));
		}
	}
	return retval;
}
Example 15
RTAI_SYSCALL_MODE int rt_insert_timer(struct rt_tasklet_struct *timer, int priority, RTIME firing_time, RTIME period, void (*handler)(unsigned long), unsigned long data, int pid)
{
	spinlock_t *lock;
	unsigned long flags, cpuid;
	RT_TASK *timer_manager;

// timer initialization
	timer->uses_fpu    = 0;

	if (pid >= 0) {
		if (!handler) {
			return -EINVAL;
		}
		timer->handler   = handler;
		timer->data      = data;
	} else {
		if (timer->handler != NULL || timer->handler == (void *)1) {
			timer->handler = (void *)1;
			timer->data    = data;
		}
	}

	timer->priority    = priority;
	REALTIME2COUNT(firing_time)
	timer->firing_time = firing_time;
	timer->period      = period;

	if (!pid) {
		timer->task = 0;
		timer->cpuid = cpuid = NUM_CPUS > 1 ? rtai_cpuid() : 0;
	} else {
		timer->cpuid = cpuid = NUM_CPUS > 1 ? (timer->task)->runnable_on_cpus : 0;
		(timer->task)->priority = priority;
		rt_copy_to_user(timer->usptasklet, timer, sizeof(struct rt_usp_tasklet_struct));
	}
// timer insertion in timers_list
	flags = rt_spin_lock_irqsave(lock = &timers_lock[LIST_CPUID]);
	enq_timer(timer);
	rt_spin_unlock_irqrestore(flags, lock);
// timers_manager priority inheritance
	if (timer->priority < (timer_manager = &timers_manager[LIST_CPUID])->priority) {
		timer_manager->priority = timer->priority;
	}
// timers_task deadline inheritance
	flags = rt_global_save_flags_and_cli();
	if (timers_list[LIST_CPUID].next == timer && (timer_manager->state & RT_SCHED_DELAYED) && firing_time < timer_manager->resume_time) {
		timer_manager->resume_time = firing_time;
		rem_timed_task(timer_manager);
		enq_timed_task(timer_manager);
		rt_schedule();
	}
	rt_global_restore_flags(flags);
	return 0;
}
Example 16
static inline int tbx_check_room(TBX *tbx, int *fravbs, int msgsize)
{
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if (((*fravbs) < msgsize) || !tbx->sndsmx.count) {
		rt_global_restore_flags(flags);
		return 0;    
	}
	tbx->sndsmx.count = 0;
	rt_global_restore_flags(flags);
	return msgsize;
}
Example 17
void rt_set_timer_firing_time(struct rt_tasklet_struct *timer, RTIME firing_time)
{
	unsigned long flags;

	set_timer_firing_time(timer, firing_time);
	flags = rt_global_save_flags_and_cli();
	if (timers_list.next == timer && (timers_manager.state & DELAYED) && firing_time < timers_manager.resume_time) {
		timers_manager.resume_time = firing_time;
		rt_rem_timed_task(&timers_manager);
		rt_enq_timed_task(&timers_manager);
		RT_SCHEDULE();
	}
	rt_global_restore_flags(flags);
}
Example 18
RTAI_SYSCALL_MODE void rt_set_timer_firing_time(struct rt_tasklet_struct *timer, RTIME firing_time)
{
	unsigned long flags;
	RT_TASK *timer_manager;

	set_timer_firing_time(timer, firing_time);
	flags = rt_global_save_flags_and_cli();
	if (timers_list[TIMER_CPUID].next == timer && ((timer_manager = &timers_manager[TIMER_CPUID])->state & RT_SCHED_DELAYED) && firing_time < timer_manager->resume_time) {
		timer_manager->resume_time = firing_time;
		rem_timed_task(timer_manager);
		enq_timed_task(timer_manager);
		rt_schedule();
	}
	rt_global_restore_flags(flags);
}
Example 19
static inline int tbx_wait_room(TBX *tbx, int *fravbs, int msgsize, RT_TASK *rt_current)
{
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if ((*fravbs) < msgsize) {
		tbx->waiting_nr++;
		rt_current->suspdepth = 1;
		rt_current->state |= SUSPENDED;
		rt_rem_ready_current(rt_current);
		rt_current->blocked_on = SOMETHING;
		tbx->waiting_task = rt_current;
		rt_schedule();
	}
	rt_global_restore_flags(flags);
	return (int)(rt_current->blocked_on);
}
Example 20
static inline int tbx_smx_wait(TBX* tbx, SEM *smx, RT_TASK *rt_current)
{
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if (!(smx->count)) {
		tbx->waiting_nr++;
		rt_current->state |= SEMAPHORE;
		rt_rem_ready_current(rt_current);
		enqueue_blocked(rt_current, &smx->queue, smx->qtype);
		rt_schedule();
	} else {
		smx->count = 0;
	}
	rt_global_restore_flags(flags);
	return (int)(rt_current->blocked_on);
}
Example 21
int rt_insert_timer(struct rt_tasklet_struct *timer, int priority, RTIME firing_time, RTIME period, void (*handler)(unsigned long), unsigned long data, int pid)
{
	unsigned long flags;
	struct rt_tasklet_struct *tmr;

// timer initialization
	if (!handler) {
		return -EINVAL;
	}
	timer->uses_fpu    = 0;
	timer->priority    = priority;
	timer->firing_time = firing_time;
	timer->period      = period;
	timer->handler     = handler;
	timer->data        = data;
	if (!pid) {
		timer->task = 0;
	} else {
		(timer->task)->priority = priority;
		copy_to_user(timer->usptasklet, timer, sizeof(struct rt_tasklet_struct));
	}
// timer insertion in timers_list
	tmr = &timers_list;
	flags = rt_spin_lock_irqsave(&timers_lock);
	while (firing_time >= (tmr = tmr->next)->firing_time);
	timer->next     = tmr;
	timer->prev     = tmr->prev;
	(tmr->prev)->next = timer;
	tmr->prev         = timer;
	rt_spin_unlock_irqrestore(flags, &timers_lock);
// timers_manager priority inheritance
	if (timer->priority < timers_manager.priority) {
		timers_manager.priority = timer->priority;
	}
// timers_task deadline inheritance
	flags = rt_global_save_flags_and_cli();
	if (timers_list.next == timer && (timers_manager.state & DELAYED) && firing_time < timers_manager.resume_time) {
		timers_manager.resume_time = firing_time;
		rt_rem_timed_task(&timers_manager);
		rt_enq_timed_task(&timers_manager);
		RT_SCHEDULE();
	}
	rt_global_restore_flags(flags);
	return 0;
}
Example 22
static inline void tbx_signal(TBX *tbx)
{
	unsigned long flags;
	RT_TASK *task;

	flags = rt_global_save_flags_and_cli();
	if ((task = tbx->waiting_task)) {
		tbx->waiting_nr--;
		rt_rem_timed_task(task);
		task->blocked_on = NOTHING;
		tbx->waiting_task = NOTHING;
		if ((task->state &= ~(SUSPENDED | DELAYED)) == READY) {
			rt_enq_ready_task(task);
			rt_schedule();
		}
	}
	rt_global_restore_flags(flags);
}
Example 23
static inline void tbx_smx_signal(TBX* tbx, SEM *smx)
{
	unsigned long flags;
	RT_TASK *task;

	flags = rt_global_save_flags_and_cli();
	if ((task = (smx->queue.next)->task)) {
		tbx->waiting_nr--;
		dequeue_blocked(task);
		rt_rem_timed_task(task);
		if ((task->state &= ~(SEMAPHORE | DELAYED)) == READY) {
			rt_enq_ready_task(task);
			rt_schedule();
		}
	} else {
		smx->count = 1;
	}
	rt_global_restore_flags(flags);
}
Example 24
File: signal.c Project: ArcEye/RTAI
RTAI_SYSCALL_MODE int rt_wait_signal(RT_TASK *sigtask, RT_TASK *task)
{
	unsigned long flags;

	if (sigtask->rt_signals != NULL) {
		flags = rt_global_save_flags_and_cli();
		if (!sigtask->suspdepth++) {
			sigtask->state |= RT_SCHED_SIGSUSP;
			rem_ready_current(sigtask);
			if (task->pstate > 0 && !(--task->pstate) && (task->state &= ~RT_SCHED_SIGSUSP) == RT_SCHED_READY) {
				enq_ready_task(task);
			}
			rt_schedule();
		}
		rt_global_restore_flags(flags);
		return sigtask->retval;
	}
	return 0;
}
Example 25
File: signal.c Project: ArcEye/RTAI
static inline void rt_exec_signal(RT_TASK *sigtask, RT_TASK *task)
{
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if (sigtask->suspdepth > 0 && !(--sigtask->suspdepth)) {
		if (task) {
			sigtask->priority = task->priority; 
 			if (!task->pstate++) {
				rem_ready_task(task);
				task->state |= RT_SCHED_SIGSUSP;
			}
		}
		sigtask->state &= ~RT_SCHED_SIGSUSP;
		sigtask->retval = (long)task;
		enq_ready_task(sigtask);
		RT_SCHEDULE(sigtask, rtai_cpuid());
	}
	rt_global_restore_flags(flags);
}
Example 26
static inline unsigned char tbx_check_msg(TBX *tbx, int *fravbs, int msgsize, unsigned char* type)
{
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if (tbx->rcvsmx.count == 0 || (*fravbs) < msgsize ) {
		rt_global_restore_flags(flags);
		return 0;
	}
	tbx->rcvsmx.count = 0;
	if (*tbx->bcbadr == TYPE_BROADCAST) {
		*type = TYPE_BROADCAST;
	} else {
		*type = *(tbx->bufadr + tbx->fbyte);
		tbx->fbyte = TBX_MOD_SIZE(tbx->fbyte + sizeof(*type));
		tbx->frbs += sizeof(*type);
		tbx->avbs -= sizeof(*type);
	}
	rt_global_restore_flags(flags);
	return *type;
}
Example 27
File: mbx.c Project: ArcEye/RTAI
static int mbx_wait(MBX *mbx, int *fravbs, RT_TASK *rt_current)
{
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if (!(*fravbs)) {
		unsigned long retval;
		rt_current->state |= RT_SCHED_MBXSUSP;
		rem_ready_current(rt_current);
		rt_current->blocked_on = (void *)mbx;
		mbx->waiting_task = rt_current;
		rt_schedule();
		if (unlikely(retval = (unsigned long)rt_current->blocked_on)) {
			mbx->waiting_task = NULL;
			rt_global_restore_flags(flags);
			return retval;
		}
	}
	rt_global_restore_flags(flags);
	return 0;
}
Example 28
static inline int tbx_wait_msg(TBX *tbx, int *fravbs, int msgsize, unsigned char*type, RT_TASK *rt_current)
{
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if (*tbx->bcbadr == TYPE_BROADCAST) {
		*type = TYPE_BROADCAST;
	} else {
		if ((*fravbs) < msgsize) {
			tbx->waiting_nr++;
			rt_current->state |= SUSPENDED;
			rt_rem_ready_current(rt_current);
			rt_current->blocked_on = SOMETHING;
			tbx->waiting_task = rt_current;
			rt_schedule();
		}
		*type = *(tbx->bufadr + tbx->fbyte);
		tbx->fbyte = TBX_MOD_SIZE(tbx->fbyte + sizeof(*type));
		tbx->frbs += sizeof(*type);
		tbx->avbs -= sizeof(*type);
	}
	rt_global_restore_flags(flags);
	return (int)(rt_current->blocked_on);
}
Example 29
static inline int tbx_wait_msg_until(TBX *tbx, int *fravbs, int msgsize, RTIME time, unsigned char *type, RT_TASK *rt_current)
{
	int timed = 0;
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if (*tbx->bcbadr == TYPE_BROADCAST) {
		*type = TYPE_BROADCAST;
	} else {
		*type = *(tbx->bufadr + tbx->fbyte);
		if ((*fravbs) < msgsize && *tbx->bcbadr == TYPE_NONE) {
			tbx->waiting_nr++;
			rt_current->blocked_on = SOMETHING;
			rt_current->resume_time = time;
			rt_current->state |= DELAYED;
			rt_rem_ready_current(rt_current);
			tbx->waiting_task = rt_current;
			rt_enq_timed_task(rt_current);
			rt_schedule();
			if (rt_current->blocked_on) {
				tbx->waiting_nr--;
				rt_current->blocked_on = NOTHING;
				tbx->waiting_task = NOTHING;
				timed = 1;
				*type = TYPE_NONE;
				rt_global_restore_flags(flags);
				return timed;
			}
		}
		tbx->fbyte = TBX_MOD_SIZE(tbx->fbyte + sizeof(*type));
		tbx->frbs += sizeof(*type);
		tbx->avbs -= sizeof(*type);
	}
	rt_global_restore_flags(flags);
	return timed;
}
Example 30
RTAI_SYSCALL_MODE int rt_timer_insert(struct rtdm_timer_struct *timer, int priority, RTIME firing_time, RTIME period, void (*handler)(unsigned long), unsigned long data)
{
	spinlock_t *lock;
	unsigned long flags, cpuid;
	RT_TASK *timer_manager;

	if (!handler) {
		return -EINVAL;
	}
	timer->handler     = handler;	
	timer->data        = data;
	timer->priority    = priority;	
	timer->firing_time = firing_time;
	timer->period      = period;
	REALTIME2COUNT(firing_time)
	
	timer->cpuid = cpuid = NUM_CPUS > 1 ? rtai_cpuid() : 0;
// timer insertion in timers_list
	flags = rt_spin_lock_irqsave(lock = &timers_lock[LIST_CPUID]);
	enq_timer(timer);
	rt_spin_unlock_irqrestore(flags, lock);
// timers_manager priority inheritance
	if (timer->priority < (timer_manager = &timers_manager[LIST_CPUID])->priority) {
		timer_manager->priority = timer->priority;
	}
// timers_task deadline inheritance
	flags = rt_global_save_flags_and_cli();
	if (timers_list[LIST_CPUID].next == timer && (timer_manager->state & RT_SCHED_DELAYED) && firing_time < timer_manager->resume_time) {
		timer_manager->resume_time = firing_time;
		rem_timed_task(timer_manager);
		enq_timed_task(timer_manager);
		rt_schedule();
	}
	rt_global_restore_flags(flags);
	return 0;
}