/*
 * rt_msgq_init - initialize a message queue with nmsg preallocated slots.
 *
 * @mq:       user-allocated RT_MSGQ structure to set up.
 * @nmsg:     number of message slots to preallocate.
 * @msg_size: payload bytes available in each fast (in-place) slot.
 *
 * Returns 0 on success, -ENOMEM if the backing buffer cannot be allocated.
 *
 * Layout of the single allocation: nmsg slot pointers, then nmsg
 * (RT_MSGH header + msg_size payload) slots, then one trailing header
 * used as the 'firstmsg' sentinel.
 */
RTAI_SYSCALL_MODE int rt_msgq_init(RT_MSGQ *mq, int nmsg, int msg_size)
{
	int i;
	void *p;
	/* One allocation covers the pointer table, all slots and the sentinel. */
	if (!(mq->slots = rt_malloc((msg_size + RT_MSGH_SIZE + sizeof(void *))*nmsg + RT_MSGH_SIZE))) {
		return -ENOMEM;
	}
	mq->nmsg = nmsg;
	mq->fastsize = msg_size;
	mq->slot = 0;
	/* Slots start right after the nmsg-entry pointer table. */
	p = mq->slots + nmsg;
	for (i = 0; i < nmsg; i++) {
		mq->slots[i] = p;
		((RT_MSGH *)p)->priority = 0;
		p += (msg_size + RT_MSGH_SIZE);
	}
	/* Sentinel gets the lowest possible priority (largest value) so every
	   real message enqueues ahead of it. */
	((RT_MSGH *)(mq->firstmsg = p))->priority = (0xFFFFFFFF/2);
	/* Resource semaphores serialize receivers/senders; counting semaphores
	   track delivered messages and free slots respectively. */
	rt_typed_sem_init(&mq->receivers, 1, RES_SEM);
	rt_typed_sem_init(&mq->senders, 1, RES_SEM);
	rt_typed_sem_init(&mq->received, 0, CNT_SEM);
	rt_typed_sem_init(&mq->freslots, nmsg, CNT_SEM);
	spin_lock_init(&mq->lock);
	return 0;
}
static int my_init(void) { int i, ierr1, ierr2; rt_typed_sem_init(&semaphore1, 0, BIN_SEM); rt_typed_sem_init(&semaphore2, 0, BIN_SEM); rt_set_oneshot_mode(); ierr1 = rt_task_init_cpuid(&tasks[0], task_body1, 0, STACK_SIZE, 1, 0, 0, 0); ierr2 = rt_task_init_cpuid(&tasks[1], task_body2, 0, STACK_SIZE, 0, 0, 0, 0); printk("[task 1] init return code %d by program %s\n", ierr1, __FILE__); printk("[task 2] init return code %d by program %s\n", ierr2, __FILE__); if (ierr1 == -1 || ierr2 == -1) { return -1; } start_rt_timer(nano2count(TICK_PERIOD)); first_release = rt_get_time(); for (i = 0 ; i < N_TASK ; i++) { rt_task_make_periodic(&tasks[i], first_release, PERIOD); } return 0; }
/*
 * rtos_mutex_rec_init - create a recursive mutex backed by an RTAI
 * resource semaphore.
 *
 * Returns 0 on success, -1 if the semaphore could not be created.
 * RES_SEM already queues waiters by priority, so no PRIO_Q flag is needed.
 */
int rtos_mutex_rec_init(rt_mutex_t *m)
{
	CHK_LXRT_CALL();
	m->sem = rt_typed_sem_init(rt_get_name(0), 1, RES_SEM);
	if (m->sem == 0) {
		return -1;
	}
	return 0;
}
static int __switches_init(void) { int i; int e; printk("\nWait for it ...\n"); rt_typed_sem_init(&sem, 1, SEM_TYPE); rt_linux_use_fpu(1); thread = (RT_TASK *)kmalloc(ntasks*sizeof(RT_TASK), GFP_KERNEL); for (i = 0; i < ntasks; i++) { #ifdef DISTRIBUTE e = rt_task_init_cpuid(thread + i, pend_task, i, stack_size, 0, use_fpu, 0, i%2); #else e = rt_task_init_cpuid(thread + i, pend_task, i, stack_size, 0, use_fpu, 0, hard_cpu_id()); #endif if (e < 0) { task_init_has_failed: rt_printk("switches: failed to initialize task %d, error=%d\n", i, e); while (--i >= 0) rt_task_delete(thread + i); return -1; } } e = rt_task_init_cpuid(&task, sched_task, i, stack_size, 1, use_fpu, 0, hard_cpu_id()); if (e < 0) goto task_init_has_failed; rt_task_resume(&task); return 0; }
static int init_module(void) { int i; rt_set_oneshot_mode(); start_rt_timer(0); sync_sem = rt_sem_init(nam2num("SYNCSM"), 0); prio_sem = rt_sem_init(nam2num("PRIOSM"), 0); if (!(mbx_in = rt_mbx_init(nam2num("MBXIN"), NUM_TASKS*8)) || !(mbx_out = rt_mbx_init(nam2num("MBXOUT"), NUM_TASKS*8))) { printf("could not create message queues\n"); return 1; } for (i = 0; i < NUM_TASKS; ++i) { sems[i] = rt_sem_init(nam2num(sem[i]), 0); } end_sem = rt_typed_sem_init(nam2num("ENDSEM"), 0, CNT_SEM); print_sem = rt_typed_sem_init(nam2num("PRTSEM"), 1, BIN_SEM); return 0; }
int init_module(void) { rt_set_oneshot_mode(); start_rt_timer(0); if (SemType) { printk("USING A RESOURCE SEMAPHORE, AND WE HAVE ...\n"); rt_typed_sem_init(&mutex, 1, RES_SEM); } else { printk("USING A BINARY SEMAPHORE, AND WE HAVE ...\n"); rt_typed_sem_init(&mutex, 1, BIN_SEM | PRIO_Q); } rt_task_init_cpuid(&taskl, taskl_func, 0, RT_STACK, 1000, 0, 0, 0); rt_task_init_cpuid(&taskm, taskm_func, 0, RT_STACK, 500, 0, 0, 0); rt_task_init_cpuid(&taskh, taskh_func, 0, RT_STACK, 0, 0, 0, 0); rt_task_resume(&taskl); rt_task_resume(&taskm); rt_task_resume(&taskh); return 0; }
int init_module(void) { #ifdef ONE_SHOT rt_set_oneshot_mode(); #endif rt_sem_init(¬Empty, 0); rt_typed_sem_init(&mutex, 1, SEM_TYPE); TimesBufferstatus = empty; HourBufferstatus = empty; return 0; }
/**
 * @brief Initializes a fully typed mailbox queueing tasks
 * according to the specified type.
 *
 * rt_typed_mbx_init initializes a mailbox of size @e size. @e mbx must
 * point to a user allocated MBX structure. Tasks are queued in FIFO
 * order (FIFO_Q), priority order (PRIO_Q) or resource order (RES_Q).
 *
 * @param mbx is a pointer to a user allocated mailbox structure.
 *
 * @param size corresponds to the size of the mailbox.
 *
 * @param type corresponds to the queueing policy: FIFO_Q, PRIO_Q or RES_Q.
 *
 * @return On success 0 is returned. On failure, a special value is
 * returned as indicated below:
 * - @b ENOMEM: Space could not be allocated for the mailbox buffer.
 *
 * See also: notes under rt_mbx_init().
 */
RTAI_SYSCALL_MODE int rt_typed_mbx_init(MBX *mbx, int size, int type)
{
	/* A bare queueing policy (no semaphore bits in the low two) gets a
	   binary semaphore; otherwise the caller's type is used verbatim. */
	const int semtype = (type & 3) ? type : (BIN_SEM | type);

	mbx->bufadr = rt_malloc(size);
	if (!mbx->bufadr) {
		return -ENOMEM;
	}
	rt_typed_sem_init(&mbx->sndsem, 1, semtype);
	rt_typed_sem_init(&mbx->rcvsem, 1, semtype);
	mbx->magic = RT_MBX_MAGIC;
	mbx->size = mbx->frbs = size;
	mbx->owndby = mbx->waiting_task = NULL;
	mbx->fbyte = mbx->lbyte = mbx->avbs = 0;
	spin_lock_init(&mbx->lock);
#ifdef CONFIG_RTAI_RT_POLL
	/* Both poll queues start as empty circular lists pointing at themselves. */
	mbx->poll_recv.pollq.prev = mbx->poll_recv.pollq.next = &mbx->poll_recv.pollq;
	mbx->poll_send.pollq.prev = mbx->poll_send.pollq.next = &mbx->poll_send.pollq;
	mbx->poll_recv.pollq.task = mbx->poll_send.pollq.task = NULL;
	spin_lock_init(&mbx->poll_recv.pollock);
	spin_lock_init(&mbx->poll_send.pollock);
#endif
	return 0;
}
/*
 * rt_tbx_init - initialize a typed mailbox (TBX) of the given size.
 *
 * @tbx:   user-allocated TBX structure.
 * @size:  bytes for both the data buffer and the broadcast control buffer.
 * @flags: extra queueing flags OR-ed into each semaphore type.
 *
 * Returns 0 on success, -ENOMEM if either buffer cannot be allocated
 * (the first buffer is released if the second allocation fails).
 */
int rt_tbx_init(TBX *tbx, int size, int flags)
{
	if (!(tbx->bufadr = sched_malloc(size))) {
		return -ENOMEM;
	}
	if (!(tbx->bcbadr = sched_malloc(size))) {
		sched_free(tbx->bufadr);
		return -ENOMEM;
	}
	memset(tbx->bufadr, 0, size);
	memset(tbx->bcbadr, 0, size);
	/* fix: mark the broadcast control block empty AFTER zeroing it; the
	   original wrote TYPE_NONE first and the memset immediately wiped it. */
	*tbx->bcbadr = TYPE_NONE;
	rt_typed_sem_init(&(tbx->sndsmx), 1, CNT_SEM | flags);
	rt_typed_sem_init(&(tbx->rcvsmx), 1, CNT_SEM | flags);
	rt_typed_sem_init(&(tbx->bcbsmx), 1, BIN_SEM | flags);
	tbx->magic = RT_TBX_MAGIC;
	tbx->size = tbx->frbs = size;
	tbx->waiting_task = 0;
	tbx->waiting_nr = 0;
	spin_lock_init(&(tbx->buflock));
	tbx->fbyte = tbx->avbs = 0;
	return 0;
}
static int test_init(void) { int ierr; rt_set_oneshot_mode(); printk("PGM STARTING\n"); init_matrices(); // Create real-time tasks ierr = rt_task_init_cpuid(&sens_task, // task senscode, // rt_thread 0, // data STACK_SIZE, // stack_size 3, // priority 0, // uses_fpu 0, // signal 0); // cpuid ierr = rt_task_init_cpuid(&act_task, // task actcode, // rt_thread 0, // data STACK_SIZE, // stack_size 4, // priority 0, // uses_fpu 0, // signal 0); // cpuid // init semaphores rt_typed_sem_init(&sensDone, // semaphore pointer 0, // initial value BIN_SEM); // semaphore type if (!ierr) { start_rt_timer(nano2count(TICK_PERIOD)); now = rt_get_time(); // Start tasks rt_task_make_periodic(&sens_task, now, nano2count(PERIOD)); //rt_task_resume(&act_task); } //return ierr; return 0; // pour ne pas faire planter le kernel }
int init_module(void) { rt_assign_irq_to_cpu(0, 0); rtf_create(CMDF, 10000); if (Mode) { rt_typed_sem_init(&sem, 0, SEM_TYPE); } rt_task_init_cpuid(&thread, intr_handler, 0, STACK_SIZE, 0, 0, 0, 0); rt_task_resume(&thread); #ifdef IRQEXT rt_request_timer((void *)rt_timer_tick_ext, imuldiv(TICK, FREQ_8254, 1000000000), 0); rt_set_global_irq_ext(0, 1, 0); #else rt_request_timer((void *)rt_timer_tick, imuldiv(TICK, FREQ_8254, 1000000000), 0); #endif SETUP_8254_TSC_EMULATION; return 0; }
/*
 * _broadcast - queue one message for delivery to 'broadcast' receivers.
 *
 * @mq:        target message queue.
 * @msg:       message payload (kernel or user pointer, see @space).
 * @msg_size:  payload size in bytes.
 * @msgpri:    message priority used for ordered enqueueing.
 * @broadcast: number of receivers that must pick the message up.
 * @space:     non-zero for a kernel-space source, zero for user space.
 *
 * Returns 'broadcast' on success, -ENOMEM if an oversized payload cannot
 * be allocated.
 *
 * NOTE(review): the caller presumably already holds mq->senders and has
 * taken one mq->freslots unit — both are signaled back on the error
 * path here; confirm against the calling send path.
 */
static int _broadcast(RT_MSGQ *mq, void *msg, int msg_size, int msgpri, int broadcast, int space)
{
	unsigned long flags;
	RT_MSG *msg_ptr;
	void *p;
	/* Payloads larger than the preallocated slot size go to a separate
	   rt_malloc'ed buffer; slot-sized ones are copied in place. */
	if (msg_size > mq->fastsize) {
		if (!(p = rt_malloc(msg_size))) {
			/* Give back the slot and the sender token before failing. */
			rt_sem_signal(&mq->freslots);
			rt_sem_signal(&mq->senders);
			return -ENOMEM;
		}
	} else {
		p = NULL;
	}
	/* Grab the next free slot under the queue lock. */
	flags = rt_spin_lock_irqsave(&mq->lock);
	msg_ptr = mq->slots[mq->slot++];
	rt_spin_unlock_irqrestore(flags, &mq->lock);
	msg_ptr->hdr.size = msg_size;
	msg_ptr->hdr.priority = msgpri;
	msg_ptr->hdr.malloc = p;
	if (space) {
		memcpy(p ? p : msg_ptr->msg, msg, msg_size);
	} else {
		rt_copy_from_user(p ? p : msg_ptr->msg, msg, msg_size);
	}
	/* Barrier sized broadcast+1: all receivers plus this sender must
	   arrive before anyone proceeds. */
	rt_typed_sem_init(&mq->broadcast, broadcast + 1, CNT_SEM | PRIO_Q);
	msg_ptr->hdr.broadcast = broadcast;
	flags = rt_spin_lock_irqsave(&mq->lock);
	enq_msg(mq, &msg_ptr->hdr);
	rt_spin_unlock_irqrestore(flags, &mq->lock);
	rt_sem_signal(&mq->received);
	/* Wait until every receiver has taken the message. */
	rt_sem_wait_barrier(&mq->broadcast);
	rt_sem_signal(&mq->senders);
	return broadcast;
}
int main(void) { int i, k, prio, bprio; task[0] = rt_task_init_schmod(0, NTASKS - 1, 0, 0, SCHED_FIFO, CPUS_ALLOWED); mlockall(MCL_CURRENT | MCL_FUTURE); rt_make_hard_real_time(); for (i = 0; i < NTASKS; i++) { sem[i] = rt_typed_sem_init(0, RESEMT, RES_SEM); } rt_sem_wait(sem[0]); for (i = 1; i < NTASKS; i++) { rt_thread_create(tskfun, (void *)i, 0); rt_receive(NULL, &k); rt_printk("AFTER TSKNR %d CREATED > (TSKNR-PRI):\n", i); for (k = 0; k < i; k++) { rt_get_priorities(task[k], &prio, &bprio); rt_printk("%d-%d|", k, prio); } rt_get_priorities(task[i], &prio, &bprio); rt_printk("%d-%d\n\n", i, prio); } rt_sem_signal(sem[0]); rt_printk("FINAL > (TSKNR-PRI):\n"); for (k = 0; k < (NTASKS - 1); k++) { rt_get_priorities(task[k], &prio, &bprio); rt_printk("%d-%d|", k, prio); } rt_get_priorities(task[NTASKS - 1], &prio, &bprio); rt_printk("%d-%d\n\n", (NTASKS - 1), prio); for (i = 0; i < NTASKS; i++) { rt_sem_delete(sem[i]); } return 0; }
/*
 * registerRosBlock - register a Simulink ROS block: record its config,
 * allocate a uniquely named shared-memory region and a binary semaphore.
 *
 * @S:       Simulink block descriptor stored in the config table.
 * @rosName: block name copied into the config entry.
 * @type:    block type code.
 * @subType: block subtype code.
 *
 * Returns the init result (shm pointer, semaphore, block index).
 * Exits the process if no free name slot exists or allocation fails,
 * matching the existing error policy of this function.
 */
rosBlockInitResult_t registerRosBlock(SimStruct *S, char *rosName, int type, int subType)
{
	char name[7];
	int_T i;
	int num = numRosBlocks;
	rosBlockInitResult_t res;

	rosBlockConfigs[num].S = S;
	/* fix: the original memcpy'd MAX_NAMES_SIZE bytes regardless of the
	   actual string length, over-reading short names; snprintf copies at
	   most the string and always NUL-terminates. */
	snprintf(rosBlockConfigs[num].name, MAX_NAMES_SIZE, "%s", rosName);
	rosBlockConfigs[num].type = type;
	rosBlockConfigs[num].subType = subType;

	/* Find a free shared-memory name.  fix: snprintf bounds the write
	   into name[7]; and a full table is now detected instead of silently
	   reusing the last probed name. */
	for (i = 0; i < MAX_ROS_BLOCKS; i++) {
		snprintf(name, sizeof(name), "%s%d", RosShmID, i);
		if (!rt_get_adr(nam2num(name))) break;
	}
	if (i == MAX_ROS_BLOCKS) {
		printf("No free shared memory slot: Time to die!\n");
		exit(1);
	}
	if (!(res.shm = (rosShmData_t *)rt_shm_alloc(nam2num(name), sizeof(rosShmData_t), USE_VMALLOC))) {
		printf("Cannot init shared memory %s: Time to die!\n", name);
		exit(1);
	}
	memcpy(rosBlockConfigs[num].shmName, name, 7);

	/* Find a free semaphore name, same scheme as above. */
	for (i = 0; i < MAX_ROS_BLOCKS; i++) {
		snprintf(name, sizeof(name), "%s%d", RosSemID, i);
		if (!rt_get_adr(nam2num(name))) break;
	}
	if (i == MAX_ROS_BLOCKS) {
		printf("No free semaphore slot: Time to kill myself!\n");
		exit(1);
	}
	if (!(res.sem = rt_typed_sem_init(nam2num(name), 1, BIN_SEM | PRIO_Q))) {
		printf("Cannot init semaphore %s: Time to kill myself!\n", name);
		exit(1);
	}
	memcpy(rosBlockConfigs[num].semName, name, 7);

	res.shm->length = 0;
	res.shm->state = 0;
	res.shm->msg.state = 0;
	res.num = num;
	numRosBlocks++;
	return res;
}
/*
 * rtos_mutex_init - create a plain (non-recursive) mutex backed by a
 * priority-queued RTAI binary semaphore.
 *
 * Returns 0 on success, -1 if the semaphore could not be created.
 */
int rtos_mutex_init(rt_mutex_t *m)
{
	CHK_LXRT_CALL();
	m->sem = rt_typed_sem_init(rt_get_name(0), 1, BIN_SEM | PRIO_Q);
	if (m->sem == 0) {
		return -1;
	}
	return 0;
}