int __rtai_shm_init(void)
{
#if USE_UDEV_CLASS
    /* class_create() reports failure with an ERR_PTR, not NULL. */
    shm_class = class_create(THIS_MODULE, "rtai_shm");
    if (IS_ERR(shm_class)) {
        printk("RTAI-SHM: cannot create class.\n");
        return -EBUSY;
    }
    if (IS_ERR(CLASS_DEVICE_CREATE(shm_class, MKDEV(MISC_MAJOR, RTAI_SHM_MISC_MINOR), NULL, "rtai_shm"))) {
        printk("RTAI-SHM: cannot attach class.\n");
        class_destroy(shm_class);
        return -EBUSY;
    }
#endif
    if (misc_register(&rtai_shm_dev) < 0) {
        printk("***** UNABLE TO REGISTER THE SHARED MEMORY DEVICE (miscdev minor: %d) *****\n", RTAI_SHM_MISC_MINOR);
        return -EBUSY;
    }
#ifdef CONFIG_RTAI_MALLOC
#ifdef CONFIG_RTAI_MALLOC_VMALLOC
    /* Export the global heap by name so it can be shared with user space. */
    rt_register(GLOBAL_HEAP_ID, rtai_global_heap_adr, rtai_global_heap_size, 0);
    rt_smp_linux_task->heap[GLOBAL].heap = &rtai_global_heap;
    rt_smp_linux_task->heap[GLOBAL].kadr =
    rt_smp_linux_task->heap[GLOBAL].uadr = rtai_global_heap_adr;
#else
    printk("***** WARNING: GLOBAL HEAP NEITHER SHARABLE NOR USABLE FROM USER SPACE (use the vmalloc option for RTAI malloc) *****\n");
#endif
#endif
    return set_rt_fun_entries(rt_shm_entries);
}
static inline void *_rt_shm_alloc(unsigned long name, int size, int suprt)
{
    void *adr;
//  suprt = USE_GFP_ATOMIC; // to force some testing
    if (!(adr = rt_get_adr_cnt(name)) && size > 0 && suprt >= 0 && RT_SHM_OP_PERM()) {
        /* Round the request up to a whole number of pages. */
        size = ((size - 1) & PAGE_MASK) + PAGE_SIZE;
        if ((adr = suprt ? rkmalloc(&size, SUPRT[suprt]) : rvmalloc(size))) {
            /* A negative registered size marks a kmalloc'ed block. */
            if (!rt_register(name, adr, suprt ? -size : size, 0)) {
                if (suprt) {
                    rkfree(adr, size);
                } else {
                    rvfree(adr, size);
                }
                return 0;
            }
            memset(ALIGN2PAGE(adr), 0, size);
        }
    }
    return ALIGN2PAGE(adr);
}
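/*
 * For context: a minimal sketch of reaching the allocator above through the
 * public rt_shm_alloc()/rt_shm_free() pair, assuming the usual RTAI headers
 * and the USE_VMALLOC selector from rtai_shm.h. "MYSHM" and the module hooks
 * are made up; error handling is intentionally minimal.
 */
#include <linux/module.h>
#include <rtai_shm.h>

#define SHM_NAME nam2num("MYSHM")   /* hypothetical 6-char name */

static int *shared;

static int __init shm_user_init(void)
{
    /* Any other module (or user process) asking for the same name gets a
     * mapping of the same memory. */
    shared = rt_shm_alloc(SHM_NAME, 4096, USE_VMALLOC);
    if (!shared) {
        return -ENOMEM;
    }
    shared[0] = 1;   /* memory arrives zeroed, see the memset() above */
    return 0;
}

static void __exit shm_user_exit(void)
{
    rt_shm_free(SHM_NAME);   /* really freed only when the last user exits */
}

module_init(shm_user_init);
module_exit(shm_user_exit);
MODULE_LICENSE("GPL");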
int init_module(void)
{
    tasknode = ddn2nl(TaskNode);
    rt_mbx_init(&mbx, 1);
    rt_register(nam2num("HDLMBX"), &mbx, IS_MBX, 0);
    rt_task_init(&sup_task, sup_fun, 0, 2000, 1, 0, 0);
    rt_task_resume(&sup_task);
    rt_request_timer(timer_tick, imuldiv(PERIOD, FREQ_8254, 1000000000), 0);
    return 0;
}
void *rt_named_malloc(unsigned long name, int size)
{
    void *mem_ptr;

    /* If the name is already registered, just attach to the existing block. */
    if ((mem_ptr = rt_get_adr_cnt(name))) {
        return mem_ptr;
    }
    if ((mem_ptr = _rt_halloc(size, &rt_smp_linux_task->heap[GLOBAL]))) {
        if (rt_register(name, mem_ptr, IS_HPCK, 0)) {
            return mem_ptr;
        }
        /* Registration failed (e.g. registry full): undo the allocation. */
        rt_hfree(mem_ptr);
    }
    return NULL;
}
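/*
 * A hedged usage sketch for the function above: name-based allocation from
 * the global heap, shareable across RTAI contexts. Assumes rt_named_free()
 * releases by block address, as in stock RTAI; "CFGBLK" is a made-up name.
 */
#include <rtai_malloc.h>
#include <rtai_nam2num.h>

struct cfg { int rate_hz; int gain; };

static struct cfg *cfg_attach(void)
{
    /* The first caller allocates; later callers using the same name attach
     * to the existing block (its use count is bumped by rt_get_adr_cnt()). */
    return rt_named_malloc(nam2num("CFGBLK"), sizeof(struct cfg));
}

static void cfg_detach(struct cfg *c)
{
    rt_named_free(c);   /* freed for real when the usage count reaches zero */
}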
RTAI_SYSCALL_MODE void *rt_bits_init_u(unsigned long name, unsigned long mask)
{
    BITS *bits;

    if (rt_get_adr(name)) {
        return NULL;
    }
    if ((bits = rt_malloc(sizeof(BITS)))) {
        rt_bits_init(bits, mask);
        if (rt_register(name, bits, IS_BIT, current)) {
            return bits;
        } else {
            rt_free(bits);
        }
    }
    return NULL;
}
static inline void *rt_named_halloc_typed(unsigned long name, int size, int htype)
{
    RT_TASK *task;
    void *mem_ptr;

    RTAI_TASK(return NULL);
    if ((mem_ptr = rt_get_adr_cnt(name))) {
        /* Translate the registered kernel address into this task's
         * user-space mapping of the same heap. */
        return task->heap[htype].uadr + (mem_ptr - task->heap[htype].kadr);
    }
    if ((mem_ptr = _rt_halloc(size, &task->heap[htype]))) {
        /* Register the kernel-side address, but hand back the caller-side one. */
        if (rt_register(name, task->heap[htype].kadr + (mem_ptr - task->heap[htype].uadr), IS_HPCK, 0)) {
            return mem_ptr;
        }
        _rt_hfree(mem_ptr, &task->heap[htype]);
    }
    return NULL;
}
/**
 * @brief Initializes a specifically typed (FIFO queued, priority queued
 * or resource queued) mailbox identified by a name.
 *
 * _rt_typed_named_mbx_init initializes a mailbox of type @e qtype
 * and size @e size, identified by @e mbx_name. Named mailboxes are useful
 * for communication among different processes, between kernel and user
 * space, and in distributed applications; see netrpc.
 *
 * @param mbx_name is the mailbox name; since an unsigned long can be a
 * clumsy identifier, services are provided to convert 6-character string
 * identifiers to unsigned long (see nam2num()).
 *
 * @param size is the size of the mailbox.
 *
 * @param qtype is the queueing policy: FIFO_Q, PRIO_Q or RES_Q.
 *
 * @return On success, the pointer to the allocated mailbox is returned.
 * On failure, NULL is returned.
 *
 * See also: notes under rt_mbx_init() and rt_typed_mbx_init().
 */
RTAI_SYSCALL_MODE MBX *_rt_typed_named_mbx_init(unsigned long mbx_name, int size, int qtype)
{
    MBX *mbx;

    if ((mbx = rt_get_adr_cnt(mbx_name))) {
        return mbx;   /* already created: attach and bump the use count */
    }
    if ((mbx = rt_malloc(sizeof(MBX)))) {
        rt_typed_mbx_init(mbx, size, qtype);
        if (rt_register(mbx_name, mbx, IS_MBX, 0)) {
            return mbx;
        }
        /* Registration failed: tear down and release the mailbox.
         * (rt_free() is kept inside the branch so it cannot run on NULL.) */
        rt_mbx_delete(mbx);
        rt_free(mbx);
    }
    return NULL;
}
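/*
 * A minimal kernel-side sketch of the call documented above, assuming the
 * standard RTAI mailbox API (rt_mbx_send(), rt_named_mbx_delete()); the
 * name "DATMBX" and the message format are illustrative only.
 */
#include <rtai_mbx.h>
#include <rtai_nam2num.h>

static MBX *chan;

static int open_channel(void)
{
    /* Whichever side runs first creates the mailbox; the other attaches. */
    chan = _rt_typed_named_mbx_init(nam2num("DATMBX"), 64, FIFO_Q);
    return chan ? 0 : -ENOMEM;
}

static void send_sample(long sample)
{
    rt_mbx_send(chan, &sample, sizeof(sample));   /* blocks until fully sent */
}

static void close_channel(void)
{
    rt_named_mbx_delete(chan);   /* deleted when the usage count drops to 0 */
}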
RTAI_SYSCALL_MODE RT_MSGQ *_rt_named_msgq_init(unsigned long msgq_name, int nmsg, int msg_size)
{
    RT_MSGQ *msgq;

    if ((msgq = rt_get_adr_cnt(msgq_name))) {
        return msgq;
    }
    if ((msgq = rt_malloc(sizeof(RT_MSGQ)))) {
        rt_msgq_init(msgq, nmsg, msg_size);
        if (rt_register(msgq_name, msgq, IS_MBX, 0)) {
            return msgq;
        }
        /* As above, rt_free() stays inside the branch to avoid rt_free(NULL). */
        rt_msgq_delete(msgq);
        rt_free(msgq);
    }
    return NULL;
}
RTAI_SYSCALL_MODE BITS *rt_named_bits_init(const char *bits_name, unsigned long mask)
{
    BITS *bits;
    unsigned long name;

    if ((bits = rt_get_adr(name = nam2num(bits_name)))) {
        return bits;
    }
    if ((bits = rt_malloc(sizeof(BITS)))) {   /* was sizeof(SEM) */
        rt_bits_init(bits, mask);
        if (rt_register(name, bits, IS_BIT, 0)) {
            return bits;
        }
        rt_bits_delete(bits);
        rt_free(bits);
    }
    return NULL;
}
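/*
 * A hedged sketch for the named bits group above. Only create/attach/delete
 * are shown; the wait/signal operations live elsewhere in the bits API.
 * rt_named_bits_delete() is assumed to be the counterpart of the init, by
 * analogy with the named sem/mbx services; "EVGRP" is a made-up name.
 */
#include <rtai_bits.h>

static BITS *grp;

static int attach_group(void)
{
    /* The first caller creates the group; later callers get the same
     * registered object back. */
    grp = rt_named_bits_init("EVGRP", 0x0UL);
    return grp ? 0 : -ENOMEM;
}

static void detach_group(void)
{
    rt_named_bits_delete(grp);   /* assumption: see lead-in comment */
}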
// Create a synchronous IPC proxy task.
pid_t rt_Proxy_attach(pid_t pid, void *msg, int nbytes, int prio)
{
    RT_TASK *task;
    char proxy_name[8];

    task = pid ? pid2rttask(pid) : 0;
    task = __rt_proxy_attach((void *)Proxy_Task, task, msg, nbytes, prio);
    if (task) {
        if ((pid = assign_pid(task)) < 0) {
            rt_proxy_detach(task);
        } else if (task->lnxtsk) {
            // A user space program may have created the proxy.
            pid2nam(pid, proxy_name);
            rt_register(nam2num(proxy_name), pid2rttask(task->retval), IS_PRX, task->lnxtsk);
        }
        return pid;
    }
    return -ENOMEM;
}
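/*
 * A hedged sketch of the QNX-style proxy pattern served by the function
 * above: a proxy carries a canned message that rt_Trigger() delivers to the
 * owner, which collects it with rt_Receive(). The signatures follow the
 * stock RTAI QNX-like user API but should be treated as assumptions here.
 */
#include <rtai_msg.h>

static void notifier(pid_t proxy)
{
    rt_Trigger(proxy);   /* fire-and-forget: queues the canned message */
}

static void owner_side(void)
{
    char canned[] = "tick";
    char buf[8];
    size_t len;
    pid_t proxy;

    /* pid == 0 attaches the proxy to the calling task. */
    proxy = rt_Proxy_attach(0, canned, sizeof(canned), 0);
    rt_Receive(0, buf, sizeof(buf), &len);   /* blocks until triggered */
    rt_Proxy_detach(proxy);
}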
static inline long long handle_lxrt_request(unsigned int lxsrq, long *arg, RT_TASK *task)
{
#define larg ((struct arg *)arg)

    union {
        unsigned long name; RT_TASK *rt_task; SEM *sem; MBX *mbx;
        RWL *rwl; SPL *spl; int i; void *p; long long ll;
    } arg0;
    int srq;

    if (likely((srq = SRQ(lxsrq)) < MAX_LXRT_FUN)) {
        unsigned long type;
        struct rt_fun_entry *funcm;
        /*
         * The next two lines of code do a lot. They make it possible to
         * extend the use of USP to any other real time module service in
         * user space, both for soft and hard real time. Concept contributed
         * and copyrighted by: Giuseppe Renoldi ([email protected]).
         */
        if (unlikely(!(funcm = rt_fun_ext[INDX(lxsrq)]))) {
            rt_printk("BAD: null rt_fun_ext, no module for extension %d?\n", INDX(lxsrq));
            return -ENOSYS;
        }
        if (!(type = funcm[srq].type)) {
            return ((RTAI_SYSCALL_MODE long long (*)(unsigned long, ...))funcm[srq].fun)(RTAI_FUN_ARGS);
        }
        if (unlikely(NEED_TO_RW(type))) {
            lxrt_fun_call_wbuf(task, funcm[srq].fun, NARG(lxsrq), arg, type);
        } else {
            lxrt_fun_call(task, funcm[srq].fun, NARG(lxsrq), arg);
        }
        return task->retval;
    }

    arg0.name = arg[0];
    switch (srq) {
        case LXRT_GET_ADR: {
            arg0.p = rt_get_adr(arg0.name);
            return arg0.ll;
        }

        case LXRT_GET_NAME: {
            arg0.name = rt_get_name(arg0.p);
            return arg0.ll;
        }

        case LXRT_TASK_INIT: {
            struct arg { unsigned long name; long prio, stack_size, max_msg_size, cpus_allowed; };
            arg0.rt_task = __task_init(arg0.name, larg->prio, larg->stack_size, larg->max_msg_size, larg->cpus_allowed);
            return arg0.ll;
        }

        case LXRT_TASK_DELETE: {
            arg0.i = __task_delete(arg0.rt_task ? arg0.rt_task : task);
            return arg0.ll;
        }

        case LXRT_SEM_INIT: {
            if (rt_get_adr(arg0.name)) {
                return 0;
            }
            if ((arg0.sem = rt_malloc(sizeof(SEM)))) {
                struct arg { unsigned long name; long cnt; long typ; };
                lxrt_typed_sem_init(arg0.sem, larg->cnt, larg->typ);
                if (rt_register(larg->name, arg0.sem, IS_SEM, current)) {
                    return arg0.ll;
                }
                rt_free(arg0.sem);
            }
            return 0;
        }

        case LXRT_SEM_DELETE: {
            if (lxrt_sem_delete(arg0.sem)) {
                arg0.i = -EFAULT;
                return arg0.ll;
            }
            rt_free(arg0.sem);
            arg0.i = rt_drg_on_adr(arg0.sem);
            return arg0.ll;
        }

        case LXRT_MBX_INIT: {
            if (rt_get_adr(arg0.name)) {
                return 0;
            }
            if ((arg0.mbx = rt_malloc(sizeof(MBX)))) {
                struct arg { unsigned long name; long size; int qtype; };
                if (lxrt_typed_mbx_init(arg0.mbx, larg->size, larg->qtype) < 0) {
                    rt_free(arg0.mbx);
                    return 0;
                }
                if (rt_register(larg->name, arg0.mbx, IS_MBX, current)) {
                    return arg0.ll;
                }
                rt_free(arg0.mbx);
            }
            return 0;
        }

        case LXRT_MBX_DELETE: {
            if (lxrt_mbx_delete(arg0.mbx)) {
                arg0.i = -EFAULT;
                return arg0.ll;
            }
            rt_free(arg0.mbx);
            arg0.i = rt_drg_on_adr(arg0.mbx);
            return arg0.ll;
        }

        case LXRT_RWL_INIT: {
            if (rt_get_adr(arg0.name)) {
                return 0;
            }
            if ((arg0.rwl = rt_malloc(sizeof(RWL)))) {
                struct arg { unsigned long name; long type; };
                lxrt_typed_rwl_init(arg0.rwl, larg->type);
                if (rt_register(larg->name, arg0.rwl, IS_SEM, current)) {
                    return arg0.ll;
                }
                rt_free(arg0.rwl);
            }
            return 0;
        }

        case LXRT_RWL_DELETE: {
            if (lxrt_rwl_delete(arg0.rwl)) {
                arg0.i = -EFAULT;
                return arg0.ll;
            }
            rt_free(arg0.rwl);
            arg0.i = rt_drg_on_adr(arg0.rwl);
            return arg0.ll;
        }

        case LXRT_SPL_INIT: {
            if (rt_get_adr(arg0.name)) {
                return 0;
            }
            if ((arg0.spl = rt_malloc(sizeof(SPL)))) {
                struct arg { unsigned long name; };
                lxrt_spl_init(arg0.spl);
                if (rt_register(larg->name, arg0.spl, IS_SEM, current)) {
                    return arg0.ll;
                }
                rt_free(arg0.spl);
            }
            return 0;
        }

        case LXRT_SPL_DELETE: {
            if (lxrt_spl_delete(arg0.spl)) {
                arg0.i = -EFAULT;
                return arg0.ll;
            }
            rt_free(arg0.spl);
            arg0.i = rt_drg_on_adr(arg0.spl);
            return arg0.ll;
        }

        case MAKE_HARD_RT: {
            /* Unreachable legacy code that followed this return (an inline
             * steal_from_linux() path) has been dropped. */
            rt_make_hard_real_time(task);
            return 0;
        }

        case MAKE_SOFT_RT: {
            /* Unreachable legacy code that followed this return (an inline
             * give_back_to_linux() path) has been dropped. */
            rt_make_soft_real_time(task);
            return 0;
        }

        case PRINT_TO_SCREEN: {
            struct arg { char *display; long nch; };
            arg0.i = rtai_print_to_screen("%s", larg->display);
            return arg0.ll;
        }

        case PRINTK: {
            struct arg { char *display; long nch; };
            arg0.i = rt_printk("%s", larg->display);
            return arg0.ll;
        }

        case NONROOT_HRT: {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
            current->cap_effective |= ((1 << CAP_IPC_LOCK) | (1 << CAP_SYS_RAWIO) | (1 << CAP_SYS_NICE));
#else
            set_lxrt_perm(CAP_IPC_LOCK);
            set_lxrt_perm(CAP_SYS_RAWIO);
            set_lxrt_perm(CAP_SYS_NICE);
#endif
            return 0;
        }

        case RT_BUDDY: {
            arg0.rt_task = task && current->rtai_tskext(TSKEXT1) == current ? task : NULL;
            return arg0.ll;
        }

        case HRT_USE_FPU: {
            struct arg { RT_TASK *task; long use_fpu; };
            if (!larg->use_fpu) {
                clear_lnxtsk_uses_fpu((larg->task)->lnxtsk);
            } else {
                init_fpu((larg->task)->lnxtsk);
            }
            return 0;
        }

        case GET_USP_FLAGS: {
            arg0.name = arg0.rt_task->usp_flags;
            return arg0.ll;
        }

        case SET_USP_FLAGS: {
            struct arg { RT_TASK *task; unsigned long flags; };
            arg0.rt_task->usp_flags = larg->flags;
            arg0.rt_task->force_soft = (arg0.rt_task->is_hard > 0) && (larg->flags & arg0.rt_task->usp_flags_mask & FORCE_SOFT);
            return 0;
        }

        case GET_USP_FLG_MSK: {
            arg0.name = arg0.rt_task->usp_flags_mask;
            return arg0.ll;
        }

        case SET_USP_FLG_MSK: {
            task->usp_flags_mask = arg0.name;
            task->force_soft = (task->is_hard > 0) && (task->usp_flags & arg0.name & FORCE_SOFT);
            return 0;
        }

        case FORCE_TASK_SOFT: {
            extern void rt_do_force_soft(RT_TASK *rt_task);
            struct task_struct *ltsk;
            if ((ltsk = find_task_by_pid(arg0.name))) {
                if ((arg0.rt_task = ltsk->rtai_tskext(TSKEXT0))) {
                    if ((arg0.rt_task->force_soft = (arg0.rt_task->is_hard != 0) && FORCE_SOFT)) {
                        rt_do_force_soft(arg0.rt_task);
                    }
                    return arg0.ll;
                }
            }
            return 0;
        }

        case IS_HARD: {
            arg0.i = arg0.rt_task || (arg0.rt_task = current->rtai_tskext(TSKEXT0)) ? arg0.rt_task->is_hard : 0;
            return arg0.ll;
        }

        case GET_EXECTIME: {
            struct arg { RT_TASK *task; RTIME *exectime; };
            if ((larg->task)->exectime[0] && (larg->task)->exectime[1]) {
                larg->exectime[0] = (larg->task)->exectime[0];
                larg->exectime[1] = (larg->task)->exectime[1];
                larg->exectime[2] = rtai_rdtsc();
            }
            return 0;
        }

        case GET_TIMEORIG: {
            struct arg { RTIME *time_orig; };
            if (larg->time_orig) {
                RTIME time_orig[2];
                rt_gettimeorig(time_orig);
                rt_copy_to_user(larg->time_orig, time_orig, sizeof(time_orig));
            } else {
                rt_gettimeorig(NULL);
            }
            return 0;
        }

        case LINUX_SERVER: {
            struct arg { struct linux_syscalls_list syscalls; };
            if (larg->syscalls.nr) {
                if (larg->syscalls.task->linux_syscall_server) {
                    RT_TASK *serv;
                    rt_get_user(serv, &larg->syscalls.serv);
                    rt_task_masked_unblock(serv, ~RT_SCHED_READY);
                }
                larg->syscalls.task->linux_syscall_server = larg->syscalls.serv;
                rtai_set_linux_task_priority(current, (larg->syscalls.task)->lnxtsk->policy, (larg->syscalls.task)->lnxtsk->rt_priority);
                arg0.rt_task = __task_init((unsigned long)larg->syscalls.task,
                                           larg->syscalls.task->base_priority >= BASE_SOFT_PRIORITY ?
                                           larg->syscalls.task->base_priority - BASE_SOFT_PRIORITY :
                                           larg->syscalls.task->base_priority,
                                           0, 0, 1 << larg->syscalls.task->runnable_on_cpus);
                larg->syscalls.task->linux_syscall_server = arg0.rt_task;
                arg0.rt_task->linux_syscall_server = larg->syscalls.serv;
                return arg0.ll;
            } else {
                if (!larg->syscalls.task) {
                    larg->syscalls.task = RT_CURRENT;
                }
                if ((arg0.rt_task = larg->syscalls.task->linux_syscall_server)) {
                    larg->syscalls.task->linux_syscall_server = NULL;
                    arg0.rt_task->suspdepth = -RTE_HIGERR;
                    rt_task_masked_unblock(arg0.rt_task, ~RT_SCHED_READY);
                }
            }
            return 0;
        }

        default: {
            rt_printk("RTAI/LXRT: Unknown srq #%d\n", srq);
            arg0.i = -ENOSYS;
            return arg0.ll;
        }
    }
    return 0;
}
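/*
 * For reference, this is roughly how the dispatcher above is reached from
 * user space: the LXRT stubs in rtai_lxrt.h pack their arguments into a
 * struct and trap into handle_lxrt_request() with an encoded service
 * number. Below is a sketch of the rt_get_adr() stub, following the stock
 * encoding (BIDX, SIZARG, the rtai_lxrt_t return union); details can
 * differ across RTAI versions.
 */
#include <rtai_lxrt.h>

static inline void *my_rt_get_adr(unsigned long name)
{
    struct { unsigned long name; } arg = { name };
    /* BIDX selects the base function table; SIZARG is sizeof(arg). The
     * LXRT_GET_ADR case in the switch above services this trap. */
    return rtai_lxrt(BIDX, SIZARG, LXRT_GET_ADR, &arg).v[LOW];
}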
static inline RT_TASK *__task_init(unsigned long name, int prio, int stack_size, int max_msg_size, int cpus_allowed)
{
    void *msg_buf0, *msg_buf1;
    RT_TASK *rt_task;

    if ((rt_task = current->rtai_tskext(TSKEXT0))) {
        /* Already an LXRT task: just (re)pin it to a suitable CPU. */
        if (num_online_cpus() > 1 && cpus_allowed) {
            cpus_allowed = hweight32(cpus_allowed) > 1 ? get_min_tasks_cpuid() : ffnz(cpus_allowed);
        } else {
            cpus_allowed = rtai_cpuid();
        }
        put_current_on_cpu(cpus_allowed);
        return rt_task;
    }
    if (rt_get_adr(name)) {
        return 0;   /* the name is already taken */
    }
    if (prio > RT_SCHED_LOWEST_PRIORITY) {
        prio = RT_SCHED_LOWEST_PRIORITY;
    }
    if (!max_msg_size) {
        max_msg_size = USRLAND_MAX_MSG_SIZE;
    }
    if (!(msg_buf0 = rt_malloc(max_msg_size))) {
        return 0;
    }
    if (!(msg_buf1 = rt_malloc(max_msg_size))) {
        rt_free(msg_buf0);
        return 0;
    }
    rt_task = rt_malloc(sizeof(RT_TASK) + 3*sizeof(struct fun_args));
    if (rt_task) {
        rt_task->magic = 0;
        if (num_online_cpus() > 1 && cpus_allowed) {
            cpus_allowed = hweight32(cpus_allowed) > 1 ? get_min_tasks_cpuid() : ffnz(cpus_allowed);
        } else {
            cpus_allowed = rtai_cpuid();
        }
        if (!set_rtext(rt_task, prio, 0, 0, cpus_allowed, 0)) {
            rt_task->fun_args = (long *)((struct fun_args *)(rt_task + 1));
            rt_task->msg_buf[0] = msg_buf0;
            rt_task->msg_buf[1] = msg_buf1;
            rt_task->max_msg_size[0] =
            rt_task->max_msg_size[1] = max_msg_size;
            if (rt_register(name, rt_task, IS_TASK, 0)) {
                rt_task->state = 0;
#ifdef __IPIPE_FEATURE_ENABLE_NOTIFIER
                ipipe_enable_notifier(current);
#else
                current->flags |= PF_EVNOTIFY;
#endif
#if (defined VM_PINNED) && (defined CONFIG_MMU)
                ipipe_disable_ondemand_mappings(current);
#endif
                RTAI_OOM_DISABLE();
                return rt_task;
            } else {
                clr_rtext(rt_task);
            }
        }
        rt_free(rt_task);
    }
    /* Any failure past this point releases everything allocated so far. */
    rt_free(msg_buf0);
    rt_free(msg_buf1);
    return 0;
}
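/*
 * A hedged sketch of the user-space path that ends up in __task_init() via
 * the LXRT_TASK_INIT case above, using the stock rtai_lxrt.h API. "MYTSK"
 * is a made-up name; error handling is minimal.
 */
#include <stdio.h>
#include <rtai_lxrt.h>

int main(void)
{
    RT_TASK *task;

    /* name, priority, stack size (0 = default), max_msg_size (0 = default) */
    task = rt_task_init(nam2num("MYTSK"), 0, 0, 0);
    if (!task) {
        fprintf(stderr, "cannot register MYTSK (name already in use?)\n");
        return 1;
    }
    /* ... rt_make_hard_real_time(), real-time work, back to soft ... */
    rt_task_delete(task);
    return 0;
}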