Example #1
void fpu_state_restore(struct pt_regs *regs)
{
    struct task_struct *tsk = current;

    if (unlikely(!user_mode(regs))) {
        printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
        BUG();
        return;
    }

    if (!tsk_used_math(tsk)) {
        local_irq_enable();
        /*
         * does a slab alloc which can sleep
         */
        if (init_fpu(tsk)) {
            /*
             * ran out of memory!
             */
            do_group_exit(SIGKILL);
            return;
        }
        local_irq_disable();
    }

    grab_fpu(regs);

    __fpu_state_restore();
}
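The heavy lifting above is deferred to __fpu_state_restore(). A minimal sketch of what that helper plausibly does, modeled on the tail of math_state_restore() in Example #2 below (restore_fpu() and TS_USEDFPU are assumed by analogy with that listing, not quoted from the SH tree):

/* Hedged sketch: what __fpu_state_restore() likely amounts to, by analogy
 * with Example #2. Not the verbatim SH source. */
void __fpu_state_restore(void)
{
	struct task_struct *tsk = current;

	restore_fpu(tsk);				/* reload FPU registers from the task struct */
	task_thread_info(tsk)->status |= TS_USEDFPU;	/* so state is saved on switch_to() */
	tsk->fpu_counter++;				/* bias toward eager restore next time */
}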
Example #2
File: traps.c Project: 8l/glendix
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	clts();				/* Allow maths ops (or we recurse) */
	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 */
	if (unlikely(restore_fpu_checking(tsk))) {
		stts();
		force_sig(SIGSEGV, tsk);
		return;
	}

	thread->status |= TS_USEDFPU;	/* So we fnsave on switch_to() */
	tsk->fpu_counter++;
}
Example #3
File: exec.c Project: jmgc/noah
static void
init_reg_state(void)
{
  vmm_write_register(HV_X86_RAX, 0);
  vmm_write_register(HV_X86_RBX, 0);
  vmm_write_register(HV_X86_RCX, 0);
  vmm_write_register(HV_X86_RDX, 0);
  vmm_write_register(HV_X86_RSI, 0);
  vmm_write_register(HV_X86_RDI, 0);
  vmm_write_register(HV_X86_R8, 0);
  vmm_write_register(HV_X86_R9, 0);
  vmm_write_register(HV_X86_R10, 0);
  vmm_write_register(HV_X86_R11, 0);
  vmm_write_register(HV_X86_R12, 0);
  vmm_write_register(HV_X86_R13, 0);
  vmm_write_register(HV_X86_R14, 0);
  vmm_write_register(HV_X86_R15, 0);

  vmm_write_vmcs(VMCS_GUEST_FS, 0);
  vmm_write_vmcs(VMCS_GUEST_ES, 0);
  vmm_write_vmcs(VMCS_GUEST_GS, 0);
  vmm_write_vmcs(VMCS_GUEST_DS, 0);
  vmm_write_vmcs(VMCS_GUEST_CS, GSEL(SEG_CODE, 0));
  /* note: DS is written twice here while SS is never set; this second write
   * was most likely intended for VMCS_GUEST_SS */
  vmm_write_vmcs(VMCS_GUEST_DS, GSEL(SEG_DATA, 0));

  vmm_write_vmcs(VMCS_GUEST_FS_BASE, 0);
  vmm_write_vmcs(VMCS_GUEST_GS_BASE, 0);

  vmm_write_vmcs(VMCS_GUEST_LDTR, 0);

  init_fpu();
}
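GSEL() above packs a GDT index and a requested privilege level into an x86 segment selector. Its definition is not shown in the snippet; a conventional form, assumed here rather than taken from the noah sources, is:

/* assumed helper: selector = (GDT index << 3) | RPL; SEG_CODE and SEG_DATA
 * are GDT slot indices defined elsewhere in the project */
#define GSEL(seg, rpl) (((seg) << 3) | (rpl))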
Example #4
/*
 * This restores directly out of user space. Exceptions are handled.
 */
int restore_i387_xstate(void __user *buf)
{
    struct task_struct *tsk = current;
    int err = 0;

    if (!buf) {
        if (used_math())
            goto clear;
        return 0;
    } else if (!access_ok(VERIFY_READ, buf, sig_xstate_size))
        return -EACCES;

    if (!used_math()) {
        err = init_fpu(tsk);
        if (err)
            return err;
    }

    user_fpu_begin();
    if (use_xsave())
        err = restore_user_xstate(buf);
    else
        err = fxrstor_checking((__force struct i387_fxsave_struct *)
                               buf);
    if (unlikely(err)) {
        /*
         * Encountered an error while doing the restore from the
         * user buffer, clear the fpu state.
         */
clear:
        clear_fpu(tsk);
        clear_used_math();
    }
    return err;
}
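user_fpu_begin() above arranges for the following fxrstor/xrstor to land in live registers rather than faulting on CR0.TS. Roughly, in kernels of this vintage it has the following shape (paraphrased from memory; verify against the actual tree):

/* approximate shape of user_fpu_begin(); paraphrased, not verbatim */
static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu())			/* CPU does not own this task's FPU yet */
		__thread_fpu_begin(current);	/* clts() and mark the FPU as in use */
	preempt_enable();
}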
Example #5
/*
 * This restores directly out of user space. Exceptions are handled.
 */
static inline int restore_i387(struct _fpstate __user *buf)
{
	struct task_struct *tsk = current;
	int err;

	if (!used_math()) {
		err = init_fpu(tsk);
		if (err)
			return err;
	}

	if (!(task_thread_info(current)->status & TS_USEDFPU)) {
		clts();
		task_thread_info(current)->status |= TS_USEDFPU;
	}
	err = restore_fpu_checking((__force struct i387_fxsave_struct *)buf);
	if (unlikely(err)) {
		/*
		 * Encountered an error while doing the restore from the
		 * user buffer, clear the fpu state.
		 */
		clear_fpu(tsk);
		clear_used_math();
	}
	return err;
}
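Both error paths above end in clear_fpu(), which discards whatever partial state reached the registers. Its core, paraphrased from the i387.h of the same era (treat the field and macro names as approximations):

/* paraphrased from i387.h of this era: drop the task's live FPU state */
static inline void __clear_fpu(struct task_struct *tsk)
{
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		/* swallow any pending FP exception from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		task_thread_info(tsk)->status &= ~TS_USEDFPU;
		stts();		/* set CR0.TS so the next FP op traps */
	}
}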
Example #6
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
	struct task_struct *me = current;
	clts();			/* Allow maths ops (or we recurse) */

	if (!used_math())
		init_fpu(me);	/* return value unchecked in this older variant */
	restore_fpu_checking(&me->thread.i387.fxsave);
	me->thread_info->status |= TS_USEDFPU;
}
Example #7
void kmain(uint32_t magic, multiboot_info_t* mbi) {
    clear();
    printf("Kernel is on!\n");

    if (magic != MULTIBOOT_BOOTLOADER_MAGIC) {
        printf("Invalid magic code: %x", magic);
        return;
    }

    // Check if the cpuid instruction is available
    if (check_cpuid()) {
        printf("CPUID available\n");
        if (check_apic()) {
            printf("APIC available\n");
        } else {
            printf("APIC not available\n");
        }
    } else {
        printf("CPUID not available\n");
    }

    // Init the floating point unit
    init_fpu();

    // Initialize the Interrupt Descriptor Table and Interrupt Service Routines
    init_idt();

    // Print (if available) memory map
    if (mbi->flags & MULTIBOOT_INFO_MEM_MAP) {  // bitwise test of the mem-map flag
        // 24 == sizeof a packed multiboot_memory_map_t; strictly, each entry
        // carries its own size field, but the fixed stride matches this layout
        uint32_t mmap_entries = mbi->mmap_length / 24;

        printf("## Memory map ##\n");
        printf("Entries: %u\n", mmap_entries);

        multiboot_memory_map_t* mmap_entry = (multiboot_memory_map_t *)
            mbi->mmap_addr;
        for (uint32_t i = 0; i < mmap_entries; ++i, ++mmap_entry) {
            printf("Entry %u\n", i);
            printf("\t.addr: %x\n", mmap_entry->addr);
            printf("\t.len: %u\n", mmap_entry->len);
            printf("\t.type: ");
            if (mmap_entry->type == MULTIBOOT_MEMORY_AVAILABLE) {
                printf("available\n");
            } else {
                printf("reserved\n");
            }
        }
    }

    // Test breakpoint interrupt
    __asm __volatile("int $0x3");

    init_timer(50); // Initialise timer to 50Hz
}
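init_fpu() is not shown in this snippet. In a hobby kernel like this one it typically just flips the CR0 control bits and resets the unit with fninit; a self-contained sketch (hypothetical, not this project's code):

#include <stdint.h>

/* hypothetical init_fpu() for a small x86 kernel: clear CR0.EM (no emulation)
 * and CR0.TS, set CR0.MP and CR0.NE, then reset the FPU with fninit */
static void init_fpu(void) {
    uint32_t cr0;
    __asm __volatile("mov %%cr0, %0" : "=r"(cr0));
    cr0 &= ~((1u << 2) | (1u << 3));   /* clear EM, TS */
    cr0 |= (1u << 1) | (1u << 5);      /* set MP, NE */
    __asm __volatile("mov %0, %%cr0" : : "r"(cr0));
    __asm __volatile("fninit");
}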
Example #8
/*
 * Find devices.  The system is alive.
 */
void machine_init(void)
{
	/*
	 * Initialize the console.
	 */
	cninit();

	/*
	 * Set up to use floating point.
	 */
	init_fpu();

#ifdef MACH_HYP
	hyp_init();
#else	/* MACH_HYP */
#ifdef LINUX_DEV
	/*
	 * Initialize Linux drivers.
	 */
	linux_init();
#endif

	/*
	 * Find the devices
	 */
	probeio();
#endif	/* MACH_HYP */

	/*
	 * Get the time
	 */
	inittodr();

#ifndef MACH_HYP
	/*
	 * Tell the BIOS not to clear and test memory.
	 */
	*(unsigned short *)phystokv(0x472) = 0x1234;
#endif	/* MACH_HYP */

#if VM_MIN_KERNEL_ADDRESS == 0
	/*
	 * Unmap page 0 to trap NULL references.
	 *
	 * Note that this breaks accessing some BIOS areas stored there.
	 */
	pmap_unmap_page_zero();
#endif
}
Example #9
/*
 * Find devices.  The system is alive.
 */
void machine_init()
{
	/*
	 * Initialize the console.
	 */
	cninit();

	/*
	 * Set up to use floating point.
	 */
	init_fpu();

#ifdef LINUX_DEV
	/*
	 * Initialize Linux drivers.
	 */
	linux_init();
#endif

	/*
	 * Find the devices
	 */
	probeio();

	/*
	 * Get the time
	 */
	inittodr();

	/*
	 * Tell the BIOS not to clear and test memory.
	 */
	*(unsigned short *)phystokv(0x472) = 0x1234;

	/*
	 * Unmap page 0 to trap NULL references.
	 */
	pmap_unmap_page_zero();
}
Example #10
/*
 * Find devices.  The system is alive.
 */
void machine_init()
{
	/*
	 * Set up to use floating point.
	 */
	init_fpu();

	/*
	 * Find the devices
	 */
	probeio();

	/*
	 * Find the root device
	 */
	get_root_device();

	/*
	 * Get the time
	 */
	inittodr();
}
Example #11
int ckpt_restore_fpu(ckpt_desc_t desc)
{
    int ret;
    int flag;

    log_restore_fpu("restoring fpu ...");
    if (ckpt_read(desc, &flag, sizeof(int)) != sizeof(int)) {
        log_err("failed to get file");
        return -EIO;
    }

    kernel_fpu_begin();
    clear_used_math();
    if (flag) {
        if (!ckpt_get_i387(current)) {
            init_fpu(current);
            if (!ckpt_get_i387(current)) {
                log_err("failed to get i387");
                kernel_fpu_end();   /* balance kernel_fpu_begin() on the error path */
                return -EFAULT;
            }
        }
        if (ckpt_read(desc, ckpt_get_i387(current), xstate_size) != xstate_size) {
            log_err("failed to get i387");
            kernel_fpu_end();
            return -EFAULT;
        }

        ret = ckpt_check_fpu_state();
        if (ret) {
            log_err("failed to restore i387");
            kernel_fpu_end();
            return ret;
        }
        set_used_math();
    }
    kernel_fpu_end();
    log_restore_pos(desc);
    return 0;
}
Example #12
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	clts();				/* Allow maths ops (or we recurse) */

	__math_state_restore();
}
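Example #12 is the refactored form of Example #2: the paranoid-restore tail was split out into __math_state_restore(). Judging from Example #2, that helper is essentially:

/* reconstructed from the tail of Example #2; see that listing for the
 * in-tree original */
void __math_state_restore(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = thread->task;

	/* paranoid restore: send a SIGSEGV if we fail to restore the state */
	if (unlikely(restore_fpu_checking(tsk))) {
		stts();
		force_sig(SIGSEGV, tsk);
		return;
	}

	thread->status |= TS_USEDFPU;	/* so we fnsave on switch_to() */
	tsk->fpu_counter++;
}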
Example #13
int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
	int ia32_fxstate = (buf != buf_fx);
	struct task_struct *tsk = current;
	int state_size = xstate_size;
	u64 xstate_bv = 0;
	int fx_only = 0;

	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!buf) {
		drop_init_fpu(tsk);
		return 0;
	}

	if (!access_ok(VERIFY_READ, buf, size))
		return -EACCES;

	if (!used_math() && init_fpu(tsk))
		return -1;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;
		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct i387_fxsave_struct);
			fx_only = 1;
		} else {
			state_size = fx_sw_user.xstate_size;
			xstate_bv = fx_sw_user.xstate_bv;
		}
	}

	if (ia32_fxstate) {
		/*
		 * For 32-bit frames with fxstate, copy the user state to the
		 * thread's fpu state, reconstruct fxstate from the fsave
		 * header. Sanitize the copied state etc.
		 */
		struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
		struct user_i387_ia32_struct env;
		int err = 0;

		/*
		 * Drop the current fpu, which clears used_math(). This ensures
		 * that a context switch during the copy of the new state cannot
		 * save or restore a half-written intermediate state, which would
		 * corrupt the newly restored state. We are ready to restore/save
		 * the state again only after set_used_math() is set.
		 */
		drop_fpu(tsk);

		if (__copy_from_user(xsave, buf_fx, state_size) ||
		    __copy_from_user(&env, buf, sizeof(env))) {
			err = -1;
		} else {
			sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
			set_used_math();
		}

		if (use_eager_fpu())
			math_state_restore();

		return err;
	} else {
		/*
		 * For 64-bit frames and 32-bit fsave frames, restore the user
		 * state to the registers directly (with exceptions handled).
		 */
		user_fpu_begin();
		if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
			drop_init_fpu(tsk);
			return -1;
		}
	}

	return 0;
}
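check_for_xstate() above pulls xstate_bv and xstate_size out of the software-reserved bytes of the fxsave area in the signal frame. The layout it parses is struct _fpx_sw_bytes, reproduced here from memory from the kernel's uapi sigcontext headers (double-check the field names against your tree):

/* software-reserved bytes of the fxsave area in a signal frame */
struct _fpx_sw_bytes {
	__u32 magic1;		/* FP_XSTATE_MAGIC1 when extended state is present */
	__u32 extended_size;	/* total size of the extended signal frame */
	__u64 xstate_bv;	/* which xstate components the frame carries */
	__u32 xstate_size;	/* size of the xsave area itself */
	__u32 padding[7];
};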
Example #14
static inline long long handle_lxrt_request (unsigned int lxsrq, long *arg, RT_TASK *task)
{
#define larg ((struct arg *)arg)

	union {unsigned long name; RT_TASK *rt_task; SEM *sem; MBX *mbx; RWL *rwl; SPL *spl; int i; void *p; long long ll; } arg0;
	int srq;

	if (likely((srq = SRQ(lxsrq)) < MAX_LXRT_FUN)) {
		unsigned long type;
		struct rt_fun_entry *funcm;
/*
 * The next two lines of code do a lot. They make it possible to extend the
 * use of USP to any other real time module service in user space, both for
 * soft and hard real time. Concept contributed and copyrighted by:
 * Giuseppe Renoldi ([email protected]).
 */
		if (unlikely(!(funcm = rt_fun_ext[INDX(lxsrq)]))) {
			rt_printk("BAD: null rt_fun_ext, no module for extension %d?\n", INDX(lxsrq));
			return -ENOSYS;
		}
		if (!(type = funcm[srq].type)) {
			return ((RTAI_SYSCALL_MODE long long (*)(unsigned long, ...))funcm[srq].fun)(RTAI_FUN_ARGS);
		}
		if (unlikely(NEED_TO_RW(type))) {
			lxrt_fun_call_wbuf(task, funcm[srq].fun, NARG(lxsrq), arg, type);
		} else {
			lxrt_fun_call(task, funcm[srq].fun, NARG(lxsrq), arg);
		}
		return task->retval;
	}

	arg0.name = arg[0];
	switch (srq) {
		case LXRT_GET_ADR: {
			arg0.p = rt_get_adr(arg0.name);
			return arg0.ll;
		}

		case LXRT_GET_NAME: {
			arg0.name = rt_get_name(arg0.p);
			return arg0.ll;
		}

		case LXRT_TASK_INIT: {
			struct arg { unsigned long name; long prio, stack_size, max_msg_size, cpus_allowed; };
			arg0.rt_task = __task_init(arg0.name, larg->prio, larg->stack_size, larg->max_msg_size, larg->cpus_allowed);
			return arg0.ll;
		}

		case LXRT_TASK_DELETE: {
			arg0.i = __task_delete(arg0.rt_task ? arg0.rt_task : task);
			return arg0.ll;
		}

		case LXRT_SEM_INIT: {
			if (rt_get_adr(arg0.name)) {
				return 0;
			}
			if ((arg0.sem = rt_malloc(sizeof(SEM)))) {
				struct arg { unsigned long name; long cnt; long typ; };
				lxrt_typed_sem_init(arg0.sem, larg->cnt, larg->typ);
				if (rt_register(larg->name, arg0.sem, IS_SEM, current)) {
					return arg0.ll;
				} else {
					rt_free(arg0.sem);
				}
			}
			return 0;
		}

		case LXRT_SEM_DELETE: {
			if (lxrt_sem_delete(arg0.sem)) {
				arg0.i = -EFAULT;
				return arg0.ll;
			}
			rt_free(arg0.sem);
			arg0.i = rt_drg_on_adr(arg0.sem);
			return arg0.ll;
		}

		case LXRT_MBX_INIT: {
			if (rt_get_adr(arg0.name)) {
				return 0;
			}
			if ((arg0.mbx = rt_malloc(sizeof(MBX)))) {
				struct arg { unsigned long name; long size; int qtype; };
				if (lxrt_typed_mbx_init(arg0.mbx, larg->size, larg->qtype) < 0) {
					rt_free(arg0.mbx);
					return 0;
				}
				if (rt_register(larg->name, arg0.mbx, IS_MBX, current)) {
					return arg0.ll;
				} else {
					rt_free(arg0.mbx);
				}
			}
			return 0;
		}

		case LXRT_MBX_DELETE: {
			if (lxrt_mbx_delete(arg0.mbx)) {
				arg0.i = -EFAULT;
				return arg0.ll;
			}
			rt_free(arg0.mbx);
			arg0.i = rt_drg_on_adr(arg0.mbx);
			return arg0.ll;
		}

		case LXRT_RWL_INIT: {
			if (rt_get_adr(arg0.name)) {
				return 0;
			}
			if ((arg0.rwl = rt_malloc(sizeof(RWL)))) {
				struct arg { unsigned long name; long type; };
				lxrt_typed_rwl_init(arg0.rwl, larg->type);
				if (rt_register(larg->name, arg0.rwl, IS_SEM, current)) {
					return arg0.ll;
				} else {
					rt_free(arg0.rwl);
				}
			}
			return 0;
		}

		case LXRT_RWL_DELETE: {
			if (lxrt_rwl_delete(arg0.rwl)) {
				arg0.i = -EFAULT;
				return arg0.ll;
			}
			rt_free(arg0.rwl);
			arg0.i = rt_drg_on_adr(arg0.rwl);
			return arg0.ll;
		}

		case LXRT_SPL_INIT: {
			if (rt_get_adr(arg0.name)) {
				return 0;
			}
			if ((arg0.spl = rt_malloc(sizeof(SPL)))) {
				struct arg { unsigned long name; };
				lxrt_spl_init(arg0.spl);
				if (rt_register(larg->name, arg0.spl, IS_SEM, current)) {
					return arg0.ll;
				} else {
					rt_free(arg0.spl);
				}
			}
			return 0;
		}

		case LXRT_SPL_DELETE: {
			if (lxrt_spl_delete(arg0.spl)) {
				arg0.i = -EFAULT;
				return arg0.ll;
			}
			rt_free(arg0.spl);
			arg0.i = rt_drg_on_adr(arg0.spl);
			return arg0.ll;
		}

		case MAKE_HARD_RT: {
			rt_make_hard_real_time(task);
			return 0;
			/* unreachable: superseded by rt_make_hard_real_time() above */
			if (!task || task->is_hard) {
				return 0;
			}
			steal_from_linux(task);
			return 0;
		}

		case MAKE_SOFT_RT: {
			rt_make_soft_real_time(task);
			return 0;
			/* unreachable: superseded by rt_make_soft_real_time() above */
			if (!task || !task->is_hard) {
				return 0;
			}
			if (task->is_hard < 0) {
				task->is_hard = 0;
			} else {
				give_back_to_linux(task, 0);
			}
			return 0;
		}

		case PRINT_TO_SCREEN: {
			struct arg { char *display; long nch; };
			arg0.i = rtai_print_to_screen("%s", larg->display);
			return arg0.ll;
		}

		case PRINTK: {
			struct arg { char *display; long nch; };
			arg0.i = rt_printk("%s", larg->display);
			return arg0.ll;
		}

		case NONROOT_HRT: {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
			current->cap_effective |= ((1 << CAP_IPC_LOCK)  |
						   (1 << CAP_SYS_RAWIO) |
						   (1 << CAP_SYS_NICE));
#else
			set_lxrt_perm(CAP_IPC_LOCK);
			set_lxrt_perm(CAP_SYS_RAWIO);
			set_lxrt_perm(CAP_SYS_NICE);
#endif
			return 0;
		}

		case RT_BUDDY: {
			arg0.rt_task = task && current->rtai_tskext(TSKEXT1) == current ? task : NULL;
			return arg0.ll;
		}

		case HRT_USE_FPU: {
			struct arg { RT_TASK *task; long use_fpu; };
			if(!larg->use_fpu) {
				clear_lnxtsk_uses_fpu((larg->task)->lnxtsk);
			} else {
				init_fpu((larg->task)->lnxtsk);
			}
			return 0;
		}

		case GET_USP_FLAGS: {
			arg0.name = arg0.rt_task->usp_flags;
			return arg0.ll;
		}

		case SET_USP_FLAGS: {
			struct arg { RT_TASK *task; unsigned long flags; };
			arg0.rt_task->usp_flags = larg->flags;
			arg0.rt_task->force_soft = (arg0.rt_task->is_hard > 0) && (larg->flags & arg0.rt_task->usp_flags_mask & FORCE_SOFT);
			return 0;
		}

		case GET_USP_FLG_MSK: {
			arg0.name = arg0.rt_task->usp_flags_mask;
			return arg0.ll;
		}

		case SET_USP_FLG_MSK: {
			task->usp_flags_mask = arg0.name;
			task->force_soft = (task->is_hard > 0) && (task->usp_flags & arg0.name & FORCE_SOFT);
			return 0;
		}

		case FORCE_TASK_SOFT: {
			extern void rt_do_force_soft(RT_TASK *rt_task);
			struct task_struct *ltsk;
			if ((ltsk = find_task_by_pid(arg0.name))) {
				if ((arg0.rt_task = ltsk->rtai_tskext(TSKEXT0))) {
					if ((arg0.rt_task->force_soft = (arg0.rt_task->is_hard != 0) && FORCE_SOFT)) {
						rt_do_force_soft(arg0.rt_task);
					}
					return arg0.ll;
				}
			}
			return 0;
		}

		case IS_HARD: {
			arg0.i = arg0.rt_task || (arg0.rt_task = current->rtai_tskext(TSKEXT0)) ? arg0.rt_task->is_hard : 0;
			return arg0.ll;
		}

		case GET_EXECTIME: {
			struct arg { RT_TASK *task; RTIME *exectime; };
			if ((larg->task)->exectime[0] && (larg->task)->exectime[1]) {
				larg->exectime[0] = (larg->task)->exectime[0];
				larg->exectime[1] = (larg->task)->exectime[1];
				larg->exectime[2] = rtai_rdtsc();
			}
			return 0;
		}

		case GET_TIMEORIG: {
			struct arg { RTIME *time_orig; };
			if (larg->time_orig) {
				RTIME time_orig[2];
				rt_gettimeorig(time_orig);
				rt_copy_to_user(larg->time_orig, time_orig, sizeof(time_orig));
			} else {
				rt_gettimeorig(NULL);
			}
			return 0;
		}

		case LINUX_SERVER: {
			struct arg { struct linux_syscalls_list syscalls; };
			if (larg->syscalls.nr) {
				if (larg->syscalls.task->linux_syscall_server) {
					RT_TASK *serv;
					rt_get_user(serv, &larg->syscalls.serv);
					rt_task_masked_unblock(serv, ~RT_SCHED_READY);
				}
				larg->syscalls.task->linux_syscall_server = larg->syscalls.serv;
				rtai_set_linux_task_priority(current, (larg->syscalls.task)->lnxtsk->policy, (larg->syscalls.task)->lnxtsk->rt_priority);
				arg0.rt_task = __task_init((unsigned long)larg->syscalls.task, larg->syscalls.task->base_priority >= BASE_SOFT_PRIORITY ? larg->syscalls.task->base_priority - BASE_SOFT_PRIORITY : larg->syscalls.task->base_priority, 0, 0, 1 << larg->syscalls.task->runnable_on_cpus);

				larg->syscalls.task->linux_syscall_server = arg0.rt_task;
				arg0.rt_task->linux_syscall_server = larg->syscalls.serv;

				return arg0.ll;
			} else {
				if (!larg->syscalls.task) {
					larg->syscalls.task = RT_CURRENT;
				}
				if ((arg0.rt_task = larg->syscalls.task->linux_syscall_server)) {
					larg->syscalls.task->linux_syscall_server = NULL;
					arg0.rt_task->suspdepth = -RTE_HIGERR;
					rt_task_masked_unblock(arg0.rt_task, ~RT_SCHED_READY);
				}
			}
			return 0;
		}

		default: {
			rt_printk("RTAI/LXRT: Unknown srq #%d\n", srq);
			arg0.i = -ENOSYS;
			return arg0.ll;
		}
	}
	return 0;
}
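The #define larg ((struct arg *)arg) at the top of Example #14 is the load-bearing trick: because the macro expands at each use site, every case can declare its own local struct arg describing how that call's user-space arguments were marshalled, and larg->field then picks them out of the raw long *arg block. A self-contained illustration with hypothetical names (not RTAI code):

#include <stdio.h>

#define larg ((struct arg *)arg)	/* expands against the struct arg in scope */

static long demo_dispatch(int srq, long *arg)
{
	switch (srq) {
	case 1: {			/* this call marshals two longs */
		struct arg { long a, b; };
		return larg->a + larg->b;
	}
	case 2: {			/* this call marshals a value and a scale */
		struct arg { long value, scale; };
		return larg->value * larg->scale;
	}
	}
	return -1;
}

int main(void)
{
	long args[2] = { 2, 3 };
	printf("%ld\n", demo_dispatch(1, args));	/* prints 5 */
	printf("%ld\n", demo_dispatch(2, args));	/* prints 6 */
	return 0;
}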