static int __wind_taskinfo_status(struct task_struct *curr, struct pt_regs *regs) { xnhandle_t handle = __xn_reg_arg1(regs); unsigned long status; WIND_TCB *pTcb; spl_t s; if (!__xn_access_ok (curr, VERIFY_WRITE, __xn_reg_arg2(regs), sizeof(status))) return -EFAULT; xnlock_get_irqsave(&nklock, s); pTcb = (WIND_TCB *)xnregistry_fetch(handle); if (!pTcb || pTcb->magic != WIND_TASK_MAGIC) { xnlock_put_irqrestore(&nklock, s); return S_objLib_OBJ_ID_ERROR; } status = xnthread_state_flags(&pTcb->threadbase); xnlock_put_irqrestore(&nklock, s); __xn_copy_to_user(curr, (void __user *)__xn_reg_arg2(regs), &status, sizeof(status)); return 0; }
/*
 * pSOS t_mode() service: alter the calling task's execution mode.
 *
 * @mask    selects which mode bits of @newmask are applied;
 * @newmask carries the new values for the selected bits;
 * @oldmode receives the previous mode word (mode bits plus the
 *          interrupt mask level in bits 8-10).
 *
 * Returns SUCCESS, or -EPERM when not called from a Xenomai
 * primary-mode context.
 */
u_long t_mode(u_long mask, u_long newmask, u_long *oldmode)
{
	psostask_t *task;
	u_long prev;

	if (!xnpod_primary_p())
		return -EPERM;

	task = psos_current_task();

	/* No error case here: silently drop any bit outside the
	   supported mode set. */
	mask &= T_MODE_MASK;
	newmask &= T_MODE_MASK;

	/* Build the previous mode word: translated mode bits plus the
	   current interrupt mask level in bits 8-10. */
	prev = xeno_mode_to_psos(xnthread_state_flags(&task->threadbase)
				 & XNTHREAD_MODE_BITS);
	prev |= (task->threadbase.imask & 0x7) << 8;
	*oldmode = prev;

	/* Round-robin is handled separately from the other mode bits:
	   enable or disable time-slicing, then strip the bit so the
	   generic mode switch below never sees it. */
	if (mask & T_TSLICE) {
		xnpod_set_thread_tslice(&task->threadbase,
					(newmask & T_TSLICE)
					? psos_time_slice : XN_INFINITE);
		mask &= ~T_TSLICE;
	}

	if (mask == 0)
		return SUCCESS;

	xnpod_set_thread_mode(&task->threadbase,
			      psos_mode_to_xeno(mask),
			      psos_mode_to_xeno(newmask));

	/* Reschedule in case the scheduler has been unlocked. */
	xnpod_schedule();

	return SUCCESS;
}
static int __wind_taskinfo_status(struct pt_regs *regs) { xnhandle_t handle = __xn_reg_arg1(regs); unsigned long status; WIND_TCB *pTcb; spl_t s; xnlock_get_irqsave(&nklock, s); pTcb = __wind_lookup_task(handle); if (!pTcb || pTcb->magic != WIND_TASK_MAGIC) { xnlock_put_irqrestore(&nklock, s); return S_objLib_OBJ_ID_ERROR; } status = xnthread_state_flags(&pTcb->threadbase); xnlock_put_irqrestore(&nklock, s); return __xn_safe_copy_to_user((void __user *)__xn_reg_arg2(regs), &status, sizeof(status)); }