/*
 * return timer owned by the process, used by exit_itimers
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	/* On RT we can race with a deletion */
	if (!timer->it_signal) {
		unlock_timer(timer, flags);
		return;
	}

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		rcu_read_lock();
		unlock_timer(timer, flags);
		timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
					timer);
		rcu_read_unlock();
		goto retry_delete;
	}
	list_del(&timer->list);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}
/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct itimerspec __user *, setting)
{
	struct itimerspec cur_setting;
	struct k_itimer *timr;
	struct k_clock *kc;
	unsigned long flags;
	int ret = 0;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	kc = clockid_to_kclock(timr->it_clock);
	if (WARN_ON_ONCE(!kc || !kc->timer_get))
		ret = -EINVAL;
	else
		kc->timer_get(timr, &cur_setting);

	unlock_timer(timr, flags);

	if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
		return -EFAULT;

	return ret;
}
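/*
 * Illustrative userspace sketch (not part of this file): exercising the
 * timer_gettime() path above through the glibc wrapper. On return,
 * it_value is the time remaining until the next expiry (all zero if the
 * timer is disarmed) and it_interval is the reload period. Compile with
 * -lrt on older glibc versions.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	timer_t tid;
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGALRM,
	};
	struct itimerspec its = {
		.it_value    = { .tv_sec = 5 },	/* first expiry in 5s */
		.it_interval = { .tv_sec = 1 },	/* then every second */
	};
	struct itimerspec cur;

	if (timer_create(CLOCK_MONOTONIC, &sev, &tid))
		return 1;
	if (timer_settime(tid, 0, &its, NULL))
		return 1;

	if (timer_gettime(tid, &cur))	/* lands in the syscall above */
		return 1;
	printf("remaining: %ld.%09lds, interval: %ld.%09lds\n",
	       (long)cur.it_value.tv_sec, cur.it_value.tv_nsec,
	       (long)cur.it_interval.tv_sec, cur.it_interval.tv_nsec);

	timer_delete(tid);
	return 0;
}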
/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
	struct k_itimer *timer;
	unsigned long flags;

retry_delete:
	timer = lock_timer(timer_id, &flags);
	if (!timer)
		return -EINVAL;

	rcu_read_lock();
	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
					timer);
		rcu_read_unlock();
		goto retry_delete;
	}
	rcu_read_unlock();

	spin_lock(&current->sighand->siglock);
	list_del(&timer->list);
	spin_unlock(&current->sighand->siglock);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
	return 0;
}
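/*
 * Illustrative userspace sketch (not part of this file): once
 * timer_delete() returns, the id no longer resolves, so lock_timer()
 * above fails and further calls return EINVAL. POSIX leaves reuse of a
 * deleted timer id undefined, so this only demonstrates what this
 * particular kernel code does; don't rely on it in real programs.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	timer_t tid;
	struct itimerspec cur;

	/* NULL sigevent defaults to SIGEV_SIGNAL/SIGALRM, see timer_create() below */
	if (timer_create(CLOCK_REALTIME, NULL, &tid))
		return 1;
	if (timer_delete(tid))
		return 1;

	errno = 0;
	if (timer_gettime(tid, &cur) == -1 && errno == EINVAL)
		puts("stale timer id rejected with EINVAL");
	return 0;
}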
static inline int timer_delete_hook(struct k_itimer *timer)
{
	struct k_clock *kc = clockid_to_kclock(timer->it_clock);

	if (WARN_ON_ONCE(!kc || !kc->timer_del))
		return -EINVAL;
	return kc->timer_del(timer);
}
/*
 * This will restart clock_nanosleep. This is required only by
 * compat_clock_nanosleep_restart for now.
 */
long clock_nanosleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct k_clock *kc = clockid_to_kclock(which_clock);

	if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
		return -EINVAL;

	return kc->nsleep_restart(restart_block);
}
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct timespec __user *, tp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec new_tp;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (copy_from_user(&new_tp, tp, sizeof (*tp)))
		return -EFAULT;

	return kc->clock_set(which_clock, &new_tp);
}
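/*
 * Illustrative userspace sketch (not part of this file): stepping
 * CLOCK_REALTIME through clock_settime(). This needs CAP_SYS_TIME;
 * unprivileged callers get EPERM back from the clock_set hook. To keep
 * the demo harmless it "steps" the clock to roughly its current value.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
	struct timespec now;

	clock_gettime(CLOCK_REALTIME, &now);

	/* A near no-op step: set the clock to its just-read value. */
	if (clock_settime(CLOCK_REALTIME, &now) == -1)
		fprintf(stderr, "clock_settime: %s\n", strerror(errno));
	else
		puts("clock stepped");
	return 0;
}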
/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct itimerspec __user *, new_setting,
		struct itimerspec __user *, old_setting)
{
	struct k_itimer *timr;
	struct itimerspec new_spec, old_spec;
	int error = 0;
	unsigned long flag;
	struct itimerspec *rtn = old_setting ? &old_spec : NULL;
	struct k_clock *kc;

	if (!new_setting)
		return -EINVAL;

	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
		return -EFAULT;

	if (!timespec_valid(&new_spec.it_interval) ||
	    !timespec_valid(&new_spec.it_value))
		return -EINVAL;
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	rcu_read_lock();
	kc = clockid_to_kclock(timr->it_clock);
	if (WARN_ON_ONCE(!kc || !kc->timer_set))
		error = -EINVAL;
	else
		error = kc->timer_set(timr, flags, &new_spec, rtn);

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		timer_wait_for_callback(kc, timr);
		rtn = NULL;	/* We already got the old time... */
		rcu_read_unlock();
		goto retry;
	}
	rcu_read_unlock();

	if (old_setting && !error &&
	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
		error = -EFAULT;

	return error;
}
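/*
 * Illustrative userspace sketch (not part of this file): rearming a
 * timer with timer_settime() and capturing the previous setting through
 * the old_setting pointer, which the syscall above fills from old_spec.
 * An all-zero it_value disarms the timer.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	timer_t tid;
	struct itimerspec arm = { .it_value = { .tv_sec = 10 } };
	struct itimerspec disarm = { 0 };	/* zero it_value disarms */
	struct itimerspec old;

	if (timer_create(CLOCK_MONOTONIC, NULL, &tid))
		return 1;

	timer_settime(tid, 0, &arm, NULL);	/* one-shot, 10s out */
	timer_settime(tid, 0, &disarm, &old);	/* disarm, keep old state */

	printf("had %ld.%09lds left when disarmed\n",
	       (long)old.it_value.tv_sec, old.it_value.tv_nsec);
	timer_delete(tid);
	return 0;
}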
SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct timespec __user *, tp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec kernel_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_get(which_clock, &kernel_tp);

	if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
		error = -EFAULT;

	return error;
}
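/*
 * Illustrative userspace sketch (not part of this file): the usual
 * consumer of the clock_gettime() path, comparing CLOCK_REALTIME (wall
 * time, settable) against CLOCK_MONOTONIC (never steps backwards).
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec wall, mono;

	clock_gettime(CLOCK_REALTIME, &wall);
	clock_gettime(CLOCK_MONOTONIC, &mono);

	printf("realtime:  %ld.%09ld\n", (long)wall.tv_sec, wall.tv_nsec);
	printf("monotonic: %ld.%09ld\n", (long)mono.tv_sec, mono.tv_nsec);
	return 0;
}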
SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -ENANOSLEEP_NOTSUP;

	if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
		return -EFAULT;

	if (!timespec_valid(&t))
		return -EINVAL;

	return kc->nsleep(which_clock, flags, &t, rmtp);
}
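/*
 * Illustrative userspace sketch (not part of this file): the classic
 * drift-free periodic loop built on clock_nanosleep() with
 * TIMER_ABSTIME. Because each deadline is absolute, time spent in the
 * loop body does not accumulate as drift. Note the wrapper returns an
 * error number directly rather than setting errno.
 */
#include <errno.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec next;
	int i;

	clock_gettime(CLOCK_MONOTONIC, &next);
	for (i = 0; i < 5; i++) {
		next.tv_sec += 1;	/* next absolute 1s deadline */
		while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
				       &next, NULL) == EINTR)
			;	/* restart if interrupted by a signal */
		puts("tick");
	}
	return 0;
}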
SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct timex __user *, utx)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timex ktx;
	int err;

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	if (copy_from_user(&ktx, utx, sizeof(ktx)))
		return -EFAULT;

	err = kc->clock_adj(which_clock, &ktx);

	if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
		return -EFAULT;

	return err;
}
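/*
 * Illustrative userspace sketch (not part of this file): a read-only
 * clock_adjtime() query. With modes == 0 nothing is changed; the kernel
 * just fills in the timex struct, and the return value is the clock
 * state (TIME_OK, TIME_INS, ...), which is why the code above copies
 * ktx back to userspace for any err >= 0.
 */
#include <stdio.h>
#include <sys/timex.h>
#include <time.h>

int main(void)
{
	struct timex tx = { .modes = 0 };	/* query only */
	int state = clock_adjtime(CLOCK_REALTIME, &tx);

	if (state < 0)
		return 1;
	printf("state=%d freq=%ld offset=%ld\n",
	       state, tx.freq, tx.offset);
	return 0;
}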
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
retry:
	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
		error = -EAGAIN;
		goto out;
	}
	spin_lock_irq(&idr_lock);
	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
	spin_unlock_irq(&idr_lock);
	if (error) {
		if (error == -EAGAIN)
			goto retry;
		/*
		 * Weird looking, but we return EAGAIN if the IDR is
		 * full (proper POSIX return value for this)
		 */
		error = -EAGAIN;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;

	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(&event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
	} else {
		event.sigev_notify = SIGEV_SIGNAL;
		event.sigev_signo = SIGALRM;
		event.sigev_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->it_sigev_notify     = event.sigev_notify;
	new_timer->sigq->info.si_signo = event.sigev_signo;
	new_timer->sigq->info.si_value = event.sigev_value;
	new_timer->sigq->info.si_tid   = new_timer->it_id;
	new_timer->sigq->info.si_code  = SI_TIMER;

	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}

	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	return 0;
	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time. Don't use or modify
	 * new_timer after the unlock call.
	 */
out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}
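/*
 * Illustrative userspace sketch (not part of this file): creating a
 * timer with an explicit sigevent and observing the siginfo fields
 * (si_code == SI_TIMER, si_value) that timer_create() above wires into
 * new_timer->sigq->info. The handler names and values are arbitrary.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *si, void *uc)
{
	(void)uc;
	/* printf is not async-signal-safe; acceptable for a demo */
	printf("sig %d, si_code=%d, sival_int=%d\n",
	       sig, si->si_code, si->si_value.sival_int);
	exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler,
				.sa_flags = SA_SIGINFO };
	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
				.sigev_signo  = SIGRTMIN,
				.sigev_value  = { .sival_int = 42 } };
	struct itimerspec its = { .it_value = { .tv_nsec = 100000000 } };
	timer_t tid;

	sigaction(SIGRTMIN, &sa, NULL);
	if (timer_create(CLOCK_MONOTONIC, &sev, &tid))
		return 1;
	timer_settime(tid, 0, &its, NULL);
	pause();	/* wait for the 100ms expiry */
	return 0;
}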