Example #1
asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
		struct compat_timespec __user *utime, u32 __user *uaddr2,
		u32 val3)
{
	struct timespec t;
	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
	int val2 = 0;

	if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
		if (get_compat_timespec(&t, utime))
			return -EFAULT;
		if (!timespec_valid(&t))
			return -EINVAL;
		if (op == FUTEX_WAIT)
			/* relative timeout, rounded up to whole jiffies */
			timeout = timespec_to_jiffies(&t) + 1;
		else {
			/* FUTEX_LOCK_PI: the absolute time is split across
			 * the timeout and val2 arguments */
			timeout = t.tv_sec;
			val2 = t.tv_nsec;
		}
	}
	if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE)
		val2 = (int) (unsigned long) utime;

	return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
}
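Every example in this collection gates user-supplied time values on timespec_valid(). For reference, a minimal sketch of that predicate, modeled on the kernel's include/linux/time.h (NSEC_PER_SEC assumed from the same header):

static inline bool timespec_valid(const struct timespec *ts)
{
	/* Dates before 1970 are bogus. */
	if (ts->tv_sec < 0)
		return false;
	/* Can't have more nanoseconds than a second. */
	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return false;
	return true;
}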
Example #2
asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
				     struct compat_timespec __user *rmtp)
{
	struct timespec tu, rmt;
	mm_segment_t oldfs;
	long ret;

	if (get_compat_timespec(&tu, rqtp))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	/*
	 * hrtimer_nanosleep() writes the remainder through a __user
	 * pointer; &rmt lives on the kernel stack, so temporarily lift
	 * the address-space limit around the call.
	 */
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = hrtimer_nanosleep(&tu,
				rmtp ? (struct timespec __user *)&rmt : NULL,
				HRTIMER_MODE_REL, CLOCK_MONOTONIC);
	set_fs(oldfs);

	if (ret) {
		struct restart_block *restart
			= &current_thread_info()->restart_block;

		restart->fn = compat_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;

		if (rmtp && put_compat_timespec(&rmt, rmtp))
			return -EFAULT;
	}

	return ret;
}
Example #3
asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
		struct compat_timespec __user *utime, u32 __user *uaddr2,
		u32 val3)
{
	struct timespec ts;
	ktime_t t, *tp = NULL;
	int val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (get_compat_timespec(&ts, utime))
			return -EFAULT;
		if (!timespec_valid(&ts))
			return -EINVAL;

		t = timespec_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * Here utime doubles as an integer: the number of waiters to
	 * requeue, or to wake for FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (int) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
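Unlike Example #1, which rounds the relative timeout into jiffies, this version converts FUTEX_WAIT's timeout to an absolute ktime_t with ktime_add_safe(), which saturates instead of overflowing. A sketch of that helper, modeled on kernel/hrtimer.c of the same era (the tv64 field is the pre-4.10 ktime_t representation):

ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * Clamp to KTIME_SEC_MAX: a huge user-supplied relative timeout
	 * must not wrap around into the past.
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}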
Example #4
int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
{
	static int firsttime = 1;
	int error = 0;

	if (tv && !timespec_valid(tv))
		return -EINVAL;

	error = security_settime(tv, tz);
	if (error)
		return error;

	if (tz) {
		/* SMP safe, global irq locking makes it work. */
		sys_tz = *tz;
		if (firsttime) {
			firsttime = 0;
			if (!tv)
				warp_clock();
		}
	}
	if (tv) {
		/* SMP safe, again the code in arch/foo/time.c should
		 * globally block out interrupts when it runs.
		 */
		return do_settimeofday(tv);
	}
	return 0;
}
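For context, a hypothetical userspace caller exercising the tz-only path above; passing a NULL tv with a non-NULL tz is what reaches the one-time warp_clock() branch (requires CAP_SYS_TIME; the timezone values are illustrative):

#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	/* tv == NULL, tz != NULL: sets the kernel timezone only. */
	struct timezone tz = { .tz_minuteswest = 300, .tz_dsttime = 0 };

	if (settimeofday(NULL, &tz) != 0) {
		perror("settimeofday");
		return 1;
	}
	return 0;
}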
Example #5
/* hmm, is there an absolute sleep in the linux kernel? */
int
rumpuser_clock_sleep(int enum_rumpclock, int64_t sec, long nsec)
{
	enum rumpclock clk = enum_rumpclock;
	struct timespec rqt;
	struct timespec ctime, delta;
	unsigned long timo;

	rqt.tv_sec = sec;
	rqt.tv_nsec = nsec;

	switch (clk) {
	case RUMPUSER_CLOCK_RELWALL:
		timo = timespec_to_jiffies(&rqt);
		break;
	case RUMPUSER_CLOCK_ABSMONO:
		ctime = current_kernel_time();
		delta = timespec_sub(rqt, ctime);
		/* a negative delta fails timespec_valid(): deadline passed */
		if (!timespec_valid(&delta))
			goto out;
		timo = timespec_to_jiffies(&delta);
		break;
	default:
		panic("unreachable");
	}

	set_current_state(TASK_UNINTERRUPTIBLE);
	KLOCK_WRAP(schedule_timeout(timo));

 out:
	return 0;
}
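As for the question in the comment above: yes, hrtimers provide absolute in-kernel sleeps, so the jiffies arithmetic here could be avoided. A minimal sketch using schedule_hrtimeout(), assuming an absolute CLOCK_MONOTONIC deadline:

/* Sleep until an absolute CLOCK_MONOTONIC deadline (sketch). */
static int sleep_until(ktime_t deadline)
{
	/* schedule_hrtimeout() requires the caller to set the state. */
	set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_hrtimeout(&deadline, HRTIMER_MODE_ABS);
}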
Example #6
/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct itimerspec __user *, new_setting,
		struct itimerspec __user *, old_setting)
{
	struct k_itimer *timr;
	struct itimerspec new_spec, old_spec;
	int error = 0;
	unsigned long flag;
	struct itimerspec *rtn = old_setting ? &old_spec : NULL;
	struct k_clock *kc;

	if (!new_setting)
		return -EINVAL;

	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
		return -EFAULT;

	if (!timespec_valid(&new_spec.it_interval) ||
	    !timespec_valid(&new_spec.it_value))
		return -EINVAL;
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	rcu_read_lock();
	kc = clockid_to_kclock(timr->it_clock);
	if (WARN_ON_ONCE(!kc || !kc->timer_set))
		error = -EINVAL;
	else
		error = kc->timer_set(timr, flags, &new_spec, rtn);

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		timer_wait_for_callback(kc, timr);
		rtn = NULL;	// We already got the old time...
		rcu_read_unlock();
		goto retry;
	}
	rcu_read_unlock();

	if (old_setting && !error &&
	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
		error = -EFAULT;

	return error;
}
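A hypothetical userspace counterpart to the syscall above: create a CLOCK_MONOTONIC timer and arm a 100 ms one-shot (it_interval left zeroed), then wait for the signal. Link with -lrt on older glibc:

#include <signal.h>
#include <time.h>
#include <unistd.h>

static void on_alarm(int sig) { (void)sig; }

int main(void)
{
	timer_t timerid;
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGALRM,
	};
	/* One-shot: it_interval stays zero, it_value = 100 ms. */
	struct itimerspec its = {
		.it_value = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 },
	};

	signal(SIGALRM, on_alarm);
	if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) != 0)
		return 1;
	if (timer_settime(timerid, 0, &its, NULL) != 0)
		return 1;
	pause();	/* returns once the handler has run */
	return 0;
}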
Example #7
/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct itimerspec __user *, new_setting,
		struct itimerspec __user *, old_setting)
{
	struct k_itimer *timr;
	struct itimerspec new_spec, old_spec;
	int error = 0;
	unsigned long flag;
	struct itimerspec *rtn = old_setting ? &old_spec : NULL;

	if (!new_setting)
		return -EINVAL;

	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
		return -EFAULT;

	if (!timespec_valid(&new_spec.it_interval) ||
	    !timespec_valid(&new_spec.it_value))
		return -EINVAL;
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	error = CLOCK_DISPATCH(timr->it_clock, timer_set,
			       (timr, flags, &new_spec, rtn));

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		hrtimer_wait_for_timer(&timr->it.real.timer);
		rtn = NULL;	// We already got the old time...
		goto retry;
	}

	if (old_setting && !error &&
	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
		error = -EFAULT;

	return error;
}
Example #8
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
Example #9
asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
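The rmtp out-parameter in both variants exists so userspace can resume after a signal; an illustrative caller that feeds the remainder back in:

#include <errno.h>
#include <time.h>

/* Sleep for the full requested duration, resuming across signals. */
static int sleep_full(struct timespec req)
{
	struct timespec rem;

	while (nanosleep(&req, &rem) == -1) {
		if (errno != EINTR)
			return -1;
		req = rem;	/* remainder reported by the kernel */
	}
	return 0;
}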
Example #10
COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
		       struct compat_timespec __user *, rmtp)
{
	struct timespec tu, rmt;
	mm_segment_t oldfs;
	long ret;

	if (compat_get_timespec(&tu, rqtp))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = hrtimer_nanosleep(&tu,
				rmtp ? (struct timespec __user *)&rmt : NULL,
				HRTIMER_MODE_REL, CLOCK_MONOTONIC);
	set_fs(oldfs);

	/*
	 * hrtimer_nanosleep() can only return 0 or
	 * -ERESTART_RESTARTBLOCK here because:
	 *
	 * - we call it with HRTIMER_MODE_REL and therefore exclude the
	 *   -ERESTARTNOHAND return path.
	 *
	 * - we supply the rmtp argument from the task stack (due to
	 *   the necessary compat conversion), so the update cannot
	 *   fail, which excludes the -EFAULT return path as well. If
	 *   it fails nevertheless we have a bigger problem and won't
	 *   reach this place anymore.
	 *
	 * - if the return value is 0, we do not have to update rmtp
	 *   because there is no remaining time.
	 *
	 * We check for -ERESTART_RESTARTBLOCK nevertheless in case the
	 * core implementation decides to return random nonsense.
	 */
	if (ret == -ERESTART_RESTARTBLOCK) {
		struct restart_block *restart = &current->restart_block;

		restart->fn = compat_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;

		if (rmtp && compat_put_timespec(&rmt, rmtp))
			return -EFAULT;
	}
	return ret;
}
Example #11
int sched_wait_interval(int flags, const struct timespec __user *rqtp,
                        struct timespec __user *rmtp)
{
    struct hrtimer_sleeper t;
    enum hrtimer_mode mode = flags & TIMER_ABSTIME ?
                             HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
    int ret = 0;

    hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
    hrtimer_set_expires(&t.timer, timespec_to_ktime(*rqtp));
    hrtimer_init_sleeper(&t, current);

    do {
        set_current_state(TASK_INTERRUPTIBLE);
        hrtimer_start_expires(&t.timer, mode);
        if (!hrtimer_active(&t.timer))
            t.task = NULL;

        if (likely(t.task)) {
            t.task->dl.flags |= DL_NEW;
            schedule();
        }
        hrtimer_cancel(&t.timer);
        mode = HRTIMER_MODE_ABS;
    } while (t.task && !signal_pending(current));
    __set_current_state(TASK_RUNNING);

    if (t.task == NULL)
        goto out;

    /*
     * mode was forced to HRTIMER_MODE_ABS inside the loop, so test the
     * caller's flags to decide whether the request was absolute.
     */
    if (flags & TIMER_ABSTIME) {
        ret = -ERESTARTNOHAND;
        goto out;
    }

    if (rmtp) {
        ktime_t rmt;
        struct timespec rmt_ts;
        rmt = hrtimer_expires_remaining(&t.timer);
        if (rmt.tv64 <= 0)	/* nothing left to report */
            goto out;
        rmt_ts = ktime_to_timespec(rmt);
        if (!timespec_valid(&rmt_ts))
            goto out;
        /* rmtp is a user pointer; writing it directly would oops */
        if (copy_to_user(rmtp, &rmt_ts, sizeof(rmt_ts)))
            ret = -EFAULT;
    }
out:
    destroy_hrtimer_on_stack(&t.timer);
    return ret;
}
Example #12
SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec t;

	if (invalid_clockid(which_clock))
		return -EINVAL;

	if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
		return -EFAULT;

	if (!timespec_valid(&t))
		return -EINVAL;

	return CLOCK_DISPATCH(which_clock, nsleep,
			      (which_clock, flags, &t, rmtp));
}
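CLOCK_DISPATCH is the old static dispatch mechanism that the kc->nsleep lookup of Example #14 later replaced; roughly, as defined in kernel/posix-timers.c of that era (reconstructed from memory, so treat as a sketch):

#define CLOCK_DISPATCH(clock, call, arglist) \
	((clock) < 0 ? posix_cpu_##call arglist : \
	 (posix_clocks[clock].call != NULL \
	  ? (*posix_clocks[clock].call) arglist : common_##call arglist))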
Example #13
asmlinkage long
sys_clock_nanosleep(const clockid_t which_clock, int flags,
		    const struct timespec __user *rqtp,
		    struct timespec __user *rmtp)
{
	struct timespec t;

	if (invalid_clockid(which_clock))
		return -EINVAL;

	if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
		return -EFAULT;

	if (!timespec_valid(&t))
		return -EINVAL;

	return CLOCK_DISPATCH(which_clock, nsleep,
			      (which_clock, flags, &t, rmtp));
}
Example #14
SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -ENANOSLEEP_NOTSUP;

	if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
		return -EFAULT;

	if (!timespec_valid(&t))
		return -EINVAL;

	return kc->nsleep(which_clock, flags, &t, rmtp);
}
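From userspace, the absolute flavor of this syscall avoids the drift that repeated relative re-sleeps accumulate; an illustrative helper (note that clock_nanosleep() returns the error number directly instead of setting errno):

#include <time.h>

/* Sleep until now + 500 ms as an absolute CLOCK_MONOTONIC deadline,
 * retrying after signals without drifting. */
static int sleep_half_second(void)
{
	struct timespec deadline;
	int err;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_nsec += 500 * 1000 * 1000;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_nsec -= 1000000000L;
		deadline.tv_sec++;
	}

	do {
		err = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
				      &deadline, NULL);
	} while (err == EINTR);

	return err;
}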
Example #15
asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec tu, rmt;
	int ret;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	ret = hrtimer_nanosleep(&tu, rmtp ? &rmt : NULL, HRTIMER_MODE_REL,
				CLOCK_MONOTONIC);

	if (ret && rmtp) {
		if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
			return -EFAULT;
	}

	return ret;
}
Example #16
long sys_set_reserve(pid_t pid, struct timespec __user *user_C,
                     struct timespec __user *user_T, int cid) {

  struct cpumask set;
  struct timespec T, C, empty;
  struct pid *pid_struct;
  struct task_struct *task;
  struct task_struct *tmp;
  int i;
  int cpu_task_count[NUM_CPUS] = {0};  /* sized by NUM_CPUS, not a literal 4 */

  set_normalized_timespec(&empty, 0, 0);

  // locate the task_struct for the task required
  if (pid == 0) {
    task = current;
  } else {
    rcu_read_lock();
    pid_struct = find_get_pid(pid);
    if (!pid_struct) {
      rcu_read_unlock();
      return -ENODEV;
    }
    task = pid_task(pid_struct, PIDTYPE_PID);
    if (!task) {
      rcu_read_unlock();
      return -ENODEV;
    }
    rcu_read_unlock();
  }

  // get timespec struct info
  if (copy_from_user(&C, user_C, sizeof(struct timespec))) {
    printk(KERN_ALERT "[sys_set_reserve] failed to copy C from user\n");
    return -EFAULT;
  }

  if (copy_from_user(&T, user_T, sizeof(struct timespec))) {
    printk(KERN_ALERT "[sys_set_reserve] failed to copy T from user\n");
    return -EFAULT;
  }

  // check for timespec validity
  if ((timespec_compare(&T, &C) < 0) || !timespec_valid(&T) || !timespec_valid(&C) ||
      (cid >= NUM_CPUS)) {
    printk(KERN_ALERT "[sys_set_reserve] invalid T and C\n");
    return -EINVAL;
  }

  // do a reservation admission check
  cid = admission_check(task, C, T, cid);
  if (cid < 0) {
    return -EBUSY;
  }

  if (set_reserve_hook(task) != 0) {
    return -EFAULT;
  }

  // cancel any old timers for an updated reservation
  if (hrtimer_active(&(task->C_timer))) {
    hrtimer_cancel(&(task->C_timer));
  }
  if (hrtimer_active(&(task->T_timer))) {
    hrtimer_cancel(&(task->T_timer));
  }

  // make runnable any task suspended by enforcement
  if (task->put_to_sleep) {
    task->put_to_sleep = 0;
    wake_up_process(task);
  }

  // copy into task struct ktime values
  task->real_C_time = ktime_set(0, 0);
  task->C_time = ktime_set(C.tv_sec, C.tv_nsec);
  task->T_time = ktime_set(T.tv_sec, T.tv_nsec);

  // find what cpus have tasks on them
  rcu_read_lock();
  for_each_process(tmp) {
    if (tmp->has_reservation) {
      cpu_task_count[task_cpu(tmp)] = 1;
    }
  }
  rcu_read_unlock();

  cpu_task_count[cid] = 1;
  task->reserve_cpu = cid;
  // Bring offline all cpus with no tasks
  for (i = 0; i < NUM_CPUS; i++) {
    if (cpu_task_count[i] == 0) {
      if (power_cpu(i, 0) != 0) {
        printk(KERN_ALERT"[sys_set_reserve] failed to turn off cpu %d\n", i);
        goto fail;
      }
      printk(KERN_ALERT"[sys_set_reserve] turned OFF CPU %d\n", i);
    } else {
      if (power_cpu(i, 1) != 0) {
        printk(KERN_ALERT"[sys_set_reserve] failed to turn on cpu %d\n", i);
        goto fail;
      }
      printk(KERN_ALERT"[sys_set_reserve] turned ON CPU %d\n", i);
    }
  }

  // set process CPU
  cpumask_clear(&set);
  cpumask_set_cpu(cid, &set);
  if (sched_setaffinity(pid, &set)) {
    printk(KERN_ALERT"[sys_set_reserve] failed to set CPU affinity\n");
    goto fail;
  }

  printk(KERN_ALERT "[sys_set_reserve] PID %d (C = %lld ms / T = %lld ms) CPU %u\n",
         pid, ktime_to_ms(task->C_time), ktime_to_ms(task->T_time), cid);

  // mark as having a reservation
  task->has_reservation = 1;

  // set the frequency based on sysclock algorithm
  sysclock_set();

  return 0;

  fail:
    if (task->has_reservation || task->energymon_node) {
      cancel_reserve_hook(task);
    }
    return -EINVAL;
}
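admission_check() is external to this snippet; a hypothetical utilization-based variant in the Liu-Layland spirit (the name, the scaled-integer scheme, and the existing_util_x1000 parameter are all assumptions, not the author's code):

/* Hypothetical admission test: accept a reservation iff the summed
 * utilization C/T on the target CPU, scaled by 1000 to stay in integer
 * math, remains <= 1000. Assumes C is far below 2^64/1000 ns. */
static int admission_check_sketch(struct timespec C, struct timespec T,
				  u64 existing_util_x1000)
{
	u64 c_ns = timespec_to_ns(&C);
	u64 t_ns = timespec_to_ns(&T);

	if (t_ns == 0)
		return -EINVAL;
	if (existing_util_x1000 + div64_u64(c_ns * 1000, t_ns) > 1000)
		return -EBUSY;
	return 0;
}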