/* hmm, is there an absolute sleep in the linux kernel? */
int
rumpuser_clock_sleep(int enum_rumpclock, int64_t sec, long nsec)
{
	enum rumpclock clk = enum_rumpclock;
	struct timespec rqt;
	struct timespec ctime, delta;
	unsigned long timo;

	rqt.tv_sec = sec;
	rqt.tv_nsec = nsec;

	switch (clk) {
	case RUMPUSER_CLOCK_RELWALL:
		timo = timespec_to_jiffies(&rqt);
		break;
	case RUMPUSER_CLOCK_ABSMONO:
		/* Emulate an absolute sleep: subtract the current time
		 * and sleep for the remaining relative interval. */
		ctime = current_kernel_time();
		delta = timespec_sub(rqt, ctime);
		if (!timespec_valid(&delta))
			goto out;	/* deadline already passed */
		timo = timespec_to_jiffies(&delta);
		break;
	default:
		panic("unreachable");
	}

	set_current_state(TASK_UNINTERRUPTIBLE);
	KLOCK_WRAP(schedule_timeout(timo));
 out:
	return 0;
}
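/*
 * Hedged aside, not part of the rump source above: the comment's
 * question ("is there an absolute sleep?") has an affirmative answer
 * on kernels with hrtimers, via schedule_hrtimeout() in
 * HRTIMER_MODE_ABS. A minimal sketch; the helper name is hypothetical
 * and the deadline is interpreted on CLOCK_MONOTONIC.
 */
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/sched.h>

static int sleep_until_abs(int64_t sec, long nsec)
{
	ktime_t deadline = ktime_set(sec, nsec);

	/* As with schedule_timeout(), the caller sets the task state
	 * first; returns 0 on expiry, -EINTR if woken early. */
	set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_hrtimeout(&deadline, HRTIMER_MODE_ABS);
}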
asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
				     struct compat_timespec __user *rmtp)
{
	struct timespec t;
	struct restart_block *restart;
	unsigned long expire;

	if (get_compat_timespec(&t, rqtp))
		return -EFAULT;

	if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
		return -EINVAL;

	/* Pad any non-zero request by one jiffy so the sleep is never
	 * shorter than asked for. */
	expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
	expire = schedule_timeout_interruptible(expire);
	if (expire == 0)
		return 0;

	if (rmtp) {
		jiffies_to_timespec(expire, &t);
		if (put_compat_timespec(&t, rmtp))
			return -EFAULT;
	}
	restart = &current_thread_info()->restart_block;
	restart->fn = compat_nanosleep_restart;
	restart->arg0 = jiffies + expire;
	restart->arg1 = (unsigned long) rmtp;
	return -ERESTART_RESTARTBLOCK;
}
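/*
 * Hedged sketch, not from the sources above: a tiny module (name
 * hypothetical) demonstrating why several callers in this collection
 * add "+ 1" or "(tv_sec || tv_nsec)" to the conversion.
 * timespec_to_jiffies() already rounds up to whole ticks, but
 * schedule_timeout(n) can elapse up to one tick early because the
 * current tick is partially consumed, so callers pad by one jiffy to
 * guarantee a minimum sleep. Assumes a kernel old enough to still
 * ship struct timespec.
 */
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/time.h>

static int __init t2j_demo_init(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1 };

	/* Even a 1 ns request converts to a full jiffy. */
	pr_info("1 ns -> %lu jiffies (HZ=%d)\n",
		timespec_to_jiffies(&ts), HZ);
	return 0;
}

static void __exit t2j_demo_exit(void)
{
}

module_init(t2j_demo_init);
module_exit(t2j_demo_exit);
MODULE_LICENSE("GPL");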
asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
		struct compat_timespec __user *utime, u32 __user *uaddr2,
		u32 val3)
{
	struct timespec t;
	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
	int val2 = 0;

	if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
		if (get_compat_timespec(&t, utime))
			return -EFAULT;
		if (!timespec_valid(&t))
			return -EINVAL;
		if (op == FUTEX_WAIT)
			timeout = timespec_to_jiffies(&t) + 1;
		else {
			timeout = t.tv_sec;
			val2 = t.tv_nsec;
		}
	}
	if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE)
		val2 = (int) (unsigned long) utime;

	return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
}
PUBLIC IX_STATUS
ixOsalSleepUninterruptible(UINT32 sleeptime_ms)
{
	struct timespec value;

	if (sleeptime_ms < 1) {
		schedule();
		return IX_SUCCESS;
	}

	value.tv_sec = sleeptime_ms / 1000;
	value.tv_nsec = (sleeptime_ms % 1000) * 1000000;
	sleeptime_ms = timespec_to_jiffies(&value);

	{
		struct task_struct *aTask = current;

		aTask->state = TASK_UNINTERRUPTIBLE;
		sleeptime_ms = schedule_timeout(sleeptime_ms);
	}

	return IX_SUCCESS;
} /* ixOsalSleepUninterruptible */
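/*
 * Hedged aside, not part of the OSAL source: on mainline kernels the
 * body above is essentially msleep(), which wraps the same
 * convert-then-schedule_timeout() pattern (using msecs_to_jiffies()
 * instead of a struct timespec round trip). Wrapper name is
 * hypothetical.
 */
#include <linux/delay.h>

static inline void osal_sleep_ms(unsigned int ms)
{
	msleep(ms);	/* uninterruptible sleep of at least ms milliseconds */
}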
void
_sv_wait(sv_t *sv, spinlock_t *lock, int spl, int intr,
	 struct timespec *timeout)
{
	DECLARE_WAITQUEUE(wait, current);

	spin_lock(&sv->lock);
	/* No need to do interrupts since they better be disabled */

	/* Don't restore interrupts until we are done with both locks */
	spin_unlock(lock);

	add_wait_queue_exclusive(&sv->waiters, &wait);

#if 0
	if (intr) {
		set_current_state(TASK_INTERRUPTIBLE | TASK_EXCLUSIVE);
	} else {
		set_current_state(TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
	}
#endif
	if (intr) {
		set_current_state(TASK_INTERRUPTIBLE);
	} else {
		set_current_state(TASK_UNINTERRUPTIBLE);
	}

	spin_unlock_irqrestore(&sv->lock, (long)spl);

	if (timeout) {
		schedule_timeout(timespec_to_jiffies(timeout));
	} else {
		schedule();
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&sv->waiters, &wait);
}
/*
 * Calculate the time in jiffies until a dentry/attributes are valid
 */
static u64 time_to_jiffies(unsigned long sec, unsigned long nsec)
{
	if (sec || nsec) {
		struct timespec ts = {sec, nsec};

		return get_jiffies_64() + timespec_to_jiffies(&ts);
	} else
		return 0;
}
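/*
 * Hedged usage sketch (helper name hypothetical): a 64-bit validity
 * deadline produced by time_to_jiffies() above would typically be
 * tested against the current jiffies counter with time_before64().
 */
#include <linux/jiffies.h>
#include <linux/types.h>

static inline bool entry_still_valid(u64 valid_until)
{
	/* 0 is the helper's "no validity period" sentinel */
	return valid_until && time_before64(get_jiffies_64(), valid_until);
}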
static inline void mod_grading_timer_on_grade(struct tegra_simon_grader *grader)
{
	if (grader->grade) {
		/* restart timer while at high grade */
		struct timespec ts = {grading_sec, 0};

		mod_timer(&grader->grade_timer,
			  jiffies + timespec_to_jiffies(&ts));
	}
}
static inline void mod_wdt_on_grade(struct tegra_simon_grader *grader)
{
	if (grader->grade) {
		/* restart WDT while at high grade */
		struct timespec ts = {timeout_sec, 0};

		mod_timer(&grader->grade_wdt,
			  jiffies + timespec_to_jiffies(&ts));
	}
}
kerrighed_node_t krg_lock_pid_location(pid_t pid)
{
	kerrighed_node_t node = KERRIGHED_NODE_ID_NONE;
	struct task_kddm_object *obj;
#ifdef CONFIG_KRG_EPM
	struct timespec back_off_time = {
		.tv_sec = 0,
		.tv_nsec = 1000000 /* 1 ms */
	};
#endif

	if (!(pid & GLOBAL_PID_MASK))
		goto out;
	for (;;) {
		obj = krg_task_readlock(pid);
		if (likely(obj)) {
			node = obj->node;
		} else {
			krg_task_unlock(pid);
			break;
		}
#ifdef CONFIG_KRG_EPM
		if (likely(node != KERRIGHED_NODE_ID_NONE))
			break;
		/*
		 * Task is migrating.
		 * Back off and hope that it will stop migrating.
		 */
		krg_task_unlock(pid);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(timespec_to_jiffies(&back_off_time) + 1);
#else
		break;
#endif
	}
out:
	return node;
}

void krg_unlock_pid_location(pid_t pid)
{
	krg_task_unlock(pid);
}
int pps_init(void)
{
	int result;
	struct timespec value;

	value.tv_sec = 0;
	value.tv_nsec = 200000000;		/* 200 millisecs */
	j_delay = timespec_to_jiffies(&value);

	result = register_chrdev(major, "gps-pps-io", &pps_i_fops);
	if (result < 0) {
		printk(KERN_INFO "gps-pps-io: can't get major number\n");
		return result;
	}
	if (major == 0)
		major = result;			/* dynamic */

	pps_buffer = (int *)__get_free_pages(GFP_KERNEL, 0);

	if (configureInterruptOn(PPS_GPIO) == -1) {
		printk(KERN_INFO "gps-pps-io: failed installation\n");
		pps_cleanup();
		return -1;
	}
	if (configureWriteOn(OUTPUT_GPIO) == -1) {
		printk(KERN_INFO "gps-pps-io: failed installation\n");
		pps_cleanup();
		return -1;
	}
	if (configureInterruptOn(INTRPT_GPIO) == -1) {
		printk(KERN_INFO "gps-pps-io: failed installation\n");
		pps_cleanup();
		return -1;
	}
	printk(KERN_INFO "gps-pps-io: installed\n");
	return 0;
}
asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, int val,
		struct compat_timespec __user *utime, u32 __user *uaddr2,
		int val3)
{
	struct timespec t;
	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
	int val2 = 0;

	if ((op == FUTEX_WAIT) && utime) {
		if (get_compat_timespec(&t, utime))
			return -EFAULT;
		timeout = timespec_to_jiffies(&t) + 1;
	}
	if (op >= FUTEX_REQUEUE)
		val2 = (int) (unsigned long) utime;

	return do_futex((unsigned long)uaddr, op, val, timeout,
			(unsigned long)uaddr2, val2, val3);
}
static enum ach_status
rdlock_wait(ach_channel_t *chan, const struct timespec *reltime)
{
	int res;
	struct ach_header *shm = chan->shm;
	volatile uint64_t *c_seq = &chan->seq_num, *s_seq = &shm->last_seq;
	volatile unsigned int *cancel = &chan->cancel;
	enum ach_status r;

	for (;;) {
		/* do the wait */
		if (reltime->tv_sec != 0 || reltime->tv_nsec != 0) {
			res = wait_event_interruptible_timeout(
				shm->sync.readq,
				((*c_seq != *s_seq) || *cancel),
				timespec_to_jiffies(reltime));
			if (0 == res)
				return ACH_TIMEOUT;
		} else {
			res = wait_event_interruptible(
				shm->sync.readq,
				((*c_seq != *s_seq) || *cancel));
		}

		/* check what happened */
		if (-ERESTARTSYS == res)
			return ACH_EINTR;
		if (res < 0) {
			ACH_ERRF("ach bug: rdlock_wait(), "
				 "could not wait for event, "
				 "timeout: (%lu,%ld), result=%d\n",
				 reltime->tv_sec, reltime->tv_nsec, res);
			return ACH_BUG;
		}

		r = chan_lock(chan);
		/* Check condition with the lock held in case someone
		 * else flushed the channel, or someone else unset the
		 * cancel */
		if ((*c_seq != *s_seq) || (ACH_OK != r) || *cancel)
			return r;
		rt_mutex_unlock(&shm->sync.mutex);
	}
}
static void sync_cmos_clock(unsigned long dummy)
{
	struct timespec now, next;
	int fail = 1;

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 * This code is run on a timer.  If the clock is set, that timer
	 * may not expire at the correct time.  Thus, we adjust...
	 */
	if (!ntp_synced())
		/*
		 * Not synced, exit, do not restart a timer (if one is
		 * running, let it run out).
		 */
		return;

	getnstimeofday(&now);
	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
		fail = update_persistent_clock(now);

	/* Re-arm so the next expiry lands on the x.5 s mark. */
	next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec;
	if (next.tv_nsec <= 0)
		next.tv_nsec += NSEC_PER_SEC;

	if (!fail)
		next.tv_sec = 659;	/* ~11 minutes: 660 s minus the
					 * second absorbed by the sub-second
					 * realignment above */
	else
		next.tv_sec = 0;

	if (next.tv_nsec >= NSEC_PER_SEC) {
		next.tv_sec++;
		next.tv_nsec -= NSEC_PER_SEC;
	}
	mod_timer(&sync_cmos_timer, jiffies + timespec_to_jiffies(&next));
}
asmlinkage long sys_semtimedop(int semid, struct sembuf *tsops,
			       unsigned nsops, const struct timespec *timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf *sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, decrease = 0, alter = 0;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > sc_semopm)
		return -E2BIG;
	if (nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
		if (sops == NULL)
			return -ENOMEM;
	}
	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
		error = -EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;

		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
		    _timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	sma = sem_lock(semid);
	error = -EINVAL;
	if (sma == NULL)
		goto out_free;
	error = -EIDRM;
	if (sem_checkid(sma, semid))
		goto out_unlock_free;
	error = -EFBIG;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= sma->sem_nsems)
			goto out_unlock_free;
		if (sop->sem_flg & SEM_UNDO)
			undos++;
		if (sop->sem_op < 0)
			decrease = 1;
		if (sop->sem_op > 0)
			alter = 1;
	}
	alter |= decrease;

	error = -EACCES;
	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;
	if (undos) {
		/* Make sure we have an undo structure
		 * for this process and this semaphore set.
		 */
		un = current->semundo;
		while (un != NULL) {
			if (un->semid == semid)
				break;
			if (un->semid == -1)
				un = freeundos(sma, un);
			else
				un = un->proc_next;
		}
		if (!un) {
			error = alloc_undo(sma, &un, semid, alter);
			if (error)
				goto out_free;
		}
	} else
		un = NULL;

	error = try_atomic_semop(sma, sops, nsops, un, current->tgid, 0);
	if (error <= 0)
		goto update;

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */
	queue.sma = sma;
	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = current->tgid;
	queue.alter = decrease;
	queue.id = semid;
	if (alter)
		append_to_queue(sma, &queue);
	else
		prepend_to_queue(sma, &queue);
	current->semsleeping = &queue;

	for (;;) {
		struct sem_array *tmp;

		queue.status = -EINTR;
		queue.sleeper = current;
		current->state = TASK_INTERRUPTIBLE;
		sem_unlock(semid);

		if (timeout)
			jiffies_left = schedule_timeout(jiffies_left);
		else
			schedule();

		tmp = sem_lock(semid);
		if (tmp == NULL) {
			if (queue.prev != NULL)
				BUG();
			current->semsleeping = NULL;
			error = -EIDRM;
			goto out_free;
		}
		/*
		 * If queue.status == 1 we were woken up and have to
		 * retry, else we simply return.  If an interrupt
		 * occurred we have to clean up the queue.
		 */
		if (queue.status == 1) {
			error = try_atomic_semop(sma, sops, nsops, un,
						 current->tgid, 0);
			if (error <= 0)
				break;
		} else {
			error = queue.status;
			if (error == -EINTR && timeout && jiffies_left == 0)
				error = -EAGAIN;
			if (queue.prev)	/* got interrupt */
				break;
			/* Everything done by update_queue */
			current->semsleeping = NULL;
			goto out_unlock_free;
		}
	}
	current->semsleeping = NULL;
	remove_from_queue(sma, &queue);
update:
	if (alter)
		update_queue(sma);
out_unlock_free:
	sem_unlock(semid);
out_free:
	if (sops != fast_sops)
		kfree(sops);
	return error;
}
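/*
 * Hedged userspace usage sketch, not from the kernel source above
 * (function name hypothetical): how sys_semtimedop() is typically
 * reached through the C library. On timeout the call fails with
 * errno == EAGAIN, matching the -EAGAIN path in the kernel code.
 */
#define _GNU_SOURCE
#include <sys/sem.h>
#include <time.h>

static int wait_for_sem(int semid)
{
	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
	struct timespec to = { .tv_sec = 1, .tv_nsec = 0 };

	/* Blocks for at most one second waiting to decrement sem 0. */
	return semtimedop(semid, &op, 1, &to);
}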
/*
 * Calculate the time in jiffies until a dentry/attributes are valid
 */
static inline unsigned long time_to_jiffies(unsigned long sec,
					    unsigned long nsec)
{
	struct timespec ts = {sec, nsec};

	return jiffies + timespec_to_jiffies(&ts);
}
int sys32_rt_sigtimedwait(compat_sigset_t *uthese, siginfo_t32 *uinfo,
	struct compat_timespec *uts, compat_time_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	compat_sigset_t these32;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/*
	 * As the result of a brainfarting competition a few years ago the
	 * size of sigset_t for the 32-bit kernel was chosen to be 128 bits
	 * but nothing so far is actually using that many, 64 are enough.  So
	 * for now we just drop the high bits.
	 */
	if (copy_from_user(&these32, uthese, sizeof(compat_old_sigset_t)))
		return -EFAULT;

	switch (_NSIG_WORDS) {
#ifdef __MIPSEB__
	case 4: these.sig[3] = these32.sig[6] | (((long)these32.sig[7]) << 32);
	case 3: these.sig[2] = these32.sig[4] | (((long)these32.sig[5]) << 32);
	case 2: these.sig[1] = these32.sig[2] | (((long)these32.sig[3]) << 32);
	case 1: these.sig[0] = these32.sig[0] | (((long)these32.sig[1]) << 32);
#endif
#ifdef __MIPSEL__
	case 4: these.sig[3] = these32.sig[7] | (((long)these32.sig[6]) << 32);
	case 3: these.sig[2] = these32.sig[5] | (((long)these32.sig[4]) << 32);
	case 2: these.sig[1] = these32.sig[3] | (((long)these32.sig[2]) << 32);
	case 1: these.sig[0] = these32.sig[1] | (((long)these32.sig[0]) << 32);
#endif
	}

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (get_user(ts.tv_sec, &uts->tv_sec) ||
		    get_user(ts.tv_nsec, &uts->tv_nsec))
			return -EINVAL;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0 ||
		    ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		/*
		 * None ready -- temporarily unblock those we're interested
		 * in so that we'll be awakened when they arrive.
		 */
		sigset_t oldblocked = current->blocked;

		sigandsets(&current->blocked, &current->blocked, &these);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);

		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = timespec_to_jiffies(&ts) +
				  (ts.tv_sec || ts.tv_nsec);

		current->state = TASK_INTERRUPTIBLE;
		timeout = schedule_timeout(timeout);

		spin_lock_irq(&current->sighand->siglock);
		sig = dequeue_signal(current, &these, &info);
		current->blocked = oldblocked;
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user32(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}