/*
 * sys_nanosleep - suspend the caller for the requested interval.
 *
 * The sleep is relative and driven by the hrtimer core on
 * CLOCK_MONOTONIC; @rmtp (may be NULL) receives the remaining time
 * on interruption, handled inside hrtimer_nanosleep().
 */
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec req;

	/* Fetch the requested interval from user space. */
	if (copy_from_user(&req, rqtp, sizeof(req)))
		return -EFAULT;

	/* Reject negative seconds or out-of-range nanoseconds. */
	if (!timespec_valid(&req))
		return -EINVAL;

	return hrtimer_nanosleep(&req, rmtp, HRTIMER_MODE_REL,
				 CLOCK_MONOTONIC);
}
/*
 * sys_nanosleep - relative sleep on CLOCK_MONOTONIC.
 *
 * Copies the request in, validates it, and delegates to the hrtimer
 * core; remaining-time reporting via @rmtp is handled by
 * hrtimer_nanosleep() itself.
 */
asmlinkage long sys_nanosleep(struct timespec __user *rqtp,
			      struct timespec __user *rmtp)
{
	struct timespec request;

	if (copy_from_user(&request, rqtp, sizeof(request)))
		return -EFAULT;
	if (!timespec_valid(&request))
		return -EINVAL;

	return hrtimer_nanosleep(&request, rmtp, HRTIMER_MODE_REL,
				 CLOCK_MONOTONIC);
}
/*
 * compat_sys_nanosleep - 32-bit compat entry for nanosleep.
 *
 * Converts the compat timespec, performs the sleep with the remaining
 * time captured in a kernel-stack buffer (under KERNEL_DS), and on
 * restart converts the remainder back to the user's compat layout.
 *
 * Fix: "&current" had been mangled into the literal "¤t"
 * (a corrupted "&curren;" HTML entity), which does not compile.
 */
COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
		       struct compat_timespec __user *, rmtp)
{
	struct timespec tu, rmt;
	mm_segment_t oldfs;
	long ret;

	if (compat_get_timespec(&tu, rqtp))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	ret = hrtimer_nanosleep(&tu,
				rmtp ? (struct timespec __user *)&rmt : NULL,
				HRTIMER_MODE_REL, CLOCK_MONOTONIC);
	set_fs(oldfs);

	/*
	 * hrtimer_nanosleep() can only return 0 or
	 * -ERESTART_RESTARTBLOCK here because:
	 *
	 * - we call it with HRTIMER_MODE_REL and therefore exclude the
	 *   -ERESTARTNOHAND return path.
	 *
	 * - we supply the rmtp argument from the task stack (due to
	 *   the necessary compat conversion), so the update cannot
	 *   fail, which excludes the -EFAULT return path as well. If
	 *   it fails nevertheless we have a bigger problem and won't
	 *   reach this place anymore.
	 *
	 * - if the return value is 0, we do not have to update rmtp
	 *   because there is no remaining time.
	 *
	 * We check for -ERESTART_RESTARTBLOCK nevertheless in case the
	 * core implementation decides to return random nonsense.
	 */
	if (ret == -ERESTART_RESTARTBLOCK) {
		struct restart_block *restart = &current->restart_block;

		restart->fn = compat_nanosleep_restart;
		restart->nanosleep.compat_rmtp = rmtp;

		if (rmtp && compat_put_timespec(&rmt, rmtp))
			return -EFAULT;
	}
	return ret;
}
/*
 * compat_sys_nanosleep - 32-bit compat nanosleep using the
 * restart_block-based remaining-time mechanism.
 */
COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
		       struct compat_timespec __user *, rmtp)
{
	struct timespec64 req;

	if (compat_get_timespec64(&req, rqtp))
		return -EFAULT;
	if (!timespec64_valid(&req))
		return -EINVAL;

	/*
	 * Record where (and whether) remaining time should be stored
	 * if the sleep is interrupted and later restarted.
	 */
	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
	current->restart_block.nanosleep.compat_rmtp = rmtp;

	return hrtimer_nanosleep(&req, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock, struct timespec __user *, tp) { struct timespec rtn_tp = { .tv_sec = 0, .tv_nsec = hrtimer_resolution, }; switch (which_clock) { case CLOCK_REALTIME: case CLOCK_MONOTONIC: case CLOCK_BOOTTIME: if (copy_to_user(tp, &rtn_tp, sizeof(rtn_tp))) return -EFAULT; return 0; default: return -EINVAL; } } SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags, const struct timespec __user *, rqtp, struct timespec __user *, rmtp) { struct timespec64 t64; struct timespec t; switch (which_clock) { case CLOCK_REALTIME: case CLOCK_MONOTONIC: case CLOCK_BOOTTIME: if (copy_from_user(&t, rqtp, sizeof (struct timespec))) return -EFAULT; t64 = timespec_to_timespec64(t); if (!timespec64_valid(&t64)) return -EINVAL; return hrtimer_nanosleep(&t64, rmtp, flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL, which_clock); default: return -EINVAL; } } #ifdef CONFIG_COMPAT long clock_nanosleep_restart(struct restart_block *restart_block) { return hrtimer_nanosleep_restart(restart_block); }
/*
 * clock_getres_time32 - 32-bit time_t variant of clock_getres for the
 * supported clocks.
 */
SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
		struct old_timespec32 __user *, tp)
{
	struct timespec64 res = {
		.tv_sec = 0,
		.tv_nsec = hrtimer_resolution,
	};

	switch (which_clock) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		return put_old_timespec32(&res, tp) ? -EFAULT : 0;
	default:
		return -EINVAL;
	}
}

/*
 * clock_nanosleep_time32 - 32-bit time_t variant of clock_nanosleep.
 */
SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
		struct old_timespec32 __user *, rqtp,
		struct old_timespec32 __user *, rmtp)
{
	struct timespec64 req;

	/* Only these clocks are supported here. */
	switch (which_clock) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_BOOTTIME:
		break;
	default:
		return -EINVAL;
	}

	if (get_old_timespec32(&req, rqtp))
		return -EFAULT;
	if (!timespec64_valid(&req))
		return -EINVAL;

	/* Absolute sleeps never report remaining time. */
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;

	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
	current->restart_block.nanosleep.compat_rmtp = rmtp;

	return hrtimer_nanosleep(&req,
				 (flags & TIMER_ABSTIME) ? HRTIMER_MODE_ABS :
							   HRTIMER_MODE_REL,
				 which_clock);
}
/*
 * common_nsleep - nanosleep for the monotonic and realtime clocks.
 *
 * The remaining time is captured in a kernel buffer and copied back
 * to @rmtp only when the sleep returned early and the caller asked
 * for it.
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 struct timespec *tsave, struct timespec __user *rmtp)
{
	struct timespec remaining;
	int err;

	err = hrtimer_nanosleep(tsave, rmtp ? &remaining : NULL,
				(flags & TIMER_ABSTIME) ? HRTIMER_MODE_ABS
							: HRTIMER_MODE_REL,
				which_clock);

	if (err && rmtp && copy_to_user(rmtp, &remaining, sizeof(*rmtp)))
		return -EFAULT;

	return err;
}
void mipi_sharp_delay_us(unsigned long usec) { struct timespec tu; if (usec >= 1000*1000) { tu.tv_sec = usec / 1000000; tu.tv_nsec = (usec % 1000000) * 1000; } else { tu.tv_sec = 0; tu.tv_nsec = usec * 1000; } hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC); return; }
/*
 * common_nsleep - nanosleep for the monotonic and realtime clocks.
 *
 * Maps TIMER_ABSTIME in @flags to the hrtimer mode and delegates to
 * hrtimer_nanosleep(). Returns -EINVAL for unsupported clocks.
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 struct timespec *tsave, struct timespec __user *rmtp)
{
	int mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL;
	int clockid = which_clock;

	switch (which_clock) {
	case CLOCK_REALTIME:
		/* Posix madness. Only absolute timers on clock
		 * realtime are affected by clock set. A relative
		 * realtime sleep is therefore served by the
		 * monotonic clock instead. */
		if (mode != HRTIMER_ABS)
			clockid = CLOCK_MONOTONIC;
		/* fallthrough */
	case CLOCK_MONOTONIC:
		break;
	default:
		return -EINVAL;
	}
	return hrtimer_nanosleep(tsave, rmtp, mode, clockid);
}
/*
 * ir_reg_irdadrv_sir_set_led - program the IrDA SIR LED control
 * registers, then sleep for the SD recovery period.
 *
 * Sleeps via hrtimer_nanosleep(), so this must not be called from
 * atomic context.
 */
void ir_reg_irdadrv_sir_set_led(void)
{
	unsigned long spin_lock_flags;
	struct timespec tu;
	register uint16 w_dmy = IR_CR15_SD_TERM | IR_CR15_IRTX_AB_OUTPUT;
	register uint16 w_dmy2 = IR_CR15_OPT_IO_A_CNN_ENA;

	/*
	 * Register write sequence performed under the controller
	 * spinlock. NOTE(review): CR15 is written twice with different
	 * values around the CR14 write — presumably a required hardware
	 * programming sequence; confirm against the controller
	 * datasheet.
	 */
	spin_lock_irqsave(&Irreg_spin_lock, spin_lock_flags);
	*(volatile uint16*)IR_REG_CR15 = w_dmy;
	*(volatile uint16*)IR_REG_CR14 = w_dmy2;
	*(volatile uint16*)IR_REG_CR15 = w_dmy2;
	spin_unlock_irqrestore(&Irreg_spin_lock, spin_lock_flags);

	/* Wait out the SD recovery time (lock released first). */
	tu.tv_sec = 0;
	tu.tv_nsec = IR_SD_RECOVERY_WAIT_NSEC;
	hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
/*
 * ir_reg_irdadrv_fir_set_led - program the IrDA FIR LED output path,
 * wait out the SD recovery period, then restore the configured LED
 * power level.
 *
 * With IR_FEATURE_WAIT_FOR_SLEEP the wait is a schedulable
 * hrtimer_nanosleep(); otherwise it busy-waits with mdelay().
 */
void ir_reg_irdadrv_fir_set_led(void)
{
	unsigned long spin_lock_flags;
#ifdef IR_FEATURE_WAIT_FOR_SLEEP
	struct timespec tu;
#endif
	register uint16 w_dmy;
	register uint16 w_dmy2 = IR_CR15_SD_TERM | IR_CR15_IRTX_AB_OUTPUT | IR_CR15_IRTX_A_OUTDATA;
	register uint16 w_dmy3 = IR_CR15_IRTX_AB_OUTPUT | IR_CR15_IRTX_A_OUTDATA;

	/* Reset the controller before reprogramming the LED path. */
	IR_WRITE_CR0(IR_CR0_SYSTEM_RESET | IR_CR0_CAREER_RESET);
	/* SR14 read result is discarded — presumably a required dummy
	 * read in the programming sequence; confirm with datasheet. */
	w_dmy = IR_READ_SR14;
	ir_reg_led_pwr_sel(IR_LED_POWER_LOW);

	/* Register write sequence under the controller spinlock;
	 * ordering is hardware-mandated, do not reorder. */
	spin_lock_irqsave(&Irreg_spin_lock, spin_lock_flags);
	*(volatile uint16*)IR_REG_CR15 = w_dmy2;
	*(volatile uint16*)IR_REG_CR14 = w_dmy2;
	*(volatile uint16*)IR_REG_CR15 = w_dmy3;
	w_dmy = IR_READ_SR14;
	IR_WRITE_CR15(IR_CR15_OPT_IO_A_CNN_ENA);
	spin_unlock_irqrestore(&Irreg_spin_lock, spin_lock_flags);

#ifdef IR_FEATURE_WAIT_FOR_SLEEP
	/* Schedulable wait for the SD recovery time. */
	tu.tv_sec = 0;
	tu.tv_nsec = IR_SD_RECOVERY_WAIT_NSEC;
	hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
#else
	/* Busy-wait fallback. */
	mdelay(IR_SD_RECOVERY_WAIT);
#endif

	/* Restore the globally configured LED power level. */
	ir_reg_led_pwr_sel(g_ir_ledpow);
}
/*
 * sys_nanosleep - relative sleep on CLOCK_MONOTONIC with explicit
 * remaining-time copy-back.
 */
asmlinkage long sys_nanosleep(struct timespec __user *rqtp,
			      struct timespec __user *rmtp)
{
	struct timespec req, rem;
	int err;

	if (copy_from_user(&req, rqtp, sizeof(req)))
		return -EFAULT;
	if (!timespec_valid(&req))
		return -EINVAL;

	err = hrtimer_nanosleep(&req, rmtp ? &rem : NULL, HRTIMER_MODE_REL,
				CLOCK_MONOTONIC);

	/* Interrupted and the caller wants the remainder: copy it out. */
	if (err && rmtp && copy_to_user(rmtp, &rem, sizeof(*rmtp)))
		return -EFAULT;

	return err;
}
TYPE_IR_REG_RESULT ir_reg_irda_hwinit(void) { TYPE_IR_REG_RESULT w_ret = IR_REG_RESULT_SUCCESS; struct timespec tu; spin_lock_init(&Irreg_spin_lock); ir_reg_gol_irdacc_reset_dis(); ir_reg_irdacc_ena(); ir_reg_set_sd(IR_GPIO_LOW); tu.tv_sec = 0; tu.tv_nsec = IR_SD_RECOVERY_WAIT_NSEC; hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC); return (w_ret); }
TYPE_IR_REG_RESULT ir_reg_irsd_active(void) { TYPE_IR_REG_RESULT w_ret = IR_REG_RESULT_SUCCESS; struct timespec tu; int f_map_hcs1 = 0; if (lr388g7_hcs1 != NULL) { goto ready; } f_map_hcs1 = 1; if (ir_reg_ioremap_nocache() != IR_REG_RESULT_SUCCESS) { w_ret = IR_REG_RESULT_FAIL; MSG_IRREG_FATAL("ioremap error\n"); goto error; } ready: ir_reg_gol_irdacc_reset_dis(); ir_reg_set_sd(IR_GPIO_LOW); tu.tv_sec = 0; tu.tv_nsec = IR_SD_RECOVERY_WAIT_NSEC; hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC); error: if (f_map_hcs1 == 1) { ir_reg_iounmap(); f_map_hcs1 = 0; } return (w_ret); }
/*
 * common_nsleep - nanosleep for the monotonic and realtime clocks.
 *
 * Thin wrapper mapping TIMER_ABSTIME to the hrtimer mode.
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 struct timespec *tsave, struct timespec __user *rmtp)
{
	int mode = (flags & TIMER_ABSTIME) ? HRTIMER_ABS : HRTIMER_REL;

	return hrtimer_nanosleep(tsave, rmtp, mode, which_clock);
}
SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock, struct __kernel_timespec __user *, tp) { struct timespec64 rtn_tp = { .tv_sec = 0, .tv_nsec = hrtimer_resolution, }; switch (which_clock) { case CLOCK_REALTIME: case CLOCK_MONOTONIC: case CLOCK_BOOTTIME: if (put_timespec64(&rtn_tp, tp)) return -EFAULT; return 0; default: return -EINVAL; } } SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags, const struct __kernel_timespec __user *, rqtp, struct __kernel_timespec __user *, rmtp) { struct timespec64 t; switch (which_clock) { case CLOCK_REALTIME: case CLOCK_MONOTONIC: case CLOCK_BOOTTIME: break; default: return -EINVAL; } if (get_timespec64(&t, rqtp)) return -EFAULT; if (!timespec64_valid(&t)) return -EINVAL; if (flags & TIMER_ABSTIME) rmtp = NULL; current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE; current->restart_block.nanosleep.rmtp = rmtp; return hrtimer_nanosleep(&t, flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL, which_clock); } #ifdef CONFIG_COMPAT COMPAT_SYS_NI(timer_create); COMPAT_SYS_NI(getitimer); COMPAT_SYS_NI(setitimer); #endif #ifdef CONFIG_COMPAT_32BIT_TIME SYS_NI(timer_settime32); SYS_NI(timer_gettime32); SYSCALL_DEFINE2(clock_settime32, const clockid_t, which_clock, struct old_timespec32 __user *, tp) { struct timespec64 new_tp; if (which_clock != CLOCK_REALTIME) return -EINVAL; if (get_old_timespec32(&new_tp, tp)) return -EFAULT; return do_sys_settimeofday64(&new_tp, NULL); } SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock, struct old_timespec32 __user *, tp) { int ret; struct timespec64 kernel_tp; ret = do_clock_gettime(which_clock, &kernel_tp); if (ret) return ret; if (put_old_timespec32(&kernel_tp, tp)) return -EFAULT; return 0; }
/*
 * btpm_bluetooth_on - switch Bluetooth power on or off via GPIO
 * sequencing.
 *
 * @pdev: device whose drvdata holds the btpm state
 * @on:   requested power state (non-zero = on)
 *
 * Returns 0 on success (or when already in the requested state),
 * -1 on missing device/state, -EIO on GPIO configuration failure.
 *
 * Sleeps between reset/power transitions via hrtimer_nanosleep(),
 * so this must not be called from atomic context. The delays and
 * ordering (reset low -> power -> >=20ms -> reset high -> >=200ms)
 * look hardware-mandated — confirm against the chip's power-up
 * sequence before changing them.
 */
static int btpm_bluetooth_on( struct device *pdev , int on )
{
	btpm_data_t *p_sts;
	struct timespec tu;
#ifdef BTPM_BCM_4330
	int ret;
#endif

	if ( pdev == NULL ){
		disp_err( "device not found\n" );
		return -1;
	}
	p_sts = (btpm_data_t *)dev_get_drvdata(pdev);
	if ( p_sts == NULL ){
		disp_err( "driver infomation not found\n" );
		return -1;
	}
	/* Nothing to do when the requested state already matches. */
	if ( p_sts->bluetooth == on ){
		disp_dbg( "%s: no need to change status (%d->%d)\n" , __func__, p_sts->bluetooth , on );
		return 0;
	}
	if ( on ){
#ifdef BTPM_BCM_4330
		/* BT WAKE Configuration: WAKE_OUT driven, WAKE_IN pulled up. */
		ret = gpio_tlmm_config(GPIO_CFG( BTPM_WAKE_OUT, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_6MA), GPIO_CFG_ENABLE);
		if (ret) {
			disp_err( "uart gpio_tlmm_config(BTPM_WAKE_OUT) : %d\n", ret);
			return -EIO;
		}
		ret = gpio_tlmm_config(GPIO_CFG( BTPM_WAKE_IN, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), GPIO_CFG_ENABLE);
		if (ret) {
			disp_err( "uart gpio_tlmm_config(BTPM_WAKE_IN ) : %d\n", ret);
			return -EIO;
		}
#endif
		/* Turn ON RF/WLAN_IO-3.0V */
		gpio_set_value( BTPM_PORT_BT_RESET_N, 0 );	/* BTRST_N LOW */
		/* Only raise RF power when WiFi is not holding it. */
		if( wifi <= 0 ){
			gpio_set_value( BTPM_PORT_RF_LNA_EN, 1 );	/* BT_POW HIGH */
			disp_dbg( "%s: RF ON\n" , __func__);
			/* BC7 is always turned on */
		}
		tu.tv_sec = (time_t)0;
		tu.tv_nsec = 20000000;	/* over 20ms */
		hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
		gpio_set_value( BTPM_PORT_BT_RESET_N, 1 );	/* BTRST_N HIGH */
		tu.tv_sec = (time_t)0;
		tu.tv_nsec = 200000000;	/* over 200ms */
		hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
		gpio_set_value( BTPM_WAKE_OUT, 0 );	/* BT_WAKEOUT_N LOW(WAKE) */
	} else {
		gpio_set_value( BTPM_WAKE_OUT, 1 );	/* BT_WAKEOUT_N HIGH(SLEEP) */
		gpio_set_value( BTPM_PORT_BT_RESET_N, 0 );	/* BTRST_N LOW */
		if( wifi <= 0 ){
			tu.tv_sec = (time_t)0;
			tu.tv_nsec = 1000000;	/* over 1ms */
			hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
			/* BC7 is always turned on */
			gpio_set_value( BTPM_PORT_RF_LNA_EN, 0 );	/* BT_POW LOW */
			disp_dbg( "%s: RF OFF\n" , __func__);
		}
#ifdef BTPM_BCM_4330
		/* BT WAKE Configuration: park both WAKE lines as pulled-down inputs. */
		ret = gpio_tlmm_config(GPIO_CFG( BTPM_WAKE_OUT, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_6MA), GPIO_CFG_ENABLE);
		if (ret) {
			disp_err( "gpio_tlmm_config(BTPM_WAKE_OUT) : %d\n", ret);
			return -EIO;
		}
		ret = gpio_tlmm_config(GPIO_CFG( BTPM_WAKE_IN, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG_ENABLE);
		if (ret) {
			disp_err( "gpio_tlmm_config(BTPM_WAKE_IN) : %d\n", ret);
			return -EIO;
		}
#endif
	}
	/* A negative stored state marks the initial power-on reset. */
	if ( p_sts->bluetooth < 0 ){
		disp_inf( "Bluetooth power on reset\n" );
	} else {
		disp_dbg( "%s: change status (%d->%d)\n" , __func__, p_sts->bluetooth , on );
	}
	p_sts->bluetooth = on;
	bluetooth = on;
	return 0;
}
static int try_to_freeze_tasks(bool sig_only) { struct task_struct *g, *p; unsigned long end_time; unsigned int todo; bool wq_busy = false; struct timeval start, end; u64 elapsed_csecs64; unsigned int elapsed_csecs; bool wakeup = false; #ifdef CONFIG_SHSYS_CUST struct timespec tu; #endif do_gettimeofday(&start); end_time = jiffies + TIMEOUT; if (!sig_only) freeze_workqueues_begin(); while (true) { todo = 0; read_lock(&tasklist_lock); do_each_thread(g, p) { if (frozen(p) || !freezable(p)) continue; if (!freeze_task(p, sig_only)) continue; /* * Now that we've done set_freeze_flag, don't * perturb a task in TASK_STOPPED or TASK_TRACED. * It is "frozen enough". If the task does wake * up, it will immediately call try_to_freeze. * * Because freeze_task() goes through p's * scheduler lock after setting TIF_FREEZE, it's * guaranteed that either we see TASK_RUNNING or * try_to_stop() after schedule() in ptrace/signal * stop sees TIF_FREEZE. */ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) todo++; } while_each_thread(g, p); read_unlock(&tasklist_lock); if (!sig_only) { wq_busy = freeze_workqueues_busy(); todo += wq_busy; } if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) { wakeup = 1; break; } if (!todo || time_after(jiffies, end_time)) break; if (pm_wakeup_pending()) { wakeup = true; break; } /* * We need to retry, but first give the freezing tasks some * time to enter the regrigerator. */ #ifdef CONFIG_SHSYS_CUST tu.tv_sec = 0; tu.tv_nsec = 10000000; hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC); #else msleep(10); #endif } do_gettimeofday(&end); elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start); do_div(elapsed_csecs64, NSEC_PER_SEC / 100); elapsed_csecs = elapsed_csecs64; if (todo) { /* This does not unfreeze processes that are already frozen * (we have slightly ugly calling convention in that respect, * and caller must call thaw_processes() if something fails), * but it cleans up leftover PF_FREEZE requests. 
*/ if(wakeup) { printk("\n"); printk(KERN_ERR "Freezing of %s aborted\n", sig_only ? "user space " : "tasks "); } else { printk("\n"); printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds " "(%d tasks refusing to freeze, wq_busy=%d):\n", elapsed_csecs / 100, elapsed_csecs % 100, todo - wq_busy, wq_busy); } thaw_workqueues(); read_lock(&tasklist_lock); do_each_thread(g, p) { task_lock(p); if (freezing(p) && !freezer_should_skip(p) && elapsed_csecs > 100) sched_show_task(p); cancel_freezing(p); task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); } else {