/*
 * 32-bit compat entry point for sched_get_priority_max().
 *
 * Note: it is necessary to treat policy as an unsigned int, with the
 * corresponding cast to a signed int, to insure that the proper
 * conversion (sign extension) between the register representation of
 * an unsigned int (32-bit mode) and the register representation of a
 * signed int (64-bit mode) is performed before handing the value to
 * the native syscall.
 */
asmlinkage long compat_sys_sched_get_priority_max(u32 policy)
{
	int native_policy = (int)policy;	/* explicit sign extension */

	return sys_sched_get_priority_max(native_policy);
}
/*
 * rtasd - kernel daemon that drains the firmware RTAS "event-scan" service.
 *
 * Binds itself to each online CPU in turn and repeatedly calls event-scan
 * until the firmware reports no more pending events for that CPU, logging
 * each returned event via log_rtas().  After the first full pass over all
 * CPUs it optionally enables firmware surveillance, then loops forever at
 * a slower scan rate.
 *
 * Returns -EINVAL only on setup failure (unknown RTAS service, missing
 * scan parameters, allocation failure, or surveillance enable failure);
 * otherwise it never returns.
 *
 * NOTE(review): logdata appears to be a file-scope buffer of at least
 * rtas_error_log_max bytes shared with log_rtas() -- defined outside this
 * view, confirm against the rest of the file.
 */
static int rtasd(void *unused)
{
	int cpu = 0;
	int error;
	int first_pass = 1;
	int event_scan = rtas_token("event-scan");

	/* Bail out early if the firmware doesn't provide event-scan. */
	if (event_scan == RTAS_UNKNOWN_SERVICE || get_eventscan_parms() == -1)
		goto error;

	/* Backing store for the in-kernel log of scanned events. */
	rtas_log_buf = vmalloc(rtas_error_log_max*LOG_NUMBER);
	if (!rtas_log_buf) {
		printk(KERN_ERR "rtasd: no memory\n");
		goto error;
	}

	DEBUG("will sleep for %d jiffies\n", (HZ*60/rtas_event_scan_rate) / 2);

	daemonize("rtasd");

#if 0
	/* Rusty unreal time task */
	current->policy = SCHED_FIFO;
	current->nice = sys_sched_get_priority_max(SCHED_FIFO) + 1;
#endif

repeat:
	/* One pass: visit every online CPU and drain its pending events. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_online(cpu))
			continue;

		DEBUG("scheduling on %d\n", cpu);
		/* Migrate this thread onto the CPU being scanned. */
		set_cpus_allowed(current, cpumask_of_cpu(cpu));
		DEBUG("watchdog scheduled on cpu %d\n", smp_processor_id());

		/* Keep calling event-scan until no event (or an error). */
		do {
			memset(logdata, 0, rtas_error_log_max);
			error = rtas_call(event_scan, 4, 1, NULL,
					EVENT_SCAN_ALL_EVENTS, 0,
					__pa(logdata), rtas_error_log_max);
			if (error == -1) {
				printk(KERN_ERR "event-scan failed\n");
				break;
			}

			/* error == 0 means an event was returned; log it. */
			if (error == 0)
				log_rtas(logdata);

		} while(error == 0);

		/*
		 * Check all cpus for pending events quickly, sleeping for
		 * at least one second since some machines have problems
		 * if we call event-scan too quickly
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(first_pass ? HZ : (HZ*60/rtas_event_scan_rate) / 2);
	}

	/* Surveillance is enabled only once, after the first full pass. */
	if (first_pass && surveillance_requested) {
		DEBUG("enabling surveillance\n");
		if (enable_surveillance())
			goto error_vfree;
		DEBUG("surveillance enabled\n");
	}

	first_pass = 0;
	goto repeat;

error_vfree:
	vfree(rtas_log_buf);
error:
	/* Should delete proc entries */
	return -EINVAL;
}
static int rtasd(void *unused) { int cpu = 0; int error; int first_pass = 1; int event_scan = rtas_token("event-scan"); if (event_scan == RTAS_UNKNOWN_SERVICE || get_eventscan_parms() == -1) goto error; rtas_log_buf = vmalloc(rtas_error_log_max*LOG_NUMBER); if (!rtas_log_buf) { printk(KERN_ERR "rtasd: no memory\n"); goto error; } DEBUG("will sleep for %d jiffies\n", (HZ*60/rtas_event_scan_rate) / 2); daemonize(); sigfillset(¤t->blocked); sprintf(current->comm, "rtasd"); /* Rusty unreal time task */ current->policy = SCHED_FIFO; current->nice = sys_sched_get_priority_max(SCHED_FIFO) + 1; cpu = 0; current->cpus_allowed = 1UL << cpu_logical_map(cpu); schedule(); while(1) { do { memset(logdata, 0, rtas_error_log_max); error = rtas_call(event_scan, 4, 1, NULL, EVENT_SCAN_ALL_EVENTS, 0, __pa(logdata), rtas_error_log_max); if (error == -1) { printk(KERN_ERR "event-scan failed\n"); break; } if (error == 0) log_rtas(logdata); } while(error == 0); DEBUG("watchdog scheduled on cpu %d\n", smp_processor_id()); cpu++; if (cpu >= smp_num_cpus) { if (first_pass && surveillance_requested) { DEBUG("enabling surveillance\n"); if (enable_surveillance()) goto error_vfree; DEBUG("surveillance enabled\n"); } first_pass = 0; cpu = 0; } current->cpus_allowed = 1UL << cpu_logical_map(cpu); /* Check all cpus for pending events before sleeping*/ if (first_pass) { schedule(); } else { set_current_state(TASK_INTERRUPTIBLE); schedule_timeout((HZ*60/rtas_event_scan_rate) / 2); } } error_vfree: vfree(rtas_log_buf); error: /* Should delete proc entries */ return -EINVAL; }
static void autok_thread_func(struct work_struct *data) #endif // USE_KERNEL_THREAD { int err = 0; #ifdef USE_KERNEL_THREAD struct sdio_autok_thread_data *autok_thread_data = (struct sdio_autok_thread_data *)data; #else // USE_KERNEL_THREAD struct sdio_autok_workqueue_data *autok_thread_data = (struct sdio_autok_workqueue_data *)data; #endif // USE_KERNEL_THREAD struct msdc_host *host = autok_thread_data->host; char stage = autok_thread_data->stage; char *envp[2]; char *lteprocenvp[2]; #ifdef CHANGE_SCHED_POLICY struct sched_param param; int sched_policy; #ifdef SCHED_POLICY_INFO sched_policy = sys_sched_getscheduler(0); printk("[%s] orig. sched policy: %d\n", __func__, sched_policy); param.sched_priority = sys_sched_get_priority_max(SCHED_FIFO); if( sys_sched_setscheduler( 0, SCHED_FIFO, ¶m ) == -1 ) { printk("[%s] sched_setscheduler fail\n", __func__); } sched_policy = sys_sched_getscheduler(0); printk("[%s] sched policy FIFO: %d\n", __func__, sched_policy); #endif //param.sched_priority = sched_get_priority_max(SCHED_RR); param.sched_priority = 1; if( sys_sched_setscheduler( 0, SCHED_RR, ¶m ) == -1 ) { printk("[%s] sched_setscheduler fail\n", __func__); } #ifdef SCHED_POLICY_INFO sched_policy = sys_sched_getscheduler(0); printk("[%s] modified sched policy: %d\n", __func__, sched_policy); #endif #endif if(stage == 1) { // call stage 1 auto-K callback function msdc_autok_stg1_cal(host); envp[0] = "FROM=sdio_autok"; envp[1] = NULL; err = kobject_uevent_env(&host->mmc->class_dev.kobj, KOBJ_ONLINE, envp); if(err < 0) printk(KERN_INFO "[%s] kobject_uevent_env error = %d\n", __func__, err); } else if(stage == 2) { // call stage 2 auto-K callback function msdc_autok_stg2_cal(host, autok_thread_data->autok_stage1_result, autok_thread_data->len); } else { printk(KERN_INFO "[%s] stage %d doesn't support in auto-K\n", __func__, stage); #ifdef USE_KERNEL_THREAD return -EFAULT; #else // USE_KERNEL_THREAD return; #endif // USE_KERNEL_THREAD } lteprocenvp[0] = 
"FROM=autok_done"; lteprocenvp[1] = NULL; err = kobject_uevent_env(&host->mmc->class_dev.kobj, KOBJ_ONLINE, lteprocenvp); if(err < 0) printk(KERN_INFO "[%s] kobject_uevent_env error = %d\n", __func__, err); #ifdef USE_KERNEL_THREAD return 0; #else // USE_KERNEL_THREAD return; #endif // USE_KERNEL_THREAD }