/*
 * init_tune_sdio() - re-apply stored auto-K tuning parameters for @host.
 *
 * Looks up this controller's slot in the auto-K progress table; if a
 * previous calibration run finished (done == 1) without failure
 * (fail == 0), the parameters for the current vcore are applied again.
 * Otherwise this is a no-op.
 */
void init_tune_sdio(struct msdc_host *host)
{
	int idx;

	for (idx = 0; idx < HOST_MAX_NUM; idx++) {
		/* Skip slots that belong to other controllers. */
		if (p_autok_thread_data->p_autok_progress[idx].host_id != host->id)
			continue;

		/* Only re-apply when the prior run completed cleanly. */
		if (p_autok_thread_data->p_autok_progress[idx].done == 1 &&
		    p_autok_thread_data->p_autok_progress[idx].fail == 0) {
			u32 vcore_uv = autok_get_current_vcore_offset();

			msdc_autok_apply_param(host, vcore_uv);
			/* wait_sdio_autok_ready(host->mmc); */
		}
		break;	/* at most one slot matches this host */
	}
}
/*
 * wait_sdio_autok_ready() - block until SDIO auto-K stage-2 data is ready.
 * @data: actually the struct mmc_host * of the SDIO controller.
 *
 * Fast path: if a previous calibration for this host finished without
 * failure, the stored stage-2 parameters are re-applied at the current
 * vcore and the function returns immediately.  Slow path: a progress slot
 * is (re)armed, user space is notified via uevent, and the caller sleeps
 * on a completion until the auto-K thread signals it.
 *
 * Return: 0 on success, -1 if the calibration was marked failed.
 */
int wait_sdio_autok_ready(void *data)
{
	int i;
	int ret = 0;
	// BOOTMODE btmod;
	struct mmc_host *mmc = (struct mmc_host*)data;
	struct msdc_host *host = NULL;
	int id;
	unsigned int vcore_uv = 0;

	//btmod = get_boot_mode();
	//printk("btmod = %d\n", btmod);
	/* Boot-mode filtering is disabled here; the body always runs. */
	if (1/*(btmod!=META_BOOT) && (btmod!=FACTORY_BOOT) && (btmod!=ATE_FACTORY_BOOT)*/){
		sdio_host_debug = 0;
		//host = mtk_msdc_host[id];
		host = mmc_priv(mmc);
		id = host->id;
#ifndef UT_TEST
		// claim host
#ifdef CONFIG_SDIOAUTOK_SUPPORT
		//mt_cpufreq_disable(0, true); //FIXME@CCJ
		mt_vcore_dvfs_disable_by_sdio(0, true);
#endif
#ifdef MTK_SDIO30_ONLINE_TUNING_SUPPORT
		/* Pause online tuning while calibration owns the bus. */
		atomic_set(&host->ot_work.ot_disable, 1);
#endif // MTK_SDIO30_ONLINE_TUNING_SUPPORT
		autok_claim_host(host);
#endif
#ifdef AUTOK_THREAD
		/* Fast path: a finished, non-failed run already exists for
		 * this host - just re-apply its stage-2 parameters. */
		for (i = 0; i < HOST_MAX_NUM; i++) {
			if (p_autok_thread_data->p_autok_progress[i].host_id == id &&
			    p_autok_thread_data->p_autok_progress[i].done == 1 &&
			    p_autok_thread_data->p_autok_progress[i].fail == 0) {
				vcore_uv = autok_get_current_vcore_offset();
				msdc_autok_stg2_cal(host, &p_autok_predata[id], vcore_uv);
				break;
			}
		}
		if (i != HOST_MAX_NUM)
			goto EXIT_WAIT_AUTOK_READY;
#endif
		/* Slow path: claim a free slot (host_id == -1) or our own,
		 * tell user space stage 2 may start, then sleep until the
		 * auto-K thread completes it. */
		for(i=0; i<HOST_MAX_NUM; i++){
			if(p_autok_thread_data->p_autok_progress[i].host_id == -1 ||
			   p_autok_thread_data->p_autok_progress[i].host_id == id){
				send_autok_uevent("s2_ready", host);
				init_completion(&p_autok_thread_data->autok_completion[i]);
				p_autok_thread_data->p_autok_progress[i].done = 0;
				p_autok_thread_data->p_autok_progress[i].fail = 0;
				p_autok_thread_data->p_autok_progress[i].host_id = id;
				wait_for_completion_interruptible(&p_autok_thread_data->autok_completion[i]);
				/* NOTE: loop index i is deliberately reused;
				 * safe only because the outer loop breaks
				 * right after this scan. */
				for (i = 0; i < HOST_MAX_NUM; i++) {
					if (p_autok_thread_data->p_autok_progress[i].host_id == id &&
					    p_autok_thread_data->p_autok_progress[i].fail == 1) {
						ret = -1;
						break;
					}
				}
				send_autok_uevent("s2_done", host);
				break;
			}
		}
		//reset_autok_cursor(0);
EXIT_WAIT_AUTOK_READY:
#ifndef UT_TEST
		// release host
		autok_release_host(host);
#ifdef CONFIG_SDIOAUTOK_SUPPORT
		//mt_cpufreq_disable(0, false); //FIXME@CCJ
		mt_vcore_dvfs_disable_by_sdio(0, false);
#endif
#ifdef MTK_SDIO30_ONLINE_TUNING_SUPPORT
		/* Calibration finished: mark done, re-enable online tuning. */
		atomic_set(&host->ot_work.autok_done, 1);
		atomic_set(&host->ot_work.ot_disable, 0);
#endif // MTK_SDIO30_ONLINE_TUNING_SUPPORT
#endif
	}
	return ret;
}
static int autok_thread_func(void *data) { struct sdio_autok_thread_data *autok_thread_data; struct sched_param param = { .sched_priority = 99 }; unsigned int vcore_uv = 0; struct msdc_host *host; struct mmc_host *mmc; char stage = 0; int i,j; int res = 0; int doStg2 = 0; void __iomem *base; u32 dma; struct timeval t0,t1; int time_in_s, time_in_ms; // unsigned long flags; autok_thread_data = (struct sdio_autok_thread_data *)data; sched_setscheduler(current, SCHED_FIFO, ¶m); // preempt_disable(); host = autok_thread_data->host; mmc = host->mmc; stage = autok_thread_data->stage; base = host->base; dma = msdc_dma_status(); // Inform msdc_set_mclk() auto-K is going to process sdio_autok_processed = 1; // Set clock to card max clock mmc_set_clock(mmc, mmc->ios.clock); msdc_sdio_set_long_timing_delay_by_freq(host, mmc->ios.clock); msdc_ungate_clock(host); // Set PIO mode msdc_dma_off(); vcore_uv = autok_get_current_vcore_offset(); // End of initialize do_gettimeofday(&t0); if(autok_thread_data->log != NULL) log_info = autok_thread_data->log; if(stage == 1) { // call stage 1 auto-K callback function autok_thread_data->is_autok_done[host->id] = 0; res = msdc_autok_stg1_cal(host, vcore_uv, autok_thread_data->p_autok_predata); if(res){ printk(KERN_INFO "[%s] Auto-K stage 1 fail, res = %d, set msdc parameter settings stored in nvram to 0\n", __func__, res); memset(autok_thread_data->p_autok_predata->ai_data[0], 0, autok_thread_data->p_autok_predata->param_count * sizeof(unsigned int)); autok_thread_data->is_autok_done[host->id] = 2; } } else if(stage == 2) { // call stage 2 auto-K callback function // check if msdc params of different volt are all 0, if so, that means auto-K stg1 failed for(i=0; i<autok_thread_data->p_autok_predata->vol_count; i++){ for(j=0; j<autok_thread_data->p_autok_predata->param_count; j++){ if(autok_thread_data->p_autok_predata->ai_data[i][j].data.sel != 0){ doStg2 = 1; break; } } if(doStg2) break; } if(doStg2){ res = msdc_autok_stg2_cal(host, 
autok_thread_data->p_autok_predata, vcore_uv); if(res){ printk(KERN_INFO "[%s] Auto-K stage 2 fail, res = %d, downgrade SDIO freq to 50MHz\n", __func__, res); mmc->ios.clock = 50*1000*1000; mmc_set_clock(mmc, mmc->ios.clock); msdc_sdio_set_long_timing_delay_by_freq(host, mmc->ios.clock); sdio_autok_processed = 0; for (i = 0; i < HOST_MAX_NUM; i++) { if (autok_thread_data->p_autok_progress[i].host_id == -1) { break; } else if (autok_thread_data->p_autok_progress[i].host_id == host->id) { autok_thread_data->p_autok_progress[i].fail = 1; } } } } else { // Auto-K stg1 failed printk(KERN_INFO "[%s] Auto-K stage 1 fail, downgrade SDIO freq to 50MHz\n", __func__); mmc->ios.clock = 50*1000*1000; mmc_set_clock(mmc, mmc->ios.clock); msdc_sdio_set_long_timing_delay_by_freq(host, mmc->ios.clock); sdio_autok_processed = 0; for (i = 0; i < HOST_MAX_NUM; i++) { if (autok_thread_data->p_autok_progress[i].host_id == -1) { break; } else if (autok_thread_data->p_autok_progress[i].host_id == host->id) { autok_thread_data->p_autok_progress[i].fail = 1; } } } log_info = NULL; } else { printk(KERN_INFO "[%s] stage %d doesn't support in auto-K\n", __func__, stage); //return -EFAULT; } do_gettimeofday(&t1); if(dma == DMA_ON) msdc_dma_on(); msdc_gate_clock(host,1); // [FIXDONE] Tell native module that auto-K has finished if(stage == 1) autok_calibration_done(host->id, autok_thread_data); else if(stage == 2){ for(i=0; i<HOST_MAX_NUM; i++){ if(autok_thread_data->p_autok_progress[i].host_id == -1){ break; } else if(autok_thread_data->p_autok_progress[i].host_id == host->id){ autok_thread_data->p_autok_progress[i].done = 1; if(autok_thread_data->p_autok_progress[i].done > 0) complete(&autok_thread_data->autok_completion[i]); } } } time_in_s = (t1.tv_sec - t0.tv_sec); time_in_ms = (t1.tv_usec - t0.tv_usec)>>10; printk(KERN_ERR "\n[AUTOKK][Stage%d] Timediff is %d.%d(s)\n", (int)stage, time_in_s, time_in_ms ); // preempt_enable(); return 0; } #endif int send_autok_uevent(char *text, struct 
msdc_host *host) { int err = 0; char *envp[3]; char *host_buf; char *what_buf; //struct msdc_host *host = mtk_msdc_host[id]; host_buf = kzalloc(sizeof(char)*128, GFP_KERNEL); what_buf = kzalloc(sizeof(char)*128, GFP_KERNEL); snprintf(host_buf, MAX_ARGS_BUF-1, "HOST=%d", host->id); snprintf(what_buf, MAX_ARGS_BUF-1, "WHAT=%s", text); envp[0] = host_buf; envp[1] = what_buf; envp[2] = NULL; if(host != NULL){ err = kobject_uevent_env(&host->mmc->class_dev.kobj, KOBJ_CHANGE, envp); } kfree(host_buf); kfree(what_buf); if(err < 0) printk(KERN_INFO "[%s] kobject_uevent_env error = %d\n", __func__, err); return err; }
/*
 * wait_sdio_autok_ready() - DVFS-aware variant: wait for / re-run SDIO
 * auto-K stage 2 for the controller behind @data (a struct mmc_host *).
 *
 * Skips calibration entirely on 95 MHz vcore segments in META boot.
 * Otherwise: fast path re-applies stored stage-2 parameters; slow path
 * arms a progress slot and sleeps until the auto-K thread completes it.
 *
 * Return: 0 on success or when skipped, -1 if calibration was marked failed.
 */
int wait_sdio_autok_ready(void *data)
{
	int i;
	int ret = 0;
	BOOTMODE btmod;
	struct mmc_host *mmc = (struct mmc_host*)data;
	struct msdc_host *host = NULL;
	int id;
	unsigned int vcore_uv = 0;
	int is_screen_off;

	btmod = get_boot_mode();
	//printk("btmod = %d\n", btmod);
	atomic_set(&autok_is_abort, 0);
	/* 95 MHz vcore segment in META boot: no calibration needed. */
	if((vcorefs_is_95m_segment()) && (btmod == META_BOOT))
		return 0;
	/* Further boot-mode filtering is disabled; the body always runs. */
	if (1/*(btmod!=META_BOOT)*/){
		sdio_host_debug = 0;
		//host = mtk_msdc_host[id];
		host = mmc_priv(mmc);
		id = host->id;
#ifndef UT_TEST
		// claim host
#ifdef CONFIG_SDIOAUTOK_SUPPORT
		//mt_cpufreq_disable(0, true);
#ifndef _DVFS_ENABLE_
		mt_vcore_dvfs_disable_by_sdio(0, true);
#else
		// If it's screen off, sdio add suggest vol with LV
		is_screen_off = vcorefs_sdio_lock_dvfs(0);
		if(is_screen_off){
			autok_add_suggest_vol(1000000); //DVFS Define 1.0v as LV
		}
#endif
		/* If the suggested voltage exceeds the real one, force this
		 * host's slot to re-calibrate by clearing its done flag. */
		if(is_sug_more_than_real(id)){
			for (i = 0; i < HOST_MAX_NUM; i++) {
				if (p_autok_thread_data->p_autok_progress[i].host_id == id){
					p_autok_thread_data->p_autok_progress[i].done = 0;
					break;
				}
			}
		}
#endif
#ifdef MTK_SDIO30_ONLINE_TUNING_SUPPORT
		/* Pause online tuning and clear the done marker for this run. */
		atomic_set(&host->ot_work.ot_disable, 1);
		atomic_set(&host->ot_work.autok_done, 0);
#endif // MTK_SDIO30_ONLINE_TUNING_SUPPORT
		autok_claim_host(host);
#endif
#ifdef AUTOK_THREAD
		/* Fast path: a finished, non-failed run already exists for
		 * this host - just re-apply its stage-2 parameters. */
		for (i = 0; i < HOST_MAX_NUM; i++) {
			if (p_autok_thread_data->p_autok_progress[i].host_id == id &&
			    p_autok_thread_data->p_autok_progress[i].done == 1 &&
			    p_autok_thread_data->p_autok_progress[i].fail == 0) {
				vcore_uv = autok_get_current_vcore_offset();
				msdc_autok_stg2_cal(host, &p_autok_predata[id], vcore_uv);
				break;
			}
		}
		if (i != HOST_MAX_NUM)
			goto EXIT_WAIT_AUTOK_READY;
#endif
		/* Slow path: claim a free slot (host_id == -1) or our own,
		 * notify user space, and sleep on the completion. */
		for(i=0; i<HOST_MAX_NUM; i++){
			if(p_autok_thread_data->p_autok_progress[i].host_id == -1 ||
			   p_autok_thread_data->p_autok_progress[i].host_id == id){
				send_autok_uevent("s2_ready", host);
				init_completion(&p_autok_thread_data->autok_completion[i]);
				p_autok_thread_data->p_autok_progress[i].done = 0;
				p_autok_thread_data->p_autok_progress[i].fail = 0;
				p_autok_thread_data->p_autok_progress[i].host_id = id;
				wait_for_completion_interruptible(&p_autok_thread_data->autok_completion[i]);
				/* NOTE: loop index i is deliberately reused;
				 * safe only because the outer loop breaks
				 * right after this scan. */
				for (i = 0; i < HOST_MAX_NUM; i++) {
					if (p_autok_thread_data->p_autok_progress[i].host_id == id &&
					    p_autok_thread_data->p_autok_progress[i].fail == 1) {
						ret = -1;
						break;
					}
				}
				send_autok_uevent("s2_done", host);
				break;
			}
		}
		//reset_autok_cursor(0);
EXIT_WAIT_AUTOK_READY:
#ifndef UT_TEST
		// release host
		autok_release_host(host);
#ifdef CONFIG_SDIOAUTOK_SUPPORT
		//mt_cpufreq_disable(0, false);
#ifndef _DVFS_ENABLE_
		mt_vcore_dvfs_disable_by_sdio(0, false);
#else
		vcorefs_sdio_unlock_dvfs(0);
#endif
#endif
#ifdef MTK_SDIO30_ONLINE_TUNING_SUPPORT
		/* Calibration finished: mark done, re-enable online tuning. */
		atomic_set(&host->ot_work.autok_done, 1);
		atomic_set(&host->ot_work.ot_disable, 0);
#endif // MTK_SDIO30_ONLINE_TUNING_SUPPORT
#endif
	}
	return ret;
}
/*
 * autok_thread_func() - SDIO auto-K worker thread entry (pr_debug build).
 * @data: struct sdio_autok_thread_data with the target host and stage.
 *
 * Switches the controller to PIO mode at the card's max clock, runs the
 * stage-1 or stage-2 calibration callback, then reports the outcome via
 * done flags / completions.  Always returns 0.
 */
static int autok_thread_func(void *data)
{
	struct sdio_autok_thread_data *autok_thread_data;
	//struct sched_param param = { .sched_priority = 99 };
	unsigned int vcore_uv = 0;
	struct msdc_host *host;
	struct mmc_host *mmc;
	char stage = 0;
	int i,j;
	int res = 0;
	int doStg2 = 0;
	void __iomem *base;	/* consumed implicitly by the msdc_* macros */
	u32 dma;
	struct timeval t0,t1;
	int time_in_s, time_in_ms;
	// unsigned long flags;

	autok_thread_data = (struct sdio_autok_thread_data *)data;
	//sched_setscheduler(current, SCHED_FIFO, &param);
	// preempt_disable();
	host = autok_thread_data->host;
	mmc = host->mmc;
	stage = autok_thread_data->stage;
	base = host->base;
	dma = msdc_dma_status();
	// Inform msdc_set_mclk() auto-K is going to process
	sdio_autok_processed = 1;
	// Set clock to card max clock
	mmc_set_clock(mmc, mmc->ios.clock);
	msdc_sdio_set_long_timing_delay_by_freq(host, mmc->ios.clock);
	msdc_ungate_clock(host);
	// Set PIO mode
	msdc_dma_off();
	vcore_uv = autok_get_current_vcore_offset();
	// End of initialize
	do_gettimeofday(&t0);
	if(autok_thread_data->log != NULL)
		log_info = autok_thread_data->log;
	if(stage == 1) {
		// call stage 1 auto-K callback function
		autok_thread_data->is_autok_done[host->id] = 0;
		res = msdc_autok_stg1_cal(host, vcore_uv, autok_thread_data->p_autok_predata);
		if(res){
			pr_debug("[%s] Auto-K stage 1 fail, res = %d, set msdc parameter settings stored in nvram to 0\n", __func__, res);
			/* Invalidate the nvram-backed parameter set. */
			memset(autok_thread_data->p_autok_predata->ai_data[0], 0, autok_thread_data->p_autok_predata->param_count * sizeof(unsigned int));
			autok_thread_data->is_autok_done[host->id] = 2;
		}
		// For Abort function
		if(atomic_read(&autok_is_abort) == 1){
			autok_thread_data->is_autok_done[host->id] = 3;
		}
	} else if(stage == 2) {
		// call stage 2 auto-K callback function
		// check if msdc params of different volt are all 0, if so, that means auto-K stg1 failed
		for(i=0; i<autok_thread_data->p_autok_predata->vol_count; i++){
			for(j=0; j<autok_thread_data->p_autok_predata->param_count; j++){
				if(autok_thread_data->p_autok_predata->ai_data[i][j].data.sel != 0){
					doStg2 = 1;
					break;
				}
			}
			if(doStg2)
				break;
		}
		if(doStg2){
			res = msdc_autok_stg2_cal(host, autok_thread_data->p_autok_predata, vcore_uv);
			if(res){
				pr_debug("[%s] Auto-K stage 2 fail, res = %d, downgrade SDIO freq to 50MHz\n", __func__, res);
				mmc->ios.clock = 50*1000*1000;
				mmc_set_clock(mmc, mmc->ios.clock);
				msdc_sdio_set_long_timing_delay_by_freq(host, mmc->ios.clock);
				sdio_autok_processed = 0;
				/* Mark every slot owned by this host failed. */
				for (i = 0; i < HOST_MAX_NUM; i++) {
					if (autok_thread_data->p_autok_progress[i].host_id == -1) {
						break;
					} else if (autok_thread_data->p_autok_progress[i].host_id == host->id) {
						autok_thread_data->p_autok_progress[i].fail = 1;
					}
				}
			}
		} else {
			// Auto-K stg1 failed
			pr_debug("[%s] Auto-K stage 1 fail, downgrade SDIO freq to 50MHz\n", __func__);
			mmc->ios.clock = 50*1000*1000;
			mmc_set_clock(mmc, mmc->ios.clock);
			msdc_sdio_set_long_timing_delay_by_freq(host, mmc->ios.clock);
			sdio_autok_processed = 0;
			/* Mark every slot owned by this host failed. */
			for (i = 0; i < HOST_MAX_NUM; i++) {
				if (autok_thread_data->p_autok_progress[i].host_id == -1) {
					break;
				} else if (autok_thread_data->p_autok_progress[i].host_id == host->id) {
					autok_thread_data->p_autok_progress[i].fail = 1;
				}
			}
		}
		log_info = NULL;
	} else {
		pr_debug("[%s] stage %d doesn't support in auto-K\n", __func__, stage);
		//return -EFAULT;
	}
	do_gettimeofday(&t1);
	/* Restore DMA state and release the clock reference. */
	if(dma == DMA_ON)
		msdc_dma_on();
	msdc_gate_clock(host,1);
	// [FIXDONE] Tell native module that auto-K has finished
	if(stage == 1)
		autok_calibration_done(host->id, autok_thread_data);
	else if(stage == 2){
		for(i=0; i<HOST_MAX_NUM; i++){
			if(autok_thread_data->p_autok_progress[i].host_id == -1){
				break;
			} else if(autok_thread_data->p_autok_progress[i].host_id == host->id){
				autok_thread_data->p_autok_progress[i].done = 1;
				if(autok_thread_data->p_autok_progress[i].done > 0)
					complete(&autok_thread_data->autok_completion[i]);
			}
		}
	}
	time_in_s = (t1.tv_sec - t0.tv_sec);
	/* NOTE(review): ">>10" divides by 1024, not 1000, and the difference
	 * can be negative when t1.tv_usec < t0.tv_usec - the logged value is
	 * only approximate.  Confirm before relying on it. */
	time_in_ms = (t1.tv_usec - t0.tv_usec)>>10;
	pr_err("\n[AUTOKK][Stage%d] Timediff is %d.%d(s)\n", (int)stage, time_in_s, time_in_ms );
	// preempt_enable();
	return 0;
}
/*
 * wait_sdio_autok_ready() - vcorefs-OPP variant: wait for / re-run SDIO
 * auto-K stage 2 for the controller behind @data (a struct mmc_host *).
 *
 * Only runs in NORMAL or META boot.  Requests the performance OPP while
 * calibration is in flight and releases it afterwards.
 *
 * Return: 0 on success or when skipped, -1 if calibration was marked failed.
 */
int wait_sdio_autok_ready(void *data)
{
	int i;
	int ret = 0;
	enum boot_mode_t btmod;
	struct mmc_host *mmc = (struct mmc_host *)data;
	struct msdc_host *host = NULL;
	int id;
	unsigned int vcore_uv = 0;

	btmod = get_boot_mode();
	pr_debug("btmod = %d\n", btmod);
	if ((btmod != NORMAL_BOOT) && (btmod != META_BOOT)) {
		pr_debug("Not META or normal boot, return directly\n");
		return 0;
	}
	if (1 /*(btmod!=META_BOOT) && (btmod!=FACTORY_BOOT) && (btmod!=ATE_FACTORY_BOOT) */) {
		sdio_host_debug = 0;
		/* host = mtk_msdc_host[id]; */
		host = mmc_priv(mmc);
		id = host->id;
#ifndef UT_TEST
		/* claim host */
#ifdef CONFIG_SDIOAUTOK_SUPPORT
		/* ALPS02017456 */
		/* true if dwork was pending, false otherwise */
		if (cancel_delayed_work_sync(&(host->set_vcore_workq)) == 0)
			pr_debug("** no pending vcore_workq\n");
		else
			pr_debug("** cancel vcore_workq\n");
		/* ALPS02017456 */
		pr_debug("wait_sdio_autok_ready(): is_vcorefs_can_work= %d\n",
			 is_vcorefs_can_work());
		/* Hold the performance OPP for the duration of calibration. */
		if (vcorefs_request_dvfs_opp(KIR_SDIO, OPPI_PERF) != 0) {
			/* performance mode, return 0 pass */
			/* BUG_ON("vcorefs_request_dvfs_opp@OPPI_PERF fail!\n"); */
			pr_err("vcorefs_request_dvfs_opp@OPPI_PERF fail!\n");
		}
		g_autok_vcore_sel[0] = vcorefs_get_curr_voltage();
		pr_debug("wait_sdio_autok_ready(): vcorefs_get_curr_voltage= %d\n",
			 vcorefs_get_curr_voltage());
		/* mt_cpufreq_disable(0, true); */
		/* FIXME@CCJ mt_vcore_dvfs_disable_by_sdio(0, true); */
		/* vcorefs_sdio_lock_dvfs(0); //ccyeh */
#endif
#ifdef MTK_SDIO30_ONLINE_TUNING_SUPPORT
		/* ccyeh atomic_set(&host->ot_work.ot_disable, 1); */
#endif /* MTK_SDIO30_ONLINE_TUNING_SUPPORT */
		autok_claim_host(host);
#endif
#ifdef AUTOK_THREAD
		/* Fast path: a finished, non-failed run already exists for
		 * this host - just re-apply its stage-2 parameters. */
		for (i = 0; i < HOST_MAX_NUM; i++) {
			if (p_autok_thread_data->p_autok_progress[i].host_id == id
			    && p_autok_thread_data->p_autok_progress[i].done == 1
			    && p_autok_thread_data->p_autok_progress[i].fail == 0) {
				vcore_uv = autok_get_current_vcore_offset();
				msdc_autok_stg2_cal(host, &p_autok_predata[id], vcore_uv);
				break;
			}
		}
		if (i != HOST_MAX_NUM)
			goto EXIT_WAIT_AUTOK_READY;
#endif
		/* Slow path: claim a free slot (host_id == -1) or our own,
		 * notify user space, and sleep on the completion. */
		for (i = 0; i < HOST_MAX_NUM; i++) {
			if (p_autok_thread_data->p_autok_progress[i].host_id == -1
			    || p_autok_thread_data->p_autok_progress[i].host_id == id) {
				send_autok_uevent("s2_ready", host);
				init_completion(&p_autok_thread_data->autok_completion[i]);
				p_autok_thread_data->p_autok_progress[i].done = 0;
				p_autok_thread_data->p_autok_progress[i].fail = 0;
				p_autok_thread_data->p_autok_progress[i].host_id = id;
				wait_for_completion_interruptible
				    (&p_autok_thread_data->autok_completion[i]);
				/* NOTE: loop index i is deliberately reused;
				 * safe only because the outer loop breaks
				 * right after this scan. */
				for (i = 0; i < HOST_MAX_NUM; i++) {
					if (p_autok_thread_data->p_autok_progress[i].host_id == id
					    && p_autok_thread_data->p_autok_progress[i].fail == 1) {
						ret = -1;
						break;
					}
				}
				send_autok_uevent("s2_done", host);
				break;
			}
		}
		/* reset_autok_cursor(0); */
EXIT_WAIT_AUTOK_READY:
#ifndef UT_TEST
		/* release host */
		autok_release_host(host);
#ifdef CONFIG_SDIOAUTOK_SUPPORT
		/* mt_cpufreq_disable(0, false); */
		/* FIXME@CCJ mt_vcore_dvfs_disable_by_sdio(0, false); */
		/* vcorefs_sdio_unlock_dvfs(0);//ccyeh */
		/* Drop the OPP request taken at entry. */
		if (vcorefs_request_dvfs_opp(KIR_SDIO, OPPI_UNREQ) != 0) {
			/* un-request, return 0 pass */
			/* BUG_ON("vcorefs_request_dvfs_opp@OPPI_UNREQ fail!\n"); */
			pr_err("vcorefs_request_dvfs_opp@OPPI_UNREQ fail!\n");
		}
#endif
#ifdef MTK_SDIO30_ONLINE_TUNING_SUPPORT
		/* Calibration finished: mark done, re-enable online tuning. */
		atomic_set(&host->ot_work.autok_done, 1);
		atomic_set(&host->ot_work.ot_disable, 0);
#endif /* MTK_SDIO30_ONLINE_TUNING_SUPPORT */
#endif
	}
	return ret;
}