static void mmc_test_run(struct mmc_test_card *test, int testcase) { int i, ret; printk(KERN_INFO "%s: Starting tests of card %s...\n", mmc_hostname(test->card->host), mmc_card_id(test->card)); mmc_claim_host(test->card->host); for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) { if (testcase && ((i + 1) != testcase)) continue; printk(KERN_INFO "%s: Test case %d. %s...\n", mmc_hostname(test->card->host), i + 1, mmc_test_cases[i].name); if (mmc_test_cases[i].prepare) { ret = mmc_test_cases[i].prepare(test); if (ret) { printk(KERN_INFO "%s: Result: Prepare " "stage failed! (%d)\n", mmc_hostname(test->card->host), ret); continue; } } ret = mmc_test_cases[i].run(test); switch (ret) { case RESULT_OK: printk(KERN_INFO "%s: Result: OK\n", mmc_hostname(test->card->host)); break; case RESULT_FAIL: printk(KERN_INFO "%s: Result: FAILED\n", mmc_hostname(test->card->host)); break; case RESULT_UNSUP_HOST: printk(KERN_INFO "%s: Result: UNSUPPORTED " "(by host)\n", mmc_hostname(test->card->host)); break; case RESULT_UNSUP_CARD: printk(KERN_INFO "%s: Result: UNSUPPORTED " "(by card)\n", mmc_hostname(test->card->host)); break; default: printk(KERN_INFO "%s: Result: ERROR (%d)\n", mmc_hostname(test->card->host), ret); } if (mmc_test_cases[i].cleanup) { ret = mmc_test_cases[i].cleanup(test); if (ret) { printk(KERN_INFO "%s: Warning: Cleanup " "stage failed! (%d)\n", mmc_hostname(test->card->host), ret); } } } mmc_release_host(test->card->host); printk(KERN_INFO "%s: Tests completed.\n", mmc_hostname(test->card->host)); }
/*
 * Card-detect worker: re-check an already-registered removable card,
 * and otherwise probe for a newly inserted one (SDIO first, then SD,
 * then MMC). With CONFIG_MMC_PARANOID_SD_INIT, a failed probe (other
 * than -ENOMEDIUM) is retried up to 2 times via the 'retry' label.
 * The wakelock is extended on insertion/removal so userspace can react.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err = 0;
	int extend_wakelock = 0;
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	int retries = 2;
#endif

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE)) {
		host->bus_ops->detect(host);

		/* If the card was removed the bus will be marked
		 * as dead - extend the wakelock so userspace
		 * can respond */
		if (host->bus_dead)
			extend_wakelock = 1;
	}

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	/* Host-level card-detect says "no card": nothing to probe. */
	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

retry:
	mmc_claim_host(host);

	mmc_power_up(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		/* mmc_attach_sdio() releases the host itself. */
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/* Nothing answered: release the host and power the slot down. */
	mmc_release_host(host);
	mmc_power_off(host);

out:
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	/* Retry the whole probe on transient failures (not "no medium"). */
	if (err && (err != -ENOMEDIUM) && retries) {
		printk(KERN_INFO "%s: Re-scan card rc = %d (retries = %d)\n",
		       mmc_hostname(host), err, retries);
		retries--;
		goto retry;
	}
#endif
	if (extend_wakelock)
		wake_lock_timeout(&host->wakelock, 5 * HZ);
	else
		wake_unlock(&host->wakelock);

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
/*
 * Card-detect worker (vendor variant): debounces the card-detect GPIO
 * via ->get_status(), honours host->rescan_disable, re-checks an
 * already-registered removable card, and otherwise probes for a new
 * one (SDIO, then SD, then MMC).
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	int extend_wakelock = 0;
	int ret;
	unsigned long flags;

	/*
	 * Add checking gpio pin status before initialization of bus.
	 * If the GPIO pin status is changed, check gpio pin status again.
	 * Should check until it's stable.
	 * [email protected], 2010-09-27
	 */
	if (host->ops->get_status){
		ret = host->ops->get_status(host);
		if (ret == 1) {
			/* Pin still unstable: re-poll in ~1/3 second. */
			mmc_schedule_delayed_work(&host->detect, HZ / 3);
			return;
		}
	}

	/* Bail out early if rescanning is administratively disabled. */
	spin_lock_irqsave(&host->lock, flags);
	if (host->rescan_disable) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE)) {
		host->bus_ops->detect(host);

		/* If the card was removed the bus will be marked
		 * as dead - extend the wakelock so userspace
		 * can respond */
		if (host->bus_dead)
			extend_wakelock = 1;
	}

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/* Nothing answered: drop the claim and power the slot down. */
	mmc_release_host(host);
	mmc_power_off(host);

out:
	if (extend_wakelock)
		wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
	else
		wake_unlock(&mmc_delayed_work_wake_lock);

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
static int mmc_ext_csd_open(struct inode *inode, struct file *filp) #endif { #ifdef CONFIG_MACH_LGE /* */ struct mmc_card *card = s->private; #else struct mmc_card *card = inode->i_private; char *buf; ssize_t n = 0; #endif u8 *ext_csd; #ifdef CONFIG_MACH_LGE /* */ u8 ext_csd_rev; int err; const char *str; char *buf_for_health_report; char *buf_for_firmwware_version; ssize_t output = 0; int cnt; #else int err, i; buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); if (!buf) return -ENOMEM; #endif ext_csd = kmalloc(512, GFP_KERNEL); if (!ext_csd) { err = -ENOMEM; goto out_free; } mmc_claim_host(card->host); err = mmc_send_ext_csd(card, ext_csd); mmc_release_host(card->host); if (err) goto out_free; #ifdef CONFIG_MACH_LGE /* */ ext_csd_rev = ext_csd[192]; #else for (i = 511; i >= 0; i--) n += sprintf(buf + n, "%02x", ext_csd[i]); n += sprintf(buf + n, "\n"); BUG_ON(n != EXT_CSD_STR_LEN); filp->private_data = buf; kfree(ext_csd); return 0; #endif #ifdef CONFIG_MACH_LGE /* */ switch (ext_csd_rev) { case 7: str = "5.0"; break; case 6: str = "4.5"; break; case 5: str = "4.41"; break; case 3: str = "4.3"; break; case 2: str = "4.2"; break; case 1: str = "4.1"; break; case 0: str = "4.0"; break; default: goto out_free; } seq_printf(s, "Extended CSD rev 1.%d (MMC %s)\n", ext_csd_rev, str); if (ext_csd_rev < 3) goto out_free; /* No ext_csd */ /* Parse the Extended CSD registers. * Reserved bit should be read as "0" in case of spec older * than A441. 
*/ /* B50: reserved [511:506] */ /* B45: reserved [511:505] */ if (ext_csd_rev >= 7) seq_printf(s, "[505] Extended Security Commands Error, ext_security_err: 0x%02x\n", ext_csd[505]); seq_printf(s, "[504] Supported Command Sets, s_cmd_set: 0x%02x\n", ext_csd[504]); seq_printf(s, "[503] HPI features, hpi_features: 0x%02x\n", ext_csd[503]); seq_printf(s, "[502] Background operations support, bkops_support: 0x%02x\n", ext_csd[502]); if (ext_csd_rev >= 6) { seq_printf(s, "[501] Max packed read commands, max_packed_reads: 0x%02x\n", ext_csd[501]); seq_printf(s, "[500] Max packed write commands, max_packed_writes: 0x%02x\n", ext_csd[500]); seq_printf(s, "[499] Data Tag Support, data_tag_support: 0x%02x\n", ext_csd[499]); seq_printf(s, "[498] Tag Unit Size, tag_unit_size: 0x%02x\n", ext_csd[498]); seq_printf(s, "[497] Tag Resources Size, tag_res_size: 0x%02x\n", ext_csd[497]); seq_printf(s, "[496] Context management capabilities, context_capabilities: 0x%02x\n", ext_csd[496]); seq_printf(s, "[495] Large Unit size, large_unit_size_m1: 0x%02x\n", ext_csd[495]); seq_printf(s, "[494] Extended partitions attribute support, ext_support: 0x%02x\n", ext_csd[494]); if (ext_csd_rev >= 7) { buf_for_health_report = kmalloc(66, GFP_KERNEL); if (!buf_for_health_report) return -ENOMEM; buf_for_firmwware_version = kmalloc(18, GFP_KERNEL); if (!buf_for_firmwware_version) return -ENOMEM; seq_printf(s, "[493] Supported modes, supported_modes: 0x%02x\n", ext_csd[493]); seq_printf(s, "[492] Ffu features, ffu_features: 0x%02x\n", ext_csd[492]); seq_printf(s, "[491] Operation codes timeout, operation_code_timeout: 0x%02x\n", ext_csd[491]); seq_printf(s, "[490:487] Ffu features, ffu_features: 0x%08x\n", (ext_csd[487] << 0) | (ext_csd[488] << 8) | (ext_csd[489] << 16) | (ext_csd[490] << 24)); /* B50: reserved [486:306] */ seq_printf(s, "[305:302] Number of FW sectors correctly programmed, number_of_fw_sectors_correctly_programmed: 0x%08x\n", (ext_csd[302] << 0) | (ext_csd[303] << 8) | 
(ext_csd[304] << 16) | (ext_csd[305] << 24)); output = 0; for (cnt = 301 ; cnt >= 270 ; cnt--) output += snprintf(buf_for_health_report + output, 3, "%02x", ext_csd[cnt]); output += snprintf(buf_for_health_report + output, 2, "\n"); seq_printf(s, "[301:270] Vendor proprietary health report, vendor_proprietary_health_report: %s", buf_for_health_report); kfree(buf_for_health_report); seq_printf(s, "[269] Device life time estimation type B, device_life_time_est_typ_b: 0x%02x\n", ext_csd[269]); seq_printf(s, "[268] Device life time estimation type A, device_life_time_est_typ_a: 0x%02x\n", ext_csd[268]); seq_printf(s, "[267] Pre EOL information, pre_eol_info: 0x%02x\n", ext_csd[267]); seq_printf(s, "[266] Optimal read size, optimal_read_size: 0x%02x\n", ext_csd[266]); seq_printf(s, "[265] Optimal write size, optimal_write_size: 0x%02x\n", ext_csd[265]); seq_printf(s, "[264] Optimal trim unit size, optimal_trim_unit_size: 0x%02x\n", ext_csd[264]); seq_printf(s, "[263:262] Device version, device_version: 0x%02x\n", (ext_csd[262] << 0) | (ext_csd[263] << 8)); output = 0; for (cnt = 261 ; cnt >= 254 ; cnt--) output += snprintf(buf_for_firmwware_version + output, 3, "%02x", ext_csd[cnt]); output += snprintf(buf_for_firmwware_version + output, 2, "\n"); seq_printf(s, "[261:254] Firmware version, firmwware_version: %s", buf_for_firmwware_version); kfree(buf_for_firmwware_version); seq_printf(s, "[253] Power class for 200MHz, DDR at VCC=3.6V, pwr_cl_ddr_200_360: 0x%02x\n", ext_csd[253]); } /* B45: reserved [493:253] */ seq_printf(s, "[252:249] Cache size, cache_size %d KiB\n", (ext_csd[249] << 0) | (ext_csd[250] << 8) | (ext_csd[251] << 16) | (ext_csd[252] << 24)); seq_printf(s, "[248] Generic CMD6 timeout, generic_cmd6_time: 0x%02x\n", ext_csd[248]); seq_printf(s, "[247] Power off notification timeout, power_off_long_time: 0x%02x\n", ext_csd[247]); seq_printf(s, "[246] Background operations status, bkops_status: 0x%02x\n", ext_csd[246]); seq_printf(s, "[245:242] Number of 
correctly programmed sectors, correctly_prg_sectors_num %d KiB\n", (ext_csd[242] << 0) | (ext_csd[243] << 8) | (ext_csd[244] << 16) | (ext_csd[245] << 24)); } /* B45: Reserved [493:253] * A441: Reserved [501:247] * A43: reserved [246:229] */ if (ext_csd_rev >= 5) { seq_printf(s, "[241] 1st initialization time after partitioning, ini_timeout_ap: 0x%02x\n", ext_csd[241]); /* B50, B45, A441: reserved [240] */ seq_printf(s, "[239] Power class for 52MHz, DDR at 3.6V, pwr_cl_ddr_52_360: 0x%02x\n", ext_csd[239]); seq_printf(s, "[238] POwer class for 52MHz, DDR at 1.95V, pwr_cl_ddr_52_195: 0x%02x\n", ext_csd[238]); /* A441: reserved [237-236] */ if (ext_csd_rev >= 6) { seq_printf(s, "[237] Power class for 200MHz, SDR at 3.6V, pwr_cl_200_360: 0x%02x\n", ext_csd[237]); seq_printf(s, "[236] Power class for 200MHz, SDR at 1.95V, pwr_cl_200_195: 0x%02x\n", ext_csd[236]); } seq_printf(s, "[235] Minimun Write Performance for 8bit at 52MHz in DDR mode, min_perf_ddr_w_8_52: 0x%02x\n", ext_csd[235]); seq_printf(s, "[234] Minimun Read Performance for 8bit at 52MHz in DDR modemin_perf_ddr_r_8_52: 0x%02x\n", ext_csd[234]); /* B50, B45, A441: reserved [233] */ seq_printf(s, "[232] TRIM Multiplier, trim_mult: 0x%02x\n", ext_csd[232]); seq_printf(s, "[231] Secure Feature support, sec_feature_support: 0x%02x\n", ext_csd[231]); } if (ext_csd_rev == 5 || ext_csd_rev == 7) { /* Obsolete in 4.5 */ /*---->revived in 5.0*/ seq_printf(s, "[230] Secure Erase Multiplier, sec_erase_mult: 0x%02x\n", ext_csd[230]); seq_printf(s, "[229] Secure TRIM Multiplier, sec_trim_mult: 0x%02x\n", ext_csd[229]); } seq_printf(s, "[228] Boot information, boot_info: 0x%02x\n", ext_csd[228]); /* B50, B45, A441/A43: reserved [227] */ seq_printf(s, "[226] Boot partition size, boot_size_mult : 0x%02x\n", ext_csd[226]); seq_printf(s, "[225] Access size, acc_size: 0x%02x\n", ext_csd[225]); seq_printf(s, "[224] High-capacity erase unit size, hc_erase_grp_size: 0x%02x\n", ext_csd[224]); seq_printf(s, "[223] High-capacity 
erase timeout, erase_timeout_mult: 0x%02x\n", ext_csd[223]); seq_printf(s, "[222] Reliable write sector count, rel_wr_sec_c: 0x%02x\n", ext_csd[222]); seq_printf(s, "[221] High-capacity write protect group size, hc_wp_grp_size: 0x%02x\n", ext_csd[221]); seq_printf(s, "[220] Sleep current(VCC), s_c_vcc: 0x%02x\n", ext_csd[220]); seq_printf(s, "[219] Sleep current(VCCQ), s_c_vccq: 0x%02x\n", ext_csd[219]); if (ext_csd_rev == 7) seq_printf(s, "[218] Production state awareness timeout, production_state_awareness_timeout: 0x%02x\n", ext_csd[218]); /* B45, A441/A43: reserved [218] */ seq_printf(s, "[217] Sleep/awake timeout, s_a_timeout: 0x%02x\n", ext_csd[217]); if (ext_csd_rev == 7) seq_printf(s, "[216] Sleep notification timeout, sleep_notification_time: 0x%02x\n", ext_csd[216]); /* B45, A441/A43: reserved [216] */ seq_printf(s, "[215:212] Sector Count, sec_count: 0x%08x\n", (ext_csd[215] << 24) | (ext_csd[214] << 16) | (ext_csd[213] << 8) | ext_csd[212]); /* B50, B45, A441/A43: reserved [211] */ seq_printf(s, "[210] Minimum Write Performance for 8bit at 52MHz, min_perf_w_8_52: 0x%02x\n", ext_csd[210]); seq_printf(s, "[209] Minimum Read Performance for 8bit at 52MHz, min_perf_r_8_52: 0x%02x\n", ext_csd[209]); seq_printf(s, "[208] Minimum Write Performance for 8bit at 26MHz, for 4bit at 52MHz, min_perf_w_8_26_4_52: 0x%02x\n", ext_csd[208]); seq_printf(s, "[207] Minimum Read Performance for 8bit at 26MHz, for 4bit at 52MHz, min_perf_r_8_26_4_52: 0x%02x\n", ext_csd[207]); seq_printf(s, "[206] Minimum Write Performance for 4bit at 26MHz, min_perf_w_4_26: 0x%02x\n", ext_csd[206]); seq_printf(s, "[205] Minimum Read Performance for 4bit at 26MHz, min_perf_r_4_26: 0x%02x\n", ext_csd[205]); /* B45: reserved [204] */ /* A441/A43: reserved [204] */ seq_printf(s, "[203] Power class for 26MHz at 3.6V, pwr_cl_26_360: 0x%02x\n", ext_csd[203]); seq_printf(s, "[202] Power class for 52MHz at 3.6V, pwr_cl_52_360: 0x%02x\n", ext_csd[202]); seq_printf(s, "[201] Power class for 26MHz at 
1.95V, pwr_cl_26_195: 0x%02x\n", ext_csd[201]); seq_printf(s, "[200] Power class for 52MHz at 1.95V, pwr_cl_52_195: 0x%02x\n", ext_csd[200]); /* A43: reserved [199:198] */ if (ext_csd_rev >= 5) { seq_printf(s, "[199] Partition switching timing, partition_switch_time: 0x%02x\n", ext_csd[199]); seq_printf(s, "[198] Out-of-interrupt busy timing, out_of_interrupt_time: 0x%02x\n", ext_csd[198]); } /* B50, B45: reserved [195] [193] [190] [188] [186] [184] [182] [180] [176] */ /* A441/A43: reserved [197] [195] [193] [190] [188] * [186] [184] [182] [180] [176] */ if (ext_csd_rev >= 6) seq_printf(s, "[197] IO Driver Strength, driver_strength: 0x%02x\n", ext_csd[197]); seq_printf(s, "[196] Device type, device_type: 0x%02x\n", ext_csd[196]); seq_printf(s, "[194] CSD structure version, csd_structure: 0x%02x\n", ext_csd[194]); seq_printf(s, "[192] Extended CSD revision, ext_csd_rev: 0x%02x\n", ext_csd[192]); seq_printf(s, "[191] Command set, cmd_set: 0x%02x\n", ext_csd[191]); seq_printf(s, "[189] Command set revision, cmd_set_rev: 0x%02x\n", ext_csd[189]); seq_printf(s, "[187] Power class, power_class: 0x%02x\n", ext_csd[187]); seq_printf(s, "[185] High-speed interface timing, hs_timing: 0x%02x\n", ext_csd[185]); /* bus_width: ext_csd[183] not readable */ seq_printf(s, "[181] Erased memory content, erased_mem_cont: 0x%02x\n", ext_csd[181]); seq_printf(s, "[179] Partition configuration, partition_config: 0x%02x\n", ext_csd[179]); seq_printf(s, "[178] Boot config protection, boot_config_prot: 0x%02x\n", ext_csd[178]); seq_printf(s, "[177] Boot bus Conditions, boot_bus_conditions: 0x%02x\n", ext_csd[177]); seq_printf(s, "[175] High-density erase group definition, erase_group_def: 0x%02x\n", ext_csd[175]); /* A43: reserved [174:0] */ if (ext_csd_rev >= 5) { seq_printf(s, "[174] Boot write protection status registers, boot_wp_status: 0x%02x\n", ext_csd[174]); seq_printf(s, "[173] Boot area write protection register, boot_wp: 0x%02x\n", ext_csd[173]); /* B45, A441: reserved [172] */ 
seq_printf(s, "[171] User area write protection register, user_wp: 0x%02x\n", ext_csd[171]); /* B45, A441: reserved [170] */ seq_printf(s, "[169] FW configuration, fw_config: 0x%02x\n", ext_csd[169]); seq_printf(s, "[168] RPMB Size, rpmb_size_mult: 0x%02x\n", ext_csd[168]); seq_printf(s, "[167] Write reliability setting register, wr_rel_set: 0x%02x\n", ext_csd[167]); seq_printf(s, "[166] Write reliability parameter register, wr_rel_param: 0x%02x\n", ext_csd[166]); /* sanitize_start ext_csd[165]: not readable * bkops_start ext_csd[164]: only writable */ seq_printf(s, "[163] Enable background operations handshake, bkops_en: 0x%02x\n", ext_csd[163]); seq_printf(s, "[162] H/W reset function, rst_n_function: 0x%02x\n", ext_csd[162]); seq_printf(s, "[161] HPI management, hpi_mgmt: 0x%02x\n", ext_csd[161]); seq_printf(s, "[160] Partitioning Support, partitioning_support: 0x%02x\n", ext_csd[160]); seq_printf(s, "[159:157] Max Enhanced Area Size, max_enh_size_mult: 0x%06x\n", (ext_csd[159] << 16) | (ext_csd[158] << 8) | ext_csd[157]); seq_printf(s, "[156] Partitions attribute, partitions_attribute: 0x%02x\n", ext_csd[156]); seq_printf(s, "[155] Partitioning Setting, partition_setting_completed: 0x%02x\n", ext_csd[155]); seq_printf(s, "[154:152] General Purpose Partition Size, gp_size_mult_4: 0x%06x\n", (ext_csd[154] << 16) | (ext_csd[153] << 8) | ext_csd[152]); seq_printf(s, "[151:149] General Purpose Partition Size, gp_size_mult_3: 0x%06x\n", (ext_csd[151] << 16) | (ext_csd[150] << 8) | ext_csd[149]); seq_printf(s, "[148:146] General Purpose Partition Size, gp_size_mult_2: 0x%06x\n", (ext_csd[148] << 16) | (ext_csd[147] << 8) | ext_csd[146]); seq_printf(s, "[145:143] General Purpose Partition Size, gp_size_mult_1: 0x%06x\n", (ext_csd[145] << 16) | (ext_csd[144] << 8) | ext_csd[143]); seq_printf(s, "[142:140] Enhanced User Data Area Size, enh_size_mult: 0x%06x\n", (ext_csd[142] << 16) | (ext_csd[141] << 8) | ext_csd[140]); seq_printf(s, "[139:136] Enhanced User Data Start 
Address, enh_start_addr: 0x%06x\n", (ext_csd[139] << 24) | (ext_csd[138] << 16) | (ext_csd[137] << 8) | ext_csd[136]); /* B45, A441: reserved [135] [133] */ seq_printf(s, "[134] Bad Block Management mode, sec_bad_blk_mgmnt: 0x%02x\n", ext_csd[134]); /* A441: reserved [133:0] */ } /* B45 */ if (ext_csd_rev >= 6) { int j; /* tcase_support ext_csd[132] not readable */ seq_printf(s, "[131] Periodic Wake-up, periodic_wakeup: 0x%02x\n", ext_csd[131]); seq_printf(s, "[130] Program CID CSD in DDR mode support, program_cid_csd_ddr_support: 0x%02x\n", ext_csd[130]); /* B45: reserved [129:128] */ for (j = 127; j >= 64; j--) seq_printf(s, "[127:64] Vendor Specific Fields, vendor_specific_field[%d]: 0x%02x\n", j, ext_csd[j]); seq_printf(s, "[63] Native sector size, native_sector_size: 0x%02x\n", ext_csd[63]); seq_printf(s, "[62] Sector size emulation, use_native_sector: 0x%02x\n", ext_csd[62]); seq_printf(s, "[61] Sector size, data_sector_size: 0x%02x\n", ext_csd[61]); seq_printf(s, "[60] 1st initialization after disabling sector size emulation, ini_timeout_emu: 0x%02x\n", ext_csd[60]); seq_printf(s, "[59] Class 6 commands control, class_6_ctrl: 0x%02x\n", ext_csd[59]); seq_printf(s, "[58] Number of addressed group to be Released, dyncap_needed: 0x%02x\n", ext_csd[58]); seq_printf(s, "[57:56] Exception events control, exception_events_ctrl: 0x%04x\n", (ext_csd[57] << 8) | ext_csd[56]); seq_printf(s, "[55:54] Exception events status, exception_events_status: 0x%04x\n", (ext_csd[55] << 8) | ext_csd[54]); seq_printf(s, "[53:52] Extended Partitions Attribute, ext_partitions_attribute: 0x%04x\n", (ext_csd[53] << 8) | ext_csd[52]); for (j = 51; j >= 37; j--) seq_printf(s, "[51:37]Context configuration, context_conf[%d]: 0x%02x\n", j, ext_csd[j]); seq_printf(s, "[36] Packed command status, packed_command_status: 0x%02x\n", ext_csd[36]); seq_printf(s, "[35] Packed command failure index, packed_failure_index: 0x%02x\n", ext_csd[35]); seq_printf(s, "[34] Power Off Notification, 
power_off_notification: 0x%02x\n", ext_csd[34]); seq_printf(s, "[33] Control to turn the Cache On Off, cache_ctrl: 0x%02x\n", ext_csd[33]); /* flush_cache ext_csd[32] not readable */ /*Reserved [31:0] */ } #endif out_free: #ifndef CONFIG_MACH_LGE /* */ kfree(buf); #endif kfree(ext_csd); return err; }
/*
 * Starting point for SD card init.
 *
 * Called with the host claimed. On success the card is initialised and
 * registered and the host is released. On failure the bus is detached,
 * the host released, and a negative errno returned.
 */
int mmc_attach_sd(struct mmc_host *host, u32 ocr)
{
	int err;
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	int retries;
#endif

	BUG_ON(!host);
	WARN_ON(!host->claimed);	/* caller must hold the host */

	mmc_sd_attach_bus_ops(host);

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		mmc_go_idle(host);

		err = mmc_spi_read_ocr(host, 0, &ocr);
		if (err)
			goto err;
	}

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		printk(KERN_WARNING "%s: card claims to support voltages "
		       "below the defined range. These will be ignored.\n",
		       mmc_hostname(host));
		ocr &= ~0x7F;
	}

	if (ocr & MMC_VDD_165_195) {
		printk(KERN_WARNING "%s: SD card claims to support the "
		       "incompletely defined 'low voltage range'. This "
		       "will be ignored.\n", mmc_hostname(host));
		ocr &= ~MMC_VDD_165_195;
	}

	host->ocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage(s) of the card(s)?
	 */
	if (!host->ocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	/* Retry flaky card init up to 5 times before giving up. */
	retries = 5;
	while (retries) {
		err = mmc_sd_init_card(host, host->ocr, NULL);
		if (err) {
			retries--;
			continue;
		}
		break;
	}

	if (!retries) {
		printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
		       mmc_hostname(host), err);
		goto err;
	}
#else
	err = mmc_sd_init_card(host, host->ocr, NULL);
	if (err)
		goto err;
#endif

	mmc_release_host(host);

	err = mmc_add_card(host->card);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	/* Host was released above; re-claim after removing the card. */
	mmc_remove_card(host->card);
	host->card = NULL;
	mmc_claim_host(host);
err:
	mmc_detach_bus(host);
	mmc_release_host(host);

	printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
	       mmc_hostname(host), err);

	return err;
}
/*
 * Starting point for MMC card init.
 *
 * Called with the host claimed. On success the card is initialised and
 * registered and the host is released. On failure the bus is detached,
 * the host released, and a negative errno returned.
 */
int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
{
	int err;

	BUG_ON(!host);
	WARN_ON(!host->claimed);	/* caller must hold the host */

	mmc_attach_bus_ops(host);

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		printk(KERN_WARNING "%s: card claims to support voltages "
		       "below the defined range. These will be ignored.\n",
		       mmc_hostname(host));
		ocr &= ~0x7F;
	}

	host->ocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage of the card?
	 */
	if (!host->ocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, host->ocr, NULL);
	if (err)
		goto err;

	mmc_release_host(host);

	err = mmc_add_card(host->card);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	/* Host was released above; re-claim after removing the card. */
	mmc_remove_card(host->card);
	host->card = NULL;
	mmc_claim_host(host);
err:
	mmc_detach_bus(host);
	mmc_release_host(host);

	printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
	       mmc_hostname(host), err);

	return err;
}
/*
 * Starting point for MMC card init (variant with a lock/unlock
 * workaround for 32nm iNAND parts without HPI).
 *
 * Called with the host claimed. On success the card is initialised and
 * registered and the host is released. On failure the bus is detached,
 * the host released, and a negative errno returned.
 *
 * Fixes vs. the previous version: the workaround loop jumped to
 * remove_card while the host was STILL claimed, so the mmc_claim_host()
 * on that path double-claimed the host (deadlock / unbalanced claim),
 * and it left err == 0 so the function reported success after tearing
 * the card down. The host is now released and err set before the jump.
 */
int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
{
	int err;
	int i = 0;

	BUG_ON(!host);
	WARN_ON(!host->claimed);	/* caller must hold the host */

	mmc_attach_bus_ops(host);

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		printk(KERN_WARNING "%s: card claims to support voltages "
		       "below the defined range. These will be ignored.\n",
		       mmc_hostname(host));
		ocr &= ~0x7F;
	}

	host->ocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage of the card?
	 */
	if (!host->ocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, host->ocr, NULL);
	if (err)
		goto err;

	/* WA : Lock/Unlock CMD in case of 32nm iNAND */
	/*check iNAND*/
	if (host->card->cid.manfid == 0x45 || host->card->cid.manfid == 0x02) {
		/*check 32nm*/
		if (!(host->card->ext_csd.hpi & 0x1)) {
			printk(KERN_DEBUG "%s: Lock-unlock started, MID=0x%x, HPI=0x%x\n",
			       __func__, host->card->cid.manfid,
			       host->card->ext_csd.hpi);
			for (i = 0 ; i < 50 ; i++) {
				if (mmc_send_lock_cmd(host, 1)) {
					printk(KERN_ERR "%s: eMMC lock CMD is failed.\n",
					       mmc_hostname(host));
					/*
					 * Host is still claimed here: drop it
					 * and report a real error before the
					 * remove_card path re-claims it.
					 */
					err = -EIO;
					mmc_release_host(host);
					goto remove_card;
				}
				if (mmc_send_lock_cmd(host, 0)) {
					printk(KERN_ERR "%s: eMMC unlock CMD is failed.\n",
					       mmc_hostname(host));
					err = -EIO;
					mmc_release_host(host);
					goto remove_card;
				}
			}
			printk(KERN_DEBUG "%s:COMPLETED\n",__func__);
		}
	}

	mmc_release_host(host);

	err = mmc_add_card(host->card);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	/* Host is unclaimed on every path reaching here. */
	mmc_remove_card(host->card);
	host->card = NULL;
	mmc_claim_host(host);
err:
	mmc_detach_bus(host);
	mmc_release_host(host);

	printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
	       mmc_hostname(host), err);

	return err;
}
/*
 * Card-detect worker (variant with rescan_disable gate and verbose
 * tracing): re-check a registered card, otherwise probe for a new one
 * (SDIO, then SD, then MMC) and manage the rescan wakelock.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err=0;
	unsigned long flags;
	int extend_wakelock = 0;

	printk("%s: %s start\n", mmc_hostname(host), __func__);

	/* Bail out early if rescanning is administratively disabled. */
	spin_lock_irqsave(&host->lock, flags);
	if (host->rescan_disable) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
		host->bus_ops->detect(host);

	/* If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond */
	if (host->bus_dead)
		extend_wakelock = 1;

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/* Nothing answered: drop the claim and power the slot down. */
	mmc_release_host(host);
	mmc_power_off(host);

out:
	printk("%s: %s rescann is out\n", mmc_hostname(host), __func__);
	if (extend_wakelock)
		wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
	else
		wake_unlock(&mmc_delayed_work_wake_lock);

	if (host->caps & MMC_CAP_NEEDS_POLL) {
		printk("%s : schedule host->detect(mmc_sd_detect)\n",__func__);
		mmc_schedule_delayed_work(&host->detect, HZ);
	}
}
/* * Starting point for MMC card init. */ int mmc_attach_mmc(struct mmc_host *host) { int err; u32 ocr; BUG_ON(!host); WARN_ON(!host->claimed); mt6575_mmc_info( "+%s--Liu\n", __func__); err = mmc_send_op_cond(host, 0, &ocr); if (err) return err; mmc_attach_bus_ops(host); if (host->ocr_avail_mmc) host->ocr_avail = host->ocr_avail_mmc; /* * We need to get OCR a different way for SPI. */ if (mmc_host_is_spi(host)) { err = mmc_spi_read_ocr(host, 1, &ocr); if (err) goto err; } /* * Sanity check the voltages that the card claims to * support. */ if (ocr & 0x7F) { printk(KERN_WARNING "%s: card claims to support voltages " "below the defined range. These will be ignored.\n", mmc_hostname(host)); ocr &= ~0x7F; } host->ocr = mmc_select_voltage(host, ocr); /* * Can we support the voltage of the card? */ if (!host->ocr) { mt6575_mmc_info("%s: error, ocr is NULL--Liu\n", __func__); err = -EINVAL; goto err; } /* * Detect and init the card. */ err = mmc_init_card(host, host->ocr, NULL); if (err) goto err; mmc_release_host(host); err = mmc_add_card(host->card); #ifdef MTK_EMMC_SUPPORT err = init_pmt(); host->card_init_complete(host); #endif mmc_claim_host(host); if (err) goto remove_card; mt6575_mmc_info( "-%s--Liu\n", __func__); return 0; remove_card: mmc_release_host(host); mmc_remove_card(host->card); mmc_claim_host(host); host->card = NULL; err: mmc_detach_bus(host); printk(KERN_ERR "%s: error %d whilst initialising MMC card\n", mmc_hostname(host), err); return err; }
/*
 * Card-detect worker: re-check a registered card, otherwise probe for
 * a new one. SDIO is probed first; if the SDIO attach fails, SD memory
 * is still attempted (combo/broken-SDIO cards) before plain MMC.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	unsigned long flags;

	/* Bail out early if rescanning is administratively disabled. */
	spin_lock_irqsave(&host->lock, flags);
	if (host->rescan_disable) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
		host->bus_ops->detect(host);

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr)) {
			/* attach released the host on failure: re-claim */
			mmc_claim_host(host);
			/* try SDMEM (but not MMC) even if SDIO is broken */
			if (mmc_send_app_op_cond(host, 0, &ocr))
				goto out_fail;

			if (mmc_attach_sd(host, ocr))
				mmc_power_off(host);
		}
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		goto out;
	}

out_fail:
	/* Nothing answered: drop the claim and power the slot down. */
	mmc_release_host(host);
	mmc_power_off(host);

out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
/*
 * Card-detect worker (old-API variant using MMC_ERR_NONE and a
 * board-level card-detect GPIO). The GPIO is re-sampled at several
 * points because the card can be yanked mid-probe.
 *
 * NOTE(review): 'fo_k' is a file-scope flag set when SD attach fails;
 * it suppresses exactly one subsequent rescan - presumably a
 * "failed once" debounce. Confirm against the rest of the file.
 */
static void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;

	/* Skip one rescan after a failed SD attach (see fo_k above). */
	if(fo_k){
		fo_k = 0;
		return;
	}

	printk(" mmc_rescan... \n");

	mmc_bus_get(host);

	if (host->bus_ops == NULL) {
		/*
		 * Only we can add a new handler, so it's safe to
		 * release the lock here.
		 */
		/* GPIO high means no card in the slot. */
		if(pollux_gpio_getpin(SDI_CD_IO)) {
			printk(" mmc_rescan 1===> no card \n");
			mmc_bus_put(host);
			return;
		}

		mmc_bus_put(host);

		mmc_claim_host(host);
		mmc_power_up(host);

		/* Re-check: the card may have been removed meanwhile. */
		if(pollux_gpio_getpin(SDI_CD_IO)) {
			printk(" mmc_rescan 2===> no card \n");
			mmc_power_off(host);
			mmc_release_host(host);
			return;
		}

		mmc_go_idle(host);

		mmc_send_if_cond(host, host->ocr_avail);

		err = mmc_send_app_op_cond(host, 0, &ocr);
		if (err == MMC_ERR_NONE) {
			/* SD card answered ACMD41; try to attach it. */
			if (mmc_attach_sd(host, ocr)){
				printk("mmc_attach_sd_error .... \n");
				mmc_power_off(host);
				fo_k = 1;
			}
		} else {
			/*
			 * If we fail to detect any SD cards then try
			 * searching for MMC cards.
			 */
			if(pollux_gpio_getpin(SDI_CD_IO)) {
				printk(" mmc_rescan 4===> no card \n");
				mmc_power_off(host);
				mmc_release_host(host);
				return;
			}

			err = mmc_send_op_cond(host, 0, &ocr);
			if (err == MMC_ERR_NONE) {
				if (mmc_attach_mmc(host, ocr))
					mmc_power_off(host);
			} else {
				/* Nothing answered at all. */
				mmc_power_off(host);
				mmc_release_host(host);
			}
		}
	} else {
		/* A bus is registered: just poll its card. */
		if (host->bus_ops->detect && !host->bus_dead)
			host->bus_ops->detect(host);

		mmc_bus_put(host);
	}
}
/*
 * _mmc_cmd_log_dump() - dump the host's circular command log.
 *
 * Writes each record either to the given seq_file @s (debugfs read) or,
 * when @s is NULL, to the kernel log via pr_info().  Record layout in
 * mmc_cmd_log[] is: cmd, arg, [timestamp if MODE_TIME], [response if
 * MODE_RESP]; mmc_cmd_log_idx points at the next slot to be written,
 * i.e. the oldest record, so the walk starts and ends there.
 *
 * NOTE(review): the wrap check runs only once per record, after all
 * increments -- this assumes mmc_cmd_log_len is a multiple of the record
 * size; verify at the allocation site.
 */
static int _mmc_cmd_log_dump(struct mmc_host *host, struct seq_file *s)
{
	int i;

	if (!host->mmc_cmd_log)
		return 0;

	/* Claim the host so the logger cannot write while we read. */
	mmc_claim_host(host);
	i = host->mmc_cmd_log_idx; /* next slot should be the oldest */
	do {
		u32 cmd = host->mmc_cmd_log[i++];
		u32 arg = host->mmc_cmd_log[i++];
		u32 resp = 0;
		u32 when = 0;

		if (host->mmc_cmd_log_mode & MMC_CMD_LOG_MODE_TIME)
			when = host->mmc_cmd_log[i++];
		if (host->mmc_cmd_log_mode & MMC_CMD_LOG_MODE_RESP)
			resp = host->mmc_cmd_log[i++];
		if (i >= host->mmc_cmd_log_len)
			i = 0;
		/* Skip empty or partial records */
		if (cmd == UINT_MAX || resp == UINT_MAX)
			continue;
		/* Absolute timestamp prefix (non-delta mode only). */
		if ((host->mmc_cmd_log_mode & MMC_CMD_LOG_MODE_TIME) &&
		    !(host->mmc_cmd_log_mode & MMC_CMD_LOG_MODE_DELTA)) {
			if (s)
				seq_printf(s, "[%u] ", when);
			else
				pr_info("[%u] ", when);
		}
		if (s)
			seq_printf(s, "CMD%d: 0x%08X", cmd & 0x3F, arg);
		else
			pr_info("CMD%d: 0x%08X", cmd & 0x3F, arg);
		if (host->mmc_cmd_log_mode & MMC_CMD_LOG_MODE_RESP) {
			if (s)
				seq_printf(s, " R:0x%08X", resp);
			else
				pr_info(" R:0x%08X", resp);
		}
		/* Delta mode prints the time as a trailing duration. */
		if ((host->mmc_cmd_log_mode & MMC_CMD_LOG_MODE_TIME) &&
		    (host->mmc_cmd_log_mode & MMC_CMD_LOG_MODE_DELTA)) {
			if (s)
				seq_printf(s, " %uns", when);
			else
				pr_info(" %uns", when);
		}
		if (s)
			seq_printf(s, "\n");
		else
			pr_info("\n");
	} while (i != host->mmc_cmd_log_idx);
	mmc_release_host(host);

	return 0;
}
/*
 * mmc_rescan() - card detection worker (early mainline variant).
 *
 * With no bus attached, probes SDIO -> SD -> MMC; with a bus attached,
 * delegates to bus_ops->detect().  On the successful attach paths the
 * function returns while the host is still claimed -- the mmc_attach_*()
 * helpers in this kernel presumably release the claim themselves; TODO
 * confirm.  Only the all-probes-failed path releases and powers off here.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;

	mmc_bus_get(host);

	if (host->bus_ops == NULL) {
		/*
		 * Only we can add a new handler, so it's safe to
		 * release the lock here.
		 */
		mmc_bus_put(host);

		mmc_claim_host(host);

		mmc_power_up(host);
		mmc_go_idle(host);

		mmc_send_if_cond(host, host->ocr_avail);

		/*
		 * First we search for SDIO...
		 */
		err = mmc_send_io_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_sdio(host, ocr))
				mmc_power_off(host);
			return;
		}

		/*
		 * ...then normal SD...
		 */
		err = mmc_send_app_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_sd(host, ocr))
				mmc_power_off(host);
			return;
		}

		/*
		 * ...and finally MMC.
		 */
		err = mmc_send_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_mmc(host, ocr))
				mmc_power_off(host);
			return;
		}

		/* Nothing responded: drop the claim and cut power. */
		mmc_release_host(host);
		mmc_power_off(host);
	} else {
		/* Existing bus: just ask it whether the card is still there. */
		if (host->bus_ops->detect && !host->bus_dead)
			host->bus_ops->detect(host);

		mmc_bus_put(host);
	}
}
/*
 * sdio_irq_thread() - per-host kernel thread servicing SDIO card IRQs.
 *
 * On hosts advertising MMC_CAP_SDIO_IRQ the thread sleeps indefinitely
 * and is woken by the host controller; otherwise it polls the card with
 * an adaptive period bounded above by idle_period.  Exits when
 * kthread_stop() is called from sdio_card_irq_put().
 */
static int sdio_irq_thread(void *_host)
{
	struct mmc_host *host = _host;
	struct sched_param param = { .sched_priority = 1 };
	unsigned long period, idle_period;
	int ret;

	/* Run as SCHED_FIFO so IRQ servicing is not starved by normal tasks. */
	sched_setscheduler(current, SCHED_FIFO, &param);

	/*
	 * We want to allow for SDIO cards to work even on non SDIO
	 * aware hosts. One thing that non SDIO host cannot do is
	 * asynchronous notification of pending SDIO card interrupts
	 * hence we poll for them in that case.
	 */
	idle_period = msecs_to_jiffies(10);
	period = (host->caps & MMC_CAP_SDIO_IRQ) ?
		MAX_SCHEDULE_TIMEOUT : idle_period;

	pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
		 mmc_hostname(host), period);

	do {
		/*
		 * We claim the host here on drivers behalf for a couple
		 * reasons:
		 *
		 * 1) it is already needed to retrieve the CCCR_INTx;
		 * 2) we want the driver(s) to clear the IRQ condition ASAP;
		 * 3) we need to control the abort condition locally.
		 *
		 * Just like traditional hard IRQ handlers, we expect SDIO
		 * IRQ handlers to be quick and to the point, so that the
		 * holding of the host lock does not cover too much work
		 * that doesn't require that lock to be held.
		 */
		ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
		if (ret)
			break;
		ret = process_sdio_pending_irqs(host->card);
		mmc_release_host(host);

		/*
		 * Give other threads a chance to run in the presence of
		 * errors.
		 */
		if (ret < 0) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule_timeout(HZ);
			set_current_state(TASK_RUNNING);
		}

		/*
		 * Adaptive polling frequency based on the assumption
		 * that an interrupt will be closely followed by more.
		 * This has a substantial benefit for network devices.
		 */
		if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
			if (ret > 0)
				period /= 2;	/* IRQs pending: poll faster */
			else {
				period++;	/* idle: back off */
				if (period > idle_period)
					period = idle_period;
			}
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (host->caps & MMC_CAP_SDIO_IRQ)
			host->ops->enable_sdio_irq(host, 1);
		if (!kthread_should_stop())
			schedule_timeout(period);
		set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	/* Make sure controller-level SDIO IRQs are off before exiting. */
	if (host->caps & MMC_CAP_SDIO_IRQ)
		host->ops->enable_sdio_irq(host, 0);

	pr_debug("%s: IRQ thread exiting with code %d\n",
		 mmc_hostname(host), ret);

	return ret;
}

/*
 * sdio_card_irq_get() - take a reference on the host's SDIO IRQ machinery,
 * starting the IRQ thread on the first reference.
 * Caller must hold the claimed host.  Returns 0 or the kthread_run() error.
 */
static int sdio_card_irq_get(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);

	if (!host->sdio_irqs++) {
		atomic_set(&host->sdio_irq_thread_abort, 0);
		host->sdio_irq_thread =
			kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
				mmc_hostname(host));
		if (IS_ERR(host->sdio_irq_thread)) {
			int err = PTR_ERR(host->sdio_irq_thread);
			/* Thread failed to start: undo the refcount above. */
			host->sdio_irqs--;
			return err;
		}
	}

	return 0;
}

/*
 * sdio_card_irq_put() - drop an SDIO IRQ reference, stopping the IRQ
 * thread when the last reference goes away.
 * Caller must hold the claimed host.
 */
static int sdio_card_irq_put(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);
	BUG_ON(host->sdio_irqs < 1);

	if (!--host->sdio_irqs) {
		/* Ask __mmc_claim_host() in the thread to abort, then reap it. */
		atomic_set(&host->sdio_irq_thread_abort, 1);
		kthread_stop(host->sdio_irq_thread);
	}

	return 0;
}

/* If there is only 1 function registered set sdio_single_irq */
static void sdio_single_irq_set(struct mmc_card *card)
{
	struct sdio_func *func;
	int i;

	/* Cache the lone IRQ-owning function so dispatch can skip CCCR reads. */
	card->sdio_single_irq = NULL;
	if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
	    card->host->sdio_irqs == 1)
		for (i = 0; i < card->sdio_funcs; i++) {
			func = card->sdio_func[i];
			if (func && func->irq_handler) {
				card->sdio_single_irq = func;
				break;
			}
		}
}

/**
 *	sdio_claim_irq - claim the IRQ for a SDIO function
 *	@func: SDIO function
 *	@handler: IRQ handler callback
 *
 *	Claim and activate the IRQ for the given SDIO function. The provided
 *	handler will be called when that IRQ is asserted.  The host is always
 *	claimed already when the handler is called so the handler must not
 *	call sdio_claim_host() nor sdio_release_host().
 */
int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
	int ret;
	unsigned char reg;

	BUG_ON(!func);
	BUG_ON(!func->card);

	pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));

	if (func->irq_handler) {
		pr_debug("SDIO: IRQ for %s already in use.\n",
			sdio_func_id(func));
		return -EBUSY;
	}

	/* Set this function's enable bit in the CCCR interrupt-enable reg. */
	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
	if (ret)
		return ret;

	reg |= 1 << func->num;

	reg |= 1; /* Master interrupt enable */

	ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
	if (ret)
		return ret;

	func->irq_handler = handler;
	ret = sdio_card_irq_get(func->card);
	if (ret)
		func->irq_handler = NULL;
	sdio_single_irq_set(func->card);

	return ret;
}
int stub_sendcmd(struct mmc_card *card, unsigned int cmd, unsigned long arg, unsigned int len, unsigned char *buff) { int returnVal = -1; unsigned char *kbuffer = NULL; int direction = 0; int result = 0; if (card == NULL) { printk(KERN_DEBUG "stub_sendcmd: card is null error\n"); return -ENXIO; } kbuffer = kmalloc(len, GFP_KERNEL); if (kbuffer == NULL) { printk(KERN_DEBUG "malloc failed\n"); return -ENOMEM; } memset(kbuffer, 0x00, len); printk(KERN_DEBUG "%s]cmd=0x%x,len=%d\n ", __func__, cmd, len); mmc_claim_host(card->host); switch (cmd) { case ACMD43: direction = MMC_DATA_READ; returnVal = CPRM_CMD_SecureRW(card, SD_ACMD43_GET_MKB, direction, arg, kbuffer, len); printk(KERN_DEBUG "SD_ACMD43_GET_MKB:0x%x\n", returnVal); break; case ACMD44: direction = MMC_DATA_READ; returnVal = CPRM_CMD_SecureRW(card, SD_ACMD44_GET_MID, direction, 0, kbuffer, len); printk(KERN_DEBUG "SD_ACMD44_GET_MID:0x%x\n", returnVal); break; case ACMD45: direction = MMC_DATA_WRITE; result = copy_from_user((void *)kbuffer, (void *)buff, len); returnVal = CPRM_CMD_SecureRW(card, SD_ACMD45_SET_CER_RN1, direction, 0, kbuffer, len); printk(KERN_DEBUG "SD_ACMD45_SET_CER_RN1:0x%x\n", returnVal); break; case ACMD46: direction = MMC_DATA_READ; returnVal = CPRM_CMD_SecureRW(card, SD_ACMD46_GET_CER_RN2, direction, 0, kbuffer, len); printk(KERN_DEBUG "SD_ACMD46_GET_CER_RN2:0x%x\n", returnVal); break; case ACMD47: direction = MMC_DATA_WRITE; result = copy_from_user((void *)kbuffer, (void *)buff, len); returnVal = CPRM_CMD_SecureRW(card, SD_ACMD47_SET_CER_RES2, direction, 0, kbuffer, len); printk(KERN_DEBUG "SD_ACMD47_SET_CER_RES2:0x%x\n", returnVal); break; case ACMD48: direction = MMC_DATA_READ; returnVal = CPRM_CMD_SecureRW(card, SD_ACMD48_GET_CER_RES1, direction, 0, kbuffer, len); printk(KERN_DEBUG "SD_ACMD48_GET_CER_RES1:0x%x\n", returnVal); break; case ACMD25: direction = MMC_DATA_WRITE; result = copy_from_user((void *)kbuffer, (void *)buff, len); returnVal = CPRM_CMD_SecureMultiRW(card, 
SD_ACMD25_SECURE_WRITE_MULTI_BLOCK, direction, 0, kbuffer, len); printk(KERN_DEBUG "SD_ACMD25_SECURE_WRITE_MULTI_BLOCK[%d]=%d\n", len, returnVal); break; case ACMD18: direction = MMC_DATA_READ; returnVal = CPRM_CMD_SecureMultiRW(card, SD_ACMD18_SECURE_READ_MULTI_BLOCK, direction, 0, kbuffer, len); printk(KERN_DEBUG "SD_ACMD18_SECURE_READ_MULTI_BLOCK [%d]=%d\n", len, returnVal); break; case ACMD13: break; default: printk(KERN_DEBUG " %s ] : CMD [ %x ] ERROR", __func__, cmd); break; } if (returnVal == 0) { if (direction == MMC_DATA_READ) result = copy_to_user((void *)buff, (void *)kbuffer, len); result = returnVal; printk(KERN_DEBUG "stub_sendcmd SDAS_E_SUCCESS\n"); } else { printk(KERN_DEBUG "stub_sendcmd SDAS_E_FAIL\n"); result = -EIO; } mmc_release_host(card->host); kfree(kbuffer); return result; }
/*
 * mmc_rescan() - vendor (Samsung) card detection worker with wakelock
 * accounting.
 *
 * A shared wakelock reference count (wakelock_refs) is decremented on
 * every exit path; when it reaches zero the wakelock is converted to a
 * 500 ms timeout so userspace can react to the event before suspend.
 * carddetect_lock serializes rescans between the MoviNAND and SD hosts.
 *
 * NOTE(review): 'extend_wakelock' is computed but never consumed -- the
 * #if 0 block that used it is disabled; presumably intentional, verify.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	unsigned long flags;
	int extend_wakelock = 0;

	spin_lock_irqsave(&host->lock, flags);

	/* Rescans disabled (e.g. during suspend): drop our wakelock ref. */
	if (host->rescan_disable) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (atomic_dec_return(&wakelock_refs) > 0) {
			printk(KERN_DEBUG "Another host want the wakelock : %d\n",
				atomic_read(&wakelock_refs));
		}else {
			printk(KERN_DEBUG "unlock case1 : mmc%d: wake_lock_timeout 0.5 sec %d\n",
				host->index, atomic_read(&wakelock_refs));
			wake_lock_timeout(&mmc_delayed_work_wake_lock,
				msecs_to_jiffies(500));
		}
		return;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	//[NAGSM_Android_HDLNC_SDcard_shinjonghyun_20100504 : mutual exclusion when MoviNand and SD cardusing using this funtion
	mutex_lock(&host->carddetect_lock);
	//]NAGSM_Android_HDLNC_SDcard_shinjonghyun_20100504 : mutual exclusion when MoviNand and SD cardusing using this funtion

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) {
		/* Card-detect says empty: tear the registered bus down now. */
		if(host->ops->get_cd && host->ops->get_cd(host) == 0) {
			if(host->bus_ops->remove)
				host->bus_ops->remove(host);

			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
		}
		else
			host->bus_ops->detect(host);
	}
	/* If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond */
	if (host->bus_dead)
		extend_wakelock = 1;

	mmc_bus_put(host);

	mmc_bus_get(host);
	printk(KERN_DEBUG "*** DEBUG : start %s (mmc%d)***\n", __func__, host->index);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);
	mmc_power_up(host);
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	printk(KERN_DEBUG "*** DEBUG : First we search for SDIO...(%d)***\n", host->index);
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	printk(KERN_DEBUG "*** DEBUG : ...then normal SD...(%d) ***\n", host->index);
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	printk(KERN_DEBUG "*** DEBUG : ...and finally MMC. (%d)***\n", host->index);
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	printk(KERN_DEBUG "*** DEBUG : end %s (mmc%d)***\n", __func__, host->index);
	mmc_release_host(host);
	mmc_power_off(host);

out:
#if 0
	//if (extend_wakelock)
	//	wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
	//else
	//	wake_unlock(&mmc_delayed_work_wake_lock);
#else
	/* Drop this host's wakelock reference (see header comment). */
	if (atomic_dec_return(&wakelock_refs) > 0) {
		printk(KERN_DEBUG "Another host want the wakelock : %d\n",
			atomic_read(&wakelock_refs));
	} else {
		printk(KERN_DEBUG "unlock case2 : mmc%d: wake_lock_timeout 0.5 sec %d\n",
			host->index, atomic_read(&wakelock_refs));
		wake_lock_timeout(&mmc_delayed_work_wake_lock,
			msecs_to_jiffies(500));
	}
#endif

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);

	//[NAGSM_Android_HDLNC_SDcard_shinjonghyun_20100504 : mutual exclusion when MoviNand and SD cardusing using this funtion
	mutex_unlock(&host->carddetect_lock);
	//]NAGSM_Android_HDLNC_SDcard_shinjonghyun_20100504 : mutual exclusion when MoviNand and SD cardusing using this funtion
}
static int simple_sd_ioctl_multi_rw(struct msdc_ioctl* msdc_ctl) { char l_buf[512]; struct scatterlist msdc_sg; struct mmc_data msdc_data; struct mmc_command msdc_cmd; struct mmc_command msdc_stop; int ret = 0; #ifdef MTK_MSDC_USE_CMD23 struct mmc_command msdc_sbc; #endif struct mmc_request msdc_mrq; struct msdc_host *host_ctl; host_ctl = mtk_msdc_host[msdc_ctl->host_num]; BUG_ON(!host_ctl); BUG_ON(!host_ctl->mmc); BUG_ON(!host_ctl->mmc->card); mmc_claim_host(host_ctl->mmc); #if DEBUG_MMC_IOCTL printk("user want access %d partition\n",msdc_ctl->partition); #endif ret = mmc_send_ext_csd(host_ctl->mmc->card, l_buf); if (ret) { printk("mmc_send_ext_csd error, multi rw\n"); goto multi_end; } #ifdef MTK_EMMC_SUPPORT switch (msdc_ctl->partition){ case EMMC_PART_BOOT1: if (0x1 != (l_buf[179] & 0x7)){ /* change to access boot partition 1 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x1; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; case EMMC_PART_BOOT2: if (0x2 != (l_buf[179] & 0x7)){ /* change to access boot partition 2 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x2; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; default: /* make sure access partition is user data area */ if (0 != (l_buf[179] & 0x7)){ /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; } #endif if(msdc_ctl->total_size > 64*1024){ msdc_ctl->result = -1; goto multi_end; } memset(&msdc_data, 0, sizeof(struct mmc_data)); memset(&msdc_mrq, 0, sizeof(struct mmc_request)); memset(&msdc_cmd, 0, sizeof(struct mmc_command)); memset(&msdc_stop, 0, sizeof(struct mmc_command)); #ifdef MTK_MSDC_USE_CMD23 memset(&msdc_sbc, 0, sizeof(struct mmc_command)); #endif msdc_mrq.cmd = &msdc_cmd; msdc_mrq.data = &msdc_data; if(msdc_ctl->trans_type) dma_force[host_ctl->id] = FORCE_IN_DMA; else dma_force[host_ctl->id] = FORCE_IN_PIO; if (msdc_ctl->iswrite){ msdc_data.flags = MMC_DATA_WRITE; msdc_cmd.opcode = 
MMC_WRITE_MULTIPLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size)){ dma_force[host_ctl->id] = FORCE_NOTHING; ret = -EFAULT; goto multi_end; } } else { /* called from other kernel module */ memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size); } } else { msdc_data.flags = MMC_DATA_READ; msdc_cmd.opcode = MMC_READ_MULTIPLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; memset(sg_msdc_multi_buffer, 0 , msdc_ctl->total_size); } #ifdef MTK_MSDC_USE_CMD23 if ((mmc_card_mmc(host_ctl->mmc->card) || (mmc_card_sd(host_ctl->mmc->card) && host_ctl->mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)) && !(host_ctl->mmc->card->quirks & MMC_QUIRK_BLK_NO_CMD23)){ msdc_mrq.sbc = &msdc_sbc; msdc_mrq.sbc->opcode = MMC_SET_BLOCK_COUNT; #ifdef MTK_MSDC_USE_CACHE /* if ioctl access cacheable partition data, there is on flush mechanism in msdc driver * so do reliable write .*/ if(mmc_card_mmc(host_ctl->mmc->card) && (host_ctl->mmc->card->ext_csd.cache_ctrl & 0x1) && (msdc_cmd.opcode == MMC_WRITE_MULTIPLE_BLOCK)) msdc_mrq.sbc->arg = msdc_data.blocks | (1<<31); else msdc_mrq.sbc->arg = msdc_data.blocks; #else msdc_mrq.sbc->arg = msdc_data.blocks; #endif msdc_mrq.sbc->flags = MMC_RSP_R1 | MMC_CMD_AC; } #endif msdc_cmd.arg = msdc_ctl->address; if (!mmc_card_blockaddr(host_ctl->mmc->card)){ printk("this device use byte address!!\n"); msdc_cmd.arg <<= 9; } msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; msdc_stop.opcode = MMC_STOP_TRANSMISSION; msdc_stop.arg = 0; msdc_stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; msdc_data.stop = &msdc_stop; msdc_data.blksz = 512; msdc_data.sg = &msdc_sg; msdc_data.sg_len = 1; #if DEBUG_MMC_IOCTL printk("total size is %d\n",msdc_ctl->total_size); #endif sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size); mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card); 
mmc_wait_for_req(host_ctl->mmc, &msdc_mrq); if (!msdc_ctl->iswrite){ if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_to_user(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size)){ dma_force[host_ctl->id] = FORCE_NOTHING; ret = -EFAULT; goto multi_end; } } else { /* called from other kernel module */ memcpy(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size); } } if (msdc_ctl->partition){ ret = mmc_send_ext_csd(host_ctl->mmc->card,l_buf); if (ret) { printk("mmc_send_ext_csd error, multi rw2\n"); goto multi_end; } if (l_buf[179] & 0x7) { /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } } multi_end: mmc_release_host(host_ctl->mmc); if (ret) { msdc_ctl->result = ret; } if (msdc_cmd.error) msdc_ctl->result = msdc_cmd.error; if (msdc_data.error){ msdc_ctl->result = msdc_data.error; } else { msdc_ctl->result = 0; } dma_force[host_ctl->id] = FORCE_NOTHING; return msdc_ctl->result; }
static int mmc_queue_thread(void *d) { struct mmc_queue *mq = d; struct request_queue *q = mq->queue; struct request *req; current->flags |= PF_MEMALLOC; down(&mq->thread_sem); do { req = NULL; /* Must be set to NULL at each iteration */ spin_lock_irq(q->queue_lock); set_current_state(TASK_INTERRUPTIBLE); if (!blk_queue_plugged(q)) req = blk_fetch_request(q); mq->req = req; spin_unlock_irq(q->queue_lock); if (!req) { if (kthread_should_stop()) { set_current_state(TASK_RUNNING); break; } up(&mq->thread_sem); schedule(); down(&mq->thread_sem); continue; } set_current_state(TASK_RUNNING); #ifdef CONFIG_MMC_AUTO_SUSPEND mmc_auto_suspend(mq->card->host, 0); #endif #ifdef CONFIG_MMC_BLOCK_PARANOID_RESUME if (mq->check_status) { struct mmc_command cmd; int retries = 3; unsigned long delay = jiffies + HZ; do { int err; cmd.opcode = MMC_SEND_STATUS; cmd.arg = mq->card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; mmc_claim_host(mq->card->host); err = mmc_wait_for_cmd(mq->card->host, &cmd, 5); mmc_release_host(mq->card->host); if (err) { printk(KERN_ERR "%s: failed to get status (%d)\n", __func__, err); msleep(5); retries--; continue; } if (time_after(jiffies, delay)) { printk(KERN_ERR "failed to get card ready\n"); break; } printk(KERN_DEBUG "%s: status 0x%.8x\n", __func__, cmd.resp[0]); } while (retries && (!(cmd.resp[0] & R1_READY_FOR_DATA) || (R1_CURRENT_STATE(cmd.resp[0]) == 7))); mq->check_status = 0; } #endif if (!(mq->issue_fn(mq, req))) printk(KERN_ERR "mmc_blk_issue_rq failed!!\n"); } while (1); up(&mq->thread_sem); return 0; }
static int simple_sd_ioctl_single_rw(struct msdc_ioctl* msdc_ctl) { char l_buf[512]; struct scatterlist msdc_sg; struct mmc_data msdc_data; struct mmc_command msdc_cmd; struct mmc_request msdc_mrq; struct msdc_host *host_ctl; int ret = 0; host_ctl = mtk_msdc_host[msdc_ctl->host_num]; BUG_ON(!host_ctl); BUG_ON(!host_ctl->mmc); BUG_ON(!host_ctl->mmc->card); #ifdef MTK_MSDC_USE_CACHE if(msdc_ctl->iswrite && mmc_card_mmc(host_ctl->mmc->card) && (host_ctl->mmc->card->ext_csd.cache_ctrl & 0x1)) return simple_sd_ioctl_multi_rw(msdc_ctl); #endif mmc_claim_host(host_ctl->mmc); #if DEBUG_MMC_IOCTL printk("user want access %d partition\n",msdc_ctl->partition); #endif ret = mmc_send_ext_csd(host_ctl->mmc->card, l_buf); if (ret) { printk("mmc_send_ext_csd error, single rw\n"); goto single_end; } #ifdef MTK_EMMC_SUPPORT switch (msdc_ctl->partition){ case EMMC_PART_BOOT1: if (0x1 != (l_buf[179] & 0x7)){ /* change to access boot partition 1 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x1; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; case EMMC_PART_BOOT2: if (0x2 != (l_buf[179] & 0x7)){ /* change to access boot partition 2 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x2; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; default: /* make sure access partition is user data area */ if (0 != (l_buf[179] & 0x7)){ /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; } #endif if(msdc_ctl->total_size > 512){ msdc_ctl->result = -1; goto single_end; } #if DEBUG_MMC_IOCTL printk("start MSDC_SINGLE_READ_WRITE !!\n"); #endif memset(&msdc_data, 0, sizeof(struct mmc_data)); memset(&msdc_mrq, 0, sizeof(struct mmc_request)); memset(&msdc_cmd, 0, sizeof(struct mmc_command)); msdc_mrq.cmd = &msdc_cmd; msdc_mrq.data = &msdc_data; if(msdc_ctl->trans_type) dma_force[host_ctl->id] = FORCE_IN_DMA; else dma_force[host_ctl->id] = FORCE_IN_PIO; if (msdc_ctl->iswrite){ msdc_data.flags = 
MMC_DATA_WRITE; msdc_cmd.opcode = MMC_WRITE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, 512)){ dma_force[host_ctl->id] = FORCE_NOTHING; ret = -EFAULT; goto single_end; } } else { /* called from other kernel module */ memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, 512); } } else { msdc_data.flags = MMC_DATA_READ; msdc_cmd.opcode = MMC_READ_SINGLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; memset(sg_msdc_multi_buffer, 0 , 512); } msdc_cmd.arg = msdc_ctl->address; if (!mmc_card_blockaddr(host_ctl->mmc->card)){ printk("the device is used byte address!\n"); msdc_cmd.arg <<= 9; } msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; msdc_data.stop = NULL; msdc_data.blksz = 512; msdc_data.sg = &msdc_sg; msdc_data.sg_len = 1; #if DEBUG_MMC_IOCTL printk("single block: ueser buf address is 0x%p!\n",msdc_ctl->buffer); #endif sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size); mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card); mmc_wait_for_req(host_ctl->mmc, &msdc_mrq); if (!msdc_ctl->iswrite){ if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_to_user(msdc_ctl->buffer,sg_msdc_multi_buffer,512)){ dma_force[host_ctl->id] = FORCE_NOTHING; ret = -EFAULT; goto single_end; } } else { /* called from other kernel module */ memcpy(msdc_ctl->buffer,sg_msdc_multi_buffer,512); } } if (msdc_ctl->partition){ ret = mmc_send_ext_csd(host_ctl->mmc->card,l_buf); if (ret) { printk("mmc_send_ext_csd error, single rw2\n"); goto single_end; } if (l_buf[179] & 0x7) { /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } } single_end: mmc_release_host(host_ctl->mmc); if (ret) { msdc_ctl->result = ret; } if (msdc_cmd.error) msdc_ctl->result= msdc_cmd.error; if (msdc_data.error) msdc_ctl->result= msdc_data.error; else msdc_ctl->result= 0; dma_force[host_ctl->id] = 
FORCE_NOTHING; return msdc_ctl->result; }
int simple_sd_ioctl_multi_rw(struct msdc_ioctl* msdc_ctl) { char l_buf[512]; struct scatterlist msdc_sg; struct mmc_data msdc_data; struct mmc_command msdc_cmd; struct mmc_command msdc_stop; struct mmc_request msdc_mrq; struct msdc_host *host_ctl; host_ctl = mtk_msdc_host[msdc_ctl->host_num]; BUG_ON(!host_ctl); BUG_ON(!host_ctl->mmc); BUG_ON(!host_ctl->mmc->card); mmc_claim_host(host_ctl->mmc); #if DEBUG_MMC_IOCTL printk("user want access %d partition\n",msdc_ctl->partition); #endif mmc_send_ext_csd(host_ctl->mmc->card, l_buf); switch (msdc_ctl->partition){ case BOOT_PARTITION_1: if (0x1 != (l_buf[179] & 0x7)){ /* change to access boot partition 1 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x1; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; case BOOT_PARTITION_2: if (0x2 != (l_buf[179] & 0x7)){ /* change to access boot partition 2 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x2; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; default: /* make sure access partition is user data area */ if (0 != (l_buf[179] & 0x7)){ /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; } if(msdc_ctl->total_size > 64*1024){ msdc_ctl->result = -1; return msdc_ctl->result; } memset(&msdc_data, 0, sizeof(struct mmc_data)); memset(&msdc_mrq, 0, sizeof(struct mmc_request)); memset(&msdc_cmd, 0, sizeof(struct mmc_command)); memset(&msdc_stop, 0, sizeof(struct mmc_command)); msdc_mrq.cmd = &msdc_cmd; msdc_mrq.data = &msdc_data; if(msdc_ctl->trans_type) dma_force[host_ctl->id] = FORCE_IN_DMA; else dma_force[host_ctl->id] = FORCE_IN_PIO; if (msdc_ctl->iswrite){ msdc_data.flags = MMC_DATA_WRITE; msdc_cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size)){ dma_force[host_ctl->id] = FORCE_NOTHING; return -EFAULT; } } else { 
/* called from other kernel module */ memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size); } } else { msdc_data.flags = MMC_DATA_READ; msdc_cmd.opcode = MMC_READ_MULTIPLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; memset(sg_msdc_multi_buffer, 0 , msdc_ctl->total_size); } msdc_cmd.arg = msdc_ctl->address; if (!mmc_card_blockaddr(host_ctl->mmc->card)){ printk("this device use byte address!!\n"); msdc_cmd.arg <<= 9; } msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; msdc_stop.opcode = MMC_STOP_TRANSMISSION; msdc_stop.arg = 0; msdc_stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; msdc_data.stop = &msdc_stop; msdc_data.blksz = 512; msdc_data.sg = &msdc_sg; msdc_data.sg_len = 1; #if DEBUG_MMC_IOCTL printk("total size is %d\n",msdc_ctl->total_size); #endif sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size); mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card); mmc_wait_for_req(host_ctl->mmc, &msdc_mrq); if (!msdc_ctl->iswrite){ if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_to_user(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size)){ dma_force[host_ctl->id] = FORCE_NOTHING; return -EFAULT; } } else { /* called from other kernel module */ memcpy(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size); } } if (msdc_ctl->partition){ mmc_send_ext_csd(host_ctl->mmc->card,l_buf); if (l_buf[179] & 0x7) { /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } } mmc_release_host(host_ctl->mmc); if (msdc_cmd.error) msdc_ctl->result = msdc_cmd.error; if (msdc_data.error){ msdc_ctl->result = msdc_data.error; } else { msdc_ctl->result = 0; } dma_force[host_ctl->id] = FORCE_NOTHING; return msdc_ctl->result; }
/*
 * autok_release_host() - release the MMC host that was claimed for
 * auto-K tuning, then log the release for debugging.
 */
void autok_release_host(struct msdc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	mmc_release_host(mmc);
	pr_debug("[%s] msdc%d host released\n", __func__, host->id);
}
/*
 * mmc_rescan() - Android-vendor card detection worker with wakelock
 * support.
 *
 * Probes SDIO -> SD -> MMC when no bus is attached.  extend_wakelock is
 * set whenever something happened userspace should see (card attached or
 * bus died); in that case the wakelock is held for HZ/2 after the scan,
 * otherwise it is dropped immediately.  On the successful attach paths
 * the claimed host is presumably released inside mmc_attach_*() -- TODO
 * confirm; only the all-probes-failed path releases it here.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err = 0;
	int extend_wakelock = 0;

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) {
		host->bus_ops->detect(host);

		/* If the card was removed the bus will be marked
		 * as dead - extend the wakelock so userspace
		 * can respond */
		if (host->bus_dead)
			extend_wakelock = 1;
	}

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/* Nothing responded: drop the claim and cut power. */
	mmc_release_host(host);
	mmc_power_off(host);

out:
	if (extend_wakelock)
		wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
	else
		wake_unlock(&mmc_delayed_work_wake_lock);

	/*
	 * Re-poll on polling hosts; also keep retrying slot 0 while the
	 * probe failed and the card-detect GPIO (49) still reads "present".
	 * NOTE(review): GPIO 49 is board-specific -- presumably active-low
	 * card detect; confirm against the board file.
	 */
	if ((host->caps & MMC_CAP_NEEDS_POLL) ||
	    (host->index==0 && err && !gpio_get_value( 49 )))
		mmc_schedule_delayed_work(&host->detect, HZ);
}
static int mmc_ext_csd_open(struct inode *inode, struct file *filp) #endif { #ifdef CONFIG_MACH_LGE /* LGE_CHANGE * http://www.mail-archive.com/[email protected]/msg10669.html * 2012-03-09, [email protected] */ struct mmc_card *card = s->private; #else struct mmc_card *card = inode->i_private; char *buf; ssize_t n = 0; #endif u8 *ext_csd; #ifdef CONFIG_MACH_LGE /* LGE_CHANGE * http://www.mail-archive.com/[email protected]/msg10669.html * 2012-03-09, [email protected] */ u8 ext_csd_rev; int err; const char *str; #else int err, i; buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); if (!buf) return -ENOMEM; #endif ext_csd = kmalloc(512, GFP_KERNEL); if (!ext_csd) { err = -ENOMEM; goto out_free; } mmc_claim_host(card->host); err = mmc_send_ext_csd(card, ext_csd); mmc_release_host(card->host); if (err) goto out_free; #ifdef CONFIG_MACH_LGE /* LGE_CHANGE * http://www.mail-archive.com/[email protected]/msg10669.html * 2012-03-09, [email protected] */ ext_csd_rev = ext_csd[192]; #else for (i = 511; i >= 0; i--) n += sprintf(buf + n, "%02x", ext_csd[i]); n += sprintf(buf + n, "\n"); BUG_ON(n != EXT_CSD_STR_LEN); filp->private_data = buf; kfree(ext_csd); return 0; #endif #ifdef CONFIG_MACH_LGE /* LGE_CHANGE * http://www.mail-archive.com/[email protected]/msg10669.html * 2012-03-09, [email protected] */ switch (ext_csd_rev) { case 6: str = "4.5"; break; case 5: str = "4.41"; break; case 3: str = "4.3"; break; case 2: str = "4.2"; break; case 1: str = "4.1"; break; case 0: str = "4.0"; break; default: goto out_free; } seq_printf(s, "Extended CSD rev 1.%d (MMC %s)\n", ext_csd_rev, str); if (ext_csd_rev < 3) goto out_free; /* No ext_csd */ /* Parse the Extended CSD registers. * Reserved bit should be read as "0" in case of spec older * than A441. 
*/ seq_printf(s, "[504] Supported Command Sets, s_cmd_set: 0x%02x\n", ext_csd[504]); seq_printf(s, "[503] HPI features, hpi_features: 0x%02x\n", ext_csd[503]); seq_printf(s, "[502] Background operations support, bkops_support: 0x%02x\n", ext_csd[502]); if (ext_csd_rev >= 6) { seq_printf(s, "max_packed_reads: 0x%02x\n", ext_csd[501]); seq_printf(s, "max_packed_writes: 0x%02x\n", ext_csd[500]); seq_printf(s, "data_tag_support: 0x%02x\n", ext_csd[499]); seq_printf(s, "tag_unit_size: 0x%02x\n", ext_csd[498]); seq_printf(s, "tag_res_size: 0x%02x\n", ext_csd[497]); seq_printf(s, "context_capabilities: 0x%02x\n", ext_csd[496]); seq_printf(s, "large_unit_size_m1: 0x%02x\n", ext_csd[495]); seq_printf(s, "ext_support: 0x%02x\n", ext_csd[494]); seq_printf(s, "generic_cmd6_time: 0x%02x\n", ext_csd[248]); seq_printf(s, "power_off_long_time: 0x%02x\n", ext_csd[247]); seq_printf(s, "cache_size %d KiB\n", ext_csd[249] << 0 | (ext_csd[250] << 8) | (ext_csd[251] << 16) | (ext_csd[252] << 24)); } /* A441: Reserved [501:247] A43: reserved [246:229] */ if (ext_csd_rev >= 5) { seq_printf(s, "[241] 1st initialization time after partitioning, ini_timeout_ap: 0x%02x\n", ext_csd[241]); /* A441: reserved [240] */ seq_printf(s, "[239] Power class for 52MHz, DDR at 3.6V, pwr_cl_ddr_52_360: 0x%02x\n", ext_csd[239]); seq_printf(s, "[238] POwer class for 52MHz, DDR at 1.95V, pwr_cl_ddr_52_195: 0x%02x\n", ext_csd[238]); /* A441: reserved [237-236] */ if (ext_csd_rev >= 6) { seq_printf(s, "pwr_cl_200_360: 0x%02x\n", ext_csd[237]); seq_printf(s, "pwr_cl_200_195: 0x%02x\n", ext_csd[236]); } seq_printf(s, "[235] Minimun Write Performance for 8bit at 52MHz in DDR mode, min_perf_ddr_w_8_52: 0x%02x\n", ext_csd[235]); seq_printf(s, "[234] Minimun Read Performance for 8bit at 52MHz in DDR modemin_perf_ddr_r_8_52: 0x%02x\n", ext_csd[234]); /* A441: reserved [233] */ seq_printf(s, "[232] TRIM Multiplier, trim_mult: 0x%02x\n", ext_csd[232]); seq_printf(s, "[231] Secure Feature support, sec_feature_support: 
0x%02x\n", ext_csd[231]); } if (ext_csd_rev == 5) { /* Obsolete in 4.5 */ seq_printf(s, "[230] Secure Erase Multiplier, sec_erase_mult: 0x%02x\n", ext_csd[230]); seq_printf(s, "[229] Secure TRIM Multiplier, sec_trim_mult: 0x%02x\n", ext_csd[229]); } seq_printf(s, "[228] Boot information, boot_info: 0x%02x\n", ext_csd[228]); /* A441/A43: reserved [227] */ seq_printf(s, "[226] Boot partition size, boot_size_mult : 0x%02x\n", ext_csd[226]); seq_printf(s, "[225] Access size, acc_size: 0x%02x\n", ext_csd[225]); seq_printf(s, "[224] High-capacity erase unit size, hc_erase_grp_size: 0x%02x\n", ext_csd[224]); seq_printf(s, "[223] High-capacity erase timeout, erase_timeout_mult: 0x%02x\n", ext_csd[223]); seq_printf(s, "[222] Reliable write sector count, rel_wr_sec_c: 0x%02x\n", ext_csd[222]); seq_printf(s, "[221] High-capacity write protect group size, hc_wp_grp_size: 0x%02x\n", ext_csd[221]); seq_printf(s, "[220] Sleep current(VCC), s_c_vcc: 0x%02x\n", ext_csd[220]); seq_printf(s, "[219] Sleep current(VCCQ), s_c_vccq: 0x%02x\n", ext_csd[219]); /* A441/A43: reserved [218] */ seq_printf(s, "[217] Sleep/awake timeout, s_a_timeout: 0x%02x\n", ext_csd[217]); /* A441/A43: reserved [216] */ seq_printf(s, "[215:212] Sector Count, sec_count: 0x%08x\n", (ext_csd[215] << 24) |(ext_csd[214] << 16) | (ext_csd[213] << 8) | ext_csd[212]); /* A441/A43: reserved [211] */ seq_printf(s, "[210] Minimum Write Performance for 8bit at 52MHz, min_perf_w_8_52: 0x%02x\n", ext_csd[210]); seq_printf(s, "[209] Minimum Read Performance for 8bit at 52MHz, min_perf_r_8_52: 0x%02x\n", ext_csd[209]); seq_printf(s, "[208] Minimum Write Performance for 8bit at 26MHz, for 4bit at 52MHz, min_perf_w_8_26_4_52: 0x%02x\n", ext_csd[208]); seq_printf(s, "[207] Minimum Read Performance for 8bit at 26MHz, for 4bit at 52MHz, min_perf_r_8_26_4_52: 0x%02x\n", ext_csd[207]); seq_printf(s, "[206] Minimum Write Performance for 4bit at 26MHz, min_perf_w_4_26: 0x%02x\n", ext_csd[206]); seq_printf(s, "[205] Minimum Read 
Performance for 4bit at 26MHz, min_perf_r_4_26: 0x%02x\n", ext_csd[205]); /* A441/A43: reserved [204] */ seq_printf(s, "[203] Power class for 26MHz at 3.6V, pwr_cl_26_360: 0x%02x\n", ext_csd[203]); seq_printf(s, "[202] Power class for 52MHz at 3.6V, pwr_cl_52_360: 0x%02x\n", ext_csd[202]); seq_printf(s, "[201] Power class for 26MHz at 1.95V, pwr_cl_26_195: 0x%02x\n", ext_csd[201]); seq_printf(s, "[200] Power class for 52MHz at 1.95V, pwr_cl_52_195: 0x%02x\n", ext_csd[200]); /* A43: reserved [199:198] */ if (ext_csd_rev >= 5) { seq_printf(s, "[199] Partition switching timing, partition_switch_time: 0x%02x\n", ext_csd[199]); seq_printf(s, "[198] Out-of-interrupt busy timing, out_of_interrupt_time: 0x%02x\n", ext_csd[198]); } /* A441/A43: reserved [197] [195] [193] [190] [188] * [186] [184] [182] [180] [176] */ if (ext_csd_rev >= 6) seq_printf(s, "driver_strength: 0x%02x\n", ext_csd[197]); seq_printf(s, "[196] Card type, card_type: 0x%02x\n", ext_csd[196]); seq_printf(s, "[194] CSD structure version, csd_structure: 0x%02x\n", ext_csd[194]); seq_printf(s, "[192] Extended CSD revision, ext_csd_rev: 0x%02x\n", ext_csd[192]); seq_printf(s, "[191] Command set, cmd_set: 0x%02x\n", ext_csd[191]); seq_printf(s, "[189] Command set revision, cmd_set_rev: 0x%02x\n", ext_csd[189]); seq_printf(s, "[187] Power class, power_class: 0x%02x\n", ext_csd[187]); seq_printf(s, "[185] High-speed interface timing, hs_timing: 0x%02x\n", ext_csd[185]); /* bus_width: ext_csd[183] not readable */ seq_printf(s, "[181] Erased memory content, erased_mem_cont: 0x%02x\n", ext_csd[181]); seq_printf(s, "[179] Partition configuration, partition_config: 0x%02x\n", ext_csd[179]); seq_printf(s, "[178] Boot config protection, boot_config_prot: 0x%02x\n", ext_csd[178]); seq_printf(s, "[177] Boot bus width1, boot_bus_conditions: 0x%02x\n", ext_csd[177]); seq_printf(s, "[175] High-density erase group definition, erase_group_def: 0x%02x\n", ext_csd[175]); /* A43: reserved [174:0] */ if (ext_csd_rev >= 5) { 
seq_printf(s, "[174] boot_wp_status: 0x%02x\n", ext_csd[174]); seq_printf(s, "[173] Boot area write protection register, boot_wp: 0x%02x\n", ext_csd[173]); /* A441: reserved [172] */ seq_printf(s, "[171] User area write protection register, user_wp: 0x%02x\n", ext_csd[171]); /* A441: reserved [170] */ seq_printf(s, "[169] FW configuration, fw_config: 0x%02x\n", ext_csd[169]); seq_printf(s, "[168] RPMB Size, rpmb_size_mult: 0x%02x\n", ext_csd[168]); seq_printf(s, "[167] Write reliability setting register, wr_rel_set: 0x%02x\n", ext_csd[167]); seq_printf(s, "[166] Write reliability parameter register, wr_rel_param: 0x%02x\n", ext_csd[166]); /* sanitize_start ext_csd[165]: not readable * bkops_start ext_csd[164]: only writable */ seq_printf(s, "[163] Enable background operations handshake, bkops_en: 0x%02x\n", ext_csd[163]); seq_printf(s, "[162] H/W reset function, rst_n_function: 0x%02x\n", ext_csd[162]); seq_printf(s, "[160] HPI management, hpi_mgmt: 0x%02x\n", ext_csd[161]); seq_printf(s, "[169] Partitioning Support, partitioning_support: 0x%02x\n", ext_csd[160]); seq_printf(s, "[159:157] Max Enhanced Area Size, max_enh_size_mult: 0x%06x\n", (ext_csd[159] << 16) | (ext_csd[158] << 8) |ext_csd[157]); seq_printf(s, "[156] Partitions attribute, partitions_attribute: 0x%02x\n", ext_csd[156]); seq_printf(s, "[155] Partitioning Setting, partition_setting_completed: 0x%02x\n", ext_csd[155]); seq_printf(s, "[154:152] General Purpose Partition Size, gp_size_mult_4: 0x%06x\n", (ext_csd[154] << 16) |(ext_csd[153] << 8) | ext_csd[152]); seq_printf(s, "[151:149] General Purpose Partition Size, gp_size_mult_3: 0x%06x\n", (ext_csd[151] << 16) |(ext_csd[150] << 8) | ext_csd[149]); seq_printf(s, "[148:146] General Purpose Partition Size, gp_size_mult_2: 0x%06x\n", (ext_csd[148] << 16) |(ext_csd[147] << 8) | ext_csd[146]); seq_printf(s, "[145:143] General Purpose Partition Size, gp_size_mult_1: 0x%06x\n", (ext_csd[145] << 16) |(ext_csd[144] << 8) | ext_csd[143]); seq_printf(s, 
"[142:140] Enhanced User Data Area Size, enh_size_mult: 0x%06x\n", (ext_csd[142] << 16) |(ext_csd[141] << 8) | ext_csd[140]); seq_printf(s, "[139:137] Enhanced User Data Start Address, enh_start_addr: 0x%06x\n", (ext_csd[139] << 16) |(ext_csd[138] << 8) | ext_csd[137]); /* A441: reserved [135] */ seq_printf(s, "[134] Bad Block Management mode, sec_bad_blk_mgmnt: 0x%02x\n", ext_csd[134]); /* A441: reserved [133:0] */ } /* B45 */ if (ext_csd_rev >= 6) { int j; /* tcase_support ext_csd[132] not readable */ seq_printf(s, "periodic_wakeup: 0x%02x\n", ext_csd[131]); seq_printf(s, "program_cid_csd_ddr_support: 0x%02x\n", ext_csd[130]); for (j = 127; j >= 64; j--) seq_printf(s, "vendor_specific_field[%d]: 0x%02x\n", j, ext_csd[j]); seq_printf(s, "native_sector_size: 0x%02x\n", ext_csd[63]); seq_printf(s, "use_native_sector: 0x%02x\n", ext_csd[62]); seq_printf(s, "data_sector_size: 0x%02x\n", ext_csd[61]); seq_printf(s, "ini_timeout_emu: 0x%02x\n", ext_csd[60]); seq_printf(s, "class_6_ctrl: 0x%02x\n", ext_csd[59]); seq_printf(s, "dyncap_needed: 0x%02x\n", ext_csd[58]); seq_printf(s, "exception_events_ctrl: 0x%04x\n", (ext_csd[57] << 8) | ext_csd[56]); seq_printf(s, "exception_events_status: 0x%04x\n", (ext_csd[55] << 8) | ext_csd[54]); seq_printf(s, "ext_partitions_attribute: 0x%04x\n", (ext_csd[53] << 8) | ext_csd[52]); for (j = 51; j >= 37; j--) seq_printf(s, "context_conf[%d]: 0x%02x\n", j, ext_csd[j]); seq_printf(s, "packed_command_status: 0x%02x\n", ext_csd[36]); seq_printf(s, "packed_failure_index: 0x%02x\n", ext_csd[35]); seq_printf(s, "power_off_notification: 0x%02x\n", ext_csd[34]); seq_printf(s, "cache_ctrl: 0x%02x\n", ext_csd[33]); /* flush_cache ext_csd[32] not readable */ /*Reserved [31:0] */ } #endif out_free: #ifndef CONFIG_MACH_LGE /* LGE_CHANGE * http://www.mail-archive.com/[email protected]/msg10669.html * 2012-03-09, [email protected] */ kfree(buf); #endif kfree(ext_csd); return err; }