/*
 * mmc_queue_thread() - kernel thread that feeds block-layer requests to the
 * MMC host driver via mq->issue_fn.
 *
 * @d: the struct mmc_queue this thread services (passed by kthread_create).
 *
 * The thread sleeps (TASK_INTERRUPTIBLE + schedule()) while the queue is
 * empty, releasing mq->thread_sem across the sleep so that queue
 * suspend/resume can take the semaphore and fence the thread out.
 * Returns 0 when kthread_should_stop() is seen with no pending request.
 */
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct request *req;

	/*
	 * Allow emergency memory pools to be used: this thread must make
	 * forward progress to complete writeback under memory pressure.
	 */
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		req = NULL;	/* Must be set to NULL at each iteration */

		/*
		 * Fetch the next request under the queue lock; the state is
		 * set to TASK_INTERRUPTIBLE *before* checking the queue so a
		 * wakeup between the check and schedule() is not lost.
		 */
		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			/* Drop the semaphore while asleep so the queue can
			 * be suspended (suspend holds thread_sem). */
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);
#ifdef CONFIG_MMC_AUTO_SUSPEND
		/* Cancel/advance any pending auto-suspend: I/O is active. */
		mmc_auto_suspend(mq->card->host, 0);
#endif
#ifdef CONFIG_MMC_BLOCK_PARANOID_RESUME
		/*
		 * After a resume, poll CMD13 (SEND_STATUS) until the card
		 * reports READY_FOR_DATA and is out of the programming
		 * state (state 7), or until 3 command errors / ~1s elapse.
		 */
		if (mq->check_status) {
			struct mmc_command cmd;
			int retries = 3;
			unsigned long delay = jiffies + HZ;

			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = mq->card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

				mmc_claim_host(mq->card->host);
				err = mmc_wait_for_cmd(mq->card->host, &cmd, 5);
				mmc_release_host(mq->card->host);

				if (err) {
					printk(KERN_ERR "%s: failed to get status (%d)\n", __func__, err);
					msleep(5);
					retries--;
					continue;
				}
				/* Give up after ~1 second even if not ready. */
				if (time_after(jiffies, delay)) {
					printk(KERN_ERR "failed to get card ready\n");
					break;
				}
				printk(KERN_DEBUG "%s: status 0x%.8x\n", __func__, cmd.resp[0]);
			} while (retries && (!(cmd.resp[0] & R1_READY_FOR_DATA) || (R1_CURRENT_STATE(cmd.resp[0]) == 7)));
			mq->check_status = 0;
		}
#endif
		/* issue_fn returns 0 on failure in this driver variant. */
		if (!(mq->issue_fn(mq, req)))
			printk(KERN_ERR "mmc_blk_issue_rq failed!!\n");
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
void mmc_rescan(struct work_struct *work) { struct mmc_host *host = container_of(work, struct mmc_host, detect.work); u32 ocr; int err; int extend_wakelock = 0; #ifdef MMC_PATCH_2 unsigned int attached = 0; #endif NV_DRIVER_TRACE(("mmc_rescan\n")); mmc_bus_get(host); /* if there is a card registered, check whether it is still present */ if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) { host->bus_ops->detect(host); } /* If the card was removed the bus will be marked * as dead - extend the wakelock so userspace * can respond */ if (host->bus_dead) { extend_wakelock = 1; } NV_DRIVER_TRACE(("mmc_rescan extend_wakelock=%d\n",extend_wakelock)); mmc_bus_put(host); mmc_bus_get(host); /* if there still is a card present, stop here */ if (host->bus_ops != NULL) { mmc_bus_put(host); goto out; } /* detect a newly inserted card */ /* * Only we can add a new handler, so it's safe to * release the lock here. */ mmc_bus_put(host); if (host->ops->get_cd && host->ops->get_cd(host) == 0) goto out; mmc_claim_host(host); mmc_power_up(host); mmc_go_idle(host); mmc_send_if_cond(host, host->ocr_avail); /* * First we search for SDIO... */ err = mmc_send_io_op_cond(host, 0, &ocr); if (!err) { if (mmc_attach_sdio(host, ocr)) #ifdef MMC_PATCH_2 { #endif mmc_power_off(host); #ifdef MMC_PATCH_2 } else { attached = 1; } #endif extend_wakelock = 1; goto out; } NV_DRIVER_TRACE(("mmc_rescan 7\n")); /* * ...then normal SD... */ err = mmc_send_app_op_cond(host, 0, &ocr); if (!err) { if (mmc_attach_sd(host, ocr)) #ifdef MMC_PATCH_2 { #endif mmc_power_off(host); #ifdef MMC_PATCH_2 } else { attached = 1; } #endif extend_wakelock = 1; goto out; } /* * ...and finally MMC. 
*/ err = mmc_send_op_cond(host, 0, &ocr); if (!err) { if (mmc_attach_mmc(host, ocr)) #ifdef MMC_PATCH_2 { #endif mmc_power_off(host); #ifdef MMC_PATCH_2 } else { attached = 1; } #endif extend_wakelock = 1; goto out; } mmc_release_host(host); mmc_power_off(host); out: NV_DRIVER_TRACE(("mmc_rescan out\n")); if (extend_wakelock) wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2); else wake_unlock(&mmc_delayed_work_wake_lock); if (host->caps & MMC_CAP_NEEDS_POLL) queue_delayed_work(workqueue, &host->detect, HZ); #ifdef MMC_PATCH_2 /* Create a one-shot timer work, work after 1 second */ if(!strcmp(mmc_hostname(host), "mmc1")) { printk("Try to add timer for %s ---------------- \n", mmc_hostname(host)); if(timer_inited){ init_timer(&detect_timer); detect_timer.data = (unsigned long)host; detect_timer.function = sdhci_tegra_card_detect; detect_timer.expires = jiffies + HZ; add_timer(&detect_timer); timer_inited = 1; } else { if(!timer_pending(&detect_timer)) { return; } else { add_timer(&detect_timer); } } } #endif }
/* * Starting point for MMC card init. */ int mmc_attach_mmc(struct mmc_host *host) { int err; u32 ocr; BUG_ON(!host); WARN_ON(!host->claimed); err = mmc_send_op_cond(host, 0, &ocr); if (err) return err; mmc_attach_bus_ops(host); if (host->ocr_avail_mmc) host->ocr_avail = host->ocr_avail_mmc; /* * We need to get OCR a different way for SPI. */ if (mmc_host_is_spi(host)) { err = mmc_spi_read_ocr(host, 1, &ocr); if (err) goto err; } /* * Sanity check the voltages that the card claims to * support. */ if (ocr & 0x7F) { printk(KERN_WARNING "%s: card claims to support voltages " "below the defined range. These will be ignored.\n", mmc_hostname(host)); ocr &= ~0x7F; } host->ocr = mmc_select_voltage(host, ocr); /* * Can we support the voltage of the card? */ if (!host->ocr) { err = -EINVAL; goto err; } /* * Detect and init the card. */ err = mmc_init_card(host, host->ocr, NULL); if (err) goto err; mmc_release_host(host); err = mmc_add_card(host->card); mmc_claim_host(host); if (err) goto remove_card; return 0; remove_card: mmc_release_host(host); mmc_remove_card(host->card); mmc_claim_host(host); host->card = NULL; err: mmc_detach_bus(host); printk(KERN_ERR "%s: error %d whilst initialising MMC card\n", mmc_hostname(host), err); return err; }
static int mmc_queue_thread(void *d) { struct mmc_queue *mq = d; struct request_queue *q = mq->queue; current->flags |= PF_MEMALLOC; down(&mq->thread_sem); do { struct request *req = NULL; spin_lock_irq(q->queue_lock); set_current_state(TASK_INTERRUPTIBLE); if (!blk_queue_plugged(q)) req = elv_next_request(q); mq->req = req; spin_unlock_irq(q->queue_lock); if (!req) { if (kthread_should_stop()) { set_current_state(TASK_RUNNING); break; } up(&mq->thread_sem); schedule(); down(&mq->thread_sem); continue; } set_current_state(TASK_RUNNING); #ifdef CONFIG_MMC_BLOCK_PARANOID_RESUME if (mq->check_status) { struct mmc_command cmd; int count = 0; do { int err; cmd.opcode = MMC_SEND_STATUS; cmd.arg = mq->card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; mmc_claim_host(mq->card->host); err = mmc_wait_for_cmd(mq->card->host, &cmd, 5); mmc_release_host(mq->card->host); if (err) { count++; if (count == 1) { printk(KERN_ERR "%s: failed to get status (%d)\n", __func__, err); } msleep(5); if (count <= 200) { continue; } else { printk(KERN_ERR "%s: failed to get status (%d) > 200 times\n", __func__, err); break; } } printk(KERN_DEBUG "%s: status 0x%.8x\n", __func__, cmd.resp[0]); } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || (R1_CURRENT_STATE(cmd.resp[0]) == 7)); mq->check_status = 0; } #endif mq->issue_fn(mq, req); } while (1); up(&mq->thread_sem); return 0; }
/*
 * mmc_rescan() - card-detect work item (WMT/atsmb variant with retry loop).
 *
 * Retries the whole detect/probe sequence up to 5 times, sleeping one
 * second between attempts, until either the slot reports empty or the
 * host has been successfully powered up.  Sets host->card_scan_status to
 * true when a card ended up powered after the loop.
 *
 * NOTE: the 'out' label is INSIDE the while loop on purpose — every probe
 * outcome still runs the per-iteration wakelock/poll/retry bookkeeping.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host, detect.work);
	struct atsmb_host *atsmb_host = mmc_priv(host);
	u32 ocr;
	int err;
	int extend_wakelock = 0;
	int retry = 5;

	DBG("[%s] s\n",__func__);
	host->card_scan_status = false;

	while(retry > 0) {
		retry--;

		mmc_bus_get(host);

		/* if there is a card registered, check whether it is still present */
		if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
			host->bus_ops->detect(host);

		/* If the card was removed the bus will be marked
		 * as dead - extend the wakelock so userspace
		 * can respond */
		if (host->bus_dead)
			extend_wakelock = 1;

		mmc_bus_put(host);

		mmc_bus_get(host);

		/* if there still is a card present, stop here */
		if (host->bus_ops != NULL) {
			mmc_bus_put(host);
			goto out;
		}

		/* detect a newly inserted card */

		/*
		 * Only we can add a new handler, so it's safe to
		 * release the lock here.
		 */
		mmc_bus_put(host);

		if (host->ops->get_cd && host->ops->get_cd(host) == 0)
			goto out;

		mmc_claim_host(host);

		mmc_power_up(host);
		mmc_go_idle(host);

		mmc_send_if_cond(host, host->ocr_avail);

#if 1 //zhf: marked by James Tian
		/*
		 * First we search for SDIO...
		 */
		err = mmc_send_io_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_sdio(host, ocr))
				mmc_power_off(host);
			extend_wakelock = 1;
			goto out;
		}
#endif

		/*
		 * ...then normal SD...
		 */
		err = mmc_send_app_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_sd(host, ocr))
				mmc_power_off(host);
			extend_wakelock = 1;
			goto out;
		}

		/*
		 * ...and finally MMC.
		 */
		err = mmc_send_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_mmc(host, ocr))
				mmc_power_off(host);
			extend_wakelock = 1;
			goto out;
		}

		mmc_release_host(host);
		mmc_power_off(host);

out:		/* per-iteration epilogue: wakelock, polling, retry check */
		if (extend_wakelock)
			wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
		else
			wake_unlock(&mmc_delayed_work_wake_lock);

		if (host->caps & MMC_CAP_NEEDS_POLL)
			mmc_schedule_delayed_work(&host->detect, HZ);

		DBG("[%s]retry = %d slot = %x power = %x\n", __func__,retry,host->ops->get_slot_status(host), host->ios.power_mode);

		/* Stop retrying when the slot is empty or power is on. */
		if((host->ops->get_slot_status(host) == 0) || (host->ios.power_mode != MMC_POWER_OFF))
			break;

		msleep(1000);
	}

	/* Powered-on host after the loop means a card was found. */
	if (host->ios.power_mode != MMC_POWER_OFF)
		host->card_scan_status = true;

	printk("SD%d Host Clock %dHz\n",host->wmt_host_index, atsmb_host->current_clock);
	DBG("[%s] e\n",__func__);
}
void mmc_rescan(struct work_struct *work) { struct mmc_host *host = container_of(work, struct mmc_host, detect.work); u32 ocr; int err; mmc_bus_get(host); /* if there is a card registered, check whether it is still present */ if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) host->bus_ops->detect(host); mmc_bus_put(host); mmc_bus_get(host); /* if there still is a card present, stop here */ if (host->bus_ops != NULL) { mmc_bus_put(host); goto out; } /* detect a newly inserted card */ /* * Only we can add a new handler, so it's safe to * release the lock here. */ mmc_bus_put(host); if (host->ops->get_cd && host->ops->get_cd(host) == 0) goto out; mmc_claim_host(host); mmc_power_up(host); mmc_go_idle(host); mmc_send_if_cond(host, host->ocr_avail); /* * First we search for SDIO... */ err = mmc_send_io_op_cond(host, 0, &ocr); if (!err) { if (mmc_attach_sdio(host, ocr)) mmc_power_off(host); goto out; } /* * ...then normal SD... */ err = mmc_send_app_op_cond(host, 0, &ocr); if (!err) { if (mmc_attach_sd(host, ocr)) mmc_power_off(host); goto out; } /* * ...and finally MMC. */ err = mmc_send_op_cond(host, 0, &ocr); if (!err) { if (mmc_attach_mmc(host, ocr)) mmc_power_off(host); goto out; } mmc_release_host(host); mmc_power_off(host); out: if (host->caps & MMC_CAP_NEEDS_POLL) mmc_schedule_delayed_work(&host->detect, HZ); }
static int simple_sd_ioctl_multi_rw(struct msdc_ioctl *msdc_ctl) { char l_buf[512]; struct scatterlist msdc_sg; struct mmc_data msdc_data; struct mmc_command msdc_cmd; struct mmc_command msdc_stop; int ret = 0; #ifdef MTK_MSDC_USE_CMD23 struct mmc_command msdc_sbc; #endif struct mmc_request msdc_mrq; struct msdc_host *host_ctl; if(!msdc_ctl) return -EINVAL; host_ctl = mtk_msdc_host[msdc_ctl->host_num]; BUG_ON(!host_ctl); BUG_ON(!host_ctl->mmc); BUG_ON(!host_ctl->mmc->card); mmc_claim_host(host_ctl->mmc); #if DEBUG_MMC_IOCTL pr_debug("user want access %d partition\n", msdc_ctl->partition); #endif ret = mmc_send_ext_csd(host_ctl->mmc->card, l_buf); if (ret) { pr_debug("mmc_send_ext_csd error, multi rw\n"); goto multi_end; } #ifdef CONFIG_MTK_EMMC_SUPPORT switch (msdc_ctl->partition) { case EMMC_PART_BOOT1: if (0x1 != (l_buf[179] & 0x7)) { /* change to access boot partition 1 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x1; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; case EMMC_PART_BOOT2: if (0x2 != (l_buf[179] & 0x7)) { /* change to access boot partition 2 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x2; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; default: /* make sure access partition is user data area */ if (0 != (l_buf[179] & 0x7)) { /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; } #endif if(msdc_ctl->total_size > host_ctl->mmc->max_req_size) { //64*1024){ msdc_ctl->result = -1; goto multi_end; } memset(&msdc_data, 0, sizeof(struct mmc_data)); memset(&msdc_mrq, 0, sizeof(struct mmc_request)); memset(&msdc_cmd, 0, sizeof(struct mmc_command)); memset(&msdc_stop, 0, sizeof(struct mmc_command)); #ifdef MTK_MSDC_USE_CMD23 memset(&msdc_sbc, 0, sizeof(struct mmc_command)); #endif msdc_mrq.cmd = &msdc_cmd; msdc_mrq.data = &msdc_data; if (msdc_ctl->trans_type) dma_force[host_ctl->id] = FORCE_IN_DMA; else dma_force[host_ctl->id] = FORCE_IN_PIO; if 
(msdc_ctl->iswrite) { msdc_data.flags = MMC_DATA_WRITE; msdc_cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size)) { dma_force[host_ctl->id] = FORCE_NOTHING; ret = -EFAULT; goto multi_end; } } else { /* called from other kernel module */ memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size); } } else { msdc_data.flags = MMC_DATA_READ; msdc_cmd.opcode = MMC_READ_MULTIPLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; memset(sg_msdc_multi_buffer, 0, msdc_ctl->total_size); } #ifdef MTK_MSDC_USE_CMD23 if ((mmc_card_mmc(host_ctl->mmc->card) || (mmc_card_sd(host_ctl->mmc->card) && host_ctl->mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)) && !(host_ctl->mmc->card->quirks & MMC_QUIRK_BLK_NO_CMD23)) { msdc_mrq.sbc = &msdc_sbc; msdc_mrq.sbc->opcode = MMC_SET_BLOCK_COUNT; #ifdef MTK_MSDC_USE_CACHE /* if ioctl access cacheable partition data, there is on flush mechanism in msdc driver * so do reliable write .*/ if (mmc_card_mmc(host_ctl->mmc->card) && (host_ctl->mmc->card->ext_csd.cache_ctrl & 0x1) && (msdc_cmd.opcode == MMC_WRITE_MULTIPLE_BLOCK)) msdc_mrq.sbc->arg = msdc_data.blocks | (1 << 31); else msdc_mrq.sbc->arg = msdc_data.blocks; #else msdc_mrq.sbc->arg = msdc_data.blocks; #endif msdc_mrq.sbc->flags = MMC_RSP_R1 | MMC_CMD_AC; } #endif msdc_cmd.arg = msdc_ctl->address; if (!mmc_card_blockaddr(host_ctl->mmc->card)) { pr_debug("this device use byte address!!\n"); msdc_cmd.arg <<= 9; } msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; msdc_stop.opcode = MMC_STOP_TRANSMISSION; msdc_stop.arg = 0; msdc_stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; msdc_data.stop = &msdc_stop; msdc_data.blksz = 512; msdc_data.sg = &msdc_sg; msdc_data.sg_len = 1; #if DEBUG_MMC_IOCTL pr_debug("total size is %d\n", msdc_ctl->total_size); #endif sg_init_one(&msdc_sg, sg_msdc_multi_buffer, 
msdc_ctl->total_size); mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card); mmc_wait_for_req(host_ctl->mmc, &msdc_mrq); if (!msdc_ctl->iswrite) { if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_to_user(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size)) { dma_force[host_ctl->id] = FORCE_NOTHING; ret = -EFAULT; goto multi_end; } } else { /* called from other kernel module */ memcpy(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size); } } if (msdc_ctl->partition) { ret = mmc_send_ext_csd(host_ctl->mmc->card, l_buf); if (ret) { pr_debug("mmc_send_ext_csd error, multi rw2\n"); goto multi_end; } if (l_buf[179] & 0x7) { /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } } multi_end: mmc_release_host(host_ctl->mmc); if (ret) msdc_ctl->result = ret; if (msdc_cmd.error) msdc_ctl->result = msdc_cmd.error; if (msdc_data.error) { msdc_ctl->result = msdc_data.error; } else { msdc_ctl->result = 0; } dma_force[host_ctl->id] = FORCE_NOTHING; return msdc_ctl->result; }
/*
 * mmc_gen_cmd() - issue CMD56 (GEN_CMD) and read back one 512-byte block.
 *
 * @card:  target card.
 * @buf:   caller buffer; receives the 512 bytes returned by the card.
 * @index, @arg1, @arg2, @mode: packed into the CMD56 argument as
 *         (arg2 << 16) | (arg1 << 8) | (index << 1) | mode.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the first
 * command/data/stop error reported by the core.
 *
 * FIX: the original called mmc_set_blocklen() before claiming the host
 * and ignored its return value; CMD16 is itself a card command and must
 * be issued with the host claimed, and its failure must be propagated.
 */
int mmc_gen_cmd(struct mmc_card *card, void *buf, u8 index, u8 arg1, u8 arg2, u8 mode)
{
	struct mmc_request mrq;
	struct mmc_command cmd;
	struct mmc_data data;
	struct mmc_command stop;
	struct scatterlist sg;
	void *data_buf;
	int err;

	/* Bounce buffer: the caller's buf may not be DMA-able. */
	data_buf = kmalloc(512, GFP_KERNEL);
	if (data_buf == NULL)
		return -ENOMEM;

	memset(&mrq, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));
	memset(&stop, 0, sizeof(struct mmc_command));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	cmd.opcode = MMC_GEN_CMD;
	cmd.arg = (arg2 << 16) | (arg1 << 8) | (index << 1) | mode;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 512;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	stop.opcode = MMC_STOP_TRANSMISSION;
	stop.arg = 0;
	stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

	sg_init_one(&sg, data_buf, 512);

	mmc_set_data_timeout(&data, card);

	mmc_claim_host(card->host);

	/* CMD16: make sure the card's block length matches our transfer. */
	err = mmc_set_blocklen(card, 512);
	if (err) {
		mmc_release_host(card->host);
		kfree(data_buf);
		return err;
	}

	mmc_wait_for_req(card->host, &mrq);

	mmc_release_host(card->host);

	memcpy(buf, data_buf, 512);
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;
	if (stop.error)
		return stop.error;

	return 0;
}
int stub_sendcmd(struct mmc_card *card, unsigned int cmd, unsigned long arg, unsigned int len, unsigned char *buff) { int returnVal = -1; unsigned char *kbuffer = NULL; int direction = 0; int result = 0; if (card == NULL) { printk(KERN_DEBUG "stub_sendcmd: card is null error\n"); return -ENXIO; } kbuffer = kmalloc(len, GFP_KERNEL); if (kbuffer == NULL) { printk(KERN_DEBUG "malloc failed\n"); return -ENOMEM; } memset(kbuffer, 0x00, len); printk(KERN_DEBUG "%s]cmd=0x%x,len=%d\n ", __func__, cmd, len); mmc_claim_host(card->host); switch (cmd) { case ACMD43: direction = MMC_DATA_READ; returnVal = CPRM_CMD_SecureRW(card, SD_ACMD43_GET_MKB, direction, arg, kbuffer, len); printk(KERN_DEBUG "SD_ACMD43_GET_MKB:0x%x\n", returnVal); break; case ACMD44: direction = MMC_DATA_READ; returnVal = CPRM_CMD_SecureRW(card, SD_ACMD44_GET_MID, direction, 0, kbuffer, len); printk(KERN_DEBUG "SD_ACMD44_GET_MID:0x%x\n", returnVal); break; case ACMD45: direction = MMC_DATA_WRITE; result = copy_from_user((void *)kbuffer, (void *)buff, len); returnVal = CPRM_CMD_SecureRW(card, SD_ACMD45_SET_CER_RN1, direction, 0, kbuffer, len); printk(KERN_INFO"SD_ACMD45_SET_CER_RN1 [0x%x]\n ", returnVal); break; case ACMD46: direction = MMC_DATA_READ; returnVal = CPRM_CMD_SecureRW(card, SD_ACMD46_GET_CER_RN2, direction, 0, kbuffer, len); printk(KERN_DEBUG "SD_ACMD46_GET_CER_RN2:0x%x\n", returnVal); break; case ACMD47: direction = MMC_DATA_WRITE; result = copy_from_user((void *)kbuffer, (void *)buff, len); returnVal = CPRM_CMD_SecureRW(card, SD_ACMD47_SET_CER_RES2, direction, 0, kbuffer, len); printk(KERN_DEBUG "SD_ACMD47_SET_CER_RES2:0x%x\n", returnVal); break; case ACMD48: direction = MMC_DATA_READ; returnVal = CPRM_CMD_SecureRW(card, SD_ACMD48_GET_CER_RES1, direction, 0, kbuffer, len); printk(KERN_DEBUG "SD_ACMD48_GET_CER_RES1:0x%x\n", returnVal); break; case ACMD25: direction = MMC_DATA_WRITE; result = copy_from_user((void *)kbuffer, (void *)buff, len); returnVal = CPRM_CMD_SecureMultiRW(card, 
SD_ACMD25_SECURE_WRITE_MULTI_BLOCK, direction, 0, kbuffer, len); printk(KERN_DEBUG "SD_ACMD25_SECURE_WRITE_MULTI_BLOCK[%d]=%d\n", len, returnVal); break; case ACMD18: direction = MMC_DATA_READ; returnVal = CPRM_CMD_SecureMultiRW(card, SD_ACMD18_SECURE_READ_MULTI_BLOCK, direction, 0, kbuffer, len); printk(KERN_DEBUG "SD_ACMD18_SECURE_READ_MULTI_BLOCK [%d]=%d\n", len, returnVal); break; case ACMD13: break; default: printk(KERN_DEBUG " %s ] : CMD [ %x ] ERROR", __func__, cmd); break; } if (returnVal == 0) { if (direction == MMC_DATA_READ) result = copy_to_user((void *)buff, (void *)kbuffer, len); result = returnVal; printk(KERN_DEBUG "stub_sendcmd SDAS_E_SUCCESS\n"); } else { printk(KERN_DEBUG "stub_sendcmd SDAS_E_FAIL\n"); result = -EIO; } mmc_release_host(card->host); kfree(kbuffer); return result; }
/*
 * swrm callback function - called from the block driver.
 *
 * Dispatches a single SWRM operation identified by @opt_mode: device
 * power-cycle, direct read/write at COMMAND_BASE, accumulation-buffer
 * management (allocate / accumulate / flush / free, all guarded by
 * swrm_lock), or host claim/release.  Returns SWRM_ERR_SUCCESS or an
 * SWRM_ERR_* / helper status code.
 */
static int swrm_driver_callback_imp(struct mmc_card *card, u8 *buffer,
				    u32 opt_mode, u32 length)
{
	int ret = SWRM_ERR_SUCCESS;

	switch (opt_mode) {
	case SWRM_INIT_DEV:
		ret = swrm_mmc_power_cycle(card);
		break;

	case SWRM_WRITE:
		ret = swrm_mmc_write(card, buffer, COMMAND_BASE, length, WAIT_FOR_REQ);
		break;

	case SWRM_WRITE_NO_STATUS:
		/* same as SWRM_WRITE but without waiting on status */
		ret = swrm_mmc_write(card, buffer, COMMAND_BASE, length, 0);
		break;

	case SWRM_WRITE_ACCUMELATED_DATA:
		/* Flush the accumulated buffer, then release it. */
		mutex_lock(&swrm_lock);
		if (!swrm_dat.buf) {
			ret = SWRM_ERR_INVALID;
		} else {
			ret = swrm_mmc_write(card, swrm_dat.buf, COMMAND_BASE, length, WAIT_FOR_REQ);
			kfree(swrm_dat.buf);
			swrm_dat.buf = NULL;
			swrm_dat.offset = 0;
			swrm_dat.length = 0;
		}
		mutex_unlock(&swrm_lock);
		break;

	case SWRM_READ:
		ret = swrm_mmc_read(card, COMMAND_BASE, buffer, length);
		break;

	case SWRM_ACCUMULATE_DATA:
		ret = swrm_accumulate_data(buffer, length);
		break;

	case SWRM_ALLOCAT_SIZE:
		ret = swrm_allocate_data(length);
		break;

	case SWRM_FREE_DATA_BUF:
		/* Drop the accumulation buffer without writing it out. */
		mutex_lock(&swrm_lock);
		kfree(swrm_dat.buf);
		swrm_dat.buf = NULL;
		swrm_dat.offset = 0;
		swrm_dat.length = 0;
		mutex_unlock(&swrm_lock);
		break;

	case SWRM_CLAIM:
		mmc_claim_host(card->host);
		break;

	case SWRM_RELEASE:
		mmc_release_host(card->host);
		break;

	default:
		ret = SWRM_ERR_INVALID;
		break;
	}

	printk(KERN_DEBUG "SWRM swrm_driver_callback_imp status %d", ret);
	return ret;
}
/*
 * mmc_rescan() - card-detect work item (variant with GPIO debounce and
 * rescan_disable gate).
 *
 * Re-queues itself at HZ/3 while the slot's GPIO status is still
 * changing, honours host->rescan_disable under host->lock, then performs
 * the usual presence check / probe sequence (SDIO, then SD, then MMC).
 * The wakelock is extended for HZ/2 whenever the card population changed.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	int extend_wakelock = 0;
	int ret;
	unsigned long flags;

	/*
	 * Add checking gpio pin status before initialization of bus.
	 * If the GPIO pin status is changed, check gpio pin status again.
	 * Should check until it's stable.
	 * [email protected], 2010-09-27
	 */
	if (host->ops->get_status){
		ret = host->ops->get_status(host);
		if (ret == 1) {
			/* status still bouncing: poll again in ~333ms */
			mmc_schedule_delayed_work(&host->detect, HZ / 3);
			return;
		}
	}

	/* Rescans can be disabled (e.g. during suspend); bail out early. */
	spin_lock_irqsave(&host->lock, flags);
	if (host->rescan_disable) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead && !(host->caps & MMC_CAP_NONREMOVABLE)) {
		host->bus_ops->detect(host);

		/* If the card was removed the bus will be marked
		 * as dead - extend the wakelock so userspace
		 * can respond */
		if (host->bus_dead)
			extend_wakelock = 1;
	}
	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/* Nothing answered: release the host and power the slot down. */
	mmc_release_host(host);
	mmc_power_off(host);

out:
	if (extend_wakelock)
		wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
	else
		wake_unlock(&mmc_delayed_work_wake_lock);

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
/*
 * mmc_rescan() - card-detect work item (variant with a shared wakelock
 * reference count).
 *
 * Each scheduled rescan holds a reference on mmc_delayed_work_wake_lock
 * (taken where the work is queued); both exit paths drop that reference
 * via atomic_dec_return(&wakelock_refs) and, when this was the last
 * holder, convert the lock into a 0.5 s timeout so userspace can react.
 * Also explicitly removes the card when the host-level card-detect says
 * the slot is empty while bus_ops are still registered.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host = container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	unsigned long flags;
	/* NOTE(review): set on several paths below but only consumed by the
	 * #if 0'd wakelock code at 'out' — effectively unused here. */
	int extend_wakelock = 0;

	/* Rescan disabled (e.g. during suspend): drop our wakelock ref. */
	spin_lock_irqsave(&host->lock, flags);
	if (host->rescan_disable) {
		spin_unlock_irqrestore(&host->lock, flags);
		if (atomic_dec_return(&wakelock_refs) > 0) {
			printk(KERN_DEBUG "Another host want the wakelock : %d\n", atomic_read(&wakelock_refs));
		}else {
			printk(KERN_DEBUG "unlock case1 : mmc%d: wake_lock_timeout 0.5 sec %d\n", host->index, atomic_read(&wakelock_refs));
			wake_lock_timeout(&mmc_delayed_work_wake_lock, msecs_to_jiffies(500));
		}
		return;
	}
	spin_unlock_irqrestore(&host->lock, flags);

	//[NAGSM_Android_HDLNC_SDcard_shinjonghyun_20100504 : mutual exclusion when MoviNand and SD card are using this function
	//	mutex_lock(&host->carddetect_lock);
	//]NAGSM_Android_HDLNC_SDcard_shinjonghyun_20100504 : mutual exclusion when MoviNand and SD card are using this function
	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) {
		/* Card-detect says empty while a card is bound: tear the
		 * card down explicitly instead of relying on detect(). */
		if(host->ops->get_cd && host->ops->get_cd(host) == 0) {
			if(host->bus_ops->remove)
				host->bus_ops->remove(host);

			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_release_host(host);
		}
		else
			host->bus_ops->detect(host);
	}

	/* If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond */
	if (host->bus_dead)
		extend_wakelock = 1;

	mmc_bus_put(host);

	mmc_bus_get(host);
	printk(KERN_DEBUG "*** DEBUG : start %s (mmc%d)***\n", __func__, host->index);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	printk(KERN_DEBUG "*** DEBUG : First we search for SDIO...(%d)***\n", host->index);
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	printk(KERN_DEBUG "*** DEBUG : ...then normal SD...(%d) ***\n", host->index);
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	printk(KERN_DEBUG "*** DEBUG : ...and finally MMC. (%d)***\n", host->index);
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	printk(KERN_DEBUG "*** DEBUG : end %s (mmc%d)***\n", __func__, host->index);
	mmc_release_host(host);
	mmc_power_off(host);

out:
#if 0
	//if (extend_wakelock)
	//	wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
	//else
	//	wake_unlock(&mmc_delayed_work_wake_lock);
#else
	/* Drop this rescan's wakelock reference; the last holder converts
	 * the lock into a 0.5 s timeout instead of releasing immediately. */
	if (atomic_dec_return(&wakelock_refs) > 0) {
		printk(KERN_DEBUG "Another host want the wakelock : %d\n", atomic_read(&wakelock_refs));
	} else {
		printk(KERN_DEBUG "unlock case2 : mmc%d: wake_lock_timeout 0.5 sec %d\n", host->index, atomic_read(&wakelock_refs));
		wake_lock_timeout(&mmc_delayed_work_wake_lock, msecs_to_jiffies(500));
	}
#endif

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);

	//[NAGSM_Android_HDLNC_SDcard_shinjonghyun_20100504 : mutual exclusion when MoviNand and SD card are using this function
	//	mutex_unlock(&host->carddetect_lock);
	//]NAGSM_Android_HDLNC_SDcard_shinjonghyun_20100504 : mutual exclusion when MoviNand and SD card are using this function
}
static void mmc_test_run(struct mmc_test_card *test, int testcase) { int i, ret; printk(KERN_INFO "%s: Starting tests of card %s...\n", mmc_hostname(test->card->host), mmc_card_id(test->card)); mmc_claim_host(test->card->host); for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) { if (testcase && ((i + 1) != testcase)) continue; printk(KERN_INFO "%s: Test case %d. %s...\n", mmc_hostname(test->card->host), i + 1, mmc_test_cases[i].name); if (mmc_test_cases[i].prepare) { ret = mmc_test_cases[i].prepare(test); if (ret) { printk(KERN_INFO "%s: Result: Prepare " "stage failed! (%d)\n", mmc_hostname(test->card->host), ret); continue; } } ret = mmc_test_cases[i].run(test); switch (ret) { case RESULT_OK: printk(KERN_INFO "%s: Result: OK\n", mmc_hostname(test->card->host)); break; case RESULT_FAIL: printk(KERN_INFO "%s: Result: FAILED\n", mmc_hostname(test->card->host)); break; case RESULT_UNSUP_HOST: printk(KERN_INFO "%s: Result: UNSUPPORTED " "(by host)\n", mmc_hostname(test->card->host)); break; case RESULT_UNSUP_CARD: printk(KERN_INFO "%s: Result: UNSUPPORTED " "(by card)\n", mmc_hostname(test->card->host)); break; default: printk(KERN_INFO "%s: Result: ERROR (%d)\n", mmc_hostname(test->card->host), ret); } if (mmc_test_cases[i].cleanup) { ret = mmc_test_cases[i].cleanup(test); if (ret) { printk(KERN_INFO "%s: Warning: Cleanup " "stage failed! (%d)\n", mmc_hostname(test->card->host), ret); } } } mmc_release_host(test->card->host); printk(KERN_INFO "%s: Tests completed.\n", mmc_hostname(test->card->host)); }
static int mmc_ext_csd_open(struct inode *inode, struct file *filp) #endif { #ifdef CONFIG_MACH_LGE /* */ struct mmc_card *card = s->private; #else struct mmc_card *card = inode->i_private; char *buf; ssize_t n = 0; #endif u8 *ext_csd; #ifdef CONFIG_MACH_LGE /* */ u8 ext_csd_rev; int err; const char *str; #else int err, i; buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); if (!buf) return -ENOMEM; #endif ext_csd = kmalloc(512, GFP_KERNEL); if (!ext_csd) { err = -ENOMEM; goto out_free; } mmc_claim_host(card->host); err = mmc_send_ext_csd(card, ext_csd); mmc_release_host(card->host); if (err) goto out_free; #ifdef CONFIG_MACH_LGE /* */ ext_csd_rev = ext_csd[192]; #else for (i = 511; i >= 0; i--) n += sprintf(buf + n, "%02x", ext_csd[i]); n += sprintf(buf + n, "\n"); BUG_ON(n != EXT_CSD_STR_LEN); filp->private_data = buf; kfree(ext_csd); return 0; #endif #ifdef CONFIG_MACH_LGE /* */ switch (ext_csd_rev) { case 6: str = "4.5"; break; case 5: str = "4.41"; break; case 3: str = "4.3"; break; case 2: str = "4.2"; break; case 1: str = "4.1"; break; case 0: str = "4.0"; break; default: goto out_free; } seq_printf(s, "Extended CSD rev 1.%d (MMC %s)\n", ext_csd_rev, str); if (ext_csd_rev < 3) goto out_free; /* No ext_csd */ /* Parse the Extended CSD registers. * Reserved bit should be read as "0" in case of spec older * than A441. 
*/ seq_printf(s, "[504] Supported Command Sets, s_cmd_set: 0x%02x\n", ext_csd[504]); seq_printf(s, "[503] HPI features, hpi_features: 0x%02x\n", ext_csd[503]); seq_printf(s, "[502] Background operations support, bkops_support: 0x%02x\n", ext_csd[502]); if (ext_csd_rev >= 6) { seq_printf(s, "max_packed_reads: 0x%02x\n", ext_csd[501]); seq_printf(s, "max_packed_writes: 0x%02x\n", ext_csd[500]); seq_printf(s, "data_tag_support: 0x%02x\n", ext_csd[499]); seq_printf(s, "tag_unit_size: 0x%02x\n", ext_csd[498]); seq_printf(s, "tag_res_size: 0x%02x\n", ext_csd[497]); seq_printf(s, "context_capabilities: 0x%02x\n", ext_csd[496]); seq_printf(s, "large_unit_size_m1: 0x%02x\n", ext_csd[495]); seq_printf(s, "ext_support: 0x%02x\n", ext_csd[494]); seq_printf(s, "generic_cmd6_time: 0x%02x\n", ext_csd[248]); seq_printf(s, "power_off_long_time: 0x%02x\n", ext_csd[247]); seq_printf(s, "cache_size %d KiB\n", ext_csd[249] << 0 | (ext_csd[250] << 8) | (ext_csd[251] << 16) | (ext_csd[252] << 24)); } /* A441: Reserved [501:247] A43: reserved [246:229] */ if (ext_csd_rev >= 5) { seq_printf(s, "[241] 1st initialization time after partitioning, ini_timeout_ap: 0x%02x\n", ext_csd[241]); /* A441: reserved [240] */ seq_printf(s, "[239] Power class for 52MHz, DDR at 3.6V, pwr_cl_ddr_52_360: 0x%02x\n", ext_csd[239]); seq_printf(s, "[238] POwer class for 52MHz, DDR at 1.95V, pwr_cl_ddr_52_195: 0x%02x\n", ext_csd[238]); /* A441: reserved [237-236] */ if (ext_csd_rev >= 6) { seq_printf(s, "pwr_cl_200_360: 0x%02x\n", ext_csd[237]); seq_printf(s, "pwr_cl_200_195: 0x%02x\n", ext_csd[236]); } seq_printf(s, "[235] Minimun Write Performance for 8bit at 52MHz in DDR mode, min_perf_ddr_w_8_52: 0x%02x\n", ext_csd[235]); seq_printf(s, "[234] Minimun Read Performance for 8bit at 52MHz in DDR modemin_perf_ddr_r_8_52: 0x%02x\n", ext_csd[234]); /* A441: reserved [233] */ seq_printf(s, "[232] TRIM Multiplier, trim_mult: 0x%02x\n", ext_csd[232]); seq_printf(s, "[231] Secure Feature support, sec_feature_support: 
0x%02x\n", ext_csd[231]); } if (ext_csd_rev == 5) { /* Obsolete in 4.5 */ seq_printf(s, "[230] Secure Erase Multiplier, sec_erase_mult: 0x%02x\n", ext_csd[230]); seq_printf(s, "[229] Secure TRIM Multiplier, sec_trim_mult: 0x%02x\n", ext_csd[229]); } seq_printf(s, "[228] Boot information, boot_info: 0x%02x\n", ext_csd[228]); /* A441/A43: reserved [227] */ seq_printf(s, "[226] Boot partition size, boot_size_mult : 0x%02x\n", ext_csd[226]); seq_printf(s, "[225] Access size, acc_size: 0x%02x\n", ext_csd[225]); seq_printf(s, "[224] High-capacity erase unit size, hc_erase_grp_size: 0x%02x\n", ext_csd[224]); seq_printf(s, "[223] High-capacity erase timeout, erase_timeout_mult: 0x%02x\n", ext_csd[223]); seq_printf(s, "[222] Reliable write sector count, rel_wr_sec_c: 0x%02x\n", ext_csd[222]); seq_printf(s, "[221] High-capacity write protect group size, hc_wp_grp_size: 0x%02x\n", ext_csd[221]); seq_printf(s, "[220] Sleep current(VCC), s_c_vcc: 0x%02x\n", ext_csd[220]); seq_printf(s, "[219] Sleep current(VCCQ), s_c_vccq: 0x%02x\n", ext_csd[219]); /* A441/A43: reserved [218] */ seq_printf(s, "[217] Sleep/awake timeout, s_a_timeout: 0x%02x\n", ext_csd[217]); /* A441/A43: reserved [216] */ seq_printf(s, "[215:212] Sector Count, sec_count: 0x%08x\n", (ext_csd[215] << 24) |(ext_csd[214] << 16) | (ext_csd[213] << 8) | ext_csd[212]); /* A441/A43: reserved [211] */ seq_printf(s, "[210] Minimum Write Performance for 8bit at 52MHz, min_perf_w_8_52: 0x%02x\n", ext_csd[210]); seq_printf(s, "[209] Minimum Read Performance for 8bit at 52MHz, min_perf_r_8_52: 0x%02x\n", ext_csd[209]); seq_printf(s, "[208] Minimum Write Performance for 8bit at 26MHz, for 4bit at 52MHz, min_perf_w_8_26_4_52: 0x%02x\n", ext_csd[208]); seq_printf(s, "[207] Minimum Read Performance for 8bit at 26MHz, for 4bit at 52MHz, min_perf_r_8_26_4_52: 0x%02x\n", ext_csd[207]); seq_printf(s, "[206] Minimum Write Performance for 4bit at 26MHz, min_perf_w_4_26: 0x%02x\n", ext_csd[206]); seq_printf(s, "[205] Minimum Read 
Performance for 4bit at 26MHz, min_perf_r_4_26: 0x%02x\n", ext_csd[205]); /* A441/A43: reserved [204] */ seq_printf(s, "[203] Power class for 26MHz at 3.6V, pwr_cl_26_360: 0x%02x\n", ext_csd[203]); seq_printf(s, "[202] Power class for 52MHz at 3.6V, pwr_cl_52_360: 0x%02x\n", ext_csd[202]); seq_printf(s, "[201] Power class for 26MHz at 1.95V, pwr_cl_26_195: 0x%02x\n", ext_csd[201]); seq_printf(s, "[200] Power class for 52MHz at 1.95V, pwr_cl_52_195: 0x%02x\n", ext_csd[200]); /* A43: reserved [199:198] */ if (ext_csd_rev >= 5) { seq_printf(s, "[199] Partition switching timing, partition_switch_time: 0x%02x\n", ext_csd[199]); seq_printf(s, "[198] Out-of-interrupt busy timing, out_of_interrupt_time: 0x%02x\n", ext_csd[198]); } /* A441/A43: reserved [197] [195] [193] [190] [188] * [186] [184] [182] [180] [176] */ if (ext_csd_rev >= 6) seq_printf(s, "driver_strength: 0x%02x\n", ext_csd[197]); seq_printf(s, "[196] Card type, card_type: 0x%02x\n", ext_csd[196]); seq_printf(s, "[194] CSD structure version, csd_structure: 0x%02x\n", ext_csd[194]); seq_printf(s, "[192] Extended CSD revision, ext_csd_rev: 0x%02x\n", ext_csd[192]); seq_printf(s, "[191] Command set, cmd_set: 0x%02x\n", ext_csd[191]); seq_printf(s, "[189] Command set revision, cmd_set_rev: 0x%02x\n", ext_csd[189]); seq_printf(s, "[187] Power class, power_class: 0x%02x\n", ext_csd[187]); seq_printf(s, "[185] High-speed interface timing, hs_timing: 0x%02x\n", ext_csd[185]); /* bus_width: ext_csd[183] not readable */ seq_printf(s, "[181] Erased memory content, erased_mem_cont: 0x%02x\n", ext_csd[181]); seq_printf(s, "[179] Partition configuration, partition_config: 0x%02x\n", ext_csd[179]); seq_printf(s, "[178] Boot config protection, boot_config_prot: 0x%02x\n", ext_csd[178]); seq_printf(s, "[177] Boot bus width1, boot_bus_conditions: 0x%02x\n", ext_csd[177]); seq_printf(s, "[175] High-density erase group definition, erase_group_def: 0x%02x\n", ext_csd[175]); /* A43: reserved [174:0] */ if (ext_csd_rev >= 5) { 
seq_printf(s, "[174] boot_wp_status: 0x%02x\n", ext_csd[174]); seq_printf(s, "[173] Boot area write protection register, boot_wp: 0x%02x\n", ext_csd[173]); /* A441: reserved [172] */ seq_printf(s, "[171] User area write protection register, user_wp: 0x%02x\n", ext_csd[171]); /* A441: reserved [170] */ seq_printf(s, "[169] FW configuration, fw_config: 0x%02x\n", ext_csd[169]); seq_printf(s, "[168] RPMB Size, rpmb_size_mult: 0x%02x\n", ext_csd[168]); seq_printf(s, "[167] Write reliability setting register, wr_rel_set: 0x%02x\n", ext_csd[167]); seq_printf(s, "[166] Write reliability parameter register, wr_rel_param: 0x%02x\n", ext_csd[166]); /* sanitize_start ext_csd[165]: not readable * bkops_start ext_csd[164]: only writable */ seq_printf(s, "[163] Enable background operations handshake, bkops_en: 0x%02x\n", ext_csd[163]); seq_printf(s, "[162] H/W reset function, rst_n_function: 0x%02x\n", ext_csd[162]); seq_printf(s, "[160] HPI management, hpi_mgmt: 0x%02x\n", ext_csd[161]); seq_printf(s, "[169] Partitioning Support, partitioning_support: 0x%02x\n", ext_csd[160]); seq_printf(s, "[159:157] Max Enhanced Area Size, max_enh_size_mult: 0x%06x\n", (ext_csd[159] << 16) | (ext_csd[158] << 8) |ext_csd[157]); seq_printf(s, "[156] Partitions attribute, partitions_attribute: 0x%02x\n", ext_csd[156]); seq_printf(s, "[155] Partitioning Setting, partition_setting_completed: 0x%02x\n", ext_csd[155]); seq_printf(s, "[154:152] General Purpose Partition Size, gp_size_mult_4: 0x%06x\n", (ext_csd[154] << 16) |(ext_csd[153] << 8) | ext_csd[152]); seq_printf(s, "[151:149] General Purpose Partition Size, gp_size_mult_3: 0x%06x\n", (ext_csd[151] << 16) |(ext_csd[150] << 8) | ext_csd[149]); seq_printf(s, "[148:146] General Purpose Partition Size, gp_size_mult_2: 0x%06x\n", (ext_csd[148] << 16) |(ext_csd[147] << 8) | ext_csd[146]); seq_printf(s, "[145:143] General Purpose Partition Size, gp_size_mult_1: 0x%06x\n", (ext_csd[145] << 16) |(ext_csd[144] << 8) | ext_csd[143]); seq_printf(s, 
"[142:140] Enhanced User Data Area Size, enh_size_mult: 0x%06x\n", (ext_csd[142] << 16) |(ext_csd[141] << 8) | ext_csd[140]); seq_printf(s, "[139:137] Enhanced User Data Start Address, enh_start_addr: 0x%06x\n", (ext_csd[139] << 16) |(ext_csd[138] << 8) | ext_csd[137]); /* A441: reserved [135] */ seq_printf(s, "[134] Bad Block Management mode, sec_bad_blk_mgmnt: 0x%02x\n", ext_csd[134]); /* A441: reserved [133:0] */ } /* B45 */ if (ext_csd_rev >= 6) { int j; /* tcase_support ext_csd[132] not readable */ seq_printf(s, "periodic_wakeup: 0x%02x\n", ext_csd[131]); seq_printf(s, "program_cid_csd_ddr_support: 0x%02x\n", ext_csd[130]); for (j = 127; j >= 64; j--) seq_printf(s, "vendor_specific_field[%d]: 0x%02x\n", j, ext_csd[j]); seq_printf(s, "native_sector_size: 0x%02x\n", ext_csd[63]); seq_printf(s, "use_native_sector: 0x%02x\n", ext_csd[62]); seq_printf(s, "data_sector_size: 0x%02x\n", ext_csd[61]); seq_printf(s, "ini_timeout_emu: 0x%02x\n", ext_csd[60]); seq_printf(s, "class_6_ctrl: 0x%02x\n", ext_csd[59]); seq_printf(s, "dyncap_needed: 0x%02x\n", ext_csd[58]); seq_printf(s, "exception_events_ctrl: 0x%04x\n", (ext_csd[57] << 8) | ext_csd[56]); seq_printf(s, "exception_events_status: 0x%04x\n", (ext_csd[55] << 8) | ext_csd[54]); seq_printf(s, "ext_partitions_attribute: 0x%04x\n", (ext_csd[53] << 8) | ext_csd[52]); for (j = 51; j >= 37; j--) seq_printf(s, "context_conf[%d]: 0x%02x\n", j, ext_csd[j]); seq_printf(s, "packed_command_status: 0x%02x\n", ext_csd[36]); seq_printf(s, "packed_failure_index: 0x%02x\n", ext_csd[35]); seq_printf(s, "power_off_notification: 0x%02x\n", ext_csd[34]); seq_printf(s, "cache_ctrl: 0x%02x\n", ext_csd[33]); /* flush_cache ext_csd[32] not readable */ /*Reserved [31:0] */ } #endif out_free: #ifndef CONFIG_MACH_LGE /* */ kfree(buf); #endif kfree(ext_csd); return err; }
/*
 * Report the write-protect state of the "system" eMMC partition.
 *
 * Walks the partition's write-protect groups by issuing CMD31
 * (SEND_WRITE_PROT_TYPE) and decoding the 8-byte / 32-group bitmap
 * each response carries (2 bits per group).  Copies "protected",
 * "not_protected" or "fail" to @ubuf.
 *
 * Fixes vs. previous version:
 *  - guard against wp_group_size == 0 before dividing (the write-side
 *    sibling, mmc_wr_prot_write, already has this check);
 *  - allocate the 32-byte DMA bounce buffer once instead of per loop
 *    iteration;
 *  - stray double semicolons after `return -EFAULT` removed.
 */
static ssize_t mmc_wr_prot_read(struct file *filp, char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
#define PARTITION_NOT_PROTED 0
#define PARTITION_PROTED 1
	struct mmc_card *card = filp->private_data;
	//used for mmcrequest
	unsigned int wp_group_size;
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;
	unsigned char buf[8];
	unsigned int addr = 0;
	unsigned int init_addr = 0;
	char line_buf[128];
	int i, j, k;
	unsigned char ch;
	unsigned char wp_flag;
	int len = 8;
	unsigned int loop_count = 0;
	unsigned int size = 0;
	unsigned int status_prot = PARTITION_NOT_PROTED;
	struct emmc_partition *p_emmc_partition;

	pr_info("[HW]: eMMC protect driver built on %s @ %s\n", __DATE__, __TIME__);

	/* Locate the "system" partition in the global partition table */
	p_emmc_partition = g_emmc_partition;
	for (i = 0; i < MAX_EMMC_PARTITION_NUM; i++) {
		if (p_emmc_partition->flags == 0)
			break;
		if (strcmp(p_emmc_partition->name, "system") == 0) {
			addr = (unsigned int)(p_emmc_partition->start);
			size = (unsigned int)(p_emmc_partition->size_sectors);
			pr_info("[HW]:%s: partitionname = %s \n", __func__, p_emmc_partition->name);
			pr_info("[HW]:%s: partition start from = 0x%08x \n", __func__, addr);
			pr_info("[HW]:%s: partition size = 0x%08x \n", __func__, size);
		}
		p_emmc_partition++;
	}
	init_addr = addr;

	/* NOTE(review): addr is unsigned, so this check can never fire;
	 * kept for bug-compatibility — presumably it was meant to detect
	 * "system partition not found". */
	if (addr < 0) {
		pr_err("[HW]:%s:invalid addr = 0x%08x.", __func__, addr);
		if (copy_to_user(ubuf, "fail", strlen("fail "))) {
			pr_info("[HW]: %s: copy to user error \n", __func__);
			return -EFAULT;
		}
		return -1;
	}

	/* WP group size in sectors: 512KiB * HC_WP_GRP_SIZE * HC_ERASE_GRP_SIZE / 512 */
	wp_group_size = (512 * 1024) * card->ext_csd.raw_hc_erase_gap_size *
			card->ext_csd.raw_hc_erase_grp_size / 512;
	/* Fix: avoid division by zero below (same guard as mmc_wr_prot_write) */
	if (wp_group_size == 0) {
		pr_err("[HW]:%s:invalid wp_group_size=0x%08x.", __func__, wp_group_size);
		return -2;
	}

	/* Round the start address up to the next WP-group boundary */
	if (addr % wp_group_size != 0) {
		addr = (addr / wp_group_size) * wp_group_size + wp_group_size;
		pr_info("[HW]:%s: setting start area is not muti size of wp_group_size\n", __func__);
	}
	loop_count = (init_addr + size - addr) / wp_group_size;

	pr_info("[HW]:%s: EXT_CSD_HC_WP_GRP_SIZE = 0x%02x. \n", __func__, card->ext_csd.raw_hc_erase_gap_size);
	pr_info("[HW]:%s: EXT_CSD_HC_ERASE_GRP_SIZE = 0x%02x. \n", __func__, card->ext_csd.raw_hc_erase_grp_size);
	pr_info("[HW]:%s: addr = 0x%08x, wp_group_size=0x%08x, size = 0x%08x \n", __func__, addr, wp_group_size, size);
	pr_info("[HW]:%s: loop_count = 0x%08x \n", __func__, loop_count);

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	/* Each CMD31 reports 32 groups; back up so the first response
	 * still covers the (rounded) start address. */
	addr = addr - wp_group_size * 32;

	/* Fix: one bounce buffer for the whole scan, not one per iteration */
	data_buf = kmalloc(32, GFP_KERNEL);	/* dma size 32 */
	if (data_buf == NULL)
		return -ENOMEM;

	for (k = 0; k < loop_count / 32 + 2; k++) {
		mrq.cmd = &cmd;
		mrq.data = &data;

		cmd.opcode = 31;	/* SEND_WRITE_PROT_TYPE */
		cmd.arg = addr;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

		data.blksz = len;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.sg = &sg;
		data.sg_len = 1;

		sg_init_one(&sg, data_buf, len);
		mmc_set_data_timeout(&data, card);

		mmc_claim_host(card->host);
		mmc_wait_for_req(card->host, &mrq);
		mmc_release_host(card->host);

		memcpy(buf, data_buf, len);

		/* Decode 2-bit WP flags, LSB of the last byte first */
		for (i = 7; i >= 0; i--) {
			ch = buf[i];
			for (j = 0; j < 4; j++) {
				wp_flag = ch & 0x3;
				memset(line_buf, 0x00, sizeof(line_buf));
				sprintf(line_buf, "[0x%08x~0x%08x] Write protection group is ",
					addr, addr + wp_group_size - 1);
				switch (wp_flag) {
				case 0:
					strcat(line_buf, "disable");
					break;
				case 1:
					strcat(line_buf, "temporary write protection");
					break;
				case 2:
					strcat(line_buf, "power-on write protection");
					break;
				case 3:
					strcat(line_buf, "permanent write protection");
					break;
				default:
					break;
				}
				pr_info("%s: %s\n", mmc_hostname(card->host), line_buf);
				if (wp_flag == 1) {
					if (is_within_group(addr, init_addr, size, wp_group_size) == 0) {
						status_prot = PARTITION_PROTED;
					}
				}
				addr += wp_group_size;
				ch = ch >> 2;
			}
		}
	}
	kfree(data_buf);

	pr_info("[HW]: %s: end sector = 0x%08x \n", __func__, size + init_addr);

	/* Only the final request's error state is inspected (as before) */
	if (cmd.error) {
		pr_err("[HW]:%s:cmd.error=%d.", __func__, cmd.error);
		if (copy_to_user(ubuf, "fail", strlen("fail "))) {
			pr_info("[HW]: %s: copy to user error \n", __func__);
			return -EFAULT;
		}
		return cmd.error;
	}
	if (data.error) {
		pr_err("[HW]:%s:data.error=%d.", __func__, data.error);
		if (copy_to_user(ubuf, "fail", strlen("fail "))) {
			pr_info("[HW]: %s: copy to user error \n", __func__);
			return -EFAULT;
		}
		return data.error;
	}

	switch (status_prot) {
	case PARTITION_PROTED:
		if (copy_to_user(ubuf, "protected", strlen("protected "))) {
			pr_info("[HW]: %s: copy to user error \n", __func__);
			return -EFAULT;
		}
		pr_info("[HW]: %s: protected \n", __func__);
		break;
	case PARTITION_NOT_PROTED:
		if (copy_to_user(ubuf, "not_protected", strlen("not_protected "))) {
			pr_info("[HW]: %s: copy to user error \n", __func__);
			return -EFAULT;
		}
		pr_info("[HW]: %s: not_protected \n", __func__);
		break;
	default:
		break;
	}
	return 0;
}
static int CPRM_CMD_SecureRW(struct mmc_card *card, unsigned int command, unsigned int dir, unsigned long arg, unsigned char *buff, unsigned int length) { int err; int i = 0; struct mmc_request mrq; struct mmc_command cmd; struct mmc_command stop; struct mmc_data data; struct scatterlist sg; if (command == SD_ACMD25_SECURE_WRITE_MULTI_BLOCK || command == SD_ACMD18_SECURE_READ_MULTI_BLOCK) { return -EINVAL; } memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_APP_CMD; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; //kishore mmc_rpm_hold(card->host, &card->dev); mmc_claim_host(card->host); err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) { printk("CPRM mmc_wait_for_cmd fail = %d ERROR\n", err); mmc_release_host(card->host); mmc_rpm_release(card->host, &card->dev); return (u32)-1; } if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD)) { printk("CPRM mmc_host_is_spi fail ERROR\n"); mmc_release_host(card->host); mmc_rpm_release(card->host, &card->dev); return (u32)-1; } printk("CPRM_CMD_SecureRW: 1, command : %d\n", command); memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = command; if (command == SD_ACMD43_GET_MKB) cmd.arg = arg; else cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; memset(&data, 0, sizeof(struct mmc_data)); data.timeout_ns = 100000000; data.timeout_clks = 0; data.blksz = length; data.blocks = 1; data.flags = dir; data.sg = &sg; data.sg_len = 1; stop.opcode = MMC_STOP_TRANSMISSION; stop.arg = 0; stop.flags = MMC_RSP_R1B | MMC_CMD_AC; memset(&mrq, 0, sizeof(struct mmc_request)); mrq.cmd = &cmd; mrq.data = &data; if (data.blocks == 1) mrq.stop = NULL; else mrq.stop = &stop; printk(KERN_DEBUG"CPRM_CMD_SecureRW: 2\n"); sg_init_one(&sg, buff, length); printk(KERN_DEBUG"CPRM_CMD_SecureRW: 3\n"); mmc_wait_for_req(card->host, &mrq); //kishore mmc_release_host(card->host); mmc_rpm_release(card->host, &card->dev); printk(KERN_DEBUG"CPRM_CMD_SecureRW: 4\n"); i = 0; do { 
printk(KERN_DEBUG"%x", buff[i++]); if (i > 10) break; } while (i < length); printk(KERN_DEBUG"\n"); if (cmd.error) { printk(KERN_DEBUG "%s]cmd.error=%d\n ", __func__, cmd.error); return cmd.error; } if (data.error) { printk(KERN_DEBUG "%s]data.error=%d\n ", __func__, data.error); return data.error; } err = mmc_wait_busy(card); printk(KERN_DEBUG"CPRM_CMD_SecureRW: 5\n"); if (err) return err; return 0; }
// extern struct partition partitions[]; static ssize_t mmc_wr_prot_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct mmc_card *card = filp->private_data; unsigned int wp_group_size; unsigned int set_clear_wp, status; int ret, i; unsigned int addr = 0; unsigned int init_addr = 0; unsigned int loop_count = 0; unsigned int size = 0; char *cmd_buffer; // struct mmc_request mrq = {NULL}; struct mmc_command cmd = {0}; struct emmc_partition *p_emmc_partition; pr_info("[HW]: eMMC protect driver built on %s @ %s\n", __DATE__, __TIME__); if( ubuf == NULL){ pr_info("[HW]:%s: NULL pointer \n", __func__); return -1; } cmd_buffer = kmalloc(sizeof(char)*cnt, GFP_KERNEL); if (cmd_buffer == NULL) { return -ENOMEM; } memset(cmd_buffer, 0, sizeof(char)*cnt); if(copy_from_user(cmd_buffer, ubuf, cnt)){ kfree(cmd_buffer); return -EFAULT; } pr_info("[HW]:%s: input arg = %s, cnt = %d \n", __func__, cmd_buffer, cnt); if(strncmp(cmd_buffer, "disable_prot", strlen("disable_prot")) == 0){ set_clear_wp = 0; }else if(strncmp(cmd_buffer, "enable_prot", strlen("enable_prot")) == 0){ set_clear_wp = 1; } else{ kfree(cmd_buffer); return -1; } // mrq.cmd = &cmd; // mrq.data = &data; wp_group_size =(512 * 1024) * card->ext_csd.raw_hc_erase_gap_size * card->ext_csd.raw_hc_erase_grp_size / 512; p_emmc_partition = g_emmc_partition; for(i = 0; i < MAX_EMMC_PARTITION_NUM; i++){ if(p_emmc_partition->flags == 0) break; if(strcmp(p_emmc_partition->name, "system") == 0){ addr = (unsigned int)(p_emmc_partition->start); size = (unsigned int)(p_emmc_partition->size_sectors); pr_info("[HW]:%s: partitionname = %s \n", __func__, p_emmc_partition->name); pr_info("[HW]:%s: partition start from = 0x%08x \n", __func__, addr); pr_info("[HW]:%s: partition size = 0x%08x \n", __func__, size); break; } p_emmc_partition++; } if(strcmp(p_emmc_partition->name, "") == 0){ pr_info("[HW]:%s: can not find partition system \n", __func__); kfree(cmd_buffer); return -1; } pr_info("[HW]:%s: 
card->ext_csd.raw_hc_erase_gap_size = 0x%02x, card->ext_csd.raw_hc_erase_grp_size = 0x%02x \n", __func__, \ card->ext_csd.raw_hc_erase_gap_size, card->ext_csd.raw_hc_erase_grp_size); pr_info("[HW]:%s, size = 0x%08x, wp_group_size = 0x%08x, unit is block \n", \ __func__, size, wp_group_size); if (wp_group_size == 0) { pr_info("[HW]:%s:invalid wp_group_size=0x%08x.", __func__, wp_group_size); kfree(cmd_buffer); return -2; } init_addr = addr; if(addr % wp_group_size == 0){ }else{ addr = (addr / wp_group_size) * wp_group_size + wp_group_size; pr_info("[HW]:%s: setting start area is not muti size of wp_group_size\n", __func__); } loop_count = (init_addr + size - addr) / wp_group_size; pr_info("[HW]:%s:prot_start_sec_addr = 0x%08x \n", __func__, addr); pr_info("[HW]:%s:loop_count = %x \n", __func__, loop_count); cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; if (set_clear_wp){ cmd.opcode = MMC_SET_WRITE_PROT; }else{ cmd.opcode = MMC_CLR_WRITE_PROT; } for (i = 0; i < loop_count; i++) { /* Sending CMD28 for each WP group size address is in sectors already */ cmd.arg = addr + (i * wp_group_size); pr_info("[HW:%s:loop_count = %d, cmd.arg = 0x%08x, cmd.opcode = %d, \n", __func__, i, cmd.arg, cmd.opcode); mmc_claim_host(card->host); ret = mmc_wait_for_cmd(card->host, &cmd, 3); mmc_release_host(card->host); if (ret) { pr_err("[HW]:%s:mmc_wait_for_cmd return err = %d \n", __func__, ret); kfree(cmd_buffer); return -3; } /* Sending CMD13 to check card status */ do { mmc_claim_host(card->host); ret = mmc_send_status(card, &status); mmc_release_host(card->host); if (R1_CURRENT_STATE(status) == R1_STATE_TRAN) break; }while ((!ret) && (R1_CURRENT_STATE(status) == R1_STATE_PRG)); if (ret) { pr_err("[HW]:%s: mmc_send_status return err = %d \n", __func__, ret); kfree(cmd_buffer); return -4; } } pr_info("[HW]: %s: end sector = 0x%08x \n", __func__, size + init_addr); pr_info("[HW]: %s: size = 0x%08x \n", __func__, size); kfree(cmd_buffer); return size; }
int msmsdcc_probe(struct platform_device *pdev) { struct mmc_platform_data *plat = pdev->dev.platform_data; struct msmsdcc_host *host; struct mmc_host *mmc; struct resource *cmd_irqres = NULL; struct resource *pio_irqres = NULL; struct resource *stat_irqres = NULL; struct resource *memres = NULL; struct resource *dmares = NULL; int ret; /* must have platform data */ if (!plat) { printk(KERN_ERR "%s: Platform data not available\n", __func__); ret = -EINVAL; goto out; } if (pdev->id < 1 || pdev->id > 4) return -EINVAL; if (pdev->resource == NULL || pdev->num_resources < 2) { printk(KERN_ERR "%s: Invalid resource\n", __func__); return -ENXIO; } memres = platform_get_resource(pdev, IORESOURCE_MEM, 0); dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "cmd_irq"); pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "pio_irq"); stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "status_irq"); if (!cmd_irqres || !pio_irqres || !memres) { printk(KERN_ERR "%s: Invalid resource\n", __func__); return -ENXIO; } /* * Setup our host structure */ mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; goto out; } host = mmc_priv(mmc); host->pdev_id = pdev->id; host->plat = plat; host->mmc = mmc; host->cmdpoll = 1; host->base = ioremap(memres->start, PAGE_SIZE); if (!host->base) { ret = -ENOMEM; goto out; } host->cmd_irqres = cmd_irqres; host->pio_irqres = pio_irqres; host->memres = memres; host->dmares = dmares; spin_lock_init(&host->lock); #ifdef CONFIG_MMC_EMBEDDED_SDIO if (plat->embedded_sdio) mmc_set_embedded_sdio_data(mmc, &plat->embedded_sdio->cis, &plat->embedded_sdio->cccr, plat->embedded_sdio->funcs, plat->embedded_sdio->num_funcs); #endif #ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ INIT_WORK(&host->resume_task, do_resume_work); #endif /* * Setup DMA */ msmsdcc_init_dma(host); /* * Setup main peripheral bus clock */ host->pclk = 
clk_get(&pdev->dev, "sdc2_pclk"); if (IS_ERR(host->pclk)) { ret = PTR_ERR(host->pclk); goto host_free; } ret = clk_enable(host->pclk); if (ret) goto pclk_put; host->pclk_rate = clk_get_rate(host->pclk); /* * Setup SDC MMC clock */ host->clk = clk_get(&pdev->dev, "sdc2_clk"); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); goto pclk_disable; } ret = clk_enable(host->clk); if (ret) goto clk_put; ret = clk_set_rate(host->clk, msmsdcc_fmin); if (ret) { printk(KERN_ERR "%s: Clock rate set failed (%d)\n", __func__, ret); goto clk_disable; } host->clk_rate = clk_get_rate(host->clk); host->clks_on = 1; /* * Setup MMC host structure */ mmc->ops = &msmsdcc_ops; mmc->f_min = msmsdcc_fmin; mmc->f_max = msmsdcc_fmax; mmc->ocr_avail = plat->ocr_mask; if (msmsdcc_4bit) mmc->caps |= MMC_CAP_4_BIT_DATA; if (msmsdcc_sdioirq) mmc->caps |= MMC_CAP_SDIO_IRQ; mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; mmc->max_phys_segs = NR_SG; mmc->max_hw_segs = NR_SG; mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */ mmc->max_blk_count = 65536; mmc->max_req_size = 33554432; /* MCI_DATA_LENGTH is 25 bits */ mmc->max_seg_size = mmc->max_req_size; writel(0, host->base + MMCIMASK0); writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */ writel(MCI_IRQENABLE, host->base + MMCIMASK0); host->saved_irq0mask = MCI_IRQENABLE; /* * Setup card detect change */ memset(&host->timer, 0, sizeof(host->timer)); if (stat_irqres && !(stat_irqres->flags & IORESOURCE_DISABLED)) { unsigned long irqflags = IRQF_SHARED | (stat_irqres->flags & IRQF_TRIGGER_MASK); host->stat_irq = stat_irqres->start; ret = request_irq(host->stat_irq, msmsdcc_platform_status_irq, irqflags, DRIVER_NAME " (slot)", host); if (ret) { printk(KERN_ERR "Unable to get slot IRQ %d (%d)\n", host->stat_irq, ret); goto clk_disable; } } else if (plat->register_status_notify) { plat->register_status_notify(msmsdcc_status_notify_cb, host); } else if (!plat->status) printk(KERN_ERR "%s: No card detect facilities 
available\n", mmc_hostname(mmc)); else { init_timer(&host->timer); host->timer.data = (unsigned long)host; host->timer.function = msmsdcc_check_status; host->timer.expires = jiffies + HZ; add_timer(&host->timer); } if (plat->status) { host->oldstat = host->plat->status(mmc_dev(host->mmc)); host->eject = !host->oldstat; } /* * Setup a command timer. We currently need this due to * some 'strange' timeout / error handling situations. */ init_timer(&host->command_timer); host->command_timer.data = (unsigned long) host; host->command_timer.function = msmsdcc_command_expired; ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); if (ret) goto stat_irq_free; ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host); if (ret) goto cmd_irq_free; mmc_set_drvdata(pdev, mmc); mmc_claim_host(mmc); printk(KERN_INFO "%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n", mmc_hostname(mmc), (unsigned long long)memres->start, (unsigned int) cmd_irqres->start, (unsigned int) host->stat_irq, host->dma.channel); printk(KERN_INFO "%s: 4 bit data mode %s\n", mmc_hostname(mmc), (mmc->caps & MMC_CAP_4_BIT_DATA ? 
"enabled" : "disabled")); printk(KERN_INFO "%s: MMC clock %u -> %u Hz, PCLK %u Hz\n", mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate); printk(KERN_INFO "%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject); printk(KERN_INFO "%s: Power save feature enable = %d\n", mmc_hostname(mmc), msmsdcc_pwrsave); if (host->dma.channel != -1) { printk(KERN_INFO "%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n", mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr); printk(KERN_INFO "%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n", mmc_hostname(mmc), host->dma.cmd_busaddr, host->dma.cmdptr_busaddr); } else printk(KERN_INFO "%s: PIO transfer enabled\n", mmc_hostname(mmc)); if (host->timer.function) printk(KERN_INFO "%s: Polling status mode enabled\n", mmc_hostname(mmc)); #if defined(CONFIG_DEBUG_FS) msmsdcc_dbg_createhost(host); #endif return 0; cmd_irq_free: free_irq(cmd_irqres->start, host); stat_irq_free: if (host->stat_irq) free_irq(host->stat_irq, host); clk_disable: clk_disable(host->clk); clk_put: clk_put(host->clk); pclk_disable: clk_disable(host->pclk); pclk_put: clk_put(host->pclk); host_free: mmc_free_host(mmc); out: return ret; }
/*
 * Take exclusive ownership of the MMC host behind @host for the
 * auto-tuning (autok) path, logging the claim for debugging.
 */
void autok_claim_host(struct msdc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	mmc_claim_host(mmc);
	pr_debug("[%s] msdc%d host claimed\n", __func__, host->id);
}
/*
 * Delayed-work handler that (re)scans a host for card insertion/removal.
 *
 * If a bus handler is already attached, only re-checks card presence;
 * otherwise powers the host up and probes, in order: SDIO, then SD,
 * then MMC.  Holds a wakelock briefly after a change so userspace can
 * react, and (under CONFIG_MMC_PARANOID_SD_INIT) retries a failed
 * probe up to `retries` times.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err = 0;
	int extend_wakelock = 0;
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	/* Number of full re-probe attempts after a non-ENOMEDIUM failure */
	int retries = 2;
#endif

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
		host->bus_ops->detect(host);

	/* If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond */
	if (host->bus_dead)
		extend_wakelock = 1;

	mmc_bus_put(host);

	/* Re-take the bus reference: detect() above may have torn the bus
	 * down, so bus_ops must be re-checked under the reference. */
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	/* Optional slot-level card-detect: 0 means no card inserted */
	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

retry:
	mmc_claim_host(host);

	mmc_power_up(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		/* Attach failure: power the slot back down.
		 * NOTE(review): no mmc_release_host() on this path —
		 * presumably mmc_attach_sdio() releases the host itself;
		 * confirm against the attach implementations. */
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/* Nothing answered: release the host and power the slot off */
	mmc_release_host(host);
	mmc_power_off(host);

out:
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	/* Retry the whole probe unless the failure was "no medium" */
	if (err && (err != -ENOMEDIUM) && retries) {
		printk(KERN_INFO "%s: Re-scan card rc = %d (retries = %d)\n",
		       mmc_hostname(host), err, retries);
		retries--;
		goto retry;
	}
#endif
	/* Hold the wakelock 5s after a change; otherwise drop it now */
	if (extend_wakelock)
		wake_lock_timeout(&host->wakelock, 5 * HZ);
	else
		wake_unlock(&host->wakelock);

	/* Hosts without card-detect hardware are polled every second */
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
static int simple_sd_ioctl_single_rw(struct msdc_ioctl *msdc_ctl) { char l_buf[512]; struct scatterlist msdc_sg; struct mmc_data msdc_data; struct mmc_command msdc_cmd; struct mmc_request msdc_mrq; struct msdc_host *host_ctl; int ret = 0; if(!msdc_ctl) return -EINVAL; host_ctl = mtk_msdc_host[msdc_ctl->host_num]; BUG_ON(!host_ctl); BUG_ON(!host_ctl->mmc); BUG_ON(!host_ctl->mmc->card); #ifdef MTK_MSDC_USE_CACHE if (msdc_ctl->iswrite && mmc_card_mmc(host_ctl->mmc->card) && (host_ctl->mmc->card->ext_csd.cache_ctrl & 0x1)) return simple_sd_ioctl_multi_rw(msdc_ctl); #endif mmc_claim_host(host_ctl->mmc); #if DEBUG_MMC_IOCTL pr_debug("user want access %d partition\n", msdc_ctl->partition); #endif ret = mmc_send_ext_csd(host_ctl->mmc->card, l_buf); if (ret) { pr_debug("mmc_send_ext_csd error, single rw\n"); goto single_end; } #ifdef CONFIG_MTK_EMMC_SUPPORT switch (msdc_ctl->partition) { case EMMC_PART_BOOT1: if (0x1 != (l_buf[179] & 0x7)) { /* change to access boot partition 1 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x1; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; case EMMC_PART_BOOT2: if (0x2 != (l_buf[179] & 0x7)) { /* change to access boot partition 2 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x2; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; default: /* make sure access partition is user data area */ if (0 != (l_buf[179] & 0x7)) { /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; } #endif if (msdc_ctl->total_size > 512) { msdc_ctl->result = -1; goto single_end; } #if DEBUG_MMC_IOCTL pr_debug("start MSDC_SINGLE_READ_WRITE !!\n"); #endif memset(&msdc_data, 0, sizeof(struct mmc_data)); memset(&msdc_mrq, 0, sizeof(struct mmc_request)); memset(&msdc_cmd, 0, sizeof(struct mmc_command)); msdc_mrq.cmd = &msdc_cmd; msdc_mrq.data = &msdc_data; if (msdc_ctl->trans_type) dma_force[host_ctl->id] = FORCE_IN_DMA; else dma_force[host_ctl->id] = 
FORCE_IN_PIO; if (msdc_ctl->iswrite) { msdc_data.flags = MMC_DATA_WRITE; msdc_cmd.opcode = MMC_WRITE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, 512)) { dma_force[host_ctl->id] = FORCE_NOTHING; ret = -EFAULT; goto single_end; } } else { /* called from other kernel module */ memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, 512); } } else { msdc_data.flags = MMC_DATA_READ; msdc_cmd.opcode = MMC_READ_SINGLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; memset(sg_msdc_multi_buffer, 0, 512); } msdc_cmd.arg = msdc_ctl->address; if (!mmc_card_blockaddr(host_ctl->mmc->card)) { pr_debug("the device is used byte address!\n"); msdc_cmd.arg <<= 9; } msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; msdc_data.stop = NULL; msdc_data.blksz = 512; msdc_data.sg = &msdc_sg; msdc_data.sg_len = 1; #if DEBUG_MMC_IOCTL pr_debug("single block: ueser buf address is 0x%p!\n", msdc_ctl->buffer); #endif sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size); mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card); mmc_wait_for_req(host_ctl->mmc, &msdc_mrq); if (!msdc_ctl->iswrite) { if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_to_user(msdc_ctl->buffer, sg_msdc_multi_buffer, 512)) { dma_force[host_ctl->id] = FORCE_NOTHING; ret = -EFAULT; goto single_end; } } else { /* called from other kernel module */ memcpy(msdc_ctl->buffer, sg_msdc_multi_buffer, 512); } } if (msdc_ctl->partition) { ret = mmc_send_ext_csd(host_ctl->mmc->card, l_buf); if (ret) { pr_debug("mmc_send_ext_csd error, single rw2\n"); goto single_end; } if (l_buf[179] & 0x7) { /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } } single_end: mmc_release_host(host_ctl->mmc); if (ret) msdc_ctl->result = ret; if (msdc_cmd.error) msdc_ctl->result = msdc_cmd.error; if (msdc_data.error) msdc_ctl->result = 
msdc_data.error; else msdc_ctl->result = 0; dma_force[host_ctl->id] = FORCE_NOTHING; return msdc_ctl->result; }
int simple_sd_ioctl_multi_rw(struct msdc_ioctl* msdc_ctl) { char l_buf[512]; struct scatterlist msdc_sg; struct mmc_data msdc_data; struct mmc_command msdc_cmd; struct mmc_command msdc_stop; struct mmc_request msdc_mrq; struct msdc_host *host_ctl; host_ctl = mtk_msdc_host[msdc_ctl->host_num]; BUG_ON(!host_ctl); BUG_ON(!host_ctl->mmc); BUG_ON(!host_ctl->mmc->card); mmc_claim_host(host_ctl->mmc); #if DEBUG_MMC_IOCTL printk("user want access %d partition\n",msdc_ctl->partition); #endif mmc_send_ext_csd(host_ctl->mmc->card, l_buf); switch (msdc_ctl->partition){ case BOOT_PARTITION_1: if (0x1 != (l_buf[179] & 0x7)){ /* change to access boot partition 1 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x1; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; case BOOT_PARTITION_2: if (0x2 != (l_buf[179] & 0x7)){ /* change to access boot partition 2 */ l_buf[179] &= ~0x7; l_buf[179] |= 0x2; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; default: /* make sure access partition is user data area */ if (0 != (l_buf[179] & 0x7)){ /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } break; } if(msdc_ctl->total_size > 64*1024){ msdc_ctl->result = -1; return msdc_ctl->result; } memset(&msdc_data, 0, sizeof(struct mmc_data)); memset(&msdc_mrq, 0, sizeof(struct mmc_request)); memset(&msdc_cmd, 0, sizeof(struct mmc_command)); memset(&msdc_stop, 0, sizeof(struct mmc_command)); msdc_mrq.cmd = &msdc_cmd; msdc_mrq.data = &msdc_data; if(msdc_ctl->trans_type) dma_force[host_ctl->id] = FORCE_IN_DMA; else dma_force[host_ctl->id] = FORCE_IN_PIO; if (msdc_ctl->iswrite){ msdc_data.flags = MMC_DATA_WRITE; msdc_cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_from_user(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size)){ dma_force[host_ctl->id] = FORCE_NOTHING; return -EFAULT; } } else { 
/* called from other kernel module */ memcpy(sg_msdc_multi_buffer, msdc_ctl->buffer, msdc_ctl->total_size); } } else { msdc_data.flags = MMC_DATA_READ; msdc_cmd.opcode = MMC_READ_MULTIPLE_BLOCK; msdc_data.blocks = msdc_ctl->total_size / 512; memset(sg_msdc_multi_buffer, 0 , msdc_ctl->total_size); } msdc_cmd.arg = msdc_ctl->address; if (!mmc_card_blockaddr(host_ctl->mmc->card)){ printk("this device use byte address!!\n"); msdc_cmd.arg <<= 9; } msdc_cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; msdc_stop.opcode = MMC_STOP_TRANSMISSION; msdc_stop.arg = 0; msdc_stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; msdc_data.stop = &msdc_stop; msdc_data.blksz = 512; msdc_data.sg = &msdc_sg; msdc_data.sg_len = 1; #if DEBUG_MMC_IOCTL printk("total size is %d\n",msdc_ctl->total_size); #endif sg_init_one(&msdc_sg, sg_msdc_multi_buffer, msdc_ctl->total_size); mmc_set_data_timeout(&msdc_data, host_ctl->mmc->card); mmc_wait_for_req(host_ctl->mmc, &msdc_mrq); if (!msdc_ctl->iswrite){ if (MSDC_CARD_DUNM_FUNC != msdc_ctl->opcode) { if (copy_to_user(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size)){ dma_force[host_ctl->id] = FORCE_NOTHING; return -EFAULT; } } else { /* called from other kernel module */ memcpy(msdc_ctl->buffer, sg_msdc_multi_buffer, msdc_ctl->total_size); } } if (msdc_ctl->partition){ mmc_send_ext_csd(host_ctl->mmc->card,l_buf); if (l_buf[179] & 0x7) { /* set back to access user area */ l_buf[179] &= ~0x7; l_buf[179] |= 0x0; mmc_switch(host_ctl->mmc->card, 0, 179, l_buf[179], 1000); } } mmc_release_host(host_ctl->mmc); if (msdc_cmd.error) msdc_ctl->result = msdc_cmd.error; if (msdc_data.error){ msdc_ctl->result = msdc_data.error; } else { msdc_ctl->result = 0; } dma_force[host_ctl->id] = FORCE_NOTHING; return msdc_ctl->result; }
/*
 * Starting point for SD card init.
 */
/*
 * mmc_attach_sd() - bind the SD bus operations to @host and initialise
 * the inserted card.
 *
 * Called with the host claimed.  On success the host has been released
 * and the card registered with the device core; on failure the bus is
 * detached, the host released, and a negative error code is returned.
 */
int mmc_attach_sd(struct mmc_host *host, u32 ocr)
{
	int err;
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	int retries;
#endif

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	mmc_attach_bus(host, &mmc_sd_ops);

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		mmc_go_idle(host);

		err = mmc_spi_read_ocr(host, 0, &ocr);
		if (err)
			goto err;
	}

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		printk(KERN_WARNING "%s: card claims to support voltages "
		       "below the defined range. These will be ignored.\n",
		       mmc_hostname(host));
		ocr &= ~0x7F;
	}

	if (ocr & MMC_VDD_165_195) {
		printk(KERN_WARNING "%s: SD card claims to support the "
		       "incompletely defined 'low voltage range'. This "
		       "will be ignored.\n", mmc_hostname(host));
		ocr &= ~MMC_VDD_165_195;
	}

	host->ocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage(s) of the card(s)?
	 */
	if (!host->ocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
#ifdef CONFIG_MMC_PARANOID_SD_INIT
	/* Retry flaky init up to 5 times before giving up. */
	retries = 5;
	while (retries) {
		err = mmc_sd_init_card(host, host->ocr, NULL);
		if (err) {
			retries--;
			continue;
		}
		break;
	}

	if (!retries) {
		printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
		       mmc_hostname(host), err);
		goto err;
	}
#else
	err = mmc_sd_init_card(host, host->ocr, NULL);
	if (err)
		goto err;
#endif

	/* Drop the claim before registering: mmc_add_card may sleep and
	 * trigger callbacks that claim the host themselves. */
	mmc_release_host(host);

	err = mmc_add_card(host->card);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	/* Registration failed: unwind card, then re-claim so the shared
	 * err: path can detach the bus and release symmetrically. */
	mmc_remove_card(host->card);
	host->card = NULL;
	mmc_claim_host(host);
err:
	mmc_detach_bus(host);
	mmc_release_host(host);

	printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
	       mmc_hostname(host), err);

	return err;
}
/*
 * mmc_rescan() - S3C64XX board-specific card-detect work handler.
 *
 * Reads the external card-detect GPIO (GPN6) directly; on boards with
 * system_rev >= 0x20 the line is active-low, so the sense is inverted.
 * When no bus is bound it probes SDIO/SD/MMC in order; otherwise it
 * re-runs the bus detect hook and, for host 0 with a pending rescan
 * retry, re-arms detection and manages the SD-card scan wakelock.
 * Also unmasks external interrupt 6 when a card is present.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	unsigned int eint0msk = 0;
	int ext_CD_int = 0;

	/* Sample the card-detect GPIO (GPN6). */
	ext_CD_int = readl(S3C64XX_GPNDAT);
	ext_CD_int &= 0x40;	/* GPN6 */
	if (system_rev >= 0x20)
		ext_CD_int = !ext_CD_int;	/* active-low on newer revs */

	mmc_bus_get(host);
	printk("kimhyuns mmc_rescan hostindex=%d\n", host->index);
	if (host->bus_ops == NULL) {
		printk("kimhyuns mmc_rescan host->bus_ops == NULL \n");
		/*
		 * Only we can add a new handler, so it's safe to
		 * release the lock here.
		 */
		mmc_bus_put(host);

		/* Optional card-detect callback: 0 means "no card". */
		if (host->ops->get_cd && host->ops->get_cd(host) == 0)
			goto out;

		mmc_claim_host(host);

		mmc_power_up(host);
		mmc_go_idle(host);

		mmc_send_if_cond(host, host->ocr_avail);

		/*
		 * First we search for SDIO...
		 */
		err = mmc_send_io_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_sdio(host, ocr))
				mmc_power_off(host);
			goto out;
		}

		/*
		 * ...then normal SD...
		 */
		err = mmc_send_app_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_sd(host, ocr))
				mmc_power_off(host);
			goto out;
		}

		/*
		 * ...and finally MMC.
		 */
		err = mmc_send_op_cond(host, 0, &ocr);
		if (!err) {
			if (mmc_attach_mmc(host, ocr))
				mmc_power_off(host);
			goto out;
		}

		mmc_release_host(host);
		mmc_power_off(host);
	} else {
		printk("kimhyuns mmc_rescan else \n");
		/* A bus is bound: just re-run its detect hook. */
		if (host->bus_ops->detect && !host->bus_dead)
			host->bus_ops->detect(host);

		mmc_bus_put(host);
		if (host->index == 0 && g_rescan_retry) {
			/* Re-sample the CD GPIO; if a card is present,
			 * schedule another detect pass and drop the
			 * scan wakelock. */
			ext_CD_int = readl(S3C64XX_GPNDAT);
			ext_CD_int &= 0x40;	/* GPN6 */
			if (system_rev >= 0x20)
				ext_CD_int = !ext_CD_int;
			if (!ext_CD_int) {
				mmc_detect_change(host, msecs_to_jiffies(200));
				g_rescan_retry = 0;
				/* if (is_inited_wake_lock == 1)
				 * -- modified in probe (kimhyuns) */
				{
					printk("kimhyuns mmc_rescan unlock 1 \n");
					wake_unlock(&sdcard_scan_wake_lock);
				}
			}
		}
	}
	/* Card present: unmask external interrupt 6 (card-detect EINT). */
	if (!ext_CD_int) {
		eint0msk = __raw_readl(S3C64XX_EINT0MASK);
		eint0msk &= 0x0FFFFFFF & ~(1 << 6);
		__raw_writel(eint0msk, S3C64XX_EINT0MASK);
	}
out:
	if (host->index == 0 && g_rescan_retry) {
		g_rescan_retry = 0;
		/* if (is_inited_wake_lock == 1)
		 * -- modified in probe (kimhyuns) */
		{
			printk("kimhyuns mmc_rescan unlock 2 \n");
			/* wake_unlock(&sdcard_scan_wake_lock); */
			wake_lock_timeout(&sdcard_scan_wake_lock,
					  msecs_to_jiffies(5000));
			/* give some timer for the media scanner to run */
		}
	}
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
/*
 * sdio_irq_thread() - per-host kernel thread that dispatches SDIO card
 * interrupts.
 *
 * Hosts with MMC_CAP_SDIO_IRQ sleep indefinitely and are woken by the
 * controller; other hosts poll the card with an adaptive period.  The
 * thread exits when kthread_should_stop() is set or when claiming the
 * host is aborted via host->sdio_irq_thread_abort.
 */
static int sdio_irq_thread(void *_host)
{
	struct mmc_host *host = _host;
	struct sched_param param = { .sched_priority = 1 };
	unsigned long period, idle_period;
	int ret;

	/* Run at real-time priority so IRQ dispatch is not starved. */
	sched_setscheduler(current, SCHED_FIFO, &param);

	/*
	 * We want to allow for SDIO cards to work even on non SDIO
	 * aware hosts.  One thing that non SDIO host cannot do is
	 * asynchronous notification of pending SDIO card interrupts
	 * hence we poll for them in that case.
	 */
	idle_period = msecs_to_jiffies(10);
	period = (host->caps & MMC_CAP_SDIO_IRQ) ?
		MAX_SCHEDULE_TIMEOUT : idle_period;

	pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
		 mmc_hostname(host), period);

	do {
		/*
		 * We claim the host here on drivers behalf for a couple
		 * reasons:
		 *
		 * 1) it is already needed to retrieve the CCCR_INTx;
		 * 2) we want the driver(s) to clear the IRQ condition ASAP;
		 * 3) we need to control the abort condition locally.
		 *
		 * Just like traditional hard IRQ handlers, we expect SDIO
		 * IRQ handlers to be quick and to the point, so that the
		 * holding of the host lock does not cover too much work
		 * that doesn't require that lock to be held.
		 */
		ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
		if (ret)
			break;
		ret = process_sdio_pending_irqs(host->card);
		mmc_release_host(host);

		/*
		 * Give other threads a chance to run in the presence of
		 * errors.
		 */
		if (ret < 0) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule_timeout(HZ);
			set_current_state(TASK_RUNNING);
		}

		/*
		 * Adaptive polling frequency based on the assumption
		 * that an interrupt will be closely followed by more.
		 * This has a substantial benefit for network devices.
		 */
		if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
			/* IRQs serviced: poll faster; idle: back off
			 * toward the idle period. */
			if (ret > 0)
				period /= 2;
			else {
				period++;
				if (period > idle_period)
					period = idle_period;
			}
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (host->caps & MMC_CAP_SDIO_IRQ)
			host->ops->enable_sdio_irq(host, 1);
		if (!kthread_should_stop())
			schedule_timeout(period);
		set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	/* Mask card interrupts at the host before exiting. */
	if (host->caps & MMC_CAP_SDIO_IRQ)
		host->ops->enable_sdio_irq(host, 0);

	pr_debug("%s: IRQ thread exiting with code %d\n",
		 mmc_hostname(host), ret);

	return ret;
}

/*
 * sdio_card_irq_get() - take one reference on the SDIO IRQ machinery,
 * starting the IRQ thread on the first reference.
 * Caller must hold the host claim (enforced by WARN_ON).
 */
static int sdio_card_irq_get(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);

	if (!host->sdio_irqs++) {
		atomic_set(&host->sdio_irq_thread_abort, 0);
		host->sdio_irq_thread =
			kthread_run(sdio_irq_thread, host, "ksdioirqd/%s",
				    mmc_hostname(host));
		if (IS_ERR(host->sdio_irq_thread)) {
			int err = PTR_ERR(host->sdio_irq_thread);

			/* Thread failed to start: undo the refcount. */
			host->sdio_irqs--;
			return err;
		}
	}

	return 0;
}

/*
 * sdio_card_irq_put() - drop one reference; stop the IRQ thread when
 * the last user goes away.
 *
 * The CONFIG_HUAWEI_WIFI_SDCC variant drops the host claim around
 * kthread_stop() so the exiting thread can finish its own
 * claim/release cycle without deadlocking against us.
 */
static int sdio_card_irq_put(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);
	BUG_ON(host->sdio_irqs < 1);

	if (!--host->sdio_irqs) {
		atomic_set(&host->sdio_irq_thread_abort, 1);
#ifndef CONFIG_HUAWEI_WIFI_SDCC
		kthread_stop(host->sdio_irq_thread);
#else
		if (host->claimed) {
			mmc_release_host(host);
			kthread_stop(host->sdio_irq_thread);
			mmc_claim_host(host);
		} else {
			kthread_stop(host->sdio_irq_thread);
		}
#endif
	}

	return 0;
}

/**
 *	sdio_claim_irq - claim the IRQ for a SDIO function
 *	@func: SDIO function
 *	@handler: IRQ handler callback
 *
 *	Claim and activate the IRQ for the given SDIO function. The provided
 *	handler will be called when that IRQ is asserted.  The host is always
 *	claimed already when the handler is called so the handler must not
 *	call sdio_claim_host() nor sdio_release_host().
*/ int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler) { int ret; unsigned char reg; BUG_ON(!func); BUG_ON(!func->card); pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func)); if (func->irq_handler) { pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func)); return -EBUSY; } #ifdef CONFIG_HUAWEI_WIFI_SDCC func->irq_handler = handler; ret = sdio_card_irq_get(func->card); if (ret) func->irq_handler = NULL; #endif ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, ®); if (ret) return ret; reg |= 1 << func->num; reg |= 1; /* Master interrupt enable */ ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL); if (ret) return ret; #ifndef CONFIG_HUAWEI_WIFI_SDCC func->irq_handler = handler; ret = sdio_card_irq_get(func->card); if (ret) func->irq_handler = NULL; #endif return ret; }
/*
 * _mmc_cmd_log_dump() - dump the host's MMC command log ring buffer,
 * either into a seq_file (when @s is non-NULL) or to the kernel log.
 *
 * Each record is cmd, arg, then optionally a timestamp (MODE_TIME)
 * and/or a response word (MODE_RESP) depending on
 * host->mmc_cmd_log_mode.  The walk starts at mmc_cmd_log_idx — the
 * next slot to be written, hence the oldest record — and wraps once
 * around the buffer.
 */
static int _mmc_cmd_log_dump(struct mmc_host *host, struct seq_file *s)
{
	int i;

	if (!host->mmc_cmd_log)
		return 0;

	/* Claim the host so the log is not appended to while we walk it. */
	mmc_claim_host(host);
	i = host->mmc_cmd_log_idx;	/* next slot should be the oldest */
	do {
		u32 cmd = host->mmc_cmd_log[i++];
		u32 arg = host->mmc_cmd_log[i++];
		u32 resp = 0;
		u32 when = 0;

		if (host->mmc_cmd_log_mode & MMC_CMD_LOG_MODE_TIME)
			when = host->mmc_cmd_log[i++];
		if (host->mmc_cmd_log_mode & MMC_CMD_LOG_MODE_RESP)
			resp = host->mmc_cmd_log[i++];
		if (i >= host->mmc_cmd_log_len)
			i = 0;
		/* Skip empty or partial records */
		if (cmd == UINT_MAX || resp == UINT_MAX)
			continue;
		/* Absolute-timestamp prefix (TIME set, DELTA clear). */
		if ((host->mmc_cmd_log_mode & MMC_CMD_LOG_MODE_TIME) &&
		    !(host->mmc_cmd_log_mode & MMC_CMD_LOG_MODE_DELTA)) {
			if (s)
				seq_printf(s, "[%u] ", when);
			else
				pr_info("[%u] ", when);
		}
		if (s)
			seq_printf(s, "CMD%d: 0x%08X", cmd & 0x3F, arg);
		else
			pr_info("CMD%d: 0x%08X", cmd & 0x3F, arg);
		if (host->mmc_cmd_log_mode & MMC_CMD_LOG_MODE_RESP) {
			if (s)
				seq_printf(s, " R:0x%08X", resp);
			else
				pr_info(" R:0x%08X", resp);
		}
		/* Delta mode: elapsed time (ns) appended to the record. */
		if ((host->mmc_cmd_log_mode & MMC_CMD_LOG_MODE_TIME) &&
		    (host->mmc_cmd_log_mode & MMC_CMD_LOG_MODE_DELTA)) {
			if (s)
				seq_printf(s, " %uns", when);
			else
				pr_info(" %uns", when);
		}
		if (s)
			seq_printf(s, "\n");
		else
			pr_info("\n");
	} while (i != host->mmc_cmd_log_idx);
	mmc_release_host(host);

	return 0;
}
/*
 * mmc_rescan() - delayed-work handler that detects card insertion and
 * removal.
 *
 * Probes the slot for SDIO (only when UCONFIG_DDE_MMC_HAVE_SDIO is
 * set), then SD, then MMC.  A module-wide wakelock is extended briefly
 * whenever a card appeared or disappeared so userspace can react.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	int extend_wakelock = 0;

	mmc_bus_get(host);

	/* if there is a card registered, check whether it is still present */
	if ((host->bus_ops != NULL) && host->bus_ops->detect &&
	    !host->bus_dead) {
		host->bus_ops->detect(host);

		/* If the card was removed the bus will be marked
		 * as dead - extend the wakelock so userspace
		 * can respond */
		if (host->bus_dead)
			extend_wakelock = 1;
	}

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	/* Optional card-detect callback: 0 means "no card", skip probing. */
	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

#ifdef UCONFIG_DDE_MMC_HAVE_SDIO
	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}
#endif

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/* Nothing answered: release the host and power the slot down. */
	mmc_release_host(host);
	mmc_power_off(host);

out:
	if (extend_wakelock)
		wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
	else
		wake_unlock(&mmc_delayed_work_wake_lock);

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
/*
 * mmc_rescan() - delayed-work card-detect handler (TI/WiLink variant).
 *
 * Bails out immediately when rescans are administratively disabled
 * (host->rescan_disable, protected by host->lock).  Otherwise re-runs
 * the bound bus's detect hook, or probes SDIO/SD/MMC in order on an
 * empty slot.  Note the sdio_reset() before mmc_go_idle(): it clears
 * any stale SDIO state before CMD0.
 */
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	u32 ocr;
	int err;
	unsigned long flags;
	int extend_wakelock = 0;

	/* Rescan can be disabled (e.g. during suspend); check atomically. */
	spin_lock_irqsave(&host->lock, flags);
	if (host->rescan_disable) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	/* The #ifdef ladder below selects one guard condition for the
	 * single detect call: TIWLAN and non-WILINK builds ignore
	 * removability; WILINK_NLCP builds skip non-removable cards. */
#ifdef CONFIG_TIWLAN_SDIO
	if ((host->bus_ops != NULL) && host->bus_ops->detect &&
	    !host->bus_dead)
#else
#ifndef CONFIG_WILINK_NLCP
	if ((host->bus_ops != NULL) && host->bus_ops->detect &&
	    !host->bus_dead)
	/*
	 * if ((host->bus_ops != NULL) && host->bus_ops->detect &&
	 *     !host->bus_dead && mmc_card_is_removable(host))
	 */
#else
	if ((host->bus_ops != NULL) && host->bus_ops->detect &&
	    !host->bus_dead && !(host->caps & MMC_CAP_NONREMOVABLE))
#endif
#endif
		host->bus_ops->detect(host);

	/* If the card was removed the bus will be marked
	 * as dead - extend the wakelock so userspace
	 * can respond */
	if (host->bus_dead)
		extend_wakelock = 1;

	mmc_bus_put(host);

	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/* detect a newly inserted card */

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	/* Optional card-detect callback: 0 means "no card", skip probing. */
	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);

	mmc_power_up(host);
	sdio_reset(host);	/* clear stale SDIO state before CMD0 */
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/*
	 * First we search for SDIO...
	 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sdio(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...then normal SD...
	 */
	err = mmc_send_app_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_sd(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/*
	 * ...and finally MMC.
	 */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (!err) {
		if (mmc_attach_mmc(host, ocr))
			mmc_power_off(host);
		extend_wakelock = 1;
		goto out;
	}

	/* Nothing answered: release the host and power the slot down. */
	mmc_release_host(host);
	mmc_power_off(host);

out:
	if (extend_wakelock)
		wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
	else
		wake_unlock(&mmc_delayed_work_wake_lock);

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
static int mmc_ext_csd_open(struct inode *inode, struct file *filp) #endif { #ifdef CONFIG_MACH_LGE /* */ struct mmc_card *card = s->private; #else struct mmc_card *card = inode->i_private; char *buf; ssize_t n = 0; #endif u8 *ext_csd; #ifdef CONFIG_MACH_LGE /* */ u8 ext_csd_rev; int err; const char *str; char *buf_for_health_report; char *buf_for_firmwware_version; ssize_t output = 0; int cnt; #else int err, i; buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); if (!buf) return -ENOMEM; #endif ext_csd = kmalloc(512, GFP_KERNEL); if (!ext_csd) { err = -ENOMEM; goto out_free; } mmc_rpm_hold(card->host, &card->dev); mmc_claim_host(card->host); err = mmc_send_ext_csd(card, ext_csd); mmc_release_host(card->host); mmc_rpm_release(card->host, &card->dev); if (err) goto out_free; #ifdef CONFIG_MACH_LGE /* */ ext_csd_rev = ext_csd[192]; #else for (i = 511; i >= 0; i--) n += sprintf(buf + n, "%02x", ext_csd[i]); n += sprintf(buf + n, "\n"); BUG_ON(n != EXT_CSD_STR_LEN); filp->private_data = buf; kfree(ext_csd); return 0; #endif #ifdef CONFIG_MACH_LGE /* */ switch (ext_csd_rev) { case 7: str = "5.0"; break; case 6: str = "4.5"; break; case 5: str = "4.41"; break; case 3: str = "4.3"; break; case 2: str = "4.2"; break; case 1: str = "4.1"; break; case 0: str = "4.0"; break; default: goto out_free; } seq_printf(s, "Extended CSD rev 1.%d (MMC %s)\n", ext_csd_rev, str); if (ext_csd_rev < 3) goto out_free; /* No ext_csd */ /* Parse the Extended CSD registers. * Reserved bit should be read as "0" in case of spec older * than A441. 
*/ if (ext_csd_rev >= 7) { seq_printf(s, "[505] Extended Security Commands Error, ext_security_err: 0x%02x\n", ext_csd[505]); } seq_printf(s, "[504] Supported Command Sets, s_cmd_set: 0x%02x\n", ext_csd[504]); seq_printf(s, "[503] HPI features, hpi_features: 0x%02x\n", ext_csd[503]); seq_printf(s, "[502] Background operations support, bkops_support: 0x%02x\n", ext_csd[502]); if (ext_csd_rev >= 6) { seq_printf(s, "[501] Max packed read commands, max_packed_reads: 0x%02x\n", ext_csd[501]); seq_printf(s, "[500] Max packed write commands, max_packed_writes: 0x%02x\n", ext_csd[500]); seq_printf(s, "[499] Data Tag Support, data_tag_support: 0x%02x\n", ext_csd[499]); seq_printf(s, "[498] Tag Unit Size, tag_unit_size: 0x%02x\n", ext_csd[498]); seq_printf(s, "[497] Tag Resources Size, tag_res_size: 0x%02x\n", ext_csd[497]); seq_printf(s, "[496] Context management capabilities, context_capabilities: 0x%02x\n", ext_csd[496]); seq_printf(s, "[495] Large Unit size, large_unit_size_m1: 0x%02x\n", ext_csd[495]); seq_printf(s, "[494] Extended partitions attribute support, ext_support: 0x%02x\n", ext_csd[494]); if (ext_csd_rev >= 7) { buf_for_health_report = kmalloc(66, GFP_KERNEL); if (!buf_for_health_report) return -ENOMEM; buf_for_firmwware_version = kmalloc(18, GFP_KERNEL); if (!buf_for_firmwware_version) return -ENOMEM; seq_printf(s, "[493] Supported modes, supported_modes: 0x%02x\n", ext_csd[493]); seq_printf(s, "[492] Ffu features, ffu_features: 0x%02x\n", ext_csd[492]); seq_printf(s, "[491] Operation codes timeout, operation_code_timeout: 0x%02x\n", ext_csd[491]); seq_printf(s, "[490:487] Ffu features, ffu_features: 0x%08x\n", (ext_csd[487] << 0) | (ext_csd[488] << 8) | (ext_csd[489] << 16) | (ext_csd[490] << 24)); seq_printf(s, "[305:302] Number of FW sectors correctly programmed, number_of_fw_sectors_correctly_programmed: 0x%08x\n", (ext_csd[302] << 0) | (ext_csd[303] << 8) | (ext_csd[304] << 16) | (ext_csd[305] << 24)); output = 0; for (cnt = 301 ; cnt >= 270 ; cnt--) 
output += sprintf(buf_for_health_report + output, "%02x", ext_csd[cnt]); output += sprintf(buf_for_health_report + output, "\n"); seq_printf(s, "[301:270] Vendor proprietary health report, vendor_proprietary_health_report(raw data): %s", buf_for_health_report); //mina.park; kfree(buf_for_health_report); seq_printf(s, "[269] Device life time estimation type B, device_life_time_est_typ_b: 0x%02x\n", ext_csd[269]); seq_printf(s, "[268] Device life time estimation type A, device_life_time_est_typ_a: 0x%02x\n", ext_csd[268]); seq_printf(s, "[267] Pre EOL information, pre_eol_info: 0x%02x\n", ext_csd[267]); seq_printf(s, "[266] Optimal read size, optimal_read_size: 0x%02x\n", ext_csd[266]); seq_printf(s, "[265] Optimal write size, optimal_write_size: 0x%02x\n", ext_csd[265]); seq_printf(s, "[264] Optimal trim unit size, optimal_trim_unit_size: 0x%02x\n", ext_csd[264]); seq_printf(s, "[263:262] Device version, device_version: 0x%02x\n", (ext_csd[262] << 0) | (ext_csd[263] << 8)); output=0; for (cnt = 261 ; cnt >= 254 ; cnt--) output += sprintf(buf_for_firmwware_version + output, "%02x", ext_csd[cnt]); output += sprintf(buf_for_firmwware_version + output, "\n"); seq_printf(s, "[261:254] Firmware version, firmwware_version(raw data): %s", buf_for_firmwware_version); //mina.park; kfree(buf_for_firmwware_version); seq_printf(s, "[253] Power class for 200MHz, DDR at VCC=3.6V, pwr_cl_ddr_200_360: 0x%02x\n", ext_csd[253]); } seq_printf(s, "[252:249] Cache size, cache_size %d KiB\n", (ext_csd[249] << 0) | (ext_csd[250] << 8) | (ext_csd[251] << 16) | (ext_csd[252] << 24)); seq_printf(s, "[248] Generic CMD6 timeout, generic_cmd6_time: 0x%02x\n", ext_csd[248]); seq_printf(s, "[247] Power off notification timeout, power_off_long_time: 0x%02x\n", ext_csd[247]); seq_printf(s, "[246] Background operations status, bkops_status: 0x%02x\n", ext_csd[246]); seq_printf(s, "[245:242] Number of correctly programmed sectors, correctly_prg_sectors_num %d KiB\n", (ext_csd[242] << 0) | 
(ext_csd[243] << 8) | (ext_csd[244] << 16) | (ext_csd[245] << 24)); } /* A441: Reserved [501:247] A43: reserved [246:229] */ if (ext_csd_rev >= 5) { seq_printf(s, "[241] 1st initialization time after partitioning, ini_timeout_ap: 0x%02x\n", ext_csd[241]); /* A441: reserved [240] */ seq_printf(s, "[239] Power class for 52MHz, DDR at 3.6V, pwr_cl_ddr_52_360: 0x%02x\n", ext_csd[239]); seq_printf(s, "[238] POwer class for 52MHz, DDR at 1.95V, pwr_cl_ddr_52_195: 0x%02x\n", ext_csd[238]); /* A441: reserved [237-236] */ if (ext_csd_rev >= 6) { seq_printf(s, "[237] Power class for 200MHz, SDR at 3.6V, pwr_cl_200_360: 0x%02x\n", ext_csd[237]); seq_printf(s, "[236] Power class for 200MHz, SDR at 1.95V, pwr_cl_200_195: 0x%02x\n", ext_csd[236]); } seq_printf(s, "[235] Minimun Write Performance for 8bit at 52MHz in DDR mode, min_perf_ddr_w_8_52: 0x%02x\n", ext_csd[235]); seq_printf(s, "[234] Minimun Read Performance for 8bit at 52MHz in DDR modemin_perf_ddr_r_8_52: 0x%02x\n", ext_csd[234]); /* A441: reserved [233] */ seq_printf(s, "[232] TRIM Multiplier, trim_mult: 0x%02x\n", ext_csd[232]); seq_printf(s, "[231] Secure Feature support, sec_feature_support: 0x%02x\n", ext_csd[231]); } if (ext_csd_rev == 5 || ext_csd_rev == 7) { /* Obsolete in 4.5 */ /*---->revived in 5.0*/ seq_printf(s, "[230] Secure Erase Multiplier, sec_erase_mult: 0x%02x\n", ext_csd[230]); seq_printf(s, "[229] Secure TRIM Multiplier, sec_trim_mult: 0x%02x\n", ext_csd[229]); } seq_printf(s, "[228] Boot information, boot_info: 0x%02x\n", ext_csd[228]); /* A441/A43: reserved [227] */ seq_printf(s, "[226] Boot partition size, boot_size_mult : 0x%02x\n", ext_csd[226]); seq_printf(s, "[225] Access size, acc_size: 0x%02x\n", ext_csd[225]); seq_printf(s, "[224] High-capacity erase unit size, hc_erase_grp_size: 0x%02x\n", ext_csd[224]); seq_printf(s, "[223] High-capacity erase timeout, erase_timeout_mult: 0x%02x\n", ext_csd[223]); seq_printf(s, "[222] Reliable write sector count, rel_wr_sec_c: 0x%02x\n", ext_csd[222]); 
seq_printf(s, "[221] High-capacity write protect group size, hc_wp_grp_size: 0x%02x\n", ext_csd[221]); seq_printf(s, "[220] Sleep current(VCC), s_c_vcc: 0x%02x\n", ext_csd[220]); seq_printf(s, "[219] Sleep current(VCCQ), s_c_vccq: 0x%02x\n", ext_csd[219]); if (ext_csd_rev == 7) { seq_printf(s, "[218] Production state awareness timeout, production_state_awareness_timeout: 0x%02x\n", ext_csd[218]); } /* A441/A43: reserved [218] */ seq_printf(s, "[217] Sleep/awake timeout, s_a_timeout: 0x%02x\n", ext_csd[217]); if (ext_csd_rev == 7) { seq_printf(s, "[216] Sleep notification timeout, sleep_notification_time: 0x%02x\n", ext_csd[216]); } /* A441/A43: reserved [216] */ seq_printf(s, "[215:212] Sector Count, sec_count: 0x%08x\n", (ext_csd[215] << 24) |(ext_csd[214] << 16) | (ext_csd[213] << 8) | ext_csd[212]); /* A441/A43: reserved [211] */ seq_printf(s, "[210] Minimum Write Performance for 8bit at 52MHz, min_perf_w_8_52: 0x%02x\n", ext_csd[210]); seq_printf(s, "[209] Minimum Read Performance for 8bit at 52MHz, min_perf_r_8_52: 0x%02x\n", ext_csd[209]); seq_printf(s, "[208] Minimum Write Performance for 8bit at 26MHz, for 4bit at 52MHz, min_perf_w_8_26_4_52: 0x%02x\n", ext_csd[208]); seq_printf(s, "[207] Minimum Read Performance for 8bit at 26MHz, for 4bit at 52MHz, min_perf_r_8_26_4_52: 0x%02x\n", ext_csd[207]); seq_printf(s, "[206] Minimum Write Performance for 4bit at 26MHz, min_perf_w_4_26: 0x%02x\n", ext_csd[206]); seq_printf(s, "[205] Minimum Read Performance for 4bit at 26MHz, min_perf_r_4_26: 0x%02x\n", ext_csd[205]); /* A441/A43: reserved [204] */ seq_printf(s, "[203] Power class for 26MHz at 3.6V, pwr_cl_26_360: 0x%02x\n", ext_csd[203]); seq_printf(s, "[202] Power class for 52MHz at 3.6V, pwr_cl_52_360: 0x%02x\n", ext_csd[202]); seq_printf(s, "[201] Power class for 26MHz at 1.95V, pwr_cl_26_195: 0x%02x\n", ext_csd[201]); seq_printf(s, "[200] Power class for 52MHz at 1.95V, pwr_cl_52_195: 0x%02x\n", ext_csd[200]); /* A43: reserved [199:198] */ if (ext_csd_rev >= 
5) { seq_printf(s, "[199] Partition switching timing, partition_switch_time: 0x%02x\n", ext_csd[199]); seq_printf(s, "[198] Out-of-interrupt busy timing, out_of_interrupt_time: 0x%02x\n", ext_csd[198]); } /* A441/A43: reserved [197] [195] [193] [190] [188] * [186] [184] [182] [180] [176] */ if (ext_csd_rev >= 6) seq_printf(s, "[197] IO Driver Strength, driver_strength: 0x%02x\n", ext_csd[197]); seq_printf(s, "[196] Device type, device_type: 0x%02x\n", ext_csd[196]); seq_printf(s, "[194] CSD structure version, csd_structure: 0x%02x\n", ext_csd[194]); seq_printf(s, "[192] Extended CSD revision, ext_csd_rev: 0x%02x\n", ext_csd[192]); seq_printf(s, "[191] Command set, cmd_set: 0x%02x\n", ext_csd[191]); seq_printf(s, "[189] Command set revision, cmd_set_rev: 0x%02x\n", ext_csd[189]); seq_printf(s, "[187] Power class, power_class: 0x%02x\n", ext_csd[187]); seq_printf(s, "[185] High-speed interface timing, hs_timing: 0x%02x\n", ext_csd[185]); /* bus_width: ext_csd[183] not readable */ seq_printf(s, "[181] Erased memory content, erased_mem_cont: 0x%02x\n", ext_csd[181]); seq_printf(s, "[179] Partition configuration, partition_config: 0x%02x\n", ext_csd[179]); seq_printf(s, "[178] Boot config protection, boot_config_prot: 0x%02x\n", ext_csd[178]); seq_printf(s, "[177] Boot bus Conditions, boot_bus_conditions: 0x%02x\n", ext_csd[177]); seq_printf(s, "[175] High-density erase group definition, erase_group_def: 0x%02x\n", ext_csd[175]); /* A43: reserved [174:0] */ if (ext_csd_rev >= 5) { seq_printf(s, "[174] Boot write protection status registers, boot_wp_status: 0x%02x\n", ext_csd[174]); seq_printf(s, "[173] Boot area write protection register, boot_wp: 0x%02x\n", ext_csd[173]); /* A441: reserved [172] */ seq_printf(s, "[171] User area write protection register, user_wp: 0x%02x\n", ext_csd[171]); /* A441: reserved [170] */ seq_printf(s, "[169] FW configuration, fw_config: 0x%02x\n", ext_csd[169]); seq_printf(s, "[168] RPMB Size, rpmb_size_mult: 0x%02x\n", ext_csd[168]); 
seq_printf(s, "[167] Write reliability setting register, wr_rel_set: 0x%02x\n", ext_csd[167]); seq_printf(s, "[166] Write reliability parameter register, wr_rel_param: 0x%02x\n", ext_csd[166]); /* sanitize_start ext_csd[165]: not readable * bkops_start ext_csd[164]: only writable */ seq_printf(s, "[163] Enable background operations handshake, bkops_en: 0x%02x\n", ext_csd[163]); seq_printf(s, "[162] H/W reset function, rst_n_function: 0x%02x\n", ext_csd[162]); seq_printf(s, "[161] HPI management, hpi_mgmt: 0x%02x\n", ext_csd[161]); seq_printf(s, "[160] Partitioning Support, partitioning_support: 0x%02x\n", ext_csd[160]); seq_printf(s, "[159:157] Max Enhanced Area Size, max_enh_size_mult: 0x%06x\n", (ext_csd[159] << 16) | (ext_csd[158] << 8) |ext_csd[157]); seq_printf(s, "[156] Partitions attribute, partitions_attribute: 0x%02x\n", ext_csd[156]); seq_printf(s, "[155] Partitioning Setting, partition_setting_completed: 0x%02x\n", ext_csd[155]); seq_printf(s, "[154:152] General Purpose Partition Size, gp_size_mult_4: 0x%06x\n", (ext_csd[154] << 16) |(ext_csd[153] << 8) | ext_csd[152]); seq_printf(s, "[151:149] General Purpose Partition Size, gp_size_mult_3: 0x%06x\n", (ext_csd[151] << 16) |(ext_csd[150] << 8) | ext_csd[149]); seq_printf(s, "[148:146] General Purpose Partition Size, gp_size_mult_2: 0x%06x\n", (ext_csd[148] << 16) |(ext_csd[147] << 8) | ext_csd[146]); seq_printf(s, "[145:143] General Purpose Partition Size, gp_size_mult_1: 0x%06x\n", (ext_csd[145] << 16) |(ext_csd[144] << 8) | ext_csd[143]); seq_printf(s, "[142:140] Enhanced User Data Area Size, enh_size_mult: 0x%06x\n", (ext_csd[142] << 16) |(ext_csd[141] << 8) | ext_csd[140]); seq_printf(s, "[139:136] Enhanced User Data Start Address, enh_start_addr: 0x%06x\n", (ext_csd[139] << 24) | (ext_csd[138] << 16) | (ext_csd[137] << 8) | ext_csd[136]); /* A441: reserved [135] */ seq_printf(s, "[134] Bad Block Management mode, sec_bad_blk_mgmnt: 0x%02x\n", ext_csd[134]); /* A441: reserved [133:0] */ } /* B45 */ if 
(ext_csd_rev >= 6) { int j; /* tcase_support ext_csd[132] not readable */ seq_printf(s, "[131] Periodic Wake-up, periodic_wakeup: 0x%02x\n", ext_csd[131]); seq_printf(s, "[130] Program CID CSD in DDR mode support, program_cid_csd_ddr_support: 0x%02x\n", ext_csd[130]); for (j = 127; j >= 64; j--) seq_printf(s, "[127:64] Vendor Specific Fields, vendor_specific_field[%d]: 0x%02x\n", j, ext_csd[j]); seq_printf(s, "[63] Native sector size, native_sector_size: 0x%02x\n", ext_csd[63]); seq_printf(s, "[62] Sector size emulation, use_native_sector: 0x%02x\n", ext_csd[62]); seq_printf(s, "[61] Sector size, data_sector_size: 0x%02x\n", ext_csd[61]); seq_printf(s, "[60] 1st initialization after disabling sector size emulation, ini_timeout_emu: 0x%02x\n", ext_csd[60]); seq_printf(s, "[59] Class 6 commands control, class_6_ctrl: 0x%02x\n", ext_csd[59]); seq_printf(s, "[58] Number of addressed group to be Released, dyncap_needed: 0x%02x\n", ext_csd[58]); seq_printf(s, "[57:56] Exception events control, exception_events_ctrl: 0x%04x\n", (ext_csd[57] << 8) | ext_csd[56]); seq_printf(s, "[55:54] Exception events status, exception_events_status: 0x%04x\n", (ext_csd[55] << 8) | ext_csd[54]); seq_printf(s, "[53:52] Extended Partitions Attribute, ext_partitions_attribute: 0x%04x\n", (ext_csd[53] << 8) | ext_csd[52]); for (j = 51; j >= 37; j--) seq_printf(s, "[51:37]Context configuration, context_conf[%d]: 0x%02x\n", j, ext_csd[j]); seq_printf(s, "[36] Packed command status, packed_command_status: 0x%02x\n", ext_csd[36]); seq_printf(s, "[35] Packed command failure index, packed_failure_index: 0x%02x\n", ext_csd[35]); seq_printf(s, "[34] Power Off Notification, power_off_notification: 0x%02x\n", ext_csd[34]); seq_printf(s, "[33] Control to turn the Cache On Off, cache_ctrl: 0x%02x\n", ext_csd[33]); /* flush_cache ext_csd[32] not readable */ /*Reserved [31:0] */ } #endif out_free: #ifndef CONFIG_MACH_LGE /* */ kfree(buf); #endif kfree(ext_csd); return err; }
/*
 * Starting point for MMC card init.
 *
 * Called with the host claimed (checked by the WARN_ON below).  On
 * success the host has been released and the card added to the driver
 * model; returns 0.  On failure the bus is detached, the host is
 * released, and a negative errno is returned.
 */
int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
{
	int err;
	int i;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	mmc_attach_bus_ops(host);

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	/*
	 * Sanity check the voltages that the card claims to
	 * support.  Bits 0-6 are below the defined voltage range.
	 */
	if (ocr & 0x7F) {
		printk(KERN_WARNING "%s: card claims to support voltages "
		       "below the defined range. These will be ignored.\n",
		       mmc_hostname(host));
		ocr &= ~0x7F;
	}

	host->ocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage of the card?
	 */
	if (!host->ocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, host->ocr, NULL);
	if (err)
		goto err;

	/* WA : Lock/Unlock CMD in case of 32nm iNAND */
	/* check iNAND */
	if (host->card->cid.manfid == 0x45 || host->card->cid.manfid == 0x02) {
		/* check 32nm */
		if (!(host->card->ext_csd.hpi & 0x1)) {
			printk(KERN_DEBUG "%s: Lock-unlock started, MID=0x%x, HPI=0x%x\n",
			       __func__, host->card->cid.manfid,
			       host->card->ext_csd.hpi);
			/*
			 * The host is still claimed on these failure paths,
			 * so they must NOT go through remove_card (which
			 * re-claims and would self-deadlock).  err must also
			 * be set here, otherwise the stale 0 from
			 * mmc_init_card() would be returned as success.
			 */
			for (i = 0; i < 50; i++) {
				err = mmc_send_lock_cmd(host, 1);
				if (err) {
					printk(KERN_ERR "%s: eMMC lock CMD is failed.\n",
					       mmc_hostname(host));
					goto remove_card_claimed;
				}
				err = mmc_send_lock_cmd(host, 0);
				if (err) {
					printk(KERN_ERR "%s: eMMC unlock CMD is failed.\n",
					       mmc_hostname(host));
					goto remove_card_claimed;
				}
			}
			printk(KERN_DEBUG "%s:COMPLETED\n", __func__);
		}
	}

	mmc_release_host(host);

	err = mmc_add_card(host->card);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	/* Host was released above; re-claim before detaching the bus. */
	mmc_remove_card(host->card);
	host->card = NULL;
	mmc_claim_host(host);
	goto err;

remove_card_claimed:
	/* Host still claimed: same cleanup, but without re-claiming. */
	mmc_remove_card(host->card);
	host->card = NULL;
err:
	mmc_detach_bus(host);
	mmc_release_host(host);

	printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
	       mmc_hostname(host), err);

	return err;
}