static void bcmsdh_sdmmc_remove(struct sdio_func *func)
{
    if (func == NULL) {
        sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__));
        return;
    }

    sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
    sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
    sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
    sd_info(("sdio_device: 0x%04x\n", func->device));
    sd_info(("Function#: 0x%04x\n", func->num));

    if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
        sdioh_remove(func);
}
static int bcmsdh_sdmmc_resume(struct device *pdev)
{
#if defined(OOB_INTR_ONLY)
    struct sdio_func *func = dev_to_sdio_func(pdev);
#endif

    sd_trace(("%s Enter\n", __FUNCTION__));
    dhd_mmc_suspend = FALSE;
#if defined(OOB_INTR_ONLY)
    if ((func->num == 2) && dhd_os_check_if_up(bcmsdh_get_drvdata()))
        bcmsdh_oob_intr_set(1);
#endif /* (OOB_INTR_ONLY) */
    smp_mb();
    return 0;
}
static void sdstd_3_tuning_timer(ulong data)
{
    struct sdos_info *sdos = (struct sdos_info *)data;
    /* uint8 timeout = 0; */
    unsigned long int_flags;

    sd_trace(("%s: enter\n", __FUNCTION__));
    /* schedule tasklet */
    /* disable ISRs */
    local_irq_save(int_flags);
    if (sdstd_3_check_and_set_retuning(sdos->sd))
        tasklet_schedule(&sdos->tuning_tasklet);
    /* enable back ISRs */
    local_irq_restore(int_flags);
}
void sdio_function_cleanup(void)
{
    sd_trace(("%s Enter\n", __FUNCTION__));

#if defined(CONFIG_BRCM_LGE_WL_HOSTWAKEUP)
    sdio_claim_host(gInstance->func[0]);
    dhd_enable_sdio_irq(FALSE);
    sdio_release_host(gInstance->func[0]);
    dhd_unregister_early_suspend();
    dhd_suspend_context = TRUE;
#endif /* defined(CONFIG_BRCM_LGE_WL_HOSTWAKEUP) */

    sdio_unregister_driver(&bcmsdh_sdmmc_driver);

    if (gInstance)
        kfree(gInstance);
}
/* initialize tuning related OS structures */
void sdstd_3_osinit_tuning(sdioh_info_t *sd)
{
    struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info;

    sd_trace(("%s Enter\n", __FUNCTION__));

    /* initialize timer and tasklet for tuning */
    init_timer(&sdos->tuning_timer);
    sdos->tuning_timer.data = (ulong)sdos;
    sdos->tuning_timer.function = sdstd_3_tuning_timer;
    sdos->tuning_timer_exp = 2 * (sdstd_3_get_tuning_exp(sdos->sd));
    tasklet_init(&sdos->tuning_tasklet, sdstd_3_ostasklet, (ulong)sdos);
    if (sdos->tuning_timer_exp) {
        sdos->tuning_timer.expires = jiffies + sdos->tuning_timer_exp * HZ;
        add_timer(&sdos->tuning_timer);
        atomic_set(&sdos->timer_enab, TRUE);
    }
}
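/*
 * Side note (not part of the original driver): init_timer() together with the
 * .data/.function fields used above was removed from mainline Linux in 4.15.
 * A hypothetical port of the same tuning-timer setup to the timer_setup()/
 * from_timer() API could look like the sketch below; the *_cb/*_new names are
 * assumptions, everything else mirrors the structures used above.
 */
static void sdstd_3_tuning_timer_cb(struct timer_list *t)
{
    struct sdos_info *sdos = from_timer(sdos, t, tuning_timer);
    unsigned long int_flags;

    local_irq_save(int_flags);
    if (sdstd_3_check_and_set_retuning(sdos->sd))
        tasklet_schedule(&sdos->tuning_tasklet);
    local_irq_restore(int_flags);
}

void sdstd_3_osinit_tuning_new(sdioh_info_t *sd)
{
    struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info;

    timer_setup(&sdos->tuning_timer, sdstd_3_tuning_timer_cb, 0);
    sdos->tuning_timer_exp = 2 * sdstd_3_get_tuning_exp(sdos->sd);
    tasklet_init(&sdos->tuning_tasklet, sdstd_3_ostasklet, (ulong)sdos);
    if (sdos->tuning_timer_exp) {
        mod_timer(&sdos->tuning_timer, jiffies + sdos->tuning_timer_exp * HZ);
        atomic_set(&sdos->timer_enab, TRUE);
    }
}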
/* Enable client interrupt */
void spi_unlock(sdioh_info_t *sd)
{
    ulong flags;
    struct sdos_info *sdos;

    sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled));
    ASSERT(sd->lockcount > 0);

    sdos = (struct sdos_info *)sd->sdos_info;
    ASSERT(sdos);

    spin_lock_irqsave(&sdos->lock, flags);
    if (--sd->lockcount == 0 && sd->client_intr_enabled) {
        spi_devintr_on(sd);
    }
    spin_unlock_irqrestore(&sdos->lock, flags);
}
void sdstd_3_start_tuning(sdioh_info_t *sd)
{
    int tune_state;
    unsigned long int_flags = 0;
    unsigned int timer_enab;
    struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info;

    sd_trace(("%s: enter\n", __FUNCTION__));

    /* disable ISRs */
    local_irq_save(int_flags);
    timer_enab = atomic_read(&sdos->timer_enab);

    tune_state = sdstd_3_get_tune_state(sd);
    if (tune_state == TUNING_ONGOING) {
        /* tuning already in progress; do nothing */
        local_irq_restore(int_flags);
        goto exit;
    }
    /* change state */
    sdstd_3_set_tune_state(sd, TUNING_ONGOING);
    /* enable ISRs */
    local_irq_restore(int_flags);

    sdstd_3_clk_tuning(sd, sdstd_3_get_uhsi_clkmode(sd));

    /* disable ISRs */
    local_irq_save(int_flags);
    sdstd_3_set_tune_state(sd, TUNING_IDLE);
    /* enable ISRs */
    local_irq_restore(int_flags);

    /* enable retuning interrupt */
    sdstd_3_enable_retuning_int(sd);

    /* start retuning timer if enabled */
    if ((sdos->tuning_timer_exp) && (timer_enab)) {
        if (sd->sd3_tuning_reqd) {
            sdos->tuning_timer.expires = jiffies + sdos->tuning_timer_exp * HZ;
            mod_timer(&sdos->tuning_timer, sdos->tuning_timer.expires);
        }
    }
exit:
    return;
}
static int bcmsdh_sdmmc_probe(struct sdio_func *func,
                              const struct sdio_device_id *id)
{
    int ret = 0;
    static struct sdio_func sdio_func_0;

    sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
    sd_trace(("sdio_bcmsdh: func->class=%x\n", func->class));
    sd_trace(("sdio_vendor: 0x%04x\n", func->vendor));
    sd_trace(("sdio_device: 0x%04x\n", func->device));
    sd_trace(("Function#: 0x%04x\n", func->num));

    /* Linux native mmc stack enables high speed only if card's CCCR version
     * >= 1.20.  BCM4329 reports CCCR Version 1.10 but it supports high speed.
     */
#ifdef MMC_SDIO_BROKEN_CCCR_REV
    if (func->vendor == SDIO_VENDOR_ID_BROADCOM &&
        func->device == SDIO_DEVICE_ID_BROADCOM_4329) {
        sd_trace(("setting high speed support ignoring card CCCR\n"));
        func->card->cccr.high_speed = 1;
    }
#endif

    if (func->num == 1) {
#ifdef CUSTOMER_HW4
        dhd_reset_chip();
        sdio_reset_comm(func->card);
#endif
        sdio_func_0.num = 0;
        sdio_func_0.card = func->card;
        gInstance->func[0] = &sdio_func_0;
        if (func->device == 0x4) { /* 4318 */
            gInstance->func[2] = NULL;
            sd_trace(("NIC found, calling bcmsdh_probe...\n"));
            ret = bcmsdh_probe(&sdmmc_dev);
        }
    }

    gInstance->func[func->num] = func;

    if (func->num == 2) {
        sd_trace(("F2 found, calling bcmsdh_probe...\n"));
        ret = bcmsdh_probe(&sdmmc_dev);
    }

    return ret;
}
static int bcmsdh_sdmmc_suspend(struct device *pdev)
{
    struct sdio_func *func = dev_to_sdio_func(pdev);

    if (func->num != 2)
        return 0;

    sd_trace(("%s Enter\n", __FUNCTION__));
    if (dhd_os_check_wakelock(bcmsdh_get_drvdata()))
        return -EBUSY;
#if defined(OOB_INTR_ONLY)
    bcmsdh_oob_intr_set(0);
#endif /* defined(OOB_INTR_ONLY) */
    dhd_mmc_suspend = TRUE;
    smp_mb();
    return 0;
}
/* Enable device interrupt */
void spi_devintr_on(sdioh_info_t *sd)
{
    spih_info_t *si = (spih_info_t *)sd->controller;
    osl_t *osh = si->osh;
    spih_regs_t *regs = si->regs;

    ASSERT(sd->lockcount == 0);
    sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
    if (sd->use_client_ints) {
        if (SPIPCI_RREG(osh, &regs->spih_ctrl) & 0x02) {
            /* Ack in case one was pending but is no longer... */
            SPIPCI_WREG(osh, &regs->spih_int_status, SPIH_DEV_INTR);
        }
        sd->intmask |= SPIH_DEV_INTR;

        /* Set device intr in Intmask */
        SPIPCI_WREG(osh, &regs->spih_int_mask, sd->intmask);
    }
}
/* Protect against reentrancy (disable device interrupts while executing) */
void spi_lock(sdioh_info_t *sd)
{
    ulong flags;
    struct sdos_info *sdos;

    sdos = (struct sdos_info *)sd->sdos_info;
    ASSERT(sdos);

    sd_trace(("%s: %d\n", __FUNCTION__, sd->lockcount));

    spin_lock_irqsave(&sdos->lock, flags);
    if (sd->lockcount) {
        sd_err(("%s: Already locked!\n", __FUNCTION__));
        ASSERT(sd->lockcount == 0);
    }
    spi_devintr_off(sd);
    sd->lockcount++;
    spin_unlock_irqrestore(&sdos->lock, flags);
}
/*
 * module init
 */
int sdio_function_init(void)
{
    int error = 0;

    sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));

    gInstance = kzalloc(sizeof(BCMSDH_SDMMC_INSTANCE), GFP_KERNEL);
    if (!gInstance)
        return -ENOMEM;

    bzero(&sdmmc_dev, sizeof(sdmmc_dev));
    error = sdio_register_driver(&bcmsdh_sdmmc_driver);

#if defined(CONFIG_BRCM_LGE_WL_HOSTWAKEUP)
    if (!error) {
        dhd_register_early_suspend();
        DHD_TRACE(("%s: registered with Android PM\n", __FUNCTION__));
    }
#endif /* defined(CONFIG_BRCM_LGE_WL_HOSTWAKEUP) */

    return error;
}
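/*
 * For context (assumed, not shown in these snippets): sdio_register_driver()
 * above takes a struct sdio_driver describing the probe/remove entry points
 * and the SDIO IDs the driver binds to.  A minimal sketch of that object,
 * using the callback names from this file, might look like this; the exact
 * ID list is an assumption.
 */
static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
    { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) },
    { SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) },   /* match any other function class */
    { /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);

static struct sdio_driver bcmsdh_sdmmc_driver = {
    .probe      = bcmsdh_sdmmc_probe,
    .remove     = bcmsdh_sdmmc_remove,
    .name       = "bcmsdh_sdmmc",
    .id_table   = bcmsdh_sdmmc_ids,
};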
/* Enable client interrupt */
void spi_unlock(sdioh_info_t *sd)
{
    ulong flags;
    struct sdos_info *sdos;

    sd_trace(("%s: %d, %d\n", __FUNCTION__, sd->lockcount, sd->client_intr_enabled));
    ASSERT(sd->lockcount > 0);

    sdos = (struct sdos_info *)sd->sdos_info;
    ASSERT(sdos);

    spin_lock_irqsave(&sdos->lock, flags);
    if (--sd->lockcount == 0 && sd->client_intr_enabled) {
#ifdef BCMSPI_ANDROID
        bcmsdh_oob_intr_set(1);
#else
        spi_devintr_on(sd);
#endif /* BCMSPI_ANDROID */
    }
    spin_unlock_irqrestore(&sdos->lock, flags);
}
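/*
 * Usage sketch (illustrative only): spi_lock()/spi_unlock() are meant to
 * bracket any host access that must not race with the client interrupt.
 * do_spi_transfer() below is a hypothetical helper, not part of the driver.
 */
static int do_locked_transfer(sdioh_info_t *sd, uint8 *out, uint8 *in, int len)
{
    int err;

    spi_lock(sd);       /* masks the device interrupt, bumps lockcount */
    err = do_spi_transfer(sd, out, in, len);
    spi_unlock(sd);     /* re-enables the device interrupt if client ints are on */

    return err;
}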
/* Interrupt enable/disable */
SDIOH_API_RC
sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
{
    ulong flags;
    struct sdos_info *sdos;

    sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));

    sdos = (struct sdos_info *)sd->sdos_info;
    ASSERT(sdos);

    if (!(sd->host_init_done && sd->card_init_done)) {
        sd_err(("%s: Card & Host are not initted - bailing\n", __FUNCTION__));
        return SDIOH_API_RC_FAIL;
    }

#ifndef BCMSPI_ANDROID
    if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
        sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
        return SDIOH_API_RC_FAIL;
    }
#endif /* !BCMSPI_ANDROID */

    /* Ensure atomicity for enable/disable calls */
    spin_lock_irqsave(&sdos->lock, flags);

    sd->client_intr_enabled = enable;
#ifndef BCMSPI_ANDROID
    if (enable && !sd->lockcount)
        spi_devintr_on(sd);
    else
        spi_devintr_off(sd);
#endif /* !BCMSPI_ANDROID */

    spin_unlock_irqrestore(&sdos->lock, flags);

    return SDIOH_API_RC_SUCCESS;
}
static void bcmsdh_sdmmc_remove(struct sdio_func *func)
{
    if (func == NULL) {
        sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__));
        return;
    }

    sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
    sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
    sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
    sd_info(("sdio_device: 0x%04x\n", func->device));
    sd_info(("Function#: 0x%04x\n", func->num));

    if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
        sdioh_remove(func);

#ifdef CONFIG_MACH_NOTLE
    if (func->num == 2) {
        cancel_delayed_work_sync(&bcmshd_resume_work);
        destroy_workqueue(sdmmc_pm_workqueue);
    }
#endif
}
static int bcmsdh_sdmmc_suspend(struct device *pdev)
{
    struct sdio_func *func = dev_to_sdio_func(pdev);
    mmc_pm_flag_t sdio_flags;
    int ret;

    if (func->num != 2)
        return 0;

    sd_trace(("%s Enter\n", __FUNCTION__));
    if (dhd_os_check_wakelock(bcmsdh_get_drvdata()))
        return -EBUSY;

    sdio_flags = sdio_get_host_pm_caps(func);
    if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
        sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__));
        return -EINVAL;
    }

    /* keep power while host suspended */
    ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
    if (ret) {
        sd_err(("%s: error while trying to keep power\n", __FUNCTION__));
        return ret;
    }

#if defined(OOB_INTR_ONLY)
    bcmsdh_oob_intr_set(0);
#endif /* defined(OOB_INTR_ONLY) */
    dhd_mmc_suspend = TRUE;
    smp_mb();

    return 0;
}
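/*
 * Sketch only: because these suspend/resume handlers take a struct device,
 * they are normally exported to the MMC core through a dev_pm_ops table
 * referenced from the sdio_driver's .drv.pm field.  The object name below is
 * an assumption.
 */
static const struct dev_pm_ops bcmsdh_sdmmc_pm_ops = {
    .suspend    = bcmsdh_sdmmc_suspend,
    .resume     = bcmsdh_sdmmc_resume,
};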
void spi_waitbits(sdioh_info_t *sd, bool yield)
{
#ifndef BCMSDYIELD
    ASSERT(!yield);
#endif
    sd_trace(("%s: yield %d canblock %d\n", __FUNCTION__, yield, BLOCKABLE()));

    /* Clear the "interrupt happened" flag and last intrstatus */
    sd->got_hcint = FALSE;

#ifdef BCMSDYIELD
    if (yield && BLOCKABLE()) {
        struct sdos_info *sdos;
        sdos = (struct sdos_info *)sd->sdos_info;
        /* Wait for the indication, the interrupt will be masked when the ISR fires. */
        wait_event_interruptible(sdos->intr_wait_queue, (sd->got_hcint));
    } else
#endif /* BCMSDYIELD */
    {
        spi_spinbits(sd);
    }
}
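/*
 * Hypothetical sketch (names assumed): when BCMSDYIELD is in effect, the
 * interrupt path must set sd->got_hcint and wake sdos->intr_wait_queue or the
 * wait in spi_waitbits() never completes.  spi_check_client_intr(), shown
 * further below, sets the flag for the write-FIFO-empty interrupt; an ISR
 * wrapper performing the wakeup could look like this.
 */
static irqreturn_t spih_isr(int irq, void *dev_id)
{
    sdioh_info_t *sd = (sdioh_info_t *)dev_id;
    struct sdos_info *sdos = (struct sdos_info *)sd->sdos_info;
    int dev_intr = FALSE;

    if (!spi_check_client_intr(sd, &dev_intr))
        return IRQ_NONE;        /* shared line, not ours */

    if (sd->got_hcint)
        wake_up_interruptible(&sdos->intr_wait_queue);

    return IRQ_HANDLED;
}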
static int bcmsdh_sdmmc_probe(struct sdio_func *func,
                              const struct sdio_device_id *id)
{
    int ret = 0;
    static struct sdio_func sdio_func_0;

    if (!gInstance)
        return -EINVAL;

    if (func) {
        sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
        sd_trace(("sdio_bcmsdh: func->class=%x\n", func->class));
        sd_trace(("sdio_vendor: 0x%04x\n", func->vendor));
        sd_trace(("sdio_device: 0x%04x\n", func->device));
        sd_trace(("Function#: 0x%04x\n", func->num));

        if (func->num == 1) {
            chip_id = (int)func->device;
            func->card->host->bus_resume_flags = 0;
            sdio_func_0.num = 0;
            sdio_func_0.card = func->card;
            gInstance->func[0] = &sdio_func_0;
            if (func->device == 0x4) { /* 4318 */
                gInstance->func[2] = NULL;
                sd_trace(("NIC found, calling bcmsdh_probe...\n"));
                ret = bcmsdh_probe(&func->dev);
            }
        }

        gInstance->func[func->num] = func;

        if (func->num == 2) {
#ifdef WL_CFG80211
            wl_cfg80211_set_parent_dev(&func->dev);
#endif
            sd_trace(("F2 found, calling bcmsdh_probe...\n"));
            ret = bcmsdh_probe(&func->dev);
            if (ret < 0)
                gInstance->func[2] = NULL;
        }
    } else {
        /* assumed completion: original snippet ended here; no SDIO function supplied */
        ret = -ENODEV;
    }

    return ret;
}
static int bcmsdh_sdmmc_probe(struct sdio_func *func,
                              const struct sdio_device_id *id)
{
    int ret = 0;
    static struct sdio_func sdio_func_0;

    sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
    sd_trace(("sdio_bcmsdh: func->class=%x\n", func->class));
    sd_trace(("sdio_vendor: 0x%04x\n", func->vendor));
    sd_trace(("sdio_device: 0x%04x\n", func->device));
    sd_trace(("Function#: 0x%04x\n", func->num));

    if (func->num == 1) {
        sdio_func_0.num = 0;
        sdio_func_0.card = func->card;
        gInstance->func[0] = &sdio_func_0;
        if (func->device == 0x4) { /* 4318 */
            gInstance->func[2] = NULL;
            sd_trace(("NIC found, calling bcmsdh_probe_bcmdhd...\n"));
            ret = bcmsdh_probe_bcmdhd(&func->dev);
        }
    }

    gInstance->func[func->num] = func;

    if (func->num == 2) {
#ifdef WL_CFG80211
        wl_cfg80211_set_parent_dev(&func->dev);
#endif
        sd_trace(("F2 found, calling bcmsdh_probe_bcmdhd...\n"));
        ret = bcmsdh_probe_bcmdhd(&func->dev);
        if (mmc_power_save_host(func->card->host))
            sd_err(("%s: card power save fail", __FUNCTION__));
    }

    return ret;
}
/* Send/Receive an SPI Packet */
void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen)
{
    spih_info_t *si = (spih_info_t *)sd->controller;
    osl_t *osh = si->osh;
    spih_regs_t *regs = si->regs;
    uint32 count;
    uint32 spi_data_out;
    uint32 spi_data_in;
    bool yield;

    sd_trace(("%s: enter\n", __FUNCTION__));

    if (bcmpcispi_dump) {
        printf("SENDRECV(len=%d)\n", msglen);
        hexdump(" OUT: ", msg_out, msglen);
    }

#ifdef BCMSDYIELD
    /* Only yield the CPU and wait for interrupt on Rev 8 and newer FPGA images. */
    yield = ((msglen > 500) && (si->rev >= 8));
#else
    yield = FALSE;
#endif /* BCMSDYIELD */

    ASSERT(msglen % 4 == 0);

    SPIPCI_ANDREG(osh, &regs->spih_gpio_data, ~SPIH_CS);    /* Set GPIO CS# Low (asserted) */

    for (count = 0; count < (uint32)msglen / 4; count++) {
        spi_data_out = ((uint32)((uint32 *)msg_out)[count]);
        SPIPCI_WREG(osh, &regs->spih_data, spi_data_out);
    }

#ifdef BCMSDYIELD
    if (yield) {
        /* Ack the interrupt in the interrupt controller */
        SPIPCI_WREG(osh, &regs->spih_int_status, SPIH_WFIFO_INTR);
        SPIPCI_RREG(osh, &regs->spih_int_status);

        /* Enable the FIFO Empty Interrupt */
        sd->intmask |= SPIH_WFIFO_INTR;
        sd->got_hcint = FALSE;
        SPIPCI_WREG(osh, &regs->spih_int_mask, sd->intmask);
    }
#endif /* BCMSDYIELD */

    /* Wait for write fifo to empty... */
    SPIPCI_ANDREG(osh, &regs->spih_gpio_data, ~0x00000020); /* Set GPIO 5 Low */

    if (yield) {
        ASSERT((SPIPCI_RREG(sd->osh, &regs->spih_stat) & SPIH_WFEMPTY) == 0);
    }

    spi_waitbits(sd, yield);

    SPIPCI_ORREG(osh, &regs->spih_gpio_data, 0x00000020);   /* Set GPIO 5 High (de-asserted) */

    for (count = 0; count < (uint32)msglen / 4; count++) {
        spi_data_in = SPIPCI_RREG(osh, &regs->spih_data);
        ((uint32 *)msg_in)[count] = spi_data_in;
    }

    /* Set GPIO CS# High (de-asserted) */
    SPIPCI_ORREG(osh, &regs->spih_gpio_data, SPIH_CS);

    if (bcmpcispi_dump) {
        hexdump(" IN : ", msg_in, msglen);
    }
}
static int bcmsdh_sdmmc_resume(struct device *dev)
{
    sd_trace(("bcmsdh_sdmmc_resume !!!!\n"));
    return 0;
}
static int bcmsdh_sdmmc_suspend(struct device *dev)
{
    sd_trace(("bcmsdh_sdmmc_suspend !!!!\n"));
    return 0;
}
/*
 * module cleanup
 */
void spi_function_cleanup(void)
{
    sd_trace(("%s Enter\n", __FUNCTION__));
    spi_unregister_driver(&bcmsdh_spi_driver);
}
bool spi_check_client_intr(sdioh_info_t *sd, int *is_dev_intr)
{
    spih_info_t *si = (spih_info_t *)sd->controller;
    osl_t *osh = si->osh;
    spih_regs_t *regs = si->regs;
    bool ours = FALSE;
    uint32 raw_int, cur_int;

    ASSERT(sd);

    if (is_dev_intr)
        *is_dev_intr = FALSE;
    raw_int = SPIPCI_RREG(osh, &regs->spih_int_status);
    cur_int = raw_int & sd->intmask;
    if (cur_int & SPIH_DEV_INTR) {
        if (sd->client_intr_enabled && sd->use_client_ints) {
            sd->intrcount++;
            ASSERT(sd->intr_handler);
            ASSERT(sd->intr_handler_arg);
            (sd->intr_handler)(sd->intr_handler_arg);
            if (is_dev_intr)
                *is_dev_intr = TRUE;
        } else {
            sd_trace(("%s: Not ready for intr: enabled %d, handler 0x%p\n",
                      __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
        }
        /* Ack the device interrupt and flush the write with a read-back */
        SPIPCI_WREG(osh, &regs->spih_int_status, SPIH_DEV_INTR);
        SPIPCI_RREG(osh, &regs->spih_int_status);
        ours = TRUE;
    } else if (cur_int & SPIH_CTLR_INTR) {
        sd_trace(("%s: SPI CTLR interrupt: raw_int 0x%08x cur_int 0x%08x\n",
                  __FUNCTION__, raw_int, cur_int));
        SPIPCI_WREG(osh, &regs->spih_stat, 0x00000080);
        SPIPCI_WREG(osh, &regs->spih_int_status, SPIH_CTLR_INTR);
        SPIPCI_RREG(osh, &regs->spih_int_status);
        ours = TRUE;
    } else if (cur_int & SPIH_WFIFO_INTR) {
        sd_trace(("%s: SPI WR FIFO Empty interrupt: raw_int 0x%08x cur_int 0x%08x\n",
                  __FUNCTION__, raw_int, cur_int));
        /* Mask the FIFO Empty interrupt and flag the waiter */
        sd->intmask &= ~SPIH_WFIFO_INTR;
        SPIPCI_WREG(osh, &regs->spih_int_mask, sd->intmask);
        sd->local_intrcount++;
        sd->got_hcint = TRUE;
        ours = TRUE;
    } else {
        /* Not our interrupt; the line may be shared with another device. */
        sd_trace(("%s: Not my interrupt: raw_int 0x%08x cur_int 0x%08x\n",
                  __FUNCTION__, raw_int, cur_int));
        ours = FALSE;
    }

    return ours;
}
/* Attach to PCI-SPI Host Controller Hardware */
bool spi_hw_attach(sdioh_info_t *sd)
{
    osl_t *osh;
    spih_info_t *si;

    sd_trace(("%s: enter\n", __FUNCTION__));

    osh = sd->osh;

    if ((si = (spih_info_t *)MALLOC(osh, sizeof(spih_info_t))) == NULL) {
        sd_err(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh)));
        return FALSE;
    }

    bzero(si, sizeof(spih_info_t));

    sd->controller = si;
    si->osh = sd->osh;
    si->rev = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_REV, 4) & 0xFF;

    if (si->rev < 3) {
        sd_err(("Host controller %d not supported, please upgrade to rev >= 3\n", si->rev));
        MFREE(osh, si, sizeof(spih_info_t));
        return (FALSE);
    }

    sd_err(("Attaching to Generic PCI SPI Host Controller Rev %d\n", si->rev));

    /* FPGA Revision < 3 not supported by driver anymore. */
    ASSERT(si->rev >= 3);

    si->bar0 = sd->bar0;

    /* Rev < 10 PciSpiHost has 2 BARs:
     *     BAR0 = PCI Core Registers
     *     BAR1 = PciSpiHost Registers (all other cores on backplane)
     *
     * Rev 10 and up use a different PCI core which only has a single
     * BAR0 which contains the PciSpiHost Registers.
     */
    if (si->rev < 10) {
        si->pciregs = (spih_pciregs_t *)spi_reg_map(osh, (uintptr)si->bar0,
                                                    sizeof(spih_pciregs_t));
        sd_err(("Mapped PCI Core regs to BAR0 at %p\n", si->pciregs));

        si->bar1 = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR1, 4);
        si->regs = (spih_regs_t *)spi_reg_map(osh, (uintptr)si->bar1,
                                              sizeof(spih_regs_t));
        sd_err(("Mapped SPI Controller regs to BAR1 at %p\n", si->regs));
    } else {
        si->regs = (spih_regs_t *)spi_reg_map(osh, (uintptr)si->bar0,
                                              sizeof(spih_regs_t));
        sd_err(("Mapped SPI Controller regs to BAR0 at %p\n", si->regs));
        si->pciregs = NULL;
    }

    /* Enable SPI Controller, 16.67MHz SPI Clock */
    SPIPCI_WREG(osh, &si->regs->spih_ctrl, 0x000000d1);

    /* Set extended feature register to defaults */
    SPIPCI_WREG(osh, &si->regs->spih_ext, 0x00000000);

    /* Set GPIO CS# High (de-asserted) */
    SPIPCI_WREG(osh, &si->regs->spih_gpio_data, SPIH_CS);

    /* set GPIO[0] to output for CS#
     * set GPIO[1] to output for power control
     * set GPIO[2] to input for card detect
     */
    SPIPCI_WREG(osh, &si->regs->spih_gpio_ctrl, (SPIH_CS | SPIH_SLOT_POWER));

    /* Clear out the Read FIFO in case there is any stuff left in there from a previous run. */
    while ((SPIPCI_RREG(osh, &si->regs->spih_stat) & SPIH_RFEMPTY) == 0) {
        SPIPCI_RREG(osh, &si->regs->spih_data);
    }

    /* Wait for power to stabilize to the SDIO Card (100msec was insufficient) */
    OSL_DELAY(250000);

    /* Check card detect on FPGA Revision >= 4 */
    if (si->rev >= 4) {
        if (SPIPCI_RREG(osh, &si->regs->spih_gpio_data) & SPIH_CARD_DETECT) {
            sd_err(("%s: no card detected in SD slot\n", __FUNCTION__));
            spi_reg_unmap(osh, (uintptr)si->regs, sizeof(spih_regs_t));
            if (si->pciregs) {
                spi_reg_unmap(osh, (uintptr)si->pciregs, sizeof(spih_pciregs_t));
            }
            MFREE(osh, si, sizeof(spih_info_t));
            return FALSE;
        }
    }

    /* Interrupts are level sensitive */
    SPIPCI_WREG(osh, &si->regs->spih_int_edge, 0x80000000);

    /* Interrupts are active low. */
    SPIPCI_WREG(osh, &si->regs->spih_int_pol, 0x40000004);

    /* Enable interrupts through PCI Core. */
    if (si->pciregs) {
        SPIPCI_WREG(osh, &si->pciregs->ICR, PCI_INT_PROP_EN);
    }

    sd_trace(("%s: exit\n", __FUNCTION__));

    return TRUE;
}