/*
 * Loads a program into SRAM, then tells the DCM to run it, and then waits
 * for it to finish.
 *
 * @dcm:  driver state (register pointers omsg/ocmd/mack, SRAM accessors)
 * @addr: starting SRAM byte address for the program
 * @len:  number of program bytes that follow as variadic int arguments
 *
 * Returns 1 on success, 0 on bounds error, start/stop timeout, or a
 * DCM-reported error.  Note the boolean (not errno) return convention.
 */
static int run_program(struct fsl_dcm_data *dcm, u8 addr, unsigned int len,
		       ...)
{
	u8 v, n;
	va_list args;

	/* SRAM is byte-addressed 0x00..0xff; reject anything past the end. */
	if (addr + len > 0xff) {
		dev_err(dcm->dev, "address/length of %u/%u is out of bounds\n",
			addr, len);
		return 0;
	}

	/* load the program into SRAM */
	va_start(args, len);
	for (n = addr; n < addr + len; n++) {
		/* va_arg promotes u8 to int; truncate back to one byte. */
		v = va_arg(args, int);
		write_sram(dcm, n, v);
	}
	va_end(args);

	/* start the DCM: point it at the program, then raise the MSG cmd */
	out_8(dcm->omsg, addr);
	out_8(dcm->ocmd, PX_OCMD_MSG);

	/*
	 * wait for ack or error; spin_event_timeout yields the final value
	 * of the condition expression, so v == 0 means timeout.
	 */
	v = spin_event_timeout(in_8(dcm->mack) & (PX_OACK_ERR | PX_OACK_ACK),
			       50000, 1000);
	if ((!v) || (v & PX_OACK_ERR)) {
		dev_err(dcm->dev, "timeout or error waiting for start ack\n");
		return 0;
	}

	/* 4. allow the host to read SRAM */
	out_8(dcm->ocmd, 0);

	/*
	 * 5. wait for DCM to stop (ack == 0) or error (err == 1).
	 * The condition assigns the latest MACK value to v, so v holds the
	 * last observed status whether or not we timed out.
	 */
	spin_event_timeout(
		((v = in_8(dcm->mack)) & (PX_OACK_ERR | PX_OACK_ACK)) !=
			PX_OACK_ACK,
		50000, 1000);

	/*
	 * 6. check for error or timeout: any ERR or still-set ACK bit in the
	 * final status means the DCM did not stop cleanly.
	 */
	if (v & (PX_OACK_ERR | PX_OACK_ACK)) {
		dev_err(dcm->dev, "timeout or error waiting for stop ack\n");
		return 0;
	}

	return 1;
}
/*
 * Put the SoC into LPM20 standby via the v2 RCPM and, on resume, wait for
 * the hardware to drop the LPM20 status bit.
 *
 * Returns 0 on success, -EINVAL for any state other than standby, and
 * -ETIMEDOUT if the LPM20 status bit never clears after wakeup.
 */
static int rcpm_v2_suspend_enter(suspend_state_t state)
{
	int cleared;

	if (state != PM_SUSPEND_STANDBY)
		return -EINVAL;

	/* clear previous LPM20 status */
	setbits32(&rcpm2_regs->powmgtcsr, RCPM_POWMGTCSR_P_LPM20_ST);
	/* enter LPM20 status */
	setbits32(&rcpm2_regs->powmgtcsr, RCPM_POWMGTCSR_LPM20_RQ);

	/* At this point, the device is in LPM20 status. */

	/* resume: spin until the status bit is cleared by hardware */
	cleared = spin_event_timeout(
		(in_be32(&rcpm2_regs->powmgtcsr) & RCPM_POWMGTCSR_LPM20_ST)
			== 0,
		10000, 10);
	if (!cleared) {
		pr_err("%s: timeout waiting for LPM20 bit to be cleared\n",
		       __func__);
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * Enter sleep via the v1 RCPM: flush caches, request sleep, then on resume
 * wait for the hardware to clear the SLP bit.
 *
 * Returns 0 on success, -EINVAL for any state other than standby, and
 * -ETIMEDOUT if the SLP bit never clears after wakeup.
 */
static int rcpm_suspend_enter(suspend_state_t state)
{
	int cleared;

	if (state != PM_SUSPEND_STANDBY)
		return -EINVAL;

	/* Caches must be clean before the core stops snooping. */
	flush_dcache_L1();
	flush_backside_L2_cache();

	setbits32(&rcpm1_regs->powmgtcsr, RCPM_POWMGTCSR_SLP);

	/* At this point, the device is in sleep mode. */

	/* Upon resume, wait for SLP bit to be clear. */
	cleared = spin_event_timeout(
		(in_be32(&rcpm1_regs->powmgtcsr) & RCPM_POWMGTCSR_SLP) == 0,
		10000, 10);
	if (!cleared) {
		pr_err("%s: timeout waiting for SLP bit to be cleared\n",
		       __func__);
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * Request PMC sleep and, on resume, wait for the hardware to clear the
 * SLP bit.
 *
 * @state: requested suspend state (not inspected here; the caller's
 *         suspend_ops wiring selects when this runs)
 *
 * Returns 0 on success or -ETIMEDOUT if the SLP bit never clears.
 */
static int pmc_suspend_enter(suspend_state_t state)
{
	int ret;

	setbits32(&pmc_regs->pmcsr, PMCSR_SLP);
	/* At this point, the CPU is asleep. */

	/* Upon resume, wait for SLP bit to be clear. */
	ret = spin_event_timeout((in_be32(&pmc_regs->pmcsr) & PMCSR_SLP) == 0,
				 10000, 10) ? 0 : -ETIMEDOUT;
	if (ret)
		/*
		 * Reworded from "tired waiting for SLP bit to clear" to match
		 * the wording used by the other suspend handlers.
		 */
		dev_err(pmc_dev, "timeout waiting for SLP bit to be cleared\n");

	return ret;
}
/*
 * Wait till the MDIO read or write operation is complete.
 *
 * @dev:  device for error reporting
 * @regs: memory-mapped TGEC MDIO controller registers
 *
 * Returns 0 when the BSY bit in MDIO_DATA clears, -ETIMEDOUT otherwise.
 */
static int xgmac_wait_until_done(struct device *dev,
				 struct tgec_mdio_controller __iomem *regs)
{
	uint32_t status;

	/* Wait till the MDIO write is complete */
	/*
	 * Fixed mojibake: "®s->mdio_data" was an HTML-entity corruption of
	 * "&regs->mdio_data" and did not compile.
	 */
	status = spin_event_timeout(
		!((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
	if (!status) {
		dev_err(dev, "timeout waiting for operation to complete\n");
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * Wait until the MDIO bus is free.
 *
 * @dev:  device for error reporting
 * @regs: memory-mapped TGEC MDIO controller registers
 *
 * Returns 0 when the BSY bit in MDIO_STAT clears, -ETIMEDOUT otherwise.
 */
static int xgmac_wait_until_free(struct device *dev,
				 struct tgec_mdio_controller __iomem *regs)
{
	uint32_t status;

	/* Wait till the bus is free */
	/*
	 * Fixed mojibake: "®s->mdio_stat" was an HTML-entity corruption of
	 * "&regs->mdio_stat" and did not compile.
	 */
	status = spin_event_timeout(
		!((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
	if (!status) {
		dev_err(dev, "timeout waiting for bus to be free\n");
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * 85xx PMC suspend entry: deep sleep (PM_SUSPEND_MEM, 85xx only) or
 * standby sleep via POWMGTCSR.
 *
 * Returns 0 on success, -ETIMEDOUT if the SLP bit never clears after a
 * standby resume, -EINVAL for unsupported states.
 *
 * NOTE(review): interrupts are disabled here and not re-enabled in this
 * function — presumably the suspend core / platform resume path restores
 * them; confirm against the registered suspend_ops.
 */
static int pmc_suspend_enter(suspend_state_t state)
{
	int ret = 0;
	int result;

	switch (state) {
#ifdef CONFIG_PPC_85xx
	case PM_SUSPEND_MEM:
#ifdef CONFIG_SPE
		/* Force SPE state to be saved before the core powers down. */
		enable_kernel_spe();
#endif
		/* Likewise for the FP state. */
		enable_kernel_fp();
		pr_debug("%s: Entering deep sleep\n", __func__);

		local_irq_disable();
		mpc85xx_enter_deep_sleep(get_immrbase(), POWMGTCSR_DPSLP);

		/* Execution resumes here after wakeup from deep sleep. */
		pr_debug("%s: Resumed from deep sleep\n", __func__);
		break;
#endif

	case PM_SUSPEND_STANDBY:
		local_irq_disable();
		/* Cache must be clean before the core stops snooping. */
		flush_dcache_L1();

		setbits32(&pmc_regs->powmgtcsr, POWMGTCSR_SLP);
		/* At this point, the CPU is asleep. */

		/* Upon resume, wait for SLP bit to be clear. */
		result = spin_event_timeout(
			(in_be32(&pmc_regs->powmgtcsr) & POWMGTCSR_SLP) == 0,
			10000, 10);
		if (!result) {
			pr_err("%s: timeout waiting for SLP bit "
				"to be cleared\n", __func__);
			ret = -ETIMEDOUT;
		}
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}
/*
 * PMC suspend entry with PIXIS FPGA power monitoring: deep sleep for
 * PM_SUSPEND_MEM, PMCSR-based sleep for PM_SUSPEND_STANDBY.
 *
 * Returns 0 on success, -ETIMEDOUT if the SLP bit never clears after a
 * standby resume, -EINVAL for unsupported states.
 */
static int pmc_suspend_enter(suspend_state_t state)
{
	int ret;
	/* POWMGTREQ value handed to the deep-sleep low-level entry code. */
	u32 powmgtreq = 0x00500000;

	switch (state) {
	case PM_SUSPEND_MEM:
#ifdef CONFIG_SPE
		/* Force SPE state to be saved before the core powers down. */
		enable_kernel_spe();
#endif
		pr_debug("Entering deep sleep\n");

		local_irq_disable();
		mpc85xx_enter_deep_sleep(get_immrbase(), powmgtreq);

		/* Execution resumes here after wakeup. */
		pr_debug("Resumed from deep sleep\n");
		return 0;

	case PM_SUSPEND_STANDBY:
		local_irq_disable();

		/* Start the power monitor using FPGA */
		pixis_start_pm_sleep();

		setbits32(&pmc_regs->pmcsr, PMCSR_SLP);
		/* At this point, the CPU is asleep. */

		/* Upon resume, wait for SLP bit to be clear. */
		ret = spin_event_timeout(
			(in_be32(&pmc_regs->pmcsr) & PMCSR_SLP) == 0,
			10000, 10) ? 0 : -ETIMEDOUT;
		if (ret)
			dev_err(pmc_dev,
				"timeout waiting for SLP bit to be cleared\n");

		/* Stop the power monitor using FPGA */
		pixis_stop_pm_sleep();

		/*
		 * Propagate the timeout instead of unconditionally returning
		 * 0, matching the other suspend handlers; the old code
		 * computed -ETIMEDOUT and then discarded it.
		 */
		return ret;

	default:
		return -EINVAL;
	}
}
/*
 * Issue a command to the QUICC Engine command processor and wait for it
 * to be accepted.
 *
 * @cmd:          command opcode (QE_RESET is written directly with no
 *                operands)
 * @device:       sub-block, or SNUM for page/RISC-assignment commands
 * @mcn_protocol: protocol/MCN field, shifted per command type
 * @cmd_input:    value placed in the command data register (CECDR)
 *
 * Returns 1 if the QE cleared the FLG bit (command accepted), 0 on
 * timeout.  Serialized against other QE commands by qe_lock.
 */
int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
{
	unsigned long flags;
	u8 snum_shift = 0;
	u8 proto_shift = 0;
	u32 done;

	spin_lock_irqsave(&qe_lock, flags);

	if (cmd == QE_RESET) {
		out_be32(&qe_immr->cp.cecr, (u32)(cmd | QE_CR_FLG));
	} else {
		switch (cmd) {
		case QE_ASSIGN_PAGE:
			/* Here device is the SNUM, not sub-block */
			snum_shift = QE_CR_SNUM_SHIFT;
			break;
		case QE_ASSIGN_RISC:
			/*
			 * Here device is the SNUM, and mcnProtocol is
			 * e_QeCmdRiscAssignment value
			 */
			snum_shift = QE_CR_SNUM_SHIFT;
			proto_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT;
			break;
		default:
			proto_shift = (device == QE_CR_SUBBLOCK_USB) ?
				QE_CR_MCN_USB_SHIFT : QE_CR_MCN_NORMAL_SHIFT;
			break;
		}

		out_be32(&qe_immr->cp.cecdr, cmd_input);
		out_be32(&qe_immr->cp.cecr,
			 cmd | QE_CR_FLG | ((u32)device << snum_shift) |
			 (u32)mcn_protocol << proto_shift);
	}

	/*
	 * wait for the QE_CR_FLG to clear; spin_event_timeout evaluates to
	 * 1 when the condition became true, 0 on timeout.
	 */
	done = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG)
				  == 0, 100, 0);

	spin_unlock_irqrestore(&qe_lock, flags);

	return done == 1;
}
/*
 * Kick secondary CPU @nr out of its spin loop via the ePAPR spin table.
 *
 * Locates the CPU's "cpu-release-addr", maps the spin table (ioremap for
 * highmem/bootpage, direct phys_to_virt for lowmem), writes the release
 * values, and waits for the core to acknowledge.
 *
 * Returns 0 on success, -ENOENT if the release address is missing or the
 * core never acks/resets within the timeout.
 *
 * NOTE(review): np from of_get_cpu_node() is never of_node_put() — looks
 * like a refcount leak; confirm against the DT API contract.
 */
static int __cpuinit smp_85xx_kick_cpu(int nr)
{
	unsigned long flags;
	const u64 *cpu_rel_addr;
	__iomem struct epapr_spin_table *spin_table;
	struct device_node *np;
	int hw_cpu = get_hard_smp_processor_id(nr);
	int ioremappable;
	int ret = 0;

	WARN_ON(nr < 0 || nr >= NR_CPUS);
	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);

	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

	np = of_get_cpu_node(nr, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);

	if (cpu_rel_addr == NULL) {
		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
		return -ENOENT;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to directly access the spinloop if its in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		spin_table = ioremap(*cpu_rel_addr,
				sizeof(struct epapr_spin_table));
	else
		spin_table = phys_to_virt(*cpu_rel_addr);

	local_irq_save(flags);
#ifdef CONFIG_PPC32
#ifdef CONFIG_HOTPLUG_CPU
	/* Corresponding to generic_set_cpu_dead() */
	generic_set_cpu_up(nr);

	if (system_state == SYSTEM_RUNNING) {
		/* Park the (previously offlined) core before resetting it. */
		out_be32(&spin_table->addr_l, 0);

		/*
		 * We don't set the BPTR register here since it already points
		 * to the boot page properly.
		 */
		mpic_reset_core(hw_cpu);

		/* wait until core is ready... */
		if (!spin_event_timeout(in_be32(&spin_table->addr_l) == 1,
					10000, 100)) {
			pr_err("%s: timeout waiting for core %d to reset\n",
				__func__, hw_cpu);
			ret = -ENOENT;
			goto out;
		}

		/* clear the acknowledge status */
		__secondary_hold_acknowledge = -1;
	}
#endif
	/* Release the core: identify it, then point it at the entry code. */
	out_be32(&spin_table->pir, hw_cpu);
	out_be32(&spin_table->addr_l, __pa(__early_start));

	/* Directly-mapped table must be flushed so the core sees the write. */
	if (!ioremappable)
		flush_dcache_range((ulong)spin_table,
			(ulong)spin_table + sizeof(struct epapr_spin_table));

	/* Wait a bit for the CPU to ack. */
	if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
				10000, 100)) {
		pr_err("%s: timeout waiting for core %d to ack\n",
			__func__, hw_cpu);
		ret = -ENOENT;
		goto out;
	}
out:
#else
	/* 64-bit: generic kick, then publish PIR and the 64-bit entry addr. */
	smp_generic_kick_cpu(nr);

	out_be32(&spin_table->pir, hw_cpu);
	out_be64((u64 *)(&spin_table->addr_h),
		__pa((u64)*((unsigned long long *)generic_secondary_smp_init)));

	if (!ioremappable)
		flush_dcache_range((ulong)spin_table,
			(ulong)spin_table + sizeof(struct epapr_spin_table));
#endif

	local_irq_restore(flags);

	if (ioremappable)
		iounmap(spin_table);

	return ret;
}