/*
 * imx6q_restart - machine restart hook for i.MX6Q
 * @mode: restart mode character (unused here, part of the hook signature)
 * @cmd:  optional restart command string (unused here)
 *
 * Attempts a watchdog-driven SoC reset: maps the "fsl,imx6q-wdt" node and
 * kicks the watchdog enable bit.  If the watchdog cannot be mapped or the
 * reset fails to assert within 500 ms, falls back to soft_restart(0).
 * Never returns on success.
 *
 * NOTE(review): np/wdog_base are intentionally not released — the CPU is
 * expected to reset before this function returns.
 */
void imx6q_restart(char mode, const char *cmd)
{
	struct device_node *np;
	void __iomem *wdog_base;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-wdt");
	wdog_base = of_iomap(np, 0);
	if (!wdog_base)
		goto soft;

	imx_src_prepare_restart();

	/* enable wdog (bit 2 of the watchdog control register) */
	writew_relaxed(1 << 2, wdog_base);
	/* write twice to ensure the request will not get ignored */
	writew_relaxed(1 << 2, wdog_base);

	/* wait for reset to assert ... */
	mdelay(500);

	/* If we are still running, the watchdog reset did not happen. */
	pr_err("Watchdog reset failed to assert reset\n");

	/* delay to allow the serial port to show the message */
	mdelay(50);

soft:
	/* we'll take a jump through zero as a poor second */
	soft_restart(0);
}
/*
 * cpc_write - write @val into the register described by @reg.
 *
 * Registers living in the PCC shared-memory space are written through
 * their mapped virtual address with a width-matched relaxed MMIO
 * accessor; all other address spaces are delegated to
 * acpi_os_write_memory().
 *
 * Returns 0 on success, -EFAULT for an unsupported PCC bit width, or
 * the status reported by acpi_os_write_memory().
 */
static int cpc_write(struct cpc_reg *reg, u64 val)
{
	void __iomem *vaddr;

	if (reg->space_id != ACPI_ADR_SPACE_PLATFORM_COMM)
		return acpi_os_write_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	vaddr = GET_PCC_VADDR(reg->address);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC\n",
				reg->bit_width);
		return -EFAULT;
	}

	return 0;
}
/*
 * stm_send - push up to 8 bytes of trace data to an STM channel address.
 * @addr:        MMIO destination for this write
 * @data:        source buffer
 * @size:        number of bytes to send (1, 2, 4 or, on 64-bit, 8)
 * @write_bytes: alignment granule the hardware requires
 *
 * Unaligned sources are staged through a stack buffer first so the
 * typed MMIO stores below always read from aligned memory.  Sizes with
 * no matching accessor are silently dropped.
 */
static void stm_send(void __iomem *addr, const void *data, u32 size,
		     u8 write_bytes)
{
	u8 staging[8];

	if (stm_addr_unaligned(data, write_bytes)) {
		memcpy(staging, data, size);
		data = staging;
	}

	/* now we are 64bit/32bit aligned */
#ifdef CONFIG_64BIT
	if (size == 8) {
		writeq_relaxed(*(u64 *)data, addr);
		return;
	}
#endif
	if (size == 4)
		writel_relaxed(*(u32 *)data, addr);
	else if (size == 2)
		writew_relaxed(*(u16 *)data, addr);
	else if (size == 1)
		writeb_relaxed(*(u8 *)data, addr);
}
/*
 * vf610_mscm_ir_restore - reprogram all MSCM interrupt-router slots
 * from the snapshot saved in @data->saved_irsprc (e.g. after a low
 * power state where the router contents were lost).
 */
static inline void vf610_mscm_ir_restore(struct vf610_mscm_ir_chip_data *data)
{
	int slot;

	for (slot = 0; slot < MSCM_IRSPRC_NUM; slot++)
		writew_relaxed(data->saved_irsprc[slot],
			       data->mscm_ir_base + MSCM_IRSPRC(slot));
}
/*
 * sdhci_sprd_writew - 16-bit register write hook for the Spreadtrum
 * SDHCI controller.
 *
 * SDHCI_BLOCK_COUNT is Read Only on Spreadtrum's platform, so writes
 * to it are silently discarded; everything else goes straight to MMIO.
 */
static inline void sdhci_sprd_writew(struct sdhci_host *host, u16 val, int reg)
{
	if (likely(reg != SDHCI_BLOCK_COUNT))
		writew_relaxed(val, host->ioaddr + reg);
}
/*
 * vf610_mscm_ir_disable - irq_chip disable hook for the Vybrid MSCM
 * interrupt router.
 *
 * Clears the routing register for this hwirq (no CPU receives it),
 * then masks the interrupt in the parent chip.
 */
static void vf610_mscm_ir_disable(struct irq_data *data)
{
	struct vf610_mscm_ir_chip_data *chip_data = data->chip_data;
	irq_hw_number_t hwirq = data->hwirq;

	writew_relaxed(0x0, chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq));

	irq_chip_mask_parent(data);
}
/*
 * sdhci_msm_writew - 16-bit register write hook for the MSM SDHCI
 * controller.  This function may sleep: if the write requires a power
 * IRQ handshake, sdhci_msm_check_power_status() waits for it after the
 * register is written.
 */
static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
{
	u32 req_type = __sdhci_msm_check_write(host, val, reg);

	writew_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}
/*
 * omap_mcbsp_write - write @val to McBSP register @reg and mirror it in
 * the register cache.
 *
 * The access width (16 vs 32 bit) and address stride come from platform
 * data, so the same code serves both register layouts.
 */
static void omap_mcbsp_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val)
{
	void __iomem *addr = mcbsp->io_base + reg * mcbsp->pdata->reg_step;

	if (mcbsp->pdata->reg_size != 2) {
		/* 32-bit register file */
		((u32 *)mcbsp->reg_cache)[reg] = val;
		writel_relaxed(val, addr);
	} else {
		/* 16-bit register file */
		((u16 *)mcbsp->reg_cache)[reg] = (u16)val;
		writew_relaxed((u16)val, addr);
	}
}
/*
 * vf610_mscm_ir_enable - irq_chip enable hook for the Vybrid MSCM
 * interrupt router.
 *
 * Routes the hwirq to this chip's CPU mask and unmasks it in the
 * parent chip.  Warns if the router slot was already claimed by a CPU
 * outside our mask.
 */
static void vf610_mscm_ir_enable(struct irq_data *data)
{
	struct vf610_mscm_ir_chip_data *chip_data = data->chip_data;
	irq_hw_number_t hwirq = data->hwirq;
	u16 routing;

	routing = readw_relaxed(chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq));
	routing &= MSCM_IRSPRC_CPEN_MASK;

	/* Flag a slot already routed to a CPU we do not own. */
	WARN_ON(routing & ~chip_data->cpu_mask);

	writew_relaxed(chip_data->cpu_mask,
		       chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq));

	irq_chip_unmask_parent(data);
}
/*
 * cpc_write - write @val into the CPC register resource @reg_res for @cpu.
 *
 * Resolves the backing store by address space:
 *  - PLATFORM_COMM (with a valid PCC subspace): mapped PCC shared memory
 *  - SYSTEM_MEMORY: the pre-mapped sys_mem_vaddr
 *  - FIXED_HARDWARE: delegated to cpc_write_ffh()
 *  - anything else: acpi_os_write_memory()
 *
 * For the two MMIO-backed cases the write uses a width-matched relaxed
 * accessor.  Returns 0 on success, -EFAULT for an unsupported bit
 * width, or the status of the delegated helper.
 *
 * Fixes vs. original: the mis-encoded "®_res" (mojibake for
 * "&reg_res") is restored so the code compiles, and the null pointer
 * constant uses NULL instead of 0.
 */
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC\n",
				reg->bit_width);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}
/*
 * restore_uart - reprogram the ST PL011 UART from the saved
 * context_uart snapshot after a power state where the register
 * contents were lost.
 *
 * NOTE(review): the register write ordering here follows the hardware's
 * required sequence (loopback drain, 10-PCLK delays before LCRH_TX) —
 * do not reorder these statements.
 */
static void restore_uart(void)
{
	int cnt;
	int retries = 100;
	unsigned int cr;
	void __iomem *membase;
	u16 dummy;
	bool show_warn = false;

	membase = context_uart.base;

	clk_enable(context_uart.uart_clk);

	writew_relaxed(context_uart.ifls, membase + UART011_IFLS);
	/*
	 * Enable the UART with TX and loopback so the dummy character
	 * below can drain without driving the external line.
	 */
	cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
	writew_relaxed(cr, membase + UART011_CR);
	writew_relaxed(0, membase + UART011_FBRD);
	writew_relaxed(1, membase + UART011_IBRD);
	writew_relaxed(0, membase + ST_UART011_LCRH_RX);
	if (context_uart.lcrh_tx != ST_UART011_LCRH_RX) {
		int i;

		/*
		 * Wait 10 PCLKs before writing LCRH_TX register,
		 * to get this delay write read only register 10 times
		 */
		for (i = 0; i < 10; ++i)
			dummy = readw(membase + ST_UART011_LCRH_RX);
		writew_relaxed(0, membase + ST_UART011_LCRH_TX);
	}
	/* Send one dummy character and wait for it to clear the FIFO. */
	writew(0, membase + UART01x_DR);
	do {
		if (!(readw(membase + UART01x_FR) & UART01x_FR_BUSY))
			break;
		cpu_relax();
	} while (retries-- > 0);
	if (retries < 0)
		/*
		 * We can't print out a warning here since the uart is
		 * not fully restored. Do it later.
		 */
		show_warn = true;

	/* Restore the saved register values in the required order. */
	writel_relaxed(context_uart.dma_wm, membase + ST_UART011_DMAWM);
	writel_relaxed(context_uart.timeout, membase + ST_UART011_TIMEOUT);
	writel_relaxed(context_uart.lcrh_rx, membase + ST_UART011_LCRH_RX);
	writel_relaxed(context_uart.ilpr, membase + UART01x_ILPR);
	writel_relaxed(context_uart.ibrd, membase + UART011_IBRD);
	writel_relaxed(context_uart.fbrd, membase + UART011_FBRD);
	/*
	 * Wait 10 PCLKs before writing LCRH_TX register,
	 * to get this delay write read only register 10-3
	 * times, as already there are 3 writes after
	 * ST_UART011_LCRH_RX
	 */
	for (cnt = 0; cnt < 7; cnt++)
		dummy = readw(membase + ST_UART011_LCRH_RX);
	writel_relaxed(context_uart.lcrh_tx, membase + ST_UART011_LCRH_TX);
	writel_relaxed(context_uart.ifls, membase + UART011_IFLS);
	writel_relaxed(context_uart.dmacr, membase + UART011_DMACR);
	writel_relaxed(context_uart.xfcr, membase + ST_UART011_XFCR);
	writel_relaxed(context_uart.xon1, membase + ST_UART011_XON1);
	writel_relaxed(context_uart.xon2, membase + ST_UART011_XON2);
	writel_relaxed(context_uart.xoff1, membase + ST_UART011_XOFF1);
	writel_relaxed(context_uart.xoff2, membase + ST_UART011_XOFF2);
	writel_relaxed(context_uart.abcr, membase + ST_UART011_ABCR);
	writel_relaxed(context_uart.abimsc, membase + ST_UART011_ABIMSC);
	writel_relaxed(context_uart.cr, membase + UART011_CR);
	/* Non-relaxed write: order the IMSC restore against the above. */
	writel(context_uart.imsc, membase + UART011_IMSC);

	clk_disable(context_uart.uart_clk);

	if (show_warn)
		pr_warning("%s:uart tx busy\n", __func__);
}
/*
 * omap_i2c_write_reg - write @val to logical register @reg of the OMAP
 * I2C controller.  The physical offset is looked up in the per-variant
 * regs[] table and scaled by reg_shift.
 */
static inline void omap_i2c_write_reg(struct omap_i2c_dev *i2c_dev,
				      int reg, u16 val)
{
	void __iomem *addr;

	addr = i2c_dev->base + (i2c_dev->regs[reg] << i2c_dev->reg_shift);
	writew_relaxed(val, addr);
}
/*
 * ti_dma_xbar_write - program one DMA crossbar slot.
 *
 * Each slot is a 16-bit register, hence the 2-byte stride when
 * computing the slot address.
 */
static inline void ti_dma_xbar_write(void __iomem *iomem, int xbar, u16 val)
{
	void __iomem *slot = iomem + 2 * xbar;

	writew_relaxed(val, slot);
}
/*
 * send_pcc_cmd - issue a READ or WRITE command on a PCC subspace channel.
 * @pcc_ss_id: index of the PCC subspace to use
 * @cmd:       CMD_READ or CMD_WRITE
 *
 * This function transfers the ownership of the PCC to the platform
 * So it must be called while holding write_lock(pcc_lock)
 *
 * Returns 0 on success or a negative errno (channel check failure,
 * MPAR throttling, or mailbox send failure).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time(MRTT)
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should be ideally zero or large enough so that it can
	 * handle maximum number of requests that all the cores in the system can
	 * collectively generate. If it is not, we will follow the spec and just
	 * not send the request to the platform after hitting the MPAR limit in
	 * any 60s window
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd not sent due to MPAR limit");
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	/* Channel ownership passes to the platform once the doorbell rings. */
	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
		       cmd, ret);
		goto end;
	}

	/* wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			/*
			 * Propagate the failure to every CPU whose write was
			 * batched into this command, then release waiters.
			 */
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}
/*
 * omap_ctrl_writew - 16-bit write into the OMAP System Control Module
 * register file at @offset.
 */
void omap_ctrl_writew(u16 val, u16 offset)
{
	void __iomem *addr = OMAP_CTRL_REGADDR(offset);

	writew_relaxed(val, addr);
}
/*
 * send_pcc_cmd - issue a READ or WRITE command on the (single, global)
 * PCC channel.
 * @cmd: CMD_READ or CMD_WRITE
 *
 * Enforces the PCC spec's MRTT (minimum turnaround between commands)
 * and MPAR (commands-per-minute cap) before writing the command word
 * and ringing the mailbox doorbell.
 *
 * Returns 0 on success or a negative errno (channel busy, MPAR limit,
 * or mailbox send failure).
 */
static int send_pcc_cmd(u16 cmd)
{
	int ret = -EIO;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcc_comm_addr;
	/* Persistent MRTT/MPAR bookkeeping across calls. */
	static ktime_t last_cmd_cmpl_time, last_mpar_reset;
	static int mpar_count;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		ret = check_pcc_chan();
		if (ret)
			return ret;
	}

	/*
	 * Handle the Minimum Request Turnaround Time(MRTT)
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
		if (pcc_mrtt > time_delta)
			udelay(pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should be ideally zero or large enough so that it can
	 * handle maximum number of requests that all the cores in the system can
	 * collectively generate. If it is not, we will follow the spec and just
	 * not send the request to the platform after hitting the MPAR limit in
	 * any 60s window
	 */
	if (pcc_mpar) {
		if (mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
			if (time_delta < 60 * MSEC_PER_SEC) {
				pr_debug("PCC cmd not sent due to MPAR limit");
				return -EIO;
			}
			last_mpar_reset = ktime_get();
			mpar_count = pcc_mpar;
		}
		mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	/* Ring doorbell */
	ret = mbox_send_message(pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, ret);
		return ret;
	}

	/*
	 * For READs we need to ensure the cmd completed to ensure
	 * the ensuing read()s can proceed. For WRITEs we don't care
	 * because the actual write()s are done before coming here
	 * and the next READ or WRITE will check if the channel
	 * is busy/free at the entry of this call.
	 *
	 * If Minimum Request Turnaround Time is non-zero, we need
	 * to record the completion time of both READ and WRITE
	 * command for proper handling of MRTT, so we need to check
	 * for pcc_mrtt in addition to CMD_READ
	 */
	if (cmd == CMD_READ || pcc_mrtt) {
		ret = check_pcc_chan();
		if (pcc_mrtt)
			last_cmd_cmpl_time = ktime_get();
	}

	mbox_client_txdone(pcc_channel, ret);
	return ret;
}