/*
 * Switch the FPGA audio interface between PCM and I2S modes via the
 * FPGA_MSM_CNTRL_REG2 register.  Only writes when the mode actually
 * needs to change.  Silently does nothing if the ioremap fails.
 */
static void config_pcm_i2s_mode(int mode)
{
	void __iomem *cfg_ptr;
	u8 cur;

	cfg_ptr = ioremap_nocache(FPGA_MSM_CNTRL_REG2, sizeof(char));
	if (!cfg_ptr)
		return;

	cur = readb_relaxed(cfg_ptr);
	if (mode) {
		/* enable PCM mode in the FPGA */
		if (cur == 0)
			writeb_relaxed(1, cfg_ptr);
	} else {
		/* enable I2S mode in the FPGA */
		if (cur == 1)
			writeb_relaxed(0, cfg_ptr);
	}

	iounmap(cfg_ptr);
}
/*
 * Push a rectangular region of pixels from framebuffer @buf out through
 * the RFBI parameter register, according to the configured pixel
 * datatype and parallel-bus width.  @scr_width is the framebuffer
 * stride in pixels; (@x, @y) is the top-left corner and @w x @h the
 * size of the update rectangle.
 */
static void rfbi_write_pixels(const void __iomem *buf, int scr_width,
			      u16 x, u16 y, u16 w, u16 h)
{
	int start_offset = scr_width * y + x;
	int horiz_offset = scr_width - w;	/* pixels to skip per row */
	int i;

	if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 &&
	    rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) {
		/* 16-bit pixels over an 8-bit bus: emit byte b[1], then b[0]. */
		const u16 __iomem *pd = buf;
		pd += start_offset;
		for (; h; --h) {
			for (i = 0; i < w; ++i) {
				const u8 __iomem *b = (const u8 __iomem *)pd;
				rfbi_write_reg(RFBI_PARAM, readb_relaxed(b+1));
				rfbi_write_reg(RFBI_PARAM, readb_relaxed(b+0));
				++pd;
			}
			pd += horiz_offset;
		}
	} else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_24 &&
		   rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) {
		/*
		 * 24-bit pixels stored one per 32-bit word, over an 8-bit
		 * bus: emit bytes b[2], b[1], b[0]; b[3] is unused padding.
		 */
		const u32 __iomem *pd = buf;
		pd += start_offset;
		for (; h; --h) {
			for (i = 0; i < w; ++i) {
				const u8 __iomem *b = (const u8 __iomem *)pd;
				rfbi_write_reg(RFBI_PARAM, readb_relaxed(b+2));
				rfbi_write_reg(RFBI_PARAM, readb_relaxed(b+1));
				rfbi_write_reg(RFBI_PARAM, readb_relaxed(b+0));
				++pd;
			}
			pd += horiz_offset;
		}
	} else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 &&
		   rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_16) {
		/* 16-bit pixels over a 16-bit bus: one word write per pixel. */
		const u16 __iomem *pd = buf;
		pd += start_offset;
		for (; h; --h) {
			for (i = 0; i < w; ++i) {
				rfbi_write_reg(RFBI_PARAM, readw_relaxed(pd));
				++pd;
			}
			pd += horiz_offset;
		}
	} else {
		/* Unsupported datatype/bus-width combination. */
		BUG();
	}
}
/*
 * Drain the receiver FIFO into the TTY layer, translating hardware
 * error status into TTY character flags.  Runs until the RX FIFO is
 * empty, then pushes the accumulated characters up.
 */
static void digicolor_uart_rx(struct uart_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);

	while (1) {
		u8 status, ch;
		unsigned int ch_flag;

		if (digicolor_uart_rx_empty(port))
			break;

		/* Read the character, then the status associated with it. */
		ch = readb_relaxed(port->membase + UA_EMI_REC);
		status = readb_relaxed(port->membase + UA_STATUS);

		port->icount.rx++;
		ch_flag = TTY_NORMAL;

		if (status) {
			/* Count each error type; priority parity > frame > overrun. */
			if (status & UA_STATUS_PARITY_ERR)
				port->icount.parity++;
			else if (status & UA_STATUS_FRAME_ERR)
				port->icount.frame++;
			else if (status & UA_STATUS_OVERRUN_ERR)
				port->icount.overrun++;

			/* Only report errors the user asked to see. */
			status &= port->read_status_mask;

			if (status & UA_STATUS_PARITY_ERR)
				ch_flag = TTY_PARITY;
			else if (status & UA_STATUS_FRAME_ERR)
				ch_flag = TTY_FRAME;
			else if (status & UA_STATUS_OVERRUN_ERR)
				ch_flag = TTY_OVERRUN;
		}

		/* Drop characters whose status the user asked to ignore. */
		if (status & port->ignore_status_mask)
			continue;

		uart_insert_char(port, status, UA_STATUS_OVERRUN_ERR, ch,
				 ch_flag);
	}

	spin_unlock_irqrestore(&port->lock, flags);

	/* Push outside the port lock. */
	tty_flip_buffer_push(&port->state->port);
}
/* Stop transmission by masking the TX interrupt in UA_INT_ENABLE. */
static void digicolor_uart_stop_tx(struct uart_port *port)
{
	u8 en;

	en = readb_relaxed(port->membase + UA_INT_ENABLE);
	writeb_relaxed(en & ~UA_INT_TX, port->membase + UA_INT_ENABLE);
}
/*
 * Drive the RTS/DTR modem-control outputs for the given serial port
 * through the Neponset MDM_CTL_0 register.  Register bits are
 * active-low: asserting a line clears its bit.
 */
static void neponset_set_mctrl(struct uart_port *port, u_int mctrl)
{
	void __iomem *base = nep_base;
	u_int rts_bit = 0, dtr_bit = 0;
	u_int ctl0;

	if (!base)
		return;

	/* Pick the register bits belonging to this port, if any. */
	if (port->mapbase == _Ser1UTCR0) {
		rts_bit = MDM_CTL0_RTS2;
		dtr_bit = MDM_CTL0_DTR2;
	} else if (port->mapbase == _Ser3UTCR0) {
		rts_bit = MDM_CTL0_RTS1;
		dtr_bit = MDM_CTL0_DTR1;
	}

	ctl0 = readb_relaxed(base + MDM_CTL_0);

	if (mctrl & TIOCM_RTS)
		ctl0 &= ~rts_bit;
	else
		ctl0 |= rts_bit;

	if (mctrl & TIOCM_DTR)
		ctl0 &= ~dtr_bit;
	else
		ctl0 |= dtr_bit;

	writeb_relaxed(ctl0, base + MDM_CTL_0);
}
/*
 * Read a CPC register value into *val.  PCC (platform communication
 * channel) registers are read through their mapped virtual address
 * with a width-matched accessor; anything else goes through
 * acpi_os_read_memory().  Returns 0 on success, -EFAULT for an
 * unsupported PCC bit width.
 */
static int cpc_read(struct cpc_reg *reg, u64 *val)
{
	void __iomem *vaddr;

	*val = 0;

	if (reg->space_id != ACPI_ADR_SPACE_PLATFORM_COMM)
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, reg->bit_width);

	vaddr = GET_PCC_VADDR(reg->address);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC\n",
			 reg->bit_width);
		return -EFAULT;
	}

	return 0;
}
/*
 * Configure the QMP PHY for autonomous (low-power wakeup) mode.  Clears
 * any stale LFPS RXTERM interrupt, then programs the detection events:
 * detach detection while a cable is connected, attach detection while
 * disconnected.
 */
static void msm_ssusb_qmp_enable_autonomous(struct msm_ssphy_qmp *phy)
{
	u8 val;

	dev_dbg(phy->phy.dev, "enabling QMP autonomous mode with cable %s\n",
		get_cable_status_str(phy));

	/* clear LFPS RXTERM interrupt */
	writeb_relaxed(1, phy->base + PCIE_USB3_PHY_LFPS_RXTERM_IRQ_CLEAR);
	/* flush the previous write before next write */
	wmb();
	writeb_relaxed(0, phy->base + PCIE_USB3_PHY_LFPS_RXTERM_IRQ_CLEAR);

	val = readb_relaxed(phy->base + PCIE_USB3_PHY_AUTONOMOUS_MODE_CTRL);
	val |= ARCVR_DTCT_EN;
	if (phy->cable_connected) {
		val |= ALFPS_DTCT_EN;
		/* Detect detach */
		val &= ~ARCVR_DTCT_EVENT_SEL;
	} else {
		val &= ~ALFPS_DTCT_EN;
		/* Detect attach */
		val |= ARCVR_DTCT_EVENT_SEL;
	}
	writeb_relaxed(val, phy->base + PCIE_USB3_PHY_AUTONOMOUS_MODE_CTRL);
}
/*
 * Configure the QMP PHY for autonomous (low-power wakeup) mode.
 * Clears any stale LFPS RXTERM interrupt, then programs the detection
 * events based on cable state.
 */
static void msm_ssusb_qmp_enable_autonomous(struct msm_ssphy_qmp *phy)
{
	u8 val;

	dev_info(phy->phy.dev, "enabling QMP autonomous mode with cable %s\n",
		 get_cable_status_str(phy));

	/* Pulse the LFPS RXTERM interrupt clear bit. */
	writeb_relaxed(1, phy->base + PCIE_USB3_PHY_LFPS_RXTERM_IRQ_CLEAR);
	/* Make sure the set hits the hardware before clearing it again. */
	wmb();
	writeb_relaxed(0, phy->base + PCIE_USB3_PHY_LFPS_RXTERM_IRQ_CLEAR);

	val = readb_relaxed(phy->base + PCIE_USB3_PHY_AUTONOMOUS_MODE_CTRL);
	val |= ARCVR_DTCT_EN;
	if (val, phy->cable_connected) {
		/* Cable present: arm detach detection. */
		val |= ALFPS_DTCT_EN;
		val &= ~ARCVR_DTCT_EVENT_SEL;
	} else {
		/* No cable: arm attach detection. */
		val &= ~ALFPS_DTCT_EN;
		val |= ARCVR_DTCT_EVENT_SEL;
	}
	writeb_relaxed(val, phy->base + PCIE_USB3_PHY_AUTONOMOUS_MODE_CTRL);
}
/*
 * Program the MiPHY365x impedance/compensation registers for the
 * configured SATA generation, then kick off compensation and poll
 * until the hardware reports completion.
 *
 * NOTE(review): the completion poll below has no timeout, so a stuck
 * PHY would spin forever — confirm this is acceptable for all callers.
 */
static inline void miphy365x_set_comp(struct miphy365x_phy *miphy_phy,
				      struct miphy365x_dev *miphy_dev)
{
	u8 val, mask;

	/* Compensation ratio depends on the SATA generation. */
	if (miphy_phy->sata_gen == SATA_GEN1)
		writeb_relaxed(COMP_2MHZ_RAT_GEN1,
			       miphy_phy->base + COMP_CTRL2_REG);
	else
		writeb_relaxed(COMP_2MHZ_RAT,
			       miphy_phy->base + COMP_CTRL2_REG);

	if (miphy_phy->sata_gen != SATA_GEN3) {
		writeb_relaxed(COMSR_COMP_REF,
			       miphy_phy->base + COMP_CTRL3_REG);
		/*
		 * Force VCO current to value defined by address 0x5A
		 * and disable PCIe100Mref bit
		 * Enable auto load compensation for pll_i_bias
		 */
		writeb_relaxed(BYPASS_PLL_CAL, miphy_phy->base + PLL_CTRL2_REG);
		writeb_relaxed(COMZC_IDLL, miphy_phy->base + COMP_IDLL_REG);
	}

	/*
	 * Force restart compensation and enable auto load
	 * for Comzc_Tx, Comzc_Rx and Comsr on macro
	 */
	val = START_COMSR | START_COMZC | COMP_AUTO_LOAD;
	writeb_relaxed(val, miphy_phy->base + COMP_CTRL1_REG);

	/* Busy-wait until both COMSR and COMZC report done. */
	mask = COMSR_DONE | COMZC_DONE;
	while ((readb_relaxed(miphy_phy->base + COMP_CTRL1_REG) & mask) != mask)
		cpu_relax();
}
static u_int neponset_get_mctrl(struct uart_port *port) { void __iomem *base = nep_base; u_int ret = TIOCM_CD | TIOCM_CTS | TIOCM_DSR; u_int mdm_ctl1; if (!base) return ret; mdm_ctl1 = readb_relaxed(base + MDM_CTL_1); if (port->mapbase == _Ser1UTCR0) { if (mdm_ctl1 & MDM_CTL1_DCD2) ret &= ~TIOCM_CD; if (mdm_ctl1 & MDM_CTL1_CTS2) ret &= ~TIOCM_CTS; if (mdm_ctl1 & MDM_CTL1_DSR2) ret &= ~TIOCM_DSR; } else if (port->mapbase == _Ser3UTCR0) { if (mdm_ctl1 & MDM_CTL1_DCD1) ret &= ~TIOCM_CD; if (mdm_ctl1 & MDM_CTL1_CTS1) ret &= ~TIOCM_CTS; if (mdm_ctl1 & MDM_CTL1_DSR1) ret &= ~TIOCM_DSR; } return ret; }
/* Return the current input level (0 or 1) of the given GPIO. */
static int dc_gpio_get(struct gpio_chip *chip, unsigned gpio)
{
	struct dc_pinmap *pmap = gpiochip_get_data(chip);
	int off = GP_INPUT(gpio / PINS_PER_COLLECTION);
	u8 in = readb_relaxed(pmap->regs + off);

	return (in >> (gpio % PINS_PER_COLLECTION)) & 1;
}
/*
 * Install handler for Neponset IRQ. Note that we have to loop here
 * since the ETHERNET and USAR IRQs are level based, and we need to
 * ensure that the IRQ signal is deasserted before returning. This
 * is rather unfortunate.
 */
static void neponset_irq_handler(struct irq_desc *desc)
{
	struct neponset_drvdata *d = irq_desc_get_handler_data(desc);
	unsigned int irr;

	while (1) {
		/*
		 * Acknowledge the parent IRQ.
		 */
		desc->irq_data.chip->irq_ack(&desc->irq_data);

		/*
		 * Read the interrupt reason register.  Let's have all
		 * active IRQ bits high.  Note: there is a typo in the
		 * Neponset user's guide for the SA1111 IRR level.
		 */
		irr = readb_relaxed(d->base + IRR);
		/* ETHERNET and USAR bits are inverted in hardware. */
		irr ^= IRR_ETHERNET | IRR_USAR;

		/* Done once no source is pending any more. */
		if ((irr & (IRR_ETHERNET | IRR_USAR | IRR_SA1111)) == 0)
			break;

		/*
		 * Since there is no individual mask, we have to
		 * mask the parent IRQ.  This is safe, since we'll
		 * recheck the register for any pending IRQs.
		 */
		if (irr & (IRR_ETHERNET | IRR_USAR)) {
			desc->irq_data.chip->irq_mask(&desc->irq_data);

			/*
			 * Ack the interrupt now to prevent re-entering
			 * this neponset handler.  Again, this is safe
			 * since we'll check the IRR register prior to
			 * leaving.
			 */
			desc->irq_data.chip->irq_ack(&desc->irq_data);

			if (irr & IRR_ETHERNET)
				generic_handle_irq(d->irq_base + NEP_IRQ_SMC91X);

			if (irr & IRR_USAR)
				generic_handle_irq(d->irq_base + NEP_IRQ_USAR);

			desc->irq_data.chip->irq_unmask(&desc->irq_data);
		}

		if (irr & IRR_SA1111)
			generic_handle_irq(d->irq_base + NEP_IRQ_SA1111);
	}
}
/*
 * Set the executing CPUs power mode as defined. This will be in
 * preparation for it executing a WFI instruction.
 *
 * This function must be called with preemption disabled, and as it
 * has the side effect of disabling coherency, caches must have been
 * flushed. Interrupts must also have been disabled.
 */
int scu_power_mode(void __iomem *scu_base, unsigned int mode)
{
	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
	unsigned int status;

	/* Mode 1 is reserved; the SCU status register covers 4 CPUs. */
	if (mode > 3 || mode == 1 || cpu > 3)
		return -EINVAL;

	/* Replace the low two status bits for this CPU with @mode. */
	status = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu);
	status = (status & ~0x03) | mode;
	writeb_relaxed(status, scu_base + SCU_CPU_STATUS + cpu);

	return 0;
}
static irqreturn_t digicolor_uart_int(int irq, void *dev_id) { struct uart_port *port = dev_id; u8 int_status = readb_relaxed(port->membase + UA_INT_STATUS); writeb_relaxed(UA_INT_RX | UA_INT_TX, port->membase + UA_INTFLAG_CLEAR); if (int_status & UA_INT_RX) digicolor_uart_rx(port); if (int_status & UA_INT_TX) digicolor_uart_tx(port); return IRQ_HANDLED; }
/*
 * Make the given GPIO an input by clearing its bit in the drive
 * register, under the pinmap lock.  Always succeeds.
 */
static int dc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
	struct dc_pinmap *pmap = gpiochip_get_data(chip);
	int off = GP_DRIVE0(gpio / PINS_PER_COLLECTION);
	u8 mask = BIT(gpio % PINS_PER_COLLECTION);
	unsigned long flags;
	u8 drive;

	spin_lock_irqsave(&pmap->lock, flags);
	drive = readb_relaxed(pmap->regs + off) & ~mask;
	writeb_relaxed(drive, pmap->regs + off);
	spin_unlock_irqrestore(&pmap->lock, flags);

	return 0;
}
/*
 * Route pin @group to function @selector by rewriting its 2-bit
 * client-select field.  Always succeeds.
 */
static int dc_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
		      unsigned group)
{
	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
	int off, bit;
	u8 val;

	dc_client_sel(group, &off, &bit);

	val = readb_relaxed(pmap->regs + off);
	val = (val & ~(3 << bit)) | (selector << bit);
	writeb_relaxed(val, pmap->regs + off);

	return 0;
}
/*
 * Atomically clear @mask and set @val in the Neponset NCR_0 register.
 * Warns (and does nothing) if the register base is not mapped yet.
 */
void neponset_ncr_frob(unsigned int mask, unsigned int val)
{
	void __iomem *base = nep_base;
	unsigned long flags;
	unsigned int ncr;

	if (!base) {
		WARN(1, "nep_base unset\n");
		return;
	}

	/* RMW with interrupts off so concurrent frobs can't interleave. */
	local_irq_save(flags);
	ncr = readb_relaxed(base + NCR_0);
	writeb_relaxed((ncr & ~mask) | val, base + NCR_0);
	local_irq_restore(flags);
}
/*
 * Check whether pin @offset can be claimed as a GPIO: it is free only
 * if its 2-bit client-select field reads back zero.  Returns -EBUSY
 * when the pin is already muxed to a function.
 */
static int dc_pmx_request_gpio(struct pinctrl_dev *pcdev,
			       struct pinctrl_gpio_range *range,
			       unsigned offset)
{
	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pcdev);
	int off, bit;

	dc_client_sel(offset, &off, &bit);

	if (readb_relaxed(pmap->regs + off) & (3 << bit))
		return -EBUSY;

	return 0;
}
static inline void sdhci_sprd_writeb(struct sdhci_host *host, u8 val, int reg)
{
	/*
	 * BIT(3) of SDHCI_SOFTWARE_RESET is reserved by the SDHCI spec,
	 * so generic sdhci_reset() writes the register without preserving
	 * it.  On Spreadtrum hardware that bit is the card hardware-reset
	 * control, and clearing it by accident kills the card.  Work
	 * around this by carrying the bit over on every write to that
	 * register.
	 */
	if (unlikely(reg == SDHCI_SOFTWARE_RESET) &&
	    (readb_relaxed(host->ioaddr + reg) & SDHCI_HW_RESET_CARD))
		val |= SDHCI_HW_RESET_CARD;

	writeb_relaxed(val, host->ioaddr + reg);
}
/* Set the output level of the given GPIO, under the pinmap lock. */
static void dc_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
{
	struct dc_pinmap *pmap = gpiochip_get_data(chip);
	int off = GP_OUTPUT0(gpio / PINS_PER_COLLECTION);
	u8 mask = BIT(gpio % PINS_PER_COLLECTION);
	unsigned long flags;
	u8 out;

	spin_lock_irqsave(&pmap->lock, flags);
	out = readb_relaxed(pmap->regs + off);
	if (value)
		out |= mask;
	else
		out &= ~mask;
	writeb_relaxed(out, pmap->regs + off);
	spin_unlock_irqrestore(&pmap->lock, flags);
}
/*
 * Start the watchdog: program the clock divider and timeout, wait for
 * the counter write to complete, then enable the timer.  Always
 * returns 0.
 */
static int rwdt_start(struct watchdog_device *wdev)
{
	struct rwdt_priv *priv = watchdog_get_drvdata(wdev);

	pm_runtime_get_sync(wdev->parent);

	/* Program CSRB, then CSRA with the chosen clock-select bits. */
	rwdt_write(priv, 0, RWTCSRB);
	rwdt_write(priv, priv->cks, RWTCSRA);
	rwdt_init_timeout(wdev);

	/* Counter writes complete asynchronously; poll WRFLG until done. */
	while (readb_relaxed(priv->base + RWTCSRA) & RWTCSRA_WRFLG)
		cpu_relax();

	/* Finally set TME to start the timer. */
	rwdt_write(priv, priv->cks | RWTCSRA_TME, RWTCSRA);

	return 0;
}
static inline int miphy365x_rdy(struct miphy365x_phy *miphy_phy, struct miphy365x_dev *miphy_dev) { unsigned long timeout = jiffies + msecs_to_jiffies(HFC_TIMEOUT); u8 mask = IDLL_RDY | PLL_RDY; u8 regval; do { regval = readb_relaxed(miphy_phy->base + STATUS_REG); if ((regval & mask) == mask) return 0; usleep_range(2000, 2500); } while (time_before(jiffies, timeout)); dev_err(miphy_dev->dev, "PHY not ready timeout!\n"); return -EBUSY; }
/*
 * Read a CPC register resource for @cpu into *val.  Integer-typed
 * entries return their cached value directly.  Register entries are
 * dispatched by address space: PCC registers (with a valid subspace)
 * and system memory are read through an ioremapped address with a
 * width-matched accessor, fixed hardware goes through cpc_read_ffh(),
 * and everything else through acpi_os_read_memory().  Returns 0 on
 * success, -EFAULT for an unsupported bit width.
 */
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;	/* use NULL, not 0, for pointers */
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC\n",
			 reg->bit_width);
		ret_val = -EFAULT;
	}

	return ret_val;
}
/*
 * Consume one byte from the receive holding register (RHR) and advance
 * the transfer state, including SMBus block-length handling and the
 * early STOP needed before the last byte.
 */
static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	/*
	 * If we are in this case, it means there is garbage data in RHR, so
	 * delete them.
	 */
	if (!dev->buf_len) {
		at91_twi_read(dev, AT91_TWI_RHR);
		return;
	}

	/* 8bit read works with and without FIFO */
	*dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR);
	--dev->buf_len;

	/* return if aborting, we only needed to read RHR to clear RXRDY*/
	if (dev->recv_len_abort)
		return;

	/* handle I2C_SMBUS_BLOCK_DATA */
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* ensure length byte is a valid value */
		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
			/*
			 * First received byte is the block length: extend
			 * the transfer by that many bytes and clear the
			 * flag so this runs only once.
			 */
			dev->msg->flags &= ~I2C_M_RECV_LEN;
			dev->buf_len += *dev->buf;
			dev->msg->len = dev->buf_len + 1;
			dev_dbg(dev->dev, "received block length %zu\n",
				dev->buf_len);
		} else {
			/* abort and send the stop by reading one more byte */
			dev->recv_len_abort = true;
			dev->buf_len = 1;
		}
	}

	/* send stop if second but last byte has been read */
	if (!dev->use_alt_cmd && dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}
/*
 * Make the given GPIO an output driving @value: latch the requested
 * level first, then set the drive bit under the pinmap lock.  Always
 * succeeds.
 */
static int dc_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
				    int value)
{
	/*
	 * Use gpiochip_get_data() like the other gpio_chip callbacks in
	 * this driver; container_of() only worked because the chip is
	 * embedded in struct dc_pinmap and would silently break if that
	 * layout changed.
	 */
	struct dc_pinmap *pmap = gpiochip_get_data(chip);
	int reg_off = GP_DRIVE0(gpio / PINS_PER_COLLECTION);
	int bit_off = gpio % PINS_PER_COLLECTION;
	unsigned long flags;
	u8 drive;

	/* Set the level before enabling the driver to avoid a glitch. */
	dc_gpio_set(chip, gpio, value);

	spin_lock_irqsave(&pmap->lock, flags);
	drive = readb_relaxed(pmap->regs + reg_off);
	drive |= BIT(bit_off);
	writeb_relaxed(drive, pmap->regs + reg_off);
	spin_unlock_irqrestore(&pmap->lock, flags);

	return 0;
}
static void sdhci_sprd_hw_reset(struct sdhci_host *host)
{
	int val;

	/*
	 * Note: don't use sdhci_writeb() API here since it is redirected to
	 * sdhci_sprd_writeb() in which we have a workaround for
	 * SDHCI_SOFTWARE_RESET which would make bit SDHCI_HW_RESET_CARD can
	 * not be cleared.
	 */
	val = readb_relaxed(host->ioaddr + SDHCI_SOFTWARE_RESET);

	/* Drop the reset bit, hold it low for ~10us, then raise it again. */
	writeb_relaxed(val & ~SDHCI_HW_RESET_CARD,
		       host->ioaddr + SDHCI_SOFTWARE_RESET);
	usleep_range(10, 20);

	writeb_relaxed(val | SDHCI_HW_RESET_CARD,
		       host->ioaddr + SDHCI_SOFTWARE_RESET);
	usleep_range(300, 500);
}
/*
 * Configure the QMP PHY autonomous-mode control register: always
 * enable receiver detection, and select between detach detection
 * (cable connected) and attach detection (cable absent).  Unlike the
 * other variants of this function, no LFPS RXTERM interrupt clearing
 * is performed here.
 */
static void msm_ssusb_qmp_enable_autonomous(struct msm_ssphy_qmp *phy)
{
	u8 val;

	dev_dbg(phy->phy.dev, "enabling QMP autonomous mode with cable %s\n",
		get_cable_status_str(phy));

	val = readb_relaxed(phy->base + PCIE_USB3_PHY_AUTONOMOUS_MODE_CTRL);
	val |= ARCVR_DTCT_EN;
	if (phy->cable_connected) {
		val |= ALFPS_DTCT_EN;
		/* Detect detach */
		val &= ~ARCVR_DTCT_EVENT_SEL;
	} else {
		val &= ~ALFPS_DTCT_EN;
		/* Detect attach */
		val |= ARCVR_DTCT_EVENT_SEL;
	}
	writeb_relaxed(val, phy->base + PCIE_USB3_PHY_AUTONOMOUS_MODE_CTRL);
}
/*
 * Console write callback: emit @n characters, then busy-wait until the
 * transmitter reports ready so the message is fully out before return.
 */
static void digicolor_uart_console_write(struct console *co, const char *c,
					 unsigned n)
{
	struct uart_port *port = digicolor_ports[co->index];
	u8 status;
	unsigned long flags;
	int locked = 1;

	/*
	 * During an oops the lock may already be held by the crashed
	 * context; try-lock instead of deadlocking, and print anyway.
	 */
	if (oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	uart_console_write(port, c, n, digicolor_uart_console_putchar);

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);

	/* Wait for transmitter to become empty */
	do {
		status = readb_relaxed(port->membase + UA_STATUS);
	} while ((status & UA_STATUS_TX_READY) == 0);
}
static int rwdt_probe(struct platform_device *pdev) { struct rwdt_priv *priv; struct resource *res; struct clk *clk; unsigned long clks_per_sec; int ret, i; if (rwdt_blacklisted(&pdev->dev)) return -ENODEV; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); priv->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); priv->clk_rate = clk_get_rate(clk); priv->wdev.bootstatus = (readb_relaxed(priv->base + RWTCSRA) & RWTCSRA_WOVF) ? WDIOF_CARDRESET : 0; pm_runtime_put(&pdev->dev); if (!priv->clk_rate) { ret = -ENOENT; goto out_pm_disable; } for (i = ARRAY_SIZE(clk_divs) - 1; i >= 0; i--) { clks_per_sec = priv->clk_rate / clk_divs[i]; if (clks_per_sec && clks_per_sec < 65536) { priv->cks = i; break; } } if (i < 0) { dev_err(&pdev->dev, "Can't find suitable clock divider\n"); ret = -ERANGE; goto out_pm_disable; } priv->wdev.info = &rwdt_ident, priv->wdev.ops = &rwdt_ops, priv->wdev.parent = &pdev->dev; priv->wdev.min_timeout = 1; priv->wdev.max_timeout = DIV_BY_CLKS_PER_SEC(priv, 65536); priv->wdev.timeout = min(priv->wdev.max_timeout, RWDT_DEFAULT_TIMEOUT); platform_set_drvdata(pdev, priv); watchdog_set_drvdata(&priv->wdev, priv); watchdog_set_nowayout(&priv->wdev, nowayout); watchdog_set_restart_priority(&priv->wdev, 0); /* This overrides the default timeout only if DT configuration was found */ ret = watchdog_init_timeout(&priv->wdev, 0, &pdev->dev); if (ret) dev_warn(&pdev->dev, "Specified timeout value invalid, using default\n"); ret = watchdog_register_device(&priv->wdev); if (ret < 0) goto out_pm_disable; return 0; out_pm_disable: pm_runtime_disable(&pdev->dev); return ret; }
/* Read one byte from the USBHS register block at offset @reg. */
static inline u8 usbhs_readb(void __iomem *base, u8 reg)
{
	u8 val = readb_relaxed(base + reg);

	return val;
}