/*
 * Configure all PLL counter parameters.
 *
 * Writes the divider (N), multiplier (M) and C0 post-scale counter,
 * triggers the transfer into the PLL, then polls the transfer register
 * (one initial read plus up to 10 retries) for completion.  Always
 * returns 0: a timeout is only logged, because old FPGA bitfiles never
 * set the completion flag.
 */
static int
altpll_write_params(struct altpll_softc *sc, uint32_t mul, uint32_t div,
    uint32_t c0)
{
	uint32_t status;
	int retry;

	altpll_write_param(sc, ALTPLL_OFF_TYPE_N, div);
	altpll_write_param(sc, ALTPLL_OFF_TYPE_M, mul);
	altpll_write_param(sc, ALTPLL_OFF_TYPE_C0, c0);
	/*
	 * Program C1 with the same parameters as C0. It seems the PLL does not
	 * run correctly otherwise.
	 */
	altpll_write_param(sc, ALTPLL_OFF_TYPE_C1, c0);

	/* Trigger the transfer. */
	bus_write_4(sc->ap_reg_res, ALTPLL_OFF_TRANSFER, htole32(0xff));

	/* Wait for the transfer to complete. */
	status = bus_read_4(sc->ap_reg_res, ALTPLL_OFF_TRANSFER);
	for (retry = 0; status != htole32(ALTPLL_TRANSFER_COMPLETE) &&
	    retry < 10; retry++)
		status = bus_read_4(sc->ap_reg_res, ALTPLL_OFF_TRANSFER);
	if (status != htole32(ALTPLL_TRANSFER_COMPLETE)) {
		device_printf(sc->ap_dev,
		    "timed out waiting for transfer to PLL\n");
		/* XXXEM ignore error for now - not set by old FPGA bitfiles. */
	}
	return (0);
}
static void davbus_cint(void *ptr) { struct davbus_softc *d = ptr; u_int reg, status, mask; mtx_lock(&d->mutex); reg = bus_read_4(d->reg, DAVBUS_SOUND_CTRL); if (reg & DAVBUS_PORTCHG) { status = bus_read_4(d->reg, DAVBUS_CODEC_STATUS); if (d->read_status && d->set_outputs) { mask = (*d->read_status)(d, status); (*d->set_outputs)(d, mask); } /* Clear the interrupt. */ bus_write_4(d->reg, DAVBUS_SOUND_CTRL, reg); } mtx_unlock(&d->mutex); }
/*
 * Service an MPR (memory-mapped DPMEM) interrupt: acknowledge the
 * doorbell, collect the command status / info / service words into
 * @ctx and, for asynchronous events on sufficiently new firmware,
 * copy out the NUL-terminated event string (at most 256 bytes).
 * Finishes by releasing semaphore 1 so the adapter can post again.
 */
void
gdt_mpr_intr(struct gdt_softc *gdt, struct gdt_intr_ctx *ctx)
{
	int i;

	GDT_DPRINTF(GDT_D_INTR, ("gdt_mpr_intr(%p) ", gdt));

	/* Acknowledge the doorbell interrupt. */
	bus_write_1(gdt->sc_dpmem, GDT_MPR_EDOOR, 0xff);

	if (ctx->istatus & 0x80) {		/* error flag */
		ctx->istatus &= ~0x80;
		ctx->cmd_status = bus_read_2(gdt->sc_dpmem, GDT_MPR_STATUS);
	} else					/* no error */
		ctx->cmd_status = GDT_S_OK;

	ctx->info = bus_read_4(gdt->sc_dpmem, GDT_MPR_INFO);
	ctx->service = bus_read_2(gdt->sc_dpmem, GDT_MPR_SERVICE);
	ctx->info2 = bus_read_4(gdt->sc_dpmem,
	    GDT_MPR_INFO + sizeof (u_int32_t));

	/* event string */
	if (ctx->istatus == GDT_ASYNCINDEX) {
		/* Event strings exist only on firmware revision >= 0x1a. */
		if (ctx->service != GDT_SCREENSERVICE &&
		    (gdt->sc_fw_vers & 0xff) >= 0x1a) {
			gdt->sc_dvr.severity = bus_read_1(gdt->sc_dpmem,
			    GDT_SEVERITY);
			for (i = 0; i < 256; ++i) {
				gdt->sc_dvr.event_string[i] =
				    bus_read_1(gdt->sc_dpmem,
				    GDT_EVT_BUF + i);
				if (gdt->sc_dvr.event_string[i] == 0)
					break;
			}
		}
	}
	/* Release the semaphore. */
	bus_write_1(gdt->sc_dpmem, GDT_MPR_SEMA1, 0);
}
/*
 * Submit a prepared descriptor to the iSMT master and sleep (up to
 * 5 seconds) until the interrupt handler wakes us, then translate the
 * descriptor completion status into an SMB_* error code.
 *
 * Returns SMB_ENOERR on success, SMB_ETIMEOUT if the command never
 * completed, or a specific SMB_* code decoded from the status bits
 * (falling back to SMB_EBUSERR for anything unrecognized).
 */
static int
ismt_submit(struct ismt_softc *sc, struct ismt_desc *desc, uint8_t slave,
    uint8_t is_read)
{
	uint32_t err, fmhp, val;

	/* Request fair arbitration; interrupt-on-complete only with MSI. */
	desc->control |= ISMT_DESC_FAIR;
	if (sc->using_msi)
		desc->control |= ISMT_DESC_INT;
	desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(slave, is_read);
	desc->dptr_low = (sc->dma_buffer_bus_addr & 0xFFFFFFFFLL);
	desc->dptr_high = (sc->dma_buffer_bus_addr >> 32);
	/* Descriptor must be globally visible before we ring the doorbell. */
	wmb();

	/* Point the firmware master head pointer at our descriptor. */
	fmhp = sc->head << 16;
	val = bus_read_4(sc->mmio_res, ISMT_MSTR_MCTRL);
	val &= ~ISMT_MCTRL_FMHP;
	val |= fmhp;
	bus_write_4(sc->mmio_res, ISMT_MSTR_MCTRL, val);

	/* set the start bit */
	val = bus_read_4(sc->mmio_res, ISMT_MSTR_MCTRL);
	val |= ISMT_MCTRL_SS;
	bus_write_4(sc->mmio_res, ISMT_MSTR_MCTRL, val);

	err = tsleep(sc, PWAIT, "ismt_wait", 5 * hz);
	if (err != 0) {
		ISMT_DEBUG(sc->pcidev, "%s timeout\n", __func__);
		return (SMB_ETIMEOUT);
	}

	ISMT_DEBUG(sc->pcidev, "%s status=0x%x\n", __func__, desc->status);

	/* Decode the completion status bits. */
	if (desc->status & ISMT_DESC_SCS)
		return (SMB_ENOERR);
	if (desc->status & ISMT_DESC_NAK)
		return (SMB_ENOACK);
	if (desc->status & ISMT_DESC_CRC)
		return (SMB_EBUSERR);
	if (desc->status & ISMT_DESC_COL)
		return (SMB_ECOLLI);
	if (desc->status & ISMT_DESC_LPR)
		return (SMB_EINVAL);
	if (desc->status & (ISMT_DESC_DLTO | ISMT_DESC_CLTO))
		return (SMB_ETIMEOUT);
	return (SMB_EBUSERR);
}
/*
 * Parallel port DMA interrupt
 *
 * Handles error and completion interrupts for the parallel-port DMA
 * engine: on error, stops and invalidates the queue and returns -1;
 * otherwise drains the engine, computes how many bytes actually
 * transferred from the residual count, advances the caller's
 * buffer/length bookkeeping, and syncs/unloads the DMA map.
 */
static int
lsi64854_pp_intr(void *arg)
{
	struct lsi64854_softc *sc = arg;
	bus_dma_tag_t dmat;
	bus_dmamap_t dmam;
	size_t dmasize;
	int ret, trans, resid = 0;
	uint32_t csr;

	csr = L64854_GCSR(sc);

	DPRINTF(LDB_PP, ("%s: addr 0x%x, csr %b\n", __func__,
	    bus_read_4(sc->sc_res, L64854_REG_ADDR), csr, PDMACSR_BITS));

	if ((csr & (P_ERR_PEND | P_SLAVE_ERR)) != 0) {
		resid = bus_read_4(sc->sc_res, L64854_REG_CNT);
		device_printf(sc->sc_dev, "error: resid %d csr=%b\n", resid,
		    csr, PDMACSR_BITS);
		csr &= ~P_EN_DMA;	/* Stop DMA. */
		/* Invalidate the queue; SLAVE_ERR bit is write-to-clear */
		csr |= P_INVALIDATE | P_SLAVE_ERR;
		L64854_SCSR(sc, csr);
		return (-1);
	}

	ret = (csr & P_INT_PEND) != 0;

	if (sc->sc_active != 0) {
		DMA_DRAIN(sc, 0);
		resid = bus_read_4(sc->sc_res, L64854_REG_CNT);
	}

	/* DMA has stopped */
	/*
	 * NOTE(review): the error path above clears P_EN_DMA but this path
	 * clears D_EN_DMA -- presumably the two macros name the same bit
	 * position; confirm against the register definitions.
	 */
	csr &= ~D_EN_DMA;
	L64854_SCSR(sc, csr);
	sc->sc_active = 0;

	dmasize = sc->sc_dmasize;
	/*
	 * dmasize is size_t (unsigned), so dmasize - resid wraps when
	 * resid > dmasize; the conversion to int makes the wrapped value
	 * negative, which the check below catches by clamping to dmasize.
	 */
	trans = dmasize - resid;
	if (trans < 0)				/* transferred < 0? */
		trans = dmasize;
	*sc->sc_dmalen -= trans;
	*sc->sc_dmaaddr = (char *)*sc->sc_dmaaddr + trans;

	if (dmasize != 0) {
		dmat = sc->sc_buffer_dmat;
		dmam = sc->sc_dmamap;
		/* Direction of the sync depends on the DMA write bit. */
		bus_dmamap_sync(dmat, dmam, (csr & D_WRITE) != 0 ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, dmam);
	}

	return (ret != 0);
}
/*
 * Fetch the JTAG UART control register, converting the device's
 * little-endian layout to host byte order.
 */
static inline uint32_t
aju_control_read(struct altera_jtag_uart_softc *sc)
{
	uint32_t raw;

	raw = bus_read_4(sc->ajus_mem_res, ALTERA_JTAG_UART_CONTROL_OFF);
	return (le32toh(raw));
}
/*
 * Mixer initialization for the Screamer codec: register the jack-status
 * and output callbacks, program the default input source (CD with the
 * default gain), route outputs according to the current jack status,
 * and zero the remaining codec address registers.
 */
static int
screamer_init(struct snd_mixer *m)
{
	struct davbus_softc *d;

	d = mix_getdevinfo(m);
	d->read_status = screamer_read_status;
	d->set_outputs = screamer_set_outputs;

	mtx_lock(&d->mutex);
	screamer_write_locked(d, SCREAMER_CODEC_ADDR0,
	    SCREAMER_INPUT_CD | SCREAMER_DEFAULT_CD_GAIN);
	/* Program the outputs to match whatever is currently plugged in. */
	screamer_set_outputs(d, screamer_read_status(d,
	    bus_read_4(d->reg, DAVBUS_CODEC_STATUS)));
	screamer_write_locked(d, SCREAMER_CODEC_ADDR2, 0);
	screamer_write_locked(d, SCREAMER_CODEC_ADDR4, 0);
	screamer_write_locked(d, SCREAMER_CODEC_ADDR5, 0);
	screamer_write_locked(d, SCREAMER_CODEC_ADDR6, 0);
	mtx_unlock(&d->mutex);

	mix_setdevs(m, SOUND_MASK_VOLUME);

	return (0);
}
/**
 * Read a data item from the bridged address space at the given @p offset
 * from @p addr, mapping the target address through a dynamic register
 * window.
 *
 * @param probe	The bhndb_pci probe state to be used to perform the
 *		read.
 * @param addr	The base address.
 * @param offset The offset from @p addr at which to read a data item of
 *		@p width bytes.
 * @param width	Item width (1, 2, or 4 bytes).
 *
 * @return	The value read, or UINT32_MAX if the target address could
 *		not be mapped.
 */
static uint32_t
bhndb_pci_probe_read(struct bhndb_pci_probe *probe, bhnd_addr_t addr,
    bhnd_size_t offset, u_int width)
{
	struct resource	*res;
	bus_size_t	 res_off;
	int		 error;

	/* Map the target address */
	error = bhndb_pci_probe_map(probe, addr, offset, width, &res,
	    &res_off);
	if (error != 0) {
		device_printf(probe->dev, "error mapping %#jx+%#jx for "
		    "reading: %d\n", addr, offset, error);
		return (UINT32_MAX);
	}

	/* Perform read */
	switch (width) {
	case 1:
		return (bus_read_1(res, res_off));
	case 2:
		return (bus_read_2(res, res_off));
	case 4:
		return (bus_read_4(res, res_off));
	default:
		panic("unsupported width: %u", width);
	}
}
/*
 * All I/O to/from the MTL register device must be 32-bit, and aligned to
 * 32-bit.
 *
 * Copies the register file out one 32-bit word at a time; uiomove()
 * advances uio_offset/uio_resid after each word.  Misaligned or
 * out-of-range requests are rejected with ENODEV.
 */
static int
terasic_mtl_reg_read(struct cdev *dev, struct uio *uio, int flag)
{
	struct terasic_mtl_softc *sc;
	u_long offset, size;
	uint32_t v;
	int error;

	/* Both the offset and the length must be 32-bit aligned. */
	if (uio->uio_offset < 0 || uio->uio_offset % 4 != 0 ||
	    uio->uio_resid % 4 != 0)
		return (ENODEV);
	sc = dev->si_drv1;
	size = rman_get_size(sc->mtl_reg_res);
	error = 0;
	/* Reject overflowing or out-of-range requests up front. */
	if ((uio->uio_offset + uio->uio_resid < 0) ||
	    (uio->uio_offset + uio->uio_resid > size))
		return (ENODEV);
	while (uio->uio_resid > 0) {
		offset = uio->uio_offset;
		/* Re-check per word; uiomove() advances uio_offset. */
		if (offset + sizeof(v) > size)
			return (ENODEV);
		v = bus_read_4(sc->mtl_reg_res, offset);
		error = uiomove(&v, sizeof(v), uio);
		if (error)
			return (error);
	}
	return (error);
}
/*
 * Timecounter read method: return the current value of the free-running
 * HPET main counter.
 */
static u_int
hpet_get_timecount(struct timecounter *tc)
{
	struct hpet_softc *sc = tc->tc_priv;

	return (bus_read_4(sc->mem_res, HPET_MAIN_COUNTER));
}
/*
 * Read a 32-bit anatop register.  The singleton anatop device must have
 * attached before this may be called.
 */
uint32_t
imx6_anatop_read_4(bus_size_t offset)
{
	uint32_t val;

	KASSERT(imx6_anatop_sc != NULL, ("imx6_anatop_read_4 sc NULL"));

	val = bus_read_4(imx6_anatop_sc->res[MEMRES], offset);
	return (val);
}
/*
 * Save all GPIO state ahead of suspend: the two level registers plus
 * every ordinary and extint GPIO cell, so resume can restore them.
 */
static int
macgpio_suspend(device_t dev)
{
	struct macgpio_softc *sc;
	int pin;

	sc = device_get_softc(dev);

	sc->sc_saved_gpio_levels[0] = bus_read_4(sc->sc_gpios, GPIO_LEVELS_0);
	sc->sc_saved_gpio_levels[1] = bus_read_4(sc->sc_gpios, GPIO_LEVELS_1);

	for (pin = 0; pin < GPIO_COUNT; pin++)
		sc->sc_saved_gpios[pin] =
		    bus_read_1(sc->sc_gpios, GPIO_BASE + pin);
	for (pin = 0; pin < GPIO_EXTINT_COUNT; pin++)
		sc->sc_saved_extint_gpios[pin] =
		    bus_read_1(sc->sc_gpios, GPIO_EXTINT_BASE + pin);

	return (0);
}
/*
 * Power down the UTMI PHY: stop the PHY clock, arm device-mode wakeup
 * (when configured as a device), hold the PHY in reset, and force the
 * charger and transceiver blocks into power-down.  When the last PHY
 * instance is disabled, also power down the shared bias pads; the
 * 'utmi-pads' clock is enabled around that register access.
 *
 * Returns 0 on success or a clk_enable/clk_disable error.
 */
static int
usbphy_utmi_disable(struct usbphy_softc *sc)
{
	int rv;
	uint32_t val;

	usbphy_utmi_phy_clk(sc, false);

	if (sc->dr_mode == USB_DR_MODE_DEVICE) {
		/* Device mode: wake on connect, 5-cycle debounce. */
		val = RD4(sc, IF_USB_SUSP_CTRL);
		val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
		val |= USB_WAKE_ON_CNNT_EN_DEV;
		val |= USB_WAKEUP_DEBOUNCE_COUNT(5);
		WR4(sc, IF_USB_SUSP_CTRL, val);
	}

	/* Hold the UTMI block in reset. */
	val = RD4(sc, IF_USB_SUSP_CTRL);
	val |= UTMIP_RESET;
	WR4(sc, IF_USB_SUSP_CTRL, val);

	/* Power down the battery-charger circuit. */
	val = RD4(sc, UTMIP_BAT_CHRG_CFG0);
	val |= UTMIP_PD_CHRG;
	WR4(sc, UTMIP_BAT_CHRG_CFG0, val);

	/* Force the transceiver blocks into power-down. */
	val = RD4(sc, UTMIP_XCVR_CFG0);
	val |= UTMIP_FORCE_PD_POWERDOWN;
	val |= UTMIP_FORCE_PD2_POWERDOWN;
	val |= UTMIP_FORCE_PDZI_POWERDOWN;
	WR4(sc, UTMIP_XCVR_CFG0, val);

	val = RD4(sc, UTMIP_XCVR_CFG1);
	val |= UTMIP_FORCE_PDDISC_POWERDOWN;
	val |= UTMIP_FORCE_PDCHRP_POWERDOWN;
	val |= UTMIP_FORCE_PDDR_POWERDOWN;
	WR4(sc, UTMIP_XCVR_CFG1, val);

	/*
	 * Last user gone: power down the shared bias pads.  The pads
	 * clock must be running for the register write to take effect.
	 */
	usbpby_enable_cnt--;
	if (usbpby_enable_cnt <= 0) {
		rv = clk_enable(sc->clk_pads);
		if (rv != 0) {
			device_printf(sc->dev,
			    "Cannot enable 'utmi-pads' clock\n");
			return (rv);
		}
		val = bus_read_4(sc->pads_res, UTMIP_BIAS_CFG0);
		val |= UTMIP_OTGPD;
		val |= UTMIP_BIASPD;
		bus_write_4(sc->pads_res, UTMIP_BIAS_CFG0, val);

		rv = clk_disable(sc->clk_pads);
		if (rv != 0) {
			device_printf(sc->dev,
			    "Cannot disable 'utmi-pads' clock\n");
			return (rv);
		}
	}

	return (0);
}
/*
 * Read a controller register, issuing a read barrier first so that this
 * access is not reordered with earlier bus accesses.
 */
static __inline uint32_t
reg_read(ig4iic_softc_t *sc, uint32_t reg)
{

	bus_barrier(sc->regs_res, reg, 4, BUS_SPACE_BARRIER_READ);
	return (bus_read_4(sc->regs_res, reg));
}
/*
 * Return the text frame buffer base address register, converted from
 * the device's little-endian layout to host byte order.
 */
void
terasic_mtl_reg_textframebufaddr_get(struct terasic_mtl_softc *sc,
    uint32_t *addrp)
{
	uint32_t raw;

	raw = bus_read_4(sc->mtl_reg_res, TERASIC_MTL_OFF_TEXTFRAMEBUFADDR);
	*addrp = le32toh(raw);
}
/*
 * Stop the HPET by clearing the global enable bit in the configuration
 * register.
 */
static void
hpet_disable(struct hpet_softc *sc)
{
	uint32_t cfg;

	cfg = bus_read_4(sc->mem_res, HPET_CONFIG);
	cfg &= ~HPET_CNF_ENABLE;
	bus_write_4(sc->mem_res, HPET_CONFIG, cfg);
}
/*
 * Write a 12-bit value to a Screamer codec register.  Caller holds the
 * davbus mutex; we busy-wait for the codec to go idle both before and
 * after issuing the command.
 */
static void
screamer_write_locked(struct davbus_softc *d, u_int reg, u_int val)
{
	u_int ctrl;

	KASSERT(val == (val & 0xfff), ("bad val"));

	/* Let any previous codec command drain. */
	while (bus_read_4(d->reg, DAVBUS_CODEC_CTRL) & DAVBUS_CODEC_BUSY)
		DELAY(100);

	ctrl = reg | SCREAMER_CODEC_EMSEL0 | val;
	bus_write_4(d->reg, DAVBUS_CODEC_CTRL, ctrl);

	/* Wait for this command to complete as well. */
	while (bus_read_4(d->reg, DAVBUS_CODEC_CTRL) & DAVBUS_CODEC_BUSY)
		DELAY(100);
}
/*
 * Power the AirPort (wireless) card slot behind KeyLargo up or down by
 * replaying the required FCR2/GPIO register sequence.
 *
 * NOTE(review): the raw offsets (0x1c000, 0x1a3e0) and magic values in
 * the enable path appear to come from Apple's original driver and are
 * otherwise undocumented here -- do not reorder these accesses.
 */
int
macio_enable_wireless(device_t dev, bool enable)
{
	struct macio_softc *sc = device_get_softc(dev);
	uint32_t x;

	if (enable) {
		/* Set the slot-power bit in FCR2. */
		x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2);
		x |= 0x4;
		bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x);

		/* Enable card slot. */
		bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0f, 5);
		DELAY(1000);
		bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0f, 4);
		DELAY(1000);

		x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2);
		x &= ~0x80000000;
		bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x);
		/* out8(gpio + 0x10, 4); */

		bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0b,
		    0);
		bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0a,
		    0x28);
		bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0d,
		    0x28);
		bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0d, 0x28);
		bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0e, 0x28);
		bus_write_4(sc->sc_memr, 0x1c000, 0);

		/* Initialize the card. */
		bus_write_4(sc->sc_memr, 0x1a3e0, 0x41);
		x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2);
		x |= 0x80000000;
		bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x);
	} else {
		/* Clear the slot-power bit in FCR2. */
		x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2);
		x &= ~0x4;
		bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x);
		/* out8(gpio + 0x10, 0); */
	}

	return (0);
}
/*
 * Reset a DMA channel.  If a transfer is active, pause it, poll (up to
 * 1000 iterations) for the pause to take effect, then abort it and
 * acknowledge outstanding interrupts.  Finally clear the control-block
 * registers and reinitialize the in-memory control block.
 */
static void
bcm_dma_reset(device_t dev, int ch)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	struct bcm_dma_cb *cb;
	uint32_t cs;
	int count;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return;

	cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch));

	if (cs & CS_ACTIVE) {
		/* pause current task */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), 0);

		count = 1000;
		do {
			cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch));
		} while (!(cs & CS_ISPAUSED) && (count-- > 0));

		if (!(cs & CS_ISPAUSED)) {
			device_printf(dev,
			    "Can't abort DMA transfer at channel %d\n", ch);
		}

		bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0);

		/* Complete everything, clear interrupt */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch),
		    CS_ABORT | CS_INT | CS_END | CS_ACTIVE);
	}

	/* clear control blocks */
	bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch), 0);
	bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0);

	/* Reset control block */
	cb = sc->sc_dma_ch[ch].cb;
	bzero(cb, sizeof(*cb));
	cb->info = INFO_WAIT_RESP;
}
uint32_t rt305x_ic_get(uint32_t reg) { struct rt305x_ic_softc *sc = rt305x_ic_softc; if (!sc) return (0); return (bus_read_4(sc->mem_res, reg)); }
/*
 * DMA interrupt handler for a single channel.  Ignores interrupts that
 * are not ours or arrive on an unused channel, resets the channel on a
 * DMA error, and on normal completion acknowledges the interrupt,
 * syncs the control block and invokes the client callback.
 */
static void
bcm_dma_intr(void *arg)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_ch *ch = (struct bcm_dma_ch *)arg;
	uint32_t cs, debug;

	/* my interrupt? */
	cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch->ch));
	if (!(cs & (CS_INT | CS_ERR)))
		return;

	/* running? */
	if (!(ch->flags & BCM_DMA_CH_USED)) {
		device_printf(sc->sc_dev,
		    "unused DMA intr CH=%d, CS=%x\n", ch->ch, cs);
		return;
	}

	if (cs & CS_ERR) {
		debug = bus_read_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch));
		device_printf(sc->sc_dev, "DMA error %d on CH%d\n",
		    debug & DEBUG_ERROR_MASK, ch->ch);
		/* Writing the error bits back clears them. */
		bus_write_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch),
		    debug & DEBUG_ERROR_MASK);
		bcm_dma_reset(sc->sc_dev, ch->ch);
	}

	if (cs & CS_INT) {
		/* acknowledge interrupt */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch->ch),
		    CS_INT | CS_END);

		/* Prepare for possible access to len field */
		bus_dmamap_sync(sc->sc_dma_tag, ch->dma_map,
		    BUS_DMASYNC_POSTWRITE);

		/* save callback function and argument */
		if (ch->intr_func)
			ch->intr_func(ch->ch, ch->intr_arg);
	}
}
/*
 * Return the state (0 or 1) of a single GPIO pin, read from the
 * register within the given bank that contains it.
 */
static inline uint32_t
gpio_read(struct tegra_gpio_softc *sc, bus_size_t reg, struct gpio_pin *pin)
{
	uint32_t bank;

	bank = bus_read_4(sc->mem_res, reg + GPIO_REGNUM(pin->gp_pin));
	return (bank >> GPIO_BIT(pin->gp_pin)) & 1;
}
/*
 * Per-bank GPIO interrupt filter.  For each register in the bank,
 * dispatch every pin whose status bit is set and whose interrupt is
 * enabled.  Edge-triggered sources are EOId before dispatch; level
 * sources after masking.  Pins with no registered handler are masked
 * and reported as stray.
 */
static int
tegra_gpio_intr(void *arg)
{
	u_int irq, i, j, val, basepin;
	struct tegra_gpio_softc *sc;
	struct trapframe *tf;
	struct tegra_gpio_irqsrc *tgi;
	struct tegra_gpio_irq_cookie *cookie;

	cookie = (struct tegra_gpio_irq_cookie *)arg;
	sc = cookie->sc;
	tf = curthread->td_intr_frame;

	for (i = 0; i < GPIO_REGS_IN_BANK; i++) {
		basepin = cookie->bank_num * GPIO_REGS_IN_BANK *
		    GPIO_PINS_IN_REG + i * GPIO_PINS_IN_REG;

		/* Only pins that are both pending and enabled. */
		val = bus_read_4(sc->mem_res,
		    GPIO_INT_STA + GPIO_REGNUM(basepin));
		val &= bus_read_4(sc->mem_res,
		    GPIO_INT_ENB + GPIO_REGNUM(basepin));

		/* Interrupt handling */
		for (j = 0; j < GPIO_PINS_IN_REG; j++) {
			if ((val & (1 << j)) == 0)
				continue;
			irq = basepin + j;
			tgi = &sc->isrcs[irq];
			/* Ack edge-triggered sources up front. */
			if (!tegra_gpio_isrc_is_level(tgi))
				tegra_gpio_isrc_eoi(sc, tgi);
			if (intr_isrc_dispatch(&tgi->isrc, tf) != 0) {
				/* No handler: mask and ack the stray pin. */
				tegra_gpio_isrc_mask(sc, tgi, 0);
				if (tegra_gpio_isrc_is_level(tgi))
					tegra_gpio_isrc_eoi(sc, tgi);
				device_printf(sc->dev,
				    "Stray irq %u disabled\n", irq);
			}
		}
	}
	return (FILTER_HANDLED);
}
/**
 * Read a 32-bit entry value from the EROM table without advancing the
 * read position.
 *
 * @param erom	EROM read state.
 * @param entry	Will contain the read result on success.
 * @retval 0		success
 * @retval EINVAL	The end of the EROM table was reached without a
 *			terminating EOF entry.
 */
int
bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry)
{
	/*
	 * A full 32-bit entry must fit within the table.  The previous
	 * check (offset >= BCMA_EROM_TABLE_SIZE) permitted a 4-byte read
	 * starting in the last three bytes of the table, running past
	 * the end of the mapped region.
	 */
	if (erom->offset >= (BCMA_EROM_TABLE_SIZE - sizeof(uint32_t))) {
		EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n");
		return (EINVAL);
	}

	*entry = bus_read_4(erom->r, erom->start + erom->offset);
	return (0);
}
/*
 * Read a single 1-, 2-, 4- or 8-byte datum from the resource backing
 * @cdev.  The access width is taken from uio_resid and must be a power
 * of two no larger than 8; PCI config resources only support up to
 * 4-byte accesses.  Out-of-range or malformed requests return EIO.
 */
static int
proto_read(struct cdev *cdev, struct uio *uio, int ioflag)
{
	union {
		uint8_t x1[8];
		uint16_t x2[4];
		uint32_t x4[2];
		uint64_t x8[1];
	} buf;
	struct proto_softc *sc;
	struct proto_res *r;
	device_t dev;
	off_t ofs;
	u_long width;
	int error;

	sc = cdev->si_drv1;
	dev = sc->sc_dev;
	r = cdev->si_drv2;

	/* Width must have exactly one bit set (1, 2, 4 or 8). */
	width = uio->uio_resid;
	if (width < 1 || width > 8 || bitcount16(width) > 1)
		return (EIO);
	ofs = uio->uio_offset;
	if (ofs + width > r->r_size)
		return (EIO);

	switch (width) {
	case 1:
		buf.x1[0] = (r->r_type == PROTO_RES_PCICFG) ?
		    pci_read_config(dev, ofs, 1) : bus_read_1(r->r_d.res, ofs);
		break;
	case 2:
		buf.x2[0] = (r->r_type == PROTO_RES_PCICFG) ?
		    pci_read_config(dev, ofs, 2) : bus_read_2(r->r_d.res, ofs);
		break;
	case 4:
		buf.x4[0] = (r->r_type == PROTO_RES_PCICFG) ?
		    pci_read_config(dev, ofs, 4) : bus_read_4(r->r_d.res, ofs);
		break;
#ifndef __i386__
	case 8:
		/* 64-bit access is not defined for PCI config space. */
		if (r->r_type == PROTO_RES_PCICFG)
			return (EINVAL);
		buf.x8[0] = bus_read_8(r->r_d.res, ofs);
		break;
#endif
	default:
		return (EIO);
	}
	error = uiomove(&buf, width, uio);
	return (error);
}
/*
 * Decode the text cursor position register into its column and row
 * components.
 */
void
terasic_mtl_reg_textcursor_get(struct terasic_mtl_softc *sc, uint8_t *colp,
    uint8_t *rowp)
{
	uint32_t cursor;

	cursor = le32toh(bus_read_4(sc->mtl_reg_res,
	    TERASIC_MTL_OFF_TEXTCURSOR));
	*colp = (cursor & TERASIC_MTL_TEXTCURSOR_COL_MASK) >>
	    TERASIC_MTL_TEXTCURSOR_COL_SHIFT;
	/* The row field occupies the low bits; no shift needed. */
	*rowp = cursor & TERASIC_MTL_TEXTCURSOR_ROW_MASK;
}
/*
 * Mixer initialization for the Burgundy codec: register callbacks,
 * program the mixer/output routing described below, set the digital
 * scalers to unity gain, and route outputs to match the current jack
 * status.
 */
static int
burgundy_init(struct snd_mixer *m)
{
	struct davbus_softc *d;

	d = mix_getdevinfo(m);
	d->read_status = burgundy_read_status;
	d->set_outputs = burgundy_set_outputs;

	/*
	 * We configure the Burgundy codec as follows:
	 *
	 * 	o Input subframe 0 is connected to input digital
	 *	  stream A (ISA).
	 *	o Stream A (ISA) is mixed in mixer 2 (MIX2).
	 *	o Output of mixer 2 (MIX2) is routed to output sources
	 *	  OS0 and OS1 which can be converted to analog.
	 *
	 */
	mtx_lock(&d->mutex);

	/* NOTE(review): 0x16700 / 0x40 init value is undocumented here. */
	burgundy_write_locked(d, 0x16700, 0x40);

	burgundy_write_locked(d, BURGUNDY_MIX0_REG, 0);
	burgundy_write_locked(d, BURGUNDY_MIX1_REG, 0);
	burgundy_write_locked(d, BURGUNDY_MIX2_REG, BURGUNDY_MIX_ISA);
	burgundy_write_locked(d, BURGUNDY_MIX3_REG, 0);

	burgundy_write_locked(d, BURGUNDY_OS_REG, BURGUNDY_OS0_MIX2 |
	    BURGUNDY_OS1_MIX2);

	burgundy_write_locked(d, BURGUNDY_SDIN_REG, BURGUNDY_ISA_SF0);

	/* Set several digital scalers to unity gain. */
	burgundy_write_locked(d, BURGUNDY_MXS2L_REG, BURGUNDY_MXS_UNITY);
	burgundy_write_locked(d, BURGUNDY_MXS2R_REG, BURGUNDY_MXS_UNITY);
	burgundy_write_locked(d, BURGUNDY_OSS0L_REG, BURGUNDY_OSS_UNITY);
	burgundy_write_locked(d, BURGUNDY_OSS0R_REG, BURGUNDY_OSS_UNITY);
	burgundy_write_locked(d, BURGUNDY_OSS1L_REG, BURGUNDY_OSS_UNITY);
	burgundy_write_locked(d, BURGUNDY_OSS1R_REG, BURGUNDY_OSS_UNITY);
	burgundy_write_locked(d, BURGUNDY_ISSAL_REG, BURGUNDY_ISS_UNITY);
	burgundy_write_locked(d, BURGUNDY_ISSAR_REG, BURGUNDY_ISS_UNITY);

	/* Route outputs according to the currently-connected jacks. */
	burgundy_set_outputs(d, burgundy_read_status(d,
	    bus_read_4(d->reg, DAVBUS_CODEC_STATUS)));

	mtx_unlock(&d->mutex);

	mix_setdevs(m, SOUND_MASK_VOLUME);

	return (0);
}
/*
 * Read a system-timer register.  Before the driver has attached its
 * softc, fall back to a direct access through the chip's fixed mapping
 * so that early boot code can still use the timer.
 */
static inline uint32_t
RD4(bus_size_t off)
{

	if (timer_softc != NULL)
		return (bus_read_4(timer_softc->sc_mem_res, off));

	/* Not attached yet: read through the fixed physical mapping. */
	return (*(uint32_t *)(AT91_BASE + AT91RM92_ST_BASE + off));
}
static int pinmux_config_grp(struct pinmux_softc *sc, char *grp_name, const struct tegra_grp *grp, struct pincfg *cfg) { uint32_t reg; reg = bus_read_4(sc->pad_mem_res, grp->reg); if (cfg->params[PROP_ID_HIGH_SPEED_MODE] != -1) { reg &= ~(1 << TEGRA_GRP_HSM_SHIFT); reg |= (cfg->params[PROP_ID_HIGH_SPEED_MODE] & 1) << TEGRA_GRP_HSM_SHIFT; } if (cfg->params[PROP_ID_SCHMITT] != -1) { reg &= ~(1 << TEGRA_GRP_SCHMT_SHIFT); reg |= (cfg->params[PROP_ID_SCHMITT] & 1) << TEGRA_GRP_SCHMT_SHIFT; } if (cfg->params[PROP_ID_DRIVE_TYPE] != -1) { reg &= ~(TEGRA_GRP_DRV_TYPE_MASK << TEGRA_GRP_DRV_TYPE_SHIFT); reg |= (cfg->params[PROP_ID_DRIVE_TYPE] & TEGRA_GRP_DRV_TYPE_MASK) << TEGRA_GRP_DRV_TYPE_SHIFT; } if (cfg->params[PROP_ID_SLEW_RATE_RISING] != -1) { reg &= ~(TEGRA_GRP_DRV_DRVDN_SLWR_MASK << TEGRA_GRP_DRV_DRVDN_SLWR_SHIFT); reg |= (cfg->params[PROP_ID_SLEW_RATE_RISING] & TEGRA_GRP_DRV_DRVDN_SLWR_MASK) << TEGRA_GRP_DRV_DRVDN_SLWR_SHIFT; } if (cfg->params[PROP_ID_SLEW_RATE_FALLING] != -1) { reg &= ~(TEGRA_GRP_DRV_DRVUP_SLWF_MASK << TEGRA_GRP_DRV_DRVUP_SLWF_SHIFT); reg |= (cfg->params[PROP_ID_SLEW_RATE_FALLING] & TEGRA_GRP_DRV_DRVUP_SLWF_MASK) << TEGRA_GRP_DRV_DRVUP_SLWF_SHIFT; } if ((cfg->params[PROP_ID_DRIVE_DOWN_STRENGTH] != -1) && (grp->drvdn_mask != -1)) { reg &= ~(grp->drvdn_shift << grp->drvdn_mask); reg |= (cfg->params[PROP_ID_DRIVE_DOWN_STRENGTH] & grp->drvdn_mask) << grp->drvdn_shift; } if ((cfg->params[PROP_ID_DRIVE_UP_STRENGTH] != -1) && (grp->drvup_mask != -1)) { reg &= ~(grp->drvup_shift << grp->drvup_mask); reg |= (cfg->params[PROP_ID_DRIVE_UP_STRENGTH] & grp->drvup_mask) << grp->drvup_shift; } bus_write_4(sc->pad_mem_res, grp->reg, reg); return (0); }
/*
 * Start the HPET main counter, selecting or deselecting legacy-route
 * mode per the softc setting before asserting the global enable bit.
 */
static void
hpet_enable(struct hpet_softc *sc)
{
	uint32_t cfg;

	cfg = bus_read_4(sc->mem_res, HPET_CONFIG);
	if (sc->legacy_route)
		cfg |= HPET_CNF_LEG_RT;
	else
		cfg &= ~HPET_CNF_LEG_RT;
	cfg |= HPET_CNF_ENABLE;
	bus_write_4(sc->mem_res, HPET_CONFIG, cfg);
}