/*
 * bcm_dmac_dump_regs:
 *
 *	Debug helper: print the CS, CONBLK_AD and DEBUG registers of
 *	every DMA channel claimed by unit 0 of the "bcmdmac" driver.
 *	Does nothing if the device is not attached.
 */
void
bcm_dmac_dump_regs(void)
{
	struct bcm_dmac_softc *sc;
	device_t dev;
	int chan;

	dev = device_find_by_driver_unit("bcmdmac", 0);
	if (dev == NULL)
		return;
	sc = device_private(dev);

	for (chan = 0; chan < sc->sc_nchannels; chan++) {
		/* Only dump channels present in the channel mask. */
		if ((sc->sc_channelmask & __BIT(chan)) == 0)
			continue;
		printf("%d_CS: %08X\n", chan, DMAC_READ(sc, DMAC_CS(chan)));
		printf("%d_CONBLK_AD: %08X\n", chan,
		    DMAC_READ(sc, DMAC_CONBLK_AD(chan)));
		printf("%d_DEBUG: %08X\n", chan,
		    DMAC_READ(sc, DMAC_DEBUG(chan)));
	}
}
static int coram_intr(void *v) { device_t self = v; struct coram_softc *sc; uint32_t val; sc = device_private(self); val = bus_space_read_4(sc->sc_memt, sc->sc_memh, PCI_INT_MSTAT ); if (val == 0) return 0; /* not ours */ /* vid c */ if (val & __BIT(2)) coram_mpeg_intr(sc); if (val & ~__BIT(2)) printf("%s %08x\n", __func__, val); bus_space_write_4(sc->sc_memt, sc->sc_memh, PCI_INT_STAT, val); return 1; }
/*
 * tegra_gpio_attach_bank:
 *
 *	Set up one GPIO bank: install the gpio(4) controller callbacks,
 *	record capabilities and current state for every pin that is in
 *	GPIO mode (pins left in SFIO mode get no capabilities), then
 *	attach the child gpiobus.
 */
static void
tegra_gpio_attach_bank(struct tegra_gpio_softc *sc, u_int bankno)
{
	struct tegra_gpio_bank * const bank = &sc->sc_banks[bankno];
	struct gpiobus_attach_args gba;
	u_int pin;

	bank->bank_sc = sc;
	bank->bank_pb = &tegra_gpio_pinbanks[bankno];
	bank->bank_gc.gp_cookie = bank;
	bank->bank_gc.gp_pin_read = tegra_gpio_pin_read;
	bank->bank_gc.gp_pin_write = tegra_gpio_pin_write;
	bank->bank_gc.gp_pin_ctl = tegra_gpio_pin_ctl;

	/* CNF has one bit per pin: set = GPIO mode, clear = SFIO mode. */
	const uint32_t cnf = GPIO_READ(bank, GPIO_CNF_REG);
	for (pin = 0; pin < __arraycount(bank->bank_pins); pin++) {
		bank->bank_pins[pin].pin_num = pin;
		/* skip pins in SFIO mode */
		if ((cnf & __BIT(pin)) == 0)
			continue;
		bank->bank_pins[pin].pin_caps =
		    GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | GPIO_PIN_TRISTATE;
		bank->bank_pins[pin].pin_state =
		    tegra_gpio_pin_read(bank, pin);
	}

	memset(&gba, 0, sizeof(gba));
	gba.gba_gc = &bank->bank_gc;
	gba.gba_pins = bank->bank_pins;
	gba.gba_npins = __arraycount(bank->bank_pins);
	bank->bank_dev = config_found_ia(sc->sc_dev, "gpiobus", &gba,
	    tegra_gpio_cfprint);
}
/*
 * Generator of valid combinations of options
 * Usage: i = 0; while ((o = get_options_wait6(i++)) != -1) {}
 */
static int
get_options6(size_t pos)
{
	/*
	 * waitid(2) must specify at least one of WEXITED, WUNTRACED,
	 * WSTOPPED, WTRAPPED or WCONTINUED.  A sole WNOWAIT is not
	 * valid, which is why WNOWAIT occupies bit 0: skipping the
	 * first two bit patterns (the empty set and {WNOWAIT})
	 * blacklists both invalid combinations at once.
	 */
	const int matrix[] = {
		WNOWAIT,	/* First in order to blacklist it easily */
		WEXITED,
		WUNTRACED,
		WSTOPPED,	/* SUS compatibility, equal to WUNTRACED */
		WTRAPPED,
		WCONTINUED
	};
	const size_t M = (1 << __arraycount(matrix)) - 1;
	size_t n;
	int flags = 0;

	/* Skip empty and sole WNOWAIT option */
	pos += 2;
	if (pos > M)
		return -1;

	/* Treat "pos" as a bitmask selecting entries of matrix[]. */
	for (n = 0; n < __arraycount(matrix); n++) {
		if (pos & __BIT(n))
			flags |= matrix[n];
	}

	return flags;
}
/*
 * rmixl_pcix_attach:
 *
 *	Attach the RMI XLR PCI-X host bridge: record DMA tags from the
 *	obio attach args, configure interrupts and errata workarounds,
 *	verify the firmware-programmed Host BARs cover all of RAM
 *	(falling back to subregioned bounce-buffer DMA tags when they
 *	do not), initialize the CFG/MEM/IO BARs and bus space tags,
 *	and finally attach the child PCI bus.
 */
static void
rmixl_pcix_attach(device_t parent, device_t self, void *aux)
{
	rmixl_pcix_softc_t *sc = device_private(self);
	struct obio_attach_args *obio = aux;
	struct rmixl_config *rcp = &rmixl_configuration;
	struct pcibus_attach_args pba;
	uint32_t bar;

	rmixl_pcix_found = 1;
	sc->sc_dev = self;
	sc->sc_29bit_dmat = obio->obio_29bit_dmat;
	sc->sc_32bit_dmat = obio->obio_32bit_dmat;
	sc->sc_64bit_dmat = obio->obio_64bit_dmat;
	sc->sc_tmsk = obio->obio_tmsk;

	aprint_normal(": RMI XLR PCI-X Interface\n");

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_HIGH);

	rmixl_pcix_intcfg(sc);
	rmixl_pcix_errata(sc);

	/*
	 * check XLR Control Register
	 */
	DPRINTF(("%s: XLR_CONTROL=%#x\n", __func__,
	    RMIXL_PCIXREG_READ(RMIXL_PCIX_ECFG_XLR_CONTROL)));

	/*
	 * HBAR[0] if a 32 bit BAR, or
	 * HBAR[0,1] if a 64 bit BAR pair
	 * must cover all RAM
	 */
	extern u_quad_t mem_cluster_maxaddr;
	uint64_t hbar_addr;
	uint64_t hbar_size;
	uint32_t hbar_size_lo, hbar_size_hi;
	uint32_t hbar_addr_lo, hbar_addr_hi;

	hbar_addr_lo = RMIXL_PCIXREG_READ(RMIXL_PCIX_ECFG_HOST_BAR0_ADDR);
	hbar_addr_hi = RMIXL_PCIXREG_READ(RMIXL_PCIX_ECFG_HOST_BAR1_ADDR);
	hbar_size_lo = RMIXL_PCIXREG_READ(RMIXL_PCIX_ECFG_HOST_BAR0_SIZE);
	hbar_size_hi = RMIXL_PCIXREG_READ(RMIXL_PCIX_ECFG_HOST_BAR1_SIZE);

	hbar_addr = (u_quad_t)(hbar_addr_lo & PCI_MAPREG_MEM_ADDR_MASK);
	hbar_size = hbar_size_lo;
	if ((hbar_size_lo & PCI_MAPREG_MEM_TYPE_64BIT) != 0) {
		hbar_addr |= (uint64_t)hbar_addr_hi << 32;
		hbar_size |= (uint64_t)hbar_size_hi << 32;
	}
	if ((hbar_addr != 0) || (hbar_size < mem_cluster_maxaddr)) {
		int error;

		aprint_error_dev(self, "HostBAR0 addr %#x, size %#x\n",
		    hbar_addr_lo, hbar_size_lo);
		if ((hbar_size_lo & PCI_MAPREG_MEM_TYPE_64BIT) != 0)
			aprint_error_dev(self,
			    "HostBAR1 addr %#x, size %#x\n",
			    hbar_addr_hi, hbar_size_hi);
		aprint_error_dev(self, "WARNING: firmware PCI-X setup error: "
		    "RAM %#"PRIx64"..%#"PRIx64" not accessible by Host BAR, "
		    "enabling DMA bounce buffers\n",
		    hbar_size, mem_cluster_maxaddr-1);

		/*
		 * Force use of bounce buffers for inaccessible RAM addrs.
		 */
		if (hbar_size < ((uint64_t)1 << 32)) {
			error = bus_dmatag_subregion(sc->sc_32bit_dmat, 0,
			    (bus_addr_t)hbar_size, &sc->sc_32bit_dmat,
			    BUS_DMA_NOWAIT);
			if (error)
				panic("%s: failed to subregion 32-bit dma tag:"
				    " error %d", __func__, error);
			sc->sc_64bit_dmat = NULL;
		} else {
			error = bus_dmatag_subregion(sc->sc_64bit_dmat, 0,
			    (bus_addr_t)hbar_size, &sc->sc_64bit_dmat,
			    BUS_DMA_NOWAIT);
			if (error)
				panic("%s: failed to subregion 64-bit dma tag:"
				    " error %d", __func__, error);
		}
	}

	/*
	 * check PCI-X interface byteswap setup
	 * ensure 'Match Byte Lane' is disabled
	 */
	uint32_t mble;
	mble = RMIXL_PCIXREG_READ(RMIXL_PCIX_ECFG_XLR_MBLE);
#ifdef PCI_DEBUG
	uint32_t mba, mbs;
	mba = RMIXL_PCIXREG_READ(RMIXL_PCIX_ECFG_MATCH_BIT_ADDR);
	mbs = RMIXL_PCIXREG_READ(RMIXL_PCIX_ECFG_MATCH_BIT_SIZE);
	DPRINTF(("%s: MBLE=%#x, MBA=%#x, MBS=%#x\n",
	    __func__, mble, mba, mbs));
#endif
	/*
	 * NOTE(review): mble is uint32_t, so (mble & __BIT(40)) can
	 * never be non-zero and the disabling write below appears
	 * unreachable -- confirm the intended bit number against the
	 * XLR documentation before changing it.
	 */
	if ((mble & __BIT(40)) != 0)
		RMIXL_PCIXREG_WRITE(RMIXL_PCIX_ECFG_XLR_MBLE, 0);

	/*
	 * get PCI config space base addr from SBC PCIe CFG BAR
	 * initialize it if necessary
	 */
	bar = RMIXL_IOREG_READ(RMIXL_IO_DEV_BRIDGE + RMIXLR_SBC_PCIX_CFG_BAR);
	DPRINTF(("%s: PCIX_CFG_BAR %#x\n", __func__, bar));
	if ((bar & RMIXL_PCIX_CFG_BAR_ENB) == 0) {
		u_long n = RMIXL_PCIX_CFG_SIZE / (1024 * 1024);
		RMIXL_PCIX_BAR_INIT(CFG, bar, n, n);
	}
	rcp->rc_pci_cfg_pbase = (bus_addr_t)RMIXL_PCIX_CFG_BAR_TO_BA(bar);
	rcp->rc_pci_cfg_size = (bus_size_t)RMIXL_PCIX_CFG_SIZE;

	/*
	 * get PCI MEM space base [addr, size] from SBC PCIe MEM BAR
	 * initialize it if necessary
	 */
	bar = RMIXL_IOREG_READ(RMIXL_IO_DEV_BRIDGE + RMIXLR_SBC_PCIX_MEM_BAR);
	DPRINTF(("%s: PCIX_MEM_BAR %#x\n", __func__, bar));
	if ((bar & RMIXL_PCIX_MEM_BAR_ENB) == 0) {
		u_long n = 256;		/* 256 MB */
		RMIXL_PCIX_BAR_INIT(MEM, bar, n, n);
	}
	rcp->rc_pci_mem_pbase = (bus_addr_t)RMIXL_PCIX_MEM_BAR_TO_BA(bar);
	rcp->rc_pci_mem_size = (bus_size_t)RMIXL_PCIX_MEM_BAR_TO_SIZE(bar);

	/*
	 * get PCI IO space base [addr, size] from SBC PCIe IO BAR
	 * initialize it if necessary
	 */
	bar = RMIXL_IOREG_READ(RMIXL_IO_DEV_BRIDGE + RMIXLR_SBC_PCIX_IO_BAR);
	DPRINTF(("%s: PCIX_IO_BAR %#x\n", __func__, bar));
	if ((bar & RMIXL_PCIX_IO_BAR_ENB) == 0) {
		u_long n = 32;		/* 32 MB */
		RMIXL_PCIX_BAR_INIT(IO, bar, n, n);
	}
	rcp->rc_pci_io_pbase = (bus_addr_t)RMIXL_PCIX_IO_BAR_TO_BA(bar);
	rcp->rc_pci_io_size = (bus_size_t)RMIXL_PCIX_IO_BAR_TO_SIZE(bar);

	/*
	 * initialize the PCI CFG bus space tag
	 */
	rmixl_pci_cfg_bus_mem_init(&rcp->rc_pci_cfg_memt, rcp);
	sc->sc_pci_cfg_memt = &rcp->rc_pci_cfg_memt;

	/*
	 * initialize the PCI MEM and IO bus space tags
	 */
	rmixl_pci_bus_mem_init(&rcp->rc_pci_memt, rcp);
	rmixl_pci_bus_io_init(&rcp->rc_pci_iot, rcp);

	/*
	 * initialize the extended configuration regs
	 */
	rmixl_pcix_init_errors(sc);

	/*
	 * initialize the PCI chipset tag
	 */
	rmixl_pcix_init(sc);

	/*
	 * attach the PCI bus
	 */
	memset(&pba, 0, sizeof(pba));
	pba.pba_memt = &rcp->rc_pci_memt;
	pba.pba_iot = &rcp->rc_pci_iot;
	pba.pba_dmat = sc->sc_32bit_dmat;
	pba.pba_dmat64 = sc->sc_64bit_dmat;
	pba.pba_pc = &sc->sc_pci_chipset;
	pba.pba_bus = 0;
	pba.pba_bridgetag = NULL;
	pba.pba_intrswiz = 0;
	pba.pba_intrtag = 0;
	pba.pba_flags = PCI_FLAGS_IO_OKAY | PCI_FLAGS_MEM_OKAY |
	    PCI_FLAGS_MRL_OKAY | PCI_FLAGS_MRM_OKAY | PCI_FLAGS_MWI_OKAY;

	(void) config_found_ia(self, "pcibus", &pba, pcibusprint);
}
#define PADGRP(_n, _p, _dt, _dd, _du, _slwf) \ { \ .pg_reg = PADGRP_ ## _n ## _REG,\ .pg_preemp = (_p), \ .pg_hsm = __BIT(2), \ .pg_schmt = __BIT(3), \ .pg_drv_type = (_dt), \ .pg_drvdn = (_dd), \ .pg_drvup = (_du), \ .pg_slwr = __BITS(29,28), \ .pg_slwf = (_slwf) \ } static const struct tegra_mpio_padgrp tegra_mpio_padgrp[] = { PADGRP(GMACFG, __BIT(0), __BITS(7,6), __BITS(18,14), __BITS(24,20), __BITS(31,30)), PADGRP(SDIO1CFG, 0, 0, __BITS(18,12), __BITS(26,20), __BITS(31,30)), PADGRP(SDIO3CFG, 0, 0, __BITS(18,12), __BITS(26,20), __BITS(31,30)), PADGRP(SDIO4CFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(AOCFG0, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(AOCFG1, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(AOCFG2, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(AOCFG3, 0, 0, __BITS(16,12), 0, 0), PADGRP(AOCFG4, 0, __BITS(7,6), __BITS(18,12), __BITS(26,20), __BITS(31,30)), PADGRP(CDEV1CFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(CDEV2CFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(CECCFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(DAP1CFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(DAP2CFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(DAP3CFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(DAP4CFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)),
static void awin_ahci_phy_init(struct awin_ahci_softc *asc) { bus_space_tag_t bst = asc->asc_sc.sc_ahcit; bus_space_handle_t bsh = asc->asc_sc.sc_ahcih; u_int timeout; uint32_t v; /* * This is dark magic. */ delay(5000); bus_space_write_4(bst, bsh, AWIN_AHCI_RWCR_REG, 0); awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS1R_REG, __BIT(19), 0); awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS0R_REG, __BIT(26)|__BIT(24)|__BIT(23)|__BIT(18), __BIT(25)); awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS1R_REG, __BIT(17)|__BIT(10)|__BIT(9)|__BIT(7), __BIT(16)|__BIT(12)|__BIT(11)|__BIT(8)|__BIT(6)); awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS1R_REG, __BIT(28)|__BIT(15), 0); awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS1R_REG, 0, __BIT(19)); awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS0R_REG, __BIT(21)|__BIT(20), __BIT(22)); awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS2R_REG, __BIT(9)|__BIT(8)|__BIT(5), __BIT(7)|__BIT(6)); delay(10); awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS0R_REG, __BIT(19), 0); timeout = 1000; do { delay(1); v = bus_space_read_4(bst, bsh, AWIN_AHCI_PHYCS0R_REG); } while (--timeout && __SHIFTOUT(v, __BITS(30,28)) != 2); if (!timeout) { aprint_error_dev( asc->asc_sc.sc_atac.atac_dev, "SATA PHY power failed (%#x)\n", v); } else { awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS2R_REG, __BIT(24), 0); timeout = 1000; do { delay(10); v = bus_space_read_4(bst, bsh, AWIN_AHCI_PHYCS2R_REG); } while (--timeout && (v & __BIT(24))); if (!timeout) { aprint_error_dev( asc->asc_sc.sc_atac.atac_dev, "SATA PHY calibration failed (%#x)\n", v); } } delay(10); bus_space_write_4(bst, bsh, AWIN_AHCI_RWCR_REG, 7); }
/*
 * awinusb_attach:
 *
 *	Attach the Allwinner USB host complex for one port: map the
 *	EHCI/OHCI register windows, enable the clocks/gates/resets
 *	appropriate for the SoC variant (A31, A80, or others), grant
 *	the USB DMA engine access to DRAM, initialize the PHY, turn on
 *	the port power/restrict GPIOs, quiesce controller interrupts,
 *	and finally attach the ohci/ehci children.
 */
void
awinusb_attach(device_t parent, device_t self, void *aux)
{
	struct awinusb_softc * const usbsc = device_private(self);
	const struct awinio_attach_args * const aio = aux;
	const struct awin_locators * const loc = &aio->aio_loc;
	bus_space_handle_t usb_bsh;
	bool has_ohci = true;

	awinusb_ports |= __BIT(loc->loc_port);

	usbsc->usbsc_bst = aio->aio_core_bst;
	usbsc->usbsc_dmat = aio->aio_dmat;
	usbsc->usbsc_number = loc->loc_port + 1;

	usb_bsh = awin_chip_id() == AWIN_CHIP_ID_A80 ?
	    aio->aio_a80_usb_bsh : aio->aio_core_bsh;

	bus_space_subregion(usbsc->usbsc_bst, usb_bsh,
	    loc->loc_offset + AWIN_EHCI_OFFSET, AWIN_EHCI_SIZE,
	    &usbsc->usbsc_ehci_bsh);
	bus_space_subregion(usbsc->usbsc_bst, usb_bsh,
	    loc->loc_offset + AWIN_OHCI_OFFSET, AWIN_OHCI_SIZE,
	    &usbsc->usbsc_ohci_bsh);
	if (awin_chip_id() != AWIN_CHIP_ID_A80) {
		bus_space_subregion(usbsc->usbsc_bst, usb_bsh,
		    AWIN_USB0_OFFSET + AWIN_USB0_PHY_CTL_REG, 4,
		    &usbsc->usbsc_usb0_phy_csr_bsh);
	}

	/* On the A80, port 1 has no companion OHCI controller. */
	if (awin_chip_id() == AWIN_CHIP_ID_A80 && loc->loc_port == 1)
		has_ohci = false;

	aprint_naive("\n");
	aprint_normal("\n");

	if (awin_chip_id() == AWIN_CHIP_ID_A31) {
		/* Enable USB PHY */
		awin_reg_set_clear(usbsc->usbsc_bst, aio->aio_ccm_bsh,
		    AWIN_USB_CLK_REG,
		    awinusb_usb_clk_set_a31[loc->loc_port], 0);
		/* AHB gate enable */
		awin_reg_set_clear(usbsc->usbsc_bst, aio->aio_ccm_bsh,
		    AWIN_AHB_GATING0_REG,
		    AWIN_A31_AHB_GATING0_USB0 |
		    awinusb_ahb_gating_a31[loc->loc_port], 0);
		/* Soft reset */
		awin_reg_set_clear(usbsc->usbsc_bst, aio->aio_ccm_bsh,
		    AWIN_A31_AHB_RESET0_REG,
		    awinusb_usb_ahb_reset_a31[loc->loc_port], 0);
	} else if (awin_chip_id() == AWIN_CHIP_ID_A80) {
		/* Gate enable */
		awin_reg_set_clear(usbsc->usbsc_bst, aio->aio_ccm_bsh,
		    AWIN_A80_CCU_SCLK_BUS_CLK_GATING1_REG,
		    AWIN_A80_CCU_SCLK_BUS_CLK_GATING1_USB_HOST, 0);
		/* Enable USB PHY */
		awin_reg_set_clear(usbsc->usbsc_bst, usb_bsh,
		    AWIN_A80_USBPHY_OFFSET + AWIN_A80_USBPHY_HCI_PCR_REG,
		    awinusb_usb_pcr_a80[loc->loc_port], 0);
		awin_reg_set_clear(usbsc->usbsc_bst, usb_bsh,
		    AWIN_A80_USBPHY_OFFSET + AWIN_A80_USBPHY_HCI_SCR_REG,
		    awinusb_usb_scr_a80[loc->loc_port], 0);
		if (!has_ohci) {
			/* No OHCI for USB1, force EHCI mode */
			awin_reg_set_clear(usbsc->usbsc_bst, usb_bsh,
			    loc->loc_offset + AWIN_USB_PMU_IRQ_REG,
			    AWIN_USB_PMU_IRQ_EHCI_HS_FORCE |
			    AWIN_USB_PMU_IRQ_HSIC_CONNECT_DET |
			    AWIN_USB_PMU_IRQ_HSIC, 0);
		}
	} else {
		/*
		 * Access to the USB phy is off USB0 so make sure it's on.
		 */
		awin_reg_set_clear(usbsc->usbsc_bst, aio->aio_ccm_bsh,
		    AWIN_AHB_GATING0_REG,
		    AWIN_AHB_GATING0_USB0 |
		    awinusb_ahb_gating[loc->loc_port], 0);
		/*
		 * Enable the USB phy for this port.
		 */
		awin_reg_set_clear(usbsc->usbsc_bst, aio->aio_ccm_bsh,
		    AWIN_USB_CLK_REG,
		    awinusb_usb_clk_set[loc->loc_port], 0);
	}

	/*
	 * Allow USB DMA engine access to the DRAM.
	 */
	awin_reg_set_clear(usbsc->usbsc_bst, usb_bsh,
	    loc->loc_offset + AWIN_USB_PMU_IRQ_REG,
	    AWIN_USB_PMU_IRQ_AHB_INCR8 | AWIN_USB_PMU_IRQ_AHB_INCR4 |
	    AWIN_USB_PMU_IRQ_AHB_INCRX | AWIN_USB_PMU_IRQ_ULPI_BYPASS, 0);
	if (awin_chip_id() == AWIN_CHIP_ID_A20) {
		awin_reg_set_clear(usbsc->usbsc_bst, aio->aio_core_bsh,
		    AWIN_DRAM_OFFSET + awinusb_dram_hpcr_regs[loc->loc_port],
		    AWIN_DRAM_HPCR_ACCESS_EN, 0);
	}

	/* initialize the USB phy */
	if (awin_chip_id() != AWIN_CHIP_ID_A80) {
		awin_usb_phy_write(usbsc, 0x20, 0x14, 5);
		awin_usb_phy_write(usbsc, 0x2a, 0x03, 2);
	}

	/*
	 * Now get the GPIO that enables the power to the port and
	 * turn it on.
	 */
	if (awin_gpio_pin_reserve(awinusb_drvpin_names[loc->loc_port],
	    &usbsc->usbsc_drv_pin)) {
		awin_gpio_pindata_write(&usbsc->usbsc_drv_pin, 1);
	} else {
		aprint_error_dev(self, "no power gpio found\n");
	}
	if (awin_gpio_pin_reserve(awinusb_restrictpin_names[loc->loc_port],
	    &usbsc->usbsc_restrict_pin)) {
		awin_gpio_pindata_write(&usbsc->usbsc_restrict_pin, 1);
	} else {
		aprint_debug_dev(self, "no restrict gpio found\n");
	}

	/*
	 * Disable interrupts
	 */
#if NOHCI > 0
	if (has_ohci) {
		bus_space_write_4(usbsc->usbsc_bst, usbsc->usbsc_ohci_bsh,
		    OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
	}
#endif
#if NEHCI > 0
	bus_size_t caplength = bus_space_read_1(usbsc->usbsc_bst,
	    usbsc->usbsc_ehci_bsh, EHCI_CAPLENGTH);
	bus_space_write_4(usbsc->usbsc_bst, usbsc->usbsc_ehci_bsh,
	    caplength + EHCI_USBINTR, 0);
#endif

#if NOHCI > 0
	if (has_ohci) {
		struct awinusb_attach_args usbaa_ohci = {
			.usbaa_name = "ohci",
			.usbaa_dmat = usbsc->usbsc_dmat,
			.usbaa_bst = usbsc->usbsc_bst,
			.usbaa_bsh = usbsc->usbsc_ohci_bsh,
			.usbaa_ccm_bsh = aio->aio_ccm_bsh,
			.usbaa_size = AWIN_OHCI_SIZE,
			.usbaa_port = loc->loc_port,
		};
		usbsc->usbsc_ohci_dev =
		    config_found(self, &usbaa_ohci, NULL);
	}
#endif
#if NEHCI > 0
	struct awinusb_attach_args usbaa_ehci = {
		.usbaa_name = "ehci",
		.usbaa_dmat = usbsc->usbsc_dmat,
		.usbaa_bst = usbsc->usbsc_bst,
		.usbaa_bsh = usbsc->usbsc_ehci_bsh,
		.usbaa_ccm_bsh = aio->aio_ccm_bsh,
		.usbaa_size = AWIN_EHCI_SIZE,
		.usbaa_port = loc->loc_port,
	};
	config_found(self, &usbaa_ehci, NULL);
#endif
}
void * pic_establish_intr(struct pic_softc *pic, int irq, int ipl, int type, int (*func)(void *), void *arg) { struct intrsource *is; int off, nipl; if (pic->pic_sources[irq]) { printf("pic_establish_intr: pic %s irq %d already present\n", pic->pic_name, irq); return NULL; } is = kmem_zalloc(sizeof(*is), KM_SLEEP); if (is == NULL) return NULL; is->is_pic = pic; is->is_irq = irq; is->is_ipl = ipl; is->is_type = type; is->is_func = func; is->is_arg = arg; if (pic->pic_ops->pic_source_name) (*pic->pic_ops->pic_source_name)(pic, irq, is->is_source, sizeof(is->is_source)); else snprintf(is->is_source, sizeof(is->is_source), "irq %d", irq); /* * Now attach the per-cpu evcnts. */ percpu_foreach(pic->pic_percpu, pic_percpu_evcnt_attach, is); pic->pic_sources[irq] = is; /* * First try to use an existing slot which is empty. */ for (off = pic_ipl_offset[ipl]; off < pic_ipl_offset[ipl+1]; off++) { if (pic__iplsources[off] == NULL) { is->is_iplidx = off - pic_ipl_offset[ipl]; pic__iplsources[off] = is; return is; } } /* * Move up all the sources by one. */ if (ipl < NIPL) { off = pic_ipl_offset[ipl+1]; memmove(&pic__iplsources[off+1], &pic__iplsources[off], sizeof(pic__iplsources[0]) * (pic_ipl_offset[NIPL] - off)); } /* * Advance the offset of all IPLs higher than this. Include an * extra one as well. Thus the number of sources per ipl is * pic_ipl_offset[ipl+1] - pic_ipl_offset[ipl]. */ for (nipl = ipl + 1; nipl <= NIPL; nipl++) pic_ipl_offset[nipl]++; /* * Insert into the previously made position at the end of this IPL's * sources. */ off = pic_ipl_offset[ipl + 1] - 1; is->is_iplidx = off - pic_ipl_offset[ipl]; pic__iplsources[off] = is; (*pic->pic_ops->pic_establish_irq)(pic, is); (*pic->pic_ops->pic_unblock_irqs)(pic, is->is_irq & ~0x1f, __BIT(is->is_irq & 0x1f)); /* We're done. */ return is; }
void pic_deliver_irqs(struct pic_softc *pic, int ipl, void *frame) { const uint32_t ipl_mask = __BIT(ipl); struct intrsource *is; volatile uint32_t *ipending = pic->pic_pending_irqs; volatile uint32_t *iblocked = pic->pic_blocked_irqs; size_t irq_base; #if PIC_MAXSOURCES > 32 size_t irq_count; int poi = 0; /* Possibility of interrupting */ #endif uint32_t pending_irqs; uint32_t blocked_irqs; int irq; bool progress = false; KASSERT(pic->pic_pending_ipls & ipl_mask); irq_base = 0; #if PIC_MAXSOURCES > 32 irq_count = 0; #endif for (;;) { pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base, *ipending, ipl); KASSERT((pending_irqs & *ipending) == pending_irqs); KASSERT((pending_irqs & ~(*ipending)) == 0); if (pending_irqs == 0) { #if PIC_MAXSOURCES > 32 irq_count += 32; if (__predict_true(irq_count >= pic->pic_maxsources)) { if (!poi) /*Interrupt at this level was handled.*/ break; irq_base = 0; irq_count = 0; poi = 0; ipending = pic->pic_pending_irqs; iblocked = pic->pic_blocked_irqs; } else { irq_base += 32; ipending++; iblocked++; KASSERT(irq_base <= pic->pic_maxsources); } continue; #else break; #endif } progress = true; blocked_irqs = 0; do { irq = ffs(pending_irqs) - 1; KASSERT(irq >= 0); atomic_and_32(ipending, ~__BIT(irq)); is = pic->pic_sources[irq_base + irq]; if (is != NULL) { cpsie(I32_bit); pic_dispatch(is, frame); cpsid(I32_bit); #if PIC_MAXSOURCES > 32 /* * There is a possibility of interrupting * from cpsie() to cpsid(). */ poi = 1; #endif blocked_irqs |= __BIT(irq); } else { KASSERT(0); } pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base, *ipending, ipl); } while (pending_irqs); if (blocked_irqs) { atomic_or_32(iblocked, blocked_irqs); atomic_or_32(&pic_blocked_pics, __BIT(pic->pic_id)); } } KASSERT(progress); /* * Since interrupts are disabled, we don't have to be too careful * about these. */ if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0) atomic_and_32(&pic_pending_pics, ~__BIT(pic->pic_id)); }
static int coram_mpeg_trigger(struct coram_softc *sc, void *buf) { struct coram_dma *p; struct coram_sram_ch *ch; uint32_t v; ch = &coram_sram_chs[CORAM_SRAM_CH6]; for (p = sc->sc_dma; p && KERNADDR(p) != buf; p = p->next) continue; if (p == NULL) { printf("%s: coram_mpeg_trigger: bad addr %p\n", device_xname(sc->sc_dev), buf); return ENOENT; } /* disable fifo + risc */ bus_space_write_4(sc->sc_memt, sc->sc_memh, VID_C_DMA_CTL, 0); coram_risc_buffer(sc, CORAM_TS_PKTSIZE, 1); coram_sram_ch_setup(sc, ch, CORAM_TS_PKTSIZE); /* let me hope this bit is the same as on the 2388[0-3] */ /* software reset */ bus_space_write_4(sc->sc_memt, sc->sc_memh, VID_C_GEN_CTL, 0x0040); delay (100*1000); bus_space_write_4(sc->sc_memt, sc->sc_memh, VID_C_LNGTH, CORAM_TS_PKTSIZE); bus_space_write_4(sc->sc_memt, sc->sc_memh, VID_C_HW_SOP_CTL, 0x47 << 16 | 188 << 4); bus_space_write_4(sc->sc_memt, sc->sc_memh, VID_C_TS_CLK_EN, 1); bus_space_write_4(sc->sc_memt, sc->sc_memh, VID_C_VLD_MISC, 0); bus_space_write_4(sc->sc_memt, sc->sc_memh, VID_C_GEN_CTL, 12); delay (100*1000); v = bus_space_read_4(sc->sc_memt, sc->sc_memh, PAD_CTRL); v &= ~0x4; /* Clear TS2_SOP_OE */ bus_space_write_4(sc->sc_memt, sc->sc_memh, PAD_CTRL, v); v = bus_space_read_4(sc->sc_memt, sc->sc_memh, VID_C_INT_MSK); v |= 0x111111; bus_space_write_4(sc->sc_memt, sc->sc_memh, VID_C_INT_MSK, v); v = bus_space_read_4(sc->sc_memt, sc->sc_memh, VID_C_DMA_CTL); v |= 0x11; /* Enable RISC controller and FIFO */ bus_space_write_4(sc->sc_memt, sc->sc_memh, VID_C_DMA_CTL, v); v = bus_space_read_4(sc->sc_memt, sc->sc_memh, DEV_CNTRL2); v |= __BIT(5); /* Enable RISC controller */ bus_space_write_4(sc->sc_memt, sc->sc_memh, DEV_CNTRL2, v); v = bus_space_read_4(sc->sc_memt, sc->sc_memh, PCI_INT_MSK); v |= 0x001f00; v |= 0x04; bus_space_write_4(sc->sc_memt, sc->sc_memh, PCI_INT_MSK, v); v = bus_space_read_4(sc->sc_memt, sc->sc_memh, VID_C_GEN_CTL); #ifdef CORAM_DEBUG printf("%s, %06x %08x\n", __func__, VID_C_GEN_CTL, v); #endif v = 
bus_space_read_4(sc->sc_memt, sc->sc_memh, VID_C_SOP_STATUS); #ifdef CORAM_DEBUG printf("%s, %06x %08x\n", __func__, VID_C_SOP_STATUS, v); #endif delay(100*1000); v = bus_space_read_4(sc->sc_memt, sc->sc_memh, VID_C_GEN_CTL); #ifdef CORAM_DEBUG printf("%s, %06x %08x\n", __func__, VID_C_GEN_CTL, v); #endif v = bus_space_read_4(sc->sc_memt, sc->sc_memh, VID_C_SOP_STATUS); #ifdef CORAM_DEBUG printf("%s, %06x %08x\n", __func__, VID_C_SOP_STATUS, v); #endif return 0; }
static int acpicpu_md_pstate_fidvid_set(struct acpicpu_pstate *ps) { const uint64_t ctrl = ps->ps_control; uint32_t cfid, cvid, fid, i, irt; uint32_t pll, vco_cfid, vco_fid; uint32_t val, vid, vst; int rv; rv = acpicpu_md_pstate_fidvid_read(&cfid, &cvid); if (rv != 0) return rv; fid = __SHIFTOUT(ctrl, ACPI_0FH_CONTROL_FID); vid = __SHIFTOUT(ctrl, ACPI_0FH_CONTROL_VID); irt = __SHIFTOUT(ctrl, ACPI_0FH_CONTROL_IRT); vst = __SHIFTOUT(ctrl, ACPI_0FH_CONTROL_VST); pll = __SHIFTOUT(ctrl, ACPI_0FH_CONTROL_PLL); vst = vst * 20; pll = pll * 1000 / 5; irt = 10 * __BIT(irt); /* * Phase 1. */ while (cvid > vid) { val = 1 << __SHIFTOUT(ctrl, ACPI_0FH_CONTROL_MVS); val = (val > cvid) ? 0 : cvid - val; acpicpu_md_pstate_fidvid_write(cfid, val, 1, vst); rv = acpicpu_md_pstate_fidvid_read(NULL, &cvid); if (rv != 0) return rv; } i = __SHIFTOUT(ctrl, ACPI_0FH_CONTROL_RVO); for (; i > 0 && cvid > 0; --i) { acpicpu_md_pstate_fidvid_write(cfid, cvid - 1, 1, vst); rv = acpicpu_md_pstate_fidvid_read(NULL, &cvid); if (rv != 0) return rv; } /* * Phase 2. */ if (cfid != fid) { vco_fid = FID_TO_VCO_FID(fid); vco_cfid = FID_TO_VCO_FID(cfid); while (abs(vco_fid - vco_cfid) > 2) { if (fid <= cfid) val = cfid - 2; else { val = (cfid > 6) ? cfid + 2 : FID_TO_VCO_FID(cfid) + 2; } acpicpu_md_pstate_fidvid_write(val, cvid, pll, irt); rv = acpicpu_md_pstate_fidvid_read(&cfid, NULL); if (rv != 0) return rv; vco_cfid = FID_TO_VCO_FID(cfid); } acpicpu_md_pstate_fidvid_write(fid, cvid, pll, irt); rv = acpicpu_md_pstate_fidvid_read(&cfid, NULL); if (rv != 0) return rv; } /* * Phase 3. */ if (cvid != vid) { acpicpu_md_pstate_fidvid_write(cfid, vid, 1, vst); rv = acpicpu_md_pstate_fidvid_read(NULL, &cvid); if (rv != 0) return rv; } return 0; }
/*
 * imx51_get_pll_freq:
 *
 *	Compute the output frequency of DPLL "pll_no" (1-based) from
 *	its DP_OP/DP_MFD/DP_MFN registers (or the HFS variants when
 *	the DPLL reports high-frequency support mode).  The live
 *	computation is:
 *
 *		freq = 4 * ref * (mfi + mfn/(mfd+1)) / (pdf+1)
 *
 *	halved when DPDCK0_2_EN is clear.  The result is cached in
 *	the softc and returned.
 *
 *	Fix vs. previous revision: the debug printf passed a uint64_t
 *	for %lld (undefined behavior per C11 fprintf); the argument is
 *	now cast.  A stale "#if 0" block with the old formula was
 *	removed.
 */
static uint64_t
imx51_get_pll_freq(u_int pll_no)
{
	uint32_t dp_ctrl;
	uint32_t dp_op;
	uint32_t dp_mfd;
	uint32_t dp_mfn;
	uint32_t mfi;
	int32_t mfn;
	uint32_t mfd;
	uint32_t pdf;
	uint32_t ccr;
	uint64_t freq = 0;
	u_int ref = 0;
	bus_space_tag_t iot = ccm_softc->sc_iot;
	bus_space_handle_t ioh = ccm_softc->sc_pll[pll_no-1].pll_ioh;

	KASSERT(1 <= pll_no && pll_no <= IMX51_N_DPLLS);

	dp_ctrl = bus_space_read_4(iot, ioh, DPLL_DP_CTL);

	if (dp_ctrl & DP_CTL_HFSM) {
		dp_op = bus_space_read_4(iot, ioh, DPLL_DP_HFS_OP);
		dp_mfd = bus_space_read_4(iot, ioh, DPLL_DP_HFS_MFD);
		dp_mfn = bus_space_read_4(iot, ioh, DPLL_DP_HFS_MFN);
	} else {
		dp_op = bus_space_read_4(iot, ioh, DPLL_DP_OP);
		dp_mfd = bus_space_read_4(iot, ioh, DPLL_DP_MFD);
		dp_mfn = bus_space_read_4(iot, ioh, DPLL_DP_MFN);
	}

	pdf = dp_op & DP_OP_PDF;
	mfi = max(5, __SHIFTOUT(dp_op, DP_OP_MFI));
	mfd = dp_mfd;
	if (dp_mfn & __BIT(26))
		/* 27bit signed value: sign-extend to 32 bits */
		mfn = (int32_t)(__BITS(31,27) | dp_mfn);
	else
		mfn = dp_mfn;

	switch (dp_ctrl & DP_CTL_REF_CLK_SEL) {
	case DP_CTL_REF_CLK_SEL_COSC:
		/* Internal Oscillator */
		ref = IMX51_OSC_FREQ;
		break;
	case DP_CTL_REF_CLK_SEL_FPM:
		ccr = bus_space_read_4(iot, ccm_softc->sc_ioh, CCMC_CCR);
		if (ccr & CCR_FPM_MULT)
			ref = IMX51_CKIL_FREQ * 1024;
		else
			ref = IMX51_CKIL_FREQ * 512;
		break;
	default:
		ref = 0;
	}

	if (dp_ctrl & DP_CTL_REF_CLK_DIV)
		ref /= 2;

	ref *= 4;
	freq = (int64_t)ref * mfi + (int64_t)ref * mfn / (mfd + 1);
	freq /= pdf + 1;
	if (!(dp_ctrl & DP_CTL_DPDCK0_2_EN))
		freq /= 2;

#ifdef IMXCCMDEBUG
	printf("dp_ctl: %08x ", dp_ctrl);
	printf("pdf: %3d ", pdf);
	printf("mfi: %3d ", mfi);
	printf("mfd: %3d ", mfd);
	printf("mfn: %3d ", mfn);
	/* cast: %lld expects signed long long, freq is uint64_t */
	printf("pll: %lld\n", (long long)freq);
#endif

	ccm_softc->sc_pll[pll_no-1].pll_freq = freq;

	return freq;
}