/*
 * Power the Gdium down: raise the power-off GPIO line, mask its
 * interrupt, then spin until the hardware actually cuts power.
 */
void
gdium_powerdown(void)
{
	/* Drive GPIO bit 1 high and mask it in the interrupt-enable mask. */
	REGVAL(BONITO_GPIODATA) |= 0x00000002;
	REGVAL(BONITO_GPIOIE) &= ~0x00000002;
	printf("Powering down...\n");
	for (;;)
		delay(1000);
}
/*
 * Shutdown hook: force the CPU clock scaler back to full speed so a
 * warm reboot starts at the frequency the firmware/boot code expects.
 */
static void
pwmclock_shutdown(void *cookie)
{
	struct pwmclock_softc * const sc = cookie;
	uint32_t cfg;

	/* Just in case the interrupt handler runs again after this. */
	sc->sc_step_wanted = 7;

	/* Set the clock to full speed (scaling step 7). */
	cfg = REGVAL(LS2F_CHIPCFG0);
	cfg &= ~LS2FCFG_FREQSCALE_MASK;
	cfg |= 7;
	REGVAL(LS2F_CHIPCFG0) = cfg;
}
/*
 * Write one PCI configuration register through the Irongate's
 * address/data register pair.  The whole sequence is performed under
 * the configuration lock so concurrent accesses cannot interleave.
 */
void
irongate_conf_write(void *ipv, pcitag_t tag, int offset, pcireg_t data)
{
	int s;

	PCI_CONF_LOCK(s);
	/* Select bus/device/function/register via the address register. */
	REGVAL(PCI_CONF_ADDR) = (CONFADDR_ENABLE | tag | (offset & 0xff));
	alpha_mb();	/* order the address write before the data write */
	REGVAL(PCI_CONF_DATA) = data;
	alpha_mb();
	/* Deselect config space when done. */
	REGVAL(PCI_CONF_ADDR) = 0;
	alpha_mb();
	PCI_CONF_UNLOCK(s);
}
/*
 * Read one PCI configuration register through the Irongate's
 * address/data register pair, under the configuration lock.
 * Returns the raw register value.
 */
pcireg_t
irongate_conf_read0(void *ipv, pcitag_t tag, int offset)
{
	pcireg_t data;
	int s;

	PCI_CONF_LOCK(s);
	/* Select bus/device/function/register via the address register. */
	REGVAL(PCI_CONF_ADDR) = (CONFADDR_ENABLE | tag | (offset & 0xff));
	alpha_mb();	/* order the address write before the data read */
	data = REGVAL(PCI_CONF_DATA);
	/* Deselect config space when done. */
	REGVAL(PCI_CONF_ADDR) = 0;
	alpha_mb();
	PCI_CONF_UNLOCK(s);
	return (data);
}
/*
 * For addresses beyond the first swizzled window (REG1), program the
 * HAE (extension) register with the address's top bits and rewrite
 * *pa to the remaining offset.  The register update is done at
 * splhigh so the cached copy and the hardware cannot diverge.
 */
static __inline void
apecs_swiz_set_hae_mem(u_int32_t *pa)
{
	int s;
	u_int32_t msb;

	if (*pa >= REG1) {
		/* Split off the high bits the HAE register will supply. */
		msb = *pa & 0xf8000000;
		*pa -= msb;
		s = splhigh();
		if (msb != apecs_hae_mem) {
			apecs_hae_mem = msb;
			REGVAL(EPIC_HAXR1) = apecs_hae_mem;
			alpha_mb();
			/* read back (presumably to post/confirm the write) */
			apecs_hae_mem = REGVAL(EPIC_HAXR1);
		}
		splx(s);
	}
}
/*
 * Suspend sequence: arm the wakeup source, power USB ports and the
 * fan off, program the EC's PMU for stop mode, then throttle the CPU
 * clock and wait (at splhigh) for the wakeup interrupt.
 * Always returns 0.
 */
int
ykbec_suspend()
{
	struct ykbec_softc *sc = ykbec_sc;
	int ctrl;

	/*
	 * Set up wakeup sources: currently only the internal keyboard.
	 */
	loongson_set_isa_imr(1 << 1);

	/* USB */
	DPRINTF(("USB\n"));
	ykbec_write(sc, REG_USB0, USB_FLAG_OFF);
	ykbec_write(sc, REG_USB1, USB_FLAG_OFF);
	ykbec_write(sc, REG_USB2, USB_FLAG_OFF);

	/* EC */
	DPRINTF(("REG_PMUCFG\n"));
	ctrl = PMUCFG_SCI_WAKEUP | PMUCFG_WDT_WAKEUP | PMUCFG_GPWU_WAKEUP |
	    PMUCFG_LPC_WAKEUP | PMUCFG_STOP_MODE | PMUCFG_RESET_8051;
	ykbec_write(sc, REG_PMUCFG, ctrl);

	/* FAN */
	DPRINTF(("FAN\n"));
	ykbec_write(sc, REG_FAN_CONTROL, REG_FAN_OFF);

	/* CPU */
	DPRINTF(("CPU\n"));
	/* Save the clock configuration so ykbec_resume() can restore it. */
	ykbec_chip_config = REGVAL(LOONGSON_CHIP_CONFIG0);
	enableintr();
	/* Clear the low 3 bits (frequency-scale field) to slow the CPU. */
	REGVAL(LOONGSON_CHIP_CONFIG0) = ykbec_chip_config & ~0x7;
	(void)REGVAL(LOONGSON_CHIP_CONFIG0);	/* read back (presumably to post the write) */

	/*
	 * When a resume interrupt fires, we will enter the interrupt
	 * dispatcher, which will do nothing because we are at splhigh,
	 * and execution flow will return here and continue.
	 */
	(void)disableintr();

	return 0;
}
/*
 * SM502 PWM1 interrupt handler used as the system clock interrupt.
 * Returns 0 if the interrupt wasn't ours, 1 after handling it.
 * Accumulates elapsed CPU cycles scaled by the frequency step that
 * was in effect during the interval, and applies any pending
 * frequency-step change.
 */
int
pwmclock_intr(void *cookie)
{
	struct clockframe *cf = cookie;
	struct pwmclock_softc *sc = pwmclock;
	uint32_t reg, now, diff;

	/* is it us? */
	reg = bus_space_read_4(sc->sc_memt, sc->sc_regh, SM502_PWM1);
	if ((reg & SM502_PWM_INTR_PENDING) == 0)
		return 0;

	/* yes, it's us, so clear the interrupt */
	bus_space_write_4(sc->sc_memt, sc->sc_regh, SM502_PWM1, sc->sc_reg);
	/*
	 * this looks kinda funny but what we want here is this:
	 * - reading the counter and changing the CPU clock should be as
	 *   close together as possible in order to remain halfway accurate
	 * - we need to use the previous sc_step in order to scale the
	 *   interval passed since the last clock interrupt correctly, so
	 *   we only change sc_step after doing that
	 */
	if (sc->sc_step_wanted != sc->sc_step) {
		REGVAL(LS2F_CHIPCFG0) =
		    (REGVAL(LS2F_CHIPCFG0) & ~LS2FCFG_FREQSCALE_MASK) |
		    sc->sc_step_wanted;
	}
	now = mips3_cp0_count_read();
	diff = now - sc->sc_last;	/* unsigned arithmetic: wrap-safe */
	sc->sc_count += scale(diff, sc->sc_step);
	sc->sc_last = now;
	if (sc->sc_step_wanted != sc->sc_step) {
		sc->sc_step = sc->sc_step_wanted;
	}
	hardclock(cf);
	return 1;
}
/*
 * Compute the next PC for an add-style instruction: when the
 * instruction writes R15 (the PC) and its condition passes, the
 * result is Rn plus the shifter operand; otherwise execution simply
 * falls through to pc+4.
 */
static ulong
iadd(Ureg *ur, Instr i)
{
	ulong rd, rn;

	rd = REG(i, 12, 15);
	rn = REG(i, 16, 19);
	/* Only a condition-passing write to R15 changes control flow. */
	if (rd == 15 && condpass(i, ur->psr))
		return REGVAL(ur, rn) + shifterval(ur, i);
	return ur->pc + 4;
}
/*
 * Probe for the APECS core logic chipset.  Only one instance may
 * exist; a second probe fails with ENXIO.  Identifies the chip by
 * memory bus width, caches the current HAE register value, and adds
 * the ISA child bus.
 */
static int
apecs_probe(device_t dev)
{
	/* There can be only one APECS in the system. */
	if (apecs0)
		return ENXIO;
	apecs0 = dev;

	/* A wide (128-bit) memory bus means a 21072; otherwise a 21071. */
	if ((REGVAL(COMANCHE_GCR) & COMANCHE_GCR_WIDEMEM) != 0)
		device_set_desc(dev, "DECchip 21072 Core Logic chipset");
	else
		device_set_desc(dev, "DECchip 21071 Core Logic chipset");

	/* Cache the current HAE memory-extension register value. */
	apecs_hae_mem = REGVAL(EPIC_HAXR1);

	pci_init_resources();
	isa0 = device_add_child(dev, "isa", 0, 0);
	return 0;
}
/*
 * Resume sequence: restore the CPU clock configuration saved by
 * ykbec_suspend(), then turn the fan and USB ports back on and
 * refresh the cached EC state.  Always returns 0.
 */
int
ykbec_resume()
{
	struct ykbec_softc *sc = ykbec_sc;

	/* CPU */
	DPRINTF(("CPU\n"));
	/* Restore the pre-suspend clock scaling. */
	REGVAL(LOONGSON_CHIP_CONFIG0) = ykbec_chip_config;
	(void)REGVAL(LOONGSON_CHIP_CONFIG0);	/* read back (presumably to post the write) */

	/* FAN */
	DPRINTF(("FAN\n"));
	ykbec_write(sc, REG_FAN_CONTROL, REG_FAN_ON);

	/* USB */
	DPRINTF(("USB\n"));
	ykbec_write(sc, REG_USB0, USB_FLAG_ON);
	ykbec_write(sc, REG_USB1, USB_FLAG_ON);
	ykbec_write(sc, REG_USB2, USB_FLAG_ON);

	ykbec_refresh(sc);

	return 0;
}
/*
 * Attach the BONITO host-PCI bridge on the mainbus: report the
 * controller revision and attach the PCI bus below it.
 */
void
bonito_mainbus_attach(device_t parent, device_t self, void *aux)
{
	struct bonito_softc *sc = device_private(self);
	struct pcibus_attach_args pba;
	struct bonito_config *bc;
	pcireg_t rev;

	/*
	 * There is only one PCI controller on an Algorithmics board.
	 */
#if defined(ALGOR_P6032)
	bc = &p6032_configuration.ac_bonito;
#endif
	/*
	 * NOTE(review): if ALGOR_P6032 is not defined, 'bc' is used
	 * uninitialized here — presumably the build guarantees the
	 * platform macro is set; confirm.
	 */
	sc->sc_bonito = bc;

	rev = PCI_REVISION(REGVAL(BONITO_PCICLASS));
	printf(": BONITO Memory and PCI controller, %s rev. %d.%d\n",
	    BONITO_REV_FPGA(rev) ? "FPGA" : "ASIC", BONITO_REV_MAJOR(rev),
	    BONITO_REV_MINOR(rev));

	pba.pba_flags = PCI_FLAGS_IO_OKAY | PCI_FLAGS_MEM_OKAY;
	pba.pba_bus = 0;
	pba.pba_bridgetag = NULL;

#if defined(ALGOR_P6032)
	{
		struct p6032_config *acp = &p6032_configuration;

		/* Hand the platform's bus/DMA tags to the PCI bus. */
		pba.pba_iot = &acp->ac_iot;
		pba.pba_memt = &acp->ac_memt;
		pba.pba_dmat = &acp->ac_pci_dmat;
		pba.pba_dmat64 = NULL;
		pba.pba_pc = &acp->ac_pc;
	}
#endif /* ALGOR_P6032 */

	(void) config_found_ia(self, "pcibus", &pba, pcibusprint);
}
/*
 * Attach the EHCI host controller on the AR9344 arbus: map its
 * registers, apply the recommended USB configuration value, verify
 * the capability-register length, hook up the interrupt and
 * initialize the USB bus.
 */
void
ehci_arbus_attach(device_t parent, device_t self, void *aux)
{
	ehci_softc_t *sc = device_private(self);
	struct arbus_attach_args * const aa = aux;
	void *ih = NULL;
	int error;

	sc->iot = aa->aa_bst_le;
	sc->sc_size = aa->aa_size;
	//sc->sc_bus.ub_hcpriv = sc;
	sc->sc_bus.ub_dmatag = aa->aa_dmat;
	sc->sc_bus.ub_revision = USBREV_1_0;
	sc->sc_flags |= EHCIF_ETTF;
	sc->sc_vendor_init = ehci_arbus_init;

	error = bus_space_map(aa->aa_bst, aa->aa_addr, aa->aa_size, 0,
	    &sc->ioh);
	if (error) {
		aprint_error(": failed to map registers: %d\n", error);
		return;
	}

	/* The recommended value is 0x20 for both ports and the host */
	REGVAL(AR9344_USB_CONFIG_BASE) = 0x20c00;	/* magic */
	DELAY(1000);

	/* get offset to operational regs */
	uint32_t r = bus_space_read_4(aa->aa_bst, sc->ioh, 0);
	if (r != 0x40) {
		/*
		 * Fix: report the value actually read ('r').  The old
		 * code printed sc->sc_offs, which is not assigned until
		 * after this check, so it was an uninitialized read.
		 */
		aprint_error(": error: CAPLENGTH (%#x) != 0x40\n", r);
		return;
	}
	sc->sc_offs = EREAD1(sc, EHCI_CAPLENGTH);

	aprint_normal("\n");

	/* Disable EHCI interrupts */
	EOWRITE4(sc, EHCI_USBINTR, 0);

	/* establish interrupt */
	ih = arbus_intr_establish(aa->aa_cirq, aa->aa_mirq, ehci_intr, sc);
	if (ih == NULL)
		panic("%s: couldn't establish interrupt",
		    device_xname(self));

	/*
	 * There are no companion controllers
	 */
	sc->sc_ncomp = 0;

	error = ehci_init(sc);
	if (error) {
		aprint_error("%s: init failed, error=%d\n",
		    device_xname(self), error);
		/* Don't leave the interrupt handler dangling on failure. */
		if (ih != NULL)
			arbus_intr_disestablish(ih);
		return;
	}

	/* Attach USB device */
	sc->sc_child = config_found(self, &sc->sc_bus, usbctlprint);
}
/*
 * Attach the SM502 PWM-based system clock: program PWM1 as a 100 Hz
 * interrupt source, calibrate the CPU cycle counter at each of the
 * seven usable frequency-scaling steps, install a shutdown hook, and
 * expose frequency controls under machdep.loongson.frequency via
 * sysctl.
 */
static void
pwmclock_attach(device_t parent, device_t self, void *aux)
{
	struct pwmclock_softc *sc = device_private(self);
	struct voyager_attach_args *vaa = aux;
	const struct sysctlnode *sysctl_node, *me, *freq;
	uint32_t reg, last, curr, diff, acc;
	int i, clk;

	sc->sc_dev = self;
	sc->sc_memt = vaa->vaa_tag;
	sc->sc_regh = vaa->vaa_regh;
	aprint_normal("\n");

	/* NULL here gets us the clockframe */
	voyager_establish_intr(parent, 22, pwmclock_intr, NULL);
	reg = voyager_set_pwm(100, 100);	/* 100Hz, 10% duty cycle */
	reg |= SM502_PWM_ENABLE | SM502_PWM_ENABLE_INTR |
	    SM502_PWM_INTR_PENDING;
	sc->sc_reg = reg;
	pwmclock = sc;
	initclocks_ptr = pwmclock_start;

	/*
	 * Establish a hook so on shutdown we can set the CPU clock back to
	 * full speed. This is necessary because PMON doesn't change the
	 * clock scale register on a warm boot, the MIPS clock code gets
	 * confused if we're too slow and the loongson-specific bits run
	 * too late in the boot process
	 */
	sc->sc_shutdown_cookie = shutdownhook_establish(pwmclock_shutdown, sc);

	/* ok, let's see how far the cycle counter gets between interrupts */
	DPRINTF("calibrating CPU timer...\n");
	for (clk = 1; clk < 8; clk++) {
		/* Select scaling step 'clk', then (re)arm the PWM timer. */
		REGVAL(LS2F_CHIPCFG0) =
		    (REGVAL(LS2F_CHIPCFG0) & ~LS2FCFG_FREQSCALE_MASK) | clk;
		bus_space_write_4(sc->sc_memt, sc->sc_regh, SM502_PWM1,
		    sc->sc_reg);
		acc = 0;
		last = pwmclock_wait_edge(sc);
		/* Accumulate the cycle delta over 16 PWM periods. */
		for (i = 0; i < 16; i++) {
			curr = pwmclock_wait_edge(sc);
			diff = curr - last;
			acc += diff;
			last = curr;
		}
		/* mean of the 16 intervals (>>4), divided by 5000 */
		sc->sc_scale[clk] = (acc >> 4) / 5000;
	}
#ifdef PWMCLOCK_DEBUG
	for (clk = 1; clk < 8; clk++) {
		aprint_normal_dev(sc->sc_dev, "%d/8: %d\n", clk + 1,
		    sc->sc_scale[clk]);
	}
#endif
	/* Start out at full speed (step 7). */
	sc->sc_step = 7;
	sc->sc_step_wanted = 7;

	/* now setup sysctl */
	if (sysctl_createv(NULL, 0, NULL, &me, CTLFLAG_READWRITE,
	    CTLTYPE_NODE, "loongson", NULL, NULL, 0, NULL, 0, CTL_MACHDEP,
	    CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(sc->sc_dev,
		    "couldn't create 'loongson' node\n");
	if (sysctl_createv(NULL, 0, NULL, &freq, CTLFLAG_READWRITE,
	    CTLTYPE_NODE, "frequency", NULL, NULL, 0, NULL, 0, CTL_MACHDEP,
	    me->sysctl_num, CTL_CREATE, CTL_EOL) != 0)
		aprint_error_dev(sc->sc_dev,
		    "couldn't create 'frequency' node\n");
	/* machdep.loongson.frequency.target — desired CPU speed step */
	if (sysctl_createv(NULL, 0, NULL, &sysctl_node, CTLFLAG_READWRITE |
	    CTLFLAG_OWNDESC, CTLTYPE_INT, "target", "CPU speed",
	    pwmclock_cpuspeed_temp, 0, (void *)sc, 0, CTL_MACHDEP,
	    me->sysctl_num, freq->sysctl_num, CTL_CREATE, CTL_EOL) == 0) {
	} else
		aprint_error_dev(sc->sc_dev,
		    "couldn't create 'target' node\n");
	/* machdep.loongson.frequency.current — currently active step */
	if (sysctl_createv(NULL, 0, NULL, &sysctl_node, CTLFLAG_READWRITE,
	    CTLTYPE_INT, "current", NULL, pwmclock_cpuspeed_cur, 1,
	    (void *)sc, 0, CTL_MACHDEP, me->sysctl_num, freq->sysctl_num,
	    CTL_CREATE, CTL_EOL) == 0) {
	} else
		aprint_error_dev(sc->sc_dev,
		    "couldn't create 'current' node\n");
	/* machdep.loongson.frequency.available — list of valid steps */
	if (sysctl_createv(NULL, 0, NULL, &sysctl_node, CTLFLAG_READWRITE,
	    CTLTYPE_STRING, "available", NULL, pwmclock_cpuspeed_available,
	    2, (void *)sc, 0, CTL_MACHDEP, me->sysctl_num, freq->sysctl_num,
	    CTL_CREATE, CTL_EOL) == 0) {
	} else
		aprint_error_dev(sc->sc_dev,
		    "couldn't create 'available' node\n");
}
/*
 * Set up DMA for an MCPCIA: three bus_dma tags (direct-mapped,
 * SGMAP-mapped PCI, SGMAP-mapped ISA), their scatter-gather maps,
 * and the MCPCIA window/translation registers that back them.
 */
void
mcpcia_dma_init(struct mcpcia_config *ccp)
{
	bus_dma_tag_t t;

	/*
	 * Initialize the DMA tag used for direct-mapped DMA.
	 */
	t = &ccp->cc_dmat_direct;
	t->_cookie = ccp;
	t->_wbase = MCPCIA_DIRECT_MAPPED_BASE;
	t->_wsize = MCPCIA_DIRECT_MAPPED_SIZE;
	t->_next_window = &ccp->cc_dmat_pci_sgmap;
	t->_boundary = 0;
	t->_sgmap = NULL;
	t->_get_tag = mcpcia_dma_get_tag;
	t->_dmamap_create = _bus_dmamap_create;
	t->_dmamap_destroy = _bus_dmamap_destroy;
	t->_dmamap_load = _bus_dmamap_load_direct;
	t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
	t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
	t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
	t->_dmamap_unload = _bus_dmamap_unload;
	t->_dmamap_sync = _bus_dmamap_sync;
	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * Initialize the DMA tag used for sgmap-mapped PCI DMA.
	 */
	t = &ccp->cc_dmat_pci_sgmap;
	t->_cookie = ccp;
	t->_wbase = MCPCIA_PCI_SG_MAPPED_BASE;
	t->_wsize = MCPCIA_PCI_SG_MAPPED_SIZE;
	t->_next_window = NULL;
	t->_boundary = 0;
	t->_sgmap = &ccp->cc_pci_sgmap;
	t->_pfthresh = MCPCIA_SG_MAPPED_PFTHRESH;
	t->_get_tag = mcpcia_dma_get_tag;
	t->_dmamap_create = alpha_sgmap_dmamap_create;
	t->_dmamap_destroy = alpha_sgmap_dmamap_destroy;
	t->_dmamap_load = mcpcia_bus_dmamap_load_sgmap;
	t->_dmamap_load_mbuf = mcpcia_bus_dmamap_load_mbuf_sgmap;
	t->_dmamap_load_uio = mcpcia_bus_dmamap_load_uio_sgmap;
	t->_dmamap_load_raw = mcpcia_bus_dmamap_load_raw_sgmap;
	t->_dmamap_unload = mcpcia_bus_dmamap_unload_sgmap;
	t->_dmamap_sync = _bus_dmamap_sync;
	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * Initialize the DMA tag used for sgmap-mapped ISA DMA.
	 */
	t = &ccp->cc_dmat_isa_sgmap;
	t->_cookie = ccp;
	t->_wbase = MCPCIA_ISA_SG_MAPPED_BASE;
	t->_wsize = MCPCIA_ISA_SG_MAPPED_SIZE;
	t->_next_window = NULL;
	t->_boundary = 0;
	t->_sgmap = &ccp->cc_isa_sgmap;
	t->_pfthresh = MCPCIA_SG_MAPPED_PFTHRESH;
	t->_get_tag = mcpcia_dma_get_tag;
	t->_dmamap_create = alpha_sgmap_dmamap_create;
	t->_dmamap_destroy = alpha_sgmap_dmamap_destroy;
	t->_dmamap_load = mcpcia_bus_dmamap_load_sgmap;
	t->_dmamap_load_mbuf = mcpcia_bus_dmamap_load_mbuf_sgmap;
	t->_dmamap_load_uio = mcpcia_bus_dmamap_load_uio_sgmap;
	t->_dmamap_load_raw = mcpcia_bus_dmamap_load_raw_sgmap;
	t->_dmamap_unload = mcpcia_bus_dmamap_unload_sgmap;
	t->_dmamap_sync = _bus_dmamap_sync;
	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * Initialize the SGMAPs.
	 */
	alpha_sgmap_init(&ccp->cc_dmat_pci_sgmap, &ccp->cc_pci_sgmap,
	    "mcpcia pci sgmap",
	    MCPCIA_PCI_SG_MAPPED_BASE, 0, MCPCIA_PCI_SG_MAPPED_SIZE,
	    sizeof(uint64_t), NULL, 0);

	alpha_sgmap_init(&ccp->cc_dmat_isa_sgmap, &ccp->cc_isa_sgmap,
	    "mcpcia isa sgmap",
	    MCPCIA_ISA_SG_MAPPED_BASE, 0, MCPCIA_ISA_SG_MAPPED_SIZE,
	    sizeof(uint64_t), NULL, 0);

	/*
	 * Disable windows first.
	 */
	REGVAL(MCPCIA_W0_BASE(ccp)) = 0;
	REGVAL(MCPCIA_W1_BASE(ccp)) = 0;
	REGVAL(MCPCIA_W2_BASE(ccp)) = 0;
	REGVAL(MCPCIA_W3_BASE(ccp)) = 0;
	REGVAL(MCPCIA_T0_BASE(ccp)) = 0;
	REGVAL(MCPCIA_T1_BASE(ccp)) = 0;
	REGVAL(MCPCIA_T2_BASE(ccp)) = 0;
	REGVAL(MCPCIA_T3_BASE(ccp)) = 0;
	alpha_mb();

	/*
	 * Set up window 0 as an 8MB SGMAP-mapped window starting at 8MB.
	 */
	REGVAL(MCPCIA_W0_MASK(ccp)) = MCPCIA_WMASK_8M;
	/* Point the translation base at the ISA sgmap's page table. */
	REGVAL(MCPCIA_T0_BASE(ccp)) =
	    ccp->cc_isa_sgmap.aps_ptpa >> MCPCIA_TBASEX_SHIFT;
	alpha_mb();
	REGVAL(MCPCIA_W0_BASE(ccp)) =
	    MCPCIA_WBASE_EN | MCPCIA_WBASE_SG | MCPCIA_ISA_SG_MAPPED_BASE;
	alpha_mb();
	MCPCIA_SGTLB_INVALIDATE(ccp);

	/*
	 * Set up window 1 as a 2 GB Direct-mapped window starting at 2GB.
	 */
	REGVAL(MCPCIA_W1_MASK(ccp)) = MCPCIA_WMASK_2G;
	REGVAL(MCPCIA_T1_BASE(ccp)) = 0;
	alpha_mb();
	REGVAL(MCPCIA_W1_BASE(ccp)) =
	    MCPCIA_DIRECT_MAPPED_BASE | MCPCIA_WBASE_EN;
	alpha_mb();

	/*
	 * Set up window 2 as a 1G SGMAP-mapped window starting at 1G.
	 */
	REGVAL(MCPCIA_W2_MASK(ccp)) = MCPCIA_WMASK_1G;
	/* Point the translation base at the PCI sgmap's page table. */
	REGVAL(MCPCIA_T2_BASE(ccp)) =
	    ccp->cc_pci_sgmap.aps_ptpa >> MCPCIA_TBASEX_SHIFT;
	alpha_mb();
	REGVAL(MCPCIA_W2_BASE(ccp)) =
	    MCPCIA_WBASE_EN | MCPCIA_WBASE_SG | MCPCIA_PCI_SG_MAPPED_BASE;
	alpha_mb();

	/* XXX XXX BEGIN XXX XXX */
	{							/* XXX */
		extern paddr_t alpha_XXX_dmamap_or;		/* XXX */
		alpha_XXX_dmamap_or = MCPCIA_DIRECT_MAPPED_BASE;/* XXX */
	}							/* XXX */
	/* XXX XXX END XXX XXX */
}
/*
 * Attach the CIA/Pyxis core logic chipset: finish chipset setup,
 * report its identity and capabilities, apply the Miata 1 DMA
 * prefetch workaround where needed, pick the per-model PCI interrupt
 * routing and attach the PCI bus below.
 */
void
ciaattach(device_t parent, device_t self, void *aux)
{
	struct cia_softc *sc = device_private(self);
	struct cia_config *ccp;
	struct pcibus_attach_args pba;
	char bits[64];
	const char *name;
	int pass;

	/* note that we've attached the chipset; can't have 2 CIAs. */
	ciafound = 1;

	sc->sc_dev = self;

	/*
	 * set up the chipset's info; done once at console init time
	 * (maybe), but we must do it here as well to take care of things
	 * that need to use memory allocation.
	 */
	ccp = sc->sc_ccp = &cia_configuration;
	cia_init(ccp, 1);

	/* Pyxis reports its revision directly; ALCOR revs are 0-based. */
	if (ccp->cc_flags & CCF_ISPYXIS) {
		name = "Pyxis";
		pass = ccp->cc_rev;
	} else {
		name = "ALCOR/ALCOR2";
		pass = ccp->cc_rev + 1;
	}

	aprint_normal(": DECchip 2117x Core Logic Chipset (%s), pass %d\n",
	    name, pass);
	if (ccp->cc_cnfg) {
		snprintb(bits, sizeof(bits), CIA_CSR_CNFG_BITS, ccp->cc_cnfg);
		aprint_normal_dev(self, "extended capabilities: %s\n", bits);
	}

	/* Report which access paths use BWX (byte/word extension). */
	switch (ccp->cc_flags & (CCF_PCI_USE_BWX|CCF_BUS_USE_BWX)) {
	case CCF_PCI_USE_BWX|CCF_BUS_USE_BWX:
		name = "PCI config and bus";
		break;
	case CCF_PCI_USE_BWX:
		name = "PCI config";
		break;
	case CCF_BUS_USE_BWX:
		name = "bus";
		break;
	default:
		name = NULL;
		break;
	}
	if (name != NULL)
		aprint_normal_dev(self, "using BWX for %s access\n", name);

#ifdef DEC_550
	if (cputype == ST_DEC_550 &&
	    (hwrpb->rpb_variation & SV_ST_MASK) < SV_ST_MIATA_1_5) {
		/*
		 * Miata 1 systems have a bug: DMA cannot cross
		 * an 8k boundary!  Make sure PCI read prefetching
		 * is disabled on these chips.  Note that secondary
		 * PCI busses don't have this problem, because of
		 * the way PPBs handle PCI read requests.
		 *
		 * In the 21174 Technical Reference Manual, this is
		 * actually documented as "Pyxis Pass 1", but apparently
		 * there are chips that report themselves as "Pass 1"
		 * which do not have the bug!  Miatas with the Cypress
		 * PCI-ISA bridge (i.e. Miata 1.5 and Miata 2) do not
		 * have the bug, so we use this check.
		 *
		 * NOTE: This bug is actually worked around in cia_dma.c,
		 * when direct-mapped DMA maps are created.
		 *
		 * XXX WE NEED TO THINK ABOUT HOW TO HANDLE THIS FOR
		 * XXX SGMAP DMA MAPPINGS!
		 */
		uint32_t ctrl;

		/* XXX no bets... */
		aprint_error_dev(self,
		    "WARNING: Pyxis pass 1 DMA bug; no bets...\n");
		ccp->cc_flags |= CCF_PYXISBUG;
		alpha_mb();
		ctrl = REGVAL(CIA_CSR_CTRL);
		/* Clear the read-prefetch type fields. */
		ctrl &= ~(CTRL_RD_TYPE|CTRL_RL_TYPE|CTRL_RM_TYPE);
		REGVAL(CIA_CSR_CTRL) = ctrl;
		alpha_mb();
	}
#endif /* DEC_550 */

	cia_dma_init(ccp);

	/* Pick the PCI interrupt routing code for this system type. */
	switch (cputype) {
#ifdef DEC_KN20AA
	case ST_DEC_KN20AA:
		pci_kn20aa_pickintr(ccp);
		break;
#endif
#ifdef DEC_EB164
	case ST_EB164:
		pci_eb164_pickintr(ccp);
		break;
#endif
#ifdef DEC_550
	case ST_DEC_550:
		pci_550_pickintr(ccp);
		break;
#endif
#ifdef DEC_1000A
	case ST_DEC_1000A:
		pci_1000a_pickintr(ccp, &ccp->cc_iot, &ccp->cc_memt,
		    &ccp->cc_pc);
		break;
#endif
#ifdef DEC_1000
	case ST_DEC_1000:
		pci_1000_pickintr(ccp, &ccp->cc_iot, &ccp->cc_memt,
		    &ccp->cc_pc);
		break;
#endif
	default:
		panic("ciaattach: shouldn't be here, really...");
	}

	pba.pba_iot = &ccp->cc_iot;
	pba.pba_memt = &ccp->cc_memt;
	pba.pba_dmat =
	    alphabus_dma_get_tag(&ccp->cc_dmat_direct, ALPHA_BUS_PCI);
	pba.pba_dmat64 = NULL;
	pba.pba_pc = &ccp->cc_pc;
	pba.pba_bus = 0;
	pba.pba_bridgetag = NULL;
	pba.pba_flags = PCI_FLAGS_IO_OKAY | PCI_FLAGS_MEM_OKAY;
	/* Memory-read-line/multiple and MWI are unsafe on buggy Pyxis. */
	if ((ccp->cc_flags & CCF_PYXISBUG) == 0)
		pba.pba_flags |= PCI_FLAGS_MRL_OKAY | PCI_FLAGS_MRM_OKAY |
		    PCI_FLAGS_MWI_OKAY;
	config_found_ia(self, "pcibus", &pba, pcibusprint);
}
/*
 * Set up the chipset's function pointers.
 *
 * Called both from console init (possibly) and from attach; only the
 * extent-creating pieces are guarded by cc_initted so they run once.
 * 'mallocsafe' records whether memory allocation is permitted yet.
 */
void
cia_init(struct cia_config *ccp, int mallocsafe)
{
	int pci_use_bwx = cia_pci_use_bwx;
	int bus_use_bwx = cia_bus_use_bwx;

	/* Snapshot the HAE windows and the chip revision. */
	ccp->cc_hae_mem = REGVAL(CIA_CSR_HAE_MEM);
	ccp->cc_hae_io = REGVAL(CIA_CSR_HAE_IO);
	ccp->cc_rev = REGVAL(CIA_CSR_REV) & REV_MASK;

	/*
	 * Determine if we have a Pyxis.  Only two systypes can
	 * have this: the EB164 systype (AlphaPC164LX and AlphaPC164SX)
	 * and the DEC_550 systype (Miata).
	 */
	if ((cputype == ST_EB164 &&
	    (hwrpb->rpb_variation & SV_ST_MASK) >= SV_ST_ALPHAPC164LX_400) ||
	    cputype == ST_DEC_550) {
		ccp->cc_flags |= CCF_ISPYXIS;
		if (cia_pyxis_force_bwx)
			pci_use_bwx = bus_use_bwx = 1;
	}

	/*
	 * ALCOR/ALCOR2 Revisions >= 2 and Pyxis have the CNFG register.
	 */
	if (ccp->cc_rev >= 2 || (ccp->cc_flags & CCF_ISPYXIS) != 0)
		ccp->cc_cnfg = REGVAL(CIA_CSR_CNFG);
	else
		ccp->cc_cnfg = 0;

	/*
	 * Use BWX iff:
	 *
	 *	- It hasn't been disabled by the user,
	 *	- it's enabled in CNFG,
	 *	- we're implementation version ev5,
	 *	- BWX is enabled in the CPU's capabilities mask
	 *
	 * NOTE(review): the "ev5" bullet above is not actually checked
	 * below — only the CNFG and AMASK conditions are tested;
	 * confirm whether the AMASK test is considered sufficient.
	 */
	if ((pci_use_bwx || bus_use_bwx) &&
	    (ccp->cc_cnfg & CNFG_BWEN) != 0 &&
	    (cpu_amask & ALPHA_AMASK_BWX) != 0) {
		uint32_t ctrl;

		if (pci_use_bwx)
			ccp->cc_flags |= CCF_PCI_USE_BWX;
		if (bus_use_bwx)
			ccp->cc_flags |= CCF_BUS_USE_BWX;

		/*
		 * For whatever reason, the firmware seems to enable PCI
		 * loopback mode if it also enables BWX.  Make sure it's
		 * enabled if we have an old, buggy firmware rev.
		 */
		alpha_mb();
		ctrl = REGVAL(CIA_CSR_CTRL);
		if ((ctrl & CTRL_PCI_LOOP_EN) == 0) {
			REGVAL(CIA_CSR_CTRL) = ctrl | CTRL_PCI_LOOP_EN;
			alpha_mb();
		}
	}

	if (!ccp->cc_initted) {
		/* don't do these twice since they set up extents */
		if (ccp->cc_flags & CCF_BUS_USE_BWX) {
			cia_bwx_bus_io_init(&ccp->cc_iot, ccp);
			cia_bwx_bus_mem_init(&ccp->cc_memt, ccp);
			/*
			 * We have one window for both PCI I/O and MEM
			 * in BWX mode.
			 */
			alpha_bus_window_count[ALPHA_BUS_TYPE_PCI_IO] = 1;
			alpha_bus_window_count[ALPHA_BUS_TYPE_PCI_MEM] = 1;
		} else {
			cia_swiz_bus_io_init(&ccp->cc_iot, ccp);
			cia_swiz_bus_mem_init(&ccp->cc_memt, ccp);
			/*
			 * We have two I/O windows and 4 MEM windows in
			 * SWIZ mode.
			 */
			alpha_bus_window_count[ALPHA_BUS_TYPE_PCI_IO] = 2;
			alpha_bus_window_count[ALPHA_BUS_TYPE_PCI_MEM] = 4;
		}
		alpha_bus_get_window = cia_bus_get_window;
	}
	ccp->cc_mallocsafe = mallocsafe;

	cia_pci_init(&ccp->cc_pc, ccp);
	alpha_pci_chipset = &ccp->cc_pc;

	ccp->cc_initted = 1;
}
/*
 * Reset the Gdium: drive GPIO bit 1 low and mask it in the
 * interrupt-enable register (the complement of gdium_powerdown(),
 * which drives the same line high).
 */
void
gdium_reset(void)
{
	REGVAL(BONITO_GPIODATA) &= ~0x00000002;
	REGVAL(BONITO_GPIOIE) &= ~0x00000002;
}