/*
 * mec_reset: reset the MEC Ethernet chip and restore basic state
 * (station address, default MAC control, DMA disabled).
 */
void
mec_reset(struct mec_softc *sc)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint64_t address, control;
	int i;

	/* Reset chip. */
	bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET);
	delay(1000);
	bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0);
	delay(1000);

	/*
	 * Set Ethernet address: pack the six address bytes, first byte
	 * in the most significant position, into one 64-bit register.
	 */
	address = 0;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		address = address << 8;
		address += sc->sc_ac.ac_enaddr[i];
	}
	bus_space_write_8(st, sh, MEC_STATION, address);

	/* Default to 100/half and let auto-negotiation work its magic. */
	control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI |
	    MEC_MAC_IPG_DEFAULT;
	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);

	/* Leave DMA disabled; mec_init() programs MEC_DMA_CONTROL later. */
	bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);

	DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n",
	    bus_space_read_8(st, sh, MEC_MAC_CONTROL)));
}
/*
 * This is called from the psycho and sbus drivers.  It does not directly
 * attach to the nexus because it shares register space with the bridge in
 * question.
 *
 * Quiesces both counters, then registers counter CT0 as a system
 * timecounter.  The allocated timecounter and softc are never freed
 * (registered for the lifetime of the system).
 */
void
sparc64_counter_init(const char *name, bus_space_tag_t tag,
    bus_space_handle_t handle, bus_addr_t offset)
{
	struct timecounter *tc;
	struct ct_softc *sc;

	printf("initializing counter-timer\n");
	/*
	 * Turn off interrupts from both counters.  Set the limit to the
	 * maximum value (although that should not change anything with
	 * CTLR_INTEN and CTLR_PERIODIC off).
	 */
	bus_space_write_8(tag, handle, offset + CTR_CT0 + CTR_LIMIT,
	    COUNTER_MASK);
	bus_space_write_8(tag, handle, offset + CTR_CT1 + CTR_LIMIT,
	    COUNTER_MASK);

	/* Register as a time counter. */
	tc = malloc(sizeof(*tc), M_DEVBUF, M_WAITOK | M_ZERO);
	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK);
	/* The timecounter reads only CT0. */
	sc->sc_tag = tag;
	sc->sc_handle = handle;
	sc->sc_offset = offset + CTR_CT0;
	tc->tc_get_timecount = counter_get_timecount;
	tc->tc_counter_mask = COUNTER_MASK;
	tc->tc_frequency = COUNTER_FREQ;
	tc->tc_name = strdup(name, M_DEVBUF);
	tc->tc_priv = sc;
	tc->tc_quality = COUNTER_QUALITY;
	tc_init(tc);
}
/*
 * mec_init: ifnet init routine.
 * Stop and reset the chip, program the RX filter, reset the TX ring
 * state, prime the RX FIFO with all descriptors, then enable DMA and
 * kick off transmission and media selection.  Always returns 0.
 */
int
mec_init(struct ifnet *ifp)
{
	struct mec_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct mec_rxdesc *rxd;
	int i;

	/* Cancel any pending I/O. */
	mec_stop(ifp);

	/* Reset device. */
	mec_reset(sc);

	/* Setup filter for multicast or promisc mode. */
	mec_setfilter(sc);

	/* Set the TX ring pointer to the base address. */
	bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0));

	/* Empty TX ring: last points just before the first free slot. */
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = MEC_NTXDESC - 1;

	/* Put RX buffers into FIFO. */
	for (i = 0; i < MEC_NRXDESC; i++) {
		rxd = &sc->sc_rxdesc[i];
		rxd->rxd_stat = 0;
		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
	}
	sc->sc_rxptr = 0;

#if 0	/* XXX no info */
	bus_space_write_8(st, sh, MEC_TIMER, 0);
#endif

	/*
	 * MEC_DMA_TX_INT_ENABLE will be set later otherwise it causes
	 * spurious interrupts when TX buffers are empty.
	 */
	bus_space_write_8(st, sh, MEC_DMA_CONTROL,
	    (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) |
	    (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
	    MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */
	    MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	mec_start(ifp);

	mii_mediachg(&sc->sc_mii);

	return 0;
}
/*
 * Move the hardware text cursor to column x, row y by programming the
 * cursor-location register pair (index 15 = low byte, 14 = high byte)
 * through the index/data port pair.
 */
void __inline bcon_set_cursor_pos( uint32 x, uint32 y )
{
	uint32 pos;

	/* Linear cell offset of (x, y) in a TERM_COLS-wide screen. */
	pos = y * TERM_COLS + x;

	/* Low byte first, then high byte. */
	bus_space_write_8( cur_io_type, cur_io_base, cur_index_off, 15 );
	bus_space_write_8( cur_io_type, cur_io_base, cur_data_off, pos & 0x00FF );
	bus_space_write_8( cur_io_type, cur_io_base, cur_index_off, 14 );
	bus_space_write_8( cur_io_type, cur_io_base, cur_data_off, pos >> 8 );
}
/*
 * Mask every CIU interrupt source for this CPU by zeroing both enable
 * registers (EN0/EN1) for the IP2 and IP3 interrupt lines.
 */
void
obio_intr_init(void)
{
	int core = cpu_number();

	bus_space_write_8(&obio_tag, obio_h, CIU_IP2_EN0(core), 0);
	bus_space_write_8(&obio_tag, obio_h, CIU_IP3_EN0(core), 0);
	bus_space_write_8(&obio_tag, obio_h, CIU_IP2_EN1(core), 0);
	bus_space_write_8(&obio_tag, obio_h, CIU_IP3_EN1(core), 0);
}
void bcon_clear_line( int y ) { int i; for( i = 0; i < TERM_COLS*2; i += 2 ) { bus_space_write_8( vid_io_type, vid_io_base, (y*TERM_COLS*2)+i, ' ' ); bus_space_write_8( vid_io_type, vid_io_base, (y*TERM_COLS*2)+i+1, bcon_attrs ); } }
/*
 * Write one character cell (glyph byte + attribute byte) at the current
 * hardware cursor position, then advance the hardware cursor by one
 * cell via the cursor-location registers (index 15 = low, 14 = high).
 */
void __inline bcon_write_char( int c, int attributes )
{
	uint32 offset = _get_cur_offset();

	/* Even byte: character code; odd byte: attribute. */
	bus_space_write_8( vid_io_type, vid_io_base, offset*2, (c&0xFF) );
	bus_space_write_8( vid_io_type, vid_io_base, (offset*2)+1, attributes );

	offset++;

	/* Move the hardware cursor to the next cell. */
	bus_space_write_8( cur_io_type, cur_io_base, cur_index_off, 15 );
	bus_space_write_8( cur_io_type, cur_io_base, cur_data_off, (offset&0x00FF) );
	bus_space_write_8( cur_io_type, cur_io_base, cur_index_off, 14 );
	bus_space_write_8( cur_io_type, cur_io_base, cur_data_off, (offset >> 8) );
}
/*
 * Read back the hardware cursor position from the cursor-location
 * registers (index 14 = high byte, 15 = low byte) and return it as a
 * linear cell offset.
 *
 * NOTE(review): bus_space_read_8() is used here in an output-argument
 * style (value stored into offset_h/offset_l) instead of the usual
 * return-value convention -- presumably a local macro in this console
 * code; verify against its definition.
 */
static uint32 __inline _get_cur_offset()
{
	uint32 offset, offset_h, offset_l;

	bus_space_write_8( cur_io_type, cur_io_base, cur_index_off, 14 );
	bus_space_read_8( cur_io_type, cur_io_base, cur_data_off, offset_h );
	offset_h <<= 8;
	bus_space_write_8( cur_io_type, cur_io_base, cur_index_off, 15 );
	bus_space_read_8( cur_io_type, cur_io_base, cur_data_off, offset_l );
	offset = offset_h|offset_l;

	return offset;
}
/*
 * Clear the low 32 bits of the SLIx S2M region register for the given
 * SLI group.  Only groups 0 and 1 are valid; any other group is
 * reported and the register is left untouched (handle stays 0).
 */
static void
thunder_pem_slix_s2m_regx_acc_modify(struct thunder_pem_softc *sc,
    int sli_group, int slix)
{
	uint64_t regval;
	bus_space_handle_t handle = 0;

	KASSERT(slix >= 0 && slix <= SLI_ACC_REG_CNT, ("Invalid SLI index"));

	if (sli_group == 0)
		handle = sli0_s2m_regx_base;
	else if (sli_group == 1)
		handle = sli1_s2m_regx_base;
	else
		device_printf(sc->dev, "SLI group is not correct\n");

	if (handle) {
		/* Clear lower 32-bits of the SLIx register */
		regval = bus_space_read_8(sc->reg_bst, handle,
		    PEM_CFG_SLIX_TO_REG(slix));
		regval &= ~(0xFFFFFFFFUL);
		bus_space_write_8(sc->reg_bst, handle,
		    PEM_CFG_SLIX_TO_REG(slix), regval);
	}
}
/*
 * Bring up the PCIe link on this PEM: verify the PEM is powered and
 * ready, set the link-enable bit, wait, then check the RC config space
 * for an active data-link layer with link training finished.
 * Returns 0 on success, ENXIO if the PEM is off or the link times out.
 */
static int
thunder_pem_link_init(struct thunder_pem_softc *sc)
{
	uint64_t regval;

	/* check whether PEM is safe to access. */
	regval = bus_space_read_8(sc->reg_bst, sc->reg_bsh, PEM_ON_REG);
	if ((regval & PEM_CFG_LINK_MASK) != PEM_CFG_LINK_RDY) {
		device_printf(sc->dev, "PEM%d is not ON\n", sc->id);
		return (ENXIO);
	}

	regval = bus_space_read_8(sc->reg_bst, sc->reg_bsh, PEM_CTL_STATUS);
	regval |= PEM_LINK_ENABLE;
	bus_space_write_8(sc->reg_bst, sc->reg_bsh, PEM_CTL_STATUS, regval);

	/* Wait 1ms as per Cavium specification */
	DELAY(1000);

	regval = thunder_pem_config_reg_read(sc, PCIERC_CFG032);

	/* Fail if DL layer not active or link training still in progress. */
	if (((regval & PEM_LINK_DLLA) == 0) || ((regval & PEM_LINK_LT) != 0)) {
		device_printf(sc->dev, "PCIe RC: Port %d Link Timeout\n",
		    sc->id);
		return (ENXIO);
	}

	return (0);
}
/*
 * Read a PHY register through the MEC MII interface.
 * Returns the register value, or 0 if the interface is busy or the
 * read never completes within the polling budget.
 */
int
mec_mii_readreg(struct device *self, int phy, int reg)
{
	struct mec_softc *sc = (void *)self;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint32_t val;
	int i;

	/* Wait for any previous MII transaction to finish. */
	if (mec_mii_wait(sc) != 0)
		return 0;

	/* Select PHY and register, then trigger the read. */
	bus_space_write_4(st, sh, MEC_PHY_ADDRESS,
	    (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
	bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1);
	delay(25);

	/* Poll for completion: up to 20 tries, 30us apart. */
	for (i = 0; i < 20; i++) {
		delay(30);

		val = bus_space_read_4(st, sh, MEC_PHY_DATA);

		if ((val & MEC_PHY_DATA_BUSY) == 0)
			return val & MEC_PHY_DATA_VALUE;
	}

	return 0;
}
void mec_stop(struct ifnet *ifp) { struct mec_softc *sc = ifp->if_softc; struct mec_txsoft *txs; int i; DPRINTF(MEC_DEBUG_STOP, ("mec_stop\n")); ifp->if_timer = 0; ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); timeout_del(&sc->sc_tick_ch); mii_down(&sc->sc_mii); /* Disable DMA. */ bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_DMA_CONTROL, 0); /* Release any TX buffers. */ for (i = 0; i < MEC_NTXDESC; i++) { txs = &sc->sc_txsoft[i]; if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) { bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); m_freem(txs->txs_mbuf); txs->txs_mbuf = NULL; } } }
/*
 * macebusattach: map and initialize the CRIME and MACE ISA interrupt
 * control registers, install the macebus interrupt dispatchers, and
 * attach the known child devices.
 */
void
macebusattach(struct device *parent, struct device *self, void *aux)
{
	u_int32_t creg;
	uint i;

	/*
	 * Map and setup CRIME control registers.
	 */
	if (bus_space_map(&crimebus_tag, 0x00000000, 0x400, 0, &crime_h)) {
		printf(": can't map CRIME control registers\n");
		return;
	}

	/* Report the CRIME revision (major.minor from the low byte). */
	creg = bus_space_read_8(&crimebus_tag, crime_h, CRIME_REVISION);
	printf(": crime rev %d.%d\n", (creg & 0xf0) >> 4, creg & 0xf);

	/* Clear error state and mask/ack all CRIME interrupt sources. */
	bus_space_write_8(&crimebus_tag, crime_h, CRIME_CPU_ERROR_STAT, 0);
	bus_space_write_8(&crimebus_tag, crime_h, CRIME_MEM_ERROR_STAT, 0);
	bus_space_write_8(&crimebus_tag, crime_h, CRIME_INT_MASK, 0);
	bus_space_write_8(&crimebus_tag, crime_h, CRIME_INT_SOFT, 0);
	bus_space_write_8(&crimebus_tag, crime_h, CRIME_INT_HARD, 0);
	bus_space_write_8(&crimebus_tag, crime_h, CRIME_INT_STAT, 0);

	/*
	 * Map and setup MACE ISA control registers.
	 */
	if (bus_space_map(&macebus_tag, MACE_ISA_OFFS, 0x400, 0, &mace_h)) {
		printf("%s: can't map MACE control registers\n",
		    self->dv_xname);
		return;
	}
	bus_space_write_8(&macebus_tag, mace_h, MACE_ISA_INT_MASK, 0);
	bus_space_write_8(&macebus_tag, mace_h, MACE_ISA_INT_STAT, 0);

	/*
	 * On O2 systems all interrupts are handled by the macebus interrupt
	 * handler. Register all except clock.
	 */
	set_intr(INTPRI_MACEIO, CR_INT_0, macebus_iointr);
	register_splx_handler(macebus_splx);

	/* Set up a handler called when clock interrupts go off. */
	set_intr(INTPRI_MACEAUX, CR_INT_5, macebus_aux);

	/*
	 * Attach subdevices.
	 */
	for (i = 0; i < nitems(macebus_children); i++)
		config_found_sm(self, macebus_children + i,
		    macebusprint, macebussubmatch);
}
/*
 * Read the FPA interrupt summary register, acknowledge the pending
 * causes by writing the read value back (presumably write-1-to-clear),
 * and return the bits that were set.
 */
uint64_t
cn30xxfpa_int_summary(void)
{
	struct cn30xxfpa_softc *sc = &cn30xxfpa_softc;
	uint64_t sum;

	sum = bus_space_read_8(sc->sc_regt, sc->sc_regh, FPA_INT_SUM_OFFSET);
	bus_space_write_8(sc->sc_regt, sc->sc_regh, FPA_INT_SUM_OFFSET, sum);

	return sum;
}
/*
 * Write to the command FIFO: encode the target register into a CFIFO
 * write command, optionally tag it for execution, and push the command
 * word combined with the data value.
 */
static inline void
impact_cmd_fifo_write(struct impact_screen *scr, uint64_t reg, uint32_t val,
    int exec)
{
	uint64_t word;

	word = IMPACTSR_CFIFO_WRITE | (reg << IMPACTSR_CFIFO_REG_SHIFT) | val;
	if (exec != 0)
		word |= IMPACTSR_CFIFO_EXEC;

	bus_space_write_8(scr->iot, scr->ioh, IMPACTSR_CFIFO, word);
}
/*
 * cn30xxfpa_int_enable: acknowledge all FPA interrupt causes and, when
 * 'enable' is nonzero, enable the full set of FPA error interrupts
 * (per-queue parity/counter-offset/underflow for queues 0-7 plus the
 * free-list single/double-bit ECC errors).
 */
void
cn30xxfpa_int_enable(struct cn30xxfpa_softc *sc, int enable)
{
	const uint64_t int_xxx =
	    FPA_INT_ENB_Q7_PERR | FPA_INT_ENB_Q7_COFF | FPA_INT_ENB_Q7_UND |
	    FPA_INT_ENB_Q6_PERR | FPA_INT_ENB_Q6_COFF | FPA_INT_ENB_Q6_UND |
	    FPA_INT_ENB_Q5_PERR | FPA_INT_ENB_Q5_COFF | FPA_INT_ENB_Q5_UND |
	    FPA_INT_ENB_Q4_PERR | FPA_INT_ENB_Q4_COFF | FPA_INT_ENB_Q4_UND |
	    FPA_INT_ENB_Q3_PERR | FPA_INT_ENB_Q3_COFF | FPA_INT_ENB_Q3_UND |
	    FPA_INT_ENB_Q2_PERR | FPA_INT_ENB_Q2_COFF | FPA_INT_ENB_Q2_UND |
	    FPA_INT_ENB_Q1_PERR | FPA_INT_ENB_Q1_COFF | FPA_INT_ENB_Q1_UND |
	    FPA_INT_ENB_Q0_PERR | FPA_INT_ENB_Q0_COFF | FPA_INT_ENB_Q0_UND |
	    FPA_INT_ENB_FED1_DBE | FPA_INT_ENB_FED1_SBE |
	    FPA_INT_ENB_FED0_DBE | FPA_INT_ENB_FED0_SBE;

	/* Clear any latched causes before (possibly) enabling them. */
	bus_space_write_8(sc->sc_regt, sc->sc_regh, FPA_INT_SUM_OFFSET,
	    int_xxx);
	if (enable)
		bus_space_write_8(sc->sc_regt, sc->sc_regh, FPA_INT_ENB_OFFSET,
		    int_xxx);
}
void octohci_attach(struct device *parent, struct device *self, void *aux) { struct octohci_softc *sc = (struct octohci_softc *)self; struct octuctl_attach_args *aa = aux; char *devname; uint64_t port_ctl; int rc; int s; sc->sc_ohci.iot = aa->aa_bust; sc->sc_ohci.sc_bus.pipe_size = sizeof(struct usbd_pipe); sc->sc_ohci.sc_bus.dmatag = aa->aa_dmat; rc = bus_space_map(sc->sc_ohci.iot, UCTL_OHCI_BASE, UCTL_OHCI_SIZE, 0, &sc->sc_ohci.ioh); KASSERT(rc == 0); port_ctl = bus_space_read_8(aa->aa_octuctl_bust, aa->aa_ioh, UCTL_OHCI_CTL); port_ctl &= ~UCTL_OHCI_CTL_L2C_ADDR_MSB_MASK; port_ctl |= (1 << UCTL_OHCI_CTL_L2C_DESC_EMOD_SHIFT); port_ctl |= (1 << UCTL_OHCI_CTL_L2C_BUFF_EMOD_SHIFT); bus_space_write_8(aa->aa_octuctl_bust, aa->aa_ioh, UCTL_OHCI_CTL, port_ctl); s = splusb(); sc->sc_ohci.sc_id_vendor = 0; strlcpy(sc->sc_ohci.sc_vendor, "Octeon", sizeof(sc->sc_ohci.sc_vendor)); sc->sc_ih = octeon_intr_establish(CIU_INT_USB, IPL_USB, ohci_intr, (void *)&sc->sc_ohci, devname); KASSERT(sc->sc_ih != NULL); if ((ohci_checkrev(&sc->sc_ohci) != USBD_NORMAL_COMPLETION) || (ohci_handover(&sc->sc_ohci) != USBD_NORMAL_COMPLETION)) goto failed; /* ignore interrupts for now */ sc->sc_ohci.sc_bus.dying = 1; config_defer(self, octohci_attach_deferred); splx(s); return; failed: octeon_intr_disestablish(sc->sc_ih); bus_space_unmap(sc->sc_ohci.iot, sc->sc_ohci.ioh, UCTL_OHCI_SIZE); splx(s); return; }
/*
 * cn30xxfpa_init_regs: enable the FPA unit; when built with
 * OCTEON_ETH_DEBUG, also enable the full set of FPA error interrupts.
 */
static void
cn30xxfpa_init_regs(struct cn30xxfpa_softc *sc)
{
	/* Set the enable bit in the FPA control/status register. */
	bus_space_write_8(sc->sc_regt, sc->sc_regh, FPA_CTL_STATUS_OFFSET,
	    FPA_CTL_STATUS_ENB);

	/* XXX */
#ifdef OCTEON_ETH_DEBUG
	bus_space_write_8(sc->sc_regt, sc->sc_regh, FPA_INT_ENB_OFFSET,
	    FPA_INT_ENB_Q7_PERR | FPA_INT_ENB_Q7_COFF | FPA_INT_ENB_Q7_UND |
	    FPA_INT_ENB_Q6_PERR | FPA_INT_ENB_Q6_COFF | FPA_INT_ENB_Q6_UND |
	    FPA_INT_ENB_Q5_PERR | FPA_INT_ENB_Q5_COFF | FPA_INT_ENB_Q5_UND |
	    FPA_INT_ENB_Q4_PERR | FPA_INT_ENB_Q4_COFF | FPA_INT_ENB_Q4_UND |
	    FPA_INT_ENB_Q3_PERR | FPA_INT_ENB_Q3_COFF | FPA_INT_ENB_Q3_UND |
	    FPA_INT_ENB_Q2_PERR | FPA_INT_ENB_Q2_COFF | FPA_INT_ENB_Q2_UND |
	    FPA_INT_ENB_Q1_PERR | FPA_INT_ENB_Q1_COFF | FPA_INT_ENB_Q1_UND |
	    FPA_INT_ENB_Q0_PERR | FPA_INT_ENB_Q0_COFF | FPA_INT_ENB_Q0_UND |
	    FPA_INT_ENB_FED1_DBE | FPA_INT_ENB_FED1_SBE |
	    FPA_INT_ENB_FED0_DBE | FPA_INT_ENB_FED0_SBE);
#endif
}
/*
 * Establish an interrupt handler called from the dispatcher.
 * The interrupt function established should return zero if there was nothing
 * to serve (no int) and non-zero when an interrupt was serviced.
 *
 * Interrupts are numbered from 1 and up where 1 maps to HW int 0.
 * XXX There is no reason to keep this... except for hardcoded interrupts
 * XXX in kernel configuration files...
 *
 * Returns an opaque handle for the handler, or NULL on allocation
 * failure.
 */
void *
macebus_intr_establish(int irq, uint32_t mace_irqmask, int type, int level,
    int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
{
	struct crime_intrhand **p, *q, *ih;
	int s;

#ifdef DIAGNOSTIC
	if (irq >= CRIME_NINTS || irq < 0)
		panic("intr_establish: illegal irq %d", irq);
#endif

	ih = malloc(sizeof *ih, M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return NULL;

	ih->ih.ih_next = NULL;
	ih->ih.ih_fun = ih_fun;
	ih->ih.ih_arg = ih_arg;
	ih->ih.ih_level = level;
	ih->ih.ih_irq = irq;
	ih->mace_irqmask = mace_irqmask;
	evcount_attach(&ih->ih.ih_count, ih_what, &ih->ih.ih_irq);

	s = splhigh();

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &crime_intrhand[irq]; (q = *p) != NULL;
	    p = (struct crime_intrhand **)&q->ih.ih_next)
		;
	*p = ih;

	/* Enable the CRIME source and recompute the spl masks. */
	crime_intem |= 1UL << irq;
	macebus_intr_makemasks();

	/* enable further MACE sources if necessary */
	if (mace_irqmask != 0) {
		mace_intem |= mace_irqmask;
		bus_space_write_8(&macebus_tag, mace_h, MACE_ISA_INT_MASK,
		    mace_intem);
	}

	splx(s);	/* causes hw mask update */

	return (ih);
}
static uint64_t thunder_pem_config_reg_read(struct thunder_pem_softc *sc, int reg) { uint64_t data; /* Write to ADDR register */ bus_space_write_8(sc->reg_bst, sc->reg_bsh, PEM_CFG_RD, PEM_CFG_RD_REG_ALIGN(reg)); bus_space_barrier(sc->reg_bst, sc->reg_bsh, PEM_CFG_RD, 8, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* Read from DATA register */ data = PEM_CFG_RD_REG_DATA(bus_space_read_8(sc->reg_bst, sc->reg_bsh, PEM_CFG_RD)); return (data); }
void mec_setfilter(struct mec_softc *sc) { struct arpcom *ec = &sc->sc_ac; struct ifnet *ifp = &sc->sc_ac.ac_if; struct ether_multi *enm; struct ether_multistep step; bus_space_tag_t st = sc->sc_st; bus_space_handle_t sh = sc->sc_sh; uint64_t mchash; uint32_t control, hash; int mcnt; control = bus_space_read_8(st, sh, MEC_MAC_CONTROL); control &= ~MEC_MAC_FILTER_MASK; if (ifp->if_flags & IFF_PROMISC) { control |= MEC_MAC_FILTER_PROMISC; bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL); bus_space_write_8(st, sh, MEC_MAC_CONTROL, control); return; } mcnt = 0; mchash = 0; ETHER_FIRST_MULTI(step, ec, enm); while (enm != NULL) { if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { /* Set allmulti for a range of multicast addresses. */ control |= MEC_MAC_FILTER_ALLMULTI; bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL); bus_space_write_8(st, sh, MEC_MAC_CONTROL, control); return; } #define mec_calchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26) hash = mec_calchash(enm->enm_addrlo); mchash |= 1 << hash; mcnt++; ETHER_NEXT_MULTI(step, enm); } ifp->if_flags &= ~IFF_ALLMULTI; if (mcnt > 0) control |= MEC_MAC_FILTER_MATCHMULTI; bus_space_write_8(st, sh, MEC_MULTICAST, mchash); bus_space_write_8(st, sh, MEC_MAC_CONTROL, control); }
static void le_lebuffer_copytodesc(struct lance_softc *sc, void *fromv, int off, int len) { struct le_lebuffer_softc *lesc = (struct le_lebuffer_softc *)sc; caddr_t from = fromv; for (; len >= 8; len -= 8, off += 8, from += 8) bus_space_write_8(lesc->sc_buft, lesc->sc_bufh, off, be64dec(from)); for (; len >= 4; len -= 4, off += 4, from += 4) bus_space_write_4(lesc->sc_buft, lesc->sc_bufh, off, be32dec(from)); for (; len >= 2; len -= 2, off += 2, from += 2) bus_space_write_2(lesc->sc_buft, lesc->sc_bufh, off, be16dec(from)); if (len == 1) bus_space_write_1(lesc->sc_buft, lesc->sc_bufh, off, *from); }
/*
 * cfi_cmd - write a CFI command word.
 *
 * The offset 'off' is given for 64-bit port width and will be scaled
 * down to the actual port width of the chip.
 * The command word will be constructed out of 'val' regarding port- and
 * chip width.
 */
void
cfi_cmd(struct cfi * const cfi, bus_size_t off, uint32_t val)
{
	const bus_space_tag_t bst = cfi->cfi_bst;
	bus_space_handle_t bsh = cfi->cfi_bsh;
	uint64_t cmd;
	int cw, pw;

	/* Scale the 64-bit-width offset down to the actual port width. */
	off >>= 3 - cfi->cfi_portwidth;

	pw = 1 << cfi->cfi_portwidth;	/* port width in bytes */
	cw = 1 << cfi->cfi_chipwidth;	/* chip width in bytes */
	cmd = 0;
	/* Replicate the command value once per chip across the port. */
	while (pw > 0) {
		cmd <<= cw << 3;	/* shift by chip width in bits */
		cmd += val;
		pw -= cw;
	}

	DPRINTF(("%s: %p %x %x %" PRIx64 "\n", __func__, bst, bsh, off, cmd));

	/* Issue the access at the chip's port width. */
	switch (cfi->cfi_portwidth) {
	case 0:
		bus_space_write_1(bst, bsh, off, cmd);
		break;
	case 1:
		bus_space_write_2(bst, bsh, off, cmd);
		break;
	case 2:
		bus_space_write_4(bst, bsh, off, cmd);
		break;
#ifdef NOTYET
	case 3:
		bus_space_write_8(bst, bsh, off, cmd);
		break;
#endif
	default:
		panic("%s: bad portwidth %d bytes\n",
		    __func__, 1 << cfi->cfi_portwidth);
	}
}
/*
 * MII status-change callback: reprogram the MAC duplex and inter-packet
 * gap bits to match the currently negotiated media.
 */
void
mec_statchg(struct device *self)
{
	struct mec_softc *sc = (void *)self;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint32_t control;

	control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
	control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 |
	    MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT);

	/* Must also set IPG here for duplex stuff... */
	if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) {
		control |= MEC_MAC_FULL_DUPLEX;
	} else {
		/* Set IPG. */
		control |= MEC_MAC_IPG_DEFAULT;
	}

	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
}
/*
 * Macebus auxilary functions run each clock interrupt.
 * Drives the front-panel LEDs from the current CPU state: red for user
 * mode, green for idle, both off-bits cleared otherwise.
 * Always returns 0 so the real clock interrupt handler claims the
 * interrupt.
 */
uint32_t
macebus_aux(uint32_t hwpend, struct trapframe *cf)
{
	u_int64_t mask;

	/* Start from the current MISC register with both LEDs off. */
	mask = bus_space_read_8(&macebus_tag, mace_h, MACE_ISA_MISC_REG);
	mask |= MACE_ISA_MISC_RLED_OFF | MACE_ISA_MISC_GLED_OFF;

	/* GREEN - Idle */
	/* AMBER - System mode */
	/* RED - User mode */
	if (cf->sr & SR_KSU_USER) {
		mask &= ~MACE_ISA_MISC_RLED_OFF;
	} else if (curproc == NULL ||
	    curproc == curcpu()->ci_schedstate.spc_idleproc) {
		mask &= ~MACE_ISA_MISC_GLED_OFF;
	} else {
		mask &= ~(MACE_ISA_MISC_RLED_OFF | MACE_ISA_MISC_GLED_OFF);
	}
	bus_space_write_8(&macebus_tag, mace_h, MACE_ISA_MISC_REG, mask);

	return 0;	/* Real clock int handler will claim the interrupt. */
}
/*
 * RX interrupt handler: walk the RX ring from sc_rxptr up to the FIFO
 * position reported in 'stat', pass good frames up the stack, and
 * recycle every descriptor back into the MCL RX FIFO.
 */
void
mec_rxintr(struct mec_softc *sc, uint32_t stat)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf *m;
	struct mec_rxdesc *rxd;
	uint64_t rxstat;
	u_int len;
	int i, last;

	DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: called\n"));

	/* Disable RX interrupts while draining the ring. */
	bus_space_write_8(st, sh, MEC_RX_ALIAS, 0);

	last = (stat & MEC_INT_RX_MCL_FIFO_ALIAS) >> 8;

	/* XXX does alias count mod 32 even if 16 descs are set up? */
	last &= MEC_NRXDESC_MASK;

	/* On FIFO underflow the alias points one slot past the data. */
	if (stat & MEC_INT_RX_FIFO_UNDERFLOW)
		last = (last - 1) & MEC_NRXDESC_MASK;

	DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: rxptr %d last %d\n",
	    sc->sc_rxptr, last));

	for (i = sc->sc_rxptr; i != last; i = MEC_NEXTRX(i)) {
		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD);
		rxd = &sc->sc_rxdesc[i];
		rxstat = rxd->rxd_stat;

		DPRINTF(MEC_DEBUG_RXINTR,
		    ("mec_rxintr: rxstat = 0x%llx, rxptr = %d\n",
		    rxstat, i));
		DPRINTF(MEC_DEBUG_RXINTR, ("mec_rxintr: rxfifo = 0x%x\n",
		    (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO)));

		if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) {
			/* Status not received but FIFO counted? Drop it! */
			goto dropit;
		}

		len = rxstat & MEC_RXSTAT_LEN;

		if (len < ETHER_MIN_LEN || len > ETHER_MAX_LEN) {
			/* Invalid length packet; drop it. */
			DPRINTF(MEC_DEBUG_RXINTR,
			    ("mec_rxintr: wrong packet\n"));
 dropit:
			/* Count the error and recycle the descriptor. */
			ifp->if_ierrors++;
			rxd->rxd_stat = 0;
			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
			bus_space_write_8(st, sh, MEC_MCL_RX_FIFO,
			    MEC_CDRXADDR(sc, i));
			continue;
		}

		if (rxstat & (MEC_RXSTAT_BADPACKET | MEC_RXSTAT_LONGEVENT |
		    MEC_RXSTAT_INVALID | MEC_RXSTAT_CRCERROR |
		    MEC_RXSTAT_VIOLATION)) {
			printf("%s: mec_rxintr: status = 0x%llx\n",
			    sc->sc_dev.dv_xname, rxstat);
			goto dropit;
		}

		/*
		 * Now allocate an mbuf (and possibly a cluster) to hold
		 * the received packet.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			printf("%s: unable to allocate RX mbuf\n",
			    sc->sc_dev.dv_xname);
			goto dropit;
		}

		if (len > (MHLEN - ETHER_ALIGN)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: unable to allocate RX cluster\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				m = NULL;
				goto dropit;
			}
		}

		/*
		 * Note MEC chip seems to insert 2 byte padding at the start of
		 * RX buffer, but we copy whole buffer to avoid unaligned copy.
		 */
		MEC_RXBUFSYNC(sc, i, len + ETHER_ALIGN, BUS_DMASYNC_POSTREAD);
		memcpy(mtod(m, caddr_t), rxd->rxd_buf,
		    ETHER_ALIGN + len - ETHER_CRC_LEN);
		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
		/* Skip the 2-byte hardware padding. */
		m->m_data += ETHER_ALIGN;

		/* Put RX buffer into FIFO again. */
		rxd->rxd_stat = 0;
		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;
		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it is for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* Pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* Update RX pointer. */
	sc->sc_rxptr = i;

	/* Re-enable the RX threshold interrupt. */
	bus_space_write_8(st, sh, MEC_RX_ALIAS,
	    (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
	    MEC_DMA_RX_INT_ENABLE);
}
/*
 * mec_start: ifnet start routine.
 * Dequeue packets and place them on the MEC TX ring.  Frames shorter
 * than ETHER_PAD_LEN are copied (and zero-padded) entirely into the
 * in-descriptor buffer; longer frames are DMA-mapped and attached via
 * the descriptor's first concatenate pointer, with any 8-byte-unaligned
 * head copied into the descriptor buffer.
 */
void
mec_start(struct ifnet *ifp)
{
	struct mec_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct mec_txdesc *txd;
	struct mec_txsoft *txs;
	bus_dmamap_t dmamap;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint64_t txdaddr;
	int error, firsttx, nexttx, opending;
	int len, bufoff, buflen, unaligned, txdlen;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the first transmit descriptor.
	 */
	opending = sc->sc_txpending;
	firsttx = MEC_NEXTTX(sc->sc_txlast);

	DPRINTF(MEC_DEBUG_START,
	    ("mec_start: opending = %d, firsttx = %d\n", opending, firsttx));

	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* Stop when the TX ring is full. */
		if (sc->sc_txpending == MEC_NTXDESC) {
			break;
		}

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = MEC_NEXTTX(sc->sc_txlast);
		txd = &sc->sc_txdesc[nexttx];
		txs = &sc->sc_txsoft[nexttx];

		buflen = 0;
		bufoff = 0;
		txdaddr = 0; /* XXX gcc */
		txdlen = 0; /* XXX gcc */

		len = m0->m_pkthdr.len;

		DPRINTF(MEC_DEBUG_START,
		    ("mec_start: len = %d, nexttx = %d\n", len, nexttx));

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (len < ETHER_PAD_LEN) {
			/*
			 * I don't know if MEC chip does auto padding,
			 * so if the packet is small enough,
			 * just copy it to the buffer in txdesc.
			 * Maybe this is the simple way.
			 */
			DPRINTF(MEC_DEBUG_START, ("mec_start: short packet\n"));

			bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
			m_copydata(m0, 0, m0->m_pkthdr.len,
			    txd->txd_buf + bufoff);
			/* Zero-pad the frame up to the minimum length. */
			memset(txd->txd_buf + bufoff + len,
			    0, ETHER_PAD_LEN - len);
			len = buflen = ETHER_PAD_LEN;

			txs->txs_flags = MEC_TXS_TXDBUF | buflen;
		} else {
			/*
			 * If the packet won't fit the buffer in txdesc,
			 * we have to use concatenate pointer to handle it.
			 * While MEC can handle up to three segments to
			 * concatenate, MEC requires that both the second and
			 * third segments have to be 8 byte aligned.
			 * Since it's unlikely for mbuf clusters, we use
			 * only the first concatenate pointer.  If the packet
			 * doesn't fit in one DMA segment, allocate new mbuf
			 * and copy the packet to it.
			 *
			 * Besides, if the start address of the first segments
			 * is not 8 byte aligned, such part have to be copied
			 * to the txdesc buffer. (XXX see below comments)
			 */
			DPRINTF(MEC_DEBUG_START, ("mec_start: long packet\n"));

			dmamap = txs->txs_dmamap;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
			    BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				struct mbuf *m;

				DPRINTF(MEC_DEBUG_START,
				    ("mec_start: re-allocating mbuf\n"));
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					printf("%s: unable to allocate "
					    "TX mbuf\n", sc->sc_dev.dv_xname);
					break;
				}
				if (len > (MHLEN - ETHER_ALIGN)) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						printf("%s: unable to allocate "
						    "TX cluster\n",
						    sc->sc_dev.dv_xname);
						m_freem(m);
						break;
					}
				}
				/*
				 * Each packet has the Ethernet header, so
				 * in many cases the header isn't 4-byte aligned
				 * and data after the header is 4-byte aligned.
				 * Thus adding 2-byte offset before copying to
				 * new mbuf avoids unaligned copy and this may
				 * improve performance.
				 * As noted above, unaligned part has to be
				 * copied to txdesc buffer so this may cause
				 * extra copy ops, but for now MEC always
				 * requires some data in txdesc buffer,
				 * so we always have to copy some data anyway.
				 */
				m->m_data += ETHER_ALIGN;
				m_copydata(m0, 0, len, mtod(m, caddr_t));
				m->m_pkthdr.len = m->m_len = len;
				m_freem(m0);
				m0 = m;
				error = bus_dmamap_load_mbuf(sc->sc_dmat,
				    dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
				if (error) {
					printf("%s: unable to load TX buffer, "
					    "error = %d\n",
					    sc->sc_dev.dv_xname, error);
					m_freem(m);
					break;
				}
			}

			/* Handle unaligned part. */
			txdaddr = MEC_TXD_ROUNDUP(dmamap->dm_segs[0].ds_addr);
			txs->txs_flags = MEC_TXS_TXDPTR1;
			unaligned =
			    dmamap->dm_segs[0].ds_addr & (MEC_TXD_ALIGN - 1);
			DPRINTF(MEC_DEBUG_START,
			    ("mec_start: ds_addr = 0x%x, unaligned = %d\n",
			    (u_int)dmamap->dm_segs[0].ds_addr, unaligned));
			if (unaligned != 0) {
				/* Copy the head up to the 8-byte boundary. */
				buflen = MEC_TXD_ALIGN - unaligned;
				bufoff = MEC_TXD_BUFSTART(buflen);
				DPRINTF(MEC_DEBUG_START,
				    ("mec_start: unaligned, "
				    "buflen = %d, bufoff = %d\n",
				    buflen, bufoff));
				memcpy(txd->txd_buf + bufoff,
				    mtod(m0, caddr_t), buflen);
				txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
			}
#if 1
			else {
				/*
				 * XXX needs hardware info XXX
				 * It seems MEC always requires some data
				 * in txd_buf[] even if buffer is
				 * 8-byte aligned otherwise DMA abort error
				 * occurs later...
				 */
				buflen = MEC_TXD_ALIGN;
				bufoff = MEC_TXD_BUFSTART(buflen);
				memcpy(txd->txd_buf + bufoff,
				    mtod(m0, caddr_t), buflen);
				DPRINTF(MEC_DEBUG_START,
				    ("mec_start: aligned, "
				    "buflen = %d, bufoff = %d\n",
				    buflen, bufoff));
				txs->txs_flags |= MEC_TXS_TXDBUF | buflen;
				txdaddr += MEC_TXD_ALIGN;
			}
#endif
			/* Remainder goes through the concatenate pointer. */
			txdlen = len - buflen;
			DPRINTF(MEC_DEBUG_START,
			    ("mec_start: txdaddr = 0x%llx, txdlen = %d\n",
			    txdaddr, txdlen));

			/*
			 * Sync the DMA map for TX mbuf.
			 *
			 * XXX unaligned part doesn't have to be sync'ed,
			 * but it's harmless...
			 */
			bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif

		/*
		 * Setup the transmit descriptor.
		 */

		/* TXINT bit will be set later on the last packet. */
		txd->txd_cmd = (len - 1);
		/* But also set TXINT bit on a half of TXDESC. */
		if (sc->sc_txpending == (MEC_NTXDESC / 2))
			txd->txd_cmd |= MEC_TXCMD_TXINT;

		if (txs->txs_flags & MEC_TXS_TXDBUF)
			txd->txd_cmd |= TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen);
		if (txs->txs_flags & MEC_TXS_TXDPTR1) {
			txd->txd_cmd |= MEC_TXCMD_PTR1;
			txd->txd_ptr[0] = TXPTR_LEN(txdlen - 1) | txdaddr;
			/*
			 * Store a pointer to the packet so we can
			 * free it later.
			 */
			txs->txs_mbuf = m0;
		} else {
			txd->txd_ptr[0] = 0;
			/*
			 * In this case all data are copied to buffer in txdesc,
			 * we can free TX mbuf here.
			 */
			m_freem(m0);
		}

		DPRINTF(MEC_DEBUG_START,
		    ("mec_start: txd_cmd = 0x%llx, txd_ptr = 0x%llx\n",
		    txd->txd_cmd, txd->txd_ptr[0]));
		DPRINTF(MEC_DEBUG_START,
		    ("mec_start: len = %d (0x%04x), buflen = %d (0x%02x)\n",
		    len, len, buflen, buflen));

		/* Sync TX descriptor. */
		MEC_TXDESCSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the TX pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;
	}

	if (sc->sc_txpending == MEC_NTXDESC) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * Cause a TX interrupt to happen on the last packet
		 * we enqueued.
		 */
		sc->sc_txdesc[sc->sc_txlast].txd_cmd |= MEC_TXCMD_TXINT;
		MEC_TXCMDSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start TX. */
		bus_space_write_8(st, sh, MEC_TX_RING_PTR,
		    MEC_NEXTTX(sc->sc_txlast));

		/*
		 * If the transmitter was idle,
		 * reset the txdirty pointer and re-enable TX interrupt.
		 */
		if (opending == 0) {
			sc->sc_txdirty = firsttx;
			bus_space_write_8(st, sh, MEC_TX_ALIAS,
			    MEC_TX_ALIAS_INT_ENABLE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
/*
 * TX interrupt handler: reap completed transmit descriptors between
 * sc_txdirty and the ring position reported in 'stat', free their
 * mbufs and DMA maps, account collisions and errors, and clear the
 * watchdog once the ring drains.
 */
void
mec_txintr(struct mec_softc *sc, uint32_t stat)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mec_txdesc *txd;
	struct mec_txsoft *txs;
	bus_dmamap_t dmamap;
	uint64_t txstat;
	int i, last;
	u_int col;

	ifp->if_flags &= ~IFF_OACTIVE;

	DPRINTF(MEC_DEBUG_TXINTR, ("mec_txintr: called\n"));

	/* Disable TX interrupts while reaping. */
	bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_TX_ALIAS, 0);
	last = (stat & MEC_INT_TX_RING_BUFFER_ALIAS) >> 16;

	DPRINTF(MEC_DEBUG_TXINTR, ("mec_txintr: dirty %d last %d\n",
	    sc->sc_txdirty, last));
	for (i = sc->sc_txdirty; i != last && sc->sc_txpending != 0;
	    i = MEC_NEXTTX(i), sc->sc_txpending--) {
		txd = &sc->sc_txdesc[i];

		MEC_TXDESCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = txd->txd_stat;
		DPRINTF(MEC_DEBUG_TXINTR,
		    ("mec_txintr: dirty = %d, txstat = 0x%llx\n",
		    i, txstat));
		if ((txstat & MEC_TXSTAT_SENT) == 0) {
			/* Descriptor not done yet; stop reaping here. */
			MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if ((txstat & MEC_TXSTAT_SUCCESS) == 0) {
			printf("%s: TX error: txstat = 0x%llx\n",
			    sc->sc_dev.dv_xname, txstat);
			ifp->if_oerrors++;
			continue;
		}

		/* Release the DMA map and mbuf of pointer-based frames. */
		txs = &sc->sc_txsoft[i];
		if ((txs->txs_flags & MEC_TXS_TXDPTR1) != 0) {
			dmamap = txs->txs_dmamap;
			bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
			    dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT;
		ifp->if_collisions += col;
		ifp->if_opackets++;
	}

	/* Update the dirty TX buffer pointer. */
	sc->sc_txdirty = i;
	DPRINTF(MEC_DEBUG_INTR,
	    ("mec_txintr: sc_txdirty = %2d, sc_txpending = %2d\n",
	    sc->sc_txdirty, sc->sc_txpending));

	/* Cancel the watchdog timer if there are no pending TX packets. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;
	else if (!(stat & MEC_INT_TX_EMPTY))
		bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_TX_ALIAS,
		    MEC_TX_ALIAS_INT_ENABLE);
}
/* * Interrupt dispatcher. */ uint32_t obio_iointr(uint32_t hwpend, struct trap_frame *frame) { struct cpu_info *ci = curcpu(); int cpuid = cpu_number(); uint64_t imr, isr, mask; int ipl; int bit; struct intrhand *ih; int rc; uint64_t sum0 = CIU_IP2_SUM0(cpuid); uint64_t en0 = CIU_IP2_EN0(cpuid); isr = bus_space_read_8(&obio_tag, obio_h, sum0); imr = bus_space_read_8(&obio_tag, obio_h, en0); bit = 63; isr &= imr; if (isr == 0) return 0; /* not for us */ /* * Mask all pending interrupts. */ bus_space_write_8(&obio_tag, obio_h, en0, imr & ~isr); /* * If interrupts are spl-masked, mask them and wait for splx() * to reenable them when necessary. */ if ((mask = isr & obio_imask[cpuid][frame->ipl]) != 0) { isr &= ~mask; imr &= ~mask; } /* * Now process allowed interrupts. */ if (isr != 0) { int lvl, bitno; uint64_t tmpisr; __asm__ (".set noreorder\n"); ipl = ci->ci_ipl; __asm__ ("sync\n\t.set reorder\n"); /* Service higher level interrupts first */ for (lvl = NIPLS - 1; lvl != IPL_NONE; lvl--) { tmpisr = isr & (obio_imask[cpuid][lvl] ^ obio_imask[cpuid][lvl - 1]); if (tmpisr == 0) continue; for (bitno = bit, mask = 1UL << bitno; mask != 0; bitno--, mask >>= 1) { if ((tmpisr & mask) == 0) continue; rc = 0; for (ih = (struct intrhand *)obio_intrhand[bitno]; ih != NULL; ih = ih->ih_next) { #ifdef MULTIPROCESSOR u_int32_t sr; #endif splraise(ih->ih_level); #ifdef MULTIPROCESSOR if (ih->ih_level < IPL_IPI) { sr = getsr(); ENABLEIPI(); if (ipl < IPL_SCHED) __mp_lock(&kernel_lock); } #endif if ((*ih->ih_fun)(ih->ih_arg) != 0) { rc = 1; atomic_add_uint64(&ih->ih_count.ec_count, 1); } #ifdef MULTIPROCESSOR if (ih->ih_level < IPL_IPI) { if (ipl < IPL_SCHED) __mp_unlock(&kernel_lock); setsr(sr); } #endif __asm__ (".set noreorder\n"); ci->ci_ipl = ipl; __asm__ ("sync\n\t.set reorder\n"); } if (rc == 0) printf("spurious crime interrupt %d\n", bitno); isr ^= mask; if ((tmpisr ^= mask) == 0) break; } } /* * Reenable interrupts which have been serviced. 
*/ bus_space_write_8(&obio_tag, obio_h, en0, imr); }
/*
 * MEC interrupt handler: loop acknowledging and dispatching RX and TX
 * interrupt causes until the status register reads clear, then restart
 * transmission if any TX descriptors were reaped.
 * Returns nonzero if any interrupt was serviced.
 */
int
mec_intr(void *arg)
{
	struct mec_softc *sc = arg;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t statreg, statack, dmac;
	int handled, sent;

	DPRINTF(MEC_DEBUG_INTR, ("mec_intr: called\n"));

	handled = sent = 0;

	for (;;) {
		statreg = bus_space_read_8(st, sh, MEC_INT_STATUS);

		DPRINTF(MEC_DEBUG_INTR,
		    ("mec_intr: INT_STAT = 0x%x\n", statreg));

		statack = statreg & MEC_INT_STATUS_MASK;
		if (statack == 0)
			break;
		/* Acknowledge the causes we are about to service. */
		bus_space_write_8(st, sh, MEC_INT_STATUS, statack);

		handled = 1;

		if (statack &
		    (MEC_INT_RX_THRESHOLD | MEC_INT_RX_FIFO_UNDERFLOW)) {
			mec_rxintr(sc, statreg);
		}

		dmac = bus_space_read_8(st, sh, MEC_DMA_CONTROL);
		DPRINTF(MEC_DEBUG_INTR,
		    ("mec_intr: DMA_CONT = 0x%x\n", dmac));

		if (statack &
		    (MEC_INT_TX_EMPTY | MEC_INT_TX_PACKET_SENT |
		    MEC_INT_TX_ABORT)) {
			mec_txintr(sc, statreg);
			sent = 1;
		}

		/* Report fatal-looking conditions. */
		if (statack &
		    (MEC_INT_TX_LINK_FAIL | MEC_INT_TX_MEM_ERROR |
		    MEC_INT_TX_ABORT | MEC_INT_RX_DMA_UNDERFLOW)) {
			printf("%s: mec_intr: interrupt status = 0x%x\n",
			    sc->sc_dev.dv_xname, statreg);
		}
	}

	if (sent) {
		/* Try to get more packets going. */
		mec_start(ifp);
	}

	return handled;
}