/*
 * pci_1000a_pickintr:
 *	Set up PCI interrupt dispatch for the DEC 1000A: map the two
 *	"mystery ICU" register pairs, install the chipset interrupt
 *	callbacks, and allocate per-IRQ shared-interrupt state plus an
 *	event counter for each line.
 *
 *	Fix: use snprintf() bounded to the 8-byte name buffer handed out
 *	by alpha_shared_intr_string() (the size passed to
 *	alpha_shared_intr_alloc() below) instead of unbounded sprintf(),
 *	matching sio_intr_setup().
 */
void
pci_1000a_pickintr(void *core, bus_space_tag_t iot, bus_space_tag_t memt,
    pci_chipset_tag_t pc)
{
	char *cp;
	int i;

	mystery_icu_iot = iot;
	pc_tag = pc;

	/* Map both interrupt-mask register pairs; fatal if either fails. */
	if (bus_space_map(iot, 0x54a, 2, 0, mystery_icu_ioh + 0)
	    || bus_space_map(iot, 0x54c, 2, 0, mystery_icu_ioh + 1))
		panic("pci_1000a_pickintr");

	pc->pc_intr_v = core;
	pc->pc_intr_map = dec_1000a_intr_map;
	pc->pc_intr_string = dec_1000a_intr_string;
	pc->pc_intr_evcnt = dec_1000a_intr_evcnt;
	pc->pc_intr_establish = dec_1000a_intr_establish;
	pc->pc_intr_disestablish = dec_1000a_intr_disestablish;

	/* Not supported on this platform. */
	pc->pc_pciide_compat_intr_establish = NULL;

	/* Second argument is the size of each per-IRQ name buffer. */
	dec_1000a_pci_intr = alpha_shared_intr_alloc(PCI_NIRQ, 8);
	for (i = 0; i < PCI_NIRQ; i++) {
		alpha_shared_intr_set_maxstrays(dec_1000a_pci_intr, i,
		    PCI_STRAY_MAX);

		cp = alpha_shared_intr_string(dec_1000a_pci_intr, i);
		/* Bounded to the 8-byte buffer allocated above. */
		snprintf(cp, 8, "irq %d", i);
		evcnt_attach_dynamic(alpha_shared_intr_evcnt(
		    dec_1000a_pci_intr, i), EVCNT_TYPE_INTR, NULL,
		    "dec_1000a", cp);
	}

	pci_1000a_imi();
#if NSIO > 0 || NPCEB > 0
	sio_intr_setup(pc, iot);
#endif
}
/*
 * interrupt_init:
 *	Initialize the PlayStation 2 interrupt subsystem: attach the
 *	static event counters for the hard interrupt sources, set up one
 *	queue/lock/event-counter triple per software interrupt level,
 *	hook the legacy softnet handler, and program the timer channels
 *	used to deliver software interrupts.
 */
void
interrupt_init(void)
{
	static const char *softintr_names[] = IPL_SOFTNAMES;
	struct playstation2_soft_intr *asi;
	int i;

	/* Statically-allocated counters for the hard interrupt sources. */
	evcnt_attach_static(&_playstation2_evcnt.clock);
	evcnt_attach_static(&_playstation2_evcnt.sbus);
	evcnt_attach_static(&_playstation2_evcnt.dmac);

	/* One pending-handler queue per software interrupt level. */
	for (i = 0; i < _IPL_NSOFT; i++) {
		asi = &playstation2_soft_intrs[i];
		TAILQ_INIT(&asi->softintr_q);
		asi->softintr_ipl = IPL_SOFT + i;
		simple_lock_init(&asi->softintr_slock);
		evcnt_attach_dynamic(&asi->softintr_evcnt, EVCNT_TYPE_INTR,
		    NULL, "soft", softintr_names[i]);
	}

	/* XXX Establish legacy soft interrupt handlers. */
	softnet_intrhand = softintr_establish(IPL_SOFTNET,
	    (void (*)(void *))netintr, NULL);

	KDASSERT(softnet_intrhand != NULL);

	/* install software interrupt handler */
	intc_intr_establish(I_CH10_TIMER1, IPL_SOFT, timer1_intr, 0);
	intc_intr_establish(I_CH11_TIMER2, IPL_SOFTCLOCK, timer2_intr, 0);
	/* IPL_SOFTNET and IPL_SOFTSERIAL are shared interrupt. */
	intc_intr_establish(I_CH12_TIMER3, IPL_SOFTNET, timer3_intr, 0);

	/* enable SIF BIOS access */
	md_imask = ~D_STAT_CIM_BIT(D_CH5_SIF0);
	/*
	 * NOTE(review): magic CP0 status value — presumably enables the
	 * interrupt bits this port needs; confirm against the R5900 manual.
	 */
	mips_cp0_status_write(0x00010801);
}
/*
 * necpb_intr_establish:
 *	Register an interrupt handler for PCI interrupt line `ih' (0..3).
 *	Handlers sharing a line are kept on a singly-linked chain rooted
 *	in necpb_inttbl[]; the first handler installed on a line also
 *	unmasks that line in the RD94 PCI interrupt-mask register.
 *	Panics on a bogus handle or allocation failure (attach-time only).
 *	Returns the new handler record as an opaque cookie.
 *	Note: `level' is currently unused.
 */
static void *
necpb_intr_establish(pci_chipset_tag_t pc, pci_intr_handle_t ih, int level,
    int (*func)(void *), void *arg)
{
	struct necpb_intrhand *n, *p;
	uint32_t mask;

	if (ih >= 4)
		panic("%s: bogus handle", __func__);

	n = malloc(sizeof(struct necpb_intrhand), M_DEVBUF, M_NOWAIT);
	if (n == NULL)
		panic("%s: can't malloc interrupt handle", __func__);

	n->ih_func = func;
	n->ih_arg = arg;
	n->ih_next = NULL;
	n->ih_intn = ih;
	/* Keep a private copy of the name; evcnt references it by pointer. */
	strlcpy(n->ih_evname, necpb_intr_string(pc, ih),
	    sizeof(n->ih_evname));
	evcnt_attach_dynamic(&n->ih_evcnt, EVCNT_TYPE_INTR, NULL, "necpb",
	    n->ih_evname);

	if (necpb_inttbl[ih] == NULL) {
		/* First handler on this line: install it and unmask. */
		necpb_inttbl[ih] = n;
		mask = in32(RD94_SYS_PCI_INTMASK);
		mask |= 1 << ih;
		out32(RD94_SYS_PCI_INTMASK, mask);
	} else {
		/* Line already active: append to the end of the chain. */
		p = necpb_inttbl[ih];
		while (p->ih_next != NULL)
			p = p->ih_next;
		p->ih_next = n;
	}

	return n;
}
/*
 * Attach a found zs.
 *
 * Match slave number to zs unit number, so that misconfiguration will
 * not set up the keyboard as ttya, etc.
 */
static void
zs_hpc_attach(device_t parent, device_t self, void *aux)
{
	struct zsc_softc *zsc = device_private(self);
	struct hpc_attach_args *haa = aux;
	struct zsc_attach_args zsc_args;
	struct zs_chanstate *cs;
	struct zs_channel *ch;
	int zs_unit, channel, err, s;
	const char *promconsdev;

	/* PROM console device name, used below for NORESET detection. */
	promconsdev = arcbios_GetEnvironmentVariable("ConsoleOut");

	zsc->zsc_dev = self;
	zsc->zsc_bustag = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff, 0x10, &zsc->zsc_base)) != 0) {
		aprint_error(": unable to map 85c30 registers, error = %d\n",
		    err);
		return;
	}

	zs_unit = device_unit(self);
	aprint_normal("\n");

	/*
	 * Initialize software state for each channel.
	 *
	 * Done in reverse order of channels since the first serial port
	 * is actually attached to the *second* channel, and vice versa.
	 * Doing it this way should force a 'zstty*' to attach zstty0 to
	 * channel 1 and zstty1 to channel 0.  They couldn't have wired
	 * it up in a more sensible fashion, could they?
	 */
	for (channel = 1; channel >= 0; channel--) {
		zsc_args.channel = channel;
		ch = &zsc->zsc_cs_store[channel];
		cs = zsc->zsc_cs[channel] = (struct zs_chanstate *)ch;

		zs_lock_init(cs);
		cs->cs_reg_csr = NULL;
		cs->cs_reg_data = NULL;
		cs->cs_channel = channel;
		cs->cs_private = NULL;
		cs->cs_ops = &zsops_null;
		cs->cs_brg_clk = PCLK / 16;

		if (bus_space_subregion(zsc->zsc_bustag, zsc->zsc_base,
		    zs_chan_offset[channel], sizeof(struct zschan),
		    &ch->cs_regs) != 0) {
			aprint_error_dev(self, "cannot map regs\n");
			return;
		}
		ch->cs_bustag = zsc->zsc_bustag;

		/* Seed current and pending register shadows. */
		memcpy(cs->cs_creg, zs_init_reg, 16);
		memcpy(cs->cs_preg, zs_init_reg, 16);

		zsc_args.hwflags = 0;
		zsc_args.consdev = NULL;

		if (zs_consunit == -1 && zs_conschan == -1) {
			/*
			 * If this channel is being used by the PROM console,
			 * pass the generic zs driver a 'no reset' flag so the
			 * channel gets left in the appropriate state after
			 * attach.
			 *
			 * Note: the channel mappings are swapped.
			 */
			if (promconsdev != NULL &&
			    strlen(promconsdev) == 9 &&
			    strncmp(promconsdev, "serial", 6) == 0 &&
			    (promconsdev[7] == '0' || promconsdev[7] == '1')) {
				if (promconsdev[7] == '1' && channel == 0)
					zsc_args.hwflags |= ZS_HWFLAG_NORESET;
				else if (promconsdev[7] == '0' && channel == 1)
					zsc_args.hwflags |= ZS_HWFLAG_NORESET;
			}
		}

		/* If console, don't stomp speed, let zstty know */
		if (zs_unit == zs_consunit && channel == zs_conschan) {
			zsc_args.consdev = &zs_cn;
			zsc_args.hwflags = ZS_HWFLAG_CONSOLE;

			cs->cs_defspeed = zs_get_speed(cs);
		} else
			cs->cs_defspeed = zs_defspeed;

		cs->cs_defcflag = zs_def_cflag;

		/* Make these correspond to cs_defcflag (-crtscts) */
		cs->cs_rr0_dcd = ZSRR0_DCD;
		cs->cs_rr0_cts = 0;
		cs->cs_wr5_dtr = ZSWR5_DTR | ZSWR5_RTS;
		cs->cs_wr5_rts = 0;

		/*
		 * Clear the master interrupt enable.
		 * The INTENA is common to both channels,
		 * so just do it on the A channel.
		 */
		if (channel == 0) {
			zs_write_reg(cs, 9, 0);
		}

		/*
		 * Look for a child driver for this channel.
		 * The child attach will setup the hardware.
		 */
		if (!config_found(self, (void *)&zsc_args, zs_print)) {
			/* No sub-driver.  Just reset it. */
			uint8_t reset = (channel == 0) ?
			    ZSWR9_A_RESET : ZSWR9_B_RESET;

			s = splhigh();
			zs_write_reg(cs, 9, reset);
			splx(s);
		}
	}

	zsc->sc_si = softint_establish(SOFTINT_SERIAL, zssoft, zsc);
	cpu_intr_establish(haa->ha_irq, IPL_TTY, zshard, NULL);

	evcnt_attach_dynamic(&zsc->zsc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "intr");

	/*
	 * Set the master interrupt enable and interrupt vector.
	 * (common to both channels, do it on A)
	 */
	cs = zsc->zsc_cs[0];
	s = splhigh();
	/* interrupt vector */
	zs_write_reg(cs, 2, zs_init_reg[2]);
	/* master interrupt control (enable) */
	zs_write_reg(cs, 9, zs_init_reg[9]);
	splx(s);
}
/*
 * csa_attach:
 *	Attach a Cumana SCSI podule based on the NCR5380.  The chip is
 *	accessed by 8-bit PIO only (no DMA hooks are provided), with the
 *	NCR5380 registers mapped at 4-byte strides in the podule's slow
 *	space.  An optional "<dev>.hostid" boot argument overrides the
 *	default host SCSI id of 7.
 */
void
csa_attach(device_t parent, device_t self, void *aux)
{
	struct csa_softc *sc = device_private(self);
	struct ncr5380_softc *ncr_sc = &sc->sc_ncr5380;
	struct podule_attach_args *pa = aux;
	uint8_t *iobase;
	char hi_option[sizeof(self->dv_xname) + 8];

	ncr_sc->sc_dev = self;

	/* Note the podule number and validate */
	if (pa->pa_podule_number == -1)
		panic("Podule has disappeared !");

	sc->sc_podule_number = pa->pa_podule_number;
	sc->sc_podule = pa->pa_podule;
	podules[sc->sc_podule_number].attached = 1;

	ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
	ncr_sc->sc_min_dma_len = 0;
	ncr_sc->sc_no_disconnect = 0x00;
	ncr_sc->sc_parity_disable = 0x00;

	/* No DMA support on this board: all DMA hooks stay NULL. */
	ncr_sc->sc_dma_alloc = NULL;
	ncr_sc->sc_dma_free = NULL;
	ncr_sc->sc_dma_poll = NULL;
	ncr_sc->sc_dma_setup = NULL;
	ncr_sc->sc_dma_start = NULL;
	ncr_sc->sc_dma_eop = NULL;
	ncr_sc->sc_dma_stop = NULL;
	ncr_sc->sc_intr_on = NULL;
	ncr_sc->sc_intr_off = NULL;

	/* NCR5380 registers sit 4 bytes apart in slow space. */
	iobase = (uint8_t *)pa->pa_podule->slow_base + CSA_NCR5380_OFFSET;
	ncr_sc->sci_r0 = iobase + 0;
	ncr_sc->sci_r1 = iobase + 4;
	ncr_sc->sci_r2 = iobase + 8;
	ncr_sc->sci_r3 = iobase + 12;
	ncr_sc->sci_r4 = iobase + 16;
	ncr_sc->sci_r5 = iobase + 20;
	ncr_sc->sci_r6 = iobase + 24;
	ncr_sc->sci_r7 = iobase + 28;

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	sc->sc_ctrl = (uint8_t *)pa->pa_podule->slow_base + CSA_CTRL_OFFSET;
	sc->sc_status = (uint8_t *)pa->pa_podule->slow_base + CSA_STAT_OFFSET;
	sc->sc_data = (uint8_t *)pa->pa_podule->slow_base + CSA_DATA_OFFSET;

	ncr_sc->sc_pio_in = ncr5380_pio_in;
	ncr_sc->sc_pio_out = ncr5380_pio_out;

	/* Provide an override for the host id */
	ncr_sc->sc_channel.chan_id = 7;
	/* Bounded: "%s.hostid" adds 7 chars, hi_option has 8 spare bytes. */
	sprintf(hi_option, "%s.hostid", device_xname(self));
	(void)get_bootconf_option(boot_args, hi_option,
	    BOOTOPT_TYPE_INT, &ncr_sc->sc_channel.chan_id);
	ncr_sc->sc_adapter.adapt_minphys = minphys;

	aprint_normal(": host=%d, using 8 bit PIO",
	    ncr_sc->sc_channel.chan_id);

	sc->sc_irqstatus = (uint8_t *)pa->pa_podule->slow_base +
	    CSA_INTR_OFFSET;
	sc->sc_irqmask = CSA_INTR_MASK;

	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "intr");
	sc->sc_ih = podulebus_irq_establish(pa->pa_ih, IPL_BIO, csa_intr,
	    sc, &sc->sc_intrcnt);
	if (sc->sc_ih == NULL)
		/* Fall back to polling when the IRQ can't be claimed. */
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;

	if (ncr_sc->sc_flags & NCR5380_FORCE_POLLING)
		aprint_normal(", polling");
	aprint_normal("\n");
	*sc->sc_ctrl = 0;

	ncr5380_attach(ncr_sc);
}
static void sq_attach(struct device *parent, struct device *self, void *aux) { int i, err; char* macaddr; struct sq_softc *sc = (void *)self; struct hpc_attach_args *haa = aux; struct ifnet *ifp = &sc->sc_ethercom.ec_if; sc->sc_hpct = haa->ha_st; if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh, haa->ha_dmaoff, HPC_ENET_REGS_SIZE, &sc->sc_hpch)) != 0) { printf(": unable to map HPC DMA registers, error = %d\n", err); goto fail_0; } sc->sc_regt = haa->ha_st; if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh, haa->ha_devoff, HPC_ENET_DEVREGS_SIZE, &sc->sc_regh)) != 0) { printf(": unable to map Seeq registers, error = %d\n", err); goto fail_0; } sc->sc_dmat = haa->ha_dmat; if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control), PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) { printf(": unable to allocate control data, error = %d\n", err); goto fail_0; } if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg, sizeof(struct sq_control), (caddr_t *)&sc->sc_control, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { printf(": unable to map control data, error = %d\n", err); goto fail_1; } if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control), 1, sizeof(struct sq_control), PAGE_SIZE, BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) { printf(": unable to create DMA map for control data, error " "= %d\n", err); goto fail_2; } if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control, sizeof(struct sq_control), NULL, BUS_DMA_NOWAIT)) != 0) { printf(": unable to load DMA map for control data, error " "= %d\n", err); goto fail_3; } memset(sc->sc_control, 0, sizeof(struct sq_control)); /* Create transmit buffer DMA maps */ for (i = 0; i < SQ_NTXDESC; i++) { if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_txmap[i])) != 0) { printf(": unable to create tx DMA map %d, error = %d\n", i, err); goto fail_4; } } /* Create transmit buffer DMA maps */ for (i = 0; i < 
SQ_NRXDESC; i++) { if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rxmap[i])) != 0) { printf(": unable to create rx DMA map %d, error = %d\n", i, err); goto fail_5; } } /* Pre-allocate the receive buffers. */ for (i = 0; i < SQ_NRXDESC; i++) { if ((err = sq_add_rxbuf(sc, i)) != 0) { printf(": unable to allocate or map rx buffer %d\n," " error = %d\n", i, err); goto fail_6; } } if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) { printf(": unable to get MAC address!\n"); goto fail_6; } evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL, self->dv_xname, "intr"); if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) { printf(": unable to establish interrupt!\n"); goto fail_6; } /* Reset the chip to a known state. */ sq_reset(sc); /* * Determine if we're an 8003 or 80c03 by setting the first * MAC address register to non-zero, and then reading it back. * If it's zero, we have an 80c03, because we will have read * the TxCollLSB register. */ bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5); if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0) sc->sc_type = SQ_TYPE_80C03; else sc->sc_type = SQ_TYPE_8003; bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00); printf(": SGI Seeq %s\n", sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003"); enaddr_aton(macaddr, sc->sc_enaddr); printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname, ether_sprintf(sc->sc_enaddr)); strcpy(ifp->if_xname, sc->sc_dev.dv_xname); ifp->if_softc = sc; ifp->if_mtu = ETHERMTU; ifp->if_init = sq_init; ifp->if_stop = sq_stop; ifp->if_start = sq_start; ifp->if_ioctl = sq_ioctl; ifp->if_watchdog = sq_watchdog; ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST; IFQ_SET_READY(&ifp->if_snd); if_attach(ifp); ether_ifattach(ifp, sc->sc_enaddr); memset(&sq_trace, 0, sizeof(sq_trace)); /* Done! */ return; /* * Free any resources we've allocated during the failed attach * attempt. 
Do this in reverse order and fall through. */ fail_6: for (i = 0; i < SQ_NRXDESC; i++) { if (sc->sc_rxmbuf[i] != NULL) { bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]); m_freem(sc->sc_rxmbuf[i]); } } fail_5: for (i = 0; i < SQ_NRXDESC; i++) { if (sc->sc_rxmap[i] != NULL) bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]); } fail_4: for (i = 0; i < SQ_NTXDESC; i++) { if (sc->sc_txmap[i] != NULL) bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]); } bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap); fail_3: bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap); fail_2: bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control, sizeof(struct sq_control)); fail_1: bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg); fail_0: return; }
/*
 * ascattach:
 *	Attach an NCR 53C94 SCSI controller with RAMBO DMA: map the chip
 *	and DMA registers, create the DMA map, reset the DMA engine, then
 *	configure chip parameters and attach via the MI ncr53c9x code.
 */
static void
ascattach(struct device *parent, struct device *self, void *aux)
{
	struct confargs *ca = aux;
	struct asc_softc *esc = (void *)self;
	struct ncr53c9x_softc *sc = &esc->sc_ncr53c9x;

	/*
	 * Set up glue for MI code early; we use some of it here.
	 */
	sc->sc_glue = &asc_glue;

	esc->sc_bst = ca->ca_bustag;
	esc->sc_dmat = ca->ca_dmatag;

	if (bus_space_map(ca->ca_bustag, ca->ca_addr,
	    16*4, /* sizeof (ncr53c9xreg) */
	    BUS_SPACE_MAP_LINEAR, &esc->sc_bsh) != 0) {
		printf(": cannot map registers\n");
		return;
	}

	if (bus_space_map(ca->ca_bustag, RAMBO_BASE, sizeof(struct rambo_ch),
	    BUS_SPACE_MAP_LINEAR, &esc->dm_bsh) != 0) {
		printf(": cannot map dma registers\n");
		return;
	}

	if (bus_dmamap_create(esc->sc_dmat, MAX_DMA_SZ,
	    DMA_SEGS, MAX_DMA_SZ, RB_BOUNDRY, BUS_DMA_WAITOK,
	    &esc->sc_dmamap) != 0) {
		printf(": failed to create dmamap\n");
		return;
	}

	evcnt_attach_dynamic(&esc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    self->dv_xname, "intr");

	esc->sc_flags = DMA_IDLE;
	asc_dma_reset(sc);

	/* Other settings */
	sc->sc_id = 7;
	sc->sc_freq = 24;	/* 24 MHz clock */

	/*
	 * Setup for genuine NCR 53C94 SCSI Controller
	 */
	sc->sc_cfg1 = sc->sc_id | NCRCFG1_PARENB;
	sc->sc_cfg2 = NCRCFG2_SCSI2 | NCRCFG2_FE;
	sc->sc_cfg3 = NCRCFG3_CDB | NCRCFG3_QTE | NCRCFG3_FSCSI;

	sc->sc_rev = NCR_VARIANT_NCR53C94;
	/* Minimum sync period (in 4ns units per MI convention). */
	sc->sc_minsync = (1000 / sc->sc_freq) * 5 / 4;
	sc->sc_maxxfer = MAX_SCSI_XFER;

#ifdef OLDNCR
	/* An old-revision chip has no CFG3 register; degrade features. */
	if (!NCR_READ_REG(sc, NCR_CFG3)) {
		printf(" [old revision]");
		sc->sc_cfg2 = 0;
		sc->sc_cfg3 = 0;
		sc->sc_minsync = 0;
	}
#endif
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = ncr53c9x_scsipi_request;
	ncr53c9x_attach(sc);

	bus_intr_establish(esc->sc_bst, SYS_INTR_SCSI, 0, 0, asc_intr, esc);
}
/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
qeattach(device_t parent, device_t self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct qe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_if;
	struct qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, error;
	char *nullbuf;

	sc->sc_dev = self;
	sc->sc_uh = device_private(parent);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */
	/* Extra ETHER_PAD_LEN holds a zeroed pad buffer for short frames. */
	sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
	if ((error = ubmemalloc(sc->sc_uh, &sc->sc_ui, 0))) {
		aprint_error(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_qedata, 0, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
	nullbuf = ((char*)sc->sc_qedata) + sizeof(struct qe_cdata);

	/*
	 * Create the transmit descriptor DMA maps.  We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors also,
	 * so that we can avoid this each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			aprint_error(
			    ": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			aprint_error(
			    ": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}

	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			aprint_error(
			    ": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT,&sc->sc_nulldmamap)) != 0) {
		aprint_error(
		    ": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(
		    ": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */
	/* The extra descriptor chains back to the head (QE_CHAIN). */
	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that were set at match time, and remember it.
	 */
	sc->sc_intvec = sc->sc_uh->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out ethernet address and tell which type this card is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

	/* Vector register bit 0 reads back set on a DELQA. */
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	aprint_normal(": %s, hardware address %s\n",
	    QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa":"deqna",
	    ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
	    sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    device_xname(sc->sc_dev), "intr");

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}
/*
 * cscattach:
 *	Attach a Cumana SCSI-2 podule based on the FAS216: build the
 *	register map into the podule's module space, fill in the sfas
 *	glue and scsipi adapter/channel structures, program the address
 *	latch, and (unless polling) hook the podule interrupt.
 */
void
cscattach(device_t parent, device_t self, void *aux)
{
	struct csc_softc *sc = device_private(self);
	struct podule_attach_args *pa;
	csc_regmap_p rp = &sc->sc_regmap;
	vu_char *fas;
	int loop;

	pa = aux;

	if (pa->pa_podule_number == -1)
		panic("Podule has disappeared !");

	sc->sc_specific.sc_podule_number = pa->pa_podule_number;
	sc->sc_specific.sc_podule = pa->pa_podule;
	podules[sc->sc_specific.sc_podule_number].attached = 1;

	sc->sc_specific.sc_iobase =
	    (vu_char *)sc->sc_specific.sc_podule->mod_base;

	/* Board-specific registers, then the FAS216 register window. */
	rp->status0 = &sc->sc_specific.sc_iobase[CSC_STATUS0];
	rp->alatch = &sc->sc_specific.sc_iobase[CSC_ALATCH];
	rp->dack = (vu_short *)&sc->sc_specific.sc_iobase[CSC_DACK];
	fas = &sc->sc_specific.sc_iobase[CSC_FAS_OFFSET_BASE];
	rp->FAS216.sfas_tc_low = &fas[CSC_FAS_OFFSET_TCL];
	rp->FAS216.sfas_tc_mid = &fas[CSC_FAS_OFFSET_TCM];
	rp->FAS216.sfas_fifo = &fas[CSC_FAS_OFFSET_FIFO];
	rp->FAS216.sfas_command = &fas[CSC_FAS_OFFSET_COMMAND];
	rp->FAS216.sfas_dest_id = &fas[CSC_FAS_OFFSET_DESTID];
	rp->FAS216.sfas_timeout = &fas[CSC_FAS_OFFSET_TIMEOUT];
	rp->FAS216.sfas_syncper = &fas[CSC_FAS_OFFSET_PERIOD];
	rp->FAS216.sfas_syncoff = &fas[CSC_FAS_OFFSET_OFFSET];
	rp->FAS216.sfas_config1 = &fas[CSC_FAS_OFFSET_CONFIG1];
	rp->FAS216.sfas_clkconv = &fas[CSC_FAS_OFFSET_CLKCONV];
	rp->FAS216.sfas_test = &fas[CSC_FAS_OFFSET_TEST];
	rp->FAS216.sfas_config2 = &fas[CSC_FAS_OFFSET_CONFIG2];
	rp->FAS216.sfas_config3 = &fas[CSC_FAS_OFFSET_CONFIG3];
	rp->FAS216.sfas_tc_high = &fas[CSC_FAS_OFFSET_TCH];
	rp->FAS216.sfas_fifo_bot = &fas[CSC_FAS_OFFSET_FIFOBOT];

	sc->sc_softc.sc_dev = self;
	sc->sc_softc.sc_fas = (sfas_regmap_p)rp;
	sc->sc_softc.sc_spec = &sc->sc_specific;

	sc->sc_softc.sc_led = csc_led;

	sc->sc_softc.sc_setup_dma = csc_setup_dma;
	sc->sc_softc.sc_build_dma_chain = csc_build_dma_chain;
	sc->sc_softc.sc_need_bump = csc_need_bump;

	sc->sc_softc.sc_clock_freq = 8;   /* Cumana runs at 8MHz */
	sc->sc_softc.sc_timeout = 250;    /* Set default timeout to 250ms */
	sc->sc_softc.sc_config_flags = SFAS_NO_DMA /*| SFAS_NF_DEBUG*/;
	sc->sc_softc.sc_host_id = 7;      /* Should check the jumpers */

	sc->sc_softc.sc_bump_sz = PAGE_SIZE;
	sc->sc_softc.sc_bump_pa = 0x0;

	sfasinitialize((struct sfas_softc *)sc);

	sc->sc_softc.sc_adapter.adapt_dev = self;
	sc->sc_softc.sc_adapter.adapt_nchannels = 1;
	sc->sc_softc.sc_adapter.adapt_openings = 7;
	sc->sc_softc.sc_adapter.adapt_max_periph = 1;
	sc->sc_softc.sc_adapter.adapt_ioctl = NULL;
	sc->sc_softc.sc_adapter.adapt_minphys = sfas_minphys;
	sc->sc_softc.sc_adapter.adapt_request = sfas_scsi_request;

	sc->sc_softc.sc_channel.chan_adapter = &sc->sc_softc.sc_adapter;
	sc->sc_softc.sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_softc.sc_channel.chan_channel = 0;
	sc->sc_softc.sc_channel.chan_ntargets = 8;
	sc->sc_softc.sc_channel.chan_nluns = 8;
	sc->sc_softc.sc_channel.chan_id = sc->sc_softc.sc_host_id;

	/* Provide an override for the host id */
	(void)get_bootconf_option(boot_args, "csc.hostid",
	    BOOTOPT_TYPE_INT, &sc->sc_softc.sc_channel.chan_id);

	printf(": host=%d", sc->sc_softc.sc_channel.chan_id);

	/* initialise the alatch */
	/* Latch bit 3 is skipped; the others are loaded from the defaults. */
	sc->sc_specific.sc_alatch_defs = (CSC_POLL?0:CSC_ALATCH_DEFS_INTEN);
	for (loop = 0; loop < 8; loop ++) {
		if(loop != 3)
			*rp->alatch = (loop << 1) |
			    ((sc->sc_specific.sc_alatch_defs &
			    (1 << loop))?1:0);
	}

#if CSC_POLL == 0
	evcnt_attach_dynamic(&sc->sc_softc.sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "intr");
	sc->sc_softc.sc_ih = podulebus_irq_establish(pa->pa_ih, IPL_BIO,
	    csc_intr, &sc->sc_softc, &sc->sc_softc.sc_intrcnt);
	if (sc->sc_softc.sc_ih == NULL)
		panic("%s: Cannot install IRQ handler", device_xname(self));
#else
	printf(" polling");
	sc->sc_softc.sc_adapter.adapt_flags |= SCSIPI_ADAPT_POLL_ONLY;
#endif

	printf("\n");

	/* attach all scsi units on us */
	config_found(self, &sc->sc_softc.sc_channel, scsiprint);
}
/*
 * com_obio_attach:
 *	Attach a com (16x50 UART) living on an obio that looks like an
 *	sbus slot.  Switches the system console to this port when the
 *	PROM stdin is attached to it, handles the Tadpole "modem" port's
 *	power-up dance, and hooks the interrupt if one is wired.
 *
 *	Fix: append the missing "\n" to the pmf registration error
 *	message, matching every other aprint message in this driver.
 */
static void
com_obio_attach(device_t parent, device_t self, void *aux)
{
	struct com_obio_softc *osc = device_private(self);
	struct com_softc *sc = &osc->osc_com;
	union obio_attach_args *uoba = aux;
	struct sbus_attach_args *sa = &uoba->uoba_sbus;
	bus_space_handle_t ioh;
	bus_space_tag_t iot;
	bus_addr_t iobase;

	sc->sc_dev = self;
	if (strcmp("modem", sa->sa_name) == 0) {
		/* Tadpole modem port: needs the power-up sequence below. */
		osc->osc_tadpole = 1;
	}

	/*
	 * We're living on an obio that looks like an sbus slot.
	 */
	iot = sa->sa_bustag;
	iobase = sa->sa_offset;
	sc->sc_frequency = COM_FREQ;

	/*
	 * XXX: It would be nice to be able to split console input and
	 * output to different devices.  For now switch to serial
	 * console if PROM stdin is on serial (so that we can use DDB).
	 */
	if (prom_instance_to_package(prom_stdin()) == sa->sa_node)
		comcnattach(iot, iobase, B9600, sc->sc_frequency,
		    COM_TYPE_NORMAL, (CLOCAL | CREAD | CS8));

	if (!com_is_console(iot, iobase, &ioh) &&
	    sbus_bus_map(iot, sa->sa_slot, iobase, sa->sa_size,
	    BUS_SPACE_MAP_LINEAR, &ioh) != 0) {
		aprint_error(": can't map registers\n");
		return;
	}
	COM_INIT_REGS(sc->sc_regs, iot, ioh, iobase);

	if (osc->osc_tadpole) {
		/* Power up the modem and wait until it answers probes. */
		*AUXIO4M_REG |= (AUXIO4M_LED|AUXIO4M_LTE);
		do {
			DELAY(100);
		} while (!com_probe_subr(&sc->sc_regs));
#if 0
		printf("modem: attach: lcr=0x%02x iir=0x%02x\n",
		    bus_space_read_1(sc->sc_regs.iot, sc->sc_regs.ioh, 3),
		    bus_space_read_1(sc->sc_regs.iot, sc->sc_regs.ioh, 2));
#endif
	}
	com_attach_subr(sc);

	if (sa->sa_nintr != 0) {
		(void)bus_intr_establish(sc->sc_regs.cr_iot, sa->sa_pri,
		    IPL_SERIAL, comintr, sc);
		evcnt_attach_dynamic(&osc->osc_intrcnt, EVCNT_TYPE_INTR,
		    NULL, device_xname(self), "intr");
	}

	if (!pmf_device_register1(self, com_suspend, com_resume,
	    com_cleanup)) {
		aprint_error_dev(self,
		    "could not establish shutdown hook\n");
	}
}
/*
 * tsattach:
 *	Attach a TS11/TS05/TU80 tape controller on the Unibus/Qbus:
 *	create the transfer DMA map, reset the drive and write its
 *	characteristics, determine the exact drive type from the
 *	extended status bits, and hook the interrupt.
 */
void
tsattach(device_t parent, device_t self, void *aux)
{
	struct ts_softc *sc = device_private(self);
	struct uba_attach_args *ua = aux;
	int error;
	const char *t;

	sc->sc_dev = self;
	sc->sc_uh = device_private(parent);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	sc->sc_uu.uu_dev = self;
	sc->sc_uu.uu_ready = tsready;

	tsinit(sc);	/* reset and map */

	error = bus_dmamap_create(sc->sc_dmat, (64*1024), 16, (64*1024),
	    0, BUS_DMA_NOWAIT, &sc->sc_dmam);
	if (error) {
		aprint_error(": failed create DMA map %d\n", error);
		return;
	}

	bufq_alloc(&sc->sc_bufq, "fcfs", 0);

	/*
	 * write the characteristics (again)
	 */
	sc->sc_state = TS_INIT;		/* tsintr() checks this ... */
	tswchar(sc);
	/* Wait (up to ~1 second) for the interrupt to wake us. */
	if (tsleep(sc, PRIBIO, "tsattach", 100)) {
		aprint_error(": failed SET CHARACTERISTICS\n");
		return;
	}

	sc->sc_state = TS_RUNNING;
	/* Drive type: TS11 vs TU80 on a real Unibus, TS05 on Qbus. */
	if (sc->sc_uh->uh_type == UBA_UBA) {
		if (sc->sc_vts->status.xst2 & TS_SF_TU80) {
			sc->sc_type = TYPE_TU80;
			t = "TU80";
		} else {
			sc->sc_type = TYPE_TS11;
			t = "TS11";
		}
	} else {
		sc->sc_type = TYPE_TS05;
		t = "TS05";
	}

	aprint_normal(": %s\n", t);
	aprint_normal_dev(sc->sc_dev,
	    "rev %d, extended features %s, transport %s\n",
	    (sc->sc_vts->status.xst2 & TS_SF_MCRL) >> 2,
	    (sc->sc_vts->status.xst2 & TS_SF_EFES ? "enabled" : "disabled"),
	    (TS_RCSR(TSSR) & TS_OFL ? "offline" : "online"));

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, tsintr,
	    sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
	    device_xname(sc->sc_dev), "intr");
}
/*
 * sio_intr_setup:
 *	Set up ISA interrupt dispatch through the SIO's cascaded 8259
 *	ICUs: map the ICU I/O ports, locate a working ELCR access method,
 *	allocate shared-interrupt state with an event counter per IRQ,
 *	and program each IRQ's initial trigger type and enable state.
 */
void
sio_intr_setup(pci_chipset_tag_t pc, bus_space_tag_t iot)
{
	char *cp;
	int i;

	sio_iot = iot;
	sio_pc = pc;

	if (bus_space_map(sio_iot, IO_ICU1, 2, 0, &sio_ioh_icu1) ||
	    bus_space_map(sio_iot, IO_ICU2, 2, 0, &sio_ioh_icu2))
		panic("sio_intr_setup: can't map ICU I/O ports");

	/* Try each ELCR access method until one succeeds. */
	for (i = 0; sio_elcr_setup_funcs[i] != NULL; i++)
		if ((*sio_elcr_setup_funcs[i])() == 0)
			break;
	if (sio_elcr_setup_funcs[i] == NULL)
		panic("sio_intr_setup: can't map ELCR");

#ifdef BROKEN_PROM_CONSOLE
	/*
	 * Remember the initial values, so we can restore them later.
	 */
	initial_ocw1[0] = bus_space_read_1(sio_iot, sio_ioh_icu1, 1);
	initial_ocw1[1] = bus_space_read_1(sio_iot, sio_ioh_icu2, 1);
	initial_elcr[0] = (*sio_read_elcr)(0);	/* XXX */
	initial_elcr[1] = (*sio_read_elcr)(1);	/* XXX */
	shutdownhook_establish(sio_intr_shutdown, 0);
#endif

/* Size of the per-IRQ name buffers requested from the shared-intr code. */
#define PCI_SIO_IRQ_STR	8
	sio_intr = alpha_shared_intr_alloc(ICU_LEN, PCI_SIO_IRQ_STR);

	/*
	 * set up initial values for interrupt enables.
	 */
	for (i = 0; i < ICU_LEN; i++) {
		alpha_shared_intr_set_maxstrays(sio_intr, i, STRAY_MAX);

		cp = alpha_shared_intr_string(sio_intr, i);
		snprintf(cp, PCI_SIO_IRQ_STR, "irq %d", i);
		evcnt_attach_dynamic(alpha_shared_intr_evcnt(sio_intr, i),
		    EVCNT_TYPE_INTR, NULL, "isa", cp);

		switch (i) {
		case 0:
		case 1:
		case 8:
		case 13:
			/*
			 * IRQs 0, 1, 8, and 13 must always be
			 * edge-triggered.
			 */
			sio_setirqstat(i, 0, IST_EDGE);
			alpha_shared_intr_set_dfltsharetype(sio_intr, i,
			    IST_EDGE);
			specific_eoi(i);
			break;

		case 2:
			/*
			 * IRQ 2 must be edge-triggered, and should be
			 * enabled (otherwise IRQs 8-15 are ignored).
			 */
			sio_setirqstat(i, 1, IST_EDGE);
			alpha_shared_intr_set_dfltsharetype(sio_intr, i,
			    IST_UNUSABLE);
			break;

		default:
			/*
			 * Otherwise, disable the IRQ and set its
			 * type to (effectively) "unknown."
			 */
			sio_setirqstat(i, 0, IST_NONE);
			alpha_shared_intr_set_dfltsharetype(sio_intr, i,
			    IST_NONE);
			specific_eoi(i);
			break;
		}
	}
}
/*
 * ascattach:
 *	Attach an Acorn SCSI podule based on the WD33C93 (SBIC): wire up
 *	the DMA glue functions, map the chip, reset the card via its
 *	page register, and (unless polling) hook the podule interrupt.
 *	Boot arguments "asc.hostid" and "ascpoll" override the host id
 *	and force polled operation respectively.
 */
void
ascattach(device_t parent, device_t self, void *aux)
{
	/* volatile struct sdmac *rp;*/
	struct asc_softc *sc;
	struct sbic_softc *sbic;
	struct podule_attach_args *pa;

	sc = device_private(self);
	pa = aux;

	if (pa->pa_podule_number == -1)
		panic("Podule has disappeared !");

	sc->sc_podule_number = pa->pa_podule_number;
	sc->sc_podule = pa->pa_podule;
	podules[sc->sc_podule_number].attached = 1;

	sbic = &sc->sc_softc;
	sbic->sc_dev = self;

	/* DMA/interrupt glue into the board-specific asc_* helpers. */
	sbic->sc_enintr = asc_enintr;
	sbic->sc_dmaok = asc_dmaok;
	sbic->sc_dmasetup = asc_dmasetup;
	sbic->sc_dmanext = asc_dmanext;
	sbic->sc_dmastop = asc_dmastop;
	sbic->sc_dmafinish = asc_dmafinish;

	/* Map sbic */
	sbic->sc_sbicp.sc_sbiciot = pa->pa_iot;
	if (bus_space_map (sbic->sc_sbicp.sc_sbiciot,
	    sc->sc_podule->mod_base + ASC_SBIC, ASC_SBIC_SPACE, 0,
	    &sbic->sc_sbicp.sc_sbicioh))
		panic("%s: Cannot map SBIC", device_xname(self));

	/* Clock frequency in 100 kHz units; 143 == 14.3 MHz default. */
	sbic->sc_clkfreq = sbic_clock_override ? sbic_clock_override : 143;

	sbic->sc_adapter.adapt_dev = self;
	sbic->sc_adapter.adapt_nchannels = 1;
	sbic->sc_adapter.adapt_openings = 7;
	sbic->sc_adapter.adapt_max_periph = 1;
	sbic->sc_adapter.adapt_ioctl = NULL;
	sbic->sc_adapter.adapt_minphys = asc_minphys;
	sbic->sc_adapter.adapt_request = sbic_scsi_request;

	sbic->sc_channel.chan_adapter = &sbic->sc_adapter;
	sbic->sc_channel.chan_bustype = &scsi_bustype;
	sbic->sc_channel.chan_channel = 0;
	sbic->sc_channel.chan_ntargets = 8;
	sbic->sc_channel.chan_nluns = 8;
	sbic->sc_channel.chan_id = 7;

	/* Provide an override for the host id */
	(void)get_bootconf_option(boot_args, "asc.hostid",
	    BOOTOPT_TYPE_INT, &sbic->sc_channel.chan_id);

	printf(": hostid=%d", sbic->sc_channel.chan_id);

#if ASC_POLL > 0
	if (boot_args)
		get_bootconf_option(boot_args, "ascpoll",
		    BOOTOPT_TYPE_BOOLEAN, &asc_poll);

	if (asc_poll) {
		sbic->sc_adapter.adapt_flags |= SCSIPI_ADAPT_POLL_ONLY;
		printf(" polling");
	}
#endif
	printf("\n");

	sc->sc_pagereg = sc->sc_podule->fast_base + ASC_PAGEREG;
	sc->sc_intstat = sc->sc_podule->fast_base + ASC_INTSTATUS;

	/* Reset the card */
	WriteByte(sc->sc_pagereg, 0x80);
	DELAY(500000);
	WriteByte(sc->sc_pagereg, 0x00);
	DELAY(250000);

	sbicinit(sbic);

	/* If we are polling only, we don't need a interrupt handler.  */
	/*
	 * NOTE(review): this guard uses #ifdef ASC_POLL while the block
	 * above uses #if ASC_POLL > 0 — inconsistent if ASC_POLL is
	 * defined as 0; confirm intended semantics.
	 */
#ifdef ASC_POLL
	if (!asc_poll)
#endif
	{
		evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
		    device_xname(self), "intr");
		sc->sc_ih = podulebus_irq_establish(pa->pa_ih, IPL_BIO,
		    asc_intr, sc, &sc->sc_intrcnt);
		if (sc->sc_ih == NULL)
			panic("%s: Cannot claim podule IRQ",
			    device_xname(self));
	}

	/*
	 * attach all scsi units on us
	 */
	config_found(self, &sbic->sc_channel, scsiprint);
}
/* ARGSUSED */
/*
 * pccattach:
 *	Attach the MVME Peripheral Channel Controller: map its registers,
 *	program the interrupt vector base, hook the abort-switch and
 *	software-interrupt-1 handlers, enable the global interrupt line,
 *	record the VMEbus slave base address, and attach configured
 *	child devices.
 */
void
pccattach(struct device *parent, struct device *self, void *args)
{
	struct mainbus_attach_args *ma;
	struct pcc_attach_args npa;
	struct pcc_softc *sc;
	uint8_t reg;
	int i;

	ma = args;
	sc = sys_pcc = (struct pcc_softc *)self;

	/* Get a handle to the PCC's registers. */
	sc->sc_bust = ma->ma_bust;
	bus_space_map(sc->sc_bust, ma->ma_offset, PCCREG_SIZE, 0,
	    &sc->sc_bush);

	/* Tell the chip the base interrupt vector */
	pcc_reg_write(sc, PCCREG_VECTOR_BASE, PCC_VECBASE);

	printf(": Peripheral Channel Controller, "
	    "rev %d, vecbase 0x%x\n",
	    pcc_reg_read(sc, PCCREG_REVISION),
	    pcc_reg_read(sc, PCCREG_VECTOR_BASE));

	evcnt_attach_dynamic(&sc->sc_evcnt, EVCNT_TYPE_INTR,
	    isrlink_evcnt(7), "nmi", "abort sw");

	/* Hook up interrupt handler for abort button, and enable it */
	pccintr_establish(PCCV_ABORT, pccintr, 7, NULL, &sc->sc_evcnt);
	pcc_reg_write(sc, PCCREG_ABORT_INTR_CTRL,
	    PCC_ABORT_IEN | PCC_ABORT_ACK);

	/*
	 * Install a handler for Software Interrupt 1
	 * and arrange to schedule soft interrupts on demand.
	 */
	/*
	 * NOTE(review): this reuses the abort-switch event counter
	 * (&sc->sc_evcnt) for the soft interrupt too — confirm intended.
	 */
	pccintr_establish(PCCV_SOFT1, pccsoftintr, 1, sc, &sc->sc_evcnt);
#ifdef notyet
	_softintr_chipset_assert = pccsoftintrassert;
#endif

	/* Make sure the global interrupt line is hot. */
	reg = pcc_reg_read(sc, PCCREG_GENERAL_CONTROL) | PCC_GENCR_IEN;
	pcc_reg_write(sc, PCCREG_GENERAL_CONTROL, reg);

	/*
	 * Calculate the board's VMEbus slave base address, for the
	 * benefit of the VMEchip driver.
	 * (Weird that this register is in the PCC ...)
	 */
	reg = pcc_reg_read(sc, PCCREG_SLAVE_BASE_ADDR) &
	    PCC_SLAVE_BASE_MASK;
	pcc_slave_base_addr = (bus_addr_t)reg * mem_clusters[0].size;

	/*
	 * Attach configured children.
	 */
	npa._pa_base = ma->ma_offset;
	for (i = 0; pcc_devices[i].pcc_name != NULL; ++i) {
		/*
		 * Note that IPL is filled in by match function.
		 */
		npa.pa_name = pcc_devices[i].pcc_name;
		npa.pa_ipl = -1;
		npa.pa_dmat = ma->ma_dmat;
		npa.pa_bust = ma->ma_bust;
		npa.pa_offset = pcc_devices[i].pcc_offset + ma->ma_offset;

		/* Attach the device if configured. */
		(void)config_found(self, &npa, pccprint);
	}
}
/*
 * Attach the SGI Seeq (sq) Ethernet device: map HPC DMA and Seeq chip
 * registers, allocate/map/load the DMA control structure, create tx/rx
 * DMA maps and pre-load rx buffers, determine the MAC address (EEPROM,
 * falling back to ARCBIOS "eaddr"), probe 8003 vs 80c03, and attach
 * the network interface.  On failure, resources are released in
 * reverse order through the fail_* labels.
 *
 * Fix vs. original: the "unable to allocate or map rx buffer" message
 * had a newline and comma embedded mid-string
 * ("... %d\n," " error = %d\n"), producing a broken two-line message;
 * it is now one coherent line matching the neighboring messages.
 */
static void
sq_attach(device_t parent, device_t self, void *aux)
{
	int i, err;
	const char* macaddr;
	struct sq_softc *sc = device_private(self);
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_dev = self;
	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;      /* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_dmaoff, sc->hpc_regs->enet_regs_size,
	    &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff, sc->hpc_regs->enet_devregs_size,
	    &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	/* One page-aligned chunk holds all descriptor/control state. */
	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
	    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 1, &sc->sc_ncdseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
	    sizeof(struct sq_control), (void **)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
	    1, sizeof(struct sq_control), PAGE_SIZE, BUS_DMA_NOWAIT,
	    &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		    "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap,
	    sc->sc_control, sizeof(struct sq_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		    "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, "
			    "error = %d\n", i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, "
			    "error = %d\n", i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers. */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			/* FIX: single coherent message (was "%d\n," ...). */
			printf(": unable to allocate or map rx buffer %d, "
			    "error = %d\n", i, err);
			goto fail_6;
		}
	}

	memcpy(sc->sc_enaddr, &haa->hpc_eeprom[SQ_HPC_EEPROM_ENADDR],
	    ETHER_ADDR_LEN);

	/*
	 * If our mac address is bogus, obtain it from ARCBIOS. This will
	 * be true of the onboard HPC3 on IP22, since there is no eeprom,
	 * but rather the DS1386 RTC's battery-backed ram is used.
	 */
	if (sc->sc_enaddr[0] != SGI_OUI_0 ||
	    sc->sc_enaddr[1] != SGI_OUI_1 ||
	    sc->sc_enaddr[2] != SGI_OUI_2) {
		macaddr = arcbios_GetEnvironmentVariable("eaddr");
		if (macaddr == NULL) {
			printf(": unable to get MAC address!\n");
			goto fail_6;
		}
		ether_aton_r(sc->sc_enaddr, sizeof(sc->sc_enaddr), macaddr);
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
	if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	printf("%s: Ethernet address %s\n",
	    device_xname(self), ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control,
	    sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}
/*
 * Attach the ADM5120 on-chip switch engine: obtain a base MAC address
 * (device property, or a locally-administered fallback), map registers,
 * hook the interrupt, set up DMA control data and tx/rx descriptor
 * maps, then attach one network interface per switch port (SW_DEVS).
 *
 * NOTE(review): the error paths after successful DMA allocation return
 * without releasing earlier resources — unlike sq_attach/sgec_attach
 * in this file there is no unwind; presumably acceptable for a
 * boot-time attach, but worth confirming.
 */
static void
admsw_attach(device_t parent, device_t self, void *aux)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	struct admsw_softc *sc = device_private(self);
	struct obio_attach_args *aa = aux;
	struct ifnet *ifp;
	bus_dma_segment_t seg;
	int error, i, rseg;
	prop_data_t pd;

	printf(": ADM5120 Switch Engine, %d ports\n", SW_DEVS);

	sc->sc_dev = self;
	sc->sc_dmat = aa->oba_dt;
	sc->sc_st = aa->oba_st;

	/* Base MAC address: device property if present, else a fixed
	 * locally-administered address (02:aa:bb:cc:dd:ee). */
	pd = prop_dictionary_get(device_properties(self), "mac-address");

	if (pd == NULL) {
		enaddr[0] = 0x02;
		enaddr[1] = 0xaa;
		enaddr[2] = 0xbb;
		enaddr[3] = 0xcc;
		enaddr[4] = 0xdd;
		enaddr[5] = 0xee;
	} else
		memcpy(enaddr, prop_data_data_nocopy(pd), sizeof(enaddr));

	memcpy(sc->sc_enaddr, enaddr, sizeof(sc->sc_enaddr));

	printf("%s: base Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	/* Map the device. */
	if (bus_space_map(sc->sc_st, aa->oba_addr, 512, 0,
	    &sc->sc_ioh) != 0) {
		printf("%s: unable to map device\n", device_xname(sc->sc_dev));
		return;
	}

	/* Hook up the interrupt handler. */
	sc->sc_ih = adm5120_intr_establish(aa->oba_irq, INTR_IRQ,
	    admsw_intr, sc);

	if (sc->sc_ih == NULL) {
		printf("%s: unable to register interrupt handler\n",
		    device_xname(sc->sc_dev));
		return;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct admsw_control_data), PAGE_SIZE, 0, &seg, 1,
	    &rseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		return;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct admsw_control_data),
	    (void *)&sc->sc_control_data, 0)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		return;
	}
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct admsw_control_data), 1,
	    sizeof(struct admsw_control_data), 0, 0,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", device_xname(sc->sc_dev), error);
		return;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct admsw_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    device_xname(sc->sc_dev), error);
		return;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 * (2 segments per map: high- and low-priority tx rings.)
	 */
	for (i = 0; i < ADMSW_NTXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    2, MCLBYTES, 0, 0, &sc->sc_txhsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create txh DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i,
			    error);
			return;
		}
		sc->sc_txhsoft[i].ds_mbuf = NULL;
	}
	for (i = 0; i < ADMSW_NTXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    2, MCLBYTES, 0, 0, &sc->sc_txlsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create txl DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i,
			    error);
			return;
		}
		sc->sc_txlsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < ADMSW_NRXHDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxhsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rxh DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i,
			    error);
			return;
		}
		sc->sc_rxhsoft[i].ds_mbuf = NULL;
	}
	for (i = 0; i < ADMSW_NRXLDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxlsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rxl DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i,
			    error);
			return;
		}
		sc->sc_rxlsoft[i].ds_mbuf = NULL;
	}

	admsw_init_bufs(sc);

	admsw_reset(sc);

	/* One ifnet per physical switch port. */
	for (i = 0; i < SW_DEVS; i++) {
		ifmedia_init(&sc->sc_ifmedia[i], 0, admsw_mediachange,
		    admsw_mediastatus);
		ifmedia_add(&sc->sc_ifmedia[i],
		    IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i],
		    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i],
		    IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i],
		    IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->sc_ifmedia[i],
		    IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia[i], IFM_ETHER|IFM_AUTO);

		ifp = &sc->sc_ethercom[i].ec_if;
		strcpy(ifp->if_xname, device_xname(sc->sc_dev));
		/* Per-port names: bump the digit in "admswN".
		 * NOTE(review): assumes the xname digit sits at index 5
		 * and stays single-digit — confirm for >9 ports. */
		ifp->if_xname[5] += i;
		ifp->if_softc = sc;
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		ifp->if_ioctl = admsw_ioctl;
		ifp->if_start = admsw_start;
		ifp->if_watchdog = admsw_watchdog;
		ifp->if_init = admsw_init;
		ifp->if_stop = admsw_stop;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx;
		IFQ_SET_MAXLEN(&ifp->if_snd,
		    max(ADMSW_NTXLDESC, IFQ_MAXLEN));
		IFQ_SET_READY(&ifp->if_snd);

		/* Attach the interface. */
		if_attach(ifp);
		ether_ifattach(ifp, enaddr);
		/* Each port gets the next consecutive MAC address. */
		enaddr[5]++;
	}

#ifdef ADMSW_EVENT_COUNTERS
	evcnt_attach_dynamic(&sc->sc_ev_txstall, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txstall");
	evcnt_attach_dynamic(&sc->sc_ev_rxstall, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rxstall");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rxintr");
#if 1
	evcnt_attach_dynamic(&sc->sc_ev_rxsync, EVCNT_TYPE_MISC,
	    NULL, device_xname(sc->sc_dev), "rxsync");
#endif
#endif

	admwdog_attach(sc);

	/* Make sure the interface is shutdown during reboot. */
	sc->sc_sdhook = shutdownhook_establish(admsw_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    device_xname(sc->sc_dev));

	/* leave interrupts and cpu port disabled */
	return;
}
void midi_attach(struct midi_softc *sc) { struct midi_info mi; kmutex_t *dummy; static int first = 1; if (first) { mutex_init(&hwif_softc_lock, MUTEX_DEFAULT, IPL_NONE); first = 0; } sc->hw_if->get_locks(sc->hw_hdl, &sc->lock, &dummy); callout_init(&sc->xmt_asense_co, CALLOUT_MPSAFE); callout_init(&sc->rcv_asense_co, CALLOUT_MPSAFE); callout_setfunc(&sc->xmt_asense_co, midi_xmt_asense, sc); callout_setfunc(&sc->rcv_asense_co, midi_rcv_asense, sc); sc->sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE, midi_softint, sc); cv_init(&sc->rchan, "midird"); cv_init(&sc->wchan, "midiwr"); sc->dying = 0; sc->isopen = 0; mutex_enter(&hwif_softc_lock); mutex_enter(sc->lock); hwif_softc = sc; sc->hw_if->getinfo(sc->hw_hdl, &mi); hwif_softc = NULL; mutex_exit(sc->lock); mutex_exit(&hwif_softc_lock); sc->props = mi.props; if (!(sc->props & MIDI_PROP_NO_OUTPUT)) { evcnt_attach_dynamic(&sc->xmt.bytesDiscarded, EVCNT_TYPE_MISC, NULL, device_xname(sc->dev), "xmt bytes discarded"); evcnt_attach_dynamic(&sc->xmt.incompleteMessages, EVCNT_TYPE_MISC, NULL, device_xname(sc->dev), "xmt incomplete msgs"); } if (sc->props & MIDI_PROP_CAN_INPUT) { evcnt_attach_dynamic(&sc->rcv.bytesDiscarded, EVCNT_TYPE_MISC, NULL, device_xname(sc->dev), "rcv bytes discarded"); evcnt_attach_dynamic(&sc->rcv.incompleteMessages, EVCNT_TYPE_MISC, NULL, device_xname(sc->dev), "rcv incomplete msgs"); } aprint_naive("\n"); aprint_normal(": %s\n", mi.name); if (!pmf_device_register(sc->dev, NULL, NULL)) aprint_error_dev(sc->dev, "couldn't establish power handler\n"); }
/*
 * Attach a Magma serial/parallel SBus board: identify the exact model
 * via the "magma_prom" PROM property, map the register space, reset
 * and initialize each CD1400 (serial) and CD1190 (parallel) chip,
 * attach the mtty/mbpp children, and establish hard and soft
 * interrupt handlers.
 */
void
magma_attach(device_t parent, device_t self, void *aux)
{
	struct sbus_attach_args *sa = aux;
	struct magma_softc *sc = device_private(self);
	struct magma_board_info *card;
	bus_space_handle_t bh;
	char *magma_prom, *clockstr;
	int cd_clock;
	int node, chip;

	sc->ms_dev = self;

	node = sa->sa_node;

	/*
	 * Find the card model.
	 * Older models all have sbus node name `MAGMA_Sp' (see
	 * `supported_cards[]' above), and must be distinguished
	 * by the `magma_prom' property.
	 */
	magma_prom = prom_getpropstring(node, "magma_prom");

	for (card = supported_cards; card->mb_name != NULL; card++) {
		if (strcmp(sa->sa_name, card->mb_sbusname) != 0)
			/* Sbus node name doesn't match */
			continue;
		if (strcmp(magma_prom, card->mb_name) == 0)
			/* Model name match */
			break;
	}

	if( card->mb_name == NULL ) {
		printf(": %s (unsupported)\n", magma_prom);
		return;
	}

	dprintf((" addr %p", sc));
	printf(": %s\n", card->mb_realname);

	sc->ms_board = card;
	sc->ms_ncd1400 = card->mb_ncd1400;
	sc->ms_ncd1190 = card->mb_ncd1190;

	if (sbus_bus_map(sa->sa_bustag,
	    sa->sa_slot, sa->sa_offset, sa->sa_size,
	    BUS_SPACE_MAP_LINEAR, &bh) != 0) {
		aprint_error("%s @ sbus: cannot map registers\n",
		    device_xname(self));
		return;
	}

	/* the SVCACK* lines are daisychained */
	sc->ms_svcackr = (char *)bus_space_vaddr(sa->sa_bustag, bh)
	    + card->mb_svcackr;
	sc->ms_svcackt = (char *)bus_space_vaddr(sa->sa_bustag, bh)
	    + card->mb_svcackt;
	sc->ms_svcackm = (char *)bus_space_vaddr(sa->sa_bustag, bh)
	    + card->mb_svcackm;

	/*
	 * Find the clock speed; it's the same for all CD1400 chips
	 * on the board.  The PROM reports it as a decimal string in
	 * MHz; default to 25MHz when the property is absent.
	 */
	clockstr = prom_getpropstring(node, "clock");
	if (*clockstr == '\0')
		/* Default to 25MHz */
		cd_clock = 25;
	else {
		/* Hand-rolled decimal parse of the PROM string. */
		cd_clock = 0;
		while (*clockstr != '\0')
			cd_clock = (cd_clock * 10) + (*clockstr++ - '0');
	}

	/* init the cd1400 chips */
	for( chip = 0 ; chip < card->mb_ncd1400 ; chip++ ) {
		struct cd1400 *cd = &sc->ms_cd1400[chip];

		cd->cd_clock = cd_clock;
		cd->cd_reg = (char *)bus_space_vaddr(sa->sa_bustag, bh)
		    + card->mb_cd1400[chip];

		/* prom_getpropstring(node, "chiprev"); */
		/* seemingly the Magma drivers just ignore the propstring */
		cd->cd_chiprev = cd1400_read_reg(cd, CD1400_GFRCR);

		dprintf(("%s attach CD1400 %d addr %p rev %x clock %dMHz\n",
		    device_xname(sc->ms_dev), chip,
		    cd->cd_reg, cd->cd_chiprev, cd->cd_clock));

		/* clear GFRCR */
		cd1400_write_reg(cd, CD1400_GFRCR, 0x00);

		/* reset whole chip */
		cd1400_write_ccr(cd,
		    CD1400_CCR_CMDRESET | CD1400_CCR_FULLRESET);

		/* wait for revision code to be restored */
		/* NOTE(review): unbounded busy-wait; hangs if the chip
		 * never comes back from reset. */
		while( cd1400_read_reg(cd, CD1400_GFRCR) != cd->cd_chiprev )
			;

		/* set the Prescaler Period Register to tick at 1ms */
		cd1400_write_reg(cd, CD1400_PPR,
		    ((cd->cd_clock * 1000000 / CD1400_PPR_PRESCALER + 500)
		    / 1000));

		/* The LC2+1Sp card is the only card that doesn't have
		 * a CD1190 for the parallel port, but uses channel 0 of
		 * the CD1400, so we make a note of it for later and set up
		 * the CD1400 for parallel mode operation.
		 */
		if( card->mb_npar && card->mb_ncd1190 == 0 ) {
			cd1400_write_reg(cd, CD1400_GCR, CD1400_GCR_PARALLEL);
			cd->cd_parmode = 1;
		}
	}

	/* init the cd1190 chips */
	for( chip = 0 ; chip < card->mb_ncd1190 ; chip++ ) {
		struct cd1190 *cd = &sc->ms_cd1190[chip];

		cd->cd_reg = (char *)bus_space_vaddr(sa->sa_bustag, bh)
		    + card->mb_cd1190[chip];

		/* XXX don't know anything about these chips yet */
		printf("%s: CD1190 %d addr %p (unsupported)\n",
		    device_xname(self), chip, cd->cd_reg);
	}

	/* configure the children */
	(void)config_found(self, mtty_match, NULL);
	(void)config_found(self, mbpp_match, NULL);

	/*
	 * Establish the interrupt handlers.
	 */
	if (sa->sa_nintr == 0)
		return;		/* No interrupts to service!? */

	(void)bus_intr_establish(sa->sa_bustag, sa->sa_pri, IPL_SERIAL,
	    magma_hard, sc);

	sc->ms_sicookie = softint_establish(SOFTINT_SERIAL, magma_soft, sc);
	if (sc->ms_sicookie == NULL) {
		aprint_normal("\n");
		aprint_error_dev(sc->ms_dev,
		    "cannot establish soft int handler\n");
		return;
	}
	evcnt_attach_dynamic(&sc->ms_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(sc->ms_dev), "intr");
}
/*
 * Attach the Tadpole SPARCbook microcontroller (TS102 "tctrl"):
 * map registers, drain any stale data, hook the interrupt, read the
 * external status bits, initialize the control bitport and LCD, and
 * start the event-handling kthread.
 *
 * Fixes vs. original: removed a duplicated `sc->sc_ext_pending = 0;`
 * statement, and added the missing trailing newline to the
 * "unable to create event kthread" message.
 */
static void
tctrl_attach(device_t parent, device_t self, void *aux)
{
	struct tctrl_softc *sc = device_private(self);
	union obio_attach_args *uoba = aux;
	struct sbus_attach_args *sa = &uoba->uoba_sbus;
	unsigned int i, v;

	/* We're living on a sbus slot that looks like an obio that
	 * looks like an sbus slot.
	 */
	sc->sc_dev = self;
	sc->sc_memt = sa->sa_bustag;
	if (sbus_bus_map(sc->sc_memt, sa->sa_slot,
	    sa->sa_offset - TS102_REG_UCTRL_INT, sa->sa_size,
	    BUS_SPACE_MAP_LINEAR, &sc->sc_memh) != 0) {
		printf(": can't map registers\n");
		return;
	}

	printf("\n");

	sc->sc_tft_on = 1;

	/* clear any pending data. */
	for (i = 0; i < 10000; i++) {
		if ((TS102_UCTRL_STS_RXNE_STA &
		    tctrl_read(sc, TS102_REG_UCTRL_STS)) == 0) {
			break;
		}
		v = tctrl_read(sc, TS102_REG_UCTRL_DATA);
		tctrl_write(sc, TS102_REG_UCTRL_STS, TS102_UCTRL_STS_RXNE_STA);
	}

	if (sa->sa_nintr != 0) {
		(void)bus_intr_establish(sc->sc_memt, sa->sa_pri, IPL_NONE,
		    tctrl_intr, sc);
		evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
		    device_xname(sc->sc_dev), "intr");
	}

	/* See what the external status is */
	sc->sc_ext_status = 0;
	tctrl_read_ext_status();
	if (sc->sc_ext_status != 0) {
		const char *sep;

		printf("%s: ", device_xname(sc->sc_dev));
		v = sc->sc_ext_status;
		/* Print each set status bit, comma-separated. */
		for (i = 0, sep = ""; v != 0; i++, v >>= 1) {
			if (v & 1) {
				printf("%s%s", sep, tctrl_ext_statuses[i]);
				sep = ", ";
			}
		}
		printf("\n");
	}

	/* Get a current of the control bitport */
	tctrl_setup_bitport_nop();
	tctrl_write(sc, TS102_REG_UCTRL_INT,
	    TS102_UCTRL_INT_RXNE_REQ|TS102_UCTRL_INT_RXNE_MSK);
	sc->sc_lid = (sc->sc_ext_status & TS102_EXT_STATUS_LID_DOWN) == 0;
	sc->sc_power_state = PWR_RESUME;

	sc->sc_extvga = (sc->sc_ext_status &
	    TS102_EXT_STATUS_EXTERNAL_VGA_ATTACHED) != 0;
	sc->sc_video_callback = NULL;

	sc->sc_wantdata = 0;
	sc->sc_event_count = 0;
	sc->sc_ext_pending = 0;	/* was assigned twice; duplicate removed */

	mutex_init(&sc->sc_requestlock, MUTEX_DEFAULT, IPL_NONE);
	selinit(&sc->sc_rsel);

	/* setup sensors and register the power button */
	tctrl_sensor_setup(sc);
	tctrl_lid_state(sc);
	tctrl_ac_state(sc);

	/* initialize the LCD */
	tctrl_init_lcd();

	/* initialize sc_lcdstate */
	sc->sc_lcdstate = 0;
	sc->sc_lcdwanted = 0;
	tadpole_set_lcd(2, 0);

	/* fire up the LCD event thread */
	sc->sc_events = 0;

	if (kthread_create(PRI_NONE, 0, NULL, tctrl_event_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/* FIX: message previously lacked the trailing newline */
		printf("%s: unable to create event kthread\n",
		    device_xname(sc->sc_dev));
	}
}
void cpu_attach_common(device_t self, struct cpu_info *ci) { const char * const xname = device_xname(self); /* * Cross link cpu_info and its device together */ ci->ci_dev = self; self->dv_private = ci; KASSERT(ci->ci_idepth == 0); evcnt_attach_dynamic(&ci->ci_ev_count_compare, EVCNT_TYPE_INTR, NULL, xname, "int 5 (clock)"); evcnt_attach_dynamic(&ci->ci_ev_count_compare_missed, EVCNT_TYPE_INTR, NULL, xname, "int 5 (clock) missed"); evcnt_attach_dynamic(&ci->ci_ev_fpu_loads, EVCNT_TYPE_MISC, NULL, xname, "fpu loads"); evcnt_attach_dynamic(&ci->ci_ev_fpu_saves, EVCNT_TYPE_MISC, NULL, xname, "fpu saves"); evcnt_attach_dynamic(&ci->ci_ev_dsp_loads, EVCNT_TYPE_MISC, NULL, xname, "dsp loads"); evcnt_attach_dynamic(&ci->ci_ev_dsp_saves, EVCNT_TYPE_MISC, NULL, xname, "dsp saves"); evcnt_attach_dynamic(&ci->ci_ev_tlbmisses, EVCNT_TYPE_TRAP, NULL, xname, "tlb misses"); #ifdef MULTIPROCESSOR if (ci != &cpu_info_store) { /* * Tail insert this onto the list of cpu_info's. */ KASSERT(cpuid_infos[ci->ci_cpuid] == NULL); cpuid_infos[ci->ci_cpuid] = ci; membar_producer(); } KASSERT(cpuid_infos[ci->ci_cpuid] != NULL); evcnt_attach_dynamic(&ci->ci_evcnt_synci_activate_rqst, EVCNT_TYPE_MISC, NULL, xname, "syncicache activate request"); evcnt_attach_dynamic(&ci->ci_evcnt_synci_deferred_rqst, EVCNT_TYPE_MISC, NULL, xname, "syncicache deferred request"); evcnt_attach_dynamic(&ci->ci_evcnt_synci_ipi_rqst, EVCNT_TYPE_MISC, NULL, xname, "syncicache ipi request"); evcnt_attach_dynamic(&ci->ci_evcnt_synci_onproc_rqst, EVCNT_TYPE_MISC, NULL, xname, "syncicache onproc request"); /* * Initialize IPI framework for this cpu instance */ ipi_init(ci); kcpuset_create(&ci->ci_multicastcpus, true); kcpuset_create(&ci->ci_watchcpus, true); kcpuset_create(&ci->ci_ddbcpus, true); #endif }
/*
 * Attach the PCCchip2 onboard Intel i82586 Ethernet: map the MPU
 * registers, allocate contiguous 24-bit DMA-able buffer memory, fill
 * in the MI i82586 back-end hooks, lay out the SCP/ISCP/SCB/buffer
 * areas in that memory, and hook the hardware interrupt.
 */
/* ARGSUSED */
void
ie_pcctwo_attach(device_t parent, device_t self, void *aux)
{
	struct pcctwo_attach_args *pa;
	struct ie_pcctwo_softc *ps;
	struct ie_softc *sc;
	bus_dma_segment_t seg;
	int rseg;

	pa = aux;
	ps = device_private(self);
	sc = &ps->ps_ie;
	sc->sc_dev = self;

	/* Map the MPU controller registers in PCCTWO space */
	ps->ps_bust = pa->pa_bust;
	bus_space_map(pa->pa_bust, pa->pa_offset, IE_MPUREG_SIZE, 0,
	    &ps->ps_bush);

	/* Get contiguous DMA-able memory for the IE chip.
	 * The 82586 can only address 24 bits, hence BUS_DMA_24BIT. */
	if (bus_dmamem_alloc(pa->pa_dmat, ether_data_buff_size, PAGE_SIZE, 0,
	    &seg, 1, &rseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ONBOARD_RAM | BUS_DMA_24BIT) != 0) {
		aprint_error_dev(self, "Failed to allocate ether buffer\n");
		return;
	}
	if (bus_dmamem_map(pa->pa_dmat, &seg, rseg, ether_data_buff_size,
	    (void **) & sc->sc_maddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) {
		aprint_error_dev(self, "Failed to map ether buffer\n");
		bus_dmamem_free(pa->pa_dmat, &seg, rseg);
		return;
	}
	sc->bt = pa->pa_bust;
	sc->bh = (bus_space_handle_t) sc->sc_maddr;	/* XXXSCW Better way? */
	sc->sc_iobase = (void *) seg.ds_addr;
	sc->sc_msize = ether_data_buff_size;
	memset(sc->sc_maddr, 0, ether_data_buff_size);

	/* MI i82586 driver callbacks. */
	sc->hwreset = ie_reset;
	sc->hwinit = ie_hwinit;
	sc->chan_attn = ie_atten;
	sc->intrhook = ie_intrhook;
	sc->memcopyin = ie_copyin;
	sc->memcopyout = ie_copyout;
	sc->ie_bus_barrier = NULL;
	sc->ie_bus_read16 = ie_read_16;
	sc->ie_bus_write16 = ie_write_16;
	sc->ie_bus_write24 = ie_write_24;
	sc->sc_mediachange = NULL;
	sc->sc_mediastatus = NULL;

	/* Lay out the 82586 control structures within the buffer:
	 * SCP first, ISCP 16-byte-aligned after it, then SCB, then the
	 * remaining space as frame buffer area. */
	sc->scp = 0;
	sc->iscp = sc->scp + ((IE_SCP_SZ + 15) & ~15);
	sc->scb = sc->iscp + IE_ISCP_SZ;
	sc->buf_area = sc->scb + IE_SCB_SZ;
	sc->buf_area_sz = sc->sc_msize - (sc->buf_area - sc->scp);

	/*
	 * BUS_USE -> Interrupt Active High (edge-triggered),
	 *            Lock function enabled,
	 *            Internal bus throttle timer triggering,
	 *            82586 operating mode.
	 */
	ie_write_16(sc, IE_SCP_BUS_USE(sc->scp), IE_BUS_USE);
	ie_write_24(sc, IE_SCP_ISCP(sc->scp), sc->iscp);
	ie_write_16(sc, IE_ISCP_SCB(sc->iscp), sc->scb);
	ie_write_24(sc, IE_ISCP_BASE(sc->iscp), sc->scp);

	/* This has the side-effect of resetting the chip */
	i82586_proberam(sc);

	/* Attach the MI back-end */
	i82586_attach(sc, "onboard", mvme_ea, NULL, 0, 0);

	/* Register the event counter */
	evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_INTR,
	    pcctwointr_evcnt(pa->pa_ipl), "ether", device_xname(self));

	/* Finally, hook the hardware interrupt */
	pcctwointr_establish(PCCTWOV_LANC_IRQ, i82586_intr, pa->pa_ipl, sc,
	    &ps->ps_evcnt);
}
/* * Interface exists: make available by filling in network interface * record. System will initialize the interface when it is ready * to accept packets. */ void sgec_attach(struct ze_softc *sc) { struct ifnet *ifp = &sc->sc_if; struct ze_tdes *tp; struct ze_rdes *rp; bus_dma_segment_t seg; int i, rseg, error; /* * Allocate DMA safe memory for descriptors and setup memory. */ error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ze_cdata), PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT); if (error) { aprint_error(": unable to allocate control data, error = %d\n", error); goto fail_0; } error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct ze_cdata), (void **)&sc->sc_zedata, BUS_DMA_NOWAIT|BUS_DMA_COHERENT); if (error) { aprint_error( ": unable to map control data, error = %d\n", error); goto fail_1; } error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ze_cdata), 1, sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT, &sc->sc_cmap); if (error) { aprint_error( ": unable to create control data DMA map, error = %d\n", error); goto fail_2; } error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap, sc->sc_zedata, sizeof(struct ze_cdata), NULL, BUS_DMA_NOWAIT); if (error) { aprint_error( ": unable to load control data DMA map, error = %d\n", error); goto fail_3; } /* * Zero the newly allocated memory. */ memset(sc->sc_zedata, 0, sizeof(struct ze_cdata)); /* * Create the transmit descriptor DMA maps. */ for (i = 0; error == 0 && i < TXDESCS; i++) { error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, TXDESCS - 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &sc->sc_xmtmap[i]); } if (error) { aprint_error(": unable to create tx DMA map %d, error = %d\n", i, error); goto fail_4; } /* * Create receive buffer DMA maps. 
*/ for (i = 0; error == 0 && i < RXDESCS; i++) { error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rcvmap[i]); } if (error) { aprint_error(": unable to create rx DMA map %d, error = %d\n", i, error); goto fail_5; } /* * Pre-allocate the receive buffers. */ for (i = 0; error == 0 && i < RXDESCS; i++) { error = ze_add_rxbuf(sc, i); } if (error) { aprint_error( ": unable to allocate or map rx buffer %d, error = %d\n", i, error); goto fail_6; } /* For vmstat -i */ evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL, device_xname(sc->sc_dev), "intr"); evcnt_attach_dynamic(&sc->sc_rxintrcnt, EVCNT_TYPE_INTR, &sc->sc_intrcnt, device_xname(sc->sc_dev), "rx intr"); evcnt_attach_dynamic(&sc->sc_txintrcnt, EVCNT_TYPE_INTR, &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx intr"); evcnt_attach_dynamic(&sc->sc_txdraincnt, EVCNT_TYPE_INTR, &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx drain"); evcnt_attach_dynamic(&sc->sc_nobufintrcnt, EVCNT_TYPE_INTR, &sc->sc_intrcnt, device_xname(sc->sc_dev), "nobuf intr"); evcnt_attach_dynamic(&sc->sc_nointrcnt, EVCNT_TYPE_INTR, &sc->sc_intrcnt, device_xname(sc->sc_dev), "no intr"); /* * Create ring loops of the buffer chains. * This is only done once. */ sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr; rp = sc->sc_zedata->zc_recv; rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW; rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA; rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv; tp = sc->sc_zedata->zc_xmit; tp[TXDESCS].ze_tdr = ZE_TDR_OW; tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA; tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit; if (zereset(sc)) return; strcpy(ifp->if_xname, device_xname(sc->sc_dev)); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = zestart; ifp->if_ioctl = zeioctl; ifp->if_watchdog = zetimeout; IFQ_SET_READY(&ifp->if_snd); /* * Attach the interface. 
*/ if_attach(ifp); ether_ifattach(ifp, sc->sc_enaddr); aprint_normal("\n"); aprint_normal_dev(sc->sc_dev, "hardware address %s\n", ether_sprintf(sc->sc_enaddr)); return; /* * Free any resources we've allocated during the failed attach * attempt. Do this in reverse order and fall through. */ fail_6: for (i = 0; i < RXDESCS; i++) { if (sc->sc_rxmbuf[i] != NULL) { bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]); m_freem(sc->sc_rxmbuf[i]); } } fail_5: for (i = 0; i < RXDESCS; i++) { if (sc->sc_xmtmap[i] != NULL) bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]); } fail_4: for (i = 0; i < TXDESCS; i++) { if (sc->sc_rcvmap[i] != NULL) bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]); } bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap); fail_3: bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap); fail_2: bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_zedata, sizeof(struct ze_cdata)); fail_1: bus_dmamem_free(sc->sc_dmat, &seg, rseg); fail_0: return; }
/*
 * Interface exists: make available by filling in network interface
 * record. System will initialize the interface when it is ready
 * to accept packets.
 *
 * NOTE(review): this function references `nipqb', `gvp', `fqb',
 * `bbd', `endwait' and `retry' which are not declared locally —
 * presumably file-scope aliases into the structures allocated via
 * ni_getpgs(); confirm against the rest of the source file.
 */
static void
niattach(device_t parent, device_t self, void *aux)
{
	struct bi_attach_args *ba = aux;
	struct ni_softc *sc = device_private(self);
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct ni_msg *msg;
	struct ni_ptdb *ptdb;
	void *va;
	int i, j, s, res;
	u_short type;

	sc->sc_dev = self;

	/* Identify the board variant from its BI device-type register. */
	type = bus_space_read_2(ba->ba_iot, ba->ba_ioh, BIREG_DTYPE);
	printf(": DEBN%c\n", type == BIDT_DEBNA ? 'A' :
	    type == BIDT_DEBNT ? 'T' : 'K');
	sc->sc_iot = ba->ba_iot;
	sc->sc_ioh = ba->ba_ioh;
	sc->sc_dmat = ba->ba_dmat;

	bi_intr_establish(ba->ba_icookie, ba->ba_ivec,
	    niintr, sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "intr");

	/* Allocate the port-queue block, free-queue block and buffer
	 * descriptors. */
	ni_getpgs(sc, sizeof(struct ni_gvppqb), (void **)&sc->sc_gvppqb,
	    (paddr_t *)&sc->sc_pgvppqb);
	ni_getpgs(sc, sizeof(struct ni_fqb), (void **)&sc->sc_fqb, 0);
	ni_getpgs(sc, NBDESCS * sizeof(struct ni_bbd),
	    (void **)&sc->sc_bbd, 0);
	/*
	 * Zero the newly allocated memory.
	 */
	/* NOTE(review): no memset follows this comment; presumably
	 * ni_getpgs() returns zeroed pages — confirm. */

	/* Program the port-queue block for the firmware. */
	nipqb->np_veclvl = (ba->ba_ivec << 2) + 2;
	nipqb->np_node = ba->ba_intcpu;
	nipqb->np_vpqb = (u_int32_t)gvp;
#ifdef __vax__
	nipqb->np_spt = nipqb->np_gpt = mfpr(PR_SBR);
	nipqb->np_sptlen = nipqb->np_gptlen = mfpr(PR_SLR);
#else
#error Must fix support for non-vax.
#endif
	nipqb->np_bvplvl = 1;
	nipqb->np_vfqb = (u_int32_t)fqb;
	nipqb->np_vbdt = (u_int32_t)bbd;
	nipqb->np_nbdr = NBDESCS;

	/* Free queue block */
	nipqb->np_freeq = NQUEUES;
	fqb->nf_mlen = PKTHDR+MSGADD;
	fqb->nf_dlen = PKTHDR+TXADD;
	fqb->nf_rlen = PKTHDR+RXADD;

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = nistart;
	ifp->if_ioctl = niioctl;
	ifp->if_watchdog = nitimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Start init sequence.
	 */

	/* Reset the node */
	NI_WREG(BIREG_VAXBICSR, NI_RREG(BIREG_VAXBICSR) | BICSR_NRST);
	DELAY(500000);
	i = 20;
	/* Poll (up to ~10s) for the BROKE bit to clear after reset. */
	while ((NI_RREG(BIREG_VAXBICSR) & BICSR_BROKE) && --i)
		DELAY(500000);
	if (i == 0) {
		printf("%s: BROKE bit set after reset\n",
		    device_xname(self));
		return;
	}

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_STATE, PSR_UNDEF, "not undefined state"))
		return;

	/* Clear owner bits */
	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);
	NI_WREG(NI_PCR, NI_RREG(NI_PCR) & ~PCR_OWN);

	/* kick off init */
	NI_WREG(NI_PCR, (u_int32_t)sc->sc_pgvppqb | PCR_INIT | PCR_OWN);
	while (NI_RREG(NI_PCR) & PCR_OWN)
		DELAY(100000);

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_INITED, PSR_INITED, "failed initialize"))
		return;

	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_OWN|PCR_ENABLE);
	WAITREG(NI_PCR, PCR_OWN);
	WAITREG(NI_PSR, PSR_OWN);

	/* Check state */
	if (failtest(sc, NI_PSR, PSR_STATE, PSR_ENABLED, "failed enable"))
		return;

	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~PSR_OWN);

	/*
	 * The message queue packets must be located on the beginning
	 * of a page. A VAX page is 512 bytes, but it clusters 8 pages.
	 * This knowledge is used here when allocating pages.
	 * !!! How should this be done on MIPS and Alpha??? !!!
	 */
#if NBPG < 4096
#error pagesize too small
#endif
	s = splvm();
	/* Set up message free queue */
	ni_getpgs(sc, NMSGBUF * 512, &va, 0);
	for (i = 0; i < NMSGBUF; i++) {
		msg = (void *)((char *)va + i * 512);
		res = INSQTI(msg, &fqb->nf_mforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Set up xmit queue */
	ni_getpgs(sc, NTXBUF * 512, &va, 0);
	for (i = 0; i < NTXBUF; i++) {
		struct ni_dg *data;

		data = (void *)((char *)va + i * 512);
		data->nd_status = 0;
		data->nd_len = TXADD;
		data->nd_ptdbidx = 1;
		data->nd_opcode = BVP_DGRAM;
		for (j = 0; j < NTXFRAGS; j++) {
			data->bufs[j]._offset = 0;
			data->bufs[j]._key = 1;
			bbd[i * NTXFRAGS + j].nb_key = 1;
			bbd[i * NTXFRAGS + j].nb_status = 0;
			data->bufs[j]._index = i * NTXFRAGS + j;
		}
		res = INSQTI(data, &fqb->nf_dforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_DFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* recv buffers */
	ni_getpgs(sc, NRXBUF * 512, &va, 0);
	for (i = 0; i < NRXBUF; i++) {
		struct ni_dg *data;
		int idx;

		data = (void *)((char *)va + i * 512);
		data->nd_len = RXADD;
		data->nd_opcode = BVP_DGRAMRX;
		data->nd_ptdbidx = 2;
		data->bufs[0]._key = 1;

		idx = NTXBUF * NTXFRAGS + i;
		if (ni_add_rxbuf(sc, data, idx))
			panic("niattach: ni_add_rxbuf: out of mbufs");

		res = INSQTI(data, &fqb->nf_rforw);
	}
	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_FREEQNE|PCR_RFREEQ|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	splx(s);

	/* Set initial parameters */
	msg = REMQHI(&fqb->nf_mforw);

	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = sizeof(struct ni_param) + 6;
	msg->nm_opcode2 = NI_WPARAM;
	((struct ni_param *)&msg->nm_text[0])->np_flags = NP_PAD;

	endwait = retry = 0;
	res = INSQTI(msg, &gvp->nc_forw0);

	/* Issue the command; retry up to 3 times, ~10s each, waiting
	 * for the interrupt handler to set `endwait'. */
retry:	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);
	i = 1000;
	while (endwait == 0 && --i)
		DELAY(10000);

	if (endwait == 0) {
		if (++retry < 3)
			goto retry;
		printf("%s: no response to set params\n",
		    device_xname(self));
		return;
	}

	/* Clear counters */
	msg = REMQHI(&fqb->nf_mforw);
	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = sizeof(struct ni_param) + 6;
	msg->nm_opcode2 = NI_RCCNTR;

	res = INSQTI(msg, &gvp->nc_forw0);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Enable transmit logic */
	msg = REMQHI(&fqb->nf_mforw);

	msg->nm_opcode = BVP_MSG;
	msg->nm_status = 0;
	msg->nm_len = 18;
	msg->nm_opcode2 = NI_STPTDB;
	ptdb = (struct ni_ptdb *)&msg->nm_text[0];
	memset(ptdb, 0, sizeof(struct ni_ptdb));
	ptdb->np_index = 1;
	ptdb->np_fque = 1;

	res = INSQTI(msg, &gvp->nc_forw0);

	WAITREG(NI_PCR, PCR_OWN);
	NI_WREG(NI_PCR, PCR_CMDQNE|PCR_CMDQ0|PCR_OWN);
	WAITREG(NI_PCR, PCR_OWN);

	/* Wait for everything to finish */
	WAITREG(NI_PSR, PSR_OWN);

	printf("%s: hardware address %s\n", device_xname(self),
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	if (shutdownhook_establish(ni_shutdown, sc) == 0)
		aprint_error_dev(self,
		    "WARNING: unable to establish shutdown hook\n");
}
/*
 * hcsc_attach: autoconfiguration attach routine for the HCCS SCSI
 * podule.  Fills in the generic ncr5380 softc (register access, PDMA
 * hooks, host ID), maps the DP8490 register and pseudo-DMA windows,
 * wires up the interrupt, then hands off to ncr5380_attach().
 */
void hcsc_attach(device_t parent, device_t self, void *aux)
{
	struct hcsc_softc *sc = device_private(self);
	struct ncr5380_softc *ncr_sc = &sc->sc_ncr5380;
	struct podulebus_attach_args *pa = aux;
#ifndef NCR5380_USE_BUS_SPACE
	uint8_t *iobase;
#endif
	/* Scratch buffer for the "<devname>.hostid" boot option key. */
	char hi_option[sizeof(self->dv_xname) + 8];

	ncr_sc->sc_dev = self;

	/* No board DMA engine: disable all MI DMA / interrupt hooks. */
	ncr_sc->sc_min_dma_len = 0;
	ncr_sc->sc_no_disconnect = 0;
	ncr_sc->sc_parity_disable = 0;
	ncr_sc->sc_dma_alloc = NULL;
	ncr_sc->sc_dma_free = NULL;
	ncr_sc->sc_dma_poll = NULL;
	ncr_sc->sc_dma_setup = NULL;
	ncr_sc->sc_dma_start = NULL;
	ncr_sc->sc_dma_eop = NULL;
	ncr_sc->sc_dma_stop = NULL;
	ncr_sc->sc_intr_on = NULL;
	ncr_sc->sc_intr_off = NULL;

#ifdef NCR5380_USE_BUS_SPACE
	/*
	 * bus_space access: map 8 bytes of DP8490 registers; the sci_rN
	 * members are bus_space offsets (0..7).
	 */
	ncr_sc->sc_regt = pa->pa_fast_t;
	bus_space_map(ncr_sc->sc_regt,
	    pa->pa_fast_base + HCSC_DP8490_OFFSET, 8, 0, &ncr_sc->sc_regh);
	ncr_sc->sci_r0 = 0;
	ncr_sc->sci_r1 = 1;
	ncr_sc->sci_r2 = 2;
	ncr_sc->sci_r3 = 3;
	ncr_sc->sci_r4 = 4;
	ncr_sc->sci_r5 = 5;
	ncr_sc->sci_r6 = 6;
	ncr_sc->sci_r7 = 7;
#else
	/*
	 * Direct-pointer access: the sci_rN members are raw pointers;
	 * registers appear 4 bytes apart on this bus.
	 */
	iobase = (u_char *)pa->pa_fast_base + HCSC_DP8490_OFFSET;
	ncr_sc->sci_r0 = iobase + 0;
	ncr_sc->sci_r1 = iobase + 4;
	ncr_sc->sci_r2 = iobase + 8;
	ncr_sc->sci_r3 = iobase + 12;
	ncr_sc->sci_r4 = iobase + 16;
	ncr_sc->sci_r5 = iobase + 20;
	ncr_sc->sci_r6 = iobase + 24;
	ncr_sc->sci_r7 = iobase + 28;
#endif

	/* Map one byte of the pseudo-DMA data window. */
	sc->sc_pdmat = pa->pa_mod_t;
	bus_space_map(sc->sc_pdmat,
	    pa->pa_mod_base + HCSC_PDMA_OFFSET, 1, 0, &sc->sc_pdmah);

	ncr_sc->sc_rev = NCR_VARIANT_DP8490;

	/* PIO is done through the pseudo-DMA window. */
	ncr_sc->sc_pio_in = hcsc_pdma_in;
	ncr_sc->sc_pio_out = hcsc_pdma_out;

	/* Provide an override for the host id */
	ncr_sc->sc_channel.chan_id = 7;
	snprintf(hi_option, sizeof(hi_option), "%s.hostid",
	    device_xname(self));
	(void)get_bootconf_option(boot_args, hi_option,
	    BOOTOPT_TYPE_INT, &ncr_sc->sc_channel.chan_id);
	ncr_sc->sc_adapter.adapt_minphys = minphys;

	aprint_normal(": host ID %d\n", ncr_sc->sc_channel.chan_id);

	/* Counter must exist before the IRQ handler that bumps it. */
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "intr");
	sc->sc_ih = podulebus_irq_establish(pa->pa_ih, IPL_BIO,
	    ncr5380_intr, sc, &sc->sc_intrcnt);

	ncr5380_attach(ncr_sc);
}
/* * Interface exists: make available by filling in network interface * record. System will initialize the interface when it is ready * to accept packets. We get the ethernet address here. */ void deattach(struct device *parent, struct device *self, void *aux) { struct uba_attach_args *ua = aux; struct de_softc *sc = (struct de_softc *)self; struct ifnet *ifp = &sc->sc_if; u_int8_t myaddr[ETHER_ADDR_LEN]; int csr1, error; char *c; sc->sc_iot = ua->ua_iot; sc->sc_ioh = ua->ua_ioh; sc->sc_dmat = ua->ua_dmat; /* * What kind of a board is this? * The error bits 4-6 in pcsr1 are a device id as long as * the high byte is zero. */ csr1 = DE_RCSR(DE_PCSR1); if (csr1 & 0xff60) c = "broken"; else if (csr1 & 0x10) c = "delua"; else c = "deuna"; /* * Reset the board and temporarily map * the pcbb buffer onto the Unibus. */ DE_WCSR(DE_PCSR0, 0); /* reset INTE */ DELAY(100); DE_WCSR(DE_PCSR0, PCSR0_RSET); dewait(sc, "reset"); sc->sc_ui.ui_size = sizeof(struct de_cdata); if ((error = ubmemalloc((struct uba_softc *)parent, &sc->sc_ui, 0))) return printf(": failed ubmemalloc(), error = %d\n", error); sc->sc_dedata = (struct de_cdata *)sc->sc_ui.ui_vaddr; /* * Tell the DEUNA about our PCB */ DE_WCSR(DE_PCSR2, LOWORD(sc->sc_ui.ui_baddr)); DE_WCSR(DE_PCSR3, HIWORD(sc->sc_ui.ui_baddr)); DE_WLOW(CMD_GETPCBB); dewait(sc, "pcbb"); sc->sc_dedata->dc_pcbb.pcbb0 = FC_RDPHYAD; DE_WLOW(CMD_GETCMD); dewait(sc, "read addr "); bcopy((caddr_t)&sc->sc_dedata->dc_pcbb.pcbb2, myaddr, sizeof (myaddr)); printf(": %s, address %s\n", c, ether_sprintf(myaddr)); uba_intr_establish(ua->ua_icookie, ua->ua_cvec, deintr, sc, &sc->sc_intrcnt); uba_reset_establish(dereset, &sc->sc_dev); evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt, sc->sc_dev.dv_xname, "intr"); strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST|IFF_ALLMULTI; ifp->if_ioctl = deioctl; ifp->if_start = destart; ifp->if_init = deinit; 
ifp->if_stop = destop; IFQ_SET_READY(&ifp->if_snd); if_attach(ifp); ether_ifattach(ifp, myaddr); ubmemfree((struct uba_softc *)parent, &sc->sc_ui); sc->sc_sh = shutdownhook_establish(deshutdown, sc); }
/*
 * vmetwo_intr_establish: hook an interrupt handler to the VMEChip2.
 *
 * prior  - interrupt priority to program into the chip
 * lvl    - VMEbus interrupt level (selects enable bit for bus IRQs)
 * vec    - interrupt vector; local VMEChip2 vectors are routed via a
 *          trampoline that acknowledges/clears them
 * first  - nonzero when this is the first handler on a VMEbus level,
 *          i.e. the enable bit still needs to be set
 * hand/arg - handler function and its argument
 * evcnt  - optional event counter to attach for this interrupt
 *
 * NOTE(review): the #if NVMETWO braces interleave with the C braces;
 * the code is kept byte-identical and only commented.
 */
/* ARGSUSED */
void
vmetwo_intr_establish(void *csc, int prior, int lvl, int vec, int first,
    int (*hand)(void *), void *arg, struct evcnt *evcnt)
{
	struct vmetwo_softc *sc = csc;
	u_int32_t reg;
	int bitoff;	/* enable/clear bit index within the LCSR */
	int iloffset, ilshift;
	int s;

	/* Block interrupts while the chip registers are updated. */
	s = splhigh();

#if NVMETWO > 0
	/*
	 * Sort out interrupts generated locally by the VMEChip2 from
	 * those generated by VMEbus devices...
	 */
	if (vec >= VME2_VECTOR_LOCAL_MIN && vec <= VME2_VECTOR_LOCAL_MAX) {
#endif
		/*
		 * Local interrupts need to be bounced through some
		 * trampoline code which acknowledges/clears them.
		 */
		vme_two_handlers[vec - VME2_VECTOR_LOCAL_MIN].isr_hand = hand;
		vme_two_handlers[vec - VME2_VECTOR_LOCAL_MIN].isr_arg = arg;
		hand = vmetwo_local_isr_trampoline;
		arg = (void *) (vec - VME2_VECTOR_BASE);

		/*
		 * Interrupt enable/clear bit offset is 0x08 - 0x1f
		 */
		bitoff = vec - VME2_VECTOR_BASE;
#if NVMETWO > 0
		first = 1;	/* Force the interrupt to be enabled */
	} else {
		/*
		 * Interrupts originating from the VMEbus are
		 * controlled by an offset of 0x00 - 0x07
		 */
		bitoff = lvl - 1;
	}
#endif

	/* Hook the interrupt */
	(*sc->sc_isrlink)(sc->sc_isrcookie, hand, arg, prior, vec, evcnt);

	/*
	 * Do we need to tell the VMEChip2 to let the interrupt through?
	 * (This is always true for locally-generated interrupts, but only
	 * needs doing once for each VMEbus interrupt level which is hooked)
	 */
#if NVMETWO > 0
	if (first) {
		if (evcnt)
			evcnt_attach_dynamic(evcnt, EVCNT_TYPE_INTR,
			    (*sc->sc_isrevcnt)(sc->sc_isrcookie, prior),
			    device_xname(sc->sc_mvmebus.sc_dev),
			    mvmebus_irq_name[lvl]);
#endif
		iloffset = VME2_ILOFFSET_FROM_VECTOR(bitoff) +
		    VME2LCSR_INTERRUPT_LEVEL_BASE;
		ilshift = VME2_ILSHIFT_FROM_VECTOR(bitoff);

		/* Program the specified interrupt to signal at 'prior' */
		reg = vme2_lcsr_read(sc, iloffset);
		reg &= ~(VME2_INTERRUPT_LEVEL_MASK << ilshift);
		reg |= (prior << ilshift);
		vme2_lcsr_write(sc, iloffset, reg);

		/* Clear it */
		vme2_lcsr_write(sc, VME2LCSR_LOCAL_INTERRUPT_CLEAR,
		    VME2_LOCAL_INTERRUPT(bitoff));

		/* Enable it.
		 */
		reg = vme2_lcsr_read(sc, VME2LCSR_LOCAL_INTERRUPT_ENABLE);
		reg |= VME2_LOCAL_INTERRUPT(bitoff);
		vme2_lcsr_write(sc, VME2LCSR_LOCAL_INTERRUPT_ENABLE, reg);
#if NVMETWO > 0
	}
#ifdef DIAGNOSTIC
	else {
		/* Verify the interrupt priority is the same */
		iloffset = VME2_ILOFFSET_FROM_VECTOR(bitoff) +
		    VME2LCSR_INTERRUPT_LEVEL_BASE;
		ilshift = VME2_ILSHIFT_FROM_VECTOR(bitoff);

		reg = vme2_lcsr_read(sc, iloffset);
		reg &= (VME2_INTERRUPT_LEVEL_MASK << ilshift);

		if ((prior << ilshift) != reg)
			panic("vmetwo_intr_establish: priority mismatch!");
	}
#endif
#endif
	splx(s);
}
/*
 * intr_establish_xname: register an interrupt handler for hardware
 * IRQ 'hwirq' with trigger 'type' at priority 'ipl', named 'xname'.
 * Returns an opaque handle (struct intrhand *) for disestablish.
 *
 * Handlers sharing a virtual IRQ are kept in list order; the source's
 * effective IPL is raised to the maximum over all its handlers.
 */
void *
intr_establish_xname(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg, const char *xname)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	/* Placeholder handler installed while masks are recomputed. */
	static struct intrhand fakehand;
	int maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL) {
		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	/* Map the hardware IRQ onto a virtual IRQ slot. */
	const int virq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = kmem_intr_alloc(sizeof(*ih), cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL)
		panic("intr_establish: can't allocate handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];

	/*
	 * Sharing rules: an unused source adopts the new type; an
	 * edge/level source may be shared only with the same type;
	 * pulsed interrupts cannot be shared at all.
	 */
	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE_FALLING:
	case IST_EDGE_RISING:
	case IST_LEVEL_LOW:
	case IST_LEVEL_HIGH:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}

	/* First handler on this source: attach its event counter. */
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = max(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	strlcpy(ih->ih_xname, xname != NULL ? xname : "unknown",
	    sizeof(ih->ih_xname));
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by this handler.
	 */
	is->is_ipl = maxipl;

	/*
	 * now that the handler is established we're actually ready to
	 * calculate the masks
	 */
	intr_calculatemasks();

	return ih;
}
/*ARGSUSED*/ static void sysfpgaattach(struct device *parent, struct device *self, void *args) { struct sysfpga_softc *sc = (struct sysfpga_softc *)self; struct femi_attach_args *fa = args; struct sysfpga_attach_args sa; u_int32_t reg; int i; #if (NSUPERIO > 0) || defined(SM_SYSFPGA) || (NSH5PCI > 0) struct evcnt *ev; static const char sysfpga_intr[] = "sysfpga intr"; #endif sysfpga_sc = sc; sc->sc_bust = fa->fa_bust; bus_space_map(sc->sc_bust, fa->fa_offset + SYSFPGA_OFFSET_REGS, SYSFPGA_REG_SZ, 0, &sc->sc_bush); reg = sysfpga_reg_read(sc, SYSFPGA_REG_DATE); printf( ": Cayman System FPGA, Revision: %d - %02x/%02x/%02x (yy/mm/dd)\n", SYSFPGA_DATE_REV(reg), SYSFPGA_DATE_YEAR(reg), SYSFPGA_DATE_MONTH(reg), SYSFPGA_DATE_DATE(reg)); reg = sysfpga_reg_read(sc, SYSFPGA_REG_BDMR); printf("%s: CPUCLKSEL: %s, CPU Clock Mode: %d\n", sc->sc_dev.dv_xname, sysfpga_cpuclksel[SYSFPGA_BDMR_CPUCLKSEL(reg)], SYSFPGA_CPUMR_CLKMODE(sysfpga_reg_read(sc, SYSFPGA_REG_CPUMR))); #if (NSUPERIO > 0) || defined(SM_SYSFPGA) memset(sc->sc_ih_irl1, 0, sizeof(sc->sc_ih_irl1)); #endif #if NSH5PCI > 0 memset(sc->sc_ih_irl1, 0, sizeof(sc->sc_ih_irl2)); memset(sc->sc_ih_irl1, 0, sizeof(sc->sc_ih_irl3)); #endif for (i = 0; i < SYSFPGA_NGROUPS; i++) { sysfpga_reg_write(sc, SYSFPGA_REG_INTMR(i), 0); sc->sc_intmr[i] = 0; } #if (NSUPERIO > 0) || defined(SM_SYSFPGA) /* * Hook interrupts for IRL1 devices */ sc->sc_ih[SYSFPGA_IGROUP_IRL1] = sh5_intr_establish(INTC_INTEVT_IRL1, IST_LEVEL, IPL_SUPERIO, sysfpga_intr_handler_irl1, sc); if (sc->sc_ih[SYSFPGA_IGROUP_IRL1] == NULL) panic("sysfpga: failed to register irl1 isr"); ev = sh5_intr_evcnt(sc->sc_ih[SYSFPGA_IGROUP_IRL1]); for (i = 0; i < SYSFPGA_IRL1_NINTR; i++) { evcnt_attach_dynamic(&sysfpga_irl1_intr_events[i], EVCNT_TYPE_INTR, ev, (i >= SYSFPGA_IRL1_INUM_KBD) ? 
"isa intr" : sysfpga_intr, sysfpga_irl1_intr_names[i]); } #endif #if NSH5PCI > 0 /* * Hook interrupts from the PCI1 and PCI2 pins */ sc->sc_ih[SYSFPGA_IGROUP_IRL2] = sh5_intr_establish(INTC_INTEVT_IRL2, IST_LEVEL, IPL_SH5PCI, sysfpga_intr_handler_irl2, sc); sc->sc_ih[SYSFPGA_IGROUP_IRL3] = sh5_intr_establish(INTC_INTEVT_IRL3, IST_LEVEL, IPL_SH5PCI, sysfpga_intr_handler_irl3, sc); if (sc->sc_ih[SYSFPGA_IGROUP_IRL2] == NULL || sc->sc_ih[SYSFPGA_IGROUP_IRL3] == NULL) panic("sysfpga: failed to register pci isr"); ev = sh5_intr_evcnt(sc->sc_ih[SYSFPGA_IGROUP_IRL2]); evcnt_attach_dynamic(&sysfpga_irl2_intr_events, EVCNT_TYPE_INTR, ev, sysfpga_intr, "pci1"); ev = sh5_intr_evcnt(sc->sc_ih[SYSFPGA_IGROUP_IRL3]); evcnt_attach_dynamic(&sysfpga_irl3_intr_events, EVCNT_TYPE_INTR, ev, sysfpga_intr, "pci2"); #endif #ifdef DDB sysfpga_reg_write(sc, SYSFPGA_REG_NMIMR, 1); #endif /* * Arrange to twinkle the "Discrete LED" periodically * as a crude "heartbeat" indication. */ callout_init(&sc->sc_ledco); sysfpga_twinkle_led(sc); /* * Attach configured children */ sa._sa_base = fa->fa_offset; for (i = 0; sysfpga_devices[i].sd_name != NULL; i++) { sa.sa_name = sysfpga_devices[i].sd_name; sa.sa_bust = fa->fa_bust; sa.sa_dmat = fa->fa_dmat; sa.sa_offset = sysfpga_devices[i].sd_offset + sa._sa_base; (void) config_found(self, &sa, sysfpgaprint); } }
/*
 * Attach the CPU.
 * Discover interesting goop about the virtual address cache
 * (slightly funny place to do it, but this is where it is to be found).
 */
void
cpu_attach(device_t parent, device_t dev, void *aux)
{
	int node;
	long clk, sclk = 0;
	struct mainbus_attach_args *ma = aux;
	struct cpu_info *ci;
	const char *sep;
	register int i, l;
	int bigcache, cachesize;
	char buf[100];
	int totalsize = 0;
	int linesize, dcachesize, icachesize;

	/* tell them what we have */
	node = ma->ma_node;

	/*
	 * Allocate cpu_info structure if needed.
	 */
	ci = alloc_cpuinfo((u_int)node);

	/*
	 * Only do this on the boot cpu. Other cpu's call
	 * cpu_reset_fpustate() from cpu_hatch() before they
	 * call into the idle loop.
	 * For other cpus, we need to call mi_cpu_attach()
	 * and complete setting up cpcb.
	 */
	if (ci->ci_flags & CPUF_PRIMARY) {
		fpstate_cache = pool_cache_init(sizeof(struct fpstate64),
		    SPARC64_BLOCK_SIZE, 0, 0, "fpstate", NULL, IPL_NONE,
		    NULL, NULL, NULL);
		cpu_reset_fpustate();
	}
#ifdef MULTIPROCESSOR
	else {
		mi_cpu_attach(ci);
		ci->ci_cpcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
	}
	/* Attach one counter per inter-processor-interrupt type. */
	for (i = 0; i < IPI_EVCNT_NUM; ++i)
		evcnt_attach_dynamic(&ci->ci_ipi_evcnt[i], EVCNT_TYPE_INTR,
		    NULL, device_xname(dev), ipi_evcnt_names[i]);
#endif
	evcnt_attach_dynamic(&ci->ci_tick_evcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(dev), "timer");
	mutex_init(&ci->ci_ctx_lock, MUTEX_SPIN, IPL_VM);

	clk = prom_getpropint(node, "clock-frequency", 0);
	if (clk == 0) {
		/*
		 * Try to find it in the OpenPROM root...
		 */
		clk = prom_getpropint(findroot(), "clock-frequency", 0);
	}
	if (clk) {
		/* Tell OS what frequency we run on */
		ci->ci_cpu_clockrate[0] = clk;
		ci->ci_cpu_clockrate[1] = clk / 1000000;
	}

	/* System (%stick) clock rate, in Hz and MHz. */
	sclk = prom_getpropint(findroot(), "stick-frequency", 0);
	ci->ci_system_clockrate[0] = sclk;
	ci->ci_system_clockrate[1] = sclk / 1000000;

	snprintf(buf, sizeof buf, "%s @ %s MHz",
	    prom_getpropstring(node, "name"), clockfreq(clk));
	snprintf(cpu_model, sizeof cpu_model, "%s (%s)", machine_model, buf);

	aprint_normal(": %s, UPA id %d\n", buf, ci->ci_cpuid);
	aprint_naive("\n");

	if (ci->ci_system_clockrate[0] != 0) {
		aprint_normal_dev(dev, "system tick frequency %d MHz\n",
		    (int)ci->ci_system_clockrate[1]);
	}
	aprint_normal_dev(dev, "");

	/* Track the largest per-way cache size to re-color pages below. */
	bigcache = 0;

	/*
	 * Instruction cache: prefer the "icache-size" property, else
	 * derive total size from line size x nlines x associativity.
	 * Globals icache_size/icache_line_size keep the system maximum.
	 */
	icachesize = prom_getpropint(node, "icache-size", 0);
	if (icachesize > icache_size)
		icache_size = icachesize;
	linesize = l = prom_getpropint(node, "icache-line-size", 0);
	if (linesize > icache_line_size)
		icache_line_size = linesize;

	/* Line size must be a power of two (when nonzero). */
	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad icache line size %d", l);
	totalsize = icachesize;
	if (totalsize == 0)
		totalsize = l *
		    prom_getpropint(node, "icache-nlines", 64) *
		    prom_getpropint(node, "icache-associativity", 1);

	/* Per-way size = total / associativity. */
	cachesize = totalsize /
	    prom_getpropint(node, "icache-associativity", 1);
	bigcache = cachesize;

	sep = "";
	if (totalsize > 0) {
		aprint_normal("%s%ldK instruction (%ld b/l)", sep,
		    (long)totalsize/1024, (long)linesize);
		sep = ", ";
	}

	/*
	 * Data cache: same derivation as the instruction cache.
	 */
	dcachesize = prom_getpropint(node, "dcache-size", 0);
	if (dcachesize > dcache_size)
		dcache_size = dcachesize;
	linesize = l = prom_getpropint(node, "dcache-line-size", 0);
	if (linesize > dcache_line_size)
		dcache_line_size = linesize;

	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad dcache line size %d", l);
	totalsize = dcachesize;
	if (totalsize == 0)
		totalsize = l *
		    prom_getpropint(node, "dcache-nlines", 128) *
		    prom_getpropint(node, "dcache-associativity", 1);

	cachesize = totalsize /
	    prom_getpropint(node, "dcache-associativity", 1);
	if (cachesize > bigcache)
		bigcache = cachesize;

	if (totalsize > 0) {
		aprint_normal("%s%ldK data (%ld b/l)", sep,
		    (long)totalsize/1024, (long)linesize);
		sep = ", ";
	}

	/*
	 * External (level-2) cache.
	 */
	linesize = l = prom_getpropint(node, "ecache-line-size", 0);
	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad ecache line size %d", l);
	totalsize = prom_getpropint(node, "ecache-size", 0);
	if (totalsize == 0)
		totalsize = l *
		    prom_getpropint(node, "ecache-nlines", 32768) *
		    prom_getpropint(node, "ecache-associativity", 1);

	cachesize = totalsize /
	    prom_getpropint(node, "ecache-associativity", 1);
	if (cachesize > bigcache)
		bigcache = cachesize;

	if (totalsize > 0) {
		aprint_normal("%s%ldK external (%ld b/l)", sep,
		    (long)totalsize/1024, (long)linesize);
	}
	aprint_normal("\n");

	/* Smallest external-cache line size seen across all CPUs. */
	if (ecache_min_line_size == 0 ||
	    linesize < ecache_min_line_size)
		ecache_min_line_size = linesize;

	/*
	 * Now that we know the size of the largest cache on this CPU,
	 * re-color our pages.
	 */
	uvm_page_recolor(atop(bigcache)); /* XXX */
}
void amdpm_attach(struct device *parent, struct device *self, void *aux) { struct amdpm_softc *sc = (struct amdpm_softc *) self; struct pci_attach_args *pa = aux; struct timeval tv1, tv2; pcireg_t reg; int i; sc->sc_pc = pa->pa_pc; sc->sc_tag = pa->pa_tag; sc->sc_iot = pa->pa_iot; reg = pci_conf_read(pa->pa_pc, pa->pa_tag, AMDPM_CONFREG); if ((reg & AMDPM_PMIOEN) == 0) { printf(": PMxx space isn't enabled\n"); return; } reg = pci_conf_read(pa->pa_pc, pa->pa_tag, AMDPM_PMPTR); if (bus_space_map(sc->sc_iot, AMDPM_PMBASE(reg), AMDPM_PMSIZE, 0, &sc->sc_ioh)) { printf(": failed to map PMxx space\n"); return; } reg = pci_conf_read(pa->pa_pc, pa->pa_tag, AMDPM_CONFREG); if (reg & AMDPM_RNGEN) { /* Check to see if we can read data from the RNG. */ (void) bus_space_read_4(sc->sc_iot, sc->sc_ioh, AMDPM_RNGDATA); /* benchmark the RNG */ microtime(&tv1); for (i = 2 * 1024; i--; ) { while(!(bus_space_read_1(sc->sc_iot, sc->sc_ioh, AMDPM_RNGSTAT) & AMDPM_RNGDONE)) ; (void) bus_space_read_4(sc->sc_iot, sc->sc_ioh, AMDPM_RNGDATA); } microtime(&tv2); timersub(&tv2, &tv1, &tv1); if (tv1.tv_sec) tv1.tv_usec += 1000000 * tv1.tv_sec; printf(": rng active, %dKb/sec", 8 * 1000000 / tv1.tv_usec); #ifdef AMDPM_RND_COUNTERS evcnt_attach_dynamic(&sc->sc_rnd_hits, EVCNT_TYPE_MISC, NULL, sc->sc_dev.dv_xname, "rnd hits"); evcnt_attach_dynamic(&sc->sc_rnd_miss, EVCNT_TYPE_MISC, NULL, sc->sc_dev.dv_xname, "rnd miss"); for (i = 0; i < 256; i++) { evcnt_attach_dynamic(&sc->sc_rnd_data[i], EVCNT_TYPE_MISC, NULL, sc->sc_dev.dv_xname, "rnd data"); } #endif timeout_set(&sc->sc_rnd_ch, amdpm_rnd_callout, sc); amdpm_rnd_callout(sc); } }