Example #1
Static void
url_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct url_chain *c = priv;
	struct url_softc *sc = c->url_sc;
	struct ifnet *ifp = GET_IFP(sc);
	struct mbuf *m;
	u_int32_t total_len;
	url_rxhdr_t rxhdr;
	int s;

	DPRINTF(("%s: %s: enter\n", device_xname(sc->sc_dev),__func__));

	if (sc->sc_dying)
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
			return;
		sc->sc_rx_errs++;
		if (usbd_ratecheck(&sc->sc_rx_notice)) {
			printf("%s: %u usb errors on rx: %s\n",
			       device_xname(sc->sc_dev), sc->sc_rx_errs,
			       usbd_errstr(status));
			sc->sc_rx_errs = 0;
		}
		if (status == USBD_STALLED) {
			sc->sc_refcnt++;
			usbd_clear_endpoint_stall_async(sc->sc_pipe_rx);
			if (--sc->sc_refcnt < 0)
				usb_detach_wakeupold(sc->sc_dev);
		}
		goto done;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

	memcpy(mtod(c->url_mbuf, char *), c->url_buf, total_len);

	if (total_len <= ETHER_CRC_LEN) {
		ifp->if_ierrors++;
		goto done;
	}

	memcpy(&rxhdr, c->url_buf + total_len - ETHER_CRC_LEN, sizeof(rxhdr));

	DPRINTF(("%s: RX Status: %dbytes%s%s%s%s packets\n",
		 device_xname(sc->sc_dev),
		 UGETW(rxhdr) & URL_RXHDR_BYTEC_MASK,
		 UGETW(rxhdr) & URL_RXHDR_VALID_MASK ? ", Valid" : "",
		 UGETW(rxhdr) & URL_RXHDR_RUNTPKT_MASK ? ", Runt" : "",
		 UGETW(rxhdr) & URL_RXHDR_PHYPKT_MASK ? ", Physical match" : "",
		 UGETW(rxhdr) & URL_RXHDR_MCASTPKT_MASK ? ", Multicast" : ""));

	if ((UGETW(rxhdr) & URL_RXHDR_VALID_MASK) == 0) {
		ifp->if_ierrors++;
		goto done;
	}

	ifp->if_ipackets++;
	total_len -= ETHER_CRC_LEN;

	m = c->url_mbuf;
	m->m_pkthdr.len = m->m_len = total_len;
	m->m_pkthdr.rcvif = ifp;

	s = splnet();

	if (url_newbuf(sc, c, NULL) == ENOBUFS) {
		ifp->if_ierrors++;
		goto done1;
	}

	bpf_mtap(ifp, m);

	DPRINTF(("%s: %s: deliver %d\n", device_xname(sc->sc_dev),
		 __func__, m->m_len));
	(*(ifp)->if_input)((ifp), (m));

 done1:
	splx(s);

 done:
	/* Setup new transfer */
	usbd_setup_xfer(xfer, sc->sc_pipe_rx, c, c->url_buf, URL_BUFSZ,
			USBD_SHORT_XFER_OK | USBD_NO_COPY,
			USBD_NO_TIMEOUT, url_rxeof);
	sc->sc_refcnt++;
	usbd_transfer(xfer);
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeupold(sc->sc_dev);

	DPRINTF(("%s: %s: start rx\n", device_xname(sc->sc_dev), __func__));
}
Example #2
static void
cy693_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);

    if (pciide_chipen(sc, pa) == 0)
        return;

    /*
     * this chip has 2 PCI IDE functions, one for primary and one for
     * secondary. So we need to call pciide_mapregs_compat() with
     * the real channel
     */
    if (pa->pa_function == 1) {
        sc->sc_cy_compatchan = 0;
    } else if (pa->pa_function == 2) {
        sc->sc_cy_compatchan = 1;
    } else {
        aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                         "unexpected PCI function %d\n", pa->pa_function);
        return;
    }
    if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
        aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                           "bus-master DMA support present\n");
        pciide_mapreg_dma(sc, pa);
    } else {
        aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                          "hardware does not support DMA\n");
        sc->sc_dma_ok = 0;
    }

    sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
    if (sc->sc_cy_handle == NULL) {
        aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                         "unable to map hyperCache control registers\n");
        sc->sc_dma_ok = 0;
    }

    sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    sc->sc_wdcdev.sc_atac.atac_set_modes = cy693_setup_channel;

    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    wdc_allocate_regs(&sc->sc_wdcdev);

    /* Only one channel for this chip; if we are here it's enabled */
    cp = &sc->pciide_channels[0];
    sc->wdc_chanarray[0] = &cp->ata_channel;
    cp->name = PCIIDE_CHANNEL_NAME(0);
    cp->ata_channel.ch_channel = 0;
    cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
    cp->ata_channel.ch_queue =
        malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
    if (cp->ata_channel.ch_queue == NULL) {
        aprint_error("%s primary channel: "
                     "can't allocate memory for command queue\n",
                     device_xname(sc->sc_wdcdev.sc_atac.atac_dev));
        return;
    }
    aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                      "primary channel %s to ",
                      (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
                      "configured" : "wired");
    if (interface & PCIIDE_INTERFACE_PCI(0)) {
        aprint_normal("native-PCI mode\n");
        pciide_mapregs_native(pa, cp, pciide_pci_intr);
    } else {
        aprint_normal("compatibility mode\n");
        pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan);
        if ((cp->ata_channel.ch_flags & ATACH_DISABLED) == 0)
            pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan);
    }
    wdcattach(&cp->ata_channel);
}
Example #3
static void
ahc_eisa_attach(device_t parent, device_t self, void *aux)
{
	struct ahc_softc *ahc = device_private(self);
	struct eisa_attach_args *ea = aux;
	eisa_chipset_tag_t ec = ea->ea_ec;
	eisa_intr_handle_t ih;
	bus_space_tag_t iot = ea->ea_iot;
	bus_space_handle_t ioh;
	int irq, intrtype;
	const char *intrstr, *intrtypestr;
	u_int biosctrl;
	u_int scsiconf;
	u_int scsiconf1;
	u_char intdef;
#ifdef AHC_DEBUG
	int i;
#endif
	char intrbuf[EISA_INTRSTR_LEN];

	ahc->sc_dev = self;

	if (bus_space_map(iot, EISA_SLOT_ADDR(ea->ea_slot) +
	    AHC_EISA_SLOT_OFFSET, AHC_EISA_IOSIZE, 0, &ioh)) {
		aprint_error_dev(ahc->sc_dev, "could not map I/O addresses");
		return;
	}
	if ((irq = ahc_aic77xx_irq(iot, ioh)) < 0) {
		aprint_error_dev(ahc->sc_dev, "ahc_aic77xx_irq failed!");
		goto free_io;
	}

	if (strcmp(ea->ea_idstring, "ADP7770") == 0) {
		printf(": %s\n", EISA_PRODUCT_ADP7770);
	} else if (strcmp(ea->ea_idstring, "ADP7771") == 0) {
		printf(": %s\n", EISA_PRODUCT_ADP7771);
	} else {
		printf(": Unknown device type %s", ea->ea_idstring);
		goto free_io;
	}

	ahc_set_name(ahc, device_xname(ahc->sc_dev));
	ahc->parent_dmat = ea->ea_dmat;
	ahc->chip = AHC_AIC7770|AHC_EISA;
	ahc->features = AHC_AIC7770_FE;
	ahc->flags = AHC_PAGESCBS;
	ahc->bugs = AHC_TMODE_WIDEODD_BUG;
	ahc->tag = iot;
	ahc->bsh = ioh;
	ahc->channel = 'A';

	if (ahc_softc_init(ahc) != 0)
		goto free_io;

	ahc_intr_enable(ahc, FALSE);

	if (ahc_reset(ahc) != 0)
		goto free_io;

	if (eisa_intr_map(ec, irq, &ih)) {
		aprint_error_dev(ahc->sc_dev, "couldn't map interrupt (%d)\n",
		    irq);
		goto free_io;
	}

	intdef = bus_space_read_1(iot, ioh, INTDEF);

	if (intdef & EDGE_TRIG) {
		intrtype = IST_EDGE;
		intrtypestr = "edge triggered";
	} else {
		intrtype = IST_LEVEL;
		intrtypestr = "level sensitive";
	}
	intrstr = eisa_intr_string(ec, ih, intrbuf, sizeof(intrbuf));
	ahc->ih = eisa_intr_establish(ec, ih,
	    intrtype, IPL_BIO, ahc_intr, ahc);
	if (ahc->ih == NULL) {
		aprint_error_dev(ahc->sc_dev, "couldn't establish %s interrupt",
		    intrtypestr);
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto free_io;
	}
	if (intrstr != NULL)
		aprint_normal_dev(ahc->sc_dev, "%s interrupting at %s\n",
		       intrtypestr, intrstr);

	/*
	 * Now that we know we own the resources we need, do the
	 * card initialization.
	 *
	 * First, the aic7770 card specific setup.
	 */
	biosctrl = ahc_inb(ahc, HA_274_BIOSCTRL);
	scsiconf = ahc_inb(ahc, SCSICONF);
	scsiconf1 = ahc_inb(ahc, SCSICONF + 1);

#ifdef AHC_DEBUG
	for (i = TARG_SCSIRATE; i <= HA_274_BIOSCTRL; i+=8) {
		printf("0x%x, 0x%x, 0x%x, 0x%x, "
		       "0x%x, 0x%x, 0x%x, 0x%x\n",
		       ahc_inb(ahc, i),
		       ahc_inb(ahc, i+1),
		       ahc_inb(ahc, i+2),
		       ahc_inb(ahc, i+3),
		       ahc_inb(ahc, i+4),
		       ahc_inb(ahc, i+5),
		       ahc_inb(ahc, i+6),
		       ahc_inb(ahc, i+7));
	}
#endif

	/* Get the primary channel information */
	if ((biosctrl & CHANNEL_B_PRIMARY) != 0)
		ahc->flags |= AHC_PRIMARY_CHANNEL;

	if ((biosctrl & BIOSMODE) == BIOSDISABLED) {
		ahc->flags |= AHC_USEDEFAULTS;
	} else if ((ahc->features & AHC_WIDE) != 0) {
		ahc->our_id = scsiconf1 & HWSCSIID;
		if (scsiconf & TERM_ENB)
			ahc->flags |= AHC_TERM_ENB_A;
	} else {
		ahc->our_id = scsiconf & HSCSIID;
		ahc->our_id_b = scsiconf1 & HSCSIID;
		if (scsiconf & TERM_ENB)
			ahc->flags |= AHC_TERM_ENB_A;
		if (scsiconf1 & TERM_ENB)
			ahc->flags |= AHC_TERM_ENB_B;
	}
	if ((ahc_inb(ahc, HA_274_BIOSGLOBAL) & HA_274_EXTENDED_TRANS))
		ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B;

	/* Attach sub-devices */
	if (ahc_aic77xx_attach(ahc) == 0)
		return; /* succeed */

	/* failed */
	eisa_intr_disestablish(ec, ahc->ih);
free_io:
	bus_space_unmap(iot, ioh, AHC_EISA_IOSIZE);
}
Example #4
static int
cuda_intr(void *arg)
{
	struct cuda_softc *sc = arg;
	int i, ending, type;
	uint8_t reg;

	reg = cuda_read_reg(sc, vIFR);		/* Read the interrupts */
	DPRINTF("[");
	if ((reg & 0x80) == 0) {
		DPRINTF("irq %02x]", reg);
		return 0;			/* No interrupts to process */
	}
	DPRINTF(":");

	cuda_write_reg(sc, vIFR, 0x7f);	/* Clear 'em */

switch_start:
	switch (sc->sc_state) {
	case CUDA_IDLE:
		/*
		 * This is an unexpected packet, so grab the first (dummy)
		 * byte, set up the proper vars, and tell the chip we are
		 * starting to receive the packet by setting the TIP bit.
		 */
		sc->sc_in[1] = cuda_read_reg(sc, vSR);
		DPRINTF("start: %02x", sc->sc_in[1]);
		if (cuda_intr_state(sc) == 0) {
			/* must have been a fake start */
			DPRINTF(" ... fake start\n");
			if (sc->sc_waiting) {
				/* start over */
				delay(150);
				sc->sc_state = CUDA_OUT;
				sc->sc_sent = 0;
				cuda_out(sc);
				cuda_write_reg(sc, vSR, sc->sc_out[1]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
			break;
		}

		cuda_in(sc);
		cuda_tip(sc);

		sc->sc_received = 1;
		sc->sc_state = CUDA_IN;
		DPRINTF(" CUDA_IN");
		break;

	case CUDA_IN:
		sc->sc_in[sc->sc_received] = cuda_read_reg(sc, vSR);
		DPRINTF(" %02x", sc->sc_in[sc->sc_received]);
		ending = 0;
		if (sc->sc_received > 255) {
			/* bitch only once */
			if (sc->sc_received == 256) {
				printf("%s: input overflow\n",
				    device_xname(sc->sc_dev));
				ending = 1;
			}
		} else
			sc->sc_received++;
		if (sc->sc_received > 3) {
			if ((sc->sc_in[3] == CMD_IIC) && 
			    (sc->sc_received > (sc->sc_i2c_read_len + 4))) {
				ending = 1;
			}
		}

		/* intr off means this is the last byte (end of frame) */
		if (cuda_intr_state(sc) == 0) {
			ending = 1;
			DPRINTF(".\n");
		} else {
			cuda_toggle_ack(sc);			
		}
		
		if (ending == 1) {	/* end of message? */

			sc->sc_in[0] = sc->sc_received - 1;

			/* reset vars and signal the end of this frame */
			cuda_idle(sc);

			/* check if we have a handler for this message */
			type = sc->sc_in[1];
			if ((type >= 0) && (type < 16)) {
				CudaHandler *me = &sc->sc_handlers[type];

				if (me->handler != NULL) {
					me->handler(me->cookie,
					    sc->sc_received - 1, &sc->sc_in[1]);
				} else {
					printf("no handler for type %02x\n", type);
					panic("barf");
				}
			}

			DPRINTF("CUDA_IDLE");
			sc->sc_state = CUDA_IDLE;
			
			sc->sc_received = 0;

			/*
			 * If there is something waiting to be sent out,
			 * set everything up and send the first byte.
			 */
			if (sc->sc_waiting == 1) {

				DPRINTF("pending write\n");
				delay(1500);	/* required */
				sc->sc_sent = 0;
				sc->sc_state = CUDA_OUT;

				/*
				 * If the interrupt is on, we were too slow
				 * and the chip has already started to send
				 * something to us, so back out of the write
				 * and start a read cycle.
				 */
				if (cuda_intr_state(sc)) {
					cuda_in(sc);
					cuda_idle(sc);
					sc->sc_sent = 0;
					sc->sc_state = CUDA_IDLE;
					sc->sc_received = 0;
					delay(150);
					DPRINTF("too slow - incoming message\n");
					goto switch_start;
				}
				/*
				 * If we got here, it's ok to start sending
				 * so load the first byte and tell the chip
				 * we want to send.
				 */
				DPRINTF("sending ");

				cuda_out(sc);
				cuda_write_reg(sc, vSR,
				    sc->sc_out[sc->sc_sent]);
				cuda_ack_off(sc);
				cuda_tip(sc);
			}
		}
		break;

	case CUDA_OUT:
		i = cuda_read_reg(sc, vSR);	/* reset SR-intr in IFR */

		sc->sc_sent++;
		if (cuda_intr_state(sc)) {	/* ADB intr low during write */

			DPRINTF("incoming msg during send\n");
			cuda_in(sc);	/* make sure SR is set to IN */
			cuda_idle(sc);
			sc->sc_sent = 0;	/* must start all over */
			sc->sc_state = CUDA_IDLE;	/* new state */
			sc->sc_received = 0;
			sc->sc_waiting = 1;	/* must retry when done with
						 * read */
			delay(150);
			goto switch_start;	/* process next state right
						 * now */
			break;
		}
		if (sc->sc_out_length == sc->sc_sent) {	/* check for done */

			sc->sc_waiting = 0;	/* done writing */
			sc->sc_state = CUDA_IDLE;	/* signal bus is idle */
			cuda_in(sc);
			cuda_idle(sc);
			DPRINTF("done sending\n");
		} else {
			/* send next byte */
			cuda_write_reg(sc, vSR, sc->sc_out[sc->sc_sent]);
			cuda_toggle_ack(sc);	/* signal byte ready to
							 * shift */
		}
		break;

	case CUDA_NOTREADY:
		DPRINTF("adb: not yet initialized\n");
		break;

	default:
		DPRINTF("intr: unknown ADB state\n");
		break;
	}

	DPRINTF("]");
	return 1;
}
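/*
 * The interrupt handler above is a three-state (idle / receiving / sending)
 * byte-at-a-time state machine driven by the VIA shift register.  Below is a
 * minimal, stand-alone sketch of the receive flow only; the state names, the
 * pkt_feed() and frame_done() helpers and the fixed 256-byte buffer are
 * illustrative assumptions of this sketch, not the driver's actual API.
 */
#include <stdint.h>
#include <stdio.h>

enum pkt_state { PKT_IDLE, PKT_IN, PKT_OUT };

struct pkt_sm {
	enum pkt_state state;
	uint8_t in[256];
	int received;
};

/* Stand-in for dispatching a complete frame to a handler table. */
static void frame_done(const uint8_t *buf, int len)
{
	printf("frame of %d bytes, type 0x%02x\n", len, buf[0]);
}

/*
 * Feed one shifted-in byte.  'last' mirrors the driver's
 * "interrupt line dropped" test that marks the end of a frame.
 */
static void pkt_feed(struct pkt_sm *sm, uint8_t byte, int last)
{
	switch (sm->state) {
	case PKT_IDLE:
		/* Unexpected first (dummy) byte: remember it, start RX. */
		sm->in[0] = byte;
		sm->received = 1;
		sm->state = PKT_IN;
		break;
	case PKT_IN:
		if (sm->received < (int)sizeof(sm->in))
			sm->in[sm->received++] = byte;
		if (last) {
			/* Skip the dummy byte, as the handler above does. */
			frame_done(sm->in + 1, sm->received - 1);
			sm->state = PKT_IDLE;
			sm->received = 0;
		}
		break;
	case PKT_OUT:
		/* Transmit side omitted in this sketch. */
		break;
	}
}

int main(void)
{
	struct pkt_sm sm = { PKT_IDLE, { 0 }, 0 };
	const uint8_t bytes[] = { 0xff, 0x00, 0x10, 0x7f };

	for (unsigned i = 0; i < sizeof(bytes); i++)
		pkt_feed(&sm, bytes[i], i == sizeof(bytes) - 1);
	return 0;
}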
Example #5
int
mtopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct mt_softc *sc;
	int req_den;
	int error;

	sc = device_lookup_private(&mt_cd, MTUNIT(dev));
	if (sc == NULL || (sc->sc_flags & MTF_EXISTS) == 0)
		return (ENXIO);

	if (sc->sc_flags & MTF_OPEN)
		return (EBUSY);

	DPRINTF(MDB_ANY, ("%s open: flags 0x%x", device_xname(sc->sc_dev),
	    sc->sc_flags));

	sc->sc_flags |= MTF_OPEN;
	sc->sc_ttyp = tprintf_open(l->l_proc);
	if ((sc->sc_flags & MTF_ALIVE) == 0) {
		error = mtcommand(dev, MTRESET, 0);
		if (error != 0 || (sc->sc_flags & MTF_ALIVE) == 0)
			goto errout;
		if ((sc->sc_stat1 & (SR1_BOT | SR1_ONLINE)) == SR1_ONLINE)
			(void) mtcommand(dev, MTREW, 0);
	}
	for (;;) {
		if ((error = mtcommand(dev, MTNOP, 0)) != 0)
			goto errout;
		if (!(sc->sc_flags & MTF_REW))
			break;
		error = kpause("mt", true, hz, NULL);
		if (error != 0 && error != EWOULDBLOCK) {
			error = EINTR;
			goto errout;
		}
	}
	if ((flag & FWRITE) && (sc->sc_stat1 & SR1_RO)) {
		error = EROFS;
		goto errout;
	}
	if (!(sc->sc_stat1 & SR1_ONLINE)) {
		uprintf("%s: not online\n", device_xname(sc->sc_dev));
		error = EIO;
		goto errout;
	}
	/*
	 * Select density:
	 *  - find out what density the drive is set to
	 *	(i.e. the density of the current tape)
	 *  - if we are going to write
	 *    - if we're not at the beginning of the tape
	 *      - complain if we want to change densities
	 *    - otherwise, select the mtcommand to set the density
	 *
	 * If the drive doesn't support it then don't change the recorded
	 * density.
	 *
	 * The original MOREbsd code had these additional conditions
	 * for the mid-tape change
	 *
	 *	req_den != T_BADBPI &&
	 *	sc->sc_density != T_6250BPI
	 *
	 * which suggests that it would be possible to write multiple
	 * densities if req_den == T_BAD_BPI or the current tape
	 * density was 6250.  Testing of our 7980 suggests that the
	 * device cannot change densities mid-tape.
	 *
	 * [email protected]
	 */
	sc->sc_density = (sc->sc_stat2 & SR2_6250) ? T_6250BPI : (
			 (sc->sc_stat3 & SR3_1600) ? T_1600BPI : (
			 (sc->sc_stat3 & SR3_800) ? T_800BPI : -1));
	req_den = (dev & T_DENSEL);

	if (flag & FWRITE) {
		if (!(sc->sc_stat1 & SR1_BOT)) {
			if (sc->sc_density != req_den) {
				uprintf("%s: can't change density mid-tape\n",
				    device_xname(sc->sc_dev));
				error = EIO;
				goto errout;
			}
		}
		else {
			int mtset_density =
			    (req_den == T_800BPI  ? MTSET800BPI : (
			     req_den == T_1600BPI ? MTSET1600BPI : (
			     req_den == T_6250BPI ? MTSET6250BPI : (
			     sc->sc_type == MT7980ID
						  ? MTSET6250DC
						  : MTSET6250BPI))));
			if (mtcommand(dev, mtset_density, 0) == 0)
				sc->sc_density = req_den;
		}
	}
	return (0);
errout:
	sc->sc_flags &= ~MTF_OPEN;
	return (error);
}
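/*
 * The density selection above reduces to two rules: derive the current
 * density from the drive's status bits, and refuse a write that would change
 * density anywhere but at beginning-of-tape.  A stand-alone sketch of that
 * decision follows; the ST_ bits and DENS_ codes are illustrative stand-ins
 * for the driver's SR2/SR3 status bits and T_ density constants.
 */
#include <errno.h>
#include <stdio.h>

#define ST_6250		0x01
#define ST_1600		0x02
#define ST_800		0x04
#define ST_AT_BOT	0x08		/* beginning of tape */

enum density { DENS_UNKNOWN = -1, DENS_800, DENS_1600, DENS_6250 };

static enum density current_density(unsigned status)
{
	return (status & ST_6250) ? DENS_6250 :
	       (status & ST_1600) ? DENS_1600 :
	       (status & ST_800)  ? DENS_800  : DENS_UNKNOWN;
}

/* Return 0 if the write may proceed, EIO on a mid-tape density change. */
static int check_write_density(unsigned status, enum density requested)
{
	if ((status & ST_AT_BOT) == 0 && current_density(status) != requested)
		return EIO;	/* can't change density mid-tape */
	return 0;		/* at BOT the drive may be re-programmed */
}

int main(void)
{
	printf("%d\n", check_write_density(ST_1600, DENS_6250));
	printf("%d\n", check_write_density(ST_1600 | ST_AT_BOT, DENS_6250));
	return 0;
}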
Example #6
/* sun4m vmebus */
static void
vmeattach_iommu(device_t parent, device_t self, void *aux)
{
#if defined(SUN4M)
	struct sparcvme_softc *sc = device_private(self);
	struct iommu_attach_args *ia = aux;
	struct vmebus_attach_args vba;
	bus_space_handle_t bh;
	int node;
	int cline;

	sc->sc_bustag = ia->iom_bustag;
	sc->sc_dmatag = ia->iom_dmatag;

	/* VME interrupt entry point */
	sc->sc_vmeintr = vmeintr4m;

/*XXX*/	sparc_vme_chipset_tag.cookie = sc;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct_iommu_dmamap_create;
/*XXX*/	sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
/*XXX*/	sparc_vme_iommu_dma_tag._cookie = sc;

	vba.va_vct = &sparc_vme_chipset_tag;
	vba.va_bdt = &sparc_vme_iommu_dma_tag;
	vba.va_slaveconfig = 0;

	node = ia->iom_node;

	/*
	 * Map VME control space
	 */
	if (ia->iom_nreg < 2) {
		printf("%s: only %d register sets\n", device_xname(self),
			ia->iom_nreg);
		return;
	}

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(ia->iom_reg[0].oa_space,
						ia->iom_reg[0].oa_base),
			  (bus_size_t)ia->iom_reg[0].oa_size,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map vmebusreg", device_xname(self));
	}
	sc->sc_reg = (struct vmebusreg *)bh;

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(ia->iom_reg[1].oa_space,
						ia->iom_reg[1].oa_base),
			  (bus_size_t)ia->iom_reg[1].oa_size,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map vmebusvec", device_xname(self));
	}
	sc->sc_vec = (struct vmebusvec *)bh;

	/*
	 * Map VME IO cache tags and flush control.
	 */
	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(
				ia->iom_reg[1].oa_space,
				ia->iom_reg[1].oa_base + VME_IOC_TAGOFFSET),
			  VME_IOC_SIZE,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map IOC tags", device_xname(self));
	}
	sc->sc_ioctags = (uint32_t *)bh;

	if (bus_space_map(ia->iom_bustag,
			  (bus_addr_t) BUS_ADDR(
				ia->iom_reg[1].oa_space,
				ia->iom_reg[1].oa_base + VME_IOC_FLUSHOFFSET),
			  VME_IOC_SIZE,
			  BUS_SPACE_MAP_LINEAR,
			  &bh) != 0) {
		panic("%s: can't map IOC flush registers", device_xname(self));
	}
	sc->sc_iocflush = (uint32_t *)bh;

	/*
	 * Get "range" property.
	 */
	if (prom_getprop(node, "ranges", sizeof(struct rom_range),
		    &sc->sc_nrange, &sc->sc_range) != 0) {
		panic("%s: can't get ranges property", device_xname(self));
	}

	sparcvme_sc = sc;
	vmeerr_handler = sparc_vme_error;

	/*
	 * Invalidate all IO-cache entries.
	 */
	for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) {
		sc->sc_ioctags[--cline] = 0;
	}

	/* Enable IO-cache */
	sc->sc_reg->vmebus_cr |= VMEBUS_CR_C;

	printf(": version 0x%x\n",
	       sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL);

	(void)config_found(self, &vba, 0);
#endif /* SUN4M */
}
Example #7
void
tap_attach(device_t parent, device_t self, void *aux)
{
	struct tap_softc *sc = device_private(self);
	struct ifnet *ifp;
	const struct sysctlnode *node;
	int error;
	uint8_t enaddr[ETHER_ADDR_LEN] =
	    { 0xf2, 0x0b, 0xa4, 0xff, 0xff, 0xff };
	char enaddrstr[3 * ETHER_ADDR_LEN];

	sc->sc_dev = self;
	sc->sc_sih = NULL;
	getnanotime(&sc->sc_btime);
	sc->sc_atime = sc->sc_mtime = sc->sc_btime;
	sc->sc_flags = 0;
	selinit(&sc->sc_rsel);

	/*
	 * Initialize the two locks for the device.
	 *
	 * We need a lock here because even though the tap device can be
	 * opened only once, the file descriptor might be passed to another
	 * process, say a fork(2)ed child.
	 *
	 * The Giant saves us from most of the hassle, but since the read
	 * operation can sleep, we don't want two processes to wake up at
	 * the same moment and both try and dequeue a single packet.
	 *
	 * The queue for event listeners (used by kqueue(9), see below) has
	 * to be protected too, so use a spin lock.
	 */
	mutex_init(&sc->sc_rdlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&sc->sc_kqlock, MUTEX_DEFAULT, IPL_VM);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	/*
	 * In order to obtain a unique initial Ethernet address on a host,
	 * do some randomisation.  It's not meant for anything but avoiding
	 * hard-coding an address.
	 */
	cprng_fast(&enaddr[3], 3);

	aprint_verbose_dev(self, "Ethernet address %s\n",
	    ether_snprintf(enaddrstr, sizeof(enaddrstr), enaddr));

	/*
	 * Why 1000baseT? Why not? You can add more.
	 *
	 * Note that there are 3 steps: init, one or several additions to
	 * list of supported media, and in the end, the selection of one
	 * of them.
	 */
	ifmedia_init(&sc->sc_im, 0, tap_mediachange, tap_mediastatus);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_im, IFM_ETHER|IFM_AUTO);

	/*
	 * One should note that an interface must do multicast in order
	 * to support IPv6.
	 */
	ifp = &sc->sc_ec.ec_if;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc	= sc;
	ifp->if_flags	= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl	= tap_ioctl;
	ifp->if_start	= tap_start;
	ifp->if_stop	= tap_stop;
	ifp->if_init	= tap_init;
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	/* Those steps are mandatory for an Ethernet driver. */
	if_initialize(ifp);
	ether_ifattach(ifp, enaddr);
	if_register(ifp);

	/*
	 * Add a sysctl node for that interface.
	 *
	 * The pointer transmitted is not a string, but instead a pointer to
	 * the softc structure, which we can use to build the string value on
	 * the fly in the helper function of the node.  See the comments for
	 * tap_sysctl_handler for details.
	 *
	 * Usually sysctl_createv is called with CTL_CREATE as the before-last
	 * component.  However, we can allocate a number ourselves, as we are
	 * the only consumer of the net.link.<iface> node.  In this case, the
	 * unit number is conveniently used to number the node.  CTL_CREATE
	 * would just work, too.
	 */
	if ((error = sysctl_createv(NULL, 0, NULL,
	    &node, CTLFLAG_READWRITE,
	    CTLTYPE_STRING, device_xname(self), NULL,
	    tap_sysctl_handler, 0, (void *)sc, 18,
	    CTL_NET, AF_LINK, tap_node, device_unit(sc->sc_dev),
	    CTL_EOL)) != 0)
		aprint_error_dev(self, "sysctl_createv returned %d, ignoring\n",
		    error);
}
Example #8
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
kue_stop(struct kue_softc *sc)
{
	usbd_status		err;
	struct ifnet		*ifp;
	int			i;

	DPRINTFN(5,("%s: %s: enter\n", device_xname(sc->kue_dev),__func__));

	ifp = GET_IFP(sc);
	ifp->if_timer = 0;

	/* Stop transfers. */
	if (sc->kue_ep[KUE_ENDPT_RX] != NULL) {
		err = usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_RX]);
		if (err) {
			printf("%s: abort rx pipe failed: %s\n",
			    device_xname(sc->kue_dev), usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->kue_ep[KUE_ENDPT_RX]);
		if (err) {
			printf("%s: close rx pipe failed: %s\n",
			    device_xname(sc->kue_dev), usbd_errstr(err));
		}
		sc->kue_ep[KUE_ENDPT_RX] = NULL;
	}

	if (sc->kue_ep[KUE_ENDPT_TX] != NULL) {
		err = usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_TX]);
		if (err) {
			printf("%s: abort tx pipe failed: %s\n",
			    device_xname(sc->kue_dev), usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->kue_ep[KUE_ENDPT_TX]);
		if (err) {
			printf("%s: close tx pipe failed: %s\n",
			    device_xname(sc->kue_dev), usbd_errstr(err));
		}
		sc->kue_ep[KUE_ENDPT_TX] = NULL;
	}

	if (sc->kue_ep[KUE_ENDPT_INTR] != NULL) {
		err = usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_INTR]);
		if (err) {
			printf("%s: abort intr pipe failed: %s\n",
			    device_xname(sc->kue_dev), usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->kue_ep[KUE_ENDPT_INTR]);
		if (err) {
			printf("%s: close intr pipe failed: %s\n",
			    device_xname(sc->kue_dev), usbd_errstr(err));
		}
		sc->kue_ep[KUE_ENDPT_INTR] = NULL;
	}

	/* Free RX resources. */
	for (i = 0; i < KUE_RX_LIST_CNT; i++) {
		if (sc->kue_cdata.kue_rx_chain[i].kue_xfer != NULL) {
			usbd_free_xfer(sc->kue_cdata.kue_rx_chain[i].kue_xfer);
			sc->kue_cdata.kue_rx_chain[i].kue_xfer = NULL;
		}
	}

	/* Free TX resources. */
	for (i = 0; i < KUE_TX_LIST_CNT; i++) {
		if (sc->kue_cdata.kue_tx_chain[i].kue_xfer != NULL) {
			usbd_free_xfer(sc->kue_cdata.kue_tx_chain[i].kue_xfer);
			sc->kue_cdata.kue_tx_chain[i].kue_xfer = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
Example #9
static int
kue_load_fw(struct kue_softc *sc)
{
	usb_device_descriptor_t dd;
	usbd_status		err;

	DPRINTFN(1,("%s: %s: enter\n", device_xname(sc->kue_dev), __func__));

	/*
	 * First, check if we even need to load the firmware.
	 * If the device was still attached when the system was
	 * rebooted, it may already have firmware loaded in it.
	 * If this is the case, we don't need to do it again.
	 * And in fact, if we try to load it again, we'll hang,
	 * so we have to avoid this condition if we don't want
	 * to look stupid.
	 *
	 * We can test this quickly by checking the bcdRevision
	 * code. The NIC will return a different revision code if
	 * it's probed while the firmware is still loaded and
	 * running.
	 */
	if (usbd_get_device_desc(sc->kue_udev, &dd))
		return (EIO);
	if (UGETW(dd.bcdDevice) == KUE_WARM_REV) {
		printf("%s: warm boot, no firmware download\n",
		       device_xname(sc->kue_dev));
		return (0);
	}

	printf("%s: cold boot, downloading firmware\n",
	       device_xname(sc->kue_dev));

	/* Load code segment */
	DPRINTFN(1,("%s: kue_load_fw: download code_seg\n",
		    device_xname(sc->kue_dev)));
	/*XXXUNCONST*/
	err = kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SEND_SCAN,
	    0, __UNCONST(kue_code_seg), sizeof(kue_code_seg));
	if (err) {
		printf("%s: failed to load code segment: %s\n",
		    device_xname(sc->kue_dev), usbd_errstr(err));
		return (EIO);
	}

	/* Load fixup segment */
	DPRINTFN(1,("%s: kue_load_fw: download fix_seg\n",
		    device_xname(sc->kue_dev)));
	/*XXXUNCONST*/
	err = kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SEND_SCAN,
	    0, __UNCONST(kue_fix_seg), sizeof(kue_fix_seg));
	if (err) {
		printf("%s: failed to load fixup segment: %s\n",
		    device_xname(sc->kue_dev), usbd_errstr(err));
		return (EIO);
	}

	/* Send trigger command. */
	DPRINTFN(1,("%s: kue_load_fw: download trig_seg\n",
		    device_xname(sc->kue_dev)));
	/*XXXUNCONST*/
	err = kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SEND_SCAN,
	    0, __UNCONST(kue_trig_seg), sizeof(kue_trig_seg));
	if (err) {
		printf("%s: failed to load trigger segment: %s\n",
		    device_xname(sc->kue_dev), usbd_errstr(err));
		return (EIO);
	}

	usbd_delay_ms(sc->kue_udev, 10);

	/*
	 * Reload device descriptor.
	 * Why? The chip without the firmware loaded returns
	 * one revision code. The chip with the firmware
	 * loaded and running returns a *different* revision
	 * code. This confuses the quirk mechanism, which is
	 * dependent on the revision data.
	 */
	(void)usbd_reload_device_desc(sc->kue_udev);

	DPRINTFN(1,("%s: %s: done\n", device_xname(sc->kue_dev), __func__));

	/* Reset the adapter. */
	kue_reset(sc);

	return (0);
}
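/*
 * The core of the check above: the revision code in the device descriptor
 * distinguishes a "warm" device (firmware already running) from a "cold"
 * one, which is also why the descriptor is re-read after the download.
 * A hedged stand-alone sketch of just that decision; struct dev_desc and
 * the WARM_REVISION value are placeholders, not the real usbdi API or the
 * actual KUE_WARM_REV value.
 */
#include <stdint.h>
#include <stdio.h>

struct dev_desc {
	uint16_t bcdDevice;	/* revision code reported by the device */
};

#define WARM_REVISION	0x0202	/* placeholder for KUE_WARM_REV */

/* Firmware must be downloaded unless the device reports the warm revision. */
static int needs_firmware(const struct dev_desc *dd)
{
	return dd->bcdDevice != WARM_REVISION;
}

int main(void)
{
	struct dev_desc cold = { 0x0002 }, warm = { WARM_REVISION };

	printf("cold boot: download=%d\n", needs_firmware(&cold));
	printf("warm boot: download=%d\n", needs_firmware(&warm));
	return 0;
}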
Example #10
Static void
umass_scsipi_cb(struct umass_softc *sc, void *priv, int residue, int status)
{
	struct umass_scsipi_softc *scbus = (struct umass_scsipi_softc *)sc->bus;
	struct scsipi_xfer *xs = priv;
	struct scsipi_periph *periph = xs->xs_periph;
	int cmdlen, senselen;
	int s;
#ifdef UMASS_DEBUG
	struct timeval tv;
	u_int delta;
	microtime(&tv);
	delta = (tv.tv_sec - sc->tv.tv_sec) * 1000000 + tv.tv_usec - sc->tv.tv_usec;
#endif

	DPRINTF(UDMASS_CMD,("umass_scsipi_cb: at %"PRIu64".%06"PRIu64", delta=%u: xs=%p residue=%d"
	    " status=%d\n", tv.tv_sec, (uint64_t)tv.tv_usec, delta, xs, residue, status));

	xs->resid = residue;

	switch (status) {
	case STATUS_CMD_OK:
		xs->error = XS_NOERROR;
		break;

	case STATUS_CMD_UNKNOWN:
		/* FALLTHROUGH */
	case STATUS_CMD_FAILED:
		/* fetch sense data */
		sc->sc_sense = 1;
		memset(&scbus->sc_sense_cmd, 0, sizeof(scbus->sc_sense_cmd));
		scbus->sc_sense_cmd.opcode = SCSI_REQUEST_SENSE;
		scbus->sc_sense_cmd.byte2 = periph->periph_lun <<
		    SCSI_CMD_LUN_SHIFT;

		if (sc->sc_cmd == UMASS_CPROTO_UFI ||
		    sc->sc_cmd == UMASS_CPROTO_ATAPI)
			cmdlen = UFI_COMMAND_LENGTH;	/* XXX */
		else
			cmdlen = sizeof(scbus->sc_sense_cmd);
		if (periph->periph_version < 0x05) /* SPC-3 */
			senselen = 18;
		else
			senselen = sizeof(xs->sense);
		scbus->sc_sense_cmd.length = senselen;
		sc->sc_methods->wire_xfer(sc, periph->periph_lun,
					  &scbus->sc_sense_cmd, cmdlen,
					  &xs->sense, senselen,
					  DIR_IN, xs->timeout, 0,
					  umass_scsipi_sense_cb, xs);
		return;

	case STATUS_WIRE_FAILED:
		xs->error = XS_RESET;
		break;

	default:
		panic("%s: Unknown status %d in umass_scsipi_cb",
			device_xname(sc->sc_dev), status);
	}

	DPRINTF(UDMASS_CMD,("umass_scsipi_cb: at %"PRIu64".%06"PRIu64": return xs->error="
            "%d, xs->xs_status=0x%x xs->resid=%d\n",
	     tv.tv_sec, (uint64_t)tv.tv_usec,
	     xs->error, xs->xs_status, xs->resid));

	s = splbio();
	KERNEL_LOCK(1, curlwp);
	scsipi_done(xs);
	KERNEL_UNLOCK_ONE(curlwp);
	splx(s);
}
Example #11
static	void	
sackbc_attach(device_t parent, device_t self, void *aux)
{
	struct sackbc_softc *sc = device_private(self);
	struct sacc_softc *psc = device_private(parent);
	struct sa1111_attach_args *aa = (struct sa1111_attach_args *)aux;
	device_t child;
	uint32_t tmp, clock_bit;
	int intr, slot;

	switch (aa->sa_addr) {
	case SACC_KBD0: clock_bit = (1<<6); intr = 21; break;
	case SACC_KBD1: clock_bit = (1<<5); intr = 18; break;
	default:
		return;
	}

	if (aa->sa_size <= 0)
		aa->sa_size = SACCKBD_SIZE;
	if (aa->sa_intr == SACCCF_INTR_DEFAULT)
		aa->sa_intr = intr;

	sc->dev = self;
	sc->iot = psc->sc_iot;
	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
	    aa->sa_addr, aa->sa_size, &sc->ioh)) {
		aprint_normal(": can't map subregion\n");
		return;
	}

	/* enable clock for PS/2 kbd or mouse */
	tmp = bus_space_read_4(psc->sc_iot, psc->sc_ioh, SACCSC_SKPCR);
	bus_space_write_4(psc->sc_iot, psc->sc_ioh, SACCSC_SKPCR,
	    tmp | clock_bit);

	sc->ih_rx = NULL;
	sc->intr = aa->sa_intr;
	sc->polling = 0;

	tmp = bus_space_read_4(sc->iot, sc->ioh, SACCKBD_CR);
	bus_space_write_4(sc->iot, sc->ioh, SACCKBD_CR, tmp | KBDCR_ENA);

	/* XXX: this is necessary to get the keyboard working, but I don't know why */
	bus_space_write_4(sc->iot, sc->ioh, SACCKBD_CLKDIV, 2);

	tmp = bus_space_read_4(sc->iot, sc->ioh, SACCKBD_STAT);
	if ((tmp & KBDSTAT_ENA) == 0) {
		printf("??? can't enable KBD controller\n");
		return;
	}

	printf("\n");

	sc->pt = pckbport_attach(sc, &sackbc_ops);

	/*
	 * Although there is no such thing as a SLOT for the SA-1111 kbd
	 * controller, the pckbd and pms drivers require it.
	 */
	for (slot = PCKBPORT_KBD_SLOT; slot <= PCKBPORT_AUX_SLOT; ++slot) {
		child = pckbport_attach_slot(self, sc->pt, slot);

		if (child == NULL)
			continue;
		sc->slot = slot;
		rnd_attach_source(&sc->rnd_source, device_xname(child),
		    RND_TYPE_TTY, 0);
		/* only one of KBD_SLOT or AUX_SLOT is used. */
		break;			
	}
}
Example #12
Static void
umass_scsipi_request(struct scsipi_channel *chan,
		scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct umass_softc *sc = device_private(adapt->adapt_dev);
	struct umass_scsipi_softc *scbus = (struct umass_scsipi_softc *)sc->bus;
	struct scsipi_generic *cmd;
	int cmdlen;
	int dir;
#ifdef UMASS_DEBUG
	microtime(&sc->tv);
#endif
	switch(req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		DIF(UDMASS_UPPER, periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS);

		DPRINTF(UDMASS_CMD, ("%s: umass_scsi_cmd: at %"PRIu64".%06"PRIu64": %d:%d "
		    "xs=%p cmd=0x%02x datalen=%d (quirks=0x%x, poll=%d)\n",
		    device_xname(sc->sc_dev), sc->tv.tv_sec, (uint64_t)sc->tv.tv_usec,
		    periph->periph_target, periph->periph_lun,
		    xs, xs->cmd->opcode, xs->datalen,
		    periph->periph_quirks, xs->xs_control & XS_CTL_POLL));
#if defined(UMASS_DEBUG) && defined(SCSIPI_DEBUG)
		if (umassdebug & UDMASS_SCSI)
			show_scsipi_xs(xs);
		else if (umassdebug & ~UDMASS_CMD)
			show_scsipi_cmd(xs);
#endif

		if (sc->sc_dying) {
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}

#ifdef UMASS_DEBUG
		if (SCSIPI_BUSTYPE_TYPE(chan->chan_bustype->bustype_type) ==
		    SCSIPI_BUSTYPE_ATAPI ?
		    periph->periph_target != UMASS_ATAPI_DRIVE :
		    periph->periph_target == chan->chan_id) {
			DPRINTF(UDMASS_SCSI, ("%s: wrong SCSI ID %d\n",
			    device_xname(sc->sc_dev),
			    periph->periph_target));
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}
#endif

		cmd = xs->cmd;
		cmdlen = xs->cmdlen;

		dir = DIR_NONE;
		if (xs->datalen) {
			switch (xs->xs_control &
			    (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
			case XS_CTL_DATA_IN:
				dir = DIR_IN;
				break;
			case XS_CTL_DATA_OUT:
				dir = DIR_OUT;
				break;
			}
		}

		if (xs->datalen > UMASS_MAX_TRANSFER_SIZE) {
			printf("umass_cmd: large datalen, %d\n", xs->datalen);
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}

		if (xs->xs_control & XS_CTL_POLL) {
			/* Use sync transfer. XXX Broken! */
			DPRINTF(UDMASS_SCSI,
			    ("umass_scsi_cmd: sync dir=%d\n", dir));
			scbus->sc_sync_status = USBD_INVAL;
			sc->sc_methods->wire_xfer(sc, periph->periph_lun, cmd,
						  cmdlen, xs->data,
						  xs->datalen, dir,
						  xs->timeout, USBD_SYNCHRONOUS,
						  0, xs);
			DPRINTF(UDMASS_SCSI, ("umass_scsi_cmd: done err=%d\n",
					      scbus->sc_sync_status));
			switch (scbus->sc_sync_status) {
			case USBD_NORMAL_COMPLETION:
				xs->error = XS_NOERROR;
				break;
			case USBD_TIMEOUT:
				xs->error = XS_TIMEOUT;
				break;
			default:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
			goto done;
		} else {
			DPRINTF(UDMASS_SCSI,
			    ("umass_scsi_cmd: async dir=%d, cmdlen=%d"
				      " datalen=%d\n",
				      dir, cmdlen, xs->datalen));
			sc->sc_methods->wire_xfer(sc, periph->periph_lun, cmd,
						  cmdlen, xs->data,
						  xs->datalen, dir,
						  xs->timeout, 0,
						  umass_scsipi_cb, xs);
			return;
		}

		/* Return if command finishes early. */
 done:
		KERNEL_LOCK(1, curlwp);
		scsipi_done(xs);
		KERNEL_UNLOCK_ONE(curlwp);
		return;
	default:
		/* Not supported, nothing to do. */
		;
	}
}
Example #13
void
apm_attach(struct apm_softc *sc)
{
	u_int numbatts, capflags;

	aprint_normal(": ");

	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}

	if (!pmf_device_register(sc->sc_dev, NULL, NULL))
		aprint_error_dev(sc->sc_dev, "couldn't establish power handler\n");
}
Example #14
/*
 * aha_cmd(iot, ioh, sc, icnt, ibuf, ocnt, obuf)
 *
 * Activate Adapter command
 *    icnt:   number of args (outbound bytes including opcode)
 *    ibuf:   argument buffer
 *    ocnt:   number of expected returned bytes
 *    obuf:   result buffer
 *    (the poll timeout is derived internally from the opcode)
 *
 * Performs an adapter command through the ports.  Not to be confused with a
 * scsi command, which is read in via the DMA; one of the adapter commands
 * tells it to read in a scsi command.
 */
static int
aha_cmd(bus_space_tag_t iot, bus_space_handle_t ioh, struct aha_softc *sc,
    int icnt, u_char *ibuf, int ocnt, u_char *obuf)
{
	const char *name;
	int i;
	int wait;
	u_char sts;
	u_char opcode = ibuf[0];

	if (sc != NULL)
		name = device_xname(&sc->sc_dev);
	else
		name = "(aha probe)";

	/*
	 * Calculate a reasonable timeout for the command.
	 */
	switch (opcode) {
	case AHA_INQUIRE_DEVICES:
		wait = 90 * 20000;
		break;
	default:
		wait = 1 * 20000;
		break;
	}

	/*
	 * Wait for the adapter to go idle, unless it's one of
	 * the commands which don't need this
	 */
	if (opcode != AHA_MBO_INTR_EN) {
		for (i = 20000; i; i--) {	/* 1 sec? */
			sts = bus_space_read_1(iot, ioh, AHA_STAT_PORT);
			if (sts & AHA_STAT_IDLE)
				break;
			delay(50);
		}
		if (!i) {
			printf("%s: aha_cmd, host not idle(0x%x)\n",
			    name, sts);
			return (1);
		}
	}
	/*
	 * Now that it is idle, if we expect output, preflush the
	 * queue feeding to us.
	 */
	if (ocnt) {
		while ((bus_space_read_1(iot, ioh, AHA_STAT_PORT)) & AHA_STAT_DF)
			bus_space_read_1(iot, ioh, AHA_DATA_PORT);
	}
	/*
	 * Output the command and its argument bytes one at a time;
	 * before writing each byte, check that the command/data port
	 * is empty.
	 */
	while (icnt--) {
		for (i = wait; i; i--) {
			sts = bus_space_read_1(iot, ioh, AHA_STAT_PORT);
			if (!(sts & AHA_STAT_CDF))
				break;
			delay(50);
		}
		if (!i) {
			if (opcode != AHA_INQUIRE_REVISION)
				printf("%s: aha_cmd, cmd/data port full\n", name);
			bus_space_write_1(iot, ioh, AHA_CTRL_PORT, AHA_CTRL_SRST);
			return (1);
		}
		bus_space_write_1(iot, ioh, AHA_CMD_PORT, *ibuf++);
	}
	/*
	 * If we expect input, loop that many times, each time,
	 * looking for the data register to have valid data
	 */
	while (ocnt--) {
		for (i = wait; i; i--) {
			sts = bus_space_read_1(iot, ioh, AHA_STAT_PORT);
			if (sts & AHA_STAT_DF)
				break;
			delay(50);
		}
		if (!i) {
			if (opcode != AHA_INQUIRE_REVISION)
				printf("%s: aha_cmd, cmd/data port empty %d\n",
				    name, ocnt);
			bus_space_write_1(iot, ioh, AHA_CTRL_PORT, AHA_CTRL_SRST);
			return (1);
		}
		*obuf++ = bus_space_read_1(iot, ioh, AHA_DATA_PORT);
	}
	/*
	 * Wait for the board to report a finished instruction.
	 * We may get an extra interrupt for the HACC signal, but this is
	 * unimportant.
	 */
	if (opcode != AHA_MBO_INTR_EN) {
		for (i = 20000; i; i--) {	/* 1 sec? */
			sts = bus_space_read_1(iot, ioh, AHA_INTR_PORT);
			/* XXX Need to save this in the interrupt handler? */
			if (sts & AHA_INTR_HACC)
				break;
			delay(50);
		}
		if (!i) {
			printf("%s: aha_cmd, host not finished(0x%x)\n",
			    name, sts);
			return (1);
		}
	}
	bus_space_write_1(iot, ioh, AHA_CTRL_PORT, AHA_CTRL_IRST);
	return (0);
}
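/*
 * Most of the handshaking above is one idiom repeated with different bits
 * (AHA_STAT_IDLE set, AHA_STAT_CDF clear, AHA_STAT_DF set): poll a status
 * port until a condition holds or a retry budget runs out, pausing about
 * 50us between reads.  A generic stand-alone sketch of that idiom; the
 * callback types and the fake_status() test register are assumptions of
 * this sketch, standing in for bus_space_read_1() and delay().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool wait_for_status(uint8_t (*read_status)(void *), void *cookie,
    uint8_t mask, uint8_t want, int tries, unsigned pause_us,
    void (*sleep_us)(unsigned))
{
	while (tries-- > 0) {
		if ((read_status(cookie) & mask) == want)
			return true;		/* condition met */
		sleep_us(pause_us);
	}
	return false;				/* timed out */
}

/* Fake register whose "idle" bit (0x10) rises after a few reads. */
static uint8_t fake_status(void *cookie)
{
	int *countdown = cookie;

	return (*countdown)-- > 0 ? 0x00 : 0x10;
}

static void no_sleep(unsigned us)
{
	(void)us;
}

int main(void)
{
	int countdown = 3;
	bool idle = wait_for_status(fake_status, &countdown, 0x10, 0x10,
	    20000, 50, no_sleep);

	printf("adapter idle: %s\n", idle ? "yes" : "timeout");
	return 0;
}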
Example #15
static void
wi_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct wi_pci_softc *psc = (struct wi_pci_softc *)self;
	struct wi_softc *sc = &psc->psc_wi;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	const char *intrstr;
	const struct wi_pci_product *wpp;
	pci_intr_handle_t ih;
	bus_space_tag_t memt, iot, plxt, tmdt;
	bus_space_handle_t memh, ioh, plxh, tmdh;

	psc->psc_pc = pc;
	psc->psc_pcitag = pa->pa_tag;

	wpp = wi_pci_lookup(pa);
#ifdef DIAGNOSTIC
	if (wpp == NULL) {
		printf("\n");
		panic("wi_pci_attach: impossible");
	}
#endif

	switch (wpp->wpp_chip) {
	case CHIP_PLX_OTHER:
	case CHIP_PLX_9052:
		/* Map memory and I/O registers. */
		if (pci_mapreg_map(pa, WI_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
		    &memt, &memh, NULL, NULL) != 0) {
			printf(": can't map mem space\n");
			return;
		}
		if (pci_mapreg_map(pa, WI_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL) != 0) {
			printf(": can't map I/O space\n");
			return;
		}

		if (wpp->wpp_chip == CHIP_PLX_OTHER) {
			/* The PLX 9052 doesn't have IO at 0x14.  Perhaps
			   other chips have, so we'll make this conditional. */
			if (pci_mapreg_map(pa, WI_PCI_PLX_LOIO,
				PCI_MAPREG_TYPE_IO, 0, &plxt,
				&plxh, NULL, NULL) != 0) {
				printf(": can't map PLX\n");
				return;
			}
		}
		break;
	case CHIP_TMD_7160:
		/* Used instead of PLX on at least one revision of
		 * the National Datacomm Corporation NCP130. Values
		 * for registers acquired from OpenBSD, which in
		 * turn got them from a Linux driver.
		 */
		/* Map COR and I/O registers. */
		if (pci_mapreg_map(pa, WI_TMD_COR, PCI_MAPREG_TYPE_IO, 0,
		    &tmdt, &tmdh, NULL, NULL) != 0) {
			printf(": can't map TMD\n");
			return;
		}
		if (pci_mapreg_map(pa, WI_TMD_IO, PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL) != 0) {
			printf(": can't map I/O space\n");
			return;
		}
		break;
	default:
		if (pci_mapreg_map(pa, WI_PCI_CBMA,
		    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
		    0, &iot, &ioh, NULL, NULL) != 0) {
			printf(": can't map mem space\n");
			return;
		}

		memt = iot;
		memh = ioh;
		sc->sc_pci = 1;
		break;
	}

	{
		char devinfo[256];

		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
		printf(": %s (rev. 0x%02x)\n", devinfo,
		       PCI_REVISION(pa->pa_class));
	}

	sc->sc_enabled = 1;
	sc->sc_enable = wi_pci_enable;
	sc->sc_disable = wi_pci_disable;

	sc->sc_iot = iot;
	sc->sc_ioh = ioh;
	/* Make sure interrupts are disabled. */
	CSR_WRITE_2(sc, WI_INT_EN, 0);
	CSR_WRITE_2(sc, WI_EVENT_ACK, 0xFFFF);

	if (wpp->wpp_chip == CHIP_PLX_OTHER) {
		uint32_t command;
#define	WI_LOCAL_INTCSR		0x4c
#define	WI_LOCAL_INTEN		0x40	/* poke this into INTCSR */

		command = bus_space_read_4(plxt, plxh, WI_LOCAL_INTCSR);
		command |= WI_LOCAL_INTEN;
		bus_space_write_4(plxt, plxh, WI_LOCAL_INTCSR, command);
	}

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);

	psc->psc_ih = ih;
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wi_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	printf("%s: interrupting at %s\n", device_xname(self), intrstr);

	switch (wpp->wpp_chip) {
	case CHIP_PLX_OTHER:
	case CHIP_PLX_9052:
		/*
		 * Setup the PLX chip for level interrupts and config index 1
		 * XXX - should really reset the PLX chip too.
		 */
		bus_space_write_1(memt, memh,
		    WI_PLX_COR_OFFSET, WI_PLX_COR_VALUE);
		break;
	case CHIP_TMD_7160:
		/* Enable I/O mode and level interrupts on the embedded
		 * card. The card's COR is the first byte of BAR 0.
		 */
		bus_space_write_1(tmdt, tmdh, 0, WI_COR_IOMODE);
		break;
	default:
		/* reset HFA3842 MAC core */
		wi_pci_reset(sc);
		break;
	}

	printf("%s:", device_xname(self));

	if (wi_attach(sc, 0) != 0) {
		aprint_error_dev(self, "failed to attach controller\n");
		pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
		return;
	}

	if (!wpp->wpp_chip)
		sc->sc_reset = wi_pci_reset;

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, &sc->sc_if);
}
Example #16
/*
 * Attach the interface. Allocate softc structures, do
 * setup and ethernet/BPF attach.
 */
void
kue_attach(device_t parent, device_t self, void *aux)
{
	struct kue_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	char			*devinfop;
	int			s;
	struct ifnet		*ifp;
	usbd_device_handle	dev = uaa->device;
	usbd_interface_handle	iface;
	usbd_status		err;
	usb_interface_descriptor_t	*id;
	usb_endpoint_descriptor_t	*ed;
	int			i;

	DPRINTFN(5,(" : kue_attach: sc=%p, dev=%p", sc, dev));

	sc->kue_dev = self;

	aprint_naive("\n");
	aprint_normal("\n");

	devinfop = usbd_devinfo_alloc(dev, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	err = usbd_set_config_no(dev, KUE_CONFIG_NO, 1);
	if (err) {
		aprint_error_dev(self, "failed to set configuration"
		    ", err=%s\n", usbd_errstr(err));
		return;
	}

	sc->kue_udev = dev;
	sc->kue_product = uaa->product;
	sc->kue_vendor = uaa->vendor;

	/* Load the firmware into the NIC. */
	if (kue_load_fw(sc)) {
		aprint_error_dev(self, "loading firmware failed\n");
		return;
	}

	err = usbd_device2interface_handle(dev, KUE_IFACE_IDX, &iface);
	if (err) {
		aprint_error_dev(self, "getting interface handle failed\n");
		return;
	}

	sc->kue_iface = iface;
	id = usbd_get_interface_descriptor(iface);

	/* Find endpoints. */
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(iface, i);
		if (ed == NULL) {
			aprint_error_dev(self, "couldn't get ep %d\n", i);
			return;
		}
		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->kue_ed[KUE_ENDPT_RX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
			   UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->kue_ed[KUE_ENDPT_TX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
			   UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->kue_ed[KUE_ENDPT_INTR] = ed->bEndpointAddress;
		}
	}

	if (sc->kue_ed[KUE_ENDPT_RX] == 0 || sc->kue_ed[KUE_ENDPT_TX] == 0) {
		aprint_error_dev(self, "missing endpoint\n");
		return;
	}

	/* Read ethernet descriptor */
	err = kue_ctl(sc, KUE_CTL_READ, KUE_CMD_GET_ETHER_DESCRIPTOR,
	    0, &sc->kue_desc, sizeof(sc->kue_desc));
	if (err) {
		aprint_error_dev(self, "could not read Ethernet descriptor\n");
		return;
	}

	sc->kue_mcfilters = malloc(KUE_MCFILTCNT(sc) * ETHER_ADDR_LEN,
	    M_USBDEV, M_NOWAIT);
	if (sc->kue_mcfilters == NULL) {
		aprint_error_dev(self,
		    "no memory for multicast filter buffer\n");
		return;
	}

	s = splnet();

	/*
	 * A KLSI chip was detected. Inform the world.
	 */
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->kue_desc.kue_macaddr));

	/* Initialize interface info.*/
	ifp = GET_IFP(sc);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kue_ioctl;
	ifp->if_start = kue_start;
	ifp->if_watchdog = kue_watchdog;
	strncpy(ifp->if_xname, device_xname(sc->kue_dev), IFNAMSIZ);

	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->kue_desc.kue_macaddr);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->kue_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	sc->kue_attached = true;
	splx(s);

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->kue_udev,
			   sc->kue_dev);

	return;
}
Example #17
void
omapmputmr_attach(device_t parent, device_t self, void *aux)
{
	struct omapmputmr_softc *sc = device_private(self);
	struct tipb_attach_args *tipb = aux;
	int ints_per_sec;

	sc->sc_iot = tipb->tipb_iot;
	sc->sc_intr = tipb->tipb_intr;

	if (bus_space_map(tipb->tipb_iot, tipb->tipb_addr, tipb->tipb_size, 0,
			 &sc->sc_ioh))
		panic("%s: Cannot map registers", device_xname(self));

	switch (device_unit(self)) {
	case 0:
		clock_sc = sc;
		ints_per_sec = hz;
		break;
	case 1:
		stat_sc = sc;
		ints_per_sec = profhz = stathz = STATHZ;
		break;
	case 2:
		ref_sc = sc;
		ints_per_sec = hz;	/* Same rate as clock */
		break;
	default:
		ints_per_sec = hz;	/* Better value? */
		break;
	}

	aprint_normal(": OMAP MPU Timer\n");
	aprint_naive("\n");

	/* Stop the timer from counting, but keep the timer module working. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MPU_CNTL_TIMER,
			  MPU_CLOCK_ENABLE);

	timer_factors tf;
	calc_timer_factors(ints_per_sec, &tf);

	switch (device_unit(self)) {
	case 0:
		counts_per_hz = tf.reload + 1;
		counts_per_usec = tf.counts_per_usec;
		break;
	case 2:

		/*
		 * The microtime reference clock for all practical purposes
		 * just wraps around as an unsigned int.
		 */

		tf.reload = 0xffffffff;
		break;

	default:
		break;
	}

	/* Set the reload value. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MPU_LOAD_TIMER, tf.reload);
	/* Set the PTV and the other required bits and pieces. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MPU_CNTL_TIMER,
			  ( MPU_CLOCK_ENABLE
			    | (tf.ptv << MPU_PTV_SHIFT)
			    | MPU_AR
			    | MPU_ST));
	/* The clock is now running, but is not generating interrupts. */
}
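/*
 * The reload value written to MPU_LOAD_TIMER follows from the relation
 * visible above: counts_per_hz = reload + 1, i.e. the timer counts down
 * reload+1 input ticks per interrupt.  A hedged arithmetic sketch; the
 * 12 MHz input clock is an illustrative assumption, and the PTV prescaler
 * that calc_timer_factors() also selects is omitted here.
 */
#include <stdint.h>
#include <stdio.h>

#define TIMER_CLOCK_HZ	12000000u	/* assumed timer input clock */

struct tf_sketch {
	uint32_t reload;		/* value for MPU_LOAD_TIMER */
	uint32_t counts_per_usec;
};

static void calc_factors_sketch(unsigned ints_per_sec, struct tf_sketch *tf)
{
	uint32_t counts_per_hz = TIMER_CLOCK_HZ / ints_per_sec;

	tf->reload = counts_per_hz - 1;	/* counts_per_hz = reload + 1 */
	tf->counts_per_usec = TIMER_CLOCK_HZ / 1000000u;
}

int main(void)
{
	struct tf_sketch tf;

	calc_factors_sketch(100, &tf);	/* e.g. hz = 100 */
	printf("reload=%u counts_per_usec=%u\n",
	    (unsigned)tf.reload, (unsigned)tf.counts_per_usec);
	return 0;
}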
Example #18
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
kue_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct kue_chain	*c = priv;
	struct kue_softc	*sc = c->kue_sc;
	struct ifnet		*ifp = GET_IFP(sc);
	struct mbuf		*m;
	int			total_len, pktlen;
	int			s;

	DPRINTFN(10,("%s: %s: enter status=%d\n", device_xname(sc->kue_dev),
		     __func__, status));

	if (sc->kue_dying)
		return;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
			return;
		sc->kue_rx_errs++;
		if (usbd_ratecheck(&sc->kue_rx_notice)) {
			printf("%s: %u usb errors on rx: %s\n",
			    device_xname(sc->kue_dev), sc->kue_rx_errs,
			    usbd_errstr(status));
			sc->kue_rx_errs = 0;
		}
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall_async(sc->kue_ep[KUE_ENDPT_RX]);
		goto done;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

	DPRINTFN(10,("%s: %s: total_len=%d len=%d\n", device_xname(sc->kue_dev),
		     __func__, total_len,
		     le16dec(c->kue_buf)));

	if (total_len <= 1)
		goto done;

	pktlen = le16dec(c->kue_buf);
	if (pktlen > total_len - 2)
		pktlen = total_len - 2;

	if (pktlen < ETHER_MIN_LEN - ETHER_CRC_LEN ||
	    pktlen > MCLBYTES - ETHER_ALIGN) {
		ifp->if_ierrors++;
		goto done;
	}

	/* No errors; receive the packet. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		ifp->if_ierrors++;
		goto done;
	}
	if (pktlen > MHLEN - ETHER_ALIGN) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			ifp->if_ierrors++;
			goto done;
		}
	}
	m->m_data += ETHER_ALIGN;

	/* copy data to mbuf */
	memcpy(mtod(m, uint8_t *), c->kue_buf + 2, pktlen);

	ifp->if_ipackets++;
	m->m_pkthdr.len = m->m_len = pktlen;
	m->m_pkthdr.rcvif = ifp;

	s = splnet();

	/*
	 * Handle BPF listeners. Let the BPF user see the packet, but
	 * don't pass it up to the ether_input() layer unless it's
	 * a broadcast packet, multicast packet, matches our ethernet
	 * address or the interface is in promiscuous mode.
	 */
	bpf_mtap(ifp, m);

	DPRINTFN(10,("%s: %s: deliver %d\n", device_xname(sc->kue_dev),
		    __func__, m->m_len));
	(*ifp->if_input)(ifp, m);

	splx(s);

 done:

	/* Setup new transfer. */
	usbd_setup_xfer(c->kue_xfer, sc->kue_ep[KUE_ENDPT_RX],
	    c, c->kue_buf, KUE_BUFSZ, USBD_SHORT_XFER_OK | USBD_NO_COPY,
	    USBD_NO_TIMEOUT, kue_rxeof);
	usbd_transfer(c->kue_xfer);

	DPRINTFN(10,("%s: %s: start rx\n", device_xname(sc->kue_dev),
		    __func__));
}
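/*
 * The receive path above strips a 2-byte little-endian length prefix that
 * the adapter prepends to each frame, clamps it to what the USB transfer
 * actually carried, and drops runts and oversized frames.  A stand-alone
 * sketch of just that framing check; MAX_FRAME is a simplifying assumption
 * (the driver bounds against MCLBYTES - ETHER_ALIGN instead).
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_FRAME	60		/* ETHER_MIN_LEN - ETHER_CRC_LEN */
#define MAX_FRAME	1514		/* assumed upper bound */

/*
 * Locate one frame in a bulk-in buffer laid out as
 *   [len_lo][len_hi][frame bytes ...]
 * Returns the frame length, or -1 if the buffer should be dropped.
 */
static int parse_rx(const uint8_t *buf, size_t total_len,
    const uint8_t **frame)
{
	size_t pktlen;

	if (total_len <= 1)
		return -1;			/* nothing useful received */

	pktlen = buf[0] | (buf[1] << 8);	/* little-endian length */
	if (pktlen > total_len - 2)
		pktlen = total_len - 2;		/* trust the USB byte count */

	if (pktlen < MIN_FRAME || pktlen > MAX_FRAME)
		return -1;			/* runt or oversized */

	*frame = buf + 2;
	return (int)pktlen;
}

int main(void)
{
	uint8_t buf[2 + 60] = { 60, 0 };	/* minimum-sized frame */
	const uint8_t *frame;

	printf("frame length: %d\n", parse_rx(buf, sizeof(buf), &frame));
	return 0;
}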
Example #19
static void
igma_attach(device_t parent, device_t self, void *aux)
{
	struct igma_softc *sc = device_private(self);
	const struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct igma_attach_args iaa;
	bus_space_tag_t gttmmt, gmt, regt;
	bus_space_handle_t gttmmh, gmh, regh;
	bus_addr_t gttmmb, gmb;

	pci_aprint_devinfo(pa, NULL);

	sc->sc_dev = self;

	/* Initialize according to chip type */
	igma_product_to_chip(pa, &sc->sc_chip);

	if (pci_mapreg_map(pa, PCI_BAR0, PCI_MAPREG_TYPE_MEM,
			BUS_SPACE_MAP_LINEAR,
			&gttmmt, &gttmmh, &gttmmb, NULL)) {
		aprint_error_dev(sc->sc_dev, "unable to map GTTMM\n");
		return;
	}
	sc->sc_chip.mmiot = gttmmt;
	if (bus_space_subregion(gttmmt, gttmmh, 0, 2*1024*1024,
			&sc->sc_chip.mmioh)) {
		aprint_error_dev(sc->sc_dev, "unable to submap MMIO\n");
		return;
	}
	sc->sc_chip.gttt = gttmmt;
	if (bus_space_subregion(gttmmt, gttmmh, 2*1024*1024, 2*1024*1024,
			&sc->sc_chip.gtth)) {
		aprint_error_dev(sc->sc_dev, "unable to submap GTT\n");
		return;
	}

	if (pci_mapreg_map(pa, PCI_BAR2, PCI_MAPREG_TYPE_MEM,
			BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE,
			&gmt, &gmh, &gmb, NULL)) {
		aprint_error_dev(sc->sc_dev, "unable to map aperture\n");
		return;
	}
	sc->sc_chip.gmt = gmt;
	sc->sc_chip.gmh = gmh;
	sc->sc_chip.gmb = gmb;

	if (pci_mapreg_map(pa, PCI_BAR4, PCI_MAPREG_TYPE_IO, 0,
			&regt, &regh, NULL, NULL)) {
		aprint_error_dev(sc->sc_dev, "unable to map IO registers\n");
		return;
	}

#if NVGA > 0
	iaa.iaa_console = vga_cndetach() ? true : false;
#else
	iaa.iaa_console = 0;
#endif
	sc->sc_chip.vgat = regt;
	if (bus_space_map(regt, 0x3c0, 0x10, 0, &sc->sc_chip.vgah)) {
		aprint_error_dev(sc->sc_dev, "unable to map VGA registers\n");
		return;
	}

	/* Check hardware for more information */
	igma_adjust_chip(sc, &sc->sc_chip);

	aprint_normal("%s: VGA_CNTRL: 0x%x\n",device_xname(sc->sc_dev),
		sc->sc_chip.vga_cntrl);
	aprint_normal("%s: GPIO_OFFSET: 0x%x\n",device_xname(sc->sc_dev),
		sc->sc_chip.gpio_offset);
	aprint_normal("%s: BACKLIGHT_CTRL: 0x%x\n",device_xname(sc->sc_dev),
		sc->sc_chip.backlight_cntrl);
	aprint_normal("%s: BACKLIGHT_CTRL2: 0x%x\n",device_xname(sc->sc_dev),
		sc->sc_chip.backlight_cntrl2);

#if NIGMAFB > 0
	strcpy(iaa.iaa_name, "igmafb");
	iaa.iaa_chip = sc->sc_chip;
	config_found_ia(sc->sc_dev, "igmabus", &iaa, igma_print);
#endif

	igma_i2c_attach(sc);
}
Example #20
static void
kue_init(void *xsc)
{
	struct kue_softc	*sc = xsc;
	struct ifnet		*ifp = GET_IFP(sc);
	int			s;
	uint8_t			eaddr[ETHER_ADDR_LEN];

	DPRINTFN(5,("%s: %s: enter\n", device_xname(sc->kue_dev),__func__));

	if (ifp->if_flags & IFF_RUNNING)
		return;

	s = splnet();

	memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
	/* Set MAC address */
	kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SET_MAC, 0, eaddr, ETHER_ADDR_LEN);

	sc->kue_rxfilt = KUE_RXFILT_UNICAST | KUE_RXFILT_BROADCAST;

	 /* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		sc->kue_rxfilt |= KUE_RXFILT_PROMISC;

	kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt);

	/* I'm not sure how to tune these. */
#if 0
	/*
	 * Leave this one alone for now; setting it
	 * wrong causes lockups on some machines/controllers.
	 */
	kue_setword(sc, KUE_CMD_SET_SOFS, 1);
#endif
	kue_setword(sc, KUE_CMD_SET_URB_SIZE, 64);

	/* Init TX ring. */
	if (kue_tx_list_init(sc) == ENOBUFS) {
		printf("%s: tx list init failed\n", device_xname(sc->kue_dev));
		splx(s);
		return;
	}

	/* Init RX ring. */
	if (kue_rx_list_init(sc) == ENOBUFS) {
		printf("%s: rx list init failed\n", device_xname(sc->kue_dev));
		splx(s);
		return;
	}

	/* Load the multicast filter. */
	kue_setmulti(sc);

	if (sc->kue_ep[KUE_ENDPT_RX] == NULL) {
		if (kue_open_pipes(sc)) {
			splx(s);
			return;
		}
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);
}
Example #21
static void
ahd_pci_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args	*pa = aux;
	struct ahd_softc       	*ahd = device_private(self);

	const struct ahd_pci_identity *entry;

	uint32_t	   	devconfig;
	pcireg_t	   	command;
	int		   	error;
	pcireg_t	   	subid;
	uint16_t	   	subvendor;
	pcireg_t           	reg;
	int		   	ioh_valid, ioh2_valid, memh_valid;
	pcireg_t           	memtype;
	pci_intr_handle_t  	ih;
	const char         	*intrstr;
	struct ahd_pci_busdata 	*bd;

	ahd->sc_dev = self;
	ahd_set_name(ahd, device_xname(self));
	ahd->parent_dmat = pa->pa_dmat;

	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	subid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	entry = ahd_find_pci_device(pa->pa_id, subid);
	if (entry == NULL)
		return;

	/* Keep information about the PCI bus */
	bd = malloc(sizeof (struct ahd_pci_busdata), M_DEVBUF, M_NOWAIT);
	if (bd == NULL) {
		aprint_error("%s: unable to allocate bus-specific data\n",
		    ahd_name(ahd));
		return;
	}
	memset(bd, 0, sizeof(struct ahd_pci_busdata));

	bd->pc = pa->pa_pc;
	bd->tag = pa->pa_tag;
	bd->func = pa->pa_function;
	bd->dev = pa->pa_device;

	ahd->bus_data = bd;

	ahd->description = entry->name;

	ahd->seep_config = malloc(sizeof(*ahd->seep_config),
				  M_DEVBUF, M_NOWAIT);
	if (ahd->seep_config == NULL) {
		aprint_error("%s: cannot malloc seep_config!\n", ahd_name(ahd));
		return;
	}
	memset(ahd->seep_config, 0, sizeof(*ahd->seep_config));

	LIST_INIT(&ahd->pending_scbs);
	ahd_timer_init(&ahd->reset_timer);
	ahd_timer_init(&ahd->stat_timer);
	ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A
	    | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A;
	ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT;
	ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT;
	ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT;
	ahd->int_coalescing_threshold = AHD_INT_COALESCING_THRESHOLD_DEFAULT;
	ahd->int_coalescing_stop_threshold =
	    AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT;

	if (ahd_platform_alloc(ahd, NULL) != 0) {
		ahd_free(ahd);
		return;
	}

	/*
	 * Record if this is an HP board.
	 */
	subvendor = PCI_VENDOR(subid);
	if (subvendor == SUBID_HP)
		ahd->flags |= AHD_HP_BOARD;

	error = entry->setup(ahd, pa);
	if (error != 0)
		return;

	devconfig = pci_conf_read(pa->pa_pc, pa->pa_tag, DEVCONFIG);
	if ((devconfig & PCIXINITPAT) == PCIXINIT_PCI33_66) {
		ahd->chip |= AHD_PCI;
		/* Disable PCIX workarounds when running in PCI mode. */
		ahd->bugs &= ~AHD_PCIX_BUG_MASK;
	} else {
		ahd->chip |= AHD_PCIX;
	}
	ahd->bus_description = pci_bus_modes[PCI_BUS_MODES_INDEX(devconfig)];

	memh_valid = ioh_valid = ioh2_valid = 0;

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIX,
	    &bd->pcix_off, NULL)) {
		if (ahd->chip & AHD_PCIX)
			aprint_error_dev(self,
			    "warning: can't find PCI-X capability\n");
		ahd->chip &= ~AHD_PCIX;
		ahd->chip |= AHD_PCI;
		ahd->bugs &= ~AHD_PCIX_BUG_MASK;
	}

	/*
	 * Map PCI Registers
	 */
	if ((ahd->bugs & AHD_PCIX_MMAPIO_BUG) == 0) {
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
					  AHD_PCI_MEMADDR);
		switch (memtype) {
		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
		case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
			memh_valid = (pci_mapreg_map(pa, AHD_PCI_MEMADDR,
						     memtype, 0, &ahd->tags[0],
						     &ahd->bshs[0],
						     NULL, NULL) == 0);
			if (memh_valid) {
				ahd->tags[1] = ahd->tags[0];
				bus_space_subregion(ahd->tags[0], ahd->bshs[0],
						    /*offset*/0x100,
						    /*size*/0x100,
						    &ahd->bshs[1]);
				if (ahd_pci_test_register_access(ahd) != 0)
					memh_valid = 0;
			}
			break;
		default:
			memh_valid = 0;
			aprint_error("%s: unknown memory type: 0x%x\n",
			       ahd_name(ahd), memtype);
			break;
		}

		if (memh_valid) {
			command &= ~PCI_COMMAND_IO_ENABLE;
			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    PCI_COMMAND_STATUS_REG, command);
		}
#ifdef AHD_DEBUG
		printf("%s: doing memory mapping shs0 0x%lx, shs1 0x%lx\n",
		    ahd_name(ahd), ahd->bshs[0], ahd->bshs[1]);
#endif
	}

	if (command & PCI_COMMAND_IO_ENABLE) {
		/* First BAR */
		ioh_valid = (pci_mapreg_map(pa, AHD_PCI_IOADDR,
					    PCI_MAPREG_TYPE_IO, 0,
					    &ahd->tags[0], &ahd->bshs[0],
					    NULL, NULL) == 0);

		/* 2nd BAR */
		ioh2_valid = (pci_mapreg_map(pa, AHD_PCI_IOADDR1,
					     PCI_MAPREG_TYPE_IO, 0,
					     &ahd->tags[1], &ahd->bshs[1],
					     NULL, NULL) == 0);

		if (ioh_valid && ioh2_valid) {
			KASSERT(memh_valid == 0);
			command &= ~PCI_COMMAND_MEM_ENABLE;
			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    PCI_COMMAND_STATUS_REG, command);
		}
#ifdef AHD_DEBUG
		printf("%s: doing io mapping shs0 0x%lx, shs1 0x%lx\n",
		    ahd_name(ahd), ahd->bshs[0], ahd->bshs[1]);
#endif

	}

	if (memh_valid == 0 && (ioh_valid == 0 || ioh2_valid == 0)) {
		aprint_error("%s: unable to map registers\n", ahd_name(ahd));
		return;
	}

	aprint_normal("\n");
	aprint_naive("\n");

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    pci_activate_null)) && error != EOPNOTSUPP) {
		aprint_error_dev(self, "cannot activate %d\n", error);
		return;
	}
	/*
	 * Should we bother disabling 39Bit addressing
	 * based on installed memory?
	 */
	if (sizeof(bus_addr_t) > 4)
		ahd->flags |= AHD_39BIT_ADDRESSING;

	/*
	 * If we need to support high memory, enable dual
	 * address cycles.  This bit must be set to enable
	 * high address bit generation even if we are on a
	 * 64bit bus (PCI64BIT set in devconfig).
	 */
	if ((ahd->flags & (AHD_39BIT_ADDRESSING|AHD_64BIT_ADDRESSING)) != 0) {
		uint32_t dvconfig;

		aprint_normal("%s: Enabling 39Bit Addressing\n", ahd_name(ahd));
		dvconfig = pci_conf_read(pa->pa_pc, pa->pa_tag, DEVCONFIG);
		dvconfig |= DACEN;
		pci_conf_write(pa->pa_pc, pa->pa_tag, DEVCONFIG, dvconfig);
	}

	/* Ensure busmastering is enabled */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    reg | PCI_COMMAND_MASTER_ENABLE);

	ahd_softc_init(ahd);

	/*
	 * Map the interrupt routines
	 */
	ahd->bus_intr = ahd_pci_intr;

	error = ahd_reset(ahd, /*reinit*/FALSE);
	if (error != 0) {
		ahd_free(ahd);
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: couldn't map interrupt\n", ahd_name(ahd));
		ahd_free(ahd);
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	ahd->ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO, ahd_intr, ahd);
	if (ahd->ih == NULL) {
		aprint_error("%s: couldn't establish interrupt",
		       ahd_name(ahd));
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		ahd_free(ahd);
		return;
	}
	if (intrstr != NULL)
		aprint_normal("%s: interrupting at %s\n", ahd_name(ahd),
		       intrstr);

	/* Get the size of the cache */
	ahd->pci_cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	ahd->pci_cachesize *= 4;

 	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	/* See if we have a SEEPROM and perform auto-term */
	error = ahd_check_extport(ahd);
	if (error != 0)
		return;

	/* Core initialization */
	error = ahd_init(ahd);
	if (error != 0)
		return;

	/*
	 * Link this softc in with all other ahd instances.
	 */
	ahd_attach(ahd);
}
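The register-mapping block in ahd_pci_attach() follows the usual NetBSD idiom: probe the BAR type with pci_mapreg_type(), then map it with pci_mapreg_map(). Stripped of the driver-specific I/O fallbacks, that idiom looks roughly like the sketch below; mydev_softc, MYDEV_PCI_MEMADDR and the sc_iot/sc_ioh/sc_ios members are hypothetical names, not part of the ahd driver.

static int
example_map_membar(struct mydev_softc *sc, const struct pci_attach_args *pa)
{
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, MYDEV_PCI_MEMADDR);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		/* On success, sc_iot/sc_ioh are ready for bus_space_*(). */
		return pci_mapreg_map(pa, MYDEV_PCI_MEMADDR, memtype, 0,
		    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios);
	default:
		return EINVAL;
	}
}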
Beispiel #22
0
static int
kue_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct kue_softc	*sc = ifp->if_softc;
	struct ifaddr 		*ifa = (struct ifaddr *)data;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			s, error = 0;

	DPRINTFN(5,("%s: %s: enter\n", device_xname(sc->kue_dev),__func__));

	if (sc->kue_dying)
		return (EIO);

	s = splnet();

	switch(command) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		kue_init(sc);

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if ((error = ifioctl_common(ifp, command, data)) == ENETRESET)
			error = 0;
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, command, data)) != 0)
			break;
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->kue_if_flags & IFF_PROMISC)) {
				sc->kue_rxfilt |= KUE_RXFILT_PROMISC;
				kue_setword(sc, KUE_CMD_SET_PKT_FILTER,
				    sc->kue_rxfilt);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->kue_if_flags & IFF_PROMISC) {
				sc->kue_rxfilt &= ~KUE_RXFILT_PROMISC;
				kue_setword(sc, KUE_CMD_SET_PKT_FILTER,
				    sc->kue_rxfilt);
			} else if (!(ifp->if_flags & IFF_RUNNING))
				kue_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				kue_stop(sc);
		}
		sc->kue_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = ether_ioctl(ifp, command, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				kue_setmulti(sc);
			error = 0;
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	splx(s);

	return (error);
}
Beispiel #23
0
/*
 * Perform a read of "Device Status Jump" register and update the
 * status if necessary.  If status is read, the given "ecmd" is also
 * performed, unless "ecmd" is zero.  Returns DSJ value, -1 on failure
 * and -2 on "temporary" failure.
 */
int
mtreaddsj(struct mt_softc *sc, int ecmd)
{
	int retval;

	if (sc->sc_flags & MTF_STATTIMEO)
		goto getstats;
	retval = gpibrecv(sc->sc_ic,
	    (sc->sc_flags & MTF_DSJTIMEO) ? -1 : sc->sc_slave,
	    MTT_DSJ, &(sc->sc_lastdsj), 1);
	sc->sc_flags &= ~MTF_DSJTIMEO;
	if (retval != 1) {
		DPRINTF(MDB_ANY, ("%s can't gpibrecv DSJ",
		    device_xname(sc->sc_dev)));
		if (sc->sc_recvtimeo == 0)
			sc->sc_recvtimeo = hz;
		if (--sc->sc_recvtimeo == 0)
			return (-1);
		if (retval == 0)
			sc->sc_flags |= MTF_DSJTIMEO;
		return (-2);
	}
	sc->sc_recvtimeo = 0;
	sc->sc_statindex = 0;
	DPRINTF(MDB_ANY, ("%s readdsj: 0x%x", device_xname(sc->sc_dev),
	    sc->sc_lastdsj));
	sc->sc_lastecmd = ecmd;
	switch (sc->sc_lastdsj) {
	    case 0:
		if (ecmd & MTE_DSJ_FORCE)
			break;
		return (0);

	    case 2:
		sc->sc_lastecmd = MTE_COMPLETE;
		/* FALLTHROUGH */
	    case 1:
		break;

	    default:
		printf("%s readdsj: DSJ 0x%x\n", device_xname(sc->sc_dev),
		    sc->sc_lastdsj);
		return (-1);
	}

getstats:
	retval = gpibrecv(sc->sc_ic,
	    (sc->sc_flags & MTF_STATCONT) ? -1 : sc->sc_slave, MTT_STAT,
	     ((char *)&(sc->sc_stat)) + sc->sc_statindex,
	    sizeof(sc->sc_stat) - sc->sc_statindex);
	sc->sc_flags &= ~(MTF_STATTIMEO | MTF_STATCONT);
	if (retval != sizeof(sc->sc_stat) - sc->sc_statindex) {
		if (sc->sc_recvtimeo == 0)
			sc->sc_recvtimeo = hz;
		if (--sc->sc_recvtimeo != 0) {
			if (retval >= 0) {
				sc->sc_statindex += retval;
				sc->sc_flags |= MTF_STATCONT;
			}
			sc->sc_flags |= MTF_STATTIMEO;
			return (-2);
		}
		printf("%s readdsj: can't read status", device_xname(sc->sc_dev));
		return (-1);
	}
	sc->sc_recvtimeo = 0;
	sc->sc_statindex = 0;
	DPRINTF(MDB_ANY, ("%s readdsj: status is %x %x %x %x %x %x",
	    device_xname(sc->sc_dev),
	    sc->sc_stat1, sc->sc_stat2, sc->sc_stat3,
	    sc->sc_stat4, sc->sc_stat5, sc->sc_stat6));
	if (sc->sc_lastecmd)
		(void) gpibsend(sc->sc_ic, sc->sc_slave,
		    MTL_ECMD, &(sc->sc_lastecmd), 1);
	return ((int) sc->sc_lastdsj);
}
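The block comment above defines a small return contract: -1 is a hard failure, -2 a temporary failure worth retrying, and any other value is the DSJ byte itself. A hypothetical caller honouring that contract (not part of the driver) might look like this:

static int
example_mt_poll_dsj(struct mt_softc *sc)
{
	int dsj;

	dsj = mtreaddsj(sc, MTE_COMPLETE);
	if (dsj == -2)
		return EAGAIN;	/* temporary failure, retry later */
	if (dsj == -1)
		return EIO;	/* hard failure */
	return 0;		/* dsj holds the DSJ byte */
}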
Beispiel #24
0
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(struct vr_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, total_len;
	uint32_t rxstat;

	ifp = &sc->vr_ec.ec_if;

	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
		d = VR_CDRX(sc, i);
		ds = VR_DSRX(sc, i);

		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = le32toh(d->vr_status);

		if (rxstat & VR_RXSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			const char *errstr;

			ifp->if_ierrors++;
			switch (rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				errstr = "crc error";
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				errstr = "frame alignment error";
				break;
			case VR_RXSTAT_FIFOOFLOW:
				errstr = "FIFO overflow";
				break;
			case VR_RXSTAT_GIANT:
				errstr = "received giant packet";
				break;
			case VR_RXSTAT_RUNT:
				errstr = "received runt packet";
				break;
			case VR_RXSTAT_BUSERR:
				errstr = "system bus error";
				break;
			case VR_RXSTAT_BUFFERR:
				errstr = "rx buffer error";
				break;
			default:
				errstr = "unknown rx error";
				break;
			}
			printf("%s: receive error: %s\n", device_xname(sc->vr_dev),
			    errstr);

			VR_INIT_RXDESC(sc, i);

			continue;
		} else if (!(rxstat & VR_RXSTAT_FIRSTFRAG) ||
		           !(rxstat & VR_RXSTAT_LASTFRAG)) {
			/*
			 * This driver expects to receive whole packets every
			 * time.  In case we receive a fragment that is not
			 * a complete packet, we discard it.
			 */
			ifp->if_ierrors++;

			printf("%s: receive error: incomplete frame; "
			       "size = %d, status = 0x%x\n",
			       device_xname(sc->vr_dev),
			       VR_RXBYTES(le32toh(d->vr_status)), rxstat);

			VR_INIT_RXDESC(sc, i);

			continue;
		}

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(le32toh(d->vr_status));
#ifdef DIAGNOSTIC
		if (total_len == 0) {
			/*
			 * If we receive a zero-length packet, we probably
			 * failed to handle an error condition above.
			 * Discard it to avoid a later crash.
			 */
			ifp->if_ierrors++;

			printf("%s: receive error: zero-length packet; "
			       "status = 0x%x\n",
			       device_xname(sc->vr_dev), rxstat);

			VR_INIT_RXDESC(sc, i);

			continue;
		}
#endif

		/*
		 * The Rhine chip includes the CRC with every packet.
		 * Trim it off here.
		 */
		total_len -= ETHER_CRC_LEN;

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (vr_copy_small != 0 && total_len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, void *),
			    mtod(ds->ds_mbuf, void *), total_len);
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
static void
dec_3000_300_device_register(device_t dev, void *aux)
{
	static int found, initted, scsiboot, netboot;
	static device_t scsidev;
	static device_t tcdsdev;
	struct bootdev_data *b = bootdev_data;
	device_t parent = device_parent(dev);

	if (found)
		return;

	if (!initted) {
		scsiboot = (strcmp(b->protocol, "SCSI") == 0);
		netboot = (strcmp(b->protocol, "BOOTP") == 0) ||
		    (strcmp(b->protocol, "MOP") == 0);
#if 0
		printf("scsiboot = %d, netboot = %d\n", scsiboot, netboot);
#endif
		initted = 1;
	}

	/*
	 * for scsi boot, we look for "tcds", make sure it has the
	 * right slot number, then find the "asc" on this tcds that
	 * has the right channel.  then we find the actual scsi
	 * device we came from.  note: no SCSI LUN support (yet).
	 */
	if (scsiboot && device_is_a(dev, "tcds")) {
		struct tc_attach_args *tcargs = aux;

		if (b->slot != tcargs->ta_slot)
			return;

		tcdsdev = dev;
#if 0
		printf("\ntcdsdev = %s\n", device_xname(dev));
#endif
	}
	if (scsiboot && tcdsdev &&
	    device_is_a(dev, "asc")) {
		struct tcdsdev_attach_args *ta = aux;

		if (parent != tcdsdev)
			return;

		if (ta->tcdsda_chip != b->channel)
			return;

		scsidev = dev;
#if 0
		printf("\nscsidev = %s\n", device_xname(dev));
#endif
	}

	if (scsiboot && scsidev &&
	    (device_is_a(dev, "sd") ||
	     device_is_a(dev, "st") ||
	     device_is_a(dev, "cd"))) {
		struct scsipibus_attach_args *sa = aux;

		if (device_parent(parent) != scsidev)
			return;

		if (b->unit / 100 != sa->sa_periph->periph_target)
			return;

		/* XXX LUN! */

		switch (b->boot_dev_type) {
		case 0:
			if (!device_is_a(dev, "sd") &&
			    !device_is_a(dev, "cd"))
				return;
			break;
		case 1:
			if (!device_is_a(dev, "st"))
				return;
			break;
		default:
			return;
		}

		/* we've found it! */
		booted_device = dev;
#if 0
		printf("\nbooted_device = %s\n", device_xname(booted_device));
#endif
		found = 1;
	}

	if (netboot) {
		if (b->slot == 5 && device_is_a(dev, "le") &&
		    device_is_a(parent, "ioasic")) {
			/*
			 * no need to check ioasic_attach_args, since only
			 * one le on ioasic.
			 */

			booted_device = dev;
#if 0
			printf("\nbooted_device = %s\n", device_xname(booted_device));
#endif
			found = 1;
			return;
		}

		/*
		 * XXX GENERIC SUPPORT FOR TC NETWORK BOARDS
		 */
	}
}
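As a concrete illustration of the matching above, with made-up boot parameters: a SCSI boot reporting slot 2, channel 0 and unit 300 would first latch the tcds whose ta_slot is 2 as tcdsdev, then the asc child whose tcdsda_chip is 0 as scsidev, and finally the sd or cd attached below it whose SCSI target equals unit / 100, here target 3, which becomes booted_device.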
Beispiel #26
0
static int
maxrtc_clock_write(struct maxrtc_softc *sc, struct clock_ymdhms *dt)
{
	uint8_t bcd[MAX6900_BURST_LEN], cmdbuf[2];
	uint8_t init_seconds, final_seconds;
	int i;

	/*
	 * Convert our time representation into something the MAX6900
	 * can understand.
	 */
	bcd[MAX6900_BURST_SECOND] = TOBCD(dt->dt_sec);
	bcd[MAX6900_BURST_MINUTE] = TOBCD(dt->dt_min);
	bcd[MAX6900_BURST_HOUR] = TOBCD(dt->dt_hour) & MAX6900_HOUR_24MASK;
	bcd[MAX6900_BURST_DATE] = TOBCD(dt->dt_day);
	bcd[MAX6900_BURST_WDAY] = TOBCD(dt->dt_wday);
	bcd[MAX6900_BURST_MONTH] = TOBCD(dt->dt_mon);
	bcd[MAX6900_BURST_YEAR] = TOBCD(dt->dt_year % 100);
		/* century in control slot */
	bcd[MAX6900_BURST_CONTROL] = TOBCD(dt->dt_year / 100);

	if (iic_acquire_bus(sc->sc_tag, I2C_F_POLL)) {
		aprint_error_dev(sc->sc_dev,
		    "maxrtc_clock_write: failed to acquire I2C bus\n");
		return (0);
	}

	/* Start by clearing the control register's write-protect bit. */
	cmdbuf[0] = MAX6900_REG_CONTROL | MAX6900_CMD_WRITE;
	cmdbuf[1] = 0;

	if (iic_exec(sc->sc_tag, I2C_OP_WRITE, sc->sc_address,
		     cmdbuf, 1, &cmdbuf[1], 1, I2C_F_POLL)) {
		iic_release_bus(sc->sc_tag, I2C_F_POLL);
		aprint_error_dev(sc->sc_dev,
		    "maxrtc_clock_write: failed to clear WP bit\n");
		return (0);
	}

	/*
	 * The MAX6900 RTC manual recommends ensuring "atomicity" of
	 * a non-burst write by:
	 *
	 *	- writing SECONDS
	 *	- reading back SECONDS, remembering it as "initial seconds"
	 *	- write the remaining RTC registers
	 *	- read back SECONDS as "final seconds"
	 *	- if "initial seconds" == 59, ensure "final seconds" == 59
	 *	- else, ensure "final seconds" is no more than one second
	 *	  beyond "initial seconds".
	 */
 again:
	cmdbuf[0] = MAX6900_REG_SECOND | MAX6900_CMD_WRITE;
	if (iic_exec(sc->sc_tag, I2C_OP_WRITE, sc->sc_address,
		     cmdbuf, 1, &bcd[MAX6900_BURST_SECOND], 1, I2C_F_POLL)) {
		iic_release_bus(sc->sc_tag, I2C_F_POLL);
		aprint_error_dev(sc->sc_dev,
		    "maxrtc_clock_write: failed to write SECONDS\n");
		return (0);
	}

	cmdbuf[0] = MAX6900_REG_SECOND | MAX6900_CMD_READ;
	if (iic_exec(sc->sc_tag, I2C_OP_READ, sc->sc_address,
		     cmdbuf, 1, &init_seconds, 1, I2C_F_POLL)) {
		iic_release_bus(sc->sc_tag, I2C_F_POLL);
		aprint_error_dev(sc->sc_dev,
		    "maxrtc_clock_write: failed to read "
		    "INITIAL SECONDS\n");
		return (0);
	}

	for (i = 1; i < MAX6900_BURST_LEN; i++) {
		cmdbuf[0] = max6900_rtc_offset[i] | MAX6900_CMD_WRITE;
		if (iic_exec(sc->sc_tag,
			     i != MAX6900_BURST_LEN - 1 ? I2C_OP_WRITE :
			     I2C_OP_WRITE_WITH_STOP, sc->sc_address,
			     cmdbuf, 1, &bcd[i], 1, I2C_F_POLL)) {
			iic_release_bus(sc->sc_tag, I2C_F_POLL);
			aprint_error_dev(sc->sc_dev,
			    "maxrtc_clock_write: failed to write rtc "
			    " at 0x%x\n",
			    max6900_rtc_offset[i]);
			return (0);
		}
	}

	cmdbuf[0] = MAX6900_REG_SECOND | MAX6900_CMD_READ;
	if (iic_exec(sc->sc_tag, I2C_OP_READ_WITH_STOP, sc->sc_address,
		     cmdbuf, 1, &final_seconds, 1, I2C_F_POLL)) {
		iic_release_bus(sc->sc_tag, I2C_F_POLL);
		aprint_error_dev(sc->sc_dev,
		    "maxrtc_clock_write: failed to read "
		    "FINAL SECONDS\n");
		return (0);
	}

	if ((init_seconds == 59 && final_seconds != 59) ||
	    (init_seconds != 59 && final_seconds != init_seconds + 1)) {
#if 1
		printf("%s: maxrtc_clock_write: init %d, final %d, try again\n",
		    device_xname(sc->sc_dev), init_seconds, final_seconds);
#endif
		goto again;
	}

	/* Finish by setting the control register's write-protect bit. */
	cmdbuf[0] = MAX6900_REG_CONTROL | MAX6900_CMD_WRITE;
	cmdbuf[1] = MAX6900_CONTROL_WP;

	if (iic_exec(sc->sc_tag, I2C_OP_WRITE_WITH_STOP, sc->sc_address,
		     cmdbuf, 1, &cmdbuf[1], 1, I2C_F_POLL)) {
		iic_release_bus(sc->sc_tag, I2C_F_POLL);
		aprint_error_dev(sc->sc_dev,
		    "maxrtc_clock_write: failed to set WP bit\n");
		return (0);
	}

	iic_release_bus(sc->sc_tag, I2C_F_POLL);

	return (1);
}
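As a usage sketch (illustrative, not taken from the driver), a caller would typically fill the struct clock_ymdhms with clock_secs_to_ymdhms() from <dev/clock_subr.h> and treat a return value of 0 from maxrtc_clock_write() as failure:

static int
example_maxrtc_settime(struct maxrtc_softc *sc, time_t secs)
{
	struct clock_ymdhms dt;

	clock_secs_to_ymdhms(secs, &dt);
	if (maxrtc_clock_write(sc, &dt) == 0)
		return EIO;
	return 0;
}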
Beispiel #27
0
void
spif_softintr(void *vsc)
{
	struct spif_softc *sc = (struct spif_softc *)vsc;
	struct stty_softc *stc = sc->sc_ttys;
	int i, data, s, flags;
	uint8_t stat, msvr;
	struct stty_port *sp;
	struct tty *tp;

	if (stc != NULL) {
		for (i = 0; i < stc->sc_nports; i++) {
			sp = &stc->sc_port[i];
			tp = sp->sp_tty;

			if (!ISSET(tp->t_state, TS_ISOPEN))
				continue;

			while (sp->sp_rget != sp->sp_rput) {
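				/*
				 * Each iteration consumes one (status, data)
				 * byte pair, presumably queued by the hard
				 * interrupt handler, from the ring between
				 * sp_rget and sp_rput; the get pointer wraps
				 * from sp_rend back to sp_rbuf.
				 */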
				stat = sp->sp_rget[0];
				data = sp->sp_rget[1];
				sp->sp_rget += 2;
				if (sp->sp_rget == sp->sp_rend)
					sp->sp_rget = sp->sp_rbuf;

				if (stat & (CD180_RCSR_BE | CD180_RCSR_FE))
					data |= TTY_FE;

				if (stat & CD180_RCSR_PE)
					data |= TTY_PE;

				(*tp->t_linesw->l_rint)(data, tp);
			}

			s = splhigh();
			flags = sp->sp_flags;
			CLR(sp->sp_flags, STTYF_DONE | STTYF_CDCHG |
			    STTYF_RING_OVERFLOW);
			splx(s);

			if (ISSET(flags, STTYF_CDCHG)) {
				s = spltty();
				STC_WRITE(sc, STC_CAR, i);
				msvr = STC_READ(sc, STC_MSVR);
				splx(s);

				sp->sp_carrier = msvr & CD180_MSVR_CD;
				(*tp->t_linesw->l_modem)(tp,
				    sp->sp_carrier);
			}

			if (ISSET(flags, STTYF_RING_OVERFLOW)) {
				log(LOG_WARNING, "%s-%x: ring overflow\n",
					device_xname(stc->sc_dev), i);
			}

			if (ISSET(flags, STTYF_DONE)) {
				ndflush(&tp->t_outq,
				    sp->sp_txp - tp->t_outq.c_cf);
				CLR(tp->t_state, TS_BUSY);
				(*tp->t_linesw->l_start)(tp);
			}
		}
	}
}
/* ARGSUSED */
void
ie_pcctwo_attach(device_t parent, device_t self, void *aux)
{
	struct pcctwo_attach_args *pa;
	struct ie_pcctwo_softc *ps;
	struct ie_softc *sc;
	bus_dma_segment_t seg;
	int rseg;

	pa = aux;
	ps = device_private(self);
	sc = &ps->ps_ie;
	sc->sc_dev = self;

	/* Map the MPU controller registers in PCCTWO space */
	ps->ps_bust = pa->pa_bust;
	bus_space_map(pa->pa_bust, pa->pa_offset, IE_MPUREG_SIZE,
	    0, &ps->ps_bush);

	/* Get contiguous DMA-able memory for the IE chip */
	if (bus_dmamem_alloc(pa->pa_dmat, ether_data_buff_size, PAGE_SIZE, 0,
		&seg, 1, &rseg,
		BUS_DMA_NOWAIT | BUS_DMA_ONBOARD_RAM | BUS_DMA_24BIT) != 0) {
		aprint_error_dev(self, "Failed to allocate ether buffer\n");
		return;
	}
	if (bus_dmamem_map(pa->pa_dmat, &seg, rseg, ether_data_buff_size,
	    (void **) & sc->sc_maddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) {
		aprint_error_dev(self, "Failed to map ether buffer\n");
		bus_dmamem_free(pa->pa_dmat, &seg, rseg);
		return;
	}
	sc->bt = pa->pa_bust;
	sc->bh = (bus_space_handle_t) sc->sc_maddr;	/* XXXSCW Better way? */
	sc->sc_iobase = (void *) seg.ds_addr;
	sc->sc_msize = ether_data_buff_size;
	memset(sc->sc_maddr, 0, ether_data_buff_size);

	sc->hwreset = ie_reset;
	sc->hwinit = ie_hwinit;
	sc->chan_attn = ie_atten;
	sc->intrhook = ie_intrhook;
	sc->memcopyin = ie_copyin;
	sc->memcopyout = ie_copyout;
	sc->ie_bus_barrier = NULL;
	sc->ie_bus_read16 = ie_read_16;
	sc->ie_bus_write16 = ie_write_16;
	sc->ie_bus_write24 = ie_write_24;
	sc->sc_mediachange = NULL;
	sc->sc_mediastatus = NULL;

	sc->scp = 0;
	sc->iscp = sc->scp + ((IE_SCP_SZ + 15) & ~15);
	sc->scb = sc->iscp + IE_ISCP_SZ;
	sc->buf_area = sc->scb + IE_SCB_SZ;
	sc->buf_area_sz = sc->sc_msize - (sc->buf_area - sc->scp);

	/*
	 * BUS_USE -> Interrupt Active High (edge-triggered),
	 *            Lock function enabled,
	 *            Internal bus throttle timer triggering,
	 *            82586 operating mode.
	 */
	ie_write_16(sc, IE_SCP_BUS_USE(sc->scp), IE_BUS_USE);
	ie_write_24(sc, IE_SCP_ISCP(sc->scp), sc->iscp);
	ie_write_16(sc, IE_ISCP_SCB(sc->iscp), sc->scb);
	ie_write_24(sc, IE_ISCP_BASE(sc->iscp), sc->scp);

	/* This has the side-effect of resetting the chip */
	i82586_proberam(sc);

	/* Attach the MI back-end */
	i82586_attach(sc, "onboard", mvme_ea, NULL, 0, 0);

	/* Register the event counter */
	evcnt_attach_dynamic(&ps->ps_evcnt, EVCNT_TYPE_INTR,
	    pcctwointr_evcnt(pa->pa_ipl), "ether", device_xname(self));

	/* Finally, hook the hardware interrupt */
	pcctwointr_establish(PCCTWOV_LANC_IRQ, i82586_intr, pa->pa_ipl, sc,
	    &ps->ps_evcnt);
}
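ie_pcctwo_attach() wires sc->ie_bus_read16 and sc->ie_bus_write16 to ie_read_16 and ie_write_16, which are not shown in this excerpt. As a rough sketch only (an assumed implementation that ignores any byte-order fix-ups the real accessors may perform), such hooks are thin wrappers around 16-bit bus_space accesses through the sc->bt/sc->bh pair set up above:

static uint16_t
example_ie_read_16(struct ie_softc *sc, int offset)
{
	return bus_space_read_2(sc->bt, sc->bh, offset);
}

static void
example_ie_write_16(struct ie_softc *sc, int offset, uint16_t value)
{
	bus_space_write_2(sc->bt, sc->bh, offset, value);
}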
Beispiel #29
0
/*---------------------------------------------------------------------------*
 *	HSCX IRQ Handler
 *---------------------------------------------------------------------------*/
void
isic_hscx_irq(register struct isic_softc *sc, u_char ista, int h_chan, u_char ex_irq)
{
	register l1_bchan_state_t *chan = &sc->sc_chan[h_chan];
	u_char exir = 0;
	int activity = -1;
	u_char cmd = 0;

	NDBGL1(L1_H_IRQ, "%#x", ista);

	if(ex_irq)
	{
		/* get channel extended irq reg */

		exir = HSCX_READ(h_chan, H_EXIR);

		if(exir & HSCX_EXIR_RFO)
		{
			chan->stat_RFO++;
			NDBGL1(L1_H_XFRERR, "ex_irq: receive data overflow");
		}

		if((exir & HSCX_EXIR_XDU) && (chan->bprot != BPROT_NONE))/* xmit data underrun */
		{
			chan->stat_XDU++;
			NDBGL1(L1_H_XFRERR, "ex_irq: xmit data underrun");
			isic_hscx_cmd(sc, h_chan, HSCX_CMDR_XRES);

			if (chan->out_mbuf_head != NULL)  /* don't continue to transmit this buffer */
			{
				i4b_Bfreembuf(chan->out_mbuf_head);
				chan->out_mbuf_cur = chan->out_mbuf_head = NULL;
			}
		}

	}

	/* rx message end, end of frame */

	if(ista & HSCX_ISTA_RME)
	{
		register int fifo_data_len;
		u_char rsta;
		int error = 0;

		rsta = HSCX_READ(h_chan, H_RSTA);

		if((rsta & 0xf0) != 0xa0)
		{
			if((rsta & HSCX_RSTA_VFR) == 0)
			{
				chan->stat_VFR++;
				cmd |= (HSCX_CMDR_RHR);
				NDBGL1(L1_H_XFRERR, "received invalid Frame");
				error++;
			}

			if(rsta & HSCX_RSTA_RDO)
			{
				chan->stat_RDO++;
				NDBGL1(L1_H_XFRERR, "receive data overflow");
				error++;
			}

			if((rsta & HSCX_RSTA_CRC) == 0)
			{
				chan->stat_CRC++;
				cmd |= (HSCX_CMDR_RHR);
				NDBGL1(L1_H_XFRERR, "CRC check failed");
				error++;
			}

			if(rsta & HSCX_RSTA_RAB)
			{
				chan->stat_RAB++;
				NDBGL1(L1_H_XFRERR, "Receive message aborted");
				error++;
			}
		}

		fifo_data_len = ((HSCX_READ(h_chan, H_RBCL)) &
						((sc->sc_bfifolen)-1));

		if(fifo_data_len == 0)
			fifo_data_len = sc->sc_bfifolen;
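		/*
		 * Worked example: with sc_bfifolen == 32, an RBCL value of
		 * 0x45 masks to 5 pending bytes, while 0x40 masks to 0 and
		 * is therefore treated as a completely full 32-byte fifo.
		 */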

		/* all error conditions checked, now decide and take action */

		if(error == 0)
		{
			if(chan->in_mbuf == NULL)
			{
				if((chan->in_mbuf = i4b_Bgetmbuf(BCH_MAX_DATALEN)) == NULL)
					panic("L1 isic_hscx_irq: RME, cannot allocate mbuf!");
				chan->in_cbptr = chan->in_mbuf->m_data;
				chan->in_len = 0;
			}

			fifo_data_len -= 1; /* last byte in fifo is RSTA ! */

			if((chan->in_len + fifo_data_len) <= BCH_MAX_DATALEN)
			{
				/* read data from HSCX fifo */

				HSCX_RDFIFO(h_chan, chan->in_cbptr, fifo_data_len);

				cmd |= (HSCX_CMDR_RMC);
				isic_hscx_cmd(sc, h_chan, cmd);
				cmd = 0;

				chan->in_len += fifo_data_len;
				chan->rxcount += fifo_data_len;

				/* setup mbuf data length */

				chan->in_mbuf->m_len = chan->in_len;
				chan->in_mbuf->m_pkthdr.len = chan->in_len;

				if(sc->sc_trace & TRACE_B_RX)
				{
					i4b_trace_hdr hdr;
					hdr.type = (h_chan == HSCX_CH_A ? TRC_CH_B1 : TRC_CH_B2);
					hdr.dir = FROM_NT;
					hdr.count = ++sc->sc_trace_bcount;
					isdn_layer2_trace_ind(&sc->sc_l2, sc->sc_l3token, &hdr, chan->in_mbuf->m_len, chan->in_mbuf->m_data);
				}

				(*chan->l4_driver->bch_rx_data_ready)(chan->l4_driver_softc);

				activity = ACT_RX;

				/* mark buffer ptr as unused */

				chan->in_mbuf = NULL;
				chan->in_cbptr = NULL;
				chan->in_len = 0;
			}
			else
			{
				NDBGL1(L1_H_XFRERR, "RAWHDLC rx buffer overflow in RME, in_len=%d, fifolen=%d", chan->in_len, fifo_data_len);
				chan->in_cbptr = chan->in_mbuf->m_data;
				chan->in_len = 0;
				cmd |= (HSCX_CMDR_RHR | HSCX_CMDR_RMC);
			}
		}
		else
		{
			if (chan->in_mbuf != NULL)
			{
				i4b_Bfreembuf(chan->in_mbuf);
				chan->in_mbuf = NULL;
				chan->in_cbptr = NULL;
				chan->in_len = 0;
			}
			cmd |= (HSCX_CMDR_RMC);
		}
	}

	/* rx fifo full */

	if(ista & HSCX_ISTA_RPF)
	{
		if(chan->in_mbuf == NULL)
		{
			if((chan->in_mbuf = i4b_Bgetmbuf(BCH_MAX_DATALEN)) == NULL)
				panic("L1 isic_hscx_irq: RPF, cannot allocate mbuf!");
			chan->in_cbptr = chan->in_mbuf->m_data;
			chan->in_len = 0;
		}

		chan->rxcount += sc->sc_bfifolen;

		if((chan->in_len + sc->sc_bfifolen) <= BCH_MAX_DATALEN)
		{
			/* read data from HSCX fifo */

			HSCX_RDFIFO(h_chan, chan->in_cbptr, sc->sc_bfifolen);

			chan->in_cbptr += sc->sc_bfifolen;
			chan->in_len += sc->sc_bfifolen;
		}
		else
		{
			if(chan->bprot == BPROT_NONE)
			{
				/* setup mbuf data length */

				chan->in_mbuf->m_len = chan->in_len;
				chan->in_mbuf->m_pkthdr.len = chan->in_len;

				if(sc->sc_trace & TRACE_B_RX)
				{
					i4b_trace_hdr hdr;
					hdr.type = (h_chan == HSCX_CH_A ? TRC_CH_B1 : TRC_CH_B2);
					hdr.dir = FROM_NT;
					hdr.count = ++sc->sc_trace_bcount;
					isdn_layer2_trace_ind(&sc->sc_l2, sc->sc_l3token, &hdr,chan->in_mbuf->m_len, chan->in_mbuf->m_data);
				}

				/* silence detection */

				if(!(isdn_bchan_silence(chan->in_mbuf->m_data, chan->in_mbuf->m_len)))
					activity = ACT_RX;

				if(!(IF_QFULL(&chan->rx_queue)))
				{
					IF_ENQUEUE(&chan->rx_queue, chan->in_mbuf);
				}
				else
				{
					i4b_Bfreembuf(chan->in_mbuf);
				}

				/* signal upper driver that data is available */

				(*chan->l4_driver->bch_rx_data_ready)(chan->l4_driver_softc);

				/* alloc new buffer */

				if((chan->in_mbuf = i4b_Bgetmbuf(BCH_MAX_DATALEN)) == NULL)
					panic("L1 isic_hscx_irq: RPF, cannot allocate new mbuf!");

				/* setup new data ptr */

				chan->in_cbptr = chan->in_mbuf->m_data;

				/* read data from HSCX fifo */

				HSCX_RDFIFO(h_chan, chan->in_cbptr, sc->sc_bfifolen);

				chan->in_cbptr += sc->sc_bfifolen;
				chan->in_len = sc->sc_bfifolen;

				chan->rxcount += sc->sc_bfifolen;
			}
			else
			{
				NDBGL1(L1_H_XFRERR, "RAWHDLC rx buffer overflow in RPF, in_len=%d", chan->in_len);
				chan->in_cbptr = chan->in_mbuf->m_data;
				chan->in_len = 0;
				cmd |= (HSCX_CMDR_RHR);
			}
		}

		/* command to release fifo space */

		cmd |= HSCX_CMDR_RMC;
	}

	/* transmit fifo empty, new data can be written to fifo */

	if(ista & HSCX_ISTA_XPR)
	{
		/*
		 * for a description what is going on here, please have
		 * a look at isic_bchannel_start() in i4b_bchan.c !
		 */

		int len;
		int nextlen;

		NDBGL1(L1_H_IRQ, "%s, chan %d - XPR, Tx Fifo Empty!", device_xname(sc->sc_dev), h_chan);

		if(chan->out_mbuf_cur == NULL) 	/* last frame is transmitted */
		{
			IF_DEQUEUE(&chan->tx_queue, chan->out_mbuf_head);

			if(chan->out_mbuf_head == NULL)
			{
				chan->state &= ~HSCX_TX_ACTIVE;
				(*chan->l4_driver->bch_tx_queue_empty)(chan->l4_driver_softc);
			}
			else
			{
				chan->state |= HSCX_TX_ACTIVE;
				chan->out_mbuf_cur = chan->out_mbuf_head;
				chan->out_mbuf_cur_ptr = chan->out_mbuf_cur->m_data;
				chan->out_mbuf_cur_len = chan->out_mbuf_cur->m_len;

				if(sc->sc_trace & TRACE_B_TX)
				{
					i4b_trace_hdr hdr;
					hdr.type = (h_chan == HSCX_CH_A ? TRC_CH_B1 : TRC_CH_B2);
					hdr.dir = FROM_TE;
					hdr.count = ++sc->sc_trace_bcount;
					isdn_layer2_trace_ind(&sc->sc_l2, sc->sc_l3token, &hdr, chan->out_mbuf_cur->m_len, chan->out_mbuf_cur->m_data);
				}

				if(chan->bprot == BPROT_NONE)
				{
					if(!(isdn_bchan_silence(chan->out_mbuf_cur->m_data, chan->out_mbuf_cur->m_len)))
						activity = ACT_TX;
				}
				else
				{
					activity = ACT_TX;
				}
			}
		}

		len = 0;

		while(chan->out_mbuf_cur && len != sc->sc_bfifolen)
		{
			nextlen = min(chan->out_mbuf_cur_len, sc->sc_bfifolen - len);

#ifdef NOTDEF
			printf("i:mh=%x, mc=%x, mcp=%x, mcl=%d l=%d nl=%d # ",
				chan->out_mbuf_head,
				chan->out_mbuf_cur,
				chan->out_mbuf_cur_ptr,
				chan->out_mbuf_cur_len,
				len,
				nextlen);
#endif

			isic_hscx_waitxfw(sc, h_chan);	/* necessary !!! */

			HSCX_WRFIFO(h_chan, chan->out_mbuf_cur_ptr, nextlen);
			cmd |= HSCX_CMDR_XTF;

			len += nextlen;
			chan->txcount += nextlen;

			chan->out_mbuf_cur_ptr += nextlen;
			chan->out_mbuf_cur_len -= nextlen;

			if(chan->out_mbuf_cur_len == 0)
			{
				if((chan->out_mbuf_cur = chan->out_mbuf_cur->m_next) != NULL)
				{
					chan->out_mbuf_cur_ptr = chan->out_mbuf_cur->m_data;
					chan->out_mbuf_cur_len = chan->out_mbuf_cur->m_len;

					if(sc->sc_trace & TRACE_B_TX)
					{
						i4b_trace_hdr hdr;
						hdr.type = (h_chan == HSCX_CH_A ? TRC_CH_B1 : TRC_CH_B2);
						hdr.dir = FROM_TE;
						hdr.count = ++sc->sc_trace_bcount;
						isdn_layer2_trace_ind(&sc->sc_l2, sc->sc_l3token, &hdr, chan->out_mbuf_cur->m_len, chan->out_mbuf_cur->m_data);
					}
				}
				else
				{
					if (chan->bprot != BPROT_NONE)
						cmd |= HSCX_CMDR_XME;
					i4b_Bfreembuf(chan->out_mbuf_head);
					chan->out_mbuf_head = NULL;
				}

			}
		}
	}

	if(cmd)		/* is there a command for the HSCX ? */
	{
		isic_hscx_cmd(sc, h_chan, cmd);	/* yes, to HSCX */
	}

	/* call timeout handling routine */

	if(activity == ACT_RX || activity == ACT_TX)
		(*chan->l4_driver->bch_activity)(chan->l4_driver_softc, activity);
}
Beispiel #30
0
Static int
url_openpipes(struct url_softc *sc)
{
	struct url_chain *c;
	usbd_status err;
	int i;
	int error = 0;

	if (sc->sc_dying)
		return (EIO);

	sc->sc_refcnt++;

	/* Open RX pipe */
	err = usbd_open_pipe(sc->sc_ctl_iface, sc->sc_bulkin_no,
			     USBD_EXCLUSIVE_USE, &sc->sc_pipe_rx);
	if (err) {
		printf("%s: open rx pipe failed: %s\n",
		       device_xname(sc->sc_dev), usbd_errstr(err));
		error = EIO;
		goto done;
	}

	/* Open TX pipe */
	err = usbd_open_pipe(sc->sc_ctl_iface, sc->sc_bulkout_no,
			     USBD_EXCLUSIVE_USE, &sc->sc_pipe_tx);
	if (err) {
		printf("%s: open tx pipe failed: %s\n",
		       device_xname(sc->sc_dev), usbd_errstr(err));
		error = EIO;
		goto done;
	}

#if 0
	/* XXX: interrupt endpoint is not yet supported */
	/* Open Interrupt pipe */
	err = usbd_open_pipe_intr(sc->sc_ctl_iface, sc->sc_intrin_no,
				  USBD_EXCLUSIVE_USE, &sc->sc_pipe_intr, sc,
				  &sc->sc_cdata.url_ibuf, URL_INTR_PKGLEN,
				  url_intr, USBD_DEFAULT_INTERVAL);
	if (err) {
		printf("%s: open intr pipe failed: %s\n",
		       device_xname(sc->sc_dev), usbd_errstr(err));
		error = EIO;
		goto done;
	}
#endif


	/* Start up the receive pipe. */
	for (i = 0; i < URL_RX_LIST_CNT; i++) {
		c = &sc->sc_cdata.url_rx_chain[i];
		usbd_setup_xfer(c->url_xfer, sc->sc_pipe_rx,
				c, c->url_buf, URL_BUFSZ,
				USBD_SHORT_XFER_OK | USBD_NO_COPY,
				USBD_NO_TIMEOUT, url_rxeof);
		(void)usbd_transfer(c->url_xfer);
		DPRINTF(("%s: %s: start read\n", device_xname(sc->sc_dev),
			 __func__));
	}

 done:
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeupold(sc->sc_dev);

	return (error);
}
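url_openpipes() brackets its work with sc_refcnt++ and the usb_detach_wakeupold() check, the usual reference-count handshake with the detach path. The other half of that handshake is sketched below as an assumption (it is not part of this excerpt, and real code would also need the appropriate spl/interrupt protection):

static void
example_url_drain(struct url_softc *sc)
{
	sc->sc_dying = 1;
	/* Wait until the last in-flight reference wakes us up. */
	if (--sc->sc_refcnt >= 0)
		usb_detach_waitold(sc->sc_dev);
}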