Example #1
uint32_t
fsl_ocotp_read_4(bus_size_t off)
{

	if (off > FSL_OCOTP_LAST_REG)
		panic("fsl_ocotp_read_4: offset out of range");

	/* If we have a softc, use the regular bus_space read. */
	if (ocotp_sc != NULL)
		return (RD4(ocotp_sc, off));

	/*
	 * Otherwise establish a temporary device mapping if necessary, and
	 * read the device without any help from bus_space.
	 *
	 * XXX Eventually the code from here down can be deleted.
	 */
	if (ocotp_regs == NULL)
		fsl_ocotp_devmap();

	return (ocotp_regs[off / 4]);
}
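The RD4()/WR4() helpers seen throughout these examples are per-driver macros, not a shared kernel API. A minimal sketch of the usual FreeBSD definitions follows; the softc resource field name (mem_res here) varies from driver to driver.

/* Sketch of the common idiom; actual macro bodies differ per driver. */
#define	RD4(sc, off)		bus_read_4((sc)->mem_res, (off))
#define	WR4(sc, off, val)	bus_write_4((sc)->mem_res, (off), (val))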
Example #2
static void
at91_twi_intr(void *xsc)
{
	struct at91_twi_softc *sc = xsc;
	uint32_t status;

	status = RD4(sc, TWI_SR);
	if (status == 0)
		return;
	AT91_TWI_LOCK(sc);
	sc->flags |= status & (TWI_SR_OVRE | TWI_SR_UNRE | TWI_SR_NACK);
	if (status & TWI_SR_RXRDY)
		sc->flags |= TWI_SR_RXRDY;
	if (status & TWI_SR_TXRDY)
		sc->flags |= TWI_SR_TXRDY;
	if (status & TWI_SR_TXCOMP)
		sc->flags |= TWI_SR_TXCOMP;
	WR4(sc, TWI_IDR, status);
	wakeup(sc);
	AT91_TWI_UNLOCK(sc);
	return;
}
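The handler above latches status bits into sc->flags, masks the sources it has seen (the TWI_IDR write), and wakes any sleeper. The consuming side of that pattern could look like the sketch below; the helper and the mutex field name are hypothetical, and the real driver's wait loop also handles the latched error bits and timeouts.

static int
at91_twi_wait_flag(struct at91_twi_softc *sc, uint32_t bit)
{
	int err = 0;

	/* Sketch: sleep until at91_twi_intr() latches the awaited bit. */
	while ((sc->flags & bit) == 0) {
		/* sc_mtx is a hypothetical field name. */
		err = msleep(sc, &sc->sc_mtx, 0, "twiwt", hz);
		if (err != 0)
			break;
	}
	sc->flags &= ~bit;
	return (err);
}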
Example #3
static void
iomux_configure_input(struct iomux_softc *sc, uint32_t reg, uint32_t val)
{
	u_int select, mask, shift, width;

	/* If register and value are zero, there is nothing to configure. */
	if (reg == 0 && val == 0)
		return;

	/*
	 * If the config value has 0xff in the high byte it is encoded:
	 * 	31     23      15      7        0
	 *      | 0xff | shift | width | select |
	 * We need to mask out the old select value and OR in the new, using a
	 * mask of the given width and shifting the values up by shift.
	 */
	if ((val & 0xff000000) == 0xff000000) {
		select = val & 0x000000ff;
		width = (val & 0x0000ff00) >> 8;
		shift = (val & 0x00ff0000) >> 16;
		mask  = ((1u << width) - 1) << shift;
		val = (RD4(sc, reg) & ~mask) | (select << shift);
	}

	/* Write the (possibly updated) select value back to the pad register. */
	WR4(sc, reg, val);
}
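A worked decode of a hypothetical config word may help:

/*
 * Example (hypothetical value): val = 0xff100301
 *   select = 0x01, width = 0x03, shift = 0x10 (16)
 *   mask   = ((1u << 3) - 1) << 16 = 0x00070000
 * Result: bits 18:16 of the register are replaced with 001;
 * all other bits are preserved.
 */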
Example #4
/* Enable programming the PL through PCAP. */
static void
zy7_devcfg_init_hw(struct zy7_devcfg_softc *sc)
{

	DEVCFG_SC_ASSERT_LOCKED(sc);

	/* Set devcfg control register. */
	WR4(sc, ZY7_DEVCFG_CTRL,
	    ZY7_DEVCFG_CTRL_PCFG_PROG_B |
	    ZY7_DEVCFG_CTRL_PCAP_PR |
	    ZY7_DEVCFG_CTRL_PCAP_MODE |
	    ZY7_DEVCFG_CTRL_USER_MODE |
	    ZY7_DEVCFG_CTRL_RESVD_WR11 |
	    ZY7_DEVCFG_CTRL_SPNIDEN |
	    ZY7_DEVCFG_CTRL_SPIDEN |
	    ZY7_DEVCFG_CTRL_NIDEN |
	    ZY7_DEVCFG_CTRL_DBGEN |
	    ZY7_DEVCFG_CTRL_DAP_EN_MASK);

	/* Turn off internal PCAP loopback. */
	WR4(sc, ZY7_DEVCFG_MCTRL, RD4(sc, ZY7_DEVCFG_MCTRL) &
	    ~ZY7_DEVCFG_MCTRL_INT_PCAP_LPBK);
}
Example #5
static int
ffec_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ffec_softc *sc;
	int val;

	sc = device_get_softc(dev);

	WR4(sc, FEC_IER_REG, FEC_IER_MII);

	WR4(sc, FEC_MMFR_REG, FEC_MMFR_OP_READ |
	    FEC_MMFR_ST_VALUE | FEC_MMFR_TA_VALUE |
	    ((phy << FEC_MMFR_PA_SHIFT) & FEC_MMFR_PA_MASK) |
	    ((reg << FEC_MMFR_RA_SHIFT) & FEC_MMFR_RA_MASK));

	if (!ffec_miibus_iowait(sc)) {
		device_printf(dev, "timeout waiting for mii read\n");
		return (-1); /* All-ones is a symptom of bad mdio. */
	}

	val = RD4(sc, FEC_MMFR_REG) & FEC_MMFR_DATA_MASK;

	return (val);
}
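The FEC_MMFR write above composes a standard IEEE 802.3 clause-22 MDIO read frame; a comment sketch of the field layout it drives:

/*
 * Clause-22 MDIO frame fields packed into FEC_MMFR_REG (per IEEE
 * 802.3; widths ST=2, OP=2, PA=5, RA=5, TA=2, DATA=16 bits).
 * FEC_MMFR_ST_VALUE is the 01b start pattern, FEC_MMFR_OP_READ the
 * 10b read opcode; the turnaround releases the bus to the PHY so it
 * can drive the 16 data bits that are read back above.
 */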
Example #6
static void
at91_mci_cmdrdy(struct at91_mci_softc *sc, uint32_t sr)
{
	struct mmc_command *cmd = sc->curcmd;
	int i;

	if (cmd == NULL)
		return;

	/*
	 * We get here at the end of EVERY command.  We retrieve the command
	 * response (if any) then decide what to do next based on the command.
	 */

	if (cmd->flags & MMC_RSP_PRESENT) {
		for (i = 0; i < ((cmd->flags & MMC_RSP_136) ? 4 : 1); i++) {
			cmd->resp[i] = RD4(sc, MCI_RSPR + i * 4);
			if (mci_debug)
				printf("RSPR[%d] = %x sr=%x\n", i, cmd->resp[i],  sr);
		}
	}

	/*
	 * If this was a stop command, go handle the various special
	 * conditions (read: bugs) that have to be dealt with following a stop.
	 */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		at91_mci_stop_done(sc, sr);
		return;
	}

	/*
	 * If this command can continue to assert BUSY beyond the response then
	 * we need to wait for NOTBUSY before the command is really done.
	 *
	 * Note that this may not work properly on the at91rm9200.  It certainly
	 * doesn't work for the STOP command that follows a multi-block write,
	 * so post-stop CMDRDY is handled separately; see the special handling
	 * in at91_mci_stop_done().
	 *
	 * Besides STOP, there are other R1B-type commands that use the busy
	 * signal after CMDRDY: CMD7 (card select), CMD28-29 (write protect),
	 * CMD38 (erase). I haven't tested any of them, but I rather expect
	 * them all to have the same sort of problem with MCI_SR not actually
	 * reflecting the state of the DAT0-line busy indicator.  So this code
	 * may need to grow some sort of special handling for them too. (This
	 * just in: CMD7 isn't a problem right now because dev/mmc.c incorrectly
	 * sets the response flags to R1 rather than R1B.) XXX
	 */
	if ((cmd->flags & MMC_RSP_BUSY)) {
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY);
		return;
	}

	/*
	 * If there is a data transfer with this command, then...
	 * - If it's a read, we need to wait for ENDRX.
	 * - If it's a write, now is the time to enable the PDC, and we need
	 *   to wait for a BLKE that follows a TXBUFE, because if we're doing
	 *   a split transfer we get a BLKE after the first half (when TPR/TCR
	 *   get loaded from TNPR/TNCR).  So first we wait for the TXBUFE, and
	 *   the handling for that interrupt will then invoke the wait for the
	 *   subsequent BLKE which indicates actual completion.
	 */
	if (cmd->data) {
		uint32_t ier;
		if (cmd->data->flags & MMC_DATA_READ) {
			ier = MCI_SR_ENDRX;
		} else {
			ier = MCI_SR_TXBUFE;
			WR4(sc, PDC_PTCR, PDC_PTCR_TXTEN);
		}
		WR4(sc, MCI_IER, MCI_SR_ERROR | ier);
		return;
	}

	/*
	 * If we made it to here, we don't need to wait for anything more for
	 * the current command, move on to the next command (will complete the
	 * request if there is no next command).
	 */
	cmd->error = MMC_ERR_NONE;
	at91_mci_next_operation(sc);
}
Example #7
static int
at91_rst_attach(device_t dev)
{
	struct rst_softc *sc;
	const char *cause;
	int rid, err;

	rst_sc = sc = device_get_softc(dev);
	sc->sc_dev = dev;

	callout_init(&sc->tick_ch, 0);

	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		err = ENOMEM;
		goto out;
	}
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resources.\n");
		err = ENOMEM;
		goto out;
	}

	/* Activate the interrupt. */
	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    rst_intr, NULL, sc, &sc->intrhand);
	if (err)
		device_printf(dev, "could not establish interrupt handler.\n");

	WR4(rst_sc, RST_MR, RST_MR_ERSTL(0xd) | RST_MR_URSIEN | RST_MR_KEY);

	switch (RD4(sc, RST_SR) & RST_SR_RST_MASK) {
	case RST_SR_RST_POW:
		cause = "Power On";
		break;
	case RST_SR_RST_WAKE:
		cause = "Wake Up";
		break;
	case RST_SR_RST_WDT:
		cause = "Watchdog";
		break;
	case RST_SR_RST_SOFT:
		cause = "Software Request";
		break;
	case RST_SR_RST_USR:
		cause = "External (User)";
		break;
	default:
		cause = "Unknown";
		break;
	}

	device_printf(dev, "Reset cause: %s.\n", cause);
	/* cpu_reset_addr = cpu_reset; */

out:
	return (err);
}
Example #8
static int
zy7_devcfg_attach(device_t dev)
{
	struct zy7_devcfg_softc *sc = device_get_softc(dev);
	int i;
	int rid, err;

	/* Allow only one attach. */
	if (zy7_devcfg_softc_p != NULL)
		return (ENXIO);

	sc->dev = dev;

	DEVCFG_SC_LOCK_INIT(sc);

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
					     RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		zy7_devcfg_detach(dev);
		return (ENOMEM);
	}

	/* Allocate IRQ. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					     RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "cannot allocate IRQ\n");
		zy7_devcfg_detach(dev);
		return (ENOMEM);
	}

	/* Activate the interrupt. */
	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
			     NULL, zy7_devcfg_intr, sc, &sc->intrhandle);
	if (err) {
		device_printf(dev, "cannot setup IRQ\n");
		zy7_devcfg_detach(dev);
		return (err);
	}

	/* Create /dev/devcfg */
	sc->sc_ctl_dev = make_dev(&zy7_devcfg_cdevsw, 0,
			  UID_ROOT, GID_WHEEL, 0600, "devcfg");
	if (sc->sc_ctl_dev == NULL) {
		device_printf(dev, "failed to create /dev/devcfg");
		zy7_devcfg_detach(dev);
		return (ENXIO);
	}
	sc->sc_ctl_dev->si_drv1 = sc;

	zy7_devcfg_softc_p = sc;

	/* Unlock devcfg registers. */
	WR4(sc, ZY7_DEVCFG_UNLOCK, ZY7_DEVCFG_UNLOCK_MAGIC);

	/* Make sure interrupts are completely disabled. */
	WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL);
	WR4(sc, ZY7_DEVCFG_INT_MASK, 0xffffffff);

	/* Get PS_VERS for SYSCTL. */
	zy7_ps_vers = (RD4(sc, ZY7_DEVCFG_MCTRL) &
		       ZY7_DEVCFG_MCTRL_PS_VERS_MASK) >>
		ZY7_DEVCFG_MCTRL_PS_VERS_SHIFT;

	for (i = 0; i < FCLK_NUM; i++) {
		fclk_configs[i].source = zy7_pl_fclk_get_source(i);
		fclk_configs[i].actual_frequency =
			zy7_pl_fclk_enabled(i) ? zy7_pl_fclk_get_freq(i) : 0;
		/* Initially assume the actual frequency is the configured one. */
		fclk_configs[i].frequency = fclk_configs[i].actual_frequency;
	}

	if (zy7_devcfg_init_fclk_sysctl(sc) < 0)
		device_printf(dev, "failed to initialized sysctl tree\n");

	return (0);
}
Example #9
static int
zy7_devcfg_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct zy7_devcfg_softc *sc = dev->si_drv1;
	void *dma_mem;
	bus_addr_t dma_physaddr;
	int segsz, err;

	DEVCFG_SC_LOCK(sc);

	/* First write?  Reset PL. */
	if (uio->uio_offset == 0 && uio->uio_resid > 0)	{
		zy7_devcfg_init_hw(sc);
		zy7_slcr_preload_pl();
		err = zy7_devcfg_reset_pl(sc);
		if (err != 0) {
			DEVCFG_SC_UNLOCK(sc);
			return (err);
		}
	}

	/* Allocate dma memory and load. */
	err = bus_dmamem_alloc(sc->dma_tag, &dma_mem, BUS_DMA_NOWAIT,
			       &sc->dma_map);
	if (err != 0) {
		DEVCFG_SC_UNLOCK(sc);
		return (err);
	}
	err = bus_dmamap_load(sc->dma_tag, sc->dma_map, dma_mem, PAGE_SIZE,
			      zy7_dma_cb2, &dma_physaddr, 0);
	if (err != 0) {
		bus_dmamem_free(sc->dma_tag, dma_mem, sc->dma_map);
		DEVCFG_SC_UNLOCK(sc);
		return (err);
	}

	while (uio->uio_resid > 0) {
		/* If DONE signal has been set, we shouldn't write anymore. */
		if ((RD4(sc, ZY7_DEVCFG_INT_STATUS) &
		     ZY7_DEVCFG_INT_PCFG_DONE) != 0) {
			err = EIO;
			break;
		}

		/* uiomove the data from user buffer to our dma map. */
		segsz = MIN(PAGE_SIZE, uio->uio_resid);
		DEVCFG_SC_UNLOCK(sc);
		err = uiomove(dma_mem, segsz, uio);
		DEVCFG_SC_LOCK(sc);
		if (err != 0)
			break;

		/* Flush the cache to memory. */
		bus_dmamap_sync(sc->dma_tag, sc->dma_map,
				BUS_DMASYNC_PREWRITE);

		/* Program devcfg's DMA engine.  The ordering of these
		 * register writes is critical.
		 */
		if (uio->uio_resid > segsz)
			WR4(sc, ZY7_DEVCFG_DMA_SRC_ADDR,
			    (uint32_t) dma_physaddr);
		else
			WR4(sc, ZY7_DEVCFG_DMA_SRC_ADDR,
			    (uint32_t) dma_physaddr |
			    ZY7_DEVCFG_DMA_ADDR_WAIT_PCAP);
		WR4(sc, ZY7_DEVCFG_DMA_DST_ADDR, ZY7_DEVCFG_DMA_ADDR_ILLEGAL);
		WR4(sc, ZY7_DEVCFG_DMA_SRC_LEN, (segsz+3)/4);
		WR4(sc, ZY7_DEVCFG_DMA_DST_LEN, 0);

		/* Now clear done bit and set up DMA done interrupt. */
		WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL);
		WR4(sc, ZY7_DEVCFG_INT_MASK, ~ZY7_DEVCFG_INT_DMA_DONE);

		/* Wait for DMA done interrupt. */
		err = mtx_sleep(sc->dma_map, &sc->sc_mtx, PCATCH,
				"zy7dma", hz);
		if (err != 0)
			break;

		bus_dmamap_sync(sc->dma_tag, sc->dma_map,
				BUS_DMASYNC_POSTWRITE);

		/* Check DONE signal. */
		if ((RD4(sc, ZY7_DEVCFG_INT_STATUS) &
		     ZY7_DEVCFG_INT_PCFG_DONE) != 0)
			zy7_slcr_postload_pl(zy7_en_level_shifters);
	}

	bus_dmamap_unload(sc->dma_tag, sc->dma_map);
	bus_dmamem_free(sc->dma_tag, dma_mem, sc->dma_map);
	DEVCFG_SC_UNLOCK(sc);
	return (err);
}
Example #10
static void
tegra_i2c_intr(void *arg)
{
    struct tegra_i2c_softc *sc;
    uint32_t status, reg;
    int rv;

    sc = (struct tegra_i2c_softc *)arg;

    LOCK(sc);
    status = RD4(sc, I2C_INTERRUPT_SOURCE_REGISTER);
    if (sc->msg == NULL) {
        /* Unexpected interrupt: mask all interrupts and clear status. */
        reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER);
        reg &= ~I2C_INT_TFIFO_DATA_REQ;
        reg &= ~I2C_INT_RFIFO_DATA_REQ;
        WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0);
        WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, status);
        UNLOCK(sc);
        return;
    }

    if ((status & I2C_ERROR_MASK) != 0) {
        if (status & I2C_INT_NOACK)
            sc->bus_err = IIC_ENOACK;
        if (status & I2C_INT_ARB_LOST)
            sc->bus_err = IIC_EBUSERR;
        if ((status & I2C_INT_TFIFO_OVR) ||
                (status & I2C_INT_RFIFO_UNF))
            sc->bus_err = IIC_EBUSERR;
        sc->done = 1;
    } else if ((status & I2C_INT_RFIFO_DATA_REQ) &&
               (sc->msg != NULL) && (sc->msg->flags & IIC_M_RD)) {
        rv = tegra_i2c_rx(sc);
        if (rv == 0) {
            reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER);
            reg &= ~I2C_INT_RFIFO_DATA_REQ;
            WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg);
        }
    } else if ((status & I2C_INT_TFIFO_DATA_REQ) &&
               (sc->msg != NULL) && !(sc->msg->flags & IIC_M_RD)) {
        rv = tegra_i2c_tx(sc);
        if (rv == 0) {
            reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER);
            reg &= ~I2C_INT_TFIFO_DATA_REQ;
            WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg);
        }
    } else if ((status & I2C_INT_RFIFO_DATA_REQ) ||
               (status & I2C_INT_TFIFO_DATA_REQ)) {
        device_printf(sc->dev, "Unexpected data interrupt: 0x%08X\n",
                      status);
        reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER);
        reg &= ~I2C_INT_TFIFO_DATA_REQ;
        reg &= ~I2C_INT_RFIFO_DATA_REQ;
        WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg);
    }
    if (status & I2C_INT_PACKET_XFER_COMPLETE)
        sc->done = 1;
    WR4(sc, I2C_INTERRUPT_STATUS_REGISTER, status);
    if (sc->done) {
        WR4(sc, I2C_INTERRUPT_MASK_REGISTER, 0);
        wakeup(&(sc->done));
    }
    UNLOCK(sc);
}
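The read-modify-write sequence on I2C_INTERRUPT_MASK_REGISTER repeats several times above; a helper sketch (not part of the driver) shows the pattern in isolation:

/* Sketch only: mask off one interrupt source, leaving others armed. */
static inline void
tegra_i2c_mask_bit(struct tegra_i2c_softc *sc, uint32_t bit)
{
    uint32_t reg;

    reg = RD4(sc, I2C_INTERRUPT_MASK_REGISTER);
    reg &= ~bit;
    WR4(sc, I2C_INTERRUPT_MASK_REGISTER, reg);
}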
Example #11
static void
ffec_miibus_statchg(device_t dev)
{
	struct ffec_softc *sc;
	struct mii_data *mii;
	uint32_t ecr, rcr, tcr;

	/*
	 * Called by the MII bus driver when the PHY establishes link to set the
	 * MAC interface registers.
	 */

	sc = device_get_softc(dev);

	FFEC_ASSERT_LOCKED(sc);

	mii = sc->mii_softc;

	if (mii->mii_media_status & IFM_ACTIVE)
		sc->link_is_up = true;
	else
		sc->link_is_up = false;

	ecr = RD4(sc, FEC_ECR_REG) & ~FEC_ECR_SPEED;
	rcr = RD4(sc, FEC_RCR_REG) & ~(FEC_RCR_RMII_10T | FEC_RCR_RMII_MODE |
	    FEC_RCR_RGMII_EN | FEC_RCR_DRT | FEC_RCR_FCE);
	tcr = RD4(sc, FEC_TCR_REG) & ~FEC_TCR_FDEN;

	rcr |= FEC_RCR_MII_MODE; /* Must always be on even for R[G]MII. */
	switch (sc->phy_conn_type) {
	case PHY_CONN_MII:
		break;
	case PHY_CONN_RMII:
		rcr |= FEC_RCR_RMII_MODE;
		break;
	case PHY_CONN_RGMII:
		rcr |= FEC_RCR_RGMII_EN;
		break;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		ecr |= FEC_ECR_SPEED;
		break;
	case IFM_100_TX:
		/* Not-FEC_ECR_SPEED + not-FEC_RCR_RMII_10T means 100TX */
		break;
	case IFM_10_T:
		rcr |= FEC_RCR_RMII_10T;
		break;
	case IFM_NONE:
		sc->link_is_up = false;
		return;
	default:
		sc->link_is_up = false;
		device_printf(dev, "Unsupported media %u\n",
		    IFM_SUBTYPE(mii->mii_media_active));
		return;
	}

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		tcr |= FEC_TCR_FDEN;
	else
		rcr |= FEC_RCR_DRT;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FLOW) != 0)
		rcr |= FEC_RCR_FCE;

	WR4(sc, FEC_RCR_REG, rcr);
	WR4(sc, FEC_TCR_REG, tcr);
	WR4(sc, FEC_ECR_REG, ecr);
}
Example #12
static void
at91_mci_stop_done(struct at91_mci_softc *sc, uint32_t sr)
{
	struct mmc_command *cmd = sc->curcmd;

	/*
	 * We arrive here after receiving CMDRDY for a MMC_STOP_TRANSMISSION
	 * command.  Depending on the operation being stopped, we may have to
	 * do some unusual things to work around hardware bugs.
	 */

	/*
	 * This is known to be true of at91rm9200 hardware; it may or may not
	 * apply to more recent chips:
	 *
	 * After stopping a multi-block write, the NOTBUSY bit in MCI_SR does
	 * not properly reflect the actual busy state of the card as signaled
	 * on the DAT0 line; it always claims the card is not-busy.  If we
	 * believe that and let operations continue, following commands will
	 * fail with response timeouts (except of course MMC_SEND_STATUS -- it
	 * indicates the card is busy in the PRG state, which was the smoking
	 * gun that showed MCI_SR NOTBUSY was not tracking DAT0 correctly).
	 *
	 * The atmel docs are emphatic: "This flag [NOTBUSY] must be used only
	 * for Write Operations."  I guess technically since we sent a stop
	 * it's not a write operation anymore.  But then just what did they
	 * think it meant for the stop command to have "...an optional busy
	 * signal transmitted on the data line" according to the SD spec?
	 *
	 * I tried a variety of things to un-wedge the MCI and get the status
	 * register to reflect NOTBUSY correctly again, but the only thing
	 * that worked was a full device reset.  It feels like an awfully big
	 * hammer, but doing a full reset after every multiblock write is
	 * still faster than doing single-block IO (by almost two orders of
	 * magnitude: 20KB/sec improves to about 1.8MB/sec best case).
	 *
	 * After doing the reset, wait for a NOTBUSY interrupt before
	 * continuing with the next operation.
	 *
	 * This workaround would break multiwrite on the rev2xx parts, so it
	 * is skipped there; some other workaround is needed for those chips.
	 */
	if ((sc->flags & CMD_MULTIWRITE) && (sc->sc_cap & CAP_NEEDS_BYTESWAP)) {
		at91_mci_reset(sc);
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY);
		return;
	}

	/*
	 * This is known to be true of at91rm9200 hardware; it may or may not
	 * apply to more recent chips:
	 *
	 * After stopping a multi-block read, loop to read and discard any
	 * data that coasts in after we sent the stop command.  The docs don't
	 * say anything about it, but empirical testing shows that 1-3
	 * additional words of data get buffered up in some unmentioned
	 * internal fifo and if we don't read and discard them here they end
	 * up on the front of the next read DMA transfer we do.
	 *
	 * This appears to be unnecessary for rev2xx parts.
	 */
	if ((sc->flags & CMD_MULTIREAD) && (sc->sc_cap & CAP_NEEDS_BYTESWAP)) {
		uint32_t sr;
		int count = 0;

		do {
			sr = RD4(sc, MCI_SR);
			if (sr & MCI_SR_RXRDY) {
				RD4(sc,  MCI_RDR);
				++count;
			}
		} while (sr & MCI_SR_RXRDY);
		at91_mci_reset(sc);
	}

	cmd->error = MMC_ERR_NONE;
	at91_mci_next_operation(sc);
}
Example #13
static int
tegra124_pmc_attach(device_t dev)
{
	struct tegra124_pmc_softc *sc;
	int rid, rv;
	uint32_t reg;
	phandle_t node;

	sc = device_get_softc(dev);
	sc->dev = dev;
	node = ofw_bus_get_node(dev);

	rv = tegra124_pmc_parse_fdt(sc, node);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot parse FDT data\n");
		return (rv);
	}

	rv = clk_get_by_ofw_name(sc->dev, 0, "pclk", &sc->clk);
	if (rv != 0) {
		device_printf(sc->dev, "Cannot get \"pclk\" clock\n");
		return (ENXIO);
	}

	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Cannot allocate memory resources\n");
		return (ENXIO);
	}

	PMC_LOCK_INIT(sc);

	/* Enable CPU power request. */
	reg = RD4(sc, PMC_CNTRL);
	reg |= PMC_CNTRL_CPU_PWRREQ_OE;
	WR4(sc, PMC_CNTRL, reg);

	/* Set sysclk output polarity */
	reg = RD4(sc, PMC_CNTRL);
	if (sc->sysclkreq_high)
		reg &= ~PMC_CNTRL_SYSCLK_POLARITY;
	else
		reg |= PMC_CNTRL_SYSCLK_POLARITY;
	WR4(sc, PMC_CNTRL, reg);

	/* Enable sysclk request. */
	reg = RD4(sc, PMC_CNTRL);
	reg |= PMC_CNTRL_SYSCLK_OE;
	WR4(sc, PMC_CNTRL, reg);

	/*
	 * Remove HDMI from deep power down mode.
	 * XXX move this to the HDMI driver.
	 */
	reg = RD4(sc, PMC_IO_DPD_STATUS);
	reg &= ~PMC_IO_DPD_STATUS_HDMI;
	WR4(sc, PMC_IO_DPD_STATUS, reg);

	reg = RD4(sc, PMC_IO_DPD2_STATUS);
	reg &= ~ PMC_IO_DPD2_STATUS_HV;
	WR4(sc, PMC_IO_DPD2_STATUS, reg);

	if (pmc_sc != NULL)
		panic("tegra124_pmc: double driver attach");
	pmc_sc = sc;
	return (0);
}
Example #14
static int
at91_spi_transfer(device_t dev, device_t child, struct spi_command *cmd)
{
	struct at91_spi_softc *sc;
	bus_addr_t addr;
	int err, i, j, mode[4];

	KASSERT(cmd->tx_cmd_sz == cmd->rx_cmd_sz,
	    ("%s: TX/RX command sizes should be equal", __func__));
	KASSERT(cmd->tx_data_sz == cmd->rx_data_sz,
	    ("%s: TX/RX data sizes should be equal", __func__));

	sc = device_get_softc(dev);
	i = 0;

	sx_xlock(&sc->xfer_mtx);

	/*
	 * Disable transfers while we set things up.
	 */
	WR4(sc, PDC_PTCR, PDC_PTCR_TXTDIS | PDC_PTCR_RXTDIS);

#ifdef SPI_CHIPSEL_SUPPORT
	if (cmd->cs < 0 || cmd->cs > 3) {
		device_printf(dev,
		    "Invalid chip select %d requested by %s\n", cmd->cs,
		    device_get_nameunit(child));
		err = EINVAL;
		goto out;
	}
#ifdef SPI_CHIP_SELECT_HIGH_SUPPORT
	if (at91_is_rm92() && cmd->cs == 0 &&
	    (cmd->flags & SPI_CHIP_SELECT_HIGH) != 0) {
		device_printf(dev,
		    "Invalid chip select high requested by %s\n",
		    device_get_nameunit(child));
		err = EINVAL;
		goto out;
	}
#endif
	WR4(sc, SPI_MR, (RD4(sc, SPI_MR) & ~0x000f0000) | CS_TO_MR(cmd->cs));
#endif

	/*
	 * Set up the TX side of the transfer.
	 */
	if ((err = bus_dmamap_load(sc->dmatag, sc->map[i], cmd->tx_cmd,
	    cmd->tx_cmd_sz, at91_getaddr, &addr, 0)) != 0)
		goto out;
	WR4(sc, PDC_TPR, addr);
	WR4(sc, PDC_TCR, cmd->tx_cmd_sz);
	bus_dmamap_sync(sc->dmatag, sc->map[i], BUS_DMASYNC_PREWRITE);
	mode[i++] = BUS_DMASYNC_POSTWRITE;
	if (cmd->tx_data_sz > 0) {
		if ((err = bus_dmamap_load(sc->dmatag, sc->map[i],
		    cmd->tx_data, cmd->tx_data_sz, at91_getaddr, &addr, 0)) !=
		    0)
			goto out;
		WR4(sc, PDC_TNPR, addr);
		WR4(sc, PDC_TNCR, cmd->tx_data_sz);
		bus_dmamap_sync(sc->dmatag, sc->map[i], BUS_DMASYNC_PREWRITE);
		mode[i++] = BUS_DMASYNC_POSTWRITE;
	}

	/*
	 * Set up the RX side of the transfer.
	 */
	if ((err = bus_dmamap_load(sc->dmatag, sc->map[i], cmd->rx_cmd,
	    cmd->rx_cmd_sz, at91_getaddr, &addr, 0)) != 0)
		goto out;
	WR4(sc, PDC_RPR, addr);
	WR4(sc, PDC_RCR, cmd->rx_cmd_sz);
	bus_dmamap_sync(sc->dmatag, sc->map[i], BUS_DMASYNC_PREREAD);
	mode[i++] = BUS_DMASYNC_POSTREAD;
	if (cmd->rx_data_sz > 0) {
		if ((err = bus_dmamap_load(sc->dmatag, sc->map[i],
		    cmd->rx_data, cmd->rx_data_sz, at91_getaddr, &addr, 0)) !=
		    0)
			goto out;
		WR4(sc, PDC_RNPR, addr);
		WR4(sc, PDC_RNCR, cmd->rx_data_sz);
		bus_dmamap_sync(sc->dmatag, sc->map[i], BUS_DMASYNC_PREREAD);
		mode[i++] = BUS_DMASYNC_POSTREAD;
	}

	/*
	 * Start the transfer, wait for it to complete.
	 */
	sc->xfer_done = 0;
	WR4(sc, SPI_IER, SPI_SR_RXBUFF);
	WR4(sc, PDC_PTCR, PDC_PTCR_TXTEN | PDC_PTCR_RXTEN);
	do
		err = tsleep(&sc->xfer_done, PCATCH | PZERO, "at91_spi", hz);
	while (sc->xfer_done == 0 && err != EINTR);

	/*
	 * Stop the transfer and clean things up.
	 */
	WR4(sc, PDC_PTCR, PDC_PTCR_TXTDIS | PDC_PTCR_RXTDIS);
	if (err == 0)
		for (j = 0; j < i; j++)
			bus_dmamap_sync(sc->dmatag, sc->map[j], mode[j]);
out:
	for (j = 0; j < i; j++)
		bus_dmamap_unload(sc->dmatag, sc->map[j]);

	sx_xunlock(&sc->xfer_mtx);

	return (err);
}
Example #15
static int
at91_spi_attach(device_t dev)
{
	struct at91_spi_softc *sc;
	int err;
	uint32_t csr;

	sc = device_get_softc(dev);

	sc->dev = dev;
	sx_init(&sc->xfer_mtx, device_get_nameunit(dev));

	/*
	 * Allocate resources.
	 */
	err = at91_spi_activate(dev);
	if (err)
		goto out;

	/*
	 * Set up the hardware.
	 */

	WR4(sc, SPI_CR, SPI_CR_SWRST);
	/* "Software Reset must be Written Twice" erratum */
	WR4(sc, SPI_CR, SPI_CR_SWRST);
	WR4(sc, SPI_IDR, 0xffffffff);

	WR4(sc, SPI_MR, (0xf << 24) | SPI_MR_MSTR | SPI_MR_MODFDIS |
	    CS_TO_MR(0));

	/*
	 * For now, run the bus at the slowest speed possible as otherwise we
	 * may encounter data corruption on transmit as seen with ETHERNUT5
	 * and AT45DB321D even though both board and slave device can take
	 * more.
	 * This also serves as a work-around for the "NPCSx rises if no data
	 * is to be transmitted" erratum.  The ideal workaround for the
	 * latter is to take the chip select control away from the peripheral
	 * and manage it directly as a GPIO line.  The easy solution is to
	 * slow down the bus so dramatically that it just never gets starved
	 * as may be seen when the OHCI controller is running and consuming
	 * memory and APB bandwidth.
	 * Also, currently we lack a way of letting both the board and the
	 * slave devices take their maximum supported SPI clocks into account.
	 */
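	/*
	 * In the CSR value below, 0xff << 8 is the SCBR baud-rate divider
	 * (maximum divider, i.e. slowest clock) and 4 << 16 is DLYBS, the
	 * delay before SPCK; field positions per the AT91 SPI CSR layout.
	 */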
	csr = SPI_CSR_CPOL | (4 << 16) | (0xff << 8);
	WR4(sc, SPI_CSR0, csr);
	WR4(sc, SPI_CSR1, csr);
	WR4(sc, SPI_CSR2, csr);
	WR4(sc, SPI_CSR3, csr);

	WR4(sc, SPI_CR, SPI_CR_SPIEN);

	WR4(sc, PDC_PTCR, PDC_PTCR_TXTDIS);
	WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS);
	WR4(sc, PDC_RNPR, 0);
	WR4(sc, PDC_RNCR, 0);
	WR4(sc, PDC_TNPR, 0);
	WR4(sc, PDC_TNCR, 0);
	WR4(sc, PDC_RPR, 0);
	WR4(sc, PDC_RCR, 0);
	WR4(sc, PDC_TPR, 0);
	WR4(sc, PDC_TCR, 0);
	RD4(sc, SPI_RDR);
	RD4(sc, SPI_SR);

	device_add_child(dev, "spibus", -1);
	bus_generic_attach(dev);
out:
	if (err)
		at91_spi_deactivate(dev);
	return (err);
}
Example #16
/*
 * NB: Do not re-enable interrupts here -- reentrancy here can cause all
 * sorts of Bad Things(tm) to happen, including kernel stack overflows.
 */
static void
at32intc_cpu_intr(int ilvl, uint32_t st, uint32_t pc)
{
	struct at32intc_softc *sc = at32intc_sc;
	struct at32intc_line *il;
	struct at32intc_ictx ictx;
	struct cpu_info *ci;
	uint32_t icrx;
	uint32_t irrx;
	uint32_t igrp;
	int line;
	int b;

	ci = curcpu();
	ci->ci_idepth++;
	uvmexp.intrs++;

	ictx.st = st;
	ictx.pc = pc;

	if (ilvl >= 4)
		panic("at32intc_cpu_intr: bad interrupt level");

	icrx = RD4(AT32INTC_ICR(ilvl));
	igrp = icrx & AT32INTC_ICR_CAUSE_MASK;
	irrx = RD4(AT32INTC_IRR(igrp));

	if (irrx == 0)
		panic("at32intc_cpu_intr: no pending group %u level %d interrupts",
			igrp, ilvl);

	for (b = 0; b < 32; ++b) {
		if ((irrx & (1U << b)) == 0)
			continue;

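		/* Lines are indexed group-major: (group * 32) + bit. */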
		il = &at32intc_lines[b + (igrp << 5)];

		if (il->handler == NULL)
			panic("at32_intc_cpu_intr: no irq handler for ICR %x IRR %x",
				(unsigned)icrx, (unsigned)irrx);

		if (il->level != ilvl)
			panic("at32intc_cpu_intr: interrupt level mismatch");

		if (il->group != igrp)
			panic("at32intc_cpu_intr: interrupt level mismatch");

		il->handler(&ictx, il->arg);
	}

	ci->ci_idepth--;
#ifdef __HAVE_FAST_SOFTINTS
#error __HAVE_FAST_SOFTINTS: notyet
	/* software interrupt */
	ipending &= (MIPS_SOFT_INT_MASK_1|MIPS_SOFT_INT_MASK_0);
	if (ipending == 0)
		return;
	_clrsoftintr(ipending);
	softintr_dispatch(ipending);
#endif
}
Example #17
static void
ffec_harvest_stats(struct ffec_softc *sc)
{
	struct ifnet *ifp;

	/* We don't need to harvest too often. */
	if (++sc->stats_harvest_count < STATS_HARVEST_INTERVAL)
		return;

	/*
	 * Try to avoid harvesting unless the IDLE flag is on, but if it has
	 * been too long just go ahead and do it anyway, the worst that'll
	 * happen is we'll lose a packet count or two as we clear at the end.
	 */
	if (sc->stats_harvest_count < (2 * STATS_HARVEST_INTERVAL) &&
	    ((RD4(sc, FEC_MIBC_REG) & FEC_MIBC_IDLE) == 0))
		return;

	sc->stats_harvest_count = 0;
	ifp = sc->ifp;

	ifp->if_ipackets   += RD4(sc, FEC_RMON_R_PACKETS);
	ifp->if_imcasts    += RD4(sc, FEC_RMON_R_MC_PKT);
	ifp->if_ierrors    += RD4(sc, FEC_RMON_R_CRC_ALIGN);
	ifp->if_ierrors    += RD4(sc, FEC_RMON_R_UNDERSIZE);
	ifp->if_ierrors    += RD4(sc, FEC_RMON_R_OVERSIZE);
	ifp->if_ierrors    += RD4(sc, FEC_RMON_R_FRAG);
	ifp->if_ierrors    += RD4(sc, FEC_RMON_R_JAB);

	ifp->if_opackets   += RD4(sc, FEC_RMON_T_PACKETS);
	ifp->if_omcasts    += RD4(sc, FEC_RMON_T_MC_PKT);
	ifp->if_oerrors    += RD4(sc, FEC_RMON_T_CRC_ALIGN);
	ifp->if_oerrors    += RD4(sc, FEC_RMON_T_UNDERSIZE);
	ifp->if_oerrors    += RD4(sc, FEC_RMON_T_OVERSIZE );
	ifp->if_oerrors    += RD4(sc, FEC_RMON_T_FRAG);
	ifp->if_oerrors    += RD4(sc, FEC_RMON_T_JAB);

	ifp->if_collisions += RD4(sc, FEC_RMON_T_COL);

	ffec_clear_stats(sc);
}
Example #18
static int
usbphy_utmi_enable(struct usbphy_softc *sc)
{
	int rv;
	uint32_t val;

	/* Reset phy */
	val = RD4(sc, IF_USB_SUSP_CTRL);
	val |= UTMIP_RESET;
	WR4(sc, IF_USB_SUSP_CTRL, val);

	val = RD4(sc, UTMIP_TX_CFG0);
	val |= UTMIP_FS_PREAMBLE_J;
	WR4(sc, UTMIP_TX_CFG0, val);

	val = RD4(sc, UTMIP_HSRX_CFG0);
	val &= ~UTMIP_IDLE_WAIT(~0);
	val &= ~UTMIP_ELASTIC_LIMIT(~0);
	val |= UTMIP_IDLE_WAIT(sc->idle_wait_delay);
	val |= UTMIP_ELASTIC_LIMIT(sc->elastic_limit);
	WR4(sc, UTMIP_HSRX_CFG0, val);

	val = RD4(sc, UTMIP_HSRX_CFG1);
	val &= ~UTMIP_HS_SYNC_START_DLY(~0);
	val |= UTMIP_HS_SYNC_START_DLY(sc->hssync_start_delay);
	WR4(sc, UTMIP_HSRX_CFG1, val);

	val = RD4(sc, UTMIP_DEBOUNCE_CFG0);
	val &= ~UTMIP_BIAS_DEBOUNCE_A(~0);
	val |= UTMIP_BIAS_DEBOUNCE_A(0x7530);  /* For 12MHz */
	WR4(sc, UTMIP_DEBOUNCE_CFG0, val);

	val = RD4(sc, UTMIP_MISC_CFG0);
	val &= ~UTMIP_SUSPEND_EXIT_ON_EDGE;
	WR4(sc, UTMIP_MISC_CFG0, val);

	if (sc->dr_mode == USB_DR_MODE_DEVICE) {
		val = RD4(sc, IF_USB_SUSP_CTRL);
		val &= ~USB_WAKE_ON_CNNT_EN_DEV;
		val &= ~USB_WAKE_ON_DISCON_EN_DEV;
		WR4(sc, IF_USB_SUSP_CTRL, val);

		val = RD4(sc, UTMIP_BAT_CHRG_CFG0);
		val &= ~UTMIP_PD_CHRG;
		WR4(sc, UTMIP_BAT_CHRG_CFG0, val);
	} else {
		val = RD4(sc, UTMIP_BAT_CHRG_CFG0);
		val |= UTMIP_PD_CHRG;
		WR4(sc, UTMIP_BAT_CHRG_CFG0, val);
	}

	usbpby_enable_cnt++;
	if (usbpby_enable_cnt == 1) {
		rv = hwreset_deassert(sc->reset_pads);
		if (rv != 0) {
			device_printf(sc->dev,
			     "Cannot unreset 'utmi-pads' reset\n");
			return (rv);
		}
		rv = clk_enable(sc->clk_pads);
		if (rv != 0) {
			device_printf(sc->dev,
			    "Cannot enable 'utmi-pads' clock\n");
			return (rv);
		}

		val = bus_read_4(sc->pads_res, UTMIP_BIAS_CFG0);
		val &= ~UTMIP_OTGPD;
		val &= ~UTMIP_BIASPD;
		val &= ~UTMIP_HSSQUELCH_LEVEL(~0);
		val &= ~UTMIP_HSDISCON_LEVEL(~0);
		val &= ~UTMIP_HSDISCON_LEVEL_MSB(~0);
		val |= UTMIP_HSSQUELCH_LEVEL(sc->hssquelch_level);
		val |= UTMIP_HSDISCON_LEVEL(sc->hsdiscon_level);
		val |= UTMIP_HSDISCON_LEVEL_MSB(sc->hsdiscon_level);
		bus_write_4(sc->pads_res, UTMIP_BIAS_CFG0, val);

		rv = clk_disable(sc->clk_pads);
		if (rv != 0) {
			device_printf(sc->dev,
			    "Cannot disable 'utmi-pads' clock\n");
			return (rv);
		}
	}

	val = RD4(sc, UTMIP_XCVR_CFG0);
	val &= ~UTMIP_FORCE_PD_POWERDOWN;
	val &= ~UTMIP_FORCE_PD2_POWERDOWN ;
	val &= ~UTMIP_FORCE_PDZI_POWERDOWN;
	val &= ~UTMIP_XCVR_LSBIAS_SEL;
	val &= ~UTMIP_XCVR_LSFSLEW(~0);
	val &= ~UTMIP_XCVR_LSRSLEW(~0);
	val &= ~UTMIP_XCVR_HSSLEW(~0);
	val &= ~UTMIP_XCVR_HSSLEW_MSB(~0);
	val |= UTMIP_XCVR_LSFSLEW(sc->xcvr_lsfslew);
	val |= UTMIP_XCVR_LSRSLEW(sc->xcvr_lsrslew);
	val |= UTMIP_XCVR_HSSLEW(sc->xcvr_hsslew);
	val |= UTMIP_XCVR_HSSLEW_MSB(sc->xcvr_hsslew);
	if (!sc->xcvr_setup_use_fuses) {
		val &= ~UTMIP_XCVR_SETUP(~0);
		val &= ~UTMIP_XCVR_SETUP_MSB(~0);
		val |= UTMIP_XCVR_SETUP(sc->xcvr_setup);
		val |= UTMIP_XCVR_SETUP_MSB(sc->xcvr_setup);
	}
	WR4(sc, UTMIP_XCVR_CFG0, val);

	val = RD4(sc, UTMIP_XCVR_CFG1);
	val &= ~UTMIP_FORCE_PDDISC_POWERDOWN;
	val &= ~UTMIP_FORCE_PDCHRP_POWERDOWN;
	val &= ~UTMIP_FORCE_PDDR_POWERDOWN;
	val &= ~UTMIP_XCVR_TERM_RANGE_ADJ(~0);
	val |= UTMIP_XCVR_TERM_RANGE_ADJ(sc->term_range_adj);
	WR4(sc, UTMIP_XCVR_CFG1, val);

	val = RD4(sc, UTMIP_BIAS_CFG1);
	val &= ~UTMIP_BIAS_PDTRK_COUNT(~0);
	val |= UTMIP_BIAS_PDTRK_COUNT(0x5);
	WR4(sc, UTMIP_BIAS_CFG1, val);

	val = RD4(sc, UTMIP_SPARE_CFG0);
	if (sc->xcvr_setup_use_fuses)
		val |= FUSE_SETUP_SEL;
	else
		val &= ~FUSE_SETUP_SEL;
	WR4(sc, UTMIP_SPARE_CFG0, val);

	val = RD4(sc, IF_USB_SUSP_CTRL);
	val |= UTMIP_PHY_ENB;
	WR4(sc, IF_USB_SUSP_CTRL, val);

	val = RD4(sc, IF_USB_SUSP_CTRL);
	val &= ~UTMIP_RESET;
	WR4(sc, IF_USB_SUSP_CTRL, val);

	usbphy_utmi_phy_clk(sc, true);

	val = RD4(sc, CTRL_USB_USBMODE);
	val &= ~USB_USBMODE_MASK;
	if (sc->dr_mode == USB_DR_MODE_HOST)
		val |= USB_USBMODE_HOST;
	else
		val |= USB_USBMODE_DEVICE;
	WR4(sc, CTRL_USB_USBMODE, val);

	val = RD4(sc, CTRL_USB_HOSTPC1_DEVLC);
	val &= ~USB_HOSTPC1_DEVLC_PTS(~0);
	val |= USB_HOSTPC1_DEVLC_PTS(0);
	WR4(sc, CTRL_USB_HOSTPC1_DEVLC, val);

	return (0);
}
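The repeated val &= ~FIELD(~0); val |= FIELD(x); sequences above rely on each FIELD() macro shifting its argument into the field's bit position, so FIELD(~0) doubles as a full-width field mask. A standalone sketch of the idiom, with a hypothetical field at bits 7:4:

#define	EXAMPLE_FIELD(x)	(((x) & 0xf) << 4)	/* hypothetical field */

static uint32_t
set_example_field(uint32_t reg, uint32_t v)
{
	reg &= ~EXAMPLE_FIELD(~0);	/* clear the old field bits */
	reg |= EXAMPLE_FIELD(v);	/* insert the new value */
	return (reg);
}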
Example #19
bool interpretDWARFLines(const PEImage& img, mspdb::Mod* mod)
{
	for(unsigned long off = 0; off < img.debug_line_length; )
	{
		DWARF_LineNumberProgramHeader* hdr = (DWARF_LineNumberProgramHeader*) (img.debug_line + off);
		int length = hdr->unit_length;
		if(length < 0)
			break;
		length += sizeof(length);

		unsigned char* p = (unsigned char*) (hdr + 1);
		unsigned char* end = (unsigned char*) hdr + length;

		std::vector<unsigned int> opcode_lengths;
		opcode_lengths.resize(hdr->opcode_base);
		if (hdr->opcode_base > 0)
		{
			opcode_lengths[0] = 0;
			for(int o = 1; o < hdr->opcode_base && p < end; o++)
				opcode_lengths[o] = LEB128(p);
		}

		DWARF_LineState state;
		state.seg_offset = img.getImageBase() + img.getSection(img.codeSegment).VirtualAddress;

		// dirs
		while(p < end)
		{
			if(*p == 0)
				break;
			state.include_dirs.push_back((const char*) p);
			p += strlen((const char*) p) + 1;
		}
		p++;

		// files
		DWARF_FileName fname;
		while(p < end && *p)
		{
			fname.read(p);
			state.files.push_back(fname);
		}
		p++;

		state.init(hdr);
		while(p < end)
		{
			int opcode = *p++;
			if(opcode >= hdr->opcode_base)
			{
				// special opcode
				int adjusted_opcode = opcode - hdr->opcode_base;
				int operation_advance = adjusted_opcode / hdr->line_range;
				state.advance_addr(hdr, operation_advance);
				int line_advance = hdr->line_base + (adjusted_opcode % hdr->line_range);
				state.line += line_advance;
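				/*
				 * Worked example, assuming the common header values
				 * opcode_base=13, line_base=-5, line_range=14: special
				 * opcode 75 gives adjusted_opcode = 62, so the address
				 * advances by 62 / 14 = 4 operations and the line by
				 * -5 + (62 % 14) = 1 before the row is emitted below.
				 */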

				state.addLineInfo();

				state.basic_block = false;
				state.prologue_end = false;
				state.epilogue_end = false;
				state.discriminator = 0;
			}
			else
			{
				switch(opcode)
				{
				case 0: // extended
				{
					int exlength = LEB128(p);
					unsigned char* q = p + exlength;
					int excode = *p++;
					switch(excode)
					{
					case DW_LNE_end_sequence:
						state.end_sequence = true;
						state.last_addr = state.address;
						state.addLineInfo();
						if(!_flushDWARFLines(img, mod, state))
							return false;
						state.init(hdr);
						break;
					case DW_LNE_set_address:
						if (!mod && state.section == -1)
							state.section = img.getRelocationInLineSegment((char*)p - img.debug_line);
						if (unsigned long adr = RD4(p))
							state.address = adr;
						else if (!mod)
							state.address = adr;
						else
							state.address = state.last_addr; // strange adr 0 for templates?
						state.op_index = 0;
						break;
					case DW_LNE_define_file:
						fname.read(p);
						state.file_ptr = &fname;
						state.file = 0;
						break;
					case DW_LNE_set_discriminator:
						state.discriminator = LEB128(p);
						break;
					}
					p = q;
					break;
				}
				case DW_LNS_copy:
					state.addLineInfo();
					state.basic_block = false;
					state.prologue_end = false;
					state.epilogue_end = false;
					state.discriminator = 0;
					break;
				case DW_LNS_advance_pc:
					state.advance_addr(hdr, LEB128(p));
					break;
				case DW_LNS_advance_line:
					state.line += SLEB128(p);
					break;
				case DW_LNS_set_file:
					if(!_flushDWARFLines(img, mod, state))
						return false;
					state.file = LEB128(p);
					break;
				case DW_LNS_set_column:
					state.column = LEB128(p);
					break;
				case DW_LNS_negate_stmt:
					state.is_stmt = !state.is_stmt;
					break;
				case DW_LNS_set_basic_block:
					state.basic_block = true;
					break;
				case DW_LNS_const_add_pc:
					state.advance_addr(hdr, (255 - hdr->opcode_base) / hdr->line_range);
					break;
				case DW_LNS_fixed_advance_pc:
					state.address += RD2(p);
					state.op_index = 0;
					break;
				case DW_LNS_set_prologue_end:
					state.prologue_end = true;
					break;
				case DW_LNS_set_epilogue_begin:
					state.epilogue_end = true;
					break;
				case DW_LNS_set_isa:
					state.isa = LEB128(p);
					break;
				default:
					// unknown standard opcode
					for(unsigned int arg = 0; arg < opcode_lengths[opcode]; arg++)
						LEB128(p);
					break;
				}
			}
		}
		if(!_flushDWARFLines(img, mod, state))
			return false;

		off += length;
	}

	return true;
}
Example #20
static void
at91_mci_intr(void *arg)
{
	struct at91_mci_softc *sc = (struct at91_mci_softc*)arg;
	struct mmc_command *cmd = sc->curcmd;
	uint32_t sr, isr;

	AT91_MCI_LOCK(sc);

	sr = RD4(sc, MCI_SR);
	isr = sr & RD4(sc, MCI_IMR);

	if (mci_debug)
		printf("i 0x%x sr 0x%x\n", isr, sr);

	/*
	 * All interrupts are one-shot; disable it now.
	 * The next operation will re-enable whatever interrupts it wants.
	 */
	WR4(sc, MCI_IDR, isr);
	if (isr & MCI_SR_ERROR) {
		if (isr & (MCI_SR_RTOE | MCI_SR_DTOE))
			cmd->error = MMC_ERR_TIMEOUT;
		else if (isr & (MCI_SR_RCRCE | MCI_SR_DCRCE))
			cmd->error = MMC_ERR_BADCRC;
		else if (isr & (MCI_SR_OVRE | MCI_SR_UNRE))
			cmd->error = MMC_ERR_FIFO;
		else
			cmd->error = MMC_ERR_FAILED;
		/*
		 * CMD8 is used to probe for SDHC cards, a standard SD card
		 * will get a response timeout; don't report it because it's a
		 * normal and expected condition.  One might argue that all
		 * error reporting should be left to higher levels, but when
		 * they report at all it's always EIO, which isn't very
		 * helpful. XXX bootverbose?
		 */
		if (cmd->opcode != 8) {
			device_printf(sc->dev,
			    "IO error; status MCI_SR = 0x%x cmd opcode = %d%s\n",
			    sr, cmd->opcode,
			    (cmd->opcode != 12) ? "" :
			    (sc->flags & CMD_MULTIREAD) ? " after read" : " after write");
			at91_mci_reset(sc);
		}
		at91_mci_next_operation(sc);
	} else {
		if (isr & MCI_SR_TXBUFE) {
//			printf("TXBUFE\n");
			/*
			 * We need to wait for a BLKE that follows TXBUFE
			 * (intermediate BLKEs might happen after ENDTXes if
			 * we're chaining multiple buffers).  If BLKE is also
			 * asserted at the time we get TXBUFE, we can avoid
			 * another interrupt and process it right away, below.
			 */
			if (sr & MCI_SR_BLKE)
				isr |= MCI_SR_BLKE;
			else
				WR4(sc, MCI_IER, MCI_SR_BLKE);
		}
		if (isr & MCI_SR_RXBUFF) {
//			printf("RXBUFF\n");
		}
		if (isr & MCI_SR_ENDTX) {
//			printf("ENDTX\n");
		}
		if (isr & MCI_SR_ENDRX) {
//			printf("ENDRX\n");
			at91_mci_read_done(sc, sr);
		}
		if (isr & MCI_SR_NOTBUSY) {
//			printf("NOTBUSY\n");
			at91_mci_notbusy(sc);
		}
		if (isr & MCI_SR_DTIP) {
//			printf("Data transfer in progress\n");
		}
		if (isr & MCI_SR_BLKE) {
//			printf("Block transfer end\n");
			at91_mci_write_done(sc, sr);
		}
		if (isr & MCI_SR_TXRDY) {
//			printf("Ready to transmit\n");
		}
		if (isr & MCI_SR_RXRDY) {
//			printf("Ready to receive\n");
		}
		if (isr & MCI_SR_CMDRDY) {
//			printf("Command ready\n");
			at91_mci_cmdrdy(sc, sr);
		}
	}
	AT91_MCI_UNLOCK(sc);
}
Example #21
static void
at91_mci_start_cmd(struct at91_mci_softc *sc, struct mmc_command *cmd)
{
	uint32_t cmdr, mr;
	struct mmc_data *data;

	sc->curcmd = cmd;
	data = cmd->data;

	/* XXX Upper layers don't always set this */
	cmd->mrq = sc->req;

	/* Begin setting up command register. */

	cmdr = cmd->opcode;

	if (sc->host.ios.bus_mode == opendrain)
		cmdr |= MCI_CMDR_OPDCMD;

	/* Set up response handling.  Allow max timeout for responses. */

	if (MMC_RSP(cmd->flags) == MMC_RSP_NONE)
		cmdr |= MCI_CMDR_RSPTYP_NO;
	else {
		cmdr |= MCI_CMDR_MAXLAT;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= MCI_CMDR_RSPTYP_136;
		else
			cmdr |= MCI_CMDR_RSPTYP_48;
	}

	/*
	 * If there is no data transfer, just set up the right interrupt mask
	 * and start the command.
	 *
	 * The interrupt mask needs to be CMDRDY plus all non-data-transfer
	 * errors. It's important to leave the transfer-related errors out, to
	 * avoid spurious timeout or crc errors on a STOP command following a
	 * multiblock read.  When a multiblock read is in progress, sending a
	 * STOP in the middle of a block occasionally triggers such errors, but
	 * we're totally disinterested in them because we've already gotten all
	 * the data we wanted without error before sending the STOP command.
	 */

	if (data == NULL) {
		uint32_t ier = MCI_SR_CMDRDY |
		    MCI_SR_RTOE | MCI_SR_RENDE |
		    MCI_SR_RCRCE | MCI_SR_RDIRE | MCI_SR_RINDE;

		at91_mci_pdc_disable(sc);

		if (cmd->opcode == MMC_STOP_TRANSMISSION)
			cmdr |= MCI_CMDR_TRCMD_STOP;

		/* Ignore response CRC on CMD2 and ACMD41, per standard. */

		if (cmd->opcode == MMC_SEND_OP_COND ||
		    cmd->opcode == ACMD_SD_SEND_OP_COND)
			ier &= ~MCI_SR_RCRCE;

		if (mci_debug)
			printf("CMDR %x (opcode %d) ARGR %x no data\n",
			    cmdr, cmd->opcode, cmd->arg);

		WR4(sc, MCI_ARGR, cmd->arg);
		WR4(sc, MCI_CMDR, cmdr);
		WR4(sc, MCI_IDR, 0xffffffff);
		WR4(sc, MCI_IER, ier);
		return;
	}

	/* There is data, set up the transfer-related parts of the command. */

	if (data->flags & MMC_DATA_READ)
		cmdr |= MCI_CMDR_TRDIR;

	if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE))
		cmdr |= MCI_CMDR_TRCMD_START;

	if (data->flags & MMC_DATA_STREAM)
		cmdr |= MCI_CMDR_TRTYP_STREAM;
	else if (data->flags & MMC_DATA_MULTI) {
		cmdr |= MCI_CMDR_TRTYP_MULTIPLE;
		sc->flags |= (data->flags & MMC_DATA_READ) ?
		    CMD_MULTIREAD : CMD_MULTIWRITE;
	}

	/*
	 * Disable PDC until we're ready.
	 *
	 * Set block size and turn on PDC mode for dma xfer.
	 * Note that the block size is the smaller of the amount of data to be
	 * transferred, or 512 bytes.  The 512 size is fixed by the standard;
	 * smaller blocks are possible, but never larger.
	 */

	WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);

	mr = RD4(sc, MCI_MR) & ~MCI_MR_BLKLEN;
	mr |= min(data->len, 512) << 16;
	WR4(sc, MCI_MR, mr | MCI_MR_PDCMODE | MCI_MR_PDCPADV);

	/*
	 * Set up DMA.
	 *
	 * Use bounce buffers even if we don't need to byteswap, because doing
	 * multi-block IO with large DMA buffers is way fast (compared to
	 * single-block IO), even after incurring the overhead of also copying
	 * from/to the caller's buffers (which may be in non-contiguous physical
	 * pages).
	 *
	 * In an ideal non-byteswap world we could create a dma tag that allows
	 * for discontiguous segments and do the IO directly from/to the
	 * caller's buffer(s), using ENDRX/ENDTX interrupts to chain the
	 * discontiguous buffers through the PDC. Someday.
	 *
	 * If a read is bigger than 2k, split it in half so that we can start
	 * byte-swapping the first half while the second half is on the wire.
	 * It would be best if we could split it into 8k chunks, but we can't
	 * always keep up with the byte-swapping due to other system activity,
	 * and if an RXBUFF interrupt happens while we're still handling the
	 * byte-swap from the prior buffer (IE, we haven't returned from
	 * handling the prior interrupt yet), then data will get dropped on the
	 * floor and we can't easily recover from that.  The right fix for that
	 * would be to have the interrupt handling only keep the DMA flowing and
	 * enqueue filled buffers to be byte-swapped in a non-interrupt context.
	 * Even that won't work on the write side of things though; in that
	 * context we have to have all the data ready to go before starting the
	 * dma.
	 *
	 * XXX what about stream transfers?
	 */
	sc->xfer_offset = 0;
	sc->bbuf_curidx = 0;

	if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)) {
		uint32_t len;
		uint32_t remaining = data->len;
		bus_addr_t paddr;
		int err;

		if (remaining > (BBCOUNT*BBSIZE))
			panic("IO read size exceeds MAXDATA\n");

		if (data->flags & MMC_DATA_READ) {
			if (remaining > 2048) // XXX
				len = remaining / 2;
			else
				len = remaining;
			err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[0],
			    sc->bbuf_vaddr[0], len, at91_mci_getaddr,
			    &paddr, BUS_DMA_NOWAIT);
			if (err != 0)
				panic("IO read dmamap_load failed\n");
			bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0],
			    BUS_DMASYNC_PREREAD);
			WR4(sc, PDC_RPR, paddr);
			WR4(sc, PDC_RCR, len / 4);
			sc->bbuf_len[0] = len;
			remaining -= len;
			if (remaining == 0) {
				sc->bbuf_len[1] = 0;
			} else {
				len = remaining;
				err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[1],
				    sc->bbuf_vaddr[1], len, at91_mci_getaddr,
				    &paddr, BUS_DMA_NOWAIT);
				if (err != 0)
					panic("IO read dmamap_load failed\n");
				bus_dmamap_sync(sc->dmatag, sc->bbuf_map[1],
				    BUS_DMASYNC_PREREAD);
				WR4(sc, PDC_RNPR, paddr);
				WR4(sc, PDC_RNCR, len / 4);
				sc->bbuf_len[1] = len;
				remaining -= len;
			}
			WR4(sc, PDC_PTCR, PDC_PTCR_RXTEN);
		} else {
			len = min(BBSIZE, remaining);
			/*
			 * If this is MCI1 revision 2xx controller, apply
			 * a work-around for the "Data Write Operation and
			 * number of bytes" erratum.
			 */
			if ((sc->sc_cap & CAP_MCI1_REV2XX) && len < 12) {
				len = 12;
				memset(sc->bbuf_vaddr[0], 0, 12);
			}
			at91_bswap_buf(sc, sc->bbuf_vaddr[0], data->data, len);
			err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[0],
			    sc->bbuf_vaddr[0], len, at91_mci_getaddr,
			    &paddr, BUS_DMA_NOWAIT);
			if (err != 0)
				panic("IO write dmamap_load failed\n");
			bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0],
			    BUS_DMASYNC_PREWRITE);
			WR4(sc, PDC_TPR, paddr);
			WR4(sc, PDC_TCR, len / 4);
			sc->bbuf_len[0] = len;
			remaining -= len;
			if (remaining == 0) {
				sc->bbuf_len[1] = 0;
			} else {
				len = remaining;
				at91_bswap_buf(sc, sc->bbuf_vaddr[1],
				    ((char *)data->data)+BBSIZE, len);
				err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[1],
				    sc->bbuf_vaddr[1], len, at91_mci_getaddr,
				    &paddr, BUS_DMA_NOWAIT);
				if (err != 0)
					panic("IO write dmamap_load failed\n");
				bus_dmamap_sync(sc->dmatag, sc->bbuf_map[1],
				    BUS_DMASYNC_PREWRITE);
				WR4(sc, PDC_TNPR, paddr);
				WR4(sc, PDC_TNCR, len / 4);
				sc->bbuf_len[1] = len;
				remaining -= len;
			}
			/* do not enable PDC xfer until CMDRDY asserted */
		}
		data->xfer_len = 0; /* XXX what's this? appears to be unused. */
	}

	if (mci_debug)
		printf("CMDR %x (opcode %d) ARGR %x with data len %d\n",
		       cmdr, cmd->opcode, cmd->arg, cmd->data->len);

	WR4(sc, MCI_ARGR, cmd->arg);
	WR4(sc, MCI_CMDR, cmdr);
	WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_CMDRDY);
}
Example #22
/*
 * Check for a character available.
 */
static int
at91_usart_rxready(struct uart_bas *bas)
{

	return ((RD4(bas, USART_CSR) & USART_CSR_RXRDY) != 0 ? 1 : 0);
}
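A caller polls this predicate before pulling a byte from the receive holding register. A minimal sketch (the helper is hypothetical; USART_RHR is the receiver holding register offset used elsewhere in this driver):

static int
at91_usart_poll_getc(struct uart_bas *bas)
{

	/* Busy-wait for a character, then read it back. */
	while (!at91_usart_rxready(bas))
		DELAY(4);
	return (RD4(bas, USART_RHR) & 0xff);
}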
Example #23
File: sdhci.c  Project: 2asoft/freebsd
static void
sdhci_reset(struct sdhci_slot *slot, uint8_t mask)
{
	int timeout;

	if (slot->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!(RD4(slot, SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT))
			return;
	}

	/* Some controllers need this kick or reset won't work. */
	if ((mask & SDHCI_RESET_ALL) == 0 &&
	    (slot->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)) {
		uint32_t clock;

		/* This is to force an update */
		clock = slot->clock;
		slot->clock = 0;
		sdhci_set_clock(slot, clock);
	}

	if (mask & SDHCI_RESET_ALL) {
		slot->clock = 0;
		slot->power = 0;
	}

	WR1(slot, SDHCI_SOFTWARE_RESET, mask);

	if (slot->quirks & SDHCI_QUIRK_WAITFOR_RESET_ASSERTED) {
		/*
		 * Resets on TI OMAPs and AM335x are incompatible with SDHCI
		 * specification.  The reset bit has internal propagation delay,
		 * so a fast read after write returns 0 even if reset process is
		 * in progress. The workaround is to poll for 1 before polling
		 * for 0.  In the worst case, if we miss seeing it asserted the
		 * time we spent waiting is enough to ensure the reset finishes.
		 */
		timeout = 10000;
		while ((RD1(slot, SDHCI_SOFTWARE_RESET) & mask) != mask) {
			if (timeout <= 0)
				break;
			timeout--;
			DELAY(1);
		}
	}

	/* Wait max 100 ms */
	timeout = 10000;
	/* Controller clears the bits when it's done */
	while (RD1(slot, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout <= 0) {
			slot_printf(slot, "Reset 0x%x never completed.\n",
			    mask);
			sdhci_dumpregs(slot);
			return;
		}
		timeout--;
		DELAY(10);
	}
}
Example #24
Location decodeLocation(const DWARF_Attribute& attr, const Location* frameBase, int at)
{
	static Location invalid = { Location::Invalid };

	if (attr.type == Const)
		return mkAbs(attr.cons);

	if (attr.type != ExprLoc && attr.type != Block) // same memory layout
		return invalid;

	byte* p = attr.expr.ptr;
	Location stack[256];
	int stackDepth = 0;
    if (at == DW_AT_data_member_location)
        stack[stackDepth++] = Location { Location::Abs, 0, 0 };

	for (;;)
	{
		if (p >= attr.expr.ptr + attr.expr.len)
			break;

		int op = *p++;
		if (op == 0)
			break;

		switch (op)
		{
			case DW_OP_reg0:  case DW_OP_reg1:  case DW_OP_reg2:  case DW_OP_reg3:
			case DW_OP_reg4:  case DW_OP_reg5:  case DW_OP_reg6:  case DW_OP_reg7:
			case DW_OP_reg8:  case DW_OP_reg9:  case DW_OP_reg10: case DW_OP_reg11:
			case DW_OP_reg12: case DW_OP_reg13: case DW_OP_reg14: case DW_OP_reg15:
			case DW_OP_reg16: case DW_OP_reg17: case DW_OP_reg18: case DW_OP_reg19:
			case DW_OP_reg20: case DW_OP_reg21: case DW_OP_reg22: case DW_OP_reg23:
			case DW_OP_reg24: case DW_OP_reg25: case DW_OP_reg26: case DW_OP_reg27:
			case DW_OP_reg28: case DW_OP_reg29: case DW_OP_reg30: case DW_OP_reg31:
				stack[stackDepth++] = mkInReg(op - DW_OP_reg0);
				break;
			case DW_OP_regx:
				stack[stackDepth++] = mkInReg(LEB128(p));
				break;

			case DW_OP_const1u: stack[stackDepth++] = mkAbs(*p++); break;
			case DW_OP_const2u: stack[stackDepth++] = mkAbs(RD2(p)); break;
			case DW_OP_const4u: stack[stackDepth++] = mkAbs(RD4(p)); break;
			case DW_OP_const1s: stack[stackDepth++] = mkAbs((char)*p++); break;
			case DW_OP_const2s: stack[stackDepth++] = mkAbs((short)RD2(p)); break;
			case DW_OP_const4s: stack[stackDepth++] = mkAbs((int)RD4(p)); break;
			case DW_OP_constu:  stack[stackDepth++] = mkAbs(LEB128(p)); break;
			case DW_OP_consts:  stack[stackDepth++] = mkAbs(SLEB128(p)); break;

			case DW_OP_plus_uconst: 
				if (stack[stackDepth - 1].is_inreg())
					return invalid;
				stack[stackDepth - 1].off += LEB128(p); 
				break;

			case DW_OP_lit0:  case DW_OP_lit1:  case DW_OP_lit2:  case DW_OP_lit3:
			case DW_OP_lit4:  case DW_OP_lit5:  case DW_OP_lit6:  case DW_OP_lit7:
			case DW_OP_lit8:  case DW_OP_lit9:  case DW_OP_lit10: case DW_OP_lit11:
			case DW_OP_lit12: case DW_OP_lit13: case DW_OP_lit14: case DW_OP_lit15:
			case DW_OP_lit16: case DW_OP_lit17: case DW_OP_lit18: case DW_OP_lit19:
			case DW_OP_lit20: case DW_OP_lit21: case DW_OP_lit22: case DW_OP_lit23:
				stack[stackDepth++] = mkAbs(op - DW_OP_lit0);
				break;

			case DW_OP_breg0:  case DW_OP_breg1:  case DW_OP_breg2:  case DW_OP_breg3:
			case DW_OP_breg4:  case DW_OP_breg5:  case DW_OP_breg6:  case DW_OP_breg7:
			case DW_OP_breg8:  case DW_OP_breg9:  case DW_OP_breg10: case DW_OP_breg11:
			case DW_OP_breg12: case DW_OP_breg13: case DW_OP_breg14: case DW_OP_breg15:
			case DW_OP_breg16: case DW_OP_breg17: case DW_OP_breg18: case DW_OP_breg19:
			case DW_OP_breg20: case DW_OP_breg21: case DW_OP_breg22: case DW_OP_breg23:
			case DW_OP_breg24: case DW_OP_breg25: case DW_OP_breg26: case DW_OP_breg27:
			case DW_OP_breg28: case DW_OP_breg29: case DW_OP_breg30: case DW_OP_breg31:
				stack[stackDepth++] = mkRegRel(op - DW_OP_breg0, SLEB128(p));
				break;
			case DW_OP_bregx:
			{
				unsigned reg = LEB128(p);
				stack[stackDepth++] = mkRegRel(reg, SLEB128(p));
			}   break;


			case DW_OP_abs: case DW_OP_neg: case DW_OP_not:
			{
				Location& op1 = stack[stackDepth - 1];
				if (!op1.is_abs())
					return invalid;
				switch (op)
				{
					case DW_OP_abs:   op1 = mkAbs(abs(op1.off)); break;
					case DW_OP_neg:   op1 = mkAbs(-op1.off); break;
					case DW_OP_not:   op1 = mkAbs(~op1.off); break;
				}
			}   break;

			case DW_OP_plus:  // op2 + op1
			{
				Location& op1 = stack[stackDepth - 1];
				Location& op2 = stack[stackDepth - 2];
				// Can add only two offsets or a regrel and an offset.
				if (op2.is_regrel() && op1.is_abs())
					op2 = mkRegRel(op2.reg, op2.off + op1.off);
				else if (op2.is_abs() && op1.is_regrel())
					op2 = mkRegRel(op1.reg, op2.off + op1.off);
				else if (op2.is_abs() && op1.is_abs())
					op2 = mkAbs(op2.off + op1.off);
				else
					return invalid;
				--stackDepth;
			}   break;

			case DW_OP_minus: // op2 - op1
			{
				Location& op1 = stack[stackDepth - 1];
				Location& op2 = stack[stackDepth - 2];
				if (op2.is_regrel() && op1.is_regrel() && op2.reg == op1.reg)
					op2 = mkAbs(0); // X - X == 0
				else if (op2.is_regrel() && op1.is_abs())
					op2 = mkRegRel(op2.reg, op2.off - op1.off);
				else if (op2.is_abs() && op1.is_abs())
					op2 = mkAbs(op2.off - op1.off);
				else
					return invalid;
				--stackDepth;
			}   break;

			case DW_OP_mul:
			{
				Location& op1 = stack[stackDepth - 1];
				Location& op2 = stack[stackDepth - 2];
				if ((op1.is_abs() && op1.off == 0) || (op2.is_abs() && op2.off == 0))
					op2 = mkAbs(0); // X * 0 == 0
				else if (op1.is_abs() && op2.is_abs())
					op2 = mkAbs(op1.off * op2.off);
				else
					return invalid;
				--stackDepth;
			}   break;

			case DW_OP_and:
			{
				Location& op1 = stack[stackDepth - 1];
				Location& op2 = stack[stackDepth - 2];
				if ((op1.is_abs() && op1.off == 0) || (op2.is_abs() && op2.off == 0))
					op2 = mkAbs(0); // X & 0 == 0
				else if (op1.is_abs() && op2.is_abs())
					op2 = mkAbs(op1.off & op2.off);
				else
					return invalid;
				--stackDepth;
			}   break;

			case DW_OP_div: case DW_OP_mod: case DW_OP_shl:
			case DW_OP_shr: case DW_OP_shra: case DW_OP_or:
			case DW_OP_xor:
			case DW_OP_eq:  case DW_OP_ge:  case DW_OP_gt:
			case DW_OP_le:  case DW_OP_lt:  case DW_OP_ne:
			{
				Location& op1 = stack[stackDepth - 1];
				Location& op2 = stack[stackDepth - 2];
				if (!op1.is_abs() || !op2.is_abs()) // can't combine unless both are constants
					return invalid;
				switch (op)
				{
					case DW_OP_div:   op2.off = op2.off / op1.off; break;
					case DW_OP_mod:   op2.off = op2.off % op1.off; break;
					case DW_OP_shl:   op2.off = op2.off << op1.off; break;
					case DW_OP_shr:   op2.off = op2.off >> op1.off; break;
					case DW_OP_shra:  op2.off = op2.off >> op1.off; break;
					case DW_OP_or:    op2.off = op2.off | op1.off; break;
					case DW_OP_xor:   op2.off = op2.off ^ op1.off; break;
					case DW_OP_eq:    op2.off = op2.off == op1.off; break;
					case DW_OP_ge:    op2.off = op2.off >= op1.off; break;
					case DW_OP_gt:    op2.off = op2.off > op1.off; break;
					case DW_OP_le:    op2.off = op2.off <= op1.off; break;
					case DW_OP_lt:    op2.off = op2.off < op1.off; break;
					case DW_OP_ne:    op2.off = op2.off != op1.off; break;
				}
				--stackDepth;
			}   break;

			case DW_OP_fbreg:
			{
				if (!frameBase)
					return invalid;

				Location loc;
				if (frameBase->is_inreg()) // ok in frame base specification, per DWARF4 spec #3.3.5
					loc = mkRegRel(frameBase->reg, SLEB128(p));
				else if (frameBase->is_regrel())
					loc = mkRegRel(frameBase->reg, frameBase->off + SLEB128(p));
				else
					return invalid;
				stack[stackDepth++] = loc;
			}   break;

			case DW_OP_dup:   stack[stackDepth] = stack[stackDepth - 1]; stackDepth++; break;
			case DW_OP_drop:  stackDepth--; break;
			case DW_OP_over:  stack[stackDepth] = stack[stackDepth - 2]; stackDepth++; break;
			case DW_OP_pick:  stack[stackDepth] = stack[stackDepth - 1 - *p++]; stackDepth++; break; // index counts from top of stack
			case DW_OP_swap:  { Location tmp = stack[stackDepth - 1]; stack[stackDepth - 1] = stack[stackDepth - 2]; stack[stackDepth - 2] = tmp; } break;
			case DW_OP_rot:   { Location tmp = stack[stackDepth - 1]; stack[stackDepth - 1] = stack[stackDepth - 2]; stack[stackDepth - 2] = stack[stackDepth - 3]; stack[stackDepth - 3] = tmp; } break;

			case DW_OP_addr:
				stack[stackDepth++] = mkAbs(RD4(p)); // TODO: 64-bit
				break;

			case DW_OP_skip:
			{
				unsigned off = RD2(p);
				p = p + off;
			}   break;

			case DW_OP_bra:
			{
				Location& op1 = stack[stackDepth - 1];
				if (!op1.is_abs())
					return invalid;
				if (op1.off != 0)
				{
					unsigned off = RD2(p);
					p = p + off;
				}
				--stackDepth;
			}   break;

			case DW_OP_nop:
				break;

			case DW_OP_call_frame_cfa: // assume ebp+8/rbp+16
				stack[stackDepth++] = Location{ Location::RegRel, DW_REG_CFA, 0 };
				break;

			case DW_OP_deref:
			case DW_OP_deref_size:
			case DW_OP_push_object_address:
			case DW_OP_call2:
			case DW_OP_call4:
			case DW_OP_form_tls_address:
			case DW_OP_call_ref:
			case DW_OP_bit_piece:
			case DW_OP_implicit_value:
			case DW_OP_stack_value:
			default:
				return invalid;
		}
	}

	assert(stackDepth > 0);
	return stack[0];
}
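As a concrete walk through the evaluator, the most common location expression for a stack local is a single DW_OP_fbreg (operand values hypothetical):

// Expression bytes: { DW_OP_fbreg, SLEB128(-8) }
// With *frameBase == mkRegRel(ebp, 0), the DW_OP_fbreg case pushes
// mkRegRel(ebp, 0 + (-8)), and decodeLocation() returns "ebp - 8".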
Example #25
static void
ffec_harvest_stats(struct ffec_softc *sc)
{
	struct ifnet *ifp;

	/* We don't need to harvest too often. */
	if (++sc->stats_harvest_count < STATS_HARVEST_INTERVAL)
		return;

	/*
	 * Try to avoid harvesting unless the IDLE flag is on, but if it has
	 * been too long just go ahead and do it anyway, the worst that'll
	 * happen is we'll lose a packet count or two as we clear at the end.
	 */
	if (sc->stats_harvest_count < (2 * STATS_HARVEST_INTERVAL) &&
	    ((RD4(sc, FEC_MIBC_REG) & FEC_MIBC_IDLE) == 0))
		return;

	sc->stats_harvest_count = 0;
	ifp = sc->ifp;

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, RD4(sc, FEC_RMON_R_PACKETS));
	if_inc_counter(ifp, IFCOUNTER_IMCASTS, RD4(sc, FEC_RMON_R_MC_PKT));
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    RD4(sc, FEC_RMON_R_CRC_ALIGN) + RD4(sc, FEC_RMON_R_UNDERSIZE) +
	    RD4(sc, FEC_RMON_R_OVERSIZE) + RD4(sc, FEC_RMON_R_FRAG) +
	    RD4(sc, FEC_RMON_R_JAB));

	if_inc_counter(ifp, IFCOUNTER_OPACKETS, RD4(sc, FEC_RMON_T_PACKETS));
	if_inc_counter(ifp, IFCOUNTER_OMCASTS, RD4(sc, FEC_RMON_T_MC_PKT));
	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    RD4(sc, FEC_RMON_T_CRC_ALIGN) + RD4(sc, FEC_RMON_T_UNDERSIZE) +
	    RD4(sc, FEC_RMON_T_OVERSIZE) + RD4(sc, FEC_RMON_T_FRAG) +
	    RD4(sc, FEC_RMON_T_JAB));

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS, RD4(sc, FEC_RMON_T_COL));

	ffec_clear_stats(sc);
}
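
ffec_clear_stats() itself is not part of this example. On the i.MX FEC the MIB block is typically cleared by toggling control bits in FEC_MIBC_REG; a minimal sketch, with the FEC_MIBC_DIS/FEC_MIBC_CLEAR bit names assumed rather than taken from the code above:

/* Hedged sketch only; the MIBC bit names are assumptions. */
static void
ffec_clear_stats(struct ffec_softc *sc)
{
	uint32_t mibc;

	mibc = RD4(sc, FEC_MIBC_REG);
	/* Disable and clear the MIB counters, then re-enable them. */
	WR4(sc, FEC_MIBC_REG, mibc | FEC_MIBC_DIS | FEC_MIBC_CLEAR);
	WR4(sc, FEC_MIBC_REG, mibc & ~(FEC_MIBC_DIS | FEC_MIBC_CLEAR));
}
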
Example #26
File: if_awg.c  Project: Hooman3/freebsd
static void
awg_update_link_locked(struct awg_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_100_TX:
		case IFM_10_T:
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, EMAC_BASIC_CTL_0);
	val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT;
	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT;
	else
		val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= BASIC_CTL_DUPLEX;

	WR4(sc, EMAC_BASIC_CTL_0, val);

	val = RD4(sc, EMAC_RX_CTL_0);
	val &= ~RX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		val |= RX_FLOW_CTL_EN;
	WR4(sc, EMAC_RX_CTL_0, val);

	val = RD4(sc, EMAC_TX_FLOW_CTL);
	val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		val |= TX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= awg_pause_time << PAUSE_TIME_SHIFT;
	WR4(sc, EMAC_TX_FLOW_CTL, val);
}
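
For context, this function is normally reached from the driver's MII status-change callback; a hedged sketch of that glue (the AWG_LOCK/AWG_UNLOCK macro names are assumed from the driver's conventions):

/* Sketch only; lock macro names are assumptions. */
static void
awg_miibus_statchg(device_t dev)
{
	struct awg_softc *sc;

	sc = device_get_softc(dev);
	AWG_LOCK(sc);
	awg_update_link_locked(sc);
	AWG_UNLOCK(sc);
}
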
Example #27
static int
at91_usart_bus_attach(struct uart_softc *sc)
{
	int err;
	int i;
	uint32_t cr;
	struct at91_usart_softc *atsc;

	atsc = (struct at91_usart_softc *)sc;

	if (at91_usart_requires_rts0_workaround(sc))
		atsc->flags |= USE_RTS0_WORKAROUND;

	/*
	 * See if we have a TIMEOUT bit.  We disable all interrupts as
	 * a side effect.  Boot loaders may have enabled them.  Since
	 * a TIMEOUT interrupt can't happen without other setup, the
	 * apparent race here can't actually happen.
	 */
	WR4(&sc->sc_bas, USART_IDR, 0xffffffff);
	WR4(&sc->sc_bas, USART_IER, USART_CSR_TIMEOUT);
	if (RD4(&sc->sc_bas, USART_IMR) & USART_CSR_TIMEOUT)
		atsc->flags |= HAS_TIMEOUT;
	WR4(&sc->sc_bas, USART_IDR, 0xffffffff);

	/*
	 * Allocate transmit DMA tag and map.  We allow a transmit buffer
	 * to be any size, but it must map to a single contiguous physical
	 * extent.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
	    NULL, &atsc->tx_tag);
	if (err != 0)
		goto errout;
	err = bus_dmamap_create(atsc->tx_tag, 0, &atsc->tx_map);
	if (err != 0)
		goto errout;

	if (atsc->flags & HAS_TIMEOUT) {
		/*
		 * Allocate receive DMA tags, maps, and buffers.
		 * The receive buffers should be aligned to arm_dcache_align,
		 * otherwise partial cache line flushes on every receive
		 * interrupt are pretty much guaranteed.
		 */
		err = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
		    arm_dcache_align, 0, BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR, NULL, NULL, sc->sc_rxfifosz, 1,
		    sc->sc_rxfifosz, BUS_DMA_ALLOCNOW, NULL, NULL,
		    &atsc->rx_tag);
		if (err != 0)
			goto errout;
		for (i = 0; i < 2; i++) {
			err = bus_dmamem_alloc(atsc->rx_tag,
			    (void **)&atsc->ping_pong[i].buffer,
			    BUS_DMA_NOWAIT, &atsc->ping_pong[i].map);
			if (err != 0)
				goto errout;
			err = bus_dmamap_load(atsc->rx_tag,
			    atsc->ping_pong[i].map,
			    atsc->ping_pong[i].buffer, sc->sc_rxfifosz,
			    at91_getaddr, &atsc->ping_pong[i].pa, 0);
			if (err != 0)
				goto errout;
			bus_dmamap_sync(atsc->rx_tag, atsc->ping_pong[i].map,
			    BUS_DMASYNC_PREREAD);
		}
		atsc->ping = &atsc->ping_pong[0];
		atsc->pong = &atsc->ping_pong[1];
	}

	/* Turn on rx and tx */
	cr = USART_CR_RSTSTA | USART_CR_RSTRX | USART_CR_RSTTX;
	WR4(&sc->sc_bas, USART_CR, cr);
	WR4(&sc->sc_bas, USART_CR, USART_CR_RXEN | USART_CR_TXEN);

	/*
	 * Setup the PDC to receive data.  We use the ping-pong buffers
	 * so that we can more easily bounce between the two and so that
	 * we get an interrupt 1/2 way through the software 'fifo' we have
	 * to avoid overruns.
	 */
	if (atsc->flags & HAS_TIMEOUT) {
		WR4(&sc->sc_bas, PDC_RPR, atsc->ping->pa);
		WR4(&sc->sc_bas, PDC_RCR, sc->sc_rxfifosz);
		WR4(&sc->sc_bas, PDC_RNPR, atsc->pong->pa);
		WR4(&sc->sc_bas, PDC_RNCR, sc->sc_rxfifosz);
		WR4(&sc->sc_bas, PDC_PTCR, PDC_PTCR_RXTEN);

		/*
		 * Set the receive timeout to be 1.5 character times
		 * assuming 8N1.
		 */
		WR4(&sc->sc_bas, USART_RTOR, 15);
		WR4(&sc->sc_bas, USART_CR, USART_CR_STTTO);
		WR4(&sc->sc_bas, USART_IER, USART_CSR_TIMEOUT |
		    USART_CSR_RXBUFF | USART_CSR_ENDRX);
	} else {
		WR4(&sc->sc_bas, USART_IER, USART_CSR_RXRDY);
	}
	WR4(&sc->sc_bas, USART_IER, USART_CSR_RXBRK | USART_DCE_CHANGE_BITS);

	/* Prime sc->hwsig with the initial hw line states. */
	at91_usart_bus_getsig(sc);

errout:
	return (err);
}
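
The at91_getaddr callback passed to bus_dmamap_load() above is not shown in this example; it is almost certainly the conventional single-segment load callback that just records the physical address. A minimal sketch of that shape:

/* Hedged sketch of the conventional single-segment load callback. */
static void
at91_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}
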
Example #28
/* Clear previous configuration of the PL by asserting PROG_B. */
static int
zy7_devcfg_reset_pl(struct zy7_devcfg_softc *sc)
{
	uint32_t devcfg_ctl;
	int tries, err;

	DEVCFG_SC_ASSERT_LOCKED(sc);

	devcfg_ctl = RD4(sc, ZY7_DEVCFG_CTRL);

	/* Clear sticky bits and set up INIT signal positive edge interrupt. */
	WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL);
	WR4(sc, ZY7_DEVCFG_INT_MASK, ~ZY7_DEVCFG_INT_PCFG_INIT_PE);

	/* Deassert PROG_B (active low). */
	devcfg_ctl |= ZY7_DEVCFG_CTRL_PCFG_PROG_B;
	WR4(sc, ZY7_DEVCFG_CTRL, devcfg_ctl);

	/*
	 * Wait for INIT to assert.  If it is already asserted, we may not get
	 * an edge interrupt so cancel it and continue.
	 */
	if ((RD4(sc, ZY7_DEVCFG_STATUS) &
	     ZY7_DEVCFG_STATUS_PCFG_INIT) != 0) {
		/* Already asserted.  Cancel interrupt. */
		WR4(sc, ZY7_DEVCFG_INT_MASK, ~0);
	}
	else {
		/* Wait for positive edge interrupt. */
		err = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "zy7i1", hz);
		if (err != 0)
			return (err);
	}
	
	/* Reassert PROG_B (active low). */
	devcfg_ctl &= ~ZY7_DEVCFG_CTRL_PCFG_PROG_B;
	WR4(sc, ZY7_DEVCFG_CTRL, devcfg_ctl);

	/* Wait for INIT deasserted.  This happens almost instantly. */
	tries = 0;
	while ((RD4(sc, ZY7_DEVCFG_STATUS) &
		ZY7_DEVCFG_STATUS_PCFG_INIT) != 0) {
		if (++tries >= 100)
			return (EIO);
		DELAY(5);
	}

	/* Clear sticky bits and set up INIT positive edge interrupt. */
	WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL);
	WR4(sc, ZY7_DEVCFG_INT_MASK, ~ZY7_DEVCFG_INT_PCFG_INIT_PE);

	/* Deassert PROG_B again. */
	devcfg_ctl |= ZY7_DEVCFG_CTRL_PCFG_PROG_B;
	WR4(sc, ZY7_DEVCFG_CTRL, devcfg_ctl);

	/*
	 * Wait for INIT asserted indicating FPGA internal initialization
	 * is complete.
	 */
	err = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "zy7i2", hz);
	if (err != 0)
		return (err);

	/* Clear sticky DONE bit in interrupt status. */
	WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_ALL);

	return (0);
}
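
The mtx_sleep() calls above rely on the devcfg interrupt handler issuing a matching wakeup(sc) when an unmasked INIT edge fires. A hedged sketch of that counterpart (handler and lock macro names are assumptions, not quoted code):

/* Sketch only; DEVCFG_SC_LOCK/UNLOCK and the handler name are assumed. */
static void
zy7_devcfg_intr(void *arg)
{
	struct zy7_devcfg_softc *sc = arg;

	DEVCFG_SC_LOCK(sc);
	/* Mask further interrupts and wake whoever sleeps in reset_pl(). */
	WR4(sc, ZY7_DEVCFG_INT_MASK, ~0);
	wakeup(sc);
	DEVCFG_SC_UNLOCK(sc);
}
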
Example #29
static int
at91_twi_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs)
{
	struct at91_twi_softc *sc;
	int i, len, err;
	uint32_t rdwr;
	uint8_t *buf;
	uint32_t sr;

	sc = device_get_softc(dev);
	err = 0;
	AT91_TWI_LOCK(sc);
	for (i = 0; i < nmsgs; i++) {
		/*
		 * The linux atmel driver doesn't use the internal device
		 * address feature of twi.  A separate i2c message needs to
		 * be written to use this.
		 * See http://lists.arm.linux.org.uk/pipermail/linux-arm-kernel/2004-September/024411.html
		 * for details.  Upon reflection, we could use this as an
		 * optimization, but it is unclear whether the extra code
		 * would actually make transfers faster or better.
		 */
		rdwr = (msgs[i].flags & IIC_M_RD) ? TWI_MMR_MREAD : 0;
		WR4(sc, TWI_MMR, TWI_MMR_DADR(msgs[i].slave) | rdwr);
		len = msgs[i].len;
		buf = msgs[i].buf;
		/* zero byte transfers aren't allowed */
		if (len == 0 || buf == NULL) {
			err = EINVAL;
			goto out;
		}
		if (len == 1 && msgs[i].flags & IIC_M_RD)
			WR4(sc, TWI_CR, TWI_CR_START | TWI_CR_STOP);
		else
			WR4(sc, TWI_CR, TWI_CR_START);
		if (msgs[i].flags & IIC_M_RD) {
			sr = RD4(sc, TWI_SR);
			while (!(sr & TWI_SR_TXCOMP)) {
				if ((sr = RD4(sc, TWI_SR)) & TWI_SR_RXRDY) {
					len--;
					*buf++ = RD4(sc, TWI_RHR) & 0xff;
					if (len == 1)
						WR4(sc, TWI_CR, TWI_CR_STOP);
				}
			}
			if (len > 0 || (sr & TWI_SR_NACK)) {
				err = ENXIO;		// iic nack convention
				goto out;
			}
		} else {
			while (len--) {
				if ((err = at91_twi_wait(sc, TWI_SR_TXRDY)))
					goto out;
				WR4(sc, TWI_THR, *buf++);
			}
			WR4(sc, TWI_CR, TWI_CR_STOP);
		}
		if ((err = at91_twi_wait(sc, TWI_SR_TXCOMP)))
			break;
	}
out:
	if (err) {
		WR4(sc, TWI_CR, TWI_CR_SWRST);
		WR4(sc, TWI_CR, TWI_CR_MSEN | TWI_CR_SVDIS);
		WR4(sc, TWI_CWGR, sc->cwgr);
	}
	AT91_TWI_UNLOCK(sc);
	return (err);
}
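
at91_twi_wait() is called above but not included in this example. Its shape is a bounded wait for a status bit that fails on NACK; a hedged sketch (the real driver may sleep and let at91_twi_intr(), shown in an earlier example, do the wakeup):

/* Hedged sketch of at91_twi_wait()'s shape; not the driver's exact code. */
static int
at91_twi_wait(struct at91_twi_softc *sc, uint32_t bit)
{
	uint32_t sr;
	int counter = 100000;

	/* Poll the status register until the requested bit appears. */
	while (!((sr = RD4(sc, TWI_SR)) & bit) && --counter > 0)
		continue;
	if (counter <= 0)
		return (EBUSY);
	if (sr & TWI_SR_NACK)
		return (ENXIO);		/* iic nack convention, as above */
	return (0);
}
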
Example #30
bool DIECursor::readNext(DWARF_InfoData& id, bool stopAtNull)
{
	id.clear();

	if (hasChild)
		++level;

	for (;;)
	{
		if (level == -1)
			return false; // we were already at the end of the subtree

		if (ptr >= ((byte*)cu + sizeof(cu->unit_length) + cu->unit_length))
			return false; // root of the tree does not have a null terminator, but we know the length

		id.entryPtr = ptr;
		id.entryOff = ptr - (byte*)cu;
		id.code = LEB128(ptr);
		if (id.code == 0)
		{
			--level; // pop up one level
			if (stopAtNull)
			{
				hasChild = false;
				return false;
			}
			continue; // read the next DIE
		}

		break;
	}

	byte* abbrev = getDWARFAbbrev(cu->debug_abbrev_offset, id.code);
	assert(abbrev);
	if (!abbrev)
		return false;

	id.abbrev = abbrev;
	id.tag = LEB128(abbrev);
	id.hasChild = *abbrev++;

	int attr, form;
	for (;;)
	{
		attr = LEB128(abbrev);
		form = LEB128(abbrev);

		if (attr == 0 && form == 0)
			break;

		while (form == DW_FORM_indirect)
			form = LEB128(ptr);

		DWARF_Attribute a;
		switch (form)
		{
			case DW_FORM_addr:           a.type = Addr; a.addr = (unsigned long)RDsize(ptr, cu->address_size); break;
			case DW_FORM_block:          a.type = Block; a.block.len = LEB128(ptr); a.block.ptr = ptr; ptr += a.block.len; break;
			case DW_FORM_block1:         a.type = Block; a.block.len = *ptr++;      a.block.ptr = ptr; ptr += a.block.len; break;
			case DW_FORM_block2:         a.type = Block; a.block.len = RD2(ptr);   a.block.ptr = ptr; ptr += a.block.len; break;
			case DW_FORM_block4:         a.type = Block; a.block.len = RD4(ptr);   a.block.ptr = ptr; ptr += a.block.len; break;
			case DW_FORM_data1:          a.type = Const; a.cons = *ptr++; break;
			case DW_FORM_data2:          a.type = Const; a.cons = RD2(ptr); break;
			case DW_FORM_data4:          a.type = Const; a.cons = RD4(ptr); break;
			case DW_FORM_data8:          a.type = Const; a.cons = RD8(ptr); break;
			case DW_FORM_sdata:          a.type = Const; a.cons = SLEB128(ptr); break;
			case DW_FORM_udata:          a.type = Const; a.cons = LEB128(ptr); break;
			case DW_FORM_string:         a.type = String; a.string = (const char*)ptr; ptr += strlen(a.string) + 1; break;
			case DW_FORM_strp:           a.type = String; a.string = (const char*)(img->debug_str + RDsize(ptr, cu->isDWARF64() ? 8 : 4)); break;
			case DW_FORM_flag:           a.type = Flag; a.flag = (*ptr++ != 0); break;
			case DW_FORM_flag_present:   a.type = Flag; a.flag = true; break;
			case DW_FORM_ref1:           a.type = Ref; a.ref = (byte*)cu + *ptr++; break;
			case DW_FORM_ref2:           a.type = Ref; a.ref = (byte*)cu + RD2(ptr); break;
			case DW_FORM_ref4:           a.type = Ref; a.ref = (byte*)cu + RD4(ptr); break;
			case DW_FORM_ref8:           a.type = Ref; a.ref = (byte*)cu + RD8(ptr); break;
			case DW_FORM_ref_udata:      a.type = Ref; a.ref = (byte*)cu + LEB128(ptr); break;
			case DW_FORM_ref_addr:       a.type = Ref; a.ref = (byte*)img->debug_info + (cu->isDWARF64() ? RD8(ptr) : RD4(ptr)); break;
			case DW_FORM_ref_sig8:       a.type = Invalid; ptr += 8;  break;
			case DW_FORM_exprloc:        a.type = ExprLoc; a.expr.len = LEB128(ptr); a.expr.ptr = ptr; ptr += a.expr.len; break;
			case DW_FORM_sec_offset:     a.type = SecOffset;  a.sec_offset = cu->isDWARF64() ? RD8(ptr) : RD4(ptr); break;
			case DW_FORM_indirect:
			default: assert(false && "Unsupported DWARF attribute form"); return false;
		}

		switch (attr)
		{
			case DW_AT_byte_size: 
				assert(a.type == Const || a.type == Ref || a.type == ExprLoc);
				if (a.type == Const) // TODO: other types not supported yet
					id.byte_size = a.cons; 
				break;
			case DW_AT_sibling:   assert(a.type == Ref); id.sibling = a.ref; break;
			case DW_AT_encoding:  assert(a.type == Const); id.encoding = a.cons; break;
			case DW_AT_name:      assert(a.type == String); id.name = a.string; break;
			case DW_AT_MIPS_linkage_name: assert(a.type == String); id.linkage_name = a.string; break;
			case DW_AT_comp_dir:  assert(a.type == String); id.dir = a.string; break;
			case DW_AT_low_pc:    assert(a.type == Addr); id.pclo = a.addr; break;
			case DW_AT_high_pc:
				if (a.type == Addr)
					id.pchi = a.addr;
				else if (a.type == Const)
					id.pchi = id.pclo + a.cons;
				else
					assert(false);
			    break;
			case DW_AT_ranges:
				if (a.type == SecOffset)
					id.ranges = a.sec_offset;
				else if (a.type == Const)
					id.ranges = a.cons;
				else
					assert(false);
			    break;
			case DW_AT_type:      assert(a.type == Ref); id.type = a.ref; break;
			case DW_AT_inline:    assert(a.type == Const); id.inlined = a.cons; break;
			case DW_AT_external:  assert(a.type == Flag); id.external = a.flag; break;
			case DW_AT_upper_bound: 
				assert(a.type == Const || a.type == Ref || a.type == ExprLoc);
				if (a.type == Const) // TODO: other types not supported yet
					id.upper_bound = a.cons;
				break;
			case DW_AT_lower_bound: 
				assert(a.type == Const || a.type == Ref || a.type == ExprLoc);
				if (a.type == Const) // TODO: other types not supported yet
					id.lower_bound = a.cons;
				break;
			case DW_AT_containing_type: assert(a.type == Ref); id.containing_type = a.ref; break;
			case DW_AT_specification: assert(a.type == Ref); id.specification = a.ref; break;
			case DW_AT_data_member_location: id.member_location = a; break;
			case DW_AT_location: id.location = a; break;
			case DW_AT_frame_base: id.frame_base = a; break;
		}
	}

	hasChild = id.hasChild != 0;
	sibling = id.sibling;

	return true;
}
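
A caller drives this by repeatedly invoking readNext() until it returns false. As a hedged illustration (the DIECursor constructor's exact shape and the 11-byte DWARF32 compilation-unit header size are assumed here):

// Hypothetical walk over one compilation unit's DIE tree.
DWARF_InfoData id;
DIECursor cursor(cu, (byte*)cu + 11); // 11 = assumed DWARF32 CU header size
while (cursor.readNext(id, false))
	if (id.tag == DW_TAG_subprogram && id.name)
		printf("subprogram %s, pc %08lx..%08lx\n", id.name, id.pclo, id.pchi);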