Example No. 1
/* set the core to socram run bist and return bist status back */
int
si_bist_socram(si_t *sih, uint32 *biststatus)
{
    si_info_t *sii;
    uint origidx;
    uint intr_val = 0;
    sbsocramregs_t *regs;
    int error = 0;
    uint status = 0;

    SI_ERROR(("doing the bist on SOCRAM\n"));

    sii = SI_INFO(sih);

    /* Block ints and save current core */
    INTR_OFF(sii, intr_val);
    origidx = si_coreidx(sih);

    /* Switch to SOCRAM core */
    if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
        goto done;

    si_core_reset(sih, SICF_BIST_EN, SICF_BIST_EN);

    /* Wait for bist done */
    SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);

    status = si_core_sflags(sih, 0, 0);

    if (status & SISF_BIST_DONE) {
        if (status & SISF_BIST_ERROR) {
            *biststatus = R_REG(sii->osh, &regs->biststat);
            /* hnd_bist gives errors for ROM bist test, so ignore it */
            *biststatus &= 0xFFFF;
            if (!*biststatus)
                error = 0;
            else
                error = 1;
        }
    }

    si_core_reset(sih, 0, 0);
    /* Return to previous state and core */
    si_setcoreidx(sih, origidx);
done:
    INTR_RESTORE(sii, intr_val);
    return error;
}
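
A minimal caller sketch (an assumption, not part of the original example): it presumes the usual Broadcom siutils/bcmutils headers are in scope and that sih came from si_attach(); check_socram is a hypothetical helper name.

/* Hypothetical helper: run the SOCRAM BIST and map the result to a bcmerror code.
 * Assumes sih is a valid, attached silicon handle. */
static int check_socram(si_t *sih)
{
    uint32 biststatus = 0;

    if (si_bist_socram(sih, &biststatus) != 0) {
        SI_ERROR(("SOCRAM bist failed, biststatus 0x%08x\n", biststatus));
        return BCME_ERROR;
    }
    return BCME_OK;
}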
Example No. 2
/* Run bist on current core. Caller needs to take care of core-specific bist hazards */
int
si_corebist(si_t *sih)
{
	uint32 cflags;
	int result = 0;

	/* Read core control flags */
	cflags = si_core_cflags(sih, 0, 0);

	/* Set bist & fgc */
	si_core_cflags(sih, 0, (SICF_BIST_EN | SICF_FGC));

	/* Wait for bist done */
	SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000);

	if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR)
		result = BCME_ERROR;

	/* Reset core control flags */
	si_core_cflags(sih, 0xffff, cflags);

	return result;
}
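
A hedged wrapper sketch showing how si_corebist is typically driven (the wrapper itself, run_core_bist, is hypothetical): select the target core, run the BIST, then restore the previously selected core.

/* Hypothetical wrapper: run BIST on a given core and restore the original
 * core index afterwards. The caller still owns any core-specific hazards. */
static int run_core_bist(si_t *sih, uint coreid)
{
	uint origidx = si_coreidx(sih);
	int result = BCME_ERROR;

	/* si_setcore returns NULL if the core is not present */
	if (si_setcore(sih, coreid, 0) != NULL)
		result = si_corebist(sih);

	si_setcoreidx(sih, origidx);
	return result;
}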
Example No. 3
int
si_bist_d11(si_t *sih, uint32 *biststatus1, uint32 *biststatus2)
{
    si_info_t *sii;
    uint origidx;
    uint intr_val = 0;
    void *regs;
    int error = 0;
    bool wasup;
    uint32 offset = SBCONFIGOFF + SBTMSTATELOW;
    uint32 max_res_mask;
    uint32 pmu_ctrl;

    *biststatus1 = 0;
    *biststatus2 = 0;

    SI_ERROR(("doing the bist on D11\n"));

    sii = SI_INFO(sih);

    if (CHIPTYPE(sih->socitype) != SOCI_SB) {
        return 0;
    }

    /* Block ints and save current core */
    INTR_OFF(sii, intr_val);
    origidx = si_coreidx(sih);

    /* Switch to D11 core */
    if (!(regs = si_setcore(sih, D11_CORE_ID, 0)))
        goto done;

    /* Get info for determining size */
    /* coming out of reset the device should have clk enabled, bw set, etc */
    if (!(wasup = si_iscoreup(sih)))
        si_core_reset(sih, 0x4F, 0x4F);

    max_res_mask = si_corereg(sih, 0, OFFSETOF(chipcregs_t, max_res_mask), 0, 0);
    si_corereg(sih, 0, OFFSETOF(chipcregs_t, max_res_mask), ~0, 0x3fffff);

    if (si_corerev(&sii->pub) == 20) {
        uint32 phy_reset_val;
        uint32 bist_test_val, bist_status;

        /* XXX: enable the phy PLL */
        pmu_ctrl = si_corereg(sih, si_coreidx(&sii->pub), 0x1e8, 0, 0);
        pmu_ctrl |= 0x10000;
        si_corereg(sih, si_coreidx(&sii->pub), 0x1e8, ~0, pmu_ctrl);
        SPINWAIT(((si_corereg(sih, si_coreidx(&sii->pub), 0x1e8, 0, 0) & 0x01000000) == 0),
                1000000);
        pmu_ctrl = si_corereg(sih, si_coreidx(&sii->pub), 0x1e8, 0, 0);

        /* take the phy out of reset */
        phy_reset_val = si_corereg(sih, si_coreidx(&sii->pub), offset, 0, 0);
        phy_reset_val &= ~(0x0008 << SBTML_SICF_SHIFT);
        si_corereg(sih, si_coreidx(&sii->pub), offset, ~0, phy_reset_val);
        phy_reset_val = si_corereg(sih, si_coreidx(&sii->pub), offset, 0, 0);

        /* enable bist first */
        bist_test_val = si_corereg(sih, si_coreidx(&sii->pub), offset, 0, 0);
        bist_test_val |= (SICF_BIST_EN << 16);
        si_corereg(sih, si_coreidx(&sii->pub), offset, ~0, bist_test_val);
        SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 1000000);
        bist_status = si_core_sflags(sih, 0, 0);
        SI_ERROR(("status are 0x%08x\n", bist_status));
        if (bist_status & SISF_BIST_DONE) {
            if (bist_status & SISF_BIST_ERROR) {
                error = 1;
                *biststatus1 = si_corereg(sih,  si_coreidx(&sii->pub), 12, 0, 0);
                *biststatus2 = si_corereg(sih,  si_coreidx(&sii->pub), 16, 0, 0);
            }
        }
        /* stop the phy pll */
        pmu_ctrl = si_corereg(sih, si_coreidx(&sii->pub), 0x1e8, 0, 0);
        pmu_ctrl &= ~0x10000;
        si_corereg(sih, si_coreidx(&sii->pub), 0x1e8, ~0, pmu_ctrl);
    }

    /* remove the resource mask */
    si_corereg(sih, 0, OFFSETOF(chipcregs_t, max_res_mask), ~0, max_res_mask);

    /* Disable the core again if it wasn't up before the test */
    if (!wasup)
        si_core_disable(sih, 0);

    /* Return to previous state and core */
    si_setcoreidx(sih, origidx);
done:
    INTR_RESTORE(sii, intr_val);
    return error;
}
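
A short caller sketch for the D11 variant (report_d11_bist is a hypothetical name): it simply runs the test and logs both status words on error. Note that si_bist_d11 returns 0 immediately on non-SB interconnects.

/* Hypothetical caller: run the D11 BIST and dump both status words on error. */
static void report_d11_bist(si_t *sih)
{
    uint32 status1 = 0, status2 = 0;

    if (si_bist_d11(sih, &status1, &status2) != 0)
        SI_ERROR(("D11 bist error: biststatus1 0x%08x biststatus2 0x%08x\n",
                  status1, status2));
}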
Example No. 4
struct hnddma_pub *dma_attach(struct osl_info *osh, char *name, si_t *sih,
		     void *dmaregstx, void *dmaregsrx, uint ntxd,
		     uint nrxd, uint rxbufsize, int rxextheadroom,
		     uint nrxpost, uint rxoffset, uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
	if (di == NULL) {
#ifdef BCMDBG
		printf("dma_attach: out of memory\n");
#endif
		return NULL;
	}

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	/* old chips w/o sb are no longer supported */
	ASSERT(sih != NULL);

	di->dma64 = ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));

	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);

	/* init dma reg pointer */
	ASSERT(ntxd <= D64MAXDD);
	ASSERT(nrxd <= D64MAXDD);
	di->d64txregs = (dma64regs_t *) dmaregstx;
	di->d64rxregs = (dma64regs_t *) dmaregsrx;
	di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;

	/* Default flags (which can be changed by the driver calling dma_ctrlflags
	 * before enable): For backwards compatibility both Rx Overflow Continue
	 * and Parity are DISABLED.
	 */
	di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
				    0);

	DMA_TRACE(("%s: dma_attach: %s osh %p flags 0x%x ntxd %d nrxd %d "
		   "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		   "dmaregstx %p dmaregsrx %p\n", name, "DMA64", osh,
		   di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize,
		   rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->osh = osh;
	di->sih = sih;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom =
	    (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplane addresses to zero-based memory, need offset
	 *     Other bus: use zero
	 *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sih->bustype == PCI_BUS) {
		/* pcie with DMA64 */
		di->ddoffsetlow = 0;
		di->ddoffsethigh = SI_PCIE_DMA_H32;
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}
#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((si_coreid(sih) == SDIOD_CORE_ID)
	    && ((si_corerev(sih) > 0) && (si_corerev(sih) <= 2)))
		di->addrext = 0;
	else if ((si_coreid(sih) == I2S_CORE_ID) &&
		 ((si_corerev(sih) == 0) || (si_corerev(sih) == 1)))
		di->addrext = 0;
	else
		di->addrext = _dma_isaddrext(di);

	/* do the descriptors need to be aligned, and if so, on a 4K/8K boundary or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
			/* for a smaller descriptor table, the HW relaxes the alignment requirement */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
		}
	} else
		di->dmadesc_align = 4;	/* 16 byte alignment */

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
		  di->aligndesc_4k, di->dmadesc_align));

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory\n", di->name));
			goto fail;
		}
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory\n", di->name));
			goto fail;
		}
	}

	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->txdpa)));
			goto fail;
		}
		if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->rxdpa)));
			goto fail;
		}
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, di->addrext));

	/* allocate DMA mapping vectors */
	if (DMASGLIST_ENAB) {
		if (ntxd) {
			size = ntxd * sizeof(hnddma_seg_map_t);
			di->txp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->txp_dmah == NULL)
				goto fail;
		}

		if (nrxd) {
			size = nrxd * sizeof(hnddma_seg_map_t);
			di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->rxp_dmah == NULL)
				goto fail;
		}
	}

	return (struct hnddma_pub *) di;

 fail:
	_dma_detach(di);
	return NULL;
}
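
A hedged attach sketch for dma_attach: the register pointers are supplied by the caller, and the ring/buffer tunables below (MY_NTXD, MY_NRXD, MY_RXBUFSZ, MY_NRXPOST, MY_RXOFFSET) are illustrative placeholders rather than values taken from any real driver configuration.

/* Hypothetical tunables -- placeholders for illustration only. */
#define MY_NTXD      256	/* power of 2, <= D64MAXDD */
#define MY_NRXD      256	/* power of 2, <= D64MAXDD */
#define MY_RXBUFSZ   2048	/* receive buffer size in bytes */
#define MY_NRXPOST   32		/* rx buffers to keep posted */
#define MY_RXOFFSET  0		/* no extra rx header offset */

static struct hnddma_pub *attach_engine(struct osl_info *osh, si_t *sih,
					void *txregs, void *rxregs)
{
	/* rxextheadroom == -1 selects the default BCMEXTRAHDROOM;
	 * msg_level == NULL falls back to the global dma_msg_level. */
	return dma_attach(osh, "mydma", sih, txregs, rxregs,
			  MY_NTXD, MY_NRXD, MY_RXBUFSZ, -1,
			  MY_NRXPOST, MY_RXOFFSET, NULL);
}

On success the returned handle is driven through its di_fn function-pointer table (the example itself calls di_fn->ctrlflags); on failure dma_attach frees whatever it allocated and returns NULL.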