Example #1
void 
plog_cache_writeback(unsigned long start, long size)
{
	long linesz, linemsk;
	unsigned long end;

#if defined(CONFIG_BMIPS5000)
	linesz = cpu_scache_line_size();
#else
	linesz = cpu_dcache_line_size();
#endif

	/*
	 * Set up the loop counters so the address is cache line aligned (do
	 * we really need to do that?) and the length is a multiple of the
	 * cache line size.
	 */

	linemsk = linesz - 1;
	start &= ~linemsk;
	size += linesz;
	end = start + size;

	while (start < end) {
#if defined(CONFIG_BMIPS5000)
		cache_op(HitWbSc, start);
#else
		cache_op(Hit_Writeback_D, start);
#endif
		start += linesz;
	}
	__sync();
}
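
This example aligns the start address down to a cache-line boundary and pads the length so a trailing partial line is still written back. A minimal stand-alone sketch of that arithmetic, assuming a power-of-two line size (the values below are illustrative, not taken from the code above):

#include <stdio.h>

int main(void)
{
	unsigned long linesz = 32;            /* illustrative line size */
	unsigned long start = 0x1003, size = 0x21;
	unsigned long linemsk = linesz - 1;

	start &= ~linemsk;   /* align down to a line boundary: 0x1000 */
	size += linesz;      /* pad so the final partial line is covered */
	printf("write back lines from %#lx up to %#lx\n", start, start + size);
	return 0;
}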
Example #2
void r4k_dcache_inv(rt_ubase_t addr, rt_ubase_t size)
{
    rt_ubase_t end, a;
    rt_ubase_t dc_lsize = cpu_dcache_line_size();

    a = addr & ~(dc_lsize - 1);
    end = ((addr + size) - 1) & ~(dc_lsize - 1);
    while (1)
    {
        invalidate_dcache_line(a);
        if (a == end)
            break;
        a += dc_lsize;
    }
}
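
Note the loop structure: end is the address of the last line touched by [addr, addr + size), so the exit test a == end makes the walk inclusive and safe even when the whole range fits in a single line. A small sketch of the same index math with illustrative numbers:

#include <stdio.h>

int main(void)
{
    unsigned long dc_lsize = 32;   /* illustrative line size */
    unsigned long addr = 0x1003, size = 0x20;
    unsigned long a   = addr & ~(dc_lsize - 1);                /* 0x1000 */
    unsigned long end = ((addr + size) - 1) & ~(dc_lsize - 1); /* 0x1020 */

    for (;;)
    {
        printf("invalidate line %#lx\n", a);  /* stands in for the cache op */
        if (a == end)
            break;
        a += dc_lsize;
    }
    return 0;
}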
Example #3
static int __init pcibios_set_cache_line_size(void)
{
	unsigned int lsize;

	/*
	 * Set PCI cacheline size to that of the highest level in the
	 * cache hierarchy.
	 */
	lsize = cpu_dcache_line_size();
	lsize = cpu_scache_line_size() ? : lsize;
	lsize = cpu_tcache_line_size() ? : lsize;

	BUG_ON(!lsize);

	pci_dfl_cache_line_size = lsize >> 2;

	pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
	return 0;
}
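
The lsize >> 2 converts a byte count into the 32-bit-word units used by the PCI Cache Line Size configuration register, and the GNU ?: shorthand picks the first non-zero line size starting from the highest cache level. A trivial sketch of the unit conversion (the 32-byte value is illustrative):

#include <stdio.h>

int main(void)
{
    unsigned int lsize = 32;              /* illustrative: 32-byte lines */
    unsigned int reg_value = lsize >> 2;  /* 8 dwords, as PCI expects */

    printf("%u-byte line -> Cache Line Size register value %u\n",
           lsize, reg_value);
    return 0;
}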
Example #4
void r4k_dcache_wback_inv(rt_ubase_t addr, rt_ubase_t size)
{
    rt_ubase_t end, a;

    if (size >= dcache_size)
    {
        blast_dcache16();
    }
    else
    {
        rt_ubase_t dc_lsize = cpu_dcache_line_size();

        a = addr & ~(dc_lsize - 1);
        end = ((addr + size) - 1) & ~(dc_lsize - 1);
        while (1)
        {
            flush_dcache_line(a);
            if (a == end)
                break;
            a += dc_lsize;
        }
    }
}
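
The size check implements a common trade-off: once the range is at least as large as the D-cache itself, flushing the whole cache by index (blast_dcache16()) is roughly no more work than walking the range line by line. A rough cost comparison under assumed sizes (numbers are illustrative only):

#include <stdio.h>

int main(void)
{
    unsigned long dcache_size = 32 * 1024;  /* assumed: 32 KiB D-cache */
    unsigned long dc_lsize = 32;            /* assumed line size */
    unsigned long size = 64 * 1024;         /* range larger than the cache */

    /* A per-line walk issues size / dc_lsize operations; indexing over the
     * whole cache issues roughly dcache_size / dc_lsize, so the full blast
     * wins once size >= dcache_size. */
    printf("hit-type ops: %lu, full-cache ops: %lu\n",
           size / dc_lsize, dcache_size / dc_lsize);
    return 0;
}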
Example #5
int32 swNic_init(uint32 userNeedRxPkthdrRingCnt[RTL865X_SWNIC_RXRING_HW_PKTDESC],
                 uint32 userNeedRxMbufRingCnt,
                 uint32 userNeedTxPkthdrRingCnt[RTL865X_SWNIC_TXRING_HW_PKTDESC],
                 uint32 clusterSize)
{
	uint32 i, j, k;
	static uint32 totalRxPkthdrRingCnt = 0, totalTxPkthdrRingCnt = 0;
	static struct rtl_pktHdr *pPkthdrList_start;
	static struct rtl_mBuf *pMbufList_start;
	struct rtl_pktHdr *pPkthdrList;
	struct rtl_mBuf *pMbufList;
	struct rtl_pktHdr * pPkthdr;
	struct rtl_mBuf * pMbuf;
	unsigned long flags=0;
	int	ret;

#if defined(CONFIG_RTL_8198C) && defined(_PKTHDR_CACHEABLE)
	int cpu_dcache_line = cpu_dcache_line_size(); /* see arch/mips/include/asm/cpu-features.h */
#endif

	/* init const array for rx pre-process	*/
	extPortMaskToPortNum[0] = 5;
	extPortMaskToPortNum[1] = 6;
	extPortMaskToPortNum[2] = 7;
	extPortMaskToPortNum[3] = 5;
	extPortMaskToPortNum[4] = 8;
	extPortMaskToPortNum[5] = 5;
	extPortMaskToPortNum[6] = 5;
	extPortMaskToPortNum[7] = 5;

#if	defined(DELAY_REFILL_ETH_RX_BUF)
	rxPkthdrRefillThreshold[0] = ETH_REFILL_THRESHOLD;
	rxPkthdrRefillThreshold[1] = ETH_REFILL_THRESHOLD1;
	rxPkthdrRefillThreshold[2] = ETH_REFILL_THRESHOLD2;
	rxPkthdrRefillThreshold[3] = ETH_REFILL_THRESHOLD3;
	rxPkthdrRefillThreshold[4] = ETH_REFILL_THRESHOLD4;
	rxPkthdrRefillThreshold[5] = ETH_REFILL_THRESHOLD5;
#endif

	#if defined(CONFIG_RTL8196C_REVISION_B)
	rtl_chip_version = REG32(REVR);
	#endif

	ret = SUCCESS;
	SMP_LOCK_ETH_RECV(flags);
	if (rxMbufRing == NULL)
	{
		size_of_cluster = clusterSize;

		/* Allocate Rx descriptors of rings */
		for (i = 0; i < RTL865X_SWNIC_RXRING_HW_PKTDESC; i++) {
			rxPkthdrRingCnt[i] = userNeedRxPkthdrRingCnt[i];
			if (rxPkthdrRingCnt[i] == 0)
			{
				rxPkthdrRing[i] = NULL;
				continue;
			}

			rxPkthdrRing[i] = (uint32 *) UNCACHED_MALLOC(rxPkthdrRingCnt[i] * sizeof(uint32*));
			ASSERT_CSP( (uint32) rxPkthdrRing[i] & 0x0fffffff );

			totalRxPkthdrRingCnt += rxPkthdrRingCnt[i];
		}

		if (totalRxPkthdrRingCnt == 0) {
			ret = EINVAL;
			goto out;
		}

		/* Allocate Tx descriptors of rings */
		for (i = 0; i < RTL865X_SWNIC_TXRING_HW_PKTDESC; i++) {
			txPkthdrRingCnt[i] = userNeedTxPkthdrRingCnt[i];

			if (txPkthdrRingCnt[i] == 0)
			{
				txPkthdrRing[i] = NULL;
				continue;
			}

			txPkthdrRing[i] = (uint32 *) UNCACHED_MALLOC(txPkthdrRingCnt[i] * sizeof(uint32*));
			#ifdef CONFIG_RTL8196C_REVISION_B
			if (rtl_chip_version == RTL8196C_REVISION_A)
				txPkthdrRing_backup[i]=(uint32 *) UNCACHED_MALLOC(txPkthdrRingCnt[i] * sizeof(uint32));
			#endif

			ASSERT_CSP( (uint32) txPkthdrRing[i] & 0x0fffffff );

			totalTxPkthdrRingCnt += txPkthdrRingCnt[i];
		}

		if (totalTxPkthdrRingCnt == 0) {
			ret = EINVAL;
			goto out;
		}

		/* Allocate MBuf descriptors of rings */
		rxMbufRingCnt = userNeedRxMbufRingCnt;

		if (userNeedRxMbufRingCnt == 0) {
			ret = EINVAL;
			goto out;
		}

		rxMbufRing = (uint32 *) UNCACHED_MALLOC((rxMbufRingCnt+RESERVERD_MBUF_RING_NUM) * sizeof(uint32*));
		ASSERT_CSP( (uint32) rxMbufRing & 0x0fffffff );

		/* Allocate pkthdr */
#ifdef _PKTHDR_CACHEABLE

#if 0 //defined(CONFIG_RTL_8198C)
		pPkthdrList_start = (struct rtl_pktHdr *) kmalloc(
		(totalRxPkthdrRingCnt + totalTxPkthdrRingCnt) * sizeof(struct rtl_pktHdr), GFP_ATOMIC);
		ASSERT_CSP( (uint32) pPkthdrList_start & 0x0fffffff );

		/* Allocate mbufs */
		pMbufList_start = (struct rtl_mBuf *) kmalloc(
		(rxMbufRingCnt+RESERVERD_MBUF_RING_NUM+ totalTxPkthdrRingCnt) * sizeof(struct rtl_mBuf), GFP_ATOMIC);
		ASSERT_CSP( (uint32) pMbufList_start & 0x0fffffff );

#else
		pPkthdrList_start = (struct rtl_pktHdr *) kmalloc(
		(totalRxPkthdrRingCnt+totalTxPkthdrRingCnt+1) * sizeof(struct rtl_pktHdr), GFP_ATOMIC);
		ASSERT_CSP( (uint32) pPkthdrList_start & 0x0fffffff );

		pPkthdrList_start = (struct rtl_pktHdr *)(((uint32) pPkthdrList_start + (cpu_dcache_line - 1))& ~(cpu_dcache_line - 1));
	
		/* Allocate mbufs */
		pMbufList_start = (struct rtl_mBuf *) kmalloc(
		(rxMbufRingCnt+RESERVERD_MBUF_RING_NUM+totalTxPkthdrRingCnt+1) * sizeof(struct rtl_mBuf), GFP_ATOMIC);
		ASSERT_CSP( (uint32) pMbufList_start & 0x0fffffff );
		
		pMbufList_start = (struct rtl_mBuf *)(((uint32) pMbufList_start + (cpu_dcache_line - 1))& ~(cpu_dcache_line - 1));
#endif

#else
		pPkthdrList_start = (struct rtl_pktHdr *) UNCACHED_MALLOC(
		(totalRxPkthdrRingCnt + totalTxPkthdrRingCnt) * sizeof(struct rtl_pktHdr));
		ASSERT_CSP( (uint32) pPkthdrList_start & 0x0fffffff );

		/* Allocate mbufs */
		pMbufList_start = (struct rtl_mBuf *) UNCACHED_MALLOC(
		(rxMbufRingCnt+RESERVERD_MBUF_RING_NUM+ totalTxPkthdrRingCnt) * sizeof(struct rtl_mBuf));
		ASSERT_CSP( (uint32) pMbufList_start & 0x0fffffff );
#endif
	}

	/* Initialize interrupt statistics counter */
	//rxPktCounter = txPktCounter = 0;

	/* Initialize index of Tx pkthdr descriptor */
	for (i=0;i<RTL865X_SWNIC_TXRING_HW_PKTDESC;i++)
	{
		currTxPkthdrDescIndex[i] = 0;
		txPktDoneDescIndex[i]=0;
	}

	pPkthdrList = pPkthdrList_start;
	pMbufList = pMbufList_start;

	/* Initialize Tx packet header descriptors */
	for (i = 0; i < RTL865X_SWNIC_TXRING_HW_PKTDESC; i++)
	{
		for (j = 0; j < txPkthdrRingCnt[i]; j++)
		{
			/* Dequeue pkthdr and mbuf */
			pPkthdr = pPkthdrList++;
			pMbuf = pMbufList++;

			bzero((void *) pPkthdr, sizeof(struct rtl_pktHdr));
			bzero((void *) pMbuf, sizeof(struct rtl_mBuf));

			pPkthdr->ph_mbuf = pMbuf;
			pPkthdr->ph_len = 0;
			pPkthdr->ph_flags = PKTHDR_USED | PKT_OUTGOING;
			pPkthdr->ph_type = PKTHDR_ETHERNET;
			pPkthdr->ph_portlist = 0;

			pMbuf->m_next = NULL;
			pMbuf->m_pkthdr = pPkthdr;
			pMbuf->m_flags = MBUF_USED | MBUF_EXT | MBUF_PKTHDR | MBUF_EOR;
			pMbuf->m_data = NULL;
			pMbuf->m_extbuf = NULL;
			pMbuf->m_extsize = 0;

			txPkthdrRing[i][j] = (int32) pPkthdr | DESC_RISC_OWNED;
			#ifdef CONFIG_RTL8196C_REVISION_B
			if (rtl_chip_version == RTL8196C_REVISION_A)
				txPkthdrRing_backup[i][j]=(int32) pPkthdr | DESC_RISC_OWNED;
			#endif
		}

#ifdef CONFIG_RTL_ENHANCE_RELIABILITY
		txPkthdrRing_base[i] = txPkthdrRing[i][0];
#endif

		if(txPkthdrRingCnt[i] > 0)
		{
			/* Set wrap bit of the last descriptor */
			txPkthdrRing[i][txPkthdrRingCnt[i] - 1] |= DESC_WRAP;
			#ifdef CONFIG_RTL8196C_REVISION_B
			if (rtl_chip_version == RTL8196C_REVISION_A)
				txPkthdrRing_backup[i][txPkthdrRingCnt[i] - 1] |= DESC_WRAP;
			#endif
		}

	}

	/* Fill Tx packet header FDP */
	REG32(CPUTPDCR0) = (uint32) txPkthdrRing[0];
	REG32(CPUTPDCR1) = (uint32) txPkthdrRing[1];

#if defined(CONFIG_RTL_819XD) || defined(CONFIG_RTL_8196E) || defined(CONFIG_RTL_8198C)
	REG32(CPUTPDCR2) = (uint32) txPkthdrRing[2];
	REG32(CPUTPDCR3) = (uint32) txPkthdrRing[3];
#endif

	/* Initialize Rx packet header descriptors */
	k = 0;

	for (i = 0; i < RTL865X_SWNIC_RXRING_HW_PKTDESC; i++)
	{
		for (j = 0; j < rxPkthdrRingCnt[i]; j++)
		{
			/* Dequeue pkthdr and mbuf */
			pPkthdr = pPkthdrList++;
			pMbuf = pMbufList++;

			bzero((void *) pPkthdr, sizeof(struct rtl_pktHdr));
			bzero((void *) pMbuf, sizeof(struct rtl_mBuf));

			/* Setup pkthdr and mbuf */
			pPkthdr->ph_mbuf = pMbuf;
			pPkthdr->ph_len = 0;
			pPkthdr->ph_flags = PKTHDR_USED | PKT_INCOMING;
			pPkthdr->ph_type = PKTHDR_ETHERNET;
			pPkthdr->ph_portlist = 0;
			pMbuf->m_next = NULL;
			pMbuf->m_pkthdr = pPkthdr;
			pMbuf->m_len = 0;
			pMbuf->m_flags = MBUF_USED | MBUF_EXT | MBUF_PKTHDR | MBUF_EOR;
			pMbuf->m_extsize = size_of_cluster;
			pMbuf->m_data = pMbuf->m_extbuf = alloc_rx_buf(&pPkthdr->ph_mbuf->skb, size_of_cluster);

			/* Setup descriptors */
			rxPkthdrRing[i][j] = (int32) pPkthdr | DESC_SWCORE_OWNED;
			rxMbufRing[k++] = (int32) pMbuf | DESC_SWCORE_OWNED;
		}

#ifdef CONFIG_RTL_ENHANCE_RELIABILITY
		rxPkthdrRing_base[i] = rxPkthdrRing[i][0] & ~DESC_OWNED_BIT;
#endif

		/* Initialize index of current Rx pkthdr descriptor */
		currRxPkthdrDescIndex[i] = 0;

		/* Initialize index of current Rx Mbuf descriptor */
		currRxMbufDescIndex = 0;

		/* Set wrap bit of the last descriptor */
		if(rxPkthdrRingCnt[i] > 0)
			rxPkthdrRing[i][rxPkthdrRingCnt[i] - 1] |= DESC_WRAP;

		#if	defined(DELAY_REFILL_ETH_RX_BUF)
		rxDescReadyForHwIndex[i] = 0;
		rxDescCrossBoundFlag[i] = 0;
		#endif
	}

#if defined(CONFIG_RTL_ENHANCE_RELIABILITY) && defined(CONFIG_RTL_8198C)
	rxMbufRing_base = rxMbufRing[0] & ~DESC_OWNED_BIT;
#endif

	rxMbufRing[rxMbufRingCnt - 1] |= DESC_WRAP;

	/* Fill Rx packet header FDP */
	REG32(CPURPDCR0) = (uint32) rxPkthdrRing[0];
	REG32(CPURPDCR1) = (uint32) rxPkthdrRing[1];
	REG32(CPURPDCR2) = (uint32) rxPkthdrRing[2];
	REG32(CPURPDCR3) = (uint32) rxPkthdrRing[3];
	REG32(CPURPDCR4) = (uint32) rxPkthdrRing[4];
	REG32(CPURPDCR5) = (uint32) rxPkthdrRing[5];

	REG32(CPURMDCR0) = (uint32) rxMbufRing;

out:
	//SMP_UNLOCK_ETH_RECV(flags);

#ifdef _PKTHDR_CACHEABLE
	_dma_cache_wback_inv((unsigned long)pPkthdrList_start, (totalRxPkthdrRingCnt + totalTxPkthdrRingCnt) * sizeof(struct rtl_pktHdr));
	_dma_cache_wback_inv((unsigned long)pMbufList_start, (rxMbufRingCnt+RESERVERD_MBUF_RING_NUM+ totalTxPkthdrRingCnt) * sizeof(struct rtl_mBuf));
#endif
	SMP_UNLOCK_ETH_RECV(flags);

	return ret;
}
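
In the _PKTHDR_CACHEABLE path above, the code allocates one extra element and rounds the returned pointer up to the next D-cache line, so the descriptor lists never share a line with adjacent heap data once they are written back and invalidated for DMA. A minimal sketch of that align-up idiom, assuming a power-of-two line size (cache_align_up is a hypothetical helper name, not part of the SDK):

#include <stdint.h>
#include <stdio.h>

/* Round p up to the next multiple of line; line must be a power of two.
 * This mirrors the (p + (line - 1)) & ~(line - 1) expression used above. */
static uintptr_t cache_align_up(uintptr_t p, uintptr_t line)
{
	return (p + (line - 1)) & ~(line - 1);
}

int main(void)
{
	/* e.g. with a 32-byte line, 0x8001 rounds up to 0x8020 */
	printf("%#lx\n", (unsigned long)cache_align_up(0x8001, 32));
	return 0;
}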
Example #6
static void set_prefetch_parameters(void)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
		clear_word_size = 8;
	else
		clear_word_size = 4;

	if (cpu_has_64bit_gp_regs)
		copy_word_size = 8;
	else
		copy_word_size = 4;

	/*
	 * The prefs used here use "streaming" hints, which cause the
	 * copied data to be kicked out of the cache sooner.  A page copy
	 * often ends up copying a lot more data than is commonly used, so
	 * this seems to make sense in terms of reducing cache pollution,
	 * but I've no real performance data to back this up.
	 */
	if (cpu_has_prefetch) {
		/*
		 * XXX: Most prefetch bias values in here are based on
		 * guesswork.
		 */
		cache_line_size = cpu_dcache_line_size();
		switch (current_cpu_type()) {
		case CPU_R5500:
		case CPU_TX49XX:
			/* These processors only support the Pref_Load. */
			pref_bias_copy_load = 256;
			break;

		case CPU_RM9000:
			/*
			 * As a workaround for erratum G105, which makes the
			 * PrepareForStore hint unusable, we fall back to
			 * StoreRetained on the RM9000.  Once it is known which
			 * versions of the RM9000 are affected, we'll be able
			 * to conditionalize this.
			 */

		case CPU_R10000:
		case CPU_R12000:
		case CPU_R14000:
			/*
			 * Those values have been experimentally tuned for an
			 * Origin 200.
			 */
			pref_bias_clear_store = 512;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 256;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_StoreStreamed;
			break;

		case CPU_SB1:
		case CPU_SB1A:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 128;
			pref_bias_copy_store = 128;
			/*
			 * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
			 * hints are broken.
			 */
			if (current_cpu_type() == CPU_SB1 &&
			    (current_cpu_data.processor_id & 0xff) < 0x02) {
				pref_src_mode = Pref_Load;
				pref_dst_mode = Pref_Store;
			} else {
				pref_src_mode = Pref_LoadStreamed;
				pref_dst_mode = Pref_StoreStreamed;
			}
			break;

		default:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 128;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_PrepareForStore;
			break;
		}
	} else {
		if (cpu_has_cache_cdex_s)
			cache_line_size = cpu_scache_line_size();
		else if (cpu_has_cache_cdex_p)
			cache_line_size = cpu_dcache_line_size();
	}
	/*
	 * Too much unrolling will overflow the available space in
	 * clear_space_array / copy_page_array.
	 */
	half_clear_loop_size = min(16 * clear_word_size,
				   max(cache_line_size >> 1,
				       4 * clear_word_size));
	half_copy_loop_size = min(16 * copy_word_size,
				  max(cache_line_size >> 1,
				      4 * copy_word_size));
}
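
The closing min(..., max(...)) clamps each half-loop to between 4 and 16 words while preferring half a cache line, which keeps the unrolled loops inside clear_space_array / copy_page_array. A worked sketch with assumed sizes (lmin/lmax are local stand-ins for the kernel's min/max macros):

#include <stdio.h>

static long lmin(long a, long b) { return a < b ? a : b; }
static long lmax(long a, long b) { return a > b ? a : b; }

int main(void)
{
	long clear_word_size = 8;   /* assumed: 64-bit GP registers */
	long cache_line_size = 32;  /* assumed line size */

	long half = lmin(16 * clear_word_size,
			 lmax(cache_line_size >> 1, 4 * clear_word_size));
	printf("half_clear_loop_size = %ld bytes\n", half);  /* prints 32 */
	return 0;
}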
Example #7
void r4k_cache_init(void)
{
    cache_init(dcache_size, cpu_dcache_line_size());
}