Example no. 1
static int aes_engine_desc_init(void)
{
	int i;
	u32 regVal;

	AES_Entry.AES_tx_ring0 = dma_alloc_coherent(NULL, NUM_AES_TX_DESC * sizeof(struct AES_txdesc), &AES_Entry.phy_aes_tx_ring0, GFP_KERNEL);
	if (!AES_Entry.AES_tx_ring0)
		goto err_cleanup;

	AES_Entry.AES_rx_ring0 = dma_alloc_coherent(NULL, NUM_AES_RX_DESC * sizeof(struct AES_rxdesc), &AES_Entry.phy_aes_rx_ring0, GFP_KERNEL);
	if (!AES_Entry.AES_rx_ring0)
		goto err_cleanup;

	for (i = 0; i < NUM_AES_TX_DESC; i++) {
		memset(&AES_Entry.AES_tx_ring0[i], 0, sizeof(struct AES_txdesc));
		AES_Entry.AES_tx_ring0[i].txd_info2 |= TX2_DMA_DONE;
	}

	for (i = 0; i < NUM_AES_RX_DESC; i++) {
		memset(&AES_Entry.AES_rx_ring0[i], 0, sizeof(struct AES_rxdesc));
	}

	AES_Entry.aes_tx_front_idx = 0;
	AES_Entry.aes_tx_rear_idx = NUM_AES_TX_DESC-1;

	AES_Entry.aes_rx_front_idx = 0;
	AES_Entry.aes_rx_rear_idx = NUM_AES_RX_DESC-1;

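	/* make the zeroed descriptors visible to the device before the
	 * ring registers are programmed below */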
	wmb();

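	/* clear AES_GLO_CFG except bits [11:4], presumably quiescing the
	 * engine during ring setup; the read-back posts the write */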
	regVal = sysRegRead(AES_GLO_CFG);
	regVal &= 0x00000ff0;
	sysRegWrite(AES_GLO_CFG, regVal);
	regVal = sysRegRead(AES_GLO_CFG);

	sysRegWrite(AES_TX_BASE_PTR0, phys_to_bus((u32)AES_Entry.phy_aes_tx_ring0));
	sysRegWrite(AES_TX_MAX_CNT0, cpu_to_le32((u32)NUM_AES_TX_DESC));
	sysRegWrite(AES_TX_CTX_IDX0, 0);
	sysRegWrite(AES_RST_CFG, AES_PST_DTX_IDX0);

	sysRegWrite(AES_RX_BASE_PTR0, phys_to_bus((u32)AES_Entry.phy_aes_rx_ring0));
	sysRegWrite(AES_RX_MAX_CNT0, cpu_to_le32((u32)NUM_AES_RX_DESC));
	sysRegWrite(AES_RX_CALC_IDX0, cpu_to_le32((u32)(NUM_AES_RX_DESC - 1)));
	regVal = sysRegRead(AES_RX_CALC_IDX0);
	sysRegWrite(AES_RST_CFG, AES_PST_DRX_IDX0);

	return 0;

err_cleanup:
	aes_engine_desc_free();
	return -ENOMEM;
}
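
The error path above depends on aes_engine_desc_free(), which is not part of this excerpt. A minimal sketch of a matching cleanup, assuming the same AES_Entry fields and that the structure is zero-initialized (static storage), so a ring that was never allocated is still NULL:

static void aes_engine_desc_free(void)
{
	if (AES_Entry.AES_tx_ring0) {
		dma_free_coherent(NULL, NUM_AES_TX_DESC * sizeof(struct AES_txdesc),
				  AES_Entry.AES_tx_ring0, AES_Entry.phy_aes_tx_ring0);
		AES_Entry.AES_tx_ring0 = NULL;
	}
	if (AES_Entry.AES_rx_ring0) {
		dma_free_coherent(NULL, NUM_AES_RX_DESC * sizeof(struct AES_rxdesc),
				  AES_Entry.AES_rx_ring0, AES_Entry.phy_aes_rx_ring0);
		AES_Entry.AES_rx_ring0 = NULL;
	}
}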
Example no. 2
static void
ramips_setup_dma(struct raeth_priv *re)
{
	ramips_fe_wr(phys_to_bus(re->phy_tx), RAMIPS_TX_BASE_PTR0);
	ramips_fe_wr(NUM_TX_DESC, RAMIPS_TX_MAX_CNT0);
	ramips_fe_wr(0, RAMIPS_TX_CTX_IDX0);
	ramips_fe_wr(RAMIPS_PST_DTX_IDX0, RAMIPS_PDMA_RST_CFG);

	ramips_fe_wr(phys_to_bus(re->phy_rx), RAMIPS_RX_BASE_PTR0);
	ramips_fe_wr(NUM_RX_DESC, RAMIPS_RX_MAX_CNT0);
	ramips_fe_wr((NUM_RX_DESC - 1), RAMIPS_RX_CALC_IDX0);
	ramips_fe_wr(RAMIPS_PST_DRX_IDX0, RAMIPS_PDMA_RST_CFG);
}
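
ramips_fe_wr() is the driver's MMIO write helper and is not shown in this excerpt. A plausible one-liner, assuming an ioremapped frontend base (ramips_fe_base is an illustrative name):

static void __iomem *ramips_fe_base;

static inline void
ramips_fe_wr(u32 val, unsigned reg)
{
	__raw_writel(val, ramips_fe_base + reg);
}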
Example no. 3
/* Function: ns8382x_send
 * Description: transmits a packet and waits for completion or timeout.
 * Returns: 1 on success, 0 on timeout or transmit error.  */
static int
ns8382x_send(struct eth_device *dev, volatile void *packet, int length)
{
	u32 i, status = 0;
	vu_long tx_stat = 0;

	/* Stop the transmitter */
	OUTL(dev, TxOff, ChipCmd);
#ifdef NS8382X_DEBUG
	printf("ns8382x_send: sending %d bytes\n", (int)length);
#endif

	/* set the transmit buffer descriptor and enable Transmit State Machine */
	txd.link = cpu_to_le32(0x0);
	txd.bufptr = cpu_to_le32(phys_to_bus((u32)packet));
	txd.extsts = cpu_to_le32(0x0);
	txd.cmdsts = cpu_to_le32(DescOwn | length);

	/* load Transmit Descriptor Register */
	OUTL(dev, phys_to_bus((u32)&txd), TxRingPtr);
#ifdef NS8382X_DEBUG
	printf("ns8382x_send: TX descriptor register loaded with: %#08X\n",
	       INL(dev, TxRingPtr));
	printf("\ttxd.link:%X\tbufp:%X\texsts:%X\tcmdsts:%X\n",
	       le32_to_cpu(txd.link), le32_to_cpu(txd.bufptr),
	       le32_to_cpu(txd.extsts), le32_to_cpu(txd.cmdsts));
#endif
	/* restart the transmitter */
	OUTL(dev, TxOn, ChipCmd);

	for (i = 0; (tx_stat = le32_to_cpu(txd.cmdsts)) & DescOwn; i++) {
		if (i >= TOUT_LOOP) {
			printf ("%s: tx error buffer not ready: txd.cmdsts %#lX\n",
			     dev->name, tx_stat);
			goto Done;
		}
	}

	if (!(tx_stat & DescPktOK)) {
		printf("ns8382x_send: Transmit error, Tx status %lX.\n", tx_stat);
		goto Done;
	}
#ifdef NS8382X_DEBUG
	printf("ns8382x_send: tx_stat: %#08X\n", tx_stat);
#endif

	status = 1;
      Done:
	return status;
}
Example no. 4
static void
ramips_setup_dma(struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);

	ramips_fe_wr(phys_to_bus(priv->phy_tx), RAMIPS_TX_BASE_PTR0);
	ramips_fe_wr(NUM_TX_DESC, RAMIPS_TX_MAX_CNT0);
	ramips_fe_wr(0, RAMIPS_TX_CTX_IDX0);
	ramips_fe_wr(RAMIPS_PST_DTX_IDX0, RAMIPS_PDMA_RST_CFG);

	ramips_fe_wr(phys_to_bus(priv->phy_rx), RAMIPS_RX_BASE_PTR0);
	ramips_fe_wr(NUM_RX_DESC, RAMIPS_RX_MAX_CNT0);
	ramips_fe_wr((NUM_RX_DESC - 1), RAMIPS_RX_CALC_IDX0);
	ramips_fe_wr(RAMIPS_PST_DRX_IDX0, RAMIPS_PDMA_RST_CFG);
}
Example no. 5
static void
ns8382x_init_rxd(struct eth_device *dev)
{
	int i;

	OUTL(dev, 0x0, RxRingPtrHi);

	cur_rx = 0;
	for (i = 0; i < NUM_RX_DESC; i++) {
		rxd[i].link = cpu_to_le32((i + 1 < NUM_RX_DESC) ?
					  (u32)&rxd[i + 1] : (u32)&rxd[0]);
		rxd[i].extsts = cpu_to_le32((u32)0x0);
		rxd[i].cmdsts = cpu_to_le32((u32)RX_BUF_SIZE);
		rxd[i].bufptr = cpu_to_le32((u32)&rxb[i * RX_BUF_SIZE]);
#ifdef NS8382X_DEBUG
		printf("ns8382x_init_rxd: rxd[%d]=%p link=%X cmdsts=%X bufptr=%X\n",
		       i, &rxd[i], le32_to_cpu(rxd[i].link),
		       le32_to_cpu(rxd[i].cmdsts), le32_to_cpu(rxd[i].bufptr));
#endif
	}
	OUTL(dev, phys_to_bus((u32)&rxd), RxRingPtr);

#ifdef NS8382X_DEBUG
	printf("ns8382x_init_rxd: RX descriptor register loaded with: %X\n",
	       INL(dev, RxRingPtr));
#endif
}
Example no. 6
static void rtl_reset(struct eth_device *dev)
{
	int i;

	outb(CmdReset, ioaddr + ChipCmd);

	cur_rx = 0;
	cur_tx = 0;

	/* Give the chip 10ms to finish the reset. */
	for (i = 0; i < 100; i++) {
		if ((inb(ioaddr + ChipCmd) & CmdReset) == 0)
			break;
		udelay(100);	/* wait 100us */
	}

	for (i = 0; i < ETH_ALEN; i++)
		outb(dev->enetaddr[i], ioaddr + MAC0 + i);

	/* Must enable Tx/Rx before setting transfer thresholds! */
	outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
	outl((RX_FIFO_THRESH<<13) | (RX_BUF_LEN_IDX<<11) | (RX_DMA_BURST<<8),
		ioaddr + RxConfig);		/* accept no frames yet!  */
	outl((TX_DMA_BURST<<8)|0x03000000, ioaddr + TxConfig);

	/* The Linux driver changes Config1 here to use a different LED pattern
	 * for half duplex or full/autodetect duplex (for full/autodetect, the
	 * outputs are TX/RX, Link10/100, FULL, while for half duplex it uses
	 * TX/RX, Link100, Link10).  This is messy, because it doesn't match
	 * the inscription on the mounting bracket.  It should not be changed
	 * from the configuration EEPROM default, because the card manufacturer
	 * should have set that to match the card.  */

#ifdef	DEBUG_RX
	printf("rx ring address is %X\n",(unsigned long)rx_ring);
#endif
	flush_cache((unsigned long)rx_ring, RX_BUF_LEN);
	outl(phys_to_bus((int)rx_ring), ioaddr + RxBuf);

	/* If we add multicast support, the MAR0 register would have to be
	 * initialized to 0xffffffffffffffff (two 32 bit accesses).  Etherboot
	 * only needs broadcast (for ARP/RARP/BOOTP/DHCP) and unicast.	*/
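	/* i.e., hypothetically (not enabled here):
	 *	outl(0xffffffff, ioaddr + MAR0 + 0);
	 *	outl(0xffffffff, ioaddr + MAR0 + 4);
	 */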

	outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);

	outl(rtl8139_rx_config, ioaddr + RxConfig);

	/* Start the chip's Tx and Rx process. */
	outl(0, ioaddr + RxMissed);

	/* set_rx_mode */
	set_rx_mode(dev);

	/* Disable all known interrupts by setting the interrupt mask. */
	outw(0, ioaddr + IntrMask);
}
Example no. 7
static int rtl_transmit(struct eth_device *dev, volatile void *packet, int length)
{
	unsigned int status;
	unsigned long txstatus;
	unsigned int len = length;
	int i = 0;

	ioaddr = dev->iobase;

	memcpy((char *)tx_buffer, (char *)packet, (int)length);

#ifdef	DEBUG_TX
	printf("sending %d bytes\n", len);
#endif

	/* Note: RTL8139 doesn't auto-pad, send minimum payload (another 4
	 * bytes are sent automatically for the FCS, totalling to 64 bytes). */
	while (len < ETH_ZLEN) {
		tx_buffer[len++] = '\0';
	}

	flush_cache((unsigned long)tx_buffer, len);
	outl(phys_to_bus((int)tx_buffer), ioaddr + TxAddr0 + cur_tx*4);
	outl(((TX_FIFO_THRESH<<11) & 0x003f0000) | len,
		ioaddr + TxStatus0 + cur_tx*4);

	do {
		status = inw(ioaddr + IntrStatus);
		/* Only acknowledge interrupt sources we can properly handle
		 * here - the RxOverflow/RxFIFOOver MUST be handled in the
		 * rtl_poll() function.	 */
		outw(status & (TxOK | TxErr | PCIErr), ioaddr + IntrStatus);
		if ((status & (TxOK | TxErr | PCIErr)) != 0) break;
		udelay(10);
	} while (i++ < RTL_TIMEOUT);

	txstatus = inl(ioaddr + TxStatus0 + cur_tx*4);

	if (status & TxOK) {
		cur_tx = (cur_tx + 1) % NUM_TX_DESC;
#ifdef	DEBUG_TX
		printf("tx done (%d ticks), status %hX txstatus %X\n",
			to-currticks(), status, txstatus);
#endif
		return length;
	} else {
#ifdef	DEBUG_TX
		printf("tx timeout/error (%d usecs), status %hX txstatus %X\n",
		       10*i, status, txstatus);
#endif
		rtl_reset(dev);

		return 0;
	}
}
Example no. 8
static void
ns8382x_init_txd(struct eth_device *dev)
{
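	/* single static TX descriptor: null link (no chaining), pointed at
	 * the driver's static bounce buffer txb */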
	txd.link = cpu_to_le32(0x0);
	txd.bufptr = cpu_to_le32((u32)&txb[0]);
	txd.cmdsts = cpu_to_le32(0x0);
	txd.extsts = cpu_to_le32(0x0);

	OUTL(dev, 0x0, TxRingPtrHi);
	OUTL(dev, phys_to_bus((u32)&txd), TxRingPtr);
#ifdef NS8382X_DEBUG
	printf("ns8382x_init_txd: TX descriptor register loaded with: %#08X (&txd: %p)\n",
	       INL(dev, TxRingPtr), &txd);
#endif
}
Example no. 9
/*
 * Set up persistent memory. If we were given values, we patch the array of
 * resources. Otherwise, persistent memory may be allocated anywhere at all.
 */
static void __init pmem_setup_resource(void)
{
	struct resource *resource;
	resource = asic_resource_get("DiagPersistentMemory");

	if (resource && pmemaddr && pmemlen) {
		/* The address provided by bootloader is in kseg0. Convert to
		 * a bus address. */
		resource->start = phys_to_bus(pmemaddr - 0x80000000);
		resource->end = resource->start + pmemlen - 1;

		pr_info("persistent memory: start=0x%x  end=0x%x\n",
			resource->start, resource->end);
	}
}
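
phys_to_bus() here (and its inverse bus_to_phys() in Example no. 16) translates a CPU physical address into the address space the DMA engine or peripheral bus sees. The mapping is platform-specific; a minimal fixed-offset sketch, with BUS_OFFSET as a made-up placeholder:

#define BUS_OFFSET	0x00000000	/* assumption: platform-specific constant */

static inline u32 phys_to_bus(u32 pa)
{
	return pa + BUS_OFFSET;		/* CPU physical -> device bus address */
}

static inline u32 bus_to_phys(u32 ba)
{
	return ba - BUS_OFFSET;		/* device bus address -> CPU physical */
}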
Example no. 10
int chunk_msg(struct usb_device *dev, unsigned long pipe, int *pid, int in,
	      void *buffer, int len, bool ignore_ack)
{
	struct dwc2_hc_regs *hc_regs = &regs->hc_regs[DWC2_HC_CHANNEL];
	int devnum = usb_pipedevice(pipe);
	int ep = usb_pipeendpoint(pipe);
	int max = usb_maxpacket(dev, pipe);
	int eptype = dwc2_eptype[usb_pipetype(pipe)];
	int done = 0;
	int ret = 0;
	uint32_t sub;
	uint32_t xfer_len;
	uint32_t num_packets;
	int stop_transfer = 0;

	debug("%s: msg: pipe %lx pid %d in %d len %d\n", __func__, pipe, *pid,
	      in, len);

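	/* split the transfer into channel-sized chunks; each pass programs
	 * hctsiz/hcdma, enables the channel, then waits for it to halt */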
	do {
		/* Initialize channel */
		dwc_otg_hc_init(regs, DWC2_HC_CHANNEL, dev, devnum, ep, in,
				eptype, max);

		xfer_len = len - done;
		if (xfer_len > CONFIG_DWC2_MAX_TRANSFER_SIZE)
			xfer_len = CONFIG_DWC2_MAX_TRANSFER_SIZE - max + 1;
		if (xfer_len > DWC2_DATA_BUF_SIZE)
			xfer_len = DWC2_DATA_BUF_SIZE - max + 1;

		/* Make sure that xfer_len is a multiple of max packet size. */
		if (xfer_len > 0) {
			num_packets = (xfer_len + max - 1) / max;
			if (num_packets > CONFIG_DWC2_MAX_PACKET_COUNT) {
				num_packets = CONFIG_DWC2_MAX_PACKET_COUNT;
				xfer_len = num_packets * max;
			}
		} else {
			num_packets = 1;
		}

		if (in)
			xfer_len = num_packets * max;

		debug("%s: chunk: pid %d xfer_len %u pkts %u\n", __func__,
		      *pid, xfer_len, num_packets);

		writel((xfer_len << DWC2_HCTSIZ_XFERSIZE_OFFSET) |
		       (num_packets << DWC2_HCTSIZ_PKTCNT_OFFSET) |
		       (*pid << DWC2_HCTSIZ_PID_OFFSET),
		       &hc_regs->hctsiz);

		if (!in)
			memcpy(aligned_buffer, (char *)buffer + done, xfer_len);

		writel(phys_to_bus((unsigned long)aligned_buffer),
		       &hc_regs->hcdma);

		/* Set host channel enable after all other setup is complete. */
		clrsetbits_le32(&hc_regs->hcchar, DWC2_HCCHAR_MULTICNT_MASK |
				DWC2_HCCHAR_CHEN | DWC2_HCCHAR_CHDIS,
				(1 << DWC2_HCCHAR_MULTICNT_OFFSET) |
				DWC2_HCCHAR_CHEN);

		ret = wait_for_chhltd(&sub, pid, ignore_ack);
		if (ret)
			break;

		if (in) {
			xfer_len -= sub;
			memcpy(buffer + done, aligned_buffer, xfer_len);
			if (sub)
				stop_transfer = 1;
		}

		done += xfer_len;

	} while ((done < len) && !stop_transfer);

	writel(0, &hc_regs->hcintmsk);
	writel(0xFFFFFFFF, &hc_regs->hcint);

	dev->status = 0;
	dev->act_len = done;

	return ret;
}
Example no. 11
/* must be spinlock protected */
static void
fe_dma_init(END_DEVICE *ei_local)
{
	u32 i, txd_idx, regVal;
	dma_addr_t rxd_buf_phy, fq_tail_phy, txd_free_phy;

	/* init QDMA HW TX pool */
	for (i = 0; i < NUM_QDMA_PAGE; i++) {
		struct QDMA_txdesc *txd = &ei_local->fq_head[i];
		dma_addr_t fq_buf_phy, fq_ndp_phy;
		
		fq_buf_phy = ei_local->fq_head_page_phy + (i * QDMA_PAGE_SIZE);
		if (i < (NUM_QDMA_PAGE-1))
			fq_ndp_phy = ei_local->fq_head_phy + ((i+1) * sizeof(struct QDMA_txdesc));
		else
			fq_ndp_phy = ei_local->fq_head_phy;
		
		ACCESS_ONCE(txd->txd_info1) = (u32)fq_buf_phy;
		ACCESS_ONCE(txd->txd_info2) = (u32)fq_ndp_phy;
		ACCESS_ONCE(txd->txd_info3) = TX3_QDMA_SDL(QDMA_PAGE_SIZE);
		ACCESS_ONCE(txd->txd_info4) = 0;
	}

	fq_tail_phy = ei_local->fq_head_phy + ((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc));

	/* init QDMA SW TX pool */
	for (i = 0; i < NUM_TX_DESC; i++) {
		struct QDMA_txdesc *txd = &ei_local->txd_pool[i];
		
		ei_local->txd_buff[i] = NULL;
		ei_local->txd_pool_info[i] = i + 1;
		
		ACCESS_ONCE(txd->txd_info1) = 0;
		ACCESS_ONCE(txd->txd_info2) = 0;
		ACCESS_ONCE(txd->txd_info3) = TX3_QDMA_LS | TX3_QDMA_OWN;
		ACCESS_ONCE(txd->txd_info4) = 0;
	}

	ei_local->txd_pool_free_head = 0;
	ei_local->txd_pool_free_tail = NUM_TX_DESC - 1;
	ei_local->txd_pool_free_num = NUM_TX_DESC;

	/* init PDMA (or QDMA) RX ring */
	for (i = 0; i < NUM_RX_DESC; i++) {
		struct PDMA_rxdesc *rxd = &ei_local->rxd_ring[i];
		
		rxd_buf_phy = dma_map_single(NULL, ei_local->rxd_buff[i]->data, MAX_RX_LENGTH + NET_IP_ALIGN, DMA_FROM_DEVICE);
		
		ACCESS_ONCE(rxd->rxd_info1) = (u32)rxd_buf_phy;
		ACCESS_ONCE(rxd->rxd_info2) = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
		ACCESS_ONCE(rxd->rxd_info3) = 0;
		ACCESS_ONCE(rxd->rxd_info4) = 0;
	}

#if !defined (CONFIG_RAETH_QDMATX_QDMARX)
	/* init QDMA RX stub ring (map one buffer to all RXD) */
	rxd_buf_phy = dma_map_single(NULL, ei_local->qrx_buff->data, MAX_RX_LENGTH + NET_IP_ALIGN, DMA_FROM_DEVICE);

	for (i = 0; i < NUM_QRX_DESC; i++) {
		struct PDMA_rxdesc *rxd = &ei_local->qrx_ring[i];
		
		ACCESS_ONCE(rxd->rxd_info1) = (u32)rxd_buf_phy;
		ACCESS_ONCE(rxd->rxd_info2) = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
		ACCESS_ONCE(rxd->rxd_info3) = 0;
		ACCESS_ONCE(rxd->rxd_info4) = 0;
	}
#endif

	wmb();

	/* clear QDMA */
	regVal = sysRegRead(QDMA_GLO_CFG);
	regVal &= ~(CSR_CLKGATE | RX_DMA_EN | TX_DMA_EN);
	sysRegWrite(QDMA_GLO_CFG, regVal);

	/* clear PDMA */
	regVal = sysRegRead(PDMA_GLO_CFG);
	regVal &= ~(CSR_CLKGATE | RX_DMA_EN | TX_DMA_EN);
	sysRegWrite(PDMA_GLO_CFG, regVal);

	/* PPE QoS -> QDMA HW TX pool */
	sysRegWrite(QDMA_FQ_HEAD, (u32)ei_local->fq_head_phy);
	sysRegWrite(QDMA_FQ_TAIL, (u32)fq_tail_phy);
	sysRegWrite(QDMA_FQ_CNT,  cpu_to_le32((NUM_TX_DESC << 16) | NUM_QDMA_PAGE));
	sysRegWrite(QDMA_FQ_BLEN, cpu_to_le32(QDMA_PAGE_SIZE << 16));

#if defined (CONFIG_RAETH_QDMATX_QDMARX)
	/* GDMA1/2 -> QDMA RX ring #0 */
	sysRegWrite(QRX_BASE_PTR0, phys_to_bus((u32)ei_local->rxd_ring_phy));
	sysRegWrite(QRX_MAX_CNT0, cpu_to_le32(NUM_RX_DESC));
	sysRegWrite(QRX_CRX_IDX0, cpu_to_le32(NUM_RX_DESC - 1));
#else
	/* GDMA1/2 -> PDMA RX ring #0 */
	sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32)ei_local->rxd_ring_phy));
	sysRegWrite(RX_MAX_CNT0,  cpu_to_le32(NUM_RX_DESC));
	sysRegWrite(RX_CALC_IDX0, cpu_to_le32(NUM_RX_DESC - 1));
	sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);

	/* GDMA1/2 -> QDMA RX stub ring #0 (not used, but RX DMA started) */
	sysRegWrite(QRX_BASE_PTR0, phys_to_bus((u32)ei_local->qrx_ring_phy));
	sysRegWrite(QRX_MAX_CNT0, cpu_to_le32(NUM_QRX_DESC));
	sysRegWrite(QRX_CRX_IDX0, cpu_to_le32(NUM_QRX_DESC-1));
#endif
	sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0);

	/* Reserve NUM_PQ_RESV TXD for each of the 16 physical queues */
	for (i = 0; i < 16; i++)
		sysRegWrite(QTX_CFG_0 + 0x10*i, ((NUM_PQ_RESV << 8) | NUM_PQ_RESV));

	/* get free txd from pool for RLS (release) */
	txd_idx = get_free_txd(ei_local);
	txd_free_phy = get_txd_ptr_phy(ei_local, txd_idx);
	sysRegWrite(QTX_CRX_PTR, (u32)txd_free_phy);
	sysRegWrite(QTX_DRX_PTR, (u32)txd_free_phy);

	/* get free txd from pool for FWD (forward) */
	txd_idx = get_free_txd(ei_local);
	txd_free_phy = get_txd_ptr_phy(ei_local, txd_idx);
	ei_local->txd_last_idx = txd_idx;
	sysRegWrite(QTX_CTX_PTR, (u32)txd_free_phy);
	sysRegWrite(QTX_DTX_PTR, (u32)txd_free_phy);

	/* reset TX indexes for queues 0~15 */
	sysRegWrite(QDMA_RST_CFG, 0xffff);

	/* enable random early drop and set drop threshold automatically */
	sysRegWrite(QDMA_FC_THRES, 0x174444);
	sysRegWrite(QDMA_HRED2, 0x0);

	/* config DLY interrupt */
	sysRegWrite(DLY_INT_CFG, FE_DLY_INIT_VALUE);
	sysRegWrite(QDMA_DELAY_INT, FE_DLY_INIT_VALUE);
}
Example no. 12
int bcm2835_mbox_call_prop(u32 chan, struct bcm2835_mbox_hdr *buffer)
{
	int ret;
	u32 rbuffer;
	struct bcm2835_mbox_tag_hdr *tag;
	int tag_index;

#ifdef DEBUG
	printf("mbox: TX buffer\n");
	dump_buf(buffer);
#endif

	flush_dcache_range((unsigned long)buffer,
			   (unsigned long)((void *)buffer +
			   roundup(buffer->buf_size, ARCH_DMA_MINALIGN)));

	ret = bcm2835_mbox_call_raw(chan,
				    phys_to_bus((unsigned long)buffer),
				    &rbuffer);
	if (ret)
		return ret;

	invalidate_dcache_range((unsigned long)buffer,
				(unsigned long)((void *)buffer +
				roundup(buffer->buf_size, ARCH_DMA_MINALIGN)));

	if (rbuffer != phys_to_bus((unsigned long)buffer)) {
		printf("mbox: Response buffer mismatch\n");
		return -1;
	}

#ifdef DEBUG
	printf("mbox: RX buffer\n");
	dump_buf(buffer);
#endif

	/* Validate overall response status */

	if (buffer->code != BCM2835_MBOX_RESP_CODE_SUCCESS) {
		printf("mbox: Header response code invalid\n");
		return -1;
	}

	/* Validate each tag's response status */

	tag = (void *)(buffer + 1);
	tag_index = 0;
	while (tag->tag) {
		if (!(tag->val_len & BCM2835_MBOX_TAG_VAL_LEN_RESPONSE)) {
			printf("mbox: Tag %d missing val_len response bit\n",
				tag_index);
			return -1;
		}
		/*
		 * Clear the response bit so clients can just look right at the
		 * length field without extra processing
		 */
		tag->val_len &= ~BCM2835_MBOX_TAG_VAL_LEN_RESPONSE;
		tag = (void *)(((u8 *)tag) + sizeof(*tag) + tag->val_buf_size);
		tag_index++;
	}

	return 0;
}
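
A typical caller builds the property buffer as a header, one or more tags, and a zero end tag, then hands the header to bcm2835_mbox_call_prop() on the property channel. A sketch in the style of U-Boot's bcm283x helpers (assuming the BCM2835_MBOX_INIT_HDR/BCM2835_MBOX_INIT_TAG macros and the msg_get_arm_mem layout from mach/mbox.h):

	ALLOC_CACHE_ALIGN_BUFFER(struct msg_get_arm_mem, msg, 1);
	int ret;

	BCM2835_MBOX_INIT_HDR(msg);
	BCM2835_MBOX_INIT_TAG(&msg->get_arm_mem, GET_ARM_MEMORY);

	ret = bcm2835_mbox_call_prop(BCM2835_MBOX_PROP_CHAN, &msg->hdr);
	if (ret)
		return ret;
	/* on success, msg->get_arm_mem.body.resp holds the ARM memory base/size */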
Example no. 13
static int HSDMA_init(void)
{
	int i;
	unsigned int regVal;

	printk("%s\n", __FUNCTION__);

	/* wait until both DMA engines are idle */
	while (1) {
		regVal = sysRegRead(HSDMA_GLO_CFG);
		if (regVal & HSDMA_RX_DMA_BUSY) {
			printk("\n  RX_DMA_BUSY !!! ");
			continue;
		}
		if (regVal & HSDMA_TX_DMA_BUSY) {
			printk("\n  TX_DMA_BUSY !!! ");
			continue;
		}
		break;
	}

	/* init TX ring0 */
	HSDMA_Entry.HSDMA_tx_ring0 = pci_alloc_consistent(NULL, NUM_HSDMA_TX_DESC * sizeof(struct HSDMA_txdesc), &HSDMA_Entry.phy_hsdma_tx_ring0);
	if (HSDMA_Entry.HSDMA_tx_ring0 == NULL) {
		printk("hsdma tx ring allocation failed!\n");
		return 0;
	}
	printk("\n hsdma_phy_tx_ring0 = 0x%08x, hsdma_tx_ring0 = 0x%p\n", HSDMA_Entry.phy_hsdma_tx_ring0, HSDMA_Entry.HSDMA_tx_ring0);

	for (i = 0; i < NUM_HSDMA_TX_DESC; i++) {
		memset(&HSDMA_Entry.HSDMA_tx_ring0[i], 0, sizeof(struct HSDMA_txdesc));
		HSDMA_Entry.HSDMA_tx_ring0[i].hsdma_txd_info2.LS0_bit = 1;
		HSDMA_Entry.HSDMA_tx_ring0[i].hsdma_txd_info2.DDONE_bit = 1;
	}

	/* init RX ring0 */
	HSDMA_Entry.HSDMA_rx_ring0 = pci_alloc_consistent(NULL, NUM_HSDMA_RX_DESC * sizeof(struct HSDMA_rxdesc), &HSDMA_Entry.phy_hsdma_rx_ring0);
	if (HSDMA_Entry.HSDMA_rx_ring0 == NULL) {
		printk("hsdma rx ring allocation failed!\n");
		return 0;
	}
	printk("\n hsdma_phy_rx_ring0 = 0x%08x, hsdma_rx_ring0 = 0x%p\n", HSDMA_Entry.phy_hsdma_rx_ring0, HSDMA_Entry.HSDMA_rx_ring0);

	for (i = 0; i < NUM_HSDMA_RX_DESC; i++) {
		memset(&HSDMA_Entry.HSDMA_rx_ring0[i], 0, sizeof(struct HSDMA_rxdesc));
		HSDMA_Entry.HSDMA_rx_ring0[i].hsdma_rxd_info2.DDONE_bit = 0;
		HSDMA_Entry.HSDMA_rx_ring0[i].hsdma_rxd_info2.LS0 = 0;
	}

	/* HSDMA_GLO_CFG: keep only bits [7:0] while the rings are programmed */
	regVal = sysRegRead(HSDMA_GLO_CFG);
	regVal &= 0x000000FF;
	sysRegWrite(HSDMA_GLO_CFG, regVal);
	regVal = sysRegRead(HSDMA_GLO_CFG);

	/* Tell the adapter where the TX/RX rings are located. */
	/* TX0 */
	sysRegWrite(HSDMA_TX_BASE_PTR0, phys_to_bus((u32)HSDMA_Entry.phy_hsdma_tx_ring0));
	sysRegWrite(HSDMA_TX_MAX_CNT0, cpu_to_le32((u32)NUM_HSDMA_TX_DESC));
	sysRegWrite(HSDMA_TX_CTX_IDX0, 0);
	hsdma_tx_cpu_owner_idx0 = 0;
	sysRegWrite(HSDMA_RST_CFG, HSDMA_PST_DTX_IDX0);
	printk("TX_CTX_IDX0 = %x\n", sysRegRead(HSDMA_TX_CTX_IDX0));
	printk("TX_DTX_IDX0 = %x\n", sysRegRead(HSDMA_TX_DTX_IDX0));

	/* RX0 */
	sysRegWrite(HSDMA_RX_BASE_PTR0, phys_to_bus((u32)HSDMA_Entry.phy_hsdma_rx_ring0));
	sysRegWrite(HSDMA_RX_MAX_CNT0, cpu_to_le32((u32)NUM_HSDMA_RX_DESC));
	sysRegWrite(HSDMA_RX_CALC_IDX0, cpu_to_le32((u32)(NUM_HSDMA_RX_DESC - 1)));
	hsdma_rx_calc_idx0 = hsdma_rx_dma_owner_idx0 = sysRegRead(HSDMA_RX_CALC_IDX0);
	sysRegWrite(HSDMA_RST_CFG, HSDMA_PST_DRX_IDX0);
	printk("RX_CRX_IDX0 = %x\n", sysRegRead(HSDMA_RX_CALC_IDX0));
	printk("RX_DRX_IDX0 = %x\n", sysRegRead(HSDMA_RX_DRX_IDX0));

	set_fe_HSDMA_glo_cfg();
	printk("HSDMA_GLO_CFG = %x\n", sysRegRead(HSDMA_GLO_CFG));

	return 1;
}
Example no. 14
int fe_tx_desc_init(struct net_device *dev, unsigned int ring_no, unsigned int qn, unsigned int pn)
{
	END_DEVICE* ei_local = netdev_priv(dev);
	struct PDMA_txdesc *tx_desc;
	unsigned int tx_cpu_owner_idx = 0;
	int i;
	unsigned int phy_tx_ring;

	/* sanity check */
	if (ring_no > 3) {
		printk("%s : ring_no - %d, must be less than 4\n", dev->name, ring_no);
		return 0;
	}

	if (pn > 2) {
		printk("%s : pn - %d, must not exceed 2\n", dev->name, pn);
		return 0;
	}

	tx_desc = pci_alloc_consistent(NULL, NUM_TX_DESC * sizeof(struct PDMA_txdesc), &phy_tx_ring);
	if (tx_desc == NULL) {
		printk("tx desc allocation failed!\n");
		return 0;
	}
	ei_local->tx_cpu_owner_idx0 = tx_cpu_owner_idx;

	switch (ring_no) {
	case 0:
		ei_local->tx_ring0 = tx_desc;
		ei_local->phy_tx_ring0 = phy_tx_ring;
		break;
	case 1:
		ei_local->tx_ring1 = tx_desc;
		ei_local->phy_tx_ring1 = phy_tx_ring;
		break;
	case 2:
		ei_local->tx_ring2 = tx_desc;
		ei_local->phy_tx_ring2 = phy_tx_ring;
		break;
	case 3:
		ei_local->tx_ring3 = tx_desc;
		ei_local->phy_tx_ring3 = phy_tx_ring;
		break;
	}

	for( i = 0; i < NUM_TX_DESC; i++) {
		memset( &tx_desc[i], 0, sizeof(struct PDMA_txdesc));
		tx_desc[i].txd_info2.LS0_bit = 1;
		tx_desc[i].txd_info2.DDONE_bit = 1;
		tx_desc[i].txd_info4.PN = pn;
		tx_desc[i].txd_info4.QN = qn;
	}

	switch (ring_no) {
	case 0:
		*(unsigned long *)TX_BASE_PTR0 = phys_to_bus((u32)phy_tx_ring);
		*(unsigned long *)TX_MAX_CNT0 = cpu_to_le32((u32)NUM_TX_DESC);
		*(unsigned long *)TX_CTX_IDX0 = cpu_to_le32((u32)tx_cpu_owner_idx);
		sysRegWrite(PDMA_RST_CFG, RT2880_PST_DTX_IDX0);
		break;
	case 1:
		*(unsigned long *)TX_BASE_PTR1 = phys_to_bus((u32)phy_tx_ring);
		*(unsigned long *)TX_MAX_CNT1 = cpu_to_le32((u32)NUM_TX_DESC);
		*(unsigned long *)TX_CTX_IDX1 = cpu_to_le32((u32)tx_cpu_owner_idx);
		sysRegWrite(PDMA_RST_CFG, RT2880_PST_DTX_IDX1);
		break;
	case 2:
		*(unsigned long *)TX_BASE_PTR2 = phys_to_bus((u32)phy_tx_ring);
		*(unsigned long *)TX_MAX_CNT2 = cpu_to_le32((u32)NUM_TX_DESC);
		*(unsigned long *)TX_CTX_IDX2 = cpu_to_le32((u32)tx_cpu_owner_idx);
		sysRegWrite(PDMA_RST_CFG, RT2880_PST_DTX_IDX2);
		break;
	case 3:
		*(unsigned long *)TX_BASE_PTR3 = phys_to_bus((u32)phy_tx_ring);
		*(unsigned long *)TX_MAX_CNT3 = cpu_to_le32((u32)NUM_TX_DESC);
		*(unsigned long *)TX_CTX_IDX3 = cpu_to_le32((u32)tx_cpu_owner_idx);
		sysRegWrite(PDMA_RST_CFG, RT2880_PST_DTX_IDX3);
		break;
	}
	return 1;
}
Example no. 15
/* must be spinlock protected */
static void
fe_dma_init(END_DEVICE *ei_local)
{
	int i;
	u32 regVal;

	/* init PDMA TX ring */
	for (i = 0; i < NUM_TX_DESC; i++) {
		struct PDMA_txdesc *txd = &ei_local->txd_ring[i];
		
		ei_local->txd_buff[i] = NULL;
		
		ACCESS_ONCE(txd->txd_info1) = 0;
		ACCESS_ONCE(txd->txd_info2) = TX2_DMA_DONE;
#if defined (RAETH_PDMA_V2)
		ACCESS_ONCE(txd->txd_info4) = 0;
#else
		ACCESS_ONCE(txd->txd_info4) = TX4_DMA_QN(3);
#endif
		ACCESS_ONCE(txd->txd_info3) = 0;
	}

	/* init PDMA RX ring */
	for (i = 0; i < NUM_RX_DESC; i++) {
		struct PDMA_rxdesc *rxd = &ei_local->rxd_ring[i];
#if defined (RAETH_PDMA_V2)
		ACCESS_ONCE(rxd->rxd_info1) = (u32)dma_map_single(NULL, ei_local->rxd_buff[i]->data, MAX_RX_LENGTH + NET_IP_ALIGN, DMA_FROM_DEVICE);
		ACCESS_ONCE(rxd->rxd_info2) = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
#else
		ACCESS_ONCE(rxd->rxd_info1) = (u32)dma_map_single(NULL, ei_local->rxd_buff[i]->data, MAX_RX_LENGTH, DMA_FROM_DEVICE);
		ACCESS_ONCE(rxd->rxd_info2) = RX2_DMA_LS0;
#endif
		ACCESS_ONCE(rxd->rxd_info3) = 0;
		ACCESS_ONCE(rxd->rxd_info4) = 0;
	}

	wmb();

	/* clear PDMA */
	regVal = sysRegRead(PDMA_GLO_CFG);
	regVal &= ~(CSR_CLKGATE | RX_DMA_EN | TX_DMA_EN);
	sysRegWrite(PDMA_GLO_CFG, regVal);

	/* GDMA1/2 <- TX Ring #0 */
	sysRegWrite(TX_BASE_PTR0, phys_to_bus((u32)ei_local->txd_ring_phy));
	sysRegWrite(TX_MAX_CNT0, cpu_to_le32(NUM_TX_DESC));
	sysRegWrite(TX_CTX_IDX0, 0);
	sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX0);
	ei_local->txd_last_idx = le32_to_cpu(sysRegRead(TX_CTX_IDX0));
	ei_local->txd_free_idx = ei_local->txd_last_idx;

	/* GDMA1/2 -> RX Ring #0 */
	sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32)ei_local->rxd_ring_phy));
	sysRegWrite(RX_MAX_CNT0, cpu_to_le32(NUM_RX_DESC));
	sysRegWrite(RX_CALC_IDX0, cpu_to_le32(NUM_RX_DESC - 1));
	sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);

	/* only the following chipsets need this */
#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3883)
	/* set the 1us timer count in units of clock cycles */
	regVal = sysRegRead(FE_GLO_CFG);
	regVal &= ~(0xff << 8);	/* clear bits 8-15 */
	regVal |= ((get_surfboard_sysclk() / 1000000) << 8);
	sysRegWrite(FE_GLO_CFG, regVal);
#endif

	/* config DLY interrupt */
	sysRegWrite(DLY_INT_CFG, FE_DLY_INIT_VALUE);
}
Example no. 16
/*
 * Allocates/reserves the Platform memory resources early in the boot process.
 * This ignores any resources that are designated IORESOURCE_IO
 */
void __init platform_alloc_bootmem(void)
{
	int i;
	int total = 0;

	/* Get persistent memory data from the command line before allocating
	 * resources. This needs to happen before normal command line parsing
	 * has been done. */
	pmem_setup_resource();

	/* Loop through looking for resources that want a particular address */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		int size = gp_resources[i].end - gp_resources[i].start + 1;
		if ((gp_resources[i].start != 0) &&
			((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
			reserve_bootmem(bus_to_phys(gp_resources[i].start),
				size, 0);
			total += size;
			pr_info("reserve resource %s at %08x (%u bytes)\n",
				gp_resources[i].name, gp_resources[i].start,
				size);
		}
	}

	/* Loop through assigning addresses for those that are left */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		int size = gp_resources[i].end - gp_resources[i].start + 1;
		if ((gp_resources[i].start == 0) &&
			((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
			void *mem = alloc_bootmem_pages(size);

			if (mem == NULL)
				pr_err("Unable to allocate bootmem pages "
					"for %s\n", gp_resources[i].name);

			else {
				gp_resources[i].start =
					phys_to_bus(virt_to_phys(mem));
				gp_resources[i].end =
					gp_resources[i].start + size - 1;
				total += size;
				pr_info("allocate resource %s at %08x "
						"(%u bytes)\n",
					gp_resources[i].name,
					gp_resources[i].start, size);
			}
		}
	}

	pr_info("Total Platform driver memory allocation: 0x%08x\n", total);

	/* indicate resources that are platform I/O related */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		if ((gp_resources[i].start != 0) &&
			((gp_resources[i].flags & IORESOURCE_IO) != 0)) {
			pr_info("reserved platform resource %s at %08x\n",
				gp_resources[i].name, gp_resources[i].start);
		}
	}
}