Example no. 1
0
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int port = eth->port, i, ret = 0;
	u32 tmp_addr;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be TX_DESC_SIZE bytes aligned
	 */
	port_info->tx_desc_malloc = malloc(NUM_TX_DESC *
						 sizeof(struct tx_desc_s) +
						 TX_DESC_SIZE - 1);
	if (!port_info->tx_desc_malloc) {
		printf(SHETHER_NAME ": malloc failed\n");
		ret = -ENOMEM;
		goto err;
	}

	tmp_addr = (u32) (((int)port_info->tx_desc_malloc + TX_DESC_SIZE - 1) &
			  ~(TX_DESC_SIZE - 1));
	flush_cache_wback(tmp_addr, NUM_TX_DESC * sizeof(struct tx_desc_s));
	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base = (struct tx_desc_s *)ADDR_TO_P2(tmp_addr);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/* Point the controller to the tx descriptor list. Must use physical
	   addresses */
	sh_eth_write(eth, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(eth, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(eth, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(eth, 0x01, TDFFR); /* Last descriptor bit */
#endif

err:
	return ret;
}
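The tx descriptor table above is carved out of a plain malloc() block by over-allocating TX_DESC_SIZE - 1 extra bytes and rounding the start address up to the next TX_DESC_SIZE boundary. Below is a minimal, self-contained sketch of that alignment idiom in hosted C; align_up, desc_size and num_desc are illustrative names only, and the idiom assumes the alignment is a power of two.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical helper: round an address up to the next multiple of
 * `align`, which must be a power of two. This mirrors the
 * "(addr + SIZE - 1) & ~(SIZE - 1)" expression used above.
 */
static uintptr_t align_up(uintptr_t addr, uintptr_t align)
{
	return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
	const size_t desc_size = 16;	/* stand-in for TX_DESC_SIZE */
	const size_t num_desc = 8;	/* stand-in for NUM_TX_DESC  */

	/* Over-allocate by (alignment - 1) so the aligned block still fits. */
	void *raw = malloc(num_desc * desc_size + desc_size - 1);
	if (!raw)
		return 1;

	uintptr_t aligned = align_up((uintptr_t)raw, desc_size);
	assert(aligned % desc_size == 0);
	assert(aligned - (uintptr_t)raw < desc_size);

	printf("raw=%p aligned=%p\n", raw, (void *)aligned);
	free(raw);	/* always free the original pointer, not the aligned one */
	return 0;
}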
Example no. 2
0
int sh_eth_recv(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;
	int port = eth->port, len = 0;
	struct sh_eth_info *port_info = &eth->port_info[port];
	uchar *packet;

	/* Check if the rx descriptor is ready */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	if (!(port_info->rx_desc_cur->rd0 & RD_RACT)) {
		/* Check for errors */
		if (!(port_info->rx_desc_cur->rd0 & RD_RFE)) {
			len = port_info->rx_desc_cur->rd1 & 0xffff;
			packet = (uchar *)
				ADDR_TO_P2(port_info->rx_desc_cur->rd2);
			invalidate_cache(packet, len);
			net_process_received_packet(packet, len);
		}

		/* Make current descriptor available again */
		if (port_info->rx_desc_cur->rd0 & RD_RDLE)
			port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
		else
			port_info->rx_desc_cur->rd0 = RD_RACT;

		flush_cache_wback(port_info->rx_desc_cur,
				  sizeof(struct rx_desc_s));

		/* Point to the next descriptor */
		port_info->rx_desc_cur++;
		if (port_info->rx_desc_cur >=
		    port_info->rx_desc_base + NUM_RX_DESC)
			port_info->rx_desc_cur = port_info->rx_desc_base;
	}

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(eth, EDRRR) & EDRRR_R))
		sh_eth_write(eth, EDRRR_R, EDRRR);

	return len;
}
int sh_eth_recv(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;
	int port = eth->port, len = 0;
	struct sh_eth_info *port_info = &eth->port_info[port];
	volatile u8 *packet;

	/* Check if the rx descriptor is ready */
	if (!(port_info->rx_desc_cur->rd0 & RD_RACT)) {
		/* Check for errors */
		if (!(port_info->rx_desc_cur->rd0 & RD_RFE)) {
			len = port_info->rx_desc_cur->rd1 & 0xffff;
			packet = (volatile u8 *)
			    ADDR_TO_P2(port_info->rx_desc_cur->rd2);
			NetReceive(packet, len);
		}

		/* Make current descriptor available again */
		if (port_info->rx_desc_cur->rd0 & RD_RDLE)
			port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
		else
			port_info->rx_desc_cur->rd0 = RD_RACT;

		/* Point to the next descriptor */
		port_info->rx_desc_cur++;
		if (port_info->rx_desc_cur >=
		    port_info->rx_desc_base + NUM_RX_DESC)
			port_info->rx_desc_cur = port_info->rx_desc_base;
	}

	/* Restart the receiver if disabled */
	if (!(inl(EDRRR(port)) & EDRRR_R))
		outl(EDRRR_R, EDRRR(port));

	return len;
}
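Both versions of sh_eth_recv() advance rx_desc_cur through a fixed-size descriptor array and wrap it back to rx_desc_base once it walks past the last entry. The following is a stripped-down sketch of that ring-cursor pattern, using a toy descriptor struct (the real rx_desc_s layout is hardware-defined):

#include <stdio.h>

#define NUM_DESC 4

/* Toy descriptor; only the cursor arithmetic matters here. */
struct desc {
	unsigned int status;
};

int main(void)
{
	struct desc ring[NUM_DESC] = { {0}, {0}, {0}, {0} };
	struct desc *base = ring;
	struct desc *cur = base;

	/* Walk the ring twice to show the wrap-around used in sh_eth_recv. */
	for (int i = 0; i < 2 * NUM_DESC; i++) {
		printf("processing descriptor %ld\n", (long)(cur - base));

		cur++;
		if (cur >= base + NUM_DESC)	/* same wrap test as the driver */
			cur = base;
	}
	return 0;
}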
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int port = eth->port, i, ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct rx_desc_s *cur_rx_desc;
	u32 tmp_addr;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be RX_DESC_SIZE bytes aligned
	 */
	port_info->rx_desc_malloc = malloc(NUM_RX_DESC *
						 sizeof(struct rx_desc_s) +
						 RX_DESC_SIZE - 1);
	if (!port_info->rx_desc_malloc) {
		printf(SHETHER_NAME ": malloc failed\n");
		ret = -ENOMEM;
		goto err;
	}

	tmp_addr = (u32) (((int)port_info->rx_desc_malloc + RX_DESC_SIZE - 1) &
			  ~(RX_DESC_SIZE - 1));
	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base = (struct rx_desc_s *)ADDR_TO_P2(tmp_addr);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be 32 bytes aligned and in
	 * P2 area
	 */
	port_info->rx_buf_malloc = malloc(NUM_RX_DESC * MAX_BUF_SIZE + 31);
	if (!port_info->rx_buf_malloc) {
		printf(SHETHER_NAME ": malloc failed\n");
		ret = -ENOMEM;
		goto err_buf_malloc;
	}

	tmp_addr = (u32)(((int)port_info->rx_buf_malloc + (32 - 1)) &
			  ~(32 - 1));
	port_info->rx_buf_base = (u8 *)ADDR_TO_P2(tmp_addr);

	/* Initialize all descriptors */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32) ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	outl(ADDR_TO_PHY(port_info->rx_desc_base), RDLAR(port));
	outl(ADDR_TO_PHY(port_info->rx_desc_base), RDFAR(port));
	outl(ADDR_TO_PHY(cur_rx_desc), RDFXR(port));
	outl(RDFFR_RDLF, RDFFR(port));

	return ret;

err_buf_malloc:
	free(port_info->rx_desc_malloc);
	port_info->rx_desc_malloc = NULL;

err:
	return ret;
}
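The initialization loop above hands every descriptor to the controller (RD_RACT), stores the buffer length in the upper half of rd1 and the buffer address in rd2, then tags the last entry with RD_RDLE so the controller wraps. A self-contained sketch of that pattern follows; the bit values and the rd2 type are illustrative, not the real hardware definitions.

#include <stdint.h>
#include <stdio.h>

#define NUM_RX_DESC  4
#define MAX_BUF_SIZE 1536
#define RD_RACT  0x80000000u	/* illustrative bit positions */
#define RD_RDLE  0x40000000u

struct rx_desc {
	uint32_t rd0;
	uint32_t rd1;
	uintptr_t rd2;	/* buffer address; a 32-bit field in the real descriptor */
};

int main(void)
{
	static struct rx_desc ring[NUM_RX_DESC];
	static uint8_t buffers[NUM_RX_DESC][MAX_BUF_SIZE];
	struct rx_desc *cur;
	int i;

	/* Give every descriptor to the "hardware" and attach its buffer. */
	for (cur = ring, i = 0; i < NUM_RX_DESC; cur++, i++) {
		cur->rd0 = RD_RACT;
		cur->rd1 = MAX_BUF_SIZE << 16;
		cur->rd2 = (uintptr_t)buffers[i];
	}

	/* Mark the last descriptor so the controller wraps back to the start. */
	cur--;
	cur->rd0 |= RD_RDLE;

	printf("last descriptor rd0 = 0x%08x\n",
	       (unsigned int)ring[NUM_RX_DESC - 1].rd0);
	return 0;
}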
Example no. 5
0
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int port = eth->port, i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to the size of
	 * struct rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((u32)port_info->rx_desc_alloc);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((u32)port_info->rx_buf_alloc);

	/* Initialize all descriptors */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32) ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(eth, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(eth, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(eth, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(eth, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}
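Example no. 5 replaces the manual over-allocate-and-round approach of the older code with memalign(), so the allocator returns descriptor-size-aligned memory directly. Here is a minimal sketch of the same idea in hosted C, using C11 aligned_alloc(); rx_desc is a stand-in struct, padded so its size is a power of two as the driver's alignment assumes.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_RX_DESC 8

struct rx_desc {
	uint32_t rd0;
	uint32_t rd1;
	uint32_t rd2;
	uint32_t pad;	/* keeps sizeof(struct rx_desc) at 16, a power of two */
};

int main(void)
{
	/*
	 * C11 aligned_alloc: the size must be a multiple of the alignment,
	 * which holds here because the element size equals the alignment.
	 */
	struct rx_desc *ring = aligned_alloc(sizeof(struct rx_desc),
					     NUM_RX_DESC * sizeof(struct rx_desc));
	if (!ring) {
		fprintf(stderr, "aligned_alloc failed\n");
		return 1;
	}

	/* The returned pointer is already aligned; no manual rounding needed. */
	printf("ring=%p, aligned to %zu bytes\n",
	       (void *)ring, sizeof(struct rx_desc));

	free(ring);
	return 0;
}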