Example #1
static void bfin_mac_rx(struct net_device *dev)
{
	struct sk_buff *skb, *new_skb;
	unsigned short len;

	/* allocate a new skb for the next receive */
	skb = current_rx_ptr->skb;
	new_skb = dev_alloc_skb(PKT_BUF_SZ + 2);
	if (!new_skb) {
		printk(KERN_NOTICE DRV_NAME
		       ": rx: low on mem - packet dropped\n");
		dev->stats.rx_dropped++;
		goto out;
	}
	/* reserve 2 bytes for RXDWA padding */
	skb_reserve(new_skb, 2);
	current_rx_ptr->skb = new_skb;
	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;

	/* Invalidate the data cache over the skb->data range when the cache
	 * is write-back. This prevents stale cache lines from overwriting
	 * the new data written by DMA.
	 */
	blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);

	len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
	skb_put(skb, len);
	blackfin_dcache_invalidate_range((unsigned long)skb->head,
					 (unsigned long)skb->tail);

	dev->last_rx = jiffies;
	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	skb->csum = current_rx_ptr->status.ip_payload_csum;
	skb->ip_summed = CHECKSUM_COMPLETE;
#endif

	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	current_rx_ptr->status.status_word = 0x00000000;
	current_rx_ptr = current_rx_ptr->next;

out:
	return;
}
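A note on the two invalidate calls above: the fresh replacement buffer is invalidated over its whole head..end span, because DMA may write anywhere in it, while the received frame only needs head..tail, the span the CPU will actually read. A standalone illustration of that arithmetic, with made-up pointer values standing in for a real sk_buff:

#include <stdio.h>

/* Toy stand-in for sk_buff's head/data/tail/end pointers.
 * All addresses below are invented for illustration. */
struct fake_skb {
	unsigned long head;   /* start of the allocated buffer */
	unsigned long data;   /* start of packet data */
	unsigned long tail;   /* end of valid data */
	unsigned long end;    /* end of the allocated buffer */
};

int main(void)
{
	/* fresh buffer after skb_reserve(skb, 2): no valid data yet */
	struct fake_skb new_skb = { 0x1000, 0x1002, 0x1002, 0x1620 };
	/* filled buffer after skb_put(skb, 1514) */
	struct fake_skb rx_skb  = { 0x2000, 0x2002, 0x2002 + 1514, 0x2620 };

	/* DMA may touch the whole buffer, so invalidate head..end */
	printf("new skb: invalidate %#lx..%#lx\n", new_skb.head, new_skb.end);
	/* only head..tail holds bytes the CPU will read */
	printf("rx skb:  invalidate %#lx..%#lx\n", rx_skb.head, rx_skb.tail);
	return 0;
}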
Example #2
static void bfin_mac_rx(struct net_device *dev)
{
	struct sk_buff *skb, *new_skb;
	unsigned short len;

	
	/* allocate a new skb for the next receive */
	skb = current_rx_ptr->skb;
	new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
	if (!new_skb) {
		printk(KERN_NOTICE DRV_NAME
		       ": rx: low on mem - packet dropped\n");
		dev->stats.rx_dropped++;
		goto out;
	}
	
	/* reserve NET_IP_ALIGN bytes for RXDWA padding */
	skb_reserve(new_skb, NET_IP_ALIGN);
	current_rx_ptr->skb = new_skb;
	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;

	
	/* invalidate the dcache over the new buffer so stale cache lines
	 * cannot overwrite the data DMA is about to write
	 */
	blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);

	len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
	skb_put(skb, len);
	blackfin_dcache_invalidate_range((unsigned long)skb->head,
					 (unsigned long)skb->tail);

	skb->protocol = eth_type_trans(skb, dev);
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	skb->csum = current_rx_ptr->status.ip_payload_csum;
	skb->ip_summed = CHECKSUM_COMPLETE;
#endif

	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	current_rx_ptr->status.status_word = 0x00000000;
	current_rx_ptr = current_rx_ptr->next;

out:
	return;
}
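Example #2 is the same receive path after the driver switched the literal 2 to NET_IP_ALIGN (which defaults to 2 in the kernel). The point of the 2-byte reserve is that the 14-byte Ethernet header then ends on a 4-byte boundary, so the IP header that follows is naturally aligned. A standalone check of that arithmetic; the constants merely restate the layout and are not taken from the driver:

#include <assert.h>
#include <stdio.h>

#define NET_IP_ALIGN 2    /* default kernel value */
#define ETH_HLEN     14   /* Ethernet header length */

int main(void)
{
	/* assume the DMA buffer itself starts 4-byte aligned */
	unsigned long buf = 0x1000;
	unsigned long ip_hdr = buf + NET_IP_ALIGN + ETH_HLEN;

	assert(ip_hdr % 4 == 0);          /* IP header lands word-aligned */
	printf("IP header offset: %lu\n", ip_hdr - buf);
	return 0;
}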
Example #3
static void ipi_flush_icache(void *info)
{
	struct blackfin_flush_data *fdata = info;

	/* Invalidate the memory holding the bounds of the flushed region. */
	blackfin_dcache_invalidate_range((unsigned long)fdata,
					 (unsigned long)fdata + sizeof(*fdata));

	blackfin_icache_flush_range(fdata->start, fdata->end);
}
Example #4
static void ipi_flush_icache(void *info)
{
	struct blackfin_flush_data *fdata = info;

	/* Invalidate the memory holding the bounds of the flushed region. */
	blackfin_dcache_invalidate_range((unsigned long)fdata,
					 (unsigned long)fdata + sizeof(*fdata));

	/* Make sure all write buffers in the data side of the core
	 * are flushed before trying to invalidate the icache.  This
	 * needs to be after the data flush and before the icache
	 * flush so that the SSYNC does the right thing in preventing
	 * the instruction prefetcher from hitting things in cached
	 * memory at the wrong time -- it runs much further ahead than
	 * the pipeline.
	 */
	SSYNC();

	/* ipi_flush_icache is invoked by the generic flush_icache_range,
	 * so call the blackfin arch icache flush directly here.
	 */
	blackfin_icache_flush_range(fdata->start, fdata->end);
}
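Examples #3 and #4 run on the core that receives the IPI. For context, here is a minimal sketch of what the sending side could look like, modeled on the comment above ("invoked by generic flush_icache_range"); the function name and the exact struct layout are assumptions for illustration, not code lifted from the kernel:

/* Sketch only: assumes struct blackfin_flush_data carries start/end and
 * that smp_call_function() runs ipi_flush_icache on the other cores. */
struct blackfin_flush_data {
	unsigned long start;
	unsigned long end;
};

static void smp_icache_flush_range_others(unsigned long start,
					  unsigned long end)
{
	struct blackfin_flush_data fdata;

	fdata.start = start;
	fdata.end = end;

	/* push the bounds out of this core's dcache so the receiver's
	 * invalidate in ipi_flush_icache reads fresh values */
	blackfin_dcache_flush_range((unsigned long)&fdata,
				    (unsigned long)(&fdata + 1));

	/* run ipi_flush_icache on every other core, waiting for completion */
	smp_call_function(ipi_flush_icache, &fdata, 1);
}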
Example #5
static int desc_list_init(struct net_device *dev)
{
	int i;
	struct sk_buff *new_skb;
#if !defined(CONFIG_BFIN_MAC_USE_L1)
	dma_addr_t dma_handle;
#endif

	tx_desc = bfin_mac_alloc(&dma_handle,
				sizeof(struct net_dma_desc_tx),
				CONFIG_BFIN_TX_DESC_NUM);
	if (tx_desc == NULL)
		goto init_error;

	rx_desc = bfin_mac_alloc(&dma_handle,
				sizeof(struct net_dma_desc_rx),
				CONFIG_BFIN_RX_DESC_NUM);
	if (rx_desc == NULL)
		goto init_error;

	
	/* init tx_list */
	tx_list_head = tx_list_tail = tx_desc;

	for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
		struct net_dma_desc_tx *t = tx_desc + i;
		struct dma_descriptor *a = &(t->desc_a);
		struct dma_descriptor *b = &(t->desc_b);

		a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		a->start_addr = (unsigned long)t->packet;
		a->x_count = 0;
		a->next_dma_desc = b;

		b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		b->start_addr = (unsigned long)(&(t->status));
		b->x_count = 0;

		t->skb = NULL;
		tx_list_tail->desc_b.next_dma_desc = a;
		tx_list_tail->next = t;
		tx_list_tail = t;
	}
	tx_list_tail->next = tx_list_head;	/* tx_list is a circle */
	tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
	current_tx_ptr = tx_list_head;

	
	/* init rx_list */
	rx_list_head = rx_list_tail = rx_desc;

	for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
		struct net_dma_desc_rx *r = rx_desc + i;
		struct dma_descriptor *a = &(r->desc_a);
		struct dma_descriptor *b = &(r->desc_b);

		
		/* allocate a new skb for the next receive */
		new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
		if (!new_skb) {
			pr_notice("init: low on mem - packet dropped\n");
			goto init_error;
		}
		skb_reserve(new_skb, NET_IP_ALIGN);
		blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);
		r->skb = new_skb;

		a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		
		/* point the DMA start two bytes early since RXDWA is enabled */
		a->start_addr = (unsigned long)new_skb->data - 2;
		a->x_count = 0;
		a->next_dma_desc = b;

		b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
				NDSIZE_6 | DMAFLOW_LARGE;
		b->start_addr = (unsigned long)(&(r->status));
		b->x_count = 0;

		rx_list_tail->desc_b.next_dma_desc = a;
		rx_list_tail->next = r;
		rx_list_tail = r;
	}
	rx_list_tail->next = rx_list_head;	/* rx_list is a circle */
	rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
	current_rx_ptr = rx_list_head;

	return 0;

init_error:
	desc_list_free();
	pr_err("kmalloc failed\n");
	return -ENOMEM;
}
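The ring construction above is a standard pattern: each loop pass links the previous tail to the new node, and after the loop the tail is pointed back at the head to close the circle. A self-contained miniature of just that pattern, with plain calloc standing in for bfin_mac_alloc and a two-field node for the real descriptors:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

int main(void)
{
	enum { N = 4 };
	struct node *ring = calloc(N, sizeof(*ring));
	struct node *head, *tail;
	int i;

	if (!ring)
		return 1;

	head = tail = ring;
	for (i = 0; i < N; i++) {
		struct node *t = ring + i;

		t->id = i;
		tail->next = t;    /* previous tail points at the new node */
		tail = t;
	}
	tail->next = head;         /* close the circle, as the driver does */

	/* walk twice around the ring to show it wraps */
	for (i = 0, tail = head; i < 2 * N; i++, tail = tail->next)
		printf("%d ", tail->id);
	printf("\n");

	free(ring);
	return 0;
}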
Example #6
static int desc_list_init(void)
{
	int i;
	struct sk_buff *new_skb;
#if !defined(CONFIG_BFIN_MAC_USE_L1)
	/*
	 * This dma_handle is useless in Blackfin dma_alloc_coherent().
	 * The real DMA handle is the return value of dma_alloc_coherent().
	 */
	dma_addr_t dma_handle;
#endif

	tx_desc = bfin_mac_alloc(&dma_handle,
				sizeof(struct net_dma_desc_tx) *
				CONFIG_BFIN_TX_DESC_NUM);
	if (tx_desc == NULL)
		goto init_error;

	rx_desc = bfin_mac_alloc(&dma_handle,
				sizeof(struct net_dma_desc_rx) *
				CONFIG_BFIN_RX_DESC_NUM);
	if (rx_desc == NULL)
		goto init_error;

	/* init tx_list */
	tx_list_head = tx_list_tail = tx_desc;

	for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
		struct net_dma_desc_tx *t = tx_desc + i;
		struct dma_descriptor *a = &(t->desc_a);
		struct dma_descriptor *b = &(t->desc_b);

		/*
		 * disable DMA
		 * read from memory WNR = 0
		 * wordsize is 32 bits
		 * 6 half words is desc size
		 * large desc flow
		 */
		a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		a->start_addr = (unsigned long)t->packet;
		a->x_count = 0;
		a->next_dma_desc = b;

		/*
		 * enable DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * disable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		b->start_addr = (unsigned long)(&(t->status));
		b->x_count = 0;

		t->skb = NULL;
		tx_list_tail->desc_b.next_dma_desc = a;
		tx_list_tail->next = t;
		tx_list_tail = t;
	}
	tx_list_tail->next = tx_list_head;	/* tx_list is a circle */
	tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
	current_tx_ptr = tx_list_head;

	/* init rx_list */
	rx_list_head = rx_list_tail = rx_desc;

	for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
		struct net_dma_desc_rx *r = rx_desc + i;
		struct dma_descriptor *a = &(r->desc_a);
		struct dma_descriptor *b = &(r->desc_b);

		/* allocate a new skb for next time receive */
		new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
		if (!new_skb) {
			printk(KERN_NOTICE DRV_NAME
			       ": init: low on mem - packet dropped\n");
			goto init_error;
		}
		skb_reserve(new_skb, NET_IP_ALIGN);
		/* Invalidate the data cache over the skb->data range when the
		 * cache is write-back. This prevents stale cache lines from
		 * overwriting the new data written by DMA.
		 */
		blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);
		r->skb = new_skb;

		/*
		 * enable DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * disable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		/* since RXDWA is enabled */
		a->start_addr = (unsigned long)new_skb->data - 2;
		a->x_count = 0;
		a->next_dma_desc = b;

		/*
		 * enable DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * enable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
				NDSIZE_6 | DMAFLOW_LARGE;
		b->start_addr = (unsigned long)(&(r->status));
		b->x_count = 0;

		rx_list_tail->desc_b.next_dma_desc = a;
		rx_list_tail->next = r;
		rx_list_tail = r;
	}
	rx_list_tail->next = rx_list_head;	/* rx_list is a circle */
	rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
	current_rx_ptr = rx_list_head;

	return 0;

init_error:
	desc_list_free();
	printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
	return -ENOMEM;
}
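The block comments in Example #6 spell out what each config word encodes. To make that mapping concrete, here is the same composition with placeholder bit values; the real encodings live in the Blackfin DMA headers and are deliberately not reproduced here:

#include <stdio.h>

/* Placeholder bit assignments, for illustration only; not the real
 * Blackfin DMA_CONFIG encodings. */
#define DMAEN         (1 << 0)   /* enable the channel */
#define WNR           (1 << 1)   /* 1 = write to memory, 0 = read */
#define WDSIZE_32     (1 << 3)   /* 32-bit transfer word size */
#define DI_EN         (1 << 7)   /* raise an interrupt when done */
#define NDSIZE_6      (6 << 8)   /* fetch 6 half-words of next descriptor */
#define DMAFLOW_LARGE (7 << 12)  /* large descriptor list mode */

int main(void)
{
	/* tx desc_a: initially disabled, reads the packet from memory */
	unsigned cfg_tx_a = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
	/* rx desc_b: enabled, writes status to memory, interrupts on done */
	unsigned cfg_rx_b = DMAEN | WNR | WDSIZE_32 | DI_EN |
			    NDSIZE_6 | DMAFLOW_LARGE;

	printf("tx desc_a config: %#x\n", cfg_tx_a);
	printf("rx desc_b config: %#x\n", cfg_rx_b);
	return 0;
}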
Example #7
static void bfin_mac_rx(struct net_device *dev)
{
	struct sk_buff *skb, *new_skb;
	unsigned short len;
	struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev);
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	unsigned int i;
	unsigned char fcs[ETH_FCS_LEN + 1];
#endif

	/* check if the frame status word reports an error condition,
	 * in which case we simply drop the packet
	 */
	if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
		printk(KERN_NOTICE DRV_NAME
		       ": rx: receive error - packet dropped\n");
		dev->stats.rx_dropped++;
		goto out;
	}

	/* allocate a new skb for the next receive */
	skb = current_rx_ptr->skb;

	new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
	if (!new_skb) {
		printk(KERN_NOTICE DRV_NAME
		       ": rx: low on mem - packet dropped\n");
		dev->stats.rx_dropped++;
		goto out;
	}
	/* reserve NET_IP_ALIGN (2) bytes for RXDWA padding */
	skb_reserve(new_skb, NET_IP_ALIGN);
	/* Invalidate the data cache over the skb->data range when the cache
	 * is write-back. This prevents stale cache lines from overwriting
	 * the new data written by DMA.
	 */
	blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);

	current_rx_ptr->skb = new_skb;
	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;

	len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
	/* Deduct the Ethernet FCS length from the reported frame length */
	len -= ETH_FCS_LEN;
	skb_put(skb, len);

	skb->protocol = eth_type_trans(skb, dev);

	bfin_rx_hwtstamp(dev, skb);

#if defined(BFIN_MAC_CSUM_OFFLOAD)
	/* Checksum offloading only works for IPv4 packets with the standard
	 * 20-byte IP header, because the Blackfin MAC checksum calculation is
	 * based on that assumption. We must NOT use the calculated checksum if
	 * the IP version or header length breaks that assumption.
	 */
	if (skb->data[IP_HEADER_OFF] == 0x45) {
		skb->csum = current_rx_ptr->status.ip_payload_csum;
		/*
		 * Deduct the Ethernet FCS from the hardware-generated IP payload
		 * checksum. The IP checksum uses 16-bit one's complement
		 * arithmetic, so subtracting a value is equivalent to adding its
		 * bitwise inversion. If the IP payload length is odd, the
		 * inverted FCS must also start at an odd offset, with the first
		 * byte left zero.
		 */
		if (skb->len % 2) {
			fcs[0] = 0;
			for (i = 0; i < ETH_FCS_LEN; i++)
				fcs[i + 1] = ~skb->data[skb->len + i];
			skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum);
		} else {
			for (i = 0; i < ETH_FCS_LEN; i++)
				fcs[i] = ~skb->data[skb->len + i];
			skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum);
		}
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
#endif

	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
out:
	current_rx_ptr->status.status_word = 0x00000000;
	current_rx_ptr = current_rx_ptr->next;
}
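The FCS-removal trick above relies on a property of one's-complement arithmetic: adding the bitwise inversion of x is the same as subtracting x. A standalone check, folding bytes into a 16-bit one's-complement sum the way csum_partial does, restricted here to the even-length, even-aligned case:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 16-bit one's-complement sum over an even-length buffer, in the
 * spirit of csum_partial (byte order chosen arbitrarily here). */
static uint16_t ocsum(const uint8_t *p, size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(p[i] << 8 | p[i + 1]);
	while (sum >> 16)                   /* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t payload[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t fcs[4] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t frame[12], inv[4];
	uint16_t with_fcs, without_fcs, fixed;
	int i;

	memcpy(frame, payload, 8);
	memcpy(frame + 8, fcs, 4);

	with_fcs = ocsum(frame, 12, 0);     /* covers payload + FCS */
	without_fcs = ocsum(payload, 8, 0); /* what the stack wants */

	for (i = 0; i < 4; i++)
		inv[i] = ~fcs[i];
	fixed = ocsum(inv, 4, with_fcs);    /* "subtract" the FCS */

	printf("%#06x vs %#06x: %s\n", without_fcs, fixed,
	       without_fcs == fixed ? "match" : "MISMATCH");
	return 0;
}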
Example #8
static void *__dma_memcpy(void *dest, const void *src, size_t size)
{
	int direction;	/* 1 - address decrease, 0 - address increase */
	int flag_align;	/* 1 - address aligned,  0 - address unaligned */
	int flag_2D;	/* 1 - 2D DMA needed,	 0 - 1D DMA needed */
	unsigned long flags;

	if (size == 0)	/* size_t is unsigned, so only zero is invalid */
		return NULL;

	local_irq_save(flags);

	if ((unsigned long)src < memory_end)
		blackfin_dcache_flush_range((unsigned int)src,
					    (unsigned int)(src + size));

	if ((unsigned long)dest < memory_end)
		blackfin_dcache_invalidate_range((unsigned int)dest,
						 (unsigned int)(dest + size));

	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);

	if ((unsigned long)src < (unsigned long)dest)
		direction = 1;
	else
		direction = 0;

	if ((((unsigned long)dest % 2) == 0) && (((unsigned long)src % 2) == 0)
	    && ((size % 2) == 0))
		flag_align = 1;
	else
		flag_align = 0;

	if (size > 0x10000)	/* size > 64K */
		flag_2D = 1;
	else
		flag_2D = 0;

	/* Setup destination and source start address */
	if (direction) {
		if (flag_align) {
			bfin_write_MDMA_D0_START_ADDR(dest + size - 2);
			bfin_write_MDMA_S0_START_ADDR(src + size - 2);
		} else {
			bfin_write_MDMA_D0_START_ADDR(dest + size - 1);
			bfin_write_MDMA_S0_START_ADDR(src + size - 1);
		}
	} else {
		bfin_write_MDMA_D0_START_ADDR(dest);
		bfin_write_MDMA_S0_START_ADDR(src);
	}

	/* Setup destination and source xcount */
	if (flag_2D) {
		if (flag_align) {
			bfin_write_MDMA_D0_X_COUNT(1024 / 2);
			bfin_write_MDMA_S0_X_COUNT(1024 / 2);
		} else {
			bfin_write_MDMA_D0_X_COUNT(1024);
			bfin_write_MDMA_S0_X_COUNT(1024);
		}
		bfin_write_MDMA_D0_Y_COUNT(size >> 10);
		bfin_write_MDMA_S0_Y_COUNT(size >> 10);
	} else {
		if (flag_align) {
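Example #8 breaks off before the 1D branch completes, but the 2D branch above is worth a closer look: transfers over 64 KiB are reshaped into rows of 1024 bytes (512 two-byte elements when aligned) with Y_COUNT = size >> 10, which only covers the whole buffer when size is a multiple of 1024. A quick standalone check of that decomposition:

#include <stdio.h>

int main(void)
{
	unsigned long sizes[] = { 0x20000, 0x20400, 0x20001 };
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long size = sizes[i];
		unsigned long x = 1024;          /* bytes per row */
		unsigned long y = size >> 10;    /* number of rows */

		printf("size %#lx: covers %#lx%s\n", size, x * y,
		       x * y == size ? "" : "  <-- tail bytes not covered");
	}
	return 0;
}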
Example #9
/*
 * FUNCTION NAME: ppi_read
 *
 * INPUTS/OUTPUTS:
 * filp - description of the opened file.
 * count - how many bytes the user wants to read.
 * buf - the address the data will be written to.
 *
 * RETURN
 * positive number: bytes read back
 * -EINVAL when the word size is set to 16 and an odd byte count is requested.
 * -EAGAIN when the file is in non-blocking mode and there is no RX data.
 *
 * FUNCTION(S) CALLED:
 *
 * GLOBAL VARIABLES REFERENCED: ppiinfo
 *
 * GLOBAL VARIABLES MODIFIED: NIL
 *
 * DESCRIPTION: Invoked when the user issues a 'read' system call
 *              on the device.
 *
 * CAUTION:
 */
static ssize_t ppi_read(struct file *filp, char *buf, size_t count,
			loff_t * pos)
{
	int ierr;
	ppi_device_t *pdev = filp->private_data;

	pr_debug("ppi_read:\n");

	if (count == 0)	/* size_t is unsigned, so only zero is trivial */
		return 0;

	pdev->done = 0;

	/* Invalidate allocated memory in Data Cache */

	blackfin_dcache_invalidate_range((u_long) buf, (u_long) (buf + count));

	pr_debug("ppi_read: blackfin_dcache_invalidate_range : DONE\n");

	/* configure ppi port for DMA RX */

	set_dma_config(CH_PPI, pdev->dma_config);
	set_dma_start_addr(CH_PPI, (u_long) buf);
	set_dma_x_count(CH_PPI, pdev->pixel_per_line / 2);	// Div 2 because of 16-bit packing
	set_dma_y_count(CH_PPI, pdev->lines_per_frame);
	set_dma_y_modify(CH_PPI, 2);

	if (pdev->bpp > 8 || pdev->dma_config & WDSIZE_16)
		set_dma_x_modify(CH_PPI, 2);
	else
		set_dma_x_modify(CH_PPI, 1);

	pr_debug("ppi_read: SETUP DMA : DONE\n");

	enable_dma(CH_PPI);

	/* Enable PPI */

	bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN);
	SSYNC();

	if (pdev->ppi_trigger_gpio > NO_TRIGGER) {
		gpio_set_value(pdev->ppi_trigger_gpio, 1);
		udelay(1);
		gpio_set_value(pdev->ppi_trigger_gpio, 0);
	}

	pr_debug("ppi_read: PPI ENABLED : DONE\n");

	/* Wait for data available */
	if (pdev->nonblock)
		return -EAGAIN;

	pr_debug("PPI wait_event_interruptible\n");
	ierr = wait_event_interruptible(*(pdev->rx_avail), pdev->done);
	if (ierr) {
		/* waiting is broken by a signal */
		pr_debug("PPI wait_event_interruptible ierr\n");
		return ierr;
	}

	pr_debug("PPI wait_event_interruptible done\n");

	disable_dma(CH_PPI);

	pr_debug("ppi_read: return\n");

	return count;
}
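ppi_read sleeps on *(pdev->rx_avail) until pdev->done becomes true, so some completion path, presumably the PPI DMA interrupt handler, must set the flag and wake the queue. A hedged sketch of what that handler could look like; the handler name is invented, and the fields are assumed to match the wait above (clear_dma_irqstat() is the usual Blackfin way to acknowledge a DMA interrupt):

/* Sketch only: pairs with wait_event_interruptible(*(pdev->rx_avail),
 * pdev->done) in ppi_read above. */
static irqreturn_t ppi_irq(int irq, void *dev_id)
{
	ppi_device_t *pdev = dev_id;

	clear_dma_irqstat(CH_PPI);          /* ack the DMA-done interrupt */

	pdev->done = 1;                     /* condition ppi_read waits on */
	wake_up_interruptible(pdev->rx_avail);

	return IRQ_HANDLED;
}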