Example #1
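The ibmveth virtual Ethernet driver's interrupt handler: it masks the interrupt source with h_vio_signal(VIO_IRQ_DISABLE) before handing the work to NAPI, so no further interrupts fire while the poll loop runs.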
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}
Example #2
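The hvsi serial console driver's tty open path: it re-enables the port's VIO interrupt (h_vio_signal with VIO_IRQ_ENABLE) under the port spinlock, then handshakes with the hypervisor and raises DTR.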
static int hvsi_open(struct tty_struct *tty, struct file *filp)
{
	struct hvsi_struct *hp;
	unsigned long flags;
	int line = tty->index;
	int ret;

	pr_debug("%s\n", __func__);

	if (line < 0 || line >= hvsi_count)
		return -ENODEV;
	hp = &hvsi_ports[line];

	tty->driver_data = hp;

	mb(); /* make sure the ->state check below sees the latest value */
	if (hp->state == HVSI_FSP_DIED)
		return -EIO;

	spin_lock_irqsave(&hp->lock, flags);
	hp->tty = tty;
	hp->count++;
	atomic_set(&hp->seqno, 0);
	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
	spin_unlock_irqrestore(&hp->lock, flags);

	if (is_console(hp))
		return 0; /* this has already been handshaked as the console */

	ret = hvsi_handshake(hp);
	if (ret < 0) {
		printk(KERN_ERR "%s: HVSI handshaking failed\n", tty->name);
		return ret;
	}

	ret = hvsi_get_mctrl(hp);
	if (ret < 0) {
		printk(KERN_ERR "%s: couldn't get initial modem flags\n", tty->name);
		return ret;
	}

	ret = hvsi_set_mctrl(hp, hp->mctrl | TIOCM_DTR);
	if (ret < 0) {
		printk(KERN_ERR "%s: couldn't set DTR\n", tty->name);
		return ret;
	}

	return 0;
}
Example #3
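hvsi's unthrottle handler: any input that was held back while the tty was throttled is flushed and pushed up to the line discipline, after which the VIO interrupt is re-enabled.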
static void hvsi_unthrottle(struct tty_struct *tty)
{
	struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data;
	unsigned long flags;
	int shouldflip = 0;

	pr_debug("%s\n", __func__);

	spin_lock_irqsave(&hp->lock, flags);
	if (hp->n_throttle) {
		hvsi_send_overflow(hp);
		shouldflip = 1;
	}
	spin_unlock_irqrestore(&hp->lock, flags);

	if (shouldflip)
		tty_flip_buffer_push(hp->tty);

	h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
}
Example #4
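An early version of the ibmveth NAPI poll routine: up to budget frames are processed, then interrupts are re-enabled and the receive queue is rechecked, dropping back into the poll loop (with interrupts masked again) if a frame slipped in meanwhile.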
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

 restart_poll:
	do {
		struct sk_buff *skb;

		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			ibmveth_debug_printk("recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);
			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			ibmveth_rxq_harvest_buffer(adapter);

			skb_reserve(skb, offset);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		ibmveth_assert(lpar_rc == H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
Example #5
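ibmveth's open path: receive resources are allocated and DMA-mapped, the VIO interrupt is masked (VIO_IRQ_DISABLE) before the logical LAN is registered with the hypervisor, and the handler is then installed with request_irq(). The direct call to ibmveth_interrupt() at the end kicks off the initial replenish cycle.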
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	ibmveth_debug_printk("open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IbmVethNumBufferPools; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *)get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *)get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);

	if (!adapter->rx_queue.queue_addr) {
		ibmveth_error_printk("unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	dev = &adapter->vdev->dev;

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
		ibmveth_error_printk("unable to map filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
	ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
		ibmveth_error_printk("buffer TCE:0x%llx filter TCE:0x%llx rxq desc:0x%llx MAC:0x%llx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENONET;
	}

	for (i = 0; i < IbmVethNumBufferPools; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			ibmveth_error_printk("unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			ibmveth_cleanup(adapter);
			napi_disable(&adapter->napi);
			return -ENOMEM;
		}
	}

	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, netdev);
	if (rc != 0) {
		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return rc;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		ibmveth_error_printk("unable to allocate bounce buffer\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		ibmveth_error_printk("unable to map bounce buffer\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	ibmveth_debug_printk("initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	ibmveth_debug_printk("open complete\n");

	return 0;
}
Example #6
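A later revision of the ibmveth_poll routine from Example #4: it adds an rx_copybreak copy path for small frames, while the closing enable/recheck/disable sequence around h_vio_signal is unchanged.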
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
Example #7
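A still later revision of ibmveth_poll: it adds large-receive handling (the mss written by PHYP into the rx buffer) and hands frames to GRO via napi_gro_receive(). Note that napi_complete() is now called before the interrupt is re-enabled.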
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;
	struct iphdr *iph;
	u16 mss = 0;

restart_poll:
	while (frames_processed < budget) {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);
			int lrg_pkt = ibmveth_rxq_large_packet(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			/* if the large packet bit is set in the rx queue
			 * descriptor, the mss will be written by PHYP eight
			 * bytes from the start of the rx buffer, which is
			 * skb->data at this stage
			 */
			if (lrg_pkt) {
				__be64 *rxmss = (__be64 *)(skb->data + 8);

				mss = (u16)be64_to_cpu(*rxmss);
			}

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
					iph = (struct iphdr *)skb->data;

					/* If the IP checksum is not offloaded and the
					 * packet is a large-send packet, the checksum
					 * must be rebuilt.
					 */
					if (iph->check == 0xffff) {
						iph->check = 0;
						iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
					}
				}
			}

			if (length > netdev->mtu + ETH_HLEN) {
				ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
				adapter->rx_large_packets++;
			}

			napi_gro_receive(napi, skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	}

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		napi_complete(napi);

		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
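
Taken together, the ibmveth examples show one recurring interrupt-mitigation pattern: mask the VIO interrupt in the hard-IRQ handler, poll under NAPI, then re-enable and recheck before finishing. The sketch below distills that pattern; every my_* identifier is a hypothetical placeholder, and only h_vio_signal(), the VIO_IRQ_* modes, and the NAPI calls are taken from the examples above.

static irqreturn_t my_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct my_adapter *adapter = netdev_priv(netdev);

	if (napi_schedule_prep(&adapter->napi)) {
		/* mask the source; the poll routine re-enables it */
		h_vio_signal(adapter->unit_address, VIO_IRQ_DISABLE);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_adapter *adapter =
			container_of(napi, struct my_adapter, napi);
	int done = 0;

restart_poll:
	/* my_process_rx() is a hypothetical stand-in for the rx loops above */
	done += my_process_rx(adapter, budget - done);

	if (done < budget) {
		/* We think we are done: re-enable, complete, then recheck,
		 * because a frame may have arrived after the last queue check
		 * but before VIO_IRQ_ENABLE took effect.
		 */
		h_vio_signal(adapter->unit_address, VIO_IRQ_ENABLE);
		napi_complete(napi);

		if (my_rx_pending(adapter) && napi_reschedule(napi)) {
			h_vio_signal(adapter->unit_address, VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}
	return done;
}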