Example #1
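micveth_deliver() queues an sk_buff on the host-side TX ring of the MIC virtual Ethernet device: it reserves the next descriptor slot under vi_txlock, maps the packet data for DMA with mic_ctx_map_single(), fills in the ring descriptor, and advances the tail pointer behind a write barrier. It returns 1 when the ring is full so the caller can drop or requeue the packet, and 0 on success; in interrupt mode it also signals the card.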
static int
micveth_deliver(struct sk_buff *skb, struct net_device *dev, micveth_info_t *veth_info)
{
	veth_ring_t *ring;
	ring_queue_t *tx_queue;
	ring_desc_t *desc;
	ring_packet_t *packet;
	int next_tail;

	//dump_skb(skb, 1);

	spin_lock(&veth_info->vi_txlock);
	ring = &veth_info->vi_ring.ring;
	tx_queue = &ring->r_tx;

	next_tail = (tx_queue->rq_tail + 1) % tx_queue->rq_length;
	if (next_tail == tx_queue->rq_head) {
		// queue_full situation - just drop the packet and let the stack retry
		spin_unlock(&veth_info->vi_txlock);
		return 1;
	}

	desc = &tx_queue->rq_descs[tx_queue->rq_tail];
	packet = &veth_info->vi_tx_desc[tx_queue->rq_tail];
	packet->pd_skb = skb;
	packet->pd_phys = mic_ctx_map_single(veth_to_ctx(veth_info),
					     skb->data, skb->len);
	packet->pd_length = skb->len;
	desc->rd_phys = packet->pd_phys;
	desc->rd_length = skb->len;
	desc->rd_valid = 1;

	/*
	 * Need a write memory barrier between copying the skb data to
	 * the buffer and updating the tail pointer.  NOT an smp_wmb(),
	 * because this memory barrier needs to be done even if there is
	 * a single CPU in the system.
	 */
	wmb();
	tx_queue->rq_tail = (tx_queue->rq_tail + 1) % tx_queue->rq_length;
	spin_unlock(&veth_info->vi_txlock);

	if (mic_vnet_mode == VNET_MODE_INTR) {
		micveth_send_intr(veth_info);
	}

	return 0;
}
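For context, here is a minimal sketch of how a transmit hook could consume that return value. The function name micveth_xmit_sketch and the drop accounting are assumptions for illustration, not taken from the driver:

static netdev_tx_t
micveth_xmit_sketch(struct sk_buff *skb, struct net_device *dev)
{
	micveth_info_t *veth_info = dev->ml_priv;

	/* micveth_deliver() returns non-zero when the TX ring is full. */
	if (micveth_deliver(skb, dev, veth_info)) {
		/* Ring full: drop the packet and account for the drop. */
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}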
Example #2
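micveth_probe_int() performs per-board setup: it records the SBOX MMIO and scratch-register addresses, initializes the TX and RX ring queues, preallocates and DMA-maps one sk_buff per RX descriptor, then allocates and registers the mic%d network device. micveth_init_int() is the driver-level entry point that registers the rtnl link ops, allocates the per-board micveth_info_t array, and calls micveth_probe_int() for each board before creating the veth sysfs attribute.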
static int
micveth_probe_int(micveth_info_t *veth_info, mic_ctx_t *mic_ctx)
{
	struct net_device *dev_veth;
	ring_queue_t *queue;
	ring_desc_t *desc;
	ring_packet_t *packet;
	int idx;
	int err = 0;

	veth_info->vi_pdev = mic_ctx->bi_pdev;
	veth_info->vi_sbox = (uint8_t *)((unsigned long)mic_ctx->mmio.va +
					 HOST_SBOX_BASE_ADDRESS);
	veth_info->vi_scratch14 = (uint32_t *)((unsigned long)mic_ctx->mmio.va +
					       HOST_SBOX_BASE_ADDRESS + SBOX_SCRATCH14);
	veth_info->vi_scratch15 = (uint32_t *)((unsigned long)mic_ctx->mmio.va +
					       HOST_SBOX_BASE_ADDRESS + SBOX_SCRATCH15);
	veth_info->mic_ctx = mic_ctx;
	mic_ctx->bi_vethinfo = (void *)veth_info;

	spin_lock_init(&veth_info->vi_txlock);
	spin_lock_init(&veth_info->vi_rxlock);

	if (mic_vnet_mode == VNET_MODE_POLL)
		INIT_DELAYED_WORK(&veth_info->vi_poll, micveth_poll);

	// Set the current sk_buff allocation size
	veth_info->vi_skb_mtu = MICVETH_MAX_PACKET_SIZE + 32;

	// Get the physical memory address for the ring descriptors
	veth_info->vi_ring.phys = mic_ctx_map_single(veth_to_ctx(veth_info), &veth_info->vi_ring.ring,
						     sizeof(veth_ring_t));
	veth_info->vi_ring.length = sizeof(veth_ring_t);

	queue = &veth_info->vi_ring.ring.r_tx;
	queue->rq_head = 0;
	queue->rq_tail = 0;
	queue->rq_length = MICVETH_TRANSFER_FIFO_SIZE;

	veth_info->vi_pend = 0;

	packet = &veth_info->vi_tx_desc[0];
	for (idx = 0; idx < queue->rq_length; idx++) {
		desc = &queue->rq_descs[idx];
		packet[idx].pd_skb = NULL;
		packet[idx].pd_phys = 0;
		packet[idx].pd_length = 0;

		desc->rd_phys = 0;
		desc->rd_length = 0;
		desc->rd_valid = 0;
	}

	// This is the receive end.
	queue = &veth_info->vi_ring.ring.r_rx;
	queue->rq_head = 0;
	queue->rq_tail = 0;
	queue->rq_length = MICVETH_TRANSFER_FIFO_SIZE;

	packet = &veth_info->vi_rx_desc[0];
	for (idx = 0; idx < queue->rq_length; idx++) {
		desc = &queue->rq_descs[idx];
		if (!(packet[idx].pd_skb = dev_alloc_skb(veth_info->vi_skb_mtu)))
			return -ENOMEM;
		packet[idx].pd_phys = mic_ctx_map_single(veth_to_ctx(veth_info), packet[idx].pd_skb->data,
							 veth_info->vi_skb_mtu);
		packet[idx].pd_length = veth_info->vi_skb_mtu;

		desc->rd_phys = packet[idx].pd_phys;
		desc->rd_length = packet[idx].pd_length;
		desc->rd_valid = 1;
	}

#if LINUX_VERSION_CODE > KERNEL_VERSION(3,14,4)
	if ((dev_veth = alloc_netdev(sizeof(micveth_info_t), "mic%d", NET_NAME_ENUM,  micveth_setup)) == NULL) {
#else
	if ((dev_veth = alloc_netdev(sizeof(micveth_info_t), "mic%d", micveth_setup)) == NULL) {
#endif
		return -ENOMEM;
	}

	veth_info->vi_netdev = dev_veth;
	dev_veth->ml_priv = veth_info;
	dev_veth->rtnl_link_ops = &micveth_link_ops;

	if ((err = register_netdev(dev_veth)) < 0) {
		printk("register netdev failed %d\n", err);
		free_netdev(dev_veth);
		return err;
	}

	veth_info->vi_state = VETH_STATE_INITIALIZED;
	return 0;
}

static ssize_t show_veth(struct device *dev,
			 struct device_attribute *attr, char *buf);
DEVICE_ATTR(veth, (S_IRUGO|S_IWUGO)&(~S_IWOTH), show_veth, NULL);

static int
micveth_init_int(int num_bds, struct device *dev)
{
	int bd;
	int err = 0;

	micveth.lv_num_interfaces = num_bds;
	micveth.lv_num_clients = num_bds;
	micveth.lv_active_clients = 0;
	micveth.lv_num_links_remaining = num_bds;

	BUG_ON(rtnl_link_register(&micveth_link_ops));

	// Allocate space for the control structure of each device in the system.
	micveth.lv_info = kmalloc(sizeof(micveth_info_t) * num_bds, GFP_KERNEL);

	// Initialize the state mutex.  It is overloaded to protect several fields.
	mutex_init(&micveth.lv_state_mutex);

	// Set up the timer for probing active mic clients.  When the total active board
	// count is zero the poll is not running.
	micveth.lv_pollstate = CLIENT_POLL_STOPPED;
	INIT_DELAYED_WORK(&micveth.lv_poll, micveth_clientpoll);
	init_waitqueue_head(&micveth.lv_wq);

	// Init each of the existing boards.
	for (bd = 0; bd < num_bds; bd++) {
		micveth_probe_int(&micveth.lv_info[bd], &mic_data.dd_bi[bd]->bi_ctx);
	}

	err = device_create_file(dev, &dev_attr_veth);
	return err;
}
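show_veth() itself is declared above but its body is not part of this listing; below is a minimal sketch of what such a read-only attribute callback could look like, assuming it simply reports the interface and client counters kept in the global micveth state:

static ssize_t show_veth(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	/* Hypothetical body: report the counters initialized in
	 * micveth_init_int(); the real driver may expose different state. */
	return snprintf(buf, PAGE_SIZE,
			"interfaces=%d active_clients=%d\n",
			micveth.lv_num_interfaces,
			micveth.lv_active_clients);
}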