Example #1
0
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	if(!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		return;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if(lpar_rc != H_SUCCESS) {
		ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
	}

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
Example #2
0
/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}
Example #3
0
static int veth_receive(char *f_buffer_pc, int f_len_i)
{
	int packet = 0;

	dprintk("veth_receive()\n");

	while(!packet) {
		struct ibmveth_rx_q_entry *desc = &rx_queue[cur_rx_index];
		union ibmveth_buf_desc bdesc;
		void *buf;

		buf = (void *)desc->correlator;

		if ((desc->flags_off & IBMVETH_RXQ_TOGGLE) != cur_rx_toggle)
			break;

		if (!(desc->flags_off & IBMVETH_RXQ_VALID))
			goto recycle;
		if (desc->length > f_len_i) {
			printk("veth: Dropping too big packet [%d bytes]\n",
			       desc->length);
			goto recycle;
		}

		packet = desc->length;
		memcpy(f_buffer_pc,
		       buf + (desc->flags_off & IBMVETH_RXQ_OFF_MASK), packet);
	recycle:
		bdesc.fields.address = vaddr_to_dma(buf);
		bdesc.fields.flags_len = IBMVETH_BUF_VALID | RX_BUF_SIZE;
		h_add_logical_lan_buffer(g_reg, bdesc.desc);

		cur_rx_index = (cur_rx_index + 1) % RX_QUEUE_SIZE;
		if (cur_rx_index == 0)
			cur_rx_toggle ^= IBMVETH_RXQ_TOGGLE;
	}

	return packet;
}
Example #4
0
/* Refill a buffer pool up to its configured size: for each missing
 * buffer, allocate an skb, DMA-map it, record it in the pool's
 * free_map/dma_addr/skbuff arrays, and post it to the hypervisor.
 * Stops early on allocation failure; on mapping or hypervisor failure,
 * unwinds the in-progress entry and returns.
 * NOTE(review): the failure label relies on free_index, index, skb and
 * dma_addr holding the values from the iteration that failed — keep
 * the statement order inside the loop intact. */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	u32 i;
	/* number of buffers the pool is short of */
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for(i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);

		if(!skb) {
			/* out of memory: stop replenishing, keep what we added */
			ibmveth_debug_printk("replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		/* take the next free slot index from the circular free_map */
		free_index = pool->consumer_index;
		pool->consumer_index = (pool->consumer_index + 1) % pool->size;
		index = pool->free_map[free_index];

		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
		ibmveth_assert(pool->skbuff[index] == NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		/* correlator identifies the buffer on rx: pool number in
		 * the high word, slot index in the low word */
		correlator = ((u64)pool->index << 32) | index;
		*(u64*)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

		if (lpar_rc != H_SUCCESS)
			goto failure;
		else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	/* Undo the partially-set-up entry: return the slot to the free
	 * map, roll back the consumer index, unmap (only if the mapping
	 * succeeded — i.e. we got here from the hypervisor failure) and
	 * free the skb. */
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
		                 pool->dma_addr[index], pool->buff_size,
		                 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}
Example #5
0
/*
 * Register the virtual ethernet interface with the hypervisor and post
 * the initial set of receive buffers.
 *
 * Returns 0 on success (or if the interface is already running),
 * -1 on allocation or registration failure.
 */
static int veth_init(void)
{
	char *mac_addr = snk_module_interface->mac_addr;
	union ibmveth_buf_desc rxq_desc;
	unsigned long rx_queue_len = sizeof(struct ibmveth_rx_q_entry) *
		RX_QUEUE_SIZE;
	unsigned int i;
	long rc;

	dprintk("veth_init(%02x:%02x:%02x:%02x:%02x:%02x)\n",
		mac_addr[0], mac_addr[1], mac_addr[2],
		mac_addr[3], mac_addr[4], mac_addr[5]);

	if (snk_module_interface->running != 0)
		return 0;

	cur_rx_toggle = IBMVETH_RXQ_TOGGLE;
	cur_rx_index = 0;
	/* buffer_list and filter_list share one 8K allocation: the filter
	 * list is the second 4K page. Only buffer_list is a real
	 * allocation and only it may be passed to free(). */
	buffer_list = malloc_aligned(8192, 4096);
	filter_list = buffer_list + 4096;
	rx_queue = malloc_aligned(rx_queue_len, 16);
	rx_bufs = malloc(2048 * RX_QUEUE_SIZE + 4);
	if (!buffer_list || !rx_queue || !rx_bufs) {
		printk("veth: Failed to allocate memory !\n");
		goto fail;
	}
	/* round rx_bufs up to the next 4-byte boundary (over-allocated
	 * by 4 above to leave room for the adjustment) */
	rx_bufs_aligned = (uint64_t *)(((uint64_t)rx_bufs | 3) + 1);
	rxq_desc.fields.address = vaddr_to_dma(rx_queue);
	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | rx_queue_len;

	/* NOTE(review): assumes the 6-byte MAC sits in the top bytes of a
	 * big-endian 64-bit load — confirm for the target platform. */
	rc = h_register_logical_lan(g_reg,
				    vaddr_to_dma(buffer_list),
				    rxq_desc.desc,
				    vaddr_to_dma(filter_list),
				    (*(uint64_t *)mac_addr) >> 16);
	if (rc != H_SUCCESS) {
		printk("veth: Error %ld registering interface !\n", rc);
		goto fail;
	}
	/* Post every rx buffer to the hypervisor; the buffer's own
	 * address is stored at its start as the correlator. */
	for (i = 0; i < RX_QUEUE_SIZE; i++) {
		uint64_t *buf = veth_get_rx_buf(i);
		union ibmveth_buf_desc desc;
		*buf = (uint64_t)buf;
		desc.fields.address = vaddr_to_dma(buf);
		desc.fields.flags_len = IBMVETH_BUF_VALID | RX_BUF_SIZE;
		h_add_logical_lan_buffer(g_reg, desc.desc);
	}

	snk_module_interface->running = 1;

	return 0;
 fail:
	/* filter_list is not freed: it points into the middle of the
	 * buffer_list allocation, so freeing it would be invalid. */
	if (buffer_list)
		free(buffer_list);
	if (rx_queue)
		free(rx_queue);
	if (rx_bufs)
		free(rx_bufs);
	return -1;
}