Example no. 1
/*
 * __hal_stats_enable
 * @stats: xge_hal_stats_t structure that contains, in particular,
 *         Xframe hw stat counters.
 *
 * Ask device to start collecting stats.
 * See also: xge_hal_stats_getinfo().
 */
void
__hal_stats_enable (xge_hal_stats_t *stats)
{
	xge_hal_device_t *hldev;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;
	unsigned int refresh_time_pci_clocks;

	xge_assert(stats->hw_info);

	hldev = (xge_hal_device_t*)stats->devh;
	xge_assert(hldev);

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* enable statistics
	 * For Titan stat_addr offset == 0x09d8, and stat_cfg offset == 0x09d0
	*/
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    stats->dma_addr, &bar0->stat_addr);

	refresh_time_pci_clocks = XGE_HAL_XENA_PER_SEC *
	    hldev->config.stats_refresh_time_sec;
	refresh_time_pci_clocks =
	    __hal_fix_time_ival_herc(hldev,
	        refresh_time_pci_clocks);

#ifdef XGE_HAL_HERC_EMULATION
	/*
	 *  The clocks in the emulator run ~1000 times slower than in
	 *  the real world, so stats transfers occur ~1000 times less
	 *  frequently. STAT_CFG.STAT_TRSF_PERIOD should be set to 0x20C
	 *  for Hercules emulation (stats transferred every 0.5 sec).
	*/

	val64 = (0x20C | XGE_HAL_STAT_CFG_STAT_RO |
	    XGE_HAL_STAT_CFG_STAT_EN);
#else
	val64 = XGE_HAL_SET_UPDT_PERIOD(refresh_time_pci_clocks) |
	                    XGE_HAL_STAT_CFG_STAT_RO |
	            XGE_HAL_STAT_CFG_STAT_EN;
#endif

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    val64, &bar0->stat_cfg);

	xge_debug_stats(XGE_TRACE, "stats enabled at 0x"XGE_OS_LLXFMT,
	     (unsigned long long)stats->dma_addr);

	stats->is_enabled = 1;
}
Example no. 2
/**
 * xge_hal_fifo_dtr_next_completed - Retrieve next completed descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle. Returned by HAL.
 * @t_code: Transfer code, as per Xframe User Guide,
 *          Transmit Descriptor Format.
 *          Returned by HAL.
 *
 * Retrieve the _next_ completed descriptor.
 * HAL uses the channel callback (*xge_hal_channel_callback_f) to notify
 * the upper-layer driver (ULD) of new completed descriptors. After that
 * the ULD can use xge_hal_fifo_dtr_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HAL via
 * xge_hal_channel_callback_f).
 *
 * Implementation-wise, the upper-layer driver is free to call
 * xge_hal_fifo_dtr_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HAL)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Xframe completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Xframe User Guide.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: xge_hal_channel_callback_f{},
 * xge_hal_ring_dtr_next_completed().
 * Usage: See ex_tx_compl{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh,
	        xge_hal_dtr_h *dtrh, u8 *t_code)
{
	xge_hal_fifo_txd_t        *txdp;
	xge_hal_fifo_t            *fifo    = (xge_hal_fifo_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_fifo_txdl_priv_t  *txdl_priv;
#endif

	__hal_channel_dtr_try_complete(channelh, dtrh);
	txdp = (xge_hal_fifo_txd_t *)*dtrh;
	if (txdp == NULL) {
	    return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
	}

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	txdl_priv = __hal_fifo_txdl_priv(txdp);

	/* sync TxDL to read the ownership
	 *
	 * Note: 16 bytes covers Control_1 & Control_2 */
	xge_os_dma_sync(fifo->channel.pdev,
	              txdl_priv->dma_handle,
	          txdl_priv->dma_addr,
	          txdl_priv->dma_offset,
	          16,
	          XGE_OS_DMA_DIR_FROMDEVICE);
#endif

	/* check whether host owns it */
	if ( !(txdp->control_1 & XGE_HAL_TXD_LIST_OWN_XENA) ) {

	    xge_assert(txdp->host_control!=0);

	    __hal_channel_dtr_complete(channelh);

	    *t_code = (u8)XGE_HAL_GET_TXD_T_CODE(txdp->control_1);

	    /* see XGE_HAL_SET_TXD_T_CODE() above.. */
	    xge_assert(*t_code != XGE_HAL_TXD_T_CODE_UNUSED_5);

	    if (fifo->channel.usage_cnt > 0)
	        fifo->channel.usage_cnt--;

	    return XGE_HAL_OK;
	}

	/* no more completions */
	*dtrh = 0;
	return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
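The completion path described above (channel callback first, then draining the rest) can be sketched as follows. This is a minimal illustration only, not the driver's ex_tx_compl{} code; the routine name is hypothetical and all ULD-side buffer bookkeeping is reduced to comments.

/* Hypothetical ULD routine: drain all completed TxDLs after the first
 * completion has been delivered through the channel callback. */
static void
uld_process_tx_completions(xge_hal_channel_h channelh)
{
	xge_hal_dtr_h dtrh;
	u8 t_code;

	while (xge_hal_fifo_dtr_next_completed(channelh, &dtrh, &t_code) ==
	       XGE_HAL_OK) {
		if (t_code != 0) {
			/* non-zero t_code: descriptor completed with an
			 * error (e.g. link down); account for it here */
		}
		/* release ULD resources (buffers, DMA mappings, ...)
		 * associated with dtrh, then recycle the descriptor */
		xge_hal_fifo_dtr_free(channelh, dtrh);
	}
}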
Example no. 3
static xge_hal_status_e
__hal_fifo_mempool_item_free(xge_hal_mempool_h mempoolh,
			      void *memblock,
			      int memblock_index,
			      xge_hal_mempool_dma_t *dma_object,
			      void *item,
			      int index,
			      int is_last,
			      void *userdata)
{
	int memblock_item_idx;
	xge_hal_fifo_txdl_priv_t *txdl_priv;
#ifdef XGE_HAL_ALIGN_XMIT
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)userdata;
#endif

	xge_assert(item);

	txdl_priv = __hal_mempool_item_priv(mempoolh, memblock_index,
					    item, &memblock_item_idx);
	xge_assert(txdl_priv);

#ifdef XGE_HAL_ALIGN_XMIT
	if (fifo->config->alignment_size) {
		if (txdl_priv->align_dma_addr != 0) {
			xge_os_dma_unmap(fifo->channel.pdev,
			       txdl_priv->align_dma_handle,
			       txdl_priv->align_dma_addr,
			       fifo->config->alignment_size *
					fifo->config->max_aligned_frags,
			       XGE_OS_DMA_DIR_TODEVICE);

			txdl_priv->align_dma_addr = 0;
		}

		if (txdl_priv->align_vaddr != NULL) {
			xge_os_dma_free(fifo->channel.pdev,
			      txdl_priv->align_vaddr,
			      fifo->config->alignment_size *
					fifo->config->max_aligned_frags,
			      &txdl_priv->align_dma_acch,
			      &txdl_priv->align_dma_handle);

			txdl_priv->align_vaddr = NULL;
		}
	}
#endif

	return XGE_HAL_OK;
}
Example no. 4
/**
 * xge_hal_fifo_dtr_free - Free descriptor.
 * @channelh: Channel handle.
 * @dtr: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * xge_hal_fifo_dtr_reserve or xge_hal_fifo_dtr_reserve_sp.
 * The "free-ing" completes the descriptor's lifecycle.
 *
 * After free-ing (see xge_hal_fifo_dtr_free()) the descriptor can again
 * be:
 *
 * - reserved (xge_hal_fifo_dtr_reserve);
 *
 * - posted (xge_hal_fifo_dtr_post);
 *
 * - completed (xge_hal_fifo_dtr_next_completed);
 *
 * - and recycled again (xge_hal_fifo_dtr_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 * See also: xge_hal_ring_dtr_free(), xge_hal_fifo_dtr_reserve().
 * Usage: See ex_tx_compl{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr)
{
#if defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	unsigned long flags = 0;
#endif
	xge_hal_fifo_txdl_priv_t *txdl_priv = __hal_fifo_txdl_priv(
	                (xge_hal_fifo_txd_t *)dtr);
	int max_frags = ((xge_hal_fifo_t *)channelh)->config->max_frags;
#if defined(XGE_HAL_TX_MULTI_FREE)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
	flags);
#endif

	if (txdl_priv->alloc_frags > max_frags) {
	    xge_hal_fifo_txd_t *dang_txdp = (xge_hal_fifo_txd_t *)
	                    txdl_priv->dang_txdl;
	    int dang_frags = txdl_priv->dang_frags;
	    int alloc_frags = txdl_priv->alloc_frags;
	    txdl_priv->dang_txdl = NULL;
	    txdl_priv->dang_frags = 0;
	    txdl_priv->alloc_frags = 0;
	    /* dtrh must have a linked list of dtrh */
	    xge_assert(txdl_priv->next_txdl_priv);

	    /* free any dangling dtrh first */
	    if (dang_txdp) {
	        xge_debug_fifo(XGE_TRACE,
	            "freeing dangled dtrh %p for %d fragments",
	            dang_txdp, dang_frags);
	        __hal_fifo_txdl_free_many(channelh, dang_txdp,
	            max_frags, dang_frags);
	    }

	    /* now free the reserved dtrh list */
	    xge_debug_fifo(XGE_TRACE,
	            "freeing dtrh %p list of %d fragments", dtr,
	            alloc_frags);
	    __hal_fifo_txdl_free_many(channelh,
	            (xge_hal_fifo_txd_t *)dtr, max_frags,
	            alloc_frags);
	}
	else
	    __hal_channel_dtr_free(channelh, dtr);

	((xge_hal_channel_t *)channelh)->poll_bytes += txdl_priv->bytes_sent;

#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
	__hal_fifo_txdl_priv(dtr)->allocated = 0;
#endif

#if defined(XGE_HAL_TX_MULTI_FREE)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
	flags);
#endif
}
Example no. 5
/**
 * xge_hal_fifo_dtr_buffer_append - Append the contents of a virtually
 * contiguous data buffer to a single physically contiguous buffer.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @vaddr: Virtual address of the data buffer.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the transmit descriptor preparation for posting
 * (via xge_hal_fifo_dtr_post()).
 * The main difference between this API and
 * xge_hal_fifo_dtr_buffer_set_aligned() is that this API appends the
 * contents of virtually contiguous data buffers received from the
 * upper layer into a single physically contiguous data buffer, and the
 * device then DMAs from this buffer.
 *
 * See Also: xge_hal_fifo_dtr_buffer_finalize(), xge_hal_fifo_dtr_buffer_set(),
 * xge_hal_fifo_dtr_buffer_set_aligned().
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	    void *vaddr, int size)
{
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_txdl_priv_t *txdl_priv;
	ptrdiff_t used;

	xge_assert(size > 0);

	txdl_priv = __hal_fifo_txdl_priv(dtrh);

	used = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
	used += txdl_priv->align_dma_offset;
	if (used + (unsigned int)size > (unsigned int)fifo->align_size)
	        return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;

	xge_os_memcpy((char*)txdl_priv->align_vaddr_start +
	    txdl_priv->align_dma_offset, vaddr, size);

	fifo->channel.stats.copied_frags++;

	txdl_priv->align_dma_offset += size;
	return XGE_HAL_OK;
}
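A minimal sketch of how xge_hal_fifo_dtr_buffer_append() is typically paired with xge_hal_fifo_dtr_buffer_finalize() (shown further below). The helper name and the fragment arrays are hypothetical; error handling is reduced to returning the failing status.

/* Hypothetical ULD helper: copy a list of small, virtually contiguous
 * fragments into the TxDL's bounce buffer and close them out as a single
 * physically contiguous TxD. */
static xge_hal_status_e
uld_copy_small_frags(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	    void **frag_vaddrs, int *frag_sizes, int num_frags, int frag_idx)
{
	xge_hal_status_e status;
	int i;

	for (i = 0; i < num_frags; i++) {
		status = xge_hal_fifo_dtr_buffer_append(channelh, dtrh,
				frag_vaddrs[i], frag_sizes[i]);
		if (status != XGE_HAL_OK)
			return status; /* e.g. XGE_HAL_ERR_OUT_ALIGNED_FRAGS */
	}

	/* everything appended so far becomes one buffer behind TxD
	 * number frag_idx */
	xge_hal_fifo_dtr_buffer_finalize(channelh, dtrh, frag_idx);
	return XGE_HAL_OK;
}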
Example no. 6
xge_hal_status_e
__hal_ring_initial_replenish(xge_hal_channel_t *channel,
			     xge_hal_channel_reopen_e reopen)
{
	xge_hal_dtr_h dtr;

	while (__hal_channel_dtr_count(channel) > 0) {
		xge_hal_status_e status;

		status = xge_hal_ring_dtr_reserve(channel, &dtr);
		xge_assert(status == XGE_HAL_OK);

		if (channel->dtr_init) {
			status = channel->dtr_init(channel, dtr,
					channel->reserve_length,
					channel->userdata, reopen);
			if (status != XGE_HAL_OK) {
				xge_hal_ring_dtr_free(channel, dtr);
				xge_hal_channel_abort(channel,
					XGE_HAL_CHANNEL_OC_NORMAL);
				return status;
			}
		}

		xge_hal_ring_dtr_post(channel, dtr);
	}

	return XGE_HAL_OK;
}
Example no. 7
/**
 * xge_hal_fifo_dtr_post_many - Post multiple descriptors on fifo
 * channel.
 * @channelh: Channel to post descriptor.
 * @num: Number of descriptors (i.e., fifo TxDLs) in the @dtrs[] array.
 * @dtrs: Descriptors obtained via xge_hal_fifo_dtr_reserve().
 *
 * Post multiple descriptors on the fifo channel. The operation is atomic:
 * all descriptors are posted on the channel "back-to-back" without
 * letting other posts (possibly driven by multiple transmitting threads)
 * interleave.
 *
 * See also: xge_hal_fifo_dtr_post(), xge_hal_ring_dtr_post().
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num,
	        xge_hal_dtr_h dtrs[])
{
	int i;
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_txd_t *txdp_last;
	xge_hal_fifo_txd_t *txdp_first;
	xge_hal_fifo_txdl_priv_t *txdl_priv_last;
#if defined(XGE_HAL_TX_MULTI_POST_IRQ)
	unsigned long flags = 0;
#endif

	xge_assert(num > 1);

	txdp_first = (xge_hal_fifo_txd_t *)dtrs[0];
	txdp_first->control_1 |= XGE_HAL_TXD_GATHER_CODE_FIRST;
	txdp_first->control_2 |= fifo->interrupt_type;

	txdl_priv_last = __hal_fifo_txdl_priv(dtrs[num-1]);
	txdp_last = (xge_hal_fifo_txd_t *)dtrs[num-1] +
	                (txdl_priv_last->frags - 1);
	txdp_last->control_1 |= XGE_HAL_TXD_GATHER_CODE_LAST;

#if defined(XGE_HAL_TX_MULTI_POST)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
	flags);
#endif

	for (i=0; i<num; i++) {
	    xge_hal_fifo_txdl_priv_t *txdl_priv;
	    u64 val64;
	    xge_hal_dtr_h dtrh = dtrs[i];

	    txdl_priv = __hal_fifo_txdl_priv(dtrh);
	    txdl_priv = txdl_priv; /* Cheat lint */

	    val64 = 0;
	    if (i == 0) {
	         val64 |= XGE_HAL_TX_FIFO_FIRST_LIST;
	    } else if (i == num -1) {
	         val64 |= XGE_HAL_TX_FIFO_LAST_LIST;
	    }

	    val64 |= XGE_HAL_TX_FIFO_SPECIAL_FUNC;
	    __hal_fifo_dtr_post_single(channelh, dtrh, val64);
	}

#if defined(XGE_HAL_TX_MULTI_POST)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
	flags);
#endif

	fifo->channel.stats.total_posts_many++;
}
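A hedged sketch of how a ULD might drive the API above: reserve several TxDLs, fill them, then post them back-to-back. The helper name is hypothetical, num is assumed to be between 2 and 8, per-descriptor filling is elided, and xge_hal_fifo_dtr_reserve() is assumed to mirror xge_hal_ring_dtr_reserve() shown earlier (channel handle plus out-parameter).

/* Hypothetical multi-descriptor transmit path. */
static xge_hal_status_e
uld_xmit_burst(xge_hal_channel_h channelh, int num)
{
	xge_hal_dtr_h dtrs[8];
	int i;

	for (i = 0; i < num; i++) {
		if (xge_hal_fifo_dtr_reserve(channelh, &dtrs[i]) !=
		    XGE_HAL_OK) {
			/* out of descriptors: give back what was reserved */
			while (--i >= 0)
				xge_hal_fifo_dtr_free(channelh, dtrs[i]);
			return XGE_HAL_INF_OUT_OF_DESCRIPTORS;
		}
		/* ... fill dtrs[i] with buffer pointers here ... */
	}

	xge_hal_fifo_dtr_post_many(channelh, num, dtrs);
	return XGE_HAL_OK;
}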
Example no. 8
/*
 * __hal_stats_terminate
 * @stats: xge_hal_stats_t structure that contains, in particular,
 *         Xframe hw stat counters.
 * Terminate per-device statistics object.
 */
void
__hal_stats_terminate (xge_hal_stats_t *stats)
{
	xge_hal_device_t *hldev;

	xge_assert(stats->hw_info);

	hldev = (xge_hal_device_t*)stats->devh;
	xge_assert(hldev);
	xge_assert(stats->is_initialized);
	if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
	    xge_os_dma_unmap(hldev->pdev,
	               stats->hw_info_dmah,
	           stats->dma_addr,
	           sizeof(xge_hal_stats_hw_info_t),
	           XGE_OS_DMA_DIR_FROMDEVICE);

	    xge_os_dma_free(hldev->pdev,
	          stats->hw_info,
	          sizeof(xge_hal_stats_hw_info_t),
	          &stats->hw_info_dma_acch,
	          &stats->hw_info_dmah);
	} else {
	    xge_os_dma_unmap(hldev->pdev,
	               stats->hw_info_dmah,
	           stats->dma_addr,
	           sizeof(xge_hal_stats_pcim_info_t),
	           XGE_OS_DMA_DIR_FROMDEVICE);

	    xge_os_dma_free(hldev->pdev,
	          stats->pcim_info,
	          sizeof(xge_hal_stats_pcim_info_t),
	          &stats->hw_info_dma_acch,
	          &stats->hw_info_dmah);

	    xge_os_free(hldev->pdev, stats->pcim_info_saved,
	        sizeof(xge_hal_stats_pcim_info_t));

	    xge_os_free(hldev->pdev, stats->pcim_info_latest,
	            sizeof(xge_hal_stats_pcim_info_t));

	}

	stats->is_initialized = 0;
	stats->is_enabled = 0;
}
Example no. 9
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_fifo_txdl_priv_t*
__hal_fifo_txdl_priv(xge_hal_dtr_h dtrh)
{
	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t*)dtrh;
	xge_hal_fifo_txdl_priv_t *txdl_priv;

	xge_assert(txdp);
	txdl_priv = (xge_hal_fifo_txdl_priv_t *)
	            (ulong_t)txdp->host_control;

	xge_assert(txdl_priv);
	xge_assert(txdl_priv->dma_object);
	xge_assert(txdl_priv->dma_addr);

	xge_assert(txdl_priv->dma_object->handle == txdl_priv->dma_handle);

	return txdl_priv;
}
Example no. 10
static void
__hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
                         xge_hal_ring_t *ring, int from, int to)
{
    xge_hal_ring_block_t *to_item, *from_item;
    dma_addr_t to_dma, from_dma;
    pci_dma_h to_dma_handle, from_dma_handle;

    /* get "from" RxD block */
    from_item = (xge_hal_ring_block_t *)
                __hal_mempool_item((xge_hal_mempool_t *) mempoolh, from);
    xge_assert(from_item);

    /* get "to" RxD block */
    to_item = (xge_hal_ring_block_t *)
              __hal_mempool_item((xge_hal_mempool_t *) mempoolh, to);
    xge_assert(to_item);

    /* get the DMA address of the beginning of the "to" RxD block */
    to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);

    /* set the next pointer of the "from" RxD block to point to the
     * "to" item's DMA start address */
    __hal_ring_block_next_pointer_set(from_item, to_dma);

    /* get the "from" RxD block's DMA start address */
    from_dma =
        __hal_ring_item_dma_addr(mempoolh, from_item, &from_dma_handle);

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
    /* we must sync "from" RxD block, so hardware will see it */
    xge_os_dma_sync(ring->channel.pdev,
                    from_dma_handle,
                    from_dma + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
                    __hal_ring_item_dma_offset(mempoolh, from_item) +
                    XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
                    sizeof(u64),
                    XGE_OS_DMA_DIR_TODEVICE);
#endif

    xge_debug_ring(XGE_TRACE, "block%d:0x"XGE_OS_LLXFMT" => block%d:0x"XGE_OS_LLXFMT,
                   from, (unsigned long long)from_dma, to,
                   (unsigned long long)to_dma);
}
Example no. 11
void
__hal_ring_prc_disable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
			((xge_hal_device_t *)ring->channel.devh)->bar0;

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
			ring->channel.regh0,
			&bar0->prc_ctrl_n[ring->channel.post_qid]);
	val64 &= ~((u64) XGE_HAL_PRC_CTRL_RC_ENABLED);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
}
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t*)channelh;

	xge_assert(channel->work_arr[channel->post_index] == NULL);

	channel->work_arr[channel->post_index++] = dtrh;

	/* wrap-around */
	if (channel->post_index == channel->length)
		channel->post_index = 0;
}
Example no. 13
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_txdl_free_many(xge_hal_channel_h channelh,
	          xge_hal_fifo_txd_t *txdp, int list_size, int frags)
{
	xge_hal_fifo_txdl_priv_t *current_txdl_priv;
	xge_hal_fifo_txdl_priv_t *next_txdl_priv;
	int invalid_frags = frags % list_size;
	if (invalid_frags){
	    xge_debug_fifo(XGE_ERR,
	        "freeing corrupt dtrh %p, fragments %d list size %d",
	        txdp, frags, list_size);
	    xge_assert(invalid_frags == 0);
	}
	while(txdp){
	    xge_debug_fifo(XGE_TRACE,
	        "freeing linked dtrh %p, fragments %d list size %d",
	        txdp, frags, list_size);
	    current_txdl_priv = __hal_fifo_txdl_priv(txdp);
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
	    current_txdl_priv->allocated = 0;
#endif
	    __hal_channel_dtr_free(channelh, txdp);
	    next_txdl_priv = current_txdl_priv->next_txdl_priv;
	    xge_assert(frags);
	    frags -= list_size;
	    if (next_txdl_priv) {
	        current_txdl_priv->next_txdl_priv = NULL;
	        txdp = next_txdl_priv->first_txdp;
	    }
	    else {
	        xge_debug_fifo(XGE_TRACE,
	        "freed linked dtrh fragments %d list size %d",
	        frags, list_size);
	        break;
	    }
	}
	xge_assert(frags == 0);
}
Example no. 14
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t*
__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh)
{

	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	xge_hal_ring_rxd_priv_t *rxd_priv;

	xge_assert(rxdp);

#if defined(XGE_HAL_USE_5B_MODE)
	xge_assert(ring);
	if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
	    xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)dtrh;
#if defined (XGE_OS_PLATFORM_64BIT)
	    int memblock_idx = rxdp_5->host_control >> 16;
	    int i = rxdp_5->host_control & 0xFFFF;
	    rxd_priv = (xge_hal_ring_rxd_priv_t *)
	        ((char*)ring->mempool->memblocks_priv_arr[memblock_idx] + ring->rxd_priv_size * i);
#else
	    /* 32-bit case */
	    rxd_priv = (xge_hal_ring_rxd_priv_t *)rxdp_5->host_control;
#endif
	} else {
	    /* 1b and 3b modes: host_control holds the pointer directly
	     * (assumed fall-through; mirrors __hal_fifo_txdl_priv() and the
	     * encoding set up in __hal_ring_mempool_item_alloc()) */
	    rxd_priv = (xge_hal_ring_rxd_priv_t *)(ulong_t)rxdp->host_control;
	}
#else
	/* 1b and 3b modes */
	rxd_priv = (xge_hal_ring_rxd_priv_t *)(ulong_t)rxdp->host_control;
#endif

	xge_assert(rxd_priv);

	return rxd_priv;
}
Example no. 15
/**
 * xge_hal_fifo_dtr_buffer_finalize - Prepares a descriptor that contains the
 * single physically contiguous buffer.
 *
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the Txdl list.
 *
 * This API, in conjunction with xge_hal_fifo_dtr_buffer_append(), prepares
 * a descriptor that consists of a single physically contiguous buffer
 * which in turn contains the contents of one or more virtually contiguous
 * buffers received from the upper layer.
 *
 * See Also: xge_hal_fifo_dtr_buffer_append().
*/
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	    int frag_idx)
{
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_txdl_priv_t *txdl_priv;
	xge_hal_fifo_txd_t *txdp;
	ptrdiff_t prev_boff;

	xge_assert(frag_idx < fifo->config->max_frags);

	txdl_priv = __hal_fifo_txdl_priv(dtrh);
	txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;

	if (frag_idx != 0) {
	    txdp->control_1 = txdp->control_2 = 0;
	}

	prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
	txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff;
	txdp->control_1 |=
	            XGE_HAL_TXD_BUFFER0_SIZE(txdl_priv->align_dma_offset);
	txdl_priv->bytes_sent += (unsigned int)txdl_priv->align_dma_offset;
	fifo->channel.stats.total_buffers++;
	fifo->channel.stats.copied_buffers++;
	txdl_priv->frags++;
	txdl_priv->align_used_frags++;

#if defined(XGE_OS_DMA_REQUIRES_SYNC)
	/* sync pre-mapped buffer */
	xge_os_dma_sync(fifo->channel.pdev,
	          txdl_priv->align_dma_handle,
	          txdp->buffer_pointer,
	          0,
	          txdl_priv->align_dma_offset,
	          XGE_OS_DMA_DIR_TODEVICE);
#endif

	/* increment vaddr_start for the next buffer_append() iteration */
	txdl_priv->align_vaddr_start += txdl_priv->align_dma_offset;
	txdl_priv->align_dma_offset = 0;
}
Example no. 16
/**
 * xge_hal_fifo_is_next_dtr_completed - Checks if the next dtr is completed
 * @channelh: Channel handle.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_is_next_dtr_completed(xge_hal_channel_h channelh)
{
	xge_hal_fifo_txd_t *txdp;
	xge_hal_dtr_h dtrh;

	__hal_channel_dtr_try_complete(channelh, &dtrh);
	txdp = (xge_hal_fifo_txd_t *)dtrh;
	if (txdp == NULL) {
	    return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
	}

	/* check whether host owns it */
	if ( !(txdp->control_1 & XGE_HAL_TXD_LIST_OWN_XENA) ) {
	    xge_assert(txdp->host_control!=0);
	    return XGE_HAL_OK;
	}

	/* no more completions */
	return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
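A trivial, hypothetical use of the check above, e.g. from a ULD polling loop that wants to know whether entering the completion path is worthwhile at all:

/* Hypothetical ULD helper: returns non-zero if at least one TxDL has
 * already been completed by the hardware. */
static int
uld_tx_work_pending(xge_hal_channel_h channelh)
{
	return (xge_hal_fifo_is_next_dtr_completed(channelh) == XGE_HAL_OK);
}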
Example no. 17
void
__hal_ring_close(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;
#if defined(XGE_HAL_RX_MULTI_RESERVE)||defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)||\
    defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
#endif

	xge_assert(ring->channel.pdev);

	queue = &ring->config->queue[ring->channel.post_qid];

	if (ring->mempool) {
		__hal_mempool_destroy(ring->mempool);
	}

	if (ring->reserved_rxds_arr) {
		xge_os_free(ring->channel.pdev,
		          ring->reserved_rxds_arr,
			  sizeof(void*) * queue->max * ring->rxds_per_block);
	}

	__hal_channel_terminate(channelh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_destroy(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.reserve_lock, hldev->pdev);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif
}
Example no. 18
xge_hal_status_e
__hal_fifo_dtr_align_alloc_map(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
        xge_hal_fifo_txdl_priv_t *txdl_priv;
	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;

	xge_assert(txdp);

	txdl_priv = __hal_fifo_txdl_priv(txdp);

	/* allocate alignment DMA-buffer */
	txdl_priv->align_vaddr = xge_os_dma_malloc(fifo->channel.pdev,
				fifo->config->alignment_size *
				   fifo->config->max_aligned_frags,
				XGE_OS_DMA_CACHELINE_ALIGNED |
				XGE_OS_DMA_STREAMING,
				&txdl_priv->align_dma_handle,
				&txdl_priv->align_dma_acch);
	if (txdl_priv->align_vaddr == NULL) {
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	/* map it */
	txdl_priv->align_dma_addr = xge_os_dma_map(fifo->channel.pdev,
		txdl_priv->align_dma_handle, txdl_priv->align_vaddr,
		fifo->config->alignment_size *
			       fifo->config->max_aligned_frags,
		XGE_OS_DMA_DIR_TODEVICE, XGE_OS_DMA_STREAMING);

	if (txdl_priv->align_dma_addr == XGE_OS_INVALID_DMA_ADDR) {
                __hal_fifo_dtr_align_free_unmap(channelh, dtrh);
		return XGE_HAL_ERR_OUT_OF_MAPPING;
	}

	return XGE_HAL_OK;
}
Example no. 19
xge_hal_status_e
__hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
	xge_hal_status_e status;
	xge_hal_device_t *hldev;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;


	/* Note: at this point we have channel.devh and channel.pdev
	 *       pre-set only! */

	hldev = (xge_hal_device_t *)ring->channel.devh;
	ring->config = &hldev->config.ring;
	queue = &ring->config->queue[attr->post_qid];
	ring->indicate_max_pkts = queue->indicate_max_pkts;
	ring->buffer_mode = queue->buffer_mode;

	xge_assert(queue->configured);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_init(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
#endif

	ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode);
	ring->rxd_priv_size =
		sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space;

	/* how many RxDs can fit into one block. Depends on configured
	 * buffer_mode. */
	ring->rxds_per_block = XGE_HAL_RING_RXDS_PER_BLOCK(queue->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;

	ring->reserved_rxds_arr = xge_os_malloc(ring->channel.pdev,
		      sizeof(void*) * queue->max * ring->rxds_per_block);
	if (ring->reserved_rxds_arr == NULL) {
		__hal_ring_close(channelh);
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	ring->mempool = __hal_mempool_create(
				     hldev->pdev,
				     ring->config->memblock_size,
				     XGE_HAL_RING_RXDBLOCK_SIZE,
				     ring->rxdblock_priv_size,
				     queue->initial, queue->max,
				     __hal_ring_mempool_item_alloc,
				     NULL, /* nothing to free */
				     ring);
	if (ring->mempool == NULL) {
		__hal_ring_close(channelh);
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	status = __hal_channel_initialize(channelh,
					  attr,
					  ring->reserved_rxds_arr,
					  queue->initial * ring->rxds_per_block,
					  queue->max * ring->rxds_per_block,
					  0 /* no threshold for ring! */);
	if (status != XGE_HAL_OK) {
		__hal_ring_close(channelh);
		return status;
	}

	/* sanity check that everything formatted ok */
	xge_assert(ring->reserved_rxds_arr[0] ==
		    (char *)ring->mempool->items_arr[0] +
		      (ring->rxds_per_block * ring->rxd_size - ring->rxd_size));

        /* Note:
	 * Specifying dtr_init callback means two things:
	 * 1) dtrs need to be initialized by ULD at channel-open time;
	 * 2) dtrs need to be posted at channel-open time
	 *    (that's what the initial_replenish() below does)
	 * Currently we don't have a case when the 1) is done without the 2).
	 */
	if (ring->channel.dtr_init) {
		if ((status = __hal_ring_initial_replenish(channelh,
						XGE_HAL_CHANNEL_OC_NORMAL))
							!= XGE_HAL_OK) {
			__hal_ring_close(channelh);
			return status;
		}
	}

	return XGE_HAL_OK;
}
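The Note above ties channel-open behaviour to the presence of a dtr_init callback. Below is a hedged sketch of such a callback; its prototype is inferred from the call sites in __hal_ring_initial_replenish() and __hal_fifo_mempool_item_alloc() shown in this section, the name is hypothetical, and the actual receive-buffer allocation is left as a comment.

/* Hypothetical ULD-provided dtr_init callback, invoked once per descriptor
 * at channel-open (or reopen) time. */
static xge_hal_status_e
uld_rx_dtr_init(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh, int index,
		void *userdata, xge_hal_channel_reopen_e reopen)
{
	/* allocate a receive buffer and attach it to dtrh here; returning
	 * an error makes __hal_ring_initial_replenish() free the dtr and
	 * abort the channel open */
	return XGE_HAL_OK;
}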
Example no. 20
static xge_hal_status_e
__hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh,
			      void *memblock,
			      int memblock_index,
			      xge_hal_mempool_dma_t *dma_object,
			      void *item,
			      int index,
			      int is_last,
			      void *userdata)
{
	int memblock_item_idx;
	xge_hal_fifo_txdl_priv_t *txdl_priv;
	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)item;
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)userdata;

	xge_assert(item);
	txdl_priv = __hal_mempool_item_priv(mempoolh, memblock_index,
					    item, &memblock_item_idx);

	xge_assert(txdl_priv);

	/* pre-format HAL's TxDL's private */
	txdl_priv->dma_offset = (char*)item - (char*)memblock;
	txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
	txdl_priv->dma_handle = dma_object->handle;
	txdl_priv->memblock   = memblock;
	txdl_priv->first_txdp = (xge_hal_fifo_txd_t *)item;
	txdl_priv->next_txdl_priv = NULL;
	txdl_priv->dang_txdl = NULL;
	txdl_priv->dang_frags = 0;
	txdl_priv->alloc_frags = 0;

#ifdef XGE_DEBUG_ASSERT
	txdl_priv->dma_object = dma_object;
#endif
	txdp->host_control = (u64)(ulong_t)txdl_priv;

#ifdef XGE_HAL_ALIGN_XMIT
	txdl_priv->align_vaddr = NULL;
	txdl_priv->align_dma_addr = (dma_addr_t)0;

#ifndef XGE_HAL_ALIGN_XMIT_ALLOC_RT
	{
	xge_hal_status_e status;
	if (fifo->config->alignment_size) {
	        status = __hal_fifo_dtr_align_alloc_map(fifo, txdp);
		if (status != XGE_HAL_OK)  {
		        xge_debug_mm(XGE_ERR,
		              "align buffer[%d] %d bytes, status %d",
			      index,
			      fifo->config->alignment_size *
			          fifo->config->max_aligned_frags,
			      status);
		        return status;
		}
	}
	}
#endif
#endif

	if (fifo->channel.dtr_init) {
		fifo->channel.dtr_init(fifo, (xge_hal_dtr_h)txdp, index,
			   fifo->channel.userdata, XGE_HAL_CHANNEL_OC_NORMAL);
	}

	return XGE_HAL_OK;
}
Example no. 21
static xge_hal_status_e
__hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
			      void *memblock,
			      int memblock_index,
			      xge_hal_mempool_dma_t *dma_object,
			      void *item,
			      int index,
			      int is_last,
			      void *userdata)
{
	int i;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)userdata;

	xge_assert(item);
	xge_assert(ring);


	/* format rxds array */
	for (i=ring->rxds_per_block-1; i>=0; i--) {
		void *rxdblock_priv;
		xge_hal_ring_rxd_priv_t *rxd_priv;
		xge_hal_ring_rxd_1_t *rxdp;
		int reserve_index = index * ring->rxds_per_block + i;
		int memblock_item_idx;

		ring->reserved_rxds_arr[reserve_index] = (char *)item +
				(ring->rxds_per_block - 1 - i) * ring->rxd_size;

		/* Note: memblock_item_idx is index of the item within
		 *       the memblock. For instance, in case of three RxD-blocks
		 *       per memblock this value can be 0,1 or 2. */
		rxdblock_priv =
			__hal_mempool_item_priv(mempoolh, memblock_index, item,
						&memblock_item_idx);
		rxdp = (xge_hal_ring_rxd_1_t *)
			ring->reserved_rxds_arr[reserve_index];
		rxd_priv = (xge_hal_ring_rxd_priv_t *) (void *)
			((char*)rxdblock_priv + ring->rxd_priv_size * i);

		/* pre-format per-RxD Ring's private */
		rxd_priv->dma_offset = (char*)rxdp - (char*)memblock;
		rxd_priv->dma_addr = dma_object->addr +  rxd_priv->dma_offset;
		rxd_priv->dma_handle = dma_object->handle;
#ifdef XGE_DEBUG_ASSERT
		rxd_priv->dma_object = dma_object;
#endif

		/* pre-format Host_Control */
#if defined(XGE_HAL_USE_5B_MODE)
		if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
			xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)rxdp;
#if defined(XGE_OS_PLATFORM_64BIT)
			xge_assert(memblock_index <= 0xFFFF);
			xge_assert(i <= 0xFFFF);
			/* store memblock's index */
			rxdp_5->host_control = (u32)memblock_index << 16;
			/* store index of memblock's private */
			rxdp_5->host_control |= (u32)(memblock_item_idx *
						    ring->rxds_per_block + i);
#else
			/* 32-bit case */
			rxdp_5->host_control = (u32)rxd_priv;
#endif
		} else {
			/* 1b and 3b modes */
			rxdp->host_control = (u64)(ulong_t)rxd_priv;
		}
#else
		/* 1b and 3b modes */
		rxdp->host_control = (u64)(ulong_t)rxd_priv;
#endif
	}

	__hal_ring_block_memblock_idx_set(item, memblock_index);

	if (is_last) {
		/* link last one with first one */
		__hal_ring_rxdblock_link(mempoolh, ring, 0, index);
	}

	if (index > 0 ) {
		 /* link this RxD block with previous one */
		__hal_ring_rxdblock_link(mempoolh, ring, index, index-1);
	}

	return XGE_HAL_OK;
}
Example no. 22
/*
 * __hal_mempool_destroy
 */
void
__hal_mempool_destroy(xge_hal_mempool_t *mempool)
{
	int i, j;

	for (i=0; i<mempool->memblocks_allocated; i++) {
	    xge_hal_mempool_dma_t *dma_object;

	    xge_assert(mempool->memblocks_arr[i]);
	    xge_assert(mempool->memblocks_dma_arr + i);

	    dma_object = mempool->memblocks_dma_arr + i;

	    for (j=0; j<mempool->items_per_memblock; j++) {
	        int index = i*mempool->items_per_memblock + j;

	        /* skip the last partially filled (if any) memblock */
	        if (index >= mempool->items_current) {
	            break;
	        }

	        /* let the caller do additional cleanup on each item */
	        if (mempool->item_func_free != NULL) {

	            mempool->item_func_free(mempool,
	                mempool->memblocks_arr[i],
	                i, dma_object,
	                mempool->shadow_items_arr[index],
	                index, /* unused */ -1,
	                mempool->userdata);
	        }
	    }

	    xge_os_dma_unmap(mempool->pdev,
	               dma_object->handle, dma_object->addr,
	           mempool->memblock_size, XGE_OS_DMA_DIR_BIDIRECTIONAL);

	    xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
	        mempool->items_priv_size * mempool->items_per_memblock);

	    xge_os_dma_free(mempool->pdev, mempool->memblocks_arr[i],
	              mempool->memblock_size, &dma_object->acc_handle,
	              &dma_object->handle);
	}

	if (mempool->items_arr) {
	    xge_os_free(mempool->pdev, mempool->items_arr, sizeof(void*) *
	              mempool->items_max);
	}

	if (mempool->shadow_items_arr) {
	    xge_os_free(mempool->pdev, mempool->shadow_items_arr,
	          sizeof(void*) * mempool->items_max);
	}

	if (mempool->memblocks_dma_arr) {
	    xge_os_free(mempool->pdev, mempool->memblocks_dma_arr,
	              sizeof(xge_hal_mempool_dma_t) *
	             mempool->memblocks_max);
	}

	if (mempool->memblocks_priv_arr) {
	    xge_os_free(mempool->pdev, mempool->memblocks_priv_arr,
	              sizeof(void*) * mempool->memblocks_max);
	}

	if (mempool->memblocks_arr) {
	    xge_os_free(mempool->pdev, mempool->memblocks_arr,
	              sizeof(void*) * mempool->memblocks_max);
	}

	xge_os_free(mempool->pdev, mempool, sizeof(xge_hal_mempool_t));
}
Example no. 23
/*
 * __hal_stats_initialize
 * @stats: xge_hal_stats_t structure that contains, in particular,
 *         Xframe hw stat counters.
 * @devh: HAL device handle.
 *
 * Initialize per-device statistics object.
 * See also: xge_hal_stats_getinfo(), xge_hal_status_e{}.
 */
xge_hal_status_e
__hal_stats_initialize (xge_hal_stats_t *stats, xge_hal_device_h devh)
{
	int dma_flags;
	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;

	xge_assert(!stats->is_initialized);

	dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
#ifdef XGE_HAL_DMA_STATS_CONSISTENT
	dma_flags |= XGE_OS_DMA_CONSISTENT;
#else
	dma_flags |= XGE_OS_DMA_STREAMING;
#endif
	if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
	    stats->hw_info =
	        (xge_hal_stats_hw_info_t *) xge_os_dma_malloc(
	                hldev->pdev,
	                sizeof(xge_hal_stats_hw_info_t),
	                dma_flags,
	                &stats->hw_info_dmah,
	                &stats->hw_info_dma_acch);

	    if (stats->hw_info == NULL) {
	        xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }
	    xge_os_memzero(stats->hw_info,
	        sizeof(xge_hal_stats_hw_info_t));
	    xge_os_memzero(&stats->hw_info_saved,
	        sizeof(xge_hal_stats_hw_info_t));
	    xge_os_memzero(&stats->hw_info_latest,
	        sizeof(xge_hal_stats_hw_info_t));



	    stats->dma_addr = xge_os_dma_map(hldev->pdev,
	                               stats->hw_info_dmah,
	                   stats->hw_info,
	                   sizeof(xge_hal_stats_hw_info_t),
	                   XGE_OS_DMA_DIR_FROMDEVICE,
	                   XGE_OS_DMA_CACHELINE_ALIGNED |
#ifdef XGE_HAL_DMA_STATS_CONSISTENT
	                   XGE_OS_DMA_CONSISTENT
#else
	                       XGE_OS_DMA_STREAMING
#endif
	                                   );
	    if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) {
	        xge_debug_stats(XGE_ERR,
	            "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA",
	            (unsigned long long)(ulong_t)stats->hw_info);
	        xge_os_dma_free(hldev->pdev,
	              stats->hw_info,
	              sizeof(xge_hal_stats_hw_info_t),
	              &stats->hw_info_dma_acch,
	              &stats->hw_info_dmah);
	        return XGE_HAL_ERR_OUT_OF_MAPPING;
	    }
	}
	else {
	    stats->pcim_info_saved =
	        (xge_hal_stats_pcim_info_t *)xge_os_malloc(
	        hldev->pdev, sizeof(xge_hal_stats_pcim_info_t));
	    if (stats->pcim_info_saved == NULL) {
	        xge_debug_stats(XGE_ERR, "%s", "can not alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }

	    stats->pcim_info_latest =
	        (xge_hal_stats_pcim_info_t *)xge_os_malloc(
	        hldev->pdev, sizeof(xge_hal_stats_pcim_info_t));
	    if (stats->pcim_info_latest == NULL) {
	        xge_os_free(hldev->pdev, stats->pcim_info_saved,
	            sizeof(xge_hal_stats_pcim_info_t));
	        xge_debug_stats(XGE_ERR, "%s", "can not alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }

	    stats->pcim_info =
	        (xge_hal_stats_pcim_info_t *) xge_os_dma_malloc(
	                hldev->pdev,
	                sizeof(xge_hal_stats_pcim_info_t),
	                dma_flags,
	                &stats->hw_info_dmah,
	                &stats->hw_info_dma_acch);

	    if (stats->pcim_info == NULL) {
	        xge_os_free(hldev->pdev, stats->pcim_info_saved,
	            sizeof(xge_hal_stats_pcim_info_t));
	        xge_os_free(hldev->pdev, stats->pcim_info_latest,
	            sizeof(xge_hal_stats_pcim_info_t));
	        xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }


	    xge_os_memzero(stats->pcim_info,
	        sizeof(xge_hal_stats_pcim_info_t));
	    xge_os_memzero(stats->pcim_info_saved,
	        sizeof(xge_hal_stats_pcim_info_t));
	    xge_os_memzero(stats->pcim_info_latest,
	        sizeof(xge_hal_stats_pcim_info_t));



	    stats->dma_addr = xge_os_dma_map(hldev->pdev,
	                               stats->hw_info_dmah,
	                   stats->pcim_info,
	                   sizeof(xge_hal_stats_pcim_info_t),
	                   XGE_OS_DMA_DIR_FROMDEVICE,
	                   XGE_OS_DMA_CACHELINE_ALIGNED |
#ifdef XGE_HAL_DMA_STATS_CONSISTENT
	                   XGE_OS_DMA_CONSISTENT
#else
	                       XGE_OS_DMA_STREAMING
#endif
	                                   );
	    if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) {
	        xge_debug_stats(XGE_ERR,
	            "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA",
	            (unsigned long long)(ulong_t)stats->pcim_info);

	        xge_os_dma_free(hldev->pdev,
	              stats->pcim_info,
	              sizeof(xge_hal_stats_pcim_info_t),
	              &stats->hw_info_dma_acch,
	              &stats->hw_info_dmah);

	        xge_os_free(hldev->pdev, stats->pcim_info_saved,
	            sizeof(xge_hal_stats_pcim_info_t));

	        xge_os_free(hldev->pdev, stats->pcim_info_latest,
	            sizeof(xge_hal_stats_pcim_info_t));

	        return XGE_HAL_ERR_OUT_OF_MAPPING;
	    }
	}
	stats->devh = devh;
	xge_os_memzero(&stats->sw_dev_info_stats,
	         sizeof(xge_hal_stats_device_info_t));

	stats->is_initialized = 1;

	return XGE_HAL_OK;
}
Example no. 24
xge_hal_status_e
__hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
	xge_hal_device_t *hldev;
	xge_hal_status_e status;
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_queue_t *queue;
	int i, txdl_size, max_arr_index, mid_point;
	xge_hal_dtr_h  dtrh;

	hldev = (xge_hal_device_t *)fifo->channel.devh;
	fifo->config = &hldev->config.fifo;
	queue = &fifo->config->queue[attr->post_qid];

#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_lock_init(&fifo->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_init_irq(&fifo->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_TX_MULTI_POST)
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)  {
                fifo->post_lock_ptr = &hldev->xena_post_lock;
	} else {
	        xge_os_spin_lock_init(&fifo->channel.post_lock, hldev->pdev);
                fifo->post_lock_ptr = &fifo->channel.post_lock;
	}
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)  {
                fifo->post_lock_ptr = &hldev->xena_post_lock;
	} else {
	        xge_os_spin_lock_init_irq(&fifo->channel.post_lock,
					hldev->irqh);
                fifo->post_lock_ptr = &fifo->channel.post_lock;
	}
#endif

	/* Initializing the BAR1 address as the start of
	 * the FIFO queue pointer and as a location of FIFO control
	 * word. */
	fifo->hw_pair =
	        (xge_hal_fifo_hw_pair_t *) (void *)(hldev->bar1 +
		        (attr->post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));

	/* apply "interrupts per txdl" attribute */
	fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_UTILZ;
	if (queue->intr) {
		fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_PER_LIST;
	}
	fifo->no_snoop_bits =
		(int)(XGE_HAL_TX_FIFO_NO_SNOOP(queue->no_snoop_bits));

	/*
	 * FIFO memory management strategy:
	 *
	 * A TxDL is split into three independent parts:
	 *	- set of TxD's
	 *	- TxD HAL private part
	 *	- upper layer private part
	 *
	 * Adaptive memory allocation is used, i.e. memory is allocated on
	 * demand with a size that fits into one memory block.
	 * One memory block may contain more than one TxDL. In the simple case
	 * the memory block size can be equal to the CPU page size. On more
	 * sophisticated OSes a memory block can be contiguous across
	 * several pages.
	 *
	 * During "reserve" operations more memory can be allocated on demand,
	 * for example due to a FIFO-full condition.
	 *
	 * The pool of memblocks never shrinks, except in the __hal_fifo_close
	 * routine, which essentially stops the channel and frees the resources.
	 */

	/* TxDL common private size == TxDL private + ULD private */
	fifo->priv_size = sizeof(xge_hal_fifo_txdl_priv_t) +
			  attr->per_dtr_space;
	fifo->priv_size = ((fifo->priv_size + __xge_os_cacheline_size -1) /
                               __xge_os_cacheline_size) *
                               __xge_os_cacheline_size;

	/* recompute txdl size to be cacheline aligned */
	fifo->txdl_size = fifo->config->max_frags * sizeof(xge_hal_fifo_txd_t);
	txdl_size = ((fifo->txdl_size + __xge_os_cacheline_size - 1) /
			__xge_os_cacheline_size) * __xge_os_cacheline_size;

	if (fifo->txdl_size != txdl_size)
	        xge_debug_fifo(XGE_ERR, "cacheline > 128 (??): %d, %d, %d, %d",
		fifo->config->max_frags, fifo->txdl_size, txdl_size,
		__xge_os_cacheline_size);

	fifo->txdl_size = txdl_size;

	/* the dtr_init() callback will be called from item_alloc(), and
	 * likewise the channel's userdata might be used prior to
	 * channel_initialize() */
	fifo->channel.dtr_init = attr->dtr_init;
	fifo->channel.userdata = attr->userdata;
	fifo->txdl_per_memblock = fifo->config->memblock_size /
		fifo->txdl_size;

	fifo->mempool = __hal_mempool_create(hldev->pdev,
					     fifo->config->memblock_size,
					     fifo->txdl_size,
					     fifo->priv_size,
					     queue->initial,
					     queue->max,
					     __hal_fifo_mempool_item_alloc,
					     __hal_fifo_mempool_item_free,
					     fifo);
	if (fifo->mempool == NULL) {
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	status = __hal_channel_initialize(channelh, attr,
					__hal_mempool_items_arr(fifo->mempool),
					queue->initial, queue->max,
					fifo->config->reserve_threshold);
	if (status != XGE_HAL_OK) {
		__hal_fifo_close(channelh);
		return status;
	}
	xge_debug_fifo(XGE_TRACE,
		"DTR  reserve_length:%d reserve_top:%d\n"
		"max_frags:%d reserve_threshold:%d\n"
		"memblock_size:%d alignment_size:%d max_aligned_frags:%d\n",
		fifo->channel.reserve_length, fifo->channel.reserve_top,
		fifo->config->max_frags, fifo->config->reserve_threshold,
		fifo->config->memblock_size, fifo->config->alignment_size,
		fifo->config->max_aligned_frags);

#ifdef XGE_DEBUG_ASSERT
	for ( i = 0; i < fifo->channel.reserve_length; i++) {
		xge_debug_fifo(XGE_TRACE, "DTR before reversing index:%d"
		" handle:%p\n", i, fifo->channel.reserve_arr[i]);
	}
#endif

	xge_assert(fifo->channel.reserve_length);
	/* reverse the FIFO dtr array */
	max_arr_index = fifo->channel.reserve_length - 1;
	max_arr_index -= fifo->channel.reserve_top;
	xge_assert(max_arr_index);
	mid_point = (fifo->channel.reserve_length - fifo->channel.reserve_top)/2;
	for (i = 0; i < mid_point; i++) {
		dtrh = 	fifo->channel.reserve_arr[i];
		fifo->channel.reserve_arr[i] = 
			fifo->channel.reserve_arr[max_arr_index - i];
		fifo->channel.reserve_arr[max_arr_index  - i] = dtrh;
	}

#ifdef XGE_DEBUG_ASSERT
	for ( i = 0; i < fifo->channel.reserve_length; i++) {
		xge_debug_fifo(XGE_TRACE, "DTR after reversing index:%d"
		" handle:%p\n", i, fifo->channel.reserve_arr[i]);
	}
#endif

	return XGE_HAL_OK;
}
Example no. 25
void
__hal_ring_prc_enable(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
	xge_hal_pci_bar0_t *bar0;
	u64 val64;
	void *first_block;
	int block_num;
	xge_hal_ring_queue_t *queue;
	pci_dma_h dma_handle;

	xge_assert(ring);
	xge_assert(ring->channel.pdev);
	bar0 = (xge_hal_pci_bar0_t *) (void *)
			((xge_hal_device_t *)ring->channel.devh)->bar0;

	queue = &ring->config->queue[ring->channel.post_qid];
	xge_assert(queue->buffer_mode == 1 ||
		    queue->buffer_mode == 3 ||
		    queue->buffer_mode == 5);

	/* last block in fact becomes first. This is just the way it
	 * is filled up and linked by item_alloc() */

	block_num = queue->initial;
	first_block = __hal_mempool_item(ring->mempool, block_num - 1);
	val64 = __hal_ring_item_dma_addr(ring->mempool,
					 first_block, &dma_handle);
	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);

	xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x%llx initialized",
			ring->channel.post_qid, (unsigned long long)val64);

	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
		ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
	    !queue->rth_en) {
		val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE;
	}
	val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED;

	val64 |= vBIT((queue->buffer_mode >> 1),14,2);/* 1,3 or 5 => 0,1 or 2 */
	val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
	val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
		(hldev->config.pci_freq_mherz * queue->backoff_interval_us));

	/* Beware: no snoop by the bridge if (no_snoop_bits) */
	val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits);

        /* Herc: always use group_reads */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	        val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);

	/* Configure Receive Protocol Assist */
	val64 = xge_os_pio_mem_read64(ring->channel.pdev,
			ring->channel.regh0, &bar0->rx_pa_cfg);
	val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
	val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI | XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL);
	/* Clean STRIP_VLAN_TAG bit and set as config from upper layer */
	val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
	val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);

	xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
			val64, &bar0->rx_pa_cfg);

	xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
			ring->channel.post_qid, queue->buffer_mode);
}
Example no. 26
void
xge_hal_driver_bar0_offset_check(void)
{
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, adapter_status) ==
	       0x108);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, tx_traffic_int) ==
	       0x08E0);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, dtx_control) ==
	       0x09E8);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, tx_fifo_partition_0) ==
	       0x1108);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, pcc_enable) ==
	       0x1170);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, prc_rxd0_n[0]) ==
	       0x1930);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rti_command_mem) ==
	       0x19B8);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_cfg) ==
	       0x2100);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rmac_addr_cmd_mem) ==
	       0x2128);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_link_util) ==
	       0x2170);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mc_pause_thresh_q0q3) ==
	       0x2918);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, pcc_err_reg) ==
	       0x1040);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rxdma_int_status) ==
	       0x1800);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_tmac_err_reg) ==
	       0x2010);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mc_err_reg) ==
	       0x2810);
	xge_assert(xge_offsetof(xge_hal_pci_bar0_t, xgxs_int_status) ==
	       0x3000);
}
Example no. 27
/**
 * xge_hal_fifo_dtr_buffer_set_aligned - Align transmit buffer and fill
 * in fifo descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *            (of buffers).
 * @vaddr: Virtual address of the data buffer.
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 * @misaligned_size: Size (in bytes) of the misaligned portion of the
 * data buffer. Calculated by the caller, based on the platform/OS/other
 * specific criteria, which is outside of HAL's domain. See notes below.
 *
 * This API is part of the transmit descriptor preparation for posting
 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
 * xge_hal_fifo_dtr_mss_set() and xge_hal_fifo_dtr_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Xframe specification.
 * On PCI-X based systems aligning transmit data typically provides better
 * transmit performance. The typical alignment granularity is the L2 cacheline
 * size. However, HAL makes no assumptions about the alignment granularity;
 * this is specified via the additional @misaligned_size parameter described
 * above.
 * Prior to calling xge_hal_fifo_dtr_buffer_set_aligned(),
 * ULD is supposed to check alignment of a given fragment/buffer. For this HAL
 * provides a separate xge_hal_check_alignment() API sufficient to cover
 * most (but not all) possible alignment criteria.
 * If the buffer appears to be aligned, the ULD calls
 * xge_hal_fifo_dtr_buffer_set().
 * Otherwise, ULD calls xge_hal_fifo_dtr_buffer_set_aligned().
 *
 * Note: This API is a "superset" of xge_hal_fifo_dtr_buffer_set(). In
 * addition to filling in the specified descriptor it aligns transmit data on
 * the specified boundary.
 * Note: Decision on whether to align or not to align a given contiguous
 * transmit buffer is outside of HAL's domain. To this end ULD can use any
 * programmable criteria, which can help to 1) boost transmit performance,
 * and/or 2) provide a workaround for PCI bridge bugs, if any.
 *
 * See also: xge_hal_fifo_dtr_buffer_set(),
 * xge_hal_check_alignment().
 *
 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_post(),
 * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_cksum_set_bits()
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
	        xge_hal_dtr_h dtrh, int frag_idx, void *vaddr,
	        dma_addr_t dma_pointer, int size, int misaligned_size)
{
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_txdl_priv_t *txdl_priv;
	xge_hal_fifo_txd_t *txdp;
	int remaining_size;
	ptrdiff_t prev_boff;

	txdl_priv = __hal_fifo_txdl_priv(dtrh);
	txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;

	if (frag_idx != 0) {
	    txdp->control_1 = txdp->control_2 = 0;
	}

	/* On some systems buffer size could be zero.
	 * It is the responsibility of ULD and *not HAL* to
	 * detect it and skip it. */
	xge_assert(size > 0);
	xge_assert(frag_idx < txdl_priv->alloc_frags);
	xge_assert(misaligned_size != 0 &&
	        misaligned_size <= fifo->config->alignment_size);

	remaining_size = size - misaligned_size;
	xge_assert(remaining_size >= 0);

	xge_os_memcpy((char*)txdl_priv->align_vaddr_start,
	                  vaddr, misaligned_size);

	if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) {
	    return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;
	}

	/* setup new buffer */
	prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
	txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff;
	txdp->control_1 |= XGE_HAL_TXD_BUFFER0_SIZE(misaligned_size);
	txdl_priv->bytes_sent += misaligned_size;
	fifo->channel.stats.total_buffers++;
	txdl_priv->frags++;
	txdl_priv->align_used_frags++;
	txdl_priv->align_vaddr_start += fifo->config->alignment_size;
	txdl_priv->align_dma_offset = 0;

#if defined(XGE_OS_DMA_REQUIRES_SYNC)
	/* sync new buffer */
	xge_os_dma_sync(fifo->channel.pdev,
	          txdl_priv->align_dma_handle,
	          txdp->buffer_pointer,
	          0,
	          misaligned_size,
	          XGE_OS_DMA_DIR_TODEVICE);
#endif

	if (remaining_size) {
	    xge_assert(frag_idx < txdl_priv->alloc_frags);
	    txdp++;
	    txdp->buffer_pointer = (u64)dma_pointer +
	                misaligned_size;
	    txdp->control_1 =
	        XGE_HAL_TXD_BUFFER0_SIZE(remaining_size);
	    txdl_priv->bytes_sent += remaining_size;
	    txdp->control_2 = 0;
	    fifo->channel.stats.total_buffers++;
	    txdl_priv->frags++;
	}

	return XGE_HAL_OK;
}
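A hedged sketch of the per-fragment decision described in the notes above. The helper name is hypothetical; the alignment test is a plain modulo check (a real ULD could use xge_hal_check_alignment() instead); and xge_hal_fifo_dtr_buffer_set() is assumed to take (channelh, dtrh, frag_idx, dma_pointer, size), i.e. the aligned variant's parameters minus @vaddr and @misaligned_size.

/* Hypothetical ULD helper: pick the plain or the aligned buffer_set
 * variant for one transmit fragment. */
static xge_hal_status_e
uld_set_tx_fragment(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	    int frag_idx, void *vaddr, dma_addr_t dma_pointer,
	    int size, int alignment_size)
{
	int misaligned = (int)((unsigned long)dma_pointer %
		    (unsigned long)alignment_size);
	int head;

	if (misaligned == 0) {
		/* buffer already aligned: no bounce copy needed
		 * (assumed prototype, see the note above) */
		xge_hal_fifo_dtr_buffer_set(channelh, dtrh, frag_idx,
				dma_pointer, size);
		return XGE_HAL_OK;
	}

	/* copy the misaligned head into the bounce buffer, DMA the rest */
	head = alignment_size - misaligned;
	if (head > size)
		head = size;
	return xge_hal_fifo_dtr_buffer_set_aligned(channelh, dtrh, frag_idx,
			vaddr, dma_pointer, size, head);
}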
Example no. 28
/**
 * xge_hal_fifo_dtr_reserve_many - Reserve fifo descriptors which span more
 *  than a single txdl.
 * @channelh: Channel handle.
 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
 *        with a valid handle.
 * @frags: Minimum number of fragments to be reserved.
 *
 * Reserve TxDL(s) (that is, fifo descriptors)
 * for subsequent filling-in by the upper-layer driver (ULD)
 * and posting on the corresponding channel (@channelh)
 * via xge_hal_fifo_dtr_post().
 *
 * Returns: XGE_HAL_OK - success;
 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 *
 * See also: xge_hal_fifo_dtr_reserve_sp(), xge_hal_fifo_dtr_free(),
 * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
 * Usage: See ex_xmit{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh,
	            xge_hal_dtr_h *dtrh, const int frags)
{
	xge_hal_status_e status = XGE_HAL_OK;
	int alloc_frags = 0, dang_frags = 0;
	xge_hal_fifo_txd_t *curr_txdp = NULL;
	xge_hal_fifo_txd_t *next_txdp;
	xge_hal_fifo_txdl_priv_t *next_txdl_priv, *curr_txdl_priv = NULL;
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	int max_frags = fifo->config->max_frags;
	xge_hal_dtr_h dang_dtrh = NULL;
#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	unsigned long flags=0;
#endif
	xge_debug_fifo(XGE_TRACE, "dtr_reserve_many called for frags %d",
	    frags);
	xge_assert(frags < (fifo->txdl_per_memblock * max_frags));
#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_lock(&fifo->channel.reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_irq(&fifo->channel.reserve_lock, flags);
#endif
	while(alloc_frags < frags) {
	    status = __hal_channel_dtr_alloc(channelh,
	            (xge_hal_dtr_h *)(void*)&next_txdp);
	    if (status != XGE_HAL_OK){
	        xge_debug_fifo(XGE_ERR,
	            "failed to allocate linked fragments rc %d",
	             status);
	        xge_assert(status == XGE_HAL_INF_OUT_OF_DESCRIPTORS);
	        if (*dtrh) {
	            xge_assert(alloc_frags/max_frags);
	            __hal_fifo_txdl_restore_many(channelh,
	                (xge_hal_fifo_txd_t *) *dtrh, alloc_frags/max_frags);
	        }
	        if (dang_dtrh) {
	            xge_assert(dang_frags/max_frags);
	            __hal_fifo_txdl_restore_many(channelh,
	                (xge_hal_fifo_txd_t *) dang_dtrh, dang_frags/max_frags);
	        }
	        break;
	    }
	    xge_debug_fifo(XGE_TRACE, "allocated linked dtrh %p"
	        " for frags %d", next_txdp, frags);
	    next_txdl_priv = __hal_fifo_txdl_priv(next_txdp);
	    xge_assert(next_txdl_priv);
	    xge_assert(next_txdl_priv->first_txdp == next_txdp);
	    next_txdl_priv->dang_txdl = NULL;
	    next_txdl_priv->dang_frags = 0;
	    next_txdl_priv->next_txdl_priv = NULL;
#if defined(XGE_OS_MEMORY_CHECK)
	    next_txdl_priv->allocated = 1;
#endif
	    if (!curr_txdp || !curr_txdl_priv) {
	        curr_txdp = next_txdp;
	        curr_txdl_priv = next_txdl_priv;
	        *dtrh = (xge_hal_dtr_h)next_txdp;
	        alloc_frags = max_frags;
	        continue;
	    }
	    if (curr_txdl_priv->memblock ==
	        next_txdl_priv->memblock) {
	        xge_debug_fifo(XGE_TRACE,
	            "linking dtrh %p, with %p",
	            *dtrh, next_txdp);
	        xge_assert (next_txdp ==
	            curr_txdp + max_frags);
	        alloc_frags += max_frags;
	        curr_txdl_priv->next_txdl_priv = next_txdl_priv;
	    }
	    else {
	        xge_assert(*dtrh);
	        xge_assert(dang_dtrh == NULL);
	        dang_dtrh = *dtrh;
	        dang_frags = alloc_frags;
	        xge_debug_fifo(XGE_TRACE,
	            "dangling dtrh %p, linked with dtrh %p",
	            *dtrh, next_txdp);
	        next_txdl_priv->dang_txdl = (xge_hal_fifo_txd_t *) *dtrh;
	        next_txdl_priv->dang_frags = alloc_frags;
	        alloc_frags = max_frags;
	        *dtrh  = next_txdp;
	    }
	    curr_txdp = next_txdp;
	    curr_txdl_priv = next_txdl_priv;
	}

#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_unlock(&fifo->channel.reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_unlock_irq(&fifo->channel.reserve_lock, flags);
#endif

	if (status == XGE_HAL_OK) {
	    xge_hal_fifo_txdl_priv_t * txdl_priv;
	    xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh;
	    xge_hal_stats_channel_info_t *statsp = &fifo->channel.stats;
	    txdl_priv = __hal_fifo_txdl_priv(txdp);
	    /* reset the TxDL's private */
	    txdl_priv->align_dma_offset = 0;
	    txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
	    txdl_priv->align_used_frags = 0;
	    txdl_priv->frags = 0;
	    txdl_priv->bytes_sent = 0;
	    txdl_priv->alloc_frags = alloc_frags;
	    /* reset TxD0 */
	    txdp->control_1 = txdp->control_2 = 0;

#if defined(XGE_OS_MEMORY_CHECK)
	    txdl_priv->allocated = 1;
#endif
	    /* update statistics */
	    statsp->total_posts_dtrs_many++;
	    statsp->total_posts_frags_many += txdl_priv->alloc_frags;
	    if (txdl_priv->dang_frags){
	        statsp->total_posts_dang_dtrs++;
	        statsp->total_posts_dang_frags += txdl_priv->dang_frags;
	    }
	}

	return status;
}