Example #1
/**
 * xge_hal_fifo_dtr_free - Free descriptor.
 * @channelh: Channel handle.
 * @dtr: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is the counterpart of
 * xge_hal_fifo_dtr_reserve() or xge_hal_fifo_dtr_reserve_sp() and
 * completes the descriptor's lifecycle.
 *
 * After being freed (see xge_hal_fifo_dtr_free()) the descriptor can
 * again be:
 *
 * - reserved (xge_hal_fifo_dtr_reserve);
 *
 * - posted (xge_hal_fifo_dtr_post);
 *
 * - completed (xge_hal_fifo_dtr_next_completed);
 *
 * - and recycled again (xge_hal_fifo_dtr_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 * See also: xge_hal_ring_dtr_free(), xge_hal_fifo_dtr_reserve().
 * Usage: See ex_tx_compl{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr)
{
#if defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	unsigned long flags = 0;
#endif
	xge_hal_fifo_txdl_priv_t *txdl_priv = __hal_fifo_txdl_priv(
	                (xge_hal_fifo_txd_t *)dtr);
	int max_frags = ((xge_hal_fifo_t *)channelh)->config->max_frags;
#if defined(XGE_HAL_TX_MULTI_FREE)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
	                     flags);
#endif

	if (txdl_priv->alloc_frags > max_frags) {
	    xge_hal_fifo_txd_t *dang_txdp = (xge_hal_fifo_txd_t *)
	                    txdl_priv->dang_txdl;
	    int dang_frags = txdl_priv->dang_frags;
	    int alloc_frags = txdl_priv->alloc_frags;
	    txdl_priv->dang_txdl = NULL;
	    txdl_priv->dang_frags = 0;
	    txdl_priv->alloc_frags = 0;
	    /* a dtrh spanning multiple TxDLs must carry a linked list of them */
	    xge_assert(txdl_priv->next_txdl_priv);

	    /* free any dangling dtrh first */
	    if (dang_txdp) {
	        xge_debug_fifo(XGE_TRACE,
	            "freeing dangled dtrh %p for %d fragments",
	            dang_txdp, dang_frags);
	        __hal_fifo_txdl_free_many(channelh, dang_txdp,
	            max_frags, dang_frags);
	    }

	    /* now free the reserved dtrh list */
	    xge_debug_fifo(XGE_TRACE,
	            "freeing dtrh %p list of %d fragments", dtr,
	            alloc_frags);
	    __hal_fifo_txdl_free_many(channelh,
	            (xge_hal_fifo_txd_t *)dtr, max_frags,
	            alloc_frags);
	}
	else
	    __hal_channel_dtr_free(channelh, dtr);

	((xge_hal_channel_t *)channelh)->poll_bytes += txdl_priv->bytes_sent;

#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
	__hal_fifo_txdl_priv(dtr)->allocated = 0;
#endif

#if defined(XGE_HAL_TX_MULTI_FREE)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
	                       flags);
#endif
}
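The ex_tx_compl{} usage referenced in the header above is not included here; the following is a minimal sketch of such a completion handler, assuming the HAL's xge_hal_fifo_dtr_next_completed(channelh, &dtrh, &t_code) signature and treating a zero t_code as success. The handler name and error handling are illustrative, not HAL code.

/* minimal ex_tx_compl{}-style sketch; handler name and error handling
 * are illustrative assumptions */
static void
example_tx_compl(xge_hal_channel_h channelh)
{
	xge_hal_dtr_h dtrh;
	u8 t_code;

	/* reap every descriptor the hardware has completed so far */
	while (xge_hal_fifo_dtr_next_completed(channelh, &dtrh, &t_code) ==
	       XGE_HAL_OK) {
		if (t_code != 0) {
			/* non-zero t_code: transmit error; ULD-specific
			 * accounting would go here */
		}
		/* recycle the descriptor back to the reserve pool */
		xge_hal_fifo_dtr_free(channelh, dtrh);
	}
}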
Example #2
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_txdl_restore_many(xge_hal_channel_h channelh,
	          xge_hal_fifo_txd_t *txdp, int txdl_count)
{
	xge_hal_fifo_txdl_priv_t *current_txdl_priv;
	xge_hal_fifo_txdl_priv_t *next_txdl_priv;
	int i = txdl_count;

	xge_assert(((xge_hal_channel_t *)channelh)->reserve_length +
	    txdl_count <= ((xge_hal_channel_t *)channelh)->reserve_initial);

	current_txdl_priv = __hal_fifo_txdl_priv(txdp);
	do {
	    xge_assert(i);
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
	    current_txdl_priv->allocated = 0;
#endif
	    next_txdl_priv = current_txdl_priv->next_txdl_priv;
	    txdp = current_txdl_priv->first_txdp;
	    current_txdl_priv->next_txdl_priv = NULL;
	    __hal_channel_dtr_restore(channelh, (xge_hal_dtr_h)txdp, --i);
	    xge_debug_fifo(XGE_TRACE,
	        "dtrh %p restored at offset %d", txdp, i);
	    current_txdl_priv = next_txdl_priv;
	} while(current_txdl_priv);
	__hal_channel_dtr_restore(channelh, NULL, txdl_count);
}
Example #3
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_txdl_free_many(xge_hal_channel_h channelh,
	          xge_hal_fifo_txd_t *txdp, int list_size, int frags)
{
	xge_hal_fifo_txdl_priv_t *current_txdl_priv;
	xge_hal_fifo_txdl_priv_t *next_txdl_priv;
	int invalid_frags = frags % list_size;
	if (invalid_frags) {
	    xge_debug_fifo(XGE_ERR,
	        "freeing corrupt dtrh %p, fragments %d list size %d",
	        txdp, frags, list_size);
	    xge_assert(invalid_frags == 0);
	}
	while (txdp) {
	    xge_debug_fifo(XGE_TRACE,
	        "freeing linked dtrh %p, fragments %d list size %d",
	        txdp, frags, list_size);
	    current_txdl_priv = __hal_fifo_txdl_priv(txdp);
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
	    current_txdl_priv->allocated = 0;
#endif
	    __hal_channel_dtr_free(channelh, txdp);
	    next_txdl_priv = current_txdl_priv->next_txdl_priv;
	    xge_assert(frags);
	    frags -= list_size;
	    if (next_txdl_priv) {
	        current_txdl_priv->next_txdl_priv = NULL;
	        txdp = next_txdl_priv->first_txdp;
	    }
	    else {
	        xge_debug_fifo(XGE_TRACE,
	        "freed linked dtrh fragments %d list size %d",
	        frags, list_size);
	        break;
	    }
	}
	xge_assert(frags == 0);
}
Example #4
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	        u64 ctrl_1)
{
	xge_hal_fifo_t            *fifo    = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_hw_pair_t    *hw_pair = fifo->hw_pair;
	xge_hal_fifo_txd_t        *txdp    = (xge_hal_fifo_txd_t *)dtrh;
	xge_hal_fifo_txdl_priv_t  *txdl_priv;
	u64           ctrl;

	txdp->control_1 |= XGE_HAL_TXD_LIST_OWN_XENA;

#ifdef XGE_DEBUG_ASSERT
	    /* make sure Xena overwrites the (illegal) t_code value on completion */
	    XGE_HAL_SET_TXD_T_CODE(txdp->control_1, XGE_HAL_TXD_T_CODE_UNUSED_5);
#endif

	txdl_priv = __hal_fifo_txdl_priv(dtrh);

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	/* sync the TxDL to device */
	xge_os_dma_sync(fifo->channel.pdev,
	              txdl_priv->dma_handle,
	          txdl_priv->dma_addr,
	          txdl_priv->dma_offset,
	          txdl_priv->frags << 5 /* sizeof(xge_hal_fifo_txd_t) */,
	          XGE_OS_DMA_DIR_TODEVICE);
#endif
	/* write the pointer first */
	xge_os_pio_mem_write64(fifo->channel.pdev,
	             fifo->channel.regh1,
	                     txdl_priv->dma_addr,
	                     &hw_pair->txdl_pointer);

	/* spec: 0x00 = 1 TxD in the list */
	ctrl = XGE_HAL_TX_FIFO_LAST_TXD_NUM(txdl_priv->frags - 1);
	ctrl |= ctrl_1;
	ctrl |= fifo->no_snoop_bits;

	if (txdp->control_1 & XGE_HAL_TXD_LSO_COF_CTRL(XGE_HAL_TXD_TCP_LSO)) {
	    ctrl |= XGE_HAL_TX_FIFO_SPECIAL_FUNC;
	}

	/*
	 * according to the XENA spec:
	 *
	 * It is important to note that pointers and list control words are
	 * always written in pairs: in the first write, the host must write a
	 * pointer, and in the second write, it must write the list control
	 * word. Any other access will result in an error. Also, all 16 bytes
	 * of the pointer/control structure must be written, including any
	 * reserved bytes.
	 */
	xge_os_wmb();

	/*
	 * we want to touch work_arr in order, with the ownership bit
	 * already set to HW
	 */
	__hal_channel_dtr_post(channelh, dtrh);

	xge_os_pio_mem_write64(fifo->channel.pdev, fifo->channel.regh1,
	        ctrl, &hw_pair->list_control);

	xge_debug_fifo(XGE_TRACE, "posted txdl 0x"XGE_OS_LLXFMT" ctrl 0x"XGE_OS_LLXFMT" "
	    "into 0x"XGE_OS_LLXFMT"", (unsigned long long)txdl_priv->dma_addr,
	    (unsigned long long)ctrl,
	    (unsigned long long)(ulong_t)&hw_pair->txdl_pointer);

#ifdef XGE_HAL_FIFO_DUMP_TXD
	xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
	    XGE_OS_LLXFMT" dma "XGE_OS_LLXFMT,
	    txdp->control_1, txdp->control_2, txdp->buffer_pointer,
	    txdp->host_control, txdl_priv->dma_addr);
#endif

	fifo->channel.stats.total_posts++;
	fifo->channel.usage_cnt++;
	if (fifo->channel.stats.usage_max < fifo->channel.usage_cnt)
	    fifo->channel.stats.usage_max = fifo->channel.usage_cnt;
}
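Stripped of the surrounding bookkeeping, the pointer/control pairing mandated by the XENA spec (quoted in the comment above) reduces to the following three-step sketch; everything except the xge_os_* calls is illustrative.

/* condensed sketch of the paired write protocol required by the spec;
 * the function name and parameters are illustrative */
static void
example_post_pair(xge_hal_fifo_t *fifo, u64 txdl_dma_addr, u64 list_control)
{
	xge_hal_fifo_hw_pair_t *hw_pair = fifo->hw_pair;

	/* first write of the pair: the TxDL pointer */
	xge_os_pio_mem_write64(fifo->channel.pdev, fifo->channel.regh1,
	        txdl_dma_addr, &hw_pair->txdl_pointer);

	/* fence: the pointer must reach the device before the control word */
	xge_os_wmb();

	/* second write of the pair: the list control word; any other
	 * access pattern is an error per the spec */
	xge_os_pio_mem_write64(fifo->channel.pdev, fifo->channel.regh1,
	        list_control, &hw_pair->list_control);
}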
Example #5
/**
 * xge_hal_fifo_dtr_reserve_many - Reserve fifo descriptors which span
 *  more than a single TxDL.
 * @channelh: Channel handle.
 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
 *        with a valid handle.
 * @frags: Minimum number of fragments to be reserved.
 *
 * Reserve one or more TxDLs (that is, fifo descriptors)
 * for subsequent filling-in by the upper-layer driver (ULD)
 * and posting on the corresponding channel (@channelh)
 * via xge_hal_fifo_dtr_post().
 *
 * Returns: XGE_HAL_OK - success;
 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - currently no descriptors available.
 *
 * See also: xge_hal_fifo_dtr_reserve_sp(), xge_hal_fifo_dtr_free(),
 * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
 * Usage: See ex_xmit{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh,
	            xge_hal_dtr_h *dtrh, const int frags)
{
	xge_hal_status_e status = XGE_HAL_OK;
	int alloc_frags = 0, dang_frags = 0;
	xge_hal_fifo_txd_t *curr_txdp = NULL;
	xge_hal_fifo_txd_t *next_txdp;
	xge_hal_fifo_txdl_priv_t *next_txdl_priv, *curr_txdl_priv = NULL;
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	int max_frags = fifo->config->max_frags;
	xge_hal_dtr_h dang_dtrh = NULL;
#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	unsigned long flags = 0;
#endif
	xge_debug_fifo(XGE_TRACE, "dtr_reserve_many called for frags %d",
	    frags);
	xge_assert(frags < (fifo->txdl_per_memblock * max_frags));
#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_lock(&fifo->channel.reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_irq(&fifo->channel.reserve_lock, flags);
#endif
	while(alloc_frags < frags) {
	    status = __hal_channel_dtr_alloc(channelh,
	            (xge_hal_dtr_h *)(void*)&next_txdp);
	    if (status != XGE_HAL_OK){
	        xge_debug_fifo(XGE_ERR,
	            "failed to allocate linked fragments rc %d",
	             status);
	        xge_assert(status == XGE_HAL_INF_OUT_OF_DESCRIPTORS);
	        if (*dtrh) {
	            xge_assert(alloc_frags/max_frags);
	            __hal_fifo_txdl_restore_many(channelh,
	                (xge_hal_fifo_txd_t *) *dtrh, alloc_frags/max_frags);
	        }
	        if (dang_dtrh) {
	            xge_assert(dang_frags/max_frags);
	            __hal_fifo_txdl_restore_many(channelh,
	                (xge_hal_fifo_txd_t *) dang_dtrh, dang_frags/max_frags);
	        }
	        break;
	    }
	    xge_debug_fifo(XGE_TRACE, "allocated linked dtrh %p"
	        " for frags %d", next_txdp, frags);
	    next_txdl_priv = __hal_fifo_txdl_priv(next_txdp);
	    xge_assert(next_txdl_priv);
	    xge_assert(next_txdl_priv->first_txdp == next_txdp);
	    next_txdl_priv->dang_txdl = NULL;
	    next_txdl_priv->dang_frags = 0;
	    next_txdl_priv->next_txdl_priv = NULL;
#if defined(XGE_OS_MEMORY_CHECK)
	    next_txdl_priv->allocated = 1;
#endif
	    if (!curr_txdp || !curr_txdl_priv) {
	        curr_txdp = next_txdp;
	        curr_txdl_priv = next_txdl_priv;
	        *dtrh = (xge_hal_dtr_h)next_txdp;
	        alloc_frags = max_frags;
	        continue;
	    }
	    if (curr_txdl_priv->memblock ==
	        next_txdl_priv->memblock) {
	        xge_debug_fifo(XGE_TRACE,
	            "linking dtrh %p, with %p",
	            *dtrh, next_txdp);
	        xge_assert (next_txdp ==
	            curr_txdp + max_frags);
	        alloc_frags += max_frags;
	        curr_txdl_priv->next_txdl_priv = next_txdl_priv;
	    }
	    else {
	        xge_assert(*dtrh);
	        xge_assert(dang_dtrh == NULL);
	        dang_dtrh = *dtrh;
	        dang_frags = alloc_frags;
	        xge_debug_fifo(XGE_TRACE,
	            "dangling dtrh %p, linked with dtrh %p",
	            *dtrh, next_txdp);
	        next_txdl_priv->dang_txdl = (xge_hal_fifo_txd_t *) *dtrh;
	        next_txdl_priv->dang_frags = alloc_frags;
	        alloc_frags = max_frags;
	        *dtrh  = next_txdp;
	    }
	    curr_txdp = next_txdp;
	    curr_txdl_priv = next_txdl_priv;
	}

#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_unlock(&fifo->channel.reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_unlock_irq(&fifo->channel.reserve_lock, flags);
#endif

	if (status == XGE_HAL_OK) {
	    xge_hal_fifo_txdl_priv_t * txdl_priv;
	    xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh;
	    xge_hal_stats_channel_info_t *statsp = &fifo->channel.stats;
	    txdl_priv = __hal_fifo_txdl_priv(txdp);
	    /* reset the TxDL's private */
	    txdl_priv->align_dma_offset = 0;
	    txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
	    txdl_priv->align_used_frags = 0;
	    txdl_priv->frags = 0;
	    txdl_priv->bytes_sent = 0;
	    txdl_priv->alloc_frags = alloc_frags;
	    /* reset TxD0 */
	    txdp->control_1 = txdp->control_2 = 0;

#if defined(XGE_OS_MEMORY_CHECK)
	    txdl_priv->allocated = 1;
#endif
	    /* update statistics */
	    statsp->total_posts_dtrs_many++;
	    statsp->total_posts_frags_many += txdl_priv->alloc_frags;
	    if (txdl_priv->dang_frags){
	        statsp->total_posts_dang_dtrs++;
	        statsp->total_posts_dang_frags += txdl_priv->dang_frags;
	    }
	}

	return status;
}
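The ex_xmit{} usage referenced in the header is not included here; below is a minimal reserve/fill/post sketch for a single pre-mapped fragment. It assumes the HAL's xge_hal_fifo_dtr_buffer_set(channelh, dtrh, frag_idx, dma_pointer, size) and xge_hal_fifo_dtr_post(channelh, dtrh) signatures; the function name and packet representation are illustrative. Note that *dtrh must be NULL-initialized, since the failure path above inspects it.

/* minimal ex_xmit{}-style sketch; assumes the buffer_set/post signatures
 * noted above, everything else is illustrative */
static xge_hal_status_e
example_xmit(xge_hal_channel_h channelh, dma_addr_t frag_dma, int frag_len)
{
	xge_hal_dtr_h dtrh = NULL;	/* reserve_many's error path reads *dtrh */
	xge_hal_status_e status;

	/* reserve a descriptor (possibly spanning several TxDLs) */
	status = xge_hal_fifo_dtr_reserve_many(channelh, &dtrh, 1);
	if (status != XGE_HAL_OK)
		return status;	/* e.g. XGE_HAL_INF_OUT_OF_DESCRIPTORS */

	/* attach the single data fragment */
	xge_hal_fifo_dtr_buffer_set(channelh, dtrh, 0, frag_dma, frag_len);

	/* hand the descriptor to the hardware */
	xge_hal_fifo_dtr_post(channelh, dtrh);
	return XGE_HAL_OK;
}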
Example #6
void
__hal_fifo_hw_initialize(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 =
			(xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64* tx_fifo_partitions[4];
	u64* tx_fifo_wrr[5];
	u64 val64, part0;
	int priority = 0;
	int i;

	/*  Tx DMA Initialization */

	tx_fifo_partitions[0] = &bar0->tx_fifo_partition_0;
	tx_fifo_partitions[1] = &bar0->tx_fifo_partition_1;
	tx_fifo_partitions[2] = &bar0->tx_fifo_partition_2;
	tx_fifo_partitions[3] = &bar0->tx_fifo_partition_3;

	tx_fifo_wrr[0] = &bar0->tx_w_round_robin_0;
	tx_fifo_wrr[1] = &bar0->tx_w_round_robin_1;
	tx_fifo_wrr[2] = &bar0->tx_w_round_robin_2;
	tx_fifo_wrr[3] = &bar0->tx_w_round_robin_3;
	tx_fifo_wrr[4] = &bar0->tx_w_round_robin_4;

	/* Note: the WRR calendar must be configured before the transmit
	         FIFOs are enabled! See page 6-77 of the user guide. */

	/* all zeroes for Round-Robin */
	for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0,
				tx_fifo_wrr[i]);
	}

	/* reset all of them but '0' */
	for (i=1; i < XGE_HAL_FIFO_MAX_PARTITION; i++) {
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
		                     tx_fifo_partitions[i]);
	}

	/* configure only configured FIFOs */
	val64 = 0; part0 = 0;
	for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
		int reg_half = i % 2;
		int reg_num = i / 2;

		priority = 0;

		if (hldev->config.fifo.queue[i].configured) {
			val64 |=
			    vBIT((hldev->config.fifo.queue[i].max-1),
				(((reg_half) * 32) + 19),
				13) | vBIT(priority, (((reg_half)*32) + 5), 3);
		}

		/* NOTE: each 64-bit partition register describes two
		         FIFOs, so write it out only after composing its
		         second half, i.e. on every odd-indexed FIFO */
		if (reg_half) {
			if (reg_num == 0) {
				/* skip partition '0', must write it once at
				 * the end */
				part0 = val64;
			} else {
				xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
				     val64, tx_fifo_partitions[reg_num]);
				xge_debug_fifo(XGE_TRACE,
					"fifo partition_%d at: "
					"0x%llx is: 0x%llx", reg_num,
					(unsigned long long)(ulong_t)
						tx_fifo_partitions[reg_num],
					(unsigned long long)val64);
			}
			val64 = 0;
		}
	}

	part0 |= BIT(0); /* to enable the FIFO partition. */
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)part0,
	                     tx_fifo_partitions[0]);
	xge_os_wmb();
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(part0>>32),
	                     tx_fifo_partitions[0]);
	xge_debug_fifo(XGE_TRACE, "fifo partition_0 at: "
			"0x%llx is: 0x%llx",
			(unsigned long long)(ulong_t)
				tx_fifo_partitions[0],
			(unsigned long long) part0);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                            &bar0->tx_pa_cfg);
	val64 |= XGE_HAL_TX_PA_CFG_IGNORE_FRM_ERR |
		 XGE_HAL_TX_PA_CFG_IGNORE_SNAP_OUI |
		 XGE_HAL_TX_PA_CFG_IGNORE_LLC_CTRL |
		 XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                     &bar0->tx_pa_cfg);
	xge_debug_fifo(XGE_TRACE, "%s", "fifo channels initialized");
}
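The per-FIFO fields packed into each partition register above are easier to read in isolation. A small worked sketch follows, assuming the conventional Xframe definition vBIT(val, loc, sz) == ((u64)(val)) << (64 - (loc) - (sz)), i.e. 'loc' counts bits from the MSB; the EX_vBIT macro and the function are illustrative.

/* worked example of the partition-register packing; EX_vBIT mirrors the
 * assumed vBIT convention, everything else is illustrative */
#define EX_vBIT(val, loc, sz)	(((u64)(val)) << (64 - (loc) - (sz)))

static u64
example_partition_word(int len0, int prio0, int len1, int prio1)
{
	u64 val64 = 0;

	/* FIFO 2n (reg_half == 0): 13-bit (len - 1) field at offset 19,
	 * 3-bit priority at offset 5 */
	val64 |= EX_vBIT(len0 - 1, 19, 13) | EX_vBIT(prio0, 5, 3);

	/* FIFO 2n+1 (reg_half == 1): same fields, 32 bits further on */
	val64 |= EX_vBIT(len1 - 1, 32 + 19, 13) | EX_vBIT(prio1, 32 + 5, 3);

	return val64;
}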
Example #7
xge_hal_status_e
__hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
	xge_hal_device_t *hldev;
	xge_hal_status_e status;
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_queue_t *queue;
	int i, txdl_size, max_arr_index, mid_point;
	xge_hal_dtr_h  dtrh;

	hldev = (xge_hal_device_t *)fifo->channel.devh;
	fifo->config = &hldev->config.fifo;
	queue = &fifo->config->queue[attr->post_qid];

#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_lock_init(&fifo->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_init_irq(&fifo->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_TX_MULTI_POST)
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
		fifo->post_lock_ptr = &hldev->xena_post_lock;
	} else {
		xge_os_spin_lock_init(&fifo->channel.post_lock, hldev->pdev);
		fifo->post_lock_ptr = &fifo->channel.post_lock;
	}
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
		fifo->post_lock_ptr = &hldev->xena_post_lock;
	} else {
		xge_os_spin_lock_init_irq(&fifo->channel.post_lock,
					hldev->irqh);
		fifo->post_lock_ptr = &fifo->channel.post_lock;
	}
#endif

	/* Initializing the BAR1 address as the start of
	 * the FIFO queue pointer and as a location of FIFO control
	 * word. */
	fifo->hw_pair =
	        (xge_hal_fifo_hw_pair_t *) (void *)(hldev->bar1 +
		        (attr->post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));

	/* apply "interrupts per txdl" attribute */
	fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_UTILZ;
	if (queue->intr) {
		fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_PER_LIST;
	}
	fifo->no_snoop_bits =
		(int)(XGE_HAL_TX_FIFO_NO_SNOOP(queue->no_snoop_bits));

	/*
	 * FIFO memory management strategy:
	 *
	 * A TxDL is split into three independent parts:
	 *	- set of TxDs
	 *	- TxDL HAL private part
	 *	- upper-layer private part
	 *
	 * Adaptive memory allocation is used, i.e. memory is allocated on
	 * demand in sizes that fit into one memory block. One memory block
	 * may contain more than one TxDL. In the simple case the memory
	 * block size can equal the CPU page size; on more sophisticated
	 * OSes a memory block can be contiguous across several pages.
	 *
	 * During "reserve" operations more memory can be allocated on
	 * demand, for example due to a FIFO-full condition.
	 *
	 * The pool of memory blocks never shrinks, except in the
	 * __hal_fifo_close routine, which essentially stops the channel
	 * and frees the resources.
	 */

	/* TxDL common private size == TxDL private + ULD private */
	fifo->priv_size = sizeof(xge_hal_fifo_txdl_priv_t) +
	                  attr->per_dtr_space;
	fifo->priv_size = ((fifo->priv_size + __xge_os_cacheline_size -1) /
                               __xge_os_cacheline_size) *
                               __xge_os_cacheline_size;

	/* recompute txdl size to be cacheline aligned */
	fifo->txdl_size = fifo->config->max_frags * sizeof(xge_hal_fifo_txd_t);
	txdl_size = ((fifo->txdl_size + __xge_os_cacheline_size - 1) /
			__xge_os_cacheline_size) * __xge_os_cacheline_size;

	if (fifo->txdl_size != txdl_size)
		xge_debug_fifo(XGE_ERR, "cacheline > 128 (??): %d, %d, %d, %d",
			fifo->config->max_frags, fifo->txdl_size, txdl_size,
			__xge_os_cacheline_size);

	fifo->txdl_size = txdl_size;

	/* since the dtr_init() callback will be called from item_alloc(),
	 * the channel's userdata might likewise be used prior to
	 * channel_initialize() */
	fifo->channel.dtr_init = attr->dtr_init;
	fifo->channel.userdata = attr->userdata;
	fifo->txdl_per_memblock = fifo->config->memblock_size /
		fifo->txdl_size;

	fifo->mempool = __hal_mempool_create(hldev->pdev,
					     fifo->config->memblock_size,
					     fifo->txdl_size,
					     fifo->priv_size,
					     queue->initial,
					     queue->max,
					     __hal_fifo_mempool_item_alloc,
					     __hal_fifo_mempool_item_free,
					     fifo);
	if (fifo->mempool == NULL) {
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	status = __hal_channel_initialize(channelh, attr,
					__hal_mempool_items_arr(fifo->mempool),
					queue->initial, queue->max,
					fifo->config->reserve_threshold);
	if (status != XGE_HAL_OK) {
		__hal_fifo_close(channelh);
		return status;
	}
	xge_debug_fifo(XGE_TRACE,
		"DTR  reserve_length:%d reserve_top:%d\n"
		"max_frags:%d reserve_threshold:%d\n"
		"memblock_size:%d alignment_size:%d max_aligned_frags:%d\n",
		fifo->channel.reserve_length, fifo->channel.reserve_top,
		fifo->config->max_frags, fifo->config->reserve_threshold,
		fifo->config->memblock_size, fifo->config->alignment_size,
		fifo->config->max_aligned_frags);

#ifdef XGE_DEBUG_ASSERT
	for ( i = 0; i < fifo->channel.reserve_length; i++) {
		xge_debug_fifo(XGE_TRACE, "DTR before reversing index:%d"
		" handle:%p\n", i, fifo->channel.reserve_arr[i]);
	}
#endif

	xge_assert(fifo->channel.reserve_length);
	/* reverse the FIFO dtr array */
	max_arr_index = fifo->channel.reserve_length - 1;
	max_arr_index -= fifo->channel.reserve_top;
	xge_assert(max_arr_index);
	mid_point = (fifo->channel.reserve_length - fifo->channel.reserve_top)/2;
	for (i = 0; i < mid_point; i++) {
		dtrh = fifo->channel.reserve_arr[i];
		fifo->channel.reserve_arr[i] =
			fifo->channel.reserve_arr[max_arr_index - i];
		fifo->channel.reserve_arr[max_arr_index - i] = dtrh;
	}

#ifdef XGE_DEBUG_ASSERT
	for ( i = 0; i < fifo->channel.reserve_length; i++) {
		xge_debug_fifo(XGE_TRACE, "DTR after reversing index:%d"
		" handle:%p\n", i, fifo->channel.reserve_arr[i]);
	}
#endif

	return XGE_HAL_OK;
}
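The cacheline-rounding arithmetic in __hal_fifo_open is easiest to follow with concrete numbers. A standalone worked sketch follows; all values (32-byte TxDs as implied by the 'frags << 5' DMA sync in example #4, plus max_frags, cacheline and memblock sizes) are assumptions for illustration, not driver defaults.

#include <stdio.h>

/* worked example of the TxDL sizing arithmetic; all numbers are
 * illustrative assumptions */
int main(void)
{
	int cacheline = 64, memblock_size = 4096;
	int max_frags = 16, txd_size = 32;	/* cf. 'frags << 5' */

	/* raw TxDL size, then rounded up to a cacheline multiple */
	int txdl_size = max_frags * txd_size;		/* 512 */
	txdl_size = ((txdl_size + cacheline - 1) / cacheline) * cacheline;

	/* how many TxDLs fit in one memory block */
	printf("txdl_size=%d txdl_per_memblock=%d\n",
	       txdl_size, memblock_size / txdl_size);	/* 512, 8 */
	return 0;
}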