Exemple #1
0
xge_hal_status_e
__hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
	xge_hal_status_e status;
	xge_hal_device_t *hldev;
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;


	/* Note: at this point we have channel.devh and channel.pdev
	 *       pre-set only! */

	hldev = (xge_hal_device_t *)ring->channel.devh;
	ring->config = &hldev->config.ring;
	queue = &ring->config->queue[attr->post_qid];
	ring->indicate_max_pkts = queue->indicate_max_pkts;
	ring->buffer_mode = queue->buffer_mode;

	xge_assert(queue->configured);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_init(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
#endif

	ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode);
	ring->rxd_priv_size =
		sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space;

	/* how many RxDs can fit into one block. Depends on configured
	 * buffer_mode. */
	ring->rxds_per_block = XGE_HAL_RING_RXDS_PER_BLOCK(queue->buffer_mode);

	/* calculate actual RxD block private size */
	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;

	ring->reserved_rxds_arr = xge_os_malloc(ring->channel.pdev,
		      sizeof(void*) * queue->max * ring->rxds_per_block);
	if (ring->reserved_rxds_arr == NULL) {
		__hal_ring_close(channelh);
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	ring->mempool = __hal_mempool_create(
				     hldev->pdev,
				     ring->config->memblock_size,
				     XGE_HAL_RING_RXDBLOCK_SIZE,
				     ring->rxdblock_priv_size,
				     queue->initial, queue->max,
				     __hal_ring_mempool_item_alloc,
				     NULL, /* nothing to free */
				     ring);
	if (ring->mempool == NULL) {
		__hal_ring_close(channelh);
		return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	status = __hal_channel_initialize(channelh,
					  attr,
					  ring->reserved_rxds_arr,
					  queue->initial * ring->rxds_per_block,
					  queue->max * ring->rxds_per_block,
					  0 /* no threshold for ring! */);
	if (status != XGE_HAL_OK) {
		__hal_ring_close(channelh);
		return status;
	}

	/* sanity check that everything formatted ok */
	xge_assert(ring->reserved_rxds_arr[0] ==
		    (char *)ring->mempool->items_arr[0] +
		      (ring->rxds_per_block * ring->rxd_size - ring->rxd_size));

        /* Note:
	 * Specifying dtr_init callback means two things:
	 * 1) dtrs need to be initialized by ULD at channel-open time;
	 * 2) dtrs need to be posted at channel-open time
	 *    (that's what the initial_replenish() below does)
	 * Currently we don't have a case when the 1) is done without the 2).
	 */
	if (ring->channel.dtr_init) {
		if ((status = __hal_ring_initial_replenish(channelh,
						XGE_HAL_CHANNEL_OC_NORMAL))
							!= XGE_HAL_OK) {
			__hal_ring_close(channelh);
			return status;
		}
	}

	return XGE_HAL_OK;
}
Exemple #2
0
/**
 * xge_hal_driver_initialize - Initialize HAL.
 * @config: HAL configuration, see xge_hal_driver_config_t{}.
 * @uld_callbacks: Upper-layer driver callbacks, e.g. link-up.
 *
 * HAL initialization entry point. Not to confuse with device initialization
 * (note that HAL "contains" zero or more Xframe devices).
 *
 * Returns: XGE_HAL_OK - success;
 * XGE_HAL_ERR_BAD_DRIVER_CONFIG - Driver configuration params invalid.
 *
 * See also: xge_hal_device_initialize(), xge_hal_status_e{},
 * xge_hal_uld_cbs_t{}.
 */
xge_hal_status_e
xge_hal_driver_initialize(xge_hal_driver_config_t *config,
	        xge_hal_uld_cbs_t *uld_callbacks)
{
	xge_hal_status_e status;

	g_xge_hal_driver = &g_driver;

	xge_hal_driver_debug_module_mask_set(XGE_DEBUG_MODULE_MASK_DEF);
	xge_hal_driver_debug_level_set(XGE_DEBUG_LEVEL_DEF);

#ifdef XGE_HAL_DEBUG_BAR0_OFFSET
	xge_hal_driver_bar0_offset_check();
#endif

#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
	if (config->tracebuf_size == 0)
	    /*
	     * Trace buffer implementation is not lock protected.
	     * The only harm to expect is memcpy() to go beyond of
	     * allowed boundaries. To make it safe (driver-wise),
	     * we pre-allocate needed number of extra bytes.
	     */
	    config->tracebuf_size = XGE_HAL_DEF_CIRCULAR_ARR +
	                XGE_OS_TRACE_MSGBUF_MAX;
#endif

	status = __hal_driver_config_check(config);
	if (status != XGE_HAL_OK)
	    return status;

	xge_os_memzero(g_xge_hal_driver,  sizeof(xge_hal_driver_t));

	/* apply config */
	xge_os_memcpy(&g_xge_hal_driver->config, config,
	            sizeof(xge_hal_driver_config_t));

	/* apply ULD callbacks */
	xge_os_memcpy(&g_xge_hal_driver->uld_callbacks, uld_callbacks,
	                sizeof(xge_hal_uld_cbs_t));

	g_xge_hal_driver->is_initialized = 1;

#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
	g_tracebuf.size = config->tracebuf_size;
	g_tracebuf.data = (char *)xge_os_malloc(NULL, g_tracebuf.size);
	if (g_tracebuf.data == NULL) {
	    xge_os_printf("cannot allocate trace buffer!");
	    return XGE_HAL_ERR_OUT_OF_MEMORY;
	}
	/* timestamps disabled by default */
	g_tracebuf.timestamp = config->tracebuf_timestamp_en;
	if (g_tracebuf.timestamp) {
	    xge_os_timestamp(g_tracebuf.msg);
	    g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX -
	                xge_os_strlen(g_tracebuf.msg);
	} else
	    g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX;
	g_tracebuf.offset = 0;
	*g_tracebuf.msg = 0;
	xge_os_memzero(g_tracebuf.data, g_tracebuf.size);
	g_xge_os_tracebuf = &g_tracebuf;
	dmesg = g_tracebuf.data;
	*dmesg = 0;
#endif
	return XGE_HAL_OK;
}
Exemple #3
0
/*
 * xge_hal_mempool_create
 * @memblock_size:
 * @items_initial:
 * @items_max:
 * @item_size:
 * @item_func:
 *
 * This function will create memory pool object. Pool may grow but will
 * never shrink. Pool consists of number of dynamically allocated blocks
 * with size enough to hold %items_initial number of items. Memory is
 * DMA-able but client must map/unmap before interoperating with the device.
 * See also: xge_os_dma_map(), xge_hal_dma_unmap(), xge_hal_status_e{}.
 */
xge_hal_mempool_t*
__hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
	    int items_priv_size, int items_initial, int items_max,
	    xge_hal_mempool_item_f item_func_alloc,
	    xge_hal_mempool_item_f item_func_free, void *userdata)
{
	xge_hal_status_e status;
	int memblocks_to_allocate;
	xge_hal_mempool_t *mempool;
	int allocated;

	if (memblock_size < item_size) {
	    xge_debug_mm(XGE_ERR,
	        "memblock_size %d < item_size %d: misconfiguration",
	        memblock_size, item_size);
	    return NULL;
	}

	mempool = (xge_hal_mempool_t *) \
	        xge_os_malloc(pdev, sizeof(xge_hal_mempool_t));
	if (mempool == NULL) {
	    xge_debug_mm(XGE_ERR, "mempool allocation failure");
	    return NULL;
	}
	xge_os_memzero(mempool, sizeof(xge_hal_mempool_t));

	mempool->pdev           = pdev;
	mempool->memblock_size      = memblock_size;
	mempool->items_max      = items_max;
	mempool->items_initial      = items_initial;
	mempool->item_size      = item_size;
	mempool->items_priv_size    = items_priv_size;
	mempool->item_func_alloc    = item_func_alloc;
	mempool->item_func_free     = item_func_free;
	mempool->userdata       = userdata;

	mempool->memblocks_allocated = 0;

	mempool->items_per_memblock = memblock_size / item_size;

	mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
	                mempool->items_per_memblock;

	/* allocate array of memblocks */
	mempool->memblocks_arr = (void ** ) xge_os_malloc(mempool->pdev,
	                sizeof(void*) * mempool->memblocks_max);
	if (mempool->memblocks_arr == NULL) {
	    xge_debug_mm(XGE_ERR, "memblocks_arr allocation failure");
	    __hal_mempool_destroy(mempool);
	    return NULL;
	}
	xge_os_memzero(mempool->memblocks_arr,
	        sizeof(void*) * mempool->memblocks_max);

	/* allocate array of private parts of items per memblocks */
	mempool->memblocks_priv_arr = (void **) xge_os_malloc(mempool->pdev,
	                sizeof(void*) * mempool->memblocks_max);
	if (mempool->memblocks_priv_arr == NULL) {
	    xge_debug_mm(XGE_ERR, "memblocks_priv_arr allocation failure");
	    __hal_mempool_destroy(mempool);
	    return NULL;
	}
	xge_os_memzero(mempool->memblocks_priv_arr,
	        sizeof(void*) * mempool->memblocks_max);

	/* allocate array of memblocks DMA objects */
	mempool->memblocks_dma_arr =
	    (xge_hal_mempool_dma_t *) xge_os_malloc(mempool->pdev,
	    sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);

	if (mempool->memblocks_dma_arr == NULL) {
	    xge_debug_mm(XGE_ERR, "memblocks_dma_arr allocation failure");
	    __hal_mempool_destroy(mempool);
	    return NULL;
	}
	xge_os_memzero(mempool->memblocks_dma_arr,
	         sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);

	/* allocate hash array of items */
	mempool->items_arr = (void **) xge_os_malloc(mempool->pdev,
	             sizeof(void*) * mempool->items_max);
	if (mempool->items_arr == NULL) {
	    xge_debug_mm(XGE_ERR, "items_arr allocation failure");
	    __hal_mempool_destroy(mempool);
	    return NULL;
	}
	xge_os_memzero(mempool->items_arr, sizeof(void *) * mempool->items_max);

	mempool->shadow_items_arr = (void **) xge_os_malloc(mempool->pdev,
	                            sizeof(void*) *  mempool->items_max);
	if (mempool->shadow_items_arr == NULL) {
	    xge_debug_mm(XGE_ERR, "shadow_items_arr allocation failure");
	    __hal_mempool_destroy(mempool);
	    return NULL;
	}
	xge_os_memzero(mempool->shadow_items_arr,
	         sizeof(void *) * mempool->items_max);

	/* calculate initial number of memblocks */
	memblocks_to_allocate = (mempool->items_initial +
	             mempool->items_per_memblock - 1) /
	                    mempool->items_per_memblock;

	xge_debug_mm(XGE_TRACE, "allocating %d memblocks, "
	        "%d items per memblock", memblocks_to_allocate,
	        mempool->items_per_memblock);

	/* pre-allocate the mempool */
	status = __hal_mempool_grow(mempool, memblocks_to_allocate, &allocated);
	xge_os_memcpy(mempool->shadow_items_arr, mempool->items_arr,
	        sizeof(void*) * mempool->items_max);
	if (status != XGE_HAL_OK) {
	    xge_debug_mm(XGE_ERR, "mempool_grow failure");
	    __hal_mempool_destroy(mempool);
	    return NULL;
	}

	xge_debug_mm(XGE_TRACE,
	    "total: allocated %dk of DMA-capable memory",
	    mempool->memblock_size * allocated / 1024);

	return mempool;
}
Exemple #4
0
/*
 * __hal_mempool_grow
 *
 * Will resize mempool up to %num_allocate value.
 */
xge_hal_status_e
__hal_mempool_grow(xge_hal_mempool_t *mempool, int num_allocate,
	    int *num_allocated)
{
	int i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
	int n_items = mempool->items_per_memblock;

	*num_allocated = 0;

	if ((mempool->memblocks_allocated + num_allocate) >
	                    mempool->memblocks_max) {
	    xge_debug_mm(XGE_ERR, "%s",
	              "__hal_mempool_grow: can grow anymore");
	    return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	for (i = mempool->memblocks_allocated;
	     i < mempool->memblocks_allocated + num_allocate; i++) {
	    int j;
	    int is_last =
	        ((mempool->memblocks_allocated+num_allocate-1) == i);
	    xge_hal_mempool_dma_t *dma_object =
	        mempool->memblocks_dma_arr + i;
	    void *the_memblock;
	    int dma_flags;

	    dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
#ifdef XGE_HAL_DMA_DTR_CONSISTENT
	    dma_flags |= XGE_OS_DMA_CONSISTENT;
#else
	    dma_flags |= XGE_OS_DMA_STREAMING;
#endif

	    /* allocate DMA-capable memblock */
	    mempool->memblocks_arr[i] = xge_os_dma_malloc(mempool->pdev,
	                        mempool->memblock_size,
	                    dma_flags,
	                        &dma_object->handle,
	                        &dma_object->acc_handle);
	    if (mempool->memblocks_arr[i] == NULL) {
	        xge_debug_mm(XGE_ERR,
	                  "memblock[%d]: out of DMA memory", i);
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }
	    xge_os_memzero(mempool->memblocks_arr[i],
	    mempool->memblock_size);
	    the_memblock = mempool->memblocks_arr[i];

	    /* allocate memblock's private part. Each DMA memblock
	     * has a space allocated for item's private usage upon
	     * mempool's user request. Each time mempool grows, it will
	     * allocate new memblock and its private part at once.
	     * This helps to minimize memory usage a lot. */
	    mempool->memblocks_priv_arr[i] = xge_os_malloc(mempool->pdev,
	                mempool->items_priv_size * n_items);
	    if (mempool->memblocks_priv_arr[i] == NULL) {
	        xge_os_dma_free(mempool->pdev,
	                  the_memblock,
	                  mempool->memblock_size,
	                  &dma_object->acc_handle,
	                  &dma_object->handle);
	        xge_debug_mm(XGE_ERR,
	                "memblock_priv[%d]: out of virtual memory, "
	                "requested %d(%d:%d) bytes", i,
	            mempool->items_priv_size * n_items,
	            mempool->items_priv_size, n_items);
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }
	    xge_os_memzero(mempool->memblocks_priv_arr[i],
	             mempool->items_priv_size * n_items);

	    /* map memblock to physical memory */
	    dma_object->addr = xge_os_dma_map(mempool->pdev,
	                                    dma_object->handle,
	                    the_memblock,
	                    mempool->memblock_size,
	                    XGE_OS_DMA_DIR_BIDIRECTIONAL,
#ifdef XGE_HAL_DMA_DTR_CONSISTENT
	                        XGE_OS_DMA_CONSISTENT
#else
	                        XGE_OS_DMA_STREAMING
#endif
	                                            );
	    if (dma_object->addr == XGE_OS_INVALID_DMA_ADDR) {
	        xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
	              mempool->items_priv_size *
	                n_items);
	        xge_os_dma_free(mempool->pdev,
	                  the_memblock,
	                  mempool->memblock_size,
	                  &dma_object->acc_handle,
	                  &dma_object->handle);
	        return XGE_HAL_ERR_OUT_OF_MAPPING;
	    }

	    /* fill the items hash array */
	    for (j=0; j<n_items; j++) {
	        int index = i*n_items + j;

	        if (first_time && index >= mempool->items_initial) {
	            break;
	        }

	        mempool->items_arr[index] =
	            ((char *)the_memblock + j*mempool->item_size);

	        /* let caller to do more job on each item */
	        if (mempool->item_func_alloc != NULL) {
	            xge_hal_status_e status;

	            if ((status = mempool->item_func_alloc(
	                mempool,
	                the_memblock,
	                i,
	                dma_object,
	                mempool->items_arr[index],
	                index,
	                is_last,
	                mempool->userdata)) != XGE_HAL_OK) {

	                if (mempool->item_func_free != NULL) {
	                    int k;

	                    for (k=0; k<j; k++) {

	                        index =i*n_items + k;

	                      (void)mempool->item_func_free(
	                         mempool, the_memblock,
	                         i, dma_object,
	                         mempool->items_arr[index],
	                         index, is_last,
	                         mempool->userdata);
	                    }
	                }

	                xge_os_free(mempool->pdev,
	                     mempool->memblocks_priv_arr[i],
	                     mempool->items_priv_size *
	                     n_items);
	                xge_os_dma_unmap(mempool->pdev,
	                     dma_object->handle,
	                     dma_object->addr,
	                     mempool->memblock_size,
	                     XGE_OS_DMA_DIR_BIDIRECTIONAL);
	                xge_os_dma_free(mempool->pdev,
	                     the_memblock,
	                     mempool->memblock_size,
	                     &dma_object->acc_handle,
	                     &dma_object->handle);
	                return status;
	            }
	        }

	        mempool->items_current = index + 1;
	    }

	    xge_debug_mm(XGE_TRACE,
	        "memblock%d: allocated %dk, vaddr 0x"XGE_OS_LLXFMT", "
	        "dma_addr 0x"XGE_OS_LLXFMT, i, mempool->memblock_size / 1024,
	        (unsigned long long)(ulong_t)mempool->memblocks_arr[i],
	        (unsigned long long)dma_object->addr);

	    (*num_allocated)++;

	    if (first_time && mempool->items_current ==
	                    mempool->items_initial) {
	        break;
	    }
	}

	/* increment actual number of allocated memblocks */
	mempool->memblocks_allocated += *num_allocated;

	return XGE_HAL_OK;
}
/*
 * __hal_stats_initialize
 * @stats: xge_hal_stats_t structure that contains, in particular,
 *         Xframe hw stat counters.
 * @devh: HAL device handle.
 *
 * Initialize per-device statistics object.
 * See also: xge_hal_stats_getinfo(), xge_hal_status_e{}.
 */
xge_hal_status_e
__hal_stats_initialize (xge_hal_stats_t *stats, xge_hal_device_h devh)
{
	int dma_flags;
	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;

	xge_assert(!stats->is_initialized);

	dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
#ifdef XGE_HAL_DMA_STATS_CONSISTENT
	dma_flags |= XGE_OS_DMA_CONSISTENT;
#else
	dma_flags |= XGE_OS_DMA_STREAMING;
#endif
	if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
	    stats->hw_info =
	        (xge_hal_stats_hw_info_t *) xge_os_dma_malloc(
	                hldev->pdev,
	                sizeof(xge_hal_stats_hw_info_t),
	                dma_flags,
	                &stats->hw_info_dmah,
	                &stats->hw_info_dma_acch);

	    if (stats->hw_info == NULL) {
	        xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }
	    xge_os_memzero(stats->hw_info,
	        sizeof(xge_hal_stats_hw_info_t));
	    xge_os_memzero(&stats->hw_info_saved,
	        sizeof(xge_hal_stats_hw_info_t));
	    xge_os_memzero(&stats->hw_info_latest,
	        sizeof(xge_hal_stats_hw_info_t));



	    stats->dma_addr = xge_os_dma_map(hldev->pdev,
	                               stats->hw_info_dmah,
	                   stats->hw_info,
	                   sizeof(xge_hal_stats_hw_info_t),
	                   XGE_OS_DMA_DIR_FROMDEVICE,
	                   XGE_OS_DMA_CACHELINE_ALIGNED |
#ifdef XGE_HAL_DMA_STATS_CONSISTENT
	                   XGE_OS_DMA_CONSISTENT
#else
	                       XGE_OS_DMA_STREAMING
#endif
	                                   );
	    if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) {
	        xge_debug_stats(XGE_ERR,
	            "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA",
	            (unsigned long long)(ulong_t)stats->hw_info);
	        xge_os_dma_free(hldev->pdev,
	              stats->hw_info,
	              sizeof(xge_hal_stats_hw_info_t),
	              &stats->hw_info_dma_acch,
	              &stats->hw_info_dmah);
	        return XGE_HAL_ERR_OUT_OF_MAPPING;
	    }
	}
	else {
	    stats->pcim_info_saved =
	        (xge_hal_stats_pcim_info_t *)xge_os_malloc(
	        hldev->pdev, sizeof(xge_hal_stats_pcim_info_t));
	    if (stats->pcim_info_saved == NULL) {
	        xge_debug_stats(XGE_ERR, "%s", "can not alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }

	    stats->pcim_info_latest =
	        (xge_hal_stats_pcim_info_t *)xge_os_malloc(
	        hldev->pdev, sizeof(xge_hal_stats_pcim_info_t));
	    if (stats->pcim_info_latest == NULL) {
	        xge_os_free(hldev->pdev, stats->pcim_info_saved,
	            sizeof(xge_hal_stats_pcim_info_t));
	        xge_debug_stats(XGE_ERR, "%s", "can not alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }

	    stats->pcim_info =
	        (xge_hal_stats_pcim_info_t *) xge_os_dma_malloc(
	                hldev->pdev,
	                sizeof(xge_hal_stats_pcim_info_t),
	                dma_flags,
	                &stats->hw_info_dmah,
	                &stats->hw_info_dma_acch);

	    if (stats->pcim_info == NULL) {
	        xge_os_free(hldev->pdev, stats->pcim_info_saved,
	            sizeof(xge_hal_stats_pcim_info_t));
	        xge_os_free(hldev->pdev, stats->pcim_info_latest,
	            sizeof(xge_hal_stats_pcim_info_t));
	        xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }


	    xge_os_memzero(stats->pcim_info,
	        sizeof(xge_hal_stats_pcim_info_t));
	    xge_os_memzero(stats->pcim_info_saved,
	        sizeof(xge_hal_stats_pcim_info_t));
	    xge_os_memzero(stats->pcim_info_latest,
	        sizeof(xge_hal_stats_pcim_info_t));



	    stats->dma_addr = xge_os_dma_map(hldev->pdev,
	                               stats->hw_info_dmah,
	                   stats->pcim_info,
	                   sizeof(xge_hal_stats_pcim_info_t),
	                   XGE_OS_DMA_DIR_FROMDEVICE,
	                   XGE_OS_DMA_CACHELINE_ALIGNED |
#ifdef XGE_HAL_DMA_STATS_CONSISTENT
	                   XGE_OS_DMA_CONSISTENT
#else
	                       XGE_OS_DMA_STREAMING
#endif
	                                   );
	    if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) {
	        xge_debug_stats(XGE_ERR,
	            "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA",
	            (unsigned long long)(ulong_t)stats->hw_info);

	        xge_os_dma_free(hldev->pdev,
	              stats->pcim_info,
	              sizeof(xge_hal_stats_pcim_info_t),
	              &stats->hw_info_dma_acch,
	              &stats->hw_info_dmah);

	        xge_os_free(hldev->pdev, stats->pcim_info_saved,
	            sizeof(xge_hal_stats_pcim_info_t));

	        xge_os_free(hldev->pdev, stats->pcim_info_latest,
	            sizeof(xge_hal_stats_pcim_info_t));

	        return XGE_HAL_ERR_OUT_OF_MAPPING;
	    }
	}
	stats->devh = devh;
	xge_os_memzero(&stats->sw_dev_info_stats,
	         sizeof(xge_hal_stats_device_info_t));

	stats->is_initialized = 1;

	return XGE_HAL_OK;
}