Example #1
/*
 * __hal_stats_terminate
 * @stats: xge_hal_stats_t structure that contains, in particular,
 *         Xframe hw stat counters.
 * Terminate per-device statistics object.
 */
void
__hal_stats_terminate (xge_hal_stats_t *stats)
{
	xge_hal_device_t *hldev;

	xge_assert(stats->hw_info);

	hldev = (xge_hal_device_t*)stats->devh;
	xge_assert(hldev);
	xge_assert(stats->is_initialized);
	if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
	    xge_os_dma_unmap(hldev->pdev,
	                     stats->hw_info_dmah,
	                     stats->dma_addr,
	                     sizeof(xge_hal_stats_hw_info_t),
	                     XGE_OS_DMA_DIR_FROMDEVICE);

	    xge_os_dma_free(hldev->pdev,
	                    stats->hw_info,
	                    sizeof(xge_hal_stats_hw_info_t),
	                    &stats->hw_info_dma_acch,
	                    &stats->hw_info_dmah);
	} else {
	    xge_os_dma_unmap(hldev->pdev,
	                     stats->hw_info_dmah,
	                     stats->dma_addr,
	                     sizeof(xge_hal_stats_pcim_info_t),
	                     XGE_OS_DMA_DIR_FROMDEVICE);

	    xge_os_dma_free(hldev->pdev,
	                    stats->pcim_info,
	                    sizeof(xge_hal_stats_pcim_info_t),
	                    &stats->hw_info_dma_acch,
	                    &stats->hw_info_dmah);

	    xge_os_free(hldev->pdev, stats->pcim_info_saved,
	        sizeof(xge_hal_stats_pcim_info_t));

	    xge_os_free(hldev->pdev, stats->pcim_info_latest,
	            sizeof(xge_hal_stats_pcim_info_t));

	}

	stats->is_initialized = 0;
	stats->is_enabled = 0;
}
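The terminate path above assumes the stats object was previously set up and asserts on is_initialized. A minimal caller-side sketch (not taken from the HAL; the hldev->stats embedding is an assumption made purely for illustration) would guard the call on the same flag:

/* Illustrative sketch only: hldev->stats is an assumed embedding of the
 * per-device stats object; everything else comes from the HAL code above. */
static void
example_device_stats_teardown(xge_hal_device_t *hldev)
{
	xge_hal_stats_t *stats = &hldev->stats;	/* assumed member */

	/* __hal_stats_terminate() asserts is_initialized, so only call it
	 * for a stats object that was actually initialized. */
	if (!stats->is_initialized)
		return;

	__hal_stats_terminate(stats);
}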
Example #2
void
__hal_ring_close(xge_hal_channel_h channelh)
{
	xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
	xge_hal_ring_queue_t *queue;
#if defined(XGE_HAL_RX_MULTI_RESERVE) || defined(XGE_HAL_RX_MULTI_RESERVE_IRQ) || \
    defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
#endif

	xge_assert(ring->channel.pdev);

	queue = &ring->config->queue[ring->channel.post_qid];

	if (ring->mempool) {
		__hal_mempool_destroy(ring->mempool);
	}

	if (ring->reserved_rxds_arr) {
		xge_os_free(ring->channel.pdev,
		            ring->reserved_rxds_arr,
		            sizeof(void*) * queue->max * ring->rxds_per_block);
	}

	__hal_channel_terminate(channelh);

#if defined(XGE_HAL_RX_MULTI_RESERVE)
	xge_os_spin_lock_destroy(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.reserve_lock, hldev->pdev);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
	xge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
	xge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif
}
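Note that __hal_ring_close() frees ring->reserved_rxds_arr with the size sizeof(void*) * queue->max * ring->rxds_per_block, so whatever allocates that array must use the same size expression. A hypothetical allocation helper (a sketch, not the HAL's actual open path) could look like this:

/* Sketch only: mirrors the xge_os_free() in __hal_ring_close() so the
 * allocation and free sizes stay in sync. */
static xge_hal_status_e
example_alloc_reserved_rxds(xge_hal_ring_t *ring, xge_hal_ring_queue_t *queue)
{
	ring->reserved_rxds_arr = xge_os_malloc(ring->channel.pdev,
	            sizeof(void*) * queue->max * ring->rxds_per_block);
	if (ring->reserved_rxds_arr == NULL)
		return XGE_HAL_ERR_OUT_OF_MEMORY;

	return XGE_HAL_OK;
}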
Example #3
/**
 * xge_hal_driver_terminate - Terminate HAL.
 *
 * HAL termination entry point.
 *
 * See also: xge_hal_device_terminate().
 */
void
xge_hal_driver_terminate(void)
{
	g_xge_hal_driver->is_initialized = 0;

#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
	if (g_tracebuf.size) {
	    xge_os_free(NULL, g_tracebuf.data, g_tracebuf.size);
	}
#endif

	g_xge_hal_driver = NULL;

#ifdef XGE_OS_MEMORY_CHECK
	{
	    int i, leaks=0;
	    xge_os_printf("OSPAL: max g_malloc_cnt %d", g_malloc_cnt);
	    for (i=0; i<g_malloc_cnt; i++) {
	        if (g_malloc_arr[i].ptr != NULL) {
	            xge_os_printf("OSPAL: memory leak detected at "
	                "%s:%d:"XGE_OS_LLXFMT":%d",
	                g_malloc_arr[i].file,
	                g_malloc_arr[i].line,
	                (unsigned long long)(ulong_t)
	                    g_malloc_arr[i].ptr,
	                g_malloc_arr[i].size);
	            leaks++;
	        }
	    }
	    if (leaks) {
	        xge_os_printf("OSPAL: %d memory leaks detected", leaks);
	    } else {
	        xge_os_printf("OSPAL: no memory leaks detected");
	    }
	}
#endif
}
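Since xge_hal_driver_terminate() clears g_xge_hal_driver and releases driver-wide state, per-device teardown has to happen first. A sketch of a module-exit path follows; the my_devices[] bookkeeping is hypothetical, and xge_hal_device_terminate() (referenced by the comment above) is assumed to take the device pointer:

static xge_hal_device_t *my_devices[8];	/* hypothetical per-module list */
static int my_device_count;		/* hypothetical */

static void
example_module_exit(void)
{
	int i;

	/* tear every device down before the driver-wide state disappears */
	for (i = 0; i < my_device_count; i++)
		xge_hal_device_terminate(my_devices[i]);

	xge_hal_driver_terminate();
}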
Example #4
/*
 * __hal_mempool_grow
 *
 * Grow the mempool by up to %num_allocate memblocks.
 */
xge_hal_status_e
__hal_mempool_grow(xge_hal_mempool_t *mempool, int num_allocate,
	    int *num_allocated)
{
	int i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
	int n_items = mempool->items_per_memblock;

	*num_allocated = 0;

	if ((mempool->memblocks_allocated + num_allocate) >
	                    mempool->memblocks_max) {
	    xge_debug_mm(XGE_ERR, "%s",
	              "__hal_mempool_grow: can grow anymore");
	    return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	for (i = mempool->memblocks_allocated;
	     i < mempool->memblocks_allocated + num_allocate; i++) {
	    int j;
	    int is_last =
	        ((mempool->memblocks_allocated+num_allocate-1) == i);
	    xge_hal_mempool_dma_t *dma_object =
	        mempool->memblocks_dma_arr + i;
	    void *the_memblock;
	    int dma_flags;

	    dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
#ifdef XGE_HAL_DMA_DTR_CONSISTENT
	    dma_flags |= XGE_OS_DMA_CONSISTENT;
#else
	    dma_flags |= XGE_OS_DMA_STREAMING;
#endif

	    /* allocate DMA-capable memblock */
	    mempool->memblocks_arr[i] = xge_os_dma_malloc(mempool->pdev,
	                        mempool->memblock_size,
	                        dma_flags,
	                        &dma_object->handle,
	                        &dma_object->acc_handle);
	    if (mempool->memblocks_arr[i] == NULL) {
	        xge_debug_mm(XGE_ERR,
	                  "memblock[%d]: out of DMA memory", i);
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }
	    xge_os_memzero(mempool->memblocks_arr[i],
	                   mempool->memblock_size);
	    the_memblock = mempool->memblocks_arr[i];

	    /* allocate the memblock's private part. Each DMA memblock
	     * carries space for the items' private use, reserved at the
	     * mempool user's request. Each time the mempool grows, it
	     * allocates the new memblock and its private part together,
	     * which helps keep memory usage to a minimum. */
	    mempool->memblocks_priv_arr[i] = xge_os_malloc(mempool->pdev,
	                mempool->items_priv_size * n_items);
	    if (mempool->memblocks_priv_arr[i] == NULL) {
	        xge_os_dma_free(mempool->pdev,
	                  the_memblock,
	                  mempool->memblock_size,
	                  &dma_object->acc_handle,
	                  &dma_object->handle);
	        xge_debug_mm(XGE_ERR,
	                "memblock_priv[%d]: out of virtual memory, "
	                "requested %d(%d:%d) bytes", i,
	            mempool->items_priv_size * n_items,
	            mempool->items_priv_size, n_items);
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }
	    xge_os_memzero(mempool->memblocks_priv_arr[i],
	             mempool->items_priv_size * n_items);

	    /* map memblock to physical memory */
	    dma_object->addr = xge_os_dma_map(mempool->pdev,
	                                      dma_object->handle,
	                                      the_memblock,
	                                      mempool->memblock_size,
	                                      XGE_OS_DMA_DIR_BIDIRECTIONAL,
#ifdef XGE_HAL_DMA_DTR_CONSISTENT
	                                      XGE_OS_DMA_CONSISTENT
#else
	                                      XGE_OS_DMA_STREAMING
#endif
	                                      );
	    if (dma_object->addr == XGE_OS_INVALID_DMA_ADDR) {
	        xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
	              mempool->items_priv_size *
	                n_items);
	        xge_os_dma_free(mempool->pdev,
	                  the_memblock,
	                  mempool->memblock_size,
	                  &dma_object->acc_handle,
	                  &dma_object->handle);
	        return XGE_HAL_ERR_OUT_OF_MAPPING;
	    }

	    /* fill the items hash array */
	    for (j=0; j<n_items; j++) {
	        int index = i*n_items + j;

	        if (first_time && index >= mempool->items_initial) {
	            break;
	        }

	        mempool->items_arr[index] =
	            ((char *)the_memblock + j*mempool->item_size);

	        /* let the caller do additional work on each item */
	        if (mempool->item_func_alloc != NULL) {
	            xge_hal_status_e status;

	            if ((status = mempool->item_func_alloc(
	                mempool,
	                the_memblock,
	                i,
	                dma_object,
	                mempool->items_arr[index],
	                index,
	                is_last,
	                mempool->userdata)) != XGE_HAL_OK) {

	                if (mempool->item_func_free != NULL) {
	                    int k;

	                    for (k=0; k<j; k++) {

	                        index = i*n_items + k;

	                      (void)mempool->item_func_free(
	                         mempool, the_memblock,
	                         i, dma_object,
	                         mempool->items_arr[index],
	                         index, is_last,
	                         mempool->userdata);
	                    }
	                }

	                xge_os_free(mempool->pdev,
	                     mempool->memblocks_priv_arr[i],
	                     mempool->items_priv_size *
	                     n_items);
	                xge_os_dma_unmap(mempool->pdev,
	                     dma_object->handle,
	                     dma_object->addr,
	                     mempool->memblock_size,
	                     XGE_OS_DMA_DIR_BIDIRECTIONAL);
	                xge_os_dma_free(mempool->pdev,
	                     the_memblock,
	                     mempool->memblock_size,
	                     &dma_object->acc_handle,
	                     &dma_object->handle);
	                return status;
	            }
	        }

	        mempool->items_current = index + 1;
	    }

	    xge_debug_mm(XGE_TRACE,
	        "memblock%d: allocated %dk, vaddr 0x"XGE_OS_LLXFMT", "
	        "dma_addr 0x"XGE_OS_LLXFMT, i, mempool->memblock_size / 1024,
	        (unsigned long long)(ulong_t)mempool->memblocks_arr[i],
	        (unsigned long long)dma_object->addr);

	    (*num_allocated)++;

	    if (first_time && mempool->items_current ==
	                    mempool->items_initial) {
	        break;
	    }
	}

	/* increment actual number of allocated memblocks */
	mempool->memblocks_allocated += *num_allocated;

	return XGE_HAL_OK;
}
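A caller of __hal_mempool_grow() passes the number of memblocks it wants and receives the number actually added, which can be smaller because the loop stops early once items_initial is reached on the first growth. A minimal hypothetical caller, built only from the routines shown above:

/* Sketch only: ask for one more memblock and report what was added. */
static xge_hal_status_e
example_mempool_grow_by_one(xge_hal_mempool_t *mempool)
{
	int added = 0;
	xge_hal_status_e status;

	status = __hal_mempool_grow(mempool, 1, &added);
	if (status != XGE_HAL_OK) {
		/* the pool is at memblocks_max, or an allocation/mapping failed */
		return status;
	}

	xge_debug_mm(XGE_TRACE, "mempool grew by %d memblock(s)", added);
	return XGE_HAL_OK;
}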
Example #5
/*
 * __hal_mempool_destroy
 */
void
__hal_mempool_destroy(xge_hal_mempool_t *mempool)
{
	int i, j;

	for (i=0; i<mempool->memblocks_allocated; i++) {
	    xge_hal_mempool_dma_t *dma_object;

	    xge_assert(mempool->memblocks_arr[i]);
	    xge_assert(mempool->memblocks_dma_arr + i);

	    dma_object = mempool->memblocks_dma_arr + i;

	    for (j=0; j<mempool->items_per_memblock; j++) {
	        int index = i*mempool->items_per_memblock + j;

	        /* skip the last, partially filled (if any) memblock */
	        if (index >= mempool->items_current) {
	            break;
	        }

	        /* let the caller do additional work on each item */
	        if (mempool->item_func_free != NULL) {

	            mempool->item_func_free(mempool,
	                mempool->memblocks_arr[i],
	                i, dma_object,
	                mempool->shadow_items_arr[index],
	                index, /* unused */ -1,
	                mempool->userdata);
	        }
	    }

	    xge_os_dma_unmap(mempool->pdev,
	                     dma_object->handle, dma_object->addr,
	                     mempool->memblock_size, XGE_OS_DMA_DIR_BIDIRECTIONAL);

	    xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
	        mempool->items_priv_size * mempool->items_per_memblock);

	    xge_os_dma_free(mempool->pdev, mempool->memblocks_arr[i],
	              mempool->memblock_size, &dma_object->acc_handle,
	              &dma_object->handle);
	}

	if (mempool->items_arr) {
	    xge_os_free(mempool->pdev, mempool->items_arr, sizeof(void*) *
	              mempool->items_max);
	}

	if (mempool->shadow_items_arr) {
	    xge_os_free(mempool->pdev, mempool->shadow_items_arr,
	          sizeof(void*) * mempool->items_max);
	}

	if (mempool->memblocks_dma_arr) {
	    xge_os_free(mempool->pdev, mempool->memblocks_dma_arr,
	              sizeof(xge_hal_mempool_dma_t) *
	             mempool->memblocks_max);
	}

	if (mempool->memblocks_priv_arr) {
	    xge_os_free(mempool->pdev, mempool->memblocks_priv_arr,
	              sizeof(void*) * mempool->memblocks_max);
	}

	if (mempool->memblocks_arr) {
	    xge_os_free(mempool->pdev, mempool->memblocks_arr,
	              sizeof(void*) * mempool->memblocks_max);
	}

	xge_os_free(mempool->pdev, mempool, sizeof(xge_hal_mempool_t));
}
Example #6
/*
 * __hal_stats_initialize
 * @stats: xge_hal_stats_t structure that contains, in particular,
 *         Xframe hw stat counters.
 * @devh: HAL device handle.
 *
 * Initialize per-device statistics object.
 * See also: xge_hal_stats_getinfo(), xge_hal_status_e{}.
 */
xge_hal_status_e
__hal_stats_initialize (xge_hal_stats_t *stats, xge_hal_device_h devh)
{
	int dma_flags;
	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;

	xge_assert(!stats->is_initialized);

	dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
#ifdef XGE_HAL_DMA_STATS_CONSISTENT
	dma_flags |= XGE_OS_DMA_CONSISTENT;
#else
	dma_flags |= XGE_OS_DMA_STREAMING;
#endif
	if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
	    stats->hw_info =
	        (xge_hal_stats_hw_info_t *) xge_os_dma_malloc(
	                hldev->pdev,
	                sizeof(xge_hal_stats_hw_info_t),
	                dma_flags,
	                &stats->hw_info_dmah,
	                &stats->hw_info_dma_acch);

	    if (stats->hw_info == NULL) {
	        xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }
	    xge_os_memzero(stats->hw_info,
	        sizeof(xge_hal_stats_hw_info_t));
	    xge_os_memzero(&stats->hw_info_saved,
	        sizeof(xge_hal_stats_hw_info_t));
	    xge_os_memzero(&stats->hw_info_latest,
	        sizeof(xge_hal_stats_hw_info_t));



	    stats->dma_addr = xge_os_dma_map(hldev->pdev,
	                                     stats->hw_info_dmah,
	                                     stats->hw_info,
	                                     sizeof(xge_hal_stats_hw_info_t),
	                                     XGE_OS_DMA_DIR_FROMDEVICE,
	                                     XGE_OS_DMA_CACHELINE_ALIGNED |
#ifdef XGE_HAL_DMA_STATS_CONSISTENT
	                                     XGE_OS_DMA_CONSISTENT
#else
	                                     XGE_OS_DMA_STREAMING
#endif
	                                     );
	    if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) {
	        xge_debug_stats(XGE_ERR,
	            "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA",
	            (unsigned long long)(ulong_t)stats->hw_info);
	        xge_os_dma_free(hldev->pdev,
	              stats->hw_info,
	              sizeof(xge_hal_stats_hw_info_t),
	              &stats->hw_info_dma_acch,
	              &stats->hw_info_dmah);
	        return XGE_HAL_ERR_OUT_OF_MAPPING;
	    }
	}
	else {
	    stats->pcim_info_saved =
	        (xge_hal_stats_pcim_info_t *)xge_os_malloc(
	        hldev->pdev, sizeof(xge_hal_stats_pcim_info_t));
	    if (stats->pcim_info_saved == NULL) {
	        xge_debug_stats(XGE_ERR, "%s", "can not alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }

	    stats->pcim_info_latest =
	        (xge_hal_stats_pcim_info_t *)xge_os_malloc(
	        hldev->pdev, sizeof(xge_hal_stats_pcim_info_t));
	    if (stats->pcim_info_latest == NULL) {
	        xge_os_free(hldev->pdev, stats->pcim_info_saved,
	            sizeof(xge_hal_stats_pcim_info_t));
	        xge_debug_stats(XGE_ERR, "%s", "can not alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }

	    stats->pcim_info =
	        (xge_hal_stats_pcim_info_t *) xge_os_dma_malloc(
	                hldev->pdev,
	                sizeof(xge_hal_stats_pcim_info_t),
	                dma_flags,
	                &stats->hw_info_dmah,
	                &stats->hw_info_dma_acch);

	    if (stats->pcim_info == NULL) {
	        xge_os_free(hldev->pdev, stats->pcim_info_saved,
	            sizeof(xge_hal_stats_pcim_info_t));
	        xge_os_free(hldev->pdev, stats->pcim_info_latest,
	            sizeof(xge_hal_stats_pcim_info_t));
	        xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc");
	        return XGE_HAL_ERR_OUT_OF_MEMORY;
	    }


	    xge_os_memzero(stats->pcim_info,
	        sizeof(xge_hal_stats_pcim_info_t));
	    xge_os_memzero(stats->pcim_info_saved,
	        sizeof(xge_hal_stats_pcim_info_t));
	    xge_os_memzero(stats->pcim_info_latest,
	        sizeof(xge_hal_stats_pcim_info_t));



	    stats->dma_addr = xge_os_dma_map(hldev->pdev,
	                                     stats->hw_info_dmah,
	                                     stats->pcim_info,
	                                     sizeof(xge_hal_stats_pcim_info_t),
	                                     XGE_OS_DMA_DIR_FROMDEVICE,
	                                     XGE_OS_DMA_CACHELINE_ALIGNED |
#ifdef XGE_HAL_DMA_STATS_CONSISTENT
	                                     XGE_OS_DMA_CONSISTENT
#else
	                                     XGE_OS_DMA_STREAMING
#endif
	                                     );
	    if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) {
	        xge_debug_stats(XGE_ERR,
	            "can not map vaddr 0x"XGE_OS_LLXFMT" to DMA",
	            (unsigned long long)(ulong_t)stats->pcim_info);

	        xge_os_dma_free(hldev->pdev,
	              stats->pcim_info,
	              sizeof(xge_hal_stats_pcim_info_t),
	              &stats->hw_info_dma_acch,
	              &stats->hw_info_dmah);

	        xge_os_free(hldev->pdev, stats->pcim_info_saved,
	            sizeof(xge_hal_stats_pcim_info_t));

	        xge_os_free(hldev->pdev, stats->pcim_info_latest,
	            sizeof(xge_hal_stats_pcim_info_t));

	        return XGE_HAL_ERR_OUT_OF_MAPPING;
	    }
	}
	stats->devh = devh;
	xge_os_memzero(&stats->sw_dev_info_stats,
	         sizeof(xge_hal_stats_device_info_t));

	stats->is_initialized = 1;

	return XGE_HAL_OK;
}
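__hal_stats_initialize() and __hal_stats_terminate() form a pair: whatever bring-up code calls the former must call the latter on any later failure. A sketch of such a caller follows; the hldev->stats embedding is the same assumption as in the teardown sketch after Example #1:

/* Sketch only: initialize per-device stats and note the undo step. */
static xge_hal_status_e
example_device_stats_setup(xge_hal_device_t *hldev)
{
	xge_hal_status_e status;

	/* hldev->stats is an assumed embedding of the per-device stats */
	status = __hal_stats_initialize(&hldev->stats, (xge_hal_device_h)hldev);
	if (status != XGE_HAL_OK)
		return status;

	/* any later bring-up failure should undo this step with
	 * __hal_stats_terminate(&hldev->stats) */
	return XGE_HAL_OK;
}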