Example #1
/**
 * xge_hal_fifo_dtr_buffer_append - Append the contents of a virtually
 * contiguous data buffer to a single physically contiguous buffer.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @vaddr: Virtual address of the data buffer.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the transmit descriptor preparation for posting
 * (via xge_hal_fifo_dtr_post()).
 * The main difference between this API and
 * xge_hal_fifo_dtr_buffer_set_aligned() is that this API appends the
 * contents of virtually contiguous data buffers received from the
 * upper layer into a single physically contiguous data buffer, and the
 * device then DMAs from that buffer.
 *
 * See Also: xge_hal_fifo_dtr_buffer_finalize(), xge_hal_fifo_dtr_buffer_set(),
 * xge_hal_fifo_dtr_buffer_set_aligned().
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	    void *vaddr, int size)
{
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_txdl_priv_t *txdl_priv;
	ptrdiff_t used;

	xge_assert(size > 0);

	txdl_priv = __hal_fifo_txdl_priv(dtrh);

	/* Bytes of the staging buffer already consumed by previously
	 * appended fragments. */
	used = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
	used += txdl_priv->align_dma_offset;

	/* Refuse the fragment if it would overrun the staging buffer. */
	if (used + (unsigned int)size > (unsigned int)fifo->align_size)
	    return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;

	xge_os_memcpy((char*)txdl_priv->align_vaddr_start +
	    txdl_priv->align_dma_offset, vaddr, size);

	fifo->channel.stats.copied_frags++;

	txdl_priv->align_dma_offset += size;
	return XGE_HAL_OK;
}
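
A minimal usage sketch follows. The fragment type and helper name below (struct uld_frag, uld_coalesce_frags) are hypothetical, invented for illustration; only the two xge_hal_fifo_dtr_buffer_* calls come from the HAL API, and xge_hal_fifo_dtr_buffer_finalize() is assumed to take the channel handle, descriptor handle, and fragment index as suggested by the See Also list above.

/* Hypothetical ULD-side helper; not part of the HAL API. */
struct uld_frag { void *vaddr; int size; };

static xge_hal_status_e
uld_coalesce_frags(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	    struct uld_frag *frags, int nfrags, int frag_idx)
{
	int i;

	/* Copy each virtually contiguous fragment into the descriptor's
	 * single physically contiguous staging buffer. */
	for (i = 0; i < nfrags; i++) {
	    xge_hal_status_e status = xge_hal_fifo_dtr_buffer_append(
	        channelh, dtrh, frags[i].vaddr, frags[i].size);
	    if (status != XGE_HAL_OK)
	        return status; /* e.g. XGE_HAL_ERR_OUT_ALIGNED_FRAGS */
	}

	/* Point TxD frag_idx at the coalesced buffer. */
	xge_hal_fifo_dtr_buffer_finalize(channelh, dtrh, frag_idx);
	return XGE_HAL_OK;
}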
Example #2
/*
 * __hal_stats_save - Snapshot device statistics.
 * @stats: Statistics context, carrying the device handle and the
 * "saved" counter areas.
 *
 * Refreshes the HAL-maintained counters and copies them into the saved
 * area so that later readings can be diffed against this snapshot.
 * Titan devices maintain PCIM statistics; all other Xframe devices
 * maintain the regular hardware statistics.
 */
static void
__hal_stats_save (xge_hal_stats_t *stats)
{
	xge_hal_device_t *hldev = (xge_hal_device_t*)stats->devh;

	if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
	    xge_hal_stats_hw_info_t *latest;

	    /* xge_hal_stats_hw() refreshes stats->hw_info in place. */
	    (void) xge_hal_stats_hw(stats->devh, &latest);

	    xge_os_memcpy(&stats->hw_info_saved, stats->hw_info,
	          sizeof(xge_hal_stats_hw_info_t));
	} else {
	    xge_hal_stats_pcim_info_t   *latest;

	    /* xge_hal_stats_pcim() refreshes stats->pcim_info in place. */
	    (void) xge_hal_stats_pcim(stats->devh, &latest);

	    xge_os_memcpy(stats->pcim_info_saved, stats->pcim_info,
	          sizeof(xge_hal_stats_pcim_info_t));
	}
}
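
For context, here is a hedged sketch of how a snapshot taken by __hal_stats_save() might be consumed on a non-Titan device; tmac_frms is one counter of xge_hal_stats_hw_info_t and is used purely for illustration, and counter wraparound is ignored.

/* Illustrative only: frames transmitted since the last snapshot. */
static u64
uld_tx_frames_since_save(xge_hal_stats_t *stats)
{
	xge_hal_stats_hw_info_t *latest;

	/* Refresh the HAL-maintained counters. */
	(void) xge_hal_stats_hw(stats->devh, &latest);

	/* Delta against the snapshot taken by __hal_stats_save(). */
	return latest->tmac_frms - stats->hw_info_saved.tmac_frms;
}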
Example #3
/**
 * xge_hal_driver_initialize - Initialize HAL.
 * @config: HAL configuration, see xge_hal_driver_config_t{}.
 * @uld_callbacks: Upper-layer driver callbacks, e.g. link-up.
 *
 * HAL initialization entry point. Not to be confused with device initialization
 * (note that HAL "contains" zero or more Xframe devices).
 *
 * Returns: XGE_HAL_OK - success;
 * XGE_HAL_ERR_BAD_DRIVER_CONFIG - Driver configuration params invalid.
 *
 * See also: xge_hal_device_initialize(), xge_hal_status_e{},
 * xge_hal_uld_cbs_t{}.
 */
xge_hal_status_e
xge_hal_driver_initialize(xge_hal_driver_config_t *config,
	        xge_hal_uld_cbs_t *uld_callbacks)
{
	xge_hal_status_e status;

	g_xge_hal_driver = &g_driver;

	xge_hal_driver_debug_module_mask_set(XGE_DEBUG_MODULE_MASK_DEF);
	xge_hal_driver_debug_level_set(XGE_DEBUG_LEVEL_DEF);

#ifdef XGE_HAL_DEBUG_BAR0_OFFSET
	xge_hal_driver_bar0_offset_check();
#endif

#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
	if (config->tracebuf_size == 0)
	    /*
	     * Trace buffer implementation is not lock protected.
	     * The only harm to expect is memcpy() to go beyond of
	     * allowed boundaries. To make it safe (driver-wise),
	     * we pre-allocate needed number of extra bytes.
	     */
	    config->tracebuf_size = XGE_HAL_DEF_CIRCULAR_ARR +
	                XGE_OS_TRACE_MSGBUF_MAX;
#endif

	status = __hal_driver_config_check(config);
	if (status != XGE_HAL_OK)
	    return status;

	xge_os_memzero(g_xge_hal_driver,  sizeof(xge_hal_driver_t));

	/* apply config */
	xge_os_memcpy(&g_xge_hal_driver->config, config,
	            sizeof(xge_hal_driver_config_t));

	/* apply ULD callbacks */
	xge_os_memcpy(&g_xge_hal_driver->uld_callbacks, uld_callbacks,
	                sizeof(xge_hal_uld_cbs_t));

	g_xge_hal_driver->is_initialized = 1;

#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
	g_tracebuf.size = config->tracebuf_size;
	g_tracebuf.data = (char *)xge_os_malloc(NULL, g_tracebuf.size);
	if (g_tracebuf.data == NULL) {
	    xge_os_printf("cannot allocate trace buffer!");
	    return XGE_HAL_ERR_OUT_OF_MEMORY;
	}
	/* timestamps are enabled only when requested via the config */
	g_tracebuf.timestamp = config->tracebuf_timestamp_en;
	if (g_tracebuf.timestamp) {
	    xge_os_timestamp(g_tracebuf.msg);
	    g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX -
	                xge_os_strlen(g_tracebuf.msg);
	} else
	    g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX;
	g_tracebuf.offset = 0;
	*g_tracebuf.msg = 0;
	xge_os_memzero(g_tracebuf.data, g_tracebuf.size);
	g_xge_os_tracebuf = &g_tracebuf;
	dmesg = g_tracebuf.data;
	*dmesg = 0;
#endif
	return XGE_HAL_OK;
}
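
A hedged bring-up sketch under stated assumptions: the handler names are hypothetical, the callback signatures follow xge_hal_uld_cbs_t{} as declared in the HAL headers, and a zero-filled configuration is assumed to pass __hal_driver_config_check() (a real driver would populate config fields first).

/* Hypothetical ULD handlers; real signatures come from xge_hal_uld_cbs_t{}. */
static void uld_link_up(void *userdata) { (void) userdata; }
static void uld_link_down(void *userdata) { (void) userdata; }

static xge_hal_driver_config_t uld_driver_config; /* zeroed defaults */

static xge_hal_status_e
uld_module_init(void)
{
	xge_hal_uld_cbs_t cbs;

	xge_os_memzero(&cbs, sizeof(cbs));
	cbs.link_up = uld_link_up;
	cbs.link_down = uld_link_down;

	/* One-time HAL bring-up; devices are initialized separately
	 * via xge_hal_device_initialize(). */
	return xge_hal_driver_initialize(&uld_driver_config, &cbs);
}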
Example #4
/*
 * __hal_mempool_create - Create a memory pool.
 * @pdev: PCI device handle.
 * @memblock_size: Size (in bytes) of each dynamically allocated block.
 * @item_size: Size (in bytes) of a single pool item.
 * @items_priv_size: Size (in bytes) of the per-item private area.
 * @items_initial: Number of items to pre-allocate at creation time.
 * @items_max: Maximum number of items the pool is allowed to grow to.
 * @item_func_alloc: Per-item callback invoked when an item is allocated.
 * @item_func_free: Per-item callback invoked when an item is freed.
 * @userdata: Opaque context passed to the item callbacks.
 *
 * This function creates a memory pool object. The pool may grow but will
 * never shrink. It consists of dynamically allocated blocks of
 * @memblock_size bytes, each holding @memblock_size / @item_size items;
 * enough blocks are pre-allocated up front to cover @items_initial items.
 * Memory is DMA-able, but the client must map/unmap it before
 * interoperating with the device.
 * See also: xge_os_dma_map(), xge_os_dma_unmap(), xge_hal_status_e{}.
 */
xge_hal_mempool_t*
__hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
	    int items_priv_size, int items_initial, int items_max,
	    xge_hal_mempool_item_f item_func_alloc,
	    xge_hal_mempool_item_f item_func_free, void *userdata)
{
	xge_hal_status_e status;
	int memblocks_to_allocate;
	xge_hal_mempool_t *mempool;
	int allocated;

	if (memblock_size < item_size) {
	    xge_debug_mm(XGE_ERR,
	        "memblock_size %d < item_size %d: misconfiguration",
	        memblock_size, item_size);
	    return NULL;
	}

	mempool = (xge_hal_mempool_t *)
	        xge_os_malloc(pdev, sizeof(xge_hal_mempool_t));
	if (mempool == NULL) {
	    xge_debug_mm(XGE_ERR, "mempool allocation failure");
	    return NULL;
	}
	xge_os_memzero(mempool, sizeof(xge_hal_mempool_t));

	mempool->pdev            = pdev;
	mempool->memblock_size   = memblock_size;
	mempool->items_max       = items_max;
	mempool->items_initial   = items_initial;
	mempool->item_size       = item_size;
	mempool->items_priv_size = items_priv_size;
	mempool->item_func_alloc = item_func_alloc;
	mempool->item_func_free  = item_func_free;
	mempool->userdata        = userdata;

	mempool->memblocks_allocated = 0;

	mempool->items_per_memblock = memblock_size / item_size;

	mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
	                mempool->items_per_memblock;

	/* allocate array of memblocks */
	mempool->memblocks_arr = (void ** ) xge_os_malloc(mempool->pdev,
	                sizeof(void*) * mempool->memblocks_max);
	if (mempool->memblocks_arr == NULL) {
	    xge_debug_mm(XGE_ERR, "memblocks_arr allocation failure");
	    __hal_mempool_destroy(mempool);
	    return NULL;
	}
	xge_os_memzero(mempool->memblocks_arr,
	        sizeof(void*) * mempool->memblocks_max);

	/* allocate array of per-memblock private areas for items */
	mempool->memblocks_priv_arr = (void **) xge_os_malloc(mempool->pdev,
	                sizeof(void*) * mempool->memblocks_max);
	if (mempool->memblocks_priv_arr == NULL) {
	    xge_debug_mm(XGE_ERR, "memblocks_priv_arr allocation failure");
	    __hal_mempool_destroy(mempool);
	    return NULL;
	}
	xge_os_memzero(mempool->memblocks_priv_arr,
	        sizeof(void*) * mempool->memblocks_max);

	/* allocate array of memblocks DMA objects */
	mempool->memblocks_dma_arr =
	    (xge_hal_mempool_dma_t *) xge_os_malloc(mempool->pdev,
	    sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);

	if (mempool->memblocks_dma_arr == NULL) {
	    xge_debug_mm(XGE_ERR, "memblocks_dma_arr allocation failure");
	    __hal_mempool_destroy(mempool);
	    return NULL;
	}
	xge_os_memzero(mempool->memblocks_dma_arr,
	         sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);

	/* allocate array of item pointers */
	mempool->items_arr = (void **) xge_os_malloc(mempool->pdev,
	             sizeof(void*) * mempool->items_max);
	if (mempool->items_arr == NULL) {
	    xge_debug_mm(XGE_ERR, "items_arr allocation failure");
	    __hal_mempool_destroy(mempool);
	    return NULL;
	}
	xge_os_memzero(mempool->items_arr, sizeof(void *) * mempool->items_max);

	mempool->shadow_items_arr = (void **) xge_os_malloc(mempool->pdev,
	                            sizeof(void*) *  mempool->items_max);
	if (mempool->shadow_items_arr == NULL) {
	    xge_debug_mm(XGE_ERR, "shadow_items_arr allocation failure");
	    __hal_mempool_destroy(mempool);
	    return NULL;
	}
	xge_os_memzero(mempool->shadow_items_arr,
	         sizeof(void *) * mempool->items_max);

	/* calculate initial number of memblocks */
	memblocks_to_allocate = (mempool->items_initial +
	             mempool->items_per_memblock - 1) /
	                    mempool->items_per_memblock;

	xge_debug_mm(XGE_TRACE, "allocating %d memblocks, "
	        "%d items per memblock", memblocks_to_allocate,
	        mempool->items_per_memblock);

	/* pre-allocate the mempool */
	status = __hal_mempool_grow(mempool, memblocks_to_allocate, &allocated);
	if (status != XGE_HAL_OK) {
	    xge_debug_mm(XGE_ERR, "mempool_grow failure");
	    __hal_mempool_destroy(mempool);
	    return NULL;
	}
	/* snapshot the item array now that the initial blocks are in place */
	xge_os_memcpy(mempool->shadow_items_arr, mempool->items_arr,
	        sizeof(void*) * mempool->items_max);

	xge_debug_mm(XGE_TRACE,
	    "total: allocated %dk of DMA-capable memory",
	    mempool->memblock_size * allocated / 1024);

	return mempool;
}
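
The block counts above come from plain ceiling division; here is a toy restatement with assumed numbers (8 KB blocks of 2 KB items), not values taken from the driver.

/* Sizing arithmetic from __hal_mempool_create(), with assumed numbers. */
static void
mempool_sizing_example(void)
{
	int memblock_size = 8192, item_size = 2048;
	int items_initial = 64, items_max = 256;

	int items_per_memblock = memblock_size / item_size;           /* 4 */

	/* Ceiling division, exactly as in __hal_mempool_create(): */
	int memblocks_max = (items_max + items_per_memblock - 1) /
	            items_per_memblock;                               /* 64 */
	int memblocks_initial = (items_initial + items_per_memblock - 1) /
	            items_per_memblock;                               /* 16 */

	(void) memblocks_max;
	(void) memblocks_initial;
}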
Example #5
/**
 * xge_hal_fifo_dtr_buffer_set_aligned - Align transmit buffer and fill
 * in fifo descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *            (of buffers).
 * @vaddr: Virtual address of the data buffer.
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 * @misaligned_size: Size (in bytes) of the misaligned portion of the
 * data buffer. Calculated by the caller, based on the platform/OS/other
 * specific criteria, which is outside of HAL's domain. See notes below.
 *
 * This API is part of the transmit descriptor preparation for posting
 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
 * xge_hal_fifo_dtr_mss_set() and xge_hal_fifo_dtr_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Xframe specification.
 * On PCI-X based systems, aligning transmit data typically improves
 * transmit performance; the typical alignment granularity is the L2
 * cacheline size. However, HAL makes no assumptions about the alignment
 * granularity; it is specified via the additional @misaligned_size
 * parameter described above.
 * Prior to calling xge_hal_fifo_dtr_buffer_set_aligned(), the ULD is
 * expected to check the alignment of a given fragment/buffer. For this,
 * HAL provides the separate xge_hal_check_alignment() API, sufficient to
 * cover most (but not all) possible alignment criteria.
 * If the buffer turns out to be aligned, the ULD calls
 * xge_hal_fifo_dtr_buffer_set(); otherwise it calls
 * xge_hal_fifo_dtr_buffer_set_aligned().
 *
 * Note: This API is a "superset" of xge_hal_fifo_dtr_buffer_set(). In
 * addition to filling in the specified descriptor it aligns transmit data on
 * the specified boundary.
 * Note: Decision on whether to align or not to align a given contiguous
 * transmit buffer is outside of HAL's domain. To this end ULD can use any
 * programmable criteria, which can help to 1) boost transmit performance,
 * and/or 2) provide a workaround for PCI bridge bugs, if any.
 *
 * See also: xge_hal_fifo_dtr_buffer_set(), xge_hal_check_alignment(),
 * xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_post(),
 * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_cksum_set_bits().
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
	        xge_hal_dtr_h dtrh, int frag_idx, void *vaddr,
	        dma_addr_t dma_pointer, int size, int misaligned_size)
{
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_txdl_priv_t *txdl_priv;
	xge_hal_fifo_txd_t *txdp;
	int remaining_size;
	ptrdiff_t prev_boff;

	txdl_priv = __hal_fifo_txdl_priv(dtrh);
	txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;

	if (frag_idx != 0) {
	    txdp->control_1 = txdp->control_2 = 0;
	}

	/* On some systems buffer size could be zero.
	 * It is the responsibility of ULD and *not HAL* to
	 * detect it and skip it. */
	xge_assert(size > 0);
	xge_assert(frag_idx < txdl_priv->alloc_frags);
	xge_assert(misaligned_size != 0 &&
	        misaligned_size <= fifo->config->alignment_size);

	remaining_size = size - misaligned_size;
	xge_assert(remaining_size >= 0);

	/* Refuse the fragment before touching the staging buffer: once
	 * the aligned-fragment budget is exhausted, align_vaddr_start
	 * may already point past the end of the staging area. */
	if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) {
	    return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;
	}

	xge_os_memcpy((char*)txdl_priv->align_vaddr_start,
	        vaddr, misaligned_size);

	/* setup new buffer */
	prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
	txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff;
	txdp->control_1 |= XGE_HAL_TXD_BUFFER0_SIZE(misaligned_size);
	txdl_priv->bytes_sent += misaligned_size;
	fifo->channel.stats.total_buffers++;
	txdl_priv->frags++;
	txdl_priv->align_used_frags++;
	txdl_priv->align_vaddr_start += fifo->config->alignment_size;
	txdl_priv->align_dma_offset = 0;

#if defined(XGE_OS_DMA_REQUIRES_SYNC)
	/* sync new buffer */
	xge_os_dma_sync(fifo->channel.pdev,
	          txdl_priv->align_dma_handle,
	          txdp->buffer_pointer,
	          0,
	          misaligned_size,
	          XGE_OS_DMA_DIR_TODEVICE);
#endif

	if (remaining_size) {
	    xge_assert(frag_idx < txdl_priv->alloc_frags);
	    txdp++;
	    txdp->buffer_pointer = (u64)dma_pointer +
	                misaligned_size;
	    txdp->control_1 =
	        XGE_HAL_TXD_BUFFER0_SIZE(remaining_size);
	    txdl_priv->bytes_sent += remaining_size;
	    txdp->control_2 = 0;
	    fifo->channel.stats.total_buffers++;
	    txdl_priv->frags++;
	}

	return XGE_HAL_OK;
}
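
To ground the set/set_aligned decision described in the comment above, here is a hedged ULD-side sketch. The function name is hypothetical, the simple modulo test merely stands in for the ULD's real criterion (xge_hal_check_alignment() being the HAL-provided helper), and @alignment is assumed to equal the fifo's configured alignment_size.

/* Hypothetical ULD helper: choose between the plain and the aligned
 * descriptor-fill paths for one fragment. */
static xge_hal_status_e
uld_fill_tx_fragment(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	    int frag_idx, void *vaddr, dma_addr_t dma_pointer,
	    int size, int alignment)
{
	int misaligned = (int)(dma_pointer % alignment);

	if (misaligned == 0) {
	    /* Buffer already aligned: fill the descriptor directly. */
	    xge_hal_fifo_dtr_buffer_set(channelh, dtrh, frag_idx,
	        dma_pointer, size);
	    return XGE_HAL_OK;
	}

	/* Bounce the leading bytes (up to the next alignment boundary,
	 * but never more than the fragment itself) and DMA the rest
	 * in place. */
	misaligned = alignment - misaligned;
	if (misaligned > size)
	    misaligned = size;
	return xge_hal_fifo_dtr_buffer_set_aligned(channelh, dtrh,
	    frag_idx, vaddr, dma_pointer, size, misaligned);
}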