Example #1
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 *
 * @v adapter	e1000 private structure
 *
 * @ret rc       Returns 0 on success, negative on failure
 **/
static int
e1000_setup_tx_resources ( struct e1000_adapter *adapter )
{
	DBG ( "e1000_setup_tx_resources\n" );
	
	/* Allocate transmit descriptor ring memory.
	   It must not cross a 64K boundary because of hardware errata #23,
	   so we use malloc_dma() to request a 128 byte block that is
	   128 byte aligned. This guarantees that the memory allocated
	   will not cross a 64K boundary, because 65536 is an even
	   multiple of 128 ( 65536 / 128 == 512 ), so any 128 byte
	   allocation on a 128 byte boundary stays within a single
	   64K region.
	 */

	adapter->tx_base =
		malloc_dma ( adapter->tx_ring_size, adapter->tx_ring_size );

	if ( ! adapter->tx_base ) {
		return -ENOMEM;
	}

	memset ( adapter->tx_base, 0, adapter->tx_ring_size );

	DBG ( "adapter->tx_base = %#08lx\n", virt_to_bus ( adapter->tx_base ) );

	return 0;
}
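The guarantee described in the comment above follows from the allocation being aligned to its own size: when the ring size is a power of two that evenly divides 64K, a block that starts on a ring-size boundary can never straddle a 64K boundary. Below is a small standalone check of that arithmetic; the 128 byte ring size is an assumption taken from the comment, not read from the driver headers.

#include <assert.h>
#include <stdint.h>

/* Check that any 128-byte block starting on a 128-byte boundary stays
 * inside one 64K region.  The 128 mirrors the ring size quoted in the
 * comment above; the real value comes from the driver's descriptor count. */
int main(void)
{
	const uint64_t ring_size = 128;		/* assumed, per the comment */

	for (uint64_t base = 0; base < 0x20000; base += ring_size) {
		uint64_t first_region = base / 0x10000;
		uint64_t last_region = (base + ring_size - 1) / 0x10000;
		assert(first_region == last_region);
	}
	return 0;
}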
Example #2
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 *
 * @v adapter	e1000 private structure
 *
 * @ret rc       Returns 0 on success, negative on failure
 **/
static int
e1000_setup_rx_resources ( struct e1000_adapter *adapter )
{
	int i, rc = 0;
	
	DBG ( "e1000_setup_rx_resources\n" );
	
	/* Allocate receive descriptor ring memory.
	   It must not cross a 64K boundary because of hardware errata
	 */

	adapter->rx_base =
		malloc_dma ( adapter->rx_ring_size, adapter->rx_ring_size );

	if ( ! adapter->rx_base ) {
		return -ENOMEM;
	}
	memset ( adapter->rx_base, 0, adapter->rx_ring_size );

	for ( i = 0; i < NUM_RX_DESC; i++ ) {
		/* let e1000_refill_rx_ring() handle the io_buffer allocations */
		adapter->rx_iobuf[i] = NULL;
	}

	/* allocate io_buffers */
	rc = e1000_refill_rx_ring ( adapter );
	if ( rc < 0 )
		e1000_free_rx_resources ( adapter );

	return rc;
}
Example #3
File: rhine.c  Project: 42wim/ipxe
/**
 * Create descriptor ring
 *
 * @v rhn		Rhine device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int rhine_create_ring ( struct rhine_nic *rhn,
			       struct rhine_ring *ring ) {
	size_t len = ( ring->count * sizeof ( ring->desc[0] ) );
	struct rhine_descriptor *next;
	physaddr_t address;
	unsigned int i;

	/* Allocate descriptors */
	ring->desc = malloc_dma ( len, RHINE_RING_ALIGN );
	if ( ! ring->desc )
		return -ENOMEM;

	/* Initialise descriptor ring */
	memset ( ring->desc, 0, len );
	for ( i = 0 ; i < ring->count ; i++ ) {
		next = &ring->desc[ ( i + 1 ) % ring->count ];
		ring->desc[i].next = cpu_to_le32 ( virt_to_bus ( next ) );
	}

	/* Program ring address */
	address = virt_to_bus ( ring->desc );
	writel ( address, rhn->regs + ring->reg );

	DBGC ( rhn, "RHINE %p ring %02x is at [%08llx,%08llx)\n",
	       rhn, ring->reg, ( ( unsigned long long ) address ),
	       ( ( unsigned long long ) address + len ) );

	return 0;
}
Example #4
/*
  setup for a write to a device (the DMA engine reads from memory),
  allocating a bounce buffer if needed
 */
void bouncebuffer_setup_write(struct bouncebuffer_t *bouncebuffer, const uint8_t **buf, uint32_t size)
{
    if (!bouncebuffer || IS_DMA_SAFE(*buf)) {
        // nothing needs to be done
        return;
    }
    osalDbgAssert((bouncebuffer->busy == false), "bouncebuffer write");        
    if (bouncebuffer->size < size) {
        if (bouncebuffer->size > 0) {
            free(bouncebuffer->dma_buf);
        }
        bouncebuffer->dma_buf = bouncebuffer->is_sdcard?malloc_sdcard_dma(size):malloc_dma(size);
        osalDbgAssert((bouncebuffer->dma_buf != NULL), "bouncebuffer write allocate");
        bouncebuffer->size = size;
    }
    if (*buf) {
        memcpy(bouncebuffer->dma_buf, *buf, size);
    }
    *buf = bouncebuffer->dma_buf;
#if defined(STM32H7)
    osalDbgAssert((((uint32_t)*buf)&31) == 0, "bouncebuffer write align");
    cacheBufferFlush(*buf, (size+31)&~31);
#endif
    bouncebuffer->busy = true;
}
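A caller is expected to pair the setup above with a matching release once the DMA transfer has completed. The sketch below shows that shape; bouncebuffer_finish_write() is assumed to be the companion routine from the same file, and the transfer start itself is left as a placeholder.

#include <stdint.h>
#include <stdbool.h>
#include "bouncebuffer.h"	/* assumed header exposing the helpers above */

static struct bouncebuffer_t *spi_bounce;

void spi_dma_init(void)
{
    /* preallocate 512 bytes; false selects the non-SD-card DMA heap */
    bouncebuffer_init(&spi_bounce, 512, false);
}

void spi_dma_send(const uint8_t *data, uint32_t len)
{
    const uint8_t *buf = data;

    /* buf is redirected to a DMA-safe copy when the original is not */
    bouncebuffer_setup_write(spi_bounce, &buf, len);

    /* ... start the hardware transfer from buf and wait for completion ... */

    /* assumed companion call that marks the bounce buffer free again */
    bouncebuffer_finish_write(spi_bounce, buf);
}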
Example #5
static int rtl818x_init_rx_ring(struct net80211_device *dev)
{
	struct rtl818x_priv *priv = dev->priv;
	struct rtl818x_rx_desc *entry;
	int i;

	priv->rx_ring = malloc_dma(sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE,
				   RTL818X_RING_ALIGN);
	if (!priv->rx_ring) {
		DBG("rtl818x %s: cannot allocate RX ring\n", dev->netdev->name);
		return -ENOMEM;
	}
	/* only translate the address once the allocation has succeeded */
	priv->rx_ring_dma = virt_to_bus(priv->rx_ring);

	memset(priv->rx_ring, 0, sizeof(*priv->rx_ring) * RTL818X_RX_RING_SIZE);
	priv->rx_idx = 0;

	for (i = 0; i < RTL818X_RX_RING_SIZE; i++) {
		struct io_buffer *iob = alloc_iob(MAX_RX_SIZE);
		entry = &priv->rx_ring[i];
		if (!iob)
			return -ENOMEM;

		priv->rx_buf[i] = iob;
		entry->rx_buf = cpu_to_le32(virt_to_bus(iob->data));
		entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
					   MAX_RX_SIZE);
	}
	entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
	return 0;
}
Example #6
/*
  initialise a bouncebuffer
 */
void bouncebuffer_init(struct bouncebuffer_t **bouncebuffer, uint32_t prealloc_bytes, bool sdcard)
{
    (*bouncebuffer) = calloc(1, sizeof(struct bouncebuffer_t));
    osalDbgAssert(((*bouncebuffer) != NULL), "bouncebuffer init");
    (*bouncebuffer)->is_sdcard = sdcard;
    if (prealloc_bytes) {
        (*bouncebuffer)->dma_buf = sdcard?malloc_sdcard_dma(prealloc_bytes):malloc_dma(prealloc_bytes);
        osalDbgAssert(((*bouncebuffer)->dma_buf != NULL), "bouncebuffer preallocate");
        (*bouncebuffer)->size = prealloc_bytes;
    }
}
Example #7
static int b44_init_tx_ring(struct b44_private *bp)
{
	b44_free_tx_ring(bp);

	bp->tx = malloc_dma(B44_TX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
	if (!bp->tx)
		return -ENOMEM;

	memset(bp->tx, 0, B44_TX_RING_LEN_BYTES);
	memset(bp->tx_iobuf, 0, sizeof(bp->tx_iobuf));

	DBG("Init TX rings: tx=0x%08lx\n", VIRT_TO_B44(bp->tx));
	return 0;
}
Example #8
/**
 * Create descriptor ring
 *
 * @v netfront		Netfront device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int netfront_create_ring ( struct netfront_nic *netfront,
				  struct netfront_ring *ring ) {
	struct xen_device *xendev = netfront->xendev;
	struct xen_hypervisor *xen = xendev->xen;
	unsigned int i;
	int rc;

	/* Initialise buffer ID ring */
	for ( i = 0 ; i < ring->count ; i++ ) {
		ring->ids[i] = i;
		assert ( ring->iobufs[i] == NULL );
	}
	ring->id_prod = 0;
	ring->id_cons = 0;

	/* Allocate and initialise shared ring */
	ring->sring.raw = malloc_dma ( PAGE_SIZE, PAGE_SIZE );
	if ( ! ring->sring.raw ) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	/* Grant access to shared ring */
	if ( ( rc = xengrant_permit_access ( xen, ring->ref, xendev->backend_id,
					     0, ring->sring.raw ) ) != 0 ) {
		DBGC ( netfront, "NETFRONT %s could not permit access to "
		       "%#08lx: %s\n", xendev->key,
		       virt_to_phys ( ring->sring.raw ), strerror ( rc ) );
		goto err_permit_access;
	}

	/* Publish shared ring reference */
	if ( ( rc = netfront_write_num ( netfront, ring->ref_key,
					 ring->ref ) ) != 0 )
		goto err_write_num;

	DBGC ( netfront, "NETFRONT %s %s=\"%d\" [%08lx,%08lx)\n",
	       xendev->key, ring->ref_key, ring->ref,
	       virt_to_phys ( ring->sring.raw ),
	       ( virt_to_phys ( ring->sring.raw ) + PAGE_SIZE ) );
	return 0;

	netfront_rm ( netfront, ring->ref_key );
 err_write_num:
	xengrant_invalidate ( xen, ring->ref );
 err_permit_access:
	free_dma ( ring->sring.raw, PAGE_SIZE );
 err_alloc:
	return rc;
}
Example #9
/**
 * Create descriptor ring
 *
 * @v myson		Myson device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int myson_create_ring ( struct myson_nic *myson,
			       struct myson_ring *ring ) {
	size_t len = ( ring->count * sizeof ( ring->desc[0] ) );
	struct myson_descriptor *desc;
	struct myson_descriptor *next;
	physaddr_t address;
	unsigned int i;
	int rc;

	/* Allocate descriptor ring */
	ring->desc = malloc_dma ( len, MYSON_RING_ALIGN );
	if ( ! ring->desc ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	address = virt_to_bus ( ring->desc );

	/* Check address is usable by card */
	if ( ! myson_address_ok ( address + len ) ) {
		DBGC ( myson, "MYSON %p cannot support 64-bit ring address\n",
		       myson );
		rc = -ENOTSUP;
		goto err_64bit;
	}

	/* Initialise descriptor ring */
	memset ( ring->desc, 0, len );
	for ( i = 0 ; i < ring->count ; i++ ) {
		desc = &ring->desc[i];
		next = &ring->desc[ ( i + 1 ) % ring->count ];
		desc->next = cpu_to_le32 ( virt_to_bus ( next ) );
	}

	/* Program ring address */
	writel ( address, myson->regs + ring->reg );
	DBGC ( myson, "MYSON %p ring %02x is at [%08llx,%08llx)\n",
	       myson, ring->reg, ( ( unsigned long long ) address ),
	       ( ( unsigned long long ) address + len ) );

	return 0;

 err_64bit:
	free_dma ( ring->desc, len );
	ring->desc = NULL;
 err_alloc:
	return rc;
}
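The usability check above exists because the card can only address descriptor rings below 4GB, even on builds where physaddr_t is wider than 32 bits. A plausible sketch of what such a helper might look like is shown here; the real myson_address_ok() in the iPXE driver may be implemented differently.

/* Sketch only: non-zero if the card can reach this bus address.
 * Illustrates the 32-bit limit being tested, not the actual helper. */
static inline int example_address_ok ( physaddr_t address ) {
	/* Trivially usable when physical addresses are at most 32 bits wide */
	if ( sizeof ( physaddr_t ) <= sizeof ( uint32_t ) )
		return 1;
	/* Otherwise the whole ring must sit below the 4GB mark */
	return ( address <= 0xffffffffULL );
}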
Example #10
static int b44_init_rx_ring(struct b44_private *bp)
{
	b44_free_rx_ring(bp);

	bp->rx = malloc_dma(B44_RX_RING_LEN_BYTES, B44_DMA_ALIGNMENT);
	if (!bp->rx)
		return -ENOMEM;

	memset(bp->rx_iobuf, 0, sizeof(bp->rx_iobuf));

	bp->rx_iobuf[0] = alloc_iob(RX_PKT_BUF_SZ);
	if (!bp->rx_iobuf[0]) {
		free_dma(bp->rx, B44_RX_RING_LEN_BYTES);
		return -ENOMEM;
	}
	b44_populate_rx_descriptor(bp, 0);
	b44_rx_refill(bp, 0);

	DBG("Init RX rings: rx=0x%08lx\n", VIRT_TO_B44(bp->rx));
	return 0;
}
Example #11
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 *
 * @v adapter	e1000 private structure
 *
 * @ret rc       Returns 0 on success, negative on failure
 **/
static int
e1000_setup_rx_resources ( struct e1000_adapter *adapter )
{
	int i, j;
	struct e1000_rx_desc *rx_curr_desc;
	
	DBG ( "e1000_setup_rx_resources\n" );
	
	/* Allocate receive descriptor ring memory.
	   It must not cross a 64K boundary because of hardware errata
	 */

	adapter->rx_base =
		malloc_dma ( adapter->rx_ring_size, adapter->rx_ring_size );

	if ( ! adapter->rx_base ) {
		return -ENOMEM;
	}
	memset ( adapter->rx_base, 0, adapter->rx_ring_size );

	for ( i = 0; i < NUM_RX_DESC; i++ ) {
	
		adapter->rx_iobuf[i] = alloc_iob ( MAXIMUM_ETHERNET_VLAN_SIZE );
		
		/* If unable to allocate all iobufs, free any that
		 * were successfully allocated, and return an error 
		 */
		if ( ! adapter->rx_iobuf[i] ) {
			for ( j = 0; j < i; j++ ) {
				free_iob ( adapter->rx_iobuf[j] );
			}
			return -ENOMEM;
		}

		rx_curr_desc = ( void * ) ( adapter->rx_base ) + 
					  ( i * sizeof ( *adapter->rx_base ) ); 
			
		rx_curr_desc->buffer_addr = virt_to_bus ( adapter->rx_iobuf[i]->data );	

		DBG ( "i = %d  rx_curr_desc->buffer_addr = %#16llx\n", 
		      i, rx_curr_desc->buffer_addr );
		
	}	
	return 0;
}
Example #12
static int rtl818x_init_tx_ring(struct net80211_device *dev)
{
	struct rtl818x_priv *priv = dev->priv;
	int i;

	priv->tx_ring = malloc_dma(sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE,
				   RTL818X_RING_ALIGN);
	if (!priv->tx_ring) {
		DBG("rtl818x %s: cannot allocate TX ring\n", dev->netdev->name);
		return -ENOMEM;
	}
	/* only translate the address once the allocation has succeeded */
	priv->tx_ring_dma = virt_to_bus(priv->tx_ring);

	memset(priv->tx_ring, 0, sizeof(*priv->tx_ring) * RTL818X_TX_RING_SIZE);
	priv->tx_prod = priv->tx_cons = 0;

	for (i = 0; i < RTL818X_TX_RING_SIZE; i++)
		priv->tx_ring[i].next_tx_desc = cpu_to_le32(priv->tx_ring_dma +
				((i + 1) % RTL818X_TX_RING_SIZE) * sizeof(*priv->tx_ring));

	return 0;
}
Example #13
/**
 * Allocate I/O buffer
 *
 * @v len	Required length of buffer
 * @ret iobuf	I/O buffer, or NULL if none available
 *
 * The I/O buffer will be physically aligned to a multiple of
 * @c IOB_ALIGN.
 */
struct io_buffer * alloc_iob ( size_t len ) {
	struct io_buffer *iobuf = NULL;
	void *data;

	/* Pad to minimum length */
	if ( len < IOB_ZLEN )
		len = IOB_ZLEN;

	/* Align buffer length */
	len = ( len + __alignof__( *iobuf ) - 1 ) &
		~( __alignof__( *iobuf ) - 1 );
	
	/* Allocate memory for buffer plus descriptor */
	data = malloc_dma ( len + sizeof ( *iobuf ), IOB_ALIGN );
	if ( ! data )
		return NULL;

	iobuf = ( struct io_buffer * ) ( data + len );
	iobuf->head = iobuf->data = iobuf->tail = data;
	iobuf->end = iobuf;
	return iobuf;
}
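Because the io_buffer descriptor sits at the end of the same malloc_dma() allocation as the data, freeing a buffer is a single free_dma() over the combined region. A rough sketch of the matching free path is shown below, assuming the layout produced by alloc_iob() above; the real free_iob() in iPXE may differ in detail.

/* Sketch of the matching free path for the layout above. */
void example_free_iob ( struct io_buffer *iobuf ) {
	if ( ! iobuf )
		return;
	/* Data area plus the trailing descriptor were one allocation:
	 * head..end is the data, followed immediately by the descriptor. */
	free_dma ( iobuf->head,
		   ( iobuf->end - iobuf->head ) + sizeof ( *iobuf ) );
}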
Example #14
/*
 *  This function allocates both the DMA descriptor structure and the
 *  buffers it contains.  These hold the descriptors used by the
 *  hardware and the driver.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, int is_tx)
{
#define	DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((char *)(_ds) - (char *)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF9F) ? 1 : 0)
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	DBG2("ath9k: %s DMA: %d buffers %d desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		DBG("ath9k: ath_desc not DWORD aligned\n");
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary.
	 * However, iPXE only utilizes 16 buffers, which
	 * will never make up more than half of one page,
	 * so we will only ever skip 1 descriptor, if that.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped = 1;
		u32 dma_len;

		dma_len = ndesc_skipped * desc_len;
		dd->dd_desc_len += dma_len;
	}

	/* allocate descriptors */
	dd->dd_desc = malloc_dma(dd->dd_desc_len, 16);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	dd->dd_desc_paddr = virt_to_bus(dd->dd_desc);
	ds = (u8 *) dd->dd_desc;
	DBG2("ath9k: %s DMA map: %p (%d) -> %llx (%d)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = zalloc(bsize);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	free_dma(dd->dd_desc, dd->dd_desc_len);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef DS2PHYS
}
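The skip logic in the loop above only inspects the low 12 bits of the descriptor address: ATH_DESC_4KB_BOUND_CHECK() flags addresses that fall too close to the end of a 4KB page for a 32 dword descriptor fetch, and the driver advances by one buffer's worth of descriptors until the check passes. The standalone illustration below exercises the same macro; the addresses are made-up values, not taken from real hardware.

#include <stdio.h>
#include <stdint.h>

/* Same test as the driver macro: flag addresses whose low 12 bits land
 * in the tail of a 4KB page, where a descriptor fetch could cross it. */
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF9F) ? 1 : 0)

int main(void)
{
	/* illustrative addresses only */
	uint32_t addrs[] = { 0x10000F00, 0x10000FA0, 0x10001000 };

	for (unsigned int i = 0; i < 3; i++)
		printf("%#010x -> %s\n", addrs[i],
		       ATH_DESC_4KB_BOUND_CHECK(addrs[i]) ?
		       "skip (too close to the 4KB boundary)" : "ok");
	return 0;
}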