Code example #1
/*
 *  This function will allocate both the DMA descriptor structure, and the
 *  buffers it contains.  These are used to contain the descriptors used
 *  by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
}
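
The helpers used above (DS2PHYS and the two ATH_DESC_4KB_* macros) are defined in ath9k.h rather than in this file. Below is a minimal sketch of them, reconstructed from the function's logic; the 0xF7F threshold is an assumption derived from the "32 dword descriptor fetch" comment (32 dwords = 128 bytes), so treat the exact value as illustrative rather than authoritative:

/*
 * Sketch of the ath9k.h helpers referenced by ath_descdma_setup().
 * The boundary check flags any descriptor whose physical address
 * falls in the last 128 bytes of a 4K page, where a full 32-dword
 * fetch would cross the page boundary.
 */
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) \
	((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) \
	((_len) / 4096)	/* assume one skipped descriptor per 4K page */
#define DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))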
Code example #2
File: init.c  Project: mjduddin/B14CKB1RD_kernel_m8
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), (u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
}
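
For reference, the matching teardown path that undoes this setup. This mirrors ath_descdma_cleanup() as it appears alongside ath_descdma_setup() in the mainline ath9k tree; it is reproduced from memory as a sketch, not a verbatim copy:

void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
			 struct list_head *head)
{
	/* release the coherent descriptor region allocated in setup */
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	/* drop the ath_buf headers and reset the bookkeeping struct */
	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}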
Code example #3
File: ath_desc.c  Project: jorneytu/wlan
int
ath_descdma_setup(
    struct ath_softc *sc,
    struct ath_descdma *dd, ath_bufhead *head,
    const char *name, int nbuf, int ndesc, int is_tx, int frag_per_msdu)
{
#define    DS2PHYS(_dd, _ds) \
    ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
    ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)

    u_int8_t *ds;
    struct ath_buf *bf;
    int i, bsize, error, desc_len;

    uint32_t nbuf_left, nbuf_alloc, alloc_size, buf_arr_size;
    int j, k;
    struct ath_buf **bf_arr;

    if (is_tx) {
        desc_len = sc->sc_txdesclen;

        /* legacy tx descriptor needs to be allocated for each fragment */
        if (sc->sc_num_txmaps == 1) {
            nbuf = nbuf * frag_per_msdu; /*ATH_FRAG_PER_MSDU;*/
        }
    } else {
        desc_len = sizeof(struct ath_desc);
    }

    DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf %d desc_len\n",
             __func__, name, nbuf, ndesc, desc_len);

    /* ath_desc must be a multiple of DWORDs */
    if ((desc_len % 4) != 0) {
        DPRINTF(sc, ATH_DEBUG_FATAL, "%s: ath_desc not DWORD aligned\n",
                __func__ );
        ASSERT((desc_len % 4) == 0);
        error = -ENOMEM;
        goto fail;
    }

    dd->dd_name = name;
    dd->dd_desc_len = desc_len * nbuf * ndesc;

    /*
     * WAR for bug 30982 (Merlin)
     * Need additional DMA memory because we can't use descriptors that cross the
     * 4K page boundary. Assume one skipped descriptor per 4K page.
     */

    if (!ath_hal_has4kbsplittrans(sc->sc_ah)) {
        int numdescpage = 4096 / (desc_len * ndesc);
        dd->dd_desc_len = (nbuf / numdescpage + 1) * 4096;
    }
#if ATH_SUPPORT_DESC_CACHABLE
    if ((strcmp(name, "tx") == 0) && sc->sc_enhanceddmasupport) {
        dd->dd_desc = (void *)OS_MALLOC_NONCONSISTENT(sc->sc_osdev,
                                       dd->dd_desc_len, &dd->dd_desc_paddr,
                                       OS_GET_DMA_MEM_CONTEXT(dd, dd_dmacontext),
                                       sc->sc_reg_parm.shMemAllocRetry);
    }
    else
#endif
    {
        /* allocate descriptors */
        dd->dd_desc = (void *)OS_MALLOC_CONSISTENT(sc->sc_osdev,
                                       dd->dd_desc_len, &dd->dd_desc_paddr,
                                       OS_GET_DMA_MEM_CONTEXT(dd, dd_dmacontext),
                                       sc->sc_reg_parm.shMemAllocRetry);
    }
    if (dd->dd_desc == NULL) {
        error = -ENOMEM;
        goto fail;
    }
    ds = dd->dd_desc;
    DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
            __func__, dd->dd_name, ds, (u_int32_t) dd->dd_desc_len,
            ito64(dd->dd_desc_paddr), /*XXX*/ (u_int32_t) dd->dd_desc_len);

    /* allocate buffers */
    bsize = sizeof(struct ath_buf) * nbuf;

    /* one extra element is reserved at the end to mark the end of the array */
    buf_arr_size = ((nbuf * sizeof(struct ath_buf)) / MAX_BUF_MEM_ALLOC_SIZE + 2);
    bf_arr = (struct ath_buf **)OS_MALLOC(sc->sc_osdev,
                                        (sizeof(struct ath_buf *) * buf_arr_size), GFP_KERNEL);
    if(!bf_arr)
    {
        error = -ENOMEM;
        goto fail2;
    }
    OS_MEMZERO(bf_arr, (buf_arr_size * sizeof(struct ath_buf *)));

    TAILQ_INIT(head);

    nbuf_left = nbuf;
    nbuf_alloc = (MAX_BUF_MEM_ALLOC_SIZE / (sizeof(struct ath_buf)));

    for(j = 0; (nbuf_left && (j < (buf_arr_size-1))); j++)
    {
        nbuf_alloc = MIN(nbuf_left, nbuf_alloc);
        alloc_size = (nbuf_alloc * sizeof(struct ath_buf));

        bf_arr[j] = (struct ath_buf *)OS_MALLOC(sc->sc_osdev, alloc_size, GFP_KERNEL);
        if(bf_arr[j] == NULL)
        {
            for(k = 0; k < j; k++)
            {
                OS_FREE(bf_arr[k]);
                bf_arr[k] = NULL;
            }
            error = -ENOMEM;
            goto fail2;
        }
        else
        {
            OS_MEMZERO(bf_arr[j], alloc_size);
            nbuf_left -= nbuf_alloc;
            bf = bf_arr[j];
            for (i = 0; i < nbuf_alloc; i++, bf++, ds += (desc_len * ndesc)) {
                bf->bf_desc = ds;
                bf->bf_daddr = DS2PHYS(dd, ds);
                if (!ath_hal_has4kbsplittrans(sc->sc_ah)) {
                    /*
                    * WAR for bug 30982.
                    * Skip descriptor addresses which can cause 4KB boundary crossing (addr + length)
                    * with a 32 dword descriptor fetch.
                    */
                    if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, desc_len * ndesc)) {
                        ds += 0x1000 - (bf->bf_daddr & 0xFFF);    /* start from the next page */
                        bf->bf_desc = ds;
                        bf->bf_daddr = DS2PHYS(dd, ds);
                    }
                }
                TAILQ_INSERT_TAIL(head, bf, bf_list);
            }
        }
    }

    dd->dd_bufptr = bf_arr;

#ifdef ATH_DEBUG_MEM_LEAK
    dd->dd_num_buf = nbuf;
#endif

    /* 
     * For OS's that need to allocate dma context to be used to 
     * send down to hw, do that here. (BSD is the only one that needs
     * it currently.)
     */ 
    ALLOC_DMA_CONTEXT_POOL(sc->sc_osdev, name, nbuf);

    return 0;
fail2:

#if ATH_SUPPORT_DESC_CACHABLE
    if ((strcmp(name, "tx") == 0) && sc->sc_enhanceddmasupport) {
        OS_FREE_NONCONSISTENT(sc->sc_osdev, dd->dd_desc_len,
                       dd->dd_desc, dd->dd_desc_paddr,
                       OS_GET_DMA_MEM_CONTEXT(dd, dd_dmacontext));
    } else
#endif
    {
        OS_FREE_CONSISTENT(sc->sc_osdev, dd->dd_desc_len,
                       dd->dd_desc, dd->dd_desc_paddr,
                       OS_GET_DMA_MEM_CONTEXT(dd, dd_dmacontext));
    }
    if (bf_arr) {
        OS_FREE(bf_arr);
    }
fail:
    OS_MEMZERO(dd, sizeof(*dd));
    return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
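
Unlike examples #1 and #2, which grow the allocation iteratively (one skipped descriptor per 4K page, re-applying the check to the added length), this variant simply rounds the region up to whole 4K pages: numdescpage descriptors fit per page, so nbuf/numdescpage + 1 pages always leave room for the skipped slots. A self-contained sketch of that arithmetic, with a hypothetical descriptor size:

#include <stdio.h>

int main(void)
{
    int desc_len = 60;  /* hypothetical DWORD-aligned descriptor size */
    int ndesc = 1;      /* descriptors per buffer */
    int nbuf = 128;     /* buffers requested */

    /* how many whole descriptors fit in one 4K page */
    int numdescpage = 4096 / (desc_len * ndesc);

    /* round up to whole pages, plus one page of slack */
    int dd_desc_len = (nbuf / numdescpage + 1) * 4096;

    printf("%d desc/page, %d bytes for %d buffers\n",
           numdescpage, dd_desc_len, nbuf);  /* 68 desc/page, 8192 bytes */
    return 0;
}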
Code example #4
File: ath9k_init.c  Project: 42wim/ipxe
/*
 *  This function will allocate both the DMA descriptor structure, and the
 *  buffers it contains.  These are used to contain the descriptors used
 *  by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, int is_tx)
{
#define	DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((char *)(_ds) - (char *)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF9F) ? 1 : 0)
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	DBG2("ath9k: %s DMA: %d buffers %d desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		DBG("ath9k: ath_desc not DWORD aligned\n");
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary.
	 * However, iPXE only utilizes 16 buffers, which
	 * will never make up more than half of one page,
	 * so we will only ever skip 1 descriptor, if that.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped = 1;
		u32 dma_len;

		dma_len = ndesc_skipped * desc_len;
		dd->dd_desc_len += dma_len;
	}

	/* allocate descriptors */
	dd->dd_desc = malloc_dma(dd->dd_desc_len, 16);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	dd->dd_desc_paddr = virt_to_bus(dd->dd_desc);
	ds = (u8 *) dd->dd_desc;
	DBG2("ath9k: %s DMA map: %p (%d) -> %llx (%d)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = zalloc(bsize);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	free_dma(dd->dd_desc, dd->dd_desc_len);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef DS2PHYS
}
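
The boundary-skip loop is easiest to see in isolation. The standalone sketch below walks a fake page-aligned descriptor region using the iPXE threshold (0xF9F) and prints which addresses would be skipped; the descriptor size and buffer count are made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF9F) ? 1 : 0)

int main(void)
{
	uint32_t daddr = 0;	/* pretend dd_desc_paddr is page aligned */
	int desc_len = 96;	/* hypothetical descriptor size */
	int ndesc = 1, nbuf = 50;
	int i;

	for (i = 0; i < nbuf; i++) {
		/* advance past any slot whose fetch would cross a 4K page */
		while (ATH_DESC_4KB_BOUND_CHECK(daddr)) {
			printf("buf %d: skipping 0x%x near page boundary\n",
			       i, daddr);
			daddr += desc_len * ndesc;
		}
		/* daddr is now a safe bf_daddr for buffer i */
		daddr += desc_len * ndesc;
	}
	return 0;
}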