Example #1
void
htt_tx_detach(struct htt_pdev_t *pdev)
{
    unsigned int i;
    struct htt_tx_desc_page_t *page_info;

    if (pdev) {
        if (pdev->cfg.is_high_latency) {
            adf_os_mem_free(pdev->tx_descs.pool_vaddr);
            for (i = 0; i < pdev->num_pages; i++) {
                page_info = pdev->desc_pages + i;
                adf_os_mem_free(page_info->page_v_addr_start);
            }
        } else {
            for (i = 0; i < pdev->num_pages; i++) {
                page_info = pdev->desc_pages + i;
                adf_os_mem_free_consistent(
                    pdev->osdev,
                    pdev->num_desc_per_page * pdev->tx_descs.size,
                    page_info->page_v_addr_start,
                    page_info->page_p_addr,
                    adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
            }
        }
        adf_os_mem_free(pdev->desc_pages);
    }
}
/**
 * adf_os_mem_multi_pages_free() - free a multi-page kernel memory allocation
 * @osdev:      OS device handle pointer
 * @pages:      Multi page information storage
 * @memctxt:    Memory context
 * @cacheable:  Coherent memory or cacheable memory
 *
 * This function frees a large memory region that was allocated across
 * multiple pages.
 *
 * Return: None
 */
void adf_os_mem_multi_pages_free(adf_os_device_t osdev,
			struct adf_os_mem_multi_page_t *pages,
			adf_os_dma_context_t memctxt,
			bool cacheable)
{
	unsigned int page_idx;
	struct adf_os_mem_dma_page_t *dma_pages;

	if (cacheable) {
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
			adf_os_mem_free(pages->cacheable_pages[page_idx]);
		adf_os_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			adf_os_mem_free_consistent(osdev, PAGE_SIZE,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		adf_os_mem_free(pages->dma_pages);
	}

	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
}
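For orientation, here is a sketch of the page-table structures the multi-page helpers operate on (adf_os_mem_multi_pages_free above, adf_os_mem_multi_pages_alloc at the end of this collection), reconstructed from the field accesses in the examples. The authoritative definitions live in the adf_os headers; the integer field types below are inferred, not confirmed:

/* Sketch only: reconstructed from usage, not the authoritative header. */
struct adf_os_mem_dma_page_t {
	char *page_v_addr_start;       /* kernel virtual address of the page */
	char *page_v_addr_end;         /* page_v_addr_start + PAGE_SIZE */
	adf_os_dma_addr_t page_p_addr; /* DMA (bus) address of the page */
};

struct adf_os_mem_multi_page_t {
	uint16_t num_element_per_page;           /* PAGE_SIZE / element_size */
	uint16_t num_pages;                      /* pages backing all elements */
	struct adf_os_mem_dma_page_t *dma_pages; /* used when !cacheable */
	void **cacheable_pages;                  /* used when cacheable */
};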
Example #3
int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
   u_int16_t idx;

   if (pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
      adf_os_mem_free_consistent(pdev->osdev,
                   4,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   }

   if (pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
      adf_os_mem_free_consistent(pdev->osdev,
                   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
   }

   /* Free each single buffer */
   for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
      if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
         adf_nbuf_unmap(pdev->osdev,
            pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx],
            ADF_OS_DMA_FROM_DEVICE);
         adf_nbuf_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]);
      }
   }

   /* Free storage */
   adf_os_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg);

   return 0;
}
Example #4
int
htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
{
    int i, i_int;
    uint32_t **p;
    adf_os_dma_addr_t pool_paddr = {0};
    struct htt_tx_desc_page_t *page_info;
    unsigned int num_link = 0;
    uint32_t page_size;

    if (pdev->cfg.is_high_latency) {
        pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
    } else {
        pdev->tx_descs.size =
            /*
             * Start with the size of the base struct
             * that actually gets downloaded.
             */
            sizeof(struct htt_host_tx_desc_t)
            /*
             * Add the fragmentation descriptor elements.
             * Add the most that the OS may deliver, plus one more in
             * case the txrx code adds a prefix fragment (for TSO or
             * audio interworking SNAP header)
             */
            + (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev) + 1) * 8 /* 2x u_int32_t */
            + 4; /* u_int32_t fragmentation list terminator */
    }

    /*
     * Make sure tx_descs.size is a multiple of 4-bytes.
     * It should be, but round up just to be sure.
     */
    pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);
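    /*
     * Worked example: a size of 110 becomes (110 + 3) & ~0x3 = 112;
     * a size that is already a multiple of 4, e.g. 112, is unchanged.
     */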
    pdev->tx_descs.pool_elems = desc_pool_elems;
    pdev->tx_descs.alloc_cnt = 0;

    /* Calculate how many whole descriptors fit in a single page */
    page_size = adf_os_mem_get_page_size();
    pdev->num_desc_per_page = page_size / pdev->tx_descs.size;

    /*
     * Derive the page count from the descriptor count, not from the raw
     * pool size: each page wastes the tail bytes that cannot hold a whole
     * descriptor, so dividing (pool_elems * size) by page_size can
     * undercount the pages needed.
     */
    pdev->num_pages = pdev->tx_descs.pool_elems / pdev->num_desc_per_page;
    if (pdev->tx_descs.pool_elems % pdev->num_desc_per_page)
        pdev->num_pages++;
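    /*
     * Worked example (illustrative numbers): with 4096-byte pages and
     * tx_descs.size == 112, num_desc_per_page = 4096 / 112 = 36, so a
     * pool of 1024 descriptors needs ceil(1024 / 36) = 29 pages.
     */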

    /* Pages information storage */
    pdev->desc_pages = (struct htt_tx_desc_page_t *)adf_os_mem_alloc(
        pdev->osdev, pdev->num_pages * sizeof(struct htt_tx_desc_page_t));
    if (!pdev->desc_pages) {
        adf_os_print("HTT Attach, desc page alloc fail");
        goto fail1;
    }

    page_info = pdev->desc_pages;
    /* Allocate required memory with multiple pages */
    for (i = 0; i < pdev->num_pages; i++) {
        if (pdev->cfg.is_high_latency) {
            page_info->page_v_addr_start = adf_os_mem_alloc(
                pdev->osdev, page_size);
            page_info->page_p_addr = pool_paddr;
            if (!page_info->page_v_addr_start) {
                for (i_int = 0; i_int < i; i_int++) {
                    page_info = pdev->desc_pages + i_int;
                    adf_os_mem_free(page_info->page_v_addr_start);
                }
                goto fail2;
            }
        } else {
            page_info->page_v_addr_start = adf_os_mem_alloc_consistent(
                pdev->osdev,
                page_size,
                &page_info->page_p_addr,
                adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
            if (!page_info->page_v_addr_start) {
                for (i_int = 0; i_int < i; i_int++) {
                    page_info = pdev->desc_pages + i_int;
                    adf_os_mem_free_consistent(
                        pdev->osdev,
                        pdev->num_desc_per_page * pdev->tx_descs.size,
                        page_info->page_v_addr_start,
                        page_info->page_p_addr,
                        adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
                }
                goto fail2;
            }
        }
        page_info->page_v_addr_end = page_info->page_v_addr_start +
            pdev->num_desc_per_page * pdev->tx_descs.size;
        page_info++;
    }

    page_info = pdev->desc_pages;
    pdev->tx_descs.freelist = (uint32_t *)page_info->page_v_addr_start;
    p = (uint32_t **) pdev->tx_descs.freelist;
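    /*
     * Thread an embedded singly linked free list through the pages:
     * the first word of each free descriptor holds the address of the
     * next free descriptor, so a later allocation simply pops the head
     * of tx_descs.freelist.
     */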
    for (i = 0; i < pdev->num_pages; i++) {
        for (i_int = 0; i_int < pdev->num_desc_per_page; i_int++) {
            if (i_int == (pdev->num_desc_per_page - 1)) {
                /* Last element on this page must point to the next page */
                if (!page_info->page_v_addr_start) {
                    adf_os_print("overflow, num_link %d\n", num_link);
                    goto fail3;
                }
                page_info++;
                *p = (uint32_t *)page_info->page_v_addr_start;
            } else {
                *p = (uint32_t *)(((char *) p) + pdev->tx_descs.size);
            }
            num_link++;
            p = (uint32_t **) *p;
            /* Last link established exit */
            if (num_link == (pdev->tx_descs.pool_elems - 1))
               break;
        }
    }
    *p = NULL;

    if (pdev->cfg.is_high_latency) {
        adf_os_atomic_init(&pdev->htt_tx_credit.target_delta);
        adf_os_atomic_init(&pdev->htt_tx_credit.bus_delta);
        adf_os_atomic_add(HTT_MAX_BUS_CREDIT, &pdev->htt_tx_credit.bus_delta);
    }
    return 0; /* success */

fail3:
    if (pdev->cfg.is_high_latency) {
        for (i_int = 0; i_int < pdev->num_pages; i_int++) {
            page_info = pdev->desc_pages + i_int;
            adf_os_mem_free(page_info->page_v_addr_start);
        }
    } else {
        for (i_int = 0; i_int < pdev->num_pages; i_int++) {
            page_info = pdev->desc_pages + i_int;
            adf_os_mem_free_consistent(
                pdev->osdev,
                pdev->num_desc_per_page * pdev->tx_descs.size,
                page_info->page_v_addr_start,
                page_info->page_p_addr,
                adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
        }
    }

fail2:
    adf_os_mem_free(pdev->desc_pages);

fail1:
    return -1;
}
Example #5
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
    unsigned int uc_tx_buf_sz,
    unsigned int uc_tx_buf_cnt,
    unsigned int uc_tx_partition_base)
{
   unsigned int  tx_buffer_count;
   unsigned int  tx_buffer_count_pwr2;
   adf_nbuf_t    buffer_vaddr;
   u_int32_t     buffer_paddr;
   u_int32_t    *header_ptr;
   u_int32_t    *ring_vaddr;
   int           return_code = 0;
   uint16_t     idx;

   /* Allocate CE Write Index WORD */
   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                4,
                &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
      adf_os_print("%s: CE Write Index WORD alloc fail", __func__);
      return -1;
   }

   /* Allocate TX COMP Ring */
   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                uc_tx_buf_cnt * 4,
                &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
      adf_os_print("%s: TX COMP ring alloc fail", __func__);
      return_code = -2;
      goto free_tx_ce_idx;
   }

   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4);

   /* Allocate TX BUF vAddress Storage */
   pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
         (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev,
                          uc_tx_buf_cnt * sizeof(adf_nbuf_t));
   if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
      adf_os_print("%s: TX BUF POOL vaddr storage alloc fail",
                   __func__);
      return_code = -3;
      goto free_tx_comp_base;
   }
   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
                   uc_tx_buf_cnt * sizeof(adf_nbuf_t));

   ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
   /* Allocate TX buffers as many as possible */
   for (tx_buffer_count = 0;
        tx_buffer_count < (uc_tx_buf_cnt - 1);
        tx_buffer_count++) {
      buffer_vaddr = adf_nbuf_alloc(pdev->osdev,
                uc_tx_buf_sz, 0, 4, FALSE);
      if (!buffer_vaddr) {
         adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d",
                      __func__, tx_buffer_count);
         break;
      }

      /* Init buffer */
      adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
      header_ptr = (u_int32_t *)adf_nbuf_data(buffer_vaddr);

      *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
      header_ptr++;
      *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16;

      adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL);
      buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
      header_ptr++;
      *header_ptr = (u_int32_t)(buffer_paddr + 16);

      header_ptr++;
      *header_ptr = 0xFFFFFFFF;

      /* FRAG Header */
      header_ptr++;
      *header_ptr = buffer_paddr + 32;

      *ring_vaddr = buffer_paddr;
      pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
            buffer_vaddr;
      /* Memory barrier to ensure actual value updated */

      ring_vaddr++;
   }

   /*
    * The Tx completion ring size must be a power of 2, so the allocated
    * Tx buffer count must be one less than the ring size.
    */
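   /*
    * Worked example: if 1500 buffers were allocated,
    * vos_rounddown_pow_of_two(1501) = 1024, so tx_buffer_count_pwr2
    * becomes 1023 and buffers 1023..1499 are released just below.
    */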
   tx_buffer_count_pwr2 = vos_rounddown_pow_of_two(tx_buffer_count + 1) - 1;
   if (tx_buffer_count > tx_buffer_count_pwr2) {
       adf_os_print("%s: Allocated Tx buffer count %d is rounded down to %d",
                   __func__, tx_buffer_count, tx_buffer_count_pwr2);

       /* Free over allocated buffers below power of 2 */
       for (idx = tx_buffer_count_pwr2; idx < tx_buffer_count; idx++) {
           if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
               adf_nbuf_unmap(pdev->osdev,
                   pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx],
                   ADF_OS_DMA_FROM_DEVICE);
               adf_nbuf_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]);
           }
       }
   }

   /*
    * tx_buffer_count_pwr2 is unsigned and can never be negative;
    * zero means no Tx buffers were allocated at all.
    */
   if (!tx_buffer_count_pwr2) {
       adf_os_print("%s: Failed to round down Tx buffer count %d",
                   __func__, tx_buffer_count_pwr2);
       /* no buffers were allocated, so only the storage array remains */
       adf_os_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg);
       return_code = -4;
       goto free_tx_comp_base;
   }

   pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count_pwr2;

   return 0;

free_tx_comp_base:
   adf_os_mem_free_consistent(pdev->osdev,
                   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
free_tx_ce_idx:
   adf_os_mem_free_consistent(pdev->osdev,
                   4,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   return return_code;
}
Example #6
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
    unsigned int uc_tx_buf_sz,
    unsigned int uc_tx_buf_cnt,
    unsigned int uc_tx_partition_base)
{
   unsigned int  tx_buffer_count;
   adf_nbuf_t    buffer_vaddr;
   u_int32_t     buffer_paddr;
   u_int32_t    *header_ptr;
   u_int32_t    *ring_vaddr;
   int           return_code = 0;

   /* Allocate CE Write Index WORD */
   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                4,
                &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
      adf_os_print("%s: CE Write Index WORD alloc fail", __func__);
      return -1;
   }

   /* Allocate TX COMP Ring */
   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                uc_tx_buf_cnt * 4,
                &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
      adf_os_print("%s: TX COMP ring alloc fail", __func__);
      return_code = -2;
      goto free_tx_ce_idx;
   }

   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4);

   /* Allocate TX BUF vAddress Storage */
   pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
         (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev,
                          uc_tx_buf_cnt * sizeof(adf_nbuf_t));
   if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
      adf_os_print("%s: TX BUF POOL vaddr storage alloc fail",
                   __func__);
      return_code = -3;
      goto free_tx_comp_base;
   }
   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
                   uc_tx_buf_cnt * sizeof(adf_nbuf_t));

   ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
   /* Allocate TX buffers as many as possible */
   for (tx_buffer_count = 0;
        tx_buffer_count < (uc_tx_buf_cnt - 1);
        tx_buffer_count++) {
      buffer_vaddr = adf_nbuf_alloc(pdev->osdev,
                uc_tx_buf_sz, 0, 4, FALSE);
      if (!buffer_vaddr) {
         adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d",
                      __func__, tx_buffer_count);
         /* stop here; the partial buffer count is recorded below */
         break;
      }

      /* Init buffer */
      adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
      header_ptr = (u_int32_t *)adf_nbuf_data(buffer_vaddr);

      *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
      header_ptr++;
      *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16;

      adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL);
      buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
      header_ptr++;
      *header_ptr = (u_int32_t)(buffer_paddr + 16);

      header_ptr++;
      *header_ptr = 0xFFFFFFFF;

      /* FRAG Header */
      header_ptr++;
      *header_ptr = buffer_paddr + 32;

      *ring_vaddr = buffer_paddr;
      pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
            buffer_vaddr;
      /* Memory barrier to ensure actual value updated */

      ring_vaddr++;
   }

   pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

   return 0;

free_tx_comp_base:
   adf_os_mem_free_consistent(pdev->osdev,
                   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
free_tx_ce_idx:
   adf_os_mem_free_consistent(pdev->osdev,
                   4,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   return return_code;
}
/**
 * adf_os_mem_multi_pages_alloc() - allocate a large kernel memory region
 * @osdev:          OS device handle pointer
 * @pages:          Multi page information storage
 * @element_size:   Each element size
 * @element_num:    Total number of elements to allocate
 * @memctxt:        Memory context
 * @cacheable:      Coherent memory or cacheable memory
 *
 * This function allocates a large memory region across multiple pages.
 * Large contiguous allocations fail frequently, so instead of requesting
 * the whole region in one shot, it allocates multiple non-contiguous
 * pages that the caller combines at the point of use.
 *
 * Return: None
 */
void adf_os_mem_multi_pages_alloc(adf_os_device_t osdev,
			struct adf_os_mem_multi_page_t *pages,
			size_t element_size,
			uint16_t element_num,
			adf_os_dma_context_t memctxt,
			bool cacheable)
{
	uint16_t page_idx;
	struct adf_os_mem_dma_page_t *dma_pages;
	void **cacheable_pages = NULL;
	uint16_t i;

	pages->num_element_per_page = PAGE_SIZE / element_size;
	if (!pages->num_element_per_page) {
		adf_os_print("Invalid page %d or element size %d",
			(int)PAGE_SIZE, (int)element_size);
		goto out_fail;
	}

	pages->num_pages = element_num / pages->num_element_per_page;
	if (element_num % pages->num_element_per_page)
		pages->num_pages++;

	if (cacheable) {
		/* Pages information storage */
		pages->cacheable_pages = adf_os_mem_alloc(osdev,
			pages->num_pages * sizeof(pages->cacheable_pages));
		if (!pages->cacheable_pages) {
			adf_os_print("Cacheable page storage alloc fail");
			goto out_fail;
		}

		cacheable_pages = pages->cacheable_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			cacheable_pages[page_idx] = adf_os_mem_alloc(
				osdev, PAGE_SIZE);
			if (!cacheable_pages[page_idx]) {
				adf_os_print("cacheable page alloc fail, pi %d",
					page_idx);
				goto page_alloc_fail;
			}
		}
		pages->dma_pages = NULL;
	} else {
		pages->dma_pages = adf_os_mem_alloc(osdev,
		       pages->num_pages * sizeof(struct adf_os_mem_dma_page_t));
		if (!pages->dma_pages) {
			adf_os_print("dmaable page storage alloc fail");
			goto out_fail;
		}

		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				adf_os_mem_alloc_consistent(osdev, PAGE_SIZE,
				&dma_pages->page_p_addr, memctxt);
			if (!dma_pages->page_v_addr_start) {
				adf_os_print("dmaable page alloc fail pi %d",
					page_idx);
				goto page_alloc_fail;
			}
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + PAGE_SIZE;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
	}
	return;

page_alloc_fail:
	if (cacheable) {
		for (i = 0; i < page_idx; i++)
			adf_os_mem_free(pages->cacheable_pages[i]);
		adf_os_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (i = 0; i < page_idx; i++) {
			adf_os_mem_free_consistent(osdev, PAGE_SIZE,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		adf_os_mem_free(pages->dma_pages);
	}

out_fail:
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
}
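Finally, a minimal usage sketch of the alloc/free pair above, with an illustrative element size and count; example_pool_setup is a hypothetical caller, and osdev and memctxt stand for the caller's device handle and DMA memory context:

/* Hypothetical caller, sketching the intended alloc/use/free sequence. */
static int example_pool_setup(adf_os_device_t osdev,
			      adf_os_dma_context_t memctxt)
{
	struct adf_os_mem_multi_page_t pages;

	/* 2048 elements of 112 bytes each in DMA-coherent (non-cacheable)
	 * pages; the helper rounds the page count up as needed. */
	adf_os_mem_multi_pages_alloc(osdev, &pages, 112, 2048,
				     memctxt, false);
	if (!pages.num_pages)
		return -1; /* the failure path above zeroes num_pages */

	/* ... carve num_element_per_page elements out of each
	 * pages.dma_pages[i].page_v_addr_start ... */

	adf_os_mem_multi_pages_free(osdev, &pages, memctxt, false);
	return 0;
}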