/**
 * adf_os_mem_multi_pages_free() - free a large, multi-page kernel memory allocation
 * @osdev:      OS device handle pointer
 * @pages:      Multi page information storage
 * @memctxt:    Memory context
 * @cacheable:  true for cacheable memory, false for DMA-coherent memory
 *
 * This function frees a large block of memory that was allocated across multiple pages.
 *
 * Return: None
 */
void adf_os_mem_multi_pages_free(adf_os_device_t osdev,
			struct adf_os_mem_multi_page_t *pages,
			adf_os_dma_context_t memctxt,
			bool cacheable)
{
	unsigned int page_idx;
	struct adf_os_mem_dma_page_t *dma_pages;

	if (cacheable) {
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
			adf_os_mem_free(pages->cacheable_pages[page_idx]);
		adf_os_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			adf_os_mem_free_consistent(osdev, PAGE_SIZE,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		adf_os_mem_free(pages->dma_pages);
	}

	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
	return;
}
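The helper above pairs with adf_os_mem_multi_pages_alloc() (shown later in this collection). Below is a minimal usage sketch, not taken from the driver, showing a cacheable multi-page pool being allocated, walked element by element, and released. It relies only on the num_pages, num_element_per_page and cacheable_pages fields visible in the source; the element size and count are made up for illustration.

/* Hypothetical usage sketch (not from the driver): allocate a cacheable
 * multi-page element pool, zero every element, then free the whole pool. */
static void example_multi_page_pool_usage(adf_os_device_t osdev)
{
	struct adf_os_mem_multi_page_t pages;
	adf_os_dma_context_t memctxt = {0};   /* unused for cacheable pools */
	const size_t element_size = 64;       /* assumed element size */
	const uint16_t element_num = 1024;    /* assumed element count */
	unsigned int page_idx, elem_idx;

	adf_os_mem_multi_pages_alloc(osdev, &pages, element_size, element_num,
				     memctxt, true);
	if (!pages.cacheable_pages)
		return;   /* allocation failed; the helper already reset the struct */

	for (page_idx = 0; page_idx < pages.num_pages; page_idx++) {
		char *page = pages.cacheable_pages[page_idx];

		/* The last page may hold fewer live elements than
		 * num_element_per_page, but touching the full page stays
		 * within the PAGE_SIZE allocation. */
		for (elem_idx = 0; elem_idx < pages.num_element_per_page; elem_idx++)
			adf_os_mem_set(page + elem_idx * element_size, 0,
				       element_size);
	}

	adf_os_mem_multi_pages_free(osdev, &pages, memctxt, true);
}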
Example #2
void
htt_tx_detach(struct htt_pdev_t *pdev)
{
    unsigned int i;
    struct htt_tx_desc_page_t *page_info;

    if (pdev) {
        if (pdev->cfg.is_high_latency) {
            adf_os_mem_free(pdev->tx_descs.pool_vaddr);
            for (i = 0; i < pdev->num_pages; i++) {
                page_info = pdev->desc_pages + i;
                adf_os_mem_free(page_info->page_v_addr_start);
            }
        } else {
            for (i = 0; i < pdev->num_pages; i++) {
                page_info = pdev->desc_pages + i;
                adf_os_mem_free_consistent(
                    pdev->osdev,
                    pdev->num_desc_per_page * pdev->tx_descs.size,
                    page_info->page_v_addr_start,
                    page_info->page_p_addr,
                    adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
            }
        }
        adf_os_mem_free(pdev->desc_pages);
    }
}
Example #3
A_STATUS
//fwd_device_removed(void *ctx, a_uint8_t surpriseRemoved)
fwd_device_removed(void *ctx)
{
  adf_os_mem_free(ctx);
  return A_OK;
}
Example #4
/* undo what was done in HIFInit() */
void HIFShutDown(HIF_HANDLE hHIF)
{
    HIF_DEVICE_USB *hif_dev = (HIF_DEVICE_USB *)hHIF;

    /* need to handle packets queued in the HIF */

    /* free memory for hif device */
    adf_os_mem_free(hif_dev);
}
Example #5
void
htt_detach(htt_pdev_handle pdev)
{
    htt_rx_detach(pdev);
    htt_tx_detach(pdev);
    htt_htc_pkt_pool_free(pdev);
    HTT_TX_MUTEX_DESTROY(&pdev->htt_tx_mutex); 
    HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(pdev);
    adf_os_mem_free(pdev);
}
Example #6
void  
tx99_detach(struct ath_softc *sc)
{   
    tx99_stop(sc, 0);

    if (sc->sc_tx99) {
        adf_os_mem_free(sc->sc_tx99);
    }
    sc->sc_tx99 = NULL;
}
Example #7
/**
 * @brief Free the S/W & H/W descriptor ring
 * 
 * @param osdev
 * @param dma_q
 * @param num_desc
 */
static void
pci_dma_free_swdesc(adf_os_device_t  osdev, pci_dma_softc_t  *dma_q,
                    a_uint32_t num_desc)
{
    a_uint32_t  size_hw;

    size_hw = sizeof(struct zsDmaDesc) * num_desc;
    adf_os_dmamem_free(osdev, size_hw, PCI_DMA_MAPPING, dma_q->hw_ring, 
                       dma_q->dmap);

    adf_os_mem_free(dma_q->sw_ring);
}
Example #8
void
htt_htc_pkt_pool_free(struct htt_pdev_t *pdev)
{
    struct htt_htc_pkt_union *pkt, *next;
    pkt = pdev->htt_htc_pkt_freelist;
    while (pkt) {
        next = pkt->u.next;
        adf_os_mem_free(pkt);
        pkt = next;
    }
    pdev->htt_htc_pkt_freelist = NULL;
}
Example #9
static void usb_hif_free_pipe_resources(HIF_USB_PIPE *pipe)
{
	HIF_URB_CONTEXT *urb_context;
	adf_nbuf_t nbuf;

	if (NULL == pipe->device) {
		/* nothing allocated for this pipe */
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("pipe->device is null\n"));
		return;
	}

	AR_DEBUG_PRINTF(ATH_DEBUG_TRC, (
			 "athusb: free resources lpipe:%d hpipe:0x%X urbs:%d avail:%d\n",
			 pipe->logical_pipe_num,
			 pipe->usb_pipe_handle, pipe->urb_alloc,
			 pipe->urb_cnt));

	if (pipe->urb_alloc != pipe->urb_cnt) {
		AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (
				 "athusb: urb leak! lpipe:%d hpipe:0x%X urbs:%d avail:%d\n",
				 pipe->logical_pipe_num,
				 pipe->usb_pipe_handle, pipe->urb_alloc,
				 pipe->urb_cnt));
	}

	while (TRUE) {
		urb_context = usb_hif_alloc_urb_from_pipe(pipe);
		if (NULL == urb_context)
			break;

		if (urb_context->buf) {
			adf_nbuf_free(urb_context->buf);
			urb_context->buf = NULL;
		}

		if (htc_bundle_send) {
			while ((nbuf =
				skb_dequeue(&urb_context->comp_queue)) !=
			       NULL) {
				adf_nbuf_free(nbuf);
			}
		}

		usb_free_urb(urb_context->urb);
		urb_context->urb = NULL;
		adf_os_mem_free(urb_context);
	}

}
Example #10
/*
 * Completion routine for ALL HIF layer async I/O
 */
A_STATUS HIFDevRWCompletionHandler(void *context, A_STATUS status)
{
    struct HIFSendContext *pSendContext = (struct HIFSendContext *)context;
    unsigned int transferID = pSendContext->transferID;
    HIF_SDIO_DEVICE *pDev = pSendContext->pDev;
    adf_nbuf_t buf = pSendContext->netbuf;

    if (pSendContext->bNewAlloc){
        adf_os_mem_free((void*)pSendContext);
    } else {
        adf_nbuf_pull_head(buf, pSendContext->head_data_len);
    }
    if (pDev->hif_callbacks.txCompletionHandler) {
        pDev->hif_callbacks.txCompletionHandler(pDev->hif_callbacks.Context,
                buf,
                transferID);
    }
    return A_OK;
}
Example #11
/* cleanup the HTC instance */
static void HTCCleanup(HTC_TARGET *target)
{

    HIF_Cleanup(target->hif_dev);  /* TODO: HIF should clean up any buffers that remain queued */

#ifdef HTC_HOST_CREDIT_DIST
    adf_os_timer_cancel(&target->host_htc_credit_debug_timer);
#endif

    /* release htc_rdy_mutex */
    adf_os_mutex_release(&target->htc_rdy_mutex);

    /* free our instance */
    adf_os_mem_free(target);
    /* TODO: other cleanup */
}
Example #12
void
ol_rx_delba_handler(
    ol_txrx_pdev_handle pdev,
    u_int16_t peer_id,
    u_int8_t tid)
{
    struct ol_txrx_peer_t *peer;
    struct ol_rx_reorder_t *rx_reorder;

    peer = ol_txrx_peer_find_by_id(pdev, peer_id);
    if (peer == NULL) {
        return;
    }
    rx_reorder = &peer->tids_rx_reorder[tid];

    /* deallocate the old rx reorder array */
    adf_os_mem_free(rx_reorder->array);

    /* set up the TID with default parameters (ARQ window size = 1) */
    ol_rx_reorder_init(rx_reorder, tid);
}
Example #13
A_STATUS
wdi_event_detach(struct ol_txrx_pdev_t *txrx_pdev)
{
    int i;
    wdi_event_subscribe *wdi_sub;
    if (!txrx_pdev) {
        adf_os_print("Invalid device in %s\nWDI attach failed", __FUNCTION__);
        return A_ERROR;
    }
    if (!txrx_pdev->wdi_event_list) {
        return A_ERROR;
    }
    for (i = 0; i < WDI_NUM_EVENTS; i++) {
        wdi_sub = txrx_pdev->wdi_event_list[i];
        /* Delete all the subscribers */
        wdi_event_del_subs(wdi_sub, i); 
    }
    if (txrx_pdev->wdi_event_list) {
        adf_os_mem_free(txrx_pdev->wdi_event_list);
    }
    return A_OK;
}
void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
{
	struct wmi_unified *wmi_handle = (struct wmi_unified *)ctx;
	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
#ifdef WMI_INTERFACE_EVENT_LOGGING
	u_int32_t cmd_id;
#endif

	ASSERT(wmi_cmd_buf);
#ifdef WMI_INTERFACE_EVENT_LOGGING
	cmd_id = WMI_GET_FIELD(adf_nbuf_data(wmi_cmd_buf),
		WMI_CMD_HDR, COMMANDID);
	adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
	/* Record 16 bytes of WMI cmd tx complete data
	   - exclude TLV and WMI headers */
	WMI_COMMAND_TX_CMP_RECORD(cmd_id,
		((u_int32_t *)adf_nbuf_data(wmi_cmd_buf) + 2));
	adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif
	adf_nbuf_free(wmi_cmd_buf);
	adf_os_mem_free(htc_pkt);
	adf_os_atomic_dec(&wmi_handle->pending_cmds);
}
Example #15
static void _ieee80211_resmgr_delete(ieee80211_resmgr_t resmgr)
{
    struct ieee80211com *ic;
    struct ol_ath_softc_net80211 *scn;
    
    if (!resmgr)
        return ;

    ic = resmgr->ic;
    scn = OL_ATH_SOFTC_NET80211(ic);

    /* Unregister WMI event handlers */
    wmi_unified_unregister_event_handler(scn->wmi_handle, WMI_VDEV_START_RESP_EVENTID);

    adf_os_spinlock_destroy(&resmgr->rm_lock);
    adf_os_spinlock_destroy(&resmgr->rm_handler_lock);

    adf_os_mem_free(resmgr);

    ic->ic_resmgr = NULL;

    return;
}
Example #16
int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
   u_int16_t idx;

   if (pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
      adf_os_mem_free_consistent(pdev->osdev,
                   4,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   }

   if (pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
     adf_os_mem_free_consistent(pdev->osdev,
                   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
   }

   /* Free each single buffer */
   for(idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
      if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
         adf_nbuf_unmap(pdev->osdev,
            pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx],
            ADF_OS_DMA_FROM_DEVICE);
         adf_nbuf_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]);
      }
   }

   /* Free storage */
   adf_os_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg);

   return 0;
}
Example #17
void __ahdecl
ath_hal_free(void* p)
{
	adf_os_mem_free(p);
}
LOCAL void _buf_pool_dynamic_shutdown(pool_handle_t handle) 
{
    BUF_POOL_DYNAMIC_CONTEXT *ctx = (BUF_POOL_DYNAMIC_CONTEXT *)handle;
    
    adf_os_mem_free(ctx);
}
void
ath_rate_detach(struct ath_ratectrl *rc)
{
	adf_os_mem_free(rc);
}
static void _WMI_Shutdown(wmi_handle_t handle)
{
	WMI_SVC_CONTEXT *pWMI = (WMI_SVC_CONTEXT *)handle;

	adf_os_mem_free(pWMI);
}
Example #21
void
ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev)
{
    adf_os_mem_free(pdev->rx_reorder_trace.data);
}
/**
 * adf_os_mem_multi_pages_alloc() - allocate a large amount of kernel memory
 * @osdev:          OS device handle pointer
 * @pages:          Multi page information storage
 * @element_size:   Size of each element
 * @element_num:    Total number of elements to allocate
 * @memctxt:        Memory context
 * @cacheable:      true for cacheable memory, false for DMA-coherent memory
 *
 * This function allocates a large amount of memory across multiple pages.
 * Large contiguous allocations fail frequently, so instead of requesting one
 * large block in a single shot, this routine allocates multiple non-contiguous
 * pages; the caller combines the pages at the point of actual use.
 *
 * Return: None
 */
void adf_os_mem_multi_pages_alloc(adf_os_device_t osdev,
			struct adf_os_mem_multi_page_t *pages,
			size_t element_size,
			uint16_t element_num,
			adf_os_dma_context_t memctxt,
			bool cacheable)
{
	uint16_t page_idx;
	struct adf_os_mem_dma_page_t *dma_pages;
	void **cacheable_pages = NULL;
	uint16_t i;

	pages->num_element_per_page = PAGE_SIZE / element_size;
	if (!pages->num_element_per_page) {
		adf_os_print("Invalid page %d or element size %d",
			(int)PAGE_SIZE, (int)element_size);
		goto out_fail;
	}

	pages->num_pages = element_num / pages->num_element_per_page;
	if (element_num % pages->num_element_per_page)
		pages->num_pages++;

	if (cacheable) {
		/* Pages information storage */
		pages->cacheable_pages = adf_os_mem_alloc(osdev,
			pages->num_pages * sizeof(pages->cacheable_pages));
		if (!pages->cacheable_pages) {
			adf_os_print("Cacheable page storage alloc fail");
			goto out_fail;
		}

		cacheable_pages = pages->cacheable_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			cacheable_pages[page_idx] = adf_os_mem_alloc(
				osdev, PAGE_SIZE);
			if (!cacheable_pages[page_idx]) {
				adf_os_print("cacheable page alloc fail, pi %d",
					page_idx);
				goto page_alloc_fail;
			}
		}
		pages->dma_pages = NULL;
	} else {
		pages->dma_pages = adf_os_mem_alloc(osdev,
		       pages->num_pages * sizeof(struct adf_os_mem_dma_page_t));
		if (!pages->dma_pages) {
			adf_os_print("dmaable page storage alloc fail");
			goto out_fail;
		}

		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				adf_os_mem_alloc_consistent(osdev, PAGE_SIZE,
				&dma_pages->page_p_addr, memctxt);
			if (!dma_pages->page_v_addr_start) {
				adf_os_print("dmaable page alloc fail pi %d",
					page_idx);
				goto page_alloc_fail;
			}
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + PAGE_SIZE;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
	}
	return;

page_alloc_fail:
	if (cacheable) {
		for (i = 0; i < page_idx; i++)
			adf_os_mem_free(pages->cacheable_pages[i]);
		adf_os_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (i = 0; i < page_idx; i++) {
			adf_os_mem_free_consistent(osdev, PAGE_SIZE,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		adf_os_mem_free(pages->dma_pages);
	}

out_fail:
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
	return;
}
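For the DMA-coherent (cacheable == false) case, a caller typically carves fixed-size elements out of the dma_pages array that the allocator above fills in. The sketch below shows one way to derive an element's virtual/physical address pair; it assumes elements are packed from the start of each page with num_element_per_page per page, that page_v_addr_start is a char pointer, and that adf_os_dma_addr_t supports plain integer addition, as the surrounding code suggests.

/* Hypothetical sketch (not from the driver): look up the virtual and
 * physical address of element `idx` in a DMA-coherent multi-page pool
 * produced by adf_os_mem_multi_pages_alloc(..., cacheable = false). */
static int example_multi_page_elem_addr(struct adf_os_mem_multi_page_t *pages,
					size_t element_size, uint16_t idx,
					char **vaddr, adf_os_dma_addr_t *paddr)
{
	uint16_t page_idx = idx / pages->num_element_per_page;
	size_t offset = (size_t)(idx % pages->num_element_per_page) * element_size;
	struct adf_os_mem_dma_page_t *dma_page;

	if (page_idx >= pages->num_pages)
		return -1;   /* element index out of range */

	dma_page = &pages->dma_pages[page_idx];
	*vaddr = dma_page->page_v_addr_start + offset;
	*paddr = dma_page->page_p_addr + offset;   /* assumes an integral DMA address type */
	return 0;
}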
Example #23
hif_status_t
fwd_device_removed(void *ctx)
{
  adf_os_mem_free(ctx);
  return HIF_OK;
}
Example #24
htt_pdev_handle
htt_attach(
    ol_txrx_pdev_handle txrx_pdev,
    ol_pdev_handle ctrl_pdev,
    HTC_HANDLE htc_pdev,
    adf_os_device_t osdev,
    int desc_pool_size)
{
    struct htt_pdev_t *pdev;
    int i;

    pdev = adf_os_mem_alloc(osdev, sizeof(*pdev));

    if (!pdev) {
        goto fail1;
    }

    pdev->osdev = osdev;
    pdev->ctrl_pdev = ctrl_pdev;
    pdev->txrx_pdev = txrx_pdev;
    pdev->htc_pdev = htc_pdev;

    adf_os_mem_set(&pdev->stats, 0, sizeof(pdev->stats));
    pdev->htt_htc_pkt_freelist = NULL;

    /* for efficiency, store a local copy of the is_high_latency flag */
    pdev->cfg.is_high_latency = ol_cfg_is_high_latency(pdev->ctrl_pdev);

    /*
     * Connect to HTC service.
     * This has to be done before calling htt_rx_attach,
     * since htt_rx_attach involves sending a rx ring configure
     * message to the target.
     */
//AR6004 doesn't need the HTT layer.
#ifndef AR6004_HW
    if (htt_htc_attach(pdev)) {
        goto fail2;
    }
#endif
    if (htt_tx_attach(pdev, desc_pool_size)) {
        goto fail2;
    }

    if (htt_rx_attach(pdev)) {
        goto fail3;
    }

    HTT_TX_MUTEX_INIT(&pdev->htt_tx_mutex); 
    HTT_TX_NBUF_QUEUE_MUTEX_INIT(pdev);

    /* pre-allocate some HTC_PACKET objects */
    for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
        struct htt_htc_pkt_union *pkt;
        pkt = adf_os_mem_alloc(pdev->osdev, sizeof(*pkt));
        if (! pkt) {
            break;
        }
        htt_htc_pkt_free(pdev, &pkt->u.pkt);
    }

    if (pdev->cfg.is_high_latency) {
        /*
         * HL - download the whole frame.
         * Specify a download length greater than the max MSDU size,
         * so the downloads will be limited by the actual frame sizes.
         */
        pdev->download_len = 5000;
        if (ol_cfg_tx_free_at_download(pdev->ctrl_pdev)) {
            pdev->tx_send_complete_part2 = ol_tx_download_done_hl_free;
        } else {
            pdev->tx_send_complete_part2 = ol_tx_download_done_hl_retain;
        }

        /*
         * For HL, the FW rx desc is directly referenced at its location
         * inside the rx indication message.
         */
/*
 * CHECK THIS LATER: does the HL HTT version of htt_rx_mpdu_desc_list_next
 * (which is not currently implemented) present the adf_nbuf_data(rx_ind_msg)
 * as the abstract rx descriptor?
 * If not, the rx_fw_desc_offset initialization here will have to be
 * adjusted accordingly.
 * NOTE: for HL, because the fw rx desc is in the ind msg, not in the rx desc,
 * the offset should be a negative value.
 */
        pdev->rx_fw_desc_offset =
            HTT_ENDIAN_BYTE_IDX_SWAP(
                    HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET
                    - HTT_RX_IND_HL_BYTES);

        htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_hl;
    } else {
        /*
         * LL - download just the initial portion of the frame.
         * Download enough to cover the encapsulation headers checked
         * by the target's tx classification descriptor engine.
         */
        /* Get the packet download length */
        pdev->download_len = htt_pkt_dl_len_get(pdev);

        /*
         * Account for the HTT tx descriptor, including the
         * HTC header + alignment padding.
         */
        pdev->download_len += sizeof(struct htt_host_tx_desc_t);

        pdev->tx_send_complete_part2 = ol_tx_download_done_ll;

        /*
         * For LL, the FW rx desc is alongside the HW rx desc fields in
         * the htt_host_rx_desc_base struct.
         */
        pdev->rx_fw_desc_offset = RX_STD_DESC_FW_MSDU_OFFSET;

        htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_ll;
    }

    return pdev;

fail3:
    htt_tx_detach(pdev);

fail2:
    adf_os_mem_free(pdev);

fail1:
    return NULL;
}
Example #25
LOCAL void _HTC_Shutdown(htc_handle_t htcHandle)
{
	HTC_CONTEXT *pHTC = (HTC_CONTEXT *)htcHandle;
    
	adf_os_mem_free(pHTC);
}
Example #26
int
htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
{
    int i, i_int, pool_size;
    uint32_t **p;
    adf_os_dma_addr_t pool_paddr = {0};
    struct htt_tx_desc_page_t *page_info;
    unsigned int num_link = 0;
    uint32_t page_size;

    if (pdev->cfg.is_high_latency) {
        pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
    } else {
        pdev->tx_descs.size =
            /*
             * Start with the size of the base struct
             * that actually gets downloaded.
             */
            sizeof(struct htt_host_tx_desc_t)
            /*
             * Add the fragmentation descriptor elements.
             * Add the most that the OS may deliver, plus one more in
             * case the txrx code adds a prefix fragment (for TSO or
             * audio interworking SNAP header)
             */
            + (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev)+1) * 8 // 2x u_int32_t
            + 4; /* u_int32_t fragmentation list terminator */
    }

    /*
     * Make sure tx_descs.size is a multiple of 4-bytes.
     * It should be, but round up just to be sure.
     */
    pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);
    pdev->tx_descs.pool_elems = desc_pool_elems;
    pdev->tx_descs.alloc_cnt = 0;

    pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;

   /* Calculate required page count first */
    page_size = adf_os_mem_get_page_size();
    pdev->num_pages = pool_size / page_size;
    if (pool_size % page_size)
        pdev->num_pages++;

    /* Pack as many descriptors as possible into a single page:
     * calculate how many descriptors fit in one page. */
    pdev->num_desc_per_page = page_size / pdev->tx_descs.size;

    /* Pages information storage */
    pdev->desc_pages = (struct htt_tx_desc_page_t *)adf_os_mem_alloc(
        pdev->osdev, pdev->num_pages * sizeof(struct htt_tx_desc_page_t));
    if (!pdev->desc_pages) {
        adf_os_print("HTT Attach, desc page alloc fail");
        goto fail1;
    }

    page_info = pdev->desc_pages;
    p = (uint32_t **) pdev->tx_descs.freelist;
    /* Allocate required memory with multiple pages */
    for(i = 0; i < pdev->num_pages; i++) {
        if (pdev->cfg.is_high_latency) {
            page_info->page_v_addr_start = adf_os_mem_alloc(
                pdev->osdev, page_size);
            page_info->page_p_addr = pool_paddr;
            if (!page_info->page_v_addr_start) {
               page_info = pdev->desc_pages;
               for (i_int = 0 ; i_int < i; i_int++) {
                    page_info = pdev->desc_pages + i_int;
                    adf_os_mem_free(page_info->page_v_addr_start);
               }
               goto fail2;
            }
        } else {
            page_info->page_v_addr_start = adf_os_mem_alloc_consistent(
                pdev->osdev,
                page_size,
                &page_info->page_p_addr,
                adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
            if (!page_info->page_v_addr_start) {
               page_info = pdev->desc_pages;
               for (i_int = 0 ; i_int < i; i_int++) {
                    page_info = pdev->desc_pages + i_int;
                    adf_os_mem_free_consistent(
                        pdev->osdev,
                        pdev->num_desc_per_page * pdev->tx_descs.size,
                        page_info->page_v_addr_start,
                        page_info->page_p_addr,
                        adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
               }
               goto fail2;
            }
        }
        page_info->page_v_addr_end = page_info->page_v_addr_start +
            pdev->num_desc_per_page * pdev->tx_descs.size;
        page_info++;
    }

    page_info = pdev->desc_pages;
    pdev->tx_descs.freelist = (uint32_t *)page_info->page_v_addr_start;
    p = (uint32_t **) pdev->tx_descs.freelist;
    for(i = 0; i < pdev->num_pages; i++) {
        for (i_int = 0; i_int < pdev->num_desc_per_page; i_int++) {
            if (i_int == (pdev->num_desc_per_page - 1)) {
                /* Last element on this page should point to the next page */
                if (!page_info->page_v_addr_start) {
                    adf_os_print("overflow, num_link %d\n", num_link);
                    goto fail3;
                }
                page_info++;
                *p = (uint32_t *)page_info->page_v_addr_start;
            }
            else {
                *p = (uint32_t *)(((char *) p) + pdev->tx_descs.size);
            }
            num_link++;
            p = (uint32_t **) *p;
            /* Last link established exit */
            if (num_link == (pdev->tx_descs.pool_elems - 1))
               break;
        }
    }
    *p = NULL;

    if (pdev->cfg.is_high_latency) {
        adf_os_atomic_init(&pdev->htt_tx_credit.target_delta);
        adf_os_atomic_init(&pdev->htt_tx_credit.bus_delta);
        adf_os_atomic_add(HTT_MAX_BUS_CREDIT,&pdev->htt_tx_credit.bus_delta);
    }
    return 0; /* success */

fail3:
    if (pdev->cfg.is_high_latency) {
        page_info = pdev->desc_pages;
        for (i_int = 0 ; i_int < pdev->num_pages; i_int++) {
            page_info = pdev->desc_pages + i_int;
            adf_os_mem_free(page_info->page_v_addr_start);
        }
    } else {
        page_info = pdev->desc_pages;
        for (i_int = 0 ; i_int < pdev->num_pages; i_int++) {
            page_info = pdev->desc_pages + i_int;
            adf_os_mem_free_consistent(
                pdev->osdev,
                pdev->num_desc_per_page * pdev->tx_descs.size,
                page_info->page_v_addr_start,
                page_info->page_p_addr,
                adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
        }
    }

fail2:
    adf_os_mem_free(pdev->desc_pages);

fail1:
    return -1;
}
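The linking loop in htt_tx_attach() above builds an intrusive free list: the first word of each fixed-size descriptor stores a pointer to the next descriptor, and the last descriptor of a page points at the first descriptor of the following page. A standalone sketch of that chaining technique, with hypothetical names and independent of the HTT structures:

#include <stddef.h>

/* Hypothetical sketch: chain `num_slots` fixed-size slots of a contiguous
 * buffer into an intrusive singly linked free list. Requires slot_size to be
 * at least sizeof(void *) and the buffer to be pointer-aligned. */
static void *chain_freelist(char *pool, size_t slot_size, unsigned int num_slots)
{
    void **slot = (void **)pool;
    unsigned int i;

    if (num_slots == 0)
        return NULL;

    for (i = 0; i < num_slots - 1; i++) {
        *slot = (char *)slot + slot_size;   /* link this slot to the next one */
        slot = (void **)*slot;
    }
    *slot = NULL;                           /* terminate the list */
    return pool;                            /* head of the free list */
}

/* Pop one slot off the free list; `head` is advanced to the next entry. */
static void *freelist_pop(void **head)
{
    void *slot = *head;

    if (slot)
        *head = *(void **)slot;
    return slot;
}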
Example #27
A_STATUS HIFDevSendBuffer(HIF_SDIO_DEVICE *pDev, unsigned int transferID, a_uint8_t pipe,
        unsigned int nbytes, adf_nbuf_t buf)
{
    A_STATUS status;
    A_UINT32 paddedLength;
    int frag_count = 0, i, head_data_len;
    struct HIFSendContext *pSendContext;
    unsigned char *pData;
    A_UINT32 request = HIF_WR_ASYNC_BLOCK_INC;
    A_UINT8 mboxIndex = HIFDevMapPipeToMailBox(pDev, pipe);

    paddedLength = DEV_CALC_SEND_PADDED_LEN(pDev, nbytes);
#ifdef ENABLE_MBOX_DUMMY_SPACE_FEATURE
    A_ASSERT(paddedLength - nbytes < HIF_DUMMY_SPACE_MASK + 1);
    /*
     * The two most significant bytes hold the dummy data count;
     * data written into the dummy space will not be put into the final mbox FIFO.
     *
     */
    request |= ((paddedLength - nbytes) << 16);
#endif

    frag_count = adf_nbuf_get_num_frags(buf);

    if (frag_count > 1){
        /* head data length is the total send length minus the internal data length of the netbuf */
        /*
         * | HIFSendContext | fragments except internal buffer | netbuf->data
         */
        head_data_len = sizeof(struct HIFSendContext) +
                (nbytes - adf_nbuf_get_frag_len(buf, frag_count - 1));
    } else {
        /*
         * | HIFSendContext | netbuf->data
         */
        head_data_len = sizeof(struct HIFSendContext);
    }

    /* Check whether head room is enough to save extra head data */
    if ((head_data_len <= adf_nbuf_headroom(buf)) &&
                (adf_nbuf_tailroom(buf) >= (paddedLength - nbytes))){
        pSendContext = (struct HIFSendContext*)adf_nbuf_push_head(buf, head_data_len);
        pSendContext->bNewAlloc = FALSE;
    } else {
        pSendContext = (struct HIFSendContext*)adf_os_mem_alloc(NULL,
                sizeof(struct HIFSendContext) + paddedLength);
        if (pSendContext == NULL) {
            return A_NO_MEMORY;
        }
        pSendContext->bNewAlloc = TRUE;
    }

    pSendContext->netbuf = buf;
    pSendContext->pDev = pDev;
    pSendContext->transferID = transferID;
    pSendContext->head_data_len = head_data_len;
    /*
     * Copy data to the head part of the netbuf, or to the head of the allocated buffer.
     * If the buffer was newly allocated, the last fragment must be copied as well.
     * This assumes the last fragment is the netbuf's internal buffer;
     * sometimes the total length of the fragments is larger than nbytes.
     */
    pData = (unsigned char *)pSendContext + sizeof(struct HIFSendContext);
    for (i = 0; i < (pSendContext->bNewAlloc ? frag_count : frag_count - 1); i ++){
        int frag_len = adf_nbuf_get_frag_len(buf, i);
        unsigned char *frag_addr = adf_nbuf_get_frag_vaddr(buf, i);
        if (frag_len > nbytes){
            frag_len = nbytes;
        }
        memcpy(pData, frag_addr, frag_len);
        pData += frag_len;
        nbytes -= frag_len;
        if (nbytes <= 0) {
            break;
        }
    }

    /* Reset pData pointer and send out */
    pData = (unsigned char *)pSendContext + sizeof(struct HIFSendContext);
    status = HIFReadWrite(pDev->HIFDevice,
            pDev->MailBoxInfo.MboxProp[mboxIndex].ExtendedAddress,
            (char*) pData,
            paddedLength,
            request,
            (void*)pSendContext);

    if (status == A_PENDING){
        /*
         * it will return A_PENDING in native HIF implementation,
         * which should be treated as successful result here.
         */
        status = A_OK;
    }
    /* release buffer or move back data pointer when failed */
    if (status != A_OK){
        if (pSendContext->bNewAlloc){
            adf_os_mem_free(pSendContext);
        } else {
            adf_nbuf_pull_head(buf, head_data_len);
        }
    }

    return status;
}
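HIFDevSendBuffer() above either borrows netbuf headroom for the send context or falls back to a separate allocation, and HIFDevRWCompletionHandler() (Example #10) undoes whichever path was taken. Below is a minimal sketch of that reserve-or-allocate pattern, using only calls that appear in this collection; the helper name and the NULL check are illustrative additions, not part of the driver, and the headroom case is simplified to just the context size.

/* Hypothetical sketch: place a send context in the netbuf headroom when it
 * fits, otherwise allocate it separately, and record which path was taken so
 * the completion path can either pull the head back or free the allocation. */
static struct HIFSendContext *example_reserve_send_context(adf_nbuf_t buf,
                                                           unsigned int extra_bytes)
{
    unsigned int need = sizeof(struct HIFSendContext);
    struct HIFSendContext *ctx;

    if (need <= adf_nbuf_headroom(buf)) {
        /* reuse headroom; completion undoes this with adf_nbuf_pull_head() */
        ctx = (struct HIFSendContext *)adf_nbuf_push_head(buf, need);
        ctx->bNewAlloc = FALSE;
    } else {
        /* separate allocation; completion undoes this with adf_os_mem_free() */
        ctx = (struct HIFSendContext *)adf_os_mem_alloc(NULL, need + extra_bytes);
        if (ctx == NULL)
            return NULL;
        ctx->bNewAlloc = TRUE;
    }

    ctx->netbuf = buf;
    return ctx;
}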
Example #28
static A_STATUS usb_hif_alloc_pipe_resources(HIF_USB_PIPE *pipe, int urb_cnt)
{
	A_STATUS status = A_OK;
	int i;
	HIF_URB_CONTEXT *urb_context;

	DL_LIST_INIT(&pipe->urb_list_head);
	DL_LIST_INIT(&pipe->urb_pending_list);

	for (i = 0; i < urb_cnt; i++) {
		urb_context = adf_os_mem_alloc(NULL, sizeof(*urb_context));
		if (NULL == urb_context) {
			status = A_NO_MEMORY;
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("urb_context is null\n"));
			break;
		}
		adf_os_mem_zero(urb_context, sizeof(HIF_URB_CONTEXT));
		urb_context->pipe = pipe;
		urb_context->urb = usb_alloc_urb(0, GFP_KERNEL);

		if (NULL == urb_context->urb) {
			status = A_NO_MEMORY;
			adf_os_mem_free(urb_context);
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("urb_context->urb is null\n"));
			break;
		}

		/* note that we only allocate the urb contexts here; the actual
		 * URB is allocated from the kernel as needed to do a transaction
		 */
		pipe->urb_alloc++;

		if (htc_bundle_send) {
			/* In tx bundle mode, only pre-allocate bundle buffers
			 * for data
			 * pipes
			 */
			if (pipe->logical_pipe_num >= HIF_TX_DATA_LP_PIPE &&
			    pipe->logical_pipe_num <= HIF_TX_DATA_HP_PIPE) {
				urb_context->buf = adf_nbuf_alloc(NULL,
						  HIF_USB_TX_BUNDLE_BUFFER_SIZE,
						  0, 4, FALSE);
				if (NULL == urb_context->buf) {
					status = A_NO_MEMORY;
					usb_free_urb(urb_context->urb);
					urb_context->urb = NULL;
					adf_os_mem_free(urb_context);
					AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (
					 "athusb: alloc send bundle buffer %d-byte failed\n",
					 HIF_USB_TX_BUNDLE_BUFFER_SIZE));
					break;
				}
			}
			skb_queue_head_init(&urb_context->comp_queue);
		}

		usb_hif_free_urb_to_pipe(pipe, urb_context);
	}

	AR_DEBUG_PRINTF(USB_HIF_DEBUG_ENUM, (
			 "athusb: alloc resources lpipe:%d hpipe:0x%X urbs:%d\n",
			 pipe->logical_pipe_num,
			 pipe->usb_pipe_handle,
			 pipe->urb_alloc));
	return status;
}