/**
 * htt_tx_credit_update() - reconcile the target-reported and bus-level
 * TX credit deltas.
 *
 * The number of credits that can safely be reclaimed is the smaller of
 * the two pending deltas; that amount is subtracted from both counters.
 *
 * @pdev: HTT pdev handle holding the credit accounting state
 *
 * Return: number of credits reclaimed (0 when either delta is 0)
 */
int htt_tx_credit_update(struct htt_pdev_t *pdev)
{
    int credit_delta;
    /*
     * Snapshot each atomic exactly once.  The previous code passed the
     * atomic reads directly into MIN(); a conventional MIN() macro
     * expansion evaluates one argument twice, so a concurrent update
     * between the two reads could make the chosen minimum inconsistent
     * with the amount subtracted below.
     */
    int target_delta =
        adf_os_atomic_read(&pdev->htt_tx_credit.target_delta);
    int bus_delta =
        adf_os_atomic_read(&pdev->htt_tx_credit.bus_delta);

    credit_delta = MIN(target_delta, bus_delta);
    if (credit_delta) {
        adf_os_atomic_add(-credit_delta,
                          &pdev->htt_tx_credit.target_delta);
        adf_os_atomic_add(-credit_delta,
                          &pdev->htt_tx_credit.bus_delta);
    }
    return credit_delta;
}
/**
 * htt_h2t_send_complete() - HTC completion callback for host-to-target
 * HTT messages.
 *
 * Resolves the HTT packet wrapper and message netbuf from the completed
 * HTC packet, invokes the optional stage-2 completion handler stashed in
 * pPktContext, performs TX credit reconciliation for high-latency
 * configurations that suppress per-frame completions, and finally
 * releases the htt_htc_pkt / HTC_PACKET wrapper object.
 *
 * @context: opaque HTT pdev handle registered with HTC
 * @htc_pkt: the HTC packet whose transmission just completed
 */
void
htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
    struct htt_pdev_t *htt_pdev = (struct htt_pdev_t *) context;
    struct htt_htc_pkt *htt_pkt =
        container_of(htc_pkt, struct htt_htc_pkt, htc_pkt);
    /* the netbuf that held the downloaded message */
    adf_nbuf_t netbuf = (adf_nbuf_t) htc_pkt->pNetBufContext;
    void (*part2_cb)(
        void *pdev, A_STATUS status, adf_nbuf_t msdu, u_int16_t msdu_id);

    part2_cb = htc_pkt->pPktContext;

    /* stage-2 handler decides whether to free or keep the netbuf */
    if (part2_cb != NULL) {
        part2_cb(htt_pkt->pdev_ctxt, htc_pkt->Status, netbuf,
                 htt_pkt->msdu_id);
    }

    /*
     * High-latency targets without a per-frame TX completion request
     * account credits here: the bus delta gained by this completion is
     * merged with any pending target delta and reported to txrx.
     */
    if (htt_pdev->cfg.is_high_latency &&
        !htt_pdev->cfg.default_tx_comp_req) {
        int32_t credit_delta;

        adf_os_atomic_add(1, &htt_pdev->htt_tx_credit.bus_delta);
        credit_delta = htt_tx_credit_update(htt_pdev);
        if (credit_delta) {
            ol_tx_credit_completion_handler(
                htt_pdev->txrx_pdev, credit_delta);
        }
    }

    /* free the htt_htc_pkt / HTC_PACKET object */
    htt_htc_pkt_free(htt_pdev, htt_pkt);
}
/**
 * htt_tx_attach() - allocate and initialize the HTT TX descriptor pool.
 *
 * Computes the per-descriptor size (base struct only for high-latency;
 * base plus fragmentation-list elements for low-latency), allocates the
 * pool as a series of pages (plain memory for high-latency, DMA-coherent
 * memory otherwise), and threads a singly-linked freelist through the
 * descriptors, hopping from one page to the next at each page boundary.
 *
 * @pdev: HTT pdev whose tx_descs / desc_pages state is initialized
 * @desc_pool_elems: number of TX descriptors to provision
 *
 * Return: 0 on success, -1 on any allocation failure (with all partial
 * allocations released via the fail1/fail2/fail3 unwind labels).
 */
int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
{
    int i, i_int, pool_size;
    uint32_t **p;
    /* placeholder physical address for high-latency pages (no DMA) */
    adf_os_dma_addr_t pool_paddr = {0};
    struct htt_tx_desc_page_t *page_info;
    unsigned int num_link = 0;
    uint32_t page_size;

    if (pdev->cfg.is_high_latency) {
        pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
    } else {
        pdev->tx_descs.size =
            /*
             * Start with the size of the base struct
             * that actually gets downloaded.
             */
            sizeof(struct htt_host_tx_desc_t)
            /*
             * Add the fragmentation descriptor elements.
             * Add the most that the OS may deliver, plus one more in
             * case the txrx code adds a prefix fragment (for TSO or
             * audio interworking SNAP header)
             */
            + (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev)+1) * 8 // 2x u_int32_t
            + 4; /* u_int32_t fragmentation list terminator */
    }
    /*
     * Make sure tx_descs.size is a multiple of 4-bytes.
     * It should be, but round up just to be sure.
     */
    pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);

    pdev->tx_descs.pool_elems = desc_pool_elems;
    pdev->tx_descs.alloc_cnt = 0;
    pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;

    /* Calculate required page count first */
    page_size = adf_os_mem_get_page_size();
    pdev->num_pages = pool_size / page_size;
    if (pool_size % page_size)
        pdev->num_pages++;

    /* Put in as many as possible descriptors into single page */
    /* calculate how many descriptors can put in single page */
    pdev->num_desc_per_page = page_size / pdev->tx_descs.size;

    /* Pages information storage */
    pdev->desc_pages = (struct htt_tx_desc_page_t *)adf_os_mem_alloc(
        pdev->osdev, pdev->num_pages * sizeof(struct htt_tx_desc_page_t));
    if (!pdev->desc_pages) {
        adf_os_print("HTT Attach, desc page alloc fail");
        goto fail1;
    }

    page_info = pdev->desc_pages;
    /*
     * NOTE(review): this assignment reads tx_descs.freelist before it is
     * set below and p is unconditionally reassigned after the allocation
     * loop — it appears to be a dead store; confirm before removing.
     */
    p = (uint32_t **) pdev->tx_descs.freelist;

    /* Allocate required memory with multiple pages */
    for(i = 0; i < pdev->num_pages; i++) {
        if (pdev->cfg.is_high_latency) {
            /* high latency: plain host memory, no DMA mapping needed */
            page_info->page_v_addr_start = adf_os_mem_alloc(
                pdev->osdev, page_size);
            page_info->page_p_addr = pool_paddr;
            if (!page_info->page_v_addr_start) {
                /* unwind: free the i pages already allocated */
                page_info = pdev->desc_pages;
                for (i_int = 0 ; i_int < i; i_int++) {
                    page_info = pdev->desc_pages + i_int;
                    adf_os_mem_free(page_info->page_v_addr_start);
                }
                goto fail2;
            }
        } else {
            /* low latency: DMA-coherent memory the target can read */
            page_info->page_v_addr_start = adf_os_mem_alloc_consistent(
                pdev->osdev, page_size,
                &page_info->page_p_addr,
                adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
            if (!page_info->page_v_addr_start) {
                /* unwind: free the i pages already allocated */
                page_info = pdev->desc_pages;
                for (i_int = 0 ; i_int < i; i_int++) {
                    page_info = pdev->desc_pages + i_int;
                    adf_os_mem_free_consistent(
                        pdev->osdev,
                        pdev->num_desc_per_page * pdev->tx_descs.size,
                        page_info->page_v_addr_start,
                        page_info->page_p_addr,
                        adf_os_get_dma_mem_context((&pdev->tx_descs),
                                                   memctx));
                }
                goto fail2;
            }
        }
        /* end of the usable descriptor area within this page */
        page_info->page_v_addr_end = page_info->page_v_addr_start +
            pdev->num_desc_per_page * pdev->tx_descs.size;
        page_info++;
    }

    /*
     * Thread the freelist through the pages: each descriptor's first
     * word points at the next descriptor, crossing page boundaries via
     * the next page's page_v_addr_start.
     */
    page_info = pdev->desc_pages;
    pdev->tx_descs.freelist = (uint32_t *)page_info->page_v_addr_start;
    p = (uint32_t **) pdev->tx_descs.freelist;
    for(i = 0; i < pdev->num_pages; i++) {
        for (i_int = 0; i_int < pdev->num_desc_per_page; i_int++) {
            if (i_int == (pdev->num_desc_per_page - 1)) {
                /* Last element on this page, should point to next page */
                if (!page_info->page_v_addr_start) {
                    adf_os_print("over flow num link %d\n", num_link);
                    goto fail3;
                }
                page_info++;
                *p = (uint32_t *)page_info->page_v_addr_start;
            } else {
                /* next descriptor is adjacent within the same page */
                *p = (uint32_t *)(((char *) p) + pdev->tx_descs.size);
            }
            num_link++;
            p = (uint32_t **) *p;
            /*
             * Last link established exit.
             * NOTE(review): this break leaves only the inner loop; the
             * outer loop relies on page exhaustion to terminate —
             * verify this is intentional for pools that end mid-page.
             */
            if (num_link == (pdev->tx_descs.pool_elems - 1)) break;
        }
    }
    /* terminate the freelist */
    *p = NULL;

    if (pdev->cfg.is_high_latency) {
        /* seed the credit accounting used by htt_tx_credit_update() */
        adf_os_atomic_init(&pdev->htt_tx_credit.target_delta);
        adf_os_atomic_init(&pdev->htt_tx_credit.bus_delta);
        adf_os_atomic_add(HTT_MAX_BUS_CREDIT,&pdev->htt_tx_credit.bus_delta);
    }
    return 0; /* success */

fail3:
    /* linking failed after all pages were allocated: free every page */
    if (pdev->cfg.is_high_latency) {
        page_info = pdev->desc_pages;
        for (i_int = 0 ; i_int < pdev->num_pages; i_int++) {
            page_info = pdev->desc_pages + i_int;
            adf_os_mem_free(page_info->page_v_addr_start);
        }
    } else {
        page_info = pdev->desc_pages;
        for (i_int = 0 ; i_int < pdev->num_pages; i_int++) {
            page_info = pdev->desc_pages + i_int;
            adf_os_mem_free_consistent(
                pdev->osdev,
                pdev->num_desc_per_page * pdev->tx_descs.size,
                page_info->page_v_addr_start,
                page_info->page_p_addr,
                adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
        }
    }
fail2:
    /* free the page-tracking array itself */
    adf_os_mem_free(pdev->desc_pages);
fail1:
    return -1;
}