/**
 * @brief Get the DMA map of the nbuf (single-fragment only).
 *
 * @param osdev  OS device handle used for DMA mapping
 * @param skb    network buffer to map
 * @param dir    DMA direction (ADF_OS_DMA_TO_DEVICE or ADF_OS_DMA_FROM_DEVICE)
 *
 * @return a_status_t result of the single-fragment map
 */
a_status_t
__adf_nbuf_map(
    adf_os_device_t osdev,
    struct sk_buff *skb,
    adf_os_dma_dir_t dir)
{
#ifdef ADF_OS_DEBUG
    struct skb_shared_info *sh = skb_shinfo(skb);
#endif
    adf_os_assert(
        (dir == ADF_OS_DMA_TO_DEVICE) || (dir == ADF_OS_DMA_FROM_DEVICE));

    /*
     * Assume there's only a single fragment.
     * To support multiple fragments, it would be necessary to change
     * adf_nbuf_t to be a separate object that stores meta-info
     * (including the bus address for each fragment) and a pointer
     * to the underlying sk_buff.
     */
    adf_os_assert(sh->nr_frags == 0);

    /* BUG FIX: removed an unreachable "return A_STATUS_OK;" that
     * followed this return statement */
    return __adf_nbuf_map_single(osdev, skb, dir);
}
/**
 * @brief Finish HTC startup: distribute TX credits across endpoints and
 *        send the SETUP_COMPLETE message to the target.
 *
 * @param HTCHandle  opaque HTC handle
 *
 * @return A_OK on success, A_NO_MEMORY if the message buffer cannot be
 *         allocated, or the HTCIssueSend() failure status
 */
A_STATUS HTCStart(HTC_HANDLE HTCHandle)
{
    adf_nbuf_t netbuf;
    A_STATUS   status = A_OK;
    HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
    HTC_SETUP_COMPLETE_MSG *SetupComp;

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCStart Enter\n"));

    do {
        HTCConfigTargetHIFPipe(HTCHandle);

#ifdef HTC_HOST_CREDIT_DIST
        adf_os_assert(target->InitCredits != NULL);
        adf_os_assert(target->EpCreditDistributionListHead != NULL);
        adf_os_assert(target->EpCreditDistributionListHead->pNext != NULL);

        /* call init credits callback to do the distribution,
         * NOTE: the first entry in the distribution list is ENDPOINT_0, so
         * we pass the start of the list after this one. */
        target->InitCredits(target->pCredDistContext,
                            target->EpCreditDistributionListHead->pNext,
                            target->TargetCredits);
#if 1
        /* periodic debug dump of the credit state (every 10 s) */
        adf_os_timer_init(target->os_handle,
                          &target->host_htc_credit_debug_timer,
                          host_htc_credit_show, target);
        adf_os_timer_start(&target->host_htc_credit_debug_timer, 10000);
#endif
#endif

        /* allocate a buffer to send; BUG FIX: size the allocation from
         * the message itself rather than a magic 50 bytes (matches the
         * original commented-out intent) */
        netbuf = adf_nbuf_alloc(sizeof(HTC_SETUP_COMPLETE_MSG),
                                HTC_HDR_LENGTH, 0);
        if (netbuf == ADF_NBUF_NULL) {
            status = A_NO_MEMORY;
            break;
        }

        /* assemble setup complete message */
        SetupComp = (HTC_SETUP_COMPLETE_MSG *)
            adf_nbuf_put_tail(netbuf, sizeof(HTC_SETUP_COMPLETE_MSG));
        SetupComp->MessageID = adf_os_htons(HTC_MSG_SETUP_COMPLETE_ID);

        /* assemble the HTC header and send to HIF layer */
        status = HTCIssueSend(target,
                              ADF_NBUF_NULL,
                              netbuf,
                              0,
                              sizeof(HTC_SETUP_COMPLETE_MSG),
                              ENDPOINT0);
        if (A_FAILED(status)) {
            break;
        }
    } while (FALSE);

    AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("HTCStart Exit\n"));
    return status;
}
/**
 * @brief Return the frag data & len of an skb as a scatter/gather list.
 *        Segment 0 is always the linear data area; with
 *        __ADF_SUPPORT_FRAG_MEM, one extra segment per page fragment
 *        follows.
 *
 * @param[in]  skb  source network buffer
 * @param[out] sg   scatter/gather list of all the frags
 */
void
__adf_nbuf_frag_info(struct sk_buff *skb, adf_os_sglist_t *sg)
{
#if defined(ADF_OS_DEBUG) || defined(__ADF_SUPPORT_FRAG_MEM)
    struct skb_shared_info *sh = skb_shinfo(skb);
#endif
    adf_os_assert(skb != NULL);

    /* segment 0 is the linear part of the skb */
    sg->sg_segs[0].vaddr = skb->data;
    sg->sg_segs[0].len   = skb->len;
    sg->nsegs            = 1;

#ifndef __ADF_SUPPORT_FRAG_MEM
    /* fragments unsupported in this build — there must be none */
    adf_os_assert(sh->nr_frags == 0);
#else
    {
        int i;

        for (i = 1; i <= sh->nr_frags; i++) {
            skb_frag_t *f = &sh->frags[i - 1];

            /* BUG FIX: bounds-check BEFORE writing the segment (the
             * assert previously ran after the out-of-bounds store) */
            adf_os_assert(i < ADF_OS_MAX_SGLIST);
            sg->sg_segs[i].vaddr =
                (uint8_t *)(page_address(f->page) + f->page_offset);
            sg->sg_segs[i].len = f->size;
        }
        /* BUG FIX: the loop index was used outside its C99 scope and
         * over-counted by one; the segment count is head + nr_frags */
        sg->nsegs += sh->nr_frags;
    }
#endif
}
/** * @brief Create a Eth networking device * * @param hdl * @param op * @param info * * @return adf_net_handle_t */ adf_net_handle_t __adf_net_create_ethdev(adf_drv_handle_t hdl, adf_dev_sw_t *op, adf_net_dev_info_t *info) { __adf_softc_t *sc = NULL; struct net_device *netdev = NULL; int error = 0; netdev = alloc_netdev(sizeof(struct __adf_softc), info->if_name, ether_setup); if (!netdev) return NULL; sc = netdev_to_softc(netdev); sc->netdev = netdev; sc->sw = *op; sc->drv_hdl = hdl; sc->vlgrp = NULL; /*Not part of any VLAN*/ sc->vid = 0; sc->cfg_api = NULL; netdev->watchdog_timeo = ADF_DEF_TX_TIMEOUT * HZ; netdev->features |= ( NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX ); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) netdev->netdev_ops = &__adf_net_vapdev_ops; #else netdev->open = __adf_net_open; netdev->stop = __adf_net_stop; netdev->hard_start_xmit = __adf_net_start_tx; netdev->do_ioctl = __adf_net_eth_ioctl; netdev->vlan_rx_register = __adf_net_vlan_register; netdev->vlan_rx_add_vid = __adf_net_vlan_add; netdev->vlan_rx_kill_vid = __adf_net_vlan_del; netdev->get_stats = __adf_net_get_stats; #endif netdev->destructor = __adf_net_free_netdev; netdev->hard_header_len = info->header_len ; adf_os_assert(!is_valid_ether_addr(info->dev_addr)); memcpy(netdev->dev_addr, info->dev_addr, ADF_NET_MAC_ADDR_MAX_LEN); memcpy(netdev->perm_addr, info->dev_addr, ADF_NET_MAC_ADDR_MAX_LEN); /** * make sure nothing's on before open */ netif_stop_queue(netdev); error = register_netdev(netdev) ; adf_os_assert(!error); return sc; }
/**
 * @brief Copy the DMA segment list of a mapped buffer into the
 *        caller-supplied map-info structure.
 *
 * @param[in]  bmap  DMA map handle (must be currently mapped)
 * @param[out] sg    map-info structure to fill
 */
void
__adf_nbuf_dmamap_info(__adf_os_dma_map_t bmap, adf_os_dmamap_info_t *sg)
{
    /* only a live mapping carries valid segment data */
    adf_os_assert(bmap->mapped);
    adf_os_assert(bmap->nsegs <= ADF_OS_MAX_SCATTER);

    sg->nsegs = bmap->nsegs;
    memcpy(sg->dma_segs, bmap->seg,
           sg->nsegs * sizeof(struct __adf_os_segment));
}
/** * @brief Create a Wifi Networking device * * @param hdl * @param op * @param info * * @return adf_net_handle_t */ adf_net_handle_t __adf_net_create_wifidev(adf_drv_handle_t hdl, adf_dev_sw_t *op, adf_net_dev_info_t *info, void *wifi_cfg) { __adf_softc_t *sc = NULL; struct net_device *netdev = NULL; int error = 0; netdev = alloc_netdev(sizeof(struct __adf_softc), info->if_name, ether_setup); if (!netdev) return NULL; sc = netdev_to_softc(netdev); sc->netdev = netdev; sc->sw = *op; sc->drv_hdl = hdl; sc->vlgrp = NULL; /*Not part of any VLAN*/ sc->vid = 0; sc->cfg_api = wifi_cfg; netdev->watchdog_timeo = ADF_DEF_TX_TIMEOUT * HZ; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) netdev->netdev_ops = &__adf_net_wifidev_ops; #else netdev->open = __adf_net_open; netdev->stop = __adf_net_stop; netdev->hard_start_xmit = __adf_net_start_tx; netdev->do_ioctl = __adf_net_wifi_ioctl; netdev->get_stats = __adf_net_get_stats; netdev->set_mac_address = __adf_net_set_wifiaddr; #endif netdev->destructor = __adf_net_free_netdev; netdev->hard_header_len = info->header_len ; netdev->wireless_handlers = __adf_net_iwget_wifi(); netdev->type = ARPHRD_IEEE80211; /* WLAN device */ adf_os_assert(is_valid_ether_addr(info->dev_addr)); memcpy(netdev->dev_addr, info->dev_addr, ADF_NET_MAC_ADDR_MAX_LEN); memcpy(netdev->perm_addr, info->dev_addr, ADF_NET_MAC_ADDR_MAX_LEN); /** * make sure nothing's on before open */ netif_stop_queue(netdev); error = register_netdev(netdev) ; adf_os_assert(!error); return sc; }
/**
 * @brief Free the RX Ring ( S/W & H/W), dequeue all the SKB's
 *        and free them starting from the head
 *        NOTE: The NULL terminator doesn't have a SKB
 *
 * @param osdev  OS device handle
 * @param dma_q  RX DMA queue whose descriptors are torn down
 */
void
pci_dma_deinit_rx(adf_os_device_t osdev, pci_dma_softc_t *dma_q)
{
    a_uint32_t i, num_desc;
    zdma_swdesc_t *swdesc;
    adf_nbuf_t buf;

    num_desc = dma_q->num_desc;
    swdesc = dma_q->sw_ring;

    /* for every descriptor: withdraw it from H/W, then unmap, unlink
     * and free the attached buffer, and destroy its DMA map */
    for (i = 0; i < num_desc; i++, swdesc++) {
        pci_zdma_mark_notrdy(swdesc);
        /* NOTE(review): this is the RX ring, yet the unmap direction is
         * ADF_OS_DMA_TO_DEVICE — confirm it matches the direction used
         * when the buffer was originally mapped */
        adf_nbuf_unmap(osdev, swdesc->nbuf_map, ADF_OS_DMA_TO_DEVICE);
        buf = pci_dma_unlink_buf(osdev, swdesc);
        adf_os_assert(buf);
        adf_nbuf_free(buf);
        adf_nbuf_dmamap_destroy(osdev, swdesc->nbuf_map);
    }

    /* release the S/W descriptor ring itself */
    pci_dma_free_swdesc(osdev, dma_q, num_desc);
}
/**
 * @brief adf_nbuf_unmap() - unmap a previously mapped buf.
 *
 * @param osdev  OS device handle used for the original mapping
 * @param skb    network buffer to unmap
 * @param dir    DMA direction (must match the direction used at map time)
 */
void
__adf_nbuf_unmap(
    adf_os_device_t osdev,
    struct sk_buff *skb,
    adf_os_dma_dir_t dir)
{
    /* BUG FIX: this direction check was duplicated verbatim; one
     * assert is sufficient */
    adf_os_assert(
        (dir == ADF_OS_DMA_TO_DEVICE) || (dir == ADF_OS_DMA_FROM_DEVICE));

    /*
     * Assume there's a single fragment.
     * If this is not true, the assertion in __adf_nbuf_map will catch it.
     */
    __adf_nbuf_unmap_single(osdev, skb, dir);
}
void _DMAengine_config_rx_queue(struct zsDmaQueue *q, int num_desc, int buf_size) { int i; VDESC *desc; VDESC *head = NULL; for(i=0; i < num_desc; i++) { desc = VDESC_alloc_vdesc(); adf_os_assert(desc != NULL); desc->buf_addr = (A_UINT8 *)adf_os_mem_alloc(buf_size); desc->buf_size = buf_size; desc->next_desc = NULL; desc->data_offset = 0; desc->data_size = 0; desc->control = 0; if ( head == NULL ) { head = desc; } else { desc->next_desc = head; head = desc; } } config_queue(q, head); }
/**
 * @brief Report whether an endpoint already holds at least nPkts
 *        buffered TX packets.
 *
 * @param HTCHandle  opaque HTC handle
 * @param Ep         endpoint to query (asserts if out of range)
 * @param nPkts      threshold packet count
 *
 * @return TRUE if TxBufCnt >= nPkts, FALSE otherwise
 */
A_BOOL
HTC_busy(HTC_HANDLE HTCHandle, HTC_ENDPOINT_ID Ep, a_uint32_t nPkts)
{
    HTC_TARGET   *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
    HTC_ENDPOINT *endpoint;
    A_BOOL        is_busy;

    if (Ep >= ENDPOINT_MAX) {
        adf_os_print("Ep %u is invalid!\n", Ep);
        adf_os_assert(0);
    }

    endpoint = &target->EndPoint[Ep];

    /* snapshot the buffered-packet count under the TX lock */
    LOCK_HTC_TX(target);
    is_busy = (endpoint->TxBufCnt >= nPkts) ? TRUE : FALSE;
    UNLOCK_HTC_TX(target);

    return is_busy;
}
a_status_t __adf_nbuf_set_rx_cksum(struct sk_buff *skb, adf_nbuf_rx_cksum_t *cksum) { switch (cksum->l4_result) { case ADF_NBUF_RX_CKSUM_NONE: skb->ip_summed = CHECKSUM_NONE; break; case ADF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY: skb->ip_summed = CHECKSUM_UNNECESSARY; break; case ADF_NBUF_RX_CKSUM_TCP_UDP_HW: #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,19) skb->ip_summed = CHECKSUM_HW; #else skb->ip_summed = CHECKSUM_PARTIAL; #endif skb->csum = cksum->val; break; default: printk("ADF_NET:Unknown checksum type\n"); adf_os_assert(0); return A_STATUS_ENOTSUPP; } return A_STATUS_OK; }
/**
 * @brief Derive the TX TID from a raw (802.11-encapsulated) data frame.
 *        QoS data frames yield their QoS-control TID; non-QoS data
 *        frames yield HTT_NON_QOS_TID. Non-data frames assert and fall
 *        back to the multicast/broadcast TID.
 */
static inline A_UINT8
ol_tx_tid_by_raw_type(
    A_UINT8 *datap,
    struct ol_txrx_msdu_info_t *tx_msdu_info)
{
    A_UINT8 tid = HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST;
    /* adjust hdr_ptr to RA */
    struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;

    /* FIXME: This code does not handle 4 address formats. The QOS field
     * is not at usual location.
     */
    if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA) {
        /*
         * This function should only be applied to data frames.
         * For management frames, we already know to use HTT_TX_EXT_TID_MGMT.
         */
        adf_os_assert(0);
        return tid;
    }

    /* dot11 encapsulated data frame */
    {
        struct ieee80211_qosframe *whqos =
            (struct ieee80211_qosframe *)datap;

        tid = (whqos->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS)
                  ? (A_UINT8)(whqos->i_qos[0] & IEEE80211_QOS_TID)
                  : HTT_NON_QOS_TID;
    }

    return tid;
}
int __adf_net_pseudo_attach(const char *mod_name) { __adf_net_mod_t *elem = NULL; adf_os_resource_t drv_res = {0}; adf_os_attach_data_t drv_data = {{0}}; a_status_t status; printk("ADF_NET:Attaching the pseudo driver\n"); /** * Allocate the sc & zero down */ osdev[num] = kzalloc(sizeof(struct __adf_device), GFP_KERNEL); if (!osdev[num]) { printk("Cannot malloc softc\n"); status = A_STATUS_ENOMEM; goto mem_fail; } /** * set the references for sc & dev */ osdev[num]->dev = NULL; elem = __adf_net_find_mod(mod_name); /** * we should always find a drv */ adf_os_assert(elem); osdev[num]->drv_hdl = adf_drv_attach(elem, &drv_res, 1, &drv_data, osdev[num]); /** * We expect the at the driver_attach the create_dev has happened */ if(osdev[num]->drv_hdl == NULL) { printk("ADF_NET:Pseudo device is not created, asserting\n"); adf_os_assert(0); goto attach_fail; } num++; return 0; attach_fail: kfree(osdev[num]); mem_fail: return status; }
/**
 * @brief Batched-send entry point; unsupported in this (LL) build, so
 *        it logs, asserts, and hands the whole list back to the caller.
 *
 * @param pdev       HTT pdev handle (unused)
 * @param head_msdu  head of the MSDU list
 * @param num_msdus  number of MSDUs in the list (unused)
 *
 * @return the untouched head_msdu list (nothing was accepted)
 */
adf_nbuf_t
htt_tx_send_batch(htt_pdev_handle pdev, adf_nbuf_t head_msdu, int num_msdus)
{
    /* BUG FIX: corrected "curently" typo in the diagnostic */
    adf_os_print("*** %s currently only applies for HL systems\n", __func__);
    adf_os_assert(0);
    return head_msdu;
}
void __adf_os_dma_load(void *arg, bus_dma_segment_t *dseg, int nseg, int error) { if (error) return; adf_os_assert(nseg == 1); ((bus_dma_segment_t *)arg)[0].ds_addr = dseg[0].ds_addr; ((bus_dma_segment_t *)arg)[0].ds_len = dseg[0].ds_len; }
/**
 * @brief Process an incoming control message from the host on the HTC
 *        control endpoint: dispatch by message ID, then either recycle
 *        the buffer (when fully running) or hand it back to the mailbox,
 *        and finally latch the setup-complete state.
 *
 * @param EndpointID  endpoint the message arrived on
 * @param hdr_buf     separate header buffer — must be NULL for control
 * @param pBuffers    buffer holding the control message
 * @param arg         HTC_CONTEXT pointer
 */
LOCAL void
HTCControlSvcProcessMsg(HTC_ENDPOINT_ID EndpointID, adf_nbuf_t hdr_buf,
                        adf_nbuf_t pBuffers, void *arg)
{
    A_BOOL setupComplete = FALSE;
    a_uint8_t *anbdata;
    a_uint32_t anblen;
    HTC_CONTEXT *pHTC = (HTC_CONTEXT *)arg;
    HTC_UNKNOWN_MSG *pMsg;

    /* control messages never arrive with a separate header buffer */
    adf_os_assert(hdr_buf == ADF_NBUF_NULL);

    /* we assume buffers are aligned such that we can access the message
     * parameters directly*/
    adf_nbuf_peek_header(pBuffers, &anbdata, &anblen);
    pMsg = (HTC_UNKNOWN_MSG *)anbdata;

    /* we cannot handle fragmented messages across buffers */

    switch ( adf_os_ntohs(pMsg->MessageID) ) {
        case HTC_MSG_CONNECT_SERVICE_ID:
            HTCProcessConnectMsg(pHTC, (HTC_CONNECT_SERVICE_MSG *)pMsg);
            break;

        case HTC_MSG_CONFIG_PIPE_ID:
            HTCProcessConfigPipeMsg(pHTC, (HTC_CONFIG_PIPE_MSG *)pMsg);
            break;

        case HTC_MSG_SETUP_COMPLETE_ID:
            /* the host has indicated that it has completed all setup
             * tasks and we can now let the services take over to run
             * the rest of the application; only latched below, AFTER
             * this buffer has been disposed of */
            setupComplete = TRUE;
            /* can't get this more than once */
            break;

        default:
            /* unknown message IDs are silently ignored */
            ;
    }

    if (pHTC->StateFlags & HTC_STATE_SETUP_COMPLETE) {
        /* recycle buffer only if we are fully running */
        HTC_ReturnBuffers(pHTC, ENDPOINT0,pBuffers);
    } else {
        /* supply some head-room again */
        adf_nbuf_push_head(pBuffers, HTC_HDR_LENGTH);
        /* otherwise return the packet back to mbox */
        HIF_return_recv_buf(pHTC->hifHandle,
                            pHTC->Endpoints[EndpointID].UpLinkPipeID,
                            pBuffers);
    }

    if (setupComplete) {
        /* mark that setup has completed; the flag is set only after the
         * buffer was returned above, so THIS message's buffer went back
         * to the mailbox, not the recycle pool */
        pHTC->StateFlags |= HTC_STATE_SETUP_COMPLETE;
        if (pHTC->SetupCompleteCb != NULL) {
            pHTC->SetupCompleteCb();
        }
    }
}
/** * @brief this adds a IP ckecksum in the IP header of the packet * @param skb */ static void __adf_net_ip_cksum(struct sk_buff *skb) { struct iphdr *ih = {0}; struct skb_shared_info *sh = skb_shinfo(skb); adf_os_assert(sh->nr_frags == 0); ih = (struct iphdr *)(skb->data + sizeof(struct ethhdr)); ih->check = 0; ih->check = ip_fast_csum((unsigned char *)ih, ih->ihl); }
/**
 * @brief Get the DMA map of the first nbytes of the nbuf
 *        (single-fragment only).
 *
 * @param osdev   OS device handle used for DMA mapping
 * @param skb     network buffer to map
 * @param dir     DMA direction (ADF_OS_DMA_TO_DEVICE or
 *                ADF_OS_DMA_FROM_DEVICE)
 * @param nbytes  number of bytes to map
 *
 * @return a_status_t result of the single-fragment map
 */
a_status_t
__adf_nbuf_map_nbytes(
    adf_os_device_t osdev,
    struct sk_buff *skb,
    adf_os_dma_dir_t dir,
    int nbytes)
{
#ifdef ADF_OS_DEBUG
    struct skb_shared_info *sh = skb_shinfo(skb);
#endif
    adf_os_assert(
        (dir == ADF_OS_DMA_TO_DEVICE) || (dir == ADF_OS_DMA_FROM_DEVICE));

    /*
     * Assume there's only a single fragment.
     * To support multiple fragments, it would be necessary to change
     * adf_nbuf_t to be a separate object that stores meta-info
     * (including the bus address for each fragment) and a pointer
     * to the underlying sk_buff.
     */
    adf_os_assert(sh->nr_frags == 0);

    /* CLEANUP: removed an unreachable "#if 0" multi-fragment sketch
     * that followed this return (it even contained a syntax error) */
    return __adf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
}
/**
 * @brief Allocate & initialize the S/W descriptor & H/W Descriptor
 *        rings: one contiguous DMA-coherent block holds the H/W
 *        descriptors, and each S/W descriptor records its H/W
 *        descriptor's virtual and bus addresses.
 *
 * @param osdev     OS device handle
 * @param sc        DMA softc whose rings are populated
 * @param num_desc  number of descriptors to allocate
 */
static void
pci_dma_alloc_swdesc(adf_os_device_t osdev, pci_dma_softc_t *sc,
                     a_uint32_t num_desc)
{
    a_uint32_t size_sw, size_hw, i = 0;
    adf_os_dma_addr_t paddr;
    zdma_swdesc_t *swdesc;
    struct zsDmaDesc *hwdesc;

    size_sw = sizeof(struct zdma_swdesc) * num_desc;
    size_hw = sizeof(struct zsDmaDesc) * num_desc;

    /* plain memory for the S/W ring, DMA-coherent memory for the H/W
     * ring that the device itself walks */
    sc->sw_ring = adf_os_mem_alloc(osdev, size_sw);
    adf_os_assert(sc->sw_ring);

    sc->hw_ring = adf_os_dmamem_alloc(osdev, size_hw, PCI_DMA_MAPPING,
                                      &sc->dmap);
    adf_os_assert(sc->hw_ring);

    swdesc = sc->sw_ring;
    hwdesc = sc->hw_ring;

    /* bus address of the first H/W descriptor */
    paddr = adf_os_dmamem_map2addr(sc->dmap);

    for (i = 0; i < num_desc; i++) {
        swdesc[i].descp = &hwdesc[i];
        swdesc[i].hwaddr = paddr;
        /* advance the bus address by one descriptor; the cast makes
         * the pointer arithmetic step by sizeof(struct zsDmaDesc) */
        paddr = (adf_os_dma_addr_t)((struct zsDmaDesc *)paddr + 1);
    }

    sc->num_desc = num_desc;
    pci_dma_init_ring(swdesc, num_desc);
}
/**
 * @brief Prepend an HTC frame header and hand the frame to the HIF
 *        layer on the endpoint's uplink pipe.
 *
 * @param target     HTC target instance
 * @param hdr_buf    optional separate header buffer; when NULL the HTC
 *                   header is pushed onto netbuf instead
 * @param netbuf     data buffer to send
 * @param SendFlags  HTC header flags
 * @param len        payload length recorded in the HTC header
 * @param EpID       destination endpoint id
 *
 * @return A_OK on success, A_ERROR if head-room for the HTC header is
 *         unavailable, otherwise the HIFSend() status
 */
A_STATUS
HTCIssueSend(HTC_TARGET *target, adf_nbuf_t hdr_buf, adf_nbuf_t netbuf,
             a_uint8_t SendFlags, a_uint16_t len, a_uint8_t EpID)
{
    a_uint8_t pipeID;
    A_STATUS status = A_OK;
    HTC_ENDPOINT *pEndpoint = &target->EndPoint[EpID];
    HTC_FRAME_HDR *HtcHdr;
    adf_nbuf_t tmp_nbuf;

    if (hdr_buf == ADF_NBUF_NULL) {
        /* HTC header needs to be added on the data nbuf */
        tmp_nbuf = netbuf;
    } else {
        tmp_nbuf = hdr_buf;
    }

    /* setup HTC frame header */
    HtcHdr = (HTC_FRAME_HDR *)adf_nbuf_push_head(tmp_nbuf,
                                                 sizeof(HTC_FRAME_HDR));
    adf_os_assert(HtcHdr);
    if ( HtcHdr == NULL ) {
        adf_os_print("%s_%d: HTC Header is NULL !!!\n",
                     __FUNCTION__, __LINE__);
        return A_ERROR;
    }

    HtcHdr->EndpointID = EpID;
    HtcHdr->Flags = SendFlags;
    HtcHdr->PayloadLen = adf_os_htons(len);

    /* stamp the credit-accounting sequence number into the header */
    HTC_ADD_CREDIT_SEQNO(target,pEndpoint,len,HtcHdr);

    /* lookup the pipe id by the endpoint id */
    pipeID = pEndpoint->UL_PipeID;

    /* send the buffer to the HIF layer */
    status = HIFSend(target->hif_dev, pipeID, hdr_buf, netbuf);

#ifdef NBUF_PREALLOC_POOL
    if ( A_FAILED(status) ) {
        /* NOTE(review): on failure this pulls HTC_HDR_LENGTH from
         * netbuf, but the header was pushed onto tmp_nbuf (which is
         * hdr_buf when one was supplied) with sizeof(HTC_FRAME_HDR) —
         * confirm the two sizes match and that the hdr_buf case cannot
         * reach this path */
        adf_nbuf_pull_head(netbuf, HTC_HDR_LENGTH);
    }
#endif

    return status;
}
/**
 * htt_tx_get_paddr() - get physical address for htt desc
 *
 * Get HTT descriptor physical address from virtual address:
 * locate the descriptor page containing the address, then add the
 * offset within that page to the page's physical base.
 *
 * Return: Physical address of descriptor, or 0 (with an assert) if the
 *         address lies in no known page or the page table is corrupt
 */
adf_os_dma_addr_t
htt_tx_get_paddr(htt_pdev_handle pdev, char *target_vaddr)
{
    unsigned int i;
    struct htt_tx_desc_page_t *page_info = NULL;
    int found = 0;

    for (i = 0; i < pdev->num_pages; i++) {
        page_info = pdev->desc_pages + i;
        if (!page_info || !page_info->page_v_addr_start) {
            adf_os_assert(0);
            return 0;
        }
        if ((target_vaddr >= page_info->page_v_addr_start) &&
            (target_vaddr <= page_info->page_v_addr_end)) {
            found = 1;
            break;
        }
    }

    /* BUG FIX: previously, if the address matched no page, the loop
     * fell through with page_info pointing at the LAST page, whose
     * pointers are valid — a bogus physical address was returned.
     * Track an explicit match instead. */
    if (!found || !page_info || !page_info->page_v_addr_start ||
        !page_info->page_p_addr) {
        adf_os_assert(0);
        return 0;
    }

    return page_info->page_p_addr +
        (adf_os_dma_addr_t)(target_vaddr - page_info->page_v_addr_start);
}
/**
 * @brief Handle a firmware-download response from the target: advance
 *        and send the next chunk on ACK, finish on SUCCESS, retry the
 *        upload on FAILED (up to FWD_MAX_TRIES).
 *
 * @param context  fwd softc
 * @param nbuf     response buffer (always freed before returning)
 * @param epid     endpoint id (unused)
 *
 * @return A_OK
 */
hif_status_t
fwd_recv(void *context, adf_nbuf_t nbuf, a_uint8_t epid)
{
    fwd_softc_t *fwd = (fwd_softc_t *)context;
    fwd_rsp_t *hdr;
    a_uint8_t *payload;
    a_uint32_t payload_len;
    a_uint32_t resp, off;

    adf_nbuf_peek_header(nbuf, &payload, &payload_len);

    hdr  = (fwd_rsp_t *)payload;
    resp = adf_os_ntohl(hdr->rsp);
    off  = adf_os_ntohl(hdr->offset);

    switch (resp) {
    case FWD_RSP_ACK:
        /* only advance when the target acked the chunk we last sent */
        if (off == fwd->offset) {
            adf_os_print(".");
            fwd->offset += fwd_chunk_len(fwd);
            fwd_send_next(fwd);
        }
        break;

    case FWD_RSP_SUCCESS:
        adf_os_print("done!\n");
        hif_boot_done(fwd->hif_handle);
        break;

    case FWD_RSP_FAILED:
        if (fwd->ntries < FWD_MAX_TRIES)
            fwd_start_upload(fwd);
        else
            adf_os_print("FWD: Error: Max retries exceeded\n");
        break;

    default:
        adf_os_assert(0);
    }

    adf_nbuf_free(nbuf);
    return A_OK;
}
/**
 * @brief Refill one RX descriptor with a freshly allocated, aligned
 *        buffer and mark it ready for H/W.
 *
 * @param osdev   OS device handle
 * @param swdesc  S/W descriptor to refill
 * @param size    buffer size to allocate
 *
 * @return A_STATUS_OK on success, A_STATUS_ENOMEM if the buffer
 *         allocation fails
 */
a_status_t
pci_dma_recv_refill(adf_os_device_t osdev, zdma_swdesc_t *swdesc,
                    a_uint32_t size)
{
    adf_nbuf_t buf;

    buf = adf_nbuf_alloc(osdev, size, 0, PCI_NBUF_ALIGNMENT);
    if (!buf) {
        adf_os_assert(0);
        /* BUG FIX: previously execution fell through and dereferenced
         * the NULL buffer when asserts are compiled out */
        return A_STATUS_ENOMEM;
    }

    pci_dma_link_buf(osdev, swdesc, buf);
    pci_zdma_mark_rdy(swdesc, (ZM_FS_BIT | ZM_LS_BIT));

    return A_STATUS_OK;
}
/**
 * @brief Transmit an skb through the net device bound to the handle.
 *
 * @param hdl  adf net handle resolving to a struct net_device
 * @param skb  buffer to transmit (ownership passes to dev_queue_xmit)
 *
 * @return A_STATUS_OK on success, A_STATUS_EINVAL if no net device is
 *         bound to the handle
 */
a_status_t
__adf_net_dev_tx(adf_net_handle_t hdl, struct sk_buff *skb)
{
    struct net_device *netdev = hdl_to_netdev(hdl);

    if (unlikely(!netdev)) {
        printk("ADF_NET:netdev not found\n");
        adf_os_assert(0);
        /* BUG FIX: previously execution fell through and dereferenced
         * the NULL netdev when asserts are compiled out */
        return A_STATUS_EINVAL;
    }

    skb->dev = netdev;
    dev_queue_xmit(skb);

    return A_STATUS_OK;
}
/**
 * @brief Determine the TX TID for an MSDU according to the pdev's frame
 *        format (raw 802.11, 802.3, or native WiFi), filling in the
 *        relevant htt header-type/ethertype fields of tx_msdu_info as a
 *        side effect.
 *
 * @param pdev          txrx pdev (supplies frame_format)
 * @param tx_nbuf       frame whose header is inspected
 * @param tx_msdu_info  per-MSDU metadata, updated in place
 *
 * @return the TID, or HTT_TX_EXT_TID_INVALID for an unknown format
 */
static A_UINT8
ol_tx_tid(
    struct ol_txrx_pdev_t *pdev,
    adf_nbuf_t tx_nbuf,
    struct ol_txrx_msdu_info_t *tx_msdu_info)
{
    A_UINT8 *datap = adf_nbuf_data(tx_nbuf);
    A_UINT8 tid;

    if (pdev->frame_format == wlan_frm_fmt_raw) {
        /* raw 802.11: parse the frame unless the caller already
         * supplied an out-of-band ext_tid */
        tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_raw;
        ol_tx_set_ether_type(datap, tx_msdu_info);
        tid = tx_msdu_info->htt.info.ext_tid == ADF_NBUF_TX_EXT_TID_INVALID
                  ? ol_tx_tid_by_raw_type(datap, tx_msdu_info)
                  : tx_msdu_info->htt.info.ext_tid;
    } else if (pdev->frame_format == wlan_frm_fmt_802_3) {
        /* 802.3: classify by ethertype unless ext_tid was supplied */
        tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_ethernet;
        ol_tx_set_ether_type(datap, tx_msdu_info);
        tid = tx_msdu_info->htt.info.ext_tid == ADF_NBUF_TX_EXT_TID_INVALID
                  ? ol_tx_tid_by_ether_type(datap, tx_msdu_info)
                  : tx_msdu_info->htt.info.ext_tid;
    } else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
        struct llc_snap_hdr_t *llc;

        tx_msdu_info->htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
        tx_msdu_info->htt.info.l3_hdr_offset =
            sizeof(struct ieee80211_frame);
        /* ethertype is read from the LLC/SNAP header after the 802.11
         * header (big-endian on the wire) */
        llc = (struct llc_snap_hdr_t *)
            (datap + tx_msdu_info->htt.info.l3_hdr_offset);
        tx_msdu_info->htt.info.ethertype =
            (llc->ethertype[0] << 8) | llc->ethertype[1];
        /*
         * Native WiFi is a special case of "raw" 802.11 header format.
         * However, we expect that for all cases that use native WiFi,
         * the TID will be directly specified out of band.
         */
        tid = tx_msdu_info->htt.info.ext_tid;
    } else {
        VOS_TRACE(VOS_MODULE_ID_TXRX, VOS_TRACE_LEVEL_FATAL,
                  "Invalid standard frame type: %d\n",
                  pdev->frame_format);
        adf_os_assert(0);
        tid = HTT_TX_EXT_TID_INVALID;
    }

    return tid;
}
/*
 * Allocate nbytes from the arena. At this point, which_arena should
 * be set to 0 for the default (and only) arena. A future allocation
 * module may support multiple separate arenas.
 *
 * Bump-pointer allocator over the global allocram_current_addr /
 * allocram_remaining_bytes pair; there is no corresponding free.
 */
LOCAL void *
cmnos_allocram(void * which_arena, A_UINT32 nbytes)
{
    /* the returned block starts at the current arena cursor */
    void *ptr = (void *)allocram_current_addr;

    /* round the request up to a 4-byte boundary */
    nbytes = A_ROUND_UP(nbytes, 4);

    if (nbytes <= allocram_remaining_bytes) {
        allocram_remaining_bytes -= nbytes;
        allocram_current_addr += nbytes;
    } else {
        A_PRINTF("RAM allocation (%d bytes) failed!\n", nbytes);
        adf_os_assert(0);
        /* NOTE(review): when asserts are compiled out, the stale arena
         * cursor is still returned even though no space was reserved —
         * confirm whether callers would be safer with NULL here */
    }

    return ptr;
}
/**
 * @brief TX-completion hook: validate the endpoint and schedule the
 *        next HTC event for it via the registered callback.
 *
 * @param Context  HTC_TARGET pointer
 * @param epnum    endpoint number the completion belongs to
 */
void
HTCWlanTxCompletionHandler(void *Context, a_uint8_t epnum)
{
    HTC_TARGET *target = (HTC_TARGET *)Context;

    /* endpoint id check */
    if (epnum >= ENDPOINT_MAX) {
        adf_os_print("[%s %d] Endpoint %u is invalid!\n",
                     __FUNCTION__, __LINE__, epnum);
        adf_os_assert(0);
        /* BUG FIX: don't forward an out-of-range endpoint to the
         * scheduler callback when asserts are compiled out */
        return;
    }

    if (target->HTCInitInfo.HTCSchedNextEvent) {
        target->HTCInitInfo.HTCSchedNextEvent(target->host_handle,
                                              (HTC_ENDPOINT_ID)epnum);
    } else {
        adf_os_print("[%s %d] HTCSchedNextEvent handler is not registered!\n",
                     __FUNCTION__, __LINE__);
    }
}
/**
 * @brief Allocate and initialize a SW TX descriptor for an HL frame,
 *        seeding the per-MSDU htt info (vdev id, frame type, checksum
 *        offload, encryption action) from the netbuf.
 *
 * @param pdev       txrx pdev
 * @param vdev       txrx vdev the frame belongs to
 * @param netbuf     frame to describe
 * @param msdu_info  per-MSDU metadata, updated in place
 *
 * @return the new descriptor, or NULL if the pool is exhausted
 */
struct ol_tx_desc_t *
ol_tx_desc_hl(
    struct ol_txrx_pdev_t *pdev,
    struct ol_txrx_vdev_t *vdev,
    adf_nbuf_t netbuf,
    struct ol_txrx_msdu_info_t *msdu_info)
{
    struct ol_tx_desc_t *desc;

    /* FIX THIS: these inits should probably be done by tx classify */
    msdu_info->htt.info.vdev_id = vdev->vdev_id;
    msdu_info->htt.info.frame_type = pdev->htt_pkt_type;
    msdu_info->htt.action.cksum_offload = adf_nbuf_get_tx_cksum(netbuf);

    /* derive the encryption action from the frame's exemption type */
    switch (adf_nbuf_get_exemption_type(netbuf)) {
    case ADF_NBUF_EXEMPT_NO_EXEMPTION:
    case ADF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
        /* We want to encrypt this frame */
        msdu_info->htt.action.do_encrypt = 1;
        break;
    case ADF_NBUF_EXEMPT_ALWAYS:
        /* We don't want to encrypt this frame */
        msdu_info->htt.action.do_encrypt = 0;
        break;
    default:
        adf_os_assert(0);
        break;
    }

    /* allocate the descriptor */
    desc = ol_tx_desc_alloc_hl(pdev, vdev);
    if (!desc)
        return NULL;

    /* initialize the SW tx descriptor */
    desc->netbuf = netbuf;
    /* fix this - get pkt_type from msdu_info */
    desc->pkt_type = ol_tx_frm_std;
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
    desc->orig_l2_hdr_bytes = 0;
#endif

    /* the HW tx descriptor will be initialized later by the caller */
    return desc;
}
/**
 * @brief Map a DMA engine to its HIF PCI pipe number.
 *
 * @param eng  DMA engine identifier
 *
 * @return the corresponding HIF_PCI_PIPE_* value; asserts and returns
 *         0xFF for an unknown engine
 */
A_UINT8
__pci_get_pipe(dma_engine_t eng)
{
    switch (eng) {
    case DMA_ENGINE_RX0:
        return HIF_PCI_PIPE_RX0;
    case DMA_ENGINE_RX1:
        return HIF_PCI_PIPE_RX1;
    case DMA_ENGINE_RX2:
        return HIF_PCI_PIPE_RX2;
    case DMA_ENGINE_RX3:
        return HIF_PCI_PIPE_RX3;
    case DMA_ENGINE_TX0:
        return HIF_PCI_PIPE_TX0;
    case DMA_ENGINE_TX1:
        return HIF_PCI_PIPE_TX1;
    default:
        adf_os_assert(0);
        /* BUG FIX: falling off the end of a non-void function is
         * undefined behavior when asserts are compiled out; return a
         * sentinel pipe id instead */
        return 0xFF;
    }
}
/**
 * @brief Build a WMI command-reply event carrying the given payload and
 *        send it to the host.
 *
 * @param pContext  WMI context
 * @param cmd_id    command id being replied to
 * @param SeqNo     sequence number echoed back to the host
 * @param buffer    optional payload (may be NULL when Length is 0)
 * @param Length    payload length in bytes
 */
void
_wmi_cmd_rsp(void *pContext, WMI_COMMAND_ID cmd_id, A_UINT16 SeqNo,
             A_UINT8 *buffer, int Length)
{
    adf_nbuf_t evt_buf;
    A_UINT8 *dst;

    evt_buf = WMI_AllocEvent(pContext, WMI_EVT_CLASS_CMD_REPLY,
                             sizeof(WMI_CMD_HDR) + Length);
    if (evt_buf == ADF_NBUF_NULL) {
        adf_os_print("%s: buffer allocation for event_id %x failed!\n",
                     __FUNCTION__, cmd_id);
        adf_os_assert(0);
        return;
    }

    /* append the payload, if one was supplied */
    if (Length != 0 && buffer != NULL) {
        dst = (A_UINT8 *)adf_nbuf_put_tail(evt_buf, Length);
        adf_os_mem_copy(dst, buffer, Length);
    }

    WMI_SendEvent(pContext, evt_buf, cmd_id, SeqNo, Length);
}