Example #1
void
ol_rx_reorder_release(
    struct ol_txrx_vdev_t *vdev,
    struct ol_txrx_peer_t *peer,
    unsigned tid,
    unsigned seq_num_start,
    unsigned seq_num_end)
{
    unsigned seq_num = 0;
    unsigned win_sz_mask;
    struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
    adf_nbuf_t head_msdu;
    adf_nbuf_t tail_msdu;

    win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
    seq_num_start &= win_sz_mask;
    seq_num_end   &= win_sz_mask;
    rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq_num_start];

    head_msdu = rx_reorder_array_elem->head;
    tail_msdu = rx_reorder_array_elem->tail;
    rx_reorder_array_elem->head = rx_reorder_array_elem->tail = NULL;
    seq_num = (seq_num_start + 1) & win_sz_mask;
    while (seq_num != seq_num_end) {
        if (adf_os_unlikely(!tail_msdu)) {
            /* Debug aid: report the reorder state if tail_msdu is NULL */
            printk("%s : tail msdu is null. peer %p (%s) tid %d seq_start %d seq_end %d win_sz_mask %d seq_num %d head %p tail %p\n",
                    __func__, peer, ether_sprintf(peer->mac_addr.raw), tid, seq_num_start, seq_num_end, win_sz_mask, seq_num,
                    rx_reorder_array_elem->head, rx_reorder_array_elem->tail);
            adf_os_assert_always(tail_msdu);
        }
        rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq_num];
        adf_nbuf_set_next(tail_msdu, rx_reorder_array_elem->head);
        tail_msdu = rx_reorder_array_elem->tail;
        rx_reorder_array_elem->head = rx_reorder_array_elem->tail = NULL;
        seq_num++;
        seq_num &= win_sz_mask;
    }

    if (adf_os_unlikely(!tail_msdu)) {
        /* Debug aid: report the reorder state if tail_msdu is NULL */
        printk("%s : tail msdu is null. peer %p (%s) tid %d seq_start %d seq_end %d win_sz_mask %d seq_num %d head %p tail %p\n",
                __func__, peer, ether_sprintf(peer->mac_addr.raw), tid, seq_num_start, seq_num_end, win_sz_mask, seq_num,
                rx_reorder_array_elem->head, rx_reorder_array_elem->tail);
        adf_os_assert_always(tail_msdu);
    }

    /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
    adf_nbuf_set_next(tail_msdu, NULL);
    peer->rx_opt_proc(vdev, peer, tid, head_msdu);
}
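
The release loop above treats peer->tids_rx_reorder[tid].array as a circular buffer: because the window size is a power of two, win_sz_mask turns the modulo wraparound of sequence numbers into a bitwise AND. A minimal standalone sketch of that index arithmetic (the window size and slot values are illustrative, not taken from the driver):

#include <stdio.h>

/* Illustrative window size; the mask trick requires a power of two. */
#define WIN_SZ      8
#define WIN_SZ_MASK (WIN_SZ - 1)

int main(void)
{
    /* A range that wraps past the window edge: (6, 2). Slot seq_start
     * itself is consumed before the loop, as in the driver. */
    unsigned seq_start = 6, seq_end = 2;
    unsigned seq = (seq_start + 1) & WIN_SZ_MASK;

    while (seq != seq_end) {
        printf("release slot %u\n", seq);   /* prints 7, 0, 1 */
        seq = (seq + 1) & WIN_SZ_MASK;      /* wraparound via AND */
    }
    return 0;
}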
Example #2
void
ath_swba_event(void *Context, void *data, u_int32_t datalen)
{
    struct ath_softc_net80211 *scn = (struct ath_softc_net80211 *)Context;
    struct ieee80211com *ic = &scn->sc_ic;
    WMI_SWBA_EVENT *swba_event = (WMI_SWBA_EVENT *)data;

    /* XXX: need to fix if defer has not run & more events are arriving */
    if (atomic_read(&scn->sc_htc_swba_data.flags) == IEEE80211_SWBA_DEFER_PENDING) {
        /* adf_os_print("SWBA Event processing is pending ignoring event\n"); */
    } else if (adf_os_unlikely(ic->ic_flags & IEEE80211_F_CHANSWITCH)) {
        if (ic->ic_chanchange_cnt) {
            atomic_set(&scn->sc_htc_swba_data.flags, IEEE80211_SWBA_DEFER_PENDING);
            scn->sc_ops->ath_wmi_beacon(scn->sc_dev, swba_event->currentTsf, swba_event->beaconPendingCount, 0);
            atomic_set(&scn->sc_htc_swba_data.flags, IEEE80211_SWBA_DEFER_DONE);
        } else {
            scn->sc_htc_swba_data.currentTsf         = swba_event->currentTsf;
            scn->sc_htc_swba_data.beaconPendingCount = swba_event->beaconPendingCount;
            atomic_set(&scn->sc_htc_swba_data.flags, IEEE80211_SWBA_DEFER_PENDING);

            OS_PUT_DEFER_ITEM(scn->sc_osdev,
                    ath_swba_event_defer,
                    WORK_ITEM_SET_BEACON_DEFERED,
                    scn, NULL, NULL);
        }
    } else {
        scn->sc_ops->ath_wmi_beacon(scn->sc_dev, swba_event->currentTsf, swba_event->beaconPendingCount, 0);
    }
}
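
When a channel switch is pending, the handler snapshots the event fields and defers the beacon send, using an atomic flag to drop further SWBA events until the deferred work has run. The same snapshot-and-defer pattern can be sketched with a plain Linux workqueue instead of the driver's OS_PUT_DEFER_ITEM abstraction (the struct and function names below are hypothetical):

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>

#define SWBA_DEFER_PENDING 1
#define SWBA_DEFER_DONE    2

/* Hypothetical context mirroring sc_htc_swba_data. */
struct swba_defer_ctx {
    atomic_t flags;             /* SWBA_DEFER_PENDING / _DONE */
    u32 current_tsf;            /* snapshot of the event payload */
    u8  beacon_pending_count;
    struct work_struct work;    /* set up with INIT_WORK(&ctx->work, swba_work_fn) */
};

static void swba_work_fn(struct work_struct *work)
{
    struct swba_defer_ctx *ctx =
        container_of(work, struct swba_defer_ctx, work);

    /* ... send the beacon using the snapshotted tsf/count ... */
    atomic_set(&ctx->flags, SWBA_DEFER_DONE);
}

static void swba_event_sketch(struct swba_defer_ctx *ctx, u32 tsf, u8 pending)
{
    /* A deferral is already queued: drop this event, as the driver does. */
    if (atomic_read(&ctx->flags) == SWBA_DEFER_PENDING)
        return;

    /* Snapshot the event fields before publishing the pending flag. */
    ctx->current_tsf = tsf;
    ctx->beacon_pending_count = pending;
    atomic_set(&ctx->flags, SWBA_DEFER_PENDING);
    schedule_work(&ctx->work);  /* swba_work_fn runs in process context */
}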
Example #3
File: adf_net.c Project: KHATEEBNSIT/AP
a_status_t
__adf_net_indicate_packet(adf_net_handle_t hdl, struct sk_buff *skb,
                          uint32_t len)
{
    struct net_device *netdev   = hdl_to_netdev(hdl);
    __adf_softc_t  *sc          = hdl_to_softc(hdl);
    /**
     * For pseudo devices the IP checksum has to be computed here
     */
    if (adf_os_unlikely(skb->ip_summed == CHECKSUM_UNNECESSARY))
        __adf_net_ip_cksum(skb);

    /**
     * eth_type_trans() also pulls the Ethernet header
     */
    skb->protocol           =   eth_type_trans(skb, netdev);
    skb->dev                =   netdev;
    netdev->last_rx         =   jiffies;
#ifdef LIMIT_MTU_SIZE

    if (skb->len >= LIMITED_MTU) {
        skb->h.raw = skb->nh.raw = skb->data;

        skb->dst = (struct dst_entry *)&__fake_rtable;
        skb->pkt_type = PACKET_HOST;
        dst_hold(skb->dst);

#if 0
        printk("addrs : sa : %x : da:%x\n", skb->nh.iph->saddr, skb->nh.iph->daddr);
        printk("head : %p tail : %p iph %p %p\n", skb->head, skb->tail,
                skb->nh.iph, skb->mac.raw);
#endif 

        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(LIMITED_MTU - 4));
        dev_kfree_skb_any(skb);
        return A_STATUS_OK;
    }
#endif

    if (sc->vlgrp)
        __vlan_hwaccel_put_tag(skb, sc->vid);
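    /*
     * netif_rx() is safe to call from hard interrupt context, while
     * netif_receive_skb() may only be called from softirq/process
     * context, hence the in_irq() dispatch below.
     */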
    if (in_irq())
        netif_rx(skb);
    else
        netif_receive_skb(skb);

    return A_STATUS_OK;
}
/* Generic target-to-host msg/event handler for low priority messages.
 * Low priority messages are handled in a separate handler called from
 * this function, so that the most likely success paths (Rx and Tx
 * completion) keep a small code footprint (see the dispatch sketch
 * after this function).
 */
void
htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
    struct htt_pdev_t *pdev = (struct htt_pdev_t *) context;
    adf_nbuf_t htt_t2h_msg = (adf_nbuf_t) pkt->pPktContext;
    u_int32_t *msg_word;
    enum htt_t2h_msg_type msg_type;

    /* check for successful message reception */
    if (pkt->Status != A_OK) {
        if (pkt->Status != A_ECANCELED) {
            pdev->stats.htc_err_cnt++;
        }
        adf_nbuf_free(htt_t2h_msg);
        return;
    }

#ifdef HTT_RX_RESTORE
    if (adf_os_unlikely(pdev->rx_ring.rx_reset)) {
        adf_os_print("rx restore ..\n");
        adf_nbuf_free(htt_t2h_msg);
        return;
    }
#endif

    /* confirm alignment */
    HTT_ASSERT3((((unsigned long) adf_nbuf_data(htt_t2h_msg)) & 0x3) == 0);

    msg_word = (u_int32_t *) adf_nbuf_data(htt_t2h_msg);
    msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
    switch (msg_type) {
    case HTT_T2H_MSG_TYPE_RX_IND:
        {
            unsigned num_mpdu_ranges;
            unsigned num_msdu_bytes;
            u_int16_t peer_id;
            u_int8_t tid;

            if (adf_os_unlikely(pdev->cfg.is_full_reorder_offload)) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IND not supported with full "
                             "reorder offload\n");
                break;
            }
            peer_id = HTT_RX_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_IND_EXT_TID_GET(*msg_word);

            if (tid >= OL_TXRX_NUM_EXT_TIDS) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IND, invalid tid %d\n", tid);
                break;
            }

            num_msdu_bytes = HTT_RX_IND_FW_RX_DESC_BYTES_GET(
                *(msg_word + 2 + HTT_RX_PPDU_DESC_SIZE32));
            /*
             * 1 word for the message header,
             * HTT_RX_PPDU_DESC_SIZE32 words for the FW rx PPDU desc
             * 1 word to specify the number of MSDU bytes,
             * 1 word for every 4 MSDU bytes (round up),
             * 1 word for the MPDU range header
             */
            pdev->rx_mpdu_range_offset_words =
                (HTT_RX_IND_HDR_BYTES + num_msdu_bytes + 3) >> 2;
            num_mpdu_ranges = HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));
            pdev->rx_ind_msdu_byte_idx = 0;

            if (pdev->cfg.is_high_latency) {
                /*
                 * TODO: remove the copy once the HIF layer stops reusing
                 * the skb, since the SDIO HIF may reuse the skb before
                 * the upper layer releases it
                 */
                ol_rx_indication_handler(
                    pdev->txrx_pdev, htt_t2h_msg, peer_id, tid,
                    num_mpdu_ranges);

                return;
            } else {
                ol_rx_indication_handler(
                    pdev->txrx_pdev, htt_t2h_msg, peer_id, tid,
                    num_mpdu_ranges);
            }
            break;
        }
    case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
        {
            int num_msdus;
            enum htt_tx_status status;

            /* status - no enum translation needed */
            status = HTT_TX_COMPL_IND_STATUS_GET(*msg_word);
            num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
            if (num_msdus & 0x1) {
                struct htt_tx_compl_ind_base *compl = (void *)msg_word;

                /*
                 * Host CPU endianness can be different from FW CPU. This
                 * can result in even and odd MSDU IDs being switched. If
                 * this happens, copy the switched final odd MSDU ID from
                 * location payload[size], to location payload[size-1],
                 * where the message handler function expects to find it
                 */
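                /*
                 * Illustration (assuming two 16-bit MSDU IDs packed per
                 * 32-bit word): with num_msdus = 3 the last word should
                 * hold { id2, INV }. If its halves arrive swapped it
                 * holds { INV, id2 }, so id2 sits at payload[3] and the
                 * copy below moves it back to payload[2] for the handler.
                 */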
                if (compl->payload[num_msdus] != HTT_TX_COMPL_INV_MSDU_ID) {
                    compl->payload[num_msdus - 1] =
                        compl->payload[num_msdus];
                }
            }
            if (pdev->cfg.is_high_latency) {
                ol_tx_target_credit_update(
                    pdev->txrx_pdev, num_msdus /* 1 credit per MSDU */);
            }
            ol_tx_completion_handler(
                pdev->txrx_pdev, num_msdus, status, msg_word + 1);
            HTT_TX_SCHED(pdev);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_PN_IND:
        {
            u_int16_t peer_id;
            u_int8_t tid, pn_ie_cnt, *pn_ie = NULL;
            int seq_num_start, seq_num_end;

            /* First dword */
            peer_id = HTT_RX_PN_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_PN_IND_EXT_TID_GET(*msg_word);

            msg_word++;
            /* Second dword */
            seq_num_start = HTT_RX_PN_IND_SEQ_NUM_START_GET(*msg_word);
            seq_num_end = HTT_RX_PN_IND_SEQ_NUM_END_GET(*msg_word);
            pn_ie_cnt = HTT_RX_PN_IND_PN_IE_CNT_GET(*msg_word);

            msg_word++;
            /* Third dword */
            if (pn_ie_cnt) {
                pn_ie = (u_int8_t *)msg_word;
            }

            ol_rx_pn_ind_handler(
                pdev->txrx_pdev, peer_id, tid, seq_num_start, seq_num_end,
                pn_ie_cnt, pn_ie);

            break;
        }
    case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
        {
            int num_msdus;

            num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
            if (num_msdus & 0x1) {
                struct htt_tx_compl_ind_base *compl = (void *)msg_word;

                /*
                 * Host CPU endianness can be different from FW CPU. This
                 * can result in even and odd MSDU IDs being switched. If
                 * this happens, copy the switched final odd MSDU ID from
                 * location payload[size], to location payload[size-1],
                 * where the message handler function expects to find it
                 */
                if (compl->payload[num_msdus] != HTT_TX_COMPL_INV_MSDU_ID) {
                    compl->payload[num_msdus - 1] =
                        compl->payload[num_msdus];
                }
            }
            ol_tx_inspect_handler(pdev->txrx_pdev, num_msdus, msg_word + 1);
            HTT_TX_SCHED(pdev);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND:
        {
            u_int16_t peer_id;
            u_int8_t tid;
            u_int8_t offload_ind, frag_ind;

            if (adf_os_unlikely(!pdev->cfg.is_full_reorder_offload)) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not supported"
                             " when full reorder offload is disabled\n");
                break;
            }

            if (adf_os_unlikely(pdev->cfg.is_high_latency)) {
                adf_os_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not supported"
                             " on high latency\n");
                break;
            }

            peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(*msg_word);
            offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
            frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);

            if (adf_os_unlikely(frag_ind)) {
                ol_rx_frag_indication_handler(pdev->txrx_pdev, htt_t2h_msg,
                                               peer_id, tid);
                break;
            }

            ol_rx_in_order_indication_handler(pdev->txrx_pdev, htt_t2h_msg,
                                               peer_id, tid, offload_ind);
            break;
        }

    default:
        /* Low priority message: hand off to the slow-path handler,
         * which takes ownership of the buffer */
        htt_t2h_lp_msg_handler(context, htt_t2h_msg);
        return;
    }

    /* Free the indication buffer */
    adf_nbuf_free(htt_t2h_msg);
}
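
Note how the switch above inlines only the hot message types (rx indication, tx completion, and so on) and forwards everything else to htt_t2h_lp_msg_handler, which then owns the message buffer (hence the early return that skips the final adf_nbuf_free). A generic sketch of that fast-path/slow-path split (all names below are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative message types: only the first two are "hot". */
enum msg_type { MSG_RX_IND, MSG_TX_COMPL, MSG_STATS, MSG_DEBUG };

/* Slow path: rarely taken, takes over ownership of the buffer. */
static void lp_msg_handler(enum msg_type type, void *buf)
{
    printf("slow path handling type %d\n", type);
    free(buf);
}

void msg_handler(enum msg_type type, void *buf)
{
    switch (type) {
    case MSG_RX_IND:
        /* ... hot rx path, kept small for a tight code footprint ... */
        break;
    case MSG_TX_COMPL:
        /* ... hot tx-completion path ... */
        break;
    default:
        lp_msg_handler(type, buf);
        return;     /* ownership handed off; do not free here */
    }
    free(buf);      /* fast path frees the buffer itself */
}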