Example No. 1
0
static void
htt_t2h_tx_ppdu_bitmaps_pr(uint32_t *queued_ptr, uint32_t *acked_ptr)
{
	char queued_str[HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW + 1];
	char acked_str[HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW + 1];
	int i, j, word;

	cdf_mem_set(queued_str, HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW, '0');
	cdf_mem_set(acked_str, HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW, '-');
	i = 0;
	for (word = 0; word < 2; word++) {
		uint32_t queued = *(queued_ptr + word);
		uint32_t acked = *(acked_ptr + word);
		for (j = 0; j < 32; j++, i++) {
			if (queued & (1 << j)) {
				queued_str[i] = '1';
				acked_str[i] = (acked & (1 << j)) ? 'y' : 'N';
			}
		}
	}
	queued_str[HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW] = '\0';
	acked_str[HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW] = '\0';
	cdf_print("%s\n", queued_str);
	cdf_print("%s\n", acked_str);
}
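The helper above renders two pairs of 32-bit words as a 64-character queued/acked block-ack window. A minimal standalone sketch of the same rendering, using only libc (the 64-entry window size and the sample bitmap words are assumptions for illustration):

/* Standalone sketch of the queued/acked bitmap rendering above; assumes a
 * 64-entry block-ack window (HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW in the driver)
 * and made-up sample words.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BA_WINDOW 64	/* assumed window size */

static void ppdu_bitmaps_pr(const uint32_t *queued_ptr, const uint32_t *acked_ptr)
{
	char queued_str[BA_WINDOW + 1];
	char acked_str[BA_WINDOW + 1];
	int i = 0, j, word;

	memset(queued_str, '0', BA_WINDOW);
	memset(acked_str, '-', BA_WINDOW);
	for (word = 0; word < 2; word++) {
		for (j = 0; j < 32; j++, i++) {
			if (queued_ptr[word] & (1u << j)) {
				queued_str[i] = '1';
				acked_str[i] =
					(acked_ptr[word] & (1u << j)) ? 'y' : 'N';
			}
		}
	}
	queued_str[BA_WINDOW] = '\0';
	acked_str[BA_WINDOW] = '\0';
	printf("%s\n%s\n", queued_str, acked_str);
}

int main(void)
{
	/* MPDUs 0-3 queued (0-2 acked) in the first word, MPDU 32 queued
	 * but unacked in the second word */
	uint32_t queued[2] = { 0x0000000F, 0x00000001 };
	uint32_t acked[2] = { 0x00000007, 0x00000000 };

	ppdu_bitmaps_pr(queued, acked);
	return 0;
}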
Example No. 2
0
static void
htt_t2h_stats_txbf_snd_buf_stats_print(
	struct wlan_dbg_txbf_snd_stats *stats_ptr)
{
	cdf_print("TXBF snd Buffer Statistics:\n");
	cdf_print("cbf_20: ");
	cdf_print("%u, %u, %u, %u\n",
		  stats_ptr->cbf_20[0],
		  stats_ptr->cbf_20[1],
		  stats_ptr->cbf_20[2],
		  stats_ptr->cbf_20[3]);
	cdf_print("cbf_40: ");
	cdf_print("%u, %u, %u, %u\n",
		  stats_ptr->cbf_40[0],
		  stats_ptr->cbf_40[1],
		  stats_ptr->cbf_40[2],
		  stats_ptr->cbf_40[3]);
	cdf_print("cbf_80: ");
	cdf_print("%u, %u, %u, %u\n",
		  stats_ptr->cbf_80[0],
		  stats_ptr->cbf_80[1],
		  stats_ptr->cbf_80[2],
		  stats_ptr->cbf_80[3]);
	cdf_print("sounding: ");
	cdf_print("%u, %u, %u, %u, %u, %u, %u, %u, %u\n",
		  stats_ptr->sounding[0],
		  stats_ptr->sounding[1],
		  stats_ptr->sounding[2],
		  stats_ptr->sounding[3],
		  stats_ptr->sounding[4],
		  stats_ptr->sounding[5],
		  stats_ptr->sounding[6],
		  stats_ptr->sounding[7],
		  stats_ptr->sounding[8]);
}
Example No. 3
0
static HTC_PACKET *build_htc_tx_ctrl_packet(cdf_device_t osdev)
{
	HTC_PACKET *pPacket = NULL;
	cdf_nbuf_t netbuf;

	do {
		pPacket = (HTC_PACKET *) cdf_mem_malloc(sizeof(HTC_PACKET));
		if (NULL == pPacket) {
			break;
		}
		A_MEMZERO(pPacket, sizeof(HTC_PACKET));
		netbuf =
			cdf_nbuf_alloc(osdev, HTC_CONTROL_BUFFER_SIZE, 20, 4, true);
		if (NULL == netbuf) {
			cdf_mem_free(pPacket);
			pPacket = NULL;
			cdf_print("%s: nbuf alloc failed\n", __func__);
			break;
		}
		AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
				("alloc ctrl netbuf :0x%p \n", netbuf));
		SET_HTC_PACKET_NET_BUF_CONTEXT(pPacket, netbuf);
	} while (false);

	return pPacket;
}
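build_htc_tx_ctrl_packet() relies on the do { ... } while (false) idiom so every failure path can break to a single return statement after undoing earlier allocations. A minimal standalone sketch of the same idiom with plain malloc (the packet/buffer types and sizes are hypothetical):

/* Sketch of the do/while(false) allocate-or-bail idiom used above. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ctrl_packet {
	void *netbuf;
};

static struct ctrl_packet *alloc_ctrl_packet(size_t buf_size)
{
	struct ctrl_packet *pkt = NULL;

	do {
		pkt = calloc(1, sizeof(*pkt));
		if (!pkt)
			break;			/* nothing to undo yet */

		pkt->netbuf = malloc(buf_size);
		if (!pkt->netbuf) {
			free(pkt);		/* undo the first allocation */
			pkt = NULL;
			fprintf(stderr, "netbuf alloc failed\n");
			break;
		}
	} while (false);

	return pkt;				/* NULL on any failure */
}

int main(void)
{
	struct ctrl_packet *pkt = alloc_ctrl_packet(256);

	if (pkt) {
		free(pkt->netbuf);
		free(pkt);
	}
	return 0;
}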
Example No. 4
0
cdf_nbuf_t
htt_tx_send_batch(htt_pdev_handle pdev, cdf_nbuf_t head_msdu, int num_msdus)
{
	cdf_print("*** %s curently only applies for HL systems\n", __func__);
	cdf_assert(0);
	return head_msdu;

}
Example No. 5
0
void
dump_pkt(cdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
{
	cdf_print("%s: Pkt: VA 0x%p PA 0x%x len %d\n", __func__,
		  cdf_nbuf_data(nbuf), nbuf_paddr, len);
	print_hex_dump(KERN_DEBUG, "Pkt:   ", DUMP_PREFIX_NONE, 16, 4,
		       cdf_nbuf_data(nbuf), len, true);
}
Example No. 6
0
/**
 * ol_tx_desc_free() - put descriptor to pool freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	struct ol_tx_flow_pool_t *pool = tx_desc->pool;

#if defined(FEATURE_TSO)
	if (tx_desc->pkt_type == ol_tx_frm_tso) {
		if (cdf_unlikely(tx_desc->tso_desc == NULL))
			cdf_print("%s %d TSO desc is NULL!\n",
				 __func__, __LINE__);
		else
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
	}
#endif
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	cdf_spin_lock_bh(&pool->flow_pool_lock);
	ol_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			pdev->pause_cb(pool->member_flow_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->flow_pool_size) {
			cdf_spin_unlock_bh(&pool->flow_pool_lock);
			ol_tx_free_invalid_flow_pool(pool);
			cdf_print("%s %d pool is INVALID State!!\n",
				 __func__, __LINE__);
			return;
		}
		break;
	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		cdf_print("%s %d pool is INACTIVE State!!\n",
				 __func__, __LINE__);
		break;
	};
	cdf_spin_unlock_bh(&pool->flow_pool_lock);

}
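In the FLOW_POOL_ACTIVE_PAUSED case above, the netif queues are woken only once the pool refills past start_th. A small standalone sketch of that threshold decision (the pool struct and the numbers are hypothetical, for illustration only):

/* Hypothetical, simplified view of the flow-pool refill decision above. */
#include <stdbool.h>
#include <stdio.h>

struct flow_pool {
	int avail_desc;	/* descriptors currently free in the pool */
	int start_th;	/* refill level at which queues are resumed */
	bool paused;
};

static void desc_freed(struct flow_pool *pool)
{
	pool->avail_desc++;	/* ol_tx_put_desc_flow_pool() equivalent */
	if (pool->paused && pool->avail_desc > pool->start_th) {
		/* pause_cb(WLAN_WAKE_ALL_NETIF_QUEUE) in the driver */
		pool->paused = false;
		printf("unpaused at %d free descriptors\n", pool->avail_desc);
	}
}

int main(void)
{
	struct flow_pool pool = {
		.avail_desc = 14, .start_th = 15, .paused = true
	};

	desc_freed(&pool);	/* 15 free: still paused */
	desc_freed(&pool);	/* 16 free: crosses start_th, queues resume */
	return 0;
}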
Example No. 7
0
void htt_tx_detach(struct htt_pdev_t *pdev)
{
	if (!pdev) {
		cdf_print("htt tx detach invalid instance");
		return;
	}

	htt_tx_frag_desc_detach(pdev);
	cdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
		cdf_get_dma_mem_context((&pdev->tx_descs), memctx), false);
}
Example No. 8
0
/**
 * cds_regulatory_init() - regulatory_init
 * Return: CDF_STATUS
 */
CDF_STATUS cds_regulatory_init(void)
{
	v_CONTEXT_t cds_context = NULL;
	hdd_context_t *hdd_ctx = NULL;
	struct wiphy *wiphy = NULL;
	int ret_val = 0;
	struct regulatory *reg_info;

	cds_context = cds_get_global_context();

	if (!cds_context)
		return CDF_STATUS_E_FAULT;

	hdd_ctx = cds_get_context(CDF_MODULE_ID_HDD);
	if (!hdd_ctx) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  ("Invalid pHddCtx pointer"));
		return CDF_STATUS_E_FAULT;
	}

	wiphy = hdd_ctx->wiphy;

	reg_info = &hdd_ctx->reg;

	cds_regulatory_wiphy_init(hdd_ctx, reg_info, wiphy);

	temp_reg_domain = REGDOMAIN_WORLD;

	if (cds_process_regulatory_data(wiphy,
					hdd_ctx->config->
					nBandCapability, true) != 0) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  ("Error while creating regulatory entry"));
		return CDF_STATUS_E_FAULT;
	}

	reg_info->cc_src = COUNTRY_CODE_SET_BY_DRIVER;

	ret_val = cds_fill_some_regulatory_info(reg_info);
	if (ret_val) {
		cdf_print(KERN_ERR "Error in getting country code\n");
		return ret_val;
	}

	reg_table.default_country[0] = reg_info->alpha2[0];
	reg_table.default_country[1] = reg_info->alpha2[1];

	init_completion(&hdd_ctx->reg_init);

	cds_fill_and_send_ctl_to_fw(reg_info);

	return CDF_STATUS_SUCCESS;
}
Example No. 9
0
/**
 * ol_tx_desc_free() - put descriptor to freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	cdf_spin_lock_bh(&pdev->tx_mutex);
#if defined(FEATURE_TSO)
	if (tx_desc->pkt_type == ol_tx_frm_tso) {
		if (cdf_unlikely(tx_desc->tso_desc == NULL))
			cdf_print("%s %d TSO desc is NULL!\n",
				 __func__, __LINE__);
		else
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
	}
#endif
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	ol_tx_put_desc_global_pool(pdev, tx_desc);
	cdf_spin_unlock_bh(&pdev->tx_mutex);
}
Example No. 10
0
int main(int argc, char *argv[])
{
    int val;
    struct list_elem *h;
    int res, cnt;

    h = NULL;
    cnt = 0;
    while (!feof(stdin)) {
        res = scanf("%d\n", &val);
        if (res == 1) {
            h = list_insert(h, ((val + 100) / 100) * 100);
            cnt++;
        }
    }

    cdf_print("%d values inserted, list head %p\n", cnt, (void *)h);
    return 0;
}
Example No. 11
0
static void
htt_t2h_rx_musu_ndpa_pkts_stats_print(
	struct rx_txbf_musu_ndpa_pkts_stats *stats_ptr)
{
	cdf_print("Rx TXBF MU/SU Packets and NDPA Statistics:\n");
	cdf_print("  %u Number of TXBF MU packets received\n",
			stats_ptr->number_mu_pkts);
	cdf_print("  %u Number of TXBF SU packets received\n",
			stats_ptr->number_su_pkts);
	cdf_print("  %u Number of TXBF directed NDPA\n",
			stats_ptr->txbf_directed_ndpa_count);
	cdf_print("  %u Number of TXBF retried NDPA\n",
			stats_ptr->txbf_ndpa_retry_count);
	cdf_print("  %u Total number of TXBF NDPA\n",
			stats_ptr->txbf_total_ndpa_count);
}
Example No. 12
0
static void
htt_t2h_stats_wifi2_error_stats_print(
	struct wlan_dbg_wifi2_error_stats *stats_ptr)
{
	int i;

	cdf_print("Scheduler error Statistics:\n");
	cdf_print("urrn_stats: ");
	cdf_print("%d, %d, %d\n",
		  stats_ptr->urrn_stats[0],
		  stats_ptr->urrn_stats[1],
		  stats_ptr->urrn_stats[2]);
	cdf_print("flush_errs (0..%d): ",
			WHAL_DBG_FLUSH_REASON_MAXCNT);
	for (i = 0; i < WHAL_DBG_FLUSH_REASON_MAXCNT; i++)
		cdf_print("  %u", stats_ptr->flush_errs[i]);
	cdf_print("\n");
	cdf_print("schd_stall_errs (0..3): ");
	cdf_print("%d, %d, %d, %d\n",
		  stats_ptr->schd_stall_errs[0],
		  stats_ptr->schd_stall_errs[1],
		  stats_ptr->schd_stall_errs[2],
		  stats_ptr->schd_stall_errs[3]);
	cdf_print("schd_cmd_result (0..%d): ",
			WHAL_DBG_CMD_RESULT_MAXCNT);
	for (i = 0; i < WHAL_DBG_CMD_RESULT_MAXCNT; i++)
		cdf_print("  %u", stats_ptr->schd_cmd_result[i]);
	cdf_print("\n");
	cdf_print("sifs_status (0..%d): ",
			WHAL_DBG_SIFS_STATUS_MAXCNT);
	for (i = 0; i < WHAL_DBG_SIFS_STATUS_MAXCNT; i++)
		cdf_print("  %u", stats_ptr->sifs_status[i]);
	cdf_print("\n");
	cdf_print("phy_errs (0..%d): ",
			WHAL_DBG_PHY_ERR_MAXCNT);
	for (i = 0; i < WHAL_DBG_PHY_ERR_MAXCNT; i++)
		cdf_print("  %u", stats_ptr->phy_errs[i]);
	cdf_print("\n");
	cdf_print("  %u rx_rate_inval\n",
			stats_ptr->rx_rate_inval);
}
Example No. 13
0
/**
 * cdf_mem_multi_pages_alloc() - allocate a large amount of kernel memory
 * @osdev:		OS device handle pointer
 * @pages:		Multi page information storage
 * @element_size:	Each element size
 * @element_num:	Total number of elements to allocate
 * @memctxt:		Memory context
 * @cacheable:		Coherent memory or cacheable memory
 *
 * This function allocates a large amount of memory across multiple pages.
 * Large contiguous allocations fail frequently, so instead of allocating
 * one big block in a single shot, the memory is allocated as multiple
 * non-contiguous pages that are combined at the point of use.
 *
 * Return: None
 */
void cdf_mem_multi_pages_alloc(cdf_device_t osdev,
				struct cdf_mem_multi_page_t *pages,
				size_t element_size,
				uint16_t element_num,
				cdf_dma_context_t memctxt,
				bool cacheable)
{
	uint16_t page_idx;
	struct cdf_mem_dma_page_t *dma_pages;
	void **cacheable_pages = NULL;
	uint16_t i;

	pages->num_element_per_page = PAGE_SIZE / element_size;
	if (!pages->num_element_per_page) {
		cdf_print("Invalid page %d or element size %d",
			(int)PAGE_SIZE, (int)element_size);
		goto out_fail;
	}

	pages->num_pages = element_num / pages->num_element_per_page;
	if (element_num % pages->num_element_per_page)
		pages->num_pages++;

	if (cacheable) {
		/* Pages information storage */
		pages->cacheable_pages = cdf_mem_malloc(
			pages->num_pages * sizeof(pages->cacheable_pages));
		if (!pages->cacheable_pages) {
			cdf_print("Cacheable page storage alloc fail");
			goto out_fail;
		}

		cacheable_pages = pages->cacheable_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			cacheable_pages[page_idx] = cdf_mem_malloc(PAGE_SIZE);
			if (!cacheable_pages[page_idx]) {
				cdf_print("cacheable page alloc fail, pi %d",
					page_idx);
				goto page_alloc_fail;
			}
		}
		pages->dma_pages = NULL;
	} else {
		pages->dma_pages = cdf_mem_malloc(
			pages->num_pages * sizeof(struct cdf_mem_dma_page_t));
		if (!pages->dma_pages) {
			cdf_print("dmaable page storage alloc fail");
			goto out_fail;
		}

		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				cdf_os_mem_alloc_consistent(osdev, PAGE_SIZE,
					&dma_pages->page_p_addr, memctxt);
			if (!dma_pages->page_v_addr_start) {
				cdf_print("dmaable page alloc fail pi %d",
					page_idx);
				goto page_alloc_fail;
			}
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + PAGE_SIZE;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
	}
	return;

page_alloc_fail:
	if (cacheable) {
		for (i = 0; i < page_idx; i++)
			cdf_mem_free(pages->cacheable_pages[i]);
		cdf_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (i = 0; i < page_idx; i++) {
			cdf_os_mem_free_consistent(osdev, PAGE_SIZE,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		cdf_mem_free(pages->dma_pages);
	}

out_fail:
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
	return;
}
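The page accounting above is plain integer arithmetic: elements per page is PAGE_SIZE / element_size, and the page count is rounded up when there is a remainder. A standalone check with hypothetical sizes (4096-byte pages, 112-byte elements, 1100 elements gives 36 elements per page and 31 pages):

/* Standalone check of the page-count arithmetic in cdf_mem_multi_pages_alloc().
 * page_size, element_size and element_num are hypothetical sample values.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int page_size = 4096;
	const unsigned int element_size = 112;
	const unsigned int element_num = 1100;

	unsigned int per_page = page_size / element_size;	/* 36 */
	unsigned int num_pages = element_num / per_page;	/* 30 */

	if (element_num % per_page)
		num_pages++;					/* 31 */

	printf("%u elements/page, %u pages for %u elements\n",
	       per_page, num_pages, element_num);
	return 0;
}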
Example No. 14
0
struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
				   struct ol_txrx_vdev_t *vdev,
				   cdf_nbuf_t netbuf,
				   struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *tx_desc;
	unsigned int i;
	uint32_t num_frags;

	msdu_info->htt.info.vdev_id = vdev->vdev_id;
	msdu_info->htt.action.cksum_offload = cdf_nbuf_get_tx_cksum(netbuf);
	switch (cdf_nbuf_get_exemption_type(netbuf)) {
	case CDF_NBUF_EXEMPT_NO_EXEMPTION:
	case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
		/* We want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 1;
		break;
	case CDF_NBUF_EXEMPT_ALWAYS:
		/* We don't want to encrypt this frame */
		msdu_info->htt.action.do_encrypt = 0;
		break;
	default:
		cdf_assert(0);
		break;
	}

	/* allocate the descriptor */
	tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
	if (!tx_desc)
		return NULL;

	/* initialize the SW tx descriptor */
	tx_desc->netbuf = netbuf;

	if (msdu_info->tso_info.is_tso) {
		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
		tx_desc->pkt_type = ol_tx_frm_tso;
		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, netbuf);
	} else {
		tx_desc->pkt_type = ol_tx_frm_std;
	}

	/* initialize the HW tx descriptor */

	htt_tx_desc_init(pdev->htt_pdev, tx_desc->htt_tx_desc,
			 tx_desc->htt_tx_desc_paddr,
			 ol_tx_desc_id(pdev, tx_desc), netbuf, &msdu_info->htt,
			 &msdu_info->tso_info,
			 NULL, vdev->opmode == wlan_op_mode_ocb);

	/*
	 * Initialize the fragmentation descriptor.
	 * Skip the prefix fragment (HTT tx descriptor) that was added
	 * during the call to htt_tx_desc_init above.
	 */
	num_frags = cdf_nbuf_get_num_frags(netbuf);
	/* num_frags are expected to be 2 max */
	num_frags = (num_frags > CVG_NBUF_MAX_EXTRA_FRAGS)
		? CVG_NBUF_MAX_EXTRA_FRAGS
		: num_frags;
#if defined(HELIUMPLUS_PADDR64)
	/*
	 * Use num_frags - 1, since 1 frag is used to store
	 * the HTT/HTC descriptor
	 * Refer to htt_tx_desc_init()
	 */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
			      num_frags - 1);
#else /* !defined(HELIUMPLUS_PADDR64) */
	htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
			      num_frags - 1);
#endif /* defined(HELIUMPLUS_PADDR64) */

	if (msdu_info->tso_info.is_tso) {
		htt_tx_desc_fill_tso_info(pdev->htt_pdev,
			 tx_desc->htt_frag_desc, &msdu_info->tso_info);
		TXRX_STATS_TSO_SEG_UPDATE(pdev,
			 msdu_info->tso_info.curr_seg->seg);
	} else {
		for (i = 1; i < num_frags; i++) {
			cdf_size_t frag_len;
			uint32_t frag_paddr;

			frag_len = cdf_nbuf_get_frag_len(netbuf, i);
			frag_paddr = cdf_nbuf_get_frag_paddr_lo(netbuf, i);
#if defined(HELIUMPLUS_PADDR64)
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc, i - 1,
				 frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
			cdf_print("%s:%d: htt_fdesc=%p frag_paddr=%u len=%zu\n",
					  __func__, __LINE__, tx_desc->htt_frag_desc,
					  frag_paddr, frag_len);
			dump_pkt(netbuf, frag_paddr, 64);
#endif /* HELIUMPLUS_DEBUG */
#else /* !defined(HELIUMPLUS_PADDR64) */
			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc, i - 1,
							 frag_paddr, frag_len);
#endif /* defined(HELIUMPLUS_PADDR64) */
		}
	}

#if defined(HELIUMPLUS_DEBUG)
	dump_frag_desc("ol_tx_desc_ll()", tx_desc);
#endif
	return tx_desc;
}
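Since htt_tx_desc_init() occupies fragment 0 with the HTT/HTC descriptor, the loop above starts at nbuf fragment 1 and writes it into hardware slot i - 1, after capping num_frags. A tiny sketch of that mapping (the cap value of 2 is an assumption based on the "num_frags are expected to be 2 max" comment):

/* Sketch of the fragment index mapping in ol_tx_desc_ll(): nbuf fragment 0
 * holds the HTT/HTC descriptor, so payload fragments start at index 1 and
 * land in hardware slot i - 1.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int max_frags = 2;	/* assumed CVG_NBUF_MAX_EXTRA_FRAGS */
	unsigned int num_frags = 3;		/* hypothetical nbuf fragment count */
	unsigned int i;

	if (num_frags > max_frags)
		num_frags = max_frags;

	printf("hardware descriptor carries %u payload fragment(s)\n",
	       num_frags - 1);
	for (i = 1; i < num_frags; i++)
		printf("nbuf fragment %u -> hardware slot %u\n", i, i - 1);
	return 0;
}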
Example No. 15
0
static void
htt_t2h_stats_tx_selfgen_buf_stats_print(
	struct wlan_dbg_tx_selfgen_stats *stats_ptr)
{
	cdf_print("Tx selfgen Buffer Statistics:\n");
	cdf_print("  %u su_ndpa\n",
			stats_ptr->su_ndpa);
	cdf_print("  %u mu_ndp\n",
			stats_ptr->mu_ndp);
	cdf_print("  %u mu_ndpa\n",
			stats_ptr->mu_ndpa);
	cdf_print("  %u mu_ndp\n",
			stats_ptr->mu_ndp);
	cdf_print("  %u mu_brpoll_1\n",
			stats_ptr->mu_brpoll_1);
	cdf_print("  %u mu_brpoll_2\n",
			stats_ptr->mu_brpoll_2);
	cdf_print("  %u mu_bar_1\n",
			stats_ptr->mu_bar_1);
	cdf_print("  %u mu_bar_2\n",
			stats_ptr->mu_bar_2);
	cdf_print("  %u cts_burst\n",
			stats_ptr->cts_burst);
	cdf_print("  %u su_ndp_err\n",
			stats_ptr->su_ndp_err);
	cdf_print("  %u su_ndpa_err\n",
			stats_ptr->su_ndpa_err);
	cdf_print("  %u mu_ndp_err\n",
			stats_ptr->mu_ndp_err);
	cdf_print("  %u mu_brp1_err\n",
			stats_ptr->mu_brp1_err);
	cdf_print("  %u mu_brp2_err\n",
			stats_ptr->mu_brp2_err);
}
Example No. 16
0
static void
htt_t2h_stats_rx_rem_buf_stats_print(
    struct rx_remote_buffer_mgmt_stats *stats_ptr, int concise)
{
	cdf_print("Rx Remote Buffer Statistics:\n");
	cdf_print("  %u MSDU's reaped for Rx processing\n",
			stats_ptr->remote_reaped);
	cdf_print("  %u MSDU's recycled within firmware\n",
			stats_ptr->remote_recycled);
	cdf_print("  %u MSDU's stored by Data Rx\n",
			stats_ptr->data_rx_msdus_stored);
	cdf_print("  %u HTT indications from WAL Rx MSDU\n",
			stats_ptr->wal_rx_ind);
	cdf_print("  %u HTT indications unconsumed from WAL Rx MSDU\n",
			stats_ptr->wal_rx_ind_unconsumed);
	cdf_print("  %u HTT indications from Data Rx MSDU\n",
			stats_ptr->data_rx_ind);
	cdf_print("  %u HTT indications unconsumed from Data Rx MSDU\n",
			stats_ptr->data_rx_ind_unconsumed);
	cdf_print("  %u HTT indications from ATHBUF\n",
			stats_ptr->athbuf_rx_ind);
	cdf_print("  %u Remote buffers requested for refill\n",
			stats_ptr->refill_buf_req);
	cdf_print("  %u Remote buffers filled by host\n",
			stats_ptr->refill_buf_rsp);
	cdf_print("  %u times MAC has no buffers\n",
			stats_ptr->mac_no_bufs);
	cdf_print("  %u times f/w write & read indices on MAC ring are equal\n",
			stats_ptr->fw_indices_equal);
	cdf_print("  %u times f/w has no remote buffers to post to MAC\n",
			stats_ptr->host_no_bufs);
}
Example No. 17
0
static void
htt_t2h_stats_txbf_info_buf_stats_print(
	struct wlan_dbg_txbf_data_stats *stats_ptr)
{
	cdf_print("TXBF data Statistics:\n");
	cdf_print("tx_txbf_vht (0..9): ");
	cdf_print("%u, %u, %u, %u, %u, %u, %u, %u, %u, %d\n",
		  stats_ptr->tx_txbf_vht[0],
		  stats_ptr->tx_txbf_vht[1],
		  stats_ptr->tx_txbf_vht[2],
		  stats_ptr->tx_txbf_vht[3],
		  stats_ptr->tx_txbf_vht[4],
		  stats_ptr->tx_txbf_vht[5],
		  stats_ptr->tx_txbf_vht[6],
		  stats_ptr->tx_txbf_vht[7],
		  stats_ptr->tx_txbf_vht[8],
		  stats_ptr->tx_txbf_vht[9]);
	cdf_print("rx_txbf_vht (0..9): ");
	cdf_print("%u, %u, %u, %u, %u, %u, %u, %u, %u, %u\n",
		  stats_ptr->rx_txbf_vht[0],
		  stats_ptr->rx_txbf_vht[1],
		  stats_ptr->rx_txbf_vht[2],
		  stats_ptr->rx_txbf_vht[3],
		  stats_ptr->rx_txbf_vht[4],
		  stats_ptr->rx_txbf_vht[5],
		  stats_ptr->rx_txbf_vht[6],
		  stats_ptr->rx_txbf_vht[7],
		  stats_ptr->rx_txbf_vht[8],
		  stats_ptr->rx_txbf_vht[9]);
	cdf_print("tx_txbf_ht (0..7): ");
	cdf_print("%u, %u, %u, %u, %u, %u, %u, %u\n",
		  stats_ptr->tx_txbf_ht[0],
		  stats_ptr->tx_txbf_ht[1],
		  stats_ptr->tx_txbf_ht[2],
		  stats_ptr->tx_txbf_ht[3],
		  stats_ptr->tx_txbf_ht[4],
		  stats_ptr->tx_txbf_ht[5],
		  stats_ptr->tx_txbf_ht[6],
		  stats_ptr->tx_txbf_ht[7]);
	cdf_print("tx_txbf_ofdm (0..7): ");
	cdf_print("%u, %u, %u, %u, %u, %u, %u, %u\n",
		  stats_ptr->tx_txbf_ofdm[0],
		  stats_ptr->tx_txbf_ofdm[1],
		  stats_ptr->tx_txbf_ofdm[2],
		  stats_ptr->tx_txbf_ofdm[3],
		  stats_ptr->tx_txbf_ofdm[4],
		  stats_ptr->tx_txbf_ofdm[5],
		  stats_ptr->tx_txbf_ofdm[6],
		  stats_ptr->tx_txbf_ofdm[7]);
	cdf_print("tx_txbf_cck (0..6): ");
	cdf_print("%u, %u, %u, %u, %u, %u, %u\n",
		  stats_ptr->tx_txbf_cck[0],
		  stats_ptr->tx_txbf_cck[1],
		  stats_ptr->tx_txbf_cck[2],
		  stats_ptr->tx_txbf_cck[3],
		  stats_ptr->tx_txbf_cck[4],
		  stats_ptr->tx_txbf_cck[5],
		  stats_ptr->tx_txbf_cck[6]);
}
Example No. 18
0
static void
htt_t2h_stats_rx_reorder_stats_print(struct rx_reorder_stats *stats_ptr,
				     int concise)
{
	cdf_print("Rx reorder statistics:\n");
	cdf_print("  %u non-QoS frames received\n", stats_ptr->deliver_non_qos);
	cdf_print("  %u frames received in-order\n",
		  stats_ptr->deliver_in_order);
	cdf_print("  %u frames flushed due to timeout\n",
		  stats_ptr->deliver_flush_timeout);
	cdf_print("  %u frames flushed due to moving out of window\n",
		  stats_ptr->deliver_flush_oow);
	cdf_print("  %u frames flushed due to receiving DELBA\n",
		  stats_ptr->deliver_flush_delba);
	cdf_print("  %u frames discarded due to FCS error\n",
		  stats_ptr->fcs_error);
	cdf_print("  %u frames discarded due to invalid peer\n",
		  stats_ptr->invalid_peer);
	cdf_print
		("  %u frames discarded due to duplication (non aggregation)\n",
		stats_ptr->dup_non_aggr);
	cdf_print("  %u frames discarded due to duplication in reorder queue\n",
		 stats_ptr->dup_in_reorder);
	cdf_print("  %u frames discarded due to processed before\n",
		  stats_ptr->dup_past);
	cdf_print("  %u times reorder timeout happened\n",
		  stats_ptr->reorder_timeout);
	cdf_print("  %u times incorrect bar received\n",
		  stats_ptr->invalid_bar_ssn);
	cdf_print("  %u times bar ssn reset happened\n",
			stats_ptr->ssn_reset);
	cdf_print("  %u times flushed due to peer delete\n",
			stats_ptr->deliver_flush_delpeer);
	cdf_print("  %u times flushed due to offload\n",
			stats_ptr->deliver_flush_offload);
	cdf_print("  %u times flushed due to ouf of buffer\n",
			stats_ptr->deliver_flush_oob);
	cdf_print("  %u MPDU's dropped due to PN check fail\n",
			stats_ptr->pn_fail);
	cdf_print("  %u MPDU's dropped due to lack of memory\n",
			stats_ptr->store_fail);
	cdf_print("  %u times tid pool alloc succeeded\n",
			stats_ptr->tid_pool_alloc_succ);
	cdf_print("  %u times MPDU pool alloc succeeded\n",
			stats_ptr->mpdu_pool_alloc_succ);
	cdf_print("  %u times MSDU pool alloc succeeded\n",
			stats_ptr->msdu_pool_alloc_succ);
	cdf_print("  %u times tid pool alloc failed\n",
			stats_ptr->tid_pool_alloc_fail);
	cdf_print("  %u times MPDU pool alloc failed\n",
			stats_ptr->mpdu_pool_alloc_fail);
	cdf_print("  %u times MSDU pool alloc failed\n",
			stats_ptr->msdu_pool_alloc_fail);
	cdf_print("  %u times tid pool freed\n",
			stats_ptr->tid_pool_free);
	cdf_print("  %u times MPDU pool freed\n",
			stats_ptr->mpdu_pool_free);
	cdf_print("  %u times MSDU pool freed\n",
			stats_ptr->msdu_pool_free);
	cdf_print("  %u MSDUs undelivered to HTT, queued to Rx MSDU free list\n",
			stats_ptr->msdu_queued);
	cdf_print("  %u MSDUs released from Rx MSDU list to MAC ring\n",
			stats_ptr->msdu_recycled);
	cdf_print("  %u MPDUs with invalid peer but A2 found in AST\n",
			stats_ptr->invalid_peer_a2_in_ast);
	cdf_print("  %u MPDUs with invalid peer but A3 found in AST\n",
			stats_ptr->invalid_peer_a3_in_ast);
	cdf_print("  %u MPDUs with invalid peer, Broadcast or Mulitcast frame\n",
			stats_ptr->invalid_peer_bmc_mpdus);
	cdf_print("  %u MSDUs with err attention word\n",
			stats_ptr->rxdesc_err_att);
	cdf_print("  %u MSDUs with flag of peer_idx_invalid\n",
			stats_ptr->rxdesc_err_peer_idx_inv);
	cdf_print("  %u MSDUs with  flag of peer_idx_timeout\n",
			stats_ptr->rxdesc_err_peer_idx_to);
	cdf_print("  %u MSDUs with  flag of overflow\n",
			stats_ptr->rxdesc_err_ov);
	cdf_print("  %u MSDUs with  flag of msdu_length_err\n",
			stats_ptr->rxdesc_err_msdu_len);
	cdf_print("  %u MSDUs with  flag of mpdu_length_err\n",
			stats_ptr->rxdesc_err_mpdu_len);
	cdf_print("  %u MSDUs with  flag of tkip_mic_err\n",
			stats_ptr->rxdesc_err_tkip_mic);
	cdf_print("  %u MSDUs with  flag of decrypt_err\n",
			stats_ptr->rxdesc_err_decrypt);
	cdf_print("  %u MSDUs with  flag of fcs_err\n",
			stats_ptr->rxdesc_err_fcs);
	cdf_print("  %u Unicast frames with invalid peer handler\n",
			stats_ptr->rxdesc_uc_msdus_inv_peer);
	cdf_print("  %u unicast frame directly to DUT with invalid peer handler\n",
			stats_ptr->rxdesc_direct_msdus_inv_peer);
	cdf_print("  %u Broadcast/Multicast frames with invalid peer handler\n",
			stats_ptr->rxdesc_bmc_msdus_inv_peer);
	cdf_print("  %u MSDUs dropped due to no first MSDU flag\n",
			stats_ptr->rxdesc_no_1st_msdu);
	cdf_print("  %u MSDUs dropped due to ring overflow\n",
			stats_ptr->msdu_drop_ring_ov);
	cdf_print("  %u MSDUs dropped due to FC mismatch\n",
			stats_ptr->msdu_drop_fc_mismatch);
	cdf_print("  %u MSDUs dropped due to mgt frame in Remote ring\n",
			stats_ptr->msdu_drop_mgmt_remote_ring);
	cdf_print("  %u MSDUs dropped due to misc non error\n",
			stats_ptr->msdu_drop_misc);
	cdf_print("  %u MSDUs go to offload before reorder\n",
			stats_ptr->offload_msdu_wal);
	cdf_print("  %u data frame dropped by offload after reorder\n",
			stats_ptr->offload_msdu_reorder);
	cdf_print("  %u  MPDUs with SN in the past & within BA window\n",
			stats_ptr->dup_past_within_window);
	cdf_print("  %u  MPDUs with SN in the past & outside BA window\n",
			stats_ptr->dup_past_outside_window);
}
Example No. 19
0
static void
htt_t2h_stats_pdev_stats_print(struct wlan_dbg_stats *wlan_pdev_stats,
			       int concise)
{
	struct wlan_dbg_tx_stats *tx = &wlan_pdev_stats->tx;
	struct wlan_dbg_rx_stats *rx = &wlan_pdev_stats->rx;

	cdf_print("WAL Pdev stats:\n");
	cdf_print("\n### Tx ###\n");

	/* Num HTT cookies queued to dispatch list */
	cdf_print("comp_queued       :\t%d\n", tx->comp_queued);
	/* Num HTT cookies dispatched */
	cdf_print("comp_delivered    :\t%d\n", tx->comp_delivered);
	/* Num MSDU queued to WAL */
	cdf_print("msdu_enqued       :\t%d\n", tx->msdu_enqued);
	/* Num MPDU queued to WAL */
	cdf_print("mpdu_enqued       :\t%d\n", tx->mpdu_enqued);
	/* Num MSDUs dropped by WMM limit */
	cdf_print("wmm_drop          :\t%d\n", tx->wmm_drop);
	/* Num Local frames queued */
	cdf_print("local_enqued      :\t%d\n", tx->local_enqued);
	/* Num Local frames done */
	cdf_print("local_freed       :\t%d\n", tx->local_freed);
	/* Num queued to HW */
	cdf_print("hw_queued         :\t%d\n", tx->hw_queued);
	/* Num PPDU reaped from HW */
	cdf_print("hw_reaped         :\t%d\n", tx->hw_reaped);
	/* Num MAC underruns */
	cdf_print("mac underrun      :\t%d\n", tx->underrun);
	/* Num PHY underruns */
	cdf_print("phy underrun      :\t%d\n", tx->phy_underrun);
	/* Num PPDUs cleaned up in TX abort */
	cdf_print("tx_abort          :\t%d\n", tx->tx_abort);
	/* Num MPDUs requed by SW */
	cdf_print("mpdus_requed      :\t%d\n", tx->mpdus_requed);
	/* Excessive retries */
	cdf_print("excess retries    :\t%d\n", tx->tx_ko);
	/* last data rate */
	cdf_print("last rc           :\t%d\n", tx->data_rc);
	/* scheduler self triggers */
	cdf_print("sched self trig   :\t%d\n", tx->self_triggers);
	/* SW retry failures */
	cdf_print("ampdu retry failed:\t%d\n", tx->sw_retry_failure);
	/* illegal phy rate errors */
	cdf_print("illegal rate errs :\t%d\n", tx->illgl_rate_phy_err);
	/* pdev continuous excessive retries */
	cdf_print("pdev cont xretry  :\t%d\n", tx->pdev_cont_xretry);
	/* pdev tx timeouts */
	cdf_print("pdev tx timeout   :\t%d\n", tx->pdev_tx_timeout);
	/* pdev resets  */
	cdf_print("pdev resets       :\t%d\n", tx->pdev_resets);
	/* PPDU > txop duration  */
	cdf_print("ppdu txop ovf     :\t%d\n", tx->txop_ovf);

	cdf_print("\n### Rx ###\n");
	/* Cnts any change in ring routing mid-ppdu */
	cdf_print("ppdu_route_change :\t%d\n", rx->mid_ppdu_route_change);
	/* Total number of statuses processed */
	cdf_print("status_rcvd       :\t%d\n", rx->status_rcvd);
	/* Extra frags on rings 0-3 */
	cdf_print("r0_frags          :\t%d\n", rx->r0_frags);
	cdf_print("r1_frags          :\t%d\n", rx->r1_frags);
	cdf_print("r2_frags          :\t%d\n", rx->r2_frags);
	cdf_print("r3_frags          :\t%d\n", rx->r3_frags);
	/* MSDUs / MPDUs delivered to HTT */
	cdf_print("htt_msdus         :\t%d\n", rx->htt_msdus);
	cdf_print("htt_mpdus         :\t%d\n", rx->htt_mpdus);
	/* MSDUs / MPDUs delivered to local stack */
	cdf_print("loc_msdus         :\t%d\n", rx->loc_msdus);
	cdf_print("loc_mpdus         :\t%d\n", rx->loc_mpdus);
	/* AMSDUs that have more MSDUs than the status ring size */
	cdf_print("oversize_amsdu    :\t%d\n", rx->oversize_amsdu);
	/* Number of PHY errors */
	cdf_print("phy_errs          :\t%d\n", rx->phy_errs);
	/* Number of PHY errors dropped */
	cdf_print("phy_errs dropped  :\t%d\n", rx->phy_err_drop);
	/* Number of mpdu errors - FCS, MIC, ENC etc. */
	cdf_print("mpdu_errs         :\t%d\n", rx->mpdu_errs);

}
Example No. 20
0
void htt_tx_desc_display(void *tx_desc)
{
	struct htt_tx_msdu_desc_t *htt_tx_desc;

	htt_tx_desc = (struct htt_tx_msdu_desc_t *)tx_desc;

	/* only works for little-endian */
	cdf_print("HTT tx desc (@ %p):\n", htt_tx_desc);
	cdf_print("  msg type = %d\n", htt_tx_desc->msg_type);
	cdf_print("  pkt subtype = %d\n", htt_tx_desc->pkt_subtype);
	cdf_print("  pkt type = %d\n", htt_tx_desc->pkt_type);
	cdf_print("  vdev ID = %d\n", htt_tx_desc->vdev_id);
	cdf_print("  ext TID = %d\n", htt_tx_desc->ext_tid);
	cdf_print("  postponed = %d\n", htt_tx_desc->postponed);
#if HTT_PADDR64
	cdf_print("  reserved_dword0_bits28 = %d\n", htt_tx_desc->reserved_dword0_bits28);
	cdf_print("  cksum_offload = %d\n", htt_tx_desc->cksum_offload);
	cdf_print("  tx_compl_req= %d\n", htt_tx_desc->tx_compl_req);
#else /* !HTT_PADDR64 */
	cdf_print("  batch more = %d\n", htt_tx_desc->more_in_batch);
#endif /* HTT_PADDR64 */
	cdf_print("  length = %d\n", htt_tx_desc->len);
	cdf_print("  id = %d\n", htt_tx_desc->id);
#if HTT_PADDR64
	cdf_print("  frag desc addr.lo = %#x\n",
		  htt_tx_desc->frags_desc_ptr.lo);
	cdf_print("  frag desc addr.hi = %#x\n",
		  htt_tx_desc->frags_desc_ptr.hi);
	cdf_print("  peerid = %d\n", htt_tx_desc->peerid);
	cdf_print("  chanfreq = %d\n", htt_tx_desc->chanfreq);
#else /* ! HTT_PADDR64 */
	cdf_print("  frag desc addr = %#x\n", htt_tx_desc->frags_desc_ptr);
#endif /* HTT_PADDR64 */
}
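The "only works for little-endian" note holds because htt_tx_msdu_desc_t is built from C bit-fields, whose packing relative to the dword layout depends on the compiler and byte order. A standalone illustration with a hypothetical bit-field word (not the real descriptor layout):

/* Hypothetical 32-bit word carved into bit-fields, to show why code that
 * prints bit-field members and assumes a fixed dword layout is only valid
 * for one endianness / one compiler convention.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_word {
	uint32_t msg_type:8;
	uint32_t subtype:8;
	uint32_t flags:16;
};

int main(void)
{
	struct demo_word w = { .msg_type = 0x11, .subtype = 0x22, .flags = 0x3344 };
	uint32_t raw;

	memcpy(&raw, &w, sizeof(raw));
	/* On a typical little-endian ABI this prints 0x33442211; other ABIs
	 * may pack the fields differently, giving a different raw value. */
	printf("raw dword = 0x%08x\n", raw);
	return 0;
}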
Example No. 21
0
/**
 * htt_tx_ipa_uc_attach() - attach htt ipa uc tx resource
 * @pdev: htt context
 * @uc_tx_buf_sz: single tx buffer size
 * @uc_tx_buf_cnt: total tx buffer count
 * @uc_tx_partition_base: tx buffer partition start
 *
 * Return: 0 success
 *         ENOBUFS No memory fail
 */
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
			 unsigned int uc_tx_buf_sz,
			 unsigned int uc_tx_buf_cnt,
			 unsigned int uc_tx_partition_base)
{
	unsigned int tx_buffer_count;
	cdf_nbuf_t buffer_vaddr;
	cdf_dma_addr_t buffer_paddr;
	uint32_t *header_ptr;
	uint32_t *ring_vaddr;
	int return_code = 0;
	unsigned int tx_comp_ring_size;

	/* Allocate CE Write Index WORD */
	pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
		cdf_os_mem_alloc_consistent(
			pdev->osdev,
			4,
			&pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
			cdf_get_dma_mem_context(
				(&pdev->ipa_uc_tx_rsc.tx_ce_idx),
				memctx));
	if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
		cdf_print("%s: CE Write Index WORD alloc fail", __func__);
		return -ENOBUFS;
	}

	/* Allocate TX COMP Ring */
	tx_comp_ring_size = uc_tx_buf_cnt * sizeof(cdf_nbuf_t);
	pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
		cdf_os_mem_alloc_consistent(
			pdev->osdev,
			tx_comp_ring_size,
			&pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
			cdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
						 tx_comp_base),
						memctx));
	if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
		cdf_print("%s: TX COMP ring alloc fail", __func__);
		return_code = -ENOBUFS;
		goto free_tx_ce_idx;
	}

	cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, tx_comp_ring_size);

	/* Allocate TX BUF vAddress Storage */
	pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
		(cdf_nbuf_t *) cdf_mem_malloc(uc_tx_buf_cnt *
					      sizeof(cdf_nbuf_t));
	if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
		cdf_print("%s: TX BUF POOL vaddr storage alloc fail", __func__);
		return_code = -ENOBUFS;
		goto free_tx_comp_base;
	}
	cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
		     uc_tx_buf_cnt * sizeof(cdf_nbuf_t));

	ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
	/* Allocate TX buffers as many as possible */
	for (tx_buffer_count = 0;
	     tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
		buffer_vaddr = cdf_nbuf_alloc(pdev->osdev,
					      uc_tx_buf_sz, 0, 4, false);
		if (!buffer_vaddr) {
			cdf_print("%s: TX BUF alloc fail, loop index: %d",
				  __func__, tx_buffer_count);
			break;
		}

		/* Init buffer */
		cdf_mem_zero(cdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
		header_ptr = (uint32_t *) cdf_nbuf_data(buffer_vaddr);

		/* HTT control header */
		*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
		header_ptr++;

		/* PKT ID */
		*header_ptr |= ((uint16_t) uc_tx_partition_base +
				tx_buffer_count) << 16;

		cdf_nbuf_map(pdev->osdev, buffer_vaddr, CDF_DMA_BIDIRECTIONAL);
		buffer_paddr = cdf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
		header_ptr++;

		/* Frag Desc Pointer */
		/* 64bits descriptor, Low 32bits */
		*header_ptr = (uint32_t) (buffer_paddr + 20);
		header_ptr++;

		/* 64bits descriptor, high 32bits */
		*header_ptr = 0;
		header_ptr++;

		/* chanreq, peerid */
		*header_ptr = 0xFFFFFFFF;
		header_ptr++;

		/* FRAG Header */
		/* 6 words TSO header */
		header_ptr += 6;
		*header_ptr = buffer_paddr + 64;

		*ring_vaddr = buffer_paddr;
		pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
			buffer_vaddr;
		/* Memory barrier to ensure actual value updated */

		ring_vaddr += 2;
	}

	pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

	return 0;

free_tx_comp_base:
	cdf_os_mem_free_consistent(pdev->osdev,
				   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->
								ctrl_pdev) * 4,
				   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
				   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
				   cdf_get_dma_mem_context((&pdev->
							    ipa_uc_tx_rsc.
							    tx_comp_base),
							   memctx));
free_tx_ce_idx:
	cdf_os_mem_free_consistent(pdev->osdev,
				   4,
				   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
				   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
				   cdf_get_dma_mem_context((&pdev->
							    ipa_uc_tx_rsc.
							    tx_ce_idx),
							   memctx));
	return return_code;
}
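Each pre-allocated tx buffer above gets a small header: word 0 is the default HTT offload header, word 1 carries the packet ID (partition base + buffer index) in its upper 16 bits, words 2-3 hold the 64-bit fragment-descriptor address (buffer physical address + 20), word 4 is the chanfreq/peerid word set to 0xFFFFFFFF, and the fragment header lands after the 6-word TSO area at buffer physical address + 64. A standalone sketch of that stamping into a plain array (the offsets mirror the code above; HDR_DEFAULT and the sample addresses are placeholders):

/* Sketch of the per-buffer header stamping done in htt_tx_ipa_uc_attach();
 * HDR_DEFAULT stands in for HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT.
 */
#include <stdint.h>
#include <stdio.h>

#define HDR_DEFAULT 0x00000000u

static void stamp_tx_buf(uint32_t *hdr, uint16_t partition_base,
			 unsigned int buf_idx, uint32_t buf_paddr)
{
	hdr[0] = HDR_DEFAULT;			/* HTT control header */
	/* PKT ID in the upper 16 bits (word assumed pre-zeroed) */
	hdr[1] |= (uint32_t)(partition_base + buf_idx) << 16;
	hdr[2] = buf_paddr + 20;		/* frag desc ptr, low 32 bits */
	hdr[3] = 0;				/* frag desc ptr, high 32 bits */
	hdr[4] = 0xFFFFFFFF;			/* chanreq, peerid */
	hdr[11] = buf_paddr + 64;		/* frag header after 6-word TSO area */
}

int main(void)
{
	uint32_t hdr[16] = { 0 };
	unsigned int i;

	stamp_tx_buf(hdr, 0x2000, 3, 0x80001000);
	for (i = 0; i <= 11; i++)
		printf("word %2u = 0x%08x\n", i, hdr[i]);
	return 0;
}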
Example No. 22
0
A_STATUS htc_start(HTC_HANDLE HTCHandle)
{
	cdf_nbuf_t netbuf;
	A_STATUS status = A_OK;
	HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(HTCHandle);
	HTC_SETUP_COMPLETE_EX_MSG *pSetupComp;
	HTC_PACKET *pSendPacket;

	AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("htc_start Enter\n"));

	do {

		htc_config_target_hif_pipe(target);

		/* allocate a buffer to send */
		pSendPacket = htc_alloc_control_tx_packet(target);
		if (NULL == pSendPacket) {
			AR_DEBUG_ASSERT(false);
			cdf_print("%s: allocControlTxPacket failed\n",
				  __func__);
			status = A_NO_MEMORY;
			break;
		}

		netbuf =
			(cdf_nbuf_t) GET_HTC_PACKET_NET_BUF_CONTEXT(pSendPacket);
		/* assemble setup complete message */
		cdf_nbuf_put_tail(netbuf, sizeof(HTC_SETUP_COMPLETE_EX_MSG));
		pSetupComp =
			(HTC_SETUP_COMPLETE_EX_MSG *) cdf_nbuf_data(netbuf);
		A_MEMZERO(pSetupComp, sizeof(HTC_SETUP_COMPLETE_EX_MSG));

		HTC_SET_FIELD(pSetupComp, HTC_SETUP_COMPLETE_EX_MSG,
			      MESSAGEID, HTC_MSG_SETUP_COMPLETE_EX_ID);

		if (!htc_credit_flow) {
			AR_DEBUG_PRINTF(ATH_DEBUG_INIT,
					("HTC will not use TX credit flow control\n"));
			pSetupComp->SetupFlags |=
				HTC_SETUP_COMPLETE_FLAGS_DISABLE_TX_CREDIT_FLOW;
		} else {
			AR_DEBUG_PRINTF(ATH_DEBUG_INIT,
					("HTC using TX credit flow control\n"));
		}

#ifdef HIF_SDIO
#if ENABLE_BUNDLE_RX
		if (HTC_ENABLE_BUNDLE(target))
			pSetupComp->SetupFlags |=
				HTC_SETUP_COMPLETE_FLAGS_ENABLE_BUNDLE_RECV;
#endif /* ENABLE_BUNDLE_RX */
#endif /* HIF_SDIO */

		SET_HTC_PACKET_INFO_TX(pSendPacket,
				       NULL,
				       (A_UINT8 *) pSetupComp,
				       sizeof(HTC_SETUP_COMPLETE_EX_MSG),
				       ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		status = htc_send_pkt((HTC_HANDLE) target, pSendPacket);
		if (A_FAILED(status)) {
			break;
		}

	} while (false);

	AR_DEBUG_PRINTF(ATH_DEBUG_TRC, ("htc_start Exit\n"));
	return status;
}
Example No. 23
0
void
htt_t2h_tx_ppdu_log_print(struct ol_fw_tx_dbg_ppdu_msg_hdr *hdr,
			  struct ol_fw_tx_dbg_ppdu_base *record,
			  int length, int concise)
{
	int i;
	int record_size;
	int num_records;

	record_size =
		sizeof(*record) +
		hdr->mpdu_bytes_array_len * sizeof(uint16_t) +
		hdr->mpdu_msdus_array_len * sizeof(uint8_t) +
		hdr->msdu_bytes_array_len * sizeof(uint16_t);
	num_records = (length - sizeof(*hdr)) / record_size;
	cdf_print("Tx PPDU log elements:\n");

	for (i = 0; i < num_records; i++) {
		uint16_t start_seq_num;
		uint16_t start_pn_lsbs;
		uint8_t num_mpdus;
		uint16_t peer_id;
		uint8_t ext_tid;
		uint8_t rate_code;
		uint8_t rate_flags;
		uint8_t tries;
		uint8_t complete;
		uint32_t time_enqueue_us;
		uint32_t time_completion_us;
		uint32_t *msg_word = (uint32_t *) record;

		/* fields used for both concise and complete printouts */
		start_seq_num =
			((*(msg_word + OL_FW_TX_DBG_PPDU_START_SEQ_NUM_16)) &
			 OL_FW_TX_DBG_PPDU_START_SEQ_NUM_M) >>
			OL_FW_TX_DBG_PPDU_START_SEQ_NUM_S;
		complete =
			((*(msg_word + OL_FW_TX_DBG_PPDU_COMPLETE_16)) &
			 OL_FW_TX_DBG_PPDU_COMPLETE_M) >>
			OL_FW_TX_DBG_PPDU_COMPLETE_S;

		/* fields used only for complete printouts */
		if (!concise) {
#define BUF_SIZE 80
			char buf[BUF_SIZE];
			uint8_t *p8;
			time_enqueue_us =
				HTT_TICK_TO_USEC(record->timestamp_enqueue,
						 hdr->microsec_per_tick);
			time_completion_us =
				HTT_TICK_TO_USEC(record->timestamp_completion,
						 hdr->microsec_per_tick);

			start_pn_lsbs =
				((*
				  (msg_word +
				   OL_FW_TX_DBG_PPDU_START_PN_LSBS_16)) &
				 OL_FW_TX_DBG_PPDU_START_PN_LSBS_M) >>
				OL_FW_TX_DBG_PPDU_START_PN_LSBS_S;
			num_mpdus =
				((*(msg_word + OL_FW_TX_DBG_PPDU_NUM_MPDUS_16))&
				 OL_FW_TX_DBG_PPDU_NUM_MPDUS_M) >>
				OL_FW_TX_DBG_PPDU_NUM_MPDUS_S;
			peer_id =
				((*(msg_word + OL_FW_TX_DBG_PPDU_PEER_ID_16)) &
				 OL_FW_TX_DBG_PPDU_PEER_ID_M) >>
				OL_FW_TX_DBG_PPDU_PEER_ID_S;
			ext_tid =
				((*(msg_word + OL_FW_TX_DBG_PPDU_EXT_TID_16)) &
				 OL_FW_TX_DBG_PPDU_EXT_TID_M) >>
				OL_FW_TX_DBG_PPDU_EXT_TID_S;
			rate_code =
				((*(msg_word + OL_FW_TX_DBG_PPDU_RATE_CODE_16))&
				 OL_FW_TX_DBG_PPDU_RATE_CODE_M) >>
				OL_FW_TX_DBG_PPDU_RATE_CODE_S;
			rate_flags =
				((*(msg_word + OL_FW_TX_DBG_PPDU_RATEFLAGS_16))&
				 OL_FW_TX_DBG_PPDU_RATE_FLAGS_M) >>
				OL_FW_TX_DBG_PPDU_RATE_FLAGS_S;
			tries =
				((*(msg_word + OL_FW_TX_DBG_PPDU_TRIES_16)) &
				 OL_FW_TX_DBG_PPDU_TRIES_M) >>
				OL_FW_TX_DBG_PPDU_TRIES_S;

			cdf_print(" - PPDU tx to peer %d, TID %d\n", peer_id,
				  ext_tid);
			cdf_print
				("   start seq num= %u, start PN LSBs= %#04x\n",
				start_seq_num, start_pn_lsbs);
			cdf_print
				("   PPDU: %d MPDUs, (?) MSDUs, %d bytes\n",
				num_mpdus,
				 /* num_msdus - not yet computed in target */
				record->num_bytes);
			if (complete) {
				cdf_print
				      ("   enqueued: %u, completed: %u usec)\n",
				       time_enqueue_us, time_completion_us);
				cdf_print
					("   %d tries, last tx used rate %d ",
					 tries, rate_code);
				cdf_print("on %d MHz chan (flags = %#x)\n",
					  htt_rate_flags_to_mhz
					  (rate_flags), rate_flags);
				cdf_print
				      ("  enqueued and acked MPDU bitmaps:\n");
				htt_t2h_tx_ppdu_bitmaps_pr(msg_word +
					   OL_FW_TX_DBG_PPDU_ENQUEUED_LSBS_16,
							   msg_word +
					   OL_FW_TX_DBG_PPDU_BLOCK_ACK_LSBS_16);
			} else {
				cdf_print
				      ("  enqueued: %d us, not yet completed\n",
					time_enqueue_us);
			}
			/* skip the regular msg fields to reach the tail area */
			p8 = (uint8_t *) record;
			p8 += sizeof(struct ol_fw_tx_dbg_ppdu_base);
			if (hdr->mpdu_bytes_array_len) {
				htt_make_u16_list_str((uint32_t *) p8, buf,
						      BUF_SIZE,
						      hdr->
						      mpdu_bytes_array_len);
				cdf_print("   MPDU bytes: %s\n", buf);
			}
			p8 += hdr->mpdu_bytes_array_len * sizeof(uint16_t);
			if (hdr->mpdu_msdus_array_len) {
				htt_make_u8_list_str((uint32_t *) p8, buf,
						     BUF_SIZE,
						     hdr->mpdu_msdus_array_len);
				cdf_print("   MPDU MSDUs: %s\n", buf);
			}
			p8 += hdr->mpdu_msdus_array_len * sizeof(uint8_t);
			if (hdr->msdu_bytes_array_len) {
				htt_make_u16_list_str((uint32_t *) p8, buf,
						      BUF_SIZE,
						      hdr->
						      msdu_bytes_array_len);
				cdf_print("   MSDU bytes: %s\n", buf);
			}
		} else {
Example No. 24
0
/* WMI command API */
int wmi_unified_cmd_send(wmi_unified_t wmi_handle, wmi_buf_t buf, int len,
			 WMI_CMD_ID cmd_id)
{
	HTC_PACKET *pkt;
	A_STATUS status;
	struct ol_softc *scn;

	if (cdf_atomic_read(&wmi_handle->is_target_suspended) &&
	    ((WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID != cmd_id) &&
	     (WMI_PDEV_RESUME_CMDID != cmd_id))) {
		pr_err("%s: Target is suspended  could not send WMI command\n",
		       __func__);
		CDF_ASSERT(0);
		return -EBUSY;
	}

	/* Do sanity check on the TLV parameter structure */
	{
		void *buf_ptr = (void *)cdf_nbuf_data(buf);

		if (wmitlv_check_command_tlv_params(NULL, buf_ptr, len, cmd_id)
		    != 0) {
			cdf_print
				("\nERROR: %s: Invalid WMI Parameter Buffer for Cmd:%d\n",
				__func__, cmd_id);
			return -1;
		}
	}

	if (cdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
		pr_err("%s, Failed to send cmd %x, no memory\n",
		       __func__, cmd_id);
		return -ENOMEM;
	}

	WMI_SET_FIELD(cdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);

	cdf_atomic_inc(&wmi_handle->pending_cmds);
	if (cdf_atomic_read(&wmi_handle->pending_cmds) >= WMI_MAX_CMDS) {
		scn = cds_get_context(CDF_MODULE_ID_HIF);
		pr_err("\n%s: hostcredits = %d\n", __func__,
		       wmi_get_host_credits(wmi_handle));
		htc_dump_counter_info(wmi_handle->htc_handle);
		/* dump_ce_register(scn); */
		/* dump_ce_debug_register(scn->hif_sc); */
		cdf_atomic_dec(&wmi_handle->pending_cmds);
		pr_err("%s: MAX 1024 WMI Pending cmds reached.\n", __func__);
		CDF_BUG(0);
		return -EBUSY;
	}

	pkt = cdf_mem_malloc(sizeof(*pkt));
	if (!pkt) {
		cdf_atomic_dec(&wmi_handle->pending_cmds);
		pr_err("%s, Failed to alloc htc packet %x, no memory\n",
		       __func__, cmd_id);
		return -ENOMEM;
	}

	SET_HTC_PACKET_INFO_TX(pkt,
			       NULL,
			       cdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
	                       /* htt_host_data_dl_len(buf)+20 */
			       wmi_handle->wmi_endpoint_id, 0 /*htc_tag */ );

	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);

	wma_log_cmd_id(cmd_id);

#ifdef WMI_INTERFACE_EVENT_LOGGING
	cdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
	/*Record 16 bytes of WMI cmd data - exclude TLV and WMI headers */
	if (cmd_id == WMI_MGMT_TX_SEND_CMDID) {
		WMI_MGMT_COMMAND_RECORD(cmd_id,
					((uint32_t *)cdf_nbuf_data(buf) + 2));
	} else {
		WMI_COMMAND_RECORD(cmd_id, ((uint32_t *) cdf_nbuf_data(buf) +
					    2));
	}

	cdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif

	status = htc_send_pkt(wmi_handle->htc_handle, pkt);

	if (A_OK != status) {
		cdf_atomic_dec(&wmi_handle->pending_cmds);
		pr_err("%s %d, htc_send_pkt failed\n", __func__, __LINE__);
	}

	return ((status == A_OK) ? EOK : -1);
}
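wmi_unified_cmd_send() accounts for in-flight commands by incrementing pending_cmds before the send, rejecting the command once WMI_MAX_CMDS is reached, and decrementing again on any failure path. A minimal standalone sketch of that guard with C11 atomics (the limit and the helper names are placeholders):

/* Sketch of the pending-command accounting in wmi_unified_cmd_send();
 * MAX_CMDS stands in for WMI_MAX_CMDS and send_to_htc() for htc_send_pkt().
 * In the driver the count is decremented again when the command completes
 * (not shown here).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_CMDS 1024

static atomic_int pending_cmds;

static bool send_to_htc(void)
{
	return true;	/* pretend the send succeeds */
}

static int send_cmd(void)
{
	atomic_fetch_add(&pending_cmds, 1);
	if (atomic_load(&pending_cmds) >= MAX_CMDS) {
		atomic_fetch_sub(&pending_cmds, 1);
		fprintf(stderr, "too many pending commands\n");
		return -1;	/* -EBUSY in the driver */
	}

	if (!send_to_htc()) {
		atomic_fetch_sub(&pending_cmds, 1);	/* undo on failure */
		return -1;
	}
	return 0;
}

int main(void)
{
	int rc = send_cmd();

	printf("send_cmd() -> %d, pending = %d\n", rc,
	       atomic_load(&pending_cmds));
	return 0;
}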