/*
 * nss_stats_gmac_read()
 *	Read GMAC stats
 *
 * debugfs read handler: snapshots the per-GMAC stats counters under
 * stats_lock into a shadow buffer, formats them into a kernel buffer and
 * copies the requested window out to userspace.
 *
 * Returns the number of bytes copied to @ubuf, or 0 on allocation failure.
 */
static ssize_t nss_stats_gmac_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	uint32_t i, id;

	/*
	 * max output lines = ((#stats + start tag + one blank) * #GMACs) + start/end tag + 3 blank
	 */
	uint32_t max_output_lines = ((NSS_STATS_GMAC_MAX + 2) * NSS_MAX_PHYSICAL_INTERFACES) + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	stats_shadow = kzalloc(NSS_STATS_GMAC_MAX * sizeof(uint64_t), GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);	/* was leaked on this error path */
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "gmac stats start:\n\n");

	for (id = 0; id < NSS_MAX_PHYSICAL_INTERFACES; id++) {
		/*
		 * Snapshot under the lock, then format outside it so the
		 * lock is not held across the scnprintf calls.
		 */
		spin_lock_bh(&nss_top_main.stats_lock);
		for (i = 0; (i < NSS_STATS_GMAC_MAX); i++) {
			stats_shadow[i] = nss_top_main.stats_gmac[id][i];
		}

		spin_unlock_bh(&nss_top_main.stats_lock);

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "GMAC ID: %d\n", id);
		for (i = 0; (i < NSS_STATS_GMAC_MAX); i++) {
			size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_gmac[i], stats_shadow[i]);
		}
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ngmac stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}
/*
 * nss_stats_pppoe_read()
 *	Read PPPoE stats
 *
 * debugfs read handler: snapshots the global PPPoE counters under
 * stats_lock and formats them into a buffer copied out to userspace.
 *
 * Returns the number of bytes copied to @ubuf, or 0 on allocation failure.
 */
static ssize_t nss_stats_pppoe_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	int32_t i;

	/*
	 * max output lines = #stats + start tag line + end tag line + three blank lines
	 */
	uint32_t max_output_lines = NSS_STATS_PPPOE_MAX + 5;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines;
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	uint64_t *stats_shadow;

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	stats_shadow = kzalloc(NSS_STATS_PPPOE_MAX * sizeof(uint64_t), GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);	/* was leaked on this error path */
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "pppoe stats start:\n\n");

	/*
	 * Snapshot under the lock, format outside it.
	 */
	spin_lock_bh(&nss_top_main.stats_lock);
	for (i = 0; (i < NSS_STATS_PPPOE_MAX); i++) {
		stats_shadow[i] = nss_top_main.stats_pppoe[i];
	}

	spin_unlock_bh(&nss_top_main.stats_lock);

	for (i = 0; (i < NSS_STATS_PPPOE_MAX); i++) {
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_pppoe[i], stats_shadow[i]);
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npppoe stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}
/*
 * nss_data_plane_schedule_registration()
 *	Called from nss_init to schedule a work to do data_plane register to nss-gmac
 *
 * Returns true if the work was queued on core 1, false if it was
 * already pending (queue_work_on() refused it).
 */
bool nss_data_plane_schedule_registration(void)
{
	bool queued = queue_work_on(1, nss_data_plane_workqueue, &nss_data_plane_work.work);

	if (queued) {
		nss_info("Register data plane workqueue on core 1\n");
	} else {
		nss_warning("Failed to register data plane workqueue on core 1\n");
	}

	return queued;
}
/*
 * nss_data_plane_init_delay_work()
 */
int nss_data_plane_init_delay_work(void)
{
	nss_data_plane_workqueue = create_singlethread_workqueue("nss_data_plane_workqueue");
	if (!nss_data_plane_workqueue) {
		nss_warning("Can't allocate workqueue\n");
		return -ENOMEM;
	}

	INIT_DELAYED_WORK(&nss_data_plane_work, nss_data_plane_work_function);
	return 0;
}
/*
 * nss_data_plane_work_function()
 *	Work function that gets queued to "install" the gmac overlays
 *
 * Registers every physical interface of core 0 with the nss-gmac driver,
 * logging success or failure per interface.
 */
static void nss_data_plane_work_function(struct work_struct *work)
{
	struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[NSS_CORE_0];
	int if_num;

	for (if_num = 0; if_num < NSS_MAX_PHYSICAL_INTERFACES; if_num++) {
		if (nss_data_plane_register_to_nss_gmac(nss_ctx, if_num)) {
			nss_info("%p: Register data plan to gmac:%d success\n", nss_ctx, if_num);
			continue;
		}

		nss_warning("%p: Register data plane failed for gmac:%d\n", nss_ctx, if_num);
	}
}
/* ---- example boundary marker (extraction artifact, originally "Exemple #6") ---- */
/*
 * nss_ipv4_driver_conn_sync_many_update()
 *	Update driver specific information from the conn_sync_many messsage.
 */
static void nss_ipv4_driver_conn_sync_many_update(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_conn_sync_many_msg *nicsm)
{
	int i;

	/*
	 * Sanity check for the stats count
	 */
	if (nicsm->count * sizeof(struct nss_ipv4_conn_sync) >= nicsm->size) {
		nss_warning("%p: stats sync count %u exceeds the size of this msg %u", nss_ctx, nicsm->count, nicsm->size);
		return;
	}

	for (i = 0; i < nicsm->count; i++) {
		nss_ipv4_driver_conn_sync_update(nss_ctx, &nicsm->conn_sync[i]);
	}
}
/*
 * nss_map_t_instance_stats_get()
 *	Get map_t statitics.
 */
void nss_map_t_instance_debug_stats_get(void *stats_mem)
{
	struct nss_stats_map_t_instance_debug *stats = (struct nss_stats_map_t_instance_debug *)stats_mem;
	int i;

	if (!stats) {
		nss_warning("No memory to copy map_t stats");
		return;
	}

	spin_lock_bh(&nss_map_t_debug_stats_lock);
	for (i = 0; i < NSS_MAX_MAP_T_DYNAMIC_INTERFACES; i++) {
		if (nss_map_t_debug_stats[i].valid) {
			memcpy(stats, &nss_map_t_debug_stats[i], sizeof(struct nss_stats_map_t_instance_debug));
			stats++;
		}
	}
	spin_unlock_bh(&nss_map_t_debug_stats_lock);
}
/*
 * nss_stats_init()
 * 	Enable NSS statistics
 *
 * Creates the qca-nss-drv debugfs hierarchy and one read-only file per
 * statistics category. debugfs being unavailable is treated as
 * non-fatal: on any failure we warn and return, leaving whatever was
 * created so far in place.
 */
void nss_stats_init(void)
{
	/*
	 * NSS driver entry
	 */
	nss_top_main.top_dentry = debugfs_create_dir("qca-nss-drv", NULL);
	if (unlikely(nss_top_main.top_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv directory in debugfs");

		/*
		 * Non availability of debugfs directory is not a catastrophe.
		 * We can still go ahead with other initialization.
		 */
		return;
	}

	nss_top_main.stats_dentry = debugfs_create_dir("stats", nss_top_main.top_dentry);
	if (unlikely(nss_top_main.stats_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats directory in debugfs");

		/*
		 * Non availability of debugfs directory is not a catastrophe.
		 * We can still go ahead with rest of initialization.
		 */
		return;
	}

	/*
	 * Create files to obtain statistics
	 */

	/*
	 * ipv4_stats
	 */
	nss_top_main.ipv4_dentry = debugfs_create_file("ipv4", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv4_ops);
	if (unlikely(nss_top_main.ipv4_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ipv4 file in debugfs");
		return;
	}

	/*
	 * ipv6_stats
	 */
	nss_top_main.ipv6_dentry = debugfs_create_file("ipv6", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_ipv6_ops);
	if (unlikely(nss_top_main.ipv6_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/ipv6 file in debugfs");
		return;
	}

	/*
	 * pbuf_stats
	 */
	nss_top_main.pbuf_dentry = debugfs_create_file("pbuf_mgr", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_pbuf_ops);
	if (unlikely(nss_top_main.pbuf_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/pbuf_mgr file in debugfs");
		return;
	}

	/*
	 * n2h_stats
	 */
	nss_top_main.n2h_dentry = debugfs_create_file("n2h", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_n2h_ops);
	if (unlikely(nss_top_main.n2h_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/n2h file in debugfs");
		return;
	}

	/*
	 * drv_stats
	 */
	nss_top_main.drv_dentry = debugfs_create_file("drv", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_drv_ops);
	if (unlikely(nss_top_main.drv_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/drv file in debugfs");
		return;
	}

	/*
	 * pppoe_stats
	 */
	nss_top_main.pppoe_dentry = debugfs_create_file("pppoe", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_pppoe_ops);
	if (unlikely(nss_top_main.pppoe_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/pppoe file in debugfs");
		return;
	}

	/*
	 * gmac_stats
	 */
	nss_top_main.gmac_dentry = debugfs_create_file("gmac", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_gmac_ops);
	if (unlikely(nss_top_main.gmac_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/gmac file in debugfs");
		return;
	}

	/*
	 * interface_stats
	 */
	nss_top_main.if_dentry = debugfs_create_file("interface", 0400,
						nss_top_main.stats_dentry, &nss_top_main, &nss_stats_if_ops);
	if (unlikely(nss_top_main.if_dentry == NULL)) {
		nss_warning("Failed to create qca-nss-drv/stats/interface file in debugfs");
		return;
	}
}
/*
 * nss_stats_if_read()
 *	Read interface stats
 */
static ssize_t nss_stats_if_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	uint32_t i, k, id;
	void *ifctx;

	/*
	 * max output lines per interface =
	 * (#ipv4 stats + start tag + blank line) +
	 * (#ipv6 stats + start tag + blank line) +
	 * (#host stats + start tag + blank line) +
	 * (#unknown exception stats + start tag + blank line) +
	 * (#ipv4 exception + start tag + blank line) +
	 * (#ipv6 exception + start tag + blank line) +
	 * (#pppoe exception + start tag + blank line) + interface start tag
	 *
	 * max output lines =
	 * (max output lines per interface * #interfaces) +
	 * (start tag + end tag + 3 blank lines)
	 */
	uint32_t max_output_lines_interface = ((NSS_STATS_IF_IPV4_MAX + 2) + (NSS_STATS_IF_IPV6_MAX + 2) +
					(NSS_STATS_IF_HOST_MAX + 2) + (NSS_EXCEPTION_EVENT_UNKNOWN_MAX + 2) +
					(NSS_EXCEPTION_EVENT_IPV4_MAX + 2) + (NSS_EXCEPTION_EVENT_IPV6_MAX + 2) +
					(NSS_EXCEPTION_EVENT_PPPOE_MAX + 2)) + 1;
	size_t size_al = NSS_STATS_MAX_STR_LENGTH * ((max_output_lines_interface * NSS_MAX_NET_INTERFACES) + 5);
	size_t size_wr = 0;
	ssize_t bytes_read = 0;
	/* Single 64-entry shadow buffer reused for each stats category in turn;
	 * PPPoE is 2-dimensional (per session) so it gets its own stack array. */
	uint64_t *stats_shadow;
	uint64_t pppoe_stats_shadow[NSS_PPPOE_NUM_SESSION_PER_INTERFACE][NSS_EXCEPTION_EVENT_PPPOE_MAX];

	char *lbuf = kzalloc(size_al, GFP_KERNEL);
	if (unlikely(lbuf == NULL)) {
		nss_warning("Could not allocate memory for local statistics buffer");
		return 0;
	}

	/*
	 * WARNING: We are only allocating memory for 64 stats counters per stats type
	 *		Developers must ensure that number of counters are not more than 64
	 */

	if ( (NSS_STATS_IF_IPV4_MAX > 64) ||
			(NSS_STATS_IF_IPV6_MAX > 64) ||
			(NSS_STATS_IF_HOST_MAX > 64) ||
			(NSS_EXCEPTION_EVENT_UNKNOWN_MAX > 64) ||
			(NSS_EXCEPTION_EVENT_IPV4_MAX > 64) ||
			(NSS_EXCEPTION_EVENT_IPV6_MAX > 64) ||
			(NSS_EXCEPTION_EVENT_PPPOE_MAX > 64)) {
		nss_warning("Size of shadow stats structure is not enough to copy all stats");
	}

	stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
	if (unlikely(stats_shadow == NULL)) {
		nss_warning("Could not allocate memory for local shadow buffer");
		kfree(lbuf);
		return 0;
	}

	size_wr = scnprintf(lbuf, size_al, "if stats start:\n\n");

	for (id = NSS_DEVICE_IF_START; id < NSS_MAX_DEVICE_INTERFACES; id++) {

		/* Only interfaces with a registered context are reported. */
		spin_lock_bh(&nss_top_main.lock);
		ifctx = nss_top_main.if_ctx[id];
		spin_unlock_bh(&nss_top_main.lock);

		if (!ifctx) {
			continue;
		}

		/*
		 * Host Stats
		 * Pattern for every category below: snapshot the counters under
		 * stats_lock into the shadow buffer, then format outside the lock.
		 */
		spin_lock_bh(&nss_top_main.stats_lock);
		for (i = 0; (i < NSS_STATS_IF_HOST_MAX); i++) {
			stats_shadow[i] = nss_top_main.stats_if_host[id][i];
		}

		spin_unlock_bh(&nss_top_main.stats_lock);

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Interface ID: %d\n", id);
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Host:\n");
		for (i = 0; (i < NSS_STATS_IF_HOST_MAX); i++) {
			size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_if_host[i], stats_shadow[i]);
		}

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");

		/*
		 * IPv4 stats
		 */
		spin_lock_bh(&nss_top_main.stats_lock);
		for (i = 0; (i < NSS_STATS_IF_IPV4_MAX); i++) {
			stats_shadow[i] = nss_top_main.stats_if_ipv4[id][i];
		}

		spin_unlock_bh(&nss_top_main.stats_lock);

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "IPv4:\n");
		for (i = 0; (i < NSS_STATS_IF_IPV4_MAX); i++) {
			size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_if_ipv4[i], stats_shadow[i]);
		}

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");

		/*
		 * IPv6 stats
		 */
		spin_lock_bh(&nss_top_main.stats_lock);
		for (i = 0; (i < NSS_STATS_IF_IPV6_MAX); i++) {
			stats_shadow[i] = nss_top_main.stats_if_ipv6[id][i];
		}

		spin_unlock_bh(&nss_top_main.stats_lock);

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "IPv6:\n");
		for (i = 0; (i < NSS_STATS_IF_IPV6_MAX); i++) {
			size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n", nss_stats_str_if_ipv6[i], stats_shadow[i]);
		}

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");

		/*
		 * Unknown exception stats
		 */
		spin_lock_bh(&nss_top_main.stats_lock);
		for (i = 0; (i < NSS_EXCEPTION_EVENT_UNKNOWN_MAX); i++) {
			stats_shadow[i] = nss_top_main.stats_if_exception_unknown[id][i];
		}

		spin_unlock_bh(&nss_top_main.stats_lock);

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Exception Unknown:\n");
		for (i = 0; (i < NSS_EXCEPTION_EVENT_UNKNOWN_MAX); i++) {
			size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n",
					nss_stats_str_if_exception_unknown[i],
					stats_shadow[i]);
		}

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");

		/*
		 * IPv4 exception stats
		 */
		spin_lock_bh(&nss_top_main.stats_lock);
		for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV4_MAX); i++) {
			stats_shadow[i] = nss_top_main.stats_if_exception_ipv4[id][i];
		}

		spin_unlock_bh(&nss_top_main.stats_lock);

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Exception IPv4:\n");
		for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV4_MAX); i++) {
			size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n",
					nss_stats_str_if_exception_ipv4[i],
					stats_shadow[i]);
		}

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");

		/*
		 * IPv6 exception stats
		 */
		spin_lock_bh(&nss_top_main.stats_lock);
		for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV6_MAX); i++) {
			stats_shadow[i] = nss_top_main.stats_if_exception_ipv6[id][i];
		}

		spin_unlock_bh(&nss_top_main.stats_lock);

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Exception IPv6:\n");
		for (i = 0; (i < NSS_EXCEPTION_EVENT_IPV6_MAX); i++) {
			size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
					"%s = %llu\n",
					nss_stats_str_if_exception_ipv6[i],
					stats_shadow[i]);
		}
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");


		/*
		 * Exception PPPoE (one block of counters per PPPoE session)
		 */
		spin_lock_bh(&nss_top_main.stats_lock);
		for (k = 0; k < NSS_PPPOE_NUM_SESSION_PER_INTERFACE; k++) {
			for (i = 0; (i < NSS_EXCEPTION_EVENT_PPPOE_MAX); i++) {
				pppoe_stats_shadow[k][i] = nss_top_main.stats_if_exception_pppoe[id][k][i];
			}
		}

		spin_unlock_bh(&nss_top_main.stats_lock);

		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Exception PPPoE:\n");
		for (k = 0; k < NSS_PPPOE_NUM_SESSION_PER_INTERFACE; k++) {
			size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. Session\n", k);
			for (i = 0; (i < NSS_EXCEPTION_EVENT_PPPOE_MAX); i++) {
				size_wr += scnprintf(lbuf + size_wr, size_al - size_wr,
						"%s = %llu\n",
						nss_stats_str_if_exception_pppoe[i],
						pppoe_stats_shadow[k][i]);
			}
		}
		size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n");
	}

	size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nif stats end\n\n");
	bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf));
	kfree(lbuf);
	kfree(stats_shadow);

	return bytes_read;
}
/*
 * nss_phys_if_gmac_stats_sync()
 *	Handle the syncing of GMAC stats.
 *
 * Repackages the NSS phys_if stats message into the nss_gmac_stats layout
 * expected by the gmac driver and delivers it via nss_gmac_event_receive().
 * The two structures differ, so the copy is field by field.
 */
void nss_phys_if_gmac_stats_sync(struct nss_ctx_instance *nss_ctx,
		struct nss_phys_if_stats *stats, uint16_t interface)
{
	struct nss_gmac_stats gs;
	void *ctx;
	int i;

	/*
	 * Get the netdev ctx and bail out early if the interface has not
	 * registered yet — no point building the stats copy in that case.
	 */
	ctx = nss_ctx->nss_top->subsys_dp_register[interface].ndev;
	if (!ctx) {
		nss_warning("%p: Event received for GMAC interface %d before registration", nss_ctx, interface);
		return;
	}

	gs.interface = interface;

	/*
	 * Rx side: basic counters from if_stats, error detail from estats.
	 */
	gs.rx_bytes = stats->if_stats.rx_bytes;
	gs.rx_packets = stats->if_stats.rx_packets;
	gs.rx_errors = stats->estats.rx_errors;
	gs.rx_receive_errors = stats->estats.rx_receive_errors;
	gs.rx_overflow_errors = stats->estats.rx_overflow_errors;
	gs.rx_descriptor_errors = stats->estats.rx_descriptor_errors;
	gs.rx_watchdog_timeout_errors = stats->estats.rx_watchdog_timeout_errors;
	gs.rx_crc_errors = stats->estats.rx_crc_errors;
	gs.rx_late_collision_errors = stats->estats.rx_late_collision_errors;
	gs.rx_dribble_bit_errors = stats->estats.rx_dribble_bit_errors;
	gs.rx_length_errors = stats->estats.rx_length_errors;
	gs.rx_ip_header_errors = stats->estats.rx_ip_header_errors;
	gs.rx_ip_payload_errors = stats->estats.rx_ip_payload_errors;
	gs.rx_no_buffer_errors = stats->estats.rx_no_buffer_errors;
	gs.rx_transport_csum_bypassed = stats->estats.rx_transport_csum_bypassed;

	/*
	 * Tx side.
	 */
	gs.tx_bytes = stats->if_stats.tx_bytes;
	gs.tx_packets = stats->if_stats.tx_packets;
	gs.tx_collisions = stats->estats.tx_collisions;
	gs.tx_errors = stats->estats.tx_errors;
	gs.tx_jabber_timeout_errors = stats->estats.tx_jabber_timeout_errors;
	gs.tx_frame_flushed_errors = stats->estats.tx_frame_flushed_errors;
	gs.tx_loss_of_carrier_errors = stats->estats.tx_loss_of_carrier_errors;
	gs.tx_no_carrier_errors = stats->estats.tx_no_carrier_errors;
	gs.tx_late_collision_errors = stats->estats.tx_late_collision_errors;
	gs.tx_excessive_collision_errors = stats->estats.tx_excessive_collision_errors;
	gs.tx_excessive_deferral_errors = stats->estats.tx_excessive_deferral_errors;
	gs.tx_underflow_errors = stats->estats.tx_underflow_errors;
	gs.tx_ip_header_errors = stats->estats.tx_ip_header_errors;
	gs.tx_ip_payload_errors = stats->estats.tx_ip_payload_errors;
	gs.tx_dropped = stats->estats.tx_dropped;

	/*
	 * Hardware error counters (indices 0..9 in both layouts).
	 */
	for (i = 0; i < 10; i++) {
		gs.hw_errs[i] = stats->estats.hw_errs[i];
	}

	gs.rx_missed = stats->estats.rx_missed;
	gs.fifo_overflows = stats->estats.fifo_overflows;
	gs.rx_scatter_errors = stats->estats.rx_scatter_errors;
	gs.tx_ts_create_errors = stats->estats.tx_ts_create_errors;
	gs.gmac_total_ticks = stats->estats.gmac_total_ticks;
	gs.gmac_worst_case_ticks = stats->estats.gmac_worst_case_ticks;
	gs.gmac_iterations = stats->estats.gmac_iterations;
	gs.tx_pause_frames = stats->estats.tx_pause_frames;

	/*
	 * Pass through gmac exported api
	 */
	nss_gmac_event_receive(ctx, NSS_GMAC_EVENT_STATS, (void *)&gs, sizeof(struct nss_gmac_stats));
}
/* ---- example boundary marker (extraction artifact, originally "Exemple #11") ---- */
static int nss_probe(struct platform_device *nss_dev)
#endif
{
	struct nss_top_instance *nss_top = &nss_top_main;
	struct nss_ctx_instance *nss_ctx = NULL;
	struct nss_platform_data *npd = NULL;
	struct netdev_priv_instance *ndev_priv;
#if (NSS_DT_SUPPORT == 1)
	struct reset_control *rstctl = NULL;
#endif
	int i, err = 0;

	const struct firmware *nss_fw = NULL;
	int rc = -ENODEV;
	void __iomem *load_mem;

#if (NSS_DT_SUPPORT == 1)
	struct device_node *np = NULL;

	if (nss_top_main.nss_hal_common_init_done == false) {
		/*
		 * Perform clock init common to all NSS cores
		 */
		struct clk *nss_tcm_src = NULL;
		struct clk *nss_tcm_clk = NULL;

		/*
		 * Attach debug interface to TLMM
		 */
		nss_write_32((uint32_t)nss_top_main.nss_fpb_base, NSS_REGS_FPB_CSR_CFG_OFFSET, 0x360);

		/*
		 * NSS TCM CLOCK
		 */
		nss_tcm_src = clk_get(&nss_dev->dev, NSS_TCM_SRC_CLK);
		if (IS_ERR(nss_tcm_src)) {
			pr_err("nss-driver: cannot get clock: " NSS_TCM_SRC_CLK);
			return -EFAULT;
		}

		clk_set_rate(nss_tcm_src, NSSTCM_FREQ);
		clk_prepare(nss_tcm_src);
		clk_enable(nss_tcm_src);

		nss_tcm_clk = clk_get(&nss_dev->dev, NSS_TCM_CLK);
		if (IS_ERR(nss_tcm_clk)) {
			pr_err("nss-driver: cannot get clock: " NSS_TCM_CLK);
			return -EFAULT;
		}

		clk_prepare(nss_tcm_clk);
		clk_enable(nss_tcm_clk);

		nss_top_main.nss_hal_common_init_done = true;
		nss_info("nss_hal_common_reset Done.\n");
	}

	if (nss_dev->dev.of_node) {
		/*
		 * Device Tree based init
		 */

		np = of_node_get(nss_dev->dev.of_node);
		npd = nss_drv_of_get_pdata(np, nss_dev);

		of_node_put(np);

		if (!npd) {
			return -EFAULT;
		}

		nss_ctx = &nss_top->nss[npd->id];
		nss_ctx->id = npd->id;
		nss_dev->id = nss_ctx->id;

	} else {
		/*
		 * Platform Device based init
		 */

		npd = (struct nss_platform_data *) nss_dev->dev.platform_data;
		nss_ctx = &nss_top->nss[nss_dev->id];
		nss_ctx->id = nss_dev->id;
	}

#else
	npd = (struct nss_platform_data *) nss_dev->dev.platform_data;
	nss_ctx = &nss_top->nss[nss_dev->id];
	nss_ctx->id = nss_dev->id;
#endif
	nss_ctx->nss_top = nss_top;

	nss_info("%p: NSS_DEV_ID %s \n", nss_ctx, dev_name(&nss_dev->dev));

	/*
	 * F/W load from NSS Driver
	 */
	if (nss_ctx->id == 0) {
		rc = request_firmware(&nss_fw, NETAP0_IMAGE, &(nss_dev->dev));
	} else if (nss_ctx->id == 1) {
		rc = request_firmware(&nss_fw, NETAP1_IMAGE, &(nss_dev->dev));
	} else {
		nss_warning("%p: Invalid nss dev: %d \n", nss_ctx, nss_dev->id);
	}

	/*
	 *  Check if the file read is successful
	 */
	if (rc) {
		nss_warning("%p: request_firmware failed with err code: %d", nss_ctx, rc);
		err = rc;
		goto err_init_0;
	}

	if (nss_fw->size < MIN_IMG_SIZE) {
		nss_warning("%p: nss firmware is truncated, size:%d", nss_ctx, nss_fw->size);
	}

	load_mem = ioremap_nocache(npd->load_addr, nss_fw->size);
	if (load_mem == NULL) {
		nss_warning("%p: ioremap_nocache failed: %x", nss_ctx, npd->load_addr);
		release_firmware(nss_fw);
		goto err_init_0;
	}

	printk("nss_driver - fw of size %u  bytes copied to load addr: %x, nss_id : %d\n", nss_fw->size, npd->load_addr, nss_dev->id);
	memcpy_toio(load_mem, nss_fw->data, nss_fw->size);
	release_firmware(nss_fw);
	iounmap(load_mem);

	/*
	 * Both NSS cores controlled by same regulator, Hook only Once
	 */
	if (!nss_ctx->id) {
		nss_core0_clk = clk_get(&nss_dev->dev, "nss_core_clk");
		if (IS_ERR(nss_core0_clk)) {

			err = PTR_ERR(nss_core0_clk);
			nss_info("%p: Regulator %s get failed, err=%d\n", nss_ctx, dev_name(&nss_dev->dev), err);
			return err;

		}
		clk_set_rate(nss_core0_clk, NSS_FREQ_550);
		clk_prepare(nss_core0_clk);
		clk_enable(nss_core0_clk);

#if (NSS_PM_SUPPORT == 1)
		/*
		 * Check if turbo is supported
		 */
		if (npd->turbo_frequency) {
			/*
			 * Turbo is supported
			 */
			printk("nss_driver - Turbo Support %d\n", npd->turbo_frequency);
			nss_runtime_samples.freq_scale_sup_max = NSS_MAX_CPU_SCALES;
			nss_pm_set_turbo();
		} else {
			printk("nss_driver - Turbo No Support %d\n", npd->turbo_frequency);
			nss_runtime_samples.freq_scale_sup_max = NSS_MAX_CPU_SCALES - 1;
		}
#else
		printk("nss_driver - Turbo Not Supported\n");
#endif
	}

	/*
	 * Get load address of NSS firmware
	 */
	nss_info("%p: Setting NSS%d Firmware load address to %x\n", nss_ctx, nss_ctx->id, npd->load_addr);
	nss_top->nss[nss_ctx->id].load = npd->load_addr;

	/*
	 * Get virtual and physical memory addresses for nss logical/hardware address maps
	 */

	/*
	 * Virtual address of CSM space
	 */
	nss_ctx->nmap = npd->nmap;
	nss_assert(nss_ctx->nmap);

	/*
	 * Physical address of CSM space
	 */
	nss_ctx->nphys = npd->nphys;
	nss_assert(nss_ctx->nphys);

	/*
	 * Virtual address of logical registers space
	 */
	nss_ctx->vmap = npd->vmap;
	nss_assert(nss_ctx->vmap);

	/*
	 * Physical address of logical registers space
	 */
	nss_ctx->vphys = npd->vphys;
	nss_assert(nss_ctx->vphys);
	nss_info("%d:ctx=%p, vphys=%x, vmap=%x, nphys=%x, nmap=%x",
			nss_ctx->id, nss_ctx, nss_ctx->vphys, nss_ctx->vmap, nss_ctx->nphys, nss_ctx->nmap);

	/*
	 * Register netdevice handlers
	 */
	nss_ctx->int_ctx[0].ndev = alloc_netdev(sizeof(struct netdev_priv_instance),
					"qca-nss-dev%d", nss_dummy_netdev_setup);
	if (nss_ctx->int_ctx[0].ndev == NULL) {
		nss_warning("%p: Could not allocate net_device #0", nss_ctx);
		err = -ENOMEM;
		goto err_init_0;
	}

	nss_ctx->int_ctx[0].ndev->netdev_ops = &nss_netdev_ops;
	nss_ctx->int_ctx[0].ndev->ethtool_ops = &nss_ethtool_ops;
	err = register_netdev(nss_ctx->int_ctx[0].ndev);
	if (err) {
		nss_warning("%p: Could not register net_device #0", nss_ctx);
		goto err_init_1;
	}

	/*
	 * request for IRQs
	 *
	 * WARNING: CPU affinities should be set using OS supported methods
	 */
	nss_ctx->int_ctx[0].nss_ctx = nss_ctx;
	nss_ctx->int_ctx[0].shift_factor = 0;
	nss_ctx->int_ctx[0].irq = npd->irq[0];
	err = request_irq(npd->irq[0], nss_handle_irq, IRQF_DISABLED, "nss", &nss_ctx->int_ctx[0]);
	if (err) {
		nss_warning("%d: IRQ0 request failed", nss_dev->id);
		goto err_init_2;
	}

	/*
	 * Register NAPI for NSS core interrupt #0
	 */
	ndev_priv = netdev_priv(nss_ctx->int_ctx[0].ndev);
	ndev_priv->int_ctx = &nss_ctx->int_ctx[0];
	netif_napi_add(nss_ctx->int_ctx[0].ndev, &nss_ctx->int_ctx[0].napi, nss_core_handle_napi, 64);
	napi_enable(&nss_ctx->int_ctx[0].napi);
	nss_ctx->int_ctx[0].napi_active = true;

	/*
	 * Check if second interrupt is supported on this nss core
	 */
	if (npd->num_irq > 1) {
		nss_info("%d: This NSS core supports two interrupts", nss_dev->id);

		/*
		 * Register netdevice handlers
		 */
		nss_ctx->int_ctx[1].ndev = alloc_netdev(sizeof(struct netdev_priv_instance),
						"qca-nss-dev%d", nss_dummy_netdev_setup);
		if (nss_ctx->int_ctx[1].ndev == NULL) {
			nss_warning("%p: Could not allocate net_device #1", nss_ctx);
			err = -ENOMEM;
			goto err_init_3;
		}

		nss_ctx->int_ctx[1].ndev->netdev_ops = &nss_netdev_ops;
		nss_ctx->int_ctx[1].ndev->ethtool_ops = &nss_ethtool_ops;
		err = register_netdev(nss_ctx->int_ctx[1].ndev);
		if (err) {
			nss_warning("%p: Could not register net_device #1", nss_ctx);
			goto err_init_4;
		}

		nss_ctx->int_ctx[1].nss_ctx = nss_ctx;
		nss_ctx->int_ctx[1].shift_factor = 15;
		nss_ctx->int_ctx[1].irq = npd->irq[1];
		err = request_irq(npd->irq[1], nss_handle_irq, IRQF_DISABLED, "nss", &nss_ctx->int_ctx[1]);
		if (err) {
			nss_warning("%d: IRQ1 request failed for nss", nss_dev->id);
			goto err_init_5;
		}

		/*
		 * Register NAPI for NSS core interrupt #1
		 */
		ndev_priv = netdev_priv(nss_ctx->int_ctx[1].ndev);
		ndev_priv->int_ctx = &nss_ctx->int_ctx[1];
		netif_napi_add(nss_ctx->int_ctx[1].ndev, &nss_ctx->int_ctx[1].napi, nss_core_handle_napi, 64);
		napi_enable(&nss_ctx->int_ctx[1].napi);
		nss_ctx->int_ctx[1].napi_active = true;
	}

	spin_lock_bh(&(nss_top->lock));

	/*
	 * Check functionalities are supported by this NSS core
	 */
	if (npd->shaping_enabled == NSS_FEATURE_ENABLED) {
		nss_top->shaping_handler_id = nss_dev->id;
		printk(KERN_INFO "%p: NSS Shaping is enabled, handler id: %u\n", __func__, nss_top->shaping_handler_id);
	}

	if (npd->ipv4_enabled == NSS_FEATURE_ENABLED) {
		nss_top->ipv4_handler_id = nss_dev->id;
		nss_ipv4_register_handler();
		nss_pppoe_register_handler();
		nss_eth_rx_register_handler();
		nss_n2h_register_handler();
		nss_virt_if_register_handler();
		nss_lag_register_handler();
		nss_dynamic_interface_register_handler();
		nss_top->capwap_handler_id = nss_dev->id;
		nss_capwap_init();

		for (i = 0; i < NSS_MAX_VIRTUAL_INTERFACES; i++) {
			nss_top->virt_if_handler_id[i] = nss_dev->id;
		}

		nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_802_3_REDIR] = nss_dev->id;
	}

	if (npd->ipv4_reasm_enabled == NSS_FEATURE_ENABLED) {
		nss_top->ipv4_reasm_handler_id = nss_dev->id;
		nss_ipv4_reasm_register_handler();
	}

	if (npd->ipv6_enabled == NSS_FEATURE_ENABLED) {
		nss_top->ipv6_handler_id = nss_dev->id;
		nss_ipv6_register_handler();
	}

	if (npd->crypto_enabled == NSS_FEATURE_ENABLED) {
		nss_top->crypto_handler_id = nss_dev->id;
		nss_crypto_register_handler();
	}

	if (npd->ipsec_enabled == NSS_FEATURE_ENABLED) {
		nss_top->ipsec_handler_id = nss_dev->id;
		nss_ipsec_register_handler();
	}

	if (npd->wlan_enabled == NSS_FEATURE_ENABLED) {
		nss_top->wlan_handler_id = nss_dev->id;
	}

	if (npd->tun6rd_enabled == NSS_FEATURE_ENABLED) {
		nss_top->tun6rd_handler_id = nss_dev->id;
	}

	if (npd->tunipip6_enabled == NSS_FEATURE_ENABLED) {
		nss_top->tunipip6_handler_id = nss_dev->id;
		nss_tunipip6_register_handler();
	}

	if (npd->gre_redir_enabled == NSS_FEATURE_ENABLED) {
		nss_top->gre_redir_handler_id = nss_dev->id;
		nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR] =  nss_dev->id;
		nss_gre_redir_register_handler();
		nss_sjack_register_handler();
	}

	/*
	 * Mark data plane enabled so when nss core init done we call register to nss-gmac
	 */
	for (i = 0 ; i < NSS_MAX_PHYSICAL_INTERFACES ; i ++) {
		if (npd->gmac_enabled[i] == NSS_FEATURE_ENABLED) {
			nss_data_plane_set_enabled(i);
		}
	}

#if (NSS_PM_SUPPORT == 1)
	nss_freq_register_handler();
#endif
	nss_lso_rx_register_handler();

	nss_top->frequency_handler_id = nss_dev->id;

	spin_unlock_bh(&(nss_top->lock));

	/*
	 * Initialize decongestion callbacks to NULL
	 */
	for (i = 0; i< NSS_MAX_CLIENTS; i++) {
		nss_ctx->queue_decongestion_callback[i] = 0;
		nss_ctx->queue_decongestion_ctx[i] = 0;
	}

	spin_lock_init(&(nss_ctx->decongest_cb_lock));
	nss_ctx->magic = NSS_CTX_MAGIC;

	nss_info("%p: Reseting NSS core %d now", nss_ctx, nss_ctx->id);

	/*
	 * Enable clocks and bring NSS core out of reset
	 */
#if (NSS_DT_SUPPORT == 1)
	/*
	 * Remove UBI32 reset clamp
	 */
	rstctl = devm_reset_control_get(&nss_dev->dev, "clkrst_clamp");
	if (IS_ERR(rstctl)) {
		nss_info("%p: Deassert UBI32 reset clamp failed", nss_ctx, nss_ctx->id);
		err = -EFAULT;
		goto err_init_5;
	}
	reset_control_deassert(rstctl);
	mdelay(1);
	reset_control_put(rstctl);

	/*
	 * Remove UBI32 core clamp
	 */
	rstctl = devm_reset_control_get(&nss_dev->dev, "clamp");
	if (IS_ERR(rstctl)) {
		nss_info("%p: Deassert UBI32 core clamp failed", nss_ctx, nss_ctx->id);
		err = -EFAULT;
		goto err_init_5;
	}
	reset_control_deassert(rstctl);
	mdelay(1);
	reset_control_put(rstctl);

	/*
	 * Remove UBI32 AHB reset
	 */
	rstctl = devm_reset_control_get(&nss_dev->dev, "ahb");
	if (IS_ERR(rstctl)) {
		nss_info("%p: Deassert AHB reset failed", nss_ctx, nss_ctx->id);
		err = -EFAULT;
		goto err_init_5;
	}
	reset_control_deassert(rstctl);
	mdelay(1);
	reset_control_put(rstctl);

	/*
	 * Remove UBI32 AXI reset
	 */
	rstctl = devm_reset_control_get(&nss_dev->dev, "axi");
	if (IS_ERR(rstctl)) {
		nss_info("%p: Deassert AXI reset failed", nss_ctx, nss_ctx->id);
		err = -EFAULT;
		goto err_init_5;
	}
	reset_control_deassert(rstctl);
	mdelay(1);
	reset_control_put(rstctl);

	nss_hal_core_reset(nss_ctx->nmap, nss_ctx->load);
#else
	nss_hal_core_reset(nss_dev->id, nss_ctx->nmap, nss_ctx->load, nss_top->clk_src);
#endif
	/*
	 * Enable interrupts for NSS core
	 */
	nss_hal_enable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[0].irq,
					nss_ctx->int_ctx[0].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);

	if (npd->num_irq > 1) {
		nss_hal_enable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[1].irq,
					nss_ctx->int_ctx[1].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);
	}

	/*
	 * Initialize max buffer size for NSS core
	 */
	nss_ctx->max_buf_size = NSS_NBUF_PAYLOAD_SIZE;
	nss_info("%p: All resources initialized and nss core%d has been brought out of reset", nss_ctx, nss_dev->id);
	goto err_init_0;

err_init_5:
	unregister_netdev(nss_ctx->int_ctx[1].ndev);
err_init_4:
	free_netdev(nss_ctx->int_ctx[1].ndev);
err_init_3:
	free_irq(npd->irq[0], &nss_ctx->int_ctx[0]);
err_init_2:
	unregister_netdev(nss_ctx->int_ctx[0].ndev);
err_init_1:
	free_netdev(nss_ctx->int_ctx[0].ndev);

#if (NSS_DT_SUPPORT == 1)
	if (nss_dev->dev.of_node) {
		if (npd->nmap) {
			iounmap((void *)npd->nmap);
		}

		if (npd->vmap) {
			iounmap((void *)npd->vmap);
		}
	}
#endif

err_init_0:

#if (NSS_DT_SUPPORT == 1)
	if (nss_dev->dev.of_node) {
		devm_kfree(&nss_dev->dev, npd);
	}

#endif
	return err;
}
/*
 * NOTE(review): a stray "Exemple #12" / "0" scrape artifact appeared here —
 * it is not part of the driver source and would not compile; kept only as
 * this comment so surrounding code remains valid C.
 */
/*
 * nss_init()
 *	Registers nss driver
 *
 * One-time module initialization:
 *  - DT builds: maps the NSS common (FPB) register space from the
 *    "nss-common" device-tree node; non-DT builds: performs the clock
 *    reset common to all NSS cores.
 *  - Initializes the top-level locks and the statistics debugfs entries.
 *  - Registers the driver sysctl table and the IPv4/IPv6 config sysctls.
 *  - PM builds: seeds the frequency-scaling sample state, creates the
 *    frequency workqueue, and registers with the bus PM driver.
 *  - Finally registers the platform driver so per-core probe can run.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __init nss_init(void)
{
#if (NSS_DT_SUPPORT == 1)
	struct device_node *cmn = NULL;
	struct resource res_nss_fpb_base;
#endif

	nss_info("Init NSS driver");

#if (NSS_PM_SUPPORT == 1)
	nss_freq_change_context = nss_freq_get_mgr();
#else
	nss_freq_change_context = NULL;
#endif

#if (NSS_DT_SUPPORT == 1)
	/*
	 * Get reference to NSS common device node
	 */
	cmn = of_find_node_by_name(NULL, "nss-common");
	if (!cmn) {
		nss_info("cannot find nss-common node\n");
		return -EFAULT;
	}

	if (of_address_to_resource(cmn, 0, &res_nss_fpb_base) != 0) {
		nss_info("of_address_to_resource() return error for nss_fpb_base\n");
		of_node_put(cmn);
		return -EFAULT;
	}

	nss_top_main.nss_fpb_base = ioremap_nocache(res_nss_fpb_base.start,
						    resource_size(&res_nss_fpb_base));
	if (!nss_top_main.nss_fpb_base) {
		nss_info("ioremap fail for nss_fpb_base\n");
		of_node_put(cmn);
		return -EFAULT;
	}

	nss_top_main.nss_hal_common_init_done = false;

	/*
	 * Release reference to NSS common device node
	 */
	of_node_put(cmn);
	cmn = NULL;
#else
	/*
	 * Perform clock init common to all NSS cores
	 */
	nss_hal_common_reset(&(nss_top_main.clk_src));

#endif /* NSS_DT_SUPPORT */

	/*
	 * Enable spin locks
	 */
	spin_lock_init(&(nss_top_main.lock));
	spin_lock_init(&(nss_top_main.stats_lock));

	/*
	 * Enable NSS statistics
	 */
	nss_stats_init();

	/*
	 * Register sysctl table.
	 */
	nss_dev_header = register_sysctl_table(nss_root);

	/*
	 * Registering sysctl for ipv4/6 specific config.
	 */
	nss_ipv4_register_sysctl();
	nss_ipv6_register_sysctl();

#if (NSS_PM_SUPPORT == 1)
	/*
	 * Setup Runtime Sample values
	 */
	nss_runtime_samples.freq_scale[0].frequency =	NSS_FREQ_110;
	nss_runtime_samples.freq_scale[0].minimum =	NSS_FREQ_110_MIN;
	nss_runtime_samples.freq_scale[0].maximum =	NSS_FREQ_110_MAX;
	nss_runtime_samples.freq_scale[1].frequency =	NSS_FREQ_550;
	nss_runtime_samples.freq_scale[1].minimum =	NSS_FREQ_550_MIN;
	nss_runtime_samples.freq_scale[1].maximum =	NSS_FREQ_550_MAX;
	nss_runtime_samples.freq_scale[2].frequency =	NSS_FREQ_733;
	nss_runtime_samples.freq_scale[2].minimum =	NSS_FREQ_733_MIN;
	nss_runtime_samples.freq_scale[2].maximum =	NSS_FREQ_733_MAX;
	nss_runtime_samples.freq_scale_index = 1;
	nss_runtime_samples.freq_scale_ready = 0;
	nss_runtime_samples.freq_scale_rate_limit_up = 0;
	nss_runtime_samples.freq_scale_rate_limit_down = 0;
	nss_runtime_samples.buffer_index = 0;
	nss_runtime_samples.sum = 0;
	nss_runtime_samples.sample_count = 0;
	nss_runtime_samples.average = 0;
	nss_runtime_samples.message_rate_limit = 0;
	nss_runtime_samples.initialized = 0;

	nss_cmd_buf.current_freq = nss_runtime_samples.freq_scale[nss_runtime_samples.freq_scale_index].frequency;

	/*
	 * Initial Workqueue
	 *
	 * create_workqueue() can fail under memory pressure; bail out here
	 * rather than letting a later frequency-change request dereference
	 * a NULL workqueue.
	 */
	nss_wq = create_workqueue("nss_freq_queue");
	if (unlikely(nss_wq == NULL)) {
		nss_warning("Could not create NSS frequency workqueue");
		return -ENOMEM;
	}

	/*
	 * Initialize NSS Bus PM module
	 */
	nss_pm_init();

	/*
	 * Register with Bus driver
	 */
	pm_client = nss_pm_client_register(NSS_PM_CLIENT_NETAP);
	if (!pm_client) {
		nss_warning("Error registering with PM driver");
	}
#endif

	/*
	 * Register platform_driver
	 */
	return platform_driver_register(&nss_driver);
}