Code Example #1
/*
 * nss_data_plane_register_to_nss_gmac()
 */
bool nss_data_plane_register_to_nss_gmac(struct nss_ctx_instance *nss_ctx, int if_num)
{
	struct nss_data_plane_param *ndpp = &nss_data_plane_params[if_num];
	struct nss_top_instance *nss_top = nss_ctx->nss_top;
	struct net_device *netdev;
	bool is_open;

	if (!ndpp->enabled) {
		return false;
	}

	netdev = nss_gmac_get_netdev_by_macid(if_num);
	if (!netdev) {
		nss_info("Platform don't have gmac%d enabled, don't bring up nss_phys_if and don't register to nss-gmac", if_num);
		return false;
	}

	is_open = nss_gmac_is_in_open_state(netdev);
	ndpp->dev = netdev;
	ndpp->nss_ctx = nss_ctx;
	ndpp->if_num = if_num;
	ndpp->notify_open = 0;
	ndpp->features = 0;

	/*
	 * Check whether NSS network processing should be bypassed for this GMAC
	 */
	if (nss_skip_nw_process) {
		ndpp->bypass_nw_process = 1;
	} else {
		ndpp->bypass_nw_process = 0;
	}

	if (nss_gmac_override_data_plane(netdev, &dp_ops, ndpp) != NSS_GMAC_SUCCESS) {
		nss_info("Override nss-gmac data plane failed\n");
		return false;
	}

	/*
	 * Setup the receive callback so that data packets received from the NSS firmware
	 * will be redirected to the gmac driver, as we are overriding the data plane
	 */
	nss_top->phys_if_handler_id[if_num] = nss_ctx->id;
	nss_phys_if_register_handler(if_num);

	nss_top->subsys_dp_register[if_num].ndev = netdev;
	nss_top->subsys_dp_register[if_num].cb = nss_gmac_receive;
	nss_top->subsys_dp_register[if_num].app_data = NULL;
	nss_top->subsys_dp_register[if_num].features = ndpp->features;

	/*
	 * Now that we are registered and our side is ready, ask the gmac to start again if it was open
	 */
	if (is_open) {
		nss_gmac_start_data_plane(netdev, ndpp);
	}
	return true;
}
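
The fields touched on ndpp above imply a small per-GMAC bookkeeping structure. The sketch below is inferred purely from that usage and is illustrative, not the driver's actual definition; likewise, dp_ops (passed to nss_gmac_override_data_plane()) is the set of data plane callbacks nss-gmac will invoke, and its layout is not shown in these excerpts.

/*
 * Per-GMAC data plane bookkeeping as implied by the usage above
 * (illustrative sketch only; field types are assumptions).
 */
struct nss_data_plane_param {
	int if_num;				/* physical interface number */
	struct net_device *dev;			/* netdev owned by nss-gmac */
	struct nss_ctx_instance *nss_ctx;	/* NSS core serving this GMAC */
	int notify_open;			/* set once NSS has acknowledged the open */
	int enabled;				/* data plane hand-off to NSS is allowed */
	uint32_t features;			/* skb types supported by this data plane */
	uint32_t bypass_nw_process;		/* skip NSS network processing for this GMAC */
};

static struct nss_data_plane_param nss_data_plane_params[NSS_MAX_PHYSICAL_INTERFACES];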
Code Example #2
static int nss_remove(struct platform_device *nss_dev)
{
	struct nss_top_instance *nss_top = &nss_top_main;
	struct nss_ctx_instance *nss_ctx = &nss_top->nss[nss_dev->id];
	int i;

	/*
	 * Clean-up debugfs
	 */
	nss_stats_clean();

	/*
	 * Disable interrupts and bottom halves in HLOS
	 * Disable interrupts from NSS to HLOS
	 */
	nss_hal_disable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[0].irq,
					nss_ctx->int_ctx[0].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);

	free_irq(nss_ctx->int_ctx[0].irq, &nss_ctx->int_ctx[0]);
	unregister_netdev(nss_ctx->int_ctx[0].ndev);
	free_netdev(nss_ctx->int_ctx[0].ndev);

	/*
	 * Check if second interrupt is supported
	 * If so then clear resources for second interrupt as well
	 */
	if (nss_ctx->int_ctx[1].irq) {
		nss_hal_disable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[1].irq,
					nss_ctx->int_ctx[1].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);
		free_irq(nss_ctx->int_ctx[1].irq, &nss_ctx->int_ctx[1]);
		unregister_netdev(nss_ctx->int_ctx[1].ndev);
		free_netdev(nss_ctx->int_ctx[1].ndev);
	}

	/*
	 * nss-drv is exiting, remove from nss-gmac
	 */
	for (i = 0; i < NSS_MAX_PHYSICAL_INTERFACES; i++) {
		if (nss_top->subsys_dp_register[i].ndev) {
			nss_data_plane_unregister_from_nss_gmac(i);
		}
	}
#if (NSS_DT_SUPPORT == 1)
	if (nss_dev->dev.of_node) {
		if (nss_ctx->nmap) {
			iounmap((void *)nss_ctx->nmap);
			nss_ctx->nmap = 0;
		}

		if (nss_ctx->vmap) {
			iounmap((void *)nss_ctx->vmap);
			nss_ctx->vmap = 0;
		}
	}
#endif

	nss_info("%p: All resources freed for nss core%d", nss_ctx, nss_dev->id);
	return 0;
}
Code Example #3
/*
 * nss_cleanup()
 *	Unregisters nss driver
 */
static void __exit nss_cleanup(void)
{
	nss_info("Exit NSS driver");

	if (nss_dev_header)
		unregister_sysctl_table(nss_dev_header);

	/*
	 * Unregister n2h specific sysctl
	 */
	nss_n2h_empty_pool_buf_unregister_sysctl();

	/*
	 * Unregister ipv4/6 specific sysctl
	 */
	nss_ipv4_unregister_sysctl();
	nss_ipv6_unregister_sysctl();

#if (NSS_DT_SUPPORT == 1)
	if (nss_top_main.nss_fpb_base) {
		iounmap(nss_top_main.nss_fpb_base);
		nss_top_main.nss_fpb_base = 0;
	}
#endif

	platform_driver_unregister(&nss_driver);
}
Code Example #4
/*
 * nss_data_plane_schedule_registration()
 *	Called from nss_init to schedule work that registers the data plane with nss-gmac
 */
bool nss_data_plane_schedule_registration(void)
{
	if (!queue_work_on(1, nss_data_plane_workqueue, &nss_data_plane_work.work)) {
		nss_warning("Failed to register data plane workqueue on core 1\n");
		return false;
	} else {
		nss_info("Register data plane workqueue on core 1\n");
		return true;
	}
}
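
The scheduling above relies on a workqueue and a work item declared elsewhere in the driver. A minimal sketch of how they could be set up is shown below; the wrapper struct layout and the init routine name are assumptions, while the global names come from the example and the work function is the one in Code Example #5.

/*
 * Illustrative setup of the data plane workqueue and work item used above.
 * The wrapper struct layout and the init routine name are assumptions.
 */
struct nss_data_plane_work_s {
	struct work_struct work;		/* embedded work item queued on core 1 */
};

static struct workqueue_struct *nss_data_plane_workqueue;
static struct nss_data_plane_work_s nss_data_plane_work;

static int nss_data_plane_init_sketch(void)
{
	nss_data_plane_workqueue = create_workqueue("nss_data_plane_workqueue");
	if (!nss_data_plane_workqueue) {
		return -ENOMEM;
	}

	INIT_WORK(&nss_data_plane_work.work, nss_data_plane_work_function);
	return 0;
}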
Code Example #5
/*
 * nss_data_plane_work_function()
 *	Work function that gets queued to "install" the gmac overlays
 */
static void nss_data_plane_work_function(struct work_struct *work)
{
	int i;
	struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[NSS_CORE_0];

	for (i = 0; i < NSS_MAX_PHYSICAL_INTERFACES; i++) {
		if (!nss_data_plane_register_to_nss_gmac(nss_ctx, i)) {
			nss_warning("%p: Register data plane failed for gmac:%d\n", nss_ctx, i);
		} else {
			nss_info("%p: Register data plan to gmac:%d success\n", nss_ctx, i);
		}
	}
}
Code Example #6
static int nss_paged_mode_handler(ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	if (ret) {
		return ret;
	}

	if (write) {
		nss_core_set_paged_mode(nss_paged_mode);
		nss_info("paged_mode set to %d\n", nss_paged_mode);
	}

	return ret;
}
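
This handler is wired into the driver's sysctl tree, which Code Example #11 registers via register_sysctl_table(nss_root). A hedged sketch of such an entry is below; the procname, the file mode, and the placement of the nss_paged_mode variable are illustrative assumptions, not taken from the driver.

/*
 * Illustrative ctl_table entry hooking nss_paged_mode_handler into sysctl.
 * procname and mode are assumptions; nss_paged_mode lives elsewhere in the
 * real driver and is declared here only to keep the sketch self-contained.
 */
static int nss_paged_mode;

static ctl_table nss_paged_mode_table_sketch[] = {
	{
		.procname	= "paged_mode",
		.data		= &nss_paged_mode,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &nss_paged_mode_handler,
	},
	{ }
};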
Code Example #7
/*
 * nss_current_freq_handler()
 *	Handle Userspace Frequency Change Requests
 */
static int nss_current_freq_handler (ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	BUG_ON(!nss_wq);

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (!write) {
		printk("Frequency Set to %d\n", nss_cmd_buf.current_freq);
		return ret;
	}

	/* Turn off Auto Scale */
	nss_cmd_buf.auto_scale = 0;
	nss_runtime_samples.freq_scale_ready = 0;

	/* Reject the request if the frequency is not in the supported table, or turbo is requested but not supported */
	if (((nss_cmd_buf.current_freq != NSS_FREQ_110) && (nss_cmd_buf.current_freq != NSS_FREQ_275) &&
	     (nss_cmd_buf.current_freq != NSS_FREQ_550) && (nss_cmd_buf.current_freq != NSS_FREQ_733)) ||
	    ((nss_runtime_samples.freq_scale_sup_max != NSS_MAX_CPU_SCALES) && (nss_cmd_buf.current_freq == NSS_FREQ_733))) {
		printk("Frequency not found. Please check Frequency Table\n");
		return ret;
	}

	nss_work = (nss_work_t *)kmalloc(sizeof(nss_work_t), GFP_ATOMIC);
	if (!nss_work) {
		nss_info("NSS Freq WQ kmalloc fail");
		return ret;
	}
	INIT_WORK((struct work_struct *)nss_work, nss_wq_function);
	nss_work->frequency = nss_cmd_buf.current_freq;
	nss_work->stats_enable = 0;

	/* Ensure we start with a fresh set of samples later */
	nss_reset_frequency_stats_samples();

	queue_work(nss_wq, (struct work_struct *)nss_work);

	return ret;
}
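
Code Examples #7 and #8 allocate an nss_work_t, fill in frequency and stats_enable, and queue it on nss_wq. The layout below is inferred from that usage; the work function body is only a placeholder, since the actual message sent to the NSS firmware is not part of these excerpts.

/*
 * Work item layout inferred from the usage in Code Examples #7 and #8.
 * The globals mirror those referenced by the handlers; the work function
 * body is a placeholder sketch.
 */
typedef struct {
	struct work_struct my_work;	/* must be first so the (struct work_struct *) casts above are valid */
	uint32_t frequency;		/* requested NSS core frequency */
	uint32_t stats_enable;		/* whether the firmware should keep reporting load statistics */
} nss_work_t;

static nss_work_t *nss_work;
static struct workqueue_struct *nss_wq;

static void nss_wq_function(struct work_struct *work)
{
	nss_work_t *my_work = (nss_work_t *)work;

	/*
	 * Placeholder: the real implementation sends my_work->frequency and
	 * my_work->stats_enable to the NSS firmware before releasing the item.
	 */
	kfree(my_work);
}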
Code Example #8
/*
 * nss_auto_scale_handler()
 *	Enables or disables auto scaling
 */
static int nss_auto_scale_handler (ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (!write) {
		return ret;
	}

	if (nss_cmd_buf.auto_scale != 1) {
		/*
		 * Is auto scaling currently enabled? If so, send the command to
		 * disable stats reporting to NSS
		 */
		if (nss_runtime_samples.freq_scale_ready != 0) {
			nss_cmd_buf.current_freq = nss_runtime_samples.freq_scale[nss_runtime_samples.freq_scale_index].frequency;
			nss_work = (nss_work_t *)kmalloc(sizeof(nss_work_t), GFP_ATOMIC);
			if (!nss_work) {
				nss_info("NSS Freq WQ kmalloc fail");
				return ret;
			}
			INIT_WORK((struct work_struct *)nss_work, nss_wq_function);
			nss_work->frequency = nss_cmd_buf.current_freq;
			nss_work->stats_enable = 0;
			queue_work(nss_wq, (struct work_struct *)nss_work);
			nss_runtime_samples.freq_scale_ready = 0;

			/*
			 * The current samples would be stale later when scaling is
			 * enabled again, hence reset them
			 */
			nss_reset_frequency_stats_samples();
		}
		return ret;
	}

	/*
	 * Auto scaling is already enabled
	 */
	if (nss_runtime_samples.freq_scale_ready == 1) {
		return ret;
	}

	/*
	 * Setup default values - Middle of Freq Scale Band
	 */
	nss_runtime_samples.freq_scale_index = 1;
	nss_cmd_buf.current_freq = nss_runtime_samples.freq_scale[nss_runtime_samples.freq_scale_index].frequency;

	nss_work = (nss_work_t *)kmalloc(sizeof(nss_work_t), GFP_ATOMIC);
	if (!nss_work) {
		nss_info("NSS Freq WQ kmalloc fail");
		return ret;
	}
	INIT_WORK((struct work_struct *)nss_work, nss_wq_function);
	nss_work->frequency = nss_cmd_buf.current_freq;
	nss_work->stats_enable = 1;
	queue_work(nss_wq, (struct work_struct *)nss_work);

	nss_runtime_samples.freq_scale_ready = 1;

	return ret;
}
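
Both handlers call nss_reset_frequency_stats_samples() so that stale load samples do not skew the next scaling decision. Judging by the nss_runtime_samples fields initialized in Code Example #11, a reset helper along these lines would suffice; this is an illustrative sketch, not the driver's actual body.

/*
 * Sketch of a sample-reset helper based on the nss_runtime_samples fields
 * initialized in Code Example #11 (illustrative only).
 */
static void nss_reset_frequency_stats_samples(void)
{
	nss_runtime_samples.buffer_index = 0;
	nss_runtime_samples.sum = 0;
	nss_runtime_samples.average = 0;
	nss_runtime_samples.sample_count = 0;
	nss_runtime_samples.message_rate_limit = 0;
	nss_runtime_samples.freq_scale_rate_limit_up = 0;
	nss_runtime_samples.freq_scale_rate_limit_down = 0;
}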
Code Example #9
static int nss_probe(struct platform_device *nss_dev)
{
	struct nss_top_instance *nss_top = &nss_top_main;
	struct nss_ctx_instance *nss_ctx = NULL;
	struct nss_platform_data *npd = NULL;
	struct netdev_priv_instance *ndev_priv;
#if (NSS_DT_SUPPORT == 1)
	struct reset_control *rstctl = NULL;
#endif
	int i, err = 0;

	const struct firmware *nss_fw = NULL;
	int rc = -ENODEV;
	void __iomem *load_mem;

#if (NSS_DT_SUPPORT == 1)
	struct device_node *np = NULL;

	if (nss_top_main.nss_hal_common_init_done == false) {
		/*
		 * Perform clock init common to all NSS cores
		 */
		struct clk *nss_tcm_src = NULL;
		struct clk *nss_tcm_clk = NULL;

		/*
		 * Attach debug interface to TLMM
		 */
		nss_write_32((uint32_t)nss_top_main.nss_fpb_base, NSS_REGS_FPB_CSR_CFG_OFFSET, 0x360);

		/*
		 * NSS TCM CLOCK
		 */
		nss_tcm_src = clk_get(&nss_dev->dev, NSS_TCM_SRC_CLK);
		if (IS_ERR(nss_tcm_src)) {
			pr_err("nss-driver: cannot get clock: " NSS_TCM_SRC_CLK);
			return -EFAULT;
		}

		clk_set_rate(nss_tcm_src, NSSTCM_FREQ);
		clk_prepare(nss_tcm_src);
		clk_enable(nss_tcm_src);

		nss_tcm_clk = clk_get(&nss_dev->dev, NSS_TCM_CLK);
		if (IS_ERR(nss_tcm_clk)) {
			pr_err("nss-driver: cannot get clock: " NSS_TCM_CLK);
			return -EFAULT;
		}

		clk_prepare(nss_tcm_clk);
		clk_enable(nss_tcm_clk);

		nss_top_main.nss_hal_common_init_done = true;
		nss_info("nss_hal_common_reset Done.\n");
	}

	if (nss_dev->dev.of_node) {
		/*
		 * Device Tree based init
		 */

		np = of_node_get(nss_dev->dev.of_node);
		npd = nss_drv_of_get_pdata(np, nss_dev);

		of_node_put(np);

		if (!npd) {
			return -EFAULT;
		}

		nss_ctx = &nss_top->nss[npd->id];
		nss_ctx->id = npd->id;
		nss_dev->id = nss_ctx->id;

	} else {
		/*
		 * Platform Device based init
		 */

		npd = (struct nss_platform_data *) nss_dev->dev.platform_data;
		nss_ctx = &nss_top->nss[nss_dev->id];
		nss_ctx->id = nss_dev->id;
	}

#else
	npd = (struct nss_platform_data *) nss_dev->dev.platform_data;
	nss_ctx = &nss_top->nss[nss_dev->id];
	nss_ctx->id = nss_dev->id;
#endif
	nss_ctx->nss_top = nss_top;

	nss_info("%p: NSS_DEV_ID %s \n", nss_ctx, dev_name(&nss_dev->dev));

	/*
	 * F/W load from NSS Driver
	 */
	if (nss_ctx->id == 0) {
		rc = request_firmware(&nss_fw, NETAP0_IMAGE, &(nss_dev->dev));
	} else if (nss_ctx->id == 1) {
		rc = request_firmware(&nss_fw, NETAP1_IMAGE, &(nss_dev->dev));
	} else {
		nss_warning("%p: Invalid nss dev: %d \n", nss_ctx, nss_dev->id);
	}

	/*
	 * Check whether the firmware was read successfully
	 */
	if (rc) {
		nss_warning("%p: request_firmware failed with err code: %d", nss_ctx, rc);
		err = rc;
		goto err_init_0;
	}

	if (nss_fw->size < MIN_IMG_SIZE) {
		nss_warning("%p: nss firmware is truncated, size: %zu", nss_ctx, nss_fw->size);
	}

	load_mem = ioremap_nocache(npd->load_addr, nss_fw->size);
	if (load_mem == NULL) {
		nss_warning("%p: ioremap_nocache failed: %x", nss_ctx, npd->load_addr);
		release_firmware(nss_fw);
		err = -ENOMEM;
		goto err_init_0;
	}

	printk("nss_driver - fw of size %u  bytes copied to load addr: %x, nss_id : %d\n", nss_fw->size, npd->load_addr, nss_dev->id);
	memcpy_toio(load_mem, nss_fw->data, nss_fw->size);
	release_firmware(nss_fw);
	iounmap(load_mem);

	/*
	 * Both NSS cores are fed by the same core clock, so hook it only once
	 */
	if (!nss_ctx->id) {
		nss_core0_clk = clk_get(&nss_dev->dev, "nss_core_clk");
		if (IS_ERR(nss_core0_clk)) {
			err = PTR_ERR(nss_core0_clk);
			nss_info("%p: nss_core_clk get failed for %s, err=%d\n", nss_ctx, dev_name(&nss_dev->dev), err);
			return err;
		}
		clk_set_rate(nss_core0_clk, NSS_FREQ_550);
		clk_prepare(nss_core0_clk);
		clk_enable(nss_core0_clk);

#if (NSS_PM_SUPPORT == 1)
		/*
		 * Check if turbo is supported
		 */
		if (npd->turbo_frequency) {
			/*
			 * Turbo is supported
			 */
			printk("nss_driver - Turbo Support %d\n", npd->turbo_frequency);
			nss_runtime_samples.freq_scale_sup_max = NSS_MAX_CPU_SCALES;
			nss_pm_set_turbo();
		} else {
			printk("nss_driver - Turbo No Support %d\n", npd->turbo_frequency);
			nss_runtime_samples.freq_scale_sup_max = NSS_MAX_CPU_SCALES - 1;
		}
#else
		printk("nss_driver - Turbo Not Supported\n");
#endif
	}

	/*
	 * Record the NSS firmware load address for this core
	 */
	nss_info("%p: Setting NSS%d Firmware load address to %x\n", nss_ctx, nss_ctx->id, npd->load_addr);
	nss_top->nss[nss_ctx->id].load = npd->load_addr;

	/*
	 * Get virtual and physical memory addresses for nss logical/hardware address maps
	 */

	/*
	 * Virtual address of CSM space
	 */
	nss_ctx->nmap = npd->nmap;
	nss_assert(nss_ctx->nmap);

	/*
	 * Physical address of CSM space
	 */
	nss_ctx->nphys = npd->nphys;
	nss_assert(nss_ctx->nphys);

	/*
	 * Virtual address of logical registers space
	 */
	nss_ctx->vmap = npd->vmap;
	nss_assert(nss_ctx->vmap);

	/*
	 * Physical address of logical registers space
	 */
	nss_ctx->vphys = npd->vphys;
	nss_assert(nss_ctx->vphys);
	nss_info("%d:ctx=%p, vphys=%x, vmap=%x, nphys=%x, nmap=%x",
			nss_ctx->id, nss_ctx, nss_ctx->vphys, nss_ctx->vmap, nss_ctx->nphys, nss_ctx->nmap);

	/*
	 * Register netdevice handlers
	 */
	nss_ctx->int_ctx[0].ndev = alloc_netdev(sizeof(struct netdev_priv_instance),
					"qca-nss-dev%d", nss_dummy_netdev_setup);
	if (nss_ctx->int_ctx[0].ndev == NULL) {
		nss_warning("%p: Could not allocate net_device #0", nss_ctx);
		err = -ENOMEM;
		goto err_init_0;
	}

	nss_ctx->int_ctx[0].ndev->netdev_ops = &nss_netdev_ops;
	nss_ctx->int_ctx[0].ndev->ethtool_ops = &nss_ethtool_ops;
	err = register_netdev(nss_ctx->int_ctx[0].ndev);
	if (err) {
		nss_warning("%p: Could not register net_device #0", nss_ctx);
		goto err_init_1;
	}

	/*
	 * request for IRQs
	 *
	 * WARNING: CPU affinities should be set using OS supported methods
	 */
	nss_ctx->int_ctx[0].nss_ctx = nss_ctx;
	nss_ctx->int_ctx[0].shift_factor = 0;
	nss_ctx->int_ctx[0].irq = npd->irq[0];
	err = request_irq(npd->irq[0], nss_handle_irq, IRQF_DISABLED, "nss", &nss_ctx->int_ctx[0]);
	if (err) {
		nss_warning("%d: IRQ0 request failed", nss_dev->id);
		goto err_init_2;
	}

	/*
	 * Register NAPI for NSS core interrupt #0
	 */
	ndev_priv = netdev_priv(nss_ctx->int_ctx[0].ndev);
	ndev_priv->int_ctx = &nss_ctx->int_ctx[0];
	netif_napi_add(nss_ctx->int_ctx[0].ndev, &nss_ctx->int_ctx[0].napi, nss_core_handle_napi, 64);
	napi_enable(&nss_ctx->int_ctx[0].napi);
	nss_ctx->int_ctx[0].napi_active = true;

	/*
	 * Check if second interrupt is supported on this nss core
	 */
	if (npd->num_irq > 1) {
		nss_info("%d: This NSS core supports two interrupts", nss_dev->id);

		/*
		 * Register netdevice handlers
		 */
		nss_ctx->int_ctx[1].ndev = alloc_netdev(sizeof(struct netdev_priv_instance),
						"qca-nss-dev%d", nss_dummy_netdev_setup);
		if (nss_ctx->int_ctx[1].ndev == NULL) {
			nss_warning("%p: Could not allocate net_device #1", nss_ctx);
			err = -ENOMEM;
			goto err_init_3;
		}

		nss_ctx->int_ctx[1].ndev->netdev_ops = &nss_netdev_ops;
		nss_ctx->int_ctx[1].ndev->ethtool_ops = &nss_ethtool_ops;
		err = register_netdev(nss_ctx->int_ctx[1].ndev);
		if (err) {
			nss_warning("%p: Could not register net_device #1", nss_ctx);
			goto err_init_4;
		}

		nss_ctx->int_ctx[1].nss_ctx = nss_ctx;
		nss_ctx->int_ctx[1].shift_factor = 15;
		nss_ctx->int_ctx[1].irq = npd->irq[1];
		err = request_irq(npd->irq[1], nss_handle_irq, IRQF_DISABLED, "nss", &nss_ctx->int_ctx[1]);
		if (err) {
			nss_warning("%d: IRQ1 request failed for nss", nss_dev->id);
			goto err_init_5;
		}

		/*
		 * Register NAPI for NSS core interrupt #1
		 */
		ndev_priv = netdev_priv(nss_ctx->int_ctx[1].ndev);
		ndev_priv->int_ctx = &nss_ctx->int_ctx[1];
		netif_napi_add(nss_ctx->int_ctx[1].ndev, &nss_ctx->int_ctx[1].napi, nss_core_handle_napi, 64);
		napi_enable(&nss_ctx->int_ctx[1].napi);
		nss_ctx->int_ctx[1].napi_active = true;
	}

	spin_lock_bh(&(nss_top->lock));

	/*
	 * Check which functionalities are supported by this NSS core
	 */
	if (npd->shaping_enabled == NSS_FEATURE_ENABLED) {
		nss_top->shaping_handler_id = nss_dev->id;
		printk(KERN_INFO "%s: NSS Shaping is enabled, handler id: %u\n", __func__, nss_top->shaping_handler_id);
	}

	if (npd->ipv4_enabled == NSS_FEATURE_ENABLED) {
		nss_top->ipv4_handler_id = nss_dev->id;
		nss_ipv4_register_handler();
		nss_pppoe_register_handler();
		nss_eth_rx_register_handler();
		nss_n2h_register_handler();
		nss_virt_if_register_handler();
		nss_lag_register_handler();
		nss_dynamic_interface_register_handler();
		nss_top->capwap_handler_id = nss_dev->id;
		nss_capwap_init();

		for (i = 0; i < NSS_MAX_VIRTUAL_INTERFACES; i++) {
			nss_top->virt_if_handler_id[i] = nss_dev->id;
		}

		nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_802_3_REDIR] = nss_dev->id;
	}

	if (npd->ipv4_reasm_enabled == NSS_FEATURE_ENABLED) {
		nss_top->ipv4_reasm_handler_id = nss_dev->id;
		nss_ipv4_reasm_register_handler();
	}

	if (npd->ipv6_enabled == NSS_FEATURE_ENABLED) {
		nss_top->ipv6_handler_id = nss_dev->id;
		nss_ipv6_register_handler();
	}

	if (npd->crypto_enabled == NSS_FEATURE_ENABLED) {
		nss_top->crypto_handler_id = nss_dev->id;
		nss_crypto_register_handler();
	}

	if (npd->ipsec_enabled == NSS_FEATURE_ENABLED) {
		nss_top->ipsec_handler_id = nss_dev->id;
		nss_ipsec_register_handler();
	}

	if (npd->wlan_enabled == NSS_FEATURE_ENABLED) {
		nss_top->wlan_handler_id = nss_dev->id;
	}

	if (npd->tun6rd_enabled == NSS_FEATURE_ENABLED) {
		nss_top->tun6rd_handler_id = nss_dev->id;
	}

	if (npd->tunipip6_enabled == NSS_FEATURE_ENABLED) {
		nss_top->tunipip6_handler_id = nss_dev->id;
		nss_tunipip6_register_handler();
	}

	if (npd->gre_redir_enabled == NSS_FEATURE_ENABLED) {
		nss_top->gre_redir_handler_id = nss_dev->id;
		nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR] =  nss_dev->id;
		nss_gre_redir_register_handler();
		nss_sjack_register_handler();
	}

	/*
	 * Mark the data plane as enabled so that, once NSS core init is done, we register it with nss-gmac
	 */
	for (i = 0; i < NSS_MAX_PHYSICAL_INTERFACES; i++) {
		if (npd->gmac_enabled[i] == NSS_FEATURE_ENABLED) {
			nss_data_plane_set_enabled(i);
		}
	}

#if (NSS_PM_SUPPORT == 1)
	nss_freq_register_handler();
#endif
	nss_lso_rx_register_handler();

	nss_top->frequency_handler_id = nss_dev->id;

	spin_unlock_bh(&(nss_top->lock));

	/*
	 * Initialize decongestion callbacks to NULL
	 */
	for (i = 0; i < NSS_MAX_CLIENTS; i++) {
		nss_ctx->queue_decongestion_callback[i] = NULL;
		nss_ctx->queue_decongestion_ctx[i] = NULL;
	}

	spin_lock_init(&(nss_ctx->decongest_cb_lock));
	nss_ctx->magic = NSS_CTX_MAGIC;

	nss_info("%p: Reseting NSS core %d now", nss_ctx, nss_ctx->id);

	/*
	 * Enable clocks and bring NSS core out of reset
	 */
#if (NSS_DT_SUPPORT == 1)
	/*
	 * Remove UBI32 reset clamp
	 */
	rstctl = devm_reset_control_get(&nss_dev->dev, "clkrst_clamp");
	if (IS_ERR(rstctl)) {
		nss_info("%p: Deassert UBI32 reset clamp failed", nss_ctx, nss_ctx->id);
		err = -EFAULT;
		goto err_init_5;
	}
	reset_control_deassert(rstctl);
	mdelay(1);
	reset_control_put(rstctl);

	/*
	 * Remove UBI32 core clamp
	 */
	rstctl = devm_reset_control_get(&nss_dev->dev, "clamp");
	if (IS_ERR(rstctl)) {
		nss_info("%p: Deassert UBI32 core clamp failed", nss_ctx, nss_ctx->id);
		err = -EFAULT;
		goto err_init_5;
	}
	reset_control_deassert(rstctl);
	mdelay(1);
	reset_control_put(rstctl);

	/*
	 * Remove UBI32 AHB reset
	 */
	rstctl = devm_reset_control_get(&nss_dev->dev, "ahb");
	if (IS_ERR(rstctl)) {
		nss_info("%p: Deassert AHB reset failed", nss_ctx, nss_ctx->id);
		err = -EFAULT;
		goto err_init_5;
	}
	reset_control_deassert(rstctl);
	mdelay(1);
	reset_control_put(rstctl);

	/*
	 * Remove UBI32 AXI reset
	 */
	rstctl = devm_reset_control_get(&nss_dev->dev, "axi");
	if (IS_ERR(rstctl)) {
		nss_info("%p: Deassert AXI reset failed", nss_ctx, nss_ctx->id);
		err = -EFAULT;
		goto err_init_5;
	}
	reset_control_deassert(rstctl);
	mdelay(1);
	reset_control_put(rstctl);

	nss_hal_core_reset(nss_ctx->nmap, nss_ctx->load);
#else
	nss_hal_core_reset(nss_dev->id, nss_ctx->nmap, nss_ctx->load, nss_top->clk_src);
#endif
	/*
	 * Enable interrupts for NSS core
	 */
	nss_hal_enable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[0].irq,
					nss_ctx->int_ctx[0].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);

	if (npd->num_irq > 1) {
		nss_hal_enable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[1].irq,
					nss_ctx->int_ctx[1].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);
	}

	/*
	 * Initialize max buffer size for NSS core
	 */
	nss_ctx->max_buf_size = NSS_NBUF_PAYLOAD_SIZE;
	nss_info("%p: All resources initialized and nss core%d has been brought out of reset", nss_ctx, nss_dev->id);
	goto err_init_0;

err_init_5:
	unregister_netdev(nss_ctx->int_ctx[1].ndev);
err_init_4:
	free_netdev(nss_ctx->int_ctx[1].ndev);
err_init_3:
	free_irq(npd->irq[0], &nss_ctx->int_ctx[0]);
err_init_2:
	unregister_netdev(nss_ctx->int_ctx[0].ndev);
err_init_1:
	free_netdev(nss_ctx->int_ctx[0].ndev);

#if (NSS_DT_SUPPORT == 1)
	if (nss_dev->dev.of_node) {
		if (npd->nmap) {
			iounmap((void *)npd->nmap);
		}

		if (npd->vmap) {
			iounmap((void *)npd->vmap);
		}
	}
#endif

err_init_0:

#if (NSS_DT_SUPPORT == 1)
	if (nss_dev->dev.of_node) {
		devm_kfree(&nss_dev->dev, npd);
	}

#endif
	return err;
}
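
nss_probe() and nss_remove() are attached to the platform bus through the nss_driver structure that Code Example #3 unregisters and Code Example #11 registers. A hedged sketch of that declaration follows; the .name string and the device-tree compatible entry are assumptions, not taken from the driver.

/*
 * Illustrative platform_driver declaration tying nss_probe/nss_remove to the
 * platform bus. The name string and match table entry are assumptions.
 */
#if (NSS_DT_SUPPORT == 1)
static const struct of_device_id nss_dt_match_sketch[] = {
	{ .compatible = "qcom,nss" },	/* assumed compatible string */
	{ }
};
#endif

static struct platform_driver nss_driver = {
	.probe	= nss_probe,
	.remove	= nss_remove,
	.driver	= {
		.name		= "qca-nss",	/* assumed driver name */
		.owner		= THIS_MODULE,
#if (NSS_DT_SUPPORT == 1)
		.of_match_table	= nss_dt_match_sketch,
#endif
	},
};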
Code Example #10
/*
 * nss_drv_of_get_pdata()
 *	Retrieve platform data from device node.
 */
static struct nss_platform_data *nss_drv_of_get_pdata(struct device_node *np,
						      struct platform_device *pdev)
{
	struct nss_platform_data *npd = NULL;
	struct nss_ctx_instance *nss_ctx = NULL;
	struct nss_top_instance *nss_top = &nss_top_main;
	uint32_t val;
	struct resource res_nphys, res_vphys;
	int32_t i;

	npd = devm_kzalloc(&pdev->dev, sizeof(struct nss_platform_data), GFP_KERNEL);
	if (!npd) {
		return NULL;
	}

	if (of_property_read_u32(np, "qcom,id", &npd->id)
	    || of_property_read_u32(np, "qcom,rst_addr", &npd->rst_addr)
	    || of_property_read_u32(np, "qcom,load_addr", &npd->load_addr)
	    || of_property_read_u32(np, "qcom,turbo_frequency", &npd->turbo_frequency)
	    || of_property_read_u32(np, "qcom,gmac0_enabled", &npd->gmac_enabled[0])
	    || of_property_read_u32(np, "qcom,gmac1_enabled", &npd->gmac_enabled[1])
	    || of_property_read_u32(np, "qcom,gmac2_enabled", &npd->gmac_enabled[2])
	    || of_property_read_u32(np, "qcom,gmac3_enabled", &npd->gmac_enabled[3])
	    || of_property_read_u32(np, "qcom,num_irq", &npd->num_irq)) {
		pr_err("%s: error reading critical device node properties\n", np->name);
		goto out;
	}

	nss_ctx = &nss_top->nss[npd->id];
	nss_ctx->id = npd->id;

	if (of_address_to_resource(np, 0, &res_nphys) != 0) {
		nss_info("%p: nss%d: of_address_to_resource() fail for nphys \n", nss_ctx, nss_ctx->id);
		goto out;
	}

	if (of_address_to_resource(np, 1, &res_vphys) != 0) {
		nss_info("%p: nss%d: of_address_to_resource() fail for vphys \n", nss_ctx, nss_ctx->id);
		goto out;
	}

	/*
	 * Save physical addresses
	 */
	npd->nphys = res_nphys.start;
	npd->vphys = res_vphys.start;

	npd->nmap = (uint32_t)ioremap_nocache(npd->nphys, resource_size(&res_nphys));
	if (!npd->nmap) {
		nss_info("%p: nss%d: ioremap() fail for nphys \n", nss_ctx, nss_ctx->id);
		goto out;
	}

	npd->vmap = (uint32_t)ioremap_nocache(npd->vphys, resource_size(&res_vphys));
	if (!npd->vmap) {
		nss_info("%p: nss%d: ioremap() fail for vphys \n", nss_ctx, nss_ctx->id);
		goto out;
	}

	/*
	 * Clear TCM memory used by this core
	 */
	for (i = 0; i < resource_size(&res_vphys); i += 4) {
		nss_write_32((uint32_t)npd->vmap, i, 0);
	}

	/*
	 * Get IRQ numbers
	 */
	for (val = 0; val < npd->num_irq; val++) {
		npd->irq[val] = irq_of_parse_and_map(np, val);
		if (!npd->irq[val]) {
			nss_info("%p: nss%d: irq_of_parse_and_map() fail for irq %d\n",
				 nss_ctx, nss_ctx->id, val);
			goto out;
		}
	}

	if (of_property_read_u32(np, "qcom,ipv4_enabled", &npd->ipv4_enabled)
	    || of_property_read_u32(np, "qcom,ipv6_enabled", &npd->ipv6_enabled)
	    || of_property_read_u32(np, "qcom,l2switch_enabled", &npd->l2switch_enabled)
	    || of_property_read_u32(np, "qcom,crypto_enabled", &npd->crypto_enabled)
	    || of_property_read_u32(np, "qcom,ipsec_enabled", &npd->ipsec_enabled)
	    || of_property_read_u32(np, "qcom,wlan_enabled", &npd->wlan_enabled)
	    || of_property_read_u32(np, "qcom,tun6rd_enabled", &npd->tun6rd_enabled)
	    || of_property_read_u32(np, "qcom,tunipip6_enabled", &npd->tunipip6_enabled)
	    || of_property_read_u32(np, "qcom,shaping_enabled", &npd->shaping_enabled)) {
		pr_warn("%s: error reading non-critical device node properties\n", np->name);
	}

	return npd;

out:
	if (npd->nmap) {
		iounmap((void *)npd->nmap);
	}

	if (npd->vmap) {
		iounmap((void *)npd->vmap);
	}

	devm_kfree(&pdev->dev, npd);

	return NULL;
}
Code Example #11
/*
 * nss_init()
 *	Registers nss driver
 */
static int __init nss_init(void)
{
#if (NSS_DT_SUPPORT == 1)
	struct device_node *cmn = NULL;
	struct resource res_nss_fpb_base;
#endif

	nss_info("Init NSS driver");

#if (NSS_PM_SUPPORT == 1)
	nss_freq_change_context = nss_freq_get_mgr();
#else
	nss_freq_change_context = NULL;
#endif

#if (NSS_DT_SUPPORT == 1)
	/*
	 * Get reference to NSS common device node
	 */
	cmn = of_find_node_by_name(NULL, "nss-common");
	if (!cmn) {
		nss_info("cannot find nss-common node\n");
		return -EFAULT;
	}

	if (of_address_to_resource(cmn, 0, &res_nss_fpb_base) != 0) {
		nss_info("of_address_to_resource() return error for nss_fpb_base\n");
		of_node_put(cmn);
		return -EFAULT;
	}

	nss_top_main.nss_fpb_base = ioremap_nocache(res_nss_fpb_base.start,
						    resource_size(&res_nss_fpb_base));
	if (!nss_top_main.nss_fpb_base) {
		nss_info("ioremap fail for nss_fpb_base\n");
		of_node_put(cmn);
		return -EFAULT;
	}

	nss_top_main.nss_hal_common_init_done = false;

	/*
	 * Release reference to NSS common device node
	 */
	of_node_put(cmn);
	cmn = NULL;
#else
	/*
	 * Perform clock init common to all NSS cores
	 */
	nss_hal_common_reset(&(nss_top_main.clk_src));

#endif /* NSS_DT_SUPPORT */

	/*
	 * Initialize spin locks
	 */
	spin_lock_init(&(nss_top_main.lock));
	spin_lock_init(&(nss_top_main.stats_lock));

	/*
	 * Enable NSS statistics
	 */
	nss_stats_init();

	/*
	 * Register sysctl table.
	 */
	nss_dev_header = register_sysctl_table(nss_root);

	/*
	 * Registering sysctl for ipv4/6 specific config.
	 */
	nss_ipv4_register_sysctl();
	nss_ipv6_register_sysctl();

#if (NSS_PM_SUPPORT == 1)
	/*
	 * Setup Runtime Sample values
	 */
	nss_runtime_samples.freq_scale[0].frequency = NSS_FREQ_110;
	nss_runtime_samples.freq_scale[0].minimum = NSS_FREQ_110_MIN;
	nss_runtime_samples.freq_scale[0].maximum = NSS_FREQ_110_MAX;
	nss_runtime_samples.freq_scale[1].frequency = NSS_FREQ_550;
	nss_runtime_samples.freq_scale[1].minimum = NSS_FREQ_550_MIN;
	nss_runtime_samples.freq_scale[1].maximum = NSS_FREQ_550_MAX;
	nss_runtime_samples.freq_scale[2].frequency = NSS_FREQ_733;
	nss_runtime_samples.freq_scale[2].minimum = NSS_FREQ_733_MIN;
	nss_runtime_samples.freq_scale[2].maximum = NSS_FREQ_733_MAX;
	nss_runtime_samples.freq_scale_index = 1;
	nss_runtime_samples.freq_scale_ready = 0;
	nss_runtime_samples.freq_scale_rate_limit_up = 0;
	nss_runtime_samples.freq_scale_rate_limit_down = 0;
	nss_runtime_samples.buffer_index = 0;
	nss_runtime_samples.sum = 0;
	nss_runtime_samples.sample_count = 0;
	nss_runtime_samples.average = 0;
	nss_runtime_samples.message_rate_limit = 0;
	nss_runtime_samples.initialized = 0;

	nss_cmd_buf.current_freq = nss_runtime_samples.freq_scale[nss_runtime_samples.freq_scale_index].frequency;

	/*
	 * Initialize the frequency-change workqueue
	 */
	nss_wq = create_workqueue("nss_freq_queue");

	/*
	 * Initialize NSS Bus PM module
	 */
	nss_pm_init();

	/*
	 * Register with Bus driver
	 */
	pm_client = nss_pm_client_register(NSS_PM_CLIENT_NETAP);
	if (!pm_client) {
		nss_warning("Error registering with PM driver");
	}
#endif

	/*
	 * Register platform_driver
	 */
	return platform_driver_register(&nss_driver);
}
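
Finally, nss_init() and nss_cleanup() are the module entry and exit points; the usual module_init/module_exit wiring is sketched below, with illustrative description and license strings.

/*
 * Standard module wiring for nss_init()/nss_cleanup().
 * Description and license strings here are illustrative.
 */
module_init(nss_init);
module_exit(nss_cleanup);

MODULE_DESCRIPTION("QCA NSS driver");
MODULE_LICENSE("Dual BSD/GPL");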