Example #1
static s32
wl_genl_handle_msg(
	struct sk_buff *skb,
	struct genl_info *info)
{
	struct nlattr *na;
	u8 *data = NULL;

	WL_DBG(("Enter \n"));

	if (info == NULL) {
		return -EINVAL;
	}

	na = info->attrs[BCM_GENL_ATTR_MSG];
	if (!na) {
		WL_ERR(("nlattribute NULL\n"));
		return -EINVAL;
	}

	data = (u8 *)nla_data(na);
	if (!data) {
		WL_ERR(("Invalid data\n"));
		return -EINVAL;
	} else {
		/* Handle the data */
		WL_DBG(("Data received from pid (%d) \n", info->snd_pid));
	}

	return 0;
}
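For context, the handler in Example #1 only runs once it is bound to a generic-netlink command. The sketch below shows one way that wiring could look under the legacy (pre-3.13) genl API these examples use; wl_genl_policy, BCM_GENL_ATTR_MAX and the chosen attribute types are illustrative assumptions, not the driver's actual definitions.

/* Illustrative sketch only: policy and op binding for wl_genl_handle_msg(),
 * assuming the legacy genl API (per-op .policy) used elsewhere in these
 * examples. BCM_GENL_ATTR_MAX and the attribute types are assumptions.
 */
static struct nla_policy wl_genl_policy[BCM_GENL_ATTR_MAX + 1] = {
	[BCM_GENL_ATTR_STRING] = { .type = NLA_NUL_STRING },
	[BCM_GENL_ATTR_MSG] = { .type = NLA_BINARY },
};

static struct genl_ops wl_genl_ops = {
	.cmd = BCM_GENL_CMD_MSG,
	.flags = 0,
	.policy = wl_genl_policy,	/* validates attributes before .doit runs */
	.doit = wl_genl_handle_msg,	/* Example #1 */
	.dumpit = NULL,
};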
Example #2
int get_roam_channel_list(int target_chan, chanspec_t *channels, const wlc_ssid_t *ssid)
{
	int i, n = 1;
	uint band;

	WL_DBG((" %s: %02d\n", __FUNCTION__, target_chan));

	if (target_chan <= 14)
		band = WL_CHANSPEC_BAND_2G;
	else
		band = WL_CHANSPEC_BAND_5G;
	*channels++ = (target_chan & WL_CHANSPEC_CHAN_MASK) | band | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;

	for (i = 0; i < n_roam_cache; i++) {
		if ((roam_cache[i].ssid_len == ssid->SSID_len) &&
			((roam_cache[i].chanspec & WL_CHANSPEC_CHAN_MASK) != target_chan) &&
			(memcmp(roam_cache[i].ssid, ssid->SSID, ssid->SSID_len) == 0)) {
			/* match found, add it */
			*channels = roam_cache[i].chanspec & WL_CHANSPEC_CHAN_MASK;
			WL_DBG((" %s: %02d\n", __FUNCTION__, *channels));
			if (*channels <= 14)
				*channels |= WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
			else
				*channels |= WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;

			channels++; n++;
		}
	}

	return n;
}
Example #3
void print_roam_cache(void)
{
	int i;

	WL_DBG((" %d cache\n", n_roam_cache));

	for (i = 0; i < n_roam_cache; i++) {
		roam_cache[i].ssid[roam_cache[i].ssid_len] = 0;
		WL_DBG(("0x%02X %02d %s\n", roam_cache[i].chanspec, roam_cache[i].ssid_len, roam_cache[i].ssid));
	}
}
Example #4
/* Generic Netlink Initialization */
static int wl_genl_init(void)
{
	int ret;

	WL_DBG(("GEN Netlink Init\n\n"));

	/* register new family */
	ret = genl_register_family(&wl_genl_family);
	if (ret != 0)
		goto failure;

	/* register functions (commands) of the new family */
	ret = genl_register_ops(&wl_genl_family, &wl_genl_ops);
	if (ret != 0) {
		WL_ERR(("register ops failed: %i\n", ret));
		genl_unregister_family(&wl_genl_family);
		goto failure;
	}

	ret = genl_register_mc_group(&wl_genl_family, &wl_genl_mcast);
	if (ret != 0) {
		WL_ERR(("register mc_group failed: %i\n", ret));
		genl_unregister_ops(&wl_genl_family, &wl_genl_ops);
		genl_unregister_family(&wl_genl_family);
		goto failure;
	}

	return 0;

failure:
	WL_ERR(("Registering Netlink failed!!\n"));
	return -1;
}
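A matching teardown path is implied by the registration sequence in Example #4. Below is a minimal sketch, assuming the same legacy genl API; the driver's actual exit function may differ.

/* Illustrative sketch only: unregister in the reverse order of wl_genl_init() */
static void wl_genl_deinit(void)
{
	WL_DBG(("GEN Netlink Deinit\n"));

	genl_unregister_mc_group(&wl_genl_family, &wl_genl_mcast);
	if (genl_unregister_ops(&wl_genl_family, &wl_genl_ops) < 0)
		WL_ERR(("unregister ops failed\n"));
	if (genl_unregister_family(&wl_genl_family) < 0)
		WL_ERR(("unregister family failed\n"));
}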
Example #5
void add_roam_cache(wl_bss_info_t *bi)
{
	int i;
	uint8 channel;

#if defined(CUSTOMER_HW4) && defined(WES_SUPPORT)
	if (roamscan_mode == ROAMSCAN_MODE_WES)
		return;
#endif

	if (n_roam_cache >= MAX_ROAM_CACHE)
		return;

	for (i = 0; i < n_roam_cache; i++) {
		if ((roam_cache[i].ssid_len == bi->SSID_len) &&
			(roam_cache[i].chanspec == bi->chanspec) &&
			(memcmp(roam_cache[i].ssid, bi->SSID, bi->SSID_len) == 0)) {
			/* identical one found, just return */
			return;
		}
	}

	roam_cache[n_roam_cache].ssid_len = bi->SSID_len;
	channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
	WL_DBG(("CHSPEC 0x%X %d, CTL %d\n",
		bi->chanspec, CHSPEC_CHANNEL(bi->chanspec), bi->ctl_ch));
	roam_cache[n_roam_cache].chanspec =
		(channel <= CH_MAX_2G_CHANNEL ? band2G : band5G) | band_bw | channel;
	memcpy(roam_cache[n_roam_cache].ssid, bi->SSID, bi->SSID_len);

	n_roam_cache++;
}
Example #6
void add_roam_cache(wl_bss_info_t *bi)
{
	int i;
	uint8 channel;
	char chanbuf[CHANSPEC_STR_LEN];

#if defined(CUSTOMER_HW4) && defined(WES_SUPPORT)
	if (roamscan_mode == ROAMSCAN_MODE_WES)
		return;
#endif

	if (n_roam_cache >= MAX_ROAM_CACHE)
		return;

	for (i = 0; i < n_roam_cache; i++) {
		if ((roam_cache[i].ssid_len == bi->SSID_len) &&
			(roam_cache[i].chanspec == bi->chanspec) &&
			(memcmp(roam_cache[i].ssid, bi->SSID, bi->SSID_len) == 0)) {
			/* identical one found, just return */
			return;
		}
	}

	roam_cache[n_roam_cache].ssid_len = bi->SSID_len;
	channel = wf_chspec_ctlchan(bi->chanspec);
	WL_DBG(("CHSPEC  = %s, CTL %d\n", wf_chspec_ntoa_ex(bi->chanspec, chanbuf), channel));
	roam_cache[n_roam_cache].chanspec =
		(channel <= CH_MAX_2G_CHANNEL ? band2G : band5G) | band_bw | channel;
	memcpy(roam_cache[n_roam_cache].ssid, bi->SSID, bi->SSID_len);

	n_roam_cache++;
}
Example #7
int set_roamscan_channel_list(struct net_device *dev,
	unsigned char n, unsigned char channels[], int ioctl_ver)
{
	int i;
	int error;
	struct {
		int n;
		chanspec_t channels[20];
	} channel_list;
	char iobuf[200];
	uint band2G, band5G, bw;
	roamscan_mode = 1;

	if (n > 20)
		n = 20;

#ifdef D11AC_IOTYPES
	if (ioctl_ver == 1) {
		/* legacy chanspec */
		band2G = WL_LCHANSPEC_BAND_2G;
		band5G = WL_LCHANSPEC_BAND_5G;
		bw = WL_LCHANSPEC_BW_20 | WL_LCHANSPEC_CTL_SB_NONE;
	} else {
		band2G = WL_CHANSPEC_BAND_2G;
		band5G = WL_CHANSPEC_BAND_5G;
		bw = WL_CHANSPEC_BW_20;
	}
#else
	band2G = WL_CHANSPEC_BAND_2G;
	band5G = WL_CHANSPEC_BAND_5G;
	bw = WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
#endif /* D11AC_IOTYPES */

	for (i = 0; i < n; i++) {
		chanspec_t chanspec;

		if (channels[i] <= 14) {
			chanspec = band2G | bw | channels[i];
		} else {
			chanspec = band5G | bw | channels[i];
		}
		roam_cache[i].chanspec = chanspec;
		channel_list.channels[i] = chanspec;

		WL_DBG(("%s: channel[%d] - [%02d] \n", __FUNCTION__, i, channels[i]));
	}

	n_roam_cache = n;
	channel_list.n = n;

	error = wldev_iovar_setbuf(dev, "roamscan_channels", &channel_list,
		sizeof(channel_list), iobuf, sizeof(iobuf), NULL);
	if (error) {
		WL_ERROR(("Failed to set roamscan channels, error = %d\n", error));
	}

	return error;
}
Example #8
int set_roamscan_channel_list(struct net_device *dev,
	unsigned char n, unsigned char channels[], int ioctl_ver)
{
	int i;
	int error;
	channel_list_t channel_list;
	char iobuf[WLC_IOCTL_SMLEN];
	roamscan_mode = ROAMSCAN_MODE_WES;

	if (n > MAX_CHANNEL_LIST)
		n = MAX_CHANNEL_LIST;

	for (i = 0; i < n; i++) {
		chanspec_t chanspec;

		if (channels[i] <= CH_MAX_2G_CHANNEL) {
			chanspec = band2G | band_bw | channels[i];
		} else {
			chanspec = band5G | band_bw | channels[i];
		}
		roam_cache[i].chanspec = chanspec;
		channel_list.channels[i] = chanspec;

		WL_DBG(("channel[%d] - [%02d] \n", i, channels[i]));
	}

	n_roam_cache = n;
	channel_list.n = n;

	/* need to set ROAMSCAN_MODE_NORMAL to update roamscan_channels,
	 * otherwise, it won't be updated
	 */
	wldev_iovar_setint(dev, "roamscan_mode", ROAMSCAN_MODE_NORMAL);
	error = wldev_iovar_setbuf(dev, "roamscan_channels", &channel_list,
		sizeof(channel_list), iobuf, sizeof(iobuf), NULL);
	if (error) {
		WL_DBG(("Failed to set roamscan channels, error = %d\n", error));
	}
	wldev_iovar_setint(dev, "roamscan_mode", ROAMSCAN_MODE_WES);

	return error;
}
Example #9
int get_roamscan_channel_list(struct net_device *dev, unsigned char channels[])
{
	int n = 0;

	if (roamscan_mode) {
		for (n = 0; n < n_roam_cache; n++) {
			channels[n] = roam_cache[n].chanspec & WL_CHANSPEC_CHAN_MASK;

			WL_DBG(("%s: channel[%d] - [%02d] \n", __FUNCTION__, n, channels[n]));
		}
	}

	return n;
}
Example #10
int
wl_cfgnan_support_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, char *cmd, nan_cmd_data_t *cmd_data)
{
	wl_nan_ioc_t *nanioc = NULL;
	void *pxtlv;
	s32 ret = BCME_OK;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	uint16 nanioc_size = sizeof(wl_nan_ioc_t) + NAN_IOCTL_BUF_SIZE;

	nanioc = kzalloc(nanioc_size, kflags);
	if (!nanioc) {
		WL_ERR((" memory allocation failed \n"));
		return -ENOMEM;
	}

	/*
	 * command to test
	 *
	 * wl: wl nan
	 *
	 * wpa_cli: DRIVER NAN_SUPPORT
	 */

	/* nan support */
	nanioc->version = htod16(WL_NAN_IOCTL_VERSION);
	nanioc->id = htod16(WL_NAN_CMD_ENABLE);
	pxtlv = nanioc->data;
	nanioc->len = htod16(BCM_XTLV_HDR_SIZE + 1);
	nanioc_size = sizeof(wl_nan_ioc_t) + sizeof(bcm_xtlv_t);
	ret = wldev_iovar_getbuf(ndev, "nan", nanioc, nanioc_size,
		cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
	if (unlikely(ret)) {
		WL_ERR((" nan is not supported, error = %d \n", ret));
		goto fail;
	} else {
		WL_DBG((" nan is supported \n"));
	}

fail:
	if (nanioc) {
		kfree(nanioc);
	}

	return ret;
}
Example #11
int
wl_cfgnan_cmd_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
	char *cmd, int cmd_len)
{
	nan_cmd_data_t cmd_data;
	u8 *buf = cmd;
	u8 *cmd_name = NULL;
	nan_cmd_t *nanc = NULL;
	int buf_len = 0;
	int ret = BCME_OK;

	cmd_name = strsep((char **)&buf, " ");
	if (buf) {
		buf_len = strlen(buf);
	}

	WL_DBG((" cmd_name: %s, buf_len: %d, buf: %s \n", cmd_name, buf_len, buf));

	memset(&cmd_data, 0, sizeof(nan_cmd_data_t));
	ret = wl_cfgnan_parse_args(buf, &cmd_data);
	if (unlikely(ret)) {
		WL_ERR((" argument parsing failed with error (%d), buf = %s \n",
			ret, buf));
		goto fail;
	}

	for (nanc = nan_cmds; nanc->name; nanc++) {
		if (strncmp(nanc->name, cmd_name, strlen(nanc->name)) == 0) {
			ret = (*nanc->func)(ndev, cfg, cmd, &cmd_data);
			if (ret < BCME_OK) {
				WL_ERR((" command (%s) failed with error (%d) \n",
					cmd_name, ret));
			}
		}
	}

fail:
	return ret;
}
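The dispatch loop in Example #11 walks a table of command-name/handler pairs terminated by a NULL name. Below is a sketch of what such a table could look like, assembled only from the handlers and wpa_cli command strings that appear in these examples; the real table, its ordering and its exact prefixes are assumptions.

/* Illustrative sketch only: a possible nan_cmds[] table for the loop above.
 * Command prefixes are taken from the "wpa_cli: DRIVER ..." comments in the
 * handlers; the driver's actual table may contain more or different entries.
 */
typedef struct nan_cmd {
	const char *name;	/* prefix matched with strncmp() in Example #11 */
	int (*func)(struct net_device *ndev, struct bcm_cfg80211 *cfg,
		char *cmd, nan_cmd_data_t *cmd_data);
} nan_cmd_t;

static nan_cmd_t nan_cmds[] = {
	{ "NAN_SUPPORT", wl_cfgnan_support_handler },		/* Example #10 */
	{ "NAN_STATUS", wl_cfgnan_status_handler },		/* Example #16 */
	{ "NAN_PUBLISH", wl_cfgnan_pub_handler },		/* Example #15 */
	{ "NAN_SUBSCRIBE", wl_cfgnan_sub_handler },		/* Example #14 */
	{ "NAN_CANCEL_SUBSCRIBE", wl_cfgnan_cancel_sub_handler },	/* Example #13 */
	{ "NAN_TRANSMIT", wl_cfgnan_transmit_handler },		/* Example #22 */
	{ "NAN_STOP", wl_cfgnan_stop_handler },			/* Example #26 */
	{ NULL, NULL }
};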
Example #12
int
wl_cfgnan_rtt_config_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, char *cmd, nan_cmd_data_t *cmd_data)
{
	wl_nan_ranging_config_t rtt_config;
	s32 ret = BCME_OK;

	/* proceed only if mandatory argument is present - channel */
	if (!cmd_data->chanspec) {
		WL_ERR((" mandatory argument is not present \n"));
		return -EINVAL;
	}

	/*
	 * command to test
	 *
	 * wl: wl proxd_nancfg 44/80 128 32 ff:ff:ff:ff:ff:ff 1
	 *
	 * wpa_cli: DRIVER NAN_RTT_CONFIG CHAN=44/80
	 */

	memset(&rtt_config, 0, sizeof(wl_nan_ranging_config_t));
	rtt_config.chanspec = cmd_data->chanspec;
	rtt_config.timeslot = 128;
	rtt_config.duration = 32;
	memcpy(&rtt_config.allow_mac, &ether_bcast, ETHER_ADDR_LEN);
	rtt_config.flags = 1;

	ret = wldev_iovar_setbuf(ndev, "proxd_nancfg", &rtt_config,
		sizeof(wl_nan_ranging_config_t), cfg->ioctl_buf,
		WLC_IOCTL_MEDLEN, NULL);
	if (unlikely(ret)) {
		WL_ERR((" nan rtt config failed, error = %d \n", ret));
	} else {
		WL_DBG((" nan rtt config successful \n"));
	}

	return ret;
}
Example #13
int
wl_cfgnan_cancel_sub_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, char *cmd, nan_cmd_data_t *cmd_data)
{
	wl_nan_ioc_t *nanioc = NULL;
	struct bcm_tlvbuf *tbuf = NULL;
	wl_nan_disc_params_t params;
	s32 ret = BCME_OK;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	uint16 nanioc_size = sizeof(wl_nan_ioc_t) + NAN_IOCTL_BUF_SIZE;

	/* proceed only if mandatory argument is present - subscriber id */
	if (!cmd_data->sub_id) {
		WL_ERR((" mandatory argument is not present \n"));
		return -EINVAL;
	}

	nanioc = kzalloc(nanioc_size, kflags);
	if (!nanioc) {
		WL_ERR((" memory allocation failed \n"));
		return -ENOMEM;
	}

	tbuf = bcm_xtlv_buf_alloc(NULL, BCM_XTLV_HDR_SIZE + sizeof(params));
	if (!tbuf) {
		WL_ERR((" memory allocation failed \n"));
		ret = -ENOMEM;
		goto fail;
	}

	/*
	 * command to test
	 *
	 * wl: wl nan cancel_subscribe 10
	 *
	 * wpa_cli: DRIVER NAN_CANCEL_SUBSCRIBE PUB_ID=10
	 */

	bcm_xtlv_put_data(tbuf, WL_NAN_XTLV_INSTANCE_ID, &cmd_data->sub_id,
		sizeof(wl_nan_instance_id_t));

	/* nan cancel subscribe */
	nanioc->version = htod16(WL_NAN_IOCTL_VERSION);
	nanioc->id = htod16(WL_NAN_CMD_CANCEL_SUBSCRIBE);
	nanioc->len = htod16(bcm_xtlv_buf_len(tbuf));
	bcopy(bcm_xtlv_head(tbuf), nanioc->data, bcm_xtlv_buf_len(tbuf));
	nanioc_size = sizeof(wl_nan_ioc_t) + bcm_xtlv_buf_len(tbuf);
	ret = wldev_iovar_setbuf(ndev, "nan", nanioc, nanioc_size,
		cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
	if (unlikely(ret)) {
		WL_ERR((" nan cancel subscribe failed, error = %d \n", ret));
		goto fail;
	} else {
		WL_DBG((" nan cancel subscribe successful \n"));
	}

fail:
	if (tbuf) {
		bcm_xtlv_buf_free(NULL, tbuf);
	}
	if (nanioc) {
		kfree(nanioc);
	}

	return ret;
}
Example #14
int
wl_cfgnan_sub_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, char *cmd, nan_cmd_data_t *cmd_data)
{
	wl_nan_ioc_t *nanioc = NULL;
	struct bcm_tlvbuf *tbuf = NULL;
	wl_nan_disc_params_t params;
	s32 ret = BCME_OK;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	uint16 nanioc_size = sizeof(wl_nan_ioc_t) + NAN_IOCTL_BUF_SIZE;

	/*
	 * proceed only if mandatory arguments are present - subscriber id,
	 * service hash
	 */
	if ((!cmd_data->sub_id) || (!cmd_data->svc_hash.data) ||
		(!cmd_data->svc_hash.dlen)) {
		WL_ERR((" mandatory arguments are not present \n"));
		return -EINVAL;
	}

	nanioc = kzalloc(nanioc_size, kflags);
	if (!nanioc) {
		WL_ERR((" memory allocation failed \n"));
		return -ENOMEM;
	}

	tbuf = bcm_xtlv_buf_alloc(NULL, BCM_XTLV_HDR_SIZE + sizeof(params));
	if (!tbuf) {
		WL_ERR((" memory allocation failed \n"));
		ret = -ENOMEM;
		goto fail;
	}

	/*
	 * command to test
	 *
	 * wl: wl nan subscribe 10 NAN123
	 *
	 * wpa_cli: DRIVER NAN_SUBSCRIBE SUB_ID=10 SVC_HASH=NAN123
	 */

	/* nan subscribe */
	params.period = 1;
	params.ttl = WL_NAN_TTL_UNTIL_CANCEL;
	params.flags = 0;
	params.instance_id = (wl_nan_instance_id_t)cmd_data->sub_id;
	memcpy((char *)params.svc_hash, cmd_data->svc_hash.data,
		cmd_data->svc_hash.dlen);
	bcm_xtlv_put_data(tbuf, WL_NAN_XTLV_SVC_PARAMS, &params, sizeof(params));

	nanioc->version = htod16(WL_NAN_IOCTL_VERSION);
	nanioc->id = htod16(WL_NAN_CMD_SUBSCRIBE);
	nanioc->len = htod16(bcm_xtlv_buf_len(tbuf));
	bcopy(bcm_xtlv_head(tbuf), nanioc->data, bcm_xtlv_buf_len(tbuf));
	nanioc_size = sizeof(wl_nan_ioc_t) + bcm_xtlv_buf_len(tbuf);
	ret = wldev_iovar_setbuf(ndev, "nan", nanioc, nanioc_size,
		cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
	if (unlikely(ret)) {
		WL_ERR((" nan subscribe failed, error = %d \n", ret));
		goto fail;
	} else {
		WL_DBG((" nan subscribe successful \n"));
	}

fail:
	if (tbuf) {
		bcm_xtlv_buf_free(NULL, tbuf);
	}
	if (nanioc) {
		kfree(nanioc);
	}

	return ret;
}
Example #15
int
wl_cfgnan_pub_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, char *cmd, nan_cmd_data_t *cmd_data)
{
	wl_nan_ioc_t *nanioc = NULL;
	struct bcm_tlvbuf *tbuf = NULL;
	wl_nan_disc_params_t params;
	s32 ret = BCME_OK;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	uint16 tbuf_size = BCM_XTLV_HDR_SIZE + sizeof(params);
	uint16 nanioc_size = sizeof(wl_nan_ioc_t) + NAN_IOCTL_BUF_SIZE;
	void *pxtlv;
	u16 start, end;

	/*
	 * proceed only if mandatory arguments are present - publisher id,
	 * service hash
	 */
	if ((!cmd_data->pub_id) || (!cmd_data->svc_hash.data) ||
		(!cmd_data->svc_hash.dlen)) {
		WL_ERR((" mandatory arguments are not present \n"));
		return -EINVAL;
	}

	nanioc = kzalloc(nanioc_size, kflags);
	if (!nanioc) {
		WL_ERR((" memory allocation failed \n"));
		return -ENOMEM;
	}

	tbuf = bcm_xtlv_buf_alloc(NULL, tbuf_size);
	if (!tbuf) {
		WL_ERR((" memory allocation failed \n"));
		ret = -ENOMEM;
		goto fail;
	}

	/*
	 * command to test
	 *
	 * wl: wl nan publish 10 NAN123 -info <hex_string>
	 *     wl nan publish 10 NAN123 -info <hex_string> -period 1 -ttl 0xffffffff
	 *
	 * wpa_cli: DRIVER NAN_PUBLISH PUB_ID=10 SVC_HASH=NAN123
	 *          SVC_INFO=<hex_string>
	 *          DRIVER NAN_PUBLISH PUB_ID=10 SVC_HASH=NAN123
	 *          SVC_INFO=<hex_string> PUB_PR=1 PUB_INT=0xffffffff
	 */

	/* nan publish */
	start = end = NAN_IOCTL_BUF_SIZE;
	nanioc->version = htod16(WL_NAN_IOCTL_VERSION);
	nanioc->id = htod16(WL_NAN_CMD_PUBLISH);
	pxtlv = nanioc->data;

	/* discovery parameters */
	if (cmd_data->pub_pr) {
		params.period = cmd_data->pub_pr;
	} else {
		params.period = 1;
	}
	if (cmd_data->pub_int) {
		params.ttl = cmd_data->pub_int;
	} else {
		params.ttl = WL_NAN_TTL_UNTIL_CANCEL;
	}
	params.flags = WL_NAN_PUB_BOTH;
	params.instance_id = (wl_nan_instance_id_t)cmd_data->pub_id;
	memcpy((char *)params.svc_hash, cmd_data->svc_hash.data,
		cmd_data->svc_hash.dlen);
	ret = bcm_pack_xtlv_entry(&pxtlv,
		&end, WL_NAN_XTLV_SVC_PARAMS, sizeof(wl_nan_disc_params_t), &params);
	if (unlikely(ret)) {
		goto fail;
	}
	if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
		WL_DBG((" optional svc_info present, pack it \n"));
		ret = bcm_pack_xtlv_entry_from_hex_string(&pxtlv,
			&end, WL_NAN_XTLV_SVC_INFO, cmd_data->svc_info.data);
		if (unlikely(ret)) {
			goto fail;
		}
	}

	nanioc->len = start - end;
	nanioc_size = sizeof(wl_nan_ioc_t) + nanioc->len;
	ret = wldev_iovar_setbuf(ndev, "nan", nanioc, nanioc_size,
		cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
	if (unlikely(ret)) {
		WL_ERR((" nan publish failed, error = %d \n", ret));
		goto fail;
	} else {
		WL_DBG((" nan publish successful \n"));
	}

fail:
	if (tbuf) {
		bcm_xtlv_buf_free(NULL, tbuf);
	}
	if (nanioc) {
		kfree(nanioc);
	}

	return ret;
}
Example #16
int
wl_cfgnan_status_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, char *cmd, nan_cmd_data_t *cmd_data)
{
	wl_nan_ioc_t *nanioc = NULL;
	wl_nan_ioc_t *resp;
	void *pxtlv;
	char *ptr = cmd;
	wl_nan_tlv_data_t tlv_data;
	s32 ret = BCME_OK;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	uint16 nanioc_size = sizeof(wl_nan_ioc_t) + NAN_IOCTL_BUF_SIZE;

	nanioc = kzalloc(nanioc_size, kflags);
	if (!nanioc) {
		WL_ERR((" memory allocation failed \n"));
		return -ENOMEM;
	}

	/*
	 * command to test
	 *
	 * wl: wl nan status
	 *
	 * wpa_cli: DRIVER NAN_STATUS
	 */

	/* nan status */
	nanioc->version = htod16(WL_NAN_IOCTL_VERSION);
	nanioc->id = htod16(WL_NAN_CMD_STATUS);
	pxtlv = nanioc->data;
	nanioc->len = NAN_IOCTL_BUF_SIZE;
	nanioc_size = sizeof(wl_nan_ioc_t) + sizeof(bcm_xtlv_t);
	ret = wldev_iovar_getbuf(ndev, "nan", nanioc, nanioc_size,
		cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
	if (unlikely(ret)) {
		WL_ERR((" nan status failed, error = %d \n", ret));
		goto fail;
	} else {
		WL_DBG((" nan status successful \n"));
	}

	/* unpack the tlvs from the response buffer; keep the original
	 * allocation in nanioc so the kfree() below frees the right pointer
	 */
	memset(&tlv_data, 0, sizeof(tlv_data));
	resp = (wl_nan_ioc_t *)cfg->ioctl_buf;
	if (g_nan_debug) {
		prhex(" resp->data: ", (uint8 *)resp->data, resp->len);
	}
	bcm_unpack_xtlv_buf(&tlv_data, resp->data, resp->len,
		wl_cfgnan_set_vars_cbfn);

	ptr += sprintf(ptr, CLUS_ID_PREFIX MACF, ETHER_TO_MACF(tlv_data.clus_id));
	ptr += sprintf(ptr, " " ROLE_PREFIX"%d", tlv_data.dev_role);
	ptr += sprintf(ptr, " " AMR_PREFIX);
	ptr += bcm_format_hex(ptr, tlv_data.amr, NAN_MASTER_RANK_LEN);
	ptr += sprintf(ptr, " " AMBTT_PREFIX"0x%x", tlv_data.ambtt);
	ptr += sprintf(ptr, " " HOP_COUNT_PREFIX"%d", tlv_data.hop_count);

	WL_DBG((" formatted string for userspace: %s, len: %zu \n",
		cmd, strlen(cmd)));

fail:
	if (nanioc) {
		kfree(nanioc);
	}
	if (tlv_data.svc_info.data) {
		kfree(tlv_data.svc_info.data);
		tlv_data.svc_info.data = NULL;
		tlv_data.svc_info.dlen = 0;
	}
	if (tlv_data.vend_info.data) {
		kfree(tlv_data.vend_info.data);
		tlv_data.vend_info.data = NULL;
		tlv_data.vend_info.dlen = 0;
	}

	return ret;
}
Example #17
int
wl_cfgnan_set_config_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, char *cmd, nan_cmd_data_t *cmd_data)
{
	wl_nan_ioc_t *nanioc = NULL;
	void *pxtlv;
	s32 ret = BCME_OK;
	u16 start, end;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	uint16 nanioc_size = sizeof(wl_nan_ioc_t) + NAN_IOCTL_BUF_SIZE;

	nanioc = kzalloc(nanioc_size, kflags);
	if (!nanioc) {
		WL_ERR((" memory allocation failed \n"));
		return -ENOMEM;
	}

	/*
	 * command to test
	 *
	 * wl: wl nan <attr> <value> (wl nan role 1)
	 *
	 * wpa_cli: DRIVER NAN_CONFIG_SET ATTR=<attr> <value>...<value>
	 *
	 * wpa_cli: DRIVER NAN_SET_CONFIG ATTR=ATTR_ROLE ROLE=1
	 */

	/* nan set config */
	start = end = NAN_IOCTL_BUF_SIZE;
	nanioc->version = htod16(WL_NAN_IOCTL_VERSION);
	nanioc->id = htod16(WL_NAN_CMD_ATTR);
	pxtlv = nanioc->data;

	switch (cmd_data->attr.type) {
	case WL_NAN_XTLV_ROLE:
		ret = bcm_pack_xtlv_entry(&pxtlv, &end, WL_NAN_XTLV_ROLE,
			sizeof(u32), &cmd_data->role);
		break;
	case WL_NAN_XTLV_MASTER_PREF:
		ret = bcm_pack_xtlv_entry(&pxtlv, &end, WL_NAN_XTLV_MASTER_PREF,
			sizeof(u16), &cmd_data->master_pref);
		break;
	case WL_NAN_XTLV_DW_LEN:
		ret = bcm_pack_xtlv_entry(&pxtlv, &end, WL_NAN_XTLV_DW_LEN,
			sizeof(u16), &cmd_data->dw_len);
		break;
	case WL_NAN_XTLV_CLUSTER_ID:
	case WL_NAN_XTLV_IF_ADDR:
	case WL_NAN_XTLV_BCN_INTERVAL:
	case WL_NAN_XTLV_MAC_CHANSPEC:
	case WL_NAN_XTLV_MAC_TXRATE:
	default:
		ret = -EINVAL;
		break;
	}
	if (unlikely(ret)) {
		WL_ERR((" unsupported attribute, attr = %s (%d) \n",
			cmd_data->attr.name, cmd_data->attr.type));
		goto fail;
	}

	nanioc->len = start - end;
	nanioc_size = sizeof(wl_nan_ioc_t) + nanioc->len;
	ret = wldev_iovar_setbuf(ndev, "nan", nanioc, nanioc_size,
		cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
	if (unlikely(ret)) {
		WL_ERR((" nan set config failed, error = %d \n", ret));
		goto fail;
	} else {
		WL_DBG((" nan set config successful \n"));
	}

fail:
	if (nanioc) {
		kfree(nanioc);
	}

	return ret;
}
Example #18
s32
wl_cfgnan_notify_nan_status(struct bcm_cfg80211 *cfg,
	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *event, void *data)
{
	s32 ret = BCME_OK;
	u16 data_len;
	u32 event_num;
	s32 event_type;
	nan_event_hdr_t nan_hdr;
	wl_nan_tlv_data_t tlv_data;
	u8 *buf = NULL;
	u32 buf_len;
	u8 *ptr;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;

	if (!event || !data) {
		WL_ERR((" event data is NULL \n"));
		return -EINVAL;
	}

	event_type = ntoh32(event->event_type);
	event_num = ntoh32(event->reason);
	data_len = ntoh32(event->datalen);
	memset(&nan_hdr, 0, sizeof(nan_event_hdr_t));
	nan_hdr.event_subtype = event_num;

	WL_DBG((" nan event: type: %d num: %d len: %d \n",
		event_type, event_num, data_len));

	if (INVALID_NAN_EVENT(event_num)) {
		WL_ERR((" unsupported event, num: %d \n", event_num));
		return -EINVAL;
	}

	if (g_nan_debug) {
		WL_DBG((" event name: %s \n", nan_event_name[event_num]));
		WL_DBG((" event data: \n"));
		prhex(NULL, data, data_len);
	}

	/* unpack the tlvs */
	memset(&tlv_data, 0, sizeof(wl_nan_tlv_data_t));
	bcm_unpack_xtlv_buf(&tlv_data, data, data_len,
		wl_cfgnan_set_vars_cbfn);

	/*
	 * send as preformatted hex string
	 *
	 * EVENT_NAN <event_type> <tlv_hex_string>
	 */

	buf_len = NAN_IOCTL_BUF_SIZE;
	buf = ptr = kzalloc(buf_len, kflags);
	if (!buf) {
		WL_ERR((" memory allocation failed \n"));
		ret = -ENOMEM;
		goto fail;
	}

	switch (event_num) {
	case WL_NAN_EVENT_START:
	case WL_NAN_EVENT_JOIN:
	case WL_NAN_EVENT_STOP:
		ptr += sprintf(buf, SUPP_EVENT_PREFIX"%s " CLUS_ID_PREFIX MACF,
			nan_event_str[event_num], ETHER_TO_MACF(tlv_data.nstatus.cid));
		break;
	case WL_NAN_EVENT_ROLE:
		ptr += sprintf(buf, SUPP_EVENT_PREFIX"%s "ROLE_PREFIX "%d "
			CLUS_ID_PREFIX MACF, nan_event_str[event_num],
			tlv_data.nstatus.role, ETHER_TO_MACF(tlv_data.nstatus.cid));
		break;
	case WL_NAN_EVENT_DISCOVERY_RESULT:
		ptr += sprintf(buf, SUPP_EVENT_PREFIX"%s " PUB_ID_PREFIX"%d "
			SUB_ID_PREFIX"%d " MAC_ADDR_PREFIX MACF,
			nan_event_str[event_num], tlv_data.pub_id, tlv_data.sub_id,
			ETHER_TO_MACF(tlv_data.mac_addr));
		if (tlv_data.svc_info.data && tlv_data.svc_info.dlen) {
			WL_DBG((" service info present \n"));
			if ((strlen(ptr) + tlv_data.svc_info.dlen) >= buf_len) {
				WL_ERR((" service info length = %d\n",
					tlv_data.svc_info.dlen));
				WL_ERR((" insufficent buffer to copy service info \n"));
				ret = -EOVERFLOW;
				goto fail;
			}
			ptr += sprintf(ptr, " %s", SVC_INFO_PREFIX);
			ptr += bcm_format_hex(ptr, tlv_data.svc_info.data,
				tlv_data.svc_info.dlen);
		} else {
			WL_DBG((" service info not present \n"));
		}

		if (tlv_data.vend_info.data && tlv_data.vend_info.dlen) {
			struct ether_addr *ea;
			u8 *data = tlv_data.vend_info.data;
			uint32 bitmap;
			u16 dlen = tlv_data.vend_info.dlen;
			chanspec_t chanspec;
			uint8 mapcontrol;
			uint8 proto;

			WL_DBG((" vendor info present \n"));
			if ((*data != NAN_ATTR_VENDOR_SPECIFIC) ||
				(dlen < NAN_VENDOR_HDR_SIZE)) {
				WL_ERR((" error in vendor info attribute \n"));
				ret = -EINVAL;
				goto fail;
			}

			if (*(data + 6) == NAN_VENDOR_TYPE_RTT) {
				data += NAN_VENDOR_HDR_SIZE;
				ea = (struct ether_addr *)data;
				data += ETHER_ADDR_LEN;
				mapcontrol = *data++;
				proto = *data++;
				bitmap = *(uint32 *)data;
				data += 4;
				chanspec = *(chanspec_t *)data;
				ptr += sprintf(ptr, " "BITMAP_PREFIX"0x%x "CHAN_PREFIX"%d/%s",
					bitmap, wf_chspec_ctlchan(chanspec),
					wf_chspec_to_bw_str(chanspec));
				WL_DBG((" bitmap: 0x%x channel: %d bandwidth: %s \n", bitmap,
					wf_chspec_ctlchan(chanspec),
					wf_chspec_to_bw_str(chanspec)));
			}
		}
		break;
	case WL_NAN_EVENT_REPLIED:
		ptr += sprintf(buf, SUPP_EVENT_PREFIX"%s " PUB_ID_PREFIX"%d "
				MAC_ADDR_PREFIX MACF, nan_event_str[event_num],
				tlv_data.pub_id, ETHER_TO_MACF(tlv_data.mac_addr));
		break;
	case WL_NAN_EVENT_TERMINATED:
		ptr += sprintf(buf, SUPP_EVENT_PREFIX"%s " PUB_ID_PREFIX"%d ",
			nan_event_str[event_num], tlv_data.pub_id);
		break;
	case WL_NAN_EVENT_RECEIVE:
		ptr += sprintf(buf, SUPP_EVENT_PREFIX"%s " PUB_ID_PREFIX"%d "
				MAC_ADDR_PREFIX MACF, nan_event_str[event_num],
				tlv_data.pub_id, ETHER_TO_MACF(tlv_data.mac_addr));
		if (tlv_data.svc_info.data && tlv_data.svc_info.dlen) {
			WL_DBG((" service info present \n"));
			if ((strlen(ptr) + tlv_data.svc_info.dlen) >= buf_len) {
				WL_ERR((" service info length = %d\n",
					tlv_data.svc_info.dlen));
				WL_ERR((" insufficent buffer to copy service info \n"));
				ret = -EOVERFLOW;
				goto fail;
			}
			ptr += sprintf(ptr, " %s", SVC_INFO_PREFIX);
			ptr += bcm_format_hex(ptr, tlv_data.svc_info.data,
				tlv_data.svc_info.dlen);
		} else {
			WL_DBG((" service info not present \n"));
		}
		break;
	case WL_NAN_EVENT_SCAN_COMPLETE:
		ptr += sprintf(buf, SUPP_EVENT_PREFIX"%s " CLUS_ID_PREFIX MACF,
			nan_event_str[event_num], ETHER_TO_MACF(tlv_data.nstatus.cid));
		break;
	case WL_NAN_EVENT_STATUS_CHG:
		ptr += sprintf(buf, SUPP_EVENT_PREFIX"%s " CLUS_ID_PREFIX MACF,
			nan_event_str[event_num], ETHER_TO_MACF(tlv_data.nstatus.cid));
		break;
	case WL_NAN_EVENT_MERGE:
		ptr += sprintf(buf, SUPP_EVENT_PREFIX"%s " CLUS_ID_PREFIX MACF,
			nan_event_str[event_num], ETHER_TO_MACF(tlv_data.nstatus.cid));
		break;
	default:
		WL_ERR((" unknown event \n"));
		break;
	}

#ifdef WL_GENL
	/* send the preformatted string to the upper layer as event */
	WL_DBG((" formatted string for userspace: %s, len: %zu \n",
		buf, strlen(buf)));
	wl_genl_send_msg(bcmcfg_to_prmry_ndev(cfg), 0, buf, strlen(buf), 0, 0);
#endif /* WL_GENL */

fail:
	if (buf) {
		kfree(buf);
	}
	if (tlv_data.svc_info.data) {
		kfree(tlv_data.svc_info.data);
		tlv_data.svc_info.data = NULL;
		tlv_data.svc_info.dlen = 0;
	}
	if (tlv_data.vend_info.data) {
		kfree(tlv_data.vend_info.data);
		tlv_data.vend_info.data = NULL;
		tlv_data.vend_info.dlen = 0;
	}

	return ret;
}
Example #19
s32
wl_genl_send_msg(
	struct net_device *ndev,
	u32 event_type,
	u8 *buf,
	u16 len,
	u8 *subhdr,
	u16 subhdr_len)
{
	int ret = 0;
	struct sk_buff *skb;
	void *msg;
	u32 attr_type = 0;
	bcm_event_hdr_t *hdr = NULL;
	int mcast = 1; /* By default sent as multicast type */
	int pid = 0;
	u8 *ptr = NULL, *p = NULL;
	u32 tot_len = sizeof(bcm_event_hdr_t) + subhdr_len + len;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;


	WL_DBG(("Enter \n"));

	/* Decide between STRING event and Data event */
	if (event_type == 0)
		attr_type = BCM_GENL_ATTR_STRING;
	else
		attr_type = BCM_GENL_ATTR_MSG;

	skb = genlmsg_new(NLMSG_GOODSIZE, kflags);
	if (skb == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	msg = genlmsg_put(skb, 0, 0, &wl_genl_family, 0, BCM_GENL_CMD_MSG);
	if (msg == NULL) {
		ret = -ENOMEM;
		goto out;
	}


	if (attr_type == BCM_GENL_ATTR_STRING) {
		/* Add a BCM_GENL_ATTR_STRING attribute. Since it is specified as
		 * a string, make sure it is null terminated.
		 */
		if (subhdr || subhdr_len) {
			WL_ERR(("No sub hdr support for the ATTR STRING type \n"));
			ret =  -EINVAL;
			goto out;
		}

		ret = nla_put_string(skb, BCM_GENL_ATTR_STRING, buf);
		if (ret != 0) {
			WL_ERR(("nla_put_string failed\n"));
			goto out;
		}
	} else {
		/* ATTR_MSG */

		/* Create a single buffer for all */
		p = ptr = kzalloc(tot_len, kflags);
		if (!ptr) {
			ret = -ENOMEM;
			WL_ERR(("ENOMEM!!\n"));
			goto out;
		}

		/* Include the bcm event header */
		hdr = (bcm_event_hdr_t *)ptr;
		hdr->event_type = wl_event_to_bcm_event(event_type);
		hdr->len = len + subhdr_len;
		ptr += sizeof(bcm_event_hdr_t);

		/* Copy subhdr (if any) */
		if (subhdr && subhdr_len) {
			memcpy(ptr, subhdr, subhdr_len);
			ptr += subhdr_len;
		}

		/* Copy the data */
		if (buf && len) {
			memcpy(ptr, buf, len);
		}

		ret = nla_put(skb, BCM_GENL_ATTR_MSG, tot_len, p);
		if (ret != 0) {
			WL_ERR(("nla_put_string failed\n"));
			goto out;
		}
	}

	if (mcast) {
		int err = 0;
		/* finalize the message */
		genlmsg_end(skb, msg);
		/* NETLINK_CB(skb).dst_group = 1; */
		if ((err = genlmsg_multicast(skb, 0, wl_genl_mcast.id, GFP_ATOMIC)) < 0)
			WL_ERR(("genlmsg_multicast for attr(%d) failed. Error:%d \n",
				attr_type, err));
		else
			WL_DBG(("Multicast msg sent successfully. attr_type:%d len:%d \n",
				attr_type, tot_len));
	} else {
		NETLINK_CB(skb).dst_group = 0; /* Not in multicast group */

		/* finalize the message */
		genlmsg_end(skb, msg);

		/* send the message back */
		if (genlmsg_unicast(&init_net, skb, pid) < 0)
			WL_ERR(("genlmsg_unicast failed\n"));
	}

out:
	if (p)
		kfree(p);
	if (ret)
		nlmsg_free(skb);

	return ret;
}
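For reference, the NAN paths in Examples #18 and #25 hand their preformatted strings to this function with event_type 0, which selects the BCM_GENL_ATTR_STRING branch and needs no sub-header:

	/* minimal call site, mirroring the NAN event code above */
	wl_genl_send_msg(bcmcfg_to_prmry_ndev(cfg), 0, buf, strlen(buf), NULL, 0);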
Example #20
int wl_cfgnan_set_vars_cbfn(void *ctx, void **tlv_buf,
	uint16 type, uint16 len)
{
	wl_nan_tlv_data_t *ndata = ((wl_nan_tlv_data_t *)(ctx));
	bcm_xtlv_t *ptlv;
	struct ether_addr mac;
	int ret = BCME_OK;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	uint8 uv8;
	char buf[64];

	WL_DBG((" enter, xtlv_type: 0x%x \n", type));

	switch (type) {
	case WL_NAN_XTLV_ENABLE:
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_ENABLE,
			sizeof(uv8), &ndata->enabled);
		break;
	case WL_NAN_XTLV_MASTER_PREF:
		/*
		 * master preference and random factor: the MAC carries them
		 * packed as two u8's in one u16,
		 *
		 * masterpref: val & 0x0ff
		 * rnd_factor: val >> 8
		 */
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_MASTER_PREF,
			sizeof(uint16), &ndata->master_pref);
		break;
	case WL_NAN_XTLV_IF_ADDR:
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_IF_ADDR,
			ETHER_ADDR_LEN, &mac);
		memcpy(&ndata->mac_addr, &mac, ETHER_ADDR_LEN);
		break;
	case WL_NAN_XTLV_CLUSTER_ID:
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_CLUSTER_ID,
			ETHER_ADDR_LEN, &mac);
		memcpy(&ndata->clus_id, &mac, ETHER_ADDR_LEN);
		break;
	case WL_NAN_XTLV_ROLE:
		/* nan device role: master, non-master sync, non-master non-sync, etc. */
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_ROLE, 4,
			&ndata->dev_role);
		break;
	case WL_NAN_XTLV_MAC_CHANSPEC:
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_MAC_CHANSPEC,
			sizeof(chanspec_t), &ndata->chanspec);
		if (wf_chspec_valid(ndata->chanspec)) {
			wf_chspec_ntoa(ndata->chanspec, buf);
			WL_DBG((" chanspec: %s 0x%x \n", buf, ndata->chanspec));
		} else {
			WL_DBG((" chanspec: 0x%x is not valid \n", ndata->chanspec));
		}
		break;
	case WL_NAN_XTLV_MAC_AMR:
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_MAC_AMR,
			NAN_MASTER_RANK_LEN, buf);
		memcpy(ndata->amr, buf, NAN_MASTER_RANK_LEN);
		break;
	case WL_NAN_XTLV_MAC_AMBTT:
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_MAC_AMBTT,
			sizeof(uint32), &ndata->ambtt);
		break;
	case WL_NAN_XTLV_MAC_HOPCNT:
		ret = bcm_unpack_xtlv_entry(tlv_buf,
			WL_NAN_XTLV_MAC_HOPCNT, sizeof(uint8), &ndata->hop_count);
		break;
	case WL_NAN_XTLV_INSTANCE_ID:
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_INSTANCE_ID,
			sizeof(wl_nan_instance_id_t), &ndata->inst_id);
		break;
	case WL_NAN_XTLV_SVC_NAME:
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_SVC_NAME,
			WL_NAN_SVC_HASH_LEN, ndata->svc_name);
		break;
	case WL_NAN_XTLV_SVC_PARAMS:
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_SVC_PARAMS,
			sizeof(wl_nan_disc_params_t), &ndata->params);
		break;
	case WL_NAN_XTLV_MAC_STATUS:
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_MAC_STATUS,
			sizeof(wl_nan_status_t), &ndata->nstatus);
		break;
	case WL_NAN_XTLV_PUBLR_ID:
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_PUBLR_ID,
			sizeof(uint8), &ndata->pub_id);
		break;
	case WL_NAN_XTLV_SUBSCR_ID:
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_SUBSCR_ID,
			sizeof(uint8), &ndata->sub_id);
		break;
	case WL_NAN_XTLV_MAC_ADDR:
		ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_MAC_ADDR,
			ETHER_ADDR_LEN, &mac);
		memcpy(&ndata->mac_addr, &mac, ETHER_ADDR_LEN);
		break;
	case WL_NAN_XTLV_VNDR:
		ptlv = *tlv_buf;
		ndata->vend_info.dlen = BCM_XTLV_LEN(ptlv);
		ndata->vend_info.data = kzalloc(ndata->vend_info.dlen, kflags);
		if (!ndata->vend_info.data) {
			WL_ERR((" memory allocation failed \n"));
			ret = -ENOMEM;
			goto fail;
		}
		if (ndata->vend_info.data && ndata->vend_info.dlen) {
			ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_VNDR,
				ndata->vend_info.dlen, ndata->vend_info.data);
		}
		break;
	case WL_NAN_XTLV_SVC_INFO:
		ptlv = *tlv_buf;
		ndata->svc_info.dlen = BCM_XTLV_LEN(ptlv);
		ndata->svc_info.data = kzalloc(ndata->svc_info.dlen, kflags);
		if (!ndata->svc_info.data) {
			WL_ERR((" memory allocation failed \n"));
			ret = -ENOMEM;
			goto fail;
		}
		if (ndata->svc_info.data && ndata->svc_info.dlen) {
			ret = bcm_unpack_xtlv_entry(tlv_buf, WL_NAN_XTLV_SVC_INFO,
				ndata->svc_info.dlen, ndata->svc_info.data);
		}
		break;
	case WL_NAN_XTLV_ZERO:
		/* don't parse empty space in the buffer */
		ret = BCME_ERROR;
		break;

	default:
		/* skip current tlv, if we don't have a handler */
		ret = bcm_skip_xtlv(tlv_buf);
		break;
	}

fail:
	return ret;
}
Example #21
int
wl_cfgnan_rtt_find_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, char *cmd, nan_cmd_data_t *cmd_data)
{
	void *iovbuf;
	wl_nan_ranging_list_t *rtt_list;
	s32 iovbuf_size = NAN_RTT_IOVAR_BUF_SIZE;
	s32 ret = BCME_OK;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;

	/*
	 * proceed only if mandatory arguments are present - channel, bitmap,
	 * mac address
	 */
	if ((!cmd_data->chanspec) || (!cmd_data->bmap) ||
		ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
		WL_ERR((" mandatory arguments are not present \n"));
		return -EINVAL;
	}

	iovbuf = kzalloc(iovbuf_size, kflags);
	if (!iovbuf) {
		WL_ERR((" memory allocation failed \n"));
		return -ENOMEM;
	}

	/*
	 * command to test
	 *
	 * wl: wl proxd_nanfind 1 44/80 <mac_addr> 0x300 5 6 1
	 *
	 * wpa_cli: DRIVER NAN_RTT_FIND MAC_ADDR=<mac_addr> CHAN=44/80 BMAP=0x300
	 *
	 */
	rtt_list = (wl_nan_ranging_list_t *)iovbuf;
	rtt_list->count = 1;
	rtt_list->num_peers_done = 0;
	rtt_list->num_dws = 1;
	rtt_list->rp[0].chanspec = cmd_data->chanspec;
	memcpy(&rtt_list->rp[0].ea, &cmd_data->mac_addr,
		sizeof(struct ether_addr));
	rtt_list->rp[0].abitmap = cmd_data->bmap;
	rtt_list->rp[0].frmcnt = 5;
	rtt_list->rp[0].retrycnt = 6;
	rtt_list->rp[0].flags = 1;

	iovbuf_size = sizeof(wl_nan_ranging_list_t) +
		sizeof(wl_nan_ranging_peer_t);
	ret = wldev_iovar_setbuf(ndev, "proxd_nanfind", iovbuf,
		iovbuf_size, cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
	if (unlikely(ret)) {
		WL_ERR((" nan rtt find failed, error = %d \n", ret));
	} else {
		WL_DBG((" nan rtt find successful \n"));
	}

	if (iovbuf) {
		kfree(iovbuf);
	}

	return ret;
}
Example #22
int
wl_cfgnan_transmit_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, char *cmd, nan_cmd_data_t *cmd_data)
{
	wl_nan_ioc_t *nanioc = NULL;
	void *pxtlv;
	s32 ret = BCME_OK;
	u16 start, end;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	uint16 nanioc_size = sizeof(wl_nan_ioc_t) + NAN_IOCTL_BUF_SIZE;

	/*
	 * proceed only if mandatory arguments are present - subscriber id,
	 * publisher id, mac address
	 */
	if ((!cmd_data->sub_id) || (!cmd_data->pub_id) ||
		ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
		WL_ERR((" mandatory arguments are not present \n"));
		return -EINVAL;
	}

	nanioc = kzalloc(nanioc_size, kflags);
	if (!nanioc) {
		WL_ERR((" memory allocation failed \n"));
		return -ENOMEM;
	}

	/*
	 * command to test
	 *
	 * wl: wl nan transmit <sub_id> <pub_id> <mac_addr> -info <hex_string>
	 *
	 * wpa_cli: DRIVER NAN_TRANSMIT SUB_ID=<sub_id> PUB_ID=<pub_id>
	 *          MAC_ADDR=<mac_addr> SVC_INFO=<hex_string>
	 */

	/* nan transmit */
	start = end = NAN_IOCTL_BUF_SIZE;
	nanioc->version = htod16(WL_NAN_IOCTL_VERSION);
	nanioc->id = htod16(WL_NAN_CMD_TRANSMIT);
	pxtlv = nanioc->data;

	ret = bcm_pack_xtlv_entry(&pxtlv, &end, WL_NAN_XTLV_INSTANCE_ID,
		sizeof(wl_nan_instance_id_t), &cmd_data->sub_id);
	if (unlikely(ret)) {
		goto fail;
	}
	ret = bcm_pack_xtlv_entry(&pxtlv, &end, WL_NAN_XTLV_REQUESTOR_ID,
		sizeof(wl_nan_instance_id_t), &cmd_data->pub_id);
	if (unlikely(ret)) {
		goto fail;
	}
	ret = bcm_pack_xtlv_entry(&pxtlv, &end, WL_NAN_XTLV_MAC_ADDR,
		ETHER_ADDR_LEN, &cmd_data->mac_addr.octet);
	if (unlikely(ret)) {
		goto fail;
	}
	if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
		WL_DBG((" optional svc_info present, pack it \n"));
		ret = bcm_pack_xtlv_entry_from_hex_string(&pxtlv,
			&end, WL_NAN_XTLV_SVC_INFO, cmd_data->svc_info.data);
		if (unlikely(ret)) {
			goto fail;
		}
	}

	nanioc->len = start - end;
	nanioc_size = sizeof(wl_nan_ioc_t) + nanioc->len;
	ret = wldev_iovar_setbuf(ndev, "nan", nanioc, nanioc_size,
		cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
	if (unlikely(ret)) {
		WL_ERR((" nan transmit failed, error = %d \n", ret));
		goto fail;
	} else {
		WL_DBG((" nan transmit successful \n"));
	}

fail:
	if (nanioc) {
		kfree(nanioc);
	}

	return ret;
}
Example #23
int get_roam_channel_list(int target_chan,
	chanspec_t *channels, const wlc_ssid_t *ssid, int ioctl_ver)
{
	int i, n = 1;
	uint band, band2G, band5G, bw;

#ifdef D11AC_IOTYPES
	if (ioctl_ver == 1) {
		/* legacy chanspec */
		band2G = WL_LCHANSPEC_BAND_2G;
		band5G = WL_LCHANSPEC_BAND_5G;
		bw = WL_LCHANSPEC_BW_20 | WL_LCHANSPEC_CTL_SB_NONE;
	} else {
		band2G = WL_CHANSPEC_BAND_2G;
		band5G = WL_CHANSPEC_BAND_5G;
		bw = WL_CHANSPEC_BW_20;
	}
#else
	band2G = WL_CHANSPEC_BAND_2G;
	band5G = WL_CHANSPEC_BAND_5G;
	bw = WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
#endif /* D11AC_IOTYPES */

	if (target_chan <= 14)
		band = band2G;
	else
		band = band5G;
	*channels = (target_chan & WL_CHANSPEC_CHAN_MASK) | band | bw;
	WL_DBG((" %s: %02d 0x%04X\n", __FUNCTION__, target_chan, *channels));
	++channels;

#if defined(CUSTOMER_HW4) && defined(WES_SUPPORT)
	if (roamscan_mode) {
		for (i = 0; i < n_roam_cache; i++) {
			if ((roam_cache[i].chanspec & WL_CHANSPEC_CHAN_MASK) != target_chan) {
				*channels = roam_cache[i].chanspec & WL_CHANSPEC_CHAN_MASK;
				WL_DBG((" %s: %02d\n", __FUNCTION__, *channels));
				if (*channels <= 14)
					*channels |= band2G | bw;
				else
					*channels |= band5G | bw;

				channels++;
				n++;
			}
		}

		return n;
	}
#endif /* CUSTOMER_HW4 && WES_SUPPORT */

	for (i = 0; i < n_roam_cache; i++) {
		chanspec_t ch = roam_cache[i].chanspec;
		if ((roam_cache[i].ssid_len == ssid->SSID_len) &&
			((ch & WL_CHANSPEC_CHAN_MASK) != target_chan) &&
			((roam_band == WLC_BAND_AUTO) ||
			((roam_band == WLC_BAND_2G) && CHSPEC_IS2G(ch)) ||
			((roam_band == WLC_BAND_5G) && CHSPEC_IS5G(ch))) &&
			(memcmp(roam_cache[i].ssid, ssid->SSID, ssid->SSID_len) == 0)) {
			/* match found, add it */
			*channels = ch & WL_CHANSPEC_CHAN_MASK;
			if (*channels <= 14)
				*channels |= band2G | bw;
			else
				*channels |= band5G | bw;

			WL_DBG((" %s: %02d 0x%04X\n", __FUNCTION__,
				ch & WL_CHANSPEC_CHAN_MASK, *channels));

			channels++; n++;
		}
	}

	return n;
}
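Every roam-cache helper above composes its chanspecs the same way: the low bits carry the channel number and the band and bandwidth bits are OR'd on top. A one-line illustration using the non-legacy constants from Example #23 (for ioctl_ver == 1 the WL_LCHANSPEC_* values and the sideband bits would be used instead):

	/* 5 GHz channel 36, 20 MHz wide, as composed in the loops above */
	chanspec_t ch = (36 & WL_CHANSPEC_CHAN_MASK) | WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20;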
Example #24
int
wl_cfgnan_enable_events(struct net_device *ndev, struct bcm_cfg80211 *cfg)
{
	wl_nan_ioc_t *nanioc = NULL;
	void *pxtlv;
	u32 event_mask = 0;
	s32 ret = BCME_OK;
	u16 start, end;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	uint16 nanioc_size = sizeof(wl_nan_ioc_t) + NAN_IOCTL_BUF_SIZE;

	nanioc = kzalloc(nanioc_size, kflags);
	if (!nanioc) {
		WL_ERR((" memory allocation failed \n"));
		return -ENOMEM;
	}

	ret = wl_add_remove_eventmsg(ndev, WLC_E_NAN, true);
	if (unlikely(ret)) {
		WL_ERR((" nan event enable failed, error = %d \n", ret));
		goto fail;
	}
	if (g_nan_debug) {
		/* enable all nan events */
		event_mask = NAN_EVENT_MASK_ALL;
	} else {
		/* enable only selected nan events to avoid unnecessary host wake up */
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_START);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_JOIN);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_DISCOVERY_RESULT);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_RECEIVE);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_TERMINATED);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_STOP);
		event_mask |= NAN_EVENT_BIT(WL_NAN_EVENT_CLEAR_BIT);
		event_mask = htod32(event_mask);
	}

	start = end = NAN_IOCTL_BUF_SIZE;
	nanioc->version = htod16(WL_NAN_IOCTL_VERSION);
	nanioc->id = htod16(WL_NAN_CMD_EVENT_MASK);
	pxtlv = nanioc->data;
	ret = bcm_pack_xtlv_entry(&pxtlv, &end, WL_NAN_XTLV_EVENT_MASK,
		sizeof(uint32), &event_mask);
	if (unlikely(ret)) {
		goto fail;
	}
	nanioc->len = start - end;
	nanioc_size = sizeof(wl_nan_ioc_t) + nanioc->len;
	ret = wldev_iovar_setbuf(ndev, "nan", nanioc, nanioc_size,
		cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
	if (unlikely(ret)) {
		WL_ERR((" nan event selective enable failed, error = %d \n", ret));
		goto fail;
	} else {
		WL_DBG((" nan event selective enable successful \n"));
	}

	ret = wl_add_remove_eventmsg(ndev, WLC_E_PROXD, true);
	if (unlikely(ret)) {
		WL_ERR((" proxd event enable failed, error = %d \n", ret));
		goto fail;
	}

fail:
	if (nanioc) {
		kfree(nanioc);
	}

	return ret;
}
Example #25
s32
wl_cfgnan_notify_proxd_status(struct bcm_cfg80211 *cfg,
	bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *event, void *data)
{
	s32 ret = BCME_OK;
	wl_nan_ranging_event_data_t *rdata;
	s32 status;
	u16 data_len;
	s32 event_type;
	s32 event_num;
	u8 *buf = NULL;
	u32 buf_len;
	u8 *ptr;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	s32 i;

	if (!event || !data) {
		WL_ERR((" event data is NULL \n"));
		return -EINVAL;
	}

	status = ntoh32(event->reason);
	event_type = ntoh32(event->event_type);
	event_num = ntoh32(event->reason);
	data_len = ntoh32(event->datalen);

	WL_DBG((" proxd event: type: %d num: %d len: %d \n",
		event_type, event_num, data_len));

	if (INVALID_PROXD_EVENT(event_num)) {
		WL_ERR((" unsupported event, num: %d \n", event_num));
		return -EINVAL;
	}

	if (g_nan_debug) {
		WL_DBG((" event name: WLC_E_PROXD_NAN_EVENT \n"));
		WL_DBG((" event data: \n"));
		prhex(NULL, data, data_len);
	}

	if (data_len < sizeof(wl_nan_ranging_event_data_t)) {
		WL_ERR((" wrong data len \n"));
		return -EINVAL;
	}

	rdata = (wl_nan_ranging_event_data_t *)data;

	WL_DBG((" proxd event: count:%d success_count:%d mode:%d \n",
		rdata->count, rdata->success_count, rdata->mode));

	if (g_nan_debug) {
		prhex(" event data: ", data, data_len);
	}

	buf_len = NAN_IOCTL_BUF_SIZE;
	buf = kzalloc(buf_len, kflags);
	if (!buf) {
		WL_ERR((" memory allocation failed \n"));
		return -ENOMEM;
	}

	for (i = 0; i < rdata->count; i++) {
		if (&rdata->rr[i] == NULL) {
			ret = -EINVAL;
			goto fail;
		}

		ptr = buf;
		WL_DBG((" ranging data for mac:"MACDBG" \n",
			MAC2STRDBG(rdata->rr[i].ea.octet)));
		ptr += sprintf(buf, SUPP_EVENT_PREFIX"%s " MAC_ADDR_PREFIX MACF
			" "STATUS_PREFIX"%s", EVENT_RTT_STATUS_STR,
			ETHER_TO_MACF(rdata->rr[i].ea), (rdata->rr[i].status == 1) ?
			"success" : "fail");

		if (rdata->rr[i].status == 1) {
			/* add tsf and distance only if status is success */
			ptr += sprintf(ptr, " "TIMESTAMP_PREFIX"0x%x "
				DISTANCE_PREFIX"%d.%04d", rdata->rr[i].timestamp,
				rdata->rr[i].distance >> 4,
				((rdata->rr[i].distance & 0x0f) * 625));
		}

#ifdef WL_GENL
		/* send the preformatted string to the upper layer as event */
		WL_DBG((" formatted string for userspace: %s, len: %zu \n",
			buf, strlen(buf)));
		wl_genl_send_msg(bcmcfg_to_prmry_ndev(cfg), 0, buf, strlen(buf), 0, 0);
#endif /* WL_GENL */
	}

fail:
	if (buf) {
		kfree(buf);
	}

	return ret;
}
Example #26
int
wl_cfgnan_stop_handler(struct net_device *ndev,
	struct bcm_cfg80211 *cfg, char *cmd, nan_cmd_data_t *cmd_data)
{
	wl_nan_ioc_t *nanioc = NULL;
	struct ether_addr cluster_id = ether_null;
	void *pxtlv;
	s32 ret = BCME_OK;
	u16 start, end;
	u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	uint16 nanioc_size = sizeof(wl_nan_ioc_t) + NAN_IOCTL_BUF_SIZE;
	uint8 nan_enable = FALSE;

	if (cfg->nan_running == false) {
		WL_DBG((" nan not running, do nothing \n"));
		return BCME_OK;
	}

	nanioc = kzalloc(nanioc_size, kflags);
	if (!nanioc) {
		WL_ERR((" memory allocation failed \n"));
		return -ENOMEM;
	}

	/*
	 * command to test
	 *
	 * wl: wl nan stop
	 *     wl nan 0
	 *
	 * wpa_cli: DRIVER NAN_STOP
	 */

	/* nan stop */

	start = end = NAN_IOCTL_BUF_SIZE;
	nanioc->version = htod16(WL_NAN_IOCTL_VERSION);
	nanioc->id = htod16(WL_NAN_CMD_STOP);
	pxtlv = nanioc->data;
	ret = bcm_pack_xtlv_entry(&pxtlv, &end, WL_NAN_XTLV_CLUSTER_ID,
		ETHER_ADDR_LEN, &cluster_id);
	if (unlikely(ret)) {
		goto fail;
	}
	nanioc->len = start - end;
	nanioc_size = sizeof(wl_nan_ioc_t) + nanioc->len;
	ret = wldev_iovar_setbuf(ndev, "nan", nanioc, nanioc_size,
		cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
	if (unlikely(ret)) {
		WL_ERR((" nan stop failed, error = %d \n", ret));
		goto fail;
	} else {
		WL_DBG((" nan stop successful \n"));
	}

	/* nan disable */
	memset(nanioc, 0, nanioc_size);
	start = end = NAN_IOCTL_BUF_SIZE;
	nanioc->version = htod16(WL_NAN_IOCTL_VERSION);
	nanioc->id = htod16(WL_NAN_CMD_ENABLE);
	pxtlv = nanioc->data;
	ret = bcm_pack_xtlv_entry(&pxtlv, &end, WL_NAN_XTLV_ENABLE,
		sizeof(uint8), &nan_enable);
	if (unlikely(ret)) {
		goto fail;
	}
	nanioc->len = start - end;
	nanioc_size = sizeof(wl_nan_ioc_t) + nanioc->len;
	ret = wldev_iovar_setbuf(ndev, "nan", nanioc, nanioc_size,
		cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
	if (unlikely(ret)) {
		WL_ERR((" nan disable failed, error = %d \n", ret));
		goto fail;
	} else {
		WL_DBG((" nan disable successful \n"));
	}

fail:
	if (nanioc) {
		kfree(nanioc);
	}

	return ret;
}