void encoder::send_encoded_packet(uint8_t type)
{
	struct nl_msg *msg;
	struct nlattr *attr;
	uint8_t *data;
	size_t symbols = this->symbols();

	VLOG(LOG_PKT) << "Encoder " << m_coder << ": Send "
		      << (m_enc_pkt_count < symbols ? "systematic" : "encoded");

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PID, NL_AUTO_SEQ, m_io->genl_family(),
		    0, 0, BATADV_HLP_C_FRAME, 1);
	nla_put_u32(msg, BATADV_HLP_A_IFINDEX, m_io->ifindex());
	nla_put(msg, BATADV_HLP_A_SRC, ETH_ALEN, _key.src);
	nla_put(msg, BATADV_HLP_A_DST, ETH_ALEN, _key.dst);
	nla_put_u16(msg, BATADV_HLP_A_BLOCK, _key.block);
	nla_put_u8(msg, BATADV_HLP_A_TYPE, type);

	attr = nla_reserve(msg, BATADV_HLP_A_FRAME, this->payload_size());
	data = reinterpret_cast<uint8_t *>(nla_data(attr));
	this->encode(data);

	m_io->send_msg(msg);
	nlmsg_free(msg);

	m_enc_pkt_count++;
	inc("encoded sent");
	m_budget--;
}
void recoder::send_rec_packet()
{
	struct nl_msg *msg;
	struct nlattr *attr;
	uint8_t *data;

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PID, NL_AUTO_SEQ, m_io->genl_family(),
		    0, 0, BATADV_HLP_C_FRAME, 1);
	nla_put_u32(msg, BATADV_HLP_A_IFINDEX, m_io->ifindex());
	nla_put(msg, BATADV_HLP_A_SRC, ETH_ALEN, _key.src);
	nla_put(msg, BATADV_HLP_A_DST, ETH_ALEN, _key.dst);
	nla_put_u16(msg, BATADV_HLP_A_BLOCK, _key.block);
	nla_put_u8(msg, BATADV_HLP_A_TYPE, REC_PACKET);

	attr = nla_reserve(msg, BATADV_HLP_A_FRAME, this->payload_size());
	data = reinterpret_cast<uint8_t *>(nla_data(attr));
	this->recode(data);

	m_io->send_msg(msg);
	nlmsg_free(msg);

	m_rec_pkt_count++;
	inc("forward packets written");
}
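The two methods above share one idea: reserve the frame attribute up front and encode the payload directly in place rather than staging it in a separate buffer. A minimal sketch of that pattern, assuming libnl-3 and purely hypothetical family/command/attribute parameters (only nlmsg_alloc, genlmsg_put, nla_reserve, nla_data and nlmsg_free are real libnl calls):

/* Sketch only: build a generic netlink message with a reserved payload
 * attribute and hand the caller a pointer to write into. All parameter
 * values are placeholders supplied by the caller. */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/attr.h>
#include <stdint.h>

static struct nl_msg *reserve_frame(int family, int cmd, int frame_attr,
				    int payload_len, uint8_t **payload)
{
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *attr;

	if (!msg)
		return NULL;
	if (!genlmsg_put(msg, NL_AUTO_PID, NL_AUTO_SEQ, family,
			 0, 0, cmd, 1))
		goto err;

	attr = nla_reserve(msg, frame_attr, payload_len);
	if (!attr)
		goto err;

	*payload = nla_data(attr);	/* caller encodes in place */
	return msg;

err:
	nlmsg_free(msg);
	return NULL;
}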
static struct dlm_lock_data *mk_data(struct sk_buff *skb)
{
	struct nlattr *ret;

	ret = nla_reserve(skb, DLM_TYPE_LOCK, sizeof(struct dlm_lock_data));
	if (!ret)
		return NULL;
	return nla_data(ret);
}
static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
	struct xfrm_algo *algo;
	struct nlattr *nla;

	nla = nla_reserve(skb, XFRMA_ALG_AUTH,
			  sizeof(*algo) + (auth->alg_key_len + 7) / 8);
	if (!nla)
		return -EMSGSIZE;

	algo = nla_data(nla);
	strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
	memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
	algo->alg_key_len = auth->alg_key_len;

	return 0;
}
static int trace_fill_header(struct sk_buff *nlskb, u16 type,
			     const struct sk_buff *skb,
			     int off, unsigned int len)
{
	struct nlattr *nla;

	if (len == 0)
		return 0;

	nla = nla_reserve(nlskb, type, len);
	if (!nla || skb_copy_bits(skb, off, nla_data(nla), len))
		return -1;

	return 0;
}
static int nft_extension_dump_info(struct sk_buff *skb, int attr,
				   const void *info,
				   unsigned int size, unsigned int user_size)
{
	unsigned int info_size, aligned_size = XT_ALIGN(size);
	struct nlattr *nla;

	nla = nla_reserve(skb, attr, aligned_size);
	if (!nla)
		return -1;

	info_size = user_size ? : size;
	memcpy(nla_data(nla), info, info_size);
	memset(nla_data(nla) + info_size, 0, aligned_size - info_size);

	return 0;
}
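When the reserved length is rounded up (XT_ALIGN above), the padding past the real payload must be cleared, or uninitialized kernel memory leaks to userspace. A minimal sketch of that reserve/copy/clear pattern, with a hypothetical put_padded() helper:

/* Sketch only: reserve an aligned attribute, copy the payload, and zero
 * the alignment tail so no stale kernel bytes reach userspace. */
static int put_padded(struct sk_buff *skb, int attrtype,
		      const void *data, unsigned int size,
		      unsigned int aligned_size)
{
	struct nlattr *nla = nla_reserve(skb, attrtype, aligned_size);

	if (!nla)
		return -EMSGSIZE;
	memcpy(nla_data(nla), data, size);
	memset(nla_data(nla) + size, 0, aligned_size - size);
	return 0;
}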
static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_OPS,
			  prog->bpf_num_ops * sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}
static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_DIGEST,
			  sizeof(prog->filter->digest));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->digest, nla_len(nla));

	return 0;
}
static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
{
	struct xfrm_user_sec_ctx *uctx;
	struct nlattr *attr;
	int ctx_size = sizeof(*uctx) + s->ctx_len;

	attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
	if (attr == NULL)
		return -EMSGSIZE;

	uctx = nla_data(attr);
	uctx->exttype = XFRMA_SEC_CTX;
	uctx->len = ctx_size;
	uctx->ctx_doi = s->ctx_doi;
	uctx->ctx_alg = s->ctx_alg;
	uctx->ctx_len = s->ctx_len;
	memcpy(uctx + 1, s->ctx_str, s->ctx_len);

	return 0;
}
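copy_sec_ctx() shows a single attribute carrying a fixed header plus a variable tail: the header is filled through nla_data(), then the tail is copied right behind it (hdr + 1 points just past the header). A minimal sketch with a hypothetical struct and helper:

/* Sketch only: blob_hdr and put_hdr_and_tail are invented names; the
 * technique is the one used by copy_sec_ctx() above. */
struct blob_hdr {
	__u16 len;	/* header + payload, in bytes */
	__u16 type;
};

static int put_hdr_and_tail(struct sk_buff *skb, int attrtype, __u16 type,
			    const void *tail, __u16 tail_len)
{
	struct blob_hdr *hdr;
	struct nlattr *attr;

	attr = nla_reserve(skb, attrtype, sizeof(*hdr) + tail_len);
	if (!attr)
		return -EMSGSIZE;

	hdr = nla_data(attr);
	hdr->len = sizeof(*hdr) + tail_len;
	hdr->type = type;
	memcpy(hdr + 1, tail, tail_len);	/* bytes follow the header */
	return 0;
}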
static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
					 struct list_head *address_list)
{
	struct sctp_sockaddr_entry *laddr;
	int addrlen = sizeof(struct sockaddr_storage);
	int addrcnt = 0;
	struct nlattr *attr;
	void *info = NULL;

	list_for_each_entry_rcu(laddr, address_list, list)
		addrcnt++;

	attr = nla_reserve(skb, INET_DIAG_LOCALS, addrlen * addrcnt);
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	list_for_each_entry_rcu(laddr, address_list, list) {
		memcpy(info, &laddr->a, addrlen);
		info += addrlen;
	}

	return 0;
}
/*
 * Main event dispatcher. Called from other parts and drivers.
 * Send the event on the appropriate channels.
 * May be called from interrupt context.
 */
void wireless_send_event(struct net_device *dev, unsigned int cmd,
			 union iwreq_data *wrqu, const char *extra)
{
	const struct iw_ioctl_description *descr = NULL;
	int extra_len = 0;
	struct iw_event *event;		/* Mallocated whole event */
	int event_len;			/* Its size */
	int hdr_len;			/* Size of the event header */
	int wrqu_off = 0;		/* Offset in wrqu */
	/* Don't "optimise" the following variable, it will crash */
	unsigned cmd_index;		/* *MUST* be unsigned */
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct nlattr *nla;
#ifdef CONFIG_COMPAT
	struct __compat_iw_event *compat_event;
	struct compat_iw_point compat_wrqu;
	struct sk_buff *compskb;
#endif

	/*
	 * Nothing in the kernel sends scan events with data, be safe.
	 * This is necessary because we cannot fix up scan event data
	 * for compat, due to being contained in 'extra', but normally
	 * applications are required to retrieve the scan data anyway
	 * and no data is included in the event, this codifies that
	 * practice.
	 */
	//if (WARN_ON(cmd == SIOCGIWSCAN && extra))
	if (cmd == SIOCGIWSCAN && extra)
		extra = NULL;

	/* Get the description of the Event */
	if (cmd <= SIOCIWLAST) {
		cmd_index = IW_IOCTL_IDX(cmd);
		if (cmd_index < standard_ioctl_num)
			descr = &(standard_ioctl[cmd_index]);
	} else {
		cmd_index = IW_EVENT_IDX(cmd);
		if (cmd_index < standard_event_num)
			descr = &(standard_event[cmd_index]);
	}
	/* Don't accept unknown events */
	if (descr == NULL) {
		/* Note : we don't return an error to the driver, because
		 * the driver would not know what to do about it. It can't
		 * return an error to the user, because the event is not
		 * initiated by a user request.
		 * The best the driver could do is to log an error message.
		 * We will do it ourselves instead...
		 */
		printk(KERN_ERR "%s (WE) : Invalid/Unknown Wireless Event (0x%04X)\n",
		       dev->name, cmd);
		return;
	}

	/* Check extra parameters and set extra_len */
	if (descr->header_type == IW_HEADER_TYPE_POINT) {
		/* Check if number of token fits within bounds */
		if (wrqu->data.length > descr->max_tokens) {
			printk(KERN_ERR "%s (WE) : Wireless Event too big (%d)\n",
			       dev->name, wrqu->data.length);
			return;
		}
		if (wrqu->data.length < descr->min_tokens) {
			printk(KERN_ERR "%s (WE) : Wireless Event too small (%d)\n",
			       dev->name, wrqu->data.length);
			return;
		}
		/* Calculate extra_len - extra is NULL for restricted events */
		if (extra != NULL)
			extra_len = wrqu->data.length * descr->token_size;
		/* Always at an offset in wrqu */
		wrqu_off = IW_EV_POINT_OFF;
	}

	/* Total length of the event */
	hdr_len = event_type_size[descr->header_type];
	event_len = hdr_len + extra_len;

	/*
	 * The problem for 64/32 bit.
	 *
	 * On 64-bit, a regular event is laid out as follows:
	 * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
	 * | event.len | event.cmd | p a d d i n g |
	 * | wrqu data ... (with the correct size) |
	 *
	 * This padding exists because we manipulate event->u,
	 * and 'event' is not packed.
	 *
	 * An iw_point event is laid out like this instead:
	 * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
	 * | event.len | event.cmd | p a d d i n g |
	 * | iwpnt.len | iwpnt.flg | p a d d i n g |
	 * | extra data ...
	 *
	 * The second padding exists because struct iw_point is extended,
	 * but this depends on the platform...
	 *
	 * On 32-bit, all the padding shouldn't be there.
	 */

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return;

	/* Send via the RtNetlink event channel */
	nlh = rtnetlink_ifinfo_prep(dev, skb);
	if (WARN_ON(!nlh)) {
		kfree_skb(skb);
		return;
	}

	/* Add the wireless events in the netlink packet */
	nla = nla_reserve(skb, IFLA_WIRELESS, event_len);
	if (!nla) {
		kfree_skb(skb);
		return;
	}
	event = nla_data(nla);

	/* Fill event - first clear to avoid data leaking */
	memset(event, 0, hdr_len);
	event->len = event_len;
	event->cmd = cmd;
	memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN);
	if (extra_len)
		memcpy(((char *) event) + hdr_len, extra, extra_len);

	nlmsg_end(skb, nlh);
#ifdef CONFIG_COMPAT
	hdr_len = compat_event_type_size[descr->header_type];
	event_len = hdr_len + extra_len;

	compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!compskb) {
		kfree_skb(skb);
		return;
	}

	/* Send via the RtNetlink event channel */
	nlh = rtnetlink_ifinfo_prep(dev, compskb);
	if (WARN_ON(!nlh)) {
		kfree_skb(skb);
		kfree_skb(compskb);
		return;
	}

	/* Add the wireless events in the netlink packet */
	nla = nla_reserve(compskb, IFLA_WIRELESS, event_len);
	if (!nla) {
		kfree_skb(skb);
		kfree_skb(compskb);
		return;
	}
	compat_event = nla_data(nla);

	compat_event->len = event_len;
	compat_event->cmd = cmd;
	if (descr->header_type == IW_HEADER_TYPE_POINT) {
		compat_wrqu.length = wrqu->data.length;
		compat_wrqu.flags = wrqu->data.flags;
		memcpy(&compat_event->pointer,
		       ((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF,
		       hdr_len - IW_EV_COMPAT_LCP_LEN);
		if (extra_len)
			memcpy(((char *) compat_event) + hdr_len,
			       extra, extra_len);
	} else {
		/* extra_len must be zero, so no if (extra) needed */
		memcpy(&compat_event->pointer, wrqu,
		       hdr_len - IW_EV_COMPAT_LCP_LEN);
	}

	nlmsg_end(compskb, nlh);

	skb_shinfo(skb)->frag_list = compskb;
#endif
	skb_queue_tail(&dev_net(dev)->wext_nlevents, skb);
	schedule_work(&wireless_nlevent_work);
}
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
		      struct user_namespace *user_ns,
		      u32 portid, u32 seq, u16 nlmsg_flags,
		      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct tcp_congestion_ops *ca_ops;
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	void *info = NULL;

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(!handler);

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto errout;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS,
				       inet6_sk(sk)->tclass) < 0)
				goto errout;

		if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
		    nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
			goto errout;
	}
#endif

	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = sock_i_ino(sk);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = sk->sk_wmem_queued,
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	if (!icsk) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
		attr = nla_reserve(skb, INET_DIAG_INFO,
				   handler->idiag_info_size);
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if (ext & (1 << (INET_DIAG_CONG - 1))) {
		int err = 0;

		rcu_read_lock();
		ca_ops = READ_ONCE(icsk->icsk_ca_ops);
		if (ca_ops)
			err = nla_put_string(skb, INET_DIAG_CONG, ca_ops->name);
		rcu_read_unlock();
		if (err < 0)
			goto errout;
	}

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT) {
		union tcp_cc_info info;
		size_t sz = 0;
		int attr;

		rcu_read_lock();
		ca_ops = READ_ONCE(icsk->icsk_ca_ops);
		if (ca_ops && ca_ops->get_info)
			sz = ca_ops->get_info(sk, ext, &attr, &info);
		rcu_read_unlock();
		if (sz && nla_put(skb, attr, sz, &info) < 0)
			goto errout;
	}

out:
	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb,
			      const struct inet_diag_req_v2 *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns,
				 portid, seq, nlmsg_flags, unlh);
}

static int inet_twsk_diag_fill(struct sock *sk,
			       struct sk_buff *skb,
			       u32 portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	tmo = tw->tw_timer.expires - jiffies;
	if (tmo < 0)
		tmo = 0;

	inet_diag_msg_common_fill(r, sk);
	r->idiag_retrans = 0;

	r->idiag_state = tw->tw_substate;
	r->idiag_timer = 3;
	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;

	nlmsg_end(skb, nlh);
	return 0;
}

static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = inet_reqsk(sk)->num_retrans;

	BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
		     offsetof(struct sock, sk_cookie));

	tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
	r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;

	nlmsg_end(skb, nlh);
	return 0;
}

static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			const struct inet_diag_req_v2 *r,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u16 nlmsg_flags,
			const struct nlmsghdr *unlh)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill(sk, skb, portid, seq,
					   nlmsg_flags, unlh);

	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return inet_req_diag_fill(sk, skb, portid, seq,
					  nlmsg_flags, unlh);

	return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
				  nlmsg_flags, unlh);
}

struct sock *inet_diag_find_one_icsk(struct net *net,
				     struct inet_hashinfo *hashinfo,
				     const struct inet_diag_req_v2 *req)
{
	struct sock *sk;

	if (req->sdiag_family == AF_INET)
		sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6) {
		if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
		    ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
			sk = inet_lookup(net, hashinfo, req->id.idiag_dst[3],
					 req->id.idiag_dport,
					 req->id.idiag_src[3],
					 req->id.idiag_sport,
					 req->id.idiag_if);
		else
			sk = inet6_lookup(net, hashinfo,
					  (struct in6_addr *)req->id.idiag_dst,
					  req->id.idiag_dport,
					  (struct in6_addr *)req->id.idiag_src,
					  req->id.idiag_sport,
					  req->id.idiag_if);
	}
#endif
	else
		return ERR_PTR(-EINVAL);

	if (!sk)
		return ERR_PTR(-ENOENT);

	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
		sock_gen_put(sk);
		return ERR_PTR(-ENOENT);
	}

	return sk;
}
EXPORT_SYMBOL_GPL(inet_diag_find_one_icsk);

int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
			    struct sk_buff *in_skb,
			    const struct nlmsghdr *nlh,
			    const struct inet_diag_req_v2 *req)
{
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *rep;
	struct sock *sk;
	int err;

	sk = inet_diag_find_one_icsk(net, hashinfo, req);
	if (IS_ERR(sk))
		return PTR_ERR(sk);

	rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_diag_fill(sk, rep, req,
			   sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, nlh);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		nlmsg_free(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk)
		sock_gen_put(sk);

	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);

static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       const struct inet_diag_req_v2 *req)
{
	const struct inet_diag_handler *handler;
	int err;

	handler = inet_diag_lock_handler(req->sdiag_protocol);
	if (IS_ERR(handler))
		err = PTR_ERR(handler);
	else if (cmd == SOCK_DIAG_BY_FAMILY)
		err = handler->dump_one(in_skb, nlh, req);
	else if (cmd == SOCK_DESTROY_BACKPORT && handler->destroy)
		err = handler->destroy(in_skb, req);
	else
		err = -EOPNOTSUPP;
	inet_diag_unlock_handler(handler);

	return err;
}

static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__be32 w1, w2;
		__be32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}
static int wl_cfgvendor_gscan_get_batch_results(struct wiphy *wiphy,
	struct wireless_dev *wdev, const void *data, int len)
{
	int err = 0;
	struct wl_priv *cfg = wiphy_priv(wiphy);
	gscan_results_cache_t *results, *iter;
	uint32 reply_len, complete = 1;
	int32 mem_needed, num_results_iter;
	wifi_gscan_result_t *ptr;
	uint16 num_scan_ids, num_results;
	struct sk_buff *skb;
	struct nlattr *scan_hdr, *complete_flag;

	err = dhd_dev_wait_batch_results_complete(wl_to_prmry_ndev(cfg));
	if (err != BCME_OK)
		return -EBUSY;

	err = dhd_dev_pno_lock_access_batch_results(wl_to_prmry_ndev(cfg));
	if (err != BCME_OK) {
		WL_ERR(("Can't obtain lock to access batch results %d\n", err));
		return -EBUSY;
	}
	results = dhd_dev_pno_get_gscan(wl_to_prmry_ndev(cfg),
		DHD_PNO_GET_BATCH_RESULTS, NULL, &reply_len);

	if (!results) {
		WL_ERR(("No results to send %d\n", err));
		err = wl_cfgvendor_send_cmd_reply(wiphy, wl_to_prmry_ndev(cfg),
			results, 0);

		if (unlikely(err))
			WL_ERR(("Vendor Command reply failed ret:%d \n", err));
		dhd_dev_pno_unlock_access_batch_results(wl_to_prmry_ndev(cfg));
		return err;
	}
	num_scan_ids = reply_len & 0xFFFF;
	num_results = (reply_len & 0xFFFF0000) >> 16;
	mem_needed = (num_results * sizeof(wifi_gscan_result_t)) +
		(num_scan_ids * GSCAN_BATCH_RESULT_HDR_LEN) +
		VENDOR_REPLY_OVERHEAD + SCAN_RESULTS_COMPLETE_FLAG_LEN;

	if (mem_needed > (int32)NLMSG_DEFAULT_SIZE) {
		mem_needed = (int32)NLMSG_DEFAULT_SIZE;
		complete = 0;
	}

	WL_TRACE(("complete %d mem_needed %d max_mem %d\n", complete,
		mem_needed, (int)NLMSG_DEFAULT_SIZE));

	/* Alloc the SKB for vendor_event */
	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
	if (unlikely(!skb)) {
		WL_ERR(("skb alloc failed"));
		dhd_dev_pno_unlock_access_batch_results(wl_to_prmry_ndev(cfg));
		return -ENOMEM;
	}
	iter = results;
	complete_flag = nla_reserve(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE,
		sizeof(complete));
	mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN +
		VENDOR_REPLY_OVERHEAD);

	while (iter) {
		num_results_iter = (mem_needed - GSCAN_BATCH_RESULT_HDR_LEN) /
			sizeof(wifi_gscan_result_t);
		if (num_results_iter <= 0 ||
		    ((iter->tot_count - iter->tot_consumed) > num_results_iter))
			break;
		scan_hdr = nla_nest_start(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS);
		/* no more room? we are done then (for now) */
		if (scan_hdr == NULL) {
			complete = 0;
			break;
		}
		nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id);
		nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag);

		num_results_iter = iter->tot_count - iter->tot_consumed;

		nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS,
			num_results_iter);
		if (num_results_iter) {
			ptr = &iter->results[iter->tot_consumed];
			iter->tot_consumed += num_results_iter;
			nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS,
				num_results_iter * sizeof(wifi_gscan_result_t),
				ptr);
		}
		nla_nest_end(skb, scan_hdr);
		mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN +
			(num_results_iter * sizeof(wifi_gscan_result_t));
		iter = iter->next;
	}
	memcpy(nla_data(complete_flag), &complete, sizeof(complete));
	dhd_dev_gscan_batch_cache_cleanup(wl_to_prmry_ndev(cfg));
	dhd_dev_pno_unlock_access_batch_results(wl_to_prmry_ndev(cfg));
	return cfg80211_vendor_cmd_reply(skb);
}
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
	NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len);
	NLA_PUT_U32(skb, IFLA_WEIGHT, dev->weight);
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
	NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);

	if (dev->ifindex != dev->iflink)
		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);

	if (dev->master)
		NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);

	if (dev->qdisc_sleeping)
		NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc_sleeping->ops->id);

	if (1) {
		struct rtnl_link_ifmap map = {
			.mem_start = dev->mem_start,
			.mem_end = dev->mem_end,
			.base_addr = dev->base_addr,
			.irq = dev->irq,
			.dma = dev->dma,
			.port = dev->if_port,
		};
		NLA_PUT(skb, IFLA_MAP, sizeof(map), &map);
	}

	if (dev->addr_len) {
		NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
		NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
	}

	if (dev->get_stats) {
		struct net_device_stats *stats = dev->get_stats(dev);
		if (stats) {
			struct nlattr *attr;

			attr = nla_reserve(skb, IFLA_STATS,
					   sizeof(struct rtnl_link_stats));
			if (attr == NULL)
				goto nla_put_failure;

			copy_rtnl_link_stats(nla_data(attr), stats);
		}
	}

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	int s_idx = cb->args[0];
	struct net_device *dev;

	idx = 0;
	for_each_netdev(dev) {
		if (idx < s_idx)
			goto cont;
		if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
				     NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, 0,
				     NLM_F_MULTI) <= 0)
			break;
cont:
		idx++;
	}
	cb->args[0] = idx;

	return skb->len;
}
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, struct inet_diag_req_v2 *req,
		      struct user_namespace *user_ns,
		      u32 portid, u32 seq, u16 nlmsg_flags,
		      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	void *info = NULL;
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(handler == NULL);

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(sk->sk_state == TCP_TIME_WAIT);

	r->idiag_family = sk->sk_family;
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

	r->id.idiag_sport = inet->inet_sport;
	r->id.idiag_dport = inet->inet_dport;
	r->id.idiag_src[0] = inet->inet_rcv_saddr;
	r->id.idiag_dst[0] = inet->inet_daddr;

	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto errout;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		const struct ipv6_pinfo *np = inet6_sk(sk);

		*(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = np->daddr;

		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS, np->tclass) < 0)
				goto errout;
	}
#endif

	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = sock_i_ino(sk);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = sk->sk_wmem_queued,
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	if (icsk == NULL) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

#define EXPIRES_IN_MS(tmo)  DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		attr = nla_reserve(skb, INET_DIAG_INFO,
				   sizeof(struct tcp_info));
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
		if (nla_put_string(skb, INET_DIAG_CONG,
				   icsk->icsk_ca_ops->name) < 0)
			goto errout;

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

out:
	return nlmsg_end(skb, nlh);

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb,
			      struct inet_diag_req_v2 *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	return inet_sk_diag_fill(sk, inet_csk(sk), skb, req,
				 user_ns, portid, seq, nlmsg_flags, unlh);
}

static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
			       struct sk_buff *skb,
			       struct inet_diag_req_v2 *req,
			       u32 portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	long tmo;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	tmo = tw->tw_ttd - jiffies;
	if (tmo < 0)
		tmo = 0;

	r->idiag_family = tw->tw_family;
	r->idiag_retrans = 0;

	r->id.idiag_if = tw->tw_bound_dev_if;
	sock_diag_save_cookie(tw, r->id.idiag_cookie);

	r->id.idiag_sport = tw->tw_sport;
	r->id.idiag_dport = tw->tw_dport;
	r->id.idiag_src[0] = tw->tw_rcv_saddr;
	r->id.idiag_dst[0] = tw->tw_daddr;

	r->idiag_state = tw->tw_substate;
	r->idiag_timer = 3;
	r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (tw->tw_family == AF_INET6) {
		const struct inet6_timewait_sock *tw6 =
					inet6_twsk((struct sock *)tw);

		*(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
	}
#endif

	return nlmsg_end(skb, nlh);
}