void sendnlmsg(char *message) { struct sk_buff *skb_1; struct nlmsghdr *nlh; int len = NLMSG_SPACE(MAX_MSGSIZE); int slen = 0; if(!message || !nl_sk) { return ; } skb_1 = alloc_skb(len,GFP_KERNEL); if(!skb_1) { printk(KERN_ERR "pwp:alloc_skb_1 error\n"); } slen = strlen(message); nlh = nlmsg_put(skb_1, 0, 0, 0, MAX_MSGSIZE, 0); NETLINK_CB(skb_1).pid = 0; NETLINK_CB(skb_1).dst_group = 0; message[slen]= '\0'; memcpy(NLMSG_DATA(nlh), message, slen+1); printk("pwp:send message '%s'.\n",(char *)NLMSG_DATA(nlh)); netlink_unicast(nl_sk, skb_1, user_pid, MSG_DONTWAIT); }
/*
 * connection_write_data - send @len bytes from @buffer to the peer
 * (conn->peer_pid) over conn->socket_descriptor as a netlink request.
 *
 * Returns @len on success, -1 on allocation/build failure.
 *
 * Fixes: the old cleanup test (!ret) never matched the nlmsg_put()
 * failure path (ret == -1), leaking the freshly allocated skb; for
 * len == 0 it also double-freed an skb that netlink_unicast() had
 * already consumed.
 */
size_t connection_write_data(struct connection *conn, void *buffer,
			     uint32_t len)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int ret = 0;

	MCDRV_DBG_VERBOSE("buffer length %u from pid %u\n",
			  len, conn->sequence_magic);
	do {
		skb = nlmsg_new(NLMSG_SPACE(len), GFP_KERNEL);
		if (!skb) {
			ret = -1;
			break;
		}

		nlh = nlmsg_put(skb, 0, conn->sequence_magic, 2,
				NLMSG_LENGTH(len), NLM_F_REQUEST);
		if (!nlh) {
			ret = -1;
			break;
		}
		memcpy(NLMSG_DATA(nlh), buffer, len);

		/* netlink_unicast() consumes the skb, success or not */
		netlink_unicast(conn->socket_descriptor, skb,
				conn->peer_pid, MSG_DONTWAIT);
		skb = NULL;
		ret = len;
	} while (0);

	/* BUG FIX: free only when the skb never reached netlink_unicast() */
	if (ret < 0 && skb != NULL)
		kfree_skb(skb);

	return ret;
}
/*
 * audit_send_reply - build and unicast an audit netlink reply.
 * @pid:     destination netlink pid
 * @seq:     sequence number echoed back to the requester
 * @type:    audit message type (replaced by NLMSG_DONE when @done)
 * @done:    nonzero for the final message of a multi-part reply
 * @multi:   nonzero to set NLM_F_MULTI on the message
 * @payload: data copied into the message body
 * @size:    payload size in bytes
 *
 * NOTE: NLMSG_PUT() contains a hidden "goto nlmsg_failure" on buffer
 * overflow, which is why the label below is reachable despite no
 * visible goto after the alloc check.  netlink_unicast() consumes the
 * skb on both success and failure, so no free is needed there.
 */
void audit_send_reply(int pid, int seq, int type, int done, int multi,
		      void *payload, int size)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len = NLMSG_SPACE(size);	/* header plus aligned payload */
	void *data;
	int flags = multi ? NLM_F_MULTI : 0;
	int t = done ? NLMSG_DONE : type;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		goto nlmsg_failure;
	nlh = NLMSG_PUT(skb, pid, seq, t, len - sizeof(*nlh));
	nlh->nlmsg_flags = flags;
	data = NLMSG_DATA(nlh);
	memcpy(data, payload, size);
	netlink_unicast(audit_sock, skb, pid, MSG_DONTWAIT);
	return;

nlmsg_failure:			/* Used by NLMSG_PUT */
	if (skb)
		kfree_skb(skb);
}
/*
 * ipmr_cache_resolve - replay packets queued while a multicast route
 * was unresolved, now that cache entry @c is resolved.
 *
 * Queued skbs with iph->version == 0 are not real IP packets: they are
 * pending netlink route-dump replies stashed behind a fake IP header.
 * Those are completed (or converted to an -EMSGSIZE error message) and
 * unicast back to the requesting pid; real packets are forwarded.
 */
static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;

	/*
	 * Play the pending entries through our router
	 */
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (skb->nh.iph->version == 0) {
			/* netlink request: strip the fake IP header to
			 * recover the original nlmsghdr */
			int err;
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				/* reply filled in place; fix the length */
				nlh->nlmsg_len = skb->tail - (u8*)nlh;
			} else {
				/* reply did not fit: turn the message
				 * into an NLMSG_ERROR carrying -EMSGSIZE */
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr*)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			err = netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT);
		} else {
			DBPRINT("ipmr_cache_resolve - IP packet from %u.%u.%u.%u -> %u.%u.%u.%u with NextProtocol= %d\n",
				NIPQUAD(skb->nh.iph->saddr),
				NIPQUAD(skb->nh.iph->daddr),
				skb->nh.iph->protocol);
			/* real data packet: forward via the new route */
			ip_mr_forward(skb, c, 0);
		}
	}
}
/*
 * audit_send_reply - build and unicast a reply on the audit socket.
 * @pid:     destination netlink pid
 * @seq:     sequence number echoed back to the requester
 * @type:    audit message type (replaced by NLMSG_DONE when @done)
 * @done:    nonzero for the final message of a multi-part reply
 * @multi:   nonzero to set NLM_F_MULTI on the message
 * @payload: data copied into the message body
 * @size:    payload size in bytes
 *
 * NOTE: NLMSG_PUT() contains a hidden "goto nlmsg_failure", which is
 * why the label below is reachable despite no visible goto.
 */
void audit_send_reply(int pid, int seq, int type, int done, int multi,
		      void *payload, int size)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len = NLMSG_SPACE(size);	/* header plus aligned payload */
	void *data;
	int flags = multi ? NLM_F_MULTI : 0;
	int t = done ? NLMSG_DONE : type;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return;
	nlh = NLMSG_PUT(skb, pid, seq, t, size);
	nlh->nlmsg_flags = flags;
	data = NLMSG_DATA(nlh);
	memcpy(data, payload, size);
	/* Ignore failure. It'll only happen if the sender goes away,
	   because our timeout is set to infinite. */
	netlink_unicast(audit_sock, skb, pid, 0);
	return;

nlmsg_failure:			/* Used by NLMSG_PUT */
	if (skb)
		kfree_skb(skb);
}
/* Send Message to user mode */ void dispmgr_nl_send_msg(struct dispmgr_command_hdr *cmd_hdr) { struct nlmsghdr *nlh; struct sk_buff *skb_out; unsigned int msg_size = 0; unsigned int data_size = 0; unsigned int hdr_size = 0; int ret = 0; /* if no user mode process active */ if (!g_pid) return; hdr_size = sizeof(struct dispmgr_command_hdr); data_size = hdr_size + cmd_hdr->data_size; msg_size = data_size + sizeof(struct nlmsghdr); skb_out = nlmsg_new(msg_size, 0); if (!skb_out) { printk ("kdispmgr: Failed to allocated skb\n"); return; } nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, msg_size, 0); NETLINK_CB(skb_out).dst_group = 0; /* not in mcast group */ memcpy(nlmsg_data(nlh), cmd_hdr, hdr_size); if (cmd_hdr->data_size) { memcpy(nlmsg_data(nlh) + hdr_size, cmd_hdr->data, cmd_hdr->data_size); } ret = netlink_unicast(nl_sk, skb_out, g_pid, MSG_DONTWAIT); }
static int send_to_user(struct packet_info *info) { int ret; int size; unsigned char *old_tail; struct sk_buff *skb; struct nlmsghdr *nlh; struct packet_info *packet; size = NLMSG_SPACE(sizeof(*info)); skb = alloc_skb(size, GFP_ATOMIC); old_tail = skb->tail; nlh = NLMSG_PUT(skb, 0, 0, IMP2_K_MSG, size-sizeof(*nlh)); packet = NLMSG_DATA(nlh); memset(packet, 0, sizeof(struct packet_info)); packet->src = info->src; packet->dest = info->dest; nlh->nlmsg_len = skb->tail - old_tail; NETLINK_CB(skb).dst_groups = 0; read_lock_bh(&user_proc.lock); ret = netlink_unicast(nlfd, skb, user_proc.pid, MSG_DONTWAIT); read_unlock_bh(&user_proc.lock); return ret; nlmsg_failure: if(skb) kfree_skb(skb); return -1; }
/*
 * Description: This function is called by Linux kernel when user
 *              applications sends a message on netlink socket. It
 *              dequeues the message, calls the functions to process
 *              the commands and sends the result back to user.
 *
 * Input:       skb - Kernel socket structure
 */
static void emf_netlink_sock_cb(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_hdr(skb);
	EMF_DEBUG("Length of the command buffer %d\n", nlh->nlmsg_len);

	/* Check the buffer for min size: must hold at least a netlink
	 * header, a self-consistent nlmsg_len, and a full request */
	if (skb->len < NLMSG_SPACE(0) ||
	    skb->len < nlh->nlmsg_len ||
	    nlh->nlmsg_len < NLMSG_LENGTH(sizeof(emf_cfg_request_t))) {
		EMF_ERROR("Configuration request size not > %d\n",
			  sizeof(emf_cfg_request_t));
		return;
	}

	/* clone: the request is processed in place and the same buffer
	 * is sent back as the reply; the original skb stays owned by
	 * the netlink core */
	skb = skb_clone(skb, GFP_KERNEL);
	if (skb == NULL)
		return;
	nlh = nlmsg_hdr(skb);

	/* Process the message */
	emf_cfg_request_process((emf_cfg_request_t *)NLMSG_DATA(nlh));

	/* Send the result to user process */
	NETLINK_CB(skb).pid = nlh->nlmsg_pid;
	NETLINK_CB(skb).dst_group = 0;	/* unicast */
	netlink_unicast(emf->nl_sk, skb, nlh->nlmsg_pid, MSG_DONTWAIT);
}
/*
 * udp_reply - unicast the NUL-terminated string @payload to userspace
 * pid @pid with sequence number @seq on netlink_sock.
 *
 * NOTE: NLMSG_PUT() contains a hidden "goto nlmsg_failure", which is
 * why the label below is reachable.  netlink_unicast() consumes the
 * skb on success and failure, so no free is needed on the send path.
 */
static void udp_reply(int pid, int seq, void *payload)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int size = strlen(payload) + 1;	/* include the terminating NUL */
	int len = NLMSG_SPACE(size);
	void *data;
	int ret;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;
	nlh = NLMSG_PUT(skb, pid, seq, 0, size);
	nlh->nlmsg_flags = 0;
	data = NLMSG_DATA(nlh);
	memcpy(data, payload, size);
	NETLINK_CB(skb).pid = 0;	/* from kernel */
	NETLINK_CB(skb).dst_group = 0;	/* unicast */
	ret = netlink_unicast(netlink_sock, skb, pid, MSG_DONTWAIT);
	if (ret < 0) {
		printk("send failed\n");
		return;
	}
	return;

nlmsg_failure:			/* Used by NLMSG_PUT */
	if (skb)
		kfree_skb(skb);
}
/*
 * ath_netlink_send - unicast @event_datalen bytes of @event_data to
 * the userspace listener (gpid) over ath_nl_sock.
 *
 * Fix: on nlmsg_put() failure the freshly allocated skb was leaked.
 */
void ath_netlink_send(char *event_data, u32 event_datalen)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(NLMSG_SPACE(event_datalen), GFP_ATOMIC);
	if (!skb) {
		ath6kl_err("%s: No memory,\n", __func__);
		return;
	}

	nlh = nlmsg_put(skb, gpid, 0, 0, NLMSG_SPACE(event_datalen), 0);
	if (!nlh) {
		ath6kl_err("%s: nlmsg_put() failed\n", __func__);
		kfree_skb(skb);		/* BUG FIX: skb was leaked here */
		return;
	}

	memcpy(NLMSG_DATA(nlh), event_data, event_datalen);

	/* the sender-pid field was renamed in kernel 3.7 */
#ifdef ATH6KL_SUPPORT_NETLINK_KERNEL3_7
	NETLINK_CB(skb).portid = 0;	/* from kernel */
#else
	NETLINK_CB(skb).pid = 0;	/* from kernel */
#endif
	NETLINK_CB(skb).dst_group = 0;	/* unicast */

	/* netlink_unicast() consumes the skb on success and failure */
	netlink_unicast(ath_nl_sock, skb, gpid, MSG_DONTWAIT);
}
void send_network_msg(int uid) { struct sk_buff *skb; struct nlmsghdr *nlh; char uidStr[10]; int ret; //printk(KERN_ERR "send_network_msg, uid = %d", uid); snprintf(uidStr, 10, "%d", uid); if(per_sock) { skb = alloc_skb(NLMSG_SPACE(MAX_MSGSIZE), GFP_ATOMIC); nlh = NLMSG_PUT(skb, 0, 0, 0, NLMSG_SPACE(MAX_MSGSIZE)); strcpy((char*)NLMSG_DATA(nlh), uidStr); nlh->nlmsg_len = NLMSG_SPACE(MAX_MSGSIZE); nlh->nlmsg_pid = 0; nlh->nlmsg_flags = 0; NETLINK_CB(skb).pid = 0; NETLINK_CB(skb).dst_group = 0; ret = netlink_unicast(per_sock, skb, clientPid, MSG_DONTWAIT); //printk(KERN_ERR "netlink_unicast, ret=%d", ret); return; nlmsg_failure: printk(KERN_ERR "netlink_unicast error!"); if (skb) { kfree_skb(skb); } } }
void sendnlmsg(void) { struct sk_buff *skb_1; struct nlmsghdr *nlh; int len = NLMSG_SPACE(MAX_MSGSIZE); int slen = 0; char buffer[128]; const char *message="hello i am kernel"; if(!message || !nl_sk){ return ; } skb_1 = alloc_skb(len,GFP_KERNEL); if(!skb_1){ printk(KERN_ERR "my_net_link:alloc_skb_1 error\n"); } nlh = nlmsg_put(skb_1,0,0,0,MAX_MSGSIZE,0); //NETLINK_CB(skb_1).pid = 0; NETLINK_CB(skb_1).portid = 0; NETLINK_CB(skb_1).dst_group = 0; slen = stringlength(message); memset(buffer,0,sizeof(buffer)); memcpy(buffer,message,slen); memcpy(NLMSG_DATA(nlh),buffer,slen+1); printk("my_net_link:send message '%s'.\n",(char *)NLMSG_DATA(nlh)); netlink_unicast(nl_sk,skb_1,pid,MSG_DONTWAIT); }
/*
 * ipmr_cache_resolve - replay packets queued while a multicast route
 * was unresolved, now that cache entry @c is resolved.
 *
 * Queued skbs with iph->version == 0 are not real IP packets: they are
 * pending netlink route-dump replies stashed behind a fake IP header.
 * Those are completed (or converted to an -EMSGSIZE error, with the
 * echoed request header zeroed) and unicast back to the requesting
 * pid; real packets are forwarded via ip_mr_forward().
 */
static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/*
	 * Play the pending entries through our router
	 */
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (skb->nh.iph->version == 0) {
			/* netlink request: strip the fake IP header to
			 * recover the original nlmsghdr */
			int err;
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				/* reply filled in place; fix the length */
				nlh->nlmsg_len = skb->tail - (u8*)nlh;
			} else {
				/* reply did not fit: turn the message
				 * into an NLMSG_ERROR carrying -EMSGSIZE */
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = NLMSG_DATA(nlh);
				e->error = -EMSGSIZE;
				/* don't leak stale bytes in the echoed header */
				memset(&e->msg, 0, sizeof(e->msg));
			}
			err = netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT);
		} else
			/* real data packet: forward via the new route */
			ip_mr_forward(skb, c, 0);
	}
}
/**
 * Daemon responsible to empty the FIFO of events inside the Netlink.
 *
 * Kernel thread main loop: while not asked to stop, dequeue one skb at
 * a time from kct_skb_queue and unicast it to the monitoring process
 * (monitor_pid); when the queue is empty or no monitor is registered,
 * sleep interruptibly on kct_wq until woken.  On exit the remaining
 * queue is purged.  Always returns 0 (thread exit code).
 */
static int kct_daemon(void *unused)
{
	struct sk_buff *skb = NULL;

	pr_debug("%s: started!\n", __func__);
	while (!kthread_should_stop()) {
		pr_debug("%s: loop.\n", __func__);
		if (skb_queue_len(&kct_skb_queue) && monitor_pid) {
			skb = skb_dequeue(&kct_skb_queue);
			if (skb) {
				/* pid might not have been set in kct_log_event;
				 * ensure it's ok now **/
				PORTID(skb) = monitor_pid;
				/* nonblock=1: drop rather than sleep if the
				 * receiver's buffer is full */
				netlink_unicast(kct_nl_sk, skb,
						monitor_pid, 1);
			}
		} else {
			/* sleep until there is work, a monitor appears,
			 * or the thread is asked to stop */
			wait_event_interruptible(kct_wq,
						 (skb_queue_len(&kct_skb_queue) &&
						  monitor_pid) ||
						 kthread_should_stop());
		}
	}
	pr_debug("%s: daemon terminated.\n", __func__);

	/* free any events still queued at shutdown */
	skb_queue_purge(&kct_skb_queue);

	return 0;
}
/** * Send message to userspace */ void sendnlmsg(char *message) { struct sk_buff *nskb; struct nlmsghdr *nlh; int len = NLMSG_SPACE(MAX_MSGSIZE); int slen = 0; if(!message || !nl_sk) { return ; } //allocate space nskb = alloc_skb(len,GFP_KERNEL); if(!nskb) { printk(KERN_ERR "alloc_skb send to userspace error\n"); } slen = strlen(message); //construct the packent nlh = nlmsg_put(nskb,0,0,0,MAX_MSGSIZE,0); NETLINK_CB(nskb).pid = 0; NETLINK_CB(nskb).dst_group = 0; memcpy(NLMSG_DATA(nlh), message, slen+1); //sent unicast message netlink_unicast(nl_sk, nskb, pid, MSG_DONTWAIT); }
/*
 * netlink_ack - send an acknowledgement/error reply for request @nlh
 * back to the sender of @in_skb.
 *
 * For err == 0 only a bare nlmsgerr is returned; for a real error the
 * full original request (nlmsg_len bytes) is echoed after the error
 * code, hence the larger allocation.  If the reply skb cannot be
 * allocated, the sender's socket is flagged with ENOBUFS instead so
 * userspace still learns something went wrong.
 */
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	int size;

	if (err == 0)
		/* ack: error code only */
		size = NLMSG_SPACE(sizeof(struct nlmsgerr));
	else
		/* error: code plus the echoed request header+payload */
		size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		/* can't build a reply — signal ENOBUFS on the sender */
		sk = netlink_lookup(in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr));
	errmsg = NLMSG_DATA(rep);
	errmsg->error = err;
	/* echo the whole request on error, only its header on ack */
	memcpy(&errmsg->msg, nlh,
	       err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
int send_to_user(char *info) //发送到用户空间 { int size; struct sk_buff *skb; unsigned int old_tail; struct nlmsghdr *nlh; //报文头 int retval; size = NLMSG_SPACE(strlen(info)); //报文大小 skb = alloc_skb(size, GFP_ATOMIC); //分配一个新的套接字缓存,使用GFP_ATOMIC标志进程不>会被置为睡眠 //初始化一个netlink消息首部 nlh = nlmsg_put(skb, 0, 0, 0, NLMSG_SPACE(strlen(info))-sizeof(struct nlmsghdr), 0); old_tail = skb->tail; memcpy(NLMSG_DATA(nlh), info, strlen(info)); //填充数据区 nlh->nlmsg_len = skb->tail - old_tail; //设置消息长度 //设置控制字段 NETLINK_CB(skb).pid = 0; NETLINK_CB(skb).dst_group = 0; printk( "[kernel send] skb->data:%s\n", (char *)NLMSG_DATA((struct nlmsghdr *)skb->data)); //发送数据 retval = netlink_unicast(netlinkfd, skb, user_process.pid, MSG_DONTWAIT); printk( "[kernel send] netlink_unicast return: %d\n", retval); return 0; }
/*
 * kauditd_thread - kernel thread that drains audit_skb_queue.
 *
 * Each dequeued record is unicast to the audit daemon (audit_pid) when
 * one is registered; otherwise its text (which starts after the
 * netlink header, at NLMSG_SPACE(0)) is printed to the kernel log and
 * the skb freed.  A refused unicast means the daemon died, so
 * audit_pid is reset.  When the queue is empty the thread sleeps on
 * kauditd_wait; blocked writers are woken via audit_backlog_wait after
 * every dequeue.  Never returns.
 */
int kauditd_thread(void *dummy)
{
	struct sk_buff *skb;

	while (1) {
		skb = skb_dequeue(&audit_skb_queue);
		/* a slot freed up — let throttled log producers continue */
		wake_up(&audit_backlog_wait);
		if (skb) {
			if (audit_pid) {
				/* blocking send (timeout infinite);
				 * netlink_unicast() consumes the skb */
				int err = netlink_unicast(audit_sock, skb,
							  audit_pid, 0);
				if (err < 0) {
					BUG_ON(err != -ECONNREFUSED); /* Shoudn't happen */
					printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n",
					       audit_pid);
					/* daemon is gone; stop targeting it */
					audit_pid = 0;
				}
			} else {
				/* no daemon: log the record text directly */
				printk(KERN_NOTICE "%s\n",
				       skb->data + NLMSG_SPACE(0));
				kfree_skb(skb);
			}
		} else {
			/* queue empty: sleep until new records arrive */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&kauditd_wait, &wait);

			/* re-check under the queued waiter to avoid a
			 * lost wakeup between dequeue and sleep */
			if (!skb_queue_len(&audit_skb_queue)) {
				try_to_freeze();
				schedule();
			}

			__set_current_state(TASK_RUNNING);
			remove_wait_queue(&kauditd_wait, &wait);
		}
	}
}
void spectral_unicast_msg(struct ath_softc *sc) { struct ath_spectral *spectral=sc->sc_spectral; if ((spectral==NULL) || (spectral->spectral_sock==NULL)) { SPECTRAL_DPRINTK(sc, ATH_DEBUG_SPECTRAL2,"%s NULL pointers (spectral=%d) (sock=%d) (skb=%d)\n", __func__, (spectral==NULL),(spectral->spectral_sock==NULL),(spectral->spectral_skb==NULL)); dev_kfree_skb(spectral->spectral_skb); spectral->spectral_skb = NULL; return; } if (spectral->spectral_skb != NULL) { #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) NETLINK_CB(spectral->spectral_skb).pid = 0; /* from kernel */ NETLINK_CB(spectral->spectral_skb).dst_pid = spectral->spectral_pid; NETLINK_CB(spectral->spectral_skb).pid = 0; /* from kernel */ #endif /* VERSION - field depracated by newer kernel */ NETLINK_CB(spectral->spectral_skb).pid = 0; /* from kernel */ /* to mcast group 1<<0 */ NETLINK_CB(spectral->spectral_skb).dst_group=0;; netlink_unicast(spectral->spectral_sock, spectral->spectral_skb, spectral->spectral_pid, MSG_DONTWAIT); } }
int netlinkSayHello(int pid) { struct sk_buff *skb; struct nlmsghdr *nlh; skb = nlmsg_new(MAX_PAYLOAD, GFP_ATOMIC); if (skb == NULL) { printk(KERN_ERR "Failed to alloc skb\n"); return 0; } // put into skb nlh = nlmsg_put(skb, 0, 0, 0, MAX_PAYLOAD, 0); // below line is meaningless memcpy(NLMSG_DATA(nlh), "Hello Client", sizeof("Hello Client")); if (netlink_unicast(nl_sk, skb, pid, 0) < 0) { printk(KERN_ERR"Failed to unicast skb\n"); return 0; } return 1; }
void send_command_to_daemon(const int command/*struct sk_buff *skb*/) { /* struct iphdr *iph; struct ethhdr *ehdr; */ struct nlmsghdr *nlh; struct sk_buff *nl_skb; int res; MSG("here we will send command to native daemon\n"); /* if(skb == NULL) { ERR("invalid sk_buff\n"); return; } */ if(!g_nl_sk) { ERR("invalid socket\n"); return; } if(pid == 0) { ERR("invalid native process pid\n"); return; } /*alloc data buffer for sending to native*/ nl_skb = alloc_skb(NLMSG_SPACE(MAX_NL_MSG_LEN), GFP_ATOMIC); /*malloc data space at least 1500 bytes, which is ethernet data length*/ if(nl_skb == NULL) { ERR("malloc skb error\n"); return; } MSG("malloc data space done\n"); /* ehdr = eth_hdr(skb); iph = ip_hdr(skb); */ // nlh = NLMSG_PUT(nl_skb, 0, 0, 0, NLMSG_SPACE(1500)-sizeof(struct nlmsghdr)); nlh = nlmsg_put(nl_skb, 0, 0, 0, MAX_NL_MSG_LEN, 0); #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)) NETLINK_CB(nl_skb).pid = 0; #else NETLINK_CB(nl_skb).portid = 0; #endif // memcpy(NLMSG_DATA(nlh), ACK, 5); *(char *)NLMSG_DATA(nlh) = command; res = netlink_unicast(g_nl_sk, nl_skb, pid, MSG_DONTWAIT); if(res == 0) { MSG("send to user space process error\n"); return; } else { ERR("send to user space process done, data length = %d\n", res); return; } }
/*
 * IPC_EventSend - deliver a driver event to userspace.
 *
 * If the event targets process id 0 (not a valid pid on Linux) the
 * registered callback is invoked directly instead of sending a netlink
 * message; otherwise the IPC_EV_DATA payload is wrapped in a netlink
 * message and unicast to the target process.
 *
 * Returns 0 on success, -1 on allocation or send failure.
 *
 * NOTE: NLMSG_PUT() contains a hidden "goto nlmsg_failure", which is
 * why the label below is reachable despite no visible goto.
 */
TI_INT32 IPC_EventSend(TI_HANDLE hAdapter, TI_UINT8* pEvData, TI_UINT32 EvDataSize)
{
	struct sk_buff *skb;
	int res;
	TWlanDrvIfObj *drv = (TWlanDrvIfObj *) hAdapter;
	TI_UINT32 realSize = 0;
	TI_UINT32 msgSize;
	struct nlmsghdr *nlh;
	TI_UINT8 *msg;

	os_wake_lock_timeout_enable(drv);

	/* This event is targetted to the OS process. Id 0 is not a
	   valid pId for LINUX — deliver via the callback instead. */
	if ((( IPC_EVENT_PARAMS *) pEvData) ->uProcessID == 0)
	{
		(( IPC_EVENT_PARAMS *) pEvData) ->pfEventCallback(( IPC_EV_DATA *) pEvData);
		return 0;
	}

	/* set the payload size: buffer length plus the fixed prefix of
	   IPC_EV_DATA up to the uBuffer field */
	msgSize = (( IPC_EV_DATA *) pEvData) ->uBufferSize + offsetof(IPC_EV_DATA,uBuffer);

	/* add the netlink header size */
	realSize = NLMSG_SPACE(msgSize);

	/* allocate the complete message */
	skb = dev_alloc_skb(realSize);
	if (!skb)
	{
		printk(KERN_ERR "Failed to allocate new skb with size=%u.\n",realSize);
		return -1;
	}

	/* set the netlink header params */
	nlh = NLMSG_PUT(skb, 0, 0, NLMSG_DONE, realSize - sizeof(*nlh));

	/* get the payload pointer */
	msg = (char *)NLMSG_DATA(nlh);

	/* copy the data to the payload */
	memcpy(msg,pEvData,msgSize);

	NETLINK_CB(skb).pid = 0;   /* from kernel */
#define RTMGRP_LINK 1
	NETLINK_CB(skb).dst_group = RTMGRP_LINK;

	/* send the message; netlink_unicast() consumes the skb */
	res = netlink_unicast(drv->wl_sock, skb, (( IPC_EVENT_PARAMS *) pEvData) ->uProcessID, MSG_DONTWAIT);

	/* Sanity checks. As far as we're concerned this error is unrecovarable.*/
	if (res >= 0)
	{
		return 0;
	}

nlmsg_failure:			/* Used by NLMSG_PUT */
	ti_dprintf(TIWLAN_LOG_INFO,"IPC kernel: did not send the netlink message\n");
	return -1;
}
int nas_netlink_send(unsigned char *data,unsigned int len) { struct sk_buff *nl_skb = alloc_skb(NLMSG_SPACE(len),GFP_ATOMIC); struct nlmsghdr *nlh = (struct nlmsghdr *)nl_skb->data; int status; // printk("[NAS][NETLINK] Sending %d bytes (%d)\n",len,NLMSG_SPACE(len)); skb_put(nl_skb, NLMSG_SPACE(len)); memcpy(NLMSG_DATA(nlh),data,len); nlh->nlmsg_len = NLMSG_SPACE(len); nlh->nlmsg_pid = 0; /* from kernel */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0) NETLINK_CB(nl_skb).portid = 0; #else NETLINK_CB(nl_skb).pid = 0; #endif #ifdef NETLINK_DEBUG printk("[NAS][NETLINK] In nas_netlink_send, nl_skb %p, nl_sk %x, nlh %p, nlh->nlmsg_len %d (OAI_IP_DRIVER_NETLINK_ID %d)\n", nl_skb,nas_nl_sk,nlh,nlh->nlmsg_len, OAI_IP_DRIVER_NETLINK_ID); #endif //DEBUG_NETLINK if (nas_nl_sk) { // nasmesh_lock(); status = netlink_unicast(nas_nl_sk, nl_skb, NL_DEST_PID, MSG_DONTWAIT); // mutex_unlock(&nasmesh_mutex); if (status < 0) { printk("[NAS][NETLINK] SEND status is %d\n",status); return(0); } else { #ifdef NETLINK_DEBUG printk("[NAS][NETLINK] SEND status is %d\n",status); #endif return len; } } else { printk("[NAS][SEND] socket is NULL\n"); return(0); } /* nlmsg_failure: // Used by NLMSG_PUT if (nl_skb) kfree_skb(nl_skb); */ }
/*
 * tcpdiag_get_exact - answer a TCP diag request for one exact socket.
 *
 * Looks up the socket described by the tcpdiagreq in @nlh (IPv4 or
 * IPv6), validates the caller-supplied cookie against the socket
 * pointer, fills a reply skb via tcpdiag_fill() and unicasts it back
 * to the requester.
 *
 * Returns 0 on success or a negative errno: -EINVAL for an unknown
 * address family, -ENOENT when no socket matches, -ESTALE on cookie
 * mismatch, -ENOMEM on allocation failure.
 */
static int tcpdiag_get_exact(struct sk_buff *in_skb, struct nlmsghdr *nlh)
{
	int err;
	struct sock *sk;
	struct tcpdiagreq *req = NLMSG_DATA(nlh);
	struct sk_buff *rep;

	if (req->tcpdiag_family == AF_INET) {
		sk = tcp_v4_lookup(req->id.tcpdiag_dst[0], req->id.tcpdiag_dport,
				   req->id.tcpdiag_src[0], req->id.tcpdiag_sport,
				   req->id.tcpdiag_if);
	}
#ifdef CONFIG_IPV6
	else if (req->tcpdiag_family == AF_INET6) {
		sk = tcp_v6_lookup((struct in6_addr*)req->id.tcpdiag_dst, req->id.tcpdiag_dport,
				   (struct in6_addr*)req->id.tcpdiag_src, req->id.tcpdiag_sport,
				   req->id.tcpdiag_if);
	}
#endif
	else {
		return -EINVAL;
	}

	if (sk == NULL)
		return -ENOENT;

	/* the cookie is the socket pointer split into two u32s; a
	 * mismatch means the socket was reused since the caller saw it */
	err = -ESTALE;
	if ((req->id.tcpdiag_cookie[0] != TCPDIAG_NOCOOKIE ||
	     req->id.tcpdiag_cookie[1] != TCPDIAG_NOCOOKIE) &&
	    sk != *((struct sock **)&req->id.tcpdiag_cookie[0]))
		goto out;

	err = -ENOMEM;
	/* worst-case reply: base msg + meminfo + tcp_info + slack */
	rep = alloc_skb(NLMSG_SPACE(sizeof(struct tcpdiagmsg)+
				    sizeof(struct tcpdiag_meminfo)+
				    sizeof(struct tcp_info)+64), GFP_KERNEL);
	if (!rep)
		goto out;

	/* cannot fail: the skb was sized for the largest possible reply */
	if (tcpdiag_fill(rep, sk, req->tcpdiag_ext,
			 NETLINK_CB(in_skb).pid,
			 nlh->nlmsg_seq) <= 0)
		BUG();

	err = netlink_unicast(tcpnl, rep, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	/* drop the reference taken by the lookup */
	if (sk) {
		if (sk->state == TCP_TIME_WAIT)
			tcp_tw_put((struct tcp_tw_bucket*)sk);
		else
			sock_put(sk);
	}
	return err;
}
/*
 * ethertap_start_xmit - transmit hook for the ethertap pseudo-device.
 *
 * Pushes two bytes of headroom (reallocating the skb if needed, same
 * alignment trick loopback uses), unshares a shared skb, updates the
 * device stats, and hands the frame to netlink: broadcast by default,
 * or (with CONFIG_ETHERTAP_MC and ARP enabled) unicast to the pid
 * encoded in bytes 2..5 of the destination MAC for non-multicast
 * frames.  Always returns 0; netlink takes ownership of the skb.
 */
static int ethertap_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *lp = (struct net_local *)dev->priv;
#ifdef CONFIG_ETHERTAP_MC
	struct ethhdr *eth = (struct ethhdr*)skb->data;
#endif

	if (skb_headroom(skb) < 2) {
		/* protocol sent a frame without room for the 2-byte
		 * prefix; log it once and realloc with headroom */
		static int once;
		struct sk_buff *skb2;

		if (!once) {
			once = 1;
			printk(KERN_DEBUG "%s: not aligned xmit by protocol %04x\n", dev->name, skb->protocol);
		}

		skb2 = skb_realloc_headroom(skb, 2);
		dev_kfree_skb(skb);
		if (skb2 == NULL)
			return 0;
		skb = skb2;
	}
	__skb_push(skb, 2);

	/* Make the same thing, which loopback does. */
	if (skb_shared(skb)) {
		struct sk_buff *skb2 = skb;
		skb = skb_clone(skb, GFP_ATOMIC);	/* Clone the buffer */
		if (skb==NULL) {
			dev_kfree_skb(skb2);
			return 0;
		}
		dev_kfree_skb(skb2);
	}
	/* ... but do not orphan it here, netlink does it in any case. */

	lp->stats.tx_bytes+=skb->len;
	lp->stats.tx_packets++;

#ifndef CONFIG_ETHERTAP_MC
	netlink_broadcast(lp->nl, skb, 0, ~0, GFP_ATOMIC);
#else
	if (dev->flags&IFF_NOARP) {
		netlink_broadcast(lp->nl, skb, 0, ~0, GFP_ATOMIC);
		return 0;
	}

	if (!(eth->h_dest[0]&1)) {
		/* Unicast packet: destination pid is embedded in the MAC */
		__u32 pid;
		memcpy(&pid, eth->h_dest+2, 4);
		netlink_unicast(lp->nl, skb, ntohl(pid), MSG_DONTWAIT);
	} else
		netlink_broadcast(lp->nl, skb, 0, ethertap_mc_hash(eth->h_dest), GFP_ATOMIC);
#endif
	return 0;
}
/*
 * netlink_send_peer - build a netlink message for queue element @e and
 * unicast it to the registered peer.  Returns the build status when
 * message construction fails, otherwise the netlink_unicast() result.
 */
static int netlink_send_peer(ipq_queue_element_t *e)
{
	int build_status = 0;
	struct sk_buff *msg = netlink_build_message(e, &build_status);

	if (!msg)
		return build_status;

	return netlink_unicast(nfnl, msg, nlq->peer.pid, MSG_DONTWAIT);
}
/*
 * toi_send_netlink_message - send a typed message (with optional
 * @params payload of @len bytes) to the userspace helper described by
 * @uhd, then wake that task so it processes the message promptly.
 *
 * Silently returns if no helper is registered (uhd->pid == -1) or the
 * skb cannot be obtained.  The hidden goto inside NLMSG_PUT makes the
 * nlmsg_failure label reachable.
 */
void toi_send_netlink_message(struct user_helper_data *uhd,
			      int type, void *params, size_t len)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	void *dest;
	struct task_struct *t;

	if (uhd->pid == -1)
		return;

	if (uhd->debug)
		printk(KERN_ERR "toi_send_netlink_message: Send "
		       "message type %d.\n", type);

	skb = toi_get_skb(uhd);
	if (!skb) {
		printk(KERN_INFO "toi_netlink: Can't allocate skb!\n");
		return;
	}

	/* NLMSG_PUT contains a hidden goto nlmsg_failure */
	nlh = NLMSG_PUT(skb, 0, uhd->sock_seq, type, len);
	uhd->sock_seq++;

	dest = NLMSG_DATA(nlh);
	if (params && len > 0)
		memcpy(dest, params, len);

	/* blocking send; netlink_unicast() consumes the skb */
	netlink_unicast(uhd->nl, skb, uhd->pid, 0);

	/* wake the helper so it notices the message now rather than on
	 * its next scheduling */
	read_lock(&tasklist_lock);
	t = find_task_by_pid_type_ns(PIDTYPE_PID, uhd->pid, &init_pid_ns);
	if (!t) {
		read_unlock(&tasklist_lock);
		if (uhd->pid > -1)
			printk(KERN_INFO "Hmm. Can't find the userspace task"
			       " %d.\n", uhd->pid);
		return;
	}
	wake_up_process(t);
	read_unlock(&tasklist_lock);

	yield();

	return;

nlmsg_failure:			/* Used by NLMSG_PUT */
	if (skb)
		put_skb(uhd, skb);

	if (uhd->debug)
		printk(KERN_ERR "toi_send_netlink_message: Failed to send "
		       "message type %d.\n", type);
}
static void ipmr_cache_delete(struct mfc_cache *cache) { struct sk_buff *skb; int line; struct mfc_cache **cp; /* * Find the right cache line */ line=MFC_HASH(cache->mfc_mcastgrp,cache->mfc_origin); cp=&(mfc_cache_array[line]); if(cache->mfc_flags&MFC_QUEUED) del_timer(&cache->mfc_timer); /* * Unlink the buffer */ while(*cp!=NULL) { if(*cp==cache) { *cp=cache->next; break; } cp=&((*cp)->next); } /* * Free the buffer. If it is a pending resolution * clean up the other resources. */ if(cache->mfc_flags&MFC_QUEUED) { cache_resolve_queue_len--; while((skb=skb_dequeue(&cache->mfc_unresolved))) { #ifdef CONFIG_RTNETLINK if (skb->nh.iph->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); nlh->nlmsg_type = NLMSG_ERROR; nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); skb_trim(skb, nlh->nlmsg_len); ((struct nlmsgerr*)NLMSG_DATA(nlh))->error = -ETIMEDOUT; netlink_unicast(rtnl, skb, NETLINK_CB(skb).dst_pid, MSG_DONTWAIT); } else #endif kfree_skb(skb); } } kfree_s(cache,sizeof(cache)); }
/*
 * rtnetlink_send - broadcast @skb to @group on the rtnl socket and,
 * when @echo is set, additionally unicast it back to @pid.
 *
 * An extra reference is taken before the broadcast when echoing, since
 * both netlink_broadcast() and netlink_unicast() consume a reference.
 * Returns 0, or the unicast result when echoing.
 */
int rtnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo)
{
	int ret = 0;
	int want_echo = (echo != 0);

	NETLINK_CB(skb).dst_group = group;

	if (want_echo)
		atomic_inc(&skb->users);

	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);

	if (want_echo)
		ret = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);

	return ret;
}
/**
 * audit_send_reply - send an audit reply message via netlink
 * @pid: process id to send reply to
 * @seq: sequence number
 * @type: audit message type
 * @done: done (last) flag
 * @multi: multi-part message flag
 * @payload: payload data
 * @size: payload size
 *
 * Allocates an skb, builds the netlink message, and sends it to the pid.
 * No failure notifications.
 */
void audit_send_reply(int pid, int seq, int type, int done, int multi,
		      void *payload, int size)
{
	struct sk_buff *reply = audit_make_reply(pid, seq, type, done,
						 multi, payload, size);

	if (!reply)
		return;

	/* Ignore failure. It'll only happen if the sender goes away,
	   because our timeout is set to infinite. */
	netlink_unicast(audit_sock, reply, pid, 0);
}