/*
 * Forward a TPM command to the tpmd daemon over tpmd_sock and read the
 * response into the global tpm_response buffer.
 *
 * @in:      command bytes to transmit.
 * @in_size: number of bytes in @in.
 *
 * On success tpm_response.data/.size hold the reply and 0 is returned.
 * Returns a negative errno from the socket layer on send/recv failure,
 * or -ENOMEM if the response buffer cannot be allocated.
 */
static int tpmd_handle_command(const uint8_t *in, uint32_t in_size)
{
  int res;
  struct msghdr msg;
  struct kvec vec;

  /* send command to tpmd */
  memset(&msg, 0, sizeof(msg));
  vec.iov_base = (void*)in;
  vec.iov_len = in_size;
  res = kernel_sendmsg(tpmd_sock, &msg, &vec, 1, in_size);
  if (res < 0) {
    error("sock_sendmsg() failed: %d\n", res);
    return res;
  }

  /* receive response from tpmd */
  tpm_response.size = TPM_CMD_BUF_SIZE;
  tpm_response.data = kmalloc(tpm_response.size, GFP_KERNEL);
  if (tpm_response.data == NULL)
    return -ENOMEM;  /* fix: was a bare -1, not a real errno */
  memset(&msg, 0, sizeof(msg));
  vec.iov_base = (void*)tpm_response.data;
  vec.iov_len = tpm_response.size;
  res = kernel_recvmsg(tpmd_sock, &msg, &vec, 1, tpm_response.size, 0);
  if (res < 0) {
    error("sock_recvmsg() failed: %d\n", res);
    kfree(tpm_response.data);  /* fix: buffer was leaked on this path */
    tpm_response.data = NULL;
    return res;
  }
  tpm_response.size = res;
  return 0;
}
/* * Send or receive packet. */ static int sock_xmit(struct socket *sock, int send, void *buf, int size, int msg_flags) { int result; struct msghdr msg; struct kvec iov; unsigned long flags; sigset_t oldset; /* Allow interception of SIGKILL only * Don't allow other signals to interrupt the transmission */ spin_lock_irqsave(¤t->sighand->siglock, flags); oldset = current->blocked; sigfillset(¤t->blocked); sigdelsetmask(¤t->blocked, sigmask(SIGKILL)); recalc_sigpending(); spin_unlock_irqrestore(¤t->sighand->siglock, flags); do { sock->sk->sk_allocation = GFP_NOIO; iov.iov_base = buf; iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = msg_flags | MSG_NOSIGNAL; if (send) result = kernel_sendmsg(sock, &msg, &iov, 1, size); else result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0); if (signal_pending(current)) { siginfo_t info; spin_lock_irqsave(¤t->sighand->siglock, flags); printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n", current->pid, current->comm, dequeue_signal(current, ¤t->blocked, &info)); spin_unlock_irqrestore(¤t->sighand->siglock, flags); result = -EINTR; break; } if (result <= 0) { if (result == 0) result = -EPIPE; /* short read */ break; } size -= result; buf += result; } while (size > 0); spin_lock_irqsave(¤t->sighand->siglock, flags); current->blocked = oldset; recalc_sigpending(); spin_unlock_irqrestore(¤t->sighand->siglock, flags); return result; }
/*
 * Basic network sending/receiving functions.
 * Blocked mode is used.
 */
/*
 * Receive up to @size bytes from the state's socket without blocking
 * (MSG_DONTWAIT).  Returns the byte count received (> 0), or a negative
 * errno; a zero-byte read (peer closed) is converted to -ECONNRESET.
 */
static int netfs_data_recv(struct netfs_state *st, void *buf, u64 size)
{
	struct msghdr msg;
	struct kvec iov;
	int err;

	BUG_ON(!size);

	iov.iov_base = buf;
	iov.iov_len = size;

	/*
	 * Fix: the old code also wired msg.msg_iov/msg_iovlen by hand.
	 * kernel_recvmsg() installs the kvec into the msghdr itself, so
	 * that was redundant and breaks on kernels that use msg_iter.
	 */
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = MSG_DONTWAIT;

	err = kernel_recvmsg(st->socket, &msg, &iov, 1, iov.iov_len,
			     msg.msg_flags);
	if (err <= 0) {
		printk("%s: failed to recv data: size: %llu, err: %d.\n",
		       __func__, size, err);
		if (err == 0)
			err = -ECONNRESET;
	}

	return err;
}
static int recv_remote_info (struct socket *sock, struct ib_side_info *info) { struct msghdr hdr; struct kvec iov; int ret; printk (KERN_INFO "recv_remote_info\n"); /* receive remote info */ memset (&hdr, 0, sizeof (hdr)); iov.iov_base = info; iov.iov_len = sizeof (*info); while (iov.iov_len) { ret = kernel_recvmsg (sock, &hdr, &iov, 1, iov.iov_len, 0); if (ret < 0) { printk (KERN_INFO "sock_recvmsg failed: %d\n", ret); return ret; } if (!ret) break; iov.iov_base += ret; iov.iov_len -= ret; } return 0; }
void processConnection(struct socket* _clientSocket){ /*kmalloc a receive buffer*/ char *recvbuf=NULL; struct kvec vec; struct msghdr msg; recvbuf=kmalloc(1024,GFP_KERNEL); if(recvbuf==NULL){ printk("server: recvbuf kmalloc error!\n"); return ; } memset(recvbuf, 0, 1024); /*receive message from client*/ memset(&vec,0,sizeof(vec)); memset(&msg,0,sizeof(msg)); vec.iov_base=recvbuf; vec.iov_len=1024; if(kernel_recvmsg(_clientSocket,&msg,&vec,1,1024,0)){ printk("receive msg failed\n"); return; } printk("receive message:\n %s\n",recvbuf); /*release socket*/ sock_release(_clientSocket); }
/*
 * Receive up to @len bytes from @sock into @buf.
 *
 * Returns the byte count from kernel_recvmsg() (negative errno on
 * error), or 0 immediately if the socket has no sk (already torn down).
 *
 * Fix: the msghdr was left completely uninitialized (all the field
 * setup was commented out), so msg_name/msg_control/msg_flags carried
 * stack garbage into kernel_recvmsg().  Zero it instead.
 */
int receive(struct socket* sock, unsigned char* buf, int len)
{
	struct msghdr msg;
	struct kvec iov;
	int size = 0;

	if (sock->sk==NULL)
		return 0;

	iov.iov_base = buf;
	iov.iov_len = len;

	memset(&msg, 0, sizeof(msg));	/* no name/control; flags = 0 */

	size = kernel_recvmsg(sock,&msg,&iov,1,len,0);
	return size;
}
/*
 * RX-ready callback: drain all pending data from @sock, freeing every
 * received mbuf segment, and bump benchmark counters.
 *
 * NOTE(review): this is a DPDK/user-space-TCP hybrid — here
 * "struct iovec" carries an mbuf chain in a ->head field and
 * kernel_recvmsg() is the ported stack's variant, not the upstream
 * Linux one.  Confirm against the surrounding port before reuse.
 * NOTE(review): msg.msg_control/msg_controllen/msg_flags are never
 * initialized; presumably the ported recvmsg ignores them — verify.
 */
void user_data_available_cbk(struct socket *sock)
{
	struct msghdr msg;
	struct iovec vec;
	struct sockaddr_in sockaddrin;
	struct rte_mbuf *mbuf;
	int i,dummy = 1;

	/* Count every invocation, even ones that yield no data. */
	user_on_rx_opportunity_called++;
	memset(&vec,0,sizeof(vec));
	if(unlikely(sock == NULL)) {
		return;
	}
	msg.msg_namelen = sizeof(sockaddrin);
	msg.msg_name = &sockaddrin;
	/* Keep reading 1448-byte chunks while data is available. */
	while(unlikely((i = kernel_recvmsg(sock, &msg,&vec, 1 /*num*/,
					   1448 /*size*/, 0 /*flags*/)) > 0)) {
		dummy = 0;
		/* Walk and free the mbuf chain the stack attached to
		 * the iovec's ->head field. */
		while(unlikely(mbuf = msg.msg_iov->head)) {
			msg.msg_iov->head = msg.msg_iov->head->pkt.next;
			//printf("received %d\n",i);
			rte_pktmbuf_free_seg(mbuf);
		}
		//printf("received %d\n",i);
		/* Reset the iovec and address fields for the next read. */
		memset(&vec,0,sizeof(vec));
		msg.msg_namelen = sizeof(sockaddrin);
		msg.msg_name = &sockaddrin;
	}
	if(dummy) {
		/* Woken with nothing to read. */
		user_on_rx_opportunity_called_wo_result++;
	}
}
/* Receive data over TCP/IP. */ int usbip_recv(struct socket *sock, void *buf, int size) { int result; struct msghdr msg; struct kvec iov; int total = 0; /* for blocks of if (usbip_dbg_flag_xmit) */ char *bp = buf; int osize = size; usbip_dbg_xmit("enter\n"); if (!sock || !buf || !size) { pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf, size); return -EINVAL; } do { sock->sk->sk_allocation = GFP_NOIO; iov.iov_base = buf; iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_namelen = 0; msg.msg_flags = MSG_NOSIGNAL; result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL); if (result <= 0) { pr_debug("receive sock %p buf %p size %u ret %d total %d\n", sock, buf, size, result, total); goto err; } size -= result; buf += result; total += result; } while (size > 0); if (usbip_dbg_flag_xmit) { if (!in_interrupt()) pr_debug("%-10s:", current->comm); else pr_debug("interrupt :"); pr_debug("receiving....\n"); usbip_dump_buffer(bp, osize); pr_debug("received, osize %d ret %d size %d total %d\n", osize, result, size, total); } return total; err: return result; }
/* * Send or receive packet. */ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size, int msg_flags) { struct socket *sock = nbd->sock; int result; struct msghdr msg; struct kvec iov; sigset_t blocked, oldset; unsigned long pflags = current->flags; if (unlikely(!sock)) { dev_err(disk_to_dev(nbd->disk), "Attempted %s on closed socket in sock_xmit\n", (send ? "send" : "recv")); return -EINVAL; } /* Allow interception of SIGKILL only * Don't allow other signals to interrupt the transmission */ siginitsetinv(&blocked, sigmask(SIGKILL)); sigprocmask(SIG_SETMASK, &blocked, &oldset); current->flags |= PF_MEMALLOC; do { sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC; iov.iov_base = buf; iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = msg_flags | MSG_NOSIGNAL; if (send) result = kernel_sendmsg(sock, &msg, &iov, 1, size); else result = kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags); if (result <= 0) { if (result == 0) result = -EPIPE; /* short read */ break; } size -= result; buf += result; } while (size > 0); sigprocmask(SIG_SETMASK, &oldset, NULL); tsk_restore_flags(current, pflags, PF_MEMALLOC); if (!send && nbd->xmit_timeout) mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout); return result; }
/*
 * Receive one packet on @so into the caller's iovec array.
 *
 * @so:      socket handle (really a struct socket *).
 * @from:    out: sender address filled in by the protocol layer.
 * @iov:     scatter list; copied to a scratch array because the socket
 *           layer may consume or modify the iovecs it is given.
 * @iovcnt:  entries in @iov; panics if more than RX_MAXWVECS + 2.
 * @lengthp: in: maximum bytes to receive; out: bytes received.
 *
 * Returns 0 on success (with *lengthp updated) or the negative errno
 * from kernel_recvmsg(), which is also recorded in the module's
 * socket-error counters.
 */
int
osi_NetReceive(osi_socket so, struct sockaddr_in *from, struct iovec *iov,
	       int iovcnt, int *lengthp)
{
    struct msghdr msg;
    int code;
#ifdef ADAPT_PMTU
    int sockerr;
    int esize;
#endif
    struct iovec tmpvec[RX_MAXWVECS + 2];
    struct socket *sop = (struct socket *)so;

    if (iovcnt > RX_MAXWVECS + 2) {
	osi_Panic("Too many (%d) iovecs passed to osi_NetReceive\n",
		  iovcnt);
    }
#ifdef ADAPT_PMTU
    /* Drain any pending socket error (e.g. ICMP frag-needed) before
     * blocking in recvmsg, so path-MTU feedback gets processed. */
    while (1) {
	sockerr=0;
	esize = sizeof(sockerr);
	kernel_getsockopt(sop, SOL_SOCKET, SO_ERROR, (char *)&sockerr,
			   &esize);
	if (sockerr == 0)
	    break;
	handle_socket_error(so);
    }
#endif
    memcpy(tmpvec, iov, iovcnt * sizeof(struct iovec));
    msg.msg_name = from;
    msg.msg_iov = tmpvec;	/* pre-msg_iter kernels */
    msg.msg_iovlen = iovcnt;
    msg.msg_control = NULL;
    msg.msg_controllen = 0;
    msg.msg_flags = 0;
    /* NOTE(review): msg_namelen is never initialized here; the UDP
     * receive path writes it, but confirm for other protocols. */
    code = kernel_recvmsg(sop, &msg, (struct kvec *)tmpvec, iovcnt,
			  *lengthp, 0);

    if (code < 0) {
	afs_try_to_freeze();

	/* Clear the error before using the socket again.
	 * Oh joy, Linux has hidden header files as well. It appears we can
	 * simply call again and have it clear itself via sock_error().
	 */
	flush_signals(current);	/* We don't want no stinkin' signals. */

	rxk_lastSocketError = code;
	rxk_nSocketErrors++;
    } else {
	*lengthp = code;
	code = 0;
    }
    return code;
}
/*
 * Drain one report from @so's error queue (MSG_ERRQUEUE) and, when it
 * is an ICMP "fragmentation needed" notice, feed the new path MTU to
 * rxi_SetPeerMtu().  All other error reports are currently dropped.
 */
void handle_socket_error(osi_socket so)
{
    struct msghdr msg;
    struct cmsghdr *cmsg;
    struct sock_extended_err *err;
    struct sockaddr_in addr;
    struct sockaddr *offender;
    char *controlmsgbuf;
    int code;
    struct socket *sop = (struct socket *)so;

    if (!(controlmsgbuf=rxi_Alloc(256)))
	return;
    msg.msg_name = &addr;
    msg.msg_namelen = sizeof(addr);
    msg.msg_control = controlmsgbuf;
    msg.msg_controllen = 256;
    msg.msg_flags = 0;

    /* No data iovec: we only want the ancillary (control) data. */
    code = kernel_recvmsg(sop, &msg, NULL, 0, 0,
			  MSG_ERRQUEUE|MSG_DONTWAIT|MSG_TRUNC);

    if (code < 0 || !(msg.msg_flags & MSG_ERRQUEUE))
	goto out;

    /* Locate the IP_RECVERR control message, if present. */
    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
	if (CMSG_OK(&msg, cmsg) && cmsg->cmsg_level == SOL_IP &&
	    cmsg->cmsg_type == IP_RECVERR)
	    break;
    }
    if (!cmsg)
	goto out;
    err = CMSG_DATA(cmsg);
    offender = SO_EE_OFFENDER(err);

    if (offender->sa_family != AF_INET)
	goto out;

    memcpy(&addr, offender, sizeof(addr));

    if (err->ee_origin == SO_EE_ORIGIN_ICMP &&
	err->ee_type == ICMP_DEST_UNREACH &&
	err->ee_code == ICMP_FRAG_NEEDED) {
	/* ee_info carries the next-hop MTU reported by the router. */
	rxi_SetPeerMtu(NULL, ntohl(addr.sin_addr.s_addr),
		       ntohs(addr.sin_port), err->ee_info);
    }
    /* other DEST_UNREACH's and TIME_EXCEEDED should be dealt with too */

out:
    rxi_Free(controlmsgbuf, 256);
    return;
}
/* * Send or receive packet. */ static int sock_xmit(struct socket *sock, int send, void *buf, int size, int msg_flags) { int result; struct msghdr msg; struct kvec iov; sigset_t blocked, oldset; /* Allow interception of SIGKILL only * Don't allow other signals to interrupt the transmission */ siginitsetinv(&blocked, sigmask(SIGKILL)); sigprocmask(SIG_SETMASK, &blocked, &oldset); do { sock->sk->sk_allocation = GFP_NOIO; iov.iov_base = buf; iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = msg_flags | MSG_NOSIGNAL; if (send) result = kernel_sendmsg(sock, &msg, &iov, 1, size); else result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0); if (signal_pending(current)) { siginfo_t info; printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n", current->pid, current->comm, dequeue_signal_lock(current, ¤t->blocked, &info)); result = -EINTR; break; } if (result <= 0) { if (result == 0) result = -EPIPE; /* short read */ break; } size -= result; buf += result; } while (size > 0); sigprocmask(SIG_SETMASK, &oldset, NULL); return result; }
/* * Send or receive packet. */ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size, int msg_flags) { struct socket *sock = nbd->sock; int result; struct msghdr msg; struct kvec iov; unsigned long pflags = current->flags; if (unlikely(!sock)) { dev_err(disk_to_dev(nbd->disk), "Attempted %s on closed socket in sock_xmit\n", (send ? "send" : "recv")); return -EINVAL; } current->flags |= PF_MEMALLOC; do { sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC; iov.iov_base = buf; iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = msg_flags | MSG_NOSIGNAL; if (send) result = kernel_sendmsg(sock, &msg, &iov, 1, size); else result = kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags); if (result <= 0) { if (result == 0) result = -EPIPE; /* short read */ break; } size -= result; buf += result; } while (size > 0); tsk_restore_flags(current, pflags, PF_MEMALLOC); if (!send && nbd->xmit_timeout) mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout); return result; }
static int vdev_socket_recv(unsigned char *bf, int len) { int ret=0; struct kvec vec; struct msghdr msg; memset(&msg,0,sizeof(msg)); vec.iov_base=bf; vec.iov_len=len; ret=kernel_recvmsg(client_sock,&msg,&vec,1,len,0); return ret; }
/*
 * Receive one packet on @so into the caller's iovec array.
 *
 * Variant of osi_NetReceive that supports both msg_iter-based and
 * legacy msg_iov-based struct msghdr layouts via the
 * STRUCT_MSGHDR_HAS_MSG_ITER configure probe.
 *
 * @so:      socket handle (really a struct socket *).
 * @from:    out: sender address filled in by the protocol layer.
 * @iov:     scatter list; copied to a scratch array because the socket
 *           layer may consume or modify the iovecs it is given.
 * @iovcnt:  entries in @iov; panics if more than RX_MAXWVECS + 2.
 * @lengthp: in: maximum bytes to receive; out: bytes received.
 *
 * Returns 0 on success (with *lengthp updated) or the negative errno
 * from kernel_recvmsg(); errors are also counted and handed to
 * do_handlesocketerror().
 */
int
osi_NetReceive(osi_socket so, struct sockaddr_in *from, struct iovec *iov,
	       int iovcnt, int *lengthp)
{
    struct msghdr msg;
    int code;
    struct iovec tmpvec[RX_MAXWVECS + 2];
    struct socket *sop = (struct socket *)so;

    if (iovcnt > RX_MAXWVECS + 2) {
	osi_Panic("Too many (%d) iovecs passed to osi_NetReceive\n",
		  iovcnt);
    }
    memcpy(tmpvec, iov, iovcnt * sizeof(struct iovec));
    msg.msg_name = from;
#if defined(STRUCT_MSGHDR_HAS_MSG_ITER)
    msg.msg_iter.iov = tmpvec;
    msg.msg_iter.nr_segs = iovcnt;
#else
    msg.msg_iov = tmpvec;
    msg.msg_iovlen = iovcnt;
#endif
    msg.msg_control = NULL;
    msg.msg_controllen = 0;
    msg.msg_flags = 0;
    /* NOTE(review): msg_namelen is never initialized here; the UDP
     * receive path writes it, but confirm for other protocols. */
    code = kernel_recvmsg(sop, &msg, (struct kvec *)tmpvec, iovcnt,
			  *lengthp, 0);

    if (code < 0) {
	afs_try_to_freeze();

	/* Clear the error before using the socket again.
	 * Oh joy, Linux has hidden header files as well. It appears we can
	 * simply call again and have it clear itself via sock_error().
	 */
	flush_signals(current);	/* We don't want no stinkin' signals. */

	rxk_lastSocketError = code;
	rxk_nSocketErrors++;
	do_handlesocketerror(so);
    } else {
	*lengthp = code;
	code = 0;
    }
    return code;
}
static void work_handler(struct work_struct *work) { struct work_struct_data *wsdata = (struct work_struct_data *)work; char *recvbuf=NULL; recvbuf=kmalloc(1024,GFP_KERNEL); if(recvbuf==NULL) { printk("server: recvbuf kmalloc error!\n"); return ; } memset(recvbuf, 0, sizeof(recvbuf)); //receive message from client struct kvec vec; struct msghdr msg; memset(&vec,0,sizeof(vec)); memset(&msg,0,sizeof(msg)); vec.iov_base=recvbuf; vec.iov_len=1024; int ret=0; ret=kernel_recvmsg(wsdata->client,&msg,&vec,1,1024,0); //printk("receive message:\n%s\n",recvbuf); //printk("receive size=%d\n",ret); char *buf2; buf2=dealrequest(recvbuf,buf2); kfree(recvbuf); //printk("\nbuf2 de dihzhi:%d\n",buf2); //printk("\n\n%s\n\n",buf2); //send message to client /////////////////////////////// int len; //iFileLen=sizeof(buf2); len=strlen(buf2)*sizeof(char); //printk("\n33==%s\nlen=%d\n",buf2,len); struct kvec vec2; struct msghdr msg2; vec2.iov_base=buf2; vec2.iov_len=len; memset(&msg2,0,sizeof(msg2)); ret= kernel_sendmsg(wsdata->client,&msg2,&vec2,1,len); kfree(buf2); buf2=NULL; //release client socket sock_release(wsdata->client); }
static void cntl_socket_read_work_fn(struct work_struct *work) { union cntl_port_msg msg; int ret = 0; struct kvec iov = { 0 }; struct msghdr read_msg = { 0 }; if (!cntl_socket) return; ret = wait_event_interruptible(cntl_socket->read_wait_q, (atomic_read(&cntl_socket->data_ready) > 0)); if (ret) return; do { iov.iov_base = &msg; iov.iov_len = sizeof(msg); read_msg.msg_name = NULL; read_msg.msg_namelen = 0; ret = kernel_recvmsg(cntl_socket->hdl, &read_msg, &iov, 1, sizeof(msg), MSG_DONTWAIT); if (ret < 0) { pr_debug("diag: In %s, Error recving data %d\n", __func__, ret); break; } atomic_dec(&cntl_socket->data_ready); switch (msg.srv.cmd) { case CNTL_CMD_NEW_SERVER: case CNTL_CMD_REMOVE_SERVER: cntl_socket_process_msg_server(msg.srv.cmd, msg.srv.service, msg.srv.instance); break; case CNTL_CMD_REMOVE_CLIENT: cntl_socket_process_msg_client(msg.cli.cmd, msg.cli.node_id, msg.cli.port_id); break; } } while (atomic_read(&cntl_socket->data_ready) > 0); }
/*
 * BSD-style recvfrom() built on kernel_recvmsg().
 *
 * @socket_p: socket handle.
 * @buf/@len: destination buffer.
 * @flags:    recv flags, forwarded to kernel_recvmsg().
 * @from:     out: sender address.
 * @fromlen:  in: capacity of @from; out: actual address length.
 *
 * Returns bytes received or a negative errno.
 *
 * Fixes: @flags was previously ignored (0 was always passed), the
 * msghdr's control/flags fields were left partly uninitialized, and
 * *fromlen was never written back with the actual address length.
 */
int recvfrom(SOCKET socket_p, char* buf, int len, int flags,
	     struct sockaddr *from, int * fromlen)
{
	int rc;
	struct msghdr msg;
	struct kvec iov;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = from; // will be struct sock_addr
	msg.msg_namelen = *fromlen;

	iov.iov_len = len;
	iov.iov_base = buf;

	rc = kernel_recvmsg(socket_p, &msg, &iov, 1, iov.iov_len, flags);
	if (rc >= 0)
		*fromlen = msg.msg_namelen; /* report actual address size */

	return rc;
}
/*
 * Drain one report from @so's error queue (MSG_ERRQUEUE) into the
 * caller-supplied control buffer and hand every IP_RECVERR record to
 * rxi_ProcessNetError().
 *
 * Returns 1 if an error-queue message was consumed, 0 if the queue was
 * empty or the read failed (so the caller can stop polling).
 */
static int
osi_HandleSocketError(osi_socket so, char *cmsgbuf, size_t cmsgbuf_len)
{
    struct msghdr msg;
    struct cmsghdr *cmsg;
    struct sock_extended_err *err;
    struct sockaddr_in addr;
    int code;
    struct socket *sop = (struct socket *)so;

    msg.msg_name = &addr;
    msg.msg_namelen = sizeof(addr);
    msg.msg_control = cmsgbuf;
    msg.msg_controllen = cmsgbuf_len;
    msg.msg_flags = 0;

    /* No data iovec: only the ancillary (control) data is wanted. */
    code = kernel_recvmsg(sop, &msg, NULL, 0, 0,
			  MSG_ERRQUEUE|MSG_DONTWAIT|MSG_TRUNC);

    if (code < 0 || !(msg.msg_flags & MSG_ERRQUEUE))
	return 0;

    /* kernel_recvmsg changes msg_control to point at the _end_ of the buffer,
     * and msg_controllen is set to the number of bytes remaining */
    msg.msg_controllen = ((char*)msg.msg_control - (char*)cmsgbuf);
    msg.msg_control = cmsgbuf;

    /* Walk every control record; process each IP_RECVERR payload. */
    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg && CMSG_OK(&msg, cmsg);
	 cmsg = CMSG_NXTHDR(&msg, cmsg)) {
	if (cmsg->cmsg_level != SOL_IP || cmsg->cmsg_type != IP_RECVERR) {
	    continue;
	}
	err = CMSG_DATA(cmsg);
	rxi_ProcessNetError(err, addr.sin_addr.s_addr, addr.sin_port);
    }

    return 1;
}
// receive a message on the UDP socket (non-blocking); wait on the wait queue // until something becomes available, if necessary int udpserver_recvmsg(struct request_state* req) { struct iovec iov; iov.iov_base = req->recvbuf; iov.iov_len = req->len_recvbuf; // Set callback data (sender's IP and port etc.) for sending the reply req->msg.msg_name = &req->sockaddr; req->msg.msg_namelen = sizeof(struct sockaddr_in); req->msg.msg_control = 0; req->msg.msg_controllen = 0; // TODO: use smaller buffers and trap errors when recvmsg says the buffer is too small #ifdef __KERNEL__ return kernel_recvmsg(udpserver->sock, &req->msg, (struct kvec*) &iov, 1, iov.iov_len, MSG_DONTWAIT); #else /* not non blocking in the case of a userland process which can be killed */ req->msg.msg_iov = &iov; req->msg.msg_iovlen = 1; return recvmsg(udpserver->sock, &req->msg, 0); #endif }
/* * Send or receive packet. */ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size, int msg_flags) { struct socket *sock = nbd->sock; int result; struct msghdr msg; struct kvec iov; sigset_t blocked, oldset; if (unlikely(!sock)) { dev_err(disk_to_dev(nbd->disk), "Attempted %s on closed socket in sock_xmit\n", (send ? "send" : "recv")); return -EINVAL; } /* Allow interception of SIGKILL only * Don't allow other signals to interrupt the transmission */ siginitsetinv(&blocked, sigmask(SIGKILL)); sigprocmask(SIG_SETMASK, &blocked, &oldset); do { sock->sk->sk_allocation = GFP_NOIO; iov.iov_base = buf; iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = msg_flags | MSG_NOSIGNAL; if (send) { struct timer_list ti; if (nbd->xmit_timeout) { init_timer(&ti); ti.function = nbd_xmit_timeout; ti.data = (unsigned long)current; ti.expires = jiffies + nbd->xmit_timeout; add_timer(&ti); } result = kernel_sendmsg(sock, &msg, &iov, 1, size); if (nbd->xmit_timeout) del_timer_sync(&ti); } else result = kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags); if (signal_pending(current)) { siginfo_t info; printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n", task_pid_nr(current), current->comm, dequeue_signal_lock(current, ¤t->blocked, &info)); result = -EINTR; sock_shutdown(nbd, !send); break; } if (result <= 0) { if (result == 0) result = -EPIPE; /* short read */ break; } size -= result; buf += result; } while (size > 0); sigprocmask(SIG_SETMASK, &oldset, NULL); return result; }
/*
 * Read whole packets from a diag IPC socket into @buf.
 *
 * Sleeps until data is signalled, then repeatedly MSG_PEEKs the next
 * packet's length, dequeues it if it fits in the remaining buffer
 * space, and appends it.  Stops when the buffer cannot hold the next
 * packet (re-queuing the read work) or no more data is pending.  The
 * accumulated bytes are handed to diagfwd_channel_read_done().
 *
 * Returns 0 on success (including clean-shutdown cases), -ENODEV /
 * -EINVAL for bad arguments, -ERESTARTSYS if the wait is interrupted,
 * or -EIO on receive failure.
 */
static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len)
{
	int err = 0;
	int pkt_len = 0;
	int read_len = 0;
	int bytes_remaining = 0;
	int total_recd = 0;
	int loop_count = 0;
	uint8_t buf_full = 0;
	unsigned char *temp = NULL;
	struct kvec iov = {0};
	struct msghdr read_msg = {0};
	struct sockaddr_msm_ipc src_addr = {0};
	struct diag_socket_info *info = NULL;
	unsigned long flags;

	info = (struct diag_socket_info *)(ctxt);
	if (!info)
		return -ENODEV;

	if (!buf || !ctxt || buf_len <= 0)
		return -EINVAL;

	temp = buf;
	bytes_remaining = buf_len;

	err = wait_event_interruptible(info->read_wait_q,
				       (info->data_ready > 0) || (!info->hdl) ||
				       (atomic_read(&info->diag_state) == 0));
	if (err) {
		/* Interrupted: report a zero-length read to the fwd layer. */
		diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
		return -ERESTARTSYS;
	}

	/*
	 * There is no need to continue reading over peripheral in this case.
	 * Release the wake source hold earlier.
	 */
	if (atomic_read(&info->diag_state) == 0) {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
			 "%s closing read thread. diag state is closed\n",
			 info->name);
		diag_ws_release();
		return 0;
	}

	if (!info->hdl) {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s closing read thread\n",
			 info->name);
		goto fail;
	}

	do {
		loop_count++;
		iov.iov_base = temp;
		iov.iov_len = bytes_remaining;
		read_msg.msg_name = &src_addr;
		read_msg.msg_namelen = sizeof(src_addr);

		/* Peek to learn the next packet's size without dequeuing. */
		pkt_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1, 0,
					 MSG_PEEK);
		if (pkt_len <= 0)
			break;

		if (pkt_len > bytes_remaining) {
			/* Next packet won't fit; deliver what we have and
			 * reschedule the read work for the rest. */
			buf_full = 1;
			break;
		}

		spin_lock_irqsave(&info->lock, flags);
		info->data_ready--;
		spin_unlock_irqrestore(&info->lock, flags);

		/* Now actually consume the packet. */
		read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
					  pkt_len, 0);
		if (read_len <= 0)
			goto fail;

		if (!atomic_read(&info->opened) &&
		    info->port_type == PORT_TYPE_SERVER) {
			/*
			 * This is the first packet from the client. Copy its
			 * address to the connection object. Consider this
			 * channel open for communication.
			 */
			memcpy(&info->remote_addr, &src_addr, sizeof(src_addr));
			if (info->ins_id == INST_ID_DCI)
				atomic_set(&info->opened, 1);
			else
				__socket_open_channel(info);
		}

		/* NOTE(review): unreachable — read_len < 0 was already
		 * handled by the read_len <= 0 check above (and the log
		 * prints pkt_len rather than read_len). */
		if (read_len < 0) {
			pr_err_ratelimited("diag: In %s, error receiving data, err: %d\n",
					   __func__, pkt_len);
			err = read_len;
			goto fail;
		}
		temp += read_len;
		total_recd += read_len;
		bytes_remaining -= read_len;
	} while (info->data_ready > 0);

	if (buf_full || (info->type == TYPE_DATA && pkt_len))
		err = queue_work(info->wq, &(info->read_work));

	if (total_recd > 0) {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s read total bytes: %d\n",
			 info->name, total_recd);
		err = diagfwd_channel_read_done(info->fwd_ctxt,
						buf, total_recd);
		if (err)
			goto fail;
	} else {
		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s error in read, err: %d\n",
			 info->name, total_recd);
		goto fail;
	}

	diag_socket_queue_read(info);
	return 0;

fail:
	diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
	return -EIO;
}
/*
 * Read exactly @nob bytes from @sock into @buffer, giving up after
 * @timeout seconds.  Before each recv the socket's SO_RCVTIMEO is set
 * to the time still remaining, so the whole call — not each fragment —
 * honours the deadline.
 *
 * Returns 0 on success, -ECONNRESET if the peer closes mid-read,
 * -ETIMEDOUT when the budget is exhausted, or a negative errno.
 */
int
lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
{
	int rc;
	long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
	unsigned long then;
	struct timeval tv;

	LASSERT(nob > 0);
	LASSERT(jiffies_left > 0);

	for (;;) {
		struct kvec iov = {
			.iov_base = buffer,
			.iov_len  = nob
		};
		struct msghdr msg = {
			.msg_flags = 0
		};

		/* Set receive timeout to remaining time */
		jiffies_to_timeval(jiffies_left, &tv);
		rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
				       (char *)&tv, sizeof(tv));
		if (rc) {
			CERROR("Can't set socket recv timeout %ld.%06d: %d\n",
			       (long)tv.tv_sec, (int)tv.tv_usec, rc);
			return rc;
		}

		/* Charge the wall-clock time of this recv against the
		 * remaining budget. */
		then = jiffies;
		rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0);
		jiffies_left -= jiffies - then;

		if (rc < 0)
			return rc;

		if (!rc)
			return -ECONNRESET;

		buffer = ((char *)buffer) + rc;
		nob -= rc;

		if (!nob)
			return 0;

		if (jiffies_left <= 0)
			return -ETIMEDOUT;
	}
}
EXPORT_SYMBOL(lnet_sock_read);

/*
 * Create a TCP (PF_INET/SOCK_STREAM) socket, optionally bound to
 * @local_ip/@local_port, with SO_REUSEADDR set.
 *
 * *sockp receives the socket (even on failure, mirroring the original
 * behaviour — the socket is released on the error paths).  *fatal is
 * set to 0 only for -EADDRINUSE on bind, so callers can retry another
 * port.  Returns 0 or a negative errno.
 */
static int
lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip,
		 int local_port)
{
	struct sockaddr_in locaddr;
	struct socket *sock;
	int rc;
	int option;

	/* All errors are fatal except bind failure if the port is in use */
	*fatal = 1;

	rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
	*sockp = sock;
	if (rc) {
		CERROR("Can't create socket: %d\n", rc);
		return rc;
	}

	option = 1;
	rc = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
			       (char *)&option, sizeof(option));
	if (rc) {
		CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc);
		goto failed;
	}

	if (local_ip || local_port) {
		memset(&locaddr, 0, sizeof(locaddr));
		locaddr.sin_family = AF_INET;
		locaddr.sin_port = htons(local_port);
		if (!local_ip)
			locaddr.sin_addr.s_addr = htonl(INADDR_ANY);
		else
			locaddr.sin_addr.s_addr = htonl(local_ip);

		rc = kernel_bind(sock, (struct sockaddr *)&locaddr,
				 sizeof(locaddr));
		if (rc == -EADDRINUSE) {
			/* Non-fatal: caller may retry a different port. */
			CDEBUG(D_NET, "Port %d already in use\n", local_port);
			*fatal = 0;
			goto failed;
		}
		if (rc) {
			CERROR("Error trying to bind to port %d: %d\n",
			       local_port, rc);
			goto failed;
		}
	}
	return 0;

failed:
	sock_release(sock);
	return rc;
}

/*
 * Set the socket's send and/or receive buffer sizes; a zero size
 * leaves that direction untouched.  Returns 0 or a negative errno.
 */
int
lnet_sock_setbuf(struct socket *sock, int txbufsize, int rxbufsize)
{
	int option;
	int rc;

	if (txbufsize) {
		option = txbufsize;
		rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
				       (char *)&option, sizeof(option));
		if (rc) {
			CERROR("Can't set send buffer %d: %d\n",
			       option, rc);
			return rc;
		}
	}

	if (rxbufsize) {
		option = rxbufsize;
		rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
				       (char *)&option, sizeof(option));
		if (rc) {
			CERROR("Can't set receive buffer %d: %d\n",
			       option, rc);
			return rc;
		}
	}
	return 0;
}
/*
 * Per-client server thread for a remote memory-pool device.
 *
 * Loops until asked to stop: receives one request (struct req_info) on
 * the client's control socket, services it (block allocation, page
 * write via the data socket, page read, or heartbeat), and sends back
 * a struct rpy_info reply.  A zero-length receive (peer close) ends
 * the loop; the sockets are then shut down and the scratch buffers
 * freed.
 *
 * NOTE(review): recvdatamsg is initialized but never used — the data
 * socket is read with the control socket's recvmsg header; presumably
 * harmless since both are zeroed, but confirm.
 */
static int CliRecvThread(void *data)
{
	struct kvec recviov, recvdataiov, sendiov, senddataiov;
	struct client_host *clihost = (struct client_host *)data;
	struct msghdr recvmsg, sendmsg, senddatamsg, recvdatamsg;
	struct netmsg_req msg_req;
	/* Scratch buffers for page-sized write/read payloads. */
	struct netmsg_data *msg_wrdata = (struct netmsg_data *)kmalloc(sizeof(struct netmsg_data), GFP_USER);
	struct netmsg_rpy msg_rpy;
	struct netmsg_data *msg_rddata = (struct netmsg_data *)kmalloc(sizeof(struct netmsg_data), GFP_USER);
	int len = 0;

	memset(&recvmsg, 0, sizeof(struct msghdr));
	memset(&recvdatamsg, 0, sizeof(struct msghdr));
	memset(&sendmsg, 0, sizeof(struct msghdr));
	memset(&senddatamsg, 0, sizeof(struct msghdr));
	/* Replies go back to the client's control and data addresses. */
	sendmsg.msg_name = (void *)&clihost->host_addr;
	sendmsg.msg_namelen = sizeof(struct sockaddr_in);
	senddatamsg.msg_name = (void *)&clihost->host_data_addr;
	senddatamsg.msg_namelen = sizeof(struct sockaddr_in);
	memset(&recviov, 0, sizeof(struct kvec));
	memset(&recvdataiov, 0, sizeof(struct kvec));
	memset(&sendiov, 0, sizeof(struct kvec));
	memset(&senddataiov, 0, sizeof(struct kvec));
//	recviov.iov_base = (void *)&msg_req.info;
//	recviov.iov_len = sizeof(struct req_info);
//	sendiov.iov_base = (void *)&msg_rpy.info;
//	sendiov.iov_len = sizeof(struct rpy_info);
//	recvdataiov.iov_base = (void *)&msg_wrdata->info;
//	recvdataiov.iov_len = sizeof(struct data_info);
//	senddataiov.iov_base = (void *)&msg_rddata->info;
//	senddataiov.iov_len = sizeof(struct data_info);

	while (!kthread_should_stop()) {
		//schedule_timeout_interruptible(SCHEDULE_TIME * HZ);
		memset(&msg_req, 0, sizeof(struct netmsg_req));
		memset(&msg_rpy, 0, sizeof(struct netmsg_rpy));
		/* Skip servicing while the connection is marked closed. */
		mutex_lock(&clihost->ptr_mutex);
		if(CLIHOST_STATE_CLOSED == clihost->state) {
			mutex_unlock(&clihost->ptr_mutex);
			continue;
		}
		mutex_unlock(&clihost->ptr_mutex);

		/* Blocking receive of the next request header. */
		recviov.iov_base = (void *)&msg_req.info;
		recviov.iov_len = sizeof(struct req_info);
		len = kernel_recvmsg(clihost->sock, &recvmsg, &recviov, 1,
				sizeof(struct req_info), 0);
		KER_DEBUG(KERN_ALERT"mempool handlethread: kernel_recvmsg len=%d, ID=%d\n",len, msg_req.info.msgID);
		//close of client
		if(len == 0) {
			break;
		}
		if (len < 0 || len != sizeof(struct req_info)) {
			KER_DEBUG(KERN_ALERT"mempool handlethread: kernel_recvmsg err, len=%d, buffer=%ld\n",
					len, sizeof(struct req_info));
			if (len == -ECONNREFUSED) {
				KER_DEBUG(KERN_ALERT"mempool thread: Receive Port Unreachable packet!\n");
			}
			continue;
		}

		switch(msg_req.info.msgID) {
		//alloc block
		case NETMSG_CLI_REQUEST_ALLOC_BLK: {
			unsigned int nIndex = 0, count = 0;
			KER_PRT(KERN_INFO"begin to alloc\n");
			msg_rpy.info.msgID = NETMSG_SER_REPLY_ALLOC_BLK;
			/* Claim up to blknum free blocks under the pool lock,
			 * recording their indices in the reply. */
			mutex_lock(&Devices->blk_mutex);
			for(nIndex = 0, count = 0;
				nIndex < MAX_BLK_NUM_IN_MEMPOOL &&
				count < BLK_MAX_PER_REQ &&
				count < msg_req.info.data.req_alloc_blk.blknum;
				nIndex++) {
				if(Devices->blk[nIndex].avail &&
					!Devices->blk[nIndex].inuse) {
					msg_rpy.info.data.rpyblk.blkinfo[count].remoteIndex = nIndex;
					Devices->blk[nIndex].inuse = TRUE;
					count++;
				}
			}
			mutex_unlock(&Devices->blk_mutex);
			Devices->nblk_avail -= count;
			clihost->block_inuse += count;
			msg_rpy.info.data.rpyblk.blk_alloc = count;
			msg_rpy.info.data.rpyblk.blk_rest_available = Devices->nblk_avail;
			KER_DEBUG(KERN_INFO"mempool thread: send alloc blk reply\n");
			break;
		}
		//write data
		case NETMSG_CLI_REQUEST_WRITE: {
			unsigned int nBlkIndex = 0, nPageIndex = 0;
			/* The page payload follows on the data socket. */
			recvdataiov.iov_base = (void *)&msg_wrdata->info;
			recvdataiov.iov_len = sizeof(struct data_info);
			len = kernel_recvmsg(clihost->datasock, &recvmsg,
					&recvdataiov, 1,
					sizeof(struct data_info), 0);
			if (len < 0 || len != sizeof(struct data_info)) {
				KER_DEBUG(KERN_ALERT"mempool handlethread: kernel_recvmsg err, len=%d, buffer=%ld\n",
						len, sizeof(struct req_info));
				if (len == -ECONNREFUSED) {
					KER_DEBUG(KERN_ALERT"mempool thread: Receive Port Unreachable packet!\n");
				}
			}
			KER_PRT(KERN_INFO"begin to write\n");
			nBlkIndex = msg_req.info.data.req_write.remoteIndex;
			nPageIndex = msg_req.info.data.req_write.pageIndex;
			KER_DEBUG(KERN_INFO"mempool CliSendThread: nBlkIndex %d, nPageIndex %d\n",
					nBlkIndex, nPageIndex);
			KER_DEBUG(KERN_INFO"mempool CliSendThread: data %s\n",
					msg_wrdata->info.data);
			/* Copy the received page into the pool block. */
			mutex_lock(&Devices->blk_mutex);
			memcpy(Devices->blk[nBlkIndex].blk_addr + nPageIndex * VPAGE_SIZE,
					msg_wrdata->info.data, VPAGE_SIZE);
			mutex_unlock(&Devices->blk_mutex);
			msg_rpy.info.msgID = NETMSG_SER_REPLY_WRITE;
			KER_PRT(KERN_INFO"end to write\n");
			break;
		}
		//read data
		case NETMSG_CLI_REQUEST_READ: {
			unsigned int nBlkIndex = 0, nPageIndex = 0;
			KER_PRT(KERN_INFO"begin to read\n");
			msg_rpy.info.msgID = NETMSG_SER_REPLY_READ;
			msg_rpy.info.data.rpy_read.vpageaddr = msg_req.info.data.req_read.vpageaddr;
			msg_rpy.info.data.rpy_read.remoteIndex = msg_req.info.data.req_read.remoteIndex;
			msg_rpy.info.data.rpy_read.pageIndex = msg_req.info.data.req_read.pageIndex;
			/* NOTE(review): indices are taken from req_write
			 * rather than req_read — presumably union aliases of
			 * the same layout; confirm. */
			nBlkIndex = msg_req.info.data.req_write.remoteIndex;
			nPageIndex = msg_req.info.data.req_write.pageIndex;
			memcpy(msg_rddata->info.data,
					Devices->blk[nBlkIndex].blk_addr + nPageIndex * VPAGE_SIZE,
					VPAGE_SIZE);
			KER_PRT(KERN_INFO"end to read\n");
			/* Page payload is returned on the data socket. */
			senddataiov.iov_base = (void *)&msg_rddata->info;
			senddataiov.iov_len = sizeof(struct data_info);
			len = kernel_sendmsg(clihost->datasock, &senddatamsg,
					&senddataiov, 1,
					sizeof(struct data_info));
			if (len < 0 || len != sizeof(struct data_info)) {
				KER_DEBUG(KERN_ALERT"mempool handlethread: kernel_sendmsg err, len=%d, buffer=%ld\n",
						len, sizeof(struct req_info));
				if (len == -ECONNREFUSED) {
					KER_DEBUG(KERN_ALERT"mempool thread: Receive Port Unreachable packet!\n");
				}
			}
			break;
		}
		//heart beat
		case NETMSG_CLI_REQUEST_HEARTBEAT: {
			msg_rpy.info.msgID = NETMSG_SER_REPLY_HEARTBEAT;
			msg_rpy.info.data.rpy_heartbeat.blk_rest_available = Devices->nblk_avail;
			break;
		}
		default:
			continue;
		}

		/* Send the control-channel reply for this request. */
		sendiov.iov_base = (void *)&msg_rpy.info;
		sendiov.iov_len = sizeof(struct rpy_info);
		len = kernel_sendmsg(clihost->sock, &sendmsg, &sendiov, 1,
				sizeof(struct rpy_info));
		if(len != sizeof(struct rpy_info)) {
			KER_DEBUG(KERN_INFO"kernel_sendmsg err, len=%d, buffer=%ld\n",
					len, sizeof(struct rpy_info));
			if(len == -ECONNREFUSED) {
				KER_DEBUG(KERN_INFO"Receive Port Unreachable packet!\n");
			}
			//continue;
		}
		KER_PRT(KERN_INFO"end\n");
	}

	/* Tear down both sockets if the connection was still live. */
	mutex_lock(&clihost->ptr_mutex);
	if(CLIHOST_STATE_CONNECTED == clihost->state) {
		clihost->state = CLIHOST_STATE_CLOSED;
		kernel_sock_shutdown(clihost->sock, SHUT_RDWR);
		kernel_sock_shutdown(clihost->datasock, SHUT_RDWR);
		//sock_release(clihost->sock);
		//sock_release(clihost->datasock);
		//clihost->sock = NULL;
	}
	mutex_unlock(&clihost->ptr_mutex);
	kfree(msg_wrdata);
	kfree(msg_rddata);

	/* Park until kthread_stop() is called. */
	while(!kthread_should_stop()) {
		schedule_timeout_interruptible(SCHEDULE_TIME * HZ);
	}
	return 0;
}
/*
 * Receive into the connection's current iovec fragments (non-blocking),
 * accumulating the protocol-v2 checksum over the bytes received when
 * checksumming is enabled.  Returns the kernel_recvmsg() result.
 *
 * The iovecs are copied to a scratch array first because the socket
 * layer may consume or modify what it is handed.
 */
int
ksocknal_lib_recv_iov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
	struct kvec scratch;
	struct kvec *scratchiov = &scratch;
	unsigned int niov = 1;
#else
	struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
	unsigned int niov = conn->ksnc_rx_niov;
#endif
	struct kvec *iov = conn->ksnc_rx_iov;
	struct msghdr msg = { .msg_flags = 0 };
	int nob;
	int i;
	int rc;
	int fragnob;
	int sum;
	__u32 saved_csum;

	/* NB we can't trust socket ops to either consume our iovs
	 * or leave them alone. */
	LASSERT (niov > 0);

	for (nob = i = 0; i < niov; i++) {
		scratchiov[i] = iov[i];
		nob += scratchiov[i].iov_len;
	}
	LASSERT (nob <= conn->ksnc_rx_nob_wanted);

	rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, niov, nob,
			    MSG_DONTWAIT);

	/* ksm_csum doubles as the "checksumming enabled" flag: stash it,
	 * zero it while we fold the received bytes into rx_csum, then
	 * restore it. */
	saved_csum = 0;
	if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
		saved_csum = conn->ksnc_msg.ksm_csum;
		conn->ksnc_msg.ksm_csum = 0;
	}

	if (saved_csum != 0) {
		/* accumulate checksum */
		for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
			LASSERT (i < niov);

			fragnob = iov[i].iov_len;
			if (fragnob > sum)
				fragnob = sum;

			conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
							   iov[i].iov_base,
							   fragnob);
		}
		conn->ksnc_msg.ksm_csum = saved_csum;
	}

	return rc;
}

/* Undo ksocknal_lib_kiov_vmap(); safe to call with NULL. */
static void
ksocknal_lib_kiov_vunmap(void *addr)
{
	if (addr == NULL)
		return;

	vunmap(addr);
}

/*
 * Try to vmap() the receive kiov pages into one contiguous virtual
 * range so the whole fragment list can be received with a single
 * iovec.  Only applies when zero-copy receive is enabled, there are
 * enough fragments to be worth it, and the pages form a contiguous
 * span (interior fragments are full pages).  On success *iov is set to
 * the mapped range and the mapping address is returned (release with
 * ksocknal_lib_kiov_vunmap()); returns NULL to fall back to per-page
 * kmap.
 */
static void *
ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
		       struct kvec *iov, struct page **pages)
{
	void *addr;
	int nob;
	int i;

	if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL)
		return NULL;

	LASSERT (niov <= LNET_MAX_IOV);

	if (niov < 2 ||
	    niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
		return NULL;

	for (nob = i = 0; i < niov; i++) {
		/* Reject gaps: only the first fragment may start mid-page
		 * and only the last may end mid-page. */
		if ((kiov[i].kiov_offset != 0 && i > 0) ||
		    (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
			return NULL;

		pages[i] = kiov[i].kiov_page;
		nob += kiov[i].kiov_len;
	}

	addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
	if (addr == NULL)
		return NULL;

	iov->iov_base = addr + kiov[0].kiov_offset;
	iov->iov_len = nob;

	return addr;
}
/*
 * Receive the next chunk of a message into the connection's page
 * fragments (ksnc_rx_kiov).  Prefers one contiguous vmap() of all
 * fragments (zero-copy path); otherwise kmap()s each page into a
 * scratch iovec for the duration of the recvmsg.
 *
 * Returns the kernel_recvmsg() result: bytes received or a negative
 * errno.  Advances ksnc_rx_csum over the received bytes when the
 * message carries a checksum.
 */
int ksocknal_lib_recv_kiov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
	/* single fragment per call; vmap path disabled (pages == NULL) */
	struct kvec scratch;
	struct kvec *scratchiov = &scratch;
	struct page **pages = NULL;
	unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
	struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
	struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs;
	unsigned int niov = conn->ksnc_rx_nkiov;
#endif
	lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
	struct msghdr msg = { .msg_flags = 0 };
	int nob;
	int i;
	int rc;
	void *base;
	void *addr;
	int sum;
	int fragnob;
	int n;

	/* NB we can't trust socket ops to either consume our iovs
	 * or leave them alone. */
	if ((addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages)) != NULL) {
		/* zero-copy path: all fragments mapped as one segment */
		nob = scratchiov[0].iov_len;
		n = 1;
	} else {
		/* fallback: kmap each page individually (unmapped below) */
		for (nob = i = 0; i < niov; i++) {
			nob += scratchiov[i].iov_len = kiov[i].kiov_len;
			scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
		}
		n = niov;
	}

	LASSERT (nob <= conn->ksnc_rx_nob_wanted);

	rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, n, nob, MSG_DONTWAIT);

	if (conn->ksnc_msg.ksm_csum != 0) {
		/* accumulate checksum over the rc bytes actually received */
		for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
			LASSERT (i < niov);
			/* Dang! have to kmap again because I have nowhere to stash the
			 * mapped address. But by doing it while the page is still
			 * mapped, the kernel just bumps the map count and returns me
			 * the address it stashed. */
			base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
			fragnob = kiov[i].kiov_len;
			if (fragnob > sum)
				fragnob = sum;	/* last fragment may be partial */
			conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum, base, fragnob);
			kunmap(kiov[i].kiov_page);
		}
	}

	/* release whichever mapping style was set up above */
	if (addr != NULL) {
		ksocknal_lib_kiov_vunmap(addr);
	} else {
		for (i = 0; i < niov; i++)
			kunmap(kiov[i].kiov_page);
	}

	return (rc);
}

/*
 * Compute the V2-protocol checksum of an outgoing transmit descriptor
 * and store it in tx->tx_msg.ksm_csum.  The checksum covers the message
 * header (with ksm_csum zeroed) followed by the payload, whether the
 * payload is described by page fragments (tx_kiov) or iovecs (tx_iov).
 */
void
ksocknal_lib_csum_tx(ksock_tx_t *tx)
{
	int i;
	__u32 csum;
	void *base;

	/* iov[0] must be the message header itself */
	LASSERT(tx->tx_iov[0].iov_base == (void *)&tx->tx_msg);
	LASSERT(tx->tx_conn != NULL);
	LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);

	/* checksum field must be zero while the header is checksummed */
	tx->tx_msg.ksm_csum = 0;

	csum = ksocknal_csum(~0, (void *)tx->tx_iov[0].iov_base, tx->tx_iov[0].iov_len);

	if (tx->tx_kiov != NULL) {
		/* payload held in page fragments: map each page briefly */
		for (i = 0; i < tx->tx_nkiov; i++) {
			base = kmap(tx->tx_kiov[i].kiov_page) + tx->tx_kiov[i].kiov_offset;
			csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len);
			kunmap(tx->tx_kiov[i].kiov_page);
		}
	} else {
		/* payload in iovecs; start at 1 — iov[0] was the header */
		for (i = 1; i < tx->tx_niov; i++)
			csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base, tx->tx_iov[i].iov_len);
	}

	/* test hook: deliberately corrupt one checksum when requested */
	if (*ksocknal_tunables.ksnd_inject_csum_error) {
		csum++;
		*ksocknal_tunables.ksnd_inject_csum_error = 0;
	}

	tx->tx_msg.ksm_csum = csum;
}
/* Send/receive messages over TCP/IP. I refer drivers/block/nbd.c */ int usbip_xmit(int send, struct socket *sock, char *buf, int size, int msg_flags) { int result; struct msghdr msg; struct kvec iov; int total = 0; /* for blocks of if (usbip_dbg_flag_xmit) */ char *bp = buf; int osize = size; usbip_dbg_xmit("enter\n"); if (!sock || !buf || !size) { printk(KERN_ERR "%s: invalid arg, sock %p buff %p size %d\n", __func__, sock, buf, size); return -EINVAL; } if (usbip_dbg_flag_xmit) { if (send) { if (!in_interrupt()) printk(KERN_DEBUG "%-10s:", current->comm); else printk(KERN_DEBUG "interrupt :"); printk(KERN_DEBUG "%s: sending... , sock %p, buf %p, " "size %d, msg_flags %d\n", __func__, sock, buf, size, msg_flags); usbip_dump_buffer(buf, size); } } do { sock->sk->sk_allocation = GFP_NOIO; iov.iov_base = buf; iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_namelen = 0; msg.msg_flags = msg_flags | MSG_NOSIGNAL; if (send) result = kernel_sendmsg(sock, &msg, &iov, 1, size); else result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL); if (result <= 0) { usbip_udbg("usbip_xmit: %s sock %p buf %p size %u ret " "%d total %d\n", send ? "send" : "receive", sock, buf, size, result, total); goto err; } size -= result; buf += result; total += result; } while (size > 0); if (usbip_dbg_flag_xmit) { if (!send) { if (!in_interrupt()) printk(KERN_DEBUG "%-10s:", current->comm); else printk(KERN_DEBUG "interrupt :"); printk(KERN_DEBUG "usbip_xmit: receiving....\n"); usbip_dump_buffer(bp, osize); printk(KERN_DEBUG "usbip_xmit: received, osize %d ret " "%d size %d total %d\n", osize, result, size, total); } if (send) printk(KERN_DEBUG "usbip_xmit: send, total %d\n", total); } return total; err: return result; }
static int _recv(struct socket *sock, void *buf, int size, unsigned flags) { struct msghdr msg = {NULL, }; struct kvec iov = {buf, size}; return kernel_recvmsg(sock, &msg, &iov, 1, size, flags); }