/*
 * Send a packet through the transport endpoint.
 *
 * The whole skb (wire header included) is transmitted as one UDP datagram.
 * If the packet currently looks small enough it is first sent with the
 * don't-fragment behaviour left in force (read-locked fast path); should
 * the kernel report -EMSGSIZE, we fall back to a fragmentable send with
 * path-MTU discovery temporarily disabled under the defrag_sem write lock.
 *
 * Returns the kernel_sendmsg() result (bytes sent or negative error).
 */
int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb)
{
	struct kvec iov[1];
	struct msghdr msg;
	int ret, opt;

	_enter(",{%d}", skb->len);

	iov[0].iov_base = skb->head;
	iov[0].iov_len = skb->len;

	msg.msg_name = &trans->peer->srx.transport.sin;
	msg.msg_namelen = sizeof(trans->peer->srx.transport.sin);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	/* send the packet with the don't fragment bit set if we currently
	 * think it's small enough */
	if (skb->len - sizeof(struct rxrpc_header) < trans->peer->maxdata) {
		/* Read lock: many non-fragmentable sends may run in
		 * parallel, but never while a fragmentable send has PMTU
		 * discovery turned off. */
		down_read(&trans->local->defrag_sem);

		/* send the packet by UDP
		 * - returns -EMSGSIZE if UDP would have to fragment the
		 *   packet to go out of the interface
		 *   - in which case, we'll have processed the ICMP error
		 *     message and update the peer record
		 */
		ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
				     iov[0].iov_len);

		up_read(&trans->local->defrag_sem);
		if (ret == -EMSGSIZE)
			goto send_fragmentable;

		_leave(" = %d [%u]", ret, trans->peer->maxdata);
		return ret;
	}

send_fragmentable:
	/* attempt to send this message with fragmentation enabled */
	_debug("send fragment");

	/* Write lock: exclude all other senders while PMTU discovery is
	 * switched off on the shared local socket. */
	down_write(&trans->local->defrag_sem);
	opt = IP_PMTUDISC_DONT;
	ret = kernel_setsockopt(trans->local->socket, SOL_IP, IP_MTU_DISCOVER,
				(char *) &opt, sizeof(opt));
	if (ret == 0) {
		ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
				     iov[0].iov_len);

		/* Restore don't-fragment behaviour regardless of the send
		 * result; the sendmsg error (if any) is what we report. */
		opt = IP_PMTUDISC_DO;
		kernel_setsockopt(trans->local->socket, SOL_IP,
				  IP_MTU_DISCOVER, (char *) &opt, sizeof(opt));
	}
	up_write(&trans->local->defrag_sem);
	_leave(" = %d [frag %u]", ret, trans->peer->maxdata);
	return ret;
}
int rds_tcp_keepalive(struct socket *sock) { /* values below based on xs_udp_default_timeout */ int keepidle = 5; /* send a probe 'keepidle' secs after last data */ int keepcnt = 5; /* number of unack'ed probes before declaring dead */ int keepalive = 1; int ret = 0; ret = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *)&keepalive, sizeof(keepalive)); if (ret < 0) goto bail; ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT, (char *)&keepcnt, sizeof(keepcnt)); if (ret < 0) goto bail; ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE, (char *)&keepidle, sizeof(keepidle)); if (ret < 0) goto bail; /* KEEPINTVL is the interval between successive probes. We follow * the model in xs_tcp_finish_connecting() and re-use keepidle. */ ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL, (char *)&keepidle, sizeof(keepidle)); bail: return ret; }
/*
 * Push any Nagle-buffered data out of @conn's TCP socket by momentarily
 * forcing TCP_NODELAY, then restore the socket's previous nonagle state.
 */
void ksocknal_lib_push_conn (ksock_conn_t *conn)
{
	struct sock *sk;
	struct tcp_sock *tp;
	int nonagle;
	int val = 1;
	int rc;

	/* Pin the socket; bail if the connection is being shut down. */
	rc = ksocknal_connsock_addref(conn);
	if (rc != 0)			/* being shut down */
		return;

	sk = conn->ksnc_sock->sk;
	tp = tcp_sk(sk);

	/* Save the current nonagle setting and force it on so the
	 * TCP_NODELAY setsockopt below flushes queued data. */
	lock_sock (sk);
	nonagle = tp->nonagle;
	tp->nonagle = 1;
	release_sock (sk);

	rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY,
			       (char *)&val, sizeof(val));
	LASSERT (rc == 0);

	/* Restore whatever nonagle mode the socket had before. */
	lock_sock (sk);
	tp->nonagle = nonagle;
	release_sock (sk);

	ksocknal_connsock_decref(conn);
}
/* rxk_NewSocket
 * open and bind RX socket
 *
 * Creates a kernel UDP socket bound to @ahost:@aport, configures path-MTU
 * discovery (and, when built with AFS_RXERRQ_ENV, ICMP error queueing).
 * Returns the socket cast to osi_socket *, or NULL on failure.
 */
osi_socket *
rxk_NewSocketHost(afs_uint32 ahost, short aport)
{
    struct socket *sockp;
    struct sockaddr_in myaddr;
    int code;
#ifdef AFS_ADAPT_PMTU
    int pmtu = IP_PMTUDISC_WANT;
#else
    int pmtu = IP_PMTUDISC_DONT;
#endif

#ifdef HAVE_LINUX_SOCK_CREATE_KERN_NS
    code = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, IPPROTO_UDP,
			    &sockp);
#elif defined(HAVE_LINUX_SOCK_CREATE_KERN)
    code = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sockp);
#elif defined(LINUX_KERNEL_SOCK_CREATE_V)
    code = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sockp, 0);
#else
    code = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sockp);
#endif
    if (code < 0)
	return NULL;

    /* Bind socket */
    myaddr.sin_family = AF_INET;
    myaddr.sin_addr.s_addr = ahost;
    myaddr.sin_port = aport;	/* NOTE(review): assumed already network order */
    code = sockp->ops->bind(sockp, (struct sockaddr *)&myaddr,
			    sizeof(myaddr));
    if (code < 0) {
	/* Fix: release the socket on bind failure; it was previously
	 * leaked (the old "sock_release(rx_socket) FIXME" printk). */
	printk("rxk_NewSocketHost: bind failed with %d\n", code);
	sock_release(sockp);
	return NULL;
    }

    /* Best-effort options; failure here is not fatal. */
    kernel_setsockopt(sockp, SOL_IP, IP_MTU_DISCOVER, (char *)&pmtu,
		      sizeof(pmtu));
#ifdef AFS_RXERRQ_ENV
    {
	int recverr = 1;
	kernel_setsockopt(sockp, SOL_IP, IP_RECVERR, (char *)&recverr,
			  sizeof(recverr));
    }
#endif
    return (osi_socket *)sockp;
}
/* rxk_NewSocket
 * open and bind RX socket
 *
 * Older variant: creates a kernel UDP socket bound to @ahost:@aport and
 * configures path-MTU discovery (plus IP_RECVERR when ADAPT_PMTU is set).
 * Returns the socket cast to osi_socket *, or NULL on failure.
 */
osi_socket *
rxk_NewSocketHost(afs_uint32 ahost, short aport)
{
    struct socket *sockp;
    struct sockaddr_in myaddr;
    int code;
#ifdef ADAPT_PMTU
    int pmtu = IP_PMTUDISC_WANT;
    int do_recverr = 1;
#else
    int pmtu = IP_PMTUDISC_DONT;
#endif

    /* We need a better test for this. if you need it back, tell us
     * how to detect it. */
#ifdef LINUX_KERNEL_SOCK_CREATE_V
    code = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sockp, 0);
#else
    code = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sockp);
#endif
    if (code < 0)
	return NULL;

    /* Bind socket */
    myaddr.sin_family = AF_INET;
    myaddr.sin_addr.s_addr = ahost;
    myaddr.sin_port = aport;	/* NOTE(review): assumed already network order */
    code = sockp->ops->bind(sockp, (struct sockaddr *)&myaddr,
			    sizeof(myaddr));
    if (code < 0) {
	/* Fix: release the socket on bind failure; it was previously
	 * leaked (the old "sock_release(rx_socket) FIXME" printk). */
	printk("rxk_NewSocketHost: bind failed with %d\n", code);
	sock_release(sockp);
	return NULL;
    }

    kernel_setsockopt(sockp, SOL_IP, IP_MTU_DISCOVER, (char *)&pmtu,
		      sizeof(pmtu));
#ifdef ADAPT_PMTU
    kernel_setsockopt(sockp, SOL_IP, IP_RECVERR, (char *)&do_recverr,
		      sizeof(do_recverr));
#endif
    return (osi_socket *)sockp;
}
/*
 * open an RxRPC socket and bind it to be a server for callback notifications
 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
 */
int afs_open_socket(struct afs_net *net)
{
	struct sockaddr_rxrpc srx;
	struct socket *socket;
	unsigned int min_level;
	int ret;

	_enter("");

	ret = sock_create_kern(net->net, AF_RXRPC, SOCK_DGRAM, PF_INET6,
			       &socket);
	if (ret < 0)
		goto error_1;

	/* Allocations for this socket must not recurse into the fs. */
	socket->sk->sk_allocation = GFP_NOFS;

	/* bind the callback manager's address to make this a server socket */
	memset(&srx, 0, sizeof(srx));
	srx.srx_family			= AF_RXRPC;
	srx.srx_service			= CM_SERVICE;
	srx.transport_type		= SOCK_DGRAM;
	srx.transport_len		= sizeof(srx.transport.sin6);
	srx.transport.sin6.sin6_family	= AF_INET6;
	srx.transport.sin6.sin6_port	= htons(AFS_CM_PORT);

	/* Require at least encryption-level security from callers. */
	min_level = RXRPC_SECURITY_ENCRYPT;
	ret = kernel_setsockopt(socket, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
				(void *)&min_level, sizeof(min_level));
	if (ret < 0)
		goto error_2;

	ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
	if (ret == -EADDRINUSE) {
		/* Fall back to an ephemeral port if the well-known CM port
		 * is already taken. */
		srx.transport.sin6.sin6_port = 0;
		ret = kernel_bind(socket, (struct sockaddr *) &srx,
				  sizeof(srx));
	}
	if (ret < 0)
		goto error_2;

	/* Hook up notification of incoming calls before listening. */
	rxrpc_kernel_new_call_notification(socket, afs_rx_new_call,
					   afs_rx_discard_new_call);

	ret = kernel_listen(socket, INT_MAX);
	if (ret < 0)
		goto error_2;

	net->socket = socket;
	afs_charge_preallocation(&net->charge_preallocation_work);
	_leave(" = 0");
	return 0;

error_2:
	sock_release(socket);
error_1:
	_leave(" = %d", ret);
	return ret;
}
struct socket* createServerSocket(void) { struct socket *_sock; struct sockaddr_in _saddr; unsigned short _port=0x8080; int ret = 0; memset(&_saddr,0,sizeof(_saddr)); _saddr.sin_family = AF_INET; _saddr.sin_port = htons(_port); _saddr.sin_addr.s_addr = htonl(INADDR_ANY); _sock = (struct socket*)kmalloc(sizeof(struct socket),GFP_KERNEL); //_clientSock = (struct socket*)kmalloc(sizeof(struct socket),GFP_KERNEL); ret = sock_create_kern(AF_INET, SOCK_STREAM,0,&_sock); if(ret ) { printk("server socket created error\n"); return NULL; } int value=1; if(kernel_setsockopt(_sock, SOL_SOCKET, SO_REUSEPORT, &value, 4)) { printk("set reuseport option failed\n"); return NULL; } if(kernel_setsockopt(_sock, SOL_SOCKET, SO_REUSEADDR, &value, 4)) { printk("set option reuseaddr failed\n"); return NULL; } printk("reuse port set\n"); ret = _sock->ops->bind(_sock,(struct sockaddr *)&_saddr,sizeof(struct sockaddr_in)); if(ret<0){ printk("server: bind error\n"); return NULL; } printk("server:bind ok!\n"); return _sock; }
/*
 * Ask @conn's TCP socket to ACK immediately rather than delaying the ACK
 * in the hope of piggy-backing it on outgoing data; a delayed ACK stalls
 * completion of the peer's zero-copy sends.
 */
void
ksocknal_lib_eager_ack (ksock_conn_t *conn)
{
	struct socket *sock = conn->ksnc_sock;
	int on = 1;

	kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
			  (char *)&on, sizeof(on));
}
/*
 * Write @nob bytes from @buffer to @sock, blocking for at most @timeout
 * seconds.  A zero @timeout means a single non-blocking attempt.
 *
 * Returns 0 once everything has been sent, -EAGAIN if the deadline passes
 * with data still outstanding, -ECONNABORTED on an unexpected zero-length
 * send, or the negative kernel_sendmsg() error.
 */
int lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
{
	int rc;
	long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
	unsigned long then;
	struct timeval tv;
	struct kvec iov = {
		.iov_base = buffer,
		.iov_len = nob
	};
	struct msghdr msg = {NULL,};

	LASSERT(nob > 0);
	/*
	 * Caller may pass a zero timeout if she thinks the socket buffer is
	 * empty enough to take the whole message immediately
	 */
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, nob);
	for (;;) {
		/* Zero timeout => try once without blocking. */
		msg.msg_flags = !timeout ? MSG_DONTWAIT : 0;
		if (timeout) {
			/* Set send timeout to remaining time */
			jiffies_to_timeval(jiffies_left, &tv);
			rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
					       (char *)&tv, sizeof(tv));
			if (rc) {
				CERROR("Can't set socket send timeout %ld.%06d: %d\n",
				       (long)tv.tv_sec, (int)tv.tv_usec, rc);
				return rc;
			}
		}

		then = jiffies;
		rc = kernel_sendmsg(sock, &msg, &iov, 1, nob);
		jiffies_left -= jiffies - then;

		if (rc < 0)
			return rc;

		if (!rc) {
			CERROR("Unexpected zero rc\n");
			return -ECONNABORTED;
		}

		/* msg.msg_iter tracks progress across partial sends. */
		if (!msg_data_left(&msg))
			break;

		if (jiffies_left <= 0)
			return -EAGAIN;
	}
	return 0;
}
/*
 * Send a DATA packet through the transport endpoint.
 *
 * A fresh wire header is built for every (re)transmission because each
 * transmission needs a new serial number.  The packet is first sent on the
 * fast path with path-MTU discovery in force; if the kernel reports
 * -EMSGSIZE it is retried on the fragmentable path with PMTU discovery
 * temporarily disabled under the defrag_sem write lock.
 *
 * Returns the number of bytes transmitted or a negative error code.
 */
int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
			   bool retrans)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_wire_header whdr;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct msghdr msg;
	struct kvec iov[2];
	rxrpc_serial_t serial;
	size_t len;
	bool lost = false;
	int ret, opt;

	_enter(",{%d}", skb->len);

	/* Each transmission of a Tx packet needs a new serial number */
	serial = atomic_inc_return(&conn->serial);

	whdr.epoch	= htonl(conn->proto.epoch);
	whdr.cid	= htonl(call->cid);
	whdr.callNumber	= htonl(call->call_id);
	whdr.seq	= htonl(sp->hdr.seq);
	whdr.serial	= htonl(serial);
	whdr.type	= RXRPC_PACKET_TYPE_DATA;
	whdr.flags	= sp->hdr.flags;
	whdr.userStatus	= 0;
	whdr.securityIndex = call->security_ix;
	whdr._rsvd	= htons(sp->hdr._rsvd);
	whdr.serviceId	= htons(call->service_id);

	/* iov[0] is the wire header, iov[1] the payload. */
	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = skb->head;
	iov[1].iov_len = skb->len;
	len = iov[0].iov_len + iov[1].iov_len;

	msg.msg_name = &call->peer->srx.transport;
	msg.msg_namelen = call->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	/* If our RTT cache needs working on, request an ACK.  Also request
	 * ACKs if a DATA packet appears to have been lost.
	 */
	if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
	    (retrans ||
	     call->cong_mode == RXRPC_CALL_SLOW_START ||
	     (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
	     ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
			  ktime_get_real())))
		whdr.flags |= RXRPC_REQUEST_ACK;

	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;
		if ((lose++ & 7) == 7) {
			/* Pretend the packet went out and was lost. */
			ret = 0;
			lost = true;
			goto done;
		}
	}

	_proto("Tx DATA %%%u { #%u }", serial, sp->hdr.seq);

	/* send the packet with the don't fragment bit set if we currently
	 * think it's small enough */
	if (iov[1].iov_len >= call->peer->maxdata)
		goto send_fragmentable;

	down_read(&conn->params.local->defrag_sem);
	/* send the packet by UDP
	 * - returns -EMSGSIZE if UDP would have to fragment the packet
	 *   to go out of the interface
	 *   - in which case, we'll have processed the ICMP error
	 *     message and update the peer record
	 */
	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);

	up_read(&conn->params.local->defrag_sem);
	if (ret == -EMSGSIZE)
		goto send_fragmentable;

done:
	trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
			    retrans, lost);
	if (ret >= 0) {
		ktime_t now = ktime_get_real();
		skb->tstamp = now;
		/* Publish the serial before readers of sp->hdr see it. */
		smp_wmb();
		sp->hdr.serial = serial;
		if (whdr.flags & RXRPC_REQUEST_ACK) {
			call->peer->rtt_last_req = now;
			trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
		}
	}
	_leave(" = %d [%u]", ret, call->peer->maxdata);
	return ret;

send_fragmentable:
	/* attempt to send this message with fragmentation enabled */
	_debug("send fragment");

	down_write(&conn->params.local->defrag_sem);

	switch (conn->params.local->srx.transport.family) {
	case AF_INET:
		opt = IP_PMTUDISC_DONT;
		ret = kernel_setsockopt(conn->params.local->socket,
					SOL_IP, IP_MTU_DISCOVER,
					(char *)&opt, sizeof(opt));
		if (ret == 0) {
			ret = kernel_sendmsg(conn->params.local->socket, &msg,
					     iov, 2, len);

			opt = IP_PMTUDISC_DO;
			kernel_setsockopt(conn->params.local->socket,
					  SOL_IP, IP_MTU_DISCOVER,
					  (char *)&opt, sizeof(opt));
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		opt = IPV6_PMTUDISC_DONT;
		ret = kernel_setsockopt(conn->params.local->socket,
					SOL_IPV6, IPV6_MTU_DISCOVER,
					(char *)&opt, sizeof(opt));
		if (ret == 0) {
			/* Fix: send both kvecs (header AND payload); this
			 * path previously passed only iov[0], so every
			 * fragmentable IPv6 DATA packet went out with an
			 * empty body. */
			ret = kernel_sendmsg(conn->params.local->socket, &msg,
					     iov, 2, len);

			opt = IPV6_PMTUDISC_DO;
			kernel_setsockopt(conn->params.local->socket,
					  SOL_IPV6, IPV6_MTU_DISCOVER,
					  (char *)&opt, sizeof(opt));
		}
		break;
#endif
	}

	up_write(&conn->params.local->defrag_sem);
	goto done;
}
/*
 * Transmit one SMB request (header kvecs plus page array) on the server's
 * TCP socket, corking the socket around the whole send so the request
 * goes out in as few segments as possible.
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned long send_length;
	unsigned int i;
	size_t total_len = 0, sent;
	struct socket *ssocket = server->ssocket;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	/* sanity check send length */
	send_length = rqst_len(rqst);
	if (send_length != smb_buf_length + 4) {
		WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
			send_length, smb_buf_length);
		return -EIO;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	rc = smb_send_kvec(server, iov, n_vec, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		struct kvec p_iov;

		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
		rc = smb_send_kvec(server, &p_iov, 1, &sent);
		/* The kvec maps the page; drop the mapping whether or not
		 * the send succeeded. */
		kunmap(rqst->rq_pages[i]);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}
/*
 * Create a UDP transmit socket: bound to @if_address, multicast loopback
 * disabled, connected to @dest_addr:@port, with an optional send-buffer
 * size of @bufsize bytes.
 *
 * NOTE(review): @port is copied into sin_port without htons(); assumed to
 * arrive already in network byte order — confirm against callers.
 *
 * Returns the connected socket, or NULL on failure (any partially-created
 * socket is shut down and released).
 */
static struct socket *
udp_tx_sock_create (struct in_addr if_address, uint16_t port,
		    struct in_addr dest_addr, uint32_t bufsize)
{
	struct socket *sk = NULL;	/* stays NULL until sock_create() succeeds */
	struct sockaddr_in saddr;
	char loop = 0;
	int err = 0;

	do {
		err = sock_create(PF_INET, SOCK_DGRAM, 0, &sk);
		if (err < 0) {
			/* Fix: nothing was created, so nothing to clean up;
			 * the old code fell into the cleanup path and used
			 * an uninitialized pointer. */
			sk = NULL;
			break;
		}

		/* Bind to the outgoing interface, ephemeral local port. */
		memset(&saddr, 0, sizeof(saddr));
		saddr.sin_addr = if_address;
		saddr.sin_family = AF_INET;
		saddr.sin_port = 0;
		err = kernel_bind(sk, (struct sockaddr *) &saddr,
				  sizeof(saddr));
		if (err < 0) {
			break;
		}

		/* Don't loop our own multicast traffic back to us. */
		err = kernel_setsockopt(sk, IPPROTO_IP, IP_MULTICAST_LOOP,
					&loop, sizeof(loop));
		if (err < 0) {
			break;
		}

		/* Connect so plain sends go to the fixed destination. */
		memset(&saddr, 0, sizeof(saddr));
		saddr.sin_family = AF_INET;
		saddr.sin_addr = dest_addr;
		saddr.sin_port = port;
		err = kernel_connect(sk, (struct sockaddr *)&saddr,
				     sizeof(saddr), 0);
		if (err < 0) {
			break;
		}

		if (bufsize) {
			err = kernel_setsockopt(sk, SOL_SOCKET, SO_SNDBUF,
						(char *)&bufsize,
						sizeof(bufsize));
			if (err < 0) {
				break;
			}
		}
	} while (0);

	if (err < 0) {
		/* Only tear down a socket we actually created. */
		if (sk) {
			kernel_sock_shutdown(sk, SHUT_RDWR);
			sock_release(sk);
		}
		sk = NULL;
	}
	return (sk);
}
/*
 * Transmit @num_rqst compound requests on @server's TCP socket (or via
 * smbdirect when RDMA is enabled).  The socket is corked for the duration
 * and all signals are blocked so an interrupt cannot cause a partial send.
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	/* RDMA transport bypasses the TCP socket entirely. */
	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (signal_pending(current)) {
		cifs_dbg(FYI, "signal is pending before sending any data\n");
		return -EINTR;
	}

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */
	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */
	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -EINTR;
	}

	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

/*
 * Send @num_rqst requests, first wrapping them in an encrypting transform
 * request when CIFS_TRANSFORM_REQ is set in @flags.
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	/* Slot 0 is reserved for the transform header below. */
	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback "
			 "is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

/*
 * Send a single legacy SMB: the 4-byte RFC1002 length prefix followed by
 * @smb_buf_length bytes of @smb_buffer.
 */
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

/*
 * Block until @num_credits credits are available for the operation class
 * encoded in @flags, then consume them.  A negative @timeout waits
 * forever; otherwise the wait is bounded to @timeout milliseconds.
 * *@instance is set to the server's reconnect instance at the time the
 * credits were taken.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	int rc;
	int *credits;
	int optype;
	long int t;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		*instance = server->reconnect_instance;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			/* Not enough credits: sleep until some are freed. */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				trace_smb3_credit_timeout(server->CurrentMid,
					server->hostname, num_credits);
				cifs_dbg(VFS, "wait timed out after %d ms\n",
					 timeout);
				return -ENOTSUPP;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits to compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this is we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);
				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					trace_smb3_credit_timeout(
						server->CurrentMid,
						server->hostname, num_credits);
					cifs_dbg(VFS, "wait timed out after %d ms\n",
						 timeout);
					return -ENOTSUPP;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				*instance = server->reconnect_instance;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

/* Wait without a deadline for a single credit for an ordinary request. */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags, instance);
}

/*
 * Wait up to 60s for @num credits for a compound request, but bail out
 * immediately when too few requests are in flight to ever free enough.
 */
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	if (*credits < num) {
		/*
		 * Return immediately if not too many requests in flight since
		 * we will likely be stuck on waiting for credits.
		 */
		if (server->in_flight < num - *credits) {
			spin_unlock(&server->req_lock);
			return -ENOTSUPP;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags, instance);
}
int init_server(void *conf) { int ret=0; int flags=1; sock_entry_t *se; struct linger ling= {0,0}; //create workqueue if(!wq) { wq=create_singlethread_workqueue("kkvserver"); if(!wq) { #ifdef DEBUG_KKV_NETWORK printk("create_workqueue() failed in server_init()\n"); #endif return -ENOMEM; } } if(!svr) { svr=kmalloc(sizeof(kkv_server),GFP_KERNEL); INIT_WORK(&svr->work,server_work); } //create socket se=(sock_entry_t *)conf; ret=sock_create_kern(se->family,se->type,se->protocol,&svr->socket); if(ret<0) { #ifdef DEBUG_KKV_NETWORK printk("sock_create_kern() failed=%d, family=%d, type=%d, protocol=%d\n", ret,se->family,se->type,se->protocol); #endif goto out0; } set_server_sock_callbacks(svr->socket,svr); ret=kernel_setsockopt(svr->socket,SOL_SOCKET,SO_REUSEADDR,(char*)&flags,sizeof(flags)); if(ret<0) { #ifdef DEBUG_KKV_NETWORK printk("kernel_setsockopt() failed=%d, level=%d, name=%d\n",ret,SOL_SOCKET,SO_REUSEADDR); #endif goto out1; } ret=kernel_setsockopt(svr->socket,SOL_SOCKET,SO_KEEPALIVE,(char*)&flags,sizeof(flags)); if(ret<0) { #ifdef DEBUG_KKV_NETWORK printk("kernel_setsockopt() failed=%d, level=%d, name=%d\n",ret,SOL_SOCKET,SO_KEEPALIVE); #endif goto out1; } ret=kernel_setsockopt(svr->socket,SOL_SOCKET,SO_LINGER,(char*)&ling,sizeof(ling)); if(ret<0) { #ifdef DEBUG_KKV_NETWORK printk("kernel_setsockopt() failed=%d, level=%d, name=%d\n",ret,SOL_SOCKET,SO_LINGER); #endif goto out1; } ret=kernel_setsockopt(svr->socket,SOL_TCP,TCP_NODELAY,(char*)&flags,sizeof(flags)); if(ret<0) { #ifdef DEBUG_KKV_NETWORK printk("kernel_setsockopt() failed=%d, level=%d, name=%d\n",ret,IPPROTO_TCP,TCP_NODELAY); #endif goto out1; } ret=kernel_bind(svr->socket,(struct sockaddr*)se->addr,se->addrlen); if(ret<0) { #ifdef DEBUG_KKV_NETWORK printk("kernel_bind() failed=%d\n",ret); #endif goto out1; } ret=kernel_listen(svr->socket,1024); if(ret<0) { #ifdef DEBUG_KKV_NETWORK printk("kernel_listen() failed=%d\n",ret); #endif goto out1; } return 0; out1: sock_release(svr->socket); out0: 
svr->socket=NULL; return ret; }
/*
 * Apply the socknal tunables to a freshly-created TCP socket: linger
 * behaviour, Nagle, buffer sizes, optional TCP backoff tunables and TCP
 * keepalive.  Returns 0 on success or the first setsockopt error.
 */
int
ksocknal_lib_setup_sock (struct socket *sock)
{
	int rc;
	int option;
	int keep_idle;
	int keep_intvl;
	int keep_count;
	int do_keepalive;
	struct linger linger;

	/* Memory allocations for this socket must not recurse into the
	 * filesystem. */
	sock->sk->sk_allocation = GFP_NOFS;

	/* Ensure this socket aborts active sends immediately when we close
	 * it. */
	linger.l_onoff = 0;
	linger.l_linger = 0;

	rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
			       (char *)&linger, sizeof(linger));
	if (rc != 0) {
		CERROR ("Can't set SO_LINGER: %d\n", rc);
		return (rc);
	}

	/* -1 selects the system default FIN_WAIT2 lifetime. */
	option = -1;
	rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2,
			       (char *)&option, sizeof(option));
	if (rc != 0) {
		CERROR ("Can't set SO_LINGER2: %d\n", rc);
		return (rc);
	}

	if (!*ksocknal_tunables.ksnd_nagle) {
		option = 1;
		rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
				       (char *)&option, sizeof(option));
		if (rc != 0) {
			CERROR ("Can't disable nagle: %d\n", rc);
			return (rc);
		}
	}

	rc = lnet_sock_setbuf(sock,
			      *ksocknal_tunables.ksnd_tx_buffer_size,
			      *ksocknal_tunables.ksnd_rx_buffer_size);
	if (rc != 0) {
		CERROR ("Can't set buffer tx %d, rx %d buffers: %d\n",
			*ksocknal_tunables.ksnd_tx_buffer_size,
			*ksocknal_tunables.ksnd_rx_buffer_size, rc);
		return (rc);
	}

	/* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */
#ifdef SOCKNAL_BACKOFF
	if (*ksocknal_tunables.ksnd_backoff_init > 0) {
		option = *ksocknal_tunables.ksnd_backoff_init;
#ifdef SOCKNAL_BACKOFF_MS
		/* Tunable is in seconds; this kernel wants milliseconds. */
		option *= 1000;
#endif
		rc = kernel_setsockopt(sock, SOL_TCP, TCP_BACKOFF_INIT,
				       (char *)&option, sizeof(option));
		if (rc != 0) {
			CERROR ("Can't set initial tcp backoff %d: %d\n",
				option, rc);
			return (rc);
		}
	}

	if (*ksocknal_tunables.ksnd_backoff_max > 0) {
		option = *ksocknal_tunables.ksnd_backoff_max;
#ifdef SOCKNAL_BACKOFF_MS
		option *= 1000;
#endif
		rc = kernel_setsockopt(sock, SOL_TCP, TCP_BACKOFF_MAX,
				       (char *)&option, sizeof(option));
		if (rc != 0) {
			CERROR ("Can't set maximum tcp backoff %d: %d\n",
				option, rc);
			return (rc);
		}
	}
#endif

	/* snapshot tunables */
	keep_idle = *ksocknal_tunables.ksnd_keepalive_idle;
	keep_count = *ksocknal_tunables.ksnd_keepalive_count;
	keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl;

	/* Keepalive is only enabled when all three tunables are positive. */
	do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);

	option = (do_keepalive ? 1 : 0);
	rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
			       (char *)&option, sizeof(option));
	if (rc != 0) {
		CERROR ("Can't set SO_KEEPALIVE: %d\n", rc);
		return (rc);
	}

	if (!do_keepalive)
		return (0);

	rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
			       (char *)&keep_idle, sizeof(keep_idle));
	if (rc != 0) {
		CERROR ("Can't set TCP_KEEPIDLE: %d\n", rc);
		return (rc);
	}

	rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
			       (char *)&keep_intvl, sizeof(keep_intvl));
	if (rc != 0) {
		CERROR ("Can't set TCP_KEEPINTVL: %d\n", rc);
		return (rc);
	}

	rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
			       (char *)&keep_count, sizeof(keep_count));
	if (rc != 0) {
		CERROR ("Can't set TCP_KEEPCNT: %d\n", rc);
		return (rc);
	}

	return (0);
}
/*
 * Transmit one SMB request (kvec array plus page array) on the server's
 * TCP socket using an iov_iter-driven smb_send_kvec(), corking the socket
 * around the whole send.
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned long send_length;
	unsigned int i;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	/* sanity check send length */
	send_length = rqst_len(rqst);
	if (send_length != smb_buf_length + 4) {
		WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
			send_length, smb_buf_length);
		return -EIO;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	size = 0;
	for (i = 0; i < n_vec; i++)
		size += iov[i].iov_len;

	iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);

	rc = smb_send_kvec(server, &smb_msg, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		/* the final page may be a partial page (rq_tailsz) */
		size_t len = i == rqst->rq_npages - 1
				? rqst->rq_tailsz
				: rqst->rq_pagesz;
		struct bio_vec bvec = {
			.bv_page = rqst->rq_pages[i],
			.bv_len = len
		};
		iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
			      &bvec, 1, len);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

/* Wrap a raw kvec array in an smb_rqst and send it. */
static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	return smb_send_rqst(server, &rqst);
}

/* Send a single contiguous SMB buffer (RFC1002 length prefix included). */
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;

	return smb_sendv(server, &iov, 1);
}

/*
 * Block until a send credit is available on @server, then consume one.
 * CIFS_ASYNC_OP requests (e.g. oplock breaks) are never held up.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			/* No credits: sleep until the response path frees
			 * one and wakes request_q. */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
int myserver(void) { struct socket *client_sock; struct sockaddr_in s_addr; unsigned short portnum=8888; int ret=0; memset(&s_addr,0,sizeof(s_addr)); s_addr.sin_family=AF_INET; s_addr.sin_port=htons(portnum); s_addr.sin_addr.s_addr=htonl(INADDR_ANY); sock=(struct socket *)kmalloc(sizeof(struct socket),GFP_KERNEL); client_sock=(struct socket *)kmalloc(sizeof(struct socket),GFP_KERNEL); /*create a socket*/ ret=sock_create_kern(AF_INET, SOCK_STREAM,0,&sock); if(ret) { printk("server:socket_create error!\n"); } //printk("server:socket_create ok!\n"); /*set the socket can be reused*/ int val=1; ret= kernel_setsockopt(sock,SOL_SOCKET,SO_REUSEADDR,(char *)&val,sizeof(val)); if(ret) { // printk("kernel_setsockopt error!!!!!!!!!!!\n"); } /*bind the socket*/ ret=sock->ops->bind(sock,(struct sockaddr *)&s_addr,sizeof(struct sockaddr_in)); if(ret<0) { // printk("server: bind error\n"); return ret; } //printk("server:bind ok!\n"); /*listen*/ ret=sock->ops->listen(sock,10); if(ret<0) { // printk("server: listen error\n"); return ret; } //printk("server:listen ok!\n"); my_wq = create_workqueue("my_queue"); while(1) { ret=1; struct work_struct_data * wsdata; ret = kernel_accept(sock,&client_sock,100); printk("server:accept ing!,ret=%d\n",ret); if(ret<0) { printk("server:accept error!,ret=%d\n",ret); //return ret; break; } int ret = 0; if (my_wq) { wsdata = (struct work_struct_data *) kmalloc(sizeof(struct work_struct_data), GFP_KERNEL); // 设置要传递的数据 wsdata->client = client_sock; if (wsdata) { //初始化work_struct类型的变量(主要是指定处理函数) INIT_WORK(&wsdata->my_work, work_handler); //将work添加到刚创建的工作队列中 ret = queue_work(my_wq, &wsdata->my_work); } } //printk("server: accept ok, Connection Established,ret=%d\n",ret); } sock_release(sock); return ret; }
/*
 * lnet_sock_read - read exactly @nob bytes from @sock into @buffer.
 *
 * Loops on kernel_recvmsg() until the whole buffer is filled, re-arming
 * SO_RCVTIMEO before each receive with whatever remains of the @timeout
 * second budget.
 *
 * Returns 0 when @nob bytes were read, -ECONNRESET on orderly peer
 * shutdown before completion, -ETIMEDOUT when the budget is exhausted, or
 * a negative error from the socket layer.
 */
int
lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
{
	int rc;
	/* remaining receive budget in jiffies; charged per-recv below */
	long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
	unsigned long then;
	struct timeval tv;

	LASSERT(nob > 0);
	LASSERT(jiffies_left > 0);

	for (;;) {
		struct kvec iov = {
			.iov_base = buffer,
			.iov_len = nob
		};
		struct msghdr msg = {
			.msg_flags = 0
		};

		/* Set receive timeout to remaining time */
		jiffies_to_timeval(jiffies_left, &tv);
		rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
				       (char *)&tv, sizeof(tv));
		if (rc) {
			CERROR("Can't set socket recv timeout %ld.%06d: %d\n",
			       (long)tv.tv_sec, (int)tv.tv_usec, rc);
			return rc;
		}

		then = jiffies;
		rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0);
		jiffies_left -= jiffies - then;

		if (rc < 0)
			return rc;

		/* zero bytes means the peer closed mid-message */
		if (!rc)
			return -ECONNRESET;

		/* advance past what we got and go read the rest */
		buffer = ((char *)buffer) + rc;
		nob -= rc;
		if (!nob)
			return 0;

		if (jiffies_left <= 0)
			return -ETIMEDOUT;
	}
}
EXPORT_SYMBOL(lnet_sock_read);

/*
 * lnet_sock_create - create a TCP socket, optionally bound to
 * @local_ip/@local_port.
 *
 * *@fatal tells the caller whether a failure is worth retrying: it is set
 * to 0 only for -EADDRINUSE on bind (the port may free up), and left 1 for
 * every other error.  On success *@sockp holds the new socket.
 */
static int
lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip,
		 int local_port)
{
	struct sockaddr_in locaddr;
	struct socket *sock;
	int rc;
	int option;

	/* All errors are fatal except bind failure if the port is in use */
	*fatal = 1;

	rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
	*sockp = sock;
	if (rc) {
		CERROR("Can't create socket: %d\n", rc);
		return rc;
	}

	option = 1;
	rc = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
			       (char *)&option, sizeof(option));
	if (rc) {
		CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc);
		goto failed;
	}

	if (local_ip || local_port) {
		memset(&locaddr, 0, sizeof(locaddr));
		locaddr.sin_family = AF_INET;
		locaddr.sin_port = htons(local_port);
		/* local_ip of 0 means bind to any local interface */
		if (!local_ip)
			locaddr.sin_addr.s_addr = htonl(INADDR_ANY);
		else
			locaddr.sin_addr.s_addr = htonl(local_ip);

		rc = kernel_bind(sock, (struct sockaddr *)&locaddr,
				 sizeof(locaddr));
		if (rc == -EADDRINUSE) {
			/* retryable: the caller may try another port */
			CDEBUG(D_NET, "Port %d already in use\n", local_port);
			*fatal = 0;
			goto failed;
		}
		if (rc) {
			CERROR("Error trying to bind to port %d: %d\n",
			       local_port, rc);
			goto failed;
		}
	}
	return 0;

failed:
	sock_release(sock);
	return rc;
}

/*
 * lnet_sock_setbuf - set socket send/receive buffer sizes.
 *
 * A zero @txbufsize or @rxbufsize leaves that buffer at its current size.
 * Returns 0 on success or the first kernel_setsockopt() error.
 */
int
lnet_sock_setbuf(struct socket *sock, int txbufsize, int rxbufsize)
{
	int option;
	int rc;

	if (txbufsize) {
		option = txbufsize;
		rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
				       (char *)&option, sizeof(option));
		if (rc) {
			CERROR("Can't set send buffer %d: %d\n",
			       option, rc);
			return rc;
		}
	}

	if (rxbufsize) {
		option = rxbufsize;
		rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
				       (char *)&option, sizeof(option));
		if (rc) {
			CERROR("Can't set receive buffer %d: %d\n",
			       option, rc);
			return rc;
		}
	}
	return 0;
}
/*
 * __smb_send_rqst - transmit @num_rqst compounded SMB requests on the
 * server's socket, or via SMB-direct RDMA when that transport is active.
 *
 * For SMB2+ (header_preamble_size == 0) a 4-byte RFC1002 length marker is
 * generated here and sent first.  The socket is corked (TCP_CORK) around
 * the whole transfer.  A partial send marks the session for reconnect,
 * since the server would read the next SMB as the remainder of this one.
 *
 * Returns 0 on success, -ENOTSOCK if the socket is gone, or a negative
 * error from the underlying send path.
 */
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg;
	int val = 1;
	__be32 rfc1002_marker;

	/* SMB-direct bypasses the TCP socket entirely */
	if (cifs_rdma_enabled(server) && server->smbd_conn) {
		rc = smbd_send(server, rqst);
		goto smbd_done;
	}
	if (ssocket == NULL)
		return -ENOTSOCK;

	/* cork the socket: hold queued data until we uncork below */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	/* total payload across all compounded requests */
	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;
		/* account for the marker in the expected total */
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		/* send this request's header/control kvecs first */
		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto uncork;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

uncork:
	/* uncork it: let the stack push out everything queued above */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one.  We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

/*
 * smb_send_rqst - send @num_rqst requests, encrypting first when
 * CIFS_TRANSFORM_REQ is set in @flags.
 *
 * The encrypted path prepends a transform header as request slot 0 (hence
 * the MAX_COMPOUND - 1 limit on @num_rqst), lets the dialect's
 * init_transform_rq callback build the transformed compound, sends it, and
 * frees the transformed copies (slot 0's iov is on our stack, so freeing
 * starts at &cur_rqst[1]).
 */
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	/* need one spare slot for the transform header */
	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(&tr_hdr, 0, sizeof(tr_hdr));

	iov.iov_base = &tr_hdr;
	iov.iov_len = sizeof(tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	if (!server->ops->init_transform_rq) {
		cifs_dbg(VFS, "Encryption requested but transform callback "
			 "is missing\n");
		return -EIO;
	}

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		return rc;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
	return rc;
}

/*
 * smb_send - send a single contiguous SMB buffer.
 *
 * The 4-byte RFC1002 frame header leading @smb_buffer goes in iov[0] and
 * the @smb_buf_length bytes of SMB payload after it in iov[1].
 */
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

/*
 * wait_for_free_credits - block until a send credit is available, then
 * consume one.
 *
 * CIFS_ASYNC_OP requests (e.g. oplock breaks) must never be held up and
 * take a credit immediately.  CIFS_BLOCKING_OP requests wait for a credit
 * but do not consume one, since blocking (locking) commands are allowed to
 * stall on the server indefinitely.
 *
 * Returns 0 once a credit was obtained, -ENOENT if the tcp session is
 * tearing down, or the error from a killable wait being interrupted.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			/* no credits: drop the lock and sleep until one
			 * shows up (killable so a fatal signal gets through) */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

/*
 * wait_for_free_request - credit wait keyed by operation type.
 *
 * Picks the credit counter for @optype via the dialect's
 * get_credits_field callback; an echo never queues behind an inflight
 * echo (-EAGAIN instead).
 */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}