int sk_stream_wait_memory(struct sock *sk, long *timeo_p) { int err = 0; long vm_wait = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); if (sk_stream_memory_free(sk)) current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2; while (1) { set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) goto do_error; if (!*timeo_p) goto do_nonblock; if (signal_pending(current)) goto do_interrupted; clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); if (sk_stream_memory_free(sk) && !vm_wait) break; set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk->sk_write_pending++; sk_wait_event(sk, ¤t_timeo, sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN) || (sk_stream_memory_free(sk) && !vm_wait)); sk->sk_write_pending--; if (vm_wait) { vm_wait -= current_timeo; current_timeo = *timeo_p; if (current_timeo != MAX_SCHEDULE_TIMEOUT && (current_timeo -= vm_wait) < 0) current_timeo = 0; vm_wait = 0; } *timeo_p = current_timeo; } out: finish_wait(sk_sleep(sk), &wait); return err; do_error: err = -EPIPE; goto out; do_nonblock: err = -EAGAIN; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; }
bool rtl_sk_stream_memory_free(const struct sock *sk) { return sk_stream_memory_free(sk); }
/*
 * tls_sw_sendpage - software-TLS sendpage() implementation.
 *
 * Attaches the caller's page to the open plaintext record (no data copy;
 * the page is referenced via get_page() and a scatterlist entry) and pushes
 * a TLS record whenever it is full, the caller signalled end-of-record, or
 * the scatterlist is out of elements.
 *
 * Returns the number of bytes consumed, or a negative error when nothing
 * was consumed.
 */
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;	/* remembered to compute bytes consumed */
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	/* Finish any partially-transmitted record left by a previous call. */
	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		/* Space left in the currently-open record. */
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		/* Ciphertext side needs room for the data plus TLS overhead. */
		required_size = ctx->sg_plaintext_size + copy +
			      tls_ctx->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		/* Reference the caller's page directly in the plaintext
		 * scatterlist instead of copying its contents.
		 */
		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			/* Wait failed (timeout/signal/error): drop whatever
			 * was staged for this record and report the error.
			 */
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		/* A record may have been closed while we slept; push it
		 * before allocating for new data.
		 */
		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	/* Report consumed bytes if any were, otherwise the error. */
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}
/*
 * tls_sw_sendmsg - software-TLS sendmsg() implementation.
 *
 * Builds TLS records from the caller's iov. When a record can be completed
 * in one go (full record or end-of-record), the data is mapped zero-copy
 * via zerocopy_from_iter(); if the subsequent push fails (other than
 * -EAGAIN), the iov is reverted and the code falls back to copying into
 * the plaintext scatterlist.
 *
 * Returns bytes copied, or a negative error when nothing was copied.
 */
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;	/* plaintext size before this iteration, for rollback */

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	/* Finish any partially-transmitted record left by a previous call. */
	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	/* A control message may select a non-data TLS record type. */
	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		/* Space left in the currently-open record. */
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		/* Ciphertext side needs room for the data plus TLS overhead. */
		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		/* Record will be closed this iteration: try the zero-copy
		 * path, falling back to a copy if iov mapping fails or the
		 * push fails recoverably.
		 */
		if (full_record || eor) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
						 try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (!ret)
				continue;
			if (ret == -EAGAIN)
				goto send_end;

			/* Push failed: undo the optimistic accounting and
			 * iov advance, then retry via the copying path.
			 */
			copied -= try_to_copy;
fallback_to_reg_send:
			iov_iter_revert(&msg->msg_iter,
					ctx->sg_plaintext_size - orig_size);
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		/* Copying path: plaintext buffer needs no TLS overhead. */
		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			/* Shrink the ciphertext side to match the reduced
			 * plaintext allocation.
			 */
			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			/* Wait or copy failed: roll the record back to its
			 * size at the start of this iteration.
			 */
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		/* A record may have been closed while we slept; push it
		 * before allocating for new data.
		 */
		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		/* Resume whichever allocation was still short. */
		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}