static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	/* Resume at the first fragment not yet filled by an earlier,
	 * partially completed call on the same open record.
	 */
	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
				      struct net_device *dev,
				      struct sk_buff *skb)
{
	/* Leaving through the device the record was offloaded to: the
	 * NIC will encrypt. Any other egress device (e.g. after a route
	 * change) gets a software-encrypted copy instead.
	 */
	if (dev == tls_get_ctx(sk)->netdev)
		return skb;

	return tls_sw_fallback(sk, skb);
}
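/* Context for the hook above: the core TX path consults
 * sk->sk_validate_xmit_skb (via the sk_validate_xmit_skb() helper in
 * include/net/sock.h) before handing an skb to a device, so a packet
 * rerouted away from the offload-capable NIC is re-encrypted in software
 * rather than leaving as plaintext. A minimal sketch of the wiring,
 * assuming it happens when TX device offload is configured; the exact
 * call site and memory-ordering helper are simplifications:
 */
static void example_hook_validate_xmit(struct sock *sk)
{
	/* from here on, validate_xmit_skb() calls back into TLS for
	 * every skb leaving this socket
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
}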
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = ctx->sg_plaintext_size;
	int num_elem = ctx->sg_plaintext_num_elem;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = ARRAY_SIZE(ctx->sg_plaintext_data) - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		/* Pin the user pages instead of copying them. */
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}
		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&ctx->sg_plaintext_data[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&ctx->sg_plaintext_data[num_elem]);
			sk_mem_charge(sk, use);

			/* only the first page can start mid-page */
			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

out:
	ctx->sg_plaintext_size = size;
	ctx->sg_plaintext_num_elem = num_elem;
	return rc;
}
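/* The inner loop above slices the pinned byte range into per-page
 * scatterlist entries: only the first chunk can start mid-page, every
 * later chunk begins at offset 0. A standalone userspace illustration
 * of that arithmetic (hypothetical demo, not kernel code; page size
 * assumed to be 4096):
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096L

static void demo_chunks(long offset, long copied)
{
	int i = 0;

	while (copied > 0) {
		long room = DEMO_PAGE_SIZE - offset;
		long use = copied < room ? copied : room; /* min_t() above */

		printf("page %d: offset %ld, length %ld\n", i, offset, use);
		offset = 0;	/* subsequent pages start at the top */
		copied -= use;
		++i;
	}
}

int main(void)
{
	/* 10000 bytes starting 100 bytes into the first page
	 * splits as 3996 + 4096 + 1908
	 */
	demo_chunks(100, 10000);
	return 0;
}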
static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}
static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc = 0;

	rc = alloc_sg(sk, len, ctx->sg_encrypted_data,
		      &ctx->sg_encrypted_num_elem, &ctx->sg_encrypted_size, 0);

	return rc;
}
static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc = 0;

	rc = alloc_sg(sk, len, ctx->sg_plaintext_data,
		      &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
		      tls_ctx->pending_open_record_frags);

	return rc;
}
void tls_sw_free_tx_resources(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	if (ctx->aead_send)
		crypto_free_aead(ctx->aead_send);

	tls_free_both_sg(sk);

	kfree(ctx);
	kfree(tls_ctx);
}
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc;

	/* Terminate both lists before handing them to the AEAD. */
	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->rec_seq, tls_ctx->rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
			       sk->sk_allocation);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		return rc;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk);

	tls_advance_record_sn(sk, tls_ctx);
	return rc;
}
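/* tls_make_aad() above packs the 13 bytes of AEAD additional data that
 * TLS 1.2 (RFC 5246, section 6.2.3.3) authenticates alongside the
 * payload: 8-byte record sequence number, 1-byte content type, 2-byte
 * protocol version, 2-byte plaintext length. An illustrative
 * re-statement of that layout, assuming TLS 1.2 (field packing only,
 * not the kernel helper; memcpy as in linux/string.h):
 */
static void example_make_aad(unsigned char *aad, size_t data_len,
			     const char *rec_seq, size_t rec_seq_size,
			     unsigned char record_type)
{
	memcpy(aad, rec_seq, rec_seq_size);	/* bytes 0-7: seq number */
	aad[8] = record_type;			/* byte 8: content type */
	aad[9] = 3;				/* bytes 9-10: version, */
	aad[10] = 3;				/*   {3, 3} = TLS 1.2 */
	aad[11] = (data_len >> 8) & 0xff;	/* bytes 11-12: length, */
	aad[12] = data_len & 0xff;		/*   big endian */
}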
static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	struct scatterlist *sg_in, sg_out[3];
	struct sk_buff *nskb = NULL;
	int sg_in_max_elements;
	int resync_sgs = 0;
	s32 sync_size = 0;
	u64 rcd_sn;

	/* worst case is:
	 * MAX_SKB_FRAGS in tls_record_info
	 * MAX_SKB_FRAGS + 1 in SKB head and frags.
	 */
	sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;

	if (!payload_len)
		return skb;

	sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
	if (!sg_in)
		goto free_orig;

	sg_init_table(sg_in, sg_in_max_elements);
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));

	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
		/* bypass packets before kernel TLS socket option was set */
		if (sync_size < 0 && payload_len <= -sync_size)
			nskb = skb_get(skb);
		goto put_sg;
	}

	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);

put_sg:
	while (resync_sgs)
		put_page(sg_page(&sg_in[--resync_sgs]));
	kfree(sg_in);
free_orig:
	kfree_skb(skb);
	return nskb;
}
static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	/* The encrypted list also carries the record header and tag. */
	if (target_size > 0)
		target_size += tls_ctx->overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}
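/* tls_sw_sendpage() is what a userspace sendfile() or splice() on a
 * kTLS socket reaches: file pages are attached to the open record
 * without an intermediate copy. A hedged userspace sketch (error
 * handling elided; assumes the socket already has TLS_TX configured,
 * as in the setup example after tls_sw_sendmsg() below):
 */
#include <sys/sendfile.h>

static ssize_t example_send_file_over_tls(int tls_sock, int file_fd,
					   size_t len)
{
	off_t off = 0;

	/* the kernel pulls file pages and feeds them, page by page,
	 * into the sendpage path above
	 */
	return sendfile(tls_sock, file_fd, &off, len);
}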
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		if (full_record || eor) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
						 try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (!ret)
				continue;
			if (ret == -EAGAIN)
				goto send_end;

			copied -= try_to_copy;
fallback_to_reg_send:
			iov_iter_revert(&msg->msg_iter,
					ctx->sg_plaintext_size - orig_size);
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}
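/* The userspace view of this sendmsg path, following the pattern in
 * Documentation/networking/tls.rst: attach the "tls" ULP, install TX
 * key material, then write plaintext as usual (the kernel encrypts).
 * Key, IV, salt and sequence values are caller-supplied placeholders;
 * TCP_ULP and SOL_TLS require reasonably recent kernel headers.
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

static int example_setup_ktls_tx(int sock, const unsigned char *key,
				 const unsigned char *iv,
				 const unsigned char *salt,
				 const unsigned char *rec_seq)
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	if (setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	return setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}

/* tls_proccess_cmsg() above is what handles a TLS_SET_RECORD_TYPE
 * control message, the documented way to send non-data records
 * (alerts, handshake messages) through this same path:
 */
static ssize_t example_send_tls_record(int sock, const void *buf,
				       size_t len,
				       unsigned char record_type)
{
	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_TLS;
	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
	cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned char));
	*CMSG_DATA(cmsg) = record_type;

	return sendmsg(sock, &msg, 0);
}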