/**
 * Release all resources owned by HTTP message @m.
 *
 * Detaches @m from its connection (if it is the connection's current
 * message), frees every skb queued on @m->msg.skb_list, and destroys
 * the message's memory pool. A NULL @m is a no-op.
 */
void
tfw_http_msg_free(TfwHttpMsg *m)
{
	struct sk_buff *skb;

	TFW_DBG("Free msg: %p\n", m);

	if (!m)
		return;

	/* Unlink the message from its connection before tearing it down. */
	if (m->conn && m->conn->msg == (TfwMsg *)m)
		m->conn->msg = NULL;

	/*
	 * The skbs are passed to us by put_skb_to_msg() call,
	 * so we're responsible to free them.
	 */
	while ((skb = ss_skb_dequeue(&m->msg.skb_list))) {
		TFW_DBG("free skb %p: truesize=%d sk=%p, destructor=%p"
			" users=%d type=%s\n",
			skb, skb->truesize, skb->sk, skb->destructor,
			atomic_read(&skb->users),
			m->conn && TFW_CONN_TYPE(m->conn) & Conn_Clnt
			? "Conn_Clnt"
			: m->conn && TFW_CONN_TYPE(m->conn) & Conn_Srv
			  ? "Conn_Srv"
			  : "Unknown");
		kfree_skb(skb);
	}

	tfw_pool_free(m->pool);
}
/**
 * Directly insert all skbs from @skb_list into @sk TCP write queue regardless
 * write buffer size. This allows directly forward modified packets without
 * copying. See do_tcp_sendpages() and tcp_sendmsg() in linux/net/ipv4/tcp.c.
 *
 * If @pass_skb is true, ownership of the skbs is transferred to the work
 * queue item and the caller's @skb_list is reinitialized to empty; otherwise
 * private copies are queued and the caller keeps its original list.
 *
 * Can be called in softirq context as well as from kernel thread.
 *
 * Returns 0 on success, -ENOMEM if an skb copy fails, or -EBUSY if the
 * socket cannot be scheduled for transmission.
 *
 * TODO use MSG_MORE until we reach end of message.
 */
int
ss_send(struct sock *sk, SsSkbList *skb_list, bool pass_skb)
{
	int r = 0;
	struct sk_buff *skb, *skb_copy;
	SsWork sw = {
		.sk	= sk,
		.action	= SS_SEND,
	};

	BUG_ON(!sk);
	BUG_ON(ss_skb_queue_empty(skb_list));

	SS_DBG("%s: cpu=%d sk=%p (cpu=%d) state=%s\n", __func__,
	       smp_processor_id(), sk, sk->sk_incoming_cpu,
	       ss_statename[sk->sk_state]);

	/*
	 * Remove the skbs from Tempesta lists if we won't use them,
	 * or copy them if they're going to be used by Tempesta during
	 * and after the transmission.
	 */
	if (pass_skb) {
		/* Take over the whole list; the caller's list becomes empty. */
		sw.skb_list = *skb_list;
		ss_skb_queue_head_init(skb_list);
	} else {
		ss_skb_queue_head_init(&sw.skb_list);
		for (skb = ss_skb_peek(skb_list); skb; skb = ss_skb_next(skb)) {
			/* tcp_transmit_skb() will clone the skb. */
			skb_copy = pskb_copy_for_clone(skb, GFP_ATOMIC);
			if (!skb_copy) {
				SS_WARN("Unable to copy an egress SKB.\n");
				r = -ENOMEM;
				goto err;
			}
			ss_skb_queue_tail(&sw.skb_list, skb_copy);
		}
	}

	/*
	 * Schedule the socket for TX softirq processing.
	 * Only part of @skb_list could be passed to send queue.
	 *
	 * NOTE(review): if ss_wq_push() fails while @pass_skb is true, the
	 * skbs already moved into sw.skb_list are neither freed on the error
	 * path below nor returned to the caller's @skb_list — this looks
	 * like a leak; confirm against the caller's ownership contract.
	 */
	if (ss_wq_push(&sw)) {
		SS_WARN("Cannot schedule socket %p for transmission\n", sk);
		r = -EBUSY;
		goto err;
	}

	return 0;
err:
	/* Free only our private copies; the caller still owns its skbs. */
	if (!pass_skb)
		while ((skb = ss_skb_dequeue(&sw.skb_list)))
			kfree_skb(skb);
	return r;
}
/**
 * Entail all skbs from @skb_list into the TCP send queue of @sk and push
 * them out, advancing the TCP sequence space accordingly.
 *
 * @skb_list can be invalid after the function call, don't try to use it.
 */
static void
ss_do_send(struct sock *sk, SsSkbList *skb_list)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int size, mss = tcp_send_mss(sk, &size, MSG_DONTWAIT);

	SS_DBG("%s: cpu=%d sk=%p queue_empty=%d send_head=%p"
	       " sk_state=%d mss=%d size=%d\n", __func__,
	       smp_processor_id(), sk, tcp_write_queue_empty(sk),
	       tcp_send_head(sk), sk->sk_state, mss, size);

	/*
	 * NOTE(review): when the socket is no longer active we return
	 * without draining @skb_list, so the queued skbs are not freed
	 * here — verify the caller releases them, otherwise they leak.
	 */
	if (unlikely(!ss_sock_active(sk)))
		return;

	ss_sock_cpu_check(sk);

	while ((skb = ss_skb_dequeue(skb_list))) {
		/* Defer checksum completion to the stack/NIC. */
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_shinfo(skb)->gso_segs = 0;

		/*
		 * TODO Mark all data with PUSH to force receiver to consume
		 * the data. Currently we do this for debugging purposes.
		 * We need to do this only for complete messages/skbs.
		 * Actually tcp_push() already does it for the last skb.
		 * MSG_MORE should be used, probably by connection layer.
		 */
		tcp_mark_push(tp, skb);

		SS_DBG("%s: entail skb=%p data_len=%u len=%u\n", __func__,
		       skb, skb->data_len, skb->len);

		ss_skb_entail(sk, skb);

		/* Account the newly queued bytes in the TCP sequence space. */
		tp->write_seq += skb->len;
		TCP_SKB_CB(skb)->end_seq += skb->len;
	}

	SS_DBG("%s: sk=%p send_head=%p sk_state=%d\n", __func__,
	       sk, tcp_send_head(sk), sk->sk_state);

	tcp_push(sk, MSG_DONTWAIT, mss, TCP_NAGLE_OFF|TCP_NAGLE_PUSH, size);
}