struct sk_buff *tipc_buf_acquire(u32 size)
{
        struct sk_buff *skb;
        unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;

        skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
        if (skb) {
                skb_reserve(skb, BUF_HEADROOM);
                skb_put(skb, size);
                skb->next = NULL;
        }
        return skb;
}
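The reserved BUF_HEADROOM is what later allows protocol headers to be prepended without reallocating or copying the payload. Below is a minimal sketch of that pattern from the caller's side; build_frame() and struct my_proto_hdr are hypothetical and only illustrate how skb_push() consumes the headroom set aside by skb_reserve():

/* Hypothetical caller: copy the payload into the data area created by
 * skb_put() inside tipc_buf_acquire(), then prepend a header into the
 * reserved headroom. */
static struct sk_buff *build_frame(const void *payload, u32 len)
{
        struct my_proto_hdr *hdr;       /* hypothetical header layout */
        struct sk_buff *skb = tipc_buf_acquire(len);

        if (!skb)
                return NULL;
        memcpy(skb->data, payload, len);
        hdr = (struct my_proto_hdr *)skb_push(skb, sizeof(*hdr));
        hdr->len = len;                 /* fill hypothetical header fields */
        return skb;
}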
/*
 * Allocate a frame intended to be sent via fcoe_xmit.
 * Get an sk_buff for the frame and set the length.
 */
struct fc_frame *_fc_frame_alloc(size_t len)
{
        struct fc_frame *fp;
        struct sk_buff *skb;

        WARN_ON((len % sizeof(u32)) != 0);
        len += sizeof(struct fc_frame_header);

        skb = alloc_skb_fclone(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM +
                               NET_SKB_PAD, GFP_ATOMIC);
        if (!skb)
                return NULL;

        skb_reserve(skb, NET_SKB_PAD + FC_FRAME_HEADROOM);
        fp = (struct fc_frame *)skb;
        fc_frame_init(fp);
        skb_put(skb, len);

        return fp;
}
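The WARN_ON() makes the caller responsible for passing a payload length that is already a multiple of sizeof(u32); the FC frame header itself is accounted for inside the function. A minimal sketch of a hypothetical caller follows; prepare_frame() is invented here, and it assumes libfc's fc_frame_header_get() helper for reaching the header at the start of the data area:

/* Hypothetical caller: pad the payload length to a 4-byte multiple,
 * allocate the frame and zero the FC header placed in front of it. */
static struct fc_frame *prepare_frame(size_t payload_len)
{
        struct fc_frame *fp;
        struct fc_frame_header *fh;

        fp = _fc_frame_alloc(roundup(payload_len, sizeof(u32)));
        if (!fp)
                return NULL;
        fh = fc_frame_header_get(fp);   /* header precedes the payload */
        memset(fh, 0, sizeof(*fh));
        return fp;
}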
/**
 * This is the main body of the socket close function in Sync Sockets.
 *
 * inet_release() can sleep (as well as tcp_close()), so we make our own
 * non-sleepable socket closing.
 *
 * This function must be used only for data sockets.
 * Use standard sock_release() for listening sockets.
 *
 * In most cases it is called in softirq context and from ksoftirqd which
 * processes data from the socket (RSS and RPS distribute packets that way).
 *
 * Note: it used to be called in process context as well, at the time when
 * Tempesta starts or stops. That's not the case right now, but it may change.
 *
 * TODO In some cases we need to close the socket aggressively w/o FIN_WAIT_2
 * state, e.g. by sending RST. So we need to add a second parameter to the
 * function which says how to close the socket.
 * One of the examples is rcl_req_limit() (it should reset connections).
 * See tcp_sk(sk)->linger2 processing in standard tcp_close().
 *
 * Called with locked socket.
 */
static void
ss_do_close(struct sock *sk)
{
        struct sk_buff *skb;
        int data_was_unread = 0;
        int state;

        if (unlikely(!sk))
                return;
        SS_DBG("Close socket %p (%s): cpu=%d account=%d refcnt=%d\n",
               sk, ss_statename[sk->sk_state], smp_processor_id(),
               sk_has_account(sk), atomic_read(&sk->sk_refcnt));
        assert_spin_locked(&sk->sk_lock.slock);
        ss_sock_cpu_check(sk);
        BUG_ON(sk->sk_state == TCP_LISTEN);
        /* We must return immediately, so LINGER option is meaningless. */
        WARN_ON(sock_flag(sk, SOCK_LINGER));
        /* We don't support virtual containers, so TCP_REPAIR is prohibited. */
        WARN_ON(tcp_sk(sk)->repair);
        /* The socket must have atomic allocation mask. */
        WARN_ON(!(sk->sk_allocation & GFP_ATOMIC));

        /* The below is mostly copy-paste from tcp_close(). */
        sk->sk_shutdown = SHUTDOWN_MASK;

        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
                          tcp_hdr(skb)->fin;
                data_was_unread += len;
                SS_DBG("free rcv skb %p\n", skb);
                __kfree_skb(skb);
        }

        sk_mem_reclaim(sk);

        if (sk->sk_state == TCP_CLOSE)
                goto adjudge_to_death;

        if (data_was_unread) {
                NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
                tcp_set_state(sk, TCP_CLOSE);
                tcp_send_active_reset(sk, sk->sk_allocation);
        }
        else if (tcp_close_state(sk)) {
                /* The code below is taken from tcp_send_fin(). */
                struct tcp_sock *tp = tcp_sk(sk);
                int mss_now = tcp_current_mss(sk);

                skb = tcp_write_queue_tail(sk);

                if (tcp_send_head(sk) != NULL) {
                        /* Send FIN with data if we have any. */
                        TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
                        TCP_SKB_CB(skb)->end_seq++;
                        tp->write_seq++;
                }
                else {
                        /* No data to send in the socket, allocate new skb. */
                        skb = alloc_skb_fclone(MAX_TCP_HEADER,
                                               sk->sk_allocation);
                        if (!skb) {
                                SS_WARN("can't send FIN due to bad alloc");
                        } else {
                                skb_reserve(skb, MAX_TCP_HEADER);
                                tcp_init_nondata_skb(skb, tp->write_seq,
                                                     TCPHDR_ACK | TCPHDR_FIN);
                                tcp_queue_skb(sk, skb);
                        }
                }

                __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
        }

adjudge_to_death:
        state = sk->sk_state;
        sock_hold(sk);
        sock_orphan(sk);

        /*
         * SS sockets are processed in softirq only,
         * so backlog queue should be empty.
         */
        WARN_ON(sk->sk_backlog.tail);

        percpu_counter_inc(sk->sk_prot->orphan_count);

        if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
                return;

        if (sk->sk_state == TCP_FIN_WAIT2) {
                const int tmo = tcp_fin_time(sk);
                if (tmo > TCP_TIMEWAIT_LEN) {
                        inet_csk_reset_keepalive_timer(sk,
                                                tmo - TCP_TIMEWAIT_LEN);
                } else {
                        tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
                        return;
                }
        }
        if (sk->sk_state != TCP_CLOSE) {
                sk_mem_reclaim(sk);
                if (tcp_check_oom(sk, 0)) {
                        tcp_set_state(sk, TCP_CLOSE);
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        NET_INC_STATS_BH(sock_net(sk),
                                         LINUX_MIB_TCPABORTONMEMORY);
                }
        }
        if (sk->sk_state == TCP_CLOSE) {
                struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
                if (req != NULL)
                        reqsk_fastopen_remove(sk, req, false);
                inet_csk_destroy_sock(sk);
        }
}
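Since ss_do_close() must be entered with the socket locked and never sleeps, a caller running in softirq context can wrap it with the bottom-half socket lock; the final sock_put() pairs with the sock_hold() taken inside ss_do_close() before the socket is orphaned. A minimal sketch of such a wrapper (ss_close() here is illustrative, not necessarily Tempesta's exact code):

/* Illustrative wrapper: close a data socket from softirq context. */
static void ss_close(struct sock *sk)
{
        bh_lock_sock(sk);       /* non-sleeping lock, safe in softirq */
        ss_do_close(sk);
        bh_unlock_sock(sk);
        sock_put(sk);           /* matches sock_hold() in ss_do_close() */
}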