Example No. 1
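scan_inflight() comes from the AF_UNIX garbage collector (net/unix/garbage.c). Holding the receive-queue lock, it walks every queued skb, resolves file descriptors passed via SCM_RIGHTS back to their unix sockets, and applies func to each socket that is still marked as a GC candidate; sockets queued after the collection started are deliberately skipped. When a hitlist is supplied, each skb that produced a hit is unlinked from the receive queue and collected on the hitlist so it can be freed outside the lock.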
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	receive_queue_for_each_skb(x, next, skb) {
		/*
		 *	Do we have file descriptors ?
		 */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/*
			 *	Process the descriptors of this socket
			 */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;
			while (nfd--) {
				/*
				 *	Get the socket the fd matches
				 *	if it indeed does so
				 */
				struct sock *sk = unix_get_socket(*fp++);
				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/*
					 * Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (u->gc_candidate) {
						hit = true;
						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}
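receive_queue_for_each_skb is not a mainline helper; upstream versions of scan_inflight do the same walk with skb_queue_walk_safe. A minimal sketch of an equivalent macro, assuming all it must provide is a deletion-safe traversal of x->sk_receive_queue:

/*
 * Hypothetical stand-in for receive_queue_for_each_skb: keep a
 * lookahead pointer (next) so the current skb may be unlinked
 * mid-walk, mirroring the mainline skb_queue_walk_safe() pattern.
 */
#define receive_queue_for_each_skb(x, next, skb)			\
	for (skb = (x)->sk_receive_queue.next, next = skb->next;	\
	     skb != (struct sk_buff *)&(x)->sk_receive_queue;		\
	     skb = next, next = skb->next)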
Example No. 2
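restore_unix_rqueue() is OpenVZ checkpoint/restore (CPT) code. It replays the receive queue of an AF_UNIX socket from a checkpoint image: each nested record is turned back into an sk_buff by rst_skb() (shown in Example No. 4), charged to its owning socket, and queued on sk_receive_queue; records that are not skb images are handed to rst_sock_attr() as socket attributes.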
static int restore_unix_rqueue(struct sock *sk, struct cpt_sock_image *si,
                               loff_t pos, struct cpt_context *ctx)
{
    loff_t endpos;

    pos = pos + si->cpt_hdrlen;
    endpos = pos + si->cpt_next;
    while (pos < endpos) {
        struct sk_buff *skb;
        struct sock *owner_sk;
        __u32 owner;

        skb = rst_skb(sk, &pos, &owner, NULL, ctx);
        if (IS_ERR(skb)) {
            if (PTR_ERR(skb) == -EINVAL) {
                int err;

                /* Not an skb record: try to parse it as a socket
                 * attribute and keep walking the queue. */
                err = rst_sock_attr(&pos, sk, ctx);
                if (err)
                    return err;
                continue;
            }
            return PTR_ERR(skb);
        }

        /* Default owner is the peer; cpt_owner selects another
         * restored socket unless it is -1 ("owned by peer"). */
        owner_sk = unix_peer(sk);
        if (owner != -1) {
            cpt_object_t *pobj;
            pobj = lookup_cpt_obj_byindex(CPT_OBJ_SOCKET, owner, ctx);
            if (pobj == NULL) {
                eprintk_ctx("orphan af_unix skb?\n");
                kfree_skb(skb);
                continue;
            }
            owner_sk = pobj->o_obj;
        }
        if (owner_sk == NULL) {
            dprintk_ctx("orphan af_unix skb 2?\n");
            kfree_skb(skb);
            continue;
        }
        /* Charge the skb to its owner; descriptors passed inside it
         * must be released via unix_destruct_fds() when it is freed. */
        skb_set_owner_w(skb, owner_sk);
        if (UNIXCB(skb).fp)
            skb->destructor = unix_destruct_fds;
        skb_queue_tail(&sk->sk_receive_queue, skb);
        if (sk->sk_state == TCP_LISTEN) {
            /*
             * On a listening socket each queued skb carries an
             * embryonic peer in skb->sk; detach and release the
             * temporary struct socket attached to it during restore.
             */
            struct socket *sock = skb->sk->sk_socket;
            BUG_ON(sock == NULL);
            BUG_ON(sock->file);
            skb->sk->sk_socket = NULL;
            skb->sk->sk_sleep = NULL;
            sock->sk = NULL;
            sock_release(sock);
        }
    }
    return 0;
}
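A hypothetical call site (the name rst_restore_queue and the AF_UNIX guard are assumptions for illustration, not the original OpenVZ caller): after the socket has been recreated from its cpt_sock_image, the same image and file offset are handed over so the queued skbs can be replayed:

/* Hypothetical caller: sk was just recreated from *si at offset pos. */
static int rst_restore_queue(struct sock *sk, struct cpt_sock_image *si,
                             loff_t pos, struct cpt_context *ctx)
{
    /* Nested queue records exist only if the image is larger than
     * its own header. */
    if (si->cpt_next <= si->cpt_hdrlen)
        return 0;
    if (sk->sk_family == AF_UNIX)
        return restore_unix_rqueue(sk, si, pos, ctx);
    return 0;
}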
Example No. 3
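This appears to be an earlier revision of the scan_inflight() shown in Example No. 1: every fd that resolves to a unix socket counts as a hit and is passed to func, without the later gc_candidate filter that ignores sockets queued after garbage collection has started.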
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	receive_queue_for_each_skb(x, next, skb) {
		/*
		 *	Do we have file descriptors ?
		 */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/*
			 *	Process the descriptors of this socket
			 */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;
			while (nfd--) {
				/*
				 *	Get the socket the fd matches
				 *	if it indeed does so
				 */
				struct sock *sk = unix_get_socket(*fp++);
				if (sk) {
					hit = true;
					func(unix_sk(sk));
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}
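For context, a simplified sketch of how the collector consumes this helper, modeled on the mainline unix_gc() pattern (dec_inflight exists in net/unix/garbage.c; the wrapper itself is illustrative): candidate queues are scanned into a private hitlist, and the collected skbs are purged only after the queue lock has been dropped:

static void gc_collect_example(struct sock *s)
{
	struct sk_buff_head hitlist;

	skb_queue_head_init(&hitlist);
	/* scan_inflight() locks the receive queue, applies the
	 * callback and moves matching skbs onto hitlist. */
	scan_inflight(s, dec_inflight, &hitlist);
	/* Freed outside the queue lock: this releases the passed
	 * descriptors and breaks the reference cycle. */
	__skb_queue_purge(&hitlist);
}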
Example No. 4
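rst_skb() is the OpenVZ CPT helper the previous example relies on. It reads one cpt_skb_image record, rebuilds the sk_buff (headroom, header offsets, control block, timestamps, GSO metadata), then walks the nested records to restore the packet payload (CPT_OBJ_BITS) and any passed file descriptors (CPT_OBJ_FILEDESC).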
struct sk_buff * rst_skb(struct sock *sk, loff_t *pos_p, __u32 *owner,
                         __u32 *queue, struct cpt_context *ctx)
{
    int err;
    struct sk_buff *skb;
    struct cpt_skb_image v;
    loff_t pos = *pos_p;
    struct scm_fp_list *fpl = NULL;
    struct timeval tmptv;

    err = rst_get_object(CPT_OBJ_SKB, pos, &v, ctx);
    if (err)
        return ERR_PTR(err);
    *pos_p = pos + v.cpt_next;

    if (owner)
        *owner = v.cpt_owner;
    if (queue)
        *queue = v.cpt_queue;

    skb = alloc_skb(v.cpt_len + v.cpt_hspace + v.cpt_tspace, GFP_KERNEL);
    if (skb == NULL)
        return ERR_PTR(-ENOMEM);
    skb_reserve(skb, v.cpt_hspace);
    skb_put(skb, v.cpt_len);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
    skb->transport_header = v.cpt_h;
    skb->network_header = v.cpt_nh;
    skb->mac_header = v.cpt_mac;
#else
    skb->transport_header = skb->head + v.cpt_h;
    skb->network_header = skb->head + v.cpt_nh;
    skb->mac_header = skb->head + v.cpt_mac;
#endif
    BUILD_BUG_ON(sizeof(skb->cb) < sizeof(v.cpt_cb));
    if (sk->sk_protocol == IPPROTO_TCP) {
        /*
         * According to Alexey, all packets in the queue have non-zero
         * flags, as at least TCPCB_FLAG_ACK is set on them.
         * Luckily for us, the offset of the flags field in the
         * tcp_skb_cb struct with IPv6 is higher than the total size
         * of the tcp_skb_cb struct without IPv6.
         */
        if (ctx->image_version >= CPT_VERSION_18_2 ||
                ((struct tcp_skb_cb_ipv6 *)&v.cpt_cb)->flags) {
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
            check_tcp_cb_conv(NOT_CONV, CONV);
            memcpy(skb->cb, v.cpt_cb, sizeof(v.cpt_cb));
#else
            check_tcp_cb_conv(CONV, NOT_CONV);
            rst_tcp_cb_ipv6_to_ipv4(&v, skb);
#endif
        } else {
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
            check_tcp_cb_conv(CONV, NOT_CONV);
            rst_tcp_cb_ipv4_to_ipv6(&v, skb);
#else
            check_tcp_cb_conv(NOT_CONV, CONV);
            memcpy(skb->cb, v.cpt_cb, sizeof(v.cpt_cb));
#endif
        }
    } else
        memcpy(skb->cb, v.cpt_cb, sizeof(v.cpt_cb));
    skb->mac_len = v.cpt_mac_len;

    skb->csum = v.cpt_csum;
    skb->local_df = v.cpt_local_df;
    skb->pkt_type = v.cpt_pkt_type;
    skb->ip_summed = v.cpt_ip_summed;
    skb->priority = v.cpt_priority;
    skb->protocol = v.cpt_protocol;
    cpt_timeval_import(&tmptv, v.cpt_stamp);
    skb->tstamp = timeval_to_ktime(tmptv);

    skb_shinfo(skb)->gso_segs = v.cpt_gso_segs;
    skb_shinfo(skb)->gso_size = v.cpt_gso_size;
    if (ctx->image_version == 0) {
        skb_shinfo(skb)->gso_segs = 1;
        skb_shinfo(skb)->gso_size = 0;
    }

    /* Nested records follow the skb header: CPT_OBJ_BITS carries the
     * packet bytes, CPT_OBJ_FILEDESC the SCM_RIGHTS descriptors. */
    if (v.cpt_next > v.cpt_hdrlen) {
        pos = pos + v.cpt_hdrlen;
        while (pos < *pos_p) {
            union {
                struct cpt_obj_bits b;
                struct cpt_fd_image f;
            } u;

            err = rst_get_object(-1, pos, &u, ctx);
            if (err) {
                kfree_skb(skb);
                return ERR_PTR(err);
            }
            if (u.b.cpt_object == CPT_OBJ_BITS) {
                if (u.b.cpt_size != v.cpt_hspace + skb->len) {
                    eprintk_ctx("invalid skb image %u != %u + %u\n", u.b.cpt_size, v.cpt_hspace, skb->len);
                    kfree_skb(skb);
                    return ERR_PTR(-EINVAL);
                }

                err = ctx->pread(skb->head, u.b.cpt_size, ctx, pos+u.b.cpt_hdrlen);
                if (err) {
                    kfree_skb(skb);
                    return ERR_PTR(err);
                }
            } else if (u.f.cpt_object == CPT_OBJ_FILEDESC) {
                if (!fpl) {
                    fpl = kmalloc(sizeof(struct scm_fp_list),
                                  GFP_KERNEL_UBC);
                    if (!fpl) {
                        kfree_skb(skb);
                        return ERR_PTR(-ENOMEM);
                    }
                    fpl->count = 0;
                    UNIXCB(skb).fp = fpl;
                }
                fpl->fp[fpl->count] = rst_file(u.f.cpt_file, -1, ctx);
                if (!IS_ERR(fpl->fp[fpl->count]))
                    fpl->count++;
            }
            pos += u.b.cpt_next;
        }
    }

    return skb;
}
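The NET_SKBUFF_DATA_USES_OFFSET branches above exist because sk_buff_data_t is an offset from skb->head on 64-bit builds and a raw pointer otherwise. A small hypothetical helper showing the same conversion for a single field:

/* Hypothetical helper mirroring the #ifdef logic in rst_skb(). */
static inline void rst_set_transport_header(struct sk_buff *skb, __u32 off)
{
#ifdef NET_SKBUFF_DATA_USES_OFFSET
    skb->transport_header = off;              /* offset from skb->head */
#else
    skb->transport_header = skb->head + off;  /* raw pointer */
#endif
}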