Example #1
void synchronise_count_slave(int cpu)
{
	int i;

	/*
	 * Not every cpu is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc_unchecked(&count_count_start);
		while (atomic_read_unchecked(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		atomic_inc_unchecked(&count_count_stop);
		while (atomic_read_unchecked(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
}
Example #2
static int giu_get_irq(unsigned int irq)
{
	u16 pendl, pendh, maskl, maskh;
	int i;

	pendl = giu_read(GIUINTSTATL);
	pendh = giu_read(GIUINTSTATH);
	maskl = giu_read(GIUINTENL);
	maskh = giu_read(GIUINTENH);

	maskl &= pendl;
	maskh &= pendh;

	if (maskl) {
		for (i = 0; i < 16; i++) {
			if (maskl & (1 << i))
				return GIU_IRQ(i);
		}
	} else if (maskh) {
		for (i = 0; i < 16; i++) {
			if (maskh & (1 << i))
				return GIU_IRQ(i + GIUINT_HIGH_OFFSET);
		}
	}

	printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
	       maskl, pendl, maskh, pendh);

	atomic_inc_unchecked(&irq_err_count);

	return -EINVAL;
}
Example #3
/**
 * uio_event_notify - trigger an interrupt event
 * @info: UIO device capabilities
 */
void uio_event_notify(struct uio_info *info)
{
    struct uio_device *idev = info->uio_dev;

    atomic_inc_unchecked(&idev->event);
    wake_up_interruptible(&idev->wait);
    kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
}
Example #4
static void switch_worker(struct work_struct *work)
{
	if (oprofile_ops.switch_events())
		return;

	atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
	start_switch_worker();
}
Example #5
/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc_unchecked(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
Example #6
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc_unchecked(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
Example #7
int atm_charge(struct atm_vcc *vcc, int truesize)
{
    atm_force_charge(vcc, truesize);
    if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
        return 1;
    atm_return(vcc, truesize);
    atomic_inc_unchecked(&vcc->stats->rx_drop);
    return 0;
}
Example #8
static irqreturn_t sht15_interrupt_fired(int irq, void *d)
{
	struct sht15_data *data = d;
	/* First disable the interrupt */
	disable_irq_nosync(irq);
	atomic_inc_unchecked(&data->interrupt_handled);
	/* Then schedule a reading work struct */
	if (data->flag != SHT15_READING_NOTHING)
		schedule_work(&data->read_work);
	return IRQ_HANDLED;
}
Example #9
irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
	int status;
	int handled = 0;

	status = MGA_READ(MGA_STATUS);

	/* VBLANK interrupt */
	if (status & MGA_VLINEPEN) {
		MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
		atomic_inc_unchecked(&dev_priv->vbl_received);
		drm_handle_vblank(dev, 0);
		handled = 1;
	}

	/* SOFTRAP interrupt */
	if (status & MGA_SOFTRAPEN) {
		const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
		const u32 prim_end = MGA_READ(MGA_PRIMEND);


		MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);

		/* In addition to clearing the interrupt-pending bit, we
		 * have to write to MGA_PRIMEND to re-start the DMA operation.
		 */
		if ((prim_start & ~0x03) != (prim_end & ~0x03))
			MGA_WRITE(MGA_PRIMEND, prim_end);

		atomic_inc_unchecked(&dev_priv->last_fence_retired);
		DRM_WAKEUP(&dev_priv->fence_queue);
		handled = 1;
	}

	if (handled)
		return IRQ_HANDLED;
	return IRQ_NONE;
}
Example #10
int raw_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
		atomic_inc_unchecked(&sk->sk_drops);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	nf_reset(skb);

	skb_push(skb, skb->data - skb_network_header(skb));

	raw_rcv_skb(sk, skb);
	return 0;
}
Example #11
void sysfs_notify_dirent(struct sysfs_dirent *sd)
{
	struct sysfs_open_dirent *od;
	unsigned long flags;

	spin_lock_irqsave(&sysfs_open_dirent_lock, flags);

	od = sd->s_attr.open;
	if (od) {
		atomic_inc_unchecked(&od->event);
		wake_up_interruptible(&od->poll);
	}

	spin_unlock_irqrestore(&sysfs_open_dirent_lock, flags);
}
Example #12
static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
{
	if (!test_bit(0, &lis3_dev.misc_opened))
		goto out;

	/*
	 * Be careful: on some HP laptops the BIOS forces DD when on battery
	 * and the lid is closed. This leads to interrupts as soon as the
	 * device is moved even slightly.
	 */
	atomic_inc_unchecked(&lis3_dev.count);

	wake_up_interruptible(&lis3_dev.misc_wait);
	kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
out:
	if (atomic_read(&lis3_dev.wake_thread))
		return IRQ_WAKE_THREAD;
	return IRQ_HANDLED;
}
Example #13
struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
                                 gfp_t gfp_flags)
{
    struct sock *sk = sk_atm(vcc);
    int guess = SKB_TRUESIZE(pdu_size);

    atm_force_charge(vcc, guess);
    if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
        struct sk_buff *skb = alloc_skb(pdu_size, gfp_flags);

        if (skb) {
            atomic_add(skb->truesize-guess,
                       &sk->sk_rmem_alloc);
            return skb;
        }
    }
    atm_return(vcc, guess);
    atomic_inc_unchecked(&vcc->stats->rx_drop);
    return NULL;
}
Example #14
void gt641xx_irq_dispatch(void)
{
	u32 cause, mask;
	int i;

	cause = GT_READ(GT_INTRCAUSE_OFS);
	mask = GT_READ(GT_INTRMASK_OFS);
	cause &= mask;

	/*
	 * bit0 : logical or of all the interrupt bits.
	 * bit30: logical or of bits[29:26,20:1].
	 * bit31: logical or of bits[25:1].
	 */
	for (i = 1; i < 30; i++) {
		if (cause & (1U << i)) {
			do_IRQ(GT641XX_IRQ_BASE + i);
			return;
		}
	}

	atomic_inc_unchecked(&irq_err_count);
}
Example #15
int
ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
                   struct ip_vs_protocol *pp, int offset)
{
    struct rt6_info	*rt;	/* Route to the other host */
    int mtu;
    int rc;
    int local;

    EnterFunction(10);

    /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
       forwarded directly here, because there is no need to
       translate address/port back */
    if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
        if (cp->packet_xmit)
            rc = cp->packet_xmit(skb, cp, pp);
        else
            rc = NF_ACCEPT;
        /* do not touch skb anymore */
        atomic_inc_unchecked(&cp->in_pkts);
        goto out;
    }

    /*
     * mangle and send the packet here (only for VS/NAT)
     */

    if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
                                     0, 1|2|4)))
        goto tx_error_icmp;

    local = __ip_vs_is_local_route6(rt);
    /*
     * Avoid duplicate tuple in reply direction for NAT traffic
     * to local address when connection is sync-ed
     */
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
    if (cp->flags & IP_VS_CONN_F_SYNC && local) {
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

        if (ct && !nf_ct_is_untracked(ct)) {
            IP_VS_DBG(10, "%s(): "
                      "stopping DNAT to local address %pI6\n",
                      __func__, &cp->daddr.in6);
            goto tx_error_put;
        }
    }
#endif

    /* From world but DNAT to loopback address? */
    if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
            ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
        IP_VS_DBG(1, "%s(): "
                  "stopping DNAT to loopback %pI6\n",
                  __func__, &cp->daddr.in6);
        goto tx_error_put;
    }

    /* MTU checking */
    mtu = dst_mtu(&rt->dst);
    if (skb->len > mtu && !skb_is_gso(skb)) {
        if (!skb->dev) {
            struct net *net = dev_net(skb_dst(skb)->dev);

            skb->dev = net->loopback_dev;
        }
        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
        IP_VS_DBG_RL("%s(): frag needed\n", __func__);
        goto tx_error_put;
    }

    /* copy-on-write the packet before mangling it */
    if (!skb_make_writable(skb, offset))
        goto tx_error_put;

    if (skb_cow(skb, rt->dst.dev->hard_header_len))
        goto tx_error_put;

    ip_vs_nat_icmp_v6(skb, pp, cp, 0);

    if (!local || !skb->dev) {
        /* drop the old route when skb is not shared */
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);
    } else {
        /* destined to loopback, do we need to change route? */
        dst_release(&rt->dst);
    }

    /* Another hack: avoid icmp_send in ip_fragment */
    skb->local_df = 1;

    IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local);

    rc = NF_STOLEN;
    goto out;

tx_error_icmp:
    dst_link_failure(skb);
tx_error:
    dev_kfree_skb(skb);
    rc = NF_STOLEN;
out:
    LeaveFunction(10);
    return rc;
tx_error_put:
    dst_release(&rt->dst);
    goto tx_error;
}
Example #16
/*
 *	ICMP packet transmitter
 *	called by the ip_vs_in_icmp
 */
int
ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
                struct ip_vs_protocol *pp, int offset)
{
    struct rtable	*rt;	/* Route to the other host */
    int mtu;
    int rc;
    int local;

    EnterFunction(10);

    /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
       forwarded directly here, because there is no need to
       translate address/port back */
    if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
        if (cp->packet_xmit)
            rc = cp->packet_xmit(skb, cp, pp);
        else
            rc = NF_ACCEPT;
        /* do not touch skb anymore */
        atomic_inc_unchecked(&cp->in_pkts);
        goto out;
    }

    /*
     * mangle and send the packet here (only for VS/NAT)
     */

    if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
                                  RT_TOS(ip_hdr(skb)->tos),
                                  IP_VS_RT_MODE_LOCAL |
                                  IP_VS_RT_MODE_NON_LOCAL |
                                  IP_VS_RT_MODE_RDR)))
        goto tx_error_icmp;
    local = rt->rt_flags & RTCF_LOCAL;

    /*
     * Avoid duplicate tuple in reply direction for NAT traffic
     * to local address when connection is sync-ed
     */
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
    if (cp->flags & IP_VS_CONN_F_SYNC && local) {
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

        if (ct && !nf_ct_is_untracked(ct)) {
            IP_VS_DBG(10, "%s(): "
                      "stopping DNAT to local address %pI4\n",
                      __func__, &cp->daddr.ip);
            goto tx_error_put;
        }
    }
#endif

    /* From world but DNAT to loopback address? */
    if (local && ipv4_is_loopback(rt->rt_dst) &&
            rt_is_input_route(skb_rtable(skb))) {
        IP_VS_DBG(1, "%s(): "
                  "stopping DNAT to loopback %pI4\n",
                  __func__, &cp->daddr.ip);
        goto tx_error_put;
    }

    /* MTU checking */
    mtu = dst_mtu(&rt->dst);
    if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
            !skb_is_gso(skb)) {
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
        IP_VS_DBG_RL("%s(): frag needed\n", __func__);
        goto tx_error_put;
    }

    /* copy-on-write the packet before mangling it */
    if (!skb_make_writable(skb, offset))
        goto tx_error_put;

    if (skb_cow(skb, rt->dst.dev->hard_header_len))
        goto tx_error_put;

    ip_vs_nat_icmp(skb, pp, cp, 0);

    if (!local) {
        /* drop the old route when skb is not shared */
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);
    } else {
        ip_rt_put(rt);
        /*
         * Some IPv4 replies get local address from routes,
         * not from iph, so while we DNAT after routing
         * we need this second input/output route.
         */
        if (!__ip_vs_reroute_locally(skb))
            goto tx_error;
    }

    /* Another hack: avoid icmp_send in ip_fragment */
    skb->local_df = 1;

    IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local);

    rc = NF_STOLEN;
    goto out;

tx_error_icmp:
    dst_link_failure(skb);
tx_error:
    dev_kfree_skb(skb);
    rc = NF_STOLEN;
out:
    LeaveFunction(10);
    return rc;
tx_error_put:
    ip_rt_put(rt);
    goto tx_error;
}
Example #17
static void poll_rx(struct atm_dev *dev,int mbx)
{
	struct zatm_dev *zatm_dev;
	unsigned long pos;
	u32 x;
	int error;

	EVENT("poll_rx\n",0,0);
	zatm_dev = ZATM_DEV(dev);
	pos = (zatm_dev->mbx_start[mbx] & ~0xffffUL) | zin(MTA(mbx));
	while (x = zin(MWA(mbx)), (pos & 0xffff) != x) {
		u32 *here;
		struct sk_buff *skb;
		struct atm_vcc *vcc;
		int cells,size,chan;

		EVENT("MBX: host 0x%lx, nic 0x%x\n",pos,x);
		here = (u32 *) pos;
		if (((pos += 16) & 0xffff) == zatm_dev->mbx_end[mbx])
			pos = zatm_dev->mbx_start[mbx];
		cells = here[0] & uPD98401_AAL5_SIZE;
#if 0
printk("RX IND: 0x%x, 0x%x, 0x%x, 0x%x\n",here[0],here[1],here[2],here[3]);
{
unsigned long *x;
		printk("POOL: 0x%08x, 0x%08x\n",zpeekl(zatm_dev,
		      zatm_dev->pool_base),
		      zpeekl(zatm_dev,zatm_dev->pool_base+1));
		x = (unsigned long *) here[2];
		printk("[0..3] = 0x%08lx, 0x%08lx, 0x%08lx, 0x%08lx\n",
		    x[0],x[1],x[2],x[3]);
}
#endif
		error = 0;
		if (here[3] & uPD98401_AAL5_ERR) {
			error = (here[3] & uPD98401_AAL5_ES) >>
			    uPD98401_AAL5_ES_SHIFT;
			if (error == uPD98401_AAL5_ES_DEACT ||
			    error == uPD98401_AAL5_ES_FREE) continue;
		}
EVENT("error code 0x%x/0x%x\n",(here[3] & uPD98401_AAL5_ES) >>
  uPD98401_AAL5_ES_SHIFT,error);
		skb = ((struct rx_buffer_head *) bus_to_virt(here[2]))->skb;
		__net_timestamp(skb);
#if 0
printk("[-3..0] 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",((unsigned *) skb->data)[-3],
  ((unsigned *) skb->data)[-2],((unsigned *) skb->data)[-1],
  ((unsigned *) skb->data)[0]);
#endif
		EVENT("skb 0x%lx, here 0x%lx\n",(unsigned long) skb,
		    (unsigned long) here);
#if 0
printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
#endif
		size = error ? 0 : ntohs(((__be16 *) skb->data)[cells*
		    ATM_CELL_PAYLOAD/sizeof(u16)-3]);
		EVENT("got skb 0x%lx, size %d\n",(unsigned long) skb,size);
		chan = (here[3] & uPD98401_AAL5_CHAN) >>
		    uPD98401_AAL5_CHAN_SHIFT;
		if (chan < zatm_dev->chans && zatm_dev->rx_map[chan]) {
			int pos;
			vcc = zatm_dev->rx_map[chan];
			pos = ZATM_VCC(vcc)->pool;
			if (skb == zatm_dev->last_free[pos])
				zatm_dev->last_free[pos] = NULL;
			skb_unlink(skb, zatm_dev->pool + pos);
		}
		else {
			printk(KERN_ERR DEV_LABEL "(itf %d): RX indication "
			    "for non-existing channel\n",dev->number);
			size = 0;
			vcc = NULL;
			event_dump();
		}
		if (error) {
			static unsigned long silence = 0;
			static int last_error = 0;

			if (error != last_error ||
			    time_after(jiffies, silence)  || silence == 0){
				printk(KERN_WARNING DEV_LABEL "(itf %d): "
				    "chan %d error %s\n",dev->number,chan,
				    err_txt[error]);
				last_error = error;
				silence = (jiffies+2*HZ)|1;
			}
			size = 0;
		}
		if (size && (size > cells*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER ||
		    size <= (cells-1)*ATM_CELL_PAYLOAD-ATM_AAL5_TRAILER)) {
			printk(KERN_ERR DEV_LABEL "(itf %d): size %d with %d "
			    "cells\n",dev->number,size,cells);
			size = 0;
			event_dump();
		}
		if (size > ATM_MAX_AAL5_PDU) {
			printk(KERN_ERR DEV_LABEL "(itf %d): size too big "
			    "(%d)\n",dev->number,size);
			size = 0;
			event_dump();
		}
		if (!size) {
			dev_kfree_skb_irq(skb);
			if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
			continue;
		}
		if (!atm_charge(vcc,skb->truesize)) {
			dev_kfree_skb_irq(skb);
			continue;
		}
		skb->len = size;
		ATM_SKB(skb)->vcc = vcc;
		vcc->push(vcc,skb);
		atomic_inc_unchecked(&vcc->stats->rx);
	}
}
Example #18
void synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;

	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);

	local_irq_save(flags);

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= 2' */
		while (atomic_read_unchecked(&count_count_start) != 1)
			mb();
		atomic_set_unchecked(&count_count_stop, 0);
		smp_wmb();

		/* Let the slave write its count register */
		atomic_inc_unchecked(&count_count_start);

		/* Count will be initialised to current timer */
		if (i == 1)
			initcount = read_c0_count();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/*
		 * Wait for slave to leave the synchronization point:
		 */
		while (atomic_read_unchecked(&count_count_stop) != 1)
			mb();
		atomic_set_unchecked(&count_count_start, 0);
		smp_wmb();
		atomic_inc_unchecked(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people
	 */
	printk("done.\n");
}
Example #19
int
ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
		struct ip_vs_iphdr *ipvsh)
{
	struct rt6_info	*rt;	/* Route to the other host */
	int rc;
	int local;
	int rt_mode;

	EnterFunction(10);

	/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
	   forwarded directly here, because there is no need to
	   translate address/port back */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
		if (cp->packet_xmit)
			rc = cp->packet_xmit(skb, cp, pp, ipvsh);
		else
			rc = NF_ACCEPT;
		/* do not touch skb anymore */
		atomic_inc_unchecked(&cp->in_pkts);
		goto out;
	}

	/*
	 * mangle and send the packet here (only for VS/NAT)
	 */

	/* LOCALNODE from FORWARD hook is not supported */
	rt_mode = (hooknum != NF_INET_FORWARD) ?
		  IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
		  IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
	rcu_read_lock();
	local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
				      ipvsh, 0, rt_mode);
	if (local < 0)
		goto tx_error;
	rt = (struct rt6_info *) skb_dst(skb);
	/*
	 * Avoid duplicate tuple in reply direction for NAT traffic
	 * to local address when connection is sync-ed
	 */
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
		enum ip_conntrack_info ctinfo;
		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

		if (ct && !nf_ct_is_untracked(ct)) {
			IP_VS_DBG(10, "%s(): "
				  "stopping DNAT to local address %pI6\n",
				  __func__, &cp->daddr.in6);
			goto tx_error;
		}
	}
#endif

	/* From world but DNAT to loopback address? */
	if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
	    ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
		IP_VS_DBG(1, "%s(): "
			  "stopping DNAT to loopback %pI6\n",
			  __func__, &cp->daddr.in6);
		goto tx_error;
	}

	/* copy-on-write the packet before mangling it */
	if (!skb_make_writable(skb, offset))
		goto tx_error;

	if (skb_cow(skb, rt->dst.dev->hard_header_len))
		goto tx_error;

	ip_vs_nat_icmp_v6(skb, pp, cp, 0);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
	rcu_read_unlock();
	goto out;

tx_error:
	kfree_skb(skb);
	rcu_read_unlock();
	rc = NF_STOLEN;
out:
	LeaveFunction(10);
	return rc;
}
Example #20
/*
 * Use RDMA_READ to read data from the advertised client buffer into the
 * XDR stream starting at rq_arg.head[0].iov_base.
 * Each chunk in the array
 * contains the following fields:
 * discrim      - '1', This isn't used for data placement
 * position     - The xdr stream offset (the same for every chunk)
 * handle       - RMR for client memory region
 * length       - data transfer length
 * offset       - 64 bit tagged offset in remote memory region
 *
 * On our side, we need to read into a pagelist. The first page immediately
 * follows the RPC header.
 *
 * This function returns:
 * 0 - No error and no read-list found.
 *
 * 1 - Successful read-list processing. The data is not yet in
 * the pagelist and therefore the RPC request must be deferred. The
 * I/O completion will enqueue the transport again and
 * svc_rdma_recvfrom will complete the request.
 *
 * <0 - Error processing/posting read-list.
 *
 * NOTE: The ctxt must not be touched after the last WR has been posted
 * because the I/O completion processing may occur on another
 * processor and free / modify the context. Ne touche pas!
 */
static int rdma_read_xdr(struct svcxprt_rdma *xprt,
			 struct rpcrdma_msg *rmsgp,
			 struct svc_rqst *rqstp,
			 struct svc_rdma_op_ctxt *hdr_ctxt)
{
	struct ib_send_wr read_wr;
	struct ib_send_wr inv_wr;
	int err = 0;
	int ch_no;
	int ch_count;
	int byte_count;
	int sge_count;
	u64 sgl_offset;
	struct rpcrdma_read_chunk *ch;
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct svc_rdma_req_map *rpl_map;
	struct svc_rdma_req_map *chl_map;

	/* If no read list is present, return 0 */
	ch = svc_rdma_get_read_chunk(rmsgp);
	if (!ch)
		return 0;

	svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
	if (ch_count > RPCSVC_MAXPAGES)
		return -EINVAL;

	/* Allocate temporary reply and chunk maps */
	rpl_map = svc_rdma_get_req_map();
	chl_map = svc_rdma_get_req_map();

	if (!xprt->sc_frmr_pg_list_len)
		sge_count = map_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp,
					    rpl_map, chl_map, ch_count,
					    byte_count);
	else
		sge_count = fast_reg_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp,
						 rpl_map, chl_map, ch_count,
						 byte_count);
	if (sge_count < 0) {
		err = -EIO;
		goto out;
	}

	sgl_offset = 0;
	ch_no = 0;

	for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	     ch->rc_discrim != 0; ch++, ch_no++) {
next_sge:
		ctxt = svc_rdma_get_context(xprt);
		ctxt->direction = DMA_FROM_DEVICE;
		ctxt->frmr = hdr_ctxt->frmr;
		ctxt->read_hdr = NULL;
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
		clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);

		/* Prepare READ WR */
		memset(&read_wr, 0, sizeof read_wr);
		read_wr.wr_id = (unsigned long)ctxt;
		read_wr.opcode = IB_WR_RDMA_READ;
		ctxt->wr_op = read_wr.opcode;
		read_wr.send_flags = IB_SEND_SIGNALED;
		read_wr.wr.rdma.rkey = ch->rc_target.rs_handle;
		read_wr.wr.rdma.remote_addr =
			get_unaligned(&(ch->rc_target.rs_offset)) +
			sgl_offset;
		read_wr.sg_list = ctxt->sge;
		read_wr.num_sge =
			rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
		err = rdma_set_ctxt_sge(xprt, ctxt, hdr_ctxt->frmr,
					&rpl_map->sge[chl_map->ch[ch_no].start],
					&sgl_offset,
					read_wr.num_sge);
		if (err) {
			svc_rdma_unmap_dma(ctxt);
			svc_rdma_put_context(ctxt, 0);
			goto out;
		}
		if (((ch+1)->rc_discrim == 0) &&
		    (read_wr.num_sge == chl_map->ch[ch_no].count)) {
			/*
			 * Mark the last RDMA_READ with a bit to
			 * indicate all RPC data has been fetched from
			 * the client and the RPC needs to be enqueued.
			 */
			set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
			if (hdr_ctxt->frmr) {
				set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
				/*
				 * Invalidate the local MR used to map the data
				 * sink.
				 */
				if (xprt->sc_dev_caps &
				    SVCRDMA_DEVCAP_READ_W_INV) {
					read_wr.opcode =
						IB_WR_RDMA_READ_WITH_INV;
					ctxt->wr_op = read_wr.opcode;
					read_wr.ex.invalidate_rkey =
						ctxt->frmr->mr->lkey;
				} else {
					/* Prepare INVALIDATE WR */
					memset(&inv_wr, 0, sizeof inv_wr);
					inv_wr.opcode = IB_WR_LOCAL_INV;
					inv_wr.send_flags = IB_SEND_SIGNALED;
					inv_wr.ex.invalidate_rkey =
						hdr_ctxt->frmr->mr->lkey;
					read_wr.next = &inv_wr;
				}
			}
			ctxt->read_hdr = hdr_ctxt;
		}
		/* Post the read */
		err = svc_rdma_send(xprt, &read_wr);
		if (err) {
			printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n",
			       err);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_unmap_dma(ctxt);
			svc_rdma_put_context(ctxt, 0);
			goto out;
		}
		atomic_inc_unchecked(&rdma_stat_read);

		if (read_wr.num_sge < chl_map->ch[ch_no].count) {
			chl_map->ch[ch_no].count -= read_wr.num_sge;
			chl_map->ch[ch_no].start += read_wr.num_sge;
			goto next_sge;
		}
		sgl_offset = 0;
		err = 1;
	}

 out:
	svc_rdma_put_req_map(rpl_map);
	svc_rdma_put_req_map(chl_map);

	/* Detach arg pages. svc_recv will replenish them */
	for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++)
		rqstp->rq_pages[ch_no] = NULL;

	/*
	 * Detach res pages. svc_release must see a resused count of
	 * zero or it will attempt to put them.
	 */
	while (rqstp->rq_resused)
		rqstp->rq_respages[--rqstp->rq_resused] = NULL;

	return err;
}