Example #1
/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_rmap_rec	*rrec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno, 0);

	/*
	 * Mark the AG header regions as static metadata. The BNO
	 * btree block is the first block after the headers, so
	 * its location defines the size of the region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions.
	 */
	rrec = XFS_RMAP_REC_ADDR(block, 1);
	rrec->rm_startblock = 0;
	rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
	rrec->rm_offset = 0;

	/* account freespace btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 2);
	rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(2);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account inode btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 3);
	rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
					  XFS_IBT_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
	rrec->rm_offset = 0;

	/* account for rmap btree root */
	rrec = XFS_RMAP_REC_ADDR(block, 4);
	rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(1);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account for refc btree root */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		rrec = XFS_RMAP_REC_ADDR(block, 5);
		rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}
}
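
All of the examples in this listing revolve around be16_add_cpu(), which read-modify-writes a big-endian 16-bit field using CPU-order arithmetic; here the call grows bb_numrecs from the four records passed to xfs_btree_init_block() to five when the optional refcount btree root also needs an owner record. Below is a minimal userspace sketch of that pattern, assuming only htons()/ntohs(); the *_demo names are illustrative and not part of any kernel or XFS API.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

typedef uint16_t be16_demo;	/* stand-in for the kernel's __be16 */

/* Open-coded equivalent of be16_add_cpu(): convert to CPU order,
 * add, convert back to big-endian storage order. */
static void be16_add_cpu_demo(be16_demo *var, uint16_t val)
{
	*var = htons((uint16_t)(ntohs(*var) + val));
}

int main(void)
{
	be16_demo numrecs = htons(4);	/* the four mandatory rmap records */

	be16_add_cpu_demo(&numrecs, 1);	/* account for the refcount root */
	printf("numrecs = %u\n", ntohs(numrecs));	/* prints 5 */
	return 0;
}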
Example #2
int
ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
                     struct ip_vs_protocol *pp)
{
    struct rt6_info *rt;		/* Route to the other host */
    struct in6_addr saddr;		/* Source for tunnel */
    struct net_device *tdev;	/* Device to other host */
    struct ipv6hdr  *old_iph = ipv6_hdr(skb);
    struct ipv6hdr  *iph;		/* Our new IP header */
    unsigned int max_headroom;	/* The extra header space needed */
    int    mtu;
    int ret;

    EnterFunction(10);

    if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
                                     &saddr, 1, 1|2)))
        goto tx_error_icmp;
    if (__ip_vs_is_local_route6(rt)) {
        dst_release(&rt->dst);
        IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
    }

    tdev = rt->dst.dev;

    mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
    if (mtu < IPV6_MIN_MTU) {
        IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
                     IPV6_MIN_MTU);
        goto tx_error_put;
    }
    if (skb_dst(skb))
        skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

    if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
            !skb_is_gso(skb)) {
        if (!skb->dev) {
            struct net *net = dev_net(skb_dst(skb)->dev);

            skb->dev = net->loopback_dev;
        }
        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
        IP_VS_DBG_RL("%s(): frag needed\n", __func__);
        goto tx_error_put;
    }

    /*
     * Okay, now see if we can stuff it in the buffer as-is.
     */
    max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);

    if (skb_headroom(skb) < max_headroom
            || skb_cloned(skb) || skb_shared(skb)) {
        struct sk_buff *new_skb =
            skb_realloc_headroom(skb, max_headroom);
        if (!new_skb) {
            dst_release(&rt->dst);
            kfree_skb(skb);
            IP_VS_ERR_RL("%s(): no memory\n", __func__);
            return NF_STOLEN;
        }
        kfree_skb(skb);
        skb = new_skb;
        old_iph = ipv6_hdr(skb);
    }

    skb->transport_header = skb->network_header;

    skb_push(skb, sizeof(struct ipv6hdr));
    skb_reset_network_header(skb);
    memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

    /* drop old route */
    skb_dst_drop(skb);
    skb_dst_set(skb, &rt->dst);

    /*
     *	Push down and install the IPIP header.
     */
    iph			=	ipv6_hdr(skb);
    iph->version		=	6;
    iph->nexthdr		=	IPPROTO_IPV6;
    iph->payload_len	=	old_iph->payload_len;
    be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
    iph->priority		=	old_iph->priority;
    memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
    ipv6_addr_copy(&iph->daddr, &cp->daddr.in6);
    ipv6_addr_copy(&iph->saddr, &saddr);
    iph->hop_limit		=	old_iph->hop_limit;

    /* Another hack: avoid icmp_send in ip_fragment */
    skb->local_df = 1;

    ret = IP_VS_XMIT_TUNNEL(skb, cp);
    if (ret == NF_ACCEPT)
        ip6_local_out(skb);
    else if (ret == NF_DROP)
        kfree_skb(skb);

    LeaveFunction(10);

    return NF_STOLEN;

tx_error_icmp:
    dst_link_failure(skb);
tx_error:
    kfree_skb(skb);
    LeaveFunction(10);
    return NF_STOLEN;
tx_error_put:
    dst_release(&rt->dst);
    goto tx_error;
}
Example #3
int
ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		     struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	struct rt6_info *rt;		/* Route to the other host */
	struct in6_addr saddr;		/* Source for tunnel */
	struct net_device *tdev;	/* Device to other host */
	struct ipv6hdr  *old_iph = ipv6_hdr(skb);
	struct ipv6hdr  *iph;		/* Our new IP header */
	unsigned int max_headroom;	/* The extra header space needed */
	int ret, local;

	EnterFunction(10);

	rcu_read_lock();
	local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
				      &saddr, ipvsh, 1,
				      IP_VS_RT_MODE_LOCAL |
				      IP_VS_RT_MODE_NON_LOCAL |
				      IP_VS_RT_MODE_TUNNEL);
	if (local < 0)
		goto tx_error;
	if (local) {
		rcu_read_unlock();
		return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
	}

	rt = (struct rt6_info *) skb_dst(skb);
	tdev = rt->dst.dev;

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
		struct sk_buff *new_skb =
			skb_realloc_headroom(skb, max_headroom);

		if (!new_skb)
			goto tx_error;
		consume_skb(skb);
		skb = new_skb;
		old_iph = ipv6_hdr(skb);
	}

	skb->transport_header = skb->network_header;

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	/*
	 *	Push down and install the IPIP header.
	 */
	iph			=	ipv6_hdr(skb);
	iph->version		=	6;
	iph->nexthdr		=	IPPROTO_IPV6;
	iph->payload_len	=	old_iph->payload_len;
	be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
	iph->priority		=	old_iph->priority;
	memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
	iph->daddr = cp->daddr.in6;
	iph->saddr = saddr;
	iph->hop_limit		=	old_iph->hop_limit;

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	ret = ip_vs_tunnel_xmit_prepare(skb, cp);
	if (ret == NF_ACCEPT)
		ip6_local_out(skb);
	else if (ret == NF_DROP)
		kfree_skb(skb);
	rcu_read_unlock();

	LeaveFunction(10);

	return NF_STOLEN;

tx_error:
	kfree_skb(skb);
	rcu_read_unlock();
	LeaveFunction(10);
	return NF_STOLEN;
}
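
In both versions of ip_vs_tunnel_xmit_v6() above, the outer IPv6 header's payload_len is the inner packet's payload_len plus the 40-byte inner IPv6 header, and because payload_len is stored in network byte order the addition goes through be16_add_cpu() rather than a plain +=. A hedged userspace sketch of that length fixup follows; the _demo names and the hard-coded 40 are illustrative assumptions, not taken from the kernel sources.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define IPV6HDR_LEN_DEMO 40	/* sizeof(struct ipv6hdr) on the wire */

int main(void)
{
	uint16_t inner_payload_len = htons(1200);	/* big-endian, as in the packet */
	uint16_t outer_payload_len = inner_payload_len;

	/* be16_add_cpu(&iph->payload_len, sizeof(*old_iph)) boils down to: */
	outer_payload_len = htons((uint16_t)(ntohs(outer_payload_len) +
					     IPV6HDR_LEN_DEMO));

	printf("outer payload_len = %u\n", ntohs(outer_payload_len));	/* 1240 */
	return 0;
}

On a little-endian host a bare outer_payload_len += 40 would corrupt the field, which is exactly what the helper guards against.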
Example #4
File: ileaf.c Project: Zkin/tux3
/* Adjust a big-endian 16-bit dictionary entry by n, doing the
 * arithmetic in CPU byte order. */
static inline void add_idict(__be16 *dict, int n)
{
	be16_add_cpu(dict, n);
}
Example #5
static void
prop_freespace_cursor(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, xfs_agblock_t startblock,
		xfs_extlen_t blockcount, int level, __uint32_t magic)
{
	struct xfs_btree_block	*bt_hdr;
	xfs_alloc_key_t		*bt_key;
	xfs_alloc_ptr_t		*bt_ptr;
	xfs_agblock_t		agbno;
	bt_stat_level_t		*lptr;
	__uint32_t		crc_magic;

	if (magic == XFS_ABTB_MAGIC)
		crc_magic = XFS_ABTB_CRC_MAGIC;
	else
		crc_magic = XFS_ABTC_CRC_MAGIC;

	level++;

	if (level >= btree_curs->num_levels)
		return;

	lptr = &btree_curs->level[level];
	bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);

	if (be16_to_cpu(bt_hdr->bb_numrecs) == 0)  {
		/*
		 * only happens once when initializing the
		 * left-hand side of the tree.
		 */
		prop_freespace_cursor(mp, agno, btree_curs, startblock,
				blockcount, level, magic);
	}

	if (be16_to_cpu(bt_hdr->bb_numrecs) ==
				lptr->num_recs_pb + (lptr->modulo > 0))  {
		/*
		 * write out current prev block, grab us a new block,
		 * and set the rightsib pointer of current block
		 */
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, " %d ", lptr->prev_agbno);
#endif
		if (lptr->prev_agbno != NULLAGBLOCK) {
			ASSERT(lptr->prev_buf_p != NULL);
			libxfs_writebuf(lptr->prev_buf_p, 0);
		}
		lptr->prev_agbno = lptr->agbno;
		lptr->prev_buf_p = lptr->buf_p;
		agbno = get_next_blockaddr(agno, level, btree_curs);

		bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);

		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));
		lptr->agbno = agbno;

		if (lptr->modulo)
			lptr->modulo--;

		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, crc_magic, level,
						0, agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic, level,
						0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);

		/*
		 * propagate extent record for first extent in new block up
		 */
		prop_freespace_cursor(mp, agno, btree_curs, startblock,
				blockcount, level, magic);
	}
	/*
	 * add extent info to current block
	 */
	be16_add_cpu(&bt_hdr->bb_numrecs, 1);

	bt_key = XFS_ALLOC_KEY_ADDR(mp, bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs));
	bt_ptr = XFS_ALLOC_PTR_ADDR(mp, bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs),
				mp->m_alloc_mxr[1]);

	bt_key->ar_startblock = cpu_to_be32(startblock);
	bt_key->ar_blockcount = cpu_to_be32(blockcount);
	*bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
}
Example #6
static void
prop_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
	xfs_agino_t startino, int level)
{
	struct xfs_btree_block	*bt_hdr;
	xfs_inobt_key_t		*bt_key;
	xfs_inobt_ptr_t		*bt_ptr;
	xfs_agblock_t		agbno;
	bt_stat_level_t		*lptr;

	level++;

	if (level >= btree_curs->num_levels)
		return;

	lptr = &btree_curs->level[level];
	bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);

	if (be16_to_cpu(bt_hdr->bb_numrecs) == 0)  {
		/*
		 * this only happens once to initialize the
		 * first path up the left side of the tree
		 * where the agbno's are already set up
		 */
		prop_ino_cursor(mp, agno, btree_curs, startino, level);
	}

	if (be16_to_cpu(bt_hdr->bb_numrecs) ==
				lptr->num_recs_pb + (lptr->modulo > 0))  {
		/*
		 * write out current prev block, grab us a new block,
		 * and set the rightsib pointer of current block
		 */
#ifdef XR_BLD_INO_TRACE
		fprintf(stderr, " ino prop agbno %d ", lptr->prev_agbno);
#endif
		if (lptr->prev_agbno != NULLAGBLOCK)  {
			ASSERT(lptr->prev_buf_p != NULL);
			libxfs_writebuf(lptr->prev_buf_p, 0);
		}
		lptr->prev_agbno = lptr->agbno;
		lptr->prev_buf_p = lptr->buf_p;
		agbno = get_next_blockaddr(agno, level, btree_curs);

		bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);

		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));
		lptr->agbno = agbno;

		if (lptr->modulo)
			lptr->modulo--;

		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, XFS_IBT_CRC_MAGIC,
						level, 0, agno,
						XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, XFS_IBT_MAGIC,
						level, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);

		/*
		 * propagate extent record for first extent in new block up
		 */
		prop_ino_cursor(mp, agno, btree_curs, startino, level);
	}
	/*
	 * add inode info to current block
	 */
	be16_add_cpu(&bt_hdr->bb_numrecs, 1);

	bt_key = XFS_INOBT_KEY_ADDR(mp, bt_hdr,
				    be16_to_cpu(bt_hdr->bb_numrecs));
	bt_ptr = XFS_INOBT_PTR_ADDR(mp, bt_hdr,
				    be16_to_cpu(bt_hdr->bb_numrecs),
				    mp->m_inobt_mxr[1]);

	bt_key->ir_startino = cpu_to_be32(startino);
	*bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
}
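
Both xfs_repair routines above follow the same append idiom: be16_add_cpu() bumps the big-endian bb_numrecs first, and the new count is then used as the 1-based slot index for XFS_*_KEY_ADDR()/XFS_*_PTR_ADDR(). Here is a small userspace sketch of that ordering under an assumed fixed-size node; everything ending in _demo is a made-up illustration, not an XFS structure.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define MAXRECS_DEMO 8

struct node_demo {
	uint16_t bb_numrecs;		/* big-endian, as stored on disk */
	uint32_t keys[MAXRECS_DEMO];
};

/* Mirror the 1-based indexing of the XFS_*_KEY_ADDR() macros. */
static uint32_t *key_addr_demo(struct node_demo *n, int idx)
{
	return &n->keys[idx - 1];
}

int main(void)
{
	struct node_demo node = { .bb_numrecs = htons(2) };

	/* be16_add_cpu(&bb_numrecs, 1) first, then fill the new last slot. */
	node.bb_numrecs = htons((uint16_t)(ntohs(node.bb_numrecs) + 1));
	*key_addr_demo(&node, ntohs(node.bb_numrecs)) = 4096;

	printf("numrecs = %u, last key = %u\n", ntohs(node.bb_numrecs),
	       *key_addr_demo(&node, ntohs(node.bb_numrecs)));
	return 0;
}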