Example #1
0
/* Unpack LOV object metadata from disk storage.  It is packed in LE byte
 * order and is opaque to the networking layer.
 *
 * Supported calling modes:
 *   lsmp == NULL            : size the in-memory lsm only (currently LBUG).
 *   *lsmp != NULL, lmm NULL : free the existing lsm and return 0.
 *   otherwise               : allocate an lsm and, when lmm is supplied,
 *                             unpack the wire data into it.
 */
int lov_unpackmd(struct obd_export *exp,  struct lov_stripe_md **lsmp,
                 struct lov_mds_md *lmm, int lmm_bytes)
{
        struct obd_device *obd = class_exp2obd(exp);
        struct lov_obd *lov = &obd->u.lov;
        __u32 magic;
        __u32 pattern;
        __u16 stripe_count;
        int lsm_size;
        int rc = 0;
        ENTRY;

        /* Take layout parameters from the on-disk MD when one is given,
         * otherwise fall back to the LOV defaults. */
        if (lmm == NULL) {
                magic = LOV_MAGIC;
                pattern = LOV_PATTERN_RAID0;
                stripe_count = lov_get_stripecnt(lov, magic, 0);
        } else {
                rc = lov_verify_lmm(lmm, lmm_bytes, &stripe_count);
                if (rc != 0)
                        RETURN(rc);
                magic = le32_to_cpu(lmm->lmm_magic);
                pattern = le32_to_cpu(lmm->lmm_pattern);
        }

        /* Caller only wants the in-memory size, not an allocation. */
        if (lsmp == NULL) {
                /* XXX LOV STACKING call into osc for sizes */
                LBUG();
                RETURN(lov_stripe_md_size(stripe_count));
        }

        /* An allocated lsm but nothing to unpack means "free it". */
        if (*lsmp != NULL && lmm == NULL) {
                lov_free_memmd(lsmp);
                RETURN(0);
        }

        lsm_size = lov_alloc_memmd(lsmp, stripe_count, pattern, magic);
        if (lsm_size < 0)
                RETURN(lsm_size);

        /* Pointer supplied but no wire data: the allocation is all. */
        if (lmm == NULL)
                RETURN(lsm_size);

        /* Dispatch to the per-magic unpack method; the lsm is released
         * again if unpacking fails. */
        LASSERT(lsm_op_find(magic) != NULL);
        rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm);
        if (rc != 0) {
                lov_free_memmd(lsmp);
                RETURN(rc);
        }

        RETURN(lsm_size);
}
Example #2
0
/* Validate a userland striping request and build the in-memory lsm.
 *
 * \param exp          export used to reach the LOV device
 * \param max_lmm_size if non-zero, cap the stripe count so the packed
 *                     lov_mds_md still fits in this many bytes
 * \param lsmp         out: newly allocated stripe metadata on success
 * \param lump         userland layout (V1 or V3, possibly byte-swapped)
 *
 * \retval 0 on success, negative errno on failure.
 */
static int __lov_setstripe(struct obd_export *exp, int max_lmm_size,
			   struct lov_stripe_md **lsmp,
			   struct lov_user_md *lump)
{
	struct obd_device *obd = class_exp2obd(exp);
	struct lov_obd *lov = &obd->u.lov;
	char buffer[sizeof(struct lov_user_md_v3)];
	struct lov_user_md_v3 *lumv3 = (struct lov_user_md_v3 *)&buffer[0];
	struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&buffer[0];
	int lmm_magic;
	__u16 stripe_count;
	int rc;
	int cplen = 0;

	/* Copy lump into the local buffer, byte-swapping if it arrived in
	 * the wrong endianness; lmm_magic reports the detected version. */
	rc = lov_lum_swab_if_needed(lumv3, &lmm_magic, lump);
	if (rc)
		return rc;

	/* in the rest of the tests, as *lumv1 and lumv3 have the same
	 * fields, we use lumv1 to avoid code duplication */

	/* Pattern 0 means "use the filesystem default". */
	if (lumv1->lmm_pattern == 0) {
		lumv1->lmm_pattern = lov->desc.ld_pattern ?
			lov->desc.ld_pattern : LOV_PATTERN_RAID0;
	}

	if (lov_pattern(lumv1->lmm_pattern) != LOV_PATTERN_RAID0) {
		CDEBUG(D_IOCTL, "bad userland stripe pattern: %#x\n",
		       lumv1->lmm_pattern);
		return -EINVAL;
	}

	/* 64kB is the largest common page size we see (ia64), and matches the
	 * check in lfs */
	if (lumv1->lmm_stripe_size & (LOV_MIN_STRIPE_SIZE - 1)) {
		CDEBUG(D_IOCTL, "stripe size %u not multiple of %u, fixing\n",
		       lumv1->lmm_stripe_size, LOV_MIN_STRIPE_SIZE);
		lumv1->lmm_stripe_size = LOV_MIN_STRIPE_SIZE;
	}

	/* -1 means "any OST"; anything else must be a valid target index. */
	if ((lumv1->lmm_stripe_offset >= lov->desc.ld_tgt_count) &&
	    (lumv1->lmm_stripe_offset !=
	     (typeof(lumv1->lmm_stripe_offset))(-1))) {
		CDEBUG(D_IOCTL, "stripe offset %u > number of OSTs %u\n",
		       lumv1->lmm_stripe_offset, lov->desc.ld_tgt_count);
		return -EINVAL;
	}
	stripe_count = lov_get_stripecnt(lov, lmm_magic,
					 lumv1->lmm_stripe_count);

	/* Clamp the stripe count so the packed EA fits in max_lmm_size. */
	if (max_lmm_size) {
		int max_stripes = (max_lmm_size -
				   lov_mds_md_size(0, lmm_magic)) /
				   sizeof(struct lov_ost_data_v1);
		if (unlikely(max_stripes < stripe_count)) {
			CDEBUG(D_IOCTL, "stripe count reset from %d to %d\n",
			       stripe_count, max_stripes);
			stripe_count = max_stripes;
		}
	}

	/* V3 layouts may name an OST pool; restrict offset and count to it. */
	if (lmm_magic == LOV_USER_MAGIC_V3) {
		struct pool_desc *pool;

		/* In the function below, .hs_keycmp resolves to
		 * pool_hashkey_keycmp() */
		/* coverity[overrun-buffer-val] */
		pool = lov_find_pool(lov, lumv3->lmm_pool_name);
		if (pool != NULL) {
			if (lumv3->lmm_stripe_offset !=
			    (typeof(lumv3->lmm_stripe_offset))(-1)) {
				rc = lov_check_index_in_pool(
					lumv3->lmm_stripe_offset, pool);
				if (rc < 0) {
					lov_pool_putref(pool);
					return -EINVAL;
				}
			}

			if (stripe_count > pool_tgt_count(pool))
				stripe_count = pool_tgt_count(pool);

			lov_pool_putref(pool);
		}
	}

	/* Released (HSM-archived) files carry no stripe objects. */
	if (lumv1->lmm_pattern & LOV_PATTERN_F_RELEASED)
		stripe_count = 0;

	/* lov_alloc_memmd() returns the lsm size (>= 0) on success. */
	rc = lov_alloc_memmd(lsmp, stripe_count, lumv1->lmm_pattern, lmm_magic);
	if (rc < 0)
		return rc;

	(*lsmp)->lsm_oinfo[0]->loi_ost_idx = lumv1->lmm_stripe_offset;
	(*lsmp)->lsm_stripe_size = lumv1->lmm_stripe_size;
	if (lmm_magic == LOV_USER_MAGIC_V3) {
		cplen = strlcpy((*lsmp)->lsm_pool_name,
				lumv3->lmm_pool_name,
				sizeof((*lsmp)->lsm_pool_name));
		/* Previously this error was computed and then clobbered by an
		 * unconditional "rc = 0", silently accepting a truncated pool
		 * name; report it and release the lsm so nothing leaks. */
		if (cplen >= sizeof((*lsmp)->lsm_pool_name)) {
			lov_free_memmd(lsmp);
			return -E2BIG;
		}
	}

	return 0;
}
Example #3
0
/* Pack LOV object metadata for disk storage.  It is packed in LE byte
 * order and is opaque to the networking layer.
 *
 * Calling modes (derived from the checks below):
 *   lsm != NULL, lmmp == NULL  : size the EA only, return the byte count.
 *   lsm == NULL, *lmmp != NULL : free *lmmp, return 0.
 *   lsm != NULL                : (allocate if needed and) pack into *lmmp.
 *
 * XXX In the future, this will be enhanced to get the EA size from the
 *     underlying OSC device(s) to get their EA sizes so we can stack
 *     LOVs properly.  For now lov_mds_md_size() just assumes one obd_id
 *     per stripe.
 */
int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
	       struct lov_stripe_md *lsm)
{
	struct obd_device *obd = class_exp2obd(exp);
	struct lov_obd *lov = &obd->u.lov;
	struct lov_mds_md_v1 *lmmv1;
	struct lov_mds_md_v3 *lmmv3;
	__u16 stripe_count;
	struct lov_ost_data_v1 *lmm_objects;
	int lmm_size, lmm_magic;
	int i;
	int cplen = 0;

	/* Determine the wire format: from the lsm if given, else from an
	 * existing packed buffer, else default to LOV_MAGIC. */
	if (lsm) {
		lmm_magic = lsm->lsm_magic;
	} else {
		if (lmmp && *lmmp)
			lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
		else
			/* lsm == NULL and lmmp == NULL */
			lmm_magic = LOV_MAGIC;
	}

	if ((lmm_magic != LOV_MAGIC_V1) &&
	    (lmm_magic != LOV_MAGIC_V3)) {
		CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
			lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
		return -EINVAL;

	}

	if (lsm) {
		/* If we are just sizing the EA, limit the stripe count
		 * to the actual number of OSTs in this filesystem. */
		if (!lmmp) {
			stripe_count = lov_get_stripecnt(lov, lmm_magic,
							lsm->lsm_stripe_count);
			lsm->lsm_stripe_count = stripe_count;
		} else if (!lsm_is_released(lsm)) {
			stripe_count = lsm->lsm_stripe_count;
		} else {
			/* released (HSM-archived) files pack no objects */
			stripe_count = 0;
		}
	} else {
		/* No need to allocate more than maximum supported stripes.
		 * Anyway, this is pretty inaccurate since ld_tgt_count now
		 * represents max index and we should rely on the actual number
		 * of OSTs instead */
		stripe_count = lov_mds_md_max_stripe_count(
			lov->lov_ocd.ocd_max_easize, lmm_magic);

		if (stripe_count > lov->desc.ld_tgt_count)
			stripe_count = lov->desc.ld_tgt_count;
	}

	/* XXX LOV STACKING call into osc for sizes */
	lmm_size = lov_mds_md_size(stripe_count, lmm_magic);

	/* Size-only query: nothing to pack. */
	if (!lmmp)
		return lmm_size;

	/* Existing buffer but no lsm: the caller wants it freed.  The size
	 * is recomputed from the buffer's own stripe count, which may differ
	 * from the stripe_count derived above. */
	if (*lmmp && !lsm) {
		stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count);
		lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
		OBD_FREE_LARGE(*lmmp, lmm_size);
		*lmmp = NULL;
		return 0;
	}

	if (!*lmmp) {
		OBD_ALLOC_LARGE(*lmmp, lmm_size);
		if (!*lmmp)
			return -ENOMEM;
	}

	CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d \n",
	       lmm_magic, lmm_size);

	lmmv1 = *lmmp;
	lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
	if (lmm_magic == LOV_MAGIC_V3)
		lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
	else
		lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);

	if (!lsm)
		return lmm_size;

	/* lmmv1 and lmmv3 point to the same struct and have the
	 * same first fields
	 */
	lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
	lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
	lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count);
	lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
	lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
	if (lsm->lsm_magic == LOV_MAGIC_V3) {
		cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
				sizeof(lmmv3->lmm_pool_name));
		/* NOTE(review): if *lmmp was allocated just above, returning
		 * here without freeing it looks like a leak — TODO confirm
		 * against callers' error handling. */
		if (cplen >= sizeof(lmmv3->lmm_pool_name))
			return -E2BIG;
		lmm_objects = lmmv3->lmm_objects;
	} else {
		lmm_objects = lmmv1->lmm_objects;
	}

	/* Pack each stripe object in LE byte order. */
	for (i = 0; i < stripe_count; i++) {
		struct lov_oinfo *loi = lsm->lsm_oinfo[i];
		/* XXX LOV STACKING call down to osc_packmd() to do packing */
		LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
			 " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
			 i, stripe_count, loi->loi_ost_idx);
		ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
		lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
		lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
	}

	return lmm_size;
}
Example #4
0
/* Validate a userland striping request (copied from user space) and build
 * the in-memory lsm.  Older variant: swabbing is done inline and the pool
 * name is copied with strncpy.
 */
static int __lov_setstripe(struct obd_export *exp, int max_lmm_size,
                           struct lov_stripe_md **lsmp,
                           struct lov_user_md *lump)
{
    struct obd_device *obd = class_exp2obd(exp);
    struct lov_obd *lov = &obd->u.lov;
    /* Local buffer large enough for the bigger V3 layout; lumv1/lumv3
     * alias it so the common leading fields can be used for both. */
    char buffer[sizeof(struct lov_user_md_v3)];
    struct lov_user_md_v3 *lumv3 = (struct lov_user_md_v3 *)&buffer[0];
    struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&buffer[0];
    int lmm_magic;
    int stripe_count;
    int rc;
    ENTRY;

    /* First copy only the V1-sized prefix; enough to read lmm_magic. */
    if (cfs_copy_from_user(lumv3, lump, sizeof(struct lov_user_md_v1)))
        RETURN(-EFAULT);

    lmm_magic = lumv1->lmm_magic;

    /* Detect version and endianness from the magic, re-copying the full
     * V3 struct when needed and byte-swapping swabbed input in place. */
    if (lmm_magic == __swab32(LOV_USER_MAGIC_V1)) {
        lustre_swab_lov_user_md_v1(lumv1);
        lmm_magic = LOV_USER_MAGIC_V1;
    } else if (lmm_magic == LOV_USER_MAGIC_V3) {
        if (cfs_copy_from_user(lumv3, lump, sizeof(*lumv3)))
            RETURN(-EFAULT);
    } else if (lmm_magic == __swab32(LOV_USER_MAGIC_V3)) {
        if (cfs_copy_from_user(lumv3, lump, sizeof(*lumv3)))
            RETURN(-EFAULT);
        lustre_swab_lov_user_md_v3(lumv3);
        lmm_magic = LOV_USER_MAGIC_V3;
    } else if (lmm_magic != LOV_USER_MAGIC_V1) {
        CDEBUG(D_IOCTL,
               "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
               lmm_magic, LOV_USER_MAGIC_V1, LOV_USER_MAGIC_V3);
        RETURN(-EINVAL);
    }

    /* in the rest of the tests, as *lumv1 and lumv3 have the same
     * fields, we use lumv1 to avoid code duplication */

    /* Pattern 0 means "use the filesystem default". */
    if (lumv1->lmm_pattern == 0) {
        lumv1->lmm_pattern = lov->desc.ld_pattern ?
                             lov->desc.ld_pattern : LOV_PATTERN_RAID0;
    }

    if (lumv1->lmm_pattern != LOV_PATTERN_RAID0) {
        CDEBUG(D_IOCTL, "bad userland stripe pattern: %#x\n",
               lumv1->lmm_pattern);
        RETURN(-EINVAL);
    }

    /* 64kB is the largest common page size we see (ia64), and matches the
     * check in lfs */
    if (lumv1->lmm_stripe_size & (LOV_MIN_STRIPE_SIZE - 1)) {
        CDEBUG(D_IOCTL, "stripe size %u not multiple of %u, fixing\n",
               lumv1->lmm_stripe_size, LOV_MIN_STRIPE_SIZE);
        lumv1->lmm_stripe_size = LOV_MIN_STRIPE_SIZE;
    }

    /* -1 means "any OST"; anything else must be a valid target index. */
    if ((lumv1->lmm_stripe_offset >= lov->desc.ld_tgt_count) &&
            (lumv1->lmm_stripe_offset !=
             (typeof(lumv1->lmm_stripe_offset))(-1))) {
        CDEBUG(D_IOCTL, "stripe offset %u > number of OSTs %u\n",
               lumv1->lmm_stripe_offset, lov->desc.ld_tgt_count);
        RETURN(-EINVAL);
    }
    stripe_count = lov_get_stripecnt(lov, lumv1->lmm_stripe_count);

    /* Clamp the stripe count so the packed EA fits in max_lmm_size. */
    if (max_lmm_size) {
        int max_stripes = (max_lmm_size -
                           lov_mds_md_size(0, lmm_magic)) /
                          sizeof(struct lov_ost_data_v1);
        if (unlikely(max_stripes < stripe_count)) {
            CDEBUG(D_IOCTL, "stripe count reset from %d to %d\n",
                   stripe_count, max_stripes);
            stripe_count = max_stripes;
        }
    }

    /* V3 layouts may name an OST pool; restrict offset and count to it. */
    if (lmm_magic == LOV_USER_MAGIC_V3) {
        struct pool_desc *pool;

        pool = lov_find_pool(lov, lumv3->lmm_pool_name);
        if (pool != NULL) {
            if (lumv3->lmm_stripe_offset !=
                    (typeof(lumv3->lmm_stripe_offset))(-1)) {
                rc = lov_check_index_in_pool(
                         lumv3->lmm_stripe_offset, pool);
                if (rc < 0) {
                    lov_pool_putref(pool);
                    RETURN(-EINVAL);
                }
            }

            if (stripe_count > pool_tgt_count(pool))
                stripe_count = pool_tgt_count(pool);

            lov_pool_putref(pool);
        }
    }

    /* lov_alloc_memmd() returns the lsm size (>= 0) on success. */
    rc = lov_alloc_memmd(lsmp, stripe_count, lumv1->lmm_pattern, lmm_magic);

    if (rc >= 0) {
        (*lsmp)->lsm_oinfo[0]->loi_ost_idx = lumv1->lmm_stripe_offset;
        (*lsmp)->lsm_stripe_size = lumv1->lmm_stripe_size;
        /* NOTE(review): strncpy() does not guarantee NUL termination if
         * the source fills LOV_MAXPOOLNAME bytes; presumably the lsm's
         * pool-name buffer is one byte longer and zeroed on allocation —
         * TODO confirm (later versions switched to strlcpy). */
        if (lmm_magic == LOV_USER_MAGIC_V3)
            strncpy((*lsmp)->lsm_pool_name, lumv3->lmm_pool_name,
                    LOV_MAXPOOLNAME);
        rc = 0;
    }

    RETURN(rc);
}
Example #5
0
/* Pack LOV object metadata for disk storage.  It is packed in LE byte
 * order and is opaque to the networking layer.
 *
 * Calling modes (derived from the checks below):
 *   lsm != NULL, lmmp == NULL  : size the EA only, return the byte count.
 *   lsm == NULL, *lmmp != NULL : free *lmmp, return 0.
 *   lsm != NULL                : (allocate if needed and) pack into *lmmp.
 *
 * XXX In the future, this will be enhanced to get the EA size from the
 *     underlying OSC device(s) to get their EA sizes so we can stack
 *     LOVs properly.  For now lov_mds_md_size() just assumes one obd_id
 *     per stripe.
 */
int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
               struct lov_stripe_md *lsm)
{
    struct obd_device *obd = class_exp2obd(exp);
    struct lov_obd *lov = &obd->u.lov;
    struct lov_mds_md_v1 *lmmv1;
    struct lov_mds_md_v3 *lmmv3;
    int stripe_count;
    struct lov_ost_data_v1 *lmm_objects;
    int lmm_size, lmm_magic;
    int i;
    ENTRY;

    if (lsm) {
        lmm_magic = lsm->lsm_magic;

        /* If we are just sizing the EA, limit the stripe count
         * to the actual number of OSTs in this filesystem. */
        if (!lmmp) {
            stripe_count = lov_get_stripecnt(lov,
                                             lsm->lsm_stripe_count);
            lsm->lsm_stripe_count = stripe_count;
        } else {
            stripe_count = lsm->lsm_stripe_count;
        }
    } else {
        /* No needs to allocated more than LOV_MAX_STRIPE_COUNT.
         * Anyway, this is pretty inaccurate since ld_tgt_count now
         * represents max index and we should rely on the actual number
         * of OSTs instead */
        stripe_count = min((__u32)LOV_MAX_STRIPE_COUNT,
                           lov->desc.ld_tgt_count);

        /* No lsm: take the magic from an existing buffer, or default. */
        if (lmmp && *lmmp)
            lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
        else
            /* lsm == NULL and lmmp == NULL */
            lmm_magic = LOV_MAGIC;
    }

    if ((lmm_magic != LOV_MAGIC_V1) &&
            (lmm_magic != LOV_MAGIC_V3)) {
        CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
               lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
        RETURN(-EINVAL);

    }

    /* XXX LOV STACKING call into osc for sizes */
    lmm_size = lov_mds_md_size(stripe_count, lmm_magic);

    /* Size-only query: nothing to pack. */
    if (!lmmp)
        RETURN(lmm_size);

    /* Existing buffer but no lsm: the caller wants it freed.  The size
     * is recomputed from the buffer's own stripe count (32-bit on this
     * older wire format, matching the cpu_to_le32 below). */
    if (*lmmp && !lsm) {
        stripe_count = le32_to_cpu((*lmmp)->lmm_stripe_count);
        lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
        OBD_FREE(*lmmp, lmm_size);
        *lmmp = NULL;
        RETURN(0);
    }

    if (!*lmmp) {
        OBD_ALLOC(*lmmp, lmm_size);
        if (!*lmmp)
            RETURN(-ENOMEM);
    }

    CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d \n",
           lmm_magic, lmm_size);

    lmmv1 = *lmmp;
    lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
    if (lmm_magic == LOV_MAGIC_V3)
        lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
    else
        lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);

    if (!lsm)
        RETURN(lmm_size);

    /* lmmv1 and lmmv3 point to the same struct and have the
     * same first fields
     */
    lmmv1->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
    lmmv1->lmm_object_seq = cpu_to_le64(lsm->lsm_object_seq);
    lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
    lmmv1->lmm_stripe_count = cpu_to_le32(stripe_count);
    lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
    if (lsm->lsm_magic == LOV_MAGIC_V3) {
        /* NOTE(review): strncpy() may leave lmm_pool_name without a NUL
         * if the source fills LOV_MAXPOOLNAME bytes; presumably safe
         * because the buffer is zeroed on allocation — TODO confirm. */
        strncpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
                LOV_MAXPOOLNAME);
        lmm_objects = lmmv3->lmm_objects;
    } else {
        lmm_objects = lmmv1->lmm_objects;
    }

    /* Pack each stripe object in LE byte order. */
    for (i = 0; i < stripe_count; i++) {
        struct lov_oinfo *loi = lsm->lsm_oinfo[i];
        /* XXX LOV STACKING call down to osc_packmd() to do packing */
        LASSERTF(loi->loi_id, "lmm_oid "LPU64" stripe %u/%u idx %u\n",
                 lmmv1->lmm_object_id, i, stripe_count, loi->loi_ost_idx);
        lmm_objects[i].l_object_id = cpu_to_le64(loi->loi_id);
        lmm_objects[i].l_object_seq = cpu_to_le64(loi->loi_seq);
        lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
        lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
    }

    RETURN(lmm_size);
}
Example #6
0
File: lov_pack.c  Project: acton393/linux
/* Pack LOV object metadata for disk storage.  It is packed in LE byte
 * order and is opaque to the networking layer.
 *
 * Calling modes (derived from the checks below):
 *   lsm != NULL, lmmp == NULL  : size the EA only, return the byte count.
 *   lsm == NULL, *lmmp != NULL : free *lmmp, return 0.
 *   lsm != NULL                : (allocate if needed and) pack into *lmmp.
 *
 * XXX In the future, this will be enhanced to get the EA size from the
 *     underlying OSC device(s) to get their EA sizes so we can stack
 *     LOVs properly.  For now lov_mds_md_size() just assumes one u64
 *     per stripe.
 */
int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp,
		   struct lov_stripe_md *lsm)
{
	struct lov_mds_md_v1 *lmmv1;
	struct lov_mds_md_v3 *lmmv3;
	__u16 stripe_count;
	struct lov_ost_data_v1 *lmm_objects;
	int lmm_size, lmm_magic;
	int i;
	int cplen = 0;

	/* Determine the wire format: from the lsm if given, else from an
	 * existing packed buffer, else default to LOV_MAGIC. */
	if (lsm) {
		lmm_magic = lsm->lsm_magic;
	} else {
		if (lmmp && *lmmp)
			lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
		else
			/* lsm == NULL and lmmp == NULL */
			lmm_magic = LOV_MAGIC;
	}

	if ((lmm_magic != LOV_MAGIC_V1) &&
	    (lmm_magic != LOV_MAGIC_V3)) {
		CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
		       lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
		return -EINVAL;
	}

	if (lsm) {
		/* If we are just sizing the EA, limit the stripe count
		 * to the actual number of OSTs in this filesystem.
		 */
		if (!lmmp) {
			stripe_count = lov_get_stripecnt(lov, lmm_magic,
							 lsm->lsm_stripe_count);
			lsm->lsm_stripe_count = stripe_count;
		} else if (!lsm_is_released(lsm)) {
			stripe_count = lsm->lsm_stripe_count;
		} else {
			/* released (HSM-archived) files pack no objects */
			stripe_count = 0;
		}
	} else {
		/*
		 * To calculate maximum easize by active targets at present,
		 * which is exactly the maximum easize to be seen by LOV
		 */
		stripe_count = lov->desc.ld_active_tgt_count;
	}

	/* XXX LOV STACKING call into osc for sizes */
	lmm_size = lov_mds_md_size(stripe_count, lmm_magic);

	/* Size-only query: nothing to pack. */
	if (!lmmp)
		return lmm_size;

	/* Existing buffer but no lsm: the caller wants it freed. */
	if (*lmmp && !lsm) {
		stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count);
		lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
		kvfree(*lmmp);
		*lmmp = NULL;
		return 0;
	}

	if (!*lmmp) {
		*lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
		if (!*lmmp)
			return -ENOMEM;
	}

	CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n",
	       lmm_magic, lmm_size);

	lmmv1 = *lmmp;
	lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
	if (lmm_magic == LOV_MAGIC_V3)
		lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
	else
		lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);

	if (!lsm)
		return lmm_size;

	/* lmmv1 and lmmv3 point to the same struct and have the
	 * same first fields
	 */
	lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
	lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
	lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count);
	lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
	lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
	if (lsm->lsm_magic == LOV_MAGIC_V3) {
		cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
				sizeof(lmmv3->lmm_pool_name));
		/* NOTE(review): if *lmmp was allocated just above, returning
		 * here without freeing it looks like a leak — TODO confirm
		 * against callers' error handling. */
		if (cplen >= sizeof(lmmv3->lmm_pool_name))
			return -E2BIG;
		lmm_objects = lmmv3->lmm_objects;
	} else {
		lmm_objects = lmmv1->lmm_objects;
	}

	/* Pack each stripe object in LE byte order. */
	for (i = 0; i < stripe_count; i++) {
		struct lov_oinfo *loi = lsm->lsm_oinfo[i];
		/* XXX LOV STACKING call down to osc_packmd() to do packing */
		LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
			 " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
			 i, stripe_count, loi->loi_ost_idx);
		ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
		lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
		lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
	}

	return lmm_size;
}