Example #1
static
int null_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
	struct ptlrpc_reply_state *rs;
	int rs_size = sizeof(*rs) + msgsize;

	LASSERT(msgsize % 8 == 0);

	rs = req->rq_reply_state;

	if (rs) {
		/* pre-allocated */
		LASSERT(rs->rs_size >= rs_size);
	} else {
		rs = libcfs_kvzalloc(rs_size, GFP_NOFS);
		if (!rs)
			return -ENOMEM;

		rs->rs_size = rs_size;
	}

	rs->rs_svc_ctx = req->rq_svc_ctx;
	atomic_inc(&req->rq_svc_ctx->sc_refcount);

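	/* rs_repbuf shares the allocation with rs: the message buffer starts
	 * right past the reply state struct (rs + 1) and gets the remaining
	 * rs_size - sizeof(*rs) bytes
	 */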
	rs->rs_repbuf = (struct lustre_msg *)(rs + 1);
	rs->rs_repbuf_len = rs_size - sizeof(*rs);
	rs->rs_msg = rs->rs_repbuf;

	req->rq_reply_state = rs;
	return 0;
}
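null_alloc_rs() above carves the reply state and its message buffer out of a single allocation; rs + 1 is the first byte past the struct. A minimal standalone sketch of the same header-plus-payload pattern, using hypothetical names and plain libc instead of the Lustre helpers:

#include <stdlib.h>

/* Illustration only: hypothetical header/payload pair, not a Lustre API. */
struct blob {
	size_t payload_len;
	/* payload bytes follow immediately after the struct */
};

static struct blob *blob_alloc(size_t payload_len)
{
	/* one allocation holds both the header and the payload */
	struct blob *b = calloc(1, sizeof(*b) + payload_len);

	if (!b)
		return NULL;
	b->payload_len = payload_len;
	return b;
}

static void *blob_payload(struct blob *b)
{
	/* same trick as rs + 1 above: first byte past the header */
	return b + 1;
}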
Example #2
struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size)
{
	struct lov_stripe_md *lsm;
	struct lov_oinfo     *loi;
	int		   i, oinfo_ptrs_size;

	LASSERT(stripe_count <= LOV_MAX_STRIPE_COUNT);

	oinfo_ptrs_size = sizeof(struct lov_oinfo *) * stripe_count;
	*size = sizeof(struct lov_stripe_md) + oinfo_ptrs_size;

	lsm = libcfs_kvzalloc(*size, GFP_NOFS);
	if (!lsm)
		return NULL;

	for (i = 0; i < stripe_count; i++) {
		loi = kmem_cache_alloc(lov_oinfo_slab, GFP_NOFS | __GFP_ZERO);
		if (!loi)
			goto err;
		lsm->lsm_oinfo[i] = loi;
	}
	lsm->lsm_stripe_count = stripe_count;
	return lsm;

err:
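	/* unwind: free only the oinfo slots allocated before the failure */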
	while (--i >= 0)
		kmem_cache_free(lov_oinfo_slab, lsm->lsm_oinfo[i]);
	kvfree(lsm);
	return NULL;
}
Example #3
static inline void enc_pools_alloc(void)
{
	LASSERT(page_pools.epp_max_pools);
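	/* NB: no error is returned here; on failure epp_pools is left NULL */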
	page_pools.epp_pools =
		libcfs_kvzalloc(page_pools.epp_max_pools *
				sizeof(*page_pools.epp_pools),
				GFP_NOFS);
}
Example #4
static
int null_enlarge_reqbuf(struct ptlrpc_sec *sec,
			struct ptlrpc_request *req,
			int segment, int newsize)
{
	struct lustre_msg *newbuf;
	struct lustre_msg *oldbuf = req->rq_reqmsg;
	int oldsize, newmsg_size, alloc_size;

	LASSERT(req->rq_reqbuf);
	LASSERT(req->rq_reqbuf == req->rq_reqmsg);
	LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
	LASSERT(req->rq_reqlen == lustre_packed_msg_size(oldbuf));

	/* compute the new message size: temporarily patch the segment length,
	 * take the packed size, then restore the old length
	 */
	oldsize = req->rq_reqbuf->lm_buflens[segment];
	req->rq_reqbuf->lm_buflens[segment] = newsize;
	newmsg_size = lustre_packed_msg_size(oldbuf);
	req->rq_reqbuf->lm_buflens[segment] = oldsize;

	/* request from pool should always have enough buffer */
	LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newmsg_size);

	if (req->rq_reqbuf_len < newmsg_size) {
		alloc_size = size_roundup_power2(newmsg_size);

		newbuf = libcfs_kvzalloc(alloc_size, GFP_NOFS);
		if (!newbuf)
			return -ENOMEM;

		/* Must lock this, so that the otherwise unprotected change of
		 * rq_reqmsg does not race with parallel processing of
		 * imp_replay_list traversing threads. See LU-3333.
		 * This is a band-aid at best; we really need to deal with
		 * this in the request enlarging code before unpacking, which
		 * is already there.
		 */
		if (req->rq_import)
			spin_lock(&req->rq_import->imp_lock);
		memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);

		kvfree(req->rq_reqbuf);
		req->rq_reqbuf = newbuf;
		req->rq_reqmsg = newbuf;
		req->rq_reqbuf_len = alloc_size;

		if (req->rq_import)
			spin_unlock(&req->rq_import->imp_lock);
	}

	_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
	req->rq_reqlen = newmsg_size;

	return 0;
}
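Examples #4, #5, and #6 round buffer sizes up to the next power of two with size_roundup_power2() before allocating; presumably this leaves slack so that later enlargements (as in null_enlarge_reqbuf() above) often fit without a reallocation. The helper's implementation is not shown here; a minimal sketch of one plausible equivalent, assuming a positive size that fits in a 32-bit int:

/* Hypothetical stand-in for size_roundup_power2(); assumes 0 < size <= 2^31. */
static int size_roundup_power2_sketch(int size)
{
	/* classic bit-smearing round-up to the next power of two */
	size--;
	size |= size >> 1;
	size |= size >> 2;
	size |= size >> 4;
	size |= size >> 8;
	size |= size >> 16;
	return size + 1;
}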
Example #5
static
int null_alloc_repbuf(struct ptlrpc_sec *sec,
		      struct ptlrpc_request *req,
		      int msgsize)
{
	/* add space for early reply */
	msgsize += lustre_msg_early_size();

	msgsize = size_roundup_power2(msgsize);

	req->rq_repbuf = libcfs_kvzalloc(msgsize, GFP_NOFS);
	if (!req->rq_repbuf)
		return -ENOMEM;

	req->rq_repbuf_len = msgsize;
	return 0;
}
Example #6
static
int null_alloc_reqbuf(struct ptlrpc_sec *sec,
		      struct ptlrpc_request *req,
		      int msgsize)
{
	if (!req->rq_reqbuf) {
		int alloc_size = size_roundup_power2(msgsize);

		LASSERT(!req->rq_pool);
		req->rq_reqbuf = libcfs_kvzalloc(alloc_size, GFP_NOFS);
		if (!req->rq_reqbuf)
			return -ENOMEM;

		req->rq_reqbuf_len = alloc_size;
	} else {
		LASSERT(req->rq_pool);
		LASSERT(req->rq_reqbuf_len >= msgsize);
		memset(req->rq_reqbuf, 0, msgsize);
	}

	req->rq_reqmsg = req->rq_reqbuf;
	return 0;
}
Example #7
/* Pack LOV object metadata for disk storage.  It is packed in LE byte
 * order and is opaque to the networking layer.
 *
 * XXX In the future, this will be enhanced to get the EA sizes from the
 *     underlying OSC device(s) so we can stack LOVs properly.  For now
 *     lov_mds_md_size() just assumes one u64 per stripe.
 */
int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
	       struct lov_stripe_md *lsm)
{
	struct obd_device *obd = class_exp2obd(exp);
	struct lov_obd *lov = &obd->u.lov;
	struct lov_mds_md_v1 *lmmv1;
	struct lov_mds_md_v3 *lmmv3;
	__u16 stripe_count;
	struct lov_ost_data_v1 *lmm_objects;
	int lmm_size, lmm_magic;
	int i;
	int cplen = 0;

	if (lsm) {
		lmm_magic = lsm->lsm_magic;
	} else {
		if (lmmp && *lmmp)
			lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
		else
			/* lsm == NULL and lmmp == NULL */
			lmm_magic = LOV_MAGIC;
	}

	if ((lmm_magic != LOV_MAGIC_V1) &&
	    (lmm_magic != LOV_MAGIC_V3)) {
		CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
		       lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
		return -EINVAL;
	}

	if (lsm) {
		/* If we are just sizing the EA, limit the stripe count
		 * to the actual number of OSTs in this filesystem.
		 */
		if (!lmmp) {
			stripe_count = lov_get_stripecnt(lov, lmm_magic,
							 lsm->lsm_stripe_count);
			lsm->lsm_stripe_count = stripe_count;
		} else if (!lsm_is_released(lsm)) {
			stripe_count = lsm->lsm_stripe_count;
		} else {
			stripe_count = 0;
		}
	} else {
		/* No need to allocate more than the maximum supported number
		 * of stripes.  Anyway, this is pretty inaccurate since
		 * ld_tgt_count now represents the max index and we should
		 * rely on the actual number of OSTs instead.
		 */
		stripe_count = lov_mds_md_max_stripe_count(
			lov->lov_ocd.ocd_max_easize, lmm_magic);

		if (stripe_count > lov->desc.ld_tgt_count)
			stripe_count = lov->desc.ld_tgt_count;
	}

	/* XXX LOV STACKING call into osc for sizes */
	lmm_size = lov_mds_md_size(stripe_count, lmm_magic);

	if (!lmmp)
		return lmm_size;

	if (*lmmp && !lsm) {
		stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count);
		lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
		kvfree(*lmmp);
		*lmmp = NULL;
		return 0;
	}

	if (!*lmmp) {
		*lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
		if (!*lmmp)
			return -ENOMEM;
	}

	CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n",
	       lmm_magic, lmm_size);

	lmmv1 = *lmmp;
	lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
	if (lmm_magic == LOV_MAGIC_V3)
		lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
	else
		lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);

	if (!lsm)
		return lmm_size;

	/* lmmv1 and lmmv3 point to the same struct and have the
	 * same first fields
	 */
	lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
	lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
	lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count);
	lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
	lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
	if (lsm->lsm_magic == LOV_MAGIC_V3) {
		cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
				sizeof(lmmv3->lmm_pool_name));
		if (cplen >= sizeof(lmmv3->lmm_pool_name))
			return -E2BIG;
		lmm_objects = lmmv3->lmm_objects;
	} else {
		lmm_objects = lmmv1->lmm_objects;
	}

	for (i = 0; i < stripe_count; i++) {
		struct lov_oinfo *loi = lsm->lsm_oinfo[i];
		/* XXX LOV STACKING call down to osc_packmd() to do packing */
		LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
			 " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
			 i, stripe_count, loi->loi_ost_idx);
		ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
		lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
		lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
	}

	return lmm_size;
}
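lov_packmd() above folds three calling modes into one entry point, visible in the early returns: sizing only (lmmp == NULL), freeing a previously packed buffer (*lmmp non-NULL with lsm == NULL), and allocating plus packing. A minimal caller sketch of those modes, assuming exp and lsm were obtained elsewhere:

/* Illustration only: exp and lsm are assumed to be valid here. */
static int lov_packmd_roundtrip(struct obd_export *exp,
				struct lov_stripe_md *lsm)
{
	struct lov_mds_md *lmm = NULL;
	int rc;

	/* mode 1: lmmp == NULL returns only the required buffer size */
	rc = lov_packmd(exp, NULL, lsm);
	if (rc < 0)
		return rc;

	/* mode 2: *lmmp == NULL, so lov_packmd() allocates and packs into it */
	rc = lov_packmd(exp, &lmm, lsm);
	if (rc < 0)
		return rc;

	/* mode 3: *lmmp != NULL with lsm == NULL frees the packed buffer */
	return lov_packmd(exp, &lmm, NULL);
}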
Example #8
static int lov_init_raid0(const struct lu_env *env,
			  struct lov_device *dev, struct lov_object *lov,
			  const struct cl_object_conf *conf,
			  union  lov_layout_state *state)
{
	int result;
	int i;

	struct cl_object	*stripe;
	struct lov_thread_info  *lti     = lov_env_info(env);
	struct cl_object_conf   *subconf = &lti->lti_stripe_conf;
	struct lov_stripe_md    *lsm     = conf->u.coc_md->lsm;
	struct lu_fid	   *ofid    = &lti->lti_fid;
	struct lov_layout_raid0 *r0      = &state->raid0;

	if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
		dump_lsm(D_ERROR, lsm);
		LASSERTF(0, "magic mismatch, expected %d/%d, actual %d.\n",
			 LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic);
	}

	LASSERT(!lov->lo_lsm);
	lov->lo_lsm = lsm_addref(lsm);
	r0->lo_nr  = lsm->lsm_stripe_count;
	LASSERT(r0->lo_nr <= lov_targets_nr(dev));

	r0->lo_sub = libcfs_kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]),
				     GFP_NOFS);
	if (r0->lo_sub) {
		int psz = 0;

		result = 0;
		subconf->coc_inode = conf->coc_inode;
		spin_lock_init(&r0->lo_sub_lock);
		/*
		 * Create stripe cl_objects.
		 */
		for (i = 0; i < r0->lo_nr && result == 0; ++i) {
			struct cl_device *subdev;
			struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
			int ost_idx = oinfo->loi_ost_idx;

			if (lov_oinfo_is_dummy(oinfo))
				continue;

			result = ostid_to_fid(ofid, &oinfo->loi_oi,
					      oinfo->loi_ost_idx);
			if (result != 0)
				goto out;

			subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
			subconf->u.coc_oinfo = oinfo;
			LASSERTF(subdev, "not init ost %d\n", ost_idx);
			/* In the function below, .hs_keycmp resolves to
			 * lu_obj_hop_keycmp()
			 */
			/* coverity[overrun-buffer-val] */
			stripe = lov_sub_find(env, subdev, ofid, subconf);
			if (!IS_ERR(stripe)) {
				result = lov_init_sub(env, lov, stripe, r0, i);
				if (result == -EAGAIN) { /* try again */
					--i;
					result = 0;
					continue;
				}
			} else {
				result = PTR_ERR(stripe);
			}

			if (result == 0) {
				int sz = lov_page_slice_fixup(lov, stripe);

				LASSERT(ergo(psz > 0, psz == sz));
				psz = sz;
			}
		}
		if (result == 0)
			cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
	} else {
		result = -ENOMEM;
	}
out:
	return result;
}
Example #9
/* Pack LOV object metadata for disk storage.  It is packed in LE byte
 * order and is opaque to the networking layer.
 *
 * XXX In the future, this will be enhanced to get the EA sizes from the
 *     underlying OSC device(s) so we can stack LOVs properly.  For now
 *     lov_mds_md_size() just assumes one u64 per stripe.
 */
int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp,
		   struct lov_stripe_md *lsm)
{
	struct lov_mds_md_v1 *lmmv1;
	struct lov_mds_md_v3 *lmmv3;
	__u16 stripe_count;
	struct lov_ost_data_v1 *lmm_objects;
	int lmm_size, lmm_magic;
	int i;
	int cplen = 0;

	if (lsm) {
		lmm_magic = lsm->lsm_magic;
	} else {
		if (lmmp && *lmmp)
			lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
		else
			/* lsm == NULL and lmmp == NULL */
			lmm_magic = LOV_MAGIC;
	}

	if ((lmm_magic != LOV_MAGIC_V1) &&
	    (lmm_magic != LOV_MAGIC_V3)) {
		CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
		       lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
		return -EINVAL;
	}

	if (lsm) {
		/* If we are just sizing the EA, limit the stripe count
		 * to the actual number of OSTs in this filesystem.
		 */
		if (!lmmp) {
			stripe_count = lov_get_stripecnt(lov, lmm_magic,
							 lsm->lsm_stripe_count);
			lsm->lsm_stripe_count = stripe_count;
		} else if (!lsm_is_released(lsm)) {
			stripe_count = lsm->lsm_stripe_count;
		} else {
			stripe_count = 0;
		}
	} else {
		/*
		 * Calculate the maximum easize based on the currently active
		 * targets; this is exactly the maximum easize LOV will see.
		 */
		stripe_count = lov->desc.ld_active_tgt_count;
	}

	/* XXX LOV STACKING call into osc for sizes */
	lmm_size = lov_mds_md_size(stripe_count, lmm_magic);

	if (!lmmp)
		return lmm_size;

	if (*lmmp && !lsm) {
		stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count);
		lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
		kvfree(*lmmp);
		*lmmp = NULL;
		return 0;
	}

	if (!*lmmp) {
		*lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
		if (!*lmmp)
			return -ENOMEM;
	}

	CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n",
	       lmm_magic, lmm_size);

	lmmv1 = *lmmp;
	lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
	if (lmm_magic == LOV_MAGIC_V3)
		lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
	else
		lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);

	if (!lsm)
		return lmm_size;

	/* lmmv1 and lmmv3 point to the same struct and have the
	 * same first fields
	 */
	lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
	lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
	lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count);
	lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
	lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
	if (lsm->lsm_magic == LOV_MAGIC_V3) {
		cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
				sizeof(lmmv3->lmm_pool_name));
		if (cplen >= sizeof(lmmv3->lmm_pool_name))
			return -E2BIG;
		lmm_objects = lmmv3->lmm_objects;
	} else {
		lmm_objects = lmmv1->lmm_objects;
	}

	for (i = 0; i < stripe_count; i++) {
		struct lov_oinfo *loi = lsm->lsm_oinfo[i];
		/* XXX LOV STACKING call down to osc_packmd() to do packing */
		LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
			 " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
			 i, stripe_count, loi->loi_ost_idx);
		ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
		lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
		lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
	}

	return lmm_size;
}
Example #10
static int llog_process_thread(void *arg)
{
	struct llog_process_info	*lpi = arg;
	struct llog_handle		*loghandle = lpi->lpi_loghandle;
	struct llog_log_hdr		*llh = loghandle->lgh_hdr;
	struct llog_process_cat_data	*cd  = lpi->lpi_catdata;
	char				*buf;
	u64 cur_offset, tmp_offset;
	int chunk_size;
	int				 rc = 0, index = 1, last_index;
	int				 saved_index = 0;
	int				 last_called_index = 0;

	if (!llh)
		return -EINVAL;

	cur_offset = llh->llh_hdr.lrh_len;
	chunk_size = llh->llh_hdr.lrh_len;
	/* expect chunk_size to be power of two */
	LASSERT(is_power_of_2(chunk_size));

	buf = libcfs_kvzalloc(chunk_size, GFP_NOFS);
	if (!buf) {
		lpi->lpi_rc = -ENOMEM;
		return 0;
	}

	if (cd) {
		last_called_index = cd->lpcd_first_idx;
		index = cd->lpcd_first_idx + 1;
	}
	if (cd && cd->lpcd_last_idx)
		last_index = cd->lpcd_last_idx;
	else
		last_index = LLOG_HDR_BITMAP_SIZE(llh) - 1;

	while (rc == 0) {
		unsigned int buf_offset = 0;
		struct llog_rec_hdr *rec;
		bool partial_chunk;
		off_t chunk_offset;

		/* skip records not set in bitmap */
		while (index <= last_index &&
		       !ext2_test_bit(index, LLOG_HDR_BITMAP(llh)))
			++index;

		if (index > last_index)
			break;

		CDEBUG(D_OTHER, "index: %d last_index %d\n",
		       index, last_index);
repeat:
		/* get the buf with our target record; avoid old garbage */
		memset(buf, 0, chunk_size);
		rc = llog_next_block(lpi->lpi_env, loghandle, &saved_index,
				     index, &cur_offset, buf, chunk_size);
		if (rc)
			goto out;

		/*
		 * NB: after llog_next_block() call the cur_offset is the
		 * offset of the next block after read one.
		 * The absolute offset of the current chunk is calculated
		 * from cur_offset value and stored in chunk_offset variable.
		 */
		tmp_offset = cur_offset;
		if (do_div(tmp_offset, chunk_size)) {
			partial_chunk = true;
			chunk_offset = cur_offset & ~(chunk_size - 1);
		} else {
			partial_chunk = false;
			chunk_offset = cur_offset - chunk_size;
		}

		/* NB: when rec->lrh_len is accessed it is already swabbed
		 * since it is used at the "end" of the loop and the rec
		 * swabbing is done at the beginning of the loop.
		 */
		for (rec = (struct llog_rec_hdr *)(buf + buf_offset);
		     (char *)rec < buf + chunk_size;
		     rec = llog_rec_hdr_next(rec)) {
			CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n",
			       rec, rec->lrh_type);

			if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
				lustre_swab_llog_rec(rec);

			CDEBUG(D_OTHER, "after swabbing, type=%#x idx=%d\n",
			       rec->lrh_type, rec->lrh_index);

			/*
			 * for partial chunk the end of it is zeroed, check
			 * for index 0 to distinguish it.
			 */
			if (partial_chunk && !rec->lrh_index) {
				/* concurrent llog_add() might add new records
				 * while llog_processing, check this is not
				 * the case and re-read the current chunk
				 * otherwise.
				 */
				if (index > loghandle->lgh_last_idx) {
					rc = 0;
					goto out;
				}
				CDEBUG(D_OTHER, "Re-read last llog buffer for new records, index %u, last %u\n",
				       index, loghandle->lgh_last_idx);
				/* save offset inside buffer for the re-read */
				buf_offset = (char *)rec - (char *)buf;
				cur_offset = chunk_offset;
				goto repeat;
			}

			if (!rec->lrh_len || rec->lrh_len > chunk_size) {
				CWARN("invalid length %d in llog record for index %d/%d\n",
				      rec->lrh_len,
				      rec->lrh_index, index);
				rc = -EINVAL;
				goto out;
			}

			if (rec->lrh_index < index) {
				CDEBUG(D_OTHER, "skipping lrh_index %d\n",
				       rec->lrh_index);
				continue;
			}

			if (rec->lrh_index != index) {
				CERROR("%s: Invalid record: index %u but expected %u\n",
				       loghandle->lgh_ctxt->loc_obd->obd_name,
				       rec->lrh_index, index);
				rc = -ERANGE;
				goto out;
			}

			CDEBUG(D_OTHER,
			       "lrh_index: %d lrh_len: %d (%d remains)\n",
			       rec->lrh_index, rec->lrh_len,
			       (int)(buf + chunk_size - (char *)rec));

			loghandle->lgh_cur_idx = rec->lrh_index;
			loghandle->lgh_cur_offset = (char *)rec - (char *)buf +
						    chunk_offset;

			/* if set, process the callback on this record */
			if (ext2_test_bit(index, LLOG_HDR_BITMAP(llh))) {
				rc = lpi->lpi_cb(lpi->lpi_env, loghandle, rec,
						 lpi->lpi_cbdata);
				last_called_index = index;
				if (rc)
					goto out;
			}

			/* exit if the last index is reached */
			if (index >= last_index) {
				rc = 0;
				goto out;
			}
			index++;
		}
	}

out:
	if (cd)
		cd->lpcd_last_idx = last_called_index;

	kvfree(buf);
	lpi->lpi_rc = rc;
	return 0;
}
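llog_process_thread() above detects a partial chunk from the remainder of cur_offset divided by chunk_size, which the LASSERT guarantees is a power of two. A standalone sketch of the same bookkeeping in plain 64-bit arithmetic, for illustration only:

#include <stdbool.h>
#include <stdint.h>

/* Illustration only: mirrors the partial-chunk bookkeeping above. */
static void chunk_position(uint64_t cur_offset, uint32_t chunk_size,
			   bool *partial_chunk, uint64_t *chunk_offset)
{
	if (cur_offset % chunk_size) {
		/* the read stopped inside a chunk: round down to its start */
		*partial_chunk = true;
		*chunk_offset = cur_offset & ~(uint64_t)(chunk_size - 1);
	} else {
		/* the read ended exactly on a boundary: the chunk just read
		 * starts one full chunk before cur_offset
		 */
		*partial_chunk = false;
		*chunk_offset = cur_offset - chunk_size;
	}
}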
Example #11
int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
		     int flags, struct obd_uuid *uuid)
{
	int chunk_size = handle->lgh_ctxt->loc_chunk_size;
	enum llog_flag fmt = flags & LLOG_F_EXT_MASK;
	struct llog_log_hdr	*llh;
	int			 rc;

	LASSERT(!handle->lgh_hdr);

	LASSERT(chunk_size >= LLOG_MIN_CHUNK_SIZE);
	llh = libcfs_kvzalloc(sizeof(*llh), GFP_NOFS);
	if (!llh)
		return -ENOMEM;
	handle->lgh_hdr = llh;
	handle->lgh_hdr_size = chunk_size;
	/* first assign flags to use llog_client_ops */
	llh->llh_flags = flags;
	rc = llog_read_header(env, handle, uuid);
	if (rc == 0) {
		if (unlikely((llh->llh_flags & LLOG_F_IS_PLAIN &&
			      flags & LLOG_F_IS_CAT) ||
			     (llh->llh_flags & LLOG_F_IS_CAT &&
			      flags & LLOG_F_IS_PLAIN))) {
			CERROR("%s: llog type is %s but initializing %s\n",
			       handle->lgh_ctxt->loc_obd->obd_name,
			       llh->llh_flags & LLOG_F_IS_CAT ?
			       "catalog" : "plain",
			       flags & LLOG_F_IS_CAT ? "catalog" : "plain");
			rc = -EINVAL;
			goto out;
		} else if (llh->llh_flags &
			   (LLOG_F_IS_PLAIN | LLOG_F_IS_CAT)) {
			/*
			 * it is possible to open llog without specifying llog
			 * type so it is taken from llh_flags
			 */
			flags = llh->llh_flags;
		} else {
			/* for some reason llh_flags has no type set */
			CERROR("llog type is not specified!\n");
			rc = -EINVAL;
			goto out;
		}
		if (unlikely(uuid &&
			     !obd_uuid_equals(uuid, &llh->llh_tgtuuid))) {
			CERROR("%s: llog uuid mismatch: %s/%s\n",
			       handle->lgh_ctxt->loc_obd->obd_name,
			       (char *)uuid->uuid,
			       (char *)llh->llh_tgtuuid.uuid);
			rc = -EEXIST;
			goto out;
		}
	}
	if (flags & LLOG_F_IS_CAT) {
		LASSERT(list_empty(&handle->u.chd.chd_head));
		INIT_LIST_HEAD(&handle->u.chd.chd_head);
		llh->llh_size = sizeof(struct llog_logid_rec);
		llh->llh_flags |= LLOG_F_IS_FIXSIZE;
	} else if (!(flags & LLOG_F_IS_PLAIN)) {
		CERROR("%s: unknown flags: %#x (expected %#x or %#x)\n",
		       handle->lgh_ctxt->loc_obd->obd_name,
		       flags, LLOG_F_IS_CAT, LLOG_F_IS_PLAIN);
		rc = -EINVAL;
	}
	llh->llh_flags |= fmt;
out:
	if (rc) {
		kvfree(llh);
		handle->lgh_hdr = NULL;
	}
	return rc;
}