Example #1
/**
 * Look up the range for a given seq. Note that here we only care about
 * the start/end; the caller should handle the attached location data
 * (flags, index).
 *
 * \param  seq     seq to look up.
 * \param  range   result of the lookup.
 *
 * \retval  0        found, \a range is the matched range;
 * \retval -ENOENT   not found, \a range is the left-side range;
 * \retval -ve       other error;
 */
int fld_index_lookup(const struct lu_env *env, struct lu_server_fld *fld,
		     seqno_t seq, struct lu_seq_range *range)
{
	struct lu_seq_range	*fld_rec;
	struct fld_thread_info	*info;
	int			rc;

	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	fld_rec = &info->fti_rec;

	rc = fld_cache_lookup(fld->lsf_cache, seq, fld_rec);
	if (rc == 0) {
		*range = *fld_rec;
		if (range_within(range, seq))
			rc = 0;
		else
			rc = -ENOENT;
	}

	CDEBUG(D_INFO, "%s: lookup seq = "LPX64" range : "DRANGE" rc = %d\n",
	       fld->lsf_name, seq, PRANGE(range), rc);

	RETURN(rc);
}
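
A minimal caller sketch for the contract above; the wrapper name is hypothetical, everything else follows the \retval documentation:

/* Hypothetical caller sketch (the wrapper name is an assumption):
 * consumes the 0 / -ENOENT contract documented above. */
static int fld_lookup_example(const struct lu_env *env,
			      struct lu_server_fld *fld, seqno_t seq)
{
	struct lu_seq_range found;
	int rc;

	rc = fld_index_lookup(env, fld, seq, &found);
	if (rc == 0) {
		/* found is the matched range; the flags/index it
		 * carries are the caller's business, as noted above. */
	} else if (rc == -ENOENT) {
		/* found is the left-side range, useful e.g. to decide
		 * where a new range would be inserted. */
	}
	return rc;
}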
Example #2
/* This function assumes that the caller takes care of locking. */
int seq_store_write(struct lu_server_seq *seq,
                    const struct lu_env *env,
                    struct thandle *th)
{
        struct dt_object *dt_obj = seq->lss_obj;
        struct seq_thread_info *info;
        struct dt_device *dt_dev;
        loff_t pos = 0;
        int rc;
        ENTRY;

        dt_dev = lu2dt_dev(seq->lss_obj->do_lu.lo_dev);
        info = lu_context_key_get(&env->le_ctx, &seq_thread_key);
        LASSERT(info != NULL);

        /* Store ranges in le format. */
        range_cpu_to_le(&info->sti_space, &seq->lss_space);

        rc = dt_obj->do_body_ops->dbo_write(env, dt_obj,
                                            seq_store_buf(info),
                                            &pos, th, BYPASS_CAPA, 1);
        if (rc == sizeof(info->sti_space)) {
                CDEBUG(D_INFO, "%s: Space - "DRANGE"\n",
                       seq->lss_name, PRANGE(&seq->lss_space));
                rc = 0;
        } else if (rc >= 0) {
                rc = -EIO;
        }
        RETURN(rc);
}
Example #3
/*
 * This function assumes that the caller takes care of locking, or that
 * locking is not needed (init time).
 */
int seq_store_read(struct lu_server_seq *seq,
                   const struct lu_env *env)
{
        struct dt_object *dt_obj = seq->lss_obj;
        struct seq_thread_info *info;
        loff_t pos = 0;
        int rc;
        ENTRY;

        info = lu_context_key_get(&env->le_ctx, &seq_thread_key);
        LASSERT(info != NULL);

        rc = dt_obj->do_body_ops->dbo_read(env, dt_obj, seq_store_buf(info),
                                           &pos, BYPASS_CAPA);

        if (rc == sizeof(info->sti_space)) {
                range_le_to_cpu(&seq->lss_space, &info->sti_space);
                CDEBUG(D_INFO, "%s: Space - "DRANGE"\n",
                       seq->lss_name, PRANGE(&seq->lss_space));
                rc = 0;
        } else if (rc == 0) {
                rc = -ENODATA;
        } else if (rc >= 0) {
                CERROR("%s: Read only %d bytes of %d\n", seq->lss_name,
                       rc, (int)sizeof(info->sti_space));
                rc = -EIO;
        }

        RETURN(rc);
}
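
A hedged sketch of the locking contract stated in the two comments above; lss_mutex is the serializing lock on struct lu_server_seq in contemporaneous trees, but treat the exact lock and the helper name here as assumptions:

/* Hypothetical sketch: seq_store_read()/seq_store_write() assume the
 * caller serializes access; lss_mutex and the helper name are
 * assumptions for illustration. */
static int seq_store_update_example(struct lu_server_seq *seq,
				    const struct lu_env *env,
				    struct thandle *th)
{
	int rc;

	mutex_lock(&seq->lss_mutex);
	rc = seq_store_read(seq, env);
	if (rc == 0)
		rc = seq_store_write(seq, env, th);
	mutex_unlock(&seq->lss_mutex);
	return rc;
}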
Example #4
/**
 * Look up a sequence in the local cache/FLDB.
 **/
int fld_local_lookup(const struct lu_env *env, struct lu_server_fld *fld,
		     u64 seq, struct lu_seq_range *range)
{
	struct lu_seq_range *erange;
	struct fld_thread_info *info;
	int rc;
	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	LASSERT(info != NULL);
	erange = &info->fti_lrange;

	/* Lookup it in the cache. */
	rc = fld_cache_lookup(fld->lsf_cache, seq, erange);
	if (rc == 0) {
		if (unlikely(fld_range_type(erange) != fld_range_type(range) &&
			     !fld_range_is_any(range))) {
			CERROR("%s: FLD cache range "DRANGE" does not match"
			       "requested flag %x: rc = %d\n", fld->lsf_name,
			       PRANGE(erange), range->lsr_flags, -EIO);
			RETURN(-EIO);
		}
		*range = *erange;
		RETURN(0);
	}
	RETURN(rc);
}
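
Since fld_local_lookup() above rejects a cached entry whose type differs from the requested one (unless the request is "any"), a caller seeds the expected range type first. A hypothetical sketch, where fld_range_set_any() mirrors the fld_range_is_any() check used above:

/* Hypothetical caller sketch: seed the range type before the lookup,
 * matching the fld_range_type()/fld_range_is_any() check above. */
static int fld_local_lookup_example(const struct lu_env *env,
				    struct lu_server_fld *fld, u64 seq)
{
	struct lu_seq_range range;

	fld_range_set_any(&range);	/* accept any range type */
	return fld_local_lookup(env, fld, seq, &range);
}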
Example #5
int fld_index_lookup(struct lu_server_fld *fld,
                     const struct lu_env *env,
                     seqno_t seq,
                     struct lu_seq_range *range)
{
        struct dt_object        *dt_obj = fld->lsf_obj;
        struct lu_seq_range     *fld_rec;
        struct dt_key           *key = fld_key(env, seq);
        struct fld_thread_info  *info;
        int rc;

        ENTRY;

        info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
        fld_rec = &info->fti_rec;

        rc = dt_obj->do_index_ops->dio_lookup(env, dt_obj,
                                              (struct dt_rec*) fld_rec,
                                              key, BYPASS_CAPA);

        if (rc >= 0) {
                range_be_to_cpu(fld_rec, fld_rec);
                *range = *fld_rec;
                if (range_within(range, seq))
                        rc = 0;
                else
                        rc = -ENOENT;
        }

        CDEBUG(D_INFO, "%s: lookup seq = "LPX64" range : "DRANGE" rc = %d\n",
               fld->lsf_name, seq, PRANGE(range), rc);

        RETURN(rc);
}
Example #6
/* Update last_rcvd records with the latest transaction data */
int ofd_txn_stop_cb(const struct lu_env *env, struct thandle *txn,
		    void *cookie)
{
	struct ofd_device *ofd = cookie;
	struct ofd_thread_info *info;

	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &ofd_thread_key);

	if (info->fti_exp == NULL)
		RETURN(0);

	LASSERT(ofd_exp(info->fti_exp) == ofd);
	if (info->fti_has_trans) {
		if (info->fti_mult_trans == 0) {
			CERROR("More than one transaction "LPU64"\n",
			       info->fti_transno);
			RETURN(0);
		}
		/* we need another transno to be assigned */
		info->fti_transno = 0;
	} else if (txn->th_result == 0) {
		info->fti_has_trans = 1;
	}

	spin_lock(&ofd->ofd_lut.lut_translock);
	if (txn->th_result != 0) {
		if (info->fti_transno != 0) {
			CERROR("Replay transno "LPU64" failed: rc %d\n",
			       info->fti_transno, txn->th_result);
			info->fti_transno = 0;
		}
	} else if (info->fti_transno == 0) {
		info->fti_transno = ++ofd->ofd_lut.lut_last_transno;
	} else {
		/* should be replay */
		if (info->fti_transno > ofd->ofd_lut.lut_last_transno)
			ofd->ofd_lut.lut_last_transno = info->fti_transno;
	}
	spin_unlock(&ofd->ofd_lut.lut_translock);

	/** VBR: set new versions */
	if (txn->th_result == 0 && info->fti_obj != NULL) {
		dt_version_set(env, ofd_object_child(info->fti_obj),
			       info->fti_transno, txn);
		info->fti_obj = NULL;
	}

	/* filling reply data */
	CDEBUG(D_INODE, "transno = %llu, last_committed = %llu\n",
	       info->fti_transno, ofd_obd(ofd)->obd_last_committed);

	/* if we can't add the callback, do a sync write */
	txn->th_sync |= !!tgt_last_commit_cb_add(txn, &ofd->ofd_lut,
						 info->fti_exp,
						 info->fti_transno);

	return ofd_last_rcvd_update(info, txn);
}
Example #7
int fld_server_read(const struct lu_env *env, struct lu_server_fld *fld,
		    struct lu_seq_range *range, void *data, int data_len)
{
	struct lu_seq_range_array *lsra = data;
	struct fld_thread_info	  *info;
	struct dt_object	  *dt_obj = fld->lsf_obj;
	struct lu_seq_range	  *entry;
	struct dt_it		  *it;
	const struct dt_it_ops	  *iops;
	int			  rc;

	ENTRY;

	lsra->lsra_count = 0;
	iops = &dt_obj->do_index_ops->dio_it;
	it = iops->init(env, dt_obj, 0);
	if (IS_ERR(it))
		RETURN(PTR_ERR(it));

	rc = iops->load(env, it, range->lsr_end);
	if (rc <= 0)
		GOTO(out_it_fini, rc);

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	LASSERT(info != NULL);
	entry = &info->fti_rec;
	do {
		rc = iops->rec(env, it, (struct dt_rec *)entry, 0);
		if (rc != 0)
			GOTO(out_it_put, rc);

		if (offsetof(typeof(*lsra), lsra_lsr[lsra->lsra_count + 1]) >
		    data_len)
			GOTO(out, rc = -EAGAIN);

		range_be_to_cpu(entry, entry);
		if (entry->lsr_index == range->lsr_index &&
		    entry->lsr_flags == range->lsr_flags &&
		    entry->lsr_start > range->lsr_start) {
			lsra->lsra_lsr[lsra->lsra_count] = *entry;
			lsra->lsra_count++;
		}

		rc = iops->next(env, it);
	} while (rc == 0);
	if (rc > 0)
		rc = 0;
out:
	range_array_cpu_to_le(lsra, lsra);
out_it_put:
	iops->put(env, it);
out_it_fini:
	iops->fini(env, it);

	RETURN(rc);
}
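
A hedged caller sketch for fld_server_read(): -EAGAIN means the output buffer filled before the iteration finished, and the returned array is in little-endian format (see range_array_cpu_to_le above). The helper name is an assumption:

/* Hypothetical caller sketch. The function above converts the result
 * array to little-endian before returning on the 0/-EAGAIN paths,
 * hence the le32_to_cpu() here. */
static int fld_read_example(const struct lu_env *env,
			    struct lu_server_fld *fld,
			    struct lu_seq_range *range,
			    void *buf, int buflen)
{
	struct lu_seq_range_array *lsra = buf;
	int rc;

	rc = fld_server_read(env, fld, range, buf, buflen);
	if (rc == 0 || rc == -EAGAIN)
		CDEBUG(D_INFO, "read %u entries%s\n",
		       le32_to_cpu(lsra->lsra_count),
		       rc == -EAGAIN ? " (more to read)" : "");
	return rc;
}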
Example #8
int fld_declare_index_create(const struct lu_env *env,
			     struct lu_server_fld *fld,
			     const struct lu_seq_range *new_range,
			     struct thandle *th)
{
	struct lu_seq_range	*tmp;
	struct lu_seq_range	*range;
	struct fld_thread_info	*info;
	int			rc = 0;

	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	range = &info->fti_lrange;
	tmp = &info->fti_irange;
	memset(range, 0, sizeof(*range));

	rc = fld_index_lookup(env, fld, new_range->lsr_start, range);
	if (rc == 0) {
		/* In case of a duplicate entry, the location must be the same */
		LASSERT((range_compare_loc(new_range, range) == 0));
		GOTO(out, rc = -EEXIST);
	}

	if (rc != -ENOENT) {
		CERROR("%s: lookup range "DRANGE" error: rc = %d\n",
			fld->lsf_name, PRANGE(range), rc);
		GOTO(out, rc);
	}

	/* Check for the merge case: since fld entries can only grow
	 * incrementally, we only check whether the new range can be
	 * merged from the left. */
	if (new_range->lsr_start == range->lsr_end && range->lsr_end != 0 &&
	    range_compare_loc(new_range, range) == 0) {
		range_cpu_to_be(tmp, range);
		rc = dt_declare_delete(env, fld->lsf_obj,
				       (struct dt_key *)&tmp->lsr_start, th);
		if (rc) {
			CERROR("%s: declare record "DRANGE" failed: rc = %d\n",
			       fld->lsf_name, PRANGE(range), rc);
			GOTO(out, rc);
		}
		memcpy(tmp, new_range, sizeof(*new_range));
		tmp->lsr_start = range->lsr_start;
	} else {
		memcpy(tmp, new_range, sizeof(*new_range));
	}

	range_cpu_to_be(tmp, tmp);
	rc = dt_declare_insert(env, fld->lsf_obj, (struct dt_rec *)tmp,
			       (struct dt_key *)&tmp->lsr_start, th);
out:
	RETURN(rc);
}
Example #9
static struct dt_key *fld_key(const struct lu_env *env,
                              const seqno_t seq)
{
        struct fld_thread_info *info;
        ENTRY;

        info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
        LASSERT(info != NULL);

        info->fti_key = cpu_to_be64(seq);
        RETURN((void *)&info->fti_key);
}
Example #10
struct thandle* fld_trans_start(struct lu_server_fld *fld,
                                const struct lu_env *env, int credit)
{
        struct fld_thread_info *info;
        struct dt_device *dt_dev;
        struct txn_param *p;

        dt_dev = lu2dt_dev(fld->lsf_obj->do_lu.lo_dev);
        info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
        p = &info->fti_txn_param;
        txn_param_init(p, credit);

        return dt_dev->dd_ops->dt_trans_start(env, dt_dev, p);
}
Example #11
static struct dt_rec *fld_rec(const struct lu_env *env,
                              const struct lu_seq_range *range)
{
        struct fld_thread_info *info;
        struct lu_seq_range *rec;
        ENTRY;

        info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
        LASSERT(info != NULL);
        rec = &info->fti_rec;

        range_cpu_to_be(rec, range);
        RETURN((void *)rec);
}
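
fld_key() (Example #9) and fld_rec() above both return pointers into per-thread fld_thread_info storage, so they need no freeing but stay valid only until the next call on the same thread. A hedged sketch of how they pair up in an index insert, with the dio_insert signature assumed by analogy with dio_lookup in Example #5:

/* Hypothetical sketch pairing fld_key() and fld_rec(); the dio_insert
 * signature is an assumption modelled on dio_lookup in Example #5. */
static int fld_insert_example(const struct lu_env *env,
			      struct dt_object *dt_obj, seqno_t seq,
			      const struct lu_seq_range *range,
			      struct thandle *th)
{
	return dt_obj->do_index_ops->dio_insert(env, dt_obj,
						fld_rec(env, range),
						fld_key(env, seq),
						th, BYPASS_CAPA, 1);
}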
Example #12
int mdt_capa_keys_init(const struct lu_env *env, struct mdt_device *mdt)
{
        struct lustre_capa_key  *keys = mdt->mdt_capa_keys;
        struct mdt_thread_info  *mti;
        struct dt_object        *obj;
        struct lu_attr          *la;
        mdsno_t                  mdsnum;
        unsigned long            size;
        int                      rc;
        ENTRY;

        mdsnum = mdt_seq_site(mdt)->ss_node_id;

        mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
        LASSERT(mti != NULL);
        la = &mti->mti_attr.ma_attr;

        obj = mdt->mdt_ck_obj;
        rc = obj->do_ops->do_attr_get(env, mdt->mdt_ck_obj, la, BYPASS_CAPA);
        if (rc)
                RETURN(rc);

        size = (unsigned long)la->la_size;
        if (size == 0) {
                int i;

                for (i = 0; i < 2; i++) {
                        make_capa_key(&keys[i], mdsnum, i);
                        DEBUG_CAPA_KEY(D_SEC, &keys[i], "initializing");
                }

                rc = write_capa_keys(env, mdt, keys);
                if (rc) {
                        CERROR("error writing MDS %s: rc %d\n", CAPA_KEYS, rc);
                        RETURN(rc);
                }
        } else {
                rc = read_capa_keys(env, mdt, keys);
                if (rc) {
                        CERROR("error reading MDS %s: rc %d\n", CAPA_KEYS, rc);
                        RETURN(rc);
                }
        }
        set_capa_key_expiry(mdt);
        cfs_timer_arm(&mdt->mdt_ck_timer, mdt->mdt_ck_expiry);
        CDEBUG(D_SEC, "mds_ck_timer %lu\n", mdt->mdt_ck_expiry);
        RETURN(0);
}
Example #13
static int write_capa_keys(const struct lu_env *env,
                           struct mdt_device *mdt,
                           struct lustre_capa_key *keys)
{
        struct mdt_thread_info *mti;
        struct lustre_capa_key *tmp;
        struct thandle *th;
        loff_t off = 0;
        int i, rc;

        mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
        th = dt_trans_create(env, mdt->mdt_bottom);
        if (IS_ERR(th))
                RETURN(PTR_ERR(th));

        rc = dt_declare_record_write(env, mdt->mdt_ck_obj,
                                     mdt_buf_const(env, NULL,
                                                   sizeof(*tmp) * 3),
                                     0, th);
        if (rc)
                goto stop;

        rc = dt_trans_start_local(env, mdt->mdt_bottom, th);
        if (rc)
                goto stop;

        tmp = &mti->mti_capa_key;

        for (i = 0; i < 2; i++) {
                lck_cpu_to_le(tmp, &keys[i]);

                rc = dt_record_write(env, mdt->mdt_ck_obj,
                                     mdt_buf_const(env, tmp, sizeof(*tmp)),
                                     &off, th);
                if (rc)
                        break;
        }

stop:
        dt_trans_stop(env, mdt->mdt_bottom, th);

        CDEBUG(D_INFO, "write capability keys: rc = %d\n", rc);
        return rc;
}
Example #14
struct thandle *seq_store_trans_start(struct lu_server_seq *seq,
                                      const struct lu_env *env, int credit,
                                      int sync)
{
        struct seq_thread_info *info;
        struct dt_device *dt_dev;
        struct thandle *th;
        ENTRY;

        dt_dev = lu2dt_dev(seq->lss_obj->do_lu.lo_dev);
        info = lu_context_key_get(&env->le_ctx, &seq_thread_key);
        LASSERT(info != NULL);

        txn_param_init(&info->sti_txn, credit);
        if (sync)
                txn_param_sync(&info->sti_txn);

        th = dt_dev->dd_ops->dt_trans_start(env, dt_dev, &info->sti_txn);
        return th;
}
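
A hedged usage sketch pairing seq_store_trans_start() with seq_store_write() (Example #2); SEQ_TXN_STORE_CREDITS and the dt_trans_stop call shape are assumptions for illustration:

/* Hypothetical start/write/stop pairing; the credit constant and the
 * dt_trans_stop signature are assumptions. */
static int seq_store_sync_example(struct lu_server_seq *seq,
				  const struct lu_env *env)
{
	struct dt_device *dt_dev = lu2dt_dev(seq->lss_obj->do_lu.lo_dev);
	struct thandle *th;
	int rc;

	th = seq_store_trans_start(seq, env, SEQ_TXN_STORE_CREDITS, 1);
	if (IS_ERR(th))
		return PTR_ERR(th);
	rc = seq_store_write(seq, env, th);
	dt_dev->dd_ops->dt_trans_stop(env, th);
	return rc;
}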
Example #15
int seq_handle(struct ptlrpc_request *req)
{
	const struct lu_env *env;
	struct seq_thread_info *info;
	int rc;

	env = req->rq_svc_thread->t_env;
	LASSERT(env != NULL);

	info = lu_context_key_get(&env->le_ctx, &seq_thread_key);
	LASSERT(info != NULL);

	seq_thread_info_init(req, info);
	rc = seq_req_handle(req, env, info);
	/* XXX: we don't need replay, but the MDT assigns a transno in any
	 * case; remove it manually before replying. */
	lustre_msg_set_transno(req->rq_repmsg, 0);
	seq_thread_info_fini(info);

	return rc;
}
Example #16
/*
 * Returns true if fid is local to this server node.
 *
 * WARNING: this function is *not* guaranteed to return false if fid is
 * remote: it makes an educated, conservative guess only.
 *
 * fid_is_local() is supposed to be used in assertion checks only.
 */
int fid_is_local(const struct lu_env *env,
                 struct lu_site *site, const struct lu_fid *fid)
{
	int result;
	struct seq_server_site *ss_site;
	struct lu_seq_range *range;
	struct fld_thread_info *info;
	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	range = &info->fti_lrange;

	result = 1; /* conservatively assume fid is local */
	ss_site = lu_site2seq(site);
	if (ss_site->ss_client_fld != NULL) {
		int rc;

		rc = fld_cache_lookup(ss_site->ss_client_fld->lcf_cache,
				      fid_seq(fid), range);
		if (rc == 0)
			result = (range->lsr_index == ss_site->ss_node_id);
	}
	return result;
}
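
Per the warning above, the result is only trustworthy in one direction, so the intended use is an assertion; a minimal (hypothetical) example:

/* Hypothetical use: assertion checks only, as the comment above
 * demands; a "true" result is a conservative guess, not proof. */
LASSERT(fid_is_local(env, site, fid));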
Example #17
static int read_capa_keys(const struct lu_env *env,
                          struct mdt_device *mdt,
                          struct lustre_capa_key *keys)
{
        struct mdt_thread_info *mti;
        struct lustre_capa_key *tmp;
        loff_t off = 0;
        int i, rc;

        mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
        tmp = &mti->mti_capa_key;

        for (i = 0; i < 2; i++) {
                rc = dt_record_read(env, mdt->mdt_ck_obj,
                                    mdt_buf(env, tmp, sizeof(*tmp)), &off);
                if (rc)
                        return rc;

                lck_le_to_cpu(&keys[i], tmp);
                DEBUG_CAPA_KEY(D_SEC, &keys[i], "read");
        }

        return 0;
}
Example #18
int fld_index_init(const struct lu_env *env, struct lu_server_fld *fld,
		   struct dt_device *dt)
{
	struct dt_object	*dt_obj = NULL;
	struct lu_fid		fid;
	struct lu_attr		*attr = NULL;
	struct lu_seq_range	*range = NULL;
	struct fld_thread_info	*info;
	struct dt_object_format	dof;
	struct dt_it		*it;
	const struct dt_it_ops	*iops;
	int			rc;
	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	LASSERT(info != NULL);

	lu_local_obj_fid(&fid, FLD_INDEX_OID);
	OBD_ALLOC_PTR(attr);
	if (attr == NULL)
		RETURN(-ENOMEM);

	memset(attr, 0, sizeof(*attr));
	attr->la_valid = LA_MODE;
	attr->la_mode = S_IFREG | 0666;
	dof.dof_type = DFT_INDEX;
	dof.u.dof_idx.di_feat = &fld_index_features;

	dt_obj = dt_find_or_create(env, dt, &fid, &dof, attr);
	if (IS_ERR(dt_obj)) {
		rc = PTR_ERR(dt_obj);
		CERROR("%s: Can't find \"%s\" obj %d\n", fld->lsf_name,
			fld_index_name, rc);
		dt_obj = NULL;
		GOTO(out, rc);
	}

	fld->lsf_obj = dt_obj;
	rc = dt_obj->do_ops->do_index_try(env, dt_obj, &fld_index_features);
	if (rc != 0) {
		CERROR("%s: File \"%s\" is not an index: rc = %d!\n",
		       fld->lsf_name, fld_index_name, rc);
		GOTO(out, rc);
	}

	range = &info->fti_rec;
	/* Load fld entry to cache */
	iops = &dt_obj->do_index_ops->dio_it;
	it = iops->init(env, dt_obj, 0, NULL);
	if (IS_ERR(it))
		GOTO(out, rc = PTR_ERR(it));

	rc = iops->load(env, it, 0);
	if (rc < 0)
		GOTO(out_it_fini, rc);

	if (rc > 0) {
		/* Load FLD entry into server cache */
		do {
			rc = iops->rec(env, it, (struct dt_rec *)range, 0);
			if (rc != 0)
				GOTO(out_it_put, rc);
			LASSERT(range != NULL);
			range_be_to_cpu(range, range);
			rc = fld_cache_insert(fld->lsf_cache, range);
			if (rc != 0)
				GOTO(out_it_put, rc);
			rc = iops->next(env, it);
		} while (rc == 0);
	}

	/* Note: fld_insert_entry will detect whether these
	 * special entries already exist inside FLDB */
	mutex_lock(&fld->lsf_lock);
	rc = fld_insert_special_entries(env, fld);
	mutex_unlock(&fld->lsf_lock);
	if (rc != 0) {
		CERROR("%s: insert special entries failed!: rc = %d\n",
		       fld->lsf_name, rc);
		GOTO(out_it_put, rc);
	}

out_it_put:
	iops->put(env, it);
out_it_fini:
	iops->fini(env, it);
out:
	if (attr != NULL)
		OBD_FREE_PTR(attr);

	if (rc != 0) {
		if (dt_obj != NULL)
			lu_object_put(env, &dt_obj->do_lu);
		fld->lsf_obj = NULL;
	}
	RETURN(rc);
}
Example #19
static int mdt_lvbo_fill(struct ldlm_lock *lock, void *lvb, int lvblen)
{
	struct lu_env env;
	struct mdt_thread_info *info;
	struct mdt_device *mdt;
	struct lu_fid *fid;
	struct mdt_object *obj = NULL;
	struct md_object *child = NULL;
	int rc;
	ENTRY;

	mdt = ldlm_lock_to_ns(lock)->ns_lvbp;
	if (IS_LQUOTA_RES(lock->l_resource)) {
		if (mdt->mdt_qmt_dev == NULL)
			RETURN(0);

		/* call lvbo fill function of quota master */
		rc = qmt_hdls.qmth_lvbo_fill(mdt->mdt_qmt_dev, lock, lvb,
					     lvblen);
		RETURN(rc);
	}

	if (!ldlm_has_layout(lock))
		RETURN(0);

	/* layout lock will be granted to client, fill in lvb with layout */

	/* XXX create an env to talk to mdt stack. We should get this env from
	 * ptlrpc_thread->t_env. */
	rc = lu_env_init(&env, LCT_MD_THREAD);
	/* Likely ENOMEM */
	if (rc)
		RETURN(rc);

	info = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
	/* Likely ENOMEM */
	if (info == NULL)
		GOTO(out, rc = -ENOMEM);

	memset(info, 0, sizeof(*info));
	info->mti_env = &env;
	info->mti_exp = lock->l_export;
	info->mti_mdt = mdt;

	/* XXX get fid by resource id. Why not include the fid in
	 * ldlm_resource? */
	fid = &info->mti_tmp_fid2;
	fid_extract_from_res_name(fid, &lock->l_resource->lr_name);

	obj = mdt_object_find(&env, info->mti_mdt, fid);
	if (IS_ERR(obj))
		GOTO(out, rc = PTR_ERR(obj));

	if (!mdt_object_exists(obj) || mdt_object_remote(obj))
		GOTO(out, rc = -ENOENT);

	child = mdt_object_child(obj);

	/* get the length of lsm */
	rc = mo_xattr_get(&env, child, &LU_BUF_NULL, XATTR_NAME_LOV);
	if (rc < 0)
		GOTO(out, rc);

	if (rc > 0) {
		struct lu_buf *lmm = NULL;

		if (lvblen < rc) {
			CERROR("%s: expected %d actual %d.\n",
				mdt_obd_name(mdt), rc, lvblen);
			GOTO(out, rc = -ERANGE);
		}

		lmm = &info->mti_buf;
		lmm->lb_buf = lvb;
		lmm->lb_len = rc;

		rc = mo_xattr_get(&env, child, lmm, XATTR_NAME_LOV);
		if (rc < 0)
			GOTO(out, rc);
	}

out:
	if (obj != NULL && !IS_ERR(obj))
		mdt_object_put(&env, obj);
	lu_env_fini(&env);
	RETURN(rc < 0 ? 0 : rc);
}
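
The two-step mo_xattr_get() pattern above (probe the size with LU_BUF_NULL, then fetch into a sized buffer) is worth isolating; a hedged standalone sketch with a hypothetical helper name:

/* Hypothetical sketch of the size-probe pattern used above. */
static int xattr_fetch_example(const struct lu_env *env,
			       struct md_object *child,
			       struct lu_buf *buf)
{
	/* First call returns the attribute size without copying data. */
	int size = mo_xattr_get(env, child, &LU_BUF_NULL, XATTR_NAME_LOV);

	if (size <= 0)
		return size;
	if (buf->lb_len < size)
		return -ERANGE;
	buf->lb_len = size;
	return mo_xattr_get(env, child, buf, XATTR_NAME_LOV);
}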
Example #20
/**
 * Retrieve fldb entries from MDT0 and add them to the local FLDB and
 * cache.
 **/
int fld_update_from_controller(const struct lu_env *env,
			       struct lu_server_fld *fld)
{
	struct fld_thread_info	  *info;
	struct lu_seq_range	  *range;
	struct lu_seq_range_array *lsra;
	__u32			  index;
	struct ptlrpc_request	  *req = NULL;
	int			  rc;
	int			  i;
	ENTRY;

	/* The update only happens during initialization, i.e. the local
	 * FLDB does not exist yet */
	if (!fld->lsf_new)
		RETURN(0);

	rc = fld_name_to_index(fld->lsf_name, &index);
	if (rc < 0)
		RETURN(rc);

	/* No need to update the fldb for MDT0 */
	if (index == 0)
		RETURN(0);

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	LASSERT(info != NULL);
	range = &info->fti_lrange;
	memset(range, 0, sizeof(*range));
	range->lsr_index = index;
	fld_range_set_mdt(range);

	do {
		rc = fld_client_rpc(fld->lsf_control_exp, range, FLD_READ,
				    &req);
		if (rc != 0 && rc != -EAGAIN)
			GOTO(out, rc);

		LASSERT(req != NULL);
		lsra = (struct lu_seq_range_array *)req_capsule_server_get(
					  &req->rq_pill, &RMF_GENERIC_DATA);
		if (lsra == NULL)
			GOTO(out, rc = -EPROTO);

		range_array_le_to_cpu(lsra, lsra);
		for (i = 0; i < lsra->lsra_count; i++) {
			int rc1;

			if (lsra->lsra_lsr[i].lsr_flags != LU_SEQ_RANGE_MDT)
				GOTO(out, rc = -EINVAL);

			if (lsra->lsra_lsr[i].lsr_index != index)
				GOTO(out, rc = -EINVAL);

			mutex_lock(&fld->lsf_lock);
			rc1 = fld_insert_entry(env, fld, &lsra->lsra_lsr[i]);
			mutex_unlock(&fld->lsf_lock);

			if (rc1 != 0)
				GOTO(out, rc = rc1);
		}
		if (rc == -EAGAIN)
			*range = lsra->lsra_lsr[lsra->lsra_count - 1];

		/* Release the reply before a possible retry, otherwise
		 * each -EAGAIN round would leak a request. */
		ptlrpc_req_finished(req);
		req = NULL;
	} while (rc == -EAGAIN);

	fld->lsf_new = 1;
out:
	if (req != NULL)
		ptlrpc_req_finished(req);

	RETURN(rc);
}
Example #21
static int mdt_ck_thread_main(void *args)
{
	struct mdt_device      *mdt = args;
	struct ptlrpc_thread   *thread = &mdt->mdt_ck_thread;
	struct lustre_capa_key *bkey = &mdt->mdt_capa_keys[0],
			       *rkey = &mdt->mdt_capa_keys[1];
	struct lustre_capa_key *tmp;
	struct lu_env           env;
	struct mdt_thread_info *info;
	struct md_device       *next;
	struct l_wait_info      lwi = { 0 };
	mdsno_t                 mdsnum;
	int                     rc;
	ENTRY;

	unshare_fs_struct();
	cfs_block_allsigs();

	thread_set_flags(thread, SVC_RUNNING);
	wake_up(&thread->t_ctl_waitq);

	rc = lu_env_init(&env, LCT_MD_THREAD|LCT_REMEMBER|LCT_NOREF);
	if (rc)
		RETURN(rc);

	thread->t_env = &env;
	env.le_ctx.lc_thread = thread;
	env.le_ctx.lc_cookie = 0x1;

	info = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
	LASSERT(info != NULL);

	tmp = &info->mti_capa_key;
	mdsnum = mdt_seq_site(mdt)->ss_node_id;
	while (1) {
		l_wait_event(thread->t_ctl_waitq,
			     thread_is_stopping(thread) ||
			     thread_is_event(thread),
			     &lwi);

		if (thread_is_stopping(thread))
			break;
		thread_clear_flags(thread, SVC_EVENT);

		if (cfs_time_before(cfs_time_current(), mdt->mdt_ck_expiry))
			break;

		*tmp = *rkey;
		make_capa_key(tmp, mdsnum, rkey->lk_keyid);

		next = mdt->mdt_child;
		rc = next->md_ops->mdo_update_capa_key(&env, next, tmp);
		if (!rc) {
			spin_lock(&capa_lock);
			*bkey = *rkey;
			*rkey = *tmp;
			spin_unlock(&capa_lock);

			rc = write_capa_keys(&env, mdt, mdt->mdt_capa_keys);
			if (rc) {
				spin_lock(&capa_lock);
				*rkey = *bkey;
				memset(bkey, 0, sizeof(*bkey));
				spin_unlock(&capa_lock);
			} else {
				set_capa_key_expiry(mdt);
				DEBUG_CAPA_KEY(D_SEC, rkey, "new");
			}
		}
		if (rc) {
			DEBUG_CAPA_KEY(D_ERROR, rkey, "update failed for");
			/* next retry is in 300 sec */
			mdt->mdt_ck_expiry = jiffies + 300 * HZ;
		}

		cfs_timer_arm(&mdt->mdt_ck_timer, mdt->mdt_ck_expiry);
		CDEBUG(D_SEC, "mdt_ck_timer %lu\n", mdt->mdt_ck_expiry);
	}
	lu_env_fini(&env);

	thread_set_flags(thread, SVC_STOPPED);
	wake_up(&thread->t_ctl_waitq);
	RETURN(0);
}
Example #22
/**
 * Insert a range in the fld store.
 *
 *      \param  range  range to be inserted
 *      \param  th     transaction handle for this operation, as it may
 *                     be part of a compound transaction.
 *
 *      \retval  0    success
 *      \retval  -ve  error
 *
 * The whole fld index insertion is protected by seq->lss_mutex (see
 * seq_server_alloc_super), i.e. only one thread accesses the fldb at a
 * time, so we do not need to worry about the fld file and cache being
 * changed between declare and create.
 * Because fld entries can only grow incrementally, we only check
 * whether the new range can be merged from the left.
 **/
int fld_index_create(const struct lu_env *env, struct lu_server_fld *fld,
		     const struct lu_seq_range *new_range, struct thandle *th)
{
	struct lu_seq_range	*range;
	struct lu_seq_range	*tmp;
	struct fld_thread_info	*info;
	int			rc = 0;
	int			deleted = 0;
	struct fld_cache_entry	*flde;
	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);

	LASSERT_MUTEX_LOCKED(&fld->lsf_lock);

	range = &info->fti_lrange;
	memset(range, 0, sizeof(*range));
	tmp = &info->fti_irange;
	rc = fld_index_lookup(env, fld, new_range->lsr_start, range);
	if (rc != -ENOENT) {
		rc = rc == 0 ? -EEXIST : rc;
		GOTO(out, rc);
	}

	if (new_range->lsr_start == range->lsr_end && range->lsr_end != 0 &&
	    range_compare_loc(new_range, range) == 0) {
		range_cpu_to_be(tmp, range);
		rc = dt_delete(env, fld->lsf_obj,
			       (struct dt_key *)&tmp->lsr_start, th,
				BYPASS_CAPA);
		if (rc != 0)
			GOTO(out, rc);
		memcpy(tmp, new_range, sizeof(*new_range));
		tmp->lsr_start = range->lsr_start;
		deleted = 1;
	} else {
		memcpy(tmp, new_range, sizeof(*new_range));
	}

	range_cpu_to_be(tmp, tmp);
	rc = dt_insert(env, fld->lsf_obj, (struct dt_rec *)tmp,
		       (struct dt_key *)&tmp->lsr_start, th, BYPASS_CAPA, 1);
	if (rc != 0) {
		CERROR("%s: insert range "DRANGE" failed: rc = %d\n",
		       fld->lsf_name, PRANGE(new_range), rc);
		GOTO(out, rc);
	}

	flde = fld_cache_entry_create(new_range);
	if (IS_ERR(flde))
		GOTO(out, rc = PTR_ERR(flde));

	write_lock(&fld->lsf_cache->fci_lock);
	if (deleted)
		fld_cache_delete_nolock(fld->lsf_cache, new_range);
	rc = fld_cache_insert_nolock(fld->lsf_cache, flde);
	write_unlock(&fld->lsf_cache->fci_lock);
	if (rc)
		OBD_FREE_PTR(flde);
out:
	RETURN(rc);
}
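
A worked illustration of the merge-from-left branch above (values hypothetical):

/* Hypothetical values illustrating the merge-from-left branch above:
 *   existing record:  [0x200, 0x400), index 1
 *   new_range:        [0x400, 0x600), index 1  (same location)
 * The old record is deleted and a single merged record
 *   [0x200, 0x600), index 1
 * is inserted, and the cache is updated under fci_lock accordingly. */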
Example #23
/**
 * Get the ucred key if a session exists and the ucred key is allocated
 * on it; return NULL otherwise.
 */
struct lu_ucred *lu_ucred(const struct lu_env *env)
{
	if (!env->le_ses)
		return NULL;
	return lu_context_key_get(env->le_ses, &lu_ucred_key);
}
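
A minimal caller sketch (hypothetical): since lu_ucred() can return NULL, callers check the result before dereferencing:

/* Hypothetical caller sketch: lu_ucred() may return NULL. */
struct lu_ucred *uc = lu_ucred(env);

if (uc != NULL)
	CDEBUG(D_INFO, "uid: %u\n", uc->uc_uid);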