示例#1
0
/* Allocate new sequence for client. */
/* Hand out the next sequence number from the client's range, refilling
 * the range from the meta allocator first when it has run dry. */
static int seq_client_alloc_seq(const struct lu_env *env,
				struct lu_client_seq *seq, u64 *seqnr)
{
	struct lu_seq_range *space = &seq->lcs_space;
	int rc = 0;

	LASSERT(lu_seq_range_is_sane(space));

	if (lu_seq_range_is_exhausted(space)) {
		rc = seq_client_alloc_meta(env, seq);
		if (rc) {
			CERROR("%s: Can't allocate new meta-sequence, rc %d\n",
			       seq->lcs_name, rc);
			return rc;
		}
		CDEBUG(D_INFO, "%s: New range - " DRANGE "\n",
		       seq->lcs_name, PRANGE(space));
	}

	/* Range is guaranteed non-empty here; consume its first seq. */
	LASSERT(!lu_seq_range_is_exhausted(space));
	*seqnr = space->lsr_start;
	space->lsr_start += 1;

	CDEBUG(D_INFO, "%s: Allocated sequence [%#llx]\n", seq->lcs_name,
	       *seqnr);

	return rc;
}
示例#2
0
File: fid_store.c  Project: DCteam/lustre
/* Write the current sequence space to disk in little-endian format.
 * This function implies that the caller takes care about locking.
 * Returns 0 on success, -EIO on short write, or a negative errno from
 * the body-write method. */
int seq_store_write(struct lu_server_seq *seq,
                    const struct lu_env *env,
                    struct thandle *th)
{
        struct dt_object *dt_obj = seq->lss_obj;
        struct seq_thread_info *info;
        loff_t pos = 0;
        int rc;
        ENTRY;

        info = lu_context_key_get(&env->le_ctx, &seq_thread_key);
        LASSERT(info != NULL);

        /* Store ranges in le format. */
        range_cpu_to_le(&info->sti_space, &seq->lss_space);

        rc = dt_obj->do_body_ops->dbo_write(env, dt_obj,
                                            seq_store_buf(info),
                                            &pos, th, BYPASS_CAPA, 1);
        if (rc == sizeof(info->sti_space)) {
                CDEBUG(D_INFO, "%s: Space - "DRANGE"\n",
                       seq->lss_name, PRANGE(&seq->lss_space));
                rc = 0;
        } else if (rc >= 0) {
                /* Short write: treat as an I/O error. */
                rc = -EIO;
        }

        RETURN(rc);
}
示例#3
0
File: fld_index.c  Project: hfeeki/lustre
/**
 * Lookup the range for a given seq. Note here we only care about the
 * start/end; the caller should handle the attached location data
 * (flags, index).
 *
 * \param  seq     seq for lookup.
 * \param  range   result of lookup.
 *
 * \retval  0           found, \a range is the matched range;
 * \retval -ENOENT      not found, \a range is the left-side range;
 * \retval  -ve         other error;
 */
int fld_index_lookup(const struct lu_env *env, struct lu_server_fld *fld,
		     seqno_t seq, struct lu_seq_range *range)
{
        struct lu_seq_range     *fld_rec;
        struct fld_thread_info  *info;
        int rc;

        ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	fld_rec = &info->fti_rec;

	/* On a hit the cache fills fld_rec; the entry may still be a
	 * neighbouring range that does not contain @seq. */
	rc = fld_cache_lookup(fld->lsf_cache, seq, fld_rec);
	if (rc == 0) {
                *range = *fld_rec;
                if (range_within(range, seq))
                        rc = 0;
                else
                        rc = -ENOENT;
        }

        CDEBUG(D_INFO, "%s: lookup seq = "LPX64" range : "DRANGE" rc = %d\n",
               fld->lsf_name, seq, PRANGE(range), rc);

        RETURN(rc);
}
示例#4
0
/* Allocate the next meta-sequence range into @out, replenishing the
 * backing super-sequence first when required. */
static int __seq_server_alloc_meta(struct lu_server_seq *seq,
				   struct lu_seq_range *out,
				   const struct lu_env *env)
{
	int rc;

	ENTRY;

	LASSERT(lu_seq_range_is_sane(&seq->lss_space));

	/* Make sure a super-sequence backs the allocation before carving
	 * out a meta-sequence range. */
	rc = seq_server_check_and_alloc_super(env, seq);
	if (rc < 0) {
		CERROR("%s: Allocated super-sequence failed: rc = %d\n",
			seq->lss_name, rc);
		RETURN(rc);
	}

	rc = range_alloc_set(env, out, seq);
	if (rc != 0)
		CERROR("%s: Allocated meta-sequence failed: rc = %d\n",
			seq->lss_name, rc);
	else
		CDEBUG(D_INFO, "%s: Allocated meta-sequence " DRANGE"\n",
			seq->lss_name, PRANGE(out));

	RETURN(rc);
}
示例#5
0
/*
 * Allocate the exact sequence range @spec requested by the caller
 * (e.g. manual allocation during recovery after a disaster).
 * Returns 0 on success, -EINVAL for a malformed request, and -ENOSPC
 * when the requested range lies below the current free space.
 */
int seq_server_alloc_spec(struct lu_server_seq *seq,
			  struct lu_seq_range *spec,
			  const struct lu_env *env)
{
	struct lu_seq_range *space = &seq->lss_space;
	int rc = -ENOSPC;
	ENTRY;

	/*
	 * In some cases (like recovery after a disaster)
	 * we may need to allocate sequences manually
	 * Notice some sequences can be lost if requested
	 * range doesn't start at the beginning of current
	 * free space. Also notice it's not possible now
	 * to allocate sequences out of natural order.
	 */
	if (spec->lsr_start >= spec->lsr_end)
		RETURN(-EINVAL);
	if (spec->lsr_flags != LU_SEQ_RANGE_MDT &&
	    spec->lsr_flags != LU_SEQ_RANGE_OST)
		RETURN(-EINVAL);

	mutex_lock(&seq->lss_mutex);
	/* Only grant ranges at or above the current free space; anything
	 * below may already have been handed out. */
	if (spec->lsr_start >= space->lsr_start) {
		/* Skip free space up to the end of the granted range and
		 * persist the new state synchronously. */
		space->lsr_start = spec->lsr_end;
		rc = seq_store_update(env, seq, spec, 1 /* sync */);

		LCONSOLE_INFO("%s: "DRANGE" sequences allocated: rc = %d \n",
			      seq->lss_name, PRANGE(spec), rc);
	}
	mutex_unlock(&seq->lss_mutex);

	RETURN(rc);
}
示例#6
0
File: fid_store.c  Project: DCteam/lustre
/*
 * Read the on-disk sequence space into seq->lss_space. The caller takes
 * care of locking, or locking is not needed (init time).
 */
int seq_store_read(struct lu_server_seq *seq,
                   const struct lu_env *env)
{
        struct seq_thread_info *info;
        struct dt_object *dt_obj = seq->lss_obj;
        loff_t pos = 0;
        int rc;
        ENTRY;

        info = lu_context_key_get(&env->le_ctx, &seq_thread_key);
        LASSERT(info != NULL);

        rc = dt_obj->do_body_ops->dbo_read(env, dt_obj, seq_store_buf(info),
                                           &pos, BYPASS_CAPA);
        if (rc == sizeof(info->sti_space)) {
                /* Full record read: convert from on-disk le format. */
                range_le_to_cpu(&seq->lss_space, &info->sti_space);
                CDEBUG(D_INFO, "%s: Space - "DRANGE"\n",
                       seq->lss_name, PRANGE(&seq->lss_space));
                rc = 0;
        } else if (rc == 0) {
                /* Empty file: no sequence data stored yet. */
                rc = -ENODATA;
        } else if (rc > 0) {
                /* Partial read counts as an I/O error. */
                CERROR("%s: Read only %d bytes of %d\n", seq->lss_name,
                       rc, (int)sizeof(info->sti_space));
                rc = -EIO;
        }

        RETURN(rc);
}
示例#7
0
/**
 * Lookup sequence in local cache/fldb.
 *
 * \retval 0    found; \a range holds the matched entry
 * \retval -EIO cached entry's type conflicts with the requested type
 * \retval -ve  cache lookup error
 **/
int fld_local_lookup(const struct lu_env *env, struct lu_server_fld *fld,
		     u64 seq, struct lu_seq_range *range)
{
	struct lu_seq_range *erange;
	struct fld_thread_info *info;
	int rc;
	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
	LASSERT(info != NULL);
	erange = &info->fti_lrange;

	/* Lookup it in the cache. */
	rc = fld_cache_lookup(fld->lsf_cache, seq, erange);
	if (rc == 0) {
		/* The cached entry must match the requested range type,
		 * unless the caller accepts any type. */
		if (unlikely(fld_range_type(erange) != fld_range_type(range) &&
			     !fld_range_is_any(range))) {
			/* Message previously concatenated to
			 * "...does not matchrequested flag"; space added. */
			CERROR("%s: FLD cache range "DRANGE" does not match "
			       "requested flag %x: rc = %d\n", fld->lsf_name,
			       PRANGE(erange), range->lsr_flags, -EIO);
			RETURN(-EIO);
		}
		*range = *erange;
		RETURN(0);
	}
	RETURN(rc);
}
示例#8
0
File: fld_index.c  Project: hpc/lustre
/*
 * Lookup the fld index entry for @seq.
 *
 * \retval  0       found and @seq lies within the returned \a range
 * \retval -ENOENT  the returned entry does not cover @seq
 * \retval  -ve     other index lookup error
 */
int fld_index_lookup(struct lu_server_fld *fld,
                     const struct lu_env *env,
                     seqno_t seq,
                     struct lu_seq_range *range)
{
        struct dt_object        *dt_obj = fld->lsf_obj;
        struct lu_seq_range     *fld_rec;
        struct dt_key           *key = fld_key(env, seq);
        struct fld_thread_info  *info;
        int rc;

        ENTRY;

        info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
        fld_rec = &info->fti_rec;

        rc = dt_obj->do_index_ops->dio_lookup(env, dt_obj,
                                              (struct dt_rec*) fld_rec,
                                              key, BYPASS_CAPA);

        if (rc >= 0) {
                /* Index records are stored big-endian on disk. */
                range_be_to_cpu(fld_rec, fld_rec);
                *range = *fld_rec;
                if (range_within(range, seq))
                        rc = 0;
                else
                        rc = -ENOENT;
        }

        CDEBUG(D_INFO, "%s: lookup seq = "LPX64" range : "DRANGE" rc = %d\n",
               fld->lsf_name, seq, PRANGE(range), rc);

        RETURN(rc);
}
示例#9
0
/**
 * Handle an FLD lookup request. Every MDT server handles the fld lookup
 * operation, but only MDT0 has the fld index; if the entry is not found
 * in cache the lookup is forwarded to MDT0.
 */
static int fld_handle_lookup(struct tgt_session_info *tsi)
{
	struct lu_site		*ls;
	struct lu_server_fld	*srv_fld;
	struct lu_seq_range	*req_range;
	struct lu_seq_range	*rep_range;
	int			rc;

	ENTRY;

	req_range = req_capsule_client_get(tsi->tsi_pill, &RMF_FLD_MDFLD);
	if (req_range == NULL)
		RETURN(err_serious(-EPROTO));

	rc = req_capsule_server_pack(tsi->tsi_pill);
	if (unlikely(rc != 0))
		RETURN(err_serious(rc));

	rep_range = req_capsule_server_get(tsi->tsi_pill, &RMF_FLD_MDFLD);
	if (rep_range == NULL)
		RETURN(err_serious(-EPROTO));
	/* Seed the reply with the request so the requested type/flags are
	 * visible to the lookup. */
	*rep_range = *req_range;

	ls = tsi->tsi_exp->exp_obd->obd_lu_dev->ld_site;
	srv_fld = lu_site2seq(ls)->ss_server_fld;

	rc = fld_server_lookup(tsi->tsi_env, srv_fld, req_range->lsr_start,
			       rep_range);

	CDEBUG(D_INFO, "%s: FLD req handle: error %d (range: "DRANGE")\n",
	       srv_fld->lsf_name, rc, PRANGE(rep_range));

	RETURN(rc);
}
示例#10
0
File: fld_index.c  Project: DCteam/lustre
/* Insert @range into the fld index, keyed by its start seq.
 * Returns the index-insert result; the outcome is logged either way. */
int fld_index_create(struct lu_server_fld *fld,
                     const struct lu_env *env,
                     const struct lu_seq_range *range,
                     struct thandle *th)
{
        struct dt_object *dt_obj = fld->lsf_obj;
        seqno_t start;
        int rc;

        ENTRY;

        start = range->lsr_start;
        LASSERT(range_is_sane(range));

        /* Removed unused local dt_dev (assigned from lu2dt_dev() but
         * never read). */
        rc = dt_obj->do_index_ops->dio_insert(env, dt_obj,
                                              fld_rec(env, range),
                                              fld_key(env, start),
                                              th, BYPASS_CAPA, 1);

        CDEBUG(D_INFO, "%s: insert given range : "DRANGE" rc = %d\n",
               fld->lsf_name, PRANGE(range), rc);
        RETURN(rc);
}
示例#11
0
/* Insert the server's sequence range into the FLDB as an MDT range,
 * repairing filesystems whose FLDB was never populated. */
static int mdd_fill_fldb(const struct lu_env *env, struct mdd_device *mdd)
{
	struct seq_server_site *ss = mdd_seq_site(mdd);
	struct lu_seq_range range;
	int	rc;

	LASSERT(ss->ss_server_seq != NULL);
	LASSERT(ss->ss_server_fld != NULL);

	/* No sequence space was ever allocated: nothing to insert. */
	if (ss->ss_server_seq->lss_space.lsr_end == 0)
		return 0;

	memcpy(&range, &ss->ss_server_seq->lss_space, sizeof(range));

	/* Pre-existing ZFS does not insert any entries to FLDB, we need
	 * to insert it to FLDB during conversion */
	range.lsr_start = FID_SEQ_NORMAL;
	fld_range_set_mdt(&range);

	mutex_lock(&ss->ss_server_fld->lsf_lock);
	rc = fld_insert_entry(env, ss->ss_server_fld, &range);
	mutex_unlock(&ss->ss_server_fld->lsf_lock);

	/* Logged unconditionally; rc reports whether the insert worked. */
	LCONSOLE_INFO("%s: insert missing range "DRANGE"\n",
		      mdd2obd_dev(mdd)->obd_name, PRANGE(&range));
	return rc;
}
示例#12
0
File: fid_handler.c  Project: LLNL/lustre
/* Carve a super-sequence range out of the server's free space into @out
 * and persist the shrunken space synchronously. */
static int __seq_server_alloc_super(struct lu_server_seq *seq,
                                    struct lu_seq_range *out,
                                    const struct lu_env *env)
{
        struct lu_seq_range *space = &seq->lss_space;
        int rc;
        ENTRY;

        LASSERT(range_is_sane(space));

        /* Nothing left to hand out once the space is used up. */
        if (range_is_exhausted(space)) {
                CERROR("%s: Sequences space is exhausted\n",
                       seq->lss_name);
                RETURN(-ENOSPC);
        }

        range_alloc(out, space, seq->lss_width);

        rc = seq_store_update(env, seq, out, 1 /* sync */);

        CDEBUG(D_INFO, "%s: super-sequence allocation rc = %d "
               DRANGE"\n", seq->lss_name, rc, PRANGE(out));

        RETURN(rc);
}
示例#13
0
/* Allocate new sequence for client. */
static int seq_client_alloc_seq(const struct lu_env *env,
                                struct lu_client_seq *seq, seqno_t *seqnr)
{
        int rc;
        ENTRY;

        LASSERT(range_is_sane(&seq->lcs_space));

        /* Refill the range from the meta allocator when it is used up. */
        if (range_is_exhausted(&seq->lcs_space)) {
                rc = seq_client_alloc_meta(env, seq);
                if (rc) {
                        /* Literals previously concatenated to
                         * "meta-sequence,rc %d"; space restored. */
                        CERROR("%s: Can't allocate new meta-sequence, "
                               "rc %d\n", seq->lcs_name, rc);
                        RETURN(rc);
                } else {
                        CDEBUG(D_INFO, "%s: New range - "DRANGE"\n",
                               seq->lcs_name, PRANGE(&seq->lcs_space));
                }
        } else {
                rc = 0;
        }

        /* Hand out the first sequence of the (now non-empty) range. */
        LASSERT(!range_is_exhausted(&seq->lcs_space));
        *seqnr = seq->lcs_space.lsr_start;
        seq->lcs_space.lsr_start += 1;

        CDEBUG(D_INFO, "%s: Allocated sequence ["LPX64"]\n", seq->lcs_name,
               *seqnr);

        RETURN(rc);
}
示例#14
0
File: lproc_fid.c  Project: hpc/lustre
/* Format @range into @page for the legacy procfs read path; the whole
 * range fits in a single read, so EOF is always set. */
static int
seq_proc_read_common(char *page, char **start, off_t off,
                     int count, int *eof, void *data,
                     struct lu_seq_range *range)
{
	int rc;
	ENTRY;

        *eof = 1;
        rc = snprintf(page, count, "["LPX64" - "LPX64"]:%x:%x\n",
                      PRANGE(range));
	RETURN(rc);
}
/* seq_file show handler: print the client's current sequence space. */
static int
lprocfs_fid_space_seq_show(struct seq_file *m, void *unused)
{
	struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
	int rc;

	LASSERT(seq != NULL);

	/* Snapshot lcs_space under the mutex so the printed range is
	 * consistent. */
	mutex_lock(&seq->lcs_mutex);
	rc = seq_printf(m, "["LPX64" - "LPX64"]:%x:%s\n", PRANGE(&seq->lcs_space));
	mutex_unlock(&seq->lcs_mutex);

	return rc;
}
示例#16
0
/*
 * This function implements the new seq allocation algorithm using async
 * updates to the seq file on disk.  See bug 18857 for details.
 * There are three variables to keep track of this process:
 *
 * lss_space;       - available sequence space
 * lss_lowater_set; - lu_seq_range for all seqs before barrier, i.e. safe to use
 * lss_hiwater_set; - lu_seq_range after barrier, i.e. allocated but may be
 *                    not yet committed
 *
 * When lss_lowater_set reaches the end it is replaced with the hiwater one
 * and a write operation is initiated to allocate a new hiwater range.
 * If the last seq write operation is still not committed, the current
 * operation is flagged as a sync write op.
 */
static int range_alloc_set(const struct lu_env *env,
			   struct lu_seq_range *out,
			   struct lu_server_seq *seq)
{
	struct lu_seq_range *space = &seq->lss_space;
	struct lu_seq_range *loset = &seq->lss_lowater_set;
	struct lu_seq_range *hiset = &seq->lss_hiwater_set;
	int rc = 0;

	/* First use: initialize both water-mark sets. */
	if (lu_seq_range_is_zero(loset))
		__seq_set_init(env, seq);

	if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_ALLOC)) /* exhaust set */
		loset->lsr_start = loset->lsr_end;

	if (lu_seq_range_is_exhausted(loset)) {
		/* reached high water mark. */
		struct lu_device *dev = seq->lss_site->ss_lu->ls_top_dev;
		int obd_num_clients = dev->ld_obd->obd_num_exports;
		__u64 set_sz;

		/* calculate new seq width based on number of clients */
		set_sz = max(seq->lss_set_width,
			     obd_num_clients * seq->lss_width);
		/* never allocate beyond the remaining free space */
		set_sz = min(lu_seq_range_space(space), set_sz);

		/* Switch to hiwater range now */
		*loset = *hiset;
		/* allocate new hiwater range */
		range_alloc(hiset, space, set_sz);

		/* update ondisk seq with new *space */
		rc = seq_store_update(env, seq, NULL, seq->lss_need_sync);
	}

	LASSERTF(!lu_seq_range_is_exhausted(loset) ||
		 lu_seq_range_is_sane(loset),
		 DRANGE"\n", PRANGE(loset));

	/* Hand the caller a width-sized slice of the lowater set. */
	if (rc == 0)
		range_alloc(out, loset, seq->lss_width);

	RETURN(rc);
}
示例#17
0
File: fld_index.c  Project: hpc/lustre
/* Remove the fld index entry keyed by the start seq of @range. */
int fld_index_delete(struct lu_server_fld *fld,
                     const struct lu_env *env,
                     struct lu_seq_range *range,
                     struct thandle   *th)
{
        struct dt_object *index = fld->lsf_obj;
        int rc;

        ENTRY;

        rc = index->do_index_ops->dio_delete(env, index,
                                             fld_key(env, range->lsr_start),
                                             th, BYPASS_CAPA);

        CDEBUG(D_INFO, "%s: delete given range : "DRANGE" rc = %d\n",
               fld->lsf_name, PRANGE(range), rc);

        RETURN(rc);
}
示例#18
0
/* Allocate the next meta-sequence range into @out, requesting a fresh
 * super-sequence from the controller when the local space is used up. */
static int __seq_server_alloc_meta(struct lu_server_seq *seq,
				   struct lu_seq_range *out,
				   const struct lu_env *env)
{
	struct lu_seq_range *space = &seq->lss_space;
	int rc = 0;

	ENTRY;

	LASSERT(range_is_sane(space));

	/* Check if available space ends and allocate new super seq */
	if (range_is_exhausted(space)) {
		/* Without a controller client there is nobody to ask for
		 * a new super-sequence. */
		if (!seq->lss_cli) {
			CERROR("%s: No sequence controller is attached.\n",
			       seq->lss_name);
			RETURN(-ENODEV);
		}

		rc = seq_client_alloc_super(seq->lss_cli, env);
		if (rc) {
			CERROR("%s: Can't allocate super-sequence, rc %d\n",
			       seq->lss_name, rc);
			RETURN(rc);
		}

		/* Saving new range to allocation space. */
		*space = seq->lss_cli->lcs_space;
		LASSERT(range_is_sane(space));
	}

	/* Carve the next meta-sequence range out of the space. */
	rc = range_alloc_set(env, out, seq);
	if (rc != 0) {
		CERROR("%s: Allocated meta-sequence failed: rc = %d\n",
			seq->lss_name, rc);
		RETURN(rc);
	}

	CDEBUG(D_INFO, "%s: Allocated meta-sequence " DRANGE"\n",
		seq->lss_name, PRANGE(out));

	RETURN(rc);
}
/* Client side procfs stuff */
/* seq_file write handler: parse a new sequence space from userspace and
 * install it as lcs_space.  Returns @count on success, negative errno on
 * parse/validation failure. */
static ssize_t
lprocfs_fid_space_seq_write(struct file *file, const char *buffer,
			    size_t count, loff_t *off)
{
	struct lu_client_seq *seq = ((struct seq_file *)file->private_data)->private;
	int rc;

	LASSERT(seq != NULL);

	mutex_lock(&seq->lcs_mutex);
	rc = lprocfs_fid_write_common(buffer, count, &seq->lcs_space);
	if (rc == 0) {
		CDEBUG(D_INFO, "%s: Space: "DRANGE"\n",
		       seq->lcs_name, PRANGE(&seq->lcs_space));
	}
	mutex_unlock(&seq->lcs_mutex);

	/* Propagate failures to the writer instead of silently claiming
	 * the whole buffer was consumed. */
	return rc < 0 ? rc : count;
}
示例#20
0
File: lproc_fid.c  Project: hpc/lustre
/*
 * Server side procfs stuff.
 */
/* Legacy procfs write handler: update the server's sequence space from
 * userspace input, serialized by lss_sem. */
static int
seq_server_proc_write_space(struct file *file, const char *buffer,
                            unsigned long count, void *data)
{
        struct lu_server_seq *seq = (struct lu_server_seq *)data;
	int rc;
	ENTRY;

        LASSERT(seq != NULL);

        cfs_down(&seq->lss_sem);
	rc = seq_proc_write_common(file, buffer, count,
                                   data, &seq->lss_space);
	if (rc == 0) {
		CDEBUG(D_INFO, "%s: Space: "DRANGE"\n",
                       seq->lss_name, PRANGE(&seq->lss_space));
	}

        cfs_up(&seq->lss_sem);

        /* NOTE(review): errors from seq_proc_write_common are swallowed
         * and the full @count is reported regardless -- confirm intended. */
        RETURN(count);
}
示例#21
0
/*
 * Send a SEQ_QUERY RPC of type @opc to the sequence server and copy the
 * allocated range into @output.  Returns 0 on success, or a negative
 * errno on RPC failure or when the server returns an invalid or
 * exhausted range.
 */
static int seq_client_rpc(struct lu_client_seq *seq,
                          struct lu_seq_range *output, __u32 opc,
                          const char *opcname)
{
	struct obd_export     *exp = seq->lcs_exp;
	struct ptlrpc_request *req;
	struct lu_seq_range   *out, *in;
	__u32                 *op;
	unsigned int           debug_mask;
	int                    rc;
	ENTRY;

	LASSERT(exp != NULL && !IS_ERR(exp));
	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
					LUSTRE_MDS_VERSION, SEQ_QUERY);
	if (req == NULL)
		RETURN(-ENOMEM);

	/* Init operation code */
	op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC);
	*op = opc;

	/* Zero out input range, this is not recovery yet. */
	in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
	lu_seq_range_init(in);

	ptlrpc_request_set_replen(req);

	/* Tag the request with the client's index and range type. */
	in->lsr_index = seq->lcs_space.lsr_index;
	if (seq->lcs_type == LUSTRE_SEQ_METADATA)
		fld_range_set_mdt(in);
	else
		fld_range_set_ost(in);

	if (opc == SEQ_ALLOC_SUPER) {
		req->rq_request_portal = SEQ_CONTROLLER_PORTAL;
		req->rq_reply_portal = MDC_REPLY_PORTAL;
		/* During allocating super sequence for data object,
		 * the current thread might hold the export of MDT0(MDT0
		 * precreating objects on this OST), and it will send the
		 * request to MDT0 here, so we can not keep resending the
		 * request here, otherwise if MDT0 is failed(umounted),
		 * it can not release the export of MDT0 */
		if (seq->lcs_type == LUSTRE_SEQ_DATA)
			req->rq_no_delay = req->rq_no_resend = 1;
		/* Super-sequence allocations are logged at console level. */
		debug_mask = D_CONSOLE;
	} else {
		if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
			req->rq_reply_portal = MDC_REPLY_PORTAL;
			req->rq_request_portal = SEQ_METADATA_PORTAL;
		} else {
			req->rq_reply_portal = OSC_REPLY_PORTAL;
			req->rq_request_portal = SEQ_DATA_PORTAL;
		}

		debug_mask = D_INFO;
	}

	/* Allow seq client RPC during recovery time. */
	req->rq_allow_replay = 1;

	ptlrpc_at_set_req_timeout(req);

	rc = ptlrpc_queue_wait(req);

	if (rc)
		GOTO(out_req, rc);

	out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
	*output = *out;

	if (!lu_seq_range_is_sane(output)) {
		CERROR("%s: Invalid range received from server: "
		       DRANGE"\n", seq->lcs_name, PRANGE(output));
		GOTO(out_req, rc = -EINVAL);
	}

	/* NOTE(review): the stray "]" after DRANGE in the two messages
	 * below looks like a leftover typo -- confirm before changing. */
	if (lu_seq_range_is_exhausted(output)) {
		CERROR("%s: Range received from server is exhausted: "
		       DRANGE"]\n", seq->lcs_name, PRANGE(output));
		GOTO(out_req, rc = -EINVAL);
	}

	CDEBUG_LIMIT(debug_mask, "%s: Allocated %s-sequence "DRANGE"]\n",
		     seq->lcs_name, opcname, PRANGE(output));

	EXIT;
out_req:
	ptlrpc_req_finished(req);
	return rc;
}
示例#22
0
File: fid_request.c  Project: LLNL/lustre
/*
 * Send a SEQ_QUERY RPC of type @opc to the sequence server and copy the
 * allocated range into @output.  Returns 0 on success, or a negative
 * errno on RPC failure or when the server returns an invalid or
 * exhausted range.
 */
static int seq_client_rpc(struct lu_client_seq *seq,
                          struct lu_seq_range *output, __u32 opc,
                          const char *opcname)
{
        struct obd_export     *exp = seq->lcs_exp;
        struct ptlrpc_request *req;
        struct lu_seq_range   *out, *in;
        __u32                 *op;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
                                        LUSTRE_MDS_VERSION, SEQ_QUERY);
        if (req == NULL)
                RETURN(-ENOMEM);

        /* Init operation code */
        op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC);
        *op = opc;

        /* Zero out input range, this is not recovery yet. */
        in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
        range_init(in);

        ptlrpc_request_set_replen(req);

        /* Pick the request portal and range type from the client type. */
        if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
                req->rq_request_portal = SEQ_METADATA_PORTAL;
                in->lsr_flags = LU_SEQ_RANGE_MDT;
        } else {
                LASSERTF(seq->lcs_type == LUSTRE_SEQ_DATA,
                         "unknown lcs_type %u\n", seq->lcs_type);
                req->rq_request_portal = SEQ_DATA_PORTAL;
                in->lsr_flags = LU_SEQ_RANGE_OST;
        }

        if (opc == SEQ_ALLOC_SUPER) {
                /* Update index field of *in, it is required for
                 * FLD update on super sequence allocator node. */
                in->lsr_index = seq->lcs_space.lsr_index;
                req->rq_request_portal = SEQ_CONTROLLER_PORTAL;
        } else {
                /* Format fixed: the argument name was previously inside
                 * the string literal ("unknown opcode %u\n, opc"). */
                LASSERTF(opc == SEQ_ALLOC_META,
                         "unknown opcode %u\n", opc);
        }

        ptlrpc_at_set_req_timeout(req);

        mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
        rc = ptlrpc_queue_wait(req);
        mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);

        if (rc)
                GOTO(out_req, rc);

        out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
        *output = *out;

        if (!range_is_sane(output)) {
                CERROR("%s: Invalid range received from server: "
                       DRANGE"\n", seq->lcs_name, PRANGE(output));
                GOTO(out_req, rc = -EINVAL);
        }

        /* Unbalanced "]" after DRANGE removed from the messages below. */
        if (range_is_exhausted(output)) {
                CERROR("%s: Range received from server is exhausted: "
                       DRANGE"\n", seq->lcs_name, PRANGE(output));
                GOTO(out_req, rc = -EINVAL);
        }

        CDEBUG(D_INFO, "%s: Allocated %s-sequence "DRANGE"\n",
               seq->lcs_name, opcname, PRANGE(output));

        EXIT;
out_req:
        ptlrpc_req_finished(req);
        return rc;
}
示例#23
0
/**
 * fix list by checking new entry with NEXT entry in order.
 */
static void fld_fix_new_list(struct fld_cache *cache)
{
	struct fld_cache_entry *f_curr;
	struct fld_cache_entry *f_next;
	struct lu_seq_range *c_range;
	struct lu_seq_range *n_range;
	struct list_head *head = &cache->fci_entries_head;

restart_fixup:

	list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
		c_range = &f_curr->fce_range;
		n_range = &f_next->fce_range;

		LASSERT(lu_seq_range_is_sane(c_range));
		if (&f_next->fce_list == head)
			break;

		if (c_range->lsr_flags != n_range->lsr_flags)
			continue;

		LASSERTF(c_range->lsr_start <= n_range->lsr_start,
			 "cur lsr_start "DRANGE" next lsr_start "DRANGE"\n",
			 PRANGE(c_range), PRANGE(n_range));

		/* check merge possibility with next range */
		if (c_range->lsr_end == n_range->lsr_start) {
			if (c_range->lsr_index != n_range->lsr_index)
				continue;
			n_range->lsr_start = c_range->lsr_start;
			fld_cache_entry_delete(cache, f_curr);
			continue;
		}

		/* check if current range overlaps with next range. */
		if (n_range->lsr_start < c_range->lsr_end) {
			if (c_range->lsr_index == n_range->lsr_index) {
				n_range->lsr_start = c_range->lsr_start;
				n_range->lsr_end = max(c_range->lsr_end,
						       n_range->lsr_end);
				fld_cache_entry_delete(cache, f_curr);
			} else {
				if (n_range->lsr_end <= c_range->lsr_end) {
					*n_range = *c_range;
					fld_cache_entry_delete(cache, f_curr);
				} else {
					n_range->lsr_start = c_range->lsr_end;
				}
			}

			/* we could have overlap over next
			 * range too. better restart.
			 */
			goto restart_fixup;
		}

		/* kill duplicates */
		if (c_range->lsr_start == n_range->lsr_start &&
		    c_range->lsr_end == n_range->lsr_end)
			fld_cache_entry_delete(cache, f_curr);
	}
示例#24
0
File: fld_index.c  Project: hfeeki/lustre
/**
 * insert range in fld store.
 *
 *      \param  range  range to be inserted
 *      \param  th     transaction for this operation as it could compound
 *                     transaction.
 *
 *      \retval  0  success
 *      \retval  -ve error
 *
 * The whole fld index insertion is protected by seq->lss_mutex (see
 * seq_server_alloc_super), i.e. only one thread will access fldb each
 * time, so we do not need to worry about the fld file and cache being
 * changed between declare and create.
 * Because fld entries can only grow incrementally, we only check
 * whether the new range can be merged from the left.
 **/
int fld_index_create(const struct lu_env *env, struct lu_server_fld *fld,
		     const struct lu_seq_range *new_range, struct thandle *th)
{
	struct lu_seq_range	*range;
	struct lu_seq_range	*tmp;
	struct fld_thread_info	*info;
	int			rc = 0;
	int			deleted = 0;
	struct fld_cache_entry	*flde;
	ENTRY;

	info = lu_context_key_get(&env->le_ctx, &fld_thread_key);

	LASSERT_MUTEX_LOCKED(&fld->lsf_lock);

	range = &info->fti_lrange;
	memset(range, 0, sizeof(*range));
	tmp = &info->fti_irange;
	/* The new range must not already be present; on -ENOENT the
	 * lookup leaves the left neighbour in *range. */
	rc = fld_index_lookup(env, fld, new_range->lsr_start, range);
	if (rc != -ENOENT) {
		rc = rc == 0 ? -EEXIST : rc;
		GOTO(out, rc);
	}

	/* Merge with the left neighbour when the ranges are adjacent and
	 * share the same location: delete the old entry and extend the
	 * new one to cover both. */
	if (new_range->lsr_start == range->lsr_end && range->lsr_end != 0 &&
	    range_compare_loc(new_range, range) == 0) {
		range_cpu_to_be(tmp, range);
		rc = dt_delete(env, fld->lsf_obj,
			       (struct dt_key *)&tmp->lsr_start, th,
				BYPASS_CAPA);
		if (rc != 0)
			GOTO(out, rc);
		memcpy(tmp, new_range, sizeof(*new_range));
		tmp->lsr_start = range->lsr_start;
		deleted = 1;
	} else {
		memcpy(tmp, new_range, sizeof(*new_range));
	}

	/* Records are stored big-endian, keyed by lsr_start. */
	range_cpu_to_be(tmp, tmp);
	rc = dt_insert(env, fld->lsf_obj, (struct dt_rec *)tmp,
		       (struct dt_key *)&tmp->lsr_start, th, BYPASS_CAPA, 1);
	if (rc != 0) {
		CERROR("%s: insert range "DRANGE" failed: rc = %d\n",
		       fld->lsf_name, PRANGE(new_range), rc);
		GOTO(out, rc);
	}

	/* Mirror the update into the fld cache. */
	flde = fld_cache_entry_create(new_range);
	if (IS_ERR(flde))
		GOTO(out, rc = PTR_ERR(flde));

	write_lock(&fld->lsf_cache->fci_lock);
	if (deleted)
		fld_cache_delete_nolock(fld->lsf_cache, new_range);
	rc = fld_cache_insert_nolock(fld->lsf_cache, flde);
	write_unlock(&fld->lsf_cache->fci_lock);
	if (rc)
		OBD_FREE_PTR(flde);
out:
	RETURN(rc);
}