Example #1
/**
 * Fix the list by checking each new entry against the NEXT entry in order.
 */
static void fld_fix_new_list(struct fld_cache *cache)
{
	struct fld_cache_entry *f_curr;
	struct fld_cache_entry *f_next;
	struct lu_seq_range *c_range;
	struct lu_seq_range *n_range;
	struct list_head *head = &cache->fci_entries_head;

	ENTRY;

restart_fixup:

	list_for_each_entry_safe(f_curr, f_next, head, fce_list) {
		c_range = &f_curr->fce_range;
		n_range = &f_next->fce_range;

		LASSERT(lu_seq_range_is_sane(c_range));
		if (&f_next->fce_list == head)
			break;

		if (c_range->lsr_flags != n_range->lsr_flags)
			continue;

		LASSERTF(c_range->lsr_start <= n_range->lsr_start,
			 "cur lsr_start "DRANGE" next lsr_start "DRANGE"\n",
			 PRANGE(c_range), PRANGE(n_range));

		/* check merge possibility with next range */
		if (c_range->lsr_end == n_range->lsr_start) {
			if (c_range->lsr_index != n_range->lsr_index)
				continue;
			n_range->lsr_start = c_range->lsr_start;
			fld_cache_entry_delete(cache, f_curr);
			continue;
		}

		/* check if current range overlaps with next range. */
		if (n_range->lsr_start < c_range->lsr_end) {
			if (c_range->lsr_index == n_range->lsr_index) {
				n_range->lsr_start = c_range->lsr_start;
				n_range->lsr_end = max(c_range->lsr_end,
						       n_range->lsr_end);
				fld_cache_entry_delete(cache, f_curr);
			} else {
				if (n_range->lsr_end <= c_range->lsr_end) {
					*n_range = *c_range;
					fld_cache_entry_delete(cache, f_curr);
				} else {
					n_range->lsr_start = c_range->lsr_end;
				}
			}

			/* we could have overlap over next
			 * range too. better restart. */
			goto restart_fixup;
		}

		/* kill duplicates */
		if (c_range->lsr_start == n_range->lsr_start &&
		    c_range->lsr_end == n_range->lsr_end)
			fld_cache_entry_delete(cache, f_curr);
	}

	EXIT;
}
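
Below is a standalone model of the fixup pass above, using a start-sorted array instead of the kernel list. The range layout and the merge rules are simplified illustrations of the same idea, not the Lustre structures; only the contiguous-merge and exact-duplicate cases are modeled.

#include <stdio.h>

struct range {
	unsigned long long start;	/* inclusive */
	unsigned long long end;		/* exclusive */
	unsigned int index;		/* server owning the range */
};

/* delete entry i from a length-n array, returning the new length */
static int del(struct range *r, int n, int i)
{
	for (; i + 1 < n; i++)
		r[i] = r[i + 1];
	return n - 1;
}

/* merge contiguous neighbours and drop exact duplicates; returns new n */
static int fix_ranges(struct range *r, int n)
{
restart:
	for (int i = 0; i + 1 < n; i++) {
		struct range *c = &r[i], *x = &r[i + 1];

		/* contiguous ranges on the same server collapse into one */
		if (c->end == x->start && c->index == x->index) {
			x->start = c->start;
			n = del(r, n, i);
			goto restart;
		}

		/* exact duplicates: keep the later entry */
		if (c->start == x->start && c->end == x->end) {
			n = del(r, n, i);
			goto restart;
		}
	}
	return n;
}

int main(void)
{
	struct range r[] = {
		{ 0x10, 0x20, 0 }, { 0x20, 0x30, 0 }, { 0x30, 0x40, 1 },
	};
	int n = fix_ranges(r, 3);

	for (int i = 0; i < n; i++)
		printf("[%#llx-%#llx):%u\n", r[i].start, r[i].end, r[i].index);
	return 0;
}
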
Example #2
/* Allocate new sequence for client. */
static int seq_client_alloc_seq(const struct lu_env *env,
				struct lu_client_seq *seq, u64 *seqnr)
{
	int rc;
	ENTRY;

	LASSERT(lu_seq_range_is_sane(&seq->lcs_space));

	if (lu_seq_range_is_exhausted(&seq->lcs_space)) {
		rc = seq_client_alloc_meta(env, seq);
		if (rc) {
			CERROR("%s: Can't allocate new meta-sequence, rc = %d\n",
			       seq->lcs_name, rc);
			RETURN(rc);
		} else {
			CDEBUG(D_INFO, "%s: New range - "DRANGE"\n",
			       seq->lcs_name, PRANGE(&seq->lcs_space));
		}
	} else {
		rc = 0;
	}

	LASSERT(!lu_seq_range_is_exhausted(&seq->lcs_space));
	*seqnr = seq->lcs_space.lsr_start;
	seq->lcs_space.lsr_start += 1;

	CDEBUG(D_INFO, "%s: Allocated sequence [%#llx]\n", seq->lcs_name,
               *seqnr);

        RETURN(rc);
}
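
A minimal sketch of the consume-from-range pattern above: hand out the low end of the range and bump it, refilling when the range is exhausted. refill_range() is a hypothetical stand-in for seq_client_alloc_meta(), not the Lustre API.

#include <stdint.h>
#include <stdio.h>

struct seq_range {
	uint64_t start;	/* first free sequence */
	uint64_t end;	/* one past the last sequence in the range */
};

/* hypothetical stand-in: pretend the server granted 8 new sequences */
static int refill_range(struct seq_range *r)
{
	r->end = r->start + 8;
	return 0;
}

static int alloc_seq(struct seq_range *r, uint64_t *out)
{
	if (r->start == r->end) {	/* exhausted: ask for a new range */
		int rc = refill_range(r);

		if (rc)
			return rc;
	}
	*out = r->start++;		/* consume the lowest free sequence */
	return 0;
}

int main(void)
{
	struct seq_range r = { 0, 0 };
	uint64_t s;

	for (int i = 0; i < 10; i++)
		if (alloc_seq(&r, &s) == 0)
			printf("seq %#llx\n", (unsigned long long)s);
	return 0;
}
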
Example #3
static int __seq_server_alloc_meta(struct lu_server_seq *seq,
				   struct lu_seq_range *out,
				   const struct lu_env *env)
{
	struct lu_seq_range *space = &seq->lss_space;
	int rc = 0;

	ENTRY;

	LASSERT(lu_seq_range_is_sane(space));

	rc = seq_server_check_and_alloc_super(env, seq);
	if (rc < 0) {
		CERROR("%s: Allocated super-sequence failed: rc = %d\n",
			seq->lss_name, rc);
		RETURN(rc);
	}

	rc = range_alloc_set(env, out, seq);
	if (rc != 0) {
		CERROR("%s: Allocated meta-sequence failed: rc = %d\n",
			seq->lss_name, rc);
		RETURN(rc);
	}

	CDEBUG(D_INFO, "%s: Allocated meta-sequence " DRANGE"\n",
		seq->lss_name, PRANGE(out));

	RETURN(rc);
}
Example #4
static int __seq_server_alloc_super(struct lu_server_seq *seq,
				    struct lu_seq_range *out,
				    const struct lu_env *env)
{
	struct lu_seq_range *space = &seq->lss_space;
	int rc;
	ENTRY;

	LASSERT(lu_seq_range_is_sane(space));

	if (lu_seq_range_is_exhausted(space)) {
		CERROR("%s: Sequences space is exhausted\n",
		       seq->lss_name);
		RETURN(-ENOSPC);
	} else {
		range_alloc(out, space, seq->lss_width);
	}

	rc = seq_store_update(env, seq, out, 1 /* sync */);

	LCONSOLE_INFO("%s: super-sequence allocation rc = %d " DRANGE"\n",
		      seq->lss_name, rc, PRANGE(out));

	RETURN(rc);
}
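
range_alloc() itself is not shown in these examples; the following is a plausible minimal sketch of such a carve-out, assuming half-open [lsr_start, lsr_end) semantics (an illustration, not the Lustre implementation).

struct seq_range {
	unsigned long long start;	/* first free sequence */
	unsigned long long end;		/* one past the last sequence */
};

/* carve a width-sized sub-range off the low end of *space into *out */
static void range_carve(struct seq_range *out, struct seq_range *space,
			unsigned long long width)
{
	out->start = space->start;
	out->end = space->start + width;
	space->start += width;		/* shrink the remaining space */
}
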
Example #5
/*
 * This function implements the new seq allocation algorithm using async
 * updates to the seq file on disk. See bug 18857 for details.
 * The following variables keep track of this process:
 *
 * lss_space       - the available sequence space
 * lss_lowater_set - lu_seq_range for all seqs before the barrier, i.e.
 *                   safe to use
 * lss_hiwater_set - lu_seq_range after the barrier, i.e. allocated but
 *                   possibly not yet committed
 *
 * When lss_lowater_set is exhausted it is replaced with the hiwater one
 * and a write operation is initiated to allocate a new hiwater range.
 * If the last seq write operation is still not committed, the current
 * operation is flagged as a sync write op.
 */
static int range_alloc_set(const struct lu_env *env,
			   struct lu_seq_range *out,
			   struct lu_server_seq *seq)
{
	struct lu_seq_range *space = &seq->lss_space;
	struct lu_seq_range *loset = &seq->lss_lowater_set;
	struct lu_seq_range *hiset = &seq->lss_hiwater_set;
	int rc = 0;

	if (lu_seq_range_is_zero(loset))
		__seq_set_init(env, seq);

	if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_ALLOC)) /* exhaust set */
		loset->lsr_start = loset->lsr_end;

	if (lu_seq_range_is_exhausted(loset)) {
		/* reached high water mark. */
		struct lu_device *dev = seq->lss_site->ss_lu->ls_top_dev;
		int obd_num_clients = dev->ld_obd->obd_num_exports;
		__u64 set_sz;

		/* calculate new seq width based on number of clients */
		set_sz = max(seq->lss_set_width,
			     obd_num_clients * seq->lss_width);
		set_sz = min(lu_seq_range_space(space), set_sz);

		/* Switch to hiwater range now */
		*loset = *hiset;
		/* allocate new hiwater range */
		range_alloc(hiset, space, set_sz);

		/* update ondisk seq with new *space */
		rc = seq_store_update(env, seq, NULL, seq->lss_need_sync);
	}

	LASSERTF(!lu_seq_range_is_exhausted(loset) ||
		 lu_seq_range_is_sane(loset),
		 DRANGE"\n", PRANGE(loset));

	if (rc == 0)
		range_alloc(out, loset, seq->lss_width);

	RETURN(rc);
}
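
The scheme described in the comment can be modeled standalone as below. carve() and store_update() are hypothetical stand-ins for range_alloc() and seq_store_update(), and the refill width is arbitrary; this is a sketch under those assumptions, not the server code.

#include <stdio.h>

struct range { unsigned long long start, end; };

/* carve a width-sized sub-range off the low end of *space into *out */
static void carve(struct range *out, struct range *space,
		  unsigned long long width)
{
	out->start = space->start;
	out->end = space->start + width;
	space->start += width;
}

/* stands in for the async write of the new *space start to disk */
static int store_update(void)
{
	return 0;
}

static int alloc_from_set(struct range *space, struct range *lo,
			  struct range *hi, unsigned long long width,
			  struct range *out)
{
	if (lo->start == lo->end) {		/* low-water set exhausted */
		*lo = *hi;			/* switch to the pre-allocated set */
		carve(hi, space, width * 4);	/* refill the high-water set */
		if (store_update())
			return -1;
	}
	carve(out, lo, width);
	return 0;
}

int main(void)
{
	struct range space = { 0x100, 0x10000 };
	struct range lo, hi, out;

	carve(&lo, &space, 32);
	carve(&hi, &space, 32);
	for (int i = 0; i < 6; i++)
		if (alloc_from_set(&space, &lo, &hi, 8, &out) == 0)
			printf("got [%#llx-%#llx)\n", out.start, out.end);
	return 0;
}
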
Example #6
/**
 * Check if the sequence server has sequences available.
 *
 * Check if the sequence server has sequences available; if not, allocate
 * a super sequence from the sequence manager (MDT0).
 *
 * \param[in] env	execution environment
 * \param[in] seq	server sequence
 *
 * \retval		negative errno if allocating a new sequence fails
 * \retval		0 if there are enough sequences or allocating a
 *			new sequence succeeds
 */
int seq_server_check_and_alloc_super(const struct lu_env *env,
				     struct lu_server_seq *seq)
{
	struct lu_seq_range *space = &seq->lss_space;
	int rc = 0;

	ENTRY;

	/* Check if available space ends and allocate new super seq */
	if (lu_seq_range_is_exhausted(space)) {
		if (!seq->lss_cli) {
			CERROR("%s: No sequence controller is attached.\n",
			       seq->lss_name);
			RETURN(-ENODEV);
		}

		rc = seq_client_alloc_super(seq->lss_cli, env);
		if (rc) {
			CDEBUG(D_HA, "%s: Can't allocate super-sequence: rc = %d\n",
			       seq->lss_name, rc);
			RETURN(rc);
		}

		/* Saving new range to allocation space. */
		*space = seq->lss_cli->lcs_space;
		LASSERT(lu_seq_range_is_sane(space));
		if (seq->lss_cli->lcs_srv == NULL) {
			struct lu_server_fld *fld;

			/* Insert it to the local FLDB */
			fld = seq->lss_site->ss_server_fld;
			mutex_lock(&fld->lsf_lock);
			rc = fld_insert_entry(env, fld, space);
			mutex_unlock(&fld->lsf_lock);
		}
	}

	if (lu_seq_range_is_zero(&seq->lss_lowater_set))
		__seq_set_init(env, seq);

	RETURN(rc);
}
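
A hedged sketch of the same check-then-refill flow, with the FLDB insert done under a mutex as in the function above. fetch_super() and fldb_insert() are hypothetical stand-ins for seq_client_alloc_super() and fld_insert_entry().

#include <pthread.h>

struct seq_range {
	unsigned long long start;	/* first free sequence */
	unsigned long long end;		/* one past the last sequence */
};

static pthread_mutex_t fldb_lock = PTHREAD_MUTEX_INITIALIZER;

/* hypothetical stand-in for seq_client_alloc_super(): grant a new range */
static int fetch_super(struct seq_range *out)
{
	static unsigned long long next = 0x400;

	out->start = next;
	out->end = next + 0x100;
	next += 0x100;
	return 0;
}

/* hypothetical stand-in for fld_insert_entry(): record the range owner */
static int fldb_insert(const struct seq_range *r)
{
	(void)r;
	return 0;
}

/* refill *space from the controller when it is exhausted */
static int check_and_alloc(struct seq_range *space)
{
	int rc = 0;

	if (space->start == space->end) {
		rc = fetch_super(space);
		if (rc)
			return rc;

		pthread_mutex_lock(&fldb_lock);
		rc = fldb_insert(space);	/* publish to the local FLDB */
		pthread_mutex_unlock(&fldb_lock);
	}
	return rc;
}
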
Example #7
static int seq_client_rpc(struct lu_client_seq *seq,
                          struct lu_seq_range *output, __u32 opc,
                          const char *opcname)
{
	struct obd_export     *exp = seq->lcs_exp;
	struct ptlrpc_request *req;
	struct lu_seq_range   *out, *in;
	__u32                 *op;
	unsigned int           debug_mask;
	int                    rc;
	ENTRY;

	LASSERT(exp != NULL && !IS_ERR(exp));
	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
					LUSTRE_MDS_VERSION, SEQ_QUERY);
	if (req == NULL)
		RETURN(-ENOMEM);

	/* Init operation code */
	op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC);
	*op = opc;

	/* Zero out input range, this is not recovery yet. */
	in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
	lu_seq_range_init(in);

	ptlrpc_request_set_replen(req);

	in->lsr_index = seq->lcs_space.lsr_index;
	if (seq->lcs_type == LUSTRE_SEQ_METADATA)
		fld_range_set_mdt(in);
	else
		fld_range_set_ost(in);

	if (opc == SEQ_ALLOC_SUPER) {
		req->rq_request_portal = SEQ_CONTROLLER_PORTAL;
		req->rq_reply_portal = MDC_REPLY_PORTAL;
		/* During allocating super sequence for data object,
		 * the current thread might hold the export of MDT0(MDT0
		 * precreating objects on this OST), and it will send the
		 * request to MDT0 here, so we can not keep resending the
		 * request here, otherwise if MDT0 is failed(umounted),
		 * it can not release the export of MDT0 */
		if (seq->lcs_type == LUSTRE_SEQ_DATA)
			req->rq_no_delay = req->rq_no_resend = 1;
		debug_mask = D_CONSOLE;
	} else {
		if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
			req->rq_reply_portal = MDC_REPLY_PORTAL;
			req->rq_request_portal = SEQ_METADATA_PORTAL;
		} else {
			req->rq_reply_portal = OSC_REPLY_PORTAL;
			req->rq_request_portal = SEQ_DATA_PORTAL;
		}

		debug_mask = D_INFO;
	}

	/* Allow seq client RPC during recovery time. */
	req->rq_allow_replay = 1;

	ptlrpc_at_set_req_timeout(req);

	rc = ptlrpc_queue_wait(req);

	if (rc)
		GOTO(out_req, rc);

	out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
	*output = *out;

	if (!lu_seq_range_is_sane(output)) {
		CERROR("%s: Invalid range received from server: "
		       DRANGE"\n", seq->lcs_name, PRANGE(output));
		GOTO(out_req, rc = -EINVAL);
	}

	if (lu_seq_range_is_exhausted(output)) {
		CERROR("%s: Range received from server is exhausted: "
		       DRANGE"]\n", seq->lcs_name, PRANGE(output));
		GOTO(out_req, rc = -EINVAL);
	}

	CDEBUG_LIMIT(debug_mask, "%s: Allocated %s-sequence "DRANGE"\n",
		     seq->lcs_name, opcname, PRANGE(output));

	EXIT;
out_req:
	ptlrpc_req_finished(req);
	return rc;
}
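
A minimal sketch of the two validity checks applied to the reply above, assuming half-open [start, end) semantics; the real lu_seq_range_is_sane() and lu_seq_range_is_exhausted() operate on struct lu_seq_range, so this is an illustration only.

struct seq_range {
	unsigned long long start;	/* first free sequence */
	unsigned long long end;		/* one past the last sequence */
};

/* a range makes sense only if it does not end before it starts */
static int range_is_sane(const struct seq_range *r)
{
	return r->start <= r->end;
}

/* an exhausted range has no sequences left to hand out */
static int range_is_exhausted(const struct seq_range *r)
{
	return r->start == r->end;
}
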
Example #8
int seq_server_init(const struct lu_env *env,
		    struct lu_server_seq *seq,
		    struct dt_device *dev,
		    const char *prefix,
		    enum lu_mgr_type type,
		    struct seq_server_site *ss)
{
	int rc, is_srv = (type == LUSTRE_SEQ_SERVER);
	ENTRY;

	LASSERT(dev != NULL);
	LASSERT(prefix != NULL);
	LASSERT(ss != NULL);
	LASSERT(ss->ss_lu != NULL);

	/* A compile-time check for FIDs that used to be in lustre_idl.h
	 * but was moved here to remove CLASSERT/LASSERT in that header.
	 * Check that all lu_fid fields are converted in fid_cpu_to_le()
	 * and friends and that the compiler added no padding to the
	 * struct. */
	{
		struct lu_fid tst;

		CLASSERT(sizeof(tst) == sizeof(tst.f_seq) +
			 sizeof(tst.f_oid) + sizeof(tst.f_ver));
	}

	seq->lss_cli = NULL;
	seq->lss_type = type;
	seq->lss_site = ss;
	lu_seq_range_init(&seq->lss_space);

	lu_seq_range_init(&seq->lss_lowater_set);
	lu_seq_range_init(&seq->lss_hiwater_set);
	seq->lss_set_width = LUSTRE_SEQ_BATCH_WIDTH;

	mutex_init(&seq->lss_mutex);

	seq->lss_width = is_srv ?
		LUSTRE_SEQ_META_WIDTH : LUSTRE_SEQ_SUPER_WIDTH;

	snprintf(seq->lss_name, sizeof(seq->lss_name),
		 "%s-%s", (is_srv ? "srv" : "ctl"), prefix);

	rc = seq_store_init(seq, env, dev);
	if (rc)
		GOTO(out, rc);

	/* Request backing store for saved sequence info. */
	rc = seq_store_read(seq, env);
	if (rc == -ENODATA) {
		/* Nothing was read, initialize by the default value. */
		seq->lss_space = is_srv ?
			LUSTRE_SEQ_ZERO_RANGE :
			LUSTRE_SEQ_SPACE_RANGE;

		seq->lss_space.lsr_index = ss->ss_node_id;
		LCONSOLE_INFO("%s: No data found on store, initializing space\n",
			      seq->lss_name);

		rc = seq_store_update(env, seq, NULL, 0);
		if (rc) {
			CERROR("%s: Can't write space data, rc = %d\n",
			       seq->lss_name, rc);
		}
	} else if (rc) {
		CERROR("%s: Can't read space data, rc = %d\n",
		       seq->lss_name, rc);
		GOTO(out, rc);
	}

	if (is_srv) {
		LASSERT(lu_seq_range_is_sane(&seq->lss_space));
	} else {
		LASSERT(!lu_seq_range_is_zero(&seq->lss_space) &&
			lu_seq_range_is_sane(&seq->lss_space));
	}

	rc = seq_server_proc_init(seq);
	if (rc)
		GOTO(out, rc);

	EXIT;
out:
	if (rc)
		seq_server_fini(seq, env);
	return rc;
}
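
The CLASSERT above is Lustre's compile-time assertion; in standalone C11 the equivalent no-padding check can be written with _Static_assert. The field types below are assumptions chosen for illustration.

#include <stdint.h>

struct lu_fid_model {
	uint64_t f_seq;
	uint32_t f_oid;
	uint32_t f_ver;
};

/* fails to compile if the compiler inserted padding into the struct */
_Static_assert(sizeof(struct lu_fid_model) ==
	       sizeof(uint64_t) + sizeof(uint32_t) + sizeof(uint32_t),
	       "compiler added padding to struct lu_fid_model");
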