Example No. 1
/*
 * Finish the current sequence due to disconnect.
 * See mdc_import_event()
 */
void seq_client_flush(struct lu_client_seq *seq)
{
	wait_queue_t link;

	LASSERT(seq != NULL);
	init_waitqueue_entry(&link, current);
	mutex_lock(&seq->lcs_mutex);

	while (seq->lcs_update) {
		add_wait_queue(&seq->lcs_waitq, &link);
		set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&seq->lcs_mutex);

		schedule();

		mutex_lock(&seq->lcs_mutex);
		remove_wait_queue(&seq->lcs_waitq, &link);
		set_current_state(TASK_RUNNING);
	}

	fid_zero(&seq->lcs_fid);
	/*
	 * This id should not be used for seq range allocation;
	 * set to -1 for debug checks.
	 */
	seq->lcs_space.lsr_index = -1;

	lu_seq_range_init(&seq->lcs_space);
	mutex_unlock(&seq->lcs_mutex);
}
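
For readers less familiar with the kernel wait-queue idiom in seq_client_flush(), the following is a minimal user-space analogue of the same "drop the lock, sleep until the in-flight update finishes, retake the lock" drain pattern. It is only a sketch: the names are hypothetical and POSIX condition variables stand in for the kernel wait queue.

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical analogue of lu_client_seq's "update in progress" state. */
struct client_seq {
	pthread_mutex_t lock;     /* plays the role of lcs_mutex */
	pthread_cond_t  waitq;    /* plays the role of lcs_waitq */
	bool            updating; /* plays the role of lcs_update */
};

/* Wait until no update is in flight, then reset state under the lock. */
void client_seq_flush(struct client_seq *seq)
{
	pthread_mutex_lock(&seq->lock);
	while (seq->updating)
		pthread_cond_wait(&seq->waitq, &seq->lock);
	/* ... reset the cached fid/range here, as seq_client_flush() does ... */
	pthread_mutex_unlock(&seq->lock);
}

As in the kernel version, the wait primitive releases the lock while sleeping and re-acquires it before the loop re-checks the flag, so the flag is only ever examined under the lock.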
Example No. 2
static int seq_client_rpc(struct lu_client_seq *seq,
                          struct lu_seq_range *output, __u32 opc,
                          const char *opcname)
{
	struct obd_export     *exp = seq->lcs_exp;
	struct ptlrpc_request *req;
	struct lu_seq_range   *out, *in;
	__u32                 *op;
	unsigned int           debug_mask;
	int                    rc;
	ENTRY;

	LASSERT(exp != NULL && !IS_ERR(exp));
	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
					LUSTRE_MDS_VERSION, SEQ_QUERY);
	if (req == NULL)
		RETURN(-ENOMEM);

	/* Init operation code */
	op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC);
	*op = opc;

	/* Zero out input range, this is not recovery yet. */
	in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
	lu_seq_range_init(in);

	ptlrpc_request_set_replen(req);

	in->lsr_index = seq->lcs_space.lsr_index;
	if (seq->lcs_type == LUSTRE_SEQ_METADATA)
		fld_range_set_mdt(in);
	else
		fld_range_set_ost(in);

	if (opc == SEQ_ALLOC_SUPER) {
		req->rq_request_portal = SEQ_CONTROLLER_PORTAL;
		req->rq_reply_portal = MDC_REPLY_PORTAL;
		/* During allocating super sequence for data object,
		 * the current thread might hold the export of MDT0(MDT0
		 * precreating objects on this OST), and it will send the
		 * request to MDT0 here, so we can not keep resending the
		 * request here, otherwise if MDT0 is failed(umounted),
		 * it can not release the export of MDT0 */
		if (seq->lcs_type == LUSTRE_SEQ_DATA)
			req->rq_no_delay = req->rq_no_resend = 1;
		debug_mask = D_CONSOLE;
	} else {
		if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
			req->rq_reply_portal = MDC_REPLY_PORTAL;
			req->rq_request_portal = SEQ_METADATA_PORTAL;
		} else {
			req->rq_reply_portal = OSC_REPLY_PORTAL;
			req->rq_request_portal = SEQ_DATA_PORTAL;
		}

		debug_mask = D_INFO;
	}

	/* Allow seq client RPC during recovery time. */
	req->rq_allow_replay = 1;

	ptlrpc_at_set_req_timeout(req);

	rc = ptlrpc_queue_wait(req);

	if (rc)
		GOTO(out_req, rc);

	out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
	*output = *out;

	if (!lu_seq_range_is_sane(output)) {
		CERROR("%s: Invalid range received from server: "
		       DRANGE"\n", seq->lcs_name, PRANGE(output));
		GOTO(out_req, rc = -EINVAL);
	}

	if (lu_seq_range_is_exhausted(output)) {
		CERROR("%s: Range received from server is exhausted: "
		       DRANGE"]\n", seq->lcs_name, PRANGE(output));
		GOTO(out_req, rc = -EINVAL);
	}

	CDEBUG_LIMIT(debug_mask, "%s: Allocated %s-sequence "DRANGE"\n",
		     seq->lcs_name, opcname, PRANGE(output));

	EXIT;
out_req:
	ptlrpc_req_finished(req);
	return rc;
}
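
The two post-RPC checks above rely on lu_seq_range_is_sane() and lu_seq_range_is_exhausted() to reject a malformed or empty range returned by the server. A minimal sketch of what such checks look like on a simplified range structure follows; the field and helper names are illustrative, not the exact Lustre definitions.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for struct lu_seq_range: [start, end) of sequence numbers. */
struct seq_range {
	uint64_t start;
	uint64_t end;
};

/* A range is sane when it does not run backwards. */
static bool range_is_sane(const struct seq_range *r)
{
	return r->end >= r->start;
}

/* A range is exhausted when it has no sequences left to hand out. */
static bool range_is_exhausted(const struct seq_range *r)
{
	return r->start == r->end;
}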
Example No. 3
int seq_server_init(const struct lu_env *env,
		    struct lu_server_seq *seq,
		    struct dt_device *dev,
		    const char *prefix,
		    enum lu_mgr_type type,
		    struct seq_server_site *ss)
{
	int rc, is_srv = (type == LUSTRE_SEQ_SERVER);
	ENTRY;

	LASSERT(dev != NULL);
	LASSERT(prefix != NULL);
	LASSERT(ss != NULL);
	LASSERT(ss->ss_lu != NULL);

	/* A compile-time check for FIDs that used to be in lustre_idl.h
	 * but was moved here to remove CLASSERT/LASSERT from that header.
	 * Check that all lu_fid fields are covered by fid_cpu_to_le() and
	 * friends and that the compiler adds no padding to the struct. */
	{
		struct lu_fid tst;

		CLASSERT(sizeof(tst) == sizeof(tst.f_seq) +
			 sizeof(tst.f_oid) + sizeof(tst.f_ver));
	}

	seq->lss_cli = NULL;
	seq->lss_type = type;
	seq->lss_site = ss;
	lu_seq_range_init(&seq->lss_space);

	lu_seq_range_init(&seq->lss_lowater_set);
	lu_seq_range_init(&seq->lss_hiwater_set);
	seq->lss_set_width = LUSTRE_SEQ_BATCH_WIDTH;

	mutex_init(&seq->lss_mutex);

	seq->lss_width = is_srv ?
		LUSTRE_SEQ_META_WIDTH : LUSTRE_SEQ_SUPER_WIDTH;

	snprintf(seq->lss_name, sizeof(seq->lss_name),
		 "%s-%s", (is_srv ? "srv" : "ctl"), prefix);

	rc = seq_store_init(seq, env, dev);
	if (rc)
		GOTO(out, rc);

	/* Read saved sequence info from the backing store. */
	rc = seq_store_read(seq, env);
	if (rc == -ENODATA) {
		/* Nothing was read, initialize with default values. */
		seq->lss_space = is_srv ?
			LUSTRE_SEQ_ZERO_RANGE :
			LUSTRE_SEQ_SPACE_RANGE;

		seq->lss_space.lsr_index = ss->ss_node_id;
		LCONSOLE_INFO("%s: No data found on store, initializing space\n",
			      seq->lss_name);

		rc = seq_store_update(env, seq, NULL, 0);
		if (rc)
			CERROR("%s: Can't write space data, rc %d\n",
			       seq->lss_name, rc);
	} else if (rc) {
		CERROR("%s: Can't read space data, rc %d\n",
		       seq->lss_name, rc);
		GOTO(out, rc);
	}

	if (is_srv) {
		LASSERT(lu_seq_range_is_sane(&seq->lss_space));
	} else {
		LASSERT(!lu_seq_range_is_zero(&seq->lss_space) &&
			lu_seq_range_is_sane(&seq->lss_space));
	}

	rc = seq_server_proc_init(seq);
	if (rc)
		GOTO(out, rc);

	EXIT;
out:
	if (rc)
		seq_server_fini(seq, env);
	return rc;
}
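
The -ENODATA branch in seq_server_init() is an instance of a common "read persisted state, or fall back to defaults and write them back on first use" pattern. Below is a standalone sketch of just that control flow; the struct and the load/save/default helpers are hypothetical stubs, not Lustre APIs.

#include <errno.h>

/* Placeholder persistent state; only the control flow mirrors seq_server_init(). */
struct state {
	long value;
};

/* Hypothetical persistence helpers (stubbed out here). */
static int load_state(struct state *st)
{
	(void)st;
	return -ENODATA;	/* pretend nothing has been stored yet */
}

static int save_state(const struct state *st)
{
	(void)st;
	return 0;
}

static void set_default_state(struct state *st)
{
	st->value = 0;
}

/* Read persisted state; on first use fall back to defaults and write them back. */
static int init_state(struct state *st)
{
	int rc = load_state(st);

	if (rc == -ENODATA) {
		set_default_state(st);
		rc = save_state(st);
	}
	return rc;	/* any other error from load_state() is fatal */
}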