Example #1
File: m_grammar.c Project: Lundex/lima
string pluralize(string str)
{
  switch (str)
  {
    case "were": return "was";
    case "don't": return "doesn't";
    case "aren't": return "isn't";
    case "possum": return "possums";
    case "staff": return "staves";
    case "die": return "dies";
    case "laf": return "lafs";
    case "barf": return "barfs";
    case "snarf": return "snarfs";
    case "hum": return "hums";
  }

  if ( str[<2..<1] == "ff" )	/* e.g. "bluff" */
    return str + "s";
  // Temporary fix for MudOS v22.2b13 pluralizing elf as eves
  if ( str[<1..<1] == "f" )
  {
    LBUG(str);
    return str[0..<2] + "ves";
  }
  if ( str[<5..<1] == "penis" )
    return str + "es";

  return efun::pluralize(str);
}	
Example #2
static int osc_lock_unuse(const struct lu_env *env,
			  const struct cl_lock_slice *slice)
{
	struct osc_lock *ols = cl2osc_lock(slice);

	LINVRNT(osc_lock_invariant(ols));

	switch (ols->ols_state) {
	case OLS_NEW:
		LASSERT(!ols->ols_hold);
		LASSERT(ols->ols_agl);
		return 0;
	case OLS_UPCALL_RECEIVED:
		osc_lock_unhold(ols);
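		/* fall through */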
	case OLS_ENQUEUED:
		LASSERT(!ols->ols_hold);
		osc_lock_detach(env, ols);
		ols->ols_state = OLS_NEW;
		return 0;
	case OLS_GRANTED:
		LASSERT(!ols->ols_glimpse);
		LASSERT(ols->ols_hold);
		/*
		 * Move lock into OLS_RELEASED state before calling
		 * osc_cancel_base() so that possible synchronous cancellation
		 * (that always happens e.g., for liblustre) sees that lock is
		 * released.
		 */
		ols->ols_state = OLS_RELEASED;
		return osc_lock_unhold(ols);
	default:
		CERROR("Impossible state: %d\n", ols->ols_state);
		LBUG();
	}
}
Example #3
int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
		    void *lmm, int lmmsize, struct lookup_intent *it,
		    int flags, struct ptlrpc_request **reqp,
		    ldlm_blocking_callback cb_blocking,
		    __u64 extra_lock_flags)
{
	struct obd_device *obd = exp->exp_obd;
	int		rc;

	LASSERT(it != NULL);
	LASSERT(fid_is_sane(&op_data->op_fid1));

	CDEBUG(D_INODE, "INTENT LOCK '%s' for '%*s' on "DFID"\n",
	       LL_IT2STR(it), op_data->op_namelen, op_data->op_name,
	       PFID(&op_data->op_fid1));

	rc = lmv_check_connect(obd);
	if (rc)
		return rc;

	if (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_LAYOUT))
		rc = lmv_intent_lookup(exp, op_data, lmm, lmmsize, it,
				       flags, reqp, cb_blocking,
				       extra_lock_flags);
	else if (it->it_op & IT_OPEN)
		rc = lmv_intent_open(exp, op_data, lmm, lmmsize, it,
				     flags, reqp, cb_blocking,
				     extra_lock_flags);
	else
		LBUG();
	return rc;
}
Example #4
File: module.c Project: 020gzh/linux
static void
lnet_selftest_exit(void)
{
	int i;

	switch (lst_init_step) {
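	/* each case falls through, undoing the earlier init steps in reverse order */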
	case LST_INIT_CONSOLE:
		lstcon_console_fini();
	case LST_INIT_FW:
		sfw_shutdown();
	case LST_INIT_RPC:
		srpc_shutdown();
	case LST_INIT_WI_TEST:
		for (i = 0;
		     i < cfs_cpt_number(lnet_cpt_table()); i++) {
			if (!lst_sched_test[i])
				continue;
			cfs_wi_sched_destroy(lst_sched_test[i]);
		}
		LIBCFS_FREE(lst_sched_test,
			    sizeof(lst_sched_test[0]) *
			    cfs_cpt_number(lnet_cpt_table()));
		lst_sched_test = NULL;

	case LST_INIT_WI_SERIAL:
		cfs_wi_sched_destroy(lst_sched_serial);
		lst_sched_serial = NULL;
	case LST_INIT_NONE:
		break;
	default:
		LBUG();
	}
}
Example #5
static osd_obj_type_f osd_create_type_f(enum dt_format_type type)
{
	osd_obj_type_f result;

	switch (type) {
	case DFT_DIR:
		result = osd_mkdir;
		break;
	case DFT_INDEX:
		result = osd_mkidx;
		break;
	case DFT_REGULAR:
		result = osd_mkreg;
		break;
	case DFT_SYM:
		result = osd_mksym;
		break;
	case DFT_NODE:
		result = osd_mknod;
		break;
	default:
		LBUG();
		break;
	}
	return result;
}
Example #6
File: llog_lvfs.c Project: DCteam/lustre
static int llog_lvfs_next_block(struct llog_handle *loghandle, int *cur_idx,
                                int next_idx, __u64 *cur_offset, void *buf,
                                int len)
{
        LBUG();
        return 0;
}
Example #7
static int osd_dir_it_key_size(const struct lu_env *env, const struct dt_it *di)
{
	struct osd_zap_it *it = (struct osd_zap_it *)di;
	zap_attribute_t	  *za = &osd_oti_get(env)->oti_za;
	int		   rc;
	ENTRY;

	if (it->ozi_pos <= 1) {
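		/* ozi_pos 1 and 2 stand for the implicit "." and ".." entries */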
		it->ozi_pos = 1;
		RETURN(2);
	} else if (it->ozi_pos == 2) {
		RETURN(3);
	}

	if ((rc = -zap_cursor_retrieve(it->ozi_zc, za)) == 0)
		rc = strlen(za->za_name);

#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 3, 90, 0)
	if (rc == 0 && za->za_name[0] == '.') {
		if (za->za_name[1] == 0 || (za->za_name[1] == '.' &&
		    za->za_name[2] == 0)) {
			/* we should not get onto . and ..
			 * stored in the directory. ->next() and
			 * other methods should prevent this
			 */
			LBUG();
		}
	}
#endif
	RETURN(rc);
}
Example #8
int lov_read_and_clear_async_rc(struct cl_object *clob)
{
	struct lu_object *luobj;
	int rc = 0;

	luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
				 &lov_device_type);
	if (luobj != NULL) {
		struct lov_object *lov = lu2lov(luobj);

		lov_conf_freeze(lov);
		switch (lov->lo_type) {
		case LLT_RAID0: {
			struct lov_stripe_md *lsm;
			int i;

			lsm = lov->lo_lsm;
			LASSERT(lsm != NULL);
			for (i = 0; i < lsm->lsm_stripe_count; i++) {
				struct lov_oinfo *loi = lsm->lsm_oinfo[i];
				if (loi->loi_ar.ar_rc && !rc)
					rc = loi->loi_ar.ar_rc;
				loi->loi_ar.ar_rc = 0;
			}
		}
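			/* fall through */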
		case LLT_RELEASED:
		case LLT_EMPTY:
			break;
		default:
			LBUG();
		}
		lov_conf_thaw(lov);
	}
	return rc;
}
Example #9
File: events.c Project: IDM350/linux
void ptlrpc_ni_fini(void)
{
	wait_queue_head_t	 waitq;
	struct l_wait_info  lwi;
	int		 rc;
	int		 retries;

	/* Wait for the event queue to become idle since there may still be
	 * messages in flight with pending events (i.e. the fire-and-forget
	 * messages == client requests and "non-difficult" server
	 * replies */

	for (retries = 0;; retries++) {
		rc = LNetEQFree(ptlrpc_eq_h);
		switch (rc) {
		default:
			LBUG();

		case 0:
			LNetNIFini();
			return;

		case -EBUSY:
			if (retries != 0)
				CWARN("Event queue still busy\n");

			/* Wait for a bit */
			init_waitqueue_head(&waitq);
			lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
			l_wait_event(waitq, 0, &lwi);
			break;
		}
	}
	/* notreached */
}
Example #10
File: linux-proc.c Project: Abioy/kasan
static int libcfs_force_lbug(struct ctl_table *table, int write,
			     void __user *buffer,
			     size_t *lenp, loff_t *ppos)
{
	if (write)
		LBUG();
	return 0;
}
Example #11
File: llog_lvfs.c Project: DCteam/lustre
static int llog_lvfs_write_rec(struct llog_handle *loghandle,
                               struct llog_rec_hdr *rec,
                               struct llog_cookie *reccookie, int cookiecount,
                               void *buf, int idx)
{
        LBUG();
        return 0;
}
Example #12
File: mdt_lib.c Project: hpc/lustre
/* if object is dying, pack the lov/llog data,
 * parameter info->mti_attr should be valid at this point! */
int mdt_handle_last_unlink(struct mdt_thread_info *info, struct mdt_object *mo,
                           const struct md_attr *ma)
{
        struct mdt_body       *repbody;
        const struct lu_attr *la = &ma->ma_attr;
        int rc;
        ENTRY;

        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(repbody != NULL);

        if (ma->ma_valid & MA_INODE)
                mdt_pack_attr2body(info, repbody, la, mdt_object_fid(mo));

        if (ma->ma_valid & MA_LOV) {
                __u32 mode;

                if (mdt_object_exists(mo) < 0)
                        /* Remote object: the EA is not retrieved back,
                         * so treat it as a regular file for the unlink */
                        mode = S_IFREG;
                else
                        mode = lu_object_attr(&mo->mot_obj.mo_lu);

                LASSERT(ma->ma_lmm_size);
                mdt_dump_lmm(D_INFO, ma->ma_lmm);
                repbody->eadatasize = ma->ma_lmm_size;
                if (S_ISREG(mode))
                        repbody->valid |= OBD_MD_FLEASIZE;
                else if (S_ISDIR(mode))
                        repbody->valid |= OBD_MD_FLDIREA;
                else
                        LBUG();
        }

        if (ma->ma_cookie_size && (ma->ma_valid & MA_COOKIE)) {
                repbody->aclsize = ma->ma_cookie_size;
                repbody->valid |= OBD_MD_FLCOOKIE;
        }

        if (info->mti_mdt->mdt_opts.mo_oss_capa &&
            info->mti_exp->exp_connect_flags & OBD_CONNECT_OSS_CAPA &&
            repbody->valid & OBD_MD_FLEASIZE) {
                struct lustre_capa *capa;

                capa = req_capsule_server_get(info->mti_pill, &RMF_CAPA2);
                LASSERT(capa);
                capa->lc_opc = CAPA_OPC_OSS_DESTROY;
                rc = mo_capa_get(info->mti_env, mdt_object_child(mo), capa, 0);
                if (rc)
                        RETURN(rc);

                repbody->valid |= OBD_MD_FLOSSCAPA;
        }

        RETURN(0);
}
Example #13
/**
 * Companion of qsd_request_enter() dropping lqe_pending_req to 0.
 */
static inline void qsd_request_exit(struct lquota_entry *lqe)
{
	if (lqe->lqe_pending_req != 1) {
		LQUOTA_ERROR(lqe, "lqe_pending_req != 1!!!");
		LBUG();
	}
	lqe->lqe_pending_req--;
	lqe->lqe_pending_rel = 0;
	wake_up_all(&lqe->lqe_waiters);
}
Example #14
/**
 * Lock upcall function that is executed either when a reply to ENQUEUE rpc is
 * received from a server, or after osc_enqueue_base() matched a local DLM
 * lock.
 */
static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
			   int errcode)
{
	struct osc_lock         *oscl  = cookie;
	struct cl_lock_slice    *slice = &oscl->ols_cl;
	struct lu_env           *env;
	int			rc;

	ENTRY;

	env = cl_env_percpu_get();
	/* should never happen, similar to osc_ldlm_blocking_ast(). */
	LASSERT(!IS_ERR(env));

	rc = ldlm_error2errno(errcode);
	if (oscl->ols_state == OLS_ENQUEUED) {
		oscl->ols_state = OLS_UPCALL_RECEIVED;
	} else if (oscl->ols_state == OLS_CANCELLED) {
		rc = -EIO;
	} else {
		CERROR("Impossible state: %d\n", oscl->ols_state);
		LBUG();
	}

	if (rc == 0)
		osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);

	/* Error handling, some errors are tolerable. */
	if (oscl->ols_locklessable && rc == -EUSERS) {
		/* This is a tolerable error, turn this lock into
		 * lockless lock.
		 */
		osc_object_set_contended(cl2osc(slice->cls_obj));
		LASSERT(slice->cls_ops != oscl->ols_lockless_ops);

		/* Change this lock to ldlmlock-less lock. */
		osc_lock_to_lockless(env, oscl, 1);
		oscl->ols_state = OLS_GRANTED;
		rc = 0;
	} else if (oscl->ols_glimpse && rc == -ENAVAIL) {
		LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
		osc_lock_lvb_update(env, cl2osc(slice->cls_obj),
				    NULL, &oscl->ols_lvb);
		/* Hide the error. */
		rc = 0;
	} else if (rc < 0 && oscl->ols_flags & LDLM_FL_NDELAY) {
		rc = -EWOULDBLOCK;
	}

	if (oscl->ols_owner != NULL)
		cl_sync_io_note(env, oscl->ols_owner, rc);
	cl_env_percpu_put(env);

	RETURN(rc);
}
Example #15
static struct obd_capa *osd_capa_get(const struct lu_env *env,
				     struct dt_object *dt,
				     struct lustre_capa *old,
				     __u64 opc)
{
	struct osd_thread_info	*info = osd_oti_get(env);
	const struct lu_fid	*fid = lu_object_fid(&dt->do_lu);
	struct osd_object	*obj = osd_dt_obj(dt);
	struct osd_device	*dev = osd_obj2dev(obj);
	struct lustre_capa_key	*key = &info->oti_capa_key;
	struct lustre_capa	*capa = &info->oti_capa;
	struct obd_capa		*oc;
	int			 rc;
	ENTRY;

	if (!dev->od_fl_capa)
		RETURN(ERR_PTR(-ENOENT));

	LASSERT(dt_object_exists(dt));
	LASSERT(osd_invariant(obj));

	/* renewal sanity check */
	if (old && osd_object_auth(env, dt, old, opc))
		RETURN(ERR_PTR(-EACCES));

	capa->lc_fid = *fid;
	capa->lc_opc = opc;
	capa->lc_uid = 0;
	capa->lc_flags = dev->od_capa_alg << 24;
	capa->lc_timeout = dev->od_capa_timeout;
	capa->lc_expiry = 0;

	oc = capa_lookup(dev->od_capa_hash, capa, 1);
	if (oc) {
		LASSERT(!capa_is_expired(oc));
		RETURN(oc);
	}

	spin_lock(&capa_lock);
	*key = dev->od_capa_keys[1];
	spin_unlock(&capa_lock);

	capa->lc_keyid = key->lk_keyid;
	capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;

	rc = capa_hmac(capa->lc_hmac, capa, key->lk_key);
	if (rc) {
		DEBUG_CAPA(D_ERROR, capa, "HMAC failed: %d for", rc);
		LBUG();
		RETURN(ERR_PTR(rc));
	}

	oc = capa_add(dev->od_capa_hash, capa);
	RETURN(oc);
}
Example #16
int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
		    struct lookup_intent *it, struct ptlrpc_request **reqp,
		    ldlm_blocking_callback cb_blocking,
		    __u64 extra_lock_flags)
{
	struct obd_device *obd = exp->exp_obd;
	int		   rc;
	ENTRY;

	LASSERT(it != NULL);
	LASSERT(fid_is_sane(&op_data->op_fid1));

	CDEBUG(D_INODE, "INTENT LOCK '%s' for "DFID" '%.*s' on "DFID"\n",
		LL_IT2STR(it), PFID(&op_data->op_fid2),
		(int)op_data->op_namelen, op_data->op_name,
		PFID(&op_data->op_fid1));

	rc = lmv_check_connect(obd);
	if (rc)
		RETURN(rc);

	if (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_LAYOUT))
		rc = lmv_intent_lookup(exp, op_data, it, reqp, cb_blocking,
				       extra_lock_flags);
	else if (it->it_op & IT_OPEN)
		rc = lmv_intent_open(exp, op_data, it, reqp, cb_blocking,
				     extra_lock_flags);
	else
		LBUG();

	if (rc < 0) {
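		/* error path: drop any lock references taken while servicing the intent */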
		struct lustre_handle lock_handle;

		if (it->d.lustre.it_lock_mode != 0) {
			lock_handle.cookie = it->d.lustre.it_lock_handle;
			ldlm_lock_decref(&lock_handle,
					 it->d.lustre.it_lock_mode);
		}

		it->d.lustre.it_lock_handle = 0;
		it->d.lustre.it_lock_mode = 0;

		if (it->d.lustre.it_remote_lock_mode != 0) {
			lock_handle.cookie = it->d.lustre.it_remote_lock_handle;
			ldlm_lock_decref(&lock_handle,
					 it->d.lustre.it_remote_lock_mode);
		}

		it->d.lustre.it_remote_lock_handle = 0;
		it->d.lustre.it_remote_lock_mode = 0;
	}

	RETURN(rc);
}
Example #17
/* Unpack LOV object metadata from disk storage.  It is packed in LE byte
 * order and is opaque to the networking layer.
 */
int lov_unpackmd(struct obd_export *exp,  struct lov_stripe_md **lsmp,
                 struct lov_mds_md *lmm, int lmm_bytes)
{
        struct obd_device *obd = class_exp2obd(exp);
        struct lov_obd *lov = &obd->u.lov;
        int rc = 0, lsm_size;
        __u16 stripe_count;
        __u32 magic;
	__u32 pattern;
        ENTRY;

        /* If passed an MDS struct use values from there, otherwise defaults */
        if (lmm) {
                rc = lov_verify_lmm(lmm, lmm_bytes, &stripe_count);
                if (rc)
                        RETURN(rc);
                magic = le32_to_cpu(lmm->lmm_magic);
		pattern = le32_to_cpu(lmm->lmm_pattern);
        } else {
                magic = LOV_MAGIC;
                stripe_count = lov_get_stripecnt(lov, magic, 0);
		pattern = LOV_PATTERN_RAID0;
        }

        /* If we aren't passed an lsmp struct, we just want the size */
        if (!lsmp) {
                /* XXX LOV STACKING call into osc for sizes */
                LBUG();
                RETURN(lov_stripe_md_size(stripe_count));
        }
        /* If we are passed an allocated struct but nothing to unpack, free */
        if (*lsmp && !lmm) {
                lov_free_memmd(lsmp);
                RETURN(0);
        }

        lsm_size = lov_alloc_memmd(lsmp, stripe_count, pattern, magic);
        if (lsm_size < 0)
                RETURN(lsm_size);

        /* If we are passed a pointer but nothing to unpack, we only alloc */
        if (!lmm)
                RETURN(lsm_size);

        LASSERT(lsm_op_find(magic) != NULL);
        rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm);
        if (rc) {
                lov_free_memmd(lsmp);
                RETURN(rc);
        }

        RETURN(lsm_size);
}
Example #18
void
_kgnilnd_api_rc_lbug(const char* rcstr, int rc, struct libcfs_debug_msg_data *msgdata,
			const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	libcfs_debug_vmsg2(msgdata, fmt, args,
			   " GNI API violated? Unexpected rc %s(%d)!\n",
			   rcstr, rc);
	va_end(args);
	LBUG();
}
Example #19
/**
 * helper function bumping lqe_pending_req if there is no quota request in
 * flight for the lquota entry \a lqe. Otherwise, EBUSY is returned.
 */
static inline int qsd_request_enter(struct lquota_entry *lqe)
{
	/* is there already a quota request in flight? */
	if (lqe->lqe_pending_req != 0) {
		LQUOTA_DEBUG(lqe, "already a request in flight");
		return -EBUSY;
	}

	if (lqe->lqe_pending_rel != 0) {
		LQUOTA_ERROR(lqe, "no request in flight with pending_rel="LPU64,
			     lqe->lqe_pending_rel);
		LBUG();
	}

	lqe->lqe_pending_req++;
	return 0;
}
Example #20
File: lov_page.c Project: Chong-Li/cse522
static int lov_page_own(const struct lu_env *env,
			const struct cl_page_slice *slice, struct cl_io *io,
			int nonblock)
{
	struct lov_io     *lio = lov_env_io(env);
	struct lov_io_sub *sub;

	LINVRNT(lov_page_invariant(slice));
	LINVRNT(!cl2lov_page(slice)->lps_invalid);

	sub = lov_page_subio(env, lio, slice);
	if (!IS_ERR(sub)) {
		lov_sub_page(slice)->cp_owner = sub->sub_io;
		lov_sub_put(sub);
	} else
		LBUG(); /* Arrgh */
	return 0;
}
Example #21
File: skills.c Project: Lundex/lima
int chance_to_hit(object weapon, object target)
{
  string attack_skill = weapon->query_skill_used();
  string defend_skill = target->query_defend_skill_used();
  int attack_value = aggregate_skill(attack_skill);
  int defend_value = target->aggregate_skill(defend_skill);
  int res;

  // Skill tests to potentially increase the skills
  this_body()->test_skill(attack_skill, defend_value);
  target->test_skill(defend_skill, attack_value);

  // Calculate the value
  res = attack_value - defend_value + MAX_SKILL_VALUE;
  res = res * 100 / (2 * MAX_SKILL_VALUE);
  LBUG( res );
  return res;
}
Example #22
static
void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
{
	LASSERT(ctx->cc_sec);
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);

	cli_ctx_expire(ctx);

	spin_lock(&ctx->cc_sec->ps_lock);

	if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
		LASSERT(!hlist_unhashed(&ctx->cc_cache));
		LASSERT(atomic_read(&ctx->cc_refcount) > 1);

		hlist_del_init(&ctx->cc_cache);
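		/* drop the cache's reference; the LASSERT above guarantees it is not the last one */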
		if (atomic_dec_and_test(&ctx->cc_refcount))
			LBUG();
	}

	spin_unlock(&ctx->cc_sec->ps_lock);
}
Example #23
static int slp_page_init(const struct lu_env *env, struct cl_object *obj,
			struct cl_page *page, struct page *vmpage)
{
        struct ccc_page *cpg = cl_object_page_slice(obj, page);

        CLOBINVRNT(env, obj, ccc_object_invariant(obj));

	cpg->cpg_page = vmpage;

	if (page->cp_type == CPT_CACHEABLE) {
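		/* only transient pages are expected here; cacheable pages are not supported */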
		LBUG();
	} else {
		struct ccc_object *clobj = cl2ccc(obj);

		cl_page_slice_add(page, &cpg->cpg_cl, obj,
				&slp_transient_page_ops);
		clobj->cob_transient_pages++;
	}

        return 0;
}
Example #24
/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
	struct ldlm_namespace *ns;
	struct ldlm_request *dlm_req;
	struct ldlm_lock *lock;
	int rc;

	/* Requests arrive in sender's byte order.  The ptlrpc service
	 * handler has already checked and, if necessary, byte-swapped the
	 * incoming request message body, but I am responsible for the
	 * message buffers. */

	/* do nothing for sec context finalize */
	if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
		return 0;

	req_capsule_init(&req->rq_pill, req, RCL_SERVER);

	if (req->rq_export == NULL) {
		rc = ldlm_callback_reply(req, -ENOTCONN);
		ldlm_callback_errmsg(req, "Operate on unconnected server",
				     rc, NULL);
		return 0;
	}

	LASSERT(req->rq_export != NULL);
	LASSERT(req->rq_export->exp_obd != NULL);

	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case LDLM_BL_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
			return 0;
		break;
	case LDLM_CP_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
			return 0;
		break;
	case LDLM_GL_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
			return 0;
		break;
	case LDLM_SET_INFO:
		rc = ldlm_handle_setinfo(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
		CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
		req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
			return 0;
		rc = llog_origin_handle_cancel(req);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
			return 0;
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_CREATE:
		req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_open(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
		req_capsule_set(&req->rq_pill,
				&RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_next_block(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_READ_HEADER:
		req_capsule_set(&req->rq_pill,
				&RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_read_header(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case LLOG_ORIGIN_HANDLE_CLOSE:
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
			return 0;
		rc = llog_origin_handle_close(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case OBD_QC_CALLBACK:
		req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
			return 0;
		rc = ldlm_handle_qc_callback(req);
		ldlm_callback_reply(req, rc);
		return 0;
	default:
		CERROR("unknown opcode %u\n",
		       lustre_msg_get_opc(req->rq_reqmsg));
		ldlm_callback_reply(req, -EPROTO);
		return 0;
	}

	ns = req->rq_export->exp_obd->obd_namespace;
	LASSERT(ns != NULL);

	req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);

	dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
	if (dlm_req == NULL) {
		rc = ldlm_callback_reply(req, -EPROTO);
		ldlm_callback_errmsg(req, "Operate without parameter", rc,
				     NULL);
		return 0;
	}

	/* Force a known safe race, send a cancel to the server for a lock
	 * which the server has already started a blocking callback on. */
	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
		rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
		if (rc < 0)
			CERROR("ldlm_cli_cancel: %d\n", rc);
	}

	lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
	if (!lock) {
		CDEBUG(D_DLMTRACE, "callback on lock "LPX64" - lock "
		       "disappeared\n", dlm_req->lock_handle[0].cookie);
		rc = ldlm_callback_reply(req, -EINVAL);
		ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
				     &dlm_req->lock_handle[0]);
		return 0;
	}

	if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
		OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

	/* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
	lock_res_and_lock(lock);
	lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
					      LDLM_AST_FLAGS);
	if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
		/* If somebody cancels lock and cache is already dropped,
		 * or lock is failed before cp_ast received on client,
		 * we can tell the server we have no lock. Otherwise, we
		 * should send cancel after dropping the cache. */
		if (((lock->l_flags & LDLM_FL_CANCELING) &&
		    (lock->l_flags & LDLM_FL_BL_DONE)) ||
		    (lock->l_flags & LDLM_FL_FAILED)) {
			LDLM_DEBUG(lock, "callback on lock "
				   LPX64" - lock disappeared\n",
				   dlm_req->lock_handle[0].cookie);
			unlock_res_and_lock(lock);
			LDLM_LOCK_RELEASE(lock);
			rc = ldlm_callback_reply(req, -EINVAL);
			ldlm_callback_errmsg(req, "Operate on stale lock", rc,
					     &dlm_req->lock_handle[0]);
			return 0;
		}
		/* BL_AST locks are not needed in LRU.
		 * Let ldlm_cancel_lru() be fast. */
		ldlm_lock_remove_from_lru(lock);
		lock->l_flags |= LDLM_FL_BL_AST;
	}
	unlock_res_and_lock(lock);

	/* We want the ost thread to get this reply so that it can respond
	 * to ost requests (write cache writeback) that might be triggered
	 * in the callback.
	 *
	 * But we'd also like to be able to indicate in the reply that we're
	 * cancelling right now, because it's unused, or have an intent result
	 * in the reply, so we might have to push the responsibility for sending
	 * the reply down into the AST handlers, alas. */

	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case LDLM_BL_CALLBACK:
		CDEBUG(D_INODE, "blocking ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
		if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
			rc = ldlm_callback_reply(req, 0);
			if (req->rq_no_reply || rc)
				ldlm_callback_errmsg(req, "Normal process", rc,
						     &dlm_req->lock_handle[0]);
		}
		if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
			ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
		break;
	case LDLM_CP_CALLBACK:
		CDEBUG(D_INODE, "completion ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
		ldlm_callback_reply(req, 0);
		ldlm_handle_cp_callback(req, ns, dlm_req, lock);
		break;
	case LDLM_GL_CALLBACK:
		CDEBUG(D_INODE, "glimpse ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
		ldlm_handle_gl_callback(req, ns, dlm_req, lock);
		break;
	default:
		LBUG();			 /* checked above */
	}

	return 0;
}
Example #25
File: mdt_xattr.c Project: LLNL/lustre
int mdt_getxattr(struct mdt_thread_info *info)
{
        struct ptlrpc_request  *req = mdt_info_req(info);
        struct mdt_export_data *med = mdt_req2med(req);
        struct md_ucred        *uc  = mdt_ucred(info);
        struct mdt_body        *reqbody;
        struct mdt_body        *repbody = NULL;
        struct md_object       *next;
        struct lu_buf          *buf;
        __u32                   remote = exp_connect_rmtclient(info->mti_exp);
        __u32                   perm;
        int                     easize, rc;
        ENTRY;

        LASSERT(info->mti_object != NULL);
        LASSERT(lu_object_assert_exists(&info->mti_object->mot_obj.mo_lu));

        CDEBUG(D_INODE, "getxattr "DFID"\n", PFID(&info->mti_body->fid1));

        reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        if (reqbody == NULL)
                RETURN(err_serious(-EFAULT));

        rc = mdt_init_ucred(info, reqbody);
        if (rc)
                RETURN(err_serious(rc));

        next = mdt_object_child(info->mti_object);

        if (info->mti_body->valid & OBD_MD_FLRMTRGETFACL) {
                if (unlikely(!remote))
                        GOTO(out, rc = err_serious(-EINVAL));

                perm = mdt_identity_get_perm(uc->mu_identity, remote,
                                             req->rq_peer.nid);
                if (!(perm & CFS_RMTACL_PERM))
                        GOTO(out, rc = err_serious(-EPERM));

                rc = mo_permission(info->mti_env, NULL, next, NULL,
                                   MAY_RGETFACL);
                if (rc)
                        GOTO(out, rc = err_serious(rc));
        }

        easize = mdt_getxattr_pack_reply(info);
        if (easize < 0)
                GOTO(out, rc = err_serious(easize));

        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(repbody != NULL);

        /* No further getxattr is needed. */
        if (easize == 0 || reqbody->eadatasize == 0)
                GOTO(out, rc = easize);


        buf = &info->mti_buf;
        buf->lb_buf = req_capsule_server_get(info->mti_pill, &RMF_EADATA);
        buf->lb_len = easize;

        if (info->mti_body->valid & OBD_MD_FLXATTR) {
                int flags = CFS_IC_NOTHING;
                char *xattr_name = req_capsule_client_get(info->mti_pill,
                                                          &RMF_NAME);
                CDEBUG(D_INODE, "getxattr %s\n", xattr_name);

                rc = mo_xattr_get(info->mti_env, next, buf, xattr_name);
                if (rc < 0) {
                        CERROR("getxattr failed: %d\n", rc);
                        GOTO(out, rc);
                }

                if (info->mti_body->valid &
                    (OBD_MD_FLRMTLSETFACL | OBD_MD_FLRMTLGETFACL))
                        flags = CFS_IC_ALL;
                else if (info->mti_body->valid & OBD_MD_FLRMTRGETFACL)
                        flags = CFS_IC_MAPPED;

                if (rc > 0 && flags != CFS_IC_NOTHING) {
                        int rc1;

                        if (unlikely(!remote))
                                GOTO(out, rc = -EINVAL);

                        rc1 = lustre_posix_acl_xattr_id2client(uc,
                                        med->med_idmap,
                                        (posix_acl_xattr_header *)(buf->lb_buf),
                                        rc, flags);
                        if (unlikely(rc1 < 0))
                                rc = rc1;
                }
        } else if (info->mti_body->valid & OBD_MD_FLXATTRLS) {
                CDEBUG(D_INODE, "listxattr\n");

                rc = mo_xattr_list(info->mti_env, next, buf);
                if (rc < 0)
                        CDEBUG(D_INFO, "listxattr failed: %d\n", rc);
        } else
                LBUG();

        EXIT;
out:
        if (rc >= 0) {
                mdt_counter_incr(req->rq_export, LPROC_MDT_GETXATTR);
                repbody->eadatasize = rc;
                rc = 0;
        }
        mdt_exit_ucred(info);
        return rc;
}
Example #26
int mdt_getxattr(struct mdt_thread_info *info)
{
	struct ptlrpc_request  *req = mdt_info_req(info);
	struct mdt_export_data *med = mdt_req2med(req);
	struct lu_ucred        *uc  = lu_ucred(info->mti_env);
        struct mdt_body        *reqbody;
        struct mdt_body        *repbody = NULL;
        struct md_object       *next;
        struct lu_buf          *buf;
        __u32                   remote = exp_connect_rmtclient(info->mti_exp);
        __u32                   perm;
        int                     easize, rc;
	obd_valid		valid;
        ENTRY;

        LASSERT(info->mti_object != NULL);
	LASSERT(lu_object_assert_exists(&info->mti_object->mot_obj));

        CDEBUG(D_INODE, "getxattr "DFID"\n", PFID(&info->mti_body->fid1));

        reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
        if (reqbody == NULL)
                RETURN(err_serious(-EFAULT));

	rc = mdt_init_ucred(info, reqbody);
        if (rc)
                RETURN(err_serious(rc));

        next = mdt_object_child(info->mti_object);

        if (info->mti_body->valid & OBD_MD_FLRMTRGETFACL) {
                if (unlikely(!remote))
                        GOTO(out, rc = err_serious(-EINVAL));

		perm = mdt_identity_get_perm(uc->uc_identity, remote,
					     req->rq_peer.nid);
                if (!(perm & CFS_RMTACL_PERM))
                        GOTO(out, rc = err_serious(-EPERM));

                rc = mo_permission(info->mti_env, NULL, next, NULL,
                                   MAY_RGETFACL);
                if (rc)
                        GOTO(out, rc = err_serious(rc));
        }

        easize = mdt_getxattr_pack_reply(info);
        if (easize < 0)
                GOTO(out, rc = err_serious(easize));

        repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
        LASSERT(repbody != NULL);

        /* No further getxattr is needed. */
        if (easize == 0 || reqbody->eadatasize == 0)
                GOTO(out, rc = easize);

        buf = &info->mti_buf;
        buf->lb_buf = req_capsule_server_get(info->mti_pill, &RMF_EADATA);
        buf->lb_len = easize;

	valid = info->mti_body->valid & (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS);

	if (valid == OBD_MD_FLXATTR) {
		char *xattr_name = req_capsule_client_get(info->mti_pill,
							  &RMF_NAME);
		rc = mdt_getxattr_one(info, xattr_name, next, buf, med, uc);
	} else if (valid == OBD_MD_FLXATTRLS) {
		CDEBUG(D_INODE, "listxattr\n");

		rc = mo_xattr_list(info->mti_env, next, buf);
		if (rc < 0)
			CDEBUG(D_INFO, "listxattr failed: %d\n", rc);
	} else if (valid == OBD_MD_FLXATTRALL) {
		rc = mdt_getxattr_all(info, reqbody, repbody,
				      buf, next);
	} else
		LBUG();

	EXIT;
out:
	if (rc >= 0) {
		mdt_counter_incr(req, LPROC_MDT_GETXATTR);
		repbody->eadatasize = rc;
		rc = 0;
	}
	mdt_exit_ucred(info);
	return rc;
}
Example #27
/**
 * Implementation of the llog_operations::lop_write
 *
 * This function writes a new record to the llog or modifies an existing one.
 *
 * \param[in]  env		execution environment
 * \param[in]  loghandle	llog handle of the current llog
 * \param[in]  rec		llog record header. This is a real header of
 *				the full llog record to write. This is
 *				the beginning of buffer to write, the length
 *				of buffer is stored in \a rec::lrh_len
 * \param[out] reccookie	pointer to the cookie to return back if needed.
 *				It is used for further cancel of this llog
 *				record.
 * \param[in]  idx		index of the llog record. If \a idx == -1 then
 *				this is append case, otherwise \a idx is
 *				the index of record to modify
 * \param[in]  th		current transaction handle
 *
 * \retval			0 on successful write && \a reccookie == NULL
 *				1 on successful write && \a reccookie != NULL
 * \retval			negative error if write failed
 */
static int llog_osd_write_rec(const struct lu_env *env,
			      struct llog_handle *loghandle,
			      struct llog_rec_hdr *rec,
			      struct llog_cookie *reccookie,
			      int idx, struct thandle *th)
{
	struct llog_thread_info	*lgi = llog_info(env);
	struct llog_log_hdr	*llh;
	int			 reclen = rec->lrh_len;
	int			 index, rc;
	struct llog_rec_tail	*lrt;
	struct dt_object	*o;
	size_t			 left;
	bool			 header_is_updated = false;

	ENTRY;

	LASSERT(env);
	llh = loghandle->lgh_hdr;
	LASSERT(llh);
	o = loghandle->lgh_obj;
	LASSERT(o);
	LASSERT(th);

	CDEBUG(D_OTHER, "new record %x to "DFID"\n",
	       rec->lrh_type, PFID(lu_object_fid(&o->do_lu)));

	/* record length should not be bigger than LLOG_CHUNK_SIZE */
	if (reclen > LLOG_CHUNK_SIZE)
		RETURN(-E2BIG);

	rc = dt_attr_get(env, o, &lgi->lgi_attr, NULL);
	if (rc)
		RETURN(rc);

	/**
	 * The modification case.
	 * If idx set then the record with that index must be modified.
	 * There are three cases possible:
	 * 1) the common case is the llog header update (idx == 0)
	 * 2) the llog record modification during llog processing.
	 *    This is indicated by \a loghandle::lgh_cur_idx > 0.
	 *    In that case \a loghandle::lgh_cur_offset gives the offset
	 *    of the record to modify.
	 * 3) otherwise the llog is assumed to consist of fixed-size
	 *    records, i.e. a catalog. The llog header must have its
	 *    llh_size field equal to the record size, and the record
	 *    offset is calculated directly from the \a idx value.
	 *
	 * During modification we don't need extra header update because
	 * the bitmap and record count are not changed. The record header
	 * and tail remains the same too.
	 */
	if (idx != LLOG_NEXT_IDX) {
		/* llog can be empty only when first record is being written */
		LASSERT(ergo(idx > 0, lgi->lgi_attr.la_size > 0));

		if (!ext2_test_bit(idx, llh->llh_bitmap)) {
			CERROR("%s: modify unset record %u\n",
			       o->do_lu.lo_dev->ld_obd->obd_name, idx);
			RETURN(-ENOENT);
		}

		if (idx != rec->lrh_index) {
			CERROR("%s: modify index mismatch %d %u\n",
			       o->do_lu.lo_dev->ld_obd->obd_name, idx,
			       rec->lrh_index);
			RETURN(-EFAULT);
		}

		if (idx == LLOG_HEADER_IDX) {
			/* llog header update */
			LASSERT(reclen == sizeof(struct llog_log_hdr));
			LASSERT(rec == &llh->llh_hdr);

			lgi->lgi_off = 0;
			lgi->lgi_buf.lb_len = reclen;
			lgi->lgi_buf.lb_buf = rec;
			rc = dt_record_write(env, o, &lgi->lgi_buf,
					     &lgi->lgi_off, th);
			RETURN(rc);
		} else if (loghandle->lgh_cur_idx > 0) {
			/**
			 * The lgh_cur_offset can be used only if index is
			 * the same.
			 */
			if (idx != loghandle->lgh_cur_idx) {
				CERROR("%s: modify index mismatch %d %d\n",
				       o->do_lu.lo_dev->ld_obd->obd_name, idx,
				       loghandle->lgh_cur_idx);
				RETURN(-EFAULT);
			}

			lgi->lgi_off = loghandle->lgh_cur_offset;
			CDEBUG(D_OTHER, "modify record "DOSTID": idx:%d, "
			       "len:%u offset %llu\n",
			       POSTID(&loghandle->lgh_id.lgl_oi), idx,
			       rec->lrh_len, (long long)lgi->lgi_off);
		} else if (llh->llh_size > 0) {
			if (llh->llh_size != rec->lrh_len) {
				CERROR("%s: wrong record size, llh_size is %u"
				       " but record size is %u\n",
				       o->do_lu.lo_dev->ld_obd->obd_name,
				       llh->llh_size, rec->lrh_len);
				RETURN(-EINVAL);
			}
			lgi->lgi_off = sizeof(*llh) + (idx - 1) * reclen;
		} else {
			/* This can happen if lgh_cur_idx was not set during
			 * llog processing, or if llh_size is not set to the
			 * proper record size for a fixed-record llog. Either
			 * way it is impossible to get the record offset. */
			CERROR("%s: can't get record offset, idx:%d, "
			       "len:%u.\n", o->do_lu.lo_dev->ld_obd->obd_name,
			       idx, rec->lrh_len);
			RETURN(-EFAULT);
		}

		/* update only data, header and tail remain the same */
		lgi->lgi_off += sizeof(struct llog_rec_hdr);
		lgi->lgi_buf.lb_len = REC_DATA_LEN(rec);
		lgi->lgi_buf.lb_buf = REC_DATA(rec);
		rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
		if (rc == 0 && reccookie) {
			reccookie->lgc_lgl = loghandle->lgh_id;
			reccookie->lgc_index = idx;
			rc = 1;
		}
		RETURN(rc);
	}

	/**
	 * The append case.
	 * The most common case of using llog. The new index is assigned to
	 * the new record, new bit is set in llog bitmap and llog count is
	 * incremented.
	 *
	 * Make sure that records don't cross a chunk boundary, so we can
	 * process them page-at-a-time if needed.  If it will cross a chunk
	 * boundary, write in a fake (but referenced) entry to pad the chunk.
	 */
	LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
	lgi->lgi_off = lgi->lgi_attr.la_size;
	left = LLOG_CHUNK_SIZE - (lgi->lgi_off & (LLOG_CHUNK_SIZE - 1));
	/* NOTE: padding is a record, but no bit is set */
	if (left != 0 && left != reclen &&
	    left < (reclen + LLOG_MIN_REC_SIZE)) {
		index = loghandle->lgh_last_idx + 1;
		rc = llog_osd_pad(env, o, &lgi->lgi_off, left, index, th);
		if (rc)
			RETURN(rc);
		loghandle->lgh_last_idx++; /* for pad rec */
	}
	/* if it's the last idx in log file, then return -ENOSPC */
	if (loghandle->lgh_last_idx >= LLOG_BITMAP_SIZE(llh) - 1)
		RETURN(-ENOSPC);

	/* increment the last_idx along with llh_tail index, they should
	 * be equal for a llog lifetime */
	loghandle->lgh_last_idx++;
	index = loghandle->lgh_last_idx;
	llh->llh_tail.lrt_index = index;
	/**
	 * NB: the caller should make sure only 1 process access
	 * the lgh_last_idx, e.g. append should be exclusive.
	 * Otherwise it might hit the assert.
	 */
	LASSERT(index < LLOG_BITMAP_SIZE(llh));
	rec->lrh_index = index;
	lrt = rec_tail(rec);
	lrt->lrt_len = rec->lrh_len;
	lrt->lrt_index = rec->lrh_index;

	/* the lgh_hdr_lock protects llog header data from concurrent
	 * update/cancel, the llh_count and llh_bitmap are protected */
	spin_lock(&loghandle->lgh_hdr_lock);
	if (ext2_set_bit(index, llh->llh_bitmap)) {
		CERROR("%s: index %u already set in log bitmap\n",
		       o->do_lu.lo_dev->ld_obd->obd_name, index);
		spin_unlock(&loghandle->lgh_hdr_lock);
		LBUG(); /* should never happen */
	}
	llh->llh_count++;
	spin_unlock(&loghandle->lgh_hdr_lock);

	lgi->lgi_off = 0;
	lgi->lgi_buf.lb_len = llh->llh_hdr.lrh_len;
	lgi->lgi_buf.lb_buf = &llh->llh_hdr;
	rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
	if (rc)
		GOTO(out, rc);

	header_is_updated = true;
	rc = dt_attr_get(env, o, &lgi->lgi_attr, NULL);
	if (rc)
		GOTO(out, rc);

	LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
	lgi->lgi_off = lgi->lgi_attr.la_size;
	lgi->lgi_buf.lb_len = reclen;
	lgi->lgi_buf.lb_buf = rec;
	rc = dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
	if (rc < 0)
		GOTO(out, rc);

	CDEBUG(D_OTHER, "added record "DOSTID": idx: %u, %u\n",
	       POSTID(&loghandle->lgh_id.lgl_oi), index, rec->lrh_len);
	if (reccookie != NULL) {
		reccookie->lgc_lgl = loghandle->lgh_id;
		reccookie->lgc_index = index;
		if ((rec->lrh_type == MDS_UNLINK_REC) ||
		    (rec->lrh_type == MDS_SETATTR64_REC))
			reccookie->lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
		else if (rec->lrh_type == OST_SZ_REC)
			reccookie->lgc_subsys = LLOG_SIZE_ORIG_CTXT;
		else
			reccookie->lgc_subsys = -1;
		rc = 1;
	}
	RETURN(rc);
out:
	/* cleanup llog for error case */
	spin_lock(&loghandle->lgh_hdr_lock);
	ext2_clear_bit(index, llh->llh_bitmap);
	llh->llh_count--;
	spin_unlock(&loghandle->lgh_hdr_lock);

	/* restore llog last_idx */
	loghandle->lgh_last_idx--;
	llh->llh_tail.lrt_index = loghandle->lgh_last_idx;

	/* restore the header on disk if it was written */
	if (header_is_updated) {
		lgi->lgi_off = 0;
		lgi->lgi_buf.lb_len = llh->llh_hdr.lrh_len;
		lgi->lgi_buf.lb_buf = &llh->llh_hdr;
		dt_record_write(env, o, &lgi->lgi_buf, &lgi->lgi_off, th);
	}

	RETURN(rc);
}
Example #28
static int null_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        /* should never reach here */
        LBUG();
        return 0;
}
Example #29
File: remote_perm.c Project: 3null/linux
int lustre_check_remote_perm(struct inode *inode, int mask)
{
	struct ll_inode_info *lli = ll_i2info(inode);
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	struct ptlrpc_request *req = NULL;
	struct mdt_remote_perm *perm;
	struct obd_capa *oc;
	unsigned long save;
	int i = 0, rc;

	do {
		save = lli->lli_rmtperm_time;
		rc = do_check_remote_perm(lli, mask);
		if (!rc || (rc != -ENOENT && i))
			break;

		might_sleep();

		mutex_lock(&lli->lli_rmtperm_mutex);
		/* check again */
		if (save != lli->lli_rmtperm_time) {
			rc = do_check_remote_perm(lli, mask);
			if (!rc || (rc != -ENOENT && i)) {
				mutex_unlock(&lli->lli_rmtperm_mutex);
				break;
			}
		}

		if (i++ > 5) {
			CERROR("check remote perm falls in dead loop!\n");
			LBUG();
		}

		oc = ll_mdscapa_get(inode);
		rc = md_get_remote_perm(sbi->ll_md_exp, ll_inode2fid(inode), oc,
					ll_i2suppgid(inode), &req);
		capa_put(oc);
		if (rc) {
			mutex_unlock(&lli->lli_rmtperm_mutex);
			break;
		}

		perm = req_capsule_server_swab_get(&req->rq_pill, &RMF_ACL,
						   lustre_swab_mdt_remote_perm);
		if (unlikely(perm == NULL)) {
			mutex_unlock(&lli->lli_rmtperm_mutex);
			rc = -EPROTO;
			break;
		}

		rc = ll_update_remote_perm(inode, perm);
		mutex_unlock(&lli->lli_rmtperm_mutex);
		if (rc == -ENOMEM)
			break;

		ptlrpc_req_finished(req);
		req = NULL;
	} while (1);
	ptlrpc_req_finished(req);
	return rc;
}
Example #30
int llu_md_blocking_ast(struct ldlm_lock *lock,
                        struct ldlm_lock_desc *desc,
                        void *data, int flag)
{
        struct lustre_handle lockh;
        int rc;
        ENTRY;


        switch (flag) {
        case LDLM_CB_BLOCKING:
                ldlm_lock2handle(lock, &lockh);
		rc = ldlm_cli_cancel(&lockh, 0);
                if (rc < 0) {
                        CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
                        RETURN(rc);
                }
                break;
        case LDLM_CB_CANCELING: {
		struct inode *inode = llu_inode_from_resource_lock(lock);
                struct llu_inode_info *lli;
                struct intnl_stat *st;
                __u64 bits = lock->l_policy_data.l_inodebits.bits;
                struct lu_fid *fid;

		/* Inode is set to lock->l_resource->lr_lvb_inode
		* for mdc - bug 24555 */
		LASSERT(lock->l_ast_data == NULL);

                /* Invalidate all dentries associated with this inode */
                if (inode == NULL)
                        break;

                lli =  llu_i2info(inode);
                st = llu_i2stat(inode);

                if (bits & MDS_INODELOCK_UPDATE)
                        lli->lli_flags &= ~LLIF_MDS_SIZE_LOCK;

		fid = &lli->lli_fid;
		if (!fid_res_name_eq(fid, &lock->l_resource->lr_name))
			LDLM_ERROR(lock, "data mismatch with object "
				   DFID" (%p)", PFID(fid), inode);
                if (S_ISDIR(st->st_mode) &&
                    (bits & MDS_INODELOCK_UPDATE)) {
                        CDEBUG(D_INODE, "invalidating inode %llu\n",
                               (long long)st->st_ino);

                        llu_invalidate_inode_pages(inode);
                }

/*
                if (inode->i_sb->s_root &&
                    inode != inode->i_sb->s_root->d_inode)
                        ll_unhash_aliases(inode);
*/
                I_RELE(inode);
                break;
        }
        default:
                LBUG();
        }

        RETURN(0);
}