int llog_origin_handle_destroy(struct ptlrpc_request *req)
{
	struct llogd_body	*body;
	struct llog_logid	*logid = NULL;
	struct llog_ctxt	*ctxt;
	int			 rc;

	ENTRY;

	body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
	if (body == NULL)
		RETURN(err_serious(-EFAULT));

	rc = req_capsule_server_pack(&req->rq_pill);
	if (rc < 0)
		RETURN(err_serious(-ENOMEM));

	if (ostid_id(&body->lgd_logid.lgl_oi) > 0)
		logid = &body->lgd_logid;

	if (!(body->lgd_llh_flags & LLOG_F_IS_PLAIN))
		CERROR("%s: wrong llog flags %x\n",
		       req->rq_export->exp_obd->obd_name,
		       body->lgd_llh_flags);

	ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
	if (ctxt == NULL)
		RETURN(-ENODEV);

	rc = llog_erase(req->rq_svc_thread->t_env, ctxt, logid, NULL);
	llog_ctxt_put(ctxt);
	RETURN(rc);
}
/* Add log records for each OSC that this object is striped over, and return
 * cookies for each one.  We _would_ have nice abstraction here, except that
 * we need to keep cookies in stripe order, even if some are NULL, so that
 * the right cookies are passed back to the right OSTs at the client side.
 * Unset cookies should be all-zero (which will never occur naturally). */
static int lov_llog_origin_add(const struct lu_env *env,
			       struct llog_ctxt *ctxt,
			       struct llog_rec_hdr *rec,
			       struct lov_stripe_md *lsm,
			       struct llog_cookie *logcookies,
			       int numcookies)
{
	struct obd_device	*obd = ctxt->loc_obd;
	struct lov_obd		*lov = &obd->u.lov;
	int			 i, rc = 0, cookies = 0;

	ENTRY;

	LASSERTF(logcookies && numcookies >= lsm->lsm_stripe_count,
		 "logcookies %p, numcookies %d lsm->lsm_stripe_count %d\n",
		 logcookies, numcookies, lsm->lsm_stripe_count);

	for (i = 0; i < lsm->lsm_stripe_count; i++) {
		struct lov_oinfo *loi = lsm->lsm_oinfo[i];
		struct obd_device *child =
			lov->lov_tgts[loi->loi_ost_idx]->ltd_exp->exp_obd;
		struct llog_ctxt *cctxt = llog_get_context(child,
							   ctxt->loc_idx);

		/* fill MDS unlink/setattr log record */
		switch (rec->lrh_type) {
		case MDS_UNLINK_REC: {
			struct llog_unlink_rec *lur =
				(struct llog_unlink_rec *)rec;

			lur->lur_oid = ostid_id(&loi->loi_oi);
			lur->lur_oseq = (__u32)ostid_seq(&loi->loi_oi);
			break;
		}
		case MDS_SETATTR64_REC: {
			struct llog_setattr64_rec *lsr =
				(struct llog_setattr64_rec *)rec;

			lsr->lsr_oi = loi->loi_oi;
			break;
		}
		default:
			break;
		}

		/* inject error in llog_obd_add() below */
		if (OBD_FAIL_CHECK(OBD_FAIL_MDS_FAIL_LOV_LOG_ADD)) {
			llog_ctxt_put(cctxt);
			cctxt = NULL;
		}

		rc = llog_obd_add(env, cctxt, rec, NULL, logcookies + cookies,
				  numcookies - cookies);
		llog_ctxt_put(cctxt);
		if (rc < 0) {
			CERROR("Can't add llog (rc = %d) for stripe %d\n",
			       rc, cookies);
			memset(logcookies + cookies, 0,
			       sizeof(struct llog_cookie));
			rc = 1; /* skip this cookie */
		}
		/* Note that rc is always 1 if llog_obd_add() was successful */
		cookies += rc;
	}
	RETURN(cookies);
}
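/* The all-zero convention above lets a consumer of the returned cookie
 * array detect stripes whose record could not be logged.  A minimal
 * sketch; the helper name llog_cookie_is_zero() is illustrative only and
 * not part of the llog API. */
static inline bool llog_cookie_is_zero(const struct llog_cookie *lc)
{
	static const struct llog_cookie zero_cookie; /* all-zero by C rules */

	return memcmp(lc, &zero_cookie, sizeof(*lc)) == 0;
}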
/* Only open is supported; no new llog can be created remotely */
int llog_origin_handle_open(struct ptlrpc_request *req)
{
	struct obd_export	*exp = req->rq_export;
	struct obd_device	*obd = exp->exp_obd;
	struct llog_handle	*loghandle;
	struct llogd_body	*body;
	struct llog_logid	*logid = NULL;
	struct llog_ctxt	*ctxt;
	char			*name = NULL;
	int			 rc;

	ENTRY;

	body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
	if (body == NULL)
		RETURN(err_serious(-EFAULT));

	rc = req_capsule_server_pack(&req->rq_pill);
	if (rc)
		RETURN(err_serious(-ENOMEM));

	if (ostid_id(&body->lgd_logid.lgl_oi) > 0)
		logid = &body->lgd_logid;

	if (req_capsule_field_present(&req->rq_pill, &RMF_NAME, RCL_CLIENT)) {
		name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
		if (name == NULL)
			RETURN(-EFAULT);
		CDEBUG(D_INFO, "%s: opening log %s\n", obd->obd_name, name);
	}

	if (body->lgd_ctxt_idx >= LLOG_MAX_CTXTS) {
		CDEBUG(D_WARNING, "%s: bad ctxt ID: idx=%d name=%s\n",
		       obd->obd_name, body->lgd_ctxt_idx, name);
		RETURN(-EPROTO);
	}

	ctxt = llog_get_context(obd, body->lgd_ctxt_idx);
	if (ctxt == NULL) {
		CDEBUG(D_WARNING, "%s: no ctxt. group=%p idx=%d name=%s\n",
		       obd->obd_name, &obd->obd_olg, body->lgd_ctxt_idx,
		       name);
		RETURN(-ENODEV);
	}

	rc = llog_open(req->rq_svc_thread->t_env, ctxt, &loghandle, logid,
		       name, LLOG_OPEN_EXISTS);
	if (rc)
		GOTO(out_ctxt, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
	body->lgd_logid = loghandle->lgh_id;

	llog_origin_close(req->rq_svc_thread->t_env, loghandle);
	EXIT;
out_ctxt:
	llog_ctxt_put(ctxt);
	return rc;
}
/**
 * Allocate FIDs for precreation.
 *
 * rc = 0   Success, @grow is the number of FIDs actually allocated.
 * rc = 1   The current sequence is used up.
 * rc < 0   Other error.
 */
static int osp_precreate_fids(const struct lu_env *env,
			      struct osp_device *osp,
			      struct lu_fid *fid, int *grow)
{
	struct osp_thread_info	*osi = osp_env_info(env);
	__u64			 end;

	if (fid_is_idif(fid)) {
		struct lu_fid	*last_fid;
		struct ost_id	*oi = &osi->osi_oi;

		spin_lock(&osp->opd_pre_lock);
		last_fid = &osp->opd_pre_last_created_fid;
		fid_to_ostid(last_fid, oi);
		end = min(ostid_id(oi) + *grow, IDIF_MAX_OID);
		*grow = end - ostid_id(oi);
		ostid_set_id(oi, ostid_id(oi) + *grow);
		spin_unlock(&osp->opd_pre_lock);

		if (*grow == 0)
			return 1;

		ostid_to_fid(fid, oi, osp->opd_index);
		return 0;
	}

	spin_lock(&osp->opd_pre_lock);
	*fid = osp->opd_pre_last_created_fid;
	end = fid->f_oid;
	end = min((end + *grow), (__u64)LUSTRE_DATA_SEQ_MAX_WIDTH);
	*grow = end - fid->f_oid;
	fid->f_oid += end - fid->f_oid;
	spin_unlock(&osp->opd_pre_lock);

	CDEBUG(D_INFO, "Allocated %d FIDs ["DFID" -- "DFID"]\n",
	       *grow, PFID(fid), PFID(&osp->opd_pre_last_created_fid));

	return *grow > 0 ? 0 : 1;
}
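/* A hedged sketch of how a caller might consume the return convention of
 * osp_precreate_fids(): rc == 0 means @grow FIDs were handed out starting
 * at *fid, rc == 1 means the current sequence is exhausted and a new one
 * must be obtained before retrying.  osp_request_new_sequence() is a
 * hypothetical name used purely for illustration. */
static int osp_alloc_fids_sketch(const struct lu_env *env,
				 struct osp_device *osp,
				 struct lu_fid *fid, int wanted)
{
	int grow = wanted;
	int rc;

	rc = osp_precreate_fids(env, osp, fid, &grow);
	if (rc == 1)
		/* sequence used up: get a new one, then retry allocation */
		rc = osp_request_new_sequence(env, osp); /* hypothetical */
	else if (rc == 0 && grow < wanted)
		CDEBUG(D_INFO, "allocated %d of %d requested FIDs\n",
		       grow, wanted);
	return rc;
}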
static inline int osp_objs_precreated(const struct lu_env *env,
				      struct osp_device *osp)
{
	struct lu_fid *fid1 = &osp->opd_pre_last_created_fid;
	struct lu_fid *fid2 = &osp->opd_pre_used_fid;

	LASSERTF(fid_seq(fid1) == fid_seq(fid2),
		 "Created fid "DFID" Next fid "DFID"\n",
		 PFID(fid1), PFID(fid2));

	if (fid_is_idif(fid1)) {
		struct ost_id *oi1 = &osp_env_info(env)->osi_oi;
		struct ost_id *oi2 = &osp_env_info(env)->osi_oi2;

		LASSERT(fid_is_idif(fid1) && fid_is_idif(fid2));
		fid_to_ostid(fid1, oi1);
		fid_to_ostid(fid2, oi2);
		LASSERT(ostid_id(oi1) >= ostid_id(oi2));

		return ostid_id(oi1) - ostid_id(oi2);
	}

	return fid_oid(fid1) - fid_oid(fid2);
}
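/* Worked example of the arithmetic above, assuming a regular (non-IDIF)
 * sequence: if opd_pre_last_created_fid has f_oid == 150 and
 * opd_pre_used_fid has f_oid == 142, then 150 - 142 == 8 objects have
 * been precreated on the OST but not yet handed out. */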
int lov_setea(struct obd_export *exp, struct lov_stripe_md **lsmp,
	      struct lov_user_md *lump)
{
	int			 i;
	int			 rc;
	struct obd_export	*oexp;
	struct lov_obd		*lov = &exp->exp_obd->u.lov;
	obd_id			 last_id = 0;
	struct lov_user_ost_data_v1 *lmm_objects;

	ENTRY;

	if (lump->lmm_magic == LOV_USER_MAGIC_V3)
		lmm_objects = ((struct lov_user_md_v3 *)lump)->lmm_objects;
	else
		lmm_objects = lump->lmm_objects;

	for (i = 0; i < lump->lmm_stripe_count; i++) {
		__u32 len = sizeof(last_id);

		oexp = lov->lov_tgts[lmm_objects[i].l_ost_idx]->ltd_exp;
		rc = obd_get_info(NULL, oexp, sizeof(KEY_LAST_ID),
				  KEY_LAST_ID, &len, &last_id, NULL);
		if (rc)
			RETURN(rc);
		if (ostid_id(&lmm_objects[i].l_ost_oi) > last_id) {
			CERROR("Setting EA for object with ID larger than "
			       "last id on ost idx %d "DOSTID" > "LPD64"\n",
			       lmm_objects[i].l_ost_idx,
			       POSTID(&lmm_objects[i].l_ost_oi), last_id);
			RETURN(-EINVAL);
		}
	}

	rc = lov_setstripe(exp, 0, lsmp, lump);
	if (rc)
		RETURN(rc);

	for (i = 0; i < lump->lmm_stripe_count; i++) {
		(*lsmp)->lsm_oinfo[i]->loi_ost_idx = lmm_objects[i].l_ost_idx;
		(*lsmp)->lsm_oinfo[i]->loi_oi = lmm_objects[i].l_ost_oi;
	}
	RETURN(0);
}
/* Pack LOV object metadata for disk storage.  It is packed in LE byte
 * order and is opaque to the networking layer.
 *
 * XXX In the future, this will be enhanced to get the EA size from the
 *     underlying OSC device(s) so we can stack LOVs properly.  For now
 *     lov_mds_md_size() just assumes one obd_id per stripe.
 */
int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
	       struct lov_stripe_md *lsm)
{
	struct obd_device	*obd = class_exp2obd(exp);
	struct lov_obd		*lov = &obd->u.lov;
	struct lov_mds_md_v1	*lmmv1;
	struct lov_mds_md_v3	*lmmv3;
	__u16			 stripe_count;
	struct lov_ost_data_v1	*lmm_objects;
	int			 lmm_size, lmm_magic;
	int			 i;
	int			 cplen = 0;

	if (lsm) {
		lmm_magic = lsm->lsm_magic;
	} else {
		if (lmmp && *lmmp)
			lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
		else
			/* lsm == NULL and lmmp == NULL */
			lmm_magic = LOV_MAGIC;
	}

	if ((lmm_magic != LOV_MAGIC_V1) && (lmm_magic != LOV_MAGIC_V3)) {
		CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
		       lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
		return -EINVAL;
	}

	if (lsm) {
		/* If we are just sizing the EA, limit the stripe count
		 * to the actual number of OSTs in this filesystem. */
		if (!lmmp) {
			stripe_count = lov_get_stripecnt(lov, lmm_magic,
							 lsm->lsm_stripe_count);
			lsm->lsm_stripe_count = stripe_count;
		} else if (!lsm_is_released(lsm)) {
			stripe_count = lsm->lsm_stripe_count;
		} else {
			stripe_count = 0;
		}
	} else {
		/* No need to allocate more than the maximum supported
		 * stripes.  This is pretty inaccurate anyway, since
		 * ld_tgt_count now represents the max index and we should
		 * rely on the actual number of OSTs instead. */
		stripe_count = lov_mds_md_max_stripe_count(
			lov->lov_ocd.ocd_max_easize, lmm_magic);

		if (stripe_count > lov->desc.ld_tgt_count)
			stripe_count = lov->desc.ld_tgt_count;
	}

	/* XXX LOV STACKING call into osc for sizes */
	lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
	if (!lmmp)
		return lmm_size;

	if (*lmmp && !lsm) {
		stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count);
		lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
		OBD_FREE_LARGE(*lmmp, lmm_size);
		*lmmp = NULL;
		return 0;
	}

	if (!*lmmp) {
		OBD_ALLOC_LARGE(*lmmp, lmm_size);
		if (!*lmmp)
			return -ENOMEM;
	}

	CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n",
	       lmm_magic, lmm_size);

	lmmv1 = *lmmp;
	lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
	if (lmm_magic == LOV_MAGIC_V3)
		lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
	else
		lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);

	if (!lsm)
		return lmm_size;

	/* lmmv1 and lmmv3 point to the same struct and have the
	 * same first fields */
	lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
	lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
	lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count);
	lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
	lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);

	if (lsm->lsm_magic == LOV_MAGIC_V3) {
		cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
				sizeof(lmmv3->lmm_pool_name));
		if (cplen >= sizeof(lmmv3->lmm_pool_name))
			return -E2BIG;
		lmm_objects = lmmv3->lmm_objects;
	} else {
		lmm_objects = lmmv1->lmm_objects;
	}

	for (i = 0; i < stripe_count; i++) {
		struct lov_oinfo *loi = lsm->lsm_oinfo[i];

		/* XXX LOV STACKING call down to osc_packmd() to do packing */
		LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
			 " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
			 i, stripe_count, loi->loi_ost_idx);
		ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
		lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
		lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
	}
	return lmm_size;
}
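/* lov_packmd() above is tri-modal, keyed off its pointer arguments: with
 * lmmp == NULL it only computes the EA size, with *lmmp == NULL it
 * allocates and packs, and with lsm == NULL it frees a previously packed
 * buffer.  A hedged usage sketch (error handling elided; "exp" and "lsm"
 * assumed to come from the surrounding LOV code):
 *
 *	struct lov_mds_md *lmm = NULL;
 *	int size;
 *
 *	size = lov_packmd(exp, NULL, lsm);	// size only, nothing packed
 *	size = lov_packmd(exp, &lmm, lsm);	// allocates *lmm, packs lsm
 *	...
 *	size = lov_packmd(exp, &lmm, NULL);	// frees *lmm, NULLs pointer
 */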
/**
 * Implementation of struct cl_object_operations::coo_req_attr_set() for osc
 * layer.  osc is responsible for struct obdo::o_id and struct obdo::o_seq
 * fields.
 */
static void osc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
			     struct cl_req_attr *attr)
{
	struct lov_oinfo *oinfo;
	struct obdo	 *oa;
	struct ost_lvb	 *lvb;
	u64		  flags = attr->cra_flags;

	oinfo = cl2osc(obj)->oo_oinfo;
	lvb = &oinfo->loi_lvb;
	oa = attr->cra_oa;

	if ((flags & OBD_MD_FLMTIME) != 0) {
		oa->o_mtime = lvb->lvb_mtime;
		oa->o_valid |= OBD_MD_FLMTIME;
	}
	if ((flags & OBD_MD_FLATIME) != 0) {
		oa->o_atime = lvb->lvb_atime;
		oa->o_valid |= OBD_MD_FLATIME;
	}
	if ((flags & OBD_MD_FLCTIME) != 0) {
		oa->o_ctime = lvb->lvb_ctime;
		oa->o_valid |= OBD_MD_FLCTIME;
	}
	if (flags & OBD_MD_FLGROUP) {
		ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLGROUP;
	}
	if (flags & OBD_MD_FLID) {
		ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLID;
	}
	if (flags & OBD_MD_FLHANDLE) {
		struct ldlm_lock *lock;
		struct osc_page	 *opg;

		opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
		lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
					    OSC_DAP_FL_TEST_LOCK |
					    OSC_DAP_FL_CANCELING);
		if (lock == NULL && !opg->ops_srvlock) {
			struct ldlm_resource	*res;
			struct ldlm_res_id	*resname;

			CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
				      "uncovered page!\n");

			resname = &osc_env_info(env)->oti_resname;
			ostid_build_res_name(&oinfo->loi_oi, resname);
			res = ldlm_resource_get(
				osc_export(cl2osc(obj))->exp_obd->obd_namespace,
				NULL, resname, LDLM_EXTENT, 0);
			ldlm_resource_dump(D_ERROR, res);

			libcfs_debug_dumpstack(NULL);
			LBUG();
		}

		/* check for lockless io. */
		if (lock != NULL) {
			oa->o_handle = lock->l_remote_handle;
			oa->o_valid |= OBD_MD_FLHANDLE;
			LDLM_LOCK_PUT(lock);
		}
	}
}
/**
 * Implementation of struct cl_req_operations::cro_attr_set() for osc
 * layer.  osc is responsible for struct obdo::o_id and struct obdo::o_seq
 * fields.
 */
static void osc_req_attr_set(const struct lu_env *env,
			     const struct cl_req_slice *slice,
			     const struct cl_object *obj,
			     struct cl_req_attr *attr, u64 flags)
{
	struct lov_oinfo *oinfo;
	struct cl_req	 *clerq;
	struct cl_page	 *apage; /* _some_ page in @clerq */
	struct cl_lock	 *lock;  /* _some_ lock protecting @apage */
	struct osc_lock	 *olck;
	struct osc_page	 *opg;
	struct obdo	 *oa;
	struct ost_lvb	 *lvb;

	oinfo = cl2osc(obj)->oo_oinfo;
	lvb = &oinfo->loi_lvb;
	oa = attr->cra_oa;

	if ((flags & OBD_MD_FLMTIME) != 0) {
		oa->o_mtime = lvb->lvb_mtime;
		oa->o_valid |= OBD_MD_FLMTIME;
	}
	if ((flags & OBD_MD_FLATIME) != 0) {
		oa->o_atime = lvb->lvb_atime;
		oa->o_valid |= OBD_MD_FLATIME;
	}
	if ((flags & OBD_MD_FLCTIME) != 0) {
		oa->o_ctime = lvb->lvb_ctime;
		oa->o_valid |= OBD_MD_FLCTIME;
	}
	if (flags & OBD_MD_FLGROUP) {
		ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLGROUP;
	}
	if (flags & OBD_MD_FLID) {
		ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
		oa->o_valid |= OBD_MD_FLID;
	}
	if (flags & OBD_MD_FLHANDLE) {
		clerq = slice->crs_req;
		LASSERT(!list_empty(&clerq->crq_pages));
		apage = container_of(clerq->crq_pages.next,
				     struct cl_page, cp_flight);
		opg = osc_cl_page_osc(apage);
		apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */
		lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1);
		if (lock == NULL) {
			struct cl_object_header *head;
			struct cl_lock		*scan;

			head = cl_object_header(apage->cp_obj);
			list_for_each_entry(scan, &head->coh_locks,
					    cll_linkage)
				CL_LOCK_DEBUG(D_ERROR, env, scan,
					      "no cover page!\n");
			CL_PAGE_DEBUG(D_ERROR, env, apage,
				      "dump uncover page!\n");
			dump_stack();
			LBUG();
		}

		olck = osc_lock_at(lock);
		LASSERT(olck != NULL);
		LASSERT(ergo(opg->ops_srvlock, olck->ols_lock == NULL));
		/* check for lockless io. */
		if (olck->ols_lock != NULL) {
			oa->o_handle = olck->ols_lock->l_remote_handle;
			oa->o_valid |= OBD_MD_FLHANDLE;
		}
		cl_lock_put(env, lock);
	}
}
/* Pack LOV object metadata for disk storage.  It is packed in LE byte
 * order and is opaque to the networking layer.
 *
 * XXX In the future, this will be enhanced to get the EA size from the
 *     underlying OSC device(s) so we can stack LOVs properly.  For now
 *     lov_mds_md_size() just assumes one u64 per stripe.
 */
int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp,
		   struct lov_stripe_md *lsm)
{
	struct lov_mds_md_v1	*lmmv1;
	struct lov_mds_md_v3	*lmmv3;
	__u16			 stripe_count;
	struct lov_ost_data_v1	*lmm_objects;
	int			 lmm_size, lmm_magic;
	int			 i;
	int			 cplen = 0;

	if (lsm) {
		lmm_magic = lsm->lsm_magic;
	} else {
		if (lmmp && *lmmp)
			lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
		else
			/* lsm == NULL and lmmp == NULL */
			lmm_magic = LOV_MAGIC;
	}

	if ((lmm_magic != LOV_MAGIC_V1) && (lmm_magic != LOV_MAGIC_V3)) {
		CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
		       lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
		return -EINVAL;
	}

	if (lsm) {
		/* If we are just sizing the EA, limit the stripe count
		 * to the actual number of OSTs in this filesystem. */
		if (!lmmp) {
			stripe_count = lov_get_stripecnt(lov, lmm_magic,
							 lsm->lsm_stripe_count);
			lsm->lsm_stripe_count = stripe_count;
		} else if (!lsm_is_released(lsm)) {
			stripe_count = lsm->lsm_stripe_count;
		} else {
			stripe_count = 0;
		}
	} else {
		/* Size the EA by the number of currently active targets,
		 * which is exactly the maximum easize the LOV will see. */
		stripe_count = lov->desc.ld_active_tgt_count;
	}

	/* XXX LOV STACKING call into osc for sizes */
	lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
	if (!lmmp)
		return lmm_size;

	if (*lmmp && !lsm) {
		stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count);
		lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
		kvfree(*lmmp);
		*lmmp = NULL;
		return 0;
	}

	if (!*lmmp) {
		*lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
		if (!*lmmp)
			return -ENOMEM;
	}

	CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n",
	       lmm_magic, lmm_size);

	lmmv1 = *lmmp;
	lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
	if (lmm_magic == LOV_MAGIC_V3)
		lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
	else
		lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);

	if (!lsm)
		return lmm_size;

	/* lmmv1 and lmmv3 point to the same struct and have the
	 * same first fields */
	lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
	lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
	lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count);
	lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
	lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);

	if (lsm->lsm_magic == LOV_MAGIC_V3) {
		cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
				sizeof(lmmv3->lmm_pool_name));
		if (cplen >= sizeof(lmmv3->lmm_pool_name))
			return -E2BIG;
		lmm_objects = lmmv3->lmm_objects;
	} else {
		lmm_objects = lmmv1->lmm_objects;
	}

	for (i = 0; i < stripe_count; i++) {
		struct lov_oinfo *loi = lsm->lsm_oinfo[i];

		/* XXX LOV STACKING call down to osc_packmd() to do packing */
		LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
			 " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
			 i, stripe_count, loi->loi_ost_idx);
		ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
		lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
		lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
	}
	return lmm_size;
}
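/* Note the sizing difference from the lov_packmd() variant earlier in this
 * section: when no lsm is given, this copy sizes the EA by
 * ld_active_tgt_count, while the other derives a maximum stripe count from
 * ocd_max_easize and clamps it to ld_tgt_count. */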