/*
 * Module-init helper for the OI (Object Index) tables.
 *
 * Clamps osd_oi_count into (0, OSD_OI_FID_NR_MAX], rounds a
 * non-power-of-two count up to the next power of two (warning on the
 * console when it does so), and initializes the OI init mutex.
 *
 * Always returns 0.
 */
int osd_oi_mod_init(void)
{
	if (osd_oi_count == 0 || osd_oi_count > OSD_OI_FID_NR_MAX)
		osd_oi_count = OSD_OI_FID_NR;

	/* the OI count is used as a mask, so it must be a power of two */
	if ((osd_oi_count & (osd_oi_count - 1)) != 0) {
		int pow2 = size_roundup_power2(osd_oi_count);

		LCONSOLE_WARN("Round up oi_count %d to power2 %d\n",
			      osd_oi_count, pow2);
		osd_oi_count = pow2;
	}

	mutex_init(&oi_init_lock);
	return 0;
}
/*
 * Enlarge one segment of the request message for the "null" security
 * flavor.  With null security rq_reqbuf and rq_reqmsg are the same
 * buffer, so if the grown message no longer fits, the whole buffer is
 * reallocated and the old contents copied over before the segment is
 * enlarged in place.
 *
 * \param sec      security instance (unused here)
 * \param req      request whose reqbuf is being enlarged
 * \param segment  index of the lm_buflens[] segment to grow
 * \param newsize  new size for that segment
 *
 * Returns 0 on success, -ENOMEM if a larger buffer cannot be allocated.
 */
static int null_enlarge_reqbuf(struct ptlrpc_sec *sec,
			       struct ptlrpc_request *req,
			       int segment, int newsize)
{
	struct lustre_msg *newbuf;
	struct lustre_msg *oldbuf = req->rq_reqmsg;
	int oldsize, newmsg_size, alloc_size;

	LASSERT(req->rq_reqbuf);
	LASSERT(req->rq_reqbuf == req->rq_reqmsg);
	LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
	LASSERT(req->rq_reqlen == lustre_packed_msg_size(oldbuf));

	/* compute new message size: temporarily poke the new segment
	 * length into the header, measure the packed size, then restore
	 * the original length */
	oldsize = req->rq_reqbuf->lm_buflens[segment];
	req->rq_reqbuf->lm_buflens[segment] = newsize;
	newmsg_size = lustre_packed_msg_size(oldbuf);
	req->rq_reqbuf->lm_buflens[segment] = oldsize;

	/* request from pool should always have enough buffer */
	LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newmsg_size);

	if (req->rq_reqbuf_len < newmsg_size) {
		alloc_size = size_roundup_power2(newmsg_size);

		newbuf = libcfs_kvzalloc(alloc_size, GFP_NOFS);
		if (!newbuf)
			return -ENOMEM;

		/* Must lock this, so that otherwise unprotected change of
		 * rq_reqmsg is not racing with parallel processing of
		 * imp_replay_list traversing threads. See LU-3333
		 * This is a bandaid at best, we really need to deal with this
		 * in request enlarging code before unpacking that's already
		 * there */
		if (req->rq_import)
			spin_lock(&req->rq_import->imp_lock);

		memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);

		kvfree(req->rq_reqbuf);
		req->rq_reqbuf = newbuf;
		req->rq_reqmsg = newbuf;
		req->rq_reqbuf_len = alloc_size;

		if (req->rq_import)
			spin_unlock(&req->rq_import->imp_lock);
	}

	/* shift trailing segments and grow the requested one in place */
	_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
	req->rq_reqlen = newmsg_size;

	return 0;
}
/*
 * Allocate the reply buffer for a request under the "null" security
 * flavor.  Extra room is reserved for an early reply, and the total is
 * rounded up to a power of two before allocation.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int null_alloc_repbuf(struct ptlrpc_sec *sec,
			     struct ptlrpc_request *req,
			     int msgsize)
{
	/* add space for early replied */
	int buflen = size_roundup_power2(msgsize + lustre_msg_early_size());

	OBD_ALLOC(req->rq_repbuf, buflen);
	if (!req->rq_repbuf)
		return -ENOMEM;

	req->rq_repbuf_len = buflen;
	return 0;
}
static int null_enlarge_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req, int segment, int newsize) { struct lustre_msg *newbuf; struct lustre_msg *oldbuf = req->rq_reqmsg; int oldsize, newmsg_size, alloc_size; LASSERT(req->rq_reqbuf); LASSERT(req->rq_reqbuf == req->rq_reqmsg); LASSERT(req->rq_reqbuf_len >= req->rq_reqlen); LASSERT(req->rq_reqlen == lustre_packed_msg_size(oldbuf)); /* compute new message size */ oldsize = req->rq_reqbuf->lm_buflens[segment]; req->rq_reqbuf->lm_buflens[segment] = newsize; newmsg_size = lustre_packed_msg_size(oldbuf); req->rq_reqbuf->lm_buflens[segment] = oldsize; /* request from pool should always have enough buffer */ LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newmsg_size); if (req->rq_reqbuf_len < newmsg_size) { alloc_size = size_roundup_power2(newmsg_size); OBD_ALLOC(newbuf, alloc_size); if (newbuf == NULL) return -ENOMEM; memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen); OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len); req->rq_reqbuf = req->rq_reqmsg = newbuf; req->rq_reqbuf_len = alloc_size; } _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize); req->rq_reqlen = newmsg_size; return 0; }
/*
 * Set up the request buffer for a request under the "null" security
 * flavor.  A pooled request already carries a (large enough) buffer,
 * which is simply zeroed; otherwise a fresh buffer of power-of-two
 * size is allocated.  rq_reqmsg always ends up aliasing rq_reqbuf.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int null_alloc_reqbuf(struct ptlrpc_sec *sec,
			     struct ptlrpc_request *req,
			     int msgsize)
{
	if (req->rq_reqbuf) {
		/* pre-allocated pool buffer: must already be big enough */
		LASSERT(req->rq_pool);
		LASSERT(req->rq_reqbuf_len >= msgsize);
		memset(req->rq_reqbuf, 0, msgsize);
	} else {
		int alloc_size = size_roundup_power2(msgsize);

		LASSERT(!req->rq_pool);
		OBD_ALLOC(req->rq_reqbuf, alloc_size);
		if (!req->rq_reqbuf)
			return -ENOMEM;
		req->rq_reqbuf_len = alloc_size;
	}

	req->rq_reqmsg = req->rq_reqbuf;
	return 0;
}
/*
 * Declare (reserve transaction credits for) the creation of a new
 * object in the osd-zfs backend: the new dnode/SA, the name entry in
 * the parent ZAP, and the per-uid/gid inode accounting ZAP updates,
 * plus the matching quota declaration.
 *
 * \param env     execution environment
 * \param dt      object to be created
 * \param attr    attributes of the new object (la_uid/la_gid used for
 *                quota/accounting declarations)
 * \param hint    allocation hint (unused here)
 * \param dof     object format, selects which tx holds are needed
 * \param handle  transaction handle wrapping the DMU tx
 *
 * Returns the result of osd_declare_quota() (0 on success, negative
 * errno on failure).
 */
static int osd_declare_object_create(const struct lu_env *env,
				     struct dt_object *dt,
				     struct lu_attr *attr,
				     struct dt_allocation_hint *hint,
				     struct dt_object_format *dof,
				     struct thandle *handle)
{
	const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
	struct osd_object *obj = osd_dt_obj(dt);
	struct osd_device *osd = osd_obj2dev(obj);
	struct osd_thandle *oh;
	uint64_t zapid;
	int rc, dnode_size;

	ENTRY;

	LASSERT(dof);

	/* body-carrying types get body ops installed up front */
	switch (dof->dof_type) {
	case DFT_REGULAR:
	case DFT_SYM:
	case DFT_NODE:
		if (obj->oo_dt.do_body_ops == NULL)
			obj->oo_dt.do_body_ops = &osd_body_ops;
		break;
	default:
		break;
	}

	LASSERT(handle != NULL);
	oh = container_of0(handle, struct osd_thandle, ot_super);
	LASSERT(oh->ot_tx != NULL);

	/* this is the minimum set of EAs on every Lustre object */
	obj->oo_ea_in_bonus = ZFS_SA_BASE_ATTR_SIZE +
			      sizeof(__u64) + /* VBR VERSION */
			      sizeof(struct lustre_mdt_attrs); /* LMA */
	/* reserve 32 bytes for extra stuff like ACLs */
	dnode_size = size_roundup_power2(obj->oo_ea_in_bonus + 32);

	switch (dof->dof_type) {
	case DFT_DIR:
		dt->do_index_ops = &osd_dir_ops;
		/* fallthrough: a directory is also index-backed */
	case DFT_INDEX:
		/* for zap create */
		dmu_tx_hold_zap(oh->ot_tx, DMU_NEW_OBJECT, FALSE, NULL);
		dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
		break;
	case DFT_REGULAR:
	case DFT_SYM:
	case DFT_NODE:
		/* first, we'll create new object */
		dmu_tx_hold_sa_create(oh->ot_tx, dnode_size);
		break;
	default:
		LBUG();
		break;
	}

	/* and we'll add it to some mapping */
	zapid = osd_get_name_n_idx(env, osd, fid, NULL, 0);
	dmu_tx_hold_zap(oh->ot_tx, zapid, TRUE, NULL);

	/* we will also update inode accounting ZAPs */
	dmu_tx_hold_zap(oh->ot_tx, osd->od_iusr_oid, FALSE, NULL);
	dmu_tx_hold_zap(oh->ot_tx, osd->od_igrp_oid, FALSE, NULL);

	rc = osd_declare_quota(env, osd, attr->la_uid, attr->la_gid, 1, oh,
			       false, NULL, false);

	RETURN(rc);
}