int mds_quota_ctl(struct obd_device *obd, struct obd_export *unused,
                  struct obd_quotactl *oqctl)
{
        struct obd_device_target *obt = &obd->u.obt;
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        struct timeval work_start;
        struct timeval work_end;
        long timediff;
        int rc = 0;
        ENTRY;

        cfs_gettimeofday(&work_start);
        switch (oqctl->qc_cmd) {
        case Q_QUOTAON:
                oqctl->qc_id = obt->obt_qfmt; /* override qfmt version */
                rc = mds_quota_on(obd, oqctl);
                break;
        case Q_QUOTAOFF:
                oqctl->qc_id = obt->obt_qfmt; /* override qfmt version */
                rc = mds_quota_off(obd, oqctl);
                break;
        case Q_SETINFO:
                rc = mds_set_dqinfo(obd, oqctl);
                break;
        case Q_GETINFO:
                rc = mds_get_dqinfo(obd, oqctl);
                break;
        case Q_SETQUOTA:
                rc = mds_set_dqblk(obd, oqctl);
                break;
        case Q_GETQUOTA:
                rc = mds_get_dqblk(obd, oqctl);
                break;
        case Q_GETOINFO:
        case Q_GETOQUOTA:
                rc = mds_get_obd_quota(obd, oqctl);
                break;
        case LUSTRE_Q_INVALIDATE:
                rc = mds_quota_invalidate(obd, oqctl);
                break;
        case LUSTRE_Q_FINVALIDATE:
                oqctl->qc_id = obt->obt_qfmt; /* override qfmt version */
                rc = mds_quota_finvalidate(obd, oqctl);
                break;
        default:
                CERROR("%s: unsupported mds_quotactl command: %d\n",
                       obd->obd_name, oqctl->qc_cmd);
                RETURN(-EFAULT);
        }

        if (rc)
                CDEBUG(D_INFO, "mds_quotactl admin quota command %d, id %u, "
                       "type %d, failed: rc = %d\n",
                       oqctl->qc_cmd, oqctl->qc_id, oqctl->qc_type, rc);

        cfs_gettimeofday(&work_end);
        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
        lprocfs_counter_add(qctxt->lqc_stats, LQUOTA_QUOTA_CTL, timediff);

        RETURN(rc);
}
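/*
 * Hedged sketch, not code from the tree: every handler in this section
 * brackets its work with cfs_gettimeofday() and feeds the difference to
 * lprocfs_counter_add().  The hypothetical helper below only spells out
 * the microsecond arithmetic that cfs_timeval_sub(&end, &start, NULL) is
 * assumed to perform.
 */
static long quota_elapsed_usec(const struct timeval *start,
                               const struct timeval *end)
{
        /* assumes intervals short enough that the usec total fits in long */
        return (end->tv_sec - start->tv_sec) * 1000000L +
               (end->tv_usec - start->tv_usec);
}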
static int osc_stats_seq_show(struct seq_file *seq, void *v)
{
        struct timeval now;
        struct obd_device *dev = seq->private;
        struct osc_stats *stats = &obd2osc_dev(dev)->od_stats;

        cfs_gettimeofday(&now);

        seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
                   now.tv_sec, now.tv_usec);
        seq_printf(seq, "lockless_write_bytes\t\t"LPU64"\n",
                   stats->os_lockless_writes);
        seq_printf(seq, "lockless_read_bytes\t\t"LPU64"\n",
                   stats->os_lockless_reads);
        seq_printf(seq, "lockless_truncate\t\t"LPU64"\n",
                   stats->os_lockless_truncates);
        return 0;
}
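/*
 * Hedged sketch: one conventional way to expose the show routine above
 * through a proc file.  The real osc code wires this up through Lustre's
 * lprocfs wrappers rather than raw seq_file plumbing, and the data pointer
 * handed to single_open() is assumed to be the obd_device stored when the
 * proc entry was created (PDE_DATA() availability depends on kernel
 * version).  Names below are illustrative only.
 */
static int osc_stats_seq_open(struct inode *inode, struct file *file)
{
        /* seq->private must end up pointing at the struct obd_device */
        return single_open(file, osc_stats_seq_show, PDE_DATA(inode));
}

static const struct file_operations osc_stats_sketch_fops = {
        .owner   = THIS_MODULE,
        .open    = osc_stats_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};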
static void brw_stats_show(struct seq_file *seq, struct brw_stats *brw_stats)
{
        struct timeval now;

        /* this sampling races with updates */
        cfs_gettimeofday(&now);
        seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
                   now.tv_sec, now.tv_usec);

        display_brw_stats(seq, "pages per bulk r/w", "rpcs",
                          &brw_stats->hist[BRW_R_PAGES],
                          &brw_stats->hist[BRW_W_PAGES], 1);

        display_brw_stats(seq, "discontiguous pages", "rpcs",
                          &brw_stats->hist[BRW_R_DISCONT_PAGES],
                          &brw_stats->hist[BRW_W_DISCONT_PAGES], 0);

        display_brw_stats(seq, "discontiguous blocks", "rpcs",
                          &brw_stats->hist[BRW_R_DISCONT_BLOCKS],
                          &brw_stats->hist[BRW_W_DISCONT_BLOCKS], 0);

        display_brw_stats(seq, "disk fragmented I/Os", "ios",
                          &brw_stats->hist[BRW_R_DIO_FRAGS],
                          &brw_stats->hist[BRW_W_DIO_FRAGS], 0);

        display_brw_stats(seq, "disk I/Os in flight", "ios",
                          &brw_stats->hist[BRW_R_RPC_HIST],
                          &brw_stats->hist[BRW_W_RPC_HIST], 0);

        display_brw_stats(seq, "I/O time (1/1000s)", "ios",
                          &brw_stats->hist[BRW_R_IO_TIME],
                          &brw_stats->hist[BRW_W_IO_TIME], 1000 / CFS_HZ);

        display_brw_stats(seq, "disk I/O size", "ios",
                          &brw_stats->hist[BRW_R_DISK_IOSIZE],
                          &brw_stats->hist[BRW_W_DISK_IOSIZE], 1);
}
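/*
 * display_brw_stats() itself is not part of this excerpt.  The block below
 * is only a hedged sketch of the shape those calls assume: a helper that
 * walks a read/write obd_histogram pair, prints one line per power-of-two
 * bucket scaled by the caller-supplied factor, and stops once the cumulative
 * counts cover the totals.  display_brw_stats_sketch() is hypothetical; the
 * real obdfilter implementation differs, notably in its bucket labelling.
 */
static void display_brw_stats_sketch(struct seq_file *seq, const char *name,
                                     const char *units,
                                     struct obd_histogram *read,
                                     struct obd_histogram *write, int scale)
{
        unsigned long read_tot = lprocfs_oh_sum(read);
        unsigned long write_tot = lprocfs_oh_sum(write);
        unsigned long read_cum = 0, write_cum = 0;
        int i;

        seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
        seq_printf(seq, "%-22s %s  %% cum %% |  %s  %% cum %%\n",
                   name, units, units);

        for (i = 0; i < OBD_HIST_MAX; i++) {
                unsigned long r = read->oh_buckets[i];
                unsigned long w = write->oh_buckets[i];

                read_cum += r;
                write_cum += w;
                /* bucket label: 2^i scaled by the caller-supplied factor */
                seq_printf(seq, "%llu:\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
                           (unsigned long long)(scale ? scale : 1) << i,
                           r, read_tot ? r * 100 / read_tot : 0,
                           read_tot ? read_cum * 100 / read_tot : 0,
                           w, write_tot ? w * 100 / write_tot : 0,
                           write_tot ? write_cum * 100 / write_tot : 0);
                if (read_cum == read_tot && write_cum == write_tot)
                        break;
        }
}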
int filter_quota_ctl(struct obd_device *unused, struct obd_export *exp,
                     struct obd_quotactl *oqctl)
{
        struct obd_device *obd = exp->exp_obd;
        struct obd_device_target *obt = &obd->u.obt;
        struct lvfs_run_ctxt saved;
        struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
        struct lustre_qunit_size *lqs;
        void *handle = NULL;
        struct timeval work_start;
        struct timeval work_end;
        long timediff;
        int rc = 0;
        ENTRY;

        cfs_gettimeofday(&work_start);
        switch (oqctl->qc_cmd) {
        case Q_QUOTAON:
                oqctl->qc_id = obt->obt_qfmt;
                rc = generic_quota_on(obd, oqctl, 0);
                break;
        case Q_FINVALIDATE:
        case Q_QUOTAOFF:
                cfs_down(&obt->obt_quotachecking);
                if (oqctl->qc_cmd == Q_FINVALIDATE &&
                    (obt->obt_qctxt.lqc_flags & UGQUOTA2LQC(oqctl->qc_type))) {
                        CWARN("quota[%u] is still on\n", oqctl->qc_type);
                        cfs_up(&obt->obt_quotachecking);
                        rc = -EBUSY;
                        break;
                }
                oqctl->qc_id = obt->obt_qfmt; /* override qfmt version */
                /* fall through */
        case Q_GETOINFO:
        case Q_GETOQUOTA:
        case Q_GETQUOTA:
                /* In a recovery scenario, this pending dqacq/dqrel may have
                 * been processed successfully by the master before its dquot
                 * on the master entered recovery mode.  We must wait for that
                 * dqacq/dqrel to finish, then return the correct limits to
                 * the master. */
                if (oqctl->qc_stat == QUOTA_RECOVERING)
                        handle = quota_barrier(&obd->u.obt.obt_qctxt, oqctl, 1);

                push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                rc = fsfilt_quotactl(obd, obt->obt_sb, oqctl);
                pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);

                if (oqctl->qc_stat == QUOTA_RECOVERING)
                        quota_unbarrier(handle);

                if (oqctl->qc_cmd == Q_QUOTAOFF ||
                    oqctl->qc_cmd == Q_FINVALIDATE) {
                        if (oqctl->qc_cmd == Q_QUOTAOFF) {
                                if (!rc)
                                        obt->obt_qctxt.lqc_flags &=
                                                ~UGQUOTA2LQC(oqctl->qc_type);
                                else if (quota_is_off(qctxt, oqctl))
                                        rc = -EALREADY;
                                CDEBUG(D_QUOTA, "%s: quotaoff type:flags:rc "
                                       "%u:%lu:%d\n", obd->obd_name,
                                       oqctl->qc_type, qctxt->lqc_flags, rc);
                        }
                        cfs_up(&obt->obt_quotachecking);
                }
                break;
        case Q_SETQUOTA:
                /* currently, it is only used for nullifying the quota */
                handle = quota_barrier(&obd->u.obt.obt_qctxt, oqctl, 1);

                push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                rc = fsfilt_quotactl(obd, obd->u.obt.obt_sb, oqctl);

                if (!rc) {
                        oqctl->qc_cmd = Q_SYNC;
                        fsfilt_quotactl(obd, obd->u.obt.obt_sb, oqctl);
                        oqctl->qc_cmd = Q_SETQUOTA;
                }

                pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                quota_unbarrier(handle);

                lqs = quota_search_lqs(LQS_KEY(oqctl->qc_type, oqctl->qc_id),
                                       qctxt, 0);
                if (lqs == NULL || IS_ERR(lqs)) {
                        CERROR("failed to create lqs during setquota operation "
                               "for %sid %u\n", oqctl->qc_type ? "g" : "u",
                               oqctl->qc_id);
                } else {
                        lqs->lqs_flags &= ~QB_SET;
                        lqs_putref(lqs);
                }
                break;
        case Q_INITQUOTA:
        {
                unsigned int id[MAXQUOTAS] = { 0, 0 };

                /* Initialize quota limit to MIN_QLIMIT */
                LASSERT(oqctl->qc_dqblk.dqb_valid == QIF_BLIMITS);
                LASSERT(oqctl->qc_dqblk.dqb_bsoftlimit == 0);

                if (!oqctl->qc_dqblk.dqb_bhardlimit)
                        goto adjust;

                /* There might be a pending dqacq/dqrel (which is going to
                 * clear stale limits on the slave); we should wait for its
                 * completion, then initialize the limits. */
                handle = quota_barrier(&obd->u.obt.obt_qctxt, oqctl, 1);
                LASSERT(oqctl->qc_dqblk.dqb_bhardlimit == MIN_QLIMIT);
                push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                rc = fsfilt_quotactl(obd, obd->u.obt.obt_sb, oqctl);

                /* Sync the on-disk quota so that the changed limits
                 * (MIN_QLIMIT) are not lost on a crash, where they could
                 * not be recovered. */
                if (!rc) {
                        oqctl->qc_cmd = Q_SYNC;
                        fsfilt_quotactl(obd, obd->u.obt.obt_sb, oqctl);
                        oqctl->qc_cmd = Q_INITQUOTA;
                }
                pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                quota_unbarrier(handle);

                if (rc)
                        RETURN(rc);
adjust:
                lqs = quota_search_lqs(LQS_KEY(oqctl->qc_type, oqctl->qc_id),
                                       qctxt, 1);
                if (lqs == NULL || IS_ERR(lqs)) {
                        CERROR("failed to create lqs during setquota operation "
                               "for %sid %u\n", oqctl->qc_type ? "g" : "u",
                               oqctl->qc_id);
                        break;
                } else {
                        lqs->lqs_flags |= QB_SET;
                        lqs_putref(lqs);
                }

                /* Trigger qunit pre-acquire */
                if (oqctl->qc_type == USRQUOTA)
                        id[USRQUOTA] = oqctl->qc_id;
                else
                        id[GRPQUOTA] = oqctl->qc_id;

                rc = qctxt_adjust_qunit(obd, &obd->u.obt.obt_qctxt,
                                        id, 1, 0, NULL);
                if (rc == -EDQUOT || rc == -EBUSY) {
                        CDEBUG(D_QUOTA, "rc: %d.\n", rc);
                        rc = 0;
                }
                break;
        }
        default:
                CERROR("%s: unsupported filter_quotactl command: %d\n",
                       obd->obd_name, oqctl->qc_cmd);
                RETURN(-EFAULT);
        }

        cfs_gettimeofday(&work_end);
        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
        lprocfs_counter_add(qctxt->lqc_stats, LQUOTA_QUOTA_CTL, timediff);

        RETURN(rc);
}
static int __init init_lustre_lite(void)
{
        int i, rc, seed[2];
        struct timeval tv;
        lnet_process_id_t lnet_id;

        CLASSERT(sizeof(LUSTRE_VOLATILE_HDR) == LUSTRE_VOLATILE_HDR_LEN + 1);

        /* print an address of _any_ initialized kernel symbol from this
         * module, to allow debugging with gdb that doesn't support data
         * symbols from modules.*/
        CDEBUG(D_INFO, "Lustre client module (%p).\n",
               &lustre_super_operations);

        rc = ll_init_inodecache();
        if (rc)
                return -ENOMEM;

        ll_file_data_slab = cfs_mem_cache_create("ll_file_data",
                                                 sizeof(struct ll_file_data), 0,
                                                 CFS_SLAB_HWCACHE_ALIGN);
        if (ll_file_data_slab == NULL) {
                ll_destroy_inodecache();
                return -ENOMEM;
        }

        ll_remote_perm_cachep = cfs_mem_cache_create("ll_remote_perm_cache",
                                                     sizeof(struct ll_remote_perm),
                                                     0, 0);
        if (ll_remote_perm_cachep == NULL) {
                cfs_mem_cache_destroy(ll_file_data_slab);
                ll_file_data_slab = NULL;
                ll_destroy_inodecache();
                return -ENOMEM;
        }

        ll_rmtperm_hash_cachep = cfs_mem_cache_create("ll_rmtperm_hash_cache",
                                                      REMOTE_PERM_HASHSIZE *
                                                      sizeof(cfs_list_t),
                                                      0, 0);
        if (ll_rmtperm_hash_cachep == NULL) {
                cfs_mem_cache_destroy(ll_remote_perm_cachep);
                ll_remote_perm_cachep = NULL;
                cfs_mem_cache_destroy(ll_file_data_slab);
                ll_file_data_slab = NULL;
                ll_destroy_inodecache();
                return -ENOMEM;
        }

        proc_lustre_fs_root = proc_lustre_root ?
                lprocfs_register("llite", proc_lustre_root, NULL, NULL) : NULL;

        lustre_register_client_fill_super(ll_fill_super);
        lustre_register_kill_super_cb(ll_kill_super);
        lustre_register_client_process_config(ll_process_config);

        cfs_get_random_bytes(seed, sizeof(seed));

        /* Nodes with small feet have little entropy; the NID for this
         * node gives the most entropy in the low bits. */
        for (i = 0; ; i++) {
                if (LNetGetId(i, &lnet_id) == -ENOENT)
                        break;

                if (LNET_NETTYP(LNET_NIDNET(lnet_id.nid)) != LOLND)
                        seed[0] ^= LNET_NIDADDR(lnet_id.nid);
        }

        cfs_gettimeofday(&tv);
        cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);

        init_timer(&ll_capa_timer);
        ll_capa_timer.function = ll_capa_timer_callback;
        rc = ll_capa_thread_start();
        /*
         * XXX normal cleanup is needed here.
         */
        if (rc == 0)
                rc = vvp_global_init();

        return rc;
}
static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
{
        struct timeval now;
        struct obd_device *dev = seq->private;
        struct client_obd *cli = &dev->u.cli;
        unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
        int i;

        cfs_gettimeofday(&now);

        client_obd_list_lock(&cli->cl_loi_list_lock);

        seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
                   now.tv_sec, now.tv_usec);
        seq_printf(seq, "read RPCs in flight: %d\n", cli->cl_r_in_flight);
        seq_printf(seq, "write RPCs in flight: %d\n", cli->cl_w_in_flight);
        seq_printf(seq, "pending write pages: %d\n", cli->cl_pending_w_pages);
        seq_printf(seq, "pending read pages: %d\n", cli->cl_pending_r_pages);

        seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
        seq_printf(seq, "pages per rpc rpcs %% cum %% |");
        seq_printf(seq, " rpcs %% cum %%\n");

        read_tot = lprocfs_oh_sum(&cli->cl_read_page_hist);
        write_tot = lprocfs_oh_sum(&cli->cl_write_page_hist);

        read_cum = 0;
        write_cum = 0;
        for (i = 0; i < OBD_HIST_MAX; i++) {
                unsigned long r = cli->cl_read_page_hist.oh_buckets[i];
                unsigned long w = cli->cl_write_page_hist.oh_buckets[i];
                read_cum += r;
                write_cum += w;
                seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
                           1 << i, r, pct(r, read_tot),
                           pct(read_cum, read_tot), w,
                           pct(w, write_tot), pct(write_cum, write_tot));
                if (read_cum == read_tot && write_cum == write_tot)
                        break;
        }

        seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
        seq_printf(seq, "rpcs in flight rpcs %% cum %% |");
        seq_printf(seq, " rpcs %% cum %%\n");

        read_tot = lprocfs_oh_sum(&cli->cl_read_rpc_hist);
        write_tot = lprocfs_oh_sum(&cli->cl_write_rpc_hist);

        read_cum = 0;
        write_cum = 0;
        for (i = 0; i < OBD_HIST_MAX; i++) {
                unsigned long r = cli->cl_read_rpc_hist.oh_buckets[i];
                unsigned long w = cli->cl_write_rpc_hist.oh_buckets[i];
                read_cum += r;
                write_cum += w;
                seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
                           i, r, pct(r, read_tot),
                           pct(read_cum, read_tot), w,
                           pct(w, write_tot), pct(write_cum, write_tot));
                if (read_cum == read_tot && write_cum == write_tot)
                        break;
        }

        seq_printf(seq, "\n\t\t\tread\t\t\twrite\n");
        seq_printf(seq, "offset rpcs %% cum %% |");
        seq_printf(seq, " rpcs %% cum %%\n");

        read_tot = lprocfs_oh_sum(&cli->cl_read_offset_hist);
        write_tot = lprocfs_oh_sum(&cli->cl_write_offset_hist);

        read_cum = 0;
        write_cum = 0;
        for (i = 0; i < OBD_HIST_MAX; i++) {
                unsigned long r = cli->cl_read_offset_hist.oh_buckets[i];
                unsigned long w = cli->cl_write_offset_hist.oh_buckets[i];
                read_cum += r;
                write_cum += w;
                seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
                           (i == 0) ? 0 : 1 << (i - 1),
                           r, pct(r, read_tot), pct(read_cum, read_tot),
                           w, pct(w, write_tot), pct(write_cum, write_tot));
                if (read_cum == read_tot && write_cum == write_tot)
                        break;
        }

        client_obd_list_unlock(&cli->cl_loi_list_lock);

        return 0;
}
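/*
 * pct() is used throughout the histogram loops above but is not part of
 * this excerpt.  A hedged, equivalent helper would look roughly like the
 * following (in the real tree it precedes its users, typically as a macro):
 * it returns a*100/b, or 0 when the total is zero to avoid dividing by zero.
 */
static unsigned long pct(unsigned long a, unsigned long b)
{
        return b ? a * 100 / b : 0;
}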
/**
 * When a block_write or inode_create RPC finishes, adjust the record of
 * pending blocks and inodes.
 */
static int quota_pending_commit(struct obd_device *obd, const unsigned int id[],
                                int pending[], int isblk)
{
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        struct timeval work_start;
        struct timeval work_end;
        long timediff;
        int i;
        struct qunit_data qdata[MAXQUOTAS];
        ENTRY;

        CDEBUG(D_QUOTA, "commit pending quota for %s\n", obd->obd_name);
        CLASSERT(MAXQUOTAS < 4);
        if (!ll_sb_any_quota_active(qctxt->lqc_sb))
                RETURN(0);

        cfs_gettimeofday(&work_start);
        for (i = 0; i < MAXQUOTAS; i++) {
                struct lustre_qunit_size *lqs = NULL;

                LASSERT(pending[i] >= 0);
                if (pending[i] == 0)
                        continue;

                qdata[i].qd_id = id[i];
                qdata[i].qd_flags = i;
                if (isblk)
                        QDATA_SET_BLK(&qdata[i]);
                qdata[i].qd_count = 0;

                /* skip id 0 of the user type */
                if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
                        continue;

                lqs = quota_search_lqs(LQS_KEY(i, qdata[i].qd_id), qctxt, 0);
                if (lqs == NULL || IS_ERR(lqs)) {
                        CERROR("cannot find lqs for pending_commit: "
                               "[id %u] [%c] [pending %u] [isblk %d] (rc %ld), "
                               "this may cause an unexpected lqs refcount "
                               "error!\n",
                               id[i], i ? 'g' : 'u', pending[i], isblk,
                               lqs ? PTR_ERR(lqs) : -1);
                        continue;
                }

                cfs_spin_lock(&lqs->lqs_lock);
                if (isblk) {
                        LASSERTF(lqs->lqs_bwrite_pending >= pending[i],
                                 "there are too many blocks! [id %u] [%c] "
                                 "[bwrite_pending %lu] [pending %u]\n",
                                 id[i], i % 2 ? 'g' : 'u',
                                 lqs->lqs_bwrite_pending, pending[i]);

                        lqs->lqs_bwrite_pending -= pending[i];
                } else {
                        LASSERTF(lqs->lqs_iwrite_pending >= pending[i],
                                 "there are too many files! [id %u] [%c] "
                                 "[iwrite_pending %lu] [pending %u]\n",
                                 id[i], i % 2 ? 'g' : 'u',
                                 lqs->lqs_iwrite_pending, pending[i]);

                        lqs->lqs_iwrite_pending -= pending[i];
                }
                CDEBUG(D_QUOTA, "%s: lqs_pending=%lu pending[%d]=%d isblk=%d\n",
                       obd->obd_name,
                       isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
                       i, pending[i], isblk);
                cfs_spin_unlock(&lqs->lqs_lock);

                /* for quota_search_lqs in pending_commit */
                lqs_putref(lqs);
                /* for quota_search_lqs in quota_check */
                lqs_putref(lqs);
        }
        cfs_gettimeofday(&work_end);
        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
        lprocfs_counter_add(qctxt->lqc_stats,
                            isblk ? LQUOTA_WAIT_FOR_COMMIT_BLK :
                                    LQUOTA_WAIT_FOR_COMMIT_INO,
                            timediff);

        RETURN(0);
}
static int quota_chk_acq_common(struct obd_device *obd, struct obd_export *exp,
                                const unsigned int id[], int pending[],
                                int count, quota_acquire acquire,
                                struct obd_trans_info *oti, int isblk,
                                struct inode *inode, int frags)
{
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        struct timeval work_start;
        struct timeval work_end;
        long timediff;
        struct l_wait_info lwi = { 0 };
        int rc = 0, cycle = 0, count_err = 1;
        ENTRY;

        if (!quota_is_set(obd, id, isblk ? QB_SET : QI_SET))
                RETURN(0);

        if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
                /* If the client has been evicted or if it
                 * timed out and tried to reconnect already,
                 * abort the request immediately */
                RETURN(-ENOTCONN);

        CDEBUG(D_QUOTA, "check quota for %s\n", obd->obd_name);
        pending[USRQUOTA] = pending[GRPQUOTA] = 0;
        /* Unfortunately, if the quota master is too busy to handle the
         * pre-dqacq in time and the quota hash on the OST is used up, we
         * have to wait for the completion of the in-flight dqacq/dqrel
         * in order to get enough quota for the write, b=12588 */
        cfs_gettimeofday(&work_start);

        while ((rc = quota_check_common(obd, id, pending, count, cycle, isblk,
                                        inode, frags)) &
               QUOTA_RET_ACQUOTA) {

                cfs_spin_lock(&qctxt->lqc_lock);
                if (!qctxt->lqc_import && oti) {
                        cfs_spin_unlock(&qctxt->lqc_lock);
                        LASSERT(oti && oti->oti_thread &&
                                oti->oti_thread->t_watchdog);

                        lc_watchdog_disable(oti->oti_thread->t_watchdog);
                        CDEBUG(D_QUOTA, "sleep for quota master\n");
                        l_wait_event(qctxt->lqc_wait_for_qmaster,
                                     check_qm(qctxt), &lwi);
                        CDEBUG(D_QUOTA, "wake up when quota master is back\n");
                        lc_watchdog_touch(oti->oti_thread->t_watchdog,
                                 CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
                } else {
                        cfs_spin_unlock(&qctxt->lqc_lock);
                }

                cycle++;
                if (isblk)
                        OBD_FAIL_TIMEOUT(OBD_FAIL_OST_HOLD_WRITE_RPC, 90);
                /* after acquire(), we should run quota_check_common again
                 * to confirm there is enough quota to finish the write */
                rc = acquire(obd, id, oti, isblk);

                /* see dqacq_completion for the handling of the cases below */

                /* a new request is finished, try again */
                if (rc == QUOTA_REQ_RETURNED) {
                        CDEBUG(D_QUOTA, "finish a quota req, try again\n");
                        continue;
                }

                /* it is out of quota already */
                if (rc == -EDQUOT) {
                        CDEBUG(D_QUOTA, "out of quota, return -EDQUOT\n");
                        break;
                }

                /* The related quota has been disabled by the master but is
                 * still enabled by the slave; do not try again. */
                if (unlikely(rc == -ESRCH)) {
                        CERROR("mismatched quota configuration, stop trying.\n");
                        break;
                }

                if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
                        /* The client has been evicted or has already tried
                         * to reconnect; abort the request */
                        RETURN(-ENOTCONN);

                /* -EBUSY and others, wait a second and try again */
                if (rc < 0) {
                        cfs_waitq_t waitq;
                        struct l_wait_info lwi;

                        if (oti && oti->oti_thread &&
                            oti->oti_thread->t_watchdog)
                                lc_watchdog_touch(oti->oti_thread->t_watchdog,
                                       CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
                        CDEBUG(D_QUOTA, "rc: %d, count_err: %d\n", rc,
                               count_err++);

                        cfs_waitq_init(&waitq);
                        lwi = LWI_TIMEOUT(cfs_time_seconds(min(cycle, 10)),
                                          NULL, NULL);
                        l_wait_event(waitq, 0, &lwi);
                }

                if (rc < 0 || cycle % 10 == 0) {
                        cfs_spin_lock(&last_print_lock);
                        if (last_print == 0 ||
                            cfs_time_before((last_print + cfs_time_seconds(30)),
                                            cfs_time_current())) {
                                last_print = cfs_time_current();
                                cfs_spin_unlock(&last_print_lock);
                                CWARN("still haven't managed to acquire quota "
                                      "space from the quota master after %d "
                                      "retries (err=%d, rc=%d)\n",
                                      cycle, count_err - 1, rc);
                        } else {
                                cfs_spin_unlock(&last_print_lock);
                        }
                }

                CDEBUG(D_QUOTA, "recheck quota with rc: %d, cycle: %d\n", rc,
                       cycle);
        }
        cfs_gettimeofday(&work_end);
        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
        lprocfs_counter_add(qctxt->lqc_stats,
                            isblk ? LQUOTA_WAIT_FOR_CHK_BLK :
                                    LQUOTA_WAIT_FOR_CHK_INO,
                            timediff);

        if (rc > 0)
                rc = 0;
        RETURN(rc);
}
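/*
 * Hedged sketch of how the two routines above are meant to bracket an I/O:
 * quota_chk_acq_common() records a pending block/inode reservation before
 * the write, and quota_pending_commit() drops that reservation once the
 * transaction has committed.  obt_write_one_sketch() is hypothetical and
 * omits the real write path and its error handling; it only illustrates
 * the pairing and the shared pending[] array.
 */
static int obt_write_one_sketch(struct obd_device *obd, struct obd_export *exp,
                                struct obd_trans_info *oti, struct inode *inode,
                                const unsigned int id[MAXQUOTAS], int nblocks,
                                quota_acquire acquire)
{
        int pending[MAXQUOTAS] = { 0, 0 };
        int rc;

        /* reserve quota for nblocks blocks (isblk = 1) before writing */
        rc = quota_chk_acq_common(obd, exp, id, pending, nblocks,
                                  acquire, oti, 1, inode, 1);
        if (rc)
                return rc;

        /* ... perform the actual write and commit the transaction ... */

        /* drop the pending-block reservation recorded by the check above */
        quota_pending_commit(obd, id, pending, 1);
        return 0;
}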
/**
 * Send request \a request.
 * If \a noreply is set, don't expect any reply back and don't set up
 * reply buffers.
 * Returns 0 on success, or an error code.
 */
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
        int rc;
        int rc2;
        int mpflag = 0;
        struct ptlrpc_connection *connection;
        lnet_handle_me_t reply_me_h;
        lnet_md_t reply_md;
        struct obd_device *obd = request->rq_import->imp_obd;
        ENTRY;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
                RETURN(0);

        LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
        LASSERT(request->rq_wait_ctx == 0);

        /* If this is a re-transmit, we're required to have disengaged
         * cleanly from the previous attempt */
        LASSERT(!request->rq_receiving_reply);

        if (request->rq_import->imp_obd &&
            request->rq_import->imp_obd->obd_fail) {
                CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
                       request->rq_import->imp_obd->obd_name);
                /* this prevents us from waiting in ptlrpc_queue_wait */
                request->rq_err = 1;
                request->rq_status = -ENODEV;
                RETURN(-ENODEV);
        }

        connection = request->rq_import->imp_connection;

        lustre_msg_set_handle(request->rq_reqmsg,
                              &request->rq_import->imp_remote_handle);
        lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
        lustre_msg_set_conn_cnt(request->rq_reqmsg,
                                request->rq_import->imp_conn_cnt);
        lustre_msghdr_set_flags(request->rq_reqmsg,
                                request->rq_import->imp_msghdr_flags);

        if (request->rq_resend)
                lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);

        if (request->rq_memalloc)
                mpflag = cfs_memory_pressure_get_and_set();

        rc = sptlrpc_cli_wrap_request(request);
        if (rc)
                GOTO(out, rc);

        /* bulk register should be done after wrap_request() */
        if (request->rq_bulk != NULL) {
                rc = ptlrpc_register_bulk(request);
                if (rc != 0)
                        GOTO(out, rc);
        }

        if (!noreply) {
                LASSERT(request->rq_replen != 0);
                if (request->rq_repbuf == NULL) {
                        LASSERT(request->rq_repdata == NULL);
                        LASSERT(request->rq_repmsg == NULL);
                        rc = sptlrpc_cli_alloc_repbuf(request,
                                                      request->rq_replen);
                        if (rc) {
                                /* this prevents us from looping in
                                 * ptlrpc_queue_wait */
                                request->rq_err = 1;
                                request->rq_status = rc;
                                GOTO(cleanup_bulk, rc);
                        }
                } else {
                        request->rq_repdata = NULL;
                        request->rq_repmsg = NULL;
                }

                rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
                                  connection->c_peer, request->rq_xid, 0,
                                  LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
                if (rc != 0) {
                        CERROR("LNetMEAttach failed: %d\n", rc);
                        LASSERT(rc == -ENOMEM);
                        GOTO(cleanup_bulk, rc = -ENOMEM);
                }
        }

        spin_lock(&request->rq_lock);
        /* If the MD attach succeeds, there _will_ be a reply_in callback */
        request->rq_receiving_reply = !noreply;
        /* We are responsible for unlinking the reply buffer */
        request->rq_must_unlink = !noreply;
        /* Clear any flags that may be present from previous sends. */
        request->rq_replied = 0;
        request->rq_err = 0;
        request->rq_timedout = 0;
        request->rq_net_err = 0;
        request->rq_resend = 0;
        request->rq_restart = 0;
        request->rq_reply_truncate = 0;
        spin_unlock(&request->rq_lock);

        if (!noreply) {
                reply_md.start     = request->rq_repbuf;
                reply_md.length    = request->rq_repbuf_len;
                /* Allow multiple early replies */
                reply_md.threshold = LNET_MD_THRESH_INF;
                /* Manage remote for early replies */
                reply_md.options   = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
                                     LNET_MD_MANAGE_REMOTE |
                                     LNET_MD_TRUNCATE; /* allow to make EOVERFLOW error */
                reply_md.user_ptr  = &request->rq_reply_cbid;
                reply_md.eq_handle = ptlrpc_eq_h;

                /* We must see the unlink callback to unset rq_must_unlink,
                 * so we can't auto-unlink */
                rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
                                  &request->rq_reply_md_h);
                if (rc != 0) {
                        CERROR("LNetMDAttach failed: %d\n", rc);
                        LASSERT(rc == -ENOMEM);
                        spin_lock(&request->rq_lock);
                        /* ...but the MD attach didn't succeed... */
                        request->rq_receiving_reply = 0;
                        spin_unlock(&request->rq_lock);
                        GOTO(cleanup_me, rc = -ENOMEM);
                }

                CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid "LPU64
                       ", portal %u\n",
                       request->rq_repbuf_len, request->rq_xid,
                       request->rq_reply_portal);
        }

        /* add references on request for request_out_callback */
        ptlrpc_request_addref(request);
        if (obd->obd_svc_stats != NULL)
                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
                        cfs_atomic_read(&request->rq_import->imp_inflight));

        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);

        cfs_gettimeofday(&request->rq_arrival_time);
        request->rq_sent = cfs_time_current_sec();
        /* We give the server rq_timeout secs to process the req, and
         * add the network latency for our local timeout. */
        request->rq_deadline = request->rq_sent + request->rq_timeout +
                ptlrpc_at_get_net_latency(request);

        ptlrpc_pinger_sending_on_import(request->rq_import);

        DEBUG_REQ(D_INFO, request, "send flg=%x",
                  lustre_msg_get_flags(request->rq_reqmsg));
        rc = ptl_send_buf(&request->rq_req_md_h,
                          request->rq_reqbuf, request->rq_reqdata_len,
                          LNET_NOACK_REQ, &request->rq_req_cbid,
                          connection,
                          request->rq_request_portal,
                          request->rq_xid, 0);
        if (rc == 0)
                GOTO(out, rc);

        ptlrpc_req_finished(request);
        if (noreply)
                GOTO(out, rc);

 cleanup_me:
        /* MEUnlink is safe; the PUT didn't even get off the ground, and
         * nobody apart from the PUT's target has the right nid+XID to
         * access the reply buffer. */
        rc2 = LNetMEUnlink(reply_me_h);
        LASSERT(rc2 == 0);
        /* UNLINKED callback called synchronously */
        LASSERT(!request->rq_receiving_reply);

 cleanup_bulk:
        /* We do a sync unlink here as there was no real transfer, so the
         * chance of a long unlink over a sluggish net is smaller here. */
        ptlrpc_unregister_bulk(request, 0);
 out:
        if (request->rq_memalloc)
                cfs_memory_pressure_restore(mpflag);
        return rc;
}