int osc_object_is_contended(struct osc_object *obj)
{
        struct osc_device *dev = lu2osc_dev(obj->oo_cl.co_lu.lo_dev);
        int osc_contention_time = dev->od_contention_time;
        unsigned long cur_time = cfs_time_current();
        unsigned long retry_time;

        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_OBJECT_CONTENTION))
                return 1;

        if (!obj->oo_contended)
                return 0;

        /*
         * This logic is copied from ll_file_is_contended().
         */
        retry_time = cfs_time_add(obj->oo_contention_time,
                                  cfs_time_seconds(osc_contention_time));
        if (cfs_time_after(cur_time, retry_time)) {
                osc_object_clear_contended(obj);
                return 0;
        }
        return 1;
}
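/* A minimal standalone sketch (hypothetical helper, not part of the
 * original source) of the wrap-safe timeout pattern used above:
 * record a start time, derive a deadline, and compare against the
 * current time with cfs_time_after(), the same primitives that
 * osc_object_is_contended() uses. */
static int example_interval_expired(cfs_time_t start, int seconds)
{
        cfs_time_t deadline = cfs_time_add(start, cfs_time_seconds(seconds));

        /* cfs_time_after() compares jiffies-like values safely across
         * counter wraparound, unlike a plain '>' comparison */
        return cfs_time_after(cfs_time_current(), deadline);
}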
/**
 * Callback handler for receiving incoming glimpse ASTs.
 *
 * This can only happen on the client side. After handling the glimpse AST,
 * we also consider dropping the lock here if it has been unused locally for
 * a long time.
 */
static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
                                    struct ldlm_namespace *ns,
                                    struct ldlm_request *dlm_req,
                                    struct ldlm_lock *lock)
{
        int rc = -ENOSYS;

        LDLM_DEBUG(lock, "client glimpse AST callback handler");

        if (lock->l_glimpse_ast != NULL)
                rc = lock->l_glimpse_ast(lock, req);

        if (req->rq_repmsg != NULL) {
                ptlrpc_reply(req);
        } else {
                req->rq_status = rc;
                ptlrpc_error(req);
        }

        lock_res_and_lock(lock);
        if (lock->l_granted_mode == LCK_PW &&
            !lock->l_readers && !lock->l_writers &&
            cfs_time_after(cfs_time_current(),
                           cfs_time_add(lock->l_last_used,
                                        cfs_time_seconds(10)))) {
                unlock_res_and_lock(lock);
                if (ldlm_bl_to_thread_lock(ns, NULL, lock))
                        ldlm_handle_bl_callback(ns, NULL, lock);
                return;
        }
        unlock_res_and_lock(lock);
        LDLM_LOCK_RELEASE(lock);
}
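/* A minimal sketch (hypothetical helper, not part of the original source)
 * factoring out the idle test above: a PW lock with no local readers or
 * writers that has gone unused for idle_seconds is a candidate for early
 * cancellation. The caller must hold the resource lock, as
 * ldlm_handle_gl_callback() does via lock_res_and_lock(). */
static int example_lock_is_idle(struct ldlm_lock *lock, int idle_seconds)
{
        return lock->l_granted_mode == LCK_PW &&
               lock->l_readers == 0 && lock->l_writers == 0 &&
               cfs_time_after(cfs_time_current(),
                              cfs_time_add(lock->l_last_used,
                                           cfs_time_seconds(idle_seconds)));
}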
/* Read from the wire as much data as possible.
 * Returns 0 or 1 on success, <0 on error or EOF.
 * 0 means partial read, 1 means complete. */
int usocklnd_read_data(usock_conn_t *conn)
{
        struct iovec *iov;
        int nob;
        cfs_time_t t;

        LASSERT (conn->uc_rx_nob_wanted != 0);

        do {
                usock_peer_t *peer = conn->uc_peer;

                LASSERT (conn->uc_rx_niov > 0);

                nob = libcfs_sock_readv(conn->uc_sock,
                                        conn->uc_rx_iov,
                                        conn->uc_rx_niov);
                if (nob <= 0) { /* read nothing or error */
                        if (nob < 0)
                                conn->uc_errored = 1;
                        return nob;
                }

                LASSERT (nob <= conn->uc_rx_nob_wanted);
                conn->uc_rx_nob_wanted -= nob;
                conn->uc_rx_nob_left -= nob;
                t = cfs_time_current();
                conn->uc_rx_deadline =
                        cfs_time_add(t, cfs_time_seconds(usock_tuns.ut_timeout));
                if (peer != NULL)
                        peer->up_last_alive = t;

                /* "consume" iov */
                iov = conn->uc_rx_iov;
                do {
                        LASSERT (conn->uc_rx_niov > 0);

                        if (nob < iov->iov_len) {
                                iov->iov_base = (void *)(((unsigned long)(iov->iov_base)) + nob);
                                iov->iov_len -= nob;
                                break;
                        }

                        nob -= iov->iov_len;
                        conn->uc_rx_iov = ++iov;
                        conn->uc_rx_niov--;
                } while (nob != 0);
        } while (conn->uc_rx_nob_wanted != 0);

        return 1; /* read complete */
}
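/* A minimal sketch (hypothetical helper, not part of the original source)
 * of the "consume" step above as a standalone routine: advance the iovec
 * array past nob bytes that have already been transferred, trimming a
 * partially consumed entry in place. */
static void example_consume_iov(struct iovec **iovp, int *niovp, int nob)
{
        struct iovec *iov = *iovp;

        while (nob != 0) {
                LASSERT (*niovp > 0);

                if ((size_t)nob < iov->iov_len) {
                        /* partially consumed entry: shift base, shrink len */
                        iov->iov_base = (char *)iov->iov_base + nob;
                        iov->iov_len -= nob;
                        break;
                }

                /* fully consumed entry: drop it and continue */
                nob -= iov->iov_len;
                iov++;
                (*niovp)--;
        }
        *iovp = iov;
}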
/* Send as much tx data as possible.
 * Returns 0 or 1 on success, <0 on fatal error.
 * 0 means partial send or non-fatal error, 1 means complete.
 * Rely on libcfs_sock_writev() for differentiating fatal and
 * non-fatal errors. An error should be considered non-fatal if:
 * 1) it still makes sense to continue reading &&
 * 2) poll() will set the POLLHUP|POLLERR flags anyway */
int usocklnd_send_tx(usock_conn_t *conn, usock_tx_t *tx)
{
        struct iovec *iov;
        int nob;
        cfs_time_t t;

        LASSERT (tx->tx_resid != 0);

        do {
                usock_peer_t *peer = conn->uc_peer;

                LASSERT (tx->tx_niov > 0);

                nob = libcfs_sock_writev(conn->uc_sock,
                                         tx->tx_iov, tx->tx_niov);
                if (nob < 0)
                        conn->uc_errored = 1;
                if (nob <= 0) /* write queue is flow-controlled or error */
                        return nob;

                LASSERT (nob <= tx->tx_resid);
                tx->tx_resid -= nob;
                t = cfs_time_current();
                conn->uc_tx_deadline =
                        cfs_time_add(t, cfs_time_seconds(usock_tuns.ut_timeout));
                if (peer != NULL)
                        peer->up_last_alive = t;

                /* "consume" iov */
                iov = tx->tx_iov;
                do {
                        LASSERT (tx->tx_niov > 0);

                        if (nob < iov->iov_len) {
                                iov->iov_base = (void *)(((unsigned long)(iov->iov_base)) + nob);
                                iov->iov_len -= nob;
                                break;
                        }

                        nob -= iov->iov_len;
                        tx->tx_iov = ++iov;
                        tx->tx_niov--;
                } while (nob != 0);
        } while (tx->tx_resid != 0);

        return 1; /* send complete */
}
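/* A hypothetical call-site sketch (not from the original source) showing
 * how a caller might act on the 0/1/<0 return convention documented in
 * the header comment above. */
static void example_tx_callsite(usock_conn_t *conn, usock_tx_t *tx)
{
        int rc = usocklnd_send_tx(conn, tx);

        if (rc < 0) {
                /* fatal error: the caller would tear down the connection */
        } else if (rc == 0) {
                /* partial send or non-fatal error: leave tx queued and wait
                 * for poll() to report POLLOUT (or POLLHUP|POLLERR) */
        } else {
                /* rc == 1: tx fully sent, safe to dequeue and free */
        }
}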
int quota_adjust_slave_lqs(struct quota_adjust_qunit *oqaq,
                           struct lustre_quota_ctxt *qctxt)
{
        struct lustre_qunit_size *lqs = NULL;
        unsigned long *unit, *tune;
        signed long tmp = 0;
        cfs_time_t time_limit = 0, *shrink;
        int i, rc = 0;
        ENTRY;

        LASSERT(qctxt);
        lqs = quota_search_lqs(LQS_KEY(QAQ_IS_GRP(oqaq), oqaq->qaq_id),
                               qctxt, QAQ_IS_CREATE_LQS(oqaq) ? 1 : 0);
        if (lqs == NULL || IS_ERR(lqs)) {
                CERROR("failed to find lqs for %sid %u!\n",
                       QAQ_IS_GRP(oqaq) ? "g" : "u", oqaq->qaq_id);
                /* PTR_ERR(NULL) would be 0, so map the NULL case to -ENOENT */
                RETURN(lqs ? PTR_ERR(lqs) : -ENOENT);
        }

        CDEBUG(D_QUOTA, "before: bunit: %lu, iunit: %lu.\n",
               lqs->lqs_bunit_sz, lqs->lqs_iunit_sz);
        cfs_spin_lock(&lqs->lqs_lock);
        for (i = 0; i < 2; i++) {
                if (i == 0 && !QAQ_IS_ADJBLK(oqaq))
                        continue;

                if (i == 1 && !QAQ_IS_ADJINO(oqaq))
                        continue;

                tmp = i ? (lqs->lqs_iunit_sz - oqaq->qaq_iunit_sz) :
                          (lqs->lqs_bunit_sz - oqaq->qaq_bunit_sz);
                shrink = i ? &lqs->lqs_last_ishrink :
                             &lqs->lqs_last_bshrink;
                time_limit = cfs_time_add(i ? lqs->lqs_last_ishrink :
                                              lqs->lqs_last_bshrink,
                                          cfs_time_seconds(qctxt->lqc_switch_seconds));
                unit = i ? &lqs->lqs_iunit_sz : &lqs->lqs_bunit_sz;
                tune = i ? &lqs->lqs_itune_sz : &lqs->lqs_btune_sz;

                /* quota master shrinks */
                if (qctxt->lqc_handler && tmp > 0)
                        *shrink = cfs_time_current();

                /* quota master enlarges */
                if (qctxt->lqc_handler && tmp < 0) {
                        /* in case of a ping-pong effect, don't enlarge lqs
                         * again within a short time of a shrink */
                        if (*shrink &&
                            cfs_time_before(cfs_time_current(), time_limit))
                                tmp = 0;
                }

                /* when setquota, don't enlarge lqs b=18616 */
                if (QAQ_IS_CREATE_LQS(oqaq) && tmp < 0)
                        tmp = 0;

                if (tmp != 0) {
                        *unit = i ? oqaq->qaq_iunit_sz : oqaq->qaq_bunit_sz;
                        *tune = (*unit) / 2;
                }

                if (tmp > 0)
                        rc |= i ? LQS_INO_DECREASE : LQS_BLK_DECREASE;
                if (tmp < 0)
                        rc |= i ? LQS_INO_INCREASE : LQS_BLK_INCREASE;
        }
        cfs_spin_unlock(&lqs->lqs_lock);

        CDEBUG(D_QUOTA, "after: bunit: %lu, iunit: %lu.\n",
               lqs->lqs_bunit_sz, lqs->lqs_iunit_sz);

        lqs_putref(lqs);
        RETURN(rc);
}
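/* A minimal sketch (hypothetical helper, not part of the original source)
 * of the anti-ping-pong guard above: once a shrink has been recorded,
 * refuse to enlarge the unit again until lqc_switch_seconds have elapsed
 * since the last shrink. */
static int example_may_enlarge(cfs_time_t last_shrink, int switch_seconds)
{
        cfs_time_t limit = cfs_time_add(last_shrink,
                                        cfs_time_seconds(switch_seconds));

        /* allowed if we never shrank, or the quiet period has passed */
        return last_shrink == 0 ||
               !cfs_time_before(cfs_time_current(), limit);
}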
/* Send as much tx data as possible.
 * Returns 0 or 1 on success, <0 on fatal error.
 * 0 means partial send or non-fatal error, 1 means complete.
 * Rely on lx_writev() for differentiating fatal and
 * non-fatal errors. An error should be considered non-fatal if:
 * 1) it still makes sense to continue reading &&
 * 2) poll() will set the POLLHUP|POLLERR flags anyway */
int usocklnd_send_tx(usock_conn_t *conn, usock_tx_t *tx)
{
        struct iovec *iov;
        int nob;
        struct lnet_xport *lx = conn->uc_lx;
        struct pfl_opstat *opst;
        cfs_time_t t;

        LASSERT (tx->tx_resid != 0);

        do {
                usock_peer_t *peer = conn->uc_peer;

                LASSERT (tx->tx_niov > 0);

                nob = lx_writev(lx, tx->tx_iov, tx->tx_niov);
                if (nob < 0)
                        conn->uc_errored = 1;
                if (nob <= 0) /* write queue is flow-controlled or error */
                        return nob;

                /* account the bytes against per-interface, per-peer,
                 * and aggregate write statistics */
                if (peer && peer->up_ni)
                        opst = peer->up_ni->ni_iostats.wr;
                else if (conn->uc_ni)
                        opst = conn->uc_ni->ni_iostats.wr;
                else
                        opst = usock_pasv_iostats.wr;
                pfl_opstat_add(opst, nob);

                if (peer)
                        pfl_opstat_add(peer->up_iostats.wr, nob);
                pfl_opstat_add(usock_aggr_iostats.wr, nob);

                LASSERT (nob <= tx->tx_resid);
                tx->tx_resid -= nob;
                t = cfs_time_current();
                conn->uc_tx_deadline =
                        cfs_time_add(t, cfs_time_seconds(usock_tuns.ut_timeout));
                if (peer != NULL)
                        peer->up_last_alive = t;

                /* "consume" iov */
                iov = tx->tx_iov;
                do {
                        LASSERT (tx->tx_niov > 0);

                        if ((size_t)nob < iov->iov_len) {
                                iov->iov_base = (void *)(((unsigned long)(iov->iov_base)) + nob);
                                iov->iov_len -= nob;
                                break;
                        }

                        nob -= iov->iov_len;
                        tx->tx_iov = ++iov;
                        tx->tx_niov--;
                } while (nob != 0);
        } while (tx->tx_resid != 0);

        return 1; /* send complete */
}
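/* A minimal sketch (hypothetical helper, not part of the original source)
 * factoring out the opstat selection above: prefer the peer's interface
 * counters, fall back to the connection's interface, and finally to the
 * passive-socket bucket. The same shape applies to the .rd side in
 * usocklnd_read_data(). */
static struct pfl_opstat *example_pick_wr_opstat(usock_conn_t *conn,
                                                 usock_peer_t *peer)
{
        if (peer && peer->up_ni)
                return peer->up_ni->ni_iostats.wr;
        if (conn->uc_ni)
                return conn->uc_ni->ni_iostats.wr;
        return usock_pasv_iostats.wr;
}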
/* Read from the wire as much data as possible.
 * Returns 0 or 1 on success, <0 on error or EOF.
 * 0 means partial read, 1 means complete. */
int usocklnd_read_data(usock_conn_t *conn)
{
        struct pfl_opstat *opst;
        struct iovec *iov;
        int nob;
        cfs_time_t t;

        LASSERT (conn->uc_rx_nob_wanted != 0);

        do {
                usock_peer_t *peer = conn->uc_peer;

                LASSERT (conn->uc_rx_niov > 0);

                nob = lx_readv(conn->uc_lx, conn->uc_rx_iov,
                               conn->uc_rx_niov);
                if (nob <= 0) { /* read nothing or error */
                        if (nob < 0)
                                conn->uc_errored = 1;
                        return nob;
                }

                /* account the bytes against per-interface, per-peer,
                 * and aggregate read statistics */
                if (peer && peer->up_ni)
                        opst = peer->up_ni->ni_iostats.rd;
                else if (conn->uc_ni)
                        opst = conn->uc_ni->ni_iostats.rd;
                else
                        opst = usock_pasv_iostats.rd;
                pfl_opstat_add(opst, nob);

                if (peer)
                        pfl_opstat_add(peer->up_iostats.rd, nob);
                pfl_opstat_add(usock_aggr_iostats.rd, nob);

                LASSERT (nob <= conn->uc_rx_nob_wanted);
                conn->uc_rx_nob_wanted -= nob;
                conn->uc_rx_nob_left -= nob;
                t = cfs_time_current();
                conn->uc_rx_deadline =
                        cfs_time_add(t, cfs_time_seconds(usock_tuns.ut_timeout));
                if (peer != NULL)
                        peer->up_last_alive = t;

                /* "consume" iov */
                iov = conn->uc_rx_iov;
                do {
                        LASSERT (conn->uc_rx_niov > 0);

                        if ((size_t)nob < iov->iov_len) {
                                iov->iov_base = (void *)(((unsigned long)(iov->iov_base)) + nob);
                                iov->iov_len -= nob;
                                break;
                        }

                        nob -= iov->iov_len;
                        conn->uc_rx_iov = ++iov;
                        conn->uc_rx_niov--;
                } while (nob != 0);
        } while (conn->uc_rx_nob_wanted != 0);

        return 1; /* read complete */
}
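/* A minimal sketch (hypothetical helper, not part of the original source)
 * of the liveness bookkeeping both transfer loops perform after each
 * successful read or write: push the deadline ut_timeout seconds into the
 * future and record when the peer was last seen alive. */
static void example_refresh_rx_liveness(usock_conn_t *conn,
                                        usock_peer_t *peer)
{
        cfs_time_t t = cfs_time_current();

        conn->uc_rx_deadline =
                cfs_time_add(t, cfs_time_seconds(usock_tuns.ut_timeout));
        if (peer != NULL)
                peer->up_last_alive = t;
}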