/**
 * Begin recovery of a disconnected import.
 *
 * Recovery is kicked off simply by issuing a (re)connect attempt to the
 * import's target; all further recovery state transitions are driven by
 * the connect interpret callbacks.
 */
void ptlrpc_initiate_recovery(struct obd_import *imp)
{
	struct obd_device *obd = imp->imp_obd;

	ENTRY;

	CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(obd));
	ptlrpc_connect_import(imp);

	EXIT;
}
static int ptlrpc_ping(struct obd_import *imp) { struct ptlrpc_request *req; req = ptlrpc_prep_ping(imp); if (!req) { CERROR("OOM trying to ping %s->%s\n", imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd)); return -ENOMEM; } DEBUG_REQ(D_INFO, req, "pinging %s->%s", imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd)); ptlrpcd_add_req(req); return 0; }
int ptlrpc_ping(struct obd_import *imp) { struct ptlrpc_request *req; ENTRY; req = ptlrpc_prep_ping(imp); if (req == NULL) { CERROR("OOM trying to ping %s->%s\n", imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd)); RETURN(-ENOMEM); } DEBUG_REQ(D_INFO, req, "pinging %s->%s", imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd)); ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); RETURN(0); }
/**
 * Identify what request from replay list needs to be replayed next
 * (based on what we have already replayed) and send it to server.
 *
 * \param[in]  imp       import whose replay list is walked
 * \param[out] inflight  set to 0 here; presumably set to 1 once a replay
 *                       is sent in the (not visible) remainder — confirm
 *
 * NOTE(review): this function continues beyond the end of this chunk;
 * only the committed-list search is visible here.
 */
int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
{
	int rc = 0;
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req = NULL;
	__u64 last_transno;

	*inflight = 0;

	/* It might have committed some after we last spoke, so make sure we
	 * get rid of them now. */
	spin_lock(&imp->imp_lock);
	/* Force ptlrpc_free_committed() to re-scan even if the committed
	 * transno has not changed since the last check. */
	imp->imp_last_transno_checked = 0;
	ptlrpc_free_committed(imp);
	/* Snapshot the last replayed transno under imp_lock; the list walk
	 * below resumes from just after this point. */
	last_transno = imp->imp_last_replay_transno;
	spin_unlock(&imp->imp_lock);

	CDEBUG(D_HA, "import %p from %s committed %llu last %llu\n",
	       imp, obd2cli_tgt(imp->imp_obd),
	       imp->imp_peer_committed_transno, last_transno);

	/* Do I need to hold a lock across this iteration? We shouldn't be
	 * racing with any additions to the list, because we're in recovery
	 * and are therefore not processing additional requests to add. Calls
	 * to ptlrpc_free_committed might commit requests, but nothing "newer"
	 * than the one we're replaying (it can't be committed until it's
	 * replayed, and we're doing that here). l_f_e_safe protects against
	 * problems with the current request being committed, in the unlikely
	 * event of that race. So, in conclusion, I think that it's safe to
	 * perform this list-walk without the imp_lock held.
	 *
	 * But, the {mdc,osc}_replay_open callbacks both iterate
	 * request lists, and have comments saying they assume the
	 * imp_lock is being held by ptlrpc_replay, but it's not. it's
	 * just a little race...
	 */

	/* Replay all the committed open requests on committed_list first */
	if (!list_empty(&imp->imp_committed_list)) {
		/* Peek at the tail (highest transno) to decide whether any
		 * committed request still needs replaying. */
		tmp = imp->imp_committed_list.prev;
		req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);

		/* The last request on committed_list hasn't been replayed */
		if (req->rq_transno > last_transno) {
			/* Since the imp_committed_list is immutable before
			 * all of it's requests being replayed, it's safe to
			 * use a cursor to accelerate the search */
			imp->imp_replay_cursor = imp->imp_replay_cursor->next;

			/* Advance the cursor until the first request with a
			 * transno past what we have already replayed. */
			while (imp->imp_replay_cursor !=
			       &imp->imp_committed_list) {
				req = list_entry(imp->imp_replay_cursor,
						 struct ptlrpc_request,
						 rq_replay_list);
				if (req->rq_transno > last_transno)
					break;

				/* Already replayed; keep looking. */
				req = NULL;
				imp->imp_replay_cursor =
					imp->imp_replay_cursor->next;
			}
		} else {