/* nvram_write_error_log
 *
 * We need to buffer the error logs into nvram to ensure that we have
 * the failure information to decode.
 */
int nvram_write_error_log(char *buff, int length,
                          unsigned int err_type, unsigned int error_log_cnt)
{
        int rc = nvram_write_os_partition(&rtas_log_partition, buff, length,
                                          err_type, error_log_cnt);
        if (!rc) {
                last_unread_rtas_event = ktime_get_real_seconds();
#ifdef CONFIG_PSTORE
                last_rtas_event = ktime_get_real_seconds();
#endif
        }

        return rc;
}

static int test_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
        struct rtc_test_data *rtd = dev_get_drvdata(dev);
        ktime_t timeout;
        u64 expires;

        timeout = rtc_tm_to_time64(&alrm->time) - ktime_get_real_seconds();
        timeout -= rtd->offset;

        del_timer(&rtd->alarm);

        expires = jiffies + timeout * HZ;
        if (expires > U32_MAX)
                expires = U32_MAX;

        rtd->alarm.expires = expires;

        if (alrm->enabled)
                add_timer(&rtd->alarm);

        rtd->alarm_en = alrm->enabled;

        return 0;
}

static int llog_read_header(const struct lu_env *env,
                            struct llog_handle *handle,
                            struct obd_uuid *uuid)
{
        struct llog_operations *lop;
        int rc;

        rc = llog_handle2ops(handle, &lop);
        if (rc)
                return rc;

        if (lop->lop_read_header == NULL)
                return -EOPNOTSUPP;

        rc = lop->lop_read_header(env, handle);
        if (rc == LLOG_EEMPTY) {
                struct llog_log_hdr *llh = handle->lgh_hdr;

                handle->lgh_last_idx = 0; /* header is record with index 0 */
                llh->llh_count = 1;       /* for the header record */
                llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
                llh->llh_hdr.lrh_len = llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
                llh->llh_hdr.lrh_index = llh->llh_tail.lrt_index = 0;
                llh->llh_timestamp = ktime_get_real_seconds();
                if (uuid)
                        memcpy(&llh->llh_tgtuuid, uuid,
                               sizeof(llh->llh_tgtuuid));
                llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap);
                ext2_set_bit(0, llh->llh_bitmap);
                rc = 0;
        }

        return rc;
}

/**
 * Disconnect a bulk desc from the network. Idempotent. Not
 * thread-safe (i.e. only interlocks with completion callback).
 * Returns 1 on success or 0 if network unregistration failed for whatever
 * reason.
 */
int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
{
        struct ptlrpc_bulk_desc *desc = req->rq_bulk;
        wait_queue_head_t *wq;
        int rc;

        LASSERT(!in_interrupt());       /* might sleep */

        /* Let's setup deadline for reply unlink. */
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
            async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
                req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK;

        if (ptlrpc_client_bulk_active(req) == 0)        /* completed or */
                return 1;                               /* never registered */

        LASSERT(desc->bd_req == req);   /* bd_req NULL until registered */

        /* the unlink ensures the callback happens ASAP and is the last
         * one. If it fails, it must be because completion just happened,
         * but we must still wait_event() in this case to give liblustre
         * a chance to run client_bulk_callback()
         */
        mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);

        if (ptlrpc_client_bulk_active(req) == 0)        /* completed or */
                return 1;                               /* never registered */

        /* Move to "Unregistering" phase as bulk was not unlinked yet. */
        ptlrpc_rqphase_move(req, RQ_PHASE_UNREG_BULK);

        /* Do not wait for unlink to finish. */
        if (async)
                return 0;

        if (req->rq_set)
                wq = &req->rq_set->set_waitq;
        else
                wq = &req->rq_reply_waitq;

        for (;;) {
                /* Network access will complete in finite time but the HUGE
                 * timeout lets us CWARN for visibility of sluggish LNDs
                 */
                int cnt = 0;

                while (cnt < LONG_UNLINK &&
                       (rc = wait_event_idle_timeout(*wq,
                                                     !ptlrpc_client_bulk_active(req),
                                                     HZ)) == 0)
                        cnt += 1;
                if (rc > 0) {
                        ptlrpc_rqphase_move(req, req->rq_next_phase);
                        return 1;
                }

                DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
                          desc);
        }
        return 0;
}

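/*
 * A minimal standalone sketch (hypothetical names, not ptlrpc code) of
 * the bounded-wait idiom used above: poll the completion condition once
 * per second with wait_event_idle_timeout() so a stuck peer produces a
 * warning instead of a silent, unbounded hang.
 */
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static int demo_done;

static void demo_wait_with_warning(int warn_after_secs)
{
        int cnt = 0;
        long rc = 0;

        /* wake up every second; rc > 0 once demo_done becomes true */
        while (cnt < warn_after_secs &&
               (rc = wait_event_idle_timeout(demo_waitq, demo_done, HZ)) == 0)
                cnt++;
        if (rc == 0)
                pr_warn("demo: still waiting after %d seconds\n", cnt);
}
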
/*
 * Are we using the ibm,rtas-log for oops/panic reports? And if so,
 * would logging this oops/panic overwrite an RTAS event that rtas_errd
 * hasn't had a chance to read and process? Return 1 if so, else 0.
 *
 * We assume that if rtas_errd hasn't read the RTAS event in
 * NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to.
 */
int clobbering_unread_rtas_event(void)
{
        return (oops_log_partition.index == rtas_log_partition.index &&
                last_unread_rtas_event &&
                ktime_get_real_seconds() - last_unread_rtas_event <=
                NVRAM_RTAS_READ_TIMEOUT);
}

void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info)
{
        struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

        mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
        down_write(&dev_replace->rwsem);

        switch (dev_replace->replace_state) {
        case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
        case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
        case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
        case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
                break;
        case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
                dev_replace->replace_state =
                        BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
                dev_replace->time_stopped = ktime_get_real_seconds();
                dev_replace->item_needs_writeback = 1;
                btrfs_info(fs_info, "suspending dev_replace for unmount");
                break;
        }

        up_write(&dev_replace->rwsem);
        mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
}

int nilfs_commit_super(struct super_block *sb, int flag)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct nilfs_super_block **sbp = nilfs->ns_sbp;
        time64_t t;

        /* nilfs->ns_sem must be locked by the caller. */
        t = ktime_get_real_seconds();
        nilfs->ns_sbwtime = t;
        sbp[0]->s_wtime = cpu_to_le64(t);
        sbp[0]->s_sum = 0;
        sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
                                             (unsigned char *)sbp[0],
                                             nilfs->ns_sbsize));
        if (flag == NILFS_SB_COMMIT_ALL && sbp[1]) {
                sbp[1]->s_wtime = sbp[0]->s_wtime;
                sbp[1]->s_sum = 0;
                sbp[1]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
                                                     (unsigned char *)sbp[1],
                                                     nilfs->ns_sbsize));
        }
        clear_nilfs_sb_dirty(nilfs);
        nilfs->ns_flushed_device = 1;
        /* make sure store to ns_flushed_device cannot be reordered */
        smp_wmb();
        return nilfs_sync_super(sb, flag);
}

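/*
 * A minimal standalone sketch (not NILFS code) of the checksum pattern
 * used by nilfs_commit_super() above: the s_sum field is zeroed before
 * the CRC is taken over the whole block, so a verifier can recompute the
 * CRC the same way and compare it against the stored value. The struct,
 * field names, and zlib's crc32() (standing in for the kernel's
 * crc32_le()) are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>               /* link with -lz */

struct demo_super {
        uint64_t wtime;
        uint32_t sum;           /* CRC over the struct, computed with sum == 0 */
};

static uint32_t demo_super_crc(struct demo_super *sb)
{
        sb->sum = 0;            /* the field must not feed into its own CRC */
        return crc32(0, (const unsigned char *)sb, sizeof(*sb));
}

int main(void)
{
        struct demo_super sb;
        uint32_t stored;

        memset(&sb, 0, sizeof(sb));     /* put padding in a known state */
        sb.wtime = 1700000000ULL;

        sb.sum = demo_super_crc(&sb);   /* commit path: store checksum */
        stored = sb.sum;
        printf("checksum ok: %d\n", demo_super_crc(&sb) == stored); /* verify */
        return 0;
}
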
static int test_rtc_set_mmss64(struct device *dev, time64_t secs)
{
        struct rtc_test_data *rtd = dev_get_drvdata(dev);

        rtd->offset = secs - ktime_get_real_seconds();

        return 0;
}

static int test_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
        struct rtc_test_data *rtd = dev_get_drvdata(dev);

        rtc_time64_to_tm(ktime_get_real_seconds() + rtd->offset, tm);

        return 0;
}

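/*
 * A minimal userspace sketch (not driver code) of the offset trick the
 * two test_rtc_* handlers above share: test_rtc_set_mmss64() stores only
 * the delta between the requested time and the wall clock, and
 * test_rtc_read_time() adds that delta back, so the emulated RTC keeps
 * ticking along with the system clock. Names are hypothetical; time()
 * stands in for ktime_get_real_seconds().
 */
#include <stdio.h>
#include <time.h>

static long long demo_offset;

static void demo_set_time(long long secs)
{
        demo_offset = secs - (long long)time(NULL);     /* mirrors set_mmss64 */
}

static long long demo_read_time(void)
{
        return (long long)time(NULL) + demo_offset;     /* mirrors read_time */
}

int main(void)
{
        demo_set_time(1000000000LL);    /* "set" the RTC to an arbitrary epoch */
        printf("emulated RTC reads back: %lld\n", demo_read_time());
        return 0;
}
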
static void setup_inj_struct(struct mce *m)
{
        memset(m, 0, sizeof(struct mce));

        m->cpuvendor = boot_cpu_data.x86_vendor;
        m->time = ktime_get_real_seconds();
        m->cpuid = cpuid_eax(1);
        m->microcode = boot_cpu_data.microcode;
}

static int test_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
        struct rtc_test_data *rtd = dev_get_drvdata(dev);
        time64_t alarm;

        alarm = (rtd->alarm.expires - jiffies) / HZ;
        alarm += ktime_get_real_seconds() + rtd->offset;

        rtc_time64_to_tm(alarm, &alrm->time);
        alrm->enabled = rtd->alarm_en;

        return 0;
}

/*
 * CPER record IDs need to be unique even after reboot, because the
 * record ID is used as an index into ERST storage, and CPER records
 * from multiple boots may co-exist in ERST.
 */
u64 cper_next_record_id(void)
{
        static atomic64_t seq;

        if (!atomic64_read(&seq)) {
                time64_t time = ktime_get_real_seconds();

                /*
                 * This code is unlikely to still be needed in year 2106,
                 * but just in case, let's use a few more bits for timestamps
                 * after y2038 to be sure they keep increasing monotonically
                 * for the next few hundred years...
                 */
                if (time < 0x80000000)
                        atomic64_set(&seq, time << 32);
                else
                        atomic64_set(&seq, 0x8000000000000000ull |
                                           time << 24);
        }

        return atomic64_inc_return(&seq);
}

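/*
 * A standalone sketch (illustrative, not ACPI code) of the seed layout
 * chosen above: pre-2038 timestamps fill bits 63..32, while post-2038
 * timestamps are shifted by only 24 bits and get the top bit set, so
 * every later seed still compares greater than every earlier one.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_cper_seed(int64_t t)
{
        if (t < 0x80000000)
                return (uint64_t)t << 32;
        return 0x8000000000000000ull | ((uint64_t)t << 24);
}

int main(void)
{
        /* last pre-2038 second vs. first post-2038 second */
        printf("seed(2^31 - 1) = %#018llx\n",
               (unsigned long long)demo_cper_seed(0x7fffffffLL));
        printf("seed(2^31)     = %#018llx\n",
               (unsigned long long)demo_cper_seed(0x80000000LL));
        return 0;
}
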
static int osc_io_read_start(const struct lu_env *env,
                             const struct cl_io_slice *slice)
{
        struct cl_object *obj = slice->cis_obj;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        int rc = 0;

        if (!slice->cis_io->ci_noatime) {
                cl_object_attr_lock(obj);
                attr->cat_atime = ktime_get_real_seconds();
                rc = cl_object_attr_set(env, obj, attr, CAT_ATIME);
                cl_object_attr_unlock(obj);
        }

        return rc;
}

static int osc_io_write_start(const struct lu_env *env,
                              const struct cl_io_slice *slice)
{
        struct cl_object *obj = slice->cis_obj;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        int rc = 0;

        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
        cl_object_attr_lock(obj);
        attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
        rc = cl_object_attr_set(env, obj, attr, CAT_MTIME | CAT_CTIME);
        cl_object_attr_unlock(obj);

        return rc;
}

/*
 * Schedule a garbage collection run.
 * - time precision isn't particularly important
 */
void key_schedule_gc(time64_t gc_at)
{
        unsigned long expires;
        time64_t now = ktime_get_real_seconds();

        kenter("%lld", gc_at - now);

        if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
                kdebug("IMMEDIATE");
                schedule_work(&key_gc_work);
        } else if (gc_at < key_gc_next_run) {
                kdebug("DEFERRED");
                key_gc_next_run = gc_at;
                expires = jiffies + (gc_at - now) * HZ;
                mod_timer(&key_gc_timer, expires);
        }
}

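/*
 * A minimal sketch (hypothetical names, not keyring code) of the
 * deferral pattern above: a seconds-based deadline is converted to a
 * jiffies deadline and armed with mod_timer(), which also re-arms a
 * timer that is already pending.
 */
#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list demo_timer;

static void demo_timer_fn(struct timer_list *t)
{
        /* runs in timer (softirq) context once the deadline passes */
}

static void demo_schedule_in(time64_t delta_secs)
{
        mod_timer(&demo_timer, jiffies + delta_secs * HZ);
}

static void demo_init(void)
{
        timer_setup(&demo_timer, demo_timer_fn, 0);
        demo_schedule_in(30);   /* fire roughly 30 seconds from now */
}
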
static int osd_scrub_post(const struct lu_env *env, struct osd_device *dev,
                          int result)
{
        struct lustre_scrub *scrub = &dev->od_scrub;
        struct scrub_file *sf = &scrub->os_file;
        int rc;

        ENTRY;
        CDEBUG(D_LFSCK, "%s: OI scrub post with result = %d\n",
               scrub->os_name, result);

        down_write(&scrub->os_rwsem);
        spin_lock(&scrub->os_lock);
        thread_set_flags(&scrub->os_thread, SVC_STOPPING);
        spin_unlock(&scrub->os_lock);
        if (scrub->os_new_checked > 0) {
                sf->sf_items_checked += scrub->os_new_checked;
                scrub->os_new_checked = 0;
                sf->sf_pos_last_checkpoint = scrub->os_pos_current;
        }
        sf->sf_time_last_checkpoint = ktime_get_real_seconds();
        if (result > 0) {
                sf->sf_status = SS_COMPLETED;
                if (!(sf->sf_param & SP_DRYRUN)) {
                        memset(sf->sf_oi_bitmap, 0, SCRUB_OI_BITMAP_SIZE);
                        sf->sf_flags &= ~(SF_RECREATED | SF_INCONSISTENT |
                                          SF_UPGRADE | SF_AUTO);
                }
                sf->sf_time_last_complete = sf->sf_time_last_checkpoint;
                sf->sf_success_count++;
        } else if (result == 0) {
                if (scrub->os_paused)
                        sf->sf_status = SS_PAUSED;
                else
                        sf->sf_status = SS_STOPPED;
        } else {
                sf->sf_status = SS_FAILED;
        }
        sf->sf_run_time += ktime_get_seconds() -
                           scrub->os_time_last_checkpoint;

        rc = scrub_file_store(env, scrub);
        up_write(&scrub->os_rwsem);

        RETURN(rc < 0 ? rc : result);
}

/**
 * Dump Lustre log to ::debug_file_path by calling tracefile_dump_all_pages()
 */
void libcfs_debug_dumplog_internal(void *arg)
{
        void *journal_info;

        journal_info = current->journal_info;
        current->journal_info = NULL;

        if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0) {
                snprintf(debug_file_name, sizeof(debug_file_name) - 1,
                         "%s.%lld.%ld", libcfs_debug_file_path_arr,
                         (s64)ktime_get_real_seconds(), (long_ptr_t)arg);
                pr_alert("LustreError: dumping log to %s\n", debug_file_name);
                cfs_tracefile_dump_all_pages(debug_file_name);
                libcfs_run_debug_log_upcall(debug_file_name);
        }

        current->journal_info = journal_info;
}

static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
        struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
        struct ptlrpc_service *svc = svcpt->scp_service;
        int service_time = max_t(int,
                                 ktime_get_real_seconds() -
                                 req->rq_arrival_time.tv_sec, 1);

        if (!(flags & PTLRPC_REPLY_EARLY) &&
            (req->rq_type != PTL_RPC_MSG_ERR) && req->rq_reqmsg &&
            !(lustre_msg_get_flags(req->rq_reqmsg) &
              (MSG_RESENT | MSG_REPLAY |
               MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
                /* early replies, errors and recovery requests don't count
                 * toward our service time estimate
                 */
                int oldse = at_measured(&svcpt->scp_at_estimate, service_time);

                if (oldse != 0) {
                        DEBUG_REQ(D_ADAPTTO, req,
                                  "svc %s changed estimate from %d to %d",
                                  svc->srv_name, oldse,
                                  at_get(&svcpt->scp_at_estimate));
                }
        }
        /* Report actual service time for client latency calc */
        lustre_msg_set_service_time(req->rq_repmsg, service_time);
        /* Report service time estimate for future client reqs, but report 0
         * (to be ignored by client) if it's an error reply during recovery.
         * (bz15815)
         */
        if (req->rq_type == PTL_RPC_MSG_ERR && !req->rq_export)
                lustre_msg_set_timeout(req->rq_repmsg, 0);
        else
                lustre_msg_set_timeout(req->rq_repmsg,
                                       at_get(&svcpt->scp_at_estimate));

        if (req->rq_reqmsg &&
            !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
                CDEBUG(D_ADAPTTO,
                       "No early reply support: flags=%#x req_flags=%#x magic=%x/%x len=%d\n",
                       flags, lustre_msg_get_flags(req->rq_reqmsg),
                       lustre_msg_get_magic(req->rq_reqmsg),
                       lustre_msg_get_magic(req->rq_repmsg), req->rq_replen);
        }
}

static int llog_read_header(const struct lu_env *env,
                            struct llog_handle *handle,
                            struct obd_uuid *uuid)
{
        struct llog_operations *lop;
        int rc;

        rc = llog_handle2ops(handle, &lop);
        if (rc)
                return rc;

        if (!lop->lop_read_header)
                return -EOPNOTSUPP;

        rc = lop->lop_read_header(env, handle);
        if (rc == LLOG_EEMPTY) {
                struct llog_log_hdr *llh = handle->lgh_hdr;
                size_t len;

                /* lrh_len should be initialized in llog_init_handle */
                handle->lgh_last_idx = 0; /* header is record with index 0 */
                llh->llh_count = 1;       /* for the header record */
                llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
                LASSERT(handle->lgh_ctxt->loc_chunk_size >=
                        LLOG_MIN_CHUNK_SIZE);
                llh->llh_hdr.lrh_len = handle->lgh_ctxt->loc_chunk_size;
                llh->llh_hdr.lrh_index = 0;
                llh->llh_timestamp = ktime_get_real_seconds();
                if (uuid)
                        memcpy(&llh->llh_tgtuuid, uuid,
                               sizeof(llh->llh_tgtuuid));
                llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap);
                /*
                 * Since update llog header might also call this function,
                 * let's reset the bitmap to 0 here
                 */
                len = llh->llh_hdr.lrh_len - llh->llh_bitmap_offset;
                memset(LLOG_HDR_BITMAP(llh), 0, len - sizeof(llh->llh_tail));
                ext2_set_bit(0, LLOG_HDR_BITMAP(llh));
                LLOG_HDR_TAIL(llh)->lrt_len = llh->llh_hdr.lrh_len;
                LLOG_HDR_TAIL(llh)->lrt_index = llh->llh_hdr.lrh_index;
                rc = 0;
        }

        return rc;
}

static int iostat_info_seq_show(struct seq_file *seq, void *offset)
{
        struct super_block *sb = seq->private;
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        time64_t now = ktime_get_real_seconds();

        if (!sbi->iostat_enable)
                return 0;

        seq_printf(seq, "time:          %-16llu\n", now);

        /* print app IOs */
        seq_printf(seq, "app buffered:  %-16llu\n",
                   sbi->write_iostat[APP_BUFFERED_IO]);
        seq_printf(seq, "app direct:    %-16llu\n",
                   sbi->write_iostat[APP_DIRECT_IO]);
        seq_printf(seq, "app mapped:    %-16llu\n",
                   sbi->write_iostat[APP_MAPPED_IO]);

        /* print fs IOs */
        seq_printf(seq, "fs data:       %-16llu\n",
                   sbi->write_iostat[FS_DATA_IO]);
        seq_printf(seq, "fs node:       %-16llu\n",
                   sbi->write_iostat[FS_NODE_IO]);
        seq_printf(seq, "fs meta:       %-16llu\n",
                   sbi->write_iostat[FS_META_IO]);
        seq_printf(seq, "fs gc data:    %-16llu\n",
                   sbi->write_iostat[FS_GC_DATA_IO]);
        seq_printf(seq, "fs gc node:    %-16llu\n",
                   sbi->write_iostat[FS_GC_NODE_IO]);
        seq_printf(seq, "fs cp data:    %-16llu\n",
                   sbi->write_iostat[FS_CP_DATA_IO]);
        seq_printf(seq, "fs cp node:    %-16llu\n",
                   sbi->write_iostat[FS_CP_NODE_IO]);
        seq_printf(seq, "fs cp meta:    %-16llu\n",
                   sbi->write_iostat[FS_CP_META_IO]);
        seq_printf(seq, "fs discard:    %-16llu\n",
                   sbi->write_iostat[FS_DISCARD]);

        return 0;
}

/*
 * Begin iteration through a server list, starting with the last used server if
 * possible, or the last recorded good server if not.
 */
static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
{
        struct afs_cell *cell = vc->cell;
        unsigned int dns_lookup_count;

        if (cell->dns_source == DNS_RECORD_UNAVAILABLE ||
            cell->dns_expiry <= ktime_get_real_seconds()) {
                dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count);
                set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
                queue_work(afs_wq, &cell->manager);

                if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
                        if (wait_var_event_interruptible(
                                    &cell->dns_lookup_count,
                                    smp_load_acquire(&cell->dns_lookup_count)
                                    != dns_lookup_count) < 0) {
                                vc->error = -ERESTARTSYS;
                                return false;
                        }
                }

                /* Status load is ordered after lookup counter load */
                if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
                        vc->error = -EDESTADDRREQ;
                        return false;
                }
        }

        read_lock(&cell->vl_servers_lock);
        vc->server_list = afs_get_vlserverlist(
                rcu_dereference_protected(cell->vl_servers,
                                          lockdep_is_held(&cell->vl_servers_lock)));
        read_unlock(&cell->vl_servers_lock);
        if (!vc->server_list->nr_servers)
                return false;

        vc->untried = (1UL << vc->server_list->nr_servers) - 1;
        vc->index = -1;
        return true;
}

/*
 * Drop a reference on a cell record.
 */
void afs_put_cell(struct afs_net *net, struct afs_cell *cell)
{
        time64_t now, expire_delay;

        if (!cell)
                return;

        _enter("%s", cell->name);

        now = ktime_get_real_seconds();
        cell->last_inactive = now;
        expire_delay = 0;
        if (!test_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags) &&
            !test_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags))
                expire_delay = afs_cell_gc_delay;

        if (atomic_dec_return(&cell->usage) > 1)
                return;

        /* 'cell' may now be garbage collected. */
        afs_set_cell_timer(net, expire_delay);
}

/*
 * Display the list of cells known to the namespace.
 */
static int afs_proc_cells_show(struct seq_file *m, void *v)
{
        struct afs_vlserver_list *vllist;
        struct afs_cell *cell;

        if (v == SEQ_START_TOKEN) {
                /* display header on line 1 */
                seq_puts(m, "USE    TTL SV NAME\n");
                return 0;
        }

        cell = list_entry(v, struct afs_cell, proc_link);
        vllist = rcu_dereference(cell->vl_servers);

        /* display one cell per line on subsequent lines */
        seq_printf(m, "%3u %6lld %2u %s\n",
                   atomic_read(&cell->usage),
                   cell->dns_expiry - ktime_get_real_seconds(),
                   vllist ? vllist->nr_servers : 0,
                   cell->name);
        return 0;
}

static int nilfs_setup_super(struct super_block *sb, int is_mount)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct nilfs_super_block **sbp;
        int max_mnt_count;
        int mnt_count;

        /* nilfs->ns_sem must be locked by the caller. */
        sbp = nilfs_prepare_super(sb, 0);
        if (!sbp)
                return -EIO;

        if (!is_mount)
                goto skip_mount_setup;

        max_mnt_count = le16_to_cpu(sbp[0]->s_max_mnt_count);
        mnt_count = le16_to_cpu(sbp[0]->s_mnt_count);

        if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
                nilfs_msg(sb, KERN_WARNING, "mounting fs with errors");
#if 0
        } else if (max_mnt_count >= 0 && mnt_count >= max_mnt_count) {
                nilfs_msg(sb, KERN_WARNING, "maximal mount count reached");
#endif
        }
        if (!max_mnt_count)
                sbp[0]->s_max_mnt_count = cpu_to_le16(NILFS_DFL_MAX_MNT_COUNT);

        sbp[0]->s_mnt_count = cpu_to_le16(mnt_count + 1);
        sbp[0]->s_mtime = cpu_to_le64(ktime_get_real_seconds());

skip_mount_setup:
        sbp[0]->s_state =
                cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS);
        /* synchronize sbp[1] with sbp[0] */
        if (sbp[1])
                memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
        return nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
}

/**
 * Dump Lustre log to ::debug_file_path by calling tracefile_dump_all_pages()
 */
void libcfs_debug_dumplog_internal(void *arg)
{
        static time64_t last_dump_time;
        time64_t current_time;
        void *journal_info;

        journal_info = current->journal_info;
        current->journal_info = NULL;

        current_time = ktime_get_real_seconds();

        if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) &&
            current_time > last_dump_time) {
                last_dump_time = current_time;
                snprintf(debug_file_name, sizeof(debug_file_name) - 1,
                         "%s.%lld.%ld", libcfs_debug_file_path_arr,
                         (s64)current_time, (long)arg);
                pr_alert("LustreError: dumping log to %s\n", debug_file_name);
                cfs_tracefile_dump_all_pages(debug_file_name);
                libcfs_run_debug_log_upcall(debug_file_name);
        }

        current->journal_info = journal_info;
}

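/*
 * Compared with the earlier variant, this version also rate-limits dumps
 * to at most one per second via last_dump_time. A standalone sketch of
 * that pattern (hypothetical helper; userspace time() stands in for
 * ktime_get_real_seconds()):
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool demo_once_per_second(void)
{
        static time_t last;
        time_t now = time(NULL);

        if (now <= last)
                return false;   /* a caller already fired this second */
        last = now;
        return true;
}

int main(void)
{
        printf("first call:  %d\n", demo_once_per_second());    /* 1 */
        printf("second call: %d\n", demo_once_per_second());    /* 0 */
        return 0;
}
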
static int sptlrpc_info_lprocfs_seq_show(struct seq_file *seq, void *v)
{
        struct obd_device *dev = seq->private;
        struct client_obd *cli = &dev->u.cli;
        struct ptlrpc_sec *sec = NULL;
        char str[32];

        LASSERT(strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) == 0 ||
                strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 ||
                strcmp(dev->obd_type->typ_name, LUSTRE_MGC_NAME) == 0);

        if (cli->cl_import)
                sec = sptlrpc_import_sec_ref(cli->cl_import);
        if (sec == NULL)
                goto out;

        seq_printf(seq, "rpc flavor:  %s\n",
                   sptlrpc_flavor2name_base(sec->ps_flvr.sf_rpc));
        seq_printf(seq, "bulk flavor: %s\n",
                   sptlrpc_flavor2name_bulk(&sec->ps_flvr, str, sizeof(str)));
        seq_printf(seq, "flags:       %s\n",
                   sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str)));
        seq_printf(seq, "id:          %d\n", sec->ps_id);
        seq_printf(seq, "refcount:    %d\n", atomic_read(&sec->ps_refcount));
        seq_printf(seq, "nctx:        %d\n", atomic_read(&sec->ps_nctx));
        seq_printf(seq, "gc interval  %ld\n", sec->ps_gc_interval);
        seq_printf(seq, "gc next      %lld\n",
                   sec->ps_gc_interval ?
                   (s64)(sec->ps_gc_next - ktime_get_real_seconds()) : 0ll);

        sptlrpc_sec_put(sec);
out:
        return 0;
}

/**
 * Send request \a request.
 * if \a noreply is set, don't expect any reply back and don't set up
 * reply buffers.
 * Returns 0 on success or error code.
 */
int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
{
        int rc;
        int rc2;
        int mpflag = 0;
        struct ptlrpc_connection *connection;
        lnet_handle_me_t reply_me_h;
        lnet_md_t reply_md;
        struct obd_device *obd = request->rq_import->imp_obd;

        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
                return 0;

        LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
        LASSERT(request->rq_wait_ctx == 0);

        /* If this is a re-transmit, we're required to have disengaged
         * cleanly from the previous attempt
         */
        LASSERT(!request->rq_receiving_reply);
        LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
                  (request->rq_import->imp_state == LUSTRE_IMP_FULL)));

        if (unlikely(obd != NULL && obd->obd_fail)) {
                CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
                       obd->obd_name);
                /* this prevents us from waiting in ptlrpc_queue_wait */
                spin_lock(&request->rq_lock);
                request->rq_err = 1;
                spin_unlock(&request->rq_lock);
                request->rq_status = -ENODEV;
                return -ENODEV;
        }

        connection = request->rq_import->imp_connection;

        lustre_msg_set_handle(request->rq_reqmsg,
                              &request->rq_import->imp_remote_handle);
        lustre_msg_set_type(request->rq_reqmsg, PTL_RPC_MSG_REQUEST);
        lustre_msg_set_conn_cnt(request->rq_reqmsg,
                                request->rq_import->imp_conn_cnt);
        lustre_msghdr_set_flags(request->rq_reqmsg,
                                request->rq_import->imp_msghdr_flags);

        if (request->rq_resend)
                lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);

        if (request->rq_memalloc)
                mpflag = cfs_memory_pressure_get_and_set();

        rc = sptlrpc_cli_wrap_request(request);
        if (rc)
                goto out;

        /* bulk register should be done after wrap_request() */
        if (request->rq_bulk != NULL) {
                rc = ptlrpc_register_bulk(request);
                if (rc != 0)
                        goto out;
        }

        if (!noreply) {
                LASSERT(request->rq_replen != 0);
                if (request->rq_repbuf == NULL) {
                        LASSERT(request->rq_repdata == NULL);
                        LASSERT(request->rq_repmsg == NULL);
                        rc = sptlrpc_cli_alloc_repbuf(request,
                                                      request->rq_replen);
                        if (rc) {
                                /* this prevents us from looping in
                                 * ptlrpc_queue_wait
                                 */
                                spin_lock(&request->rq_lock);
                                request->rq_err = 1;
                                spin_unlock(&request->rq_lock);
                                request->rq_status = rc;
                                goto cleanup_bulk;
                        }
                } else {
                        request->rq_repdata = NULL;
                        request->rq_repmsg = NULL;
                }

                rc = LNetMEAttach(request->rq_reply_portal,/*XXX FIXME bug 249*/
                                  connection->c_peer, request->rq_xid, 0,
                                  LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
                if (rc != 0) {
                        CERROR("LNetMEAttach failed: %d\n", rc);
                        LASSERT(rc == -ENOMEM);
                        rc = -ENOMEM;
                        goto cleanup_bulk;
                }
        }

        spin_lock(&request->rq_lock);
        /* If the MD attach succeeds, there _will_ be a reply_in callback */
        request->rq_receiving_reply = !noreply;
        request->rq_req_unlink = 1;
        /* We are responsible for unlinking the reply buffer */
        request->rq_reply_unlink = !noreply;
        /* Clear any flags that may be present from previous sends. */
        request->rq_replied = 0;
        request->rq_err = 0;
        request->rq_timedout = 0;
        request->rq_net_err = 0;
        request->rq_resend = 0;
        request->rq_restart = 0;
        request->rq_reply_truncate = 0;
        spin_unlock(&request->rq_lock);

        if (!noreply) {
                reply_md.start = request->rq_repbuf;
                reply_md.length = request->rq_repbuf_len;
                /* Allow multiple early replies */
                reply_md.threshold = LNET_MD_THRESH_INF;
                /* Manage remote for early replies */
                reply_md.options = PTLRPC_MD_OPTIONS | LNET_MD_OP_PUT |
                                   LNET_MD_MANAGE_REMOTE |
                                   LNET_MD_TRUNCATE; /* allow to make EOVERFLOW error */
                reply_md.user_ptr = &request->rq_reply_cbid;
                reply_md.eq_handle = ptlrpc_eq_h;

                /* We must see the unlink callback to unset rq_reply_unlink,
                 * so we can't auto-unlink
                 */
                rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
                                  &request->rq_reply_md_h);
                if (rc != 0) {
                        CERROR("LNetMDAttach failed: %d\n", rc);
                        LASSERT(rc == -ENOMEM);
                        spin_lock(&request->rq_lock);
                        /* ...but the MD attach didn't succeed... */
                        request->rq_receiving_reply = 0;
                        spin_unlock(&request->rq_lock);
                        rc = -ENOMEM;
                        goto cleanup_me;
                }

                CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid %llu, portal %u\n",
                       request->rq_repbuf_len, request->rq_xid,
                       request->rq_reply_portal);
        }

        /* add references on request for request_out_callback */
        ptlrpc_request_addref(request);
        if (obd != NULL && obd->obd_svc_stats != NULL)
                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
                                    atomic_read(&request->rq_import->imp_inflight));

        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);

        ktime_get_real_ts64(&request->rq_arrival_time);
        request->rq_sent = ktime_get_real_seconds();
        /* We give the server rq_timeout secs to process the req, and add the
         * network latency for our local timeout.
         */
        request->rq_deadline = request->rq_sent + request->rq_timeout +
                               ptlrpc_at_get_net_latency(request);

        ptlrpc_pinger_sending_on_import(request->rq_import);

        DEBUG_REQ(D_INFO, request, "send flg=%x",
                  lustre_msg_get_flags(request->rq_reqmsg));
        rc = ptl_send_buf(&request->rq_req_md_h,
                          request->rq_reqbuf, request->rq_reqdata_len,
                          LNET_NOACK_REQ, &request->rq_req_cbid,
                          connection,
                          request->rq_request_portal,
                          request->rq_xid, 0);
        if (rc == 0)
                goto out;

        ptlrpc_req_finished(request);
        if (noreply)
                goto out;

cleanup_me:
        /* MEUnlink is safe; the PUT didn't even get off the ground, and
         * nobody apart from the PUT's target has the right nid+XID to
         * access the reply buffer.
         */
        rc2 = LNetMEUnlink(reply_me_h);
        LASSERT(rc2 == 0);
        /* UNLINKED callback called synchronously */
        LASSERT(!request->rq_receiving_reply);

cleanup_bulk:
        /* We do sync unlink here as there was no real transfer here so
         * the chance to have long unlink to sluggish net is smaller here.
         */
        ptlrpc_unregister_bulk(request, 0);
out:
        if (request->rq_memalloc)
                cfs_memory_pressure_restore(mpflag);
        return rc;
}

int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
{
        char *buf;
        struct libcfs_ioctl_data *data;
        int opc;
        int rc;

        if (cmd != IOC_LIBCFS_LNETST)
                return -EINVAL;

        data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr);

        opc = data->ioc_u32[0];

        if (data->ioc_plen1 > PAGE_SIZE)
                return -EINVAL;

        LIBCFS_ALLOC(buf, data->ioc_plen1);
        if (!buf)
                return -ENOMEM;

        /* copy in parameter */
        if (copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
                LIBCFS_FREE(buf, data->ioc_plen1);
                return -EFAULT;
        }

        mutex_lock(&console_session.ses_mutex);

        console_session.ses_laststamp = ktime_get_real_seconds();

        if (console_session.ses_shutdown) {
                rc = -ESHUTDOWN;
                goto out;
        }

        if (console_session.ses_expired)
                lstcon_session_end();

        if (opc != LSTIO_SESSION_NEW &&
            console_session.ses_state == LST_SESSION_NONE) {
                CDEBUG(D_NET, "LST no active session\n");
                rc = -ESRCH;
                goto out;
        }

        memset(&console_session.ses_trans_stat, 0,
               sizeof(lstcon_trans_stat_t));

        switch (opc) {
        case LSTIO_SESSION_NEW:
                rc = lst_session_new_ioctl((lstio_session_new_args_t *)buf);
                break;
        case LSTIO_SESSION_END:
                rc = lst_session_end_ioctl((lstio_session_end_args_t *)buf);
                break;
        case LSTIO_SESSION_INFO:
                rc = lst_session_info_ioctl((lstio_session_info_args_t *)buf);
                break;
        case LSTIO_DEBUG:
                rc = lst_debug_ioctl((lstio_debug_args_t *)buf);
                break;
        case LSTIO_GROUP_ADD:
                rc = lst_group_add_ioctl((lstio_group_add_args_t *)buf);
                break;
        case LSTIO_GROUP_DEL:
                rc = lst_group_del_ioctl((lstio_group_del_args_t *)buf);
                break;
        case LSTIO_GROUP_UPDATE:
                rc = lst_group_update_ioctl((lstio_group_update_args_t *)buf);
                break;
        case LSTIO_NODES_ADD:
                rc = lst_nodes_add_ioctl((lstio_group_nodes_args_t *)buf);
                break;
        case LSTIO_GROUP_LIST:
                rc = lst_group_list_ioctl((lstio_group_list_args_t *)buf);
                break;
        case LSTIO_GROUP_INFO:
                rc = lst_group_info_ioctl((lstio_group_info_args_t *)buf);
                break;
        case LSTIO_BATCH_ADD:
                rc = lst_batch_add_ioctl((lstio_batch_add_args_t *)buf);
                break;
        case LSTIO_BATCH_START:
                rc = lst_batch_run_ioctl((lstio_batch_run_args_t *)buf);
                break;
        case LSTIO_BATCH_STOP:
                rc = lst_batch_stop_ioctl((lstio_batch_stop_args_t *)buf);
                break;
        case LSTIO_BATCH_QUERY:
                rc = lst_batch_query_ioctl((lstio_batch_query_args_t *)buf);
                break;
        case LSTIO_BATCH_LIST:
                rc = lst_batch_list_ioctl((lstio_batch_list_args_t *)buf);
                break;
        case LSTIO_BATCH_INFO:
                rc = lst_batch_info_ioctl((lstio_batch_info_args_t *)buf);
                break;
        case LSTIO_TEST_ADD:
                rc = lst_test_add_ioctl((lstio_test_args_t *)buf);
                break;
        case LSTIO_STAT_QUERY:
                rc = lst_stat_query_ioctl((lstio_stat_args_t *)buf);
                break;
        default:
                rc = -EINVAL;
        }

        if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
                         sizeof(lstcon_trans_stat_t)))
                rc = -EFAULT;
out:
        mutex_unlock(&console_session.ses_mutex);

        LIBCFS_FREE(buf, data->ioc_plen1);

        return rc;
}

/**
 * Send request reply from request \a req reply buffer.
 * \a flags defines reply types
 * Returns 0 on success or error code
 */
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_connection *conn;
        int rc;

        /* We must already have a reply buffer (only ptlrpc_error() may be
         * called without one). The reply generated by sptlrpc layer (e.g.
         * error notify, etc.) might have NULL rq->reqmsg; Otherwise we must
         * have a request buffer which is either the actual (swabbed) incoming
         * request, or a saved copy if this is a req saved in
         * target_queue_final_reply().
         */
        LASSERT(req->rq_no_reply == 0);
        LASSERT(req->rq_reqbuf != NULL);
        LASSERT(rs != NULL);
        LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
        LASSERT(req->rq_repmsg != NULL);
        LASSERT(req->rq_repmsg == rs->rs_msg);
        LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
        LASSERT(rs->rs_cb_id.cbid_arg == rs);

        /* There may be no rq_export during failover */
        if (unlikely(req->rq_export && req->rq_export->exp_obd &&
                     req->rq_export->exp_obd->obd_fail)) {
                /* Failed obd's only send ENODEV */
                req->rq_type = PTL_RPC_MSG_ERR;
                req->rq_status = -ENODEV;
                CDEBUG(D_HA, "sending ENODEV from failed obd %d\n",
                       req->rq_export->exp_obd->obd_minor);
        }

        /* In order to keep interoperability with the client (< 2.3) which
         * doesn't have pb_jobid in ptlrpc_body, we have to shrink the
         * ptlrpc_body in reply buffer to ptlrpc_body_v2; otherwise, the
         * reply buffer on the client would overflow.
         *
         * XXX Remove this whenever we drop the interoperability with
         * such client.
         */
        req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0,
                                           sizeof(struct ptlrpc_body_v2), 1);

        if (req->rq_type != PTL_RPC_MSG_ERR)
                req->rq_type = PTL_RPC_MSG_REPLY;

        lustre_msg_set_type(req->rq_repmsg, req->rq_type);
        lustre_msg_set_status(req->rq_repmsg,
                              ptlrpc_status_hton(req->rq_status));
        lustre_msg_set_opc(req->rq_repmsg,
                           req->rq_reqmsg ?
                           lustre_msg_get_opc(req->rq_reqmsg) : 0);

        target_pack_pool_reply(req);

        ptlrpc_at_set_reply(req, flags);

        if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
                conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
        else
                conn = ptlrpc_connection_addref(req->rq_export->exp_connection);

        if (unlikely(conn == NULL)) {
                CERROR("not replying on NULL connection\n"); /* bug 9635 */
                return -ENOTCONN;
        }
        ptlrpc_rs_addref(rs);   /* +1 ref for the network */

        rc = sptlrpc_svc_wrap_reply(req);
        if (unlikely(rc))
                goto out;

        req->rq_sent = ktime_get_real_seconds();

        rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
                          (rs->rs_difficult && !rs->rs_no_ack) ?
                          LNET_ACK_REQ : LNET_NOACK_REQ,
                          &rs->rs_cb_id, conn,
                          ptlrpc_req2svc(req)->srv_rep_portal,
                          req->rq_xid, req->rq_reply_off);
out:
        if (unlikely(rc != 0))
                ptlrpc_req_drop_rs(req);
        ptlrpc_connection_put(conn);
        return rc;
}

static int test_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
        rtc_time64_to_tm(ktime_get_real_seconds(), tm);

        return 0;
}