static void migration_thread(void *__data)
{
        int cpu = (long)__data;
        edf_wm_task_t *et;
        struct timespec ts;

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                spin_lock_irq(&kthread[cpu].lock);
                if (list_empty(&kthread[cpu].list)) {
                        spin_unlock_irq(&kthread[cpu].lock);
                        schedule();
                        set_current_state(TASK_INTERRUPTIBLE);
                        continue;
                }

                /* take the first task from the list (FIFO order). */
                et = list_first_entry(&kthread[cpu].list,
                                      edf_wm_task_t,
                                      migration_list);
                list_del_init(&et->migration_list);
                spin_unlock_irq(&kthread[cpu].lock);

                /* account runtime. */
                jiffies_to_timespec(et->runtime[cpu], &ts);
                et->rt->task->dl.sched_runtime = timespec_to_ns(&ts);

                /* trace precise deadlines. */
                et->rt->deadline_time += et->deadline;
                et->rt->task->dl.sched_deadline = et->sched_split_deadline;
                et->rt->task->dl.deadline = et->next_release;
                et->next_release += et->sched_split_deadline;

                /* now let's migrate the task! */
                et->rt->task->dl.flags |= DL_NEW;
                migrate_task(et->rt, cpu);
                wake_up_process(et->rt->task);

                /* when the budget is exhausted, the deadline should be
                 * advanced by et->sched_deadline, not by
                 * et->sched_split_deadline. */
                et->rt->task->dl.sched_deadline = et->sched_deadline;

                /* account runtime. */
                jiffies_to_timespec(et->runtime[cpu], &ts);
                et->rt->task->dl.runtime = timespec_to_ns(&ts);

                /* activate the timer for the next migration of this task. */
                if (et->last_cpu != cpu) {
                        et->rt->task->dl.flags &= ~SCHED_EXHAUSTIVE;
                        start_window_timer(et);
                } else {
                        et->rt->task->dl.flags |= SCHED_EXHAUSTIVE;
                }
        }
}
int rt_down_timeout(struct semaphore *sem, long jiff)
{
        struct hrtimer_sleeper t;
        struct timespec ts;
        unsigned long expires = jiffies + jiff + 1;
        int ret;

        /*
         * rt_mutex_slowlock() can sleep interruptibly, but that requires
         * TASK_INTERRUPTIBLE, while down_timeout() uses
         * TASK_UNINTERRUPTIBLE. To handle this we loop if a signal caused
         * the timeout, and then recalculate the remaining timeout.
         * Yes Thomas, this is a hack! But we can fix it right later.
         */
        do {
                jiffies_to_timespec(jiff, &ts);
                hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
                                      HRTIMER_MODE_REL);
                t.timer._expires = timespec_to_ktime(ts);
                ret = rt_mutex_timed_lock(&sem->lock, &t, 0);
                if (ret != -EINTR)
                        break;
                /* a signal occurred, but down_timeout() doesn't handle
                 * signals, so retry with the remaining time. */
                jiff = expires - jiffies;
        } while (jiff > 0);

        if (!ret)
                __down_complete(sem);
        else
                ret = -ETIME;

        return ret;
}
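The remaining-time recalculation above is plain deadline arithmetic. A minimal user-space sketch of the same retry loop follows; the hypothetical try_lock_interruptible() stands in for rt_mutex_timed_lock() and is stubbed so the sketch compiles.

#include <errno.h>
#include <time.h>

/* hypothetical stand-in for rt_mutex_timed_lock(): returns 0 on
 * success or -EINTR if a signal interrupted the wait. stubbed here
 * for illustration only. */
static int try_lock_interruptible(long timeout_secs)
{
        (void)timeout_secs;
        return 0;
}

/* retry the interruptible wait until the original deadline passes,
 * mirroring the signal-retry loop in rt_down_timeout() above. */
static int down_timeout_like(long secs)
{
        time_t expires = time(NULL) + secs + 1;
        int ret;

        do {
                ret = try_lock_interruptible(secs);
                if (ret != -EINTR)
                        break;                          /* acquired, or a hard error */
                secs = expires - time(NULL);            /* remaining time after the signal */
        } while (secs > 0);

        return ret ? -ETIME : 0;
}

int main(void)
{
        return down_timeout_like(5) ? 1 : 0;
}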
asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
                                     struct compat_timespec __user *rmtp)
{
        struct timespec t;
        struct restart_block *restart;
        unsigned long expire;

        if (get_compat_timespec(&t, rqtp))
                return -EFAULT;

        if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
                return -EINVAL;

        expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
        expire = schedule_timeout_interruptible(expire);
        if (expire == 0)
                return 0;

        if (rmtp) {
                jiffies_to_timespec(expire, &t);
                if (put_compat_timespec(&t, rmtp))
                        return -EFAULT;
        }

        restart = &current_thread_info()->restart_block;
        restart->fn = compat_nanosleep_restart;
        restart->arg0 = jiffies + expire;
        restart->arg1 = (unsigned long)rmtp;
        return -ERESTART_RESTARTBLOCK;
}
static long compat_nanosleep_restart(struct restart_block *restart)
{
        unsigned long expire = restart->arg0, now = jiffies;
        struct compat_timespec __user *rmtp;

        /* Did it expire while we handled signals? */
        if (!time_after(expire, now))
                return 0;

        expire = schedule_timeout_interruptible(expire - now);
        if (expire == 0)
                return 0;

        rmtp = (struct compat_timespec __user *)restart->arg1;
        if (rmtp) {
                struct compat_timespec ct;
                struct timespec t;

                jiffies_to_timespec(expire, &t);
                ct.tv_sec = t.tv_sec;
                ct.tv_nsec = t.tv_nsec;
                if (copy_to_user(rmtp, &ct, sizeof(ct)))
                        return -EFAULT;
        }
        /* The 'restart' block is already filled in */
        return -ERESTART_RESTARTBLOCK;
}
static int socle_otg_seq_show(struct seq_file *s, void *v)
{
        struct timespec ts;
        u32 m_sec;
        u32 tran_speed_rate;

        /* length of one jiffy in milliseconds. */
        jiffies_to_timespec(1, &ts);
        m_sec = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;

        printk("tran_jiffies = %ld\n", tran_jiffies);
        printk("tran_timers = %ld\n", tran_timers);

        /* total transfer time: whole jiffies plus the fractional
         * remainder counted in timer ticks. */
        m_sec = (m_sec * tran_jiffies) + (m_sec * tran_timers / otg_timer_load);
        seq_printf(s, "Total time for tran: %10u ms\n", m_sec);
        seq_printf(s, "Total bytes for tran: %10ld Bytes\n", tran_bytes);

        if (0 == m_sec)
                tran_speed_rate = 0;
        else
                tran_speed_rate = tran_bytes / m_sec;
        seq_printf(s, "Speed rate of tran: %10u B/ms\n", tran_speed_rate);

        tran_timers = 0;
        tran_jiffies = 0;
        tran_bytes = 0;
        return 0;
}
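Here jiffies_to_timespec() is only used to recover the tick length. A minimal user-space sketch of the same conversion, assuming HZ = 100 (the real value is a kernel configuration choice):

#include <stdio.h>

#define HZ 100                          /* assumed tick rate; kernel config dependent */
#define NSEC_PER_SEC 1000000000L

struct ts { long tv_sec; long tv_nsec; };

/* roughly what jiffies_to_timespec() computes: ticks * (NSEC_PER_SEC / HZ).
 * (the kernel uses TICK_NSEC, which can differ slightly from this.) */
static void jiffies_to_ts(unsigned long j, struct ts *t)
{
        unsigned long long ns = (unsigned long long)j * (NSEC_PER_SEC / HZ);

        t->tv_sec = ns / NSEC_PER_SEC;
        t->tv_nsec = ns % NSEC_PER_SEC;
}

int main(void)
{
        struct ts t;

        jiffies_to_ts(1, &t);
        /* one tick at HZ=100 is 10 ms */
        printf("tick = %ld ms\n", t.tv_sec * 1000 + t.tv_nsec / 1000000);
        return 0;
}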
/**
 * fc_get_host_stats() - Return the Scsi_Host's statistics
 * @shost: The SCSI host whose statistics are to be returned
 */
struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
{
        struct fc_host_statistics *fcoe_stats;
        struct fc_lport *lport = shost_priv(shost);
        struct timespec v0, v1;
        unsigned int cpu;
        u64 fcp_in_bytes = 0;
        u64 fcp_out_bytes = 0;

        fcoe_stats = &lport->host_stats;
        memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));

        jiffies_to_timespec(jiffies, &v0);
        jiffies_to_timespec(lport->boot_time, &v1);
        fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);

        for_each_possible_cpu(cpu) {
                struct fcoe_dev_stats *stats;

                stats = per_cpu_ptr(lport->dev_stats, cpu);

                fcoe_stats->tx_frames += stats->TxFrames;
                fcoe_stats->tx_words += stats->TxWords;
                fcoe_stats->rx_frames += stats->RxFrames;
                fcoe_stats->rx_words += stats->RxWords;
                fcoe_stats->error_frames += stats->ErrorFrames;
                fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
                fcoe_stats->fcp_input_requests += stats->InputRequests;
                fcoe_stats->fcp_output_requests += stats->OutputRequests;
                fcoe_stats->fcp_control_requests += stats->ControlRequests;
                fcp_in_bytes += stats->InputBytes;
                fcp_out_bytes += stats->OutputBytes;
                fcoe_stats->link_failure_count += stats->LinkFailureCount;
        }
        fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
        fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
        fcoe_stats->lip_count = -1;
        fcoe_stats->nos_count = -1;
        fcoe_stats->loss_of_sync_count = -1;
        fcoe_stats->loss_of_signal_count = -1;
        fcoe_stats->prim_seq_protocol_err_count = -1;
        fcoe_stats->dumped_frames = -1;
        return fcoe_stats;
}
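The two jiffies_to_timespec() calls above exist only to take a difference in whole seconds. A minimal sketch of the equivalent arithmetic, assuming HZ ticks per second and ignoring the one-second truncation differences the timespec route shares:

#include <stdio.h>

#define HZ 100          /* assumed tick rate; kernel config dependent */

/* seconds elapsed between two jiffies counters; unsigned subtraction
 * handles counter wraparound the same way time_after()-style code does. */
static unsigned long seconds_since(unsigned long now, unsigned long then)
{
        return (now - then) / HZ;
}

int main(void)
{
        /* e.g. 25000 ticks after boot at HZ=100 is 250 seconds */
        printf("%lu\n", seconds_since(25000UL, 0UL));
        return 0;
}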
int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
{
        s64 tmp;
        struct timespec ts;
        unsigned long t1, t2, t3;
        unsigned long flags;

        /* Though tsk->delays is accessed later, exiting early here
         * avoids needlessly filling in the other fields. */
        if (!tsk->delays)
                goto done;

        tmp = (s64)d->cpu_run_real_total;
        cputime_to_timespec(tsk->utime + tsk->stime, &ts);
        tmp += timespec_to_ns(&ts);
        d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;

        /*
         * No locking available for sched_info (and too expensive to add one)
         * Mitigate by taking snapshot of values
         */
        t1 = tsk->sched_info.pcnt;
        t2 = tsk->sched_info.run_delay;
        t3 = tsk->sched_info.cpu_time;

        d->cpu_count += t1;

        jiffies_to_timespec(t2, &ts);
        tmp = (s64)d->cpu_delay_total + timespec_to_ns(&ts);
        d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;

        tmp = (s64)d->cpu_run_virtual_total + (s64)jiffies_to_usecs(t3) * 1000;
        d->cpu_run_virtual_total =
                (tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;

        /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */

        spin_lock_irqsave(&tsk->delays->lock, flags);
        tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
        d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
        tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
        d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
        d->blkio_count += tsk->delays->blkio_count;
        d->swapin_count += tsk->delays->swapin_count;
        spin_unlock_irqrestore(&tsk->delays->lock, flags);

done:
        return 0;
}
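The recurring (tmp < total) ? 0 : tmp test implements the convention noted in the comment: on wraparound the total resets to zero while the count keeps growing, signalling overflow to readers. A minimal sketch of the pattern using unsigned arithmetic (the kernel code uses s64 casts, but the idea is the same):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* accumulate delta into *total, resetting to 0 if the addition wrapped:
 * a zeroed total with a non-zero count marks the stat as overflowed. */
static void acc_or_reset(uint64_t *total, uint64_t delta)
{
        uint64_t tmp = *total + delta;  /* unsigned add wraps modulo 2^64 */

        *total = (tmp < *total) ? 0 : tmp;
}

int main(void)
{
        uint64_t total = UINT64_MAX - 5;

        acc_or_reset(&total, 10);       /* wraps, so the total resets */
        printf("%" PRIu64 "\n", total); /* prints 0 */
        return 0;
}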
/**
 * set the scheduler internally in the Linux kernel.
 */
static int edf_set_scheduler(resch_task_t *rt, int prio)
{
        struct sched_param sp;
        struct sched_param_ex spx;
        struct timespec ts_period, ts_deadline, ts_runtime;

        jiffies_to_timespec(rt->period, &ts_period);
        jiffies_to_timespec(rt->deadline, &ts_deadline);
        jiffies_to_timespec(usecs_to_jiffies(rt->runtime), &ts_runtime);

        sp.sched_priority = 0;
        spx.sched_priority = 0;
        spx.sched_period = ts_period;
        spx.sched_deadline = ts_deadline;
        spx.sched_runtime = ts_runtime;
        spx.sched_flags = 0;

        if (sched_setscheduler_ex(rt->task, SCHED_DEADLINE, &sp, &spx) < 0) {
                printk(KERN_WARNING "RESCH: edf_set_scheduler() failed.\n");
                printk(KERN_WARNING "RESCH: task#%d (process#%d) priority=%d.\n",
                       rt->rid, rt->task->pid, prio);
                return false;
        }
        rt->prio = prio;

        if (task_has_reserve(rt)) {
                rt->task->dl.flags &= ~SCHED_EXHAUSTIVE;
                rt->task->dl.flags |= SCHED_FCBS;
                /* you can additionally set the following flags, if wanted:
                 * rt->task->dl.flags |= SCHED_FCBS_NO_CATCH_UP; */
        } else {
                rt->task->dl.flags |= SCHED_EXHAUSTIVE;
                rt->task->dl.flags &= ~SCHED_FCBS;
        }

        return true;
}
/**
 * wait until the next period.
 */
static void edf_wait_period(resch_task_t *rt)
{
        struct timespec ts_period;

        if (rt->release_time > jiffies) {
                jiffies_to_timespec(rt->release_time - jiffies, &ts_period);
        } else {
                ts_period.tv_sec = 0;
                ts_period.tv_nsec = 0;
        }

        if (rt->task->dl.flags & SCHED_EXHAUSTIVE) {
                rt->task->dl.deadline = cpu_clock(smp_processor_id());
        }

        sched_wait_interval(!TIMER_ABSTIME, &ts_period, NULL);
}
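The release-time clamp above is the usual way to build a relative timespec from a tick deadline. A minimal sketch of just that conversion, again assuming HZ = 100:

#include <stdio.h>

#define HZ 100                          /* assumed tick rate */
#define NSEC_PER_TICK (1000000000L / HZ)

struct ts { long tv_sec; long tv_nsec; };

/* relative interval until a future tick count, clamped to zero when
 * the release time has already passed, as in edf_wait_period() above. */
static struct ts ticks_until(unsigned long release, unsigned long now)
{
        struct ts t = { 0, 0 };

        if (release > now) {
                unsigned long d = release - now;

                t.tv_sec = d / HZ;
                t.tv_nsec = (d % HZ) * NSEC_PER_TICK;
        }
        return t;
}

int main(void)
{
        struct ts t = ticks_until(1250, 1000);  /* 250 ticks = 2.5 s */

        printf("%ld.%09ld\n", t.tv_sec, t.tv_nsec);
        return 0;
}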
/**
 * start the accounting on the reserve of the given task in SCHED_DEADLINE.
 * if the task has properly requested to reserve CPU time through the API,
 * we do nothing here, since it is handled by SCHED_DEADLINE.
 * otherwise, we consider this a request to forcefully account CPU time.
 * the latter case is useful if kernel functions want to account CPU
 * time but do not want to use the CBS policy.
 */
static void edf_start_account(resch_task_t *rt)
{
        /* set the budget explicitly, only if the task has not requested
         * to reserve CPU time through the API. */
        if (!task_has_reserve(rt)) {
                struct timespec ts;

                jiffies_to_timespec(rt->budget, &ts);
                rt->task->dl.runtime = timespec_to_ns(&ts);
        }

        /* set the flag to notify applications when the budget is exhausted. */
        if (rt->xcpu) {
                rt->task->dl.flags |= SCHED_SIG_RORUN;
        }

        /* make sure to use Flexible CBS. */
        rt->task->dl.flags &= ~SCHED_EXHAUSTIVE;
        rt->task->dl.flags |= SCHED_FCBS;
}
/*
 * fnic_get_stats_data - Copy fnic stats buffer to a memory file
 * @fnic_dbgfs_t: pointer to debugfs fnic stats buffer
 *
 * Description:
 * This routine gathers the fnic stats debugfs data from the fnic_stats struct
 * and dumps it to stats_debug_info.
 *
 * Return Value:
 * This routine returns the amount of bytes that were dumped into
 * stats_debug_info
 */
int fnic_get_stats_data(struct stats_debug_info *debug,
                        struct fnic_stats *stats)
{
        int len = 0;
        int buf_size = debug->buf_size;
        struct timespec val1, val2;

        len = snprintf(debug->debug_buffer + len, buf_size - len,
                "------------------------------------------\n"
                "\t\tIO Statistics\n"
                "------------------------------------------\n");
        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
                "Number of IOs: %lld\nNumber of IO Completions: %lld\n"
                "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
                "Number of Memory alloc Failures: %lld\n"
                "Number of IOREQ Null: %lld\n"
                "Number of SCSI cmd pointer Null: %lld\n",
                (u64)atomic64_read(&stats->io_stats.active_ios),
                (u64)atomic64_read(&stats->io_stats.max_active_ios),
                (u64)atomic64_read(&stats->io_stats.num_ios),
                (u64)atomic64_read(&stats->io_stats.io_completions),
                (u64)atomic64_read(&stats->io_stats.io_failures),
                (u64)atomic64_read(&stats->io_stats.io_not_found),
                (u64)atomic64_read(&stats->io_stats.alloc_failures),
                (u64)atomic64_read(&stats->io_stats.ioreq_null),
                (u64)atomic64_read(&stats->io_stats.sc_null));

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tAbort Statistics\n"
                "------------------------------------------\n");
        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Aborts: %lld\n"
                "Number of Abort Failures: %lld\n"
                "Number of Abort Driver Timeouts: %lld\n"
                "Number of Abort FW Timeouts: %lld\n"
                "Number of Abort IO NOT Found: %lld\n",
                (u64)atomic64_read(&stats->abts_stats.aborts),
                (u64)atomic64_read(&stats->abts_stats.abort_failures),
                (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
                (u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts),
                (u64)atomic64_read(&stats->abts_stats.abort_io_not_found));

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tTerminate Statistics\n"
                "------------------------------------------\n");
        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Terminates: %lld\n"
                "Maximum Terminates: %lld\n"
                "Number of Terminate Driver Timeouts: %lld\n"
                "Number of Terminate FW Timeouts: %lld\n"
                "Number of Terminate IO NOT Found: %lld\n"
                "Number of Terminate Failures: %lld\n",
                (u64)atomic64_read(&stats->term_stats.terminates),
                (u64)atomic64_read(&stats->term_stats.max_terminates),
                (u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts),
                (u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts),
                (u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
                (u64)atomic64_read(&stats->term_stats.terminate_failures));

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tReset Statistics\n"
                "------------------------------------------\n");
        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Device Resets: %lld\n"
                "Number of Device Reset Failures: %lld\n"
                "Number of Device Reset Aborts: %lld\n"
                "Number of Device Reset Timeouts: %lld\n"
                "Number of Device Reset Terminates: %lld\n"
                "Number of FW Resets: %lld\n"
                "Number of FW Reset Completions: %lld\n"
                "Number of FW Reset Failures: %lld\n"
                "Number of Fnic Reset: %lld\n"
                "Number of Fnic Reset Completions: %lld\n"
                "Number of Fnic Reset Failures: %lld\n",
                (u64)atomic64_read(&stats->reset_stats.device_resets),
                (u64)atomic64_read(&stats->reset_stats.device_reset_failures),
                (u64)atomic64_read(&stats->reset_stats.device_reset_aborts),
                (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts),
                (u64)atomic64_read(&stats->reset_stats.device_reset_terminates),
                (u64)atomic64_read(&stats->reset_stats.fw_resets),
                (u64)atomic64_read(&stats->reset_stats.fw_reset_completions),
                (u64)atomic64_read(&stats->reset_stats.fw_reset_failures),
                (u64)atomic64_read(&stats->reset_stats.fnic_resets),
                (u64)atomic64_read(&stats->reset_stats.fnic_reset_completions),
                (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tFirmware Statistics\n"
                "------------------------------------------\n");
        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Active FW Requests %lld\n"
                "Maximum FW Requests: %lld\n"
                "Number of FW out of resources: %lld\n"
                "Number of FW IO errors: %lld\n",
                (u64)atomic64_read(&stats->fw_stats.active_fw_reqs),
                (u64)atomic64_read(&stats->fw_stats.max_fw_reqs),
                (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
                (u64)atomic64_read(&stats->fw_stats.io_fw_errs));

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tVlan Discovery Statistics\n"
                "------------------------------------------\n");
        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Number of Vlan Discovery Requests Sent %lld\n"
                "Vlan Response Received with no FCF VLAN ID: %lld\n"
                "No solicitations recvd after vlan set, expiry count: %lld\n"
                "Flogi rejects count: %lld\n",
                (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs),
                (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID),
                (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
                (u64)atomic64_read(&stats->vlan_stats.flogi_rejects));

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "\n------------------------------------------\n"
                "\t\tOther Important Statistics\n"
                "------------------------------------------\n");

        jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1);
        jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2);

        len += snprintf(debug->debug_buffer + len, buf_size - len,
                "Last ISR time: %llu (%8lu.%8lu)\n"
                "Last ACK time: %llu (%8lu.%8lu)\n"
                "Number of ISRs: %lld\n"
                "Maximum CQ Entries: %lld\n"
                "Number of ACK index out of range: %lld\n"
                "Number of data count mismatch: %lld\n"
                "Number of FCPIO Timeouts: %lld\n"
                "Number of FCPIO Aborted: %lld\n"
                "Number of SGL Invalid: %lld\n"
                "Number of Copy WQ Alloc Failures for ABTs: %lld\n"
                "Number of Copy WQ Alloc Failures for Device Reset: %lld\n"
                "Number of Copy WQ Alloc Failures for IOs: %lld\n"
                "Number of no icmnd itmf Completions: %lld\n"
                "Number of QUEUE Fulls: %lld\n"
                "Number of rport not ready: %lld\n"
                "Number of receive frame errors: %lld\n",
                (u64)stats->misc_stats.last_isr_time,
                val1.tv_sec, val1.tv_nsec,
                (u64)stats->misc_stats.last_ack_time,
                val2.tv_sec, val2.tv_nsec,
                (u64)atomic64_read(&stats->misc_stats.isr_count),
                (u64)atomic64_read(&stats->misc_stats.max_cq_entries),
                (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
                (u64)atomic64_read(&stats->misc_stats.data_count_mismatch),
                (u64)atomic64_read(&stats->misc_stats.fcpio_timeout),
                (u64)atomic64_read(&stats->misc_stats.fcpio_aborted),
                (u64)atomic64_read(&stats->misc_stats.sgl_invalid),
                (u64)atomic64_read(&stats->misc_stats.abts_cpwq_alloc_failures),
                (u64)atomic64_read(&stats->misc_stats.devrst_cpwq_alloc_failures),
                (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures),
                (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
                (u64)atomic64_read(&stats->misc_stats.queue_fulls),
                (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
                (u64)atomic64_read(&stats->misc_stats.frame_errors));

        return len;
}
/*
 * fnic_get_trace_data - Copy trace buffer to a memory file
 * @fnic_dbgfs_t: pointer to debugfs trace buffer
 *
 * Description:
 * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
 * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in
 * the log and process the log until the end of the buffer. Then it will gather
 * from the beginning of the log and process until the current entry @wr_idx.
 *
 * Return Value:
 * This routine returns the amount of bytes that were dumped into fnic_dbgfs_t
 */
int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
{
        int rd_idx;
        int wr_idx;
        int len = 0;
        unsigned long flags;
        char str[KSYM_SYMBOL_LEN];
        struct timespec val;
        fnic_trace_data_t *tbp;

        spin_lock_irqsave(&fnic_trace_lock, flags);
        rd_idx = fnic_trace_entries.rd_idx;
        wr_idx = fnic_trace_entries.wr_idx;
        if (wr_idx < rd_idx) {
                while (1) {
                        /* Start from read index @rd_idx */
                        tbp = (fnic_trace_data_t *)
                                fnic_trace_entries.page_offset[rd_idx];
                        if (!tbp) {
                                spin_unlock_irqrestore(&fnic_trace_lock, flags);
                                return 0;
                        }
                        /* Convert function pointer to function name */
                        if (sizeof(unsigned long) < 8) {
                                sprint_symbol(str, tbp->fnaddr.low);
                                jiffies_to_timespec(tbp->timestamp.low, &val);
                        } else {
                                sprint_symbol(str, tbp->fnaddr.val);
                                jiffies_to_timespec(tbp->timestamp.val, &val);
                        }
                        /*
                         * Dump trace buffer entry to memory file
                         * and increment read index @rd_idx
                         */
                        len += snprintf(fnic_dbgfs_prt->buffer + len,
                                (trace_max_pages * PAGE_SIZE * 3) - len,
                                "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
                                "%16llx %16llx %16llx\n", val.tv_sec,
                                val.tv_nsec, str, tbp->host_no, tbp->tag,
                                tbp->data[0], tbp->data[1], tbp->data[2],
                                tbp->data[3], tbp->data[4]);
                        rd_idx++;
                        /*
                         * If rd_idx has reached the last trace entry,
                         * wrap rd_idx back to zero
                         */
                        if (rd_idx > (fnic_max_trace_entries - 1))
                                rd_idx = 0;
                        /*
                         * Continue dumping trace buffer entries into
                         * the memory file until rd_idx reaches the
                         * write index
                         */
                        if (rd_idx == wr_idx)
                                break;
                }
        } else if (wr_idx > rd_idx) {
                while (1) {
                        /* Start from read index @rd_idx */
                        tbp = (fnic_trace_data_t *)
                                fnic_trace_entries.page_offset[rd_idx];
                        if (!tbp) {
                                spin_unlock_irqrestore(&fnic_trace_lock, flags);
                                return 0;
                        }
                        /* Convert function pointer to function name */
                        if (sizeof(unsigned long) < 8) {
                                sprint_symbol(str, tbp->fnaddr.low);
                                jiffies_to_timespec(tbp->timestamp.low, &val);
                        } else {
                                sprint_symbol(str, tbp->fnaddr.val);
                                jiffies_to_timespec(tbp->timestamp.val, &val);
                        }
                        /*
                         * Dump trace buffer entry to memory file
                         * and increment read index @rd_idx
                         */
                        len += snprintf(fnic_dbgfs_prt->buffer + len,
                                (trace_max_pages * PAGE_SIZE * 3) - len,
                                "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
                                "%16llx %16llx %16llx\n", val.tv_sec,
                                val.tv_nsec, str, tbp->host_no, tbp->tag,
                                tbp->data[0], tbp->data[1], tbp->data[2],
                                tbp->data[3], tbp->data[4]);
                        rd_idx++;
                        /*
                         * Continue dumping trace buffer entries into
                         * the memory file until rd_idx reaches the
                         * write index
                         */
                        if (rd_idx == wr_idx)
                                break;
                }
        }
        spin_unlock_irqrestore(&fnic_trace_lock, flags);
        return len;
}
/*
 * snic_stats_show - Formats and prints per host specific driver stats.
 */
static int snic_stats_show(struct seq_file *sfp, void *data)
{
        struct snic *snic = (struct snic *) sfp->private;
        struct snic_stats *stats = &snic->s_stats;
        struct timespec last_isr_tms, last_ack_tms;
        u64 maxio_tm;
        int i;

        /* Dump IO Stats */
        seq_printf(sfp,
                   "------------------------------------------\n"
                   "\t\t IO Statistics\n"
                   "------------------------------------------\n");

        maxio_tm = (u64) atomic64_read(&stats->io.max_time);
        seq_printf(sfp,
                   "Active IOs              : %lld\n"
                   "Max Active IOs          : %lld\n"
                   "Total IOs               : %lld\n"
                   "IOs Completed           : %lld\n"
                   "IOs Failed              : %lld\n"
                   "IOs Not Found           : %lld\n"
                   "Memory Alloc Failures   : %lld\n"
                   "REQs Null               : %lld\n"
                   "SCSI Cmd Pointers Null  : %lld\n"
                   "Max SGL for any IO      : %lld\n"
                   "Max IO Size             : %lld Sectors\n"
                   "Max Queuing Time        : %lld\n"
                   "Max Completion Time     : %lld\n"
                   "Max IO Process Time(FW) : %lld (%u msec)\n",
                   (u64) atomic64_read(&stats->io.active),
                   (u64) atomic64_read(&stats->io.max_active),
                   (u64) atomic64_read(&stats->io.num_ios),
                   (u64) atomic64_read(&stats->io.compl),
                   (u64) atomic64_read(&stats->io.fail),
                   (u64) atomic64_read(&stats->io.io_not_found),
                   (u64) atomic64_read(&stats->io.alloc_fail),
                   (u64) atomic64_read(&stats->io.req_null),
                   (u64) atomic64_read(&stats->io.sc_null),
                   (u64) atomic64_read(&stats->io.max_sgl),
                   (u64) atomic64_read(&stats->io.max_io_sz),
                   (u64) atomic64_read(&stats->io.max_qtime),
                   (u64) atomic64_read(&stats->io.max_cmpl_time),
                   maxio_tm,
                   jiffies_to_msecs(maxio_tm));

        seq_puts(sfp, "\nSGL Counters\n");

        for (i = 0; i < SNIC_MAX_SG_DESC_CNT; i++) {
                seq_printf(sfp,
                           "%10lld ",
                           (u64) atomic64_read(&stats->io.sgl_cnt[i]));

                if ((i + 1) % 8 == 0)
                        seq_puts(sfp, "\n");
        }

        /* Dump Abort Stats */
        seq_printf(sfp,
                   "\n-------------------------------------------\n"
                   "\t\t Abort Statistics\n"
                   "---------------------------------------------\n");

        seq_printf(sfp,
                   "Aborts                  : %lld\n"
                   "Aborts Fail             : %lld\n"
                   "Aborts Driver Timeout   : %lld\n"
                   "Abort FW Timeout        : %lld\n"
                   "Abort IO NOT Found      : %lld\n"
                   "Abort Queuing Failed    : %lld\n",
                   (u64) atomic64_read(&stats->abts.num),
                   (u64) atomic64_read(&stats->abts.fail),
                   (u64) atomic64_read(&stats->abts.drv_tmo),
                   (u64) atomic64_read(&stats->abts.fw_tmo),
                   (u64) atomic64_read(&stats->abts.io_not_found),
                   (u64) atomic64_read(&stats->abts.q_fail));

        /* Dump Reset Stats */
        seq_printf(sfp,
                   "\n-------------------------------------------\n"
                   "\t\t Reset Statistics\n"
                   "---------------------------------------------\n");

        seq_printf(sfp,
                   "HBA Resets              : %lld\n"
                   "HBA Reset Cmpls         : %lld\n"
                   "HBA Reset Fail          : %lld\n",
                   (u64) atomic64_read(&stats->reset.hba_resets),
                   (u64) atomic64_read(&stats->reset.hba_reset_cmpl),
                   (u64) atomic64_read(&stats->reset.hba_reset_fail));

        /* Dump Firmware Stats */
        seq_printf(sfp,
                   "\n-------------------------------------------\n"
                   "\t\t Firmware Statistics\n"
                   "---------------------------------------------\n");

        seq_printf(sfp,
                   "Active FW Requests      : %lld\n"
                   "Max FW Requests         : %lld\n"
                   "FW Out Of Resource Errs : %lld\n"
                   "FW IO Errors            : %lld\n"
                   "FW SCSI Errors          : %lld\n",
                   (u64) atomic64_read(&stats->fw.actv_reqs),
                   (u64) atomic64_read(&stats->fw.max_actv_reqs),
                   (u64) atomic64_read(&stats->fw.out_of_res),
                   (u64) atomic64_read(&stats->fw.io_errs),
                   (u64) atomic64_read(&stats->fw.scsi_errs));

        /* Dump Miscellaneous Stats */
        seq_printf(sfp,
                   "\n---------------------------------------------\n"
                   "\t\t Other Statistics\n"
                   "\n---------------------------------------------\n");

        jiffies_to_timespec(stats->misc.last_isr_time, &last_isr_tms);
        jiffies_to_timespec(stats->misc.last_ack_time, &last_ack_tms);

        seq_printf(sfp,
                   "Last ISR Time           : %llu (%8lu.%8lu)\n"
                   "Last Ack Time           : %llu (%8lu.%8lu)\n"
                   "Ack ISRs                : %llu\n"
                   "IO Cmpl ISRs            : %llu\n"
                   "Err Notify ISRs         : %llu\n"
                   "Max CQ Entries          : %lld\n"
                   "Data Count Mismatch     : %lld\n"
                   "IOs w/ Timeout Status   : %lld\n"
                   "IOs w/ Aborted Status   : %lld\n"
                   "IOs w/ SGL Invalid Stat : %lld\n"
                   "WQ Desc Alloc Fail      : %lld\n"
                   "Queue Full              : %lld\n"
                   "Queue Ramp Up           : %lld\n"
                   "Queue Ramp Down         : %lld\n"
                   "Queue Last Queue Depth  : %lld\n"
                   "Target Not Ready        : %lld\n",
                   (u64) stats->misc.last_isr_time,
                   last_isr_tms.tv_sec, last_isr_tms.tv_nsec,
                   (u64) stats->misc.last_ack_time,
                   last_ack_tms.tv_sec, last_ack_tms.tv_nsec,
                   (u64) atomic64_read(&stats->misc.ack_isr_cnt),
                   (u64) atomic64_read(&stats->misc.cmpl_isr_cnt),
                   (u64) atomic64_read(&stats->misc.errnotify_isr_cnt),
                   (u64) atomic64_read(&stats->misc.max_cq_ents),
                   (u64) atomic64_read(&stats->misc.data_cnt_mismat),
                   (u64) atomic64_read(&stats->misc.io_tmo),
                   (u64) atomic64_read(&stats->misc.io_aborted),
                   (u64) atomic64_read(&stats->misc.sgl_inval),
                   (u64) atomic64_read(&stats->misc.wq_alloc_fail),
                   (u64) atomic64_read(&stats->misc.qfull),
                   (u64) atomic64_read(&stats->misc.qsz_rampup),
                   (u64) atomic64_read(&stats->misc.qsz_rampdown),
                   (u64) atomic64_read(&stats->misc.last_qsz),
                   (u64) atomic64_read(&stats->misc.tgt_not_rdy));

        return 0;
}