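/*
 * __account_scheduler_latency() below relies on store_stacktrace() to fill
 * lat->backtrace with the blocked task's kernel stack. A minimal sketch of
 * that helper, assuming the modern stacktrace API (stack_trace_save_tsk());
 * older kernels filled a struct stack_trace via save_stack_trace_tsk()
 * instead:
 */
static void store_stacktrace(struct task_struct *tsk,
                             struct latency_record *lat)
{
        /* Unused tail entries stay zero, which marks the end of the trace. */
        stack_trace_save_tsk(tsk, lat->backtrace, LT_BACKTRACEDEPTH, 0);
}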
/**
 * __account_scheduler_latency - record an occurred latency
 * @tsk: the task struct of the task hitting the latency
 * @usecs: the duration of the latency in microseconds
 * @inter: 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
        unsigned long flags;
        int i, q;
        struct latency_record lat;

        /* Long interruptible waits are generally user requested... */
        if (inter && usecs > 5000)
                return;

        /* Negative sleeps are time going backwards */
        /* Zero-time sleeps are non-interesting */
        if (usecs <= 0)
                return;

        memset(&lat, 0, sizeof(lat));
        lat.count = 1;
        lat.time = usecs;
        lat.max = usecs;

        store_stacktrace(tsk, &lat);

        raw_spin_lock_irqsave(&latency_lock, flags);

        account_global_scheduler_latency(tsk, &lat);

        /* Try to merge with an existing record that has the same backtrace: */
        for (i = 0; i < tsk->latency_record_count; i++) {
                struct latency_record *mylat;
                int same = 1;

                mylat = &tsk->latency_record[i];
                for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                        unsigned long record = lat.backtrace[q];

                        if (mylat->backtrace[q] != record) {
                                same = 0;
                                break;
                        }

                        /* 0 and ULONG_MAX entries mean end of backtrace: */
                        if (record == 0 || record == ULONG_MAX)
                                break;
                }
                if (same) {
                        mylat->count++;
                        mylat->time += lat.time;
                        if (lat.time > mylat->max)
                                mylat->max = lat.time;
                        goto out_unlock;
                }
        }

        /*
         * Short term hack; if we're over LT_SAVECOUNT records we stop;
         * in the future we should recycle:
         */
        if (tsk->latency_record_count >= LT_SAVECOUNT)
                goto out_unlock;

        /* Allocate a new one: */
        i = tsk->latency_record_count++;
        memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
        raw_spin_unlock_irqrestore(&latency_lock, flags);
}
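/*
 * Scheduler call sites do not invoke __account_scheduler_latency() directly;
 * they go through a static inline wrapper that performs the cheap
 * latencytop_enabled check first, which is why no enable check appears in
 * the function body above. A sketch of that wrapper, roughly as found in
 * include/linux/latencytop.h:
 */
static inline void
account_scheduler_latency(struct task_struct *task, int usecs, int inter)
{
        if (unlikely(latencytop_enabled))
                __account_scheduler_latency(task, usecs, inter);
}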