static
void latency_tracker_handle_timeouts(struct latency_tracker *tracker,
		int flush)
{
	struct cds_wfcq_node *qnode;
	struct latency_tracker_event *s;
	u64 now;

	/*
	 * When flushing (e.g. on tracker destruction), pretend the clock is
	 * at its maximum value so every queued event is treated as expired.
	 */
	if (unlikely(flush))
		now = -1ULL;
	else
		now = trace_clock_monotonic_wrapper();

	for (;;) {
		if (cds_wfcq_empty(&tracker->timeout_head,
				&tracker->timeout_tail))
			break;
		if (likely(!flush)) {
			/*
			 * Check before dequeue: peek at the oldest event.
			 * The queue is FIFO, so if the oldest event is still
			 * referenced elsewhere and has not timed out yet,
			 * neither has anything queued behind it.
			 */
			qnode = &tracker->timeout_head.node;
			if (!qnode->next)
				break;
			s = caa_container_of(qnode->next,
					struct latency_tracker_event,
					u.timeout_node);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
			if (refcount_read(&s->refcount.refcount) > 1 &&
					(s->start_ts + tracker->timeout) > now)
#else
			if (atomic_read(&s->refcount.refcount) > 1 &&
					(s->start_ts + tracker->timeout) > now)
#endif
				break;
		}
		qnode = __cds_wfcq_dequeue_nonblocking(&tracker->timeout_head,
				&tracker->timeout_tail);
		if (!qnode)
			break;
		s = caa_container_of(qnode,
				struct latency_tracker_event, u.timeout_node);
		latency_tracker_timeout_cb(tracker, s, flush);
	}
}
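For readers who want to experiment with the queue discipline in isolation, below is a minimal userspace sketch of the same peek-then-dequeue drain loop, written against liburcu's wfcqueue API (urcu/wfcqueue.h). This is an illustration under assumptions, not tracker code: struct event, deadline, and the fixed now value are invented for the example, and a single consumer is assumed, which is what makes calling the __cds_wfcq_* dequeue variant without the dequeue lock safe here.

/* Hypothetical, self-contained userspace example; not part of the tracker. */
#define _LGPL_SOURCE	/* use liburcu's inline wfcqueue implementation */
#include <stdio.h>
#include <inttypes.h>
#include <urcu/compiler.h>	/* caa_container_of() */
#include <urcu/wfcqueue.h>

/* Illustrative stand-in for struct latency_tracker_event. */
struct event {
	uint64_t deadline;		/* absolute expiry time */
	struct cds_wfcq_node node;	/* queue linkage */
};

int main(void)
{
	struct cds_wfcq_head head;
	struct cds_wfcq_tail tail;
	struct event ev[3];
	uint64_t now = 250;	/* pretend clock for the example */
	int i;

	cds_wfcq_init(&head, &tail);

	/* Enqueue in FIFO order: deadlines 100, 200, 300. */
	for (i = 0; i < 3; i++) {
		ev[i].deadline = 100 * (i + 1);
		cds_wfcq_node_init(&ev[i].node);
		cds_wfcq_enqueue(&head, &tail, &ev[i].node);
	}

	/*
	 * Drain expired entries, same shape as the tracker loop: peek at
	 * the oldest element first and stop as soon as one has not expired,
	 * since FIFO order guarantees everything behind it is younger.
	 */
	for (;;) {
		struct cds_wfcq_node *qnode;
		struct event *e;

		if (cds_wfcq_empty(&head, &tail))
			break;
		/* Peek: the oldest element hangs off the dummy head node. */
		qnode = head.node.next;
		if (!qnode)
			break;
		e = caa_container_of(qnode, struct event, node);
		if (e->deadline > now)
			break;
		/* Single consumer, so no dequeue lock is needed. */
		qnode = __cds_wfcq_dequeue_blocking(&head, &tail);
		if (!qnode)
			break;
		e = caa_container_of(qnode, struct event, node);
		printf("expired: deadline=%" PRIu64 "\n", e->deadline);
	}
	return 0;	/* prints deadlines 100 and 200; 300 stays queued */
}

The peek step matters because a wfcqueue dequeue is destructive and a node cannot be pushed back to the head: the loop has to decide expiry before removing anything, and FIFO order lets it stop at the first event that is still live and not yet past its timeout.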