/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long tstamp, elapsed;
	int in_use;

	/* Sample the monotonic TOD clock and scale the delta (>> 12). */
	tstamp = get_clock_monotonic();
	elapsed = (tstamp - qdio->req_q_time) >> 12;
	/* Buffers currently in use = capacity minus free slots. */
	in_use = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	/* Accumulate "in-use x time" utilization and roll the timestamp. */
	qdio->req_q_util += in_use * elapsed;
	qdio->req_q_time = tstamp;
}
/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long tstamp, elapsed;
	int in_use;

	/* stat_lock serializes readers/updaters of the utilization stats. */
	spin_lock(&qdio->stat_lock);
	tstamp = get_clock_monotonic();
	/* Scale the TOD delta (>> 12) for utilization accounting. */
	elapsed = (tstamp - qdio->req_q_time) >> 12;
	/* Buffers currently in use = capacity minus free slots. */
	in_use = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q.count);
	qdio->req_q_util += in_use * elapsed;
	qdio->req_q_time = tstamp;
	spin_unlock(&qdio->stat_lock);
}
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long notrace sched_clock(void)
{
	unsigned long long tod = get_clock_monotonic();

	/*
	 * TOD clock units are 1/4096 microsecond, so
	 * ns = tod * 1000 / 4096 = (tod * 125) >> 9.
	 * The *125 form keeps the intermediate product small enough
	 * to avoid overflowing 64 bits earlier than necessary.
	 */
	return (tod * 125) >> 9;
}
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long notrace __kprobes sched_clock(void)
{
	/* Convert the monotonic TOD clock reading to nanoseconds. */
	unsigned long long tod = get_clock_monotonic();

	return tod_to_ns(tod);
}