/*
 * Original, timer-driven garbage collector: walk dst_garbage_list
 * under dst_lock, destroy every entry whose refcount has dropped to
 * zero, and re-arm dst_gc_timer with a back-off interval that grows
 * while entries stay busy.
 */
static void dst_run_gc(unsigned long dummy)
{
	int delayed = 0;
	struct dst_entry *dst, **dstp;

	if (!spin_trylock(&dst_lock)) {
		/* Someone else holds the lock; retry in HZ/10. */
		mod_timer(&dst_gc_timer, jiffies + HZ/10);
		return;
	}

	del_timer(&dst_gc_timer);
	dstp = &dst_garbage_list;
	while ((dst = *dstp) != NULL) {
		if (atomic_read(&dst->__refcnt)) {
			/* Still referenced: leave it on the list. */
			dstp = &dst->next;
			delayed++;
			continue;
		}
		*dstp = dst->next;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 1)
				continue;

			___dst_free(dst);
			dst->next = *dstp;
			*dstp = dst;
			dstp = &dst->next;
		}
	}

	if (!dst_garbage_list) {
		dst_gc_timer_inc = DST_GC_MAX;
		goto out;
	}

	/* Entries are still pending: back off, capped at DST_GC_MAX. */
	if ((dst_gc_timer_expires += dst_gc_timer_inc) > DST_GC_MAX)
		dst_gc_timer_expires = DST_GC_MAX;
	dst_gc_timer_inc += DST_GC_INC;
	dst_gc_timer.expires = jiffies + dst_gc_timer_expires;
#if RT_CACHE_DEBUG >= 2
	printk("dst_total: %d/%d %ld\n",
	       atomic_read(&dst_total), delayed, dst_gc_timer_expires);
#endif
	add_timer(&dst_gc_timer);
out:
	spin_unlock(&dst_lock);
}
void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_lock);
	___dst_free(dst);
	dst->next = dst_garbage_list;
	dst_garbage_list = dst;
	if (dst_gc_timer_inc > DST_GC_INC) {
		dst_gc_timer_inc = DST_GC_INC;
		dst_gc_timer_expires = DST_GC_MIN;
		mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
	}
	spin_unlock_bh(&dst_lock);
}
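/*
 * For context: a sketch of the timer-era GC state the two functions
 * above rely on. This is a reconstruction, not verbatim source; the
 * DST_GC_* values and the DEFINE_TIMER arguments are assumptions
 * inferred from how the code above uses them. These declarations
 * would sit near the top of the file, before dst_run_gc().
 */
#define DST_GC_MIN	(HZ/10)		/* shortest GC interval */
#define DST_GC_INC	(HZ/2)		/* back-off step per idle pass */
#define DST_GC_MAX	(120*HZ)	/* interval cap */

static struct dst_entry *dst_garbage_list;	/* entries pending destruction */
static atomic_t		 dst_total = ATOMIC_INIT(0);	/* live entries (debug) */
static unsigned long	 dst_gc_timer_expires;
static unsigned long	 dst_gc_timer_inc = DST_GC_MAX;
static DEFINE_SPINLOCK(dst_lock);

static void dst_run_gc(unsigned long);
static DEFINE_TIMER(dst_gc_timer, dst_run_gc, DST_GC_MIN, 0);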
/*
 * Reworked __dst_free(): the GC state now lives in the dst_garbage
 * structure, and collection is deferred to the dst_gc_work delayed
 * work item on the shared workqueue instead of to a kernel timer.
 */
void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		/* The interval had backed off; reschedule the work soon. */
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		cancel_delayed_work(&dst_gc_work);
		schedule_delayed_work(&dst_gc_work,
				      dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
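/*
 * For context: __dst_free() is normally reached through the dst_free()
 * inline from include/net/dst.h of the same era. Reproduced here as a
 * sketch (treat the details as an approximation): an unreferenced
 * entry is destroyed immediately, and only entries that are still
 * referenced, or that dst_destroy() hands back, are queued for the
 * garbage collector.
 */
static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 1)		/* already on the gc list */
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}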
/*
 * Workqueue-based garbage collector. Busy entries are parked on a
 * private dst_busy_list guarded by dst_gc_mutex, so dst_garbage.lock
 * is only taken briefly to pick up newly freed entries, and
 * cond_resched() keeps long walks preemptible.
 */
static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;
#if RT_CACHE_DEBUG >= 2
	ktime_t time_start = ktime_get();
	struct timespec elapsed;
#endif

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			/* Still referenced: keep it on the busy list. */
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 1)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		/* More entries were freed while we ran; collect them too. */
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
#if RT_CACHE_DEBUG >= 2
	elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
	printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
	       " expires: %lu elapsed: %lu us\n",
	       atomic_read(&dst_total), delayed, work_performed,
	       expires,
	       elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC);
#endif
}
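/*
 * For context: a sketch of the workqueue-era GC state used by the
 * functions above. Again a reconstruction rather than verbatim source;
 * the field layout of dst_garbage and the initializers are assumptions
 * consistent with the accesses in __dst_free() and dst_gc_task().
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;		/* newly freed, not yet scanned */
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};

static void dst_gc_task(struct work_struct *work);
static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/* entries found busy on the previous pass; touched only by dst_gc_task() */
static struct dst_entry *dst_busy_list;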