/*
 * Move all objects belonging to @cache from list @from to list @to.
 *
 * The source list is drained in one shot and then rebuilt: every node is
 * detached up front (qlist_init), and each node is re-appended either to
 * @to (if it belongs to @cache) or back to @from (otherwise).  Relative
 * order of the surviving nodes is preserved, since qlist_put appends.
 */
static void qlist_move_cache(struct qlist_head *from, struct qlist_head *to,
			     struct kmem_cache *cache)
{
	struct qlist_node *qlink, *next;

	if (unlikely(qlist_empty(from)))
		return;

	/* Detach the whole chain; @from is rebuilt as we walk it. */
	qlink = from->head;
	qlist_init(from);

	for (; qlink; qlink = next) {
		struct kmem_cache *obj_cache = qlink_to_cache(qlink);
		/* Pick the destination list for this node. */
		struct qlist_head *dst = (obj_cache == cache) ? to : from;

		/* Save the link before qlist_put() reuses the node. */
		next = qlink->next;
		qlist_put(dst, qlink, obj_cache->size);
	}
}
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache) { unsigned long flags; struct qlist_head *q; struct qlist_head temp = QLIST_INIT; local_irq_save(flags); q = this_cpu_ptr(&cpu_quarantine); qlist_put(q, &info->quarantine_link, cache->size); if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) qlist_move_all(q, &temp); local_irq_restore(flags); if (unlikely(!qlist_empty(&temp))) { spin_lock_irqsave(&quarantine_lock, flags); qlist_move_all(&temp, &global_quarantine); spin_unlock_irqrestore(&quarantine_lock, flags); } }
/*
 * Move all objects belonging to @cache from list @from to list @to,
 * unlinking matching nodes from @from in place.
 *
 * NOTE(review): this variant assumes the older qlist representation in
 * which a node is a void** whose first word (*qlink) is the pointer to the
 * next node — so "the address of the previous node's next slot" and "the
 * previous node" are the same pointer value.  The tail/prev casts below
 * rely on that; confirm against this qlist's qlist_put/qlist_empty.
 */
static void qlist_move_cache(struct qlist *from, struct qlist *to,
			     struct kmem_cache *cache)
{
	/* Address of the link slot pointing at the current node:
	 * starts at &from->head, then tracks each node's next slot. */
	void ***prev;

	if (unlikely(qlist_empty(from)))
		return;

	prev = &from->head;
	while (*prev) {
		void **qlink = *prev;	/* current node */
		struct kmem_cache *obj_cache = qlink_to_cache(qlink);

		if (obj_cache == cache) {
			/* If we are unlinking the tail, the previous node
			 * (== the slot we hold in prev, see NOTE above)
			 * becomes the new tail. */
			if (unlikely(from->tail == qlink))
				from->tail = (void **) prev;
			/* Splice the node out: prev's slot now points past
			 * it; prev itself stays put so the next iteration
			 * examines the successor. */
			*prev = (void **) *qlink;
			from->bytes -= cache->size;
			qlist_put(to, qlink, cache->size);
		} else
			/* Advance: the node's first word is its next slot,
			 * so the node pointer doubles as the new prev. */
			prev = (void ***) *prev;
	}
}