Example #1
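/*
 * Free every object queued on @q and return it to its slab cache. If @cache
 * is non-NULL, all objects are assumed to belong to that cache; otherwise the
 * owning cache is looked up from each object individually.
 */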
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}
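The qlist helpers used throughout these examples (QLIST_INIT, qlist_empty(), qlist_init(), qlist_put()) are not shown; qlink_to_cache() and qlink_free(), which map a link back to its cache and actually free the object, are omitted here as well. What follows is a minimal, kernel-style sketch of the shape those list helpers are assumed to take, a singly linked FIFO with head and tail pointers plus a running byte count; this is illustrative only, and the real definitions may differ.

struct qlist_node {
	struct qlist_node *next;
};

struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;			/* total size of the queued objects */
};

#define QLIST_INIT	{ NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

/* Append @qlink to the tail of @q and account @size bytes for it. */
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		      size_t size)
{
	if (qlist_empty(q))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}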
Example #2
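/*
 * Shrink the global quarantine once it has grown past its limit: recompute
 * the limit (memory may have been hot-plugged), detach a batch of the oldest
 * entries while holding quarantine_lock, and free that batch outside the
 * lock.
 */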
void quarantine_reduce(void)
{
	size_t new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	struct qlist_head to_free = QLIST_INIT;
	size_t size_to_free = 0;
	struct qlist_node *last;

	if (likely(READ_ONCE(global_quarantine.bytes) <=
		   READ_ONCE(quarantine_size)))
		return;

	spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (new_quarantine_size < percpu_quarantines) ?
		0 : new_quarantine_size - percpu_quarantines;
	WRITE_ONCE(quarantine_size, new_quarantine_size);

	last = global_quarantine.head;
	while (last) {
		struct kmem_cache *cache = qlink_to_cache(last);

		size_to_free += cache->size;
		if (!last->next || size_to_free >
		    global_quarantine.bytes - QUARANTINE_LOW_SIZE)
			break;
		last = last->next;
	}
	qlist_move(&global_quarantine, last, &to_free, size_to_free);

	spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
}
Example #3
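/*
 * Variant of quarantine_reduce() on the raw void ** list representation,
 * where each link is a void ** whose pointee is the next link.
 */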
void quarantine_reduce(void)
{
	size_t new_quarantine_size;
	unsigned long flags;
	struct qlist to_free = QLIST_INIT;
	size_t size_to_free = 0;
	void **last;

	/* smp_load_acquire() here pairs with smp_store_release() below. */
	if (likely(ACCESS_ONCE(global_quarantine.bytes) <=
		   smp_load_acquire(&quarantine_size)))
		return;

	spin_lock_irqsave(&quarantine_lock, flags);

	/* Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	new_quarantine_size = (ACCESS_ONCE(totalram_pages) << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	new_quarantine_size -= QUARANTINE_PERCPU_SIZE * num_online_cpus();
	/* Pairs with smp_load_acquire() above and in QUARANTINE_LOW_SIZE. */
	smp_store_release(&quarantine_size, new_quarantine_size);

	last = global_quarantine.head;
	while (last) {
		struct kmem_cache *cache = qlink_to_cache(last);

		size_to_free += cache->size;
		if (!*last || size_to_free >
		    global_quarantine.bytes - QUARANTINE_LOW_SIZE)
			break;
		last = (void **) *last;
	}
	qlist_move(&global_quarantine, last, &to_free, size_to_free);

	spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
}
Example #4
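/*
 * Move every object belonging to @cache from @from to @to, preserving order,
 * and leave all other objects on @from.
 */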
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}
Example #5
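/*
 * Variant of qlist_move_cache() on the void ** list representation: the list
 * is walked with a pointer-to-pointer so that matching entries can be
 * unlinked in place instead of rebuilding the list.
 */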
static void qlist_move_cache(struct qlist *from,
				   struct qlist *to,
				   struct kmem_cache *cache)
{
	void ***prev;

	if (unlikely(qlist_empty(from)))
		return;

	prev = &from->head;
	while (*prev) {
		void **qlink = *prev;
		struct kmem_cache *obj_cache = qlink_to_cache(qlink);

		if (obj_cache == cache) {
			if (unlikely(from->tail == qlink))
				from->tail = (void **) prev;
			*prev = (void **) *qlink;
			from->bytes -= cache->size;
			qlist_put(to, qlink, cache->size);
		} else
			prev = (void ***) *prev;
	}
}
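The void ***prev walk in Example #5 is the usual unlink-in-place idiom: prev always points at the link field that refers to the current entry, so removing the entry is a single store through prev, while keeping the entry just advances prev to its embedded next link; Example #5 additionally fixes up from->tail and from->bytes when it removes an entry. Below is a stripped-down userspace sketch of the same idiom on a plain singly linked list (the node and remove_matching names are illustrative, not part of the kernel code above).

struct node {
	struct node *next;
	int key;
};

/* Unlink every node whose key matches @key, keeping the others in order. */
static void remove_matching(struct node **head, int key,
			    void (*dispose)(struct node *))
{
	struct node **prev = head;

	while (*prev) {
		struct node *cur = *prev;

		if (cur->key == key) {
			*prev = cur->next;	/* splice cur out of the list */
			dispose(cur);
		} else {
			prev = &cur->next;	/* advance to the next link field */
		}
	}
}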