Example #1
/******************************************************************//**
Calling this function is obligatory only if the memory buffer containing
the rw-lock is freed. Removes an rw-lock object from the global list. The
rw-lock is checked to be in the non-locked state. */
UNIV_INTERN
void
rw_lock_free_func(
/*==============*/
	rw_lock_t*	lock)	/*!< in: rw-lock */
{
	ut_ad(rw_lock_validate(lock));
	ut_a(lock->lock_word == X_LOCK_DECR);

#ifndef INNODB_RW_LOCKS_USE_ATOMICS
	mutex_free(rw_lock_get_mutex(lock));
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */

	mutex_enter(&rw_lock_list_mutex);
	os_event_free(lock->event);

	os_event_free(lock->wait_ex_event);

	ut_ad(UT_LIST_GET_PREV(list, lock) == NULL
	      || UT_LIST_GET_PREV(list, lock)->magic_n == RW_LOCK_MAGIC_N);
	ut_ad(UT_LIST_GET_NEXT(list, lock) == NULL
	      || UT_LIST_GET_NEXT(list, lock)->magic_n == RW_LOCK_MAGIC_N);

	UT_LIST_REMOVE(list, rw_lock_list, lock);

	mutex_exit(&rw_lock_list_mutex);

	ut_d(lock->magic_n = 0);
}
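All of these examples revolve around the UT_LIST_* macros from InnoDB's ut0lst.h, which implement an intrusive doubly linked list: the prev/next pointers are embedded in the listed object itself, so no separate node allocation is needed and removal never fails. Below is a minimal self-contained sketch of that pattern in plain C; the names are illustrative, not the real macros, and the node_t/list_t helpers are reused by the later sketches.

#include <assert.h>
#include <stddef.h>

typedef struct node node_t;
struct node {
	int	data;		/* payload; stands in for the real object */
	node_t*	prev;		/* intrusive links, like the 'list' and */
	node_t*	next;		/* 'LRU' members used with UT_LIST above */
};

typedef struct {
	node_t*	first;		/* head (UT_LIST_GET_FIRST) */
	node_t*	last;		/* tail (UT_LIST_GET_LAST) */
	size_t	len;		/* length (UT_LIST_GET_LEN) */
} list_t;

/* counterpart of UT_LIST_REMOVE: unlink 'n' without freeing it */
static void
list_remove(list_t* l, node_t* n)
{
	if (n->prev != NULL) {
		n->prev->next = n->next;
	} else {
		assert(l->first == n);
		l->first = n->next;
	}

	if (n->next != NULL) {
		n->next->prev = n->prev;
	} else {
		assert(l->last == n);
		l->last = n->prev;
	}

	n->prev = NULL;
	n->next = NULL;
	l->len--;
}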
Example #2
void
rw_lock_free(
    /*=========*/
    rw_lock_t*	lock)	/* in: rw-lock */
{
    ut_ad(rw_lock_validate(lock));
    ut_a(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED);
    ut_a(rw_lock_get_waiters(lock) == 0);
    ut_a(rw_lock_get_reader_count(lock) == 0);

    lock->magic_n = 0;

    mutex_free(rw_lock_get_mutex(lock));

    mutex_enter(&rw_lock_list_mutex);
    os_event_free(lock->event);

#ifdef __WIN__
    os_event_free(lock->wait_ex_event);
#endif

    if (UT_LIST_GET_PREV(list, lock)) {
        ut_a(UT_LIST_GET_PREV(list, lock)->magic_n == RW_LOCK_MAGIC_N);
    }
    if (UT_LIST_GET_NEXT(list, lock)) {
        ut_a(UT_LIST_GET_NEXT(list, lock)->magic_n == RW_LOCK_MAGIC_N);
    }

    UT_LIST_REMOVE(list, rw_lock_list, lock);

    mutex_exit(&rw_lock_list_mutex);
}
Example #3
/******************************************************************//**
Calling this function is obligatory only if the memory buffer containing
the mutex is freed. Removes a mutex object from the mutex list. The mutex
is checked to be in the reset state. */
UNIV_INTERN
void
mutex_free(
/*=======*/
	mutex_t*	mutex)	/*!< in: mutex */
{
	ut_ad(mutex_validate(mutex));
	ut_a(mutex_get_lock_word(mutex) == 0);
	ut_a(mutex_get_waiters(mutex) == 0);

#ifdef UNIV_MEM_DEBUG
	if (mutex == &mem_hash_mutex) {
		ut_ad(UT_LIST_GET_LEN(mutex_list) == 1);
		ut_ad(UT_LIST_GET_FIRST(mutex_list) == &mem_hash_mutex);
		UT_LIST_REMOVE(list, mutex_list, mutex);
		goto func_exit;
	}
#endif /* UNIV_MEM_DEBUG */

	if (mutex != &mutex_list_mutex
#ifdef UNIV_SYNC_DEBUG
	    && mutex != &sync_thread_mutex
#endif /* UNIV_SYNC_DEBUG */
	    ) {

		mutex_enter(&mutex_list_mutex);

		ut_ad(!UT_LIST_GET_PREV(list, mutex)
		      || UT_LIST_GET_PREV(list, mutex)->magic_n
		      == MUTEX_MAGIC_N);
		ut_ad(!UT_LIST_GET_NEXT(list, mutex)
		      || UT_LIST_GET_NEXT(list, mutex)->magic_n
		      == MUTEX_MAGIC_N);

		UT_LIST_REMOVE(list, mutex_list, mutex);

		mutex_exit(&mutex_list_mutex);
	}

	os_event_free(mutex->event);
#ifdef UNIV_MEM_DEBUG
func_exit:
#endif /* UNIV_MEM_DEBUG */
#if !defined(HAVE_ATOMIC_BUILTINS)
	os_fast_mutex_free(&(mutex->os_fast_mutex));
#endif
	/* If we free the mutex protecting the mutex list (freeing is
	not necessary), we have to reset the magic number AFTER removing
	it from the list. */
#ifdef UNIV_DEBUG
	mutex->magic_n = 0;
#endif /* UNIV_DEBUG */
}
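Note the guard in mutex_free(): the mutex protecting the mutex list (and, under UNIV_SYNC_DEBUG, sync_thread_mutex) is never unlinked under its own protection, since taking mutex_list_mutex while freeing mutex_list_mutex would self-deadlock. A hedged sketch of the same guard around a hypothetical pthread-based registry (all names here are illustrative):

#include <pthread.h>
#include <stddef.h>

typedef struct mux mux_t;
struct mux {
	pthread_mutex_t	lock;
	mux_t*		prev;	/* intrusive registry links */
	mux_t*		next;
};

/* registry of all mux_t objects, protected by registry_guard.lock */
static mux_t	registry_guard = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };
static mux_t*	registry_first;

static void
registry_unlink(mux_t* m)
{
	if (m->prev != NULL) {
		m->prev->next = m->next;
	} else {
		registry_first = m->next;
	}
	if (m->next != NULL) {
		m->next->prev = m->prev;
	}
}

void
mux_destroy(mux_t* m)
{
	/* Like mutex_free() skipping mutex_list_mutex: taking the
	registry lock while destroying the registry's own guard
	would self-deadlock, so the guard is simply never unlinked. */
	if (m != &registry_guard) {
		pthread_mutex_lock(&registry_guard.lock);
		registry_unlink(m);
		pthread_mutex_unlock(&registry_guard.lock);
	}

	pthread_mutex_destroy(&m->lock);
}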
Example #4
/****************************************************************//**
Inserts the trx handle in the trx system trx list in the right position.
The list is sorted on the trx id so that the biggest id is at the list
start. This function is used at the database startup to insert incomplete
transactions to the list. */
static
void
trx_list_insert_ordered(
/*====================*/
	trx_t*	trx)	/*!< in: trx handle */
{
	trx_t*	trx2;

	ut_ad(mutex_own(&kernel_mutex));

	trx2 = UT_LIST_GET_FIRST(trx_sys->trx_list);

	while (trx2 != NULL) {
		if (ut_dulint_cmp(trx->id, trx2->id) >= 0) {

			ut_ad(ut_dulint_cmp(trx->id, trx2->id) == 1);
			break;
		}
		trx2 = UT_LIST_GET_NEXT(trx_list, trx2);
	}

	if (trx2 != NULL) {
		trx2 = UT_LIST_GET_PREV(trx_list, trx2);

		if (trx2 == NULL) {
			UT_LIST_ADD_FIRST(trx_list, trx_sys->trx_list, trx);
		} else {
			UT_LIST_INSERT_AFTER(trx_list, trx_sys->trx_list,
					     trx2, trx);
		}
	} else {
		UT_LIST_ADD_LAST(trx_list, trx_sys->trx_list, trx);
	}
}
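The find-then-link logic above reduces to a standard ordered insert into a descending-sorted doubly linked list. A self-contained sketch reusing the node_t/list_t helpers from the sketch after Example #1, with 'data' playing the role of trx->id:

/* counterpart of UT_LIST_ADD_FIRST */
static void
list_add_first(list_t* l, node_t* n)
{
	n->prev = NULL;
	n->next = l->first;
	if (l->first != NULL) {
		l->first->prev = n;
	} else {
		l->last = n;
	}
	l->first = n;
	l->len++;
}

/* counterpart of UT_LIST_ADD_LAST */
static void
list_add_last(list_t* l, node_t* n)
{
	n->next = NULL;
	n->prev = l->last;
	if (l->last != NULL) {
		l->last->next = n;
	} else {
		l->first = n;
	}
	l->last = n;
	l->len++;
}

/* counterpart of UT_LIST_INSERT_AFTER */
static void
list_insert_after(list_t* l, node_t* pos, node_t* n)
{
	n->prev = pos;
	n->next = pos->next;
	if (pos->next != NULL) {
		pos->next->prev = n;
	} else {
		l->last = n;
	}
	pos->next = n;
	l->len++;
}

/* descending order, largest key first, as in trx_list_insert_ordered */
static void
list_insert_ordered(list_t* l, node_t* n)
{
	node_t*	cur = l->first;

	/* find the first node whose key is not larger than n's */
	while (cur != NULL && cur->data > n->data) {
		cur = cur->next;
	}

	if (cur == NULL) {
		list_add_last(l, n);		/* smallest key so far */
	} else if (cur->prev == NULL) {
		list_add_first(l, n);		/* largest key so far */
	} else {
		list_insert_after(l, cur->prev, n);
	}
}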
Example #5
/*******************************************************************//**
Gets the previous lock in the lock queue, returns NULL if there are no
more locks (i.e. the current lock is the first one). The iterator recedes
to the returned lock if the return value is not NULL.
@return	previous lock or NULL */
UNIV_INTERN
const lock_t*
lock_queue_iterator_get_prev(
    /*=========================*/
    lock_queue_iterator_t*	iter)	/*!< in/out: iterator */
{
    const lock_t*	prev_lock;

    switch (lock_get_type_low(iter->current_lock)) {
    case LOCK_REC:
        prev_lock = lock_rec_get_prev(
                        iter->current_lock, iter->bit_no);
        break;
    case LOCK_TABLE:
        prev_lock = UT_LIST_GET_PREV(
                        un_member.tab_lock.locks, iter->current_lock);
        break;
    default:
        ut_error;
    }

    if (prev_lock != NULL) {

        iter->current_lock = prev_lock;
    }

    return(prev_lock);
}
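The convention here is that the iterator recedes only when a lock is actually returned, so after a NULL result it still points at the first lock rather than past the end. The same contract on the generic node_t from the first sketch (this is not the InnoDB lock_queue_iterator_t API):

typedef struct {
	const node_t*	current;	/* last node returned */
} iter_t;

/* returns the previous node, or NULL at the front of the list;
the iterator recedes only when a node is actually returned, as in
lock_queue_iterator_get_prev above */
static const node_t*
iter_get_prev(iter_t* it)
{
	const node_t*	prev = it->current->prev;

	if (prev != NULL) {
		it->current = prev;
	}

	return(prev);
}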
Example #6
/**********************************************************************
Removes a block from the LRU list. */
UNIV_INLINE
void
buf_LRU_remove_block(
/*=================*/
	buf_block_t*	block)	/* in: control block */
{
	ut_ad(buf_pool);
	ut_ad(block);
	ut_ad(mutex_own(&(buf_pool->mutex)));

	ut_a(block->state == BUF_BLOCK_FILE_PAGE);
	ut_a(block->in_LRU_list);

	/* If the LRU_old pointer is defined and points to just this block,
	move it backward one step */

	if (block == buf_pool->LRU_old) {

		/* Below: the previous block is guaranteed to exist, because
		the LRU_old pointer is only allowed to differ by the
		tolerance value from strict 3/8 of the LRU list length. */

		buf_pool->LRU_old = UT_LIST_GET_PREV(LRU, block);
		ut_a(buf_pool->LRU_old);
		(buf_pool->LRU_old)->old = TRUE;

		buf_pool->LRU_old_len++;
	}

	/* Remove the block from the LRU list */
	UT_LIST_REMOVE(LRU, buf_pool->LRU, block);
	block->in_LRU_list = FALSE;

	if (srv_use_awe && block->frame) {
		/* Remove from the list of mapped pages */

		UT_LIST_REMOVE(awe_LRU_free_mapped,
			       buf_pool->awe_LRU_free_mapped, block);
	}

	/* If the LRU list is so short that LRU_old is not defined, return */
	if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) {

		buf_pool->LRU_old = NULL;

		return;
	}

	ut_ad(buf_pool->LRU_old);

	/* Update the LRU_old_len field if necessary */
	if (block->old) {

		buf_pool->LRU_old_len--;
	}

	/* Adjust the length of the old block list if necessary */
	buf_LRU_old_adjust_len();
}
Example #7
ibool
buf_LRU_search_and_free_block(
/*==========================*/
				/* out: TRUE if freed */
	ulint	n_iterations)	/* in: how many times this has been called
				repeatedly without result: a high value means
				that we should search farther; if value is
				k < 10, then we only search k/10 * [number
				of pages in the buffer pool] from the end
				of the LRU list */
{
	buf_block_t*	block;
	ulint		distance = 0;
	ibool		freed;

	mutex_enter(&(buf_pool->mutex));

	freed = FALSE;
	block = UT_LIST_GET_LAST(buf_pool->LRU);

	while (block != NULL) {
		ut_a(block->in_LRU_list);

		mutex_enter(&block->mutex);
		freed = buf_LRU_free_block(block);
		mutex_exit(&block->mutex);

		if (freed) {
			break;
		}

		block = UT_LIST_GET_PREV(LRU, block);
		distance++;

		if (!freed && n_iterations <= 10
		    && distance > 100 + (n_iterations * buf_pool->curr_size)
		    / 10) {
			buf_pool->LRU_flush_ended = 0;

			mutex_exit(&(buf_pool->mutex));

			return(FALSE);
		}
	}
	if (buf_pool->LRU_flush_ended > 0) {
		buf_pool->LRU_flush_ended--;
	}
	if (!freed) {
		buf_pool->LRU_flush_ended = 0;
	}
	mutex_exit(&(buf_pool->mutex));

	return(freed);
}
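The loop above walks backward from the LRU tail and gives up once it has inspected a budgeted number of blocks, with the budget growing as repeated calls come up empty. The skeleton, on the generic list from the first sketch (try_free is a placeholder for buf_LRU_free_block, minus the per-block mutex handling):

/* placeholder for buf_LRU_free_block */
static int try_free(node_t* n);

/* walk backward from the tail, giving up after a budget of nodes;
the budget grows with n_iterations, as in the function above */
static int
search_and_free(list_t* l, unsigned n_iterations)
{
	size_t	distance = 0;
	node_t*	n = l->last;

	while (n != NULL) {
		if (try_free(n)) {
			return(1);	/* freed one node */
		}

		n = n->prev;
		distance++;

		if (n_iterations <= 10
		    && distance > 100 + (n_iterations * l->len) / 10) {

			return(0);	/* budget exhausted */
		}
	}

	return(0);
}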
Example #8
/**********************************************************************
Gives a recommendation of how many blocks should be flushed to establish
a big enough margin of replaceable blocks near the end of the LRU list
and in the free list. */
static
ulint
buf_flush_LRU_recommendation(void)
/*==============================*/
			/* out: number of blocks which should be flushed
			from the end of the LRU list */
{
	buf_block_t*	block;
	ulint		n_replaceable;
	ulint		distance	= 0;

	mutex_enter(&(buf_pool->mutex));

	n_replaceable = UT_LIST_GET_LEN(buf_pool->free);

	block = UT_LIST_GET_LAST(buf_pool->LRU);

	while ((block != NULL)
	       && (n_replaceable < BUF_FLUSH_FREE_BLOCK_MARGIN
		   + BUF_FLUSH_EXTRA_MARGIN)
	       && (distance < BUF_LRU_FREE_SEARCH_LEN)) {

		mutex_enter(&block->mutex);

		if (buf_flush_ready_for_replace(block)) {
			n_replaceable++;
		}

		mutex_exit(&block->mutex);

		distance++;

		block = UT_LIST_GET_PREV(LRU, block);
	}

	mutex_exit(&(buf_pool->mutex));

	if (n_replaceable >= BUF_FLUSH_FREE_BLOCK_MARGIN) {

		return(0);
	}

	return(BUF_FLUSH_FREE_BLOCK_MARGIN + BUF_FLUSH_EXTRA_MARGIN
	       - n_replaceable);
}
Example #9
void OSEventThread::rollback_timer(ULONG now){
    TRACE("~~~~ +rollback_timer ~~~~\n");
    OSMutexLocker _locker(&mMutex);

    //event_req_t * tev = mTimerList.prev;
    event_timer_t * tev = UT_LIST_GET_LAST(mTimerList);
    event_timer_t * prev;

    // walk the list from the back: a timeout more than 0xFFFFFF ticks
    // beyond 'now' can only mean the tick counter was rolled back, so
    // such timers must be fired now or they would wait almost forever
    TRACE("~~~~ Looking for timers <= %lu ~~~~\n", (unsigned long)now);
    while ((tev) && (now < tev->timeout) && (now + 0xFFFFFF) < tev->timeout) {
        // Timer unreachable after the rollback: fire it now
        TRACE("~~~~ firing timer ~~~~\n");
        prev = UT_LIST_GET_PREV(watchNode,tev);
        UT_LIST_REMOVE(watchNode,mTimerList,tev);
        UT_LIST_ADD_FIRST(activeNode,mTimeoutList,tev);
        tev = prev;
    }
    TRACE("~~~~ -rollback_timer ~~~~\n");
}
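rollback_timer saves the predecessor before UT_LIST_REMOVE because the node's links are no longer meaningful once it is unlinked. That remove-while-iterating shape, reusing list_remove from the first sketch (should_fire is a placeholder predicate):

/* placeholder predicate: should this timer fire? */
static int should_fire(const node_t* n);

static void
fire_expired_from_tail(list_t* l)
{
	node_t*	n = l->last;

	while (n != NULL && should_fire(n)) {
		node_t*	prev = n->prev;	/* save BEFORE unlinking */

		list_remove(l, n);	/* clears n->prev and n->next */
		/* ... move n to a fired list, as rollback_timer does ... */

		n = prev;
	}
}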
Example #10
/**********************************************************************//**
Remove a block from the appropriate buddy free list. */
UNIV_INLINE
void
buf_buddy_remove_from_free(
/*=======================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	buf_page_t*	bpage,		/*!< in: block to be removed */
	ulint		i)		/*!< in: index of
					buf_pool->zip_free[] */
{
#ifdef UNIV_DEBUG
	buf_page_t*	prev = UT_LIST_GET_PREV(list, bpage);
	buf_page_t*	next = UT_LIST_GET_NEXT(list, bpage);

	ut_ad(!prev || buf_page_get_state(prev) == BUF_BLOCK_ZIP_FREE);
	ut_ad(!next || buf_page_get_state(next) == BUF_BLOCK_ZIP_FREE);
#endif /* UNIV_DEBUG */

	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_FREE);
	UT_LIST_REMOVE(list, buf_pool->zip_free[i], bpage);
}
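Here the neighbor checks are compiled only under UNIV_DEBUG, so release builds pay nothing for the validation. The same shape with standard C assert, reusing list_remove from the first sketch (node_is_free is a placeholder for the buddy-state predicate):

#include <assert.h>

/* placeholder for the state predicate used above */
static int node_is_free(const node_t* n);

static void
list_remove_checked(list_t* l, node_t* n)
{
#ifndef NDEBUG
	/* debug builds validate the neighbors; release builds pay
	nothing, mirroring the UNIV_DEBUG block above */
	assert(n->prev == NULL || node_is_free(n->prev));
	assert(n->next == NULL || node_is_free(n->next));
#endif /* !NDEBUG */

	list_remove(l, n);
}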
Example #11
/***********************************************************************
Moves the LRU_old pointer so that the length of the old blocks list
is inside the allowed limits. */
UNIV_INLINE
void
buf_LRU_old_adjust_len(void)
/*========================*/
{
	ulint	old_len;
	ulint	new_len;

	ut_a(buf_pool->LRU_old);
	ut_ad(mutex_own(&(buf_pool->mutex)));
	ut_ad(3 * (BUF_LRU_OLD_MIN_LEN / 8) > BUF_LRU_OLD_TOLERANCE + 5);

	for (;;) {
		old_len = buf_pool->LRU_old_len;
		new_len = 3 * (UT_LIST_GET_LEN(buf_pool->LRU) / 8);

		ut_a(buf_pool->LRU_old->in_LRU_list);

		/* Update the LRU_old pointer if necessary */

		if (old_len < new_len - BUF_LRU_OLD_TOLERANCE) {

			buf_pool->LRU_old = UT_LIST_GET_PREV(
				LRU, buf_pool->LRU_old);
			(buf_pool->LRU_old)->old = TRUE;
			buf_pool->LRU_old_len++;

		} else if (old_len > new_len + BUF_LRU_OLD_TOLERANCE) {

			(buf_pool->LRU_old)->old = FALSE;
			buf_pool->LRU_old = UT_LIST_GET_NEXT(
				LRU, buf_pool->LRU_old);
			buf_pool->LRU_old_len--;
		} else {
			ut_a(buf_pool->LRU_old); /* Check that we did not
						 fall out of the LRU list */
			return;
		}
	}
}
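Examples #6 and #11 cooperate to keep LRU_old at the boundary of the 'old' sublist, whose length must stay within BUF_LRU_OLD_TOLERANCE of 3/8 of the LRU length: for a 400-block list the target is 3 * (400 / 8) = 150. A sketch of the boundary slide on the generic list; the real code also flips each block's 'old' flag, and the tolerance bound plus the BUF_LRU_OLD_MIN_LEN check in Example #6 guarantee the neighbor exists:

#define OLD_TOLERANCE	20	/* stand-in for BUF_LRU_OLD_TOLERANCE */

/* slide the old-sublist boundary until old_len is within
OLD_TOLERANCE of 3/8 of the list length; 'prev' points toward the
head (new blocks), 'next' toward the tail (old blocks) */
static void
adjust_old_boundary(list_t* l, node_t** old_ptr, size_t* old_len)
{
	for (;;) {
		size_t	target = 3 * (l->len / 8);

		if (*old_len + OLD_TOLERANCE < target) {
			/* grow the old sublist toward the head; the
			real code also sets the new boundary block's
			old flag to TRUE */
			*old_ptr = (*old_ptr)->prev;
			(*old_len)++;
		} else if (*old_len > target + OLD_TOLERANCE) {
			/* shrink it toward the tail; the real code
			clears the old flag first */
			*old_ptr = (*old_ptr)->next;
			(*old_len)--;
		} else {
			return;	/* within tolerance */
		}
	}
}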
Example #12
// Remove timer from watch list
void OSEventThread::DelTimer(event_timer_t * ev)
{
    TRACE("~~~~ +DelTimer ~~~~\n");
    if(!ev) return;

    {
        OSMutexLocker _locker(&mMutex);
        if(NULL == UT_LIST_GET_NEXT(watchNode,ev) || NULL == UT_LIST_GET_PREV(watchNode,ev))
        {
            UT_LIST_REMOVE(watchNode,mTimerList,ev);
            DecreaseWatchNum();
        }
    }
    wakeup();

    while (ev->status != 0)//just for single thread
    {
        WARN("wait exit from pending list \n");
        OSThread::Sleep(1);
    }
    TRACE("~~~~ -DelTimer ~~~~\n");
}
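After unlinking, DelTimer wakes the event loop and then sleep-polls until ev->status drops to zero, i.e., until any in-flight callback for this timer has drained; only then may the caller safely free the timer. A hedged sketch of that drain with C11 atomics (all names illustrative):

#include <stdatomic.h>
#include <unistd.h>

typedef struct {
	atomic_int	status;	/* nonzero while a callback is running */
} sketch_timer_t;

/* single-caller polling drain, like DelTimer's sleep loop */
static void
wait_until_idle(sketch_timer_t* t)
{
	while (atomic_load(&t->status) != 0) {
		usleep(1000);	/* back off ~1 ms, like OSThread::Sleep(1) */
	}
}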
Example #13
ulint
buf_flush_batch(
/*============*/
				/* out: number of blocks for which the write
				request was queued; ULINT_UNDEFINED if there
				was a flush of the same type already running */
	ulint	flush_type,	/* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST; if
				BUF_FLUSH_LIST, then the caller must not own
				any latches on pages */
	ulint	min_n,		/* in: wished minimum number of blocks flushed
				(it is not guaranteed that the actual number
				is that big, though) */
	dulint	lsn_limit)	/* in: in the case of BUF_FLUSH_LIST, all blocks whose
				oldest_modification is smaller than this
				should be flushed (if their number does not
				exceed min_n), otherwise ignored */
{
	buf_block_t*	block;
	ulint		page_count	= 0;
	ulint		old_page_count;
	ulint		space;
	ulint		offset;
	ibool		found;

	ut_ad((flush_type == BUF_FLUSH_LRU)
	      || (flush_type == BUF_FLUSH_LIST));
#ifdef UNIV_SYNC_DEBUG
	ut_ad((flush_type != BUF_FLUSH_LIST)
	      || sync_thread_levels_empty_gen(TRUE));
#endif /* UNIV_SYNC_DEBUG */
	mutex_enter(&(buf_pool->mutex));

	if ((buf_pool->n_flush[flush_type] > 0)
	    || (buf_pool->init_flush[flush_type] == TRUE)) {

		/* There is already a flush batch of the same type running */

		mutex_exit(&(buf_pool->mutex));

		return(ULINT_UNDEFINED);
	}

	(buf_pool->init_flush)[flush_type] = TRUE;

	for (;;) {
		/* If we have flushed enough, leave the loop */
		if (page_count >= min_n) {

			break;
		}

		/* Start from the end of the list looking for a suitable
		block to be flushed. */

		if (flush_type == BUF_FLUSH_LRU) {
			block = UT_LIST_GET_LAST(buf_pool->LRU);
		} else {
			ut_ad(flush_type == BUF_FLUSH_LIST);

			block = UT_LIST_GET_LAST(buf_pool->flush_list);
			if (!block
			    || (ut_dulint_cmp(block->oldest_modification,
					      lsn_limit) >= 0)) {
				/* We have flushed enough */

				break;
			}
		}

		found = FALSE;

		/* Note that after finding a single flushable page, we try to
		flush also all its neighbors, and after that start from the
		END of the LRU list or flush list again: the list may change
		during the flushing and we cannot safely preserve within this
		function a pointer to a block in the list! */

		while ((block != NULL) && !found) {
			ut_a(block->state == BUF_BLOCK_FILE_PAGE);

			mutex_enter(&block->mutex);

			if (buf_flush_ready_for_flush(block, flush_type)) {

				found = TRUE;
				space = block->space;
				offset = block->offset;

				mutex_exit(&block->mutex);
				mutex_exit(&(buf_pool->mutex));

				old_page_count = page_count;

				/* Try to flush also all the neighbors */
				page_count += buf_flush_try_neighbors(
					space, offset, flush_type);
				/* fprintf(stderr,
				"Flush type %lu, page no %lu, neighb %lu\n",
				flush_type, offset,
				page_count - old_page_count); */

				mutex_enter(&(buf_pool->mutex));

			} else if (flush_type == BUF_FLUSH_LRU) {

				mutex_exit(&block->mutex);

				block = UT_LIST_GET_PREV(LRU, block);
			} else {
				ut_ad(flush_type == BUF_FLUSH_LIST);

				mutex_exit(&block->mutex);

				block = UT_LIST_GET_PREV(flush_list, block);
			}
		}

		/* If we could not find anything to flush, leave the loop */

		if (!found) {
			break;
		}
	}

	(buf_pool->init_flush)[flush_type] = FALSE;

	if ((buf_pool->n_flush[flush_type] == 0)
	    && (buf_pool->init_flush[flush_type] == FALSE)) {

		/* The running flush batch has ended */

		os_event_set(buf_pool->no_flush[flush_type]);
	}

	mutex_exit(&(buf_pool->mutex));

	buf_flush_buffered_writes();

#ifdef UNIV_DEBUG
	if (buf_debug_prints && page_count > 0) {
		ut_a(flush_type == BUF_FLUSH_LRU
		     || flush_type == BUF_FLUSH_LIST);
		fprintf(stderr, flush_type == BUF_FLUSH_LRU
			? "Flushed %lu pages in LRU flush\n"
			: "Flushed %lu pages in flush list flush\n",
			(ulong) page_count);
	}
#endif /* UNIV_DEBUG */

	srv_buf_pool_flushed += page_count;

	return(page_count);
}
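The os_event_set() at the end matters for waiters: once n_flush[flush_type] has reached zero and init_flush is cleared, threads blocked on buf_pool->no_flush[flush_type] may proceed. Roughly the same protocol with a pthread condition variable (a sketch of the idea, not the os_event implementation):

#include <pthread.h>

static pthread_mutex_t	flush_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	no_flush = PTHREAD_COND_INITIALIZER;
static int		n_flush;	/* writes still outstanding */
static int		init_flush;	/* a batch is being set up */

/* called when a batch has queued all its writes, like the end of
buf_flush_batch above */
static void
flush_batch_done(void)
{
	pthread_mutex_lock(&flush_mutex);
	init_flush = 0;
	if (n_flush == 0) {
		/* the batch has fully ended: release all waiters */
		pthread_cond_broadcast(&no_flush);
	}
	pthread_mutex_unlock(&flush_mutex);
}

/* block until no flush of this type is in progress */
static void
wait_for_no_flush(void)
{
	pthread_mutex_lock(&flush_mutex);
	while (n_flush > 0 || init_flush) {
		pthread_cond_wait(&no_flush, &flush_mutex);
	}
	pthread_mutex_unlock(&flush_mutex);
}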
Example #14
/**********************************************************************
When doing a DROP TABLE/DISCARD TABLESPACE we have to drop all page
hash index entries belonging to that table. This function tries to
do that in batches. Note that this is a 'best-effort' attempt and does
not guarantee that ALL hash entries will be removed. */
static
void
buf_LRU_drop_page_hash_for_tablespace(
/*==================================*/
	ulint	id)	/* in: space id */
{
	buf_block_t*	block;
	ulint*		page_arr;
	ulint		num_entries;

	page_arr = ut_malloc(sizeof(ulint)
			     * BUF_LRU_DROP_SEARCH_HASH_SIZE);
	mutex_enter(&buf_pool->mutex);

scan_again:
	num_entries = 0;
	block = UT_LIST_GET_LAST(buf_pool->LRU);

	while (block != NULL) {
		buf_block_t*	prev_block;

		mutex_enter(&block->mutex);
		prev_block = UT_LIST_GET_PREV(LRU, block);

		ut_a(block->state == BUF_BLOCK_FILE_PAGE);

		if (block->space != id
		    || block->buf_fix_count > 0
		    || block->io_fix != 0) {
			/* We leave the fixed pages as is in this scan.
			To be dealt with later in the final scan. */
			mutex_exit(&block->mutex);
			goto next_page;
		}

		ut_ad(block->space == id);
		if (block->is_hashed) {

			/* Store the offset(i.e.: page_no) in the array
			so that we can drop hash index in a batch
			later. */
			page_arr[num_entries] = block->offset;
			mutex_exit(&block->mutex);
			ut_a(num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE);
			++num_entries;

			if (num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE) {
				goto next_page;
			}
			/* Array full. We release the buf_pool->mutex to
			obey the latching order. */
			mutex_exit(&buf_pool->mutex);

			buf_LRU_drop_page_hash_batch(id, page_arr,
						     num_entries);
			num_entries = 0;
			mutex_enter(&buf_pool->mutex);
		} else {
			mutex_exit(&block->mutex);
		}

next_page:
		/* Note that we may have released the buf_pool->mutex
		above after reading the prev_block during processing
		of a page_hash_batch (i.e.: when the array was full).
		This means that prev_block can change in LRU list.
		This is OK because this function is a 'best effort'
		to drop as many search hash entries as possible and
		it does not guarantee that ALL such entries will be
		dropped. */
		block = prev_block;

		/* If, however, block has been removed from LRU list
		to the free list then we should restart the scan.
		block->state is protected by buf_pool->mutex. */
		if (block && block->state != BUF_BLOCK_FILE_PAGE) {
			ut_a(num_entries == 0);
			goto scan_again;
		}
	}

	mutex_exit(&buf_pool->mutex);

	/* Drop any remaining batch of search hashed pages. */
	buf_LRU_drop_page_hash_batch(id, page_arr, num_entries);
	ut_free(page_arr);
}
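The batching exists to respect the latching order: buf_pool->mutex must not be held while the page hash drop takes the btr_search latch, so offsets are collected under the mutex and the heavy work happens after releasing it. The skeleton on the generic list from the first sketch (process_batch is a placeholder; like the original, the scan is best-effort once the lock has been dropped):

#include <pthread.h>
#include <stddef.h>

#define BATCH_SIZE	1024	/* stand-in for BUF_LRU_DROP_SEARCH_HASH_SIZE */

static pthread_mutex_t	pool_mutex = PTHREAD_MUTEX_INITIALIZER;

/* placeholder for buf_LRU_drop_page_hash_batch */
static void process_batch(const unsigned long* arr, size_t n);

static void
drain_from_tail(list_t* l)
{
	unsigned long	arr[BATCH_SIZE];
	size_t		n = 0;
	node_t*		node;

	pthread_mutex_lock(&pool_mutex);

	node = l->last;
	while (node != NULL) {
		node_t*	prev = node->prev;	/* read before any unlock */

		arr[n++] = (unsigned long) node->data;

		if (n == BATCH_SIZE) {
			/* release before the heavy work to obey the
			latching order; 'prev' may go stale meanwhile,
			which a best-effort scan tolerates */
			pthread_mutex_unlock(&pool_mutex);
			process_batch(arr, n);
			n = 0;
			pthread_mutex_lock(&pool_mutex);
		}

		node = prev;
	}

	pthread_mutex_unlock(&pool_mutex);

	process_batch(arr, n);	/* drop the remaining batch */
}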
Example #15
void
buf_LRU_invalidate_tablespace(
/*==========================*/
	ulint	id)	/* in: space id */
{
	buf_block_t*	block;
	ulint		page_no;
	ibool		all_freed;

	/* Before we attempt to drop pages one by one we first
	attempt to drop page hash index entries in batches to make
	it more efficient. The batching attempt is best-effort
	and does not guarantee that all page hash entries will be
	dropped. We get rid of the remaining page hash entries
	one by one below. */
	buf_LRU_drop_page_hash_for_tablespace(id);

scan_again:
	mutex_enter(&(buf_pool->mutex));

	all_freed = TRUE;

	block = UT_LIST_GET_LAST(buf_pool->LRU);

	while (block != NULL) {
		buf_block_t*	prev_block;

		mutex_enter(&block->mutex);
		prev_block = UT_LIST_GET_PREV(LRU, block);

		ut_a(block->state == BUF_BLOCK_FILE_PAGE);

		if (block->space == id
		    && (block->buf_fix_count > 0 || block->io_fix != 0)) {

			/* We cannot remove this page during this scan yet;
			maybe the system is currently reading it in, or
			flushing the modifications to the file */

			all_freed = FALSE;

			goto next_page;
		}

		if (block->space == id) {
#ifdef UNIV_DEBUG
			if (buf_debug_prints) {
				fprintf(stderr,
					"Dropping space %lu page %lu\n",
					(ulong) block->space,
					(ulong) block->offset);
			}
#endif
			if (block->is_hashed) {
				page_no = block->offset;

				mutex_exit(&block->mutex);

				mutex_exit(&(buf_pool->mutex));

				/* Note that the following call will acquire
				an S-latch on the page */

				btr_search_drop_page_hash_when_freed(id,
								     page_no);
				goto scan_again;
			}

			if (0 != ut_dulint_cmp(block->oldest_modification,
					       ut_dulint_zero)) {

				/* Remove from the flush list of modified
				blocks */
				block->oldest_modification = ut_dulint_zero;

				UT_LIST_REMOVE(flush_list,
					       buf_pool->flush_list, block);
			}

			/* Remove from the LRU list */
			buf_LRU_block_remove_hashed_page(block);
			buf_LRU_block_free_hashed_page(block);
		}
next_page:
		mutex_exit(&block->mutex);
		block = prev_block;
	}

	mutex_exit(&(buf_pool->mutex));

	if (!all_freed) {
		os_thread_sleep(20000);

		goto scan_again;
	}
}